prefix stringlengths 0 918k | middle stringlengths 0 812k | suffix stringlengths 0 962k |
|---|---|---|
import sys, csv, string
def generate_R_input(report_path, output_path):
    """Convert a prediction report CSV into a one-column R input file.

    Reads *report_path* (a CSV whose header row is skipped), takes column 6
    as the confidence when column 5 is 'Yes' and ``1 - column 6`` when it is
    'No', then writes the values one per line under a 'Confidence' header.

    :param report_path: path of the input report CSV.
    :param output_path: path of the R input file to create.
    """
    confidence_values = []
    # Fixes: the report file was never closed, and the writer had two
    # identical branches guarded by a useless first_val flag.
    with open(report_path, 'r') as report_file:
        first_line = True
        for line in report_file:
            if first_line:  # skip the CSV header row
                first_line = False
                continue
            fields = line.strip().split(',')
            if fields[5] == 'Yes':
                # Keep the raw string so the original number formatting
                # survives into the output file unchanged.
                confidence_values.append(fields[6])
            elif fields[5] == 'No':
                # Invert the confidence for negative predictions.
                confidence_values.append(1 - float(fields[6]))
    with open(output_path, 'w') as output_file:
        output_file.write('Confidence' + '\n')
        for confid in confidence_values:
            output_file.write(str(confid) + '\n')
if __name__ =="__main__":
_report_path = sys.argv[1]
_output_path = sys.argv[2]
generate_R_input(_report_path, _output_path)
|
import requests,sys
from config_helper import get_proxies
def get_pem():
if get_proxies()==None:
return None
pem_resp = requests.get('http://curl.haxx.se/ca/cacert.pem', proxies=get_proxies())
if pem_resp.status_code != 200:
print "ERROR: Received bad response from api: %d" % pem_resp.status_code
print "API Message: "+p | em_resp.text
sys.exit()
f = open('..\pemfile.pem','w')
f.write(pem_resp.text.encode('utf-8'))
f.close()
| return '..\pemfile.pem'
def get(url, proxies=None, auth=None,verify = True):
resp = requests.get(url, proxies=get_proxies(), auth=auth, verify=verify)
if resp.status_code != 200:
print "ERROR: Received bad response from api: %d" % resp.status_code
print "API Message: "+resp.text
sys.exit()
return resp.json()
|
#!/usr/bin/env python
"""Transform a CSV file exported from the Recorded Future UI into Maltego entities."""
import json
import sys
import csv
import Tkinter, tkFileDialog
from MaltegoTransform import *
mt = MaltegoTransform()

# Use Tkinter only to pop a native file-open dialog (no main window shown).
root = Tkinter.Tk()
root.lift()
root.withdraw()
sys.stderr.write("Click the Python icon to select a file.")
csvfilename = tkFileDialog.askopenfilename()

# Column names of the Recorded Future UI export.
_FIELDNAMES = ('Event Id', 'Event Type', 'Event Title', 'Start Time',
               'End Time', 'Precision', 'Count', 'First Published Time',
               'Last Published Time', 'Sample Fragment', 'Entities',
               'Locations', 'Source Count', 'Positive Sentiment',
               'Negative Sentiment')

# (entity field name, display name, CSV column) for each additional field
# attached to every RFEvent entity, in the original emission order.
_EXTRA_FIELDS = (
    ('eid', 'Event ID', 'Event Id'),
    ('etype', 'Event Type', 'Event Type'),
    ('title', 'Event Title', 'Event Title'),
    ('starttime', 'Start Time', 'Start Time'),
    ('stoptime', 'Stop Time', 'End Time'),
    ('fragment', 'Fragment', 'Sample Fragment'),
    ('precision', 'Precision', 'Precision'),
    ('count', 'Count', 'Count'),
    ('firstpublished', 'First Published', 'First Published Time'),
    ('lastpublished', 'Last Published', 'Last Published Time'),
    ('sourcecount', 'Source Count', 'Source Count'),
    ('pos_sentiment', 'Positive Sentiment', 'Positive Sentiment'),
    ('neg_sentiment', 'Negative Sentiment', 'Negative Sentiment'),
)

data = csv.DictReader(open(csvfilename), delimiter=',', fieldnames=_FIELDNAMES)
next(data)  # the export's first row repeats the header
for row in data:
    event = row['Event Type'] + "-" + row['Event Id']
    rfevent = mt.addEntity("recfut.RFEvent", event)
    for field_name, display_name, column in _EXTRA_FIELDS:
        rfevent.addAdditionalFields(field_name, display_name, False, row[column])
mt.addUIMessage("RF event load completed!")
mt.returnOutput()
|
# -*- coding: utf-8 -*-
#
# amsn - a python client for the WLM Network
#
# Copyright (C) 2008 Dario Freddi <drf54321@gmail.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
import papyon
import papyon.event
class AddressBookEvents(papyon.event.AddressBookEventInterface):
    """Bridge papyon address-book events to the aMSN contact-list manager.

    Every callback simply forwards its arguments to the matching handler
    on the core's contact-list manager; no state beyond the two references
    captured in __init__ is kept here.
    """
    def __init__(self, client, amsn_core):
        self._amsn_core = amsn_core
        # Shortcut to the manager all events are delegated to.
        self._contactlist_manager = amsn_core._contactlist_manager
        papyon.event.AddressBookEventInterface.__init__(self, client)
    def on_addressbook_messenger_contact_added(self, contact):
        self._contactlist_manager.on_contact_added(contact)
    def on_addressbook_contact_deleted(self, contact):
        self._contactlist_manager.on_contact_removed(contact)
    def on_addressbook_contact_blocked(self, contact):
        self._contactlist_manager.on_contact_blocked(contact)
    def on_addressbook_contact_unblocked(self, contact):
        self._contactlist_manager.on_contact_unblocked(contact)
    def on_addressbook_group_added(self, group):
        self._contactlist_manager.on_group_added(group)
    def on_addressbook_group_deleted(self, group):
        self._contactlist_manager.on_group_deleted(group)
    def on_addressbook_group_renamed(self, group):
        self._contactlist_manager.on_group_renamed(group)
    def on_addressbook_group_contact_added(self, group, contact):
        self._contactlist_manager.on_group_contact_added(group, contact)
    def on_addressbook_group_contact_deleted(self, group, contact):
        self._contactlist_manager.on_group_contact_deleted(group, contact)
|
""" XVM (c) www.modxvm.com 2013-2017 """
# PUBLIC
# Thin module-level facade over the _contacts singleton defined below.
def initialize():
    """Load (or reload) the XVM contacts cache."""
    return _contacts.initialize()
def isAvailable():
    """Return True when the contacts service is loaded and usable."""
    return _contacts.is_available
def getXvmContactData(uid):
    """Return {'nick', 'comment'} for player *uid* (values may be None)."""
    return _contacts.getXvmContactData(uid)
def setXvmContactData(uid, value):
    """Store nick/comment for *uid*; return True on success."""
    return _contacts.setXvmContactData(uid, value)
# PRIVATE
from random import randint
import traceback
from gui import SystemMessages
import simplejson
from xfw import *
from xvm_main.python.consts import *
from xvm_main.python.loadurl import loadUrl
from xvm_main.python.logger import *
import xvm_main.python.config as config
import xvm_main.python.utils as utils
from xvm_main.python.xvm import l10n
# Version tag expected in the serialized contacts payload ('ver' field).
_CONTACTS_DATA_VERSION = '1.0'
# Flash textformat wrapper for in-game system messages; %VALUE% is replaced
# with the message body before it is pushed to SystemMessages.
_SYSTEM_MESSAGE_TPL = '''<textformat tabstops="[130]"><img src="img://../xvm/res/icons/xvm/16x16t.png"
vspace="-5"> <a href="#XVM_SITE#"><font color="#E2D2A2">www.modxvm.com</font></a>\n\n%VALUE%</textformat>'''
class _Contacts:
    """Cache of per-player XVM nick/comment data.

    Data is fetched from the XVM comments service and cached per auth
    token; any fatal error permanently disables the feature for this
    session (contacts_disabled) and shows an in-game warning.
    """
    def __init__(self):
        # Parsed payload: {'ver': ..., 'players': {uid: {...}}} or None.
        self.cached_data = None
        # Token the cache was loaded with; a token change invalidates it.
        self.cached_token = None
        self.is_available = False
        # Latched on fatal errors or when the service is configured off.
        self.contacts_disabled = False
    def initialize(self):
        """(Re)load the comments cache; disable the feature on failure."""
        try:
            self.is_available = False
            if not self.contacts_disabled:
                self.contacts_disabled = not config.networkServicesSettings.comments
            if self.contacts_disabled:
                return
            if config.token.online:
                token = config.token.token
                if token is None:
                    raise Exception('[TOKEN_NOT_INITIALIZED] {0}'.format(l10n('Network services unavailable')))
                if self.cached_data is None or self.cached_token != token:
                    self.cached_token = token
                    json_data = self._doRequest('getComments')
                    # Empty response => start from a fresh, empty structure.
                    data = {'ver':_CONTACTS_DATA_VERSION,'players':{}} if json_data is None else simplejson.loads(json_data)
                    if data['ver'] != _CONTACTS_DATA_VERSION:
                        pass # data = convertOldVersion(data)
                    self.cached_data = data
                self.is_available = True
        except Exception as ex:
            # Any failure disables comments for the rest of the session and
            # surfaces a localized warning to the player.
            self.contacts_disabled = True
            self.is_available = False
            self.cached_token = None
            self.cached_data = None
            errstr = _SYSTEM_MESSAGE_TPL.replace('%VALUE%', '<b>{0}</b>\n\n{1}\n\n{2}'.format(
                l10n('Error loading comments'),
                str(ex),
                l10n('Comments disabled')))
            SystemMessages.pushMessage(errstr, type=SystemMessages.SM_TYPE.Warning)
            warn(traceback.format_exc())
        #log(self.cached_data)
    def getXvmContactData(self, uid):
        """Return {'nick', 'comment'} for *uid*; values default to None."""
        nick = None
        comment = None
        # Lazily (re)initialize when the cache was dropped.
        if not self.contacts_disabled and self.cached_data is None:
            self.initialize()
        if not self.contacts_disabled and self.cached_data is not None and 'players' in self.cached_data:
            data = self.cached_data['players'].get(str(uid), None)
            if data is not None:
                nick = data.get('nick', None)
                comment = data.get('comment', None)
        return {'nick':nick,'comment':comment}
    def setXvmContactData(self, uid, value):
        """Store (or clear) nick/comment for *uid* and push to the server.

        An empty nick AND empty comment removes the entry. Returns True on
        success, False after disabling the feature on error.
        """
        try:
            if self.cached_data is None or 'players' not in self.cached_data:
                raise Exception('[INTERNAL_ERROR]')
            if (value['nick'] is None or value['nick'] == '') and (value['comment'] is None or value['comment'] == ''):
                self.cached_data['players'].pop(str(uid), None)
            else:
                self.cached_data['players'][str(uid)] = value
            # The whole payload is re-sent on every change.
            json_data = simplejson.dumps(self.cached_data)
            #log(json_data)
            self._doRequest('addComments', json_data)
            return True
        except Exception as ex:
            self.contacts_disabled = True
            self.is_available = False
            self.cached_token = None
            self.cached_data = None
            errstr = _SYSTEM_MESSAGE_TPL.replace('%VALUE%', '<b>{0}</b>\n\n{1}\n\n{2}'.format(
                l10n('Error saving comments'),
                str(ex),
                l10n('Comments disabled')))
            SystemMessages.pushMessage(errstr, type=SystemMessages.SM_TYPE.Error)
            err(traceback.format_exc())
            return False
    # PRIVATE
    def _doRequest(self, cmd, body=None):
        """POST/GET *cmd* to a random XVM server; return the response body.

        Empty-ish responses ('', '[]', '{}') are normalized to None.
        Raises on transport errors.
        """
        req = '{0}/{1}'.format(cmd, self.cached_token)
        # Pick a server at random for rudimentary load spreading.
        server = XVM.SERVERS[randint(0, len(XVM.SERVERS) - 1)]
        (response, duration, errStr) = loadUrl(server, req, body=body, api=XVM.API_VERSION_OLD)
        if errStr:
            raise Exception(errStr)
        response = response.strip()
        if response in ('', '[]', '{}'):
            response = None
        # log(utils.hide_guid(response))
        return response
# Module-level singleton backing the public API above.
_contacts = _Contacts()
|
# coding=utf-8
"""Select users to be notified."""
__author__ = 'Christian Christelis <christian@kartoza.com>'
__project_name = 'watchkeeper'
__date__ = '27/05/15'
__copyright__ = 'kartoza.com'
__doc__ = ''
from celery import shared_task
from notifications.tasks.send_email import send_email_message
from notifications.tasks.send_sms import send_sms_message
from event_mapper.models.user import User
from event_mapper.models.event import Event
def generate_email_report(event):
    """Generate report for email as html.

    Builds a fixed HTML shell around ``event.html_table_row()``.

    NOTE(review): the literals contain quoted-printable escapes (=3D, =20,
    =C2=A0) — presumably pasted from an already-encoded email; confirm the
    mailer expects quoted-printable input before changing them.

    :param event: Event object
    :return: A html string represent the report.
    """
    html_report = """
<html>
<head>
<meta name=3D"generator" content=3D"Windows Mail 17.5.9600.20911">
<style data-externalstyle=3D"true"><!--
p.MsoListParagraph, li.MsoListParagraph, div.MsoListParagraph {
margin-top:0in;
margin-right:0in;
margin-bottom:0in;
margin-left:.5in;
margin-bottom:.0001pt;
}
p.MsoNormal, li.MsoNormal, div.MsoNormal {
margin:0in;
margin-bottom:.0001pt;
}
p.MsoListParagraphCxSpFirst, li.MsoListParagraphCxSpFirst, div.MsoListParagraphCxSpFirst,=20
p.MsoListParagraphCxSpMiddle, li.MsoListParagraphCxSpMiddle, div.MsoListParagraphCxSpMiddle,=20
p.MsoListParagraphCxSpLast, li.MsoListParagraphCxSpLast, div.MsoListParagraphCxSpLast {
margin-top:0in;
margin-right:0in;
margin-bottom:0in;
margin-left:.5in;
margin-bottom:.0001pt;
line-height:115%;
}
--></style></head>
<body dir=3D"ltr">
<div><h2>"
</h2> <img tabindex=3D"-1" src=3D"http://watchkeeper.kartoza.com/static/event_mapper/css/images/logo.fa285e1ad75d.png">
"""
    html_report += """
<table width=3D"699" tabindex=3D"-1" style=3D"border-collapse: collapse;" cellspacing=3D"0" cellpadding=3D"0">
<tbody>"""
    # One table row describing this event.
    html_report += event.html_table_row()
    html_report += """</tbody>
</table><h2 style=3D"color: rgb(149, 55, 53); font-family: trebuchet MS; font-size: 12pt; font-weight: bold; margin-bottom: 0px;">"""
    html_report += """
</div>
</body>
</html>
<br>
This email and any files transmitted with it are confidential and intended solely for the use of the individual or entity to whom they are addressed. If you have received this email in error please notify the system manager. Any views or opinions presented in this email are solely those of the author and do not necessarily represent those of iMMAP. The recipient should check this email and any attachments for the presence of viruses. iMMAP accepts no liability for any damage caused by any virus transmitted by this email.</font><br><div style=3D"font-family:Arial,Helvetica,sans-serif;font-size:1.3em"><div><font size=3D"1" style=3D"background-color:white"><br></font><div><font size=3D"1" style=3D"background-color:white"><font face=3D"Arial, Helvetica, sans-serif">iMMAP, 1300 Pennsylvania Avenue, N.W.,=C2=A0</font>Suite 470=C2=A0Washington DC 20004, <a href=3D"http://www.immap.org" target=3D"_blank">www.immap.org</a></font></div></div></div>
"""
    return html_report
@shared_task
def notify_priority_users(event_id):
    """Celery task: immediately email users watching the event's location.

    Selects users whose notified countries' polygons contain the event
    location and who opted into immediate notification.

    :param event_id: primary key of the Event to report.
    """
    event = Event.objects.get(id=event_id)
    users = User.objects.filter(
        countries_notified__polygon_geometry__contains=event.location,
        notify_immediately=True)
    for user in users:
        # NOTE(review): sends the event's own html_report(), not the
        # generate_email_report(event) helper above — confirm intended.
        send_email_message(user, event.text_report(), event.html_report())
|
#!/usr/bin/env python
import os, sys, fcntl, socket, subprocess, struct, signal, time, logging
import datetime
from glob import glob
from ConfigParser import ConfigParser
#######################################################################
# CONFIG
#######################################################################
# Where to make a log file
LOGDIR = "/appl/logs/transcoder/"
#LOGFILE = LOGDIR + str(datetime.date.today()) + "_udpwatch.log"
#LOGFILE = LOGDIR + "udpwatch_" + str(datetime.date.today()) + ".log"
LOGFILE = LOGDIR + "udpwatch.log"
########################################################################
########################################################################
########################################################################
def script_running(lockfile):
    """Return True when another instance already holds *lockfile*.

    The open handle is stashed in the module-global ``file_handle`` so the
    exclusive lock stays held for the lifetime of this process.
    """
    global file_handle
    file_handle = open(lockfile, 'w')
    try:
        # A second process fails the non-blocking exclusive lock attempt.
        fcntl.lockf(file_handle, fcntl.LOCK_EX | fcntl.LOCK_NB)
    except IOError:
        return True
    return False
def setup_logging():
    """Configure file logging and return the 'Transcoder' logger.

    Registers a custom NORMAL level (25, between INFO and WARNING) and a
    ``logger.normal()`` convenience wrapper for it.
    """
    logging.NORMAL = 25
    logging.addLevelName(logging.NORMAL, "NORMAL")
    # NOTE(review): writes to LOGFILE under /appl/logs — the directory must
    # already exist or basicConfig will fail at startup.
    logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', level=logging.INFO, filename = LOGFILE)
    #logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', level=logging.INFO)
    logger = logging.getLogger("Transcoder")
    # Uses the private Logger._log API to emit at the custom level.
    logger.normal = lambda msg, *args: logger._log(logging.NORMAL, msg, args)
    return logger
def create_udp_socket(ip, port, timeout):
    """Create a UDP socket bound to and joined with multicast group *ip*.

    :param ip: multicast group address to bind and join.
    :param port: UDP port to bind.
    :param timeout: receive timeout in seconds.
    :return: the configured socket, or None when setup failed (the error
        is logged via the module-level logger).
    """
    # Fix: on failure the original called sock.close() even when
    # socket.socket() itself raised, producing a NameError; it also fell
    # through with an implicit None — made explicit here.
    sock = None
    try:
        sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        sock.bind((ip, port))
        # Join the multicast group on all interfaces.
        mreq = struct.pack("=4sl", socket.inet_aton(ip), socket.INADDR_ANY)
        sock.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq)
        sock.settimeout(timeout)
        return sock
    except socket.error as msg:
        logger.error(msg)
        if sock is not None:
            sock.close()
        return None
def get_enabled_channels(confdir):
    """Build a dict of channel configurations from every *.ini in *confdir*.

    Keys are the config file basenames (without extension); each value
    bundles the [General] settings plus the PID of the ffmpeg process
    currently serving the channel's multicast output (False when none).

    :param confdir: directory path, expected to end with a separator.
    """
    CHANNELS = {}
    for config_file in glob(confdir + "*.ini"):
        config = ConfigParser()
        config.read(config_file)
        PATH, NAME = os.path.split(config_file)
        NAME, EXT = os.path.splitext(NAME)
        # Look up the transcoder process for this channel's output address.
        PID = get_ffmpeg_pid(config.get('General', 'MCAST_OUT_IP'), config.get('General', 'MCAST_OUT_PORT'))
        CHANNELS[NAME] = {
            "NAME": NAME,
            "PID": PID,
            "MCAST_IP": config.get('General', 'MCAST_OUT_IP'),
            "MCAST_PORT": config.get('General', 'MCAST_OUT_PORT'),
            "INPUT_STREAM": config.get('General', 'INPUT_STREAM'),
            "VIDEO_BITRATE": config.get('General', 'VIDEO_BITRATE'),
            "AUDIO_BITRATE": config.get('General', 'AUDIO_BITRATE'),
            "CODEC": config.get('General', 'CODEC'),
            "VIDEO_MAPPING": config.get('General', 'VIDEO_MAPPING'),
            "AUDIO_MAPPING": config.get('General', 'AUDIO_MAPPING'),
            "MUXRATE": config.get('General', 'MUXRATE'),
            "LOGLEVEL": config.get('General', 'LOGLEVEL')
        }
    return CHANNELS
def get_ffmpeg_pid(ip, port):
    """Return the pid (as pgrep's raw output, stripped) of the process
    whose command line contains "ip:port", or False when none matches."""
    proc = subprocess.Popen(['pgrep', '-f', ip + ":" + str(port)],
                            stdout=subprocess.PIPE)
    out, _ = proc.communicate()
    # Empty output means pgrep found nothing.
    return out.rstrip() if out else False
def kill_pid(pid, channel_name):
    """SIGKILL process *pid* (string or int), logging the channel name."""
    logger.warning("%s Killing PID %s", channel_name, pid)
    os.kill(int(pid), signal.SIGKILL)
def check_output(channel_name, mcast_ip, mcast_port, udp_data_timeout, probe_time):
    """Probe a channel's multicast output; kill ffmpeg when it goes silent.

    Listens on mcast_ip:mcast_port for up to *probe_time* seconds. If no
    datagram arrives within *udp_data_timeout* seconds, the ffmpeg process
    serving that group is killed so a supervisor can restart it.

    :param udp_data_timeout: per-recv socket timeout in seconds.
    :param probe_time: total probing window in seconds (must be > 0; see
        the NOTE at the bottom).
    """
    logger.debug("Check output started")
    PID = get_ffmpeg_pid(mcast_ip, mcast_port)
    if PID != False:
        logger.debug("%s PID %s is already running with %s:%s", channel_name, PID, mcast_ip, mcast_port)
        # Create a UDP listening socket
        s = create_udp_socket(mcast_ip, mcast_port, udp_data_timeout)
        startTime = time.time()
        bytes = 0  # NOTE(review): shadows the builtin 'bytes'
        while time.time() - startTime < probe_time:
            try:
                data = False
                data = s.recv(10240)
                bytes += len(data)
                logger.debug("%s PID %s Received %s bytes on %s:%s", channel_name, PID, len(data), mcast_ip, mcast_port)
                #continue
            except KeyboardInterrupt:
                logger.info("Closing UDP socket")
                s.close()
                logger.info("Script terminated")
                sys.exit(0)
            except socket.timeout:
                # socket receive timed out, means there's no data coming on that UDP
                logger.error("%s PID %s - No mcast output on %s:%s", channel_name, PID, mcast_ip, mcast_port)
                # Need to get the PID again here, to make sure there's something to kill,
                # because ffmpeg might have died completely
                PID = get_ffmpeg_pid(mcast_ip, mcast_port)
                if PID != False:
                    kill_pid(PID, channel_name)
                # and break out from the while loop
                break
            except socket.error:
                # some other error happened on the socket
                logger.error("%s Socket error", channel_name)
                break
        # END of while
        # NOTE(review): 'data' is unbound if probe_time <= 0 (loop body never
        # runs) — this would raise NameError; confirm callers pass > 0. Also,
        # if create_udp_socket() failed, s is None and s.recv raises.
        if data != False:
            # if there's UDP data again, let's log NORMAL message
            logger.normal("%s PID %s received %s bytes on %s:%s", channel_name, PID, bytes, mcast_ip, mcast_port)
            #logger.normal("%s PID %s is running with %s:%s", channel_name, PID, mcast_ip, mcast_port)
    else:
        logger.error("%s %s:%s is not running.", channel_name, mcast_ip, mcast_port)
def main():
    """Entry point: validate argv and run a single probe cycle.

    Usage: udpwatch.py <CHANNEL_NAME> <MCAST_IP> <MCAST_PORT>
           <UDP_DATA_TIMEOUT> <PROBE_TIME>
    """
    # some dirty commandline argument parser ;)
    # Fix: the probe needs all five positional arguments (argv[1..5]); the
    # original only required one and crashed with IndexError later. The
    # example string also contradicted the usage order (channel name first).
    if len(sys.argv) < 6:
        logger.error("No arguments - Please specify command line arguments")
        logger.info(sys.argv[0] + " <CHANNEL_NAME> <MCAST_IP> <MCAST_PORT> <UDP_DATA_TIMEOUT> <PROBE_TIME>")
        logger.info("Example: " + sys.argv[0] + " RCKTV 239.255.14.5 3199 5 10")
        logger.info("Exiting...")
        sys.exit(1)
    else:
        # One lock per channel so different channels may probe concurrently.
        if script_running("/dev/shm/" + str(sys.argv[1]) + "_udpwatch.lock"):
            logger.warning("Script is already running - exiting...")
            sys.exit(0)
        logger.debug("We have arguments: %s", sys.argv)
        check_output(str(sys.argv[1]), str(sys.argv[2]), int(sys.argv[3]), int(sys.argv[4]), int(sys.argv[5]))
####################################################################
# MAIN #############################################################
####################################################################
# setup logging
logger = setup_logging()
# prevent multiple instances
file_handle = None  # holds the lock file handle opened by script_running()
if __name__ == '__main__':
    main()
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2018-03-11 17:04
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Adds the MainPoster model (a BasePost subclass with translated
    titles and a banner image), renames BoardBanner's verbose names, and
    extends the Board.role choices."""

    dependencies = [
        ('board', '0006_merge_20180311_1702'),
    ]

    operations = [
        migrations.CreateModel(
            name='MainPoster',
            fields=[
                # Multi-table inheritance link back to BasePost.
                ('basepost_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='board.BasePost')),
                ('title', models.CharField(max_length=128, verbose_name='제목')),
                # Per-language variants (modeltranslation-style columns).
                ('title_ko', models.CharField(max_length=128, null=True, verbose_name='제목')),
                ('title_en', models.CharField(max_length=128, null=True, verbose_name='제목')),
                ('image', models.ImageField(upload_to='banner', verbose_name='이미지')),
            ],
            options={
                'verbose_name': '메인포스터',
                'verbose_name_plural': '메인포스터(들)',
            },
            bases=('board.basepost',),
        ),
        migrations.AlterModelOptions(
            name='boardbanner',
            options={'verbose_name': '게시판 배너', 'verbose_name_plural': '게시판 배너(들)'},
        ),
        migrations.AlterField(
            model_name='board',
            name='role',
            field=models.CharField(choices=[('DEFAULT', '기본'), ('PROJECT', '사업'), ('PLANBOOK', '정책자료집'), ('DEBATE', '논의'), ('ARCHIVING', '아카이빙'), ('WORKHOUR', '상근관리'), ('SPONSOR', '제휴리스트'), ('SWIPER', '격주보고'), ('STORE', '상점'), ('CONTACT', '산하기구')], default='DEFAULT', max_length=32, verbose_name='보드 역할'),
        ),
    ]
|
#!/usr/bin/env python
#
# Copyright 2021 Google Inc.
#
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Delete files from the temporary directory on a Swarming bot."""
import os
import sys
if sys.platform == 'win32':
    # Windows: forfiles enumerates the bot's temp dir; plain files are
    # deleted, directories removed recursively.
    os.system(r'forfiles /P c:\users\chrome~1\appdata\local\temp '
              r'/M * /C "cmd /c if @isdir==FALSE del @file"')
    os.system(r'forfiles /P c:\users\chrome~1\appdata\local\temp '
              r'/M * /C "cmd /c if @isdir==TRUE rmdir /S /Q @file"')
else:
    # POSIX bots keep their temp files in /tmp.
    os.system(r'rm -rf /tmp/*')
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2007 by Kai Blin
#
# Plunger is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
"""The plunger file handler for the MD3 format.
The module supports export only for now.
"""
import math
import struct
try:
from plunger import toolbox
except ImportError:
import sys
sys.path.append('..')
import toolbox
sys.path.pop()
format = "md3"
extension = ".md3"
needs_dir = False
does_export = True
does_import = False
# Info from http://icculus.org/homepages/phaethon/q3a/formats/md3format.html
# Augmented by the libmodelfile headers by Alistair Riddoch, as the specfile
# is kind of inaccurate.
MD3_IDENT = "IDP3"
MD3_VERSION = 15
MD3_MAX_FRAMES = 1024
MD3_MAX_TAGS = 16
MD3_MAX_SURFACES = 32
MD3_MAX_SHADERS = 256
MD3_MAX_VERTS = 4096
MD3_MAX_TRIANGLES = 8192
class Md3Frame:
    """One animation frame header of an MD3 model (bounds, origin, radius)."""

    def __init__(self):
        self.min_bounds = [0, 0, 0]    # AABB minimum corner
        self.max_bounds = [0, 0, 0]    # AABB maximum corner
        self.local_origin = [0, 0, 0]  # usually (0, 0, 0)
        self.radius = 0.0              # bounding-sphere radius
        self.name = ""
        self.fmt = "fff fff fff f 8s"  # on-disk layout of one frame

    def packSize(self):
        """Byte size of the packed representation."""
        return struct.calcsize(self.fmt)

    def pack(self):
        """Serialize to the binary on-disk layout.

        Fixes the original, which called the nonexistent ``list.split()``
        and passed whole lists to struct.pack instead of unpacking them.
        """
        name = self.name if isinstance(self.name, bytes) else self.name.encode('ascii')
        packed = b""
        packed += struct.pack("fff", *self.min_bounds)
        packed += struct.pack("fff", *self.max_bounds)
        packed += struct.pack("fff", *self.local_origin)
        packed += struct.pack("f", self.radius)
        packed += struct.pack("8s", name)
        return packed
class Md3Tag:
    """An attachment tag: a named origin plus a 3x3 orientation matrix."""

    def __init__(self):
        self.name = ""
        self.origin = [0, 0, 0]
        # Orientation axes, identity by default.
        self.axis = [[1, 0, 0], [0, 1, 0], [0, 0, 1]]
        self.fmt = "64s fff fff fff fff"

    def packSize(self):
        """Byte size of the packed representation."""
        return struct.calcsize(self.fmt)

    def pack(self):
        """Serialize to the binary on-disk layout.

        Fixes the original, which called the nonexistent ``list.split()``
        and passed whole lists to struct.pack instead of unpacking them.
        """
        name = self.name if isinstance(self.name, bytes) else self.name.encode('ascii')
        packed = b""
        packed += struct.pack("64s", name)
        packed += struct.pack("fff", *self.origin)
        for row in self.axis:
            packed += struct.pack("fff", *row)
        return packed
class Md3Shader:
    """A shader reference for a surface: texture name plus shader index."""

    def __init__(self):
        self.name = ""
        self.index = 0
        self.fmt = "64s i"

    def packSize(self):
        """Byte size of the packed representation."""
        return struct.calcsize(self.fmt)

    def pack(self):
        """Serialize to the binary on-disk layout.

        Fixes the original, which built the buffer but never returned it.
        """
        name = self.name if isinstance(self.name, bytes) else self.name.encode('ascii')
        packed = struct.pack("64s", name)
        packed += struct.pack("i", self.index)
        return packed
class Md3Triangle:
    """A triangle: three vertex indices into the surface's vertex list."""

    def __init__(self):
        self.indices = [0, 0, 0]
        self.fmt = "iii"

    def packSize(self):
        """Byte size of the packed representation."""
        return struct.calcsize(self.fmt)

    def pack(self):
        """Serialize the indices (fixes the bogus ``list.split()`` call)."""
        return struct.pack("iii", *self.indices)
class Md3TexCoord:
    """A texture coordinate pair (u, v) for one vertex."""

    def __init__(self):
        self.uv_coords = [0, 0]
        self.fmt = "ff"

    def packSize(self):
        """Byte size of the packed representation."""
        return struct.calcsize(self.fmt)

    def pack(self):
        """Serialize the pair (fixes the bogus ``list.split()`` call)."""
        return struct.pack(self.fmt, *self.uv_coords)
class Md3Vertex:
    """A compressed vertex: int16 coords plus a 2-byte encoded normal."""

    def __init__(self):
        self.coord = [0, 0, 0]
        self.normal = [0, 0]
        # MD3 stores coordinates as int16 in 1/64 unit steps.
        self.factor = 1.0 / 64
        self.fmt = "hhh BB"

    def packSize(self):
        """Byte size of the packed representation."""
        return struct.calcsize(self.fmt)

    def pack(self):
        """Serialize to the binary on-disk layout.

        Fixes the original, which called the nonexistent ``list.split()``
        and never returned the buffer from a bytes-safe concatenation.
        """
        packed = b""
        packed += struct.pack("hhh", *self.coord)
        packed += struct.pack("BB", *self.normal)
        return packed

    def scaleDown(self, coords):
        """Convert raw int16 file units to model units (multiply by 1/64)."""
        return [i * self.factor for i in coords]
class Md3Surface:
    """One MD3 surface: a mesh with shaders, triangles, UVs and vertices."""

    def __init__(self):
        self.ident = MD3_IDENT
        self.name = ""
        self.num_frames = 0
        self.num_shaders = 0
        self.num_verts = 0
        self.num_triangles = 0
        self.shaders = []
        self.triangles = []
        self.uv_coords = []
        self.vertices = []
        # Fixed-size surface header layout.
        self.fmt = "4s 68s iiiiiiiii"

    def packSize(self):
        """Total byte size: header plus all variable-length sections."""
        size = struct.calcsize(self.fmt)
        size += len(self.shaders) * Md3Shader().packSize()
        size += len(self.triangles) * Md3Triangle().packSize()
        size += len(self.uv_coords) * Md3TexCoord().packSize()
        size += len(self.vertices) * Md3Vertex().packSize()
        return size

    def pack(self):
        """Serialize the surface: header, section offsets, then sections.

        Fixes the original, which built the buffer but never returned it.
        """
        def _b(s):
            # Accept either str or bytes names.
            return s if isinstance(s, bytes) else s.encode('ascii')

        packed = b""
        packed += struct.pack("4s", _b(self.ident))
        packed += struct.pack("68s", _b(self.name))
        packed += struct.pack("ii", self.num_frames, self.num_shaders)
        packed += struct.pack("ii", self.num_verts, self.num_triangles)
        # Sections follow the header in this order: shaders, triangles,
        # texture coordinates, vertices.
        ofs_shaders = struct.calcsize(self.fmt)
        ofs_triangles = ofs_shaders + len(self.shaders) * Md3Shader().packSize()
        ofs_uv_coords = ofs_triangles + len(self.triangles) * Md3Triangle().packSize()
        ofs_vertices = ofs_uv_coords + len(self.uv_coords) * Md3TexCoord().packSize()
        ofs_end = ofs_vertices + len(self.vertices) * Md3Vertex().packSize()
        # The header stores ofs_triangles before ofs_shaders (per the spec).
        packed += struct.pack("ii", ofs_triangles, ofs_shaders)
        packed += struct.pack("iii", ofs_uv_coords, ofs_vertices, ofs_end)
        for shader in self.shaders:
            packed += shader.pack()
        for tri in self.triangles:
            packed += tri.pack()
        for texcoord in self.uv_coords:
            packed += texcoord.pack()
        for vert in self.vertices:
            packed += vert.pack()
        return packed
class MD3Object:
    """Top-level MD3 model: file header plus frames, tags and surfaces."""

    def __init__(self):
        self.ident = MD3_IDENT
        self.version = MD3_VERSION
        self.name = ""
        self.num_frames = 0
        self.num_tags = 0
        self.num_surfaces = 0
        self.num_skins = 0
        self.frames = []
        self.tags = []
        self.surfaces = []

    def pack(self):
        """Serialize the whole model to the MD3 binary layout."""
        def _b(s):
            # Accept either str or bytes for the magic/name fields.
            return s if isinstance(s, bytes) else s.encode('ascii')

        fmt = "4si68siiiiiiii"  # file header layout
        packed = b""
        packed += struct.pack("4s", _b(self.ident))
        packed += struct.pack("i", self.version)
        packed += struct.pack("68s", _b(self.name))
        packed += struct.pack("i", self.num_frames)
        packed += struct.pack("i", self.num_tags)
        packed += struct.pack("i", self.num_surfaces)
        packed += struct.pack("i", self.num_skins)
        # Section offsets: frames, then tags, then surfaces, then EOF.
        ofs_frames = struct.calcsize(fmt)
        ofs_tags = ofs_frames + len(self.frames) * Md3Frame().packSize()
        ofs_surfaces = ofs_tags + len(self.tags) * Md3Tag().packSize()
        # Fix: surfaces are variable-sized, so ofs_eof must sum each
        # surface's own packSize() instead of using an empty default.
        ofs_eof = ofs_surfaces + sum(surface.packSize() for surface in self.surfaces)
        packed += struct.pack("i", ofs_frames)
        packed += struct.pack("i", ofs_tags)
        packed += struct.pack("i", ofs_surfaces)
        packed += struct.pack("i", ofs_eof)
        for frame in self.frames:
            packed += frame.pack()
        for tag in self.tags:
            packed += tag.pack()
        for surface in self.surfaces:
            packed += surface.pack()
        return packed
def importAsset(model, asset):
    """MD3 import is not supported by this plugin; always raises."""
    raise NotImplementedError
def exportAsset(model, asset):
    """Pack *model* as MD3 and write it to *asset* via the toolbox writer."""
    out_file = toolbox.writeAny(asset)
    md3_model = MD3Object()
    meshes = model.getMeshes()
    #TODO: Put stuff into the MD3Object here()
    out_file.write(md3_model.pack())
    out_file.close()
def encodeNormal(x, y, z):
    """Returns (azimuth, zenith) angles of the normal vector,
    scaled into the 0-255 byte range used by the MD3 format."""
    full_circle = 2 * math.pi
    azimuth = math.atan2(y, x) * 255 / full_circle
    zenith = math.acos(z) * 255 / full_circle
    return (azimuth, zenith)
|
from mpi4py import MPI
from cplpy import CPL
comm = MPI.COMM_WORLD
CPL = CPL()
CFD_COMM = CPL.init(CPL.CFD_REALM)
cart_comm = CFD_COMM.Create_cart([1, 1, 1])
CPL.setup_cfd(cart_comm, xyzL=[1.0, 1.0, 1.0],
xyz_orig=[0.0, 0.0, 0.0], ncxyz=[32, 32, 32])
recv_array, send_array = CPL.get_arrays(recv_size=4, send_size=1)
for time in range(5):
recv_array, ierr = CPL.recv(recv_array)
print("CFD", time, recv_array[0,0,0,0])
send_array[0,:,:,:] = 2.*time
CPL.send(send_array)
CPL.finalize()
#Start again
CFD_COMM = CPL.init(CPL.CFD_REALM)
CPL.setup_cfd(cart_comm, xyzL=[1.0, 1.0, 1.0],
xyz_orig=[0.0, 0.0, 0.0], ncxyz=[32, 32, 32])
recv_array, send_array = CPL.get_arrays(recv_size=4, send_size=1)
for time in range(5):
recv_array, ierr = CPL.recv(recv_array)
print("CFD", | time, recv_arr | ay[0,0,0,0])
send_array[0,:,:,:] = 2.*time
CPL.send(send_array)
CPL.finalize()
MPI.Finalize()
|
import logging
import traceback
from functools import wraps
import os
import re
from django.conf import settings
from django.db import connection
from django.db.models import ManyToManyField
logger = logging.getLogger(__name__)
def debug_pg_notices(f):
    """Decorator: log PostgreSQL notices raised while *f* runs.

    Clears the psycopg2 connection notice buffer before the call, then in
    a finally block groups the collected notices by their CONTEXT line and
    logs each group at DEBUG level. The wrapped function's return value is
    passed through unchanged.
    """
    @wraps(f)
    def wrapped(*args, **kwargs):
        r = None
        if connection.connection:
            # Drop notices left over from earlier statements.
            del connection.connection.notices[:]
        try:
            r = f(*args, **kwargs)
        finally:
            # Show triggers output
            allnotices = []
            current = ''
            if connection.connection:
                notices = []
                for notice in connection.connection.notices:
                    try:
                        # psycopg2 notices look like "NOTICE: ...CONTEXT: ...".
                        notice, context = notice.split('CONTEXT:', 1)
                        context = re.sub(r"\s+", " ", context)
                    except ValueError:
                        # No CONTEXT part on this notice.
                        context = ''
                    notices.append((context, notice))
                    # Start a new group whenever the context changes.
                    if context != current:
                        allnotices.append(notices)
                        notices = []
                        current = context
                allnotices.append(notices)
            current = ''
            for notices in allnotices:
                for context, notice in notices:
                    if context != current:
                        if context != '':
                            # Only the first 80 chars of the context header.
                            logger.debug('Context %s...:' % context.strip()[:80])
                        current = context
                    notice = notice.replace('NOTICE: ', '')
                    prefix = ''
                    logger.debug('%s%s' % (prefix, notice.strip()))
        return r
    return wrapped
def load_sql_files(app, stage):
    """
    Look for SQL files in Django app, and load them into database.
    We remove RAISE NOTICE instructions from SQL outside unit testing
    since they lead to interpolation errors of '%' character in python.

    :param app: Django AppConfig whose ``sql/`` directory is scanned.
    :param stage: filename prefix filter — only ``<stage>_*.sql`` files run.
    :raises: re-raises any database error after logging it.
    """
    app_dir = app.path
    sql_dir = os.path.normpath(os.path.join(app_dir, 'sql'))
    # Site-specific overrides live under VAR_DIR/conf/extra_sql/<app label>.
    custom_sql_dir = os.path.join(settings.VAR_DIR, 'conf/extra_sql', app.label)
    sql_files = []
    r = re.compile(r'^{}_.*\.sql$'.format(stage))
    if os.path.exists(sql_dir):
        sql_files += [
            os.path.join(sql_dir, f) for f in os.listdir(sql_dir) if r.match(f) is not None
        ]
    if os.path.exists(custom_sql_dir):
        sql_files += [
            os.path.join(custom_sql_dir, f) for f in os.listdir(custom_sql_dir) if r.match(f) is not None
        ]
    # Deterministic execution order (app files before custom ones of the
    # same name only by path sort).
    sql_files.sort()
    cursor = connection.cursor()
    for sql_file in sql_files:
        try:
            logger.info("Loading initial SQL data from '%s'" % sql_file)
            f = open(sql_file)
            sql = f.read()
            f.close()
            if not settings.TEST and not settings.DEBUG:
                # Remove RAISE NOTICE (/!\ only one-liners)
                sql = re.sub(r"\n.*RAISE NOTICE.*\n", "\n", sql)
                # TODO: this is the ugliest driver hack ever
                sql = sql.replace('%', '%%')
            # Replace curly braces with settings values
            pattern = re.compile(r'{{\s*([^\s]*)\s*}}')
            for m in pattern.finditer(sql):
                value = getattr(settings, m.group(1))
                sql = sql.replace(m.group(0), str(value))
            # Replace sharp braces with schemas
            pattern = re.compile(r'{#\s*([^\s]*)\s*#}')
            for m in pattern.finditer(sql):
                try:
                    value = settings.DATABASE_SCHEMAS[m.group(1)]
                except KeyError:
                    # Unknown schema key falls back to the default schema.
                    value = settings.DATABASE_SCHEMAS.get('default', 'public')
                sql = sql.replace(m.group(0), str(value))
            cursor.execute(sql)
        except Exception as e:
            logger.critical("Failed to install custom SQL file '%s': %s\n" %
                            (sql_file, e))
            traceback.print_exc()
            raise
def set_search_path():
    """Set the session search_path to public plus every known schema.

    Combines the schemas already present in the database (minus the
    internal pg_* ones) with those declared in settings, always putting
    'public' first.
    """
    cursor = connection.cursor()
    cursor.execute('SELECT schema_name FROM information_schema.schemata')
    schemas = {row[0] for row in cursor.fetchall() if not row[0].startswith('pg_')}
    schemas |= set(settings.DATABASE_SCHEMAS.values())
    schemas -= {'public', 'information_schema'}
    search_path = ('public', ) + tuple(schemas)
    cursor.execute('SET search_path TO {}'.format(', '.join(search_path)))
def move_models_to_schemas(app):
    """
    Move models tables to PostgreSQL schemas.
    Views, functions and triggers will be moved in Geotrek app SQL files.

    :param app: Django AppConfig whose models' tables should be relocated.
    """
    default_schema = settings.DATABASE_SCHEMAS.get('default', 'public')
    app_schema = settings.DATABASE_SCHEMAS.get(app.name, default_schema)

    # Map each target schema to its tables (including M2M through tables).
    table_schemas = {}
    for model in app.get_models():
        model_name = model._meta.model_name
        table_name = model._meta.db_table
        model_schema = settings.DATABASE_SCHEMAS.get(model_name, app_schema)
        table_schemas.setdefault(model_schema, []).append(table_name)
        for field in model._meta.get_fields():
            if isinstance(field, ManyToManyField):
                table_schemas[model_schema].append(field.m2m_db_table())

    cursor = connection.cursor()
    for schema_name in table_schemas:
        # Bug fix: the original used the loop-stale 'model_schema' variable
        # here, so only the last model's schema ever got created/logged.
        cursor.execute("CREATE SCHEMA IF NOT EXISTS %s;" % schema_name)
        logger.info("Created schema %s" % schema_name)

    for schema_name, tables in table_schemas.items():
        for table_name in tables:
            # Only move tables that are not already in the target schema.
            sql = "SELECT 1 FROM information_schema.tables WHERE table_name=%s AND table_schema!=%s"
            cursor.execute(sql, [table_name, schema_name])
            if cursor.fetchone():
                cursor.execute("ALTER TABLE %s SET SCHEMA %s;" % (table_name, schema_name))
                logger.info("Moved %s to schema %s" % (table_name, schema_name))

    # For Django, search_path is set in connection options.
    # But when accessing the database using QGis or ETL, search_path must be
    # set database level (for all users, and for this database only).
    if app.name == 'geotrek.common':
        dbname = settings.DATABASES['default']['NAME']
        dbuser = settings.DATABASES['default']['USER']
        search_path = ', '.join(('public', ) + tuple(set(settings.DATABASE_SCHEMAS.values())))
        sql = "ALTER ROLE %s IN DATABASE %s SET search_path=%s;" % (dbuser, dbname, search_path)
        cursor.execute(sql)
|
"""This is just a playing around module. Please ignore it"""
import json
import six
from randomload.log import logging
from six.moves.urllib import parse
logger = logging.getLogger('randomload.actions.glance.usage')
class Controller(object):
    """Thin client for the /v2/usages endpoint of the service API."""

    def __init__(self, http_client):
        self.http_client = http_client

    def list(self, start, end, detailed=False, metadata=None):
        """Fetch usage records between *start* and *end* (datetime objects).

        *metadata* may be a dict (JSON-encoded for the query string) or a
        pre-encoded string; empty metadata is omitted entirely.
        """
        if metadata is None:
            metadata = {}
        if isinstance(metadata, dict):
            metadata = json.dumps(metadata)
        opts = {
            'start': start.isoformat(),
            'end': end.isoformat(),
            'detailed': int(bool(detailed)),
        }
        if metadata:
            opts['metadata'] = metadata
        # Drop falsy values and utf-8-encode text before urlencoding.
        qparams = {}
        for key, value in opts.items():
            if value:
                if isinstance(value, six.text_type):
                    value = value.encode('utf-8')
                qparams[key] = value
        url = '/v2/usages%s' % ('?%s' % parse.urlencode(qparams))
        return self.http_client.get(url)
def bytes_to_GB(size_in_B):
    """Return size in GB

    :param size: Numeric
    :returns: Float
    """
    # None is treated as zero bytes.
    if size_in_B is None:
        return 0.0
    return float(size_in_B) / (1024 * 1024 * 1024)
def usage(clients, conf, start=None, end=None, metadata=None):
    """Log per-tenant image usage between *start* and *end*."""
    logger.info("Start: {0}".format(start))
    logger.info("End: {0}".format(end))
    logger.info("Metadata: {0}".format(metadata))
    glance = clients.get_glance()
    controller = Controller(glance.http_client)
    resp, _ = controller.list(start, end, detailed=True, metadata=metadata)
    tenant_usages = resp.json().get('tenant_usages', [])
    for tenant_usage in tenant_usages:
        logger.info("Tenant id: {0}".format(tenant_usage.get('project_id')))
        logger.info("Total GB Hours: {0}".format(
            tenant_usage.get('total_gb_hours')
        ))
        # Local renamed from ``usage`` to avoid shadowing this function.
        for image_usage in tenant_usage.get('image_usages', []):
            logger.info(
                "Name: {0} - Size: {1} GB - Status: {2}".format(
                    image_usage['name'],
                    bytes_to_GB(image_usage['size']),
                    image_usage['status']
                )
            )
|
import sys
import xbmc,xbmcaddon,xbmcvfs
import sqlite3
from subprocess import Popen
import datetime,time
# from vpnapi import VPNAPI
# CLI arguments: channel identifier and programme start time
# (epoch seconds as a string; parsed with float() below).
channel = sys.argv[1]
start = sys.argv[2]
ADDON = xbmcaddon.Addon(id='script.stargate.guide')
def adapt_datetime(ts):
    """sqlite3 adapter: store datetime values as POSIX timestamps (float)."""
    # http://docs.python.org/2/library/sqlite3.html#registering-an-adapter-callable
    timestamp = time.mktime(ts.timetuple())
    return timestamp
def convert_datetime(ts):
    """sqlite3 converter: turn a stored timestamp back into a datetime.

    Returns None when the stored value is not a usable number.
    """
    try:
        seconds = float(ts)
        return datetime.datetime.fromtimestamp(seconds)
    except ValueError:
        return None
# Register datetime <-> float adapters so 'timestamp' columns round-trip.
sqlite3.register_adapter(datetime.datetime, adapt_datetime)
sqlite3.register_converter('timestamp', convert_datetime)
# EPG database maintained by the guide addon.
path = xbmc.translatePath('special://profile/addon_data/script.stargate.guide/source.db')
try:
    conn = sqlite3.connect(path, detect_types=sqlite3.PARSE_DECLTYPES)
    conn.row_factory = sqlite3.Row
except Exception as detail:
    xbmc.log("EXCEPTION: (script.stargate.guide) %s" % detail, xbmc.LOGERROR)
c = conn.cursor()
startDate = datetime.datetime.fromtimestamp(float(start))
# Look up the programme being recorded; only the first row is used.
c.execute('SELECT DISTINCT * FROM programs WHERE channel=? AND start_date = ?', [channel,startDate])
for row in c:
    title = row["title"]
    endDate = row["end_date"]
    duration = endDate - startDate
    before = int(ADDON.getSetting('autoplaywiths.before'))
    after = int(ADDON.getSetting('autoplaywiths.after'))
    extra = (before + after) * 60
    #TODO start from now
    #seconds = duration.seconds + extra
    #if seconds > (3600*4):
    # Recording length is currently hard-capped at 4 hours (see TODO above).
    seconds = 3600*4
    break
# Find the channel's stream url
c.execute('SELECT stream_url FROM custom_stream_url WHERE channel=?', [channel])
row = c.fetchone()
url = ""
if row:
    url = row[0]
if not url:
    quit()
# Uncomment this if you want to use VPN Mgr filtering. Need to import VPNAPI.py
# else:
#   if ADDON.getSetting('vpnmgr.connect') == "true":
#       vpndefault = False
#       if ADDON.getSetting('vpnmgr.default') == "true":
#           vpndefault = True
#       api = VPNAPI()
#       if url[0:9] == 'plugin://':
#           api.filterAndSwitch(url, 0, vpndefault, True)
#       else:
#           if vpndefault: api.defaultVPN(True)
# Find the actual url used to play the stream
#core = "dummy"
#xbmc.executebuiltin('PlayWith(%s)' % core)
# Briefly start playback in Kodi just to resolve the final stream URL
# (e.g. when the stored URL is a plugin:// redirect), then stop it.
player = xbmc.Player()
player.play(url)
count = 30
url = ""
while count:
    count = count - 1
    time.sleep(1)
    if player.isPlaying():
        url = player.getPlayingFile()
        break
player.stop()
# Play with your own preferred player and paths
if url:
    name = "%s = %s = %s" % (start,channel,title)
    name = name.encode("cp1252")
    filename = xbmc.translatePath("special://temp/%s.ts" % name)
    #filename = "/storage/recordings/%s.ts" % name
    ffmpeg = r"c:\utils\ffmpeg.exe"
    # NOTE: the Linux path overrides the Windows one above unconditionally.
    ffmpeg = r"/usr/bin/ffmpeg"
    cmd = [ffmpeg, "-y", "-i", url, "-c", "copy", "-t", str(seconds), filename]
    # NOTE(review): shell=True combined with a list argv does not pass the
    # extra arguments to ffmpeg on POSIX — the shell=False variant below
    # looks like the correct one; confirm on the target platform.
    p = Popen(cmd,shell=True)
    #p = Popen(cmd,shell=False)
|
the TS6 protocol.
class HybridProtocol(TS6Protocol):
    def __init__(self, *args, **kwargs):
        # Hybrid uses plain ASCII case mapping, and several of its burst
        # commands map onto the generic hook names used elsewhere.
        super().__init__(*args, **kwargs)
        self.casemapping = 'ascii'
        self.hook_map = {'EOB': 'ENDBURST', 'TBURST': 'TOPIC', 'SJOIN': 'JOIN'}
        # Hybrid does not support '/' in hostnames.
        self.protocol_caps -= {'slash-in-hosts'}
def post_connect(self):
"""Initializes a connection to a server."""
ts = self.start_ts
f = self.send
# https://github.com/grawity/irc-docs/blob/master/server/ts6.txt#L80
# Note: according to hybrid source code, +p is paranoia, noknock,
# AND rfc1459-style private, though the last isn't documented.
cmodes = {
# TS6 generic modes:
'op': 'o', 'halfop': 'h', 'voice': 'v', 'ban': 'b', 'key': 'k',
'limit': 'l', 'moderated': 'm', 'noextmsg': 'n',
'secret': 's', 'topiclock': 't', 'private': 'p',
# hybrid-specific modes:
'blockcolor': 'c', 'inviteonly': 'i', 'noctcp': 'C',
'regmoderated': 'M', 'operonly': 'O', 'regonly': 'R',
'sslonly': 'S', 'banexception': 'e', 'noknock': 'p',
'registered': 'r', 'invex': 'I', 'paranoia': 'p',
'banexception': 'e',
# Now, map all the ABCD type modes:
'*A': 'beI', '*B': 'k', '*C': 'l', '*D': 'cimnprstCMORS'
}
self.cmodes = cmodes
umodes = {
'oper': 'o', 'invisible': 'i', 'wallops': 'w', 'locops': 'l',
'cloak': 'x', 'hidechans': 'p', 'regdeaf': 'R', 'deaf': 'D',
'callerid': 'g', 'admin': 'a', 'deaf_commonchan': 'G', 'hideoper': 'H',
'webirc': 'W', 'sno_clientconnections': 'c', 'sno_badclientconnections': 'u',
'sno_rejectedclients': 'j', 'sno_skill': 'k', 'sno_fullauthblock': 'f',
'sno_remoteclientconnections': 'F', 'sno_stats': 'y', 'sno_debug': 'd',
'sno_nickchange': 'n', 'hideidle': 'q', 'registered': 'r',
'snomask': 's', 'ssl': 'S', 'sno_serverconnects': 'e', 'sno_botfloods': 'b',
# Now, map all the ABCD type modes:
'*A': '', '*B': '', '*C': '', '*D': 'DFGHRSWabcdefgijklnopqrsuwxy | '
}
self.umodes = umodes
self.extbans_matching.clear()
# halfops is mandatory on Hybrid
self.prefixmodes = {'o': '@', 'h': '%', 'v': '+'}
# https://github.com/grawity/irc-docs/blob/master/server/ts6.txt#L55
f('PASS %s TS 6 %s' % (self.serverdata["sendpass"], self.sid))
# We request the following capabilities (for hybrid):
# ENCAP: message encapsulati | on for certain commands
# EX: Support for ban exemptions (+e)
# IE: Support for invite exemptions (+e)
# CHW: Allow sending messages to @#channel and the like.
# KNOCK: Support for /knock
# SVS: Deal with extended NICK/UID messages that contain service IDs/stamps
# TBURST: Topic Burst command; we send this in topic_burst
# DLN: DLINE command
# UNDLN: UNDLINE command
# KLN: KLINE command
# UNKLN: UNKLINE command
# HOPS: Supports HALFOPS
# CHW: Can do channel wall (@#)
# CLUSTER: Supports server clustering
# EOB: Supports EOB (end of burst) command
f('CAPAB :TBURST DLN KNOCK UNDLN UNKLN KLN ENCAP IE EX HOPS CHW SVS CLUSTER EOB QS')
f('SERVER %s 0 :%s' % (self.serverdata["hostname"],
self.serverdata.get('serverdesc') or conf.conf['pylink']['serverdesc']))
# send endburst now
self.send(':%s EOB' % (self.sid,))
def spawn_client(self, nick, ident='null', host='null', realhost=None, modes=set(),
server=None, ip='0.0.0.0', realname=None, ts=None, opertype=None,
manipulatable=False):
"""
Spawns a new client with the given options.
Note: No nick collision / valid nickname checks are done here; it is
up to plugins to make sure they don't introduce anything invalid.
"""
server = server or self.sid
if not self.is_internal_server(server):
raise ValueError('Server %r is not a PyLink server!' % server)
uid = self.uidgen[server].next_uid()
ts = ts or int(time.time())
realname = realname or conf.conf['pylink']['realname']
realhost = realhost or host
raw_modes = self.join_modes(modes)
u = self.users[uid] = User(self, nick, ts, uid, server, ident=ident, host=host, realname=realname,
realhost=realhost, ip=ip, manipulatable=manipulatable)
self.apply_modes(uid, modes)
self.servers[server].users.add(uid)
self._send_with_prefix(server, "UID {nick} {hopcount} {ts} {modes} {ident} {host} {ip} {uid} "
"* :{realname}".format(ts=ts, host=host,
nick=nick, ident=ident, uid=uid,
modes=raw_modes, ip=ip, realname=realname,
hopcount=self.servers[server].hopcount))
return u
    def update_client(self, target, field, text):
        """Updates the ident, host, or realname of a PyLink client.

        Only HOST changes are implemented here; any other field raises
        NotImplementedError.
        """
        # https://github.com/ircd-hybrid/ircd-hybrid/blob/58323b8/modules/m_svsmode.c#L40-L103
        # parv[0] = command
        # parv[1] = nickname <-- UID works too -jlu5
        # parv[2] = TS <-- Of the user, not the current time. -jlu5
        # parv[3] = mode
        # parv[4] = optional argument (services account, vhost)
        field = field.upper()
        ts = self.users[target].ts
        if field == 'HOST':
            self.users[target].host = text
            # On Hybrid, it appears that host changing is actually just forcing umode
            # "+x <hostname>" on the target. -jlu5
            self._send_with_prefix(self.sid, 'SVSMODE %s %s +x %s' % (target, ts, text))
        else:
            raise NotImplementedError("Changing field %r of a client is unsupported by this protocol." % field)
    def oper_notice(self, source, text):
        """
        Send a message to all opers.
        """
        # Hybrid delivers oper notices via the GLOBOPS command.
        self._send_with_prefix(source, 'GLOBOPS :%s' % text)
def set_server_ban(self, source, duration, user='*', host='*', reason='User banned'):
"""
Sets a server ban.
"""
# source: user
# parameters: target server mask, duration, user mask, host mask, reason
assert not (user == host == '*'), "Refusing to set ridiculous ban on *@*"
if not source in self.users:
log.debug('(%s) Forcing KLINE sender to %s as TS6 does not allow KLINEs from servers', self.name, self.pseudoclient.uid)
source = self.pseudoclient.uid
self._send_with_prefix(source, 'KLINE * %s %s %s :%s' % (duration, user, host, reason))
def topic_burst(self, numeric, target, text):
"""Sends a topic change from a PyLink server. This is usually used on burst."""
# <- :0UY TBURST 1459308205 #testchan 1459309379 dan!~d@localhost :sdf
if not self.is_internal_server(numeric):
raise LookupError('No such PyLink server exists.')
ts = self._channels[target].ts
servername = self.servers[numeric].name
self._send_with_prefix(numeric, 'TBURST %s %s %s %s :%s' % (ts, target, int(time.time()), servername, text))
self._channels[target].topic = text
self._channels[target].topicset = True
# command handlers
def handle_capab(self, numeric, command, args):
# We only get a list of keywords here. Hybrid obviously assumes that
# we know what modes it supports (indeed, this is a standard list).
# <- CAPAB :UNDLN UNKLN KLN TBURST KNOCK ENCAP DLN IE EX HOPS CHW SVS CLUSTER EOB QS
self._caps = caps = args[0].split()
for required_cap in ('SVS', 'EOB', 'HOPS', 'QS', 'TBURST'):
if required_cap not in caps:
raise ProtocolError('%s not found in TS6 capabilities list; this is required! (got %r)' % (required_cap, caps))
def handle_uid(self, numeric, command, args):
"""
Handles Hybrid-style UID commands (user introduction). This is INCOMPATIBLE
with standard TS6 impl |
#! /usr/bin/python
# Copyright 2004-2008 Roman Yakovenko.
# Distributed under the Boost Software License, Version 1.0. (See
# accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
import unittest
import autoconfig
import pygccxml
from pygccxml.utils import *
from pygccxml.parser import *
from pygccxml import declarations
class tester_t( unittest.TestCase ):
    """Unit tests for declarations.call_invocation parsing helpers."""

    def __init__(self, *args ):
        unittest.TestCase.__init__( self, *args )

    def __test_split_impl(self, decl_string, name, args):
        # FIX: assertTrue replaces failUnless, which has been deprecated
        # since Python 2.7 and removed in Python 3.12.
        self.assertTrue( ( name, args ) == declarations.call_invocation.split( decl_string ) )

    def __test_split_recursive_impl(self, decl_string, control_seq):
        self.assertTrue( control_seq == declarations.call_invocation.split_recursive( decl_string ) )

    def __test_is_call_invocation_impl( self, decl_string ):
        self.assertTrue( declarations.call_invocation.is_call_invocation( decl_string ) )

    def test_split_on_vector(self):
        self.__test_is_call_invocation_impl( "vector(int,std::allocator(int) )" )
        self.__test_split_impl( "vector(int,std::allocator(int) )"
                                , "vector"
                                , [ "int", "std::allocator(int)" ] )
        self.__test_split_recursive_impl( "vector(int,std::allocator(int) )"
                                          , [ ( "vector", [ "int", "std::allocator(int)" ] )
                                              , ( "std::allocator", ["int"] ) ] )

    def test_split_on_string(self):
        self.__test_is_call_invocation_impl( "basic_string(char,std::char_traits(char),std::allocator(char) )" )
        self.__test_split_impl( "basic_string(char,std::char_traits(char),std::allocator(char) )"
                                , "basic_string"
                                , [ "char", "std::char_traits(char)", "std::allocator(char)" ] )

    def test_split_on_map(self):
        self.__test_is_call_invocation_impl( "map(long int,std::vector(int, std::allocator(int) ),std::less(long int),std::allocator(std::pair(const long int, std::vector(int, std::allocator(int) ) ) ) )" )
        self.__test_split_impl( "map(long int,std::vector(int, std::allocator(int) ),std::less(long int),std::allocator(std::pair(const long int, std::vector(int, std::allocator(int) ) ) ) )"
                                , "map"
                                , [ "long int"
                                    , "std::vector(int, std::allocator(int) )"
                                    , "std::less(long int)"
                                    , "std::allocator(std::pair(const long int, std::vector(int, std::allocator(int) ) ) )" ] )

    def test_join_on_vector(self):
        self.assertTrue( "vector( int, std::allocator(int) )"
                         == declarations.call_invocation.join("vector", ( "int", "std::allocator(int)" ) ) )

    def test_find_args(self):
        # find_args returns the (open, close) index pair of the next
        # parenthesised argument list.
        temp = 'x()()'
        found = declarations.call_invocation.find_args( temp )
        self.assertTrue( (1,2) == found )
        found = declarations.call_invocation.find_args( temp, found[1]+1 )
        self.assertTrue( (3, 4) == found )
        temp = 'x(int,int)(1,2)'
        found = declarations.call_invocation.find_args( temp )
        self.assertTrue( (1,9) == found )
        found = declarations.call_invocation.find_args( temp, found[1]+1 )
        self.assertTrue( (10, 14) == found )

    def test_bug_unmatched_brace( self ):
        # Regression test: nested quoted parentheses must not confuse split.
        src = 'AlternativeName((&string("")), (&string("")), (&string("")))'
        self.__test_split_impl( src
                                , 'AlternativeName'
                                , ['(&string(""))', '(&string(""))', '(&string(""))'] )
def create_suite():
    """Bundle every tester_t case into a single test suite."""
    return unittest.TestSuite([unittest.makeSuite(tester_t)])
def run_suite():
    """Run the full suite with verbose output."""
    runner = unittest.TextTestRunner(verbosity=2)
    runner.run(create_suite())
# Allow running this test module directly from the command line.
if __name__ == "__main__":
    run_suite()
|
l):
util.info("Examining url " + url)
if MOVIES_GENRE in url:
return self.list_by_genres(url)
if self.is_most_popular(url):
if "movie" in url:
return self.list_movies_by_letter(url)
if "tv" in url:
return self.list_tv_shows_by_letter(url)
if self.is_recently_added(url):
util.debug("is recently added")
if "movie" in url:
return self.list_movie_recently_added(url)
if "tv" in url:
util.debug("is TV")
return self.list_tv_recently_added(url)
if self.is_search(url):
return self.list_search(url)
if self.is_base_url(url):
self.base_url = url
if "movie" in url:
return self.a_to_z(MOVIES_A_TO_Z_TYPE)
if "tv" in url:
return self.a_to_z(TV_SHOWS_A_TO_Z_TYPE)
if self.particular_letter(url):
if "movie" in url:
return self.list_movies_by_letter(url)
if "tv" in url:
return self.list_tv_shows_by_letter(url)
if self.has_tv_show_flag(url):
return self.list_tv_show(self.remove_flags(url))
if self.is_xml_letter(url):
util.debug("xml letter")
if "movie" in url:
return self.list_xml_letter(url)
return [self.dir_item(title="I failed", url="fail")]
def list_by_genres(self, url):
if "?" + GENRE_PARAM in url:
return self.list_xml_letter(url)
else:
result = []
page = util.request(url)
data = util.substr(page, '<select name=\"zanr\">', '</select')
for s in re.finditer('<option value=\"([^\"]+)\">([^<]+)</option>', data,
re.IGNORECASE | re.DOTALL):
item = {'url': url + "?" + GENRE_PARAM + "=" +
s.group(1), 'title': s.group(2), 'type': 'dir'}
self._filter(result, item)
return result
    def list_xml_letter(self, url):
        # Parse the XML film listing at *url* into playable video items.
        result = []
        data = util.request(url)
        tree = ET.fromstring(data)
        for film in tree.findall('film'):
            item = self.video_item()
            try:
                # NOTE(review): this membership test looks wrong — it checks
                # the module constant against an instance attribute of the
                # same name and so likely always picks the Czech title;
                # confirm the intended language check.
                if ISO_639_1_CZECH in self.ISO_639_1_CZECH:
                    title = film.findtext('nazevcs').encode('utf-8')
                else:
                    title = film.findtext('nazeven').encode('utf-8')
                basetitle = '%s (%s)' % (title, film.findtext('rokvydani'))
                item['title'] = '%s - %s' % (basetitle, film.findtext('kvalita').upper())
                item['name'] = item['title']
                item['img'] = film.findtext('obrazekmaly')
                # Player URL is derived from the English title + release year.
                item['url'] = self.base_url + '/player/' + self.parent.make_name(
                    film.findtext('nazeven').encode('utf-8') + '-' + film.findtext('rokvydani'))
                item['menu'] = {"[B][COLOR red]Add to library[/COLOR][/B]": {
                    'url': item['url'], 'action': 'add-to-library', 'name': basetitle}}
                self._filter(result, item)
            except Exception, e:
                # Skip films with missing/broken fields but log them.
                util.error("ERR TITLE: " + item['title'] + " | " + str(e))
                pass
        util.debug(result)
        return result
    def list_tv_show(self, url):
        # Scrape a TV show page into a list of episode items, grouped by
        # series (<strong>series name</strong> followed by a <ul> of episodes).
        result = []
        page = util.request(url)
        data = util.substr(page, '<div class=\"content\">', '<script')
        for s in re.finditer('<strong.+?</ul>', data, re.IGNORECASE | re.DOTALL):
            serie = s.group(0)
            serie_name = re.search('<strong>([^<]+)', serie).group(1)
            for e in re.finditer('<li.+?</li>', serie, re.IGNORECASE | re.DOTALL):
                episode = e.group(0)
                item = self.video_item()
                # Episode id (e.g. "S01E02") and human-readable name.
                ep_name = re.search('<a href=\"#[^<]+<span>(?P<id>[^<]+)</span>(?P<name>[^<]+)',
                                    episode)
                if ep_name:
                    item['title'] = '%s %s %s' % (
                        serie_name, ep_name.group('id'), ep_name.group('name'))
                    item['epname'] = ep_name.group('name')
                    item['ep'] = ep_name
                # Thumbnail and playback URL.
                i = re.search('<div class=\"inner-item[^<]+<img src=\"(?P<img>[^\"]+).+?<a href=\"'
                              '(?P<url>[^\"]+)', episode, re.IGNORECASE | re.DOTALL)
                if i:
                    item['img'] = self._url(i.group('img'))
                    item['url'] = i.group('url')
                # Only keep episodes where both scrapes succeeded.
                if i and ep_name:
                    self._filter(result, item)
        if self.reverse_eps:
            result.reverse()
        return result
def add_video_flag(self, items):
flagged_items = []
for item in items:
flagged_item = self.video_item()
flagged_item.update(item)
flagged_items.append(flagged_item)
return flagged_items
def add_directory_flag(self, items):
flagged_items = []
for item in items:
flagged_item = self.dir_item()
flagged_item.update(item)
flagged_items.append(flagged_item)
return flagged_items
    @cached(ttl=24)
    def get_data_cached(self, url):
        # Cached page fetch; ttl unit is defined by the ``cached`` decorator
        # (presumably hours — TODO confirm).
        return util.request(url)
    def list_by_letter(self, url):
        # Scrape one listing page and recurse through pagination, returning
        # all titles with add/remove-subscription context menus attached.
        result = []
        page = self.get_data_cached(url)
        data = util.substr(page, '<ul class=\"content', '</ul>')
        subs = self.get_subs()
        for m in re.finditer('<a class=\"title\" href=\"(?P<url>[^\"]+)[^>]+>(?P<name>[^<]+)', data,
                             re.IGNORECASE | re.DOTALL):
            item = {'url': m.group('url'), 'title': m.group('name')}
            # Context menu depends on whether the title is already subscribed.
            if item['url'] in subs:
                item['menu'] = {"[B][COLOR red]Remove from subscription[/COLOR][/B]": {
                    'url': m.group('url'), 'action': 'remove-subscription', 'name': m.group('name')}
                }
            else:
                item['menu'] = {"[B][COLOR red]Add to library[/COLOR][/B]": {
                    'url': m.group('url'), 'action': 'add-to-library', 'name': m.group('name')}}
            self._filter(result, item)
        # Follow pagination by recursing with the next page number.
        paging = util.substr(page, '<div class=\"pagination\"', '</div')
        next = re.search('<li class=\"next[^<]+<a href=\"\?page=(?P<page>\d+)', paging,
                         re.IGNORECASE | re.DOTALL)
        if next:
            next_page = int(next.group('page'))
            current = re.search('\?page=(?P<page>\d)', url)
            current_page = 0
            # "Most popular" listings are capped at 10 pages.
            if self.is_most_popular(url) and next_page > 10:
                return result
            if current:
                current_page = int(current.group('page'))
            if current_page < next_page:
                url = re.sub('\?.+?$', '', url) + '?page=' + str(next_page)
                result += self.list_by_letter(url)
        return result
def list_tv_recently_added(self, url):
result = []
page = self.get_data_cached(url)
data = util.substr(page, '<div class=\"content\"', '</ul>')
subs = self.get_subs()
for m in re.finditer('<a href=\"(?P<url>[^\"]+)[^>]+((?!<strong).)*<strong>S(?P<serie>\d+) '
'/ E(?P<epizoda>\d+)</strong>((?!<a href).)*<a href=\"(?P<surl>[^\"]+)'
'[^>]+class=\"mini\">((?!<span>).)*<span>\((?P<name>[^)]+)\)<',
data, re.IGNORECASE | re.DOTALL):
item = self.video_item()
item['url'] = m.group('url')
item['title'] = "Rada " + m.group('serie') + " Epizoda " + m.group(
'epizoda') + " - " + m.group('name')
if item['url'] in subs:
item['menu'] = {"[B][COLOR red]Remove from subscription[/COLOR][/B]": {
'url': m.group('url'), 'action': 'remove-subscription',
'name': m.group('name') + " S" + m.group('serie') + 'E' + m.group('epizoda')}}
else:
item['menu'] = {"[B][COLOR red]Add to library[/COLOR][/B]": {
'url': m.group('url'), 'action': |
import matplotlib.pyplot as plt
import numpy as np
import scattering
import scipy.constants as consts
import quantities as pq
def plot_csec(scatterer, d, var, name):
    """Plot *var* against diameters *d*, labelled by the scatterer wavelength."""
    lam = scatterer.wavelength.rescale('cm')
    curve_label = '%.1f %s' % (lam, lam.dimensionality)
    plt.plot(d, var, label=curve_label)
    plt.xlabel('Diameter (%s)' % d.dimensionality)
    plt.ylabel(name)
def plot_csecs(d, scatterers) | :
for s in scatterers:
plt.subplot(1,1,1)
plot_csec(s, d, np.rad2deg(np.unwrap(-np.angle(-s.S_bkwd[0,0].conj() *
s.S_bkwd[1,1]).squeeze())), 'delta')
plt.gca().set_ylim(-4, 20)
d = np.linspace(0.01, 0.7, 200).reshape(200, 1) * pq.cm
sband = pq.c / (2.8 * pq.GHz)
cband = pq.c / (5.4 * pq.GHz)
xband = pq.c / (9.4 * pq.GHz)
temp = 10.0
x_fixed = scattering.scatterer(xband, temp, 'water', diameters=d, shape='oblate')
x_fixed.set_scattering_model(' | tmatrix')
c_fixed = scattering.scatterer(cband, temp, 'water', diameters=d, shape='oblate')
c_fixed.set_scattering_model('tmatrix')
s_fixed = scattering.scatterer(sband, temp, 'water', diameters=d, shape='oblate')
s_fixed.set_scattering_model('tmatrix')
plot_csecs(d, [x_fixed, c_fixed, s_fixed])
plt.legend(loc = 'upper left')
plt.show()
|
# Copyright 2018 Tecnativa - David Vidal
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
{
'name': 'Stock Picking Procure Method',
'summary': 'Allows to force t | he procurement method from the picking',
'version': '12.0.1.0.0',
'category': 'Warehouse',
'author': 'Tecnativa,'
'Odoo Community | Association (OCA)',
'website': 'https://github.com/OCA/stock-logistics-warehouse',
'license': 'AGPL-3',
'depends': [
'stock',
],
'data': [
'views/stock_picking_views.xml',
],
'installable': True,
}
|
from django.utils.translation import ugettext as _
from rest_framework.authentication import BaseAuthentication
from rest_framework.exceptions import AuthenticationFailed
from allauth_api.account.rest_framework import authentication as account_auth
from allauth_api.settings import allauth_api_settings
from allauth_api.socialaccount.providers import registry
class SocialAuthentication(BaseAuthentication):
    """
    An authentication method that hands the duty off to the specified provider

    the settings.PROVIDER_PARAMETER_NAME must be present in the request data
    """

    def authenticate(self, request):
        """Delegate authentication to the provider named in the request data.

        Raises AuthenticationFailed when the provider parameter is missing
        or names an unknown provider.
        """
        provider_id = request.DATA.get(allauth_api_settings.PROVIDER_PARAMETER_NAME)
        if provider_id:
            provider = registry.by_id(provider_id)
            if provider:
                # FIX: was ``provider.authneticate`` (typo), which would raise
                # AttributeError at runtime instead of authenticating.
                return provider.authenticate(request)
            else:
                msg = "%s %s" % (_("no provider found for"), provider_id)
                raise AuthenticationFailed(msg)
        else:
            msg = "%s %s" % (allauth_api_settings.PROVIDER_PARAMETER_NAME,
                             _("parameter must be provided"))
            raise AuthenticationFailed(msg)
class BasicLogin(account_auth.BasicLogin):
    """
    Basic login flow, but authenticating through the social provider
    (SocialAuthentication) rather than the standard account backend.
    """
    auth_class = SocialAuthentication
class TokenLogin(account_auth.TokenLogin):
    """
    A login class that returns a user authentication token, authenticating
    through the social provider. This method, in its default configuration
    is only available if rest_framework.authtoken is in installed_apps
    """
    auth_class = SocialAuthentication
# class OAuth2Login(account_auth.OAuth2Login):
# """
# A login class that accepts oauth2 authentication requests and returns the appropriate
# access tokens. This login method, in its default configuration is only available if
# oauth2_provider is in installed_apps
# """
#
# auth_class = SocialAuthentication
|
#! /usr/bin/env python
"""
This program is for renaming files (through symbolic links) using a file
conversion table. The columns should be ordered as so: new directory, new id,
old directory, old file name. Columns can be separated using any standard
ASCII character.
If files are paired-end and follow the standard conventions for discriminating
forward from reverse reads (R1 and R2), then an asterics (*) can be used after
the file name (e.g samplename1_R*) instead of specifying each paired file
individually. The linked pairs will be differentiated using "forward" and
"reverse" in place of "R1" and "R2".
"""
from __future__ import print_function
import argparse
import glob
import locale
import re
import sys
import textwrap
from itertools import izip
from subprocess import Popen, PIPE
def format_io(old_name, new_name, ext=''):
    """Return *new_name* with an output extension derived from *old_name*.

    When *ext* is given it is used verbatim; otherwise the extension is
    normalized (fa/fna -> fasta, fq/fnq -> fastq) and any gz/bz2/zip
    compression suffix is preserved. Exits with status 1 on an unknown
    file type.
    """
    extensions = {'fa': 'fasta', 'fasta': 'fasta', 'fna': 'fasta',
                  'fq': 'fastq', 'fastq': 'fastq', 'fnq': 'fastq'}
    compress = ''
    if ext:
        file_end = ext
    else:
        parts = old_name.split('.')
        filetype = parts[-1].strip()
        if filetype in ["gz", "bz2", "zip"]:
            compress = ".{}".format(filetype)
            filetype = parts[-2]
        # FIX: look the extension up once (the original did the dict lookup
        # twice and left an unused ``extension`` local behind).
        try:
            file_end = extensions[filetype] + compress
        except KeyError:
            print(textwrap.fill("Error: unknown file type {}. Please make "
                "sure the filenames end in one of the supported extensions "
                "(fa, fna, fasta, fq, fnq, fastq)".format(filetype), 79),
                file=sys.stderr)
            sys.exit(1)
    return "{}.{}".format(new_name, file_end)
def main():
    """Read the conversion table and create a symbolic link per entry.

    Exits with status 1 on a malformed table or unknown extension; ``ln``
    errors are reported on stderr without aborting the run.
    """
    parser = argparse.ArgumentParser(description=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument('infile',
        help="input conversion table file")
    parser.add_argument('-e', '--ext',
        help="extension of renamed file (optional)")
    parser.add_argument('-s', '--sep',
        default=',',
        help="field separator character [default: ,]")
    args = parser.parse_args()
    with open(args.infile, 'rU') as in_h:
        for line in in_h:
            try:
                new_dir, new_id, old_dir, old_name = line.split(args.sep)
            except ValueError:
                # FIX: this message previously referenced the undefined name
                # ``infile`` and raised NameError instead of printing.
                print(textwrap.fill("Error: failed to properly parse {}. The "
                    "conversion table should contain four columns. See usage "
                    "for details".format(args.infile), 79), file=sys.stderr)
                sys.exit(1)
            new_dir = new_dir.strip()
            new_id = new_id.strip()
            old_dir = old_dir.strip()
            old_name = old_name.strip()
            if old_name[-1] == '*':
                strand_name = {'R1': 'forward', 'R2': 'reverse'}
                forwards = glob.glob('{}/{}*R1_*'.format(old_dir, old_name[:-1]))
                reverses = glob.glob('{}/{}*R2_*'.format(old_dir, old_name[:-1]))
                if len(forwards) != len(reverses):
                    print(textwrap.fill("Warning: missing pair in {}. The use "
                        "of '*' should only be used for paired-end reads in "
                        "separate files".format(old_name), 79), file=sys.stderr)
                    continue
                # Multiple lanes/chunks per sample need lane/number details
                # in the new name to keep it unique.
                add_det = len(forwards) > 1
                # FIX: iterate with an explicit strand label. Previously the
                # non-detailed branch read ``pair`` before assignment (it was
                # only set when add_det was True), raising NameError.
                for pair, filenames in (('R1', forwards), ('R2', reverses)):
                    for filename in filenames:
                        if add_det:
                            seq_detail = re.search(r'L\d{3}_R[12]_\d{3}',
                                filename).group()
                            lane, pair, number = seq_detail.split('_')
                            new_name = format_io(filename, "{}.{}.{}_{}"
                                .format(new_id, strand_name[pair], lane,
                                number), args.ext)
                        else:
                            new_name = format_io(filename, "{}.{}"
                                .format(new_id, strand_name[pair]), args.ext)
                        ln_out, ln_err = (Popen(['ln', "-s", filename, "{}/{}"
                            .format(new_dir, new_name)], stdout=PIPE,
                            stderr=PIPE).communicate())
                        if ln_err:
                            print(ln_err.decode(locale.getdefaultlocale()[1]),
                                file=sys.stderr)
            else:
                new_name = format_io(old_name, new_id, args.ext)
                new_path = new_dir + "/" + new_name
                old_path = old_dir + "/" + old_name
                ln_out, ln_err = (Popen(['ln', "-s", old_path, new_path],
                    stdout=PIPE, stderr=PIPE).communicate())
                if ln_err:
                    print(ln_err.decode(locale.getdefaultlocale()[1]),
                        file=sys.stderr)
if __name__ == '__main__':
    main()
    # Explicit success exit code.
    sys.exit(0)
|
__author__ = 'xubinggui'
class Studen | t(object):
def __init__(self, name, score):
self.name = name
self.score = score
def print_score(self):
print(self.score)
# Demo: create a student and print its score.
bart = Student('Bart Simpson', 59)
bart.print_score()
from django.contrib import admin
from django.utils.translation import ugettext_lazy as _
from tendenci.apps.perms.admin import TendenciBaseModelAdmin
from tendenci.apps.news.models import News
from tendenci.apps.news.forms import NewsForm
class NewsAdmin(TendenciBaseModelAdmin):
    """Admin configuration for News: columns, filters and grouped fieldsets."""
    list_display = ['headline', 'update_dt', 'owner_link', 'admin_perms', 'admin_status']
    list_filter = ['status_detail', 'owner_username']
    # Auto-fill the slug from the headline while typing.
    prepopulated_fields = {'slug': ['headline']}
    search_fields = ['headline', 'body']
    fieldsets = (
        (_('News Information'), {
            'fields': ('headline',
                'slug',
                'summary',
                'body',
                'group',
                'tags',
                'source',
                'website',
                'release_dt',
                'timezone',
            )
        }),
        (_('Contributor'), {'fields': ('contributor_type',)}),
        # Author contact block is styled via the "contact" CSS class.
        (_('Author'), {'fields': ('first_name',
                                  'last_name',
                                  'google_profile',
                                  'phone',
                                  'fax',
                                  'email',
                                  ),
                       'classes': ('contact',),
                       }),
        (_('Permissions'), {'fields': ('allow_anonymous_view',)}),
        # Fine-grained permissions are collapsed by default.
        (_('Advanced Permissions'), {'classes': ('collapse',), 'fields': (
            'user_perms',
            'member_perms',
            'group_perms',
        )}),
        (_('Publishing Status'), {'fields': (
            'syndicate',
            'status_detail',
        )}),
    )
    form = NewsForm
    # Newest updates first.
    ordering = ['-update_dt']
# Expose News in the Django admin with the customized ModelAdmin above.
admin.site.register(News, NewsAdmin)
|
import argparse
import os
import subprocess
import sys
SCRIPT_DIR = os.path.dirname(__file__)
class ExecException(Exception):
    """Raised when an external tool (cmake, cl) is missing or unusable."""
    pass
class Exec(object):
    """Helpers for running external commands and probing the VS build env."""
    @staticmethod
    def run(cmd, workingdir=None):
        # Capture stdout and stderr together; returns the raw bytes.
        p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, cwd=workingdir)
        out = p.communicate()[0]
        return out
    @classmethod
    def run_cmd(cls, args, workingdir=None):
        # Windows-specific: run the command through the cmd.exe shell.
        return cls.run('cmd /c %s' % args, workingdir)
    @classmethod
    def cmake_version(cls):
        """Raise ExecException when cmake is not reachable on PATH."""
        cmd = 'cmake --version'
        out = cls.run_cmd(cmd).decode()
        if 'cmake version' not in out:
            raise ExecException('Unable to find cmake, if it is installed, check your PATH variable.')
    @classmethod
    def vs_info(cls):
        """Return (arch, version) detected from the active VS command prompt.

        Parses the banner printed by ``cl``; raises ExecException when not
        running inside a Visual Studio environment.
        """
        if 'VCINSTALLDIR' not in os.environ:
            raise ExecException('Unable to detect build environment.')
        cmd = 'cl'
        out = cls.run_cmd(cmd).decode()
        if 'x86' in out:
            arch = 'x86'
        elif 'x64' in out:
            arch = 'x64'
        else:
            raise ExecException('Unable to detect build environment.')
        # Map MSC compiler versions to Visual Studio product versions.
        if '15.00' in out:
            version = 9
        elif '16.00' in out:
            version = 10
        elif '17.00' in out:
            version = 11
        elif '18.00' in out:
            version = 12
        elif '19.00' in out:
            version = 14
        else:
            raise ExecException('Unable to detect build environment.')
        return arch, version
class CMake(object):
    """Maps a Visual Studio (arch, version) pair to a CMake generator name."""

    def __init__(self, arch, version):
        assert version in [7, 8, 9, 10, 11, 12, 14], 'Unsupported version (%s)' % version
        assert arch.lower() in ['x86', 'x64'], 'Unsupported arch (%s)' % arch
        self.version = version
        self.arch = arch

    @property
    def generator(self):
        """CMake generator string, e.g. 'Visual Studio 12 Win64'."""
        suffix = 'Win64' if self.arch.lower() == 'x64' else ''
        return ('Visual Studio %s %s' % (self.version, suffix)).strip()
def generate(arch=None, version=None):
    """Run cmake to generate a VS solution under build/<arch>."""
    cmake = CMake(arch=arch, version=version)
    target_dir = os.path.join(SCRIPT_DIR, 'build', cmake.arch)
    if not os.path.exists(target_dir):
        os.makedirs(target_dir)
    output = Exec.run_cmd(
        'cmake -G "%s" ../.. --debug-output' % cmake.generator, workingdir=target_dir).decode()
    print(output)
def manage(generate_sln=True, build_sln=False, test_sln=False, **kwargs):
    """Cascade the requested stages (test -> build -> generate) and run them.

    Only generation is implemented here; requesting build or test implies
    generation.
    """
    if test_sln is True:
        build_sln = True
    if build_sln is True:
        generate_sln = True
    # Guard clause instead of if/else: nothing enabled is an error.
    if generate_sln is not True:
        raise Exception('Did not enable at least one of "generate", "build" or "test"')
    generate(**kwargs)
def main(argv=None):
    """Parse CLI options and run the requested CMake management steps.

    ``--arch``/``--version`` may be omitted when run from a VS command
    prompt: they are then detected via ``Exec.vs_info()``.

    :param argv: argument list; defaults to ``sys.argv[1:]`` resolved at
        call time (the original ``argv=sys.argv[1:]`` default was frozen
        at import time).
    """
    if argv is None:
        argv = sys.argv[1:]
    parser = argparse.ArgumentParser(description='Manage CMake projects')
    parser.add_argument('--generate', dest='generate_sln', action='store_true', default=True,
                        help='Generate VS Solution file')
    parser.add_argument('--build', dest='build_sln', action='store_true', default=False, help='Build source')
    parser.add_argument('--test', dest='test_sln', action='store_true', default=False, help='Run tests')
    parser.add_argument('--arch', default=None, choices=['x86', 'x64'],
                        help='Arch of Visual Studio if not run from VS command prompt')
    parser.add_argument('--version', default=None, choices=[7, 8, 9, 10, 11, 12, 14], type=int,
                        help='Version of Visual Studio of not run from VS command prompt')
    args = parser.parse_args(argv)
    Exec.cmake_version()  # Make sure we have located cmake
    if args.arch is None or args.version is None:
        # Probe the VS environment only for values not given explicitly.
        vs_info = Exec.vs_info()
        if args.arch is None:
            args.arch = vs_info[0]
        if args.version is None:
            args.version = vs_info[1]
    manage(**vars(args))
# Script entry point.
if __name__ == '__main__':
    main()
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes | may cause incorrect behavior and will be lost | if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class StorageProfile(Model):
    """Specifies the storage settings for the virtual machine disks.

    :param image_reference: Specifies information about the image to use
     (platform, marketplace or virtual machine image). Required when
     creating a VM from an image; not used in other creation operations.
    :type image_reference:
     ~azure.mgmt.compute.v2016_03_30.models.ImageReference
    :param os_disk: Specifies information about the operating system disk
     used by the virtual machine.
    :type os_disk: ~azure.mgmt.compute.v2016_03_30.models.OSDisk
    :param data_disks: Specifies the parameters used to add data disks to
     the virtual machine.
    :type data_disks: list[~azure.mgmt.compute.v2016_03_30.models.DataDisk]
    """

    # msrest (de)serialization map: python attribute -> wire key and type.
    _attribute_map = {
        'image_reference': {'key': 'imageReference', 'type': 'ImageReference'},
        'os_disk': {'key': 'osDisk', 'type': 'OSDisk'},
        'data_disks': {'key': 'dataDisks', 'type': '[DataDisk]'},
    }

    def __init__(self, *, image_reference=None, os_disk=None, data_disks=None, **kwargs) -> None:
        super(StorageProfile, self).__init__(**kwargs)
        # Plain attribute assignments; msrest handles (de)serialization.
        self.data_disks = data_disks
        self.os_disk = os_disk
        self.image_reference = image_reference
|
#!/usr/bin/env python
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
""" A collator for Mojo Application Manifests """
import argparse
import json
import shutil
import sys
import urlparse
def ParseJSONFile(filename):
with open(filename) as json_file:
try:
return json.load(json_file)
except ValueError:
print "%s is not a valid JSON document" % filename
return None
def main( | ):
parser = argparse.ArgumentParser(
description="Collate Mojo application manifests.")
parser.add_argument("--parent")
parser.add_argument("--output")
parser.add_argument("--application-name")
args, children = parser.parse_known_args()
parent = ParseJSONFile(args.parent)
if parent == None:
return 1
app_path = parent['name'].split(':')[1]
if app_path.startswith('//'):
raise ValueError("Application name path component '%s' must not start " \
| "with //" % app_path)
if args.application_name != app_path:
raise ValueError("Application name '%s' specified in build file does not " \
"match application name '%s' specified in manifest." %
(args.application_name, app_path))
applications = []
for child in children:
application = ParseJSONFile(child)
if application == None:
return 1
applications.append(application)
if len(applications) > 0:
parent['applications'] = applications
with open(args.output, 'w') as output_file:
json.dump(parent, output_file)
return 0
# Script entry point: propagate main()'s status code to the shell.
if __name__ == "__main__":
    sys.exit(main())
|
from gevent import monkey
# Patch the stdlib (including subprocess) for cooperative gevent
# scheduling before anything else is imported.
monkey.patch_all(subprocess=True)

import os
import sys

# Make the scalrpy package and the test step library importable when this
# file is executed from its own directory.
cwd = os.path.dirname(os.path.abspath(__file__))
scalrpy_dir = os.path.normpath(os.path.join(cwd, '../../..'))
sys.path.insert(0, scalrpy_dir)
scalrpytests_dir = os.path.join(cwd, '../..')
sys.path.insert(0, scalrpytests_dir)

import time

from gevent import pywsgi
from scalrpy.util import rpc
from scalrpy.util import helper
from scalrpy.load_statistics_cleaner import LoadStatisticsCleaner
from scalrpytests.steplib import lib
# Star import provides names used below (dbmanager, random, world helpers).
from scalrpytests.steplib.steps import *
from lettuce import step, before, after
class LoadStatisticsCleanerScript(lib.Script):
    # Test-harness wrapper that launches the LoadStatisticsCleaner app.
    app_cls = LoadStatisticsCleaner
    name = 'load_statistics_cleaner'

# Register the wrapper so the shared steplib steps start this script.
lib.ScriptCls = LoadStatisticsCleanerScript
@step(u"White | Rabbit has (\d+) farms in database")
def fill_tables(step, count):
db = dbmanager.DB(lib.world.config['connections']['mysql'])
lib.world.f | arms_ids = list()
for i in range(int(count)):
while True:
farm_id = random.randint(1, 9999)
query = "SELECT id FROM farms WHERE id={0}".format(farm_id)
if bool(db.execute(query)):
continue
break
query = "INSERT INTO farms (id) VALUES ({0})".format(farm_id)
db.execute(query)
try:
os.makedirs(os.path.join(lib.world.config['rrd_dir'], helper.x1x2(farm_id), str(farm_id)))
except OSError as e:
if e.args[0] != 17:
raise
lib.world.farms_ids.append(farm_id)
time.sleep(1)
@step(u"White Rabbit has (\d+) farms for delete")
def create_folder(step, count):
    """Create rrd dirs for *count* random farm ids that have no DB row.

    Also creates two folders outside the expected layout that the later
    steps check are left untouched.
    """
    lib.world.farms_ids_for_delete = list()
    for i in range(int(count)):
        while True:
            farm_id_for_delete = random.randint(1, 9999)
            try:
                os.makedirs('%s/%s/%s' % (
                    lib.world.config['rrd_dir'],
                    helper.x1x2(farm_id_for_delete),
                    farm_id_for_delete)
                )
                lib.world.farms_ids_for_delete.append(farm_id_for_delete)
                break
            except OSError as e:
                if e.args[0] != 17:  # 17 == EEXIST: retry with another id
                    raise
    # Folders with unexpected names/layout.
    try:
        os.makedirs('%s/wrongfolder' % lib.world.config['rrd_dir'])
    except OSError as e:
        if e.args[0] != 17:
            raise
    try:
        os.makedirs('%s/x1x6/wrongfolder' % lib.world.config['rrd_dir'])
    except OSError as e:
        if e.args[0] != 17:
            raise
@step(u"White Rabbit sees right folders were deleted")
def check_folders(step):
    """Assert the rrd dirs created for farms without DB rows are gone."""
    for farm_id_for_delete in lib.world.farms_ids_for_delete:
        assert not os.path.exists('%s/%s/%s' % (
            lib.world.config['rrd_dir'],
            helper.x1x2(farm_id_for_delete),
            farm_id_for_delete)
        )
@step(u"White Rabbit sees right folders were not deleted")
def check_folders_not_deleted(step):
    """Assert the rrd dirs of farms that DO have DB rows were kept.

    Renamed from ``check_folders`` so it no longer shadows the step
    function defined just above; lettuce dispatches on the step regex,
    not the function name, so behavior is unchanged.
    """
    for farm_id in lib.world.farms_ids:
        assert os.path.exists('%s/%s/%s' % (
            lib.world.config['rrd_dir'],
            helper.x1x2(farm_id),
            farm_id)
        ), farm_id
|
#!/usr/bin/env python
###############################################################################
# $Id: paux.py 32163 2015-12-13 17:44:50Z goatbar $
#
# Project: GDAL/OGR Test Suite
# Purpose: Test read/write functionality for PAux driver.
# Author: Frank Warmerdam <warmerdam@pobox.com>
#
###############################################################################
# Copyright (c) 2004, Frank Warmerdam <warmerdam@pobox.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
######################################################### | ######################
import sys

# Make the shared gdaltest helpers importable from the sibling pymod dir.
sys.path.append( '../pymod' )

import gdaltest
###############################################################################
# Read test of simple byte reference data.
def paux_1():
    """Open the small 16-bit raw sample and checksum-verify band 2."""
    test = gdaltest.GDALTest('PAux', 'small16.raw', 2, 12816)
    return test.testOpen()
###############################################################################
# Test copying.
def paux_2():
    """CreateCopy from byte.tif, also validating the geotransform."""
    test = gdaltest.GDALTest('PAux', 'byte.tif', 1, 4672)
    return test.testCreateCopy(check_gt=1)
###############################################################################
# Test /vsimem based.
def paux_3():
    """CreateCopy from byte.tif into the /vsimem virtual filesystem."""
    test = gdaltest.GDALTest('PAux', 'byte.tif', 1, 4672)
    return test.testCreateCopy(vsimem=1)
###############################################################################
# Cleanup.
def paux_cleanup():
    """Remove temporary files created by the tests above."""
    gdaltest.clean_tmp()
    return 'success'
# Tests run in list order; cleanup must stay last.
gdaltest_list = [
    paux_1,
    paux_2,
    paux_3,
    paux_cleanup ]

# Standalone entry point for the gdaltest harness.
if __name__ == '__main__':
    gdaltest.setup_run( 'paux' )
    gdaltest.run_tests( gdaltest_list )
    gdaltest.summarize()
|
__author | __ = 'Anat | oli Kalysch'
|
n mediatypes) and not
xbmc.getCondVisibility("Skin.HasSetting(SkinHelper.view.Disabled.%s)" % viewid)):
image = "special://skin/extras/viewthumbs/%s.jpg" % viewid
listitem = xbmcgui.ListItem(label=label, iconImage=image)
listitem.setProperty("viewid", viewid)
listitem.setProperty("icon", image)
all_views.append(listitem)
itemcount += 1
dialog = DialogSelect("DialogSelect.xml", "", listing=all_views,
windowtitle=self.addon.getLocalizedString(32012), richlayout=True)
dialog.autofocus_id = cur_view_select_id
dialog.doModal()
result = dialog.result
del dialog
if result:
viewid = result.getProperty("viewid")
label = result.getLabel().decode("utf-8")
return (viewid, label)
else:
return (None, None)
# pylint: disable-msg=too-many-local-variables
def enableviews(self):
    '''show select dialog to enable/disable views'''
    all_views = []
    views_file = xbmc.translatePath('special://skin/extras/views.xml').decode("utf-8")
    richlayout = self.params.get("richlayout", "") == "true"
    if xbmcvfs.exists(views_file):
        # Parse the skin's views.xml; build one (pre)selected item per view.
        doc = parse(views_file)
        listing = doc.documentElement.getElementsByTagName('view')
        for view in listing:
            view_id = view.attributes['value'].nodeValue
            label = xbmc.getLocalizedString(int(view.attributes['languageid'].nodeValue))
            desc = label + " (" + str(view_id) + ")"
            image = "special://skin/extras/viewthumbs/%s.jpg" % view_id
            listitem = xbmcgui.ListItem(label=label, label2=desc, iconImage=image)
            listitem.setProperty("viewid", view_id)
            # A view is enabled unless its skin "Disabled" bool is set.
            if not xbmc.getCondVisibility("Skin.HasSetting(SkinHelper.view.Disabled.%s)" % view_id):
                listitem.select(selected=True)
            # Views flagged excludefromdisable in views.xml are never offered.
            excludefromdisable = False
            try:
                excludefromdisable = view.attributes['excludefromdisable'].nodeValue == "true"
            except Exception:
                pass
            if not excludefromdisable:
                all_views.append(listitem)
    dialog = DialogSelect(
        "DialogSelect.xml",
        "",
        listing=all_views,
        windowtitle=self.addon.getLocalizedString(32013),
        multiselect=True, richlayout=richlayout)
    dialog.doModal()
    result = dialog.result
    del dialog
    if result:
        # Persist the user's selections as skin booleans.
        for item in result:
            view_id = item.getProperty("viewid")
            if item.isSelected():
                # view is enabled
                xbmc.executebuiltin("Skin.Reset(SkinHelper.view.Disabled.%s)" % view_id)
            else:
                # view is disabled
                xbmc.executebuiltin("Skin.SetBool(SkinHelper.view.Disabled.%s)" % view_id)
# pylint: enable-msg=too-many-local-variables
def setforcedview(self):
    '''helper that sets a forced view for a specific content type'''
    content_type = self.params.get("contenttype")
    if content_type:
        current_view = xbmc.getInfoLabel("Skin.String(SkinHelper.ForcedViews.%s)" % content_type)
        if not current_view:
            current_view = "0"
        # Let the user pick a view, preselecting the current one.
        view_id, view_label = self.selectview(content_type, current_view, True)
        if view_id or view_label:
            # Persist both the id and a human-readable label as skin strings.
            xbmc.executebuiltin("Skin.SetString(SkinHelper.ForcedViews.%s,%s)" % (content_type, view_id))
            xbmc.executebuiltin("Skin.SetString(SkinHelper.ForcedViews.%s.label,%s)" % (content_type, view_label))
@staticmethod
def get_youtube_listing(searchquery):
    '''get items from youtube plugin by query'''
    # Query the YouTube add-on's search path through the Kodi files API.
    lib_path = u"plugin://plugin.video.youtube/kodion/search/query/?q=%s" % searchquery
    return KodiDb().files(lib_path)
def searchyoutube(self):
    '''helper to search youtube for the given title'''
    xbmc.executebuiltin("ActivateWindow(busydialog)")
    title = self.params.get("title", "")
    window_header = self.params.get("header", "")
    results = []
    for media in self.get_youtube_listing(title):
        # Skip folder nodes; only offer playable items.
        if not media["filetype"] == "directory":
            label = media["label"]
            label2 = media["plot"]
            image = ""
            if media.get('art'):
                if media['art'].get('thumb'):
                    image = (media['art']['thumb'])
            listitem = xbmcgui.ListItem(label=label, label2=label2, iconImage=image)
            listitem.setProperty("path", media["file"])
            results.append(listitem)
    # finished lookup - display listing with results
    xbmc.executebuiltin("dialog.Close(busydialog)")
    dialog = DialogSelect("DialogSelect.xml", "", listing=results, windowtitle=window_header,
                          multiselect=False, richlayout=True)
    dialog.doModal()
    result = dialog.result
    del dialog
    if result:
        # Close any open info dialogs before starting playback.
        if xbmc.getCondVisibility(
                "Window.IsActive(script-skin_helper_service-CustomInfo.xml) | "
                "Window.IsActive(movieinformation)"):
            xbmc.executebuiltin("Dialog.Close(movieinformation)")
            xbmc.executebuiltin("Dialog.Close(script-skin_helper_service-CustomInfo.xml)")
            xbmc.sleep(1000)
        xbmc.executebuiltin('PlayMedia("%s")' % result.getProperty("path"))
    del result
def getcastmedia(self):
    '''helper to show a dialog with all media for a specific actor'''
    xbmc.executebuiltin("ActivateWindow(busydialog)")
    name = self.params.get("name", "")
    window_header = self.params.get("name", "")
    results = []
    items = self.kodidb.castmedia(name)
    items = process_method_on_list(self.kodidb.prepare_listitem, items)
    for item in items:
        # Library nodes get browsed; plain files are played directly.
        if item["file"].startswith("videodb://"):
            item["file"] = "ActivateWindow(Videos,%s,return)" % item["file"]
        else:
            item["file"] = 'PlayMedia("%s")' % item["file"]
        results.append(self.kodidb.create_listitem(item, False))
    # finished lookup - display listing with results
    xbmc.executebuiltin("dialog.Close(busydialog)")
    dialog = DialogSelect("DialogSelect.xml", "", listing=results, windowtitle=window_header, richlayout=True)
    dialog.doModal()
    result = dialog.result
    del dialog
    if result:
        # Back out of any modal dialogs before executing the stored action.
        while xbmc.getCondVisibility("System.HasModalDialog"):
            xbmc.executebuiltin("Action(Back)")
            xbmc.sleep(300)
        xbmc.executebuiltin(result.getfilename())
    del result
def setfocus(self):
    '''helper to set focus on a list or control'''
    control = self.params.get("control")
    fallback = self.params.get("fallback")
    position = self.params.get("position", "0")
    relativeposition = self.params.get("relativeposition")
    if relativeposition:
        # relativeposition is 1-based; Control.SetFocus expects 0-based.
        position = int(relativeposition) - 1
    count = 0
    if control:
        # Poll until the control has focus; wait out the busy dialog, and
        # give up after ~20 attempts (or when the control is unusable),
        # falling back to the optional fallback control.
        while not xbmc.getCondVisibility("Control.HasFocus(%s)" % control):
            if xbmc.getCondVisibility("Window.IsActive(busydialog)"):
                xbmc.sleep(150)
                continue
            elif count == 20 or (xbmc.getCondVisibility(
                    "!Control.IsVisible(%s) | "
                    "!IntegerGreaterThan(Container(%s).NumItems,0)" % (control, control))):
                if fallback:
                    xbmc.executebuiltin("Control.SetFocus(%s)" % fallback)
                break
            else:
                xbmc.executebuiltin("Control.SetFocus(%s,%s)" % (control, position))
                xbmc.sleep(50)
                count += 1
d |
import logging
import pprint
import requests
logger = logging.getLogger(__name__)
class RocketChatBase(object):
    """Base class for Rocket.Chat REST API calls.

    Subclasses implement ``build_endpoint`` (and optionally the other
    ``build_*``/``post_response`` hooks) and set ``method``. Instantiating
    logs in immediately and stores the auth token in the headers.
    """

    settings = None       # dict with 'domain' plus credentials or token/user_id
    endpoint = None       # REST endpoint path (subclass responsibility)
    headers = {}          # NOTE(review): class-level dict, shared across instances
    method = 'get'        # HTTP verb used by call()
    auth_token = None
    auth_user_id = None
    files = None

    def __init__(self, settings=None, *args, **kwargs):
        self.settings = settings
        # Prepare for a call by fetching an Auth Token
        self.set_auth_token()
        self.set_auth_headers()

    def set_auth_token(self):
        # Reuse a pre-supplied token/user_id pair when both are present;
        # otherwise log in with username/password.
        if self.settings.get('token') and self.settings.get('user_id'):
            self.auth_token = self.settings.get('token')
            self.auth_user_id = self.settings.get('user_id')
            return
        url = '{domain}/api/v1/login'.format(
            domain=self.settings['domain']
        )
        response = requests.post(url,
                                 data={'user': self.settings['username'],
                                       'password': self.settings['password']})
        try:
            self.auth_token = response.json()['data']['authToken']
            self.auth_user_id = response.json()['data']['userId']
        except KeyError:
            # Missing keys means login failed; surface the HTTP error instead.
            response.raise_for_status()

    def set_auth_headers(self):
        # Install the credentials Rocket.Chat expects on every request.
        self.headers['X-Auth-Token'] = self.auth_token
        self.headers['X-User-Id'] = self.auth_user_id

    def logoff(self):
        # Invalidate the current session server-side; response is ignored.
        url = '{domain}/api/v1/logout'.format(
            domain=self.settings['domain']
        )
        requests.get(url, headers=self.headers)

    def post_response(self, result):
        # Hook for subclasses to post-process the decoded JSON response.
        return result

    def build_endpoint(self, **kwargs):
        """
        Build the endpoint for the user given some kwargs
        from the initial calling.
        :return:
        """
        raise NotImplementedError()

    def build_payload(self, **kwargs):
        """
        Build a payload dict that will be passed directly to the
        endpoint. If you need to pass this as plain text or whatever
        you'll need to the dumping here.
        :return:
        """
        return None

    def build_files(self, **kwargs):
        """
        Build files
        :param kwargs:
        :return:
        """
        return None

    def call(self, *args, **kwargs):
        """
        Perform the HTTP request, log it, log off and return the decoded
        JSON (via post_response).
        :param kwargs: forwarded to the build_* hooks; may include 'timeout'.
        :return:
        """
        timeout = kwargs.get('timeout', None)
        url = '{domain}{endpoint}'.format(
            domain=self.settings['domain'],
            endpoint=self.build_endpoint(**kwargs)
        )
        result = requests.request(method=self.method, url=url,
                                  data=self.build_payload(**kwargs),
                                  headers=self.headers, timeout=timeout,
                                  files=self.build_files(**kwargs))
        # NOTE(review): the log records self.files, but the request above was
        # made with build_files(**kwargs) — the two may differ.
        request_data = {
            'url': url,
            'method': self.method,
            'payload': self.build_payload(**kwargs),
            'headers': self.headers,
            'files': self.files
        }
        logger.debug('API Request - {request}'.format(
            request=pprint.pformat(request_data)
        ))
        result.raise_for_status()
        self.logoff()
        try:
            logger.debug('API Response - {data}'.format(
                data=pprint.pformat(result.json())
            ))
            return self.post_response(result.json())
        except Exception as e:
            # Non-JSON bodies (or a post_response failure) land here.
            logger.error('RESTful {classname} call failed. {message}'.format(
                classname=self.__class__.__name__, message=e),
                exc_info=True)
            raise e
class PostMixin(object):
    """Mixin that switches a RocketChatBase subclass to HTTP POST."""
    method = 'post'
|
#!/u | sr/bin/python3
#
# progress.py
#
# Author: Billy Wilson Arante
# Created: 2016/02/05 PHT
# Modified: 2016/08/19 PHT
#
def progress():
    """Progress icon

    The Python 3 version of loading_icon.py. Spins forever; interrupt
    with Ctrl-C.
    """
    import time  # local import keeps the module surface unchanged

    while True:
        for frame in ["/", "-", "|", "\\", "|"]:
            # flush=True: without it the \r-overwritten frame can sit in
            # stdout's buffer and the spinner never appears on screen.
            print("%s\r" % frame, end="", flush=True)
            # Small delay keeps the animation visible and avoids a
            # 100%-CPU busy loop.
            time.sleep(0.1)
def main():
    """Main: run the spinner until interrupted."""
    progress()

# Script entry point.
if __name__ == "__main__":
    main()
|
#!/usr/bin/env python
#
# Copyright (C) 2009-2020 the sqlparse authors and contributors
# <see AUTHORS file>
#
# This module is part of python-sqlparse and is released under
# the BSD License: https://opensource.org/licenses/BSD-3-Clause
"""Module that contains the command line app.
Why does this file exist, and why not put this in __main__?
You might be tempted to import things from __main__ later, but that will
cause problems: the code will get executed twice:
- When you run `python -m sqlparse` python will execute
``__main__.py`` as a script. That means there won't be any
``sqlparse.__main__`` in ``sys.modules``.
- When you import __main__ it will get executed again (as a module) because
there's no ``sqlparse.__main__`` in ``sys.modules``.
Also see (1) from http://click.pocoo.org/5/setuptools/#setuptools-integration
"""
import argparse
import sys
f | rom io import TextIOWrapper
import sqlparse
from sqlparse.exceptions import SQLParseError
# TODO: Add CLI Tests
# TODO: Simplify formatter by using argparse `type` arguments
def create_parser():
    """Create the argparse parser for the ``sqlformat`` command line.

    The parsed namespace maps directly onto ``sqlparse.format`` options
    (the caller passes ``vars(args)`` through the formatter).
    """
    def _bool_arg(value):
        # Fix: `type=bool` is broken in argparse — bool("False") is True
        # because any non-empty string is truthy, so `--comma_first False`
        # used to enable the option. Accept explicit spellings instead.
        return value.strip().lower() in ('true', 't', 'yes', 'y', '1')

    _CASE_CHOICES = ['upper', 'lower', 'capitalize']

    parser = argparse.ArgumentParser(
        prog='sqlformat',
        description='Format FILE according to OPTIONS. Use "-" as FILE '
                    'to read from stdin.',
        usage='%(prog)s [OPTIONS] FILE, ...',
    )

    parser.add_argument('filename')

    parser.add_argument(
        '-o', '--outfile',
        dest='outfile',
        metavar='FILE',
        help='write output to FILE (defaults to stdout)')

    parser.add_argument(
        '--version',
        action='version',
        version=sqlparse.__version__)

    group = parser.add_argument_group('Formatting Options')

    group.add_argument(
        '-k', '--keywords',
        metavar='CHOICE',
        dest='keyword_case',
        choices=_CASE_CHOICES,
        help='change case of keywords, CHOICE is one of {}'.format(
            ', '.join('"{}"'.format(x) for x in _CASE_CHOICES)))

    group.add_argument(
        '-i', '--identifiers',
        metavar='CHOICE',
        dest='identifier_case',
        choices=_CASE_CHOICES,
        help='change case of identifiers, CHOICE is one of {}'.format(
            ', '.join('"{}"'.format(x) for x in _CASE_CHOICES)))

    group.add_argument(
        '-l', '--language',
        metavar='LANG',
        dest='output_format',
        choices=['python', 'php'],
        help='output a snippet in programming language LANG, '
             'choices are "python", "php"')

    group.add_argument(
        '--strip-comments',
        dest='strip_comments',
        action='store_true',
        default=False,
        help='remove comments')

    group.add_argument(
        '-r', '--reindent',
        dest='reindent',
        action='store_true',
        default=False,
        help='reindent statements')

    group.add_argument(
        '--indent_width',
        dest='indent_width',
        default=2,
        type=int,
        help='indentation width (defaults to 2 spaces)')

    group.add_argument(
        '--indent_after_first',
        dest='indent_after_first',
        action='store_true',
        default=False,
        help='indent after first line of statement (e.g. SELECT)')

    group.add_argument(
        '--indent_columns',
        dest='indent_columns',
        action='store_true',
        default=False,
        help='indent all columns by indent_width instead of keyword length')

    group.add_argument(
        '-a', '--reindent_aligned',
        action='store_true',
        default=False,
        help='reindent statements to aligned format')

    group.add_argument(
        '-s', '--use_space_around_operators',
        action='store_true',
        default=False,
        help='place spaces around mathematical operators')

    group.add_argument(
        '--wrap_after',
        dest='wrap_after',
        default=0,
        type=int,
        help='Column after which lists should be wrapped')

    group.add_argument(
        '--comma_first',
        dest='comma_first',
        default=False,
        type=_bool_arg,
        help='Insert linebreak before comma (default False)')

    group.add_argument(
        '--encoding',
        dest='encoding',
        default='utf-8',
        help='Specify the input encoding (default utf-8)')

    return parser
def _error(msg):
"""Print msg and optionally exit with return code exit_."""
sys.stderr.write('[ERROR] {}\n'.format(msg))
return 1
def main(args=None):
    """Entry point for the sqlformat CLI.

    Reads SQL from a file or stdin, formats it with the parsed options and
    writes the result to stdout or ``--outfile``.

    :param args: argument list for argparse (defaults to ``sys.argv[1:]``).
    :return: process exit status (0 on success, 1 on any handled error).
    """
    parser = create_parser()
    args = parser.parse_args(args)

    if args.filename == '-':  # read from stdin
        # Honour --encoding on the binary stdin buffer; detach afterwards so
        # sys.stdin itself is not closed with the wrapper.
        wrapper = TextIOWrapper(sys.stdin.buffer, encoding=args.encoding)
        try:
            data = wrapper.read()
        finally:
            wrapper.detach()
    else:
        try:
            with open(args.filename, encoding=args.encoding) as f:
                data = f.read()  # simpler than ''.join(f.readlines())
        except OSError as e:
            return _error(
                'Failed to read {}: {}'.format(args.filename, e))

    close_stream = False
    if args.outfile:
        try:
            stream = open(args.outfile, 'w', encoding=args.encoding)
            close_stream = True
        except OSError as e:
            return _error('Failed to open {}: {}'.format(args.outfile, e))
    else:
        stream = sys.stdout

    formatter_opts = vars(args)
    try:
        formatter_opts = sqlparse.formatter.validate_options(formatter_opts)
    except SQLParseError as e:
        # Bug fix: previously this early return leaked the opened outfile.
        if close_stream:
            stream.close()
        return _error('Invalid options: {}'.format(e))

    s = sqlparse.format(data, **formatter_opts)
    stream.write(s)
    stream.flush()
    if close_stream:
        stream.close()
    return 0
|
from django.db import models
class Log(models.Model):
    """A single classified log entry reported by a device."""

    entry = models.CharField(max_length=255)
    comments = models.TextField()
    device_name = models.CharField(max_length=200)
    classification = models.CharField(max_length=200)
    pub_date = models.DateTimeField('date published')

    def __unicode__(self):
        # Human-readable identity used in the admin / shell (Python 2).
        return self.device_name + " " + self.classification

    def was_published_recently(self):
        """Return True when pub_date falls within the last day.

        Fix: ``datetime`` and ``timezone`` were never imported in this
        module, so this method raised NameError at runtime; import them
        locally to keep the module header untouched.
        """
        import datetime
        from django.utils import timezone
        return self.pub_date >= timezone.now() - datetime.timedelta(days=1)
    # Admin list display metadata for the method-based column.
    was_published_recently.admin_order_field = 'pub_date'
    was_published_recently.boolean = True
    was_published_recently.short_description = 'Published recently?'
|
# -*- coding: utf-8 -*-
# ###########################################################################
#
# Author: Luis Felipe Mileo
# Fernando Marcato Rodrigues
# Daniel Sadamo Hirayama
# Copyright 2015 KMEE - www.kmee.com.br
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import models, api
class PaymentOrderCreate(models.TransientModel):
    """Wizard extension tailoring move-line selection to CNAB layouts."""
    _inherit = 'payment.order.create'

    @api.multi
    def extend_payment_order_domain(self, payment_order, domain):
        # Adjust the move-line search domain in place depending on the CNAB
        # layout code (240/400/500) and the payment order type.
        super(PaymentOrderCreate, self).extend_payment_order_domain(
            payment_order, domain)
        if payment_order.mode.type.code == '240':
            if payment_order.mode.payment_order_type == 'cobranca':
                domain += [
                    ('debit', '>', 0)
                ]
                # TODO: Refactory this
                # Remove the payment-mode and maturity-date clauses added by
                # the base wizard, each together with the operator that
                # precedes it in the polish-notation domain.
                index = domain.index(('invoice.payment_mode_id', '=', False))
                del domain[index - 1]
                domain.remove(('invoice.payment_mode_id', '=', False))
                index = domain.index(('date_maturity', '<=', self.duedate))
                del domain[index - 1]
                domain.remove(('date_maturity', '=', False))
                domain.remove(('date_maturity', '<=', self.duedate))
        elif payment_order.mode.type.code == '400':
            if payment_order.mode.payment_order_type == 'cobranca':
                domain += [
                    ('debit', '>', 0),
                    ('account_id.type', '=', 'receivable'),
                    '&',
                    ('payment_mode_id', '=', payment_order.mode.id),
                    '&',
                    ('invoice.state', '=', 'open'),
                    ('invoice.fiscal_category_id.property_journal.revenue_expense', '=', True)
                ]
                # TODO: Refactory this
                # TODO: domain for the move_line state.
                # index = domain.index(('invoice.payment_mode_id', '=', False))
                # del domain[index - 1]
                # domain.removemove(('invoice.payment_mode_id', '=', False))
                # index = domain.index(('date_maturity', '<=', self.duedate))
                # del domain[index - 1]
                # domain.remove(('date_maturity', '=', False))
                # domain.remove(('date_maturity', '<=', self.duedate))
        elif payment_order.mode.type.code == '500':
            if payment_order.mode.payment_order_type == 'payment':
                domain += [
                    '&', ('credit', '>', 0),
                    ('account_id.type', '=', 'payable')
                ]
                # index = domain.index(('invoice.payment_mode_id', '=', False))
                # del domain[index - 1]
                # domain.remove(('invoice.payment_mode_id', '=', False))
                # index = domain.index(('date_maturity', '<=', self.duedate))
                # del domain[index - 1]
                # domain.remove(('date_maturity', '=', False))
                # domain.remove(('date_maturity', '<=', self.duedate))
                # Payments target payables, so drop the base receivable
                # clause and its preceding operator.
                index = domain.index(('account_id.type', '=', 'receivable'))
                del domain[index - 1]
                domain.remove(('account_id.type', '=', 'receivable'))
        return True

    @api.multi
    def _prepare_payment_line(self, payment, line):
        res = super(PaymentOrderCreate, self)._prepare_payment_line(
            payment, line)
        # res['communication2'] = line.payment_mode_id.comunicacao_2
        res['percent_interest'] = line.payment_mode_id.cnab_percent_interest
        if payment.mode.type.code == '400':
            # write bool to move_line to avoid it being added on cnab again
            self.write_cnab_rejected_bool(line)
        return res

    @api.multi
    def filter_lines(self, lines):
        """ Filter move lines before proposing them for inclusion
        in the payment order.
        This implementation filters out move lines that are already
        included in draft or open payment orders. This prevents the
        user to include the same line in two different open payment
        orders. When the payment order is sent, it is assumed that
        the move will be reconciled soon (or immediately with
        account_banking_payment_transfer), so it will not be
        proposed anymore for payment.
        See also https://github.com/OCA/bank-payment/issues/93.
        :param lines: recordset of move lines
        :returns: list of move line ids
        """
        self.ensure_one()
        payment_lines = self.env['payment.line'].\
            search([('order_id.state', 'in', ('draft', 'open', 'done')),
                    ('move_line_id', 'in', lines.ids)])
        # If it was already exported but its cnab_rejected flag is true,
        # it may be added again.
        to_exclude = set([l.move_line_id.id for l in payment_lines
                          if not l.move_line_id.is_cnab_rejected])
        return [l.id for l in lines if l.id not in to_exclude]

    @api.multi
    def write_cnab_rejected_bool(self, line):
        # NOTE(review): sets is_cnab_rejected to False, which makes
        # filter_lines exclude the line from future orders; the method name
        # suggests the opposite — confirm the intended polarity.
        line.write({'is_cnab_rejected': False})
|
operty for **self.__splitter** attribute.
:return: self.__splitter.
:rtype: unicode
"""
return self.__splitter
@splitter.setter
@foundations.exceptions.handle_exceptions(foundations.exceptions.ProgrammingError)
def splitter(self, value):
    """
    Setter for **self.__splitter** attribute (read only: always raises).

    :param value: Attribute value.
    :type value: unicode
    """
    message = "{0} | '{1}' attribute is read only!".format(self.__class__.__name__, "splitter")
    raise foundations.exceptions.ProgrammingError(message)
@splitter.deleter
@foundations.exceptions.handle_exceptions(foundations.exceptions.ProgrammingError)
def splitter(self):
    """
    Deleter for **self.__splitter** attribute (not deletable: always raises).
    """
    message = "{0} | '{1}' attribute is not deletable!".format(self.__class__.__name__, "splitter")
    raise foundations.exceptions.ProgrammingError(message)
@property
def headers(self):
    """
    Property for **self.__headers** attribute.

    Read only: the matching setter and deleter always raise.

    :return: self.__headers.
    :rtype: unicode
    """
    return self.__headers
@headers.setter
@foundations.exceptions.handle_exceptions(foundations.exceptions.ProgrammingError)
def headers(self, value):
    """
    Setter for **self.__headers** attribute (read only: always raises).

    :param value: Attribute value.
    :type value: unicode
    """
    message = "{0} | '{1}' attribute is read only!".format(self.__class__.__name__, "headers")
    raise foundations.exceptions.ProgrammingError(message)
@headers.deleter
@foundations.exceptions.handle_exceptions(foundations.exceptions.ProgrammingError)
def headers(self):
    """
    Deleter for **self.__headers** attribute (not deletable: always raises).
    """
    message = "{0} | '{1}' attribute is not deletable!".format(self.__class__.__name__, "headers")
    raise foundations.exceptions.ProgrammingError(message)
@property
def application_changes_url(self):
    """
    Property for **self.__application_changes_url** attribute.

    Read only: the matching setter and deleter always raise.

    :return: self.__application_changes_url.
    :rtype: unicode
    """
    return self.__application_changes_url
@application_changes_url.setter
@foundations.exceptions.handle_exceptions(foundations.exceptions.ProgrammingError)
def application_changes_url(self, value):
    """
    Setter for **self.__application_changes_url** attribute (read only: always raises).

    :param value: Attribute value.
    :type value: unicode
    """
    message = "{0} | '{1}' attribute is read only!".format(self.__class__.__name__, "application_changes_url")
    raise foundations.exceptions.ProgrammingError(message)
@application_changes_url.deleter
@foundations.exceptions.handle_exceptions(foundations.exceptions.ProgrammingError)
def application_changes_url(self):
    """
    Deleter for **self.__application_changes_url** attribute (not deletable: always raises).
    """
    message = "{0} | '{1}' attribute is not deletable!".format(self.__class__.__name__, "application_changes_url")
    raise foundations.exceptions.ProgrammingError(message)
@property
def repository_url(self):
    """
    Property for **self.__repository_url** attribute.

    Read only: the matching setter and deleter always raise.

    :return: self.__repository_url.
    :rtype: unicode
    """
    return self.__repository_url
@repository_url.setter
@foundations.exceptions.handle_exceptions(foundations.exceptions.ProgrammingError)
def repository_url(self, value):
    """
    Setter for **self.__repository_url** attribute (read only: always raises).

    :param value: Attribute value.
    :type value: unicode
    """
    message = "{0} | '{1}' attribute is read only!".format(self.__class__.__name__, "repository_url")
    raise foundations.exceptions.ProgrammingError(message)
@repository_url.deleter
@foundations.exceptions.handle_exceptions(foundations.exceptions.ProgrammingError)
def repository_url(self):
"""
Deleter for **self.__repository_url** attribute.
"""
raise foundations.exceptions.ProgrammingError(
"{0} | '{1}' attribute is not deletable!".format(self.__class__.__name__, "repository_url"))
@property
def download_manager(self):
    """
    Property for **self.__download_manager** attribute.

    :return: self.__download_manager.
    :rtype: object
    """
    return self.__download_manager
@download_manager.setter
@foundations.exceptions.handle_exceptions(foundations.exceptions.ProgrammingError)
def download_manager(self, value):
    """
    Setter for **self.__download_manager** attribute.

    :param value: Attribute value.
    :type value: object
    :raise foundations.exceptions.ProgrammingError: the attribute is read only.
    """
    raise foundations.exceptions.ProgrammingError(
        "{0} | '{1}' attribute is read only!".format(self.__class__.__name__, "download_manager"))
@download_manager.deleter
@foundations.exceptions.handle_exceptions(foundations.exceptions.ProgrammingError)
def download_manager(self):
    """
    Deleter for **self.__download_manager** attribute.

    :raise foundations.exceptions.ProgrammingError: the attribute is not deletable.
    """
    raise foundations.exceptions.ProgrammingError(
        "{0} | '{1}' attribute is not deletable!".format(self.__class__.__name__, "download_manager"))
@property
def network_access_manager(self):
    """
    Property for **self.__network_access_manager** attribute.

    :return: self.__network_access_manager.
    :rtype: QNetworkAccessManager
    """
    return self.__network_access_manager
@network_access_manager.setter
@foundations.exceptions.handle_exceptions(foundations.exceptions.ProgrammingError)
def network_access_manager(self, value):
    """
    Setter for **self.__network_access_manager** attribute.

    :param value: Attribute value.
    :type value: QNetworkAccessManager
    :raise foundations.exceptions.ProgrammingError: the attribute is read only.
    """
    raise foundations.exceptions.ProgrammingError(
        "{0} | '{1}' attribute is read only!".format(self.__class__.__name__, "network_access_manager"))
@network_access_manager.deleter
@foundations.exceptions.handle_exceptions(foundations.exceptions.ProgrammingError)
def network_access_manager(self):
    """
    Deleter for **self.__network_access_manager** attribute.

    :raise foundations.exceptions.ProgrammingError: the attribute is not deletable.
    """
    raise foundations.exceptions.ProgrammingError(
        "{0} | '{1}' attribute is not deletable!".format(self.__class__.__name__, "network_access_manager"))
def __initialize_ui(self):
"""
Initializes the Widget ui.
"""
umbra.ui.common.set_window_default_icon(self)
LOGGER.debug("> Initializing '{0}' ui.".format(self.__class__.__name__))
if Constants.application_name not in self.__releases:
self.sIBL_GUI_frame.hide()
self.Get_sIBL_GUI_pushButton.hide()
| else:
self.Logo_label.setPixmap(QPixmap(os.path.join(self.__ui_resources_directory, self.__ui_logo_image)))
self.Your_Version_label.setText(self.__releases[Constants.application_name].local_version)
self.Latest_Version_label.setText(self.__releases[Constants.application_name].repository_version)
self.Change_Log_webView.load(QUrl.fromEncoded(QByteArray(self.__application_changes_url)))
templates_releases = dict(self.__releases)
if Constants.application_name in self.__releases:
templates_releases.pop(Constants.application_name)
if not templates_releases:
self.Templates_frame.hide()
self.Get_Latest_Templates_pushButton.hide()
else:
self.Templates_label.setPixmap(
QPixmap(os.path.join(self.__ui_resources_directory, self.__ui_templates_image)))
self.Templates_tableWidget.setParent(None)
self.Templates_tableWidget = TemplatesReleases_QTableWidget(self, message="No Releases to view!")
self.Templates_tableWidget.setObjectName("Templates_tableWidget")
self.Templates_frame_gridLayout.addWidget(self.Templates_tableWidget, 1, 0)
self.__view = self.Templates_tableWidget
self.__vi |
ils
#from seq2seq.models import SimpleSeq2Seq
from gensim import models, matutils
from sklearn.utils import shuffle
from sklearn.preprocessing import MinMaxScaler
import theano
theano.config.openmp = True
from sklear | n.linear_model import LogisticRegression
#OMP_NUM_THREADS=24 python sentiment.py
def read_data(file_path, w2v_model, max_len = 300, hidden_dim = 32):
    """Read one tokenized document per line and return its mean word embedding.

    :param file_path: path to a text file, one whitespace-tokenized document per line.
    :param w2v_model: gensim word2vec model used to look up token vectors.
    :param max_len: maximum number of in-vocabulary tokens averaged per line.
    :param hidden_dim: embedding dimensionality (must match w2v_model's vector size).
    :return: numpy array of shape (num_lines, hidden_dim).
    """
    X = []
    with open(file_path, "r") as f:
        for line in f:
            x = np.zeros(hidden_dim)
            tokens = line.strip().split()
            index = 0
            for token in map(normalize, tokens):
                if token in w2v_model:
                    x += w2v_model[token]
                    index += 1
                    if index >= max_len:
                        break
            # BUG FIX: a line with no in-vocabulary tokens left index == 0 and
            # `x /= 0` produced a NaN vector (numpy propagates nan with a
            # RuntimeWarning). Keep the zero vector for such lines instead.
            if index:
                x /= index
            X.append(x)
    return np.array(X)
def normalize(word):
    """Decode *word* from UTF-8, strip punctuation (non-word, non-space chars)
    and lowercase it.

    :param word: UTF-8 encoded byte string.
    :return: normalized unicode word.
    """
    # BUG FIX: re.sub's third positional argument is ``count`` (maximum number
    # of replacements), not ``flags``. Passing re.UNICODE (== 32) positionally
    # silently capped the substitution at 32 replacements; pass it as flags=.
    word = re.sub(r'[^\w\s]', "", word.decode("utf-8"), flags=re.UNICODE)
    word = word.lower()
    return word
class Generator:
    """GAN over fixed-size embedding vectors: a generator G mapping uniform
    noise to embedding space, a discriminator D, and the stacked GAN model
    used to train G against a frozen D."""
    def __init__(self, max_len = 300, hidden_dim = 32):
        # hidden_dim is the embedding dimensionality; max_len is kept for
        # interface parity with read_data.
        self.max_len = max_len
        self.hidden_dim = hidden_dim
        self.G = self.build_generative_model()
        self.D = self.build_discriminative_model()
        self.GAN = self.build_GAN(self.G, self.D)
        self.G.compile(loss = "mse", optimizer = Adam(lr=0.0001))
        self.GAN.compile(loss = "categorical_crossentropy", optimizer = Adam(lr=0.0001))
        # build_GAN set D.trainable = False so D is frozen inside GAN;
        # re-enable training before compiling D standalone.
        self.D.trainable = True
        self.D.compile(loss = "categorical_crossentropy", optimizer = Adam(lr=0.0001))
    def build_generative_model(self):
        """MLP mapping a hidden_dim noise vector back to hidden_dim (linear output)."""
        G = Sequential()
        G.add(Dense(300, input_dim = self.hidden_dim, activation = "relu"))
        #G.add(BatchNormalization())
        G.add(Dense(300, activation = "relu"))
        G.add(Dense(300, activation = "relu"))
        G.add(Dense(300, activation = "relu"))
        G.add(Dense(self.hidden_dim))
        return G
    def build_discriminative_model(self):
        """MLP classifying a hidden_dim vector as real/generated (2-way softmax)."""
        D = Sequential()
        D.add(Dense(300, input_dim = self.hidden_dim, activation ="relu"))
        D.add(Dense(300, activation = "relu"))
        D.add(Dense(300, activation = "relu"))
        D.add(Dense(2, activation = "softmax"))
        return D
    def build_GAN(self, G, D):
        """Stack G then D; D is frozen here so GAN updates only G's weights."""
        GAN = Sequential()
        GAN.add(G)
        D.trainable = False
        GAN.add(D)
        return GAN
    def generate_noise(self, shape):
        """Uniform noise in [-1, 1) with the given shape."""
        return np.random.uniform(-1, 1, size = shape)
    def pre_trainG(self, X, batch_size = 128):
        """Pre-train G as an autoencoder-like regressor: noise -> real batch (MSE)."""
        print "Pre-train G ..."
        L = []
        for index in range(0, len(X), batch_size):
            batch = X[index:index+batch_size]
            noise = self.generate_noise(batch.shape)
            loss = self.G.train_on_batch(noise, batch)
            L.append(loss)
        # NOTE(review): this prints only the last batch's loss; L is collected
        # but unused -- np.mean(L) was probably intended.
        print "loss = %f" % np.mean(loss)
    def pre_trainD(self, X, batch_size = 128):
        """Pre-train D on real (label 1) vs generated (label 0) batches."""
        print "Pre-train D"
        L = []
        for index in range(0, len(X), batch_size):
            batch = X[index:index+batch_size]
            noise = self.generate_noise(batch.shape)
            gen_batch = self.G.predict(noise)
            Y = [1]*len(batch) + [0]*len(batch)
            Y = np_utils.to_categorical(Y, nb_classes = 2)
            combined_batch = np.concatenate((batch, gen_batch))
            loss = self.D.train_on_batch(combined_batch, Y)
            L.append(loss)
        # NOTE(review): prints only the last batch's loss (L unused), as above.
        print "loss = %f" % np.mean(loss)
    def train(self, X, batch_size = 128):
        """One adversarial epoch: update D on real+fake, then G via the GAN
        with all-ones ("real") labels."""
        G_loss = []
        D_loss = []
        for index in range(0, len(X), batch_size):
            batch = X[index:index+batch_size]
            Y = [1]*len(batch) + [0]*len(batch)
            Y = np_utils.to_categorical(Y, nb_classes = 2)
            noise = self.generate_noise(batch.shape)
            gen_batch = self.G.predict(noise)
            combined_batch = np.concatenate((batch, gen_batch))
            d_loss = self.D.train_on_batch(combined_batch, Y)
            # Fresh noise for the generator step; labels say "real" so G is
            # pushed to fool D.
            noise = self.generate_noise(batch.shape)
            g_loss = self.GAN.train_on_batch(noise, np_utils.to_categorical([1]*len(batch), nb_classes = 2))
            G_loss.append(g_loss)
            D_loss.append(d_loss)
        print "d_loss = %f, gan_loss = %f" %(np.mean(D_loss), np.mean(G_loss))
    def generate(self, shape):
        """Sample embeddings from G; *shape* is the noise shape (n, hidden_dim)."""
        noise = self.generate_noise(shape)
        gen_X = self.G.predict(noise)
        return gen_X
    def save(self, file_path):
        """Save G/D/GAN weights under file_path + {.G,.D,.GAN}.h5."""
        self.G.save_weights(file_path+".G.h5")
        self.D.save_weights(file_path+ ".D.h5")
        self.GAN.save_weights(file_path+ ".GAN.h5")
    def load(self, file_path):
        """Load G/D/GAN weights saved by save()."""
        self.G.load_weights(file_path+".G.h5")
        self.D.load_weights(file_path+ ".D.h5")
        self.GAN.load_weights(file_path+ ".GAN.h5")
class Classifier:
    """Thin wrapper around sklearn's LogisticRegression with a train/evaluate API."""
    def __init__(self, max_len = 300, hidden_dim = 32):
        # max_len / hidden_dim are accepted for signature parity with
        # Generator but are not used by the classifier itself.
        self.CLF = LogisticRegression()
    def train(self, X, Y):
        """Fit the logistic regression on features X and labels Y."""
        #X, Y = shuffle(X, Y)
        self.CLF.fit(X, Y)
        #print X
        #print self.CLF.score(X, Y)
        #print self.CLF.coef_
    def evaluate(self, testX, testY):
        """Print mean accuracy on held-out data."""
        #print testX
        #print self.CLF.predict(testX)
        print "Accuracy on testing data :", self.CLF.score(testX, testY)
    def save(self, file_path):
        # Persistence intentionally not implemented.
        pass
    def load(self, file_path):
        # Persistence intentionally not implemented.
        pass
if __name__ == "__main__":
#RNN
np.random.seed(0)
print "Loading word vectors ..."
w2v_model = models.Word2Vec.load("../models/word2vec.mod")
print "Reading text data ..."
trainX_pos = read_data("../data/train-pos.small", w2v_model)
trainX_neg = read_data("../data/train-neg.small", w2v_model)
testX_pos = read_data("../data/test-pos.small", w2v_model)
testX_neg = read_data("../data/test-neg.small", w2v_model)
#scaler = MinMaxScaler(feature_range = (0, 1))
#trainX_pos = np.ones((500,32))*1.5
#trainX_neg = -np.ones((500,32))*1.5
#testX_pos = np.ones((500,32))*1.5
#testX_neg = -np.ones((500,32))*1.5
"""
X = np.concatenate((trainX_pos, trainX_neg, testX_pos, testX_neg))
scaler.fit(X)
trainX_pos = scaler.transform(trainX_pos)
trainX_neg = scaler.transform(trainX_neg)
testX_pos = scaler.transform(testX_pos)
testX_neg = scaler.transform(testX_neg)
"""
trainX = np.vstack((trainX_pos, trainX_neg))
trainY = [1]*len(trainX_pos)+ [0]*len(trainX_neg)
testX = np.vstack((testX_pos, testX_neg))
testY = [1]*len(testX_pos)+ [0]*len(testX_neg)
print len(testX)
print "Building the pos generative model..."
pos_gan = Generator()
"""
for epoch in range(1, 30):
print "==========Epoch %d===========" % (epoch)
pos_gan.pre_trainG(trainX_pos)
for epoch in range(1, 30):
print "==========Epoch %d===========" % (epoch)
pos_gan.pre_trainD(trainX_pos)
"""
#print "Training ..."
for epoch in range(1, 30):
print "==========Epoch %d===========" % (epoch)
pos_gan.train(trainX_pos)
posX = pos_gan.generate((50, 32))
pos_gan.save("../models/pos_basic_32")
pos_gan.load("../models/pos_basic_32")
print "Building the neg generative model..."
neg_gan = Generator()
"""
for epoch in range(1, 30):
print "==========Epoch %d===========" % (epoch)
neg_gan.pre_trainG(trainX_neg)
for epoch in range(1, 30):
print "==========Epoch %d===========" % (epoch)
neg_gan.pre_trainD(trainX_neg)
"""
print "Training ..."
for epoch in range(1, 30):
print "==========Epoch %d===========" % (epoch)
neg_gan.train(trainX_neg)
negX = neg_gan.generate((50, 32))
neg_gan.save("../models/neg_basic_32")
neg_gan.load("../models/neg_basic_32")
sample_trainX = np.vstack((posX, negX))
sample_trainX.dump("../sampleX.np")
sample_trainY = [1]*len(posX) + [0]*len(negX)
print "Building the basic classifier ..."
clf = Classifier()
print "Training the basic classifier ..."
clf.train(np.vstack((trainX[:10], trainX[-10:])), [1]*10+[0]*10)
#clf.train(trainX, trainY)
clf.evaluate(testX, testY)
print "Building the sampled classifier ..."
aug_clf = Classifier()
print "Training |
"""Test functions for simbad.util.pdb_util"""
__author__ = "Ada | m Simpkin"
__date__ = "19 Jan 2018"
import os
import tempfile
import unittest
import simbad.util
class Test(unittest.TestCase):
    """Unit test for simbad.util.result_by_score_from_csv."""

    def test_result_by_score_from_csv_1(self):
        """Test case for simbad.util.result_by_score_from_csv"""
        csv_temp_file = tempfile.NamedTemporaryFile("w", delete=False)
        # BUG FIX: delete=False files were never removed, leaking one temp
        # file per test run; remove it when the test finishes.
        self.addCleanup(os.remove, csv_temp_file.name)
        csv_temp_file.write(
            """pdb_code,alt,a,b,c,alpha,beta,gamma,length_penalty,angle_penalty,total_penalty,volume_difference,probability_score
1DTX, ,23.15,39.06,73.53,90.0,90.0,90.0,0.418,0.0,0.418,398.847,0.842"""
        )
        csv_temp_file.close()
        data = simbad.util.result_by_score_from_csv(csv_temp_file.name, "total_penalty")
        reference_data = ["1DTX", 0.41799999999999998]
        self.assertEqual(data, reference_data)

    def test_result_by_score_from_csv_2(self):
        """Test case for simbad.util.result_by_score_from_csv"""
        csv_temp_file = tempfile.NamedTemporaryFile("w", delete=False)
        # Same cleanup as above -- the file only needs to survive until the
        # assertion runs.
        self.addCleanup(os.remove, csv_temp_file.name)
        csv_temp_file.write(
            """pdb_code,ALPHA,BETA,GAMMA,CC_F,RF_F,CC_I,CC_P,Icp,CC_F_Z_score,CC_P_Z_score,Number_of_rotation_searches_producing_peak
2fbb,21.63,81.88,296.6,14.1,56.2,16.5,18.6,1.0,11.6,8.6,5.0
1f10,34.27,90.0,116.04,13.0,57.1,16.4,14.2,1.0,9.0,7.0,5.0
4w94,29.28,85.42,245.3,12.9,57.2,15.2,10.8,1.0,8.9,7.1,5.0
1xei,38.87,78.75,65.8,12.3,58.0,15.4,13.9,1.0,8.1,6.6,5.0
2z18,27.6,87.35,247.57,12.3,57.5,15.3,12.5,1.0,7.8,6.1,5.0
1ps5,33.92,86.37,67.25,12.6,57.3,15.6,14.8,1.0,7.7,7.4,5.0
1v7s,34.18,87.8,66.84,12.5,57.4,15.7,12.6,1.0,7.6,6.7,5.0
2vb1,37.1,85.56,66.78,12.1,57.3,16.2,12.3,1.0,7.6,6.6,5.0
4yeo,35.02,82.52,67.02,11.8,57.2,15.5,13.8,1.0,7.6,6.7,5.0
2b5z,1.4,38.12,229.38,12.4,57.9,15.4,10.4,1.0,7.6,6.5,5.0
1ykz,26.43,88.72,247.05,12.6,57.5,15.4,11.9,1.0,7.6,6.5,5.0
4xjf,26.78,88.44,245.77,12.9,57.8,15.4,12.7,1.0,7.6,6.5,5.0
2d4j,37.18,84.17,66.64,12.4,57.7,16.1,12.8,1.0,7.5,6.0,5.0
4p2e,29.05,83.8,246.58,12.5,56.9,15.4,12.1,1.0,7.5,7.1,5.0
3wvx,35.67,85.1,67.1,12.6,57.1,15.1,13.0,1.0,7.4,6.4,5.0
2x0a,28.59,85.11,245.89,12.3,57.4,14.8,11.4,1.0,7.4,6.5,5.0
2z19,38.05,79.03,64.98,11.8,57.7,15.9,12.8,1.0,7.1,5.9,5.0
1jj1,28.99,82.92,245.93,12.5,57.3,15.6,11.3,1.0,7.0,5.9,5.0
4j7v,28.54,86.74,246.59,12.0,57.4,14.2,10.2,1.0,7.0,5.7,5.0
2pc2,28.71,76.6,257.7,10.8,57.7,13.4,8.0,1.0,6.7,5.3,5.0"""
        )
        csv_temp_file.close()
        data = simbad.util.result_by_score_from_csv(csv_temp_file.name, "CC_F_Z_score")
        reference_data = ["2fbb", 11.6]
        self.assertEqual(data, reference_data)
|
from ..base import ParametrizedValue
class BalancingAlgorithm(ParametrizedValue):
    """Base class for balancing algorithm values."""
    # No separator between the algorithm name and its parameters.
    name_separator = ''
class BalancingAlgorithmWithBackup(BalancingAlgorithm):
    """Balancing algorithm accepting an optional backup level."""
    def __init__(self, backup_level=None):
        # backup_level: optional backup tier; semantics defined by the
        # consumer of this value -- TODO confirm against upstream docs.
        self.backup_level = backup_level
        super().__init__()
class WeightedRoundRobin(BalancingAlgorithmWithBackup):
    """Weighted round robin algorithm with backup support.
    The default algorithm.
    """
    # `name` is the identifier emitted into the configuration value.
    name = 'wrr'
class LeastReferenceCount(BalancingAlgorithmWithBackup):
    """Least reference count algorithm with backup support."""
    name = 'lrc'
class WeightedLeastReferenceCount(BalancingAlgorithmWithBackup):
    """Weighted least reference count algorithm with backup support."""
    name = 'wlrc'
class IpHash(BalancingAlgorithmWithBackup):
    """IP hash algorithm with backup support."""
    name = 'iphash'
|
White = 15
class Coord(ctypes.Structure):
    # ctypes analogue of the Win32 COORD structure (two signed shorts).
    _fields_ = [('X', ctypes.c_short), ('Y', ctypes.c_short)]
class SmallRect(ctypes.Structure):
    # ctypes analogue of the Win32 SMALL_RECT structure.
    _fields_ = [('Left', ctypes.c_short),
                ('Top', ctypes.c_short),
                ('Right', ctypes.c_short),
                ('Bottom', ctypes.c_short),
               ]
class ConsoleScreenBufferInfo(ctypes.Structure):
    # ctypes analogue of CONSOLE_SCREEN_BUFFER_INFO, filled by
    # kernel32.GetConsoleScreenBufferInfo (see Win32API.SetConsoleColor).
    _fields_ = [('dwSize', Coord),
                ('dwCursorPosition', Coord),
                ('wAttributes', ctypes.c_uint),
                ('srWindow', SmallRect),
                ('dwMaximumWindowSize', Coord),
               ]
class Win32API():
    '''Thin wrappers over kernel32 console APIs for colored console output'''
    StdOutputHandle = -11  # STD_OUTPUT_HANDLE argument for GetStdHandle
    ConsoleOutputHandle = None  # lazily cached console handle
    DefaultColor = None  # original console attributes, captured on first use
    @staticmethod
    def SetConsoleColor(color):
        '''Change the text color on console window'''
        if not Win32API.DefaultColor:
            # First call: cache the output handle and the current attributes
            # so ResetConsoleColor can restore them later.
            if not Win32API.ConsoleOutputHandle:
                Win32API.ConsoleOutputHandle = ctypes.windll.kernel32.GetStdHandle(Win32API.StdOutputHandle)
            bufferInfo = ConsoleScreenBufferInfo()
            ctypes.windll.kernel32.GetConsoleScreenBufferInfo(Win32API.ConsoleOutputHandle, ctypes.byref(bufferInfo))
            Win32API.DefaultColor = int(bufferInfo.wAttributes & 0xFF)
        if IsPy3:
            sys.stdout.flush() # need flush stdout in python 3
        ctypes.windll.kernel32.SetConsoleTextAttribute(Win32API.ConsoleOutputHandle, color)
    @staticmethod
    def ResetConsoleColor():
        '''Reset the default text color on console window'''
        if IsPy3:
            sys.stdout.flush() # need flush stdout in python 3
        ctypes.windll.kernel32.SetConsoleTextAttribute(Win32API.ConsoleOutputHandle, Win32API.DefaultColor)
class Logger():
    """Writes log text to the console (optionally colored via Win32API) and
    appends it to a UTF-8 log file."""
    LogFile = '@AutomationLog.txt'
    LineSep = '\n'
    @staticmethod
    def Write(log, consoleColor = -1, writeToFile = True, printToStdout = True):
        '''
        consoleColor: value in class ConsoleColor, such as ConsoleColor.DarkGreen
        if consoleColor == -1, use default color
        '''
        if printToStdout:
            isValidColor = (consoleColor >= ConsoleColor.Black and consoleColor <= ConsoleColor.White)
            if isValidColor:
                Win32API.SetConsoleColor(consoleColor)
            try:
                sys.stdout.write(log)
            except UnicodeError as e:
                # Could not encode the message for stdout; report in red.
                Win32API.SetConsoleColor(ConsoleColor.Red)
                isValidColor = True
                sys.stdout.write(str(type(e)) + ' can\'t print the log!\n')
            if isValidColor:
                Win32API.ResetConsoleColor()
        if not writeToFile:
            return
        if IsPy3:
            logFile = open(Logger.LogFile, 'a+', encoding = 'utf-8')
        else:
            logFile = codecs.open(Logger.LogFile, 'a+', 'utf-8')
        try:
            logFile.write(log)
        except Exception as ex:
            sys.stdout.write('can not write log with exception: {0} {1}'.format(type(ex), ex))
        finally:
            # BUG FIX: the handle was previously closed only on the exception
            # path, leaking one file handle per successful Write call; closing
            # also flushes (required in python 3 for the log to be saved).
            logFile.close()
    @staticmethod
    def WriteLine(log, consoleColor = -1, writeToFile = True, printToStdout = True):
        '''
        consoleColor: value in class ConsoleColor, such as ConsoleColor.DarkGreen
        if consoleColor == -1, use default color
        '''
        Logger.Write(log + Logger.LineSep, consoleColor, writeToFile, printToStdout)
    @staticmethod
    def Log(log, consoleColor = -1, writeToFile = True, printToStdout = True):
        '''
        consoleColor: value in class ConsoleColor, such as ConsoleColor.DarkGreen
        if consoleColor == -1, use default color
        '''
        # Prefix the message with a "YYYY-MM-DD hh:mm:ss - " timestamp.
        t = time.localtime()
        log = '{0}-{1:02}-{2:02} {3:02}:{4:02}:{5:02} - {6}{7}'.format(t.tm_year, t.tm_mon, t.tm_mday,
            t.tm_hour, t.tm_min, t.tm_sec, log, Logger.LineSep)
        Logger.Write(log, consoleColor, writeToFile, printToStdout)
    @staticmethod
    def DeleteLog():
        """Remove the log file if present."""
        if os.path.exists(Logger.LogFile):
            os.remove(Logger.LogFile)
def GetMSBuildPath():
    """Locate MSBuild.exe by sourcing the VS2013 environment via a helper
    batch file; returns the path line from `where msbuild`, or None."""
    cmd = 'call "%VS120COMNTOOLS%..\\..\\VC\\vcvarsall.bat" x86\nwhere msbuild'
    # Write the helper batch file with a context manager so the handle is
    # closed before the file is executed.
    with open('GetMSBuildPath.bat', 'wt') as ftemp:
        ftemp.write(cmd)
    p = subprocess.Popen('GetMSBuildPath.bat', stdout = subprocess.PIPE)
    # BUG FIX: the original called p.wait() before draining the PIPE, which
    # deadlocks once the child fills the pipe buffer; communicate() reads the
    # output and waits safely.
    out, _ = p.communicate()
    lines = out.decode().splitlines()
    os.remove('GetMSBuildPath.bat')
    for line in lines:
        if 'MSBuild.exe' in line:
            return line
    # Implicit None when MSBuild was not found (made explicit).
    return None
def GetIncrediBuildPath():
    """Return the path to IncrediBuild's BuildConsole.exe read from the
    registry, or None if IncrediBuild is not installed."""
    try:
        # BUG FIX: the registry key handle was never closed; PyHKEY supports
        # the context-manager protocol, so use `with` to release it.
        with winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, r'SOFTWARE\Classes\IncrediBuild.MonitorFile\shell\open\command') as key:
            value, typeId = winreg.QueryValueEx(key, '')
        if value:
            # Value looks like: "<path to monitor exe>" ... -- extract the
            # quoted path and point at BuildConsole.exe in the same folder.
            start = value.find('"')
            end = value.find('"', start + 1)
            path = value[start+1:end]
            buildConsole = os.path.join(os.path.dirname(path), 'BuildConsole.exe')
            return buildConsole
    except FileNotFoundError as e:
        Logger.WriteLine('can not find IncrediBuild', ConsoleColor.Red)
    return None
def UpdateCode():
    """Run `git pull` in every directory listed in UpdateDir.

    :return: True if all pulls succeeded, False otherwise.
    """
    # put git to path first
    if not shutil.which('git.exe'):
        Logger.Log('找不到git.exe. 请确认安装git时将git\bin目录路径加入到环境变量path中!!!\n, 跳过更新代码!!!', ConsoleColor.Yellow)
        # BUG FIX: `false` is not a Python name (NameError at runtime);
        # the boolean constant is `False`.
        return False
    oldDir = os.getcwd()
    for dir in UpdateDir:
        os.chdir(dir)
        ret = os.system('git pull')
        os.chdir(oldDir)
        if ret != 0:
            Logger.Log('update {0} failed'.format(dir), ConsoleColor.Yellow)
            # BUG FIX: same `false` -> `False` NameError as above.
            return False
    return True
def BuildProject(cmd):
    """Run the build command *cmd* up to 6 times; returns True as soon as one
    attempt succeeds, False if all attempts fail."""
    for i in range(6):
        Logger.WriteLine(cmd, ConsoleColor.Cyan)
        buildFailed = True
        startTime = time.time()
        p = subprocess.Popen(cmd) # IncrediBuild cannot be run with stdout=subprocess.PIPE, otherwise p.wait() never returns -- probably an IncrediBuild bug
        if IsPy3:
            try:
                # wait() returns the exit code; non-zero (truthy) means failure.
                buildFailed = p.wait(BuildTimeout)
            except subprocess.TimeoutExpired as e:
                Logger.Log('{0}'.format(e), ConsoleColor.Yellow)
                p.kill()
        else:
            buildFailed = p.wait()
        if not UseMSBuild:
            # IncrediBuild's exit code does not indicate build success; the
            # log output has to be parsed instead.
            fin = open('IncrediBuild.log')
            for line in fin:
                if line.startswith('=========='):
                    Logger.Write(line, ConsoleColor.Cyan, writeToFile = True if IsPy3 else False)
                    if IsPy3:
                        start = line.find('失败') + 3 # summary line like '========== 生成: 成功 1 个,失败 0 个 ...'; +3 skips '失败' (2 chars) plus one separator
                    else:# ugly special-case for py2 compatibility: scan for the second number in the line
                        start = 0
                        n2 = 0
                        while 1:
                            if line[start].isdigit():
                                n2 += 1
                                if n2 == 2:
                                    break
                            start = line.find(' ', start)
                            start += 1
                    end = line.find(' ', start)
                    failCount = int(line[start:end])
                    buildFailed = failCount > 0
                else:
                    Logger.Write(line, ConsoleColor.Red, writeToFile = True if IsPy3 else False, printToStdout = True if ' error ' in line else False)
            fin.close()
        costTime = time.time() - startTime
        Logger.WriteLine('build cost time: {0:.1f}s\n'.format(costTime), ConsoleColor.Green)
        if not buildFailed:
            return True
    return False
def BuildAllProjects():
buildSuccess = False
cmds = []
if UseMSBuild:
if IsRebuild:
if CleanAll:
cmds.append('{0} {1} /t:Clean /p:Configuration={2} /nologo /maxcpucount /filelogger /consoleloggerparameters:ErrorsOnly'.format(MSBuild, SlnFile, 'Debug'))
cmds.append('{0} {1} /t:Clean /p:Configuration={2} /nologo /maxcpucount /filelogger /consoleloggerp |
"""Compare Pulsar and HabCat coordinates"""
import csv
import astropy.units as u
from astropy.coordinates import SkyCoord, Angle
from astropy import coordinates as coord
def flipra(coordinate):
    """Shift an RA coordinate by 180 degrees, wrapping values above 360."""
    shifted = coordinate + 180
    if shifted > 360:
        shifted = shifted - 360
    return shifted
def flipde(coordinate):
    """Mirror a DE (declination) coordinate about the celestial equator.

    Note: the original docstring said "Flips RA coordinates by 90 degrees",
    but the function negates declination (e.g. +45 -> -45).
    """
    # Multiplying by a float literal keeps the historical behavior of always
    # returning a float, even for integer input.
    return coordinate * (-1.)
# Load Pulsar catalogue
pulsar_id = []
pulsar_ra = []
pulsar_de = []
pulsar_period = []
with open('pulsar.csv', 'r') as csvfile:
    dataset = csv.reader(csvfile, delimiter=';')
    for row in dataset:
        pulsar_id.append(row[0])
        ra = coord.Angle(row[1], unit=u.hour)  # Define as hours
        pulsar_ra.append(ra.degree)  # Convert to degree
        de = coord.Angle(row[2], unit=u.deg)
        pulsar_de.append(de.degree)
        pulsar_period.append(row[3])
print(len(pulsar_id), 'Pulsar datalines loaded')
# Load HabCat
habcat_id = []
habcat_ra = []
habcat_de = []
with open('habcat.csv', 'r') as csvfile:
    dataset = csv.reader(csvfile, delimiter=';')
    for row in dataset:
        habcat_id.append(row[0])
        ra = coord.Angle(row[1], unit=u.hour)  # Define as hours
        habcat_ra.append(ra.degree)  # Convert to degree
        de = coord.Angle(row[2], unit=u.deg)
        habcat_de.append(de.degree)
print(len(habcat_id), 'HabCat datalines loaded')
# Nested loop through all Pulsars to find closest 180deg HabCat for each
for currentpulsar in range(len(pulsar_id)):  # Pulsar loop
    shortest_distance = 180 * 60  # set to max, in arcminutes
    for currenthabcat in range(len(habcat_id)):  # HabCat loop
        # Correct calculation is very slow, thus only try the best candidates:
        # pre-filter to a 5-degree box around the flipped pulsar position.
        if (abs(habcat_ra[currenthabcat] -
                flipra(pulsar_ra[currentpulsar])) < 5.
                and abs(habcat_de[currenthabcat] -
                flipde(pulsar_de[currentpulsar])) < 5.):
            habcat_coordinate = SkyCoord(
                habcat_ra[currenthabcat],
                habcat_de[currenthabcat],
                unit="deg")
            pulsar_coordinate_flipped = SkyCoord(  # flip pulsar coordinates
                flipra(pulsar_ra[currentpulsar]),
                flipde(pulsar_de[currentpulsar]),
                unit="deg")
            distance = pulsar_coordinate_flipped.separation(habcat_coordinate)
            if distance.arcminute < shortest_distance:
                shortest_distance = distance.arcminute  # New best found
                bestfit_pulsar_id = pulsar_id[currentpulsar]
                bestfit_habcat_id = habcat_id[currenthabcat]
                bestfit_pulsar_period = pulsar_period[currentpulsar]
    # NOTE(review): if no HabCat star passed the 5-degree pre-filter for this
    # pulsar, bestfit_* still hold the previous pulsar's values (or are
    # undefined on the very first pulsar, raising NameError) -- confirm this
    # is intended before trusting the output rows.
    print(currentpulsar, bestfit_pulsar_id, bestfit_habcat_id, shortest_distance / 60.)  # deg
    with open('result-atnf-full.csv', 'a') as fp:  # Append each result to CSV
        a = csv.writer(fp, delimiter=',')
        a.writerow([
            bestfit_pulsar_id,
            bestfit_habcat_id,
            shortest_distance / 60.,  # arcminutes converted to degrees
            bestfit_pulsar_period])
print('Done.')
# -*- coding: utf-8 -*-
import re
import urllib.parse
from ..base.simple_downloader import SimpleDownloader
def decode_cloudflare_email(value):
email = ""
key = int(value[:2], 16)
for i in range(2, len(value), 2):
email += chr(int(value[i : i + 2], 16) ^ key)
return | email
class UpleaCom(SimpleDownloader):
    __name__ = "UpleaCom"
    __type__ = "downloader"
    __version__ = "0.21"
    __status__ = "testing"
    __pattern__ = r"https?://(?:www\.)?uplea\.com/dl/\w{15}"
    __config__ = [
        ("enabled", "bool", "Activated", True),
        ("use_premium", "bool", "Use premium account if available", True),
        ("fallback", "bool", "Fallback to free download if premium fails", True),
        ("chk_filesize", "bool", "Check file size", True),
        ("max_wait", "int", "Reconnect if waiting time is greater than minutes", 10),
    ]
    __description__ = """Uplea.com downloader plugin"""
    __license__ = "GPLv3"
    __authors__ = [("Redleon", None), ("GammaC0de", None)]
    PLUGIN_DOMAIN = "uplea.com"
    # Map the site's French size suffixes (octets) to binary units.
    SIZE_REPLACEMENTS = [
        ("ko", "KiB"),
        ("mo", "MiB"),
        ("go", "GiB"),
        ("Ko", "KiB"),
        ("Mo", "MiB"),
        ("Go", "GiB"),
    ]
    # Patterns scraped from the download page's HTML.
    NAME_PATTERN = r'<span class="gold-text">(?P<N>.+?)</span>'
    SIZE_PATTERN = (
        r'<span class="label label-info agmd">(?P<S>[\d.,]+) (?P<U>[\w^_]+?)</span>'
    )
    OFFLINE_PATTERN = r">You followed an invalid or expired link"
    LINK_PATTERN = r'"(https?://\w+\.uplea\.com/anonym/.*?)"'
    PREMIUM_ONLY_PATTERN = (
        r"You need to have a Premium subscription to download this file"
    )
    WAIT_PATTERN = r"timeText: ?(\d+),"
    STEP_PATTERN = r'<a href="(/step/.+)">'
    # The displayed name may be Cloudflare-obfuscated; decode it.
    NAME_REPLACEMENTS = [
        (
            r'(<a class="__cf_email__" .+? data-cfemail="(\w+?)".+)',
            lambda x: decode_cloudflare_email(x.group(2)),
        )
    ]
    def setup(self):
        # Single connection, one chunk, but resumable downloads.
        self.multi_dl = False
        self.chunk_limit = 1
        self.resume_download = True
    def handle_free(self, pyfile):
        """Free-download flow: follow the /step/ page, honor server waits,
        then extract the direct download link."""
        m = re.search(self.STEP_PATTERN, self.data)
        if m is None:
            self.error(self._("STEP_PATTERN not found"))
        self.data = self.load(urllib.parse.urljoin("http://uplea.com/", m.group(1)))
        m = re.search(self.WAIT_PATTERN, self.data)
        if m is not None:
            # NOTE(review): m.group(1) is a string; presumably the base class
            # coerces the wait seconds -- confirm against SimpleDownloader.
            self.wait(m.group(1), True)
            self.retry()
        m = re.search(self.LINK_PATTERN, self.data)
        if m is None:
            self.error(self._("LINK_PATTERN not found"))
        self.link = m.group(1)
        # Optional countdown timer before the link becomes active.
        m = re.search(r".ulCounter\({'timer':(\d+)}\)", self.data)
        if m is not None:
            self.wait(m.group(1))
|
else:
return super(DoctestItem, self).repr_failure(excinfo)
    def reportinfo(self):
        # (path, line number, description) triple consumed by pytest reporting.
        return self.fspath, self.dtest.lineno, "[doctest] %s" % self.name
def _get_flag_lookup():
    """Map doctest option-flag names to their integer flag values."""
    import doctest

    return {
        "DONT_ACCEPT_TRUE_FOR_1": doctest.DONT_ACCEPT_TRUE_FOR_1,
        "DONT_ACCEPT_BLANKLINE": doctest.DONT_ACCEPT_BLANKLINE,
        "NORMALIZE_WHITESPACE": doctest.NORMALIZE_WHITESPACE,
        "ELLIPSIS": doctest.ELLIPSIS,
        "IGNORE_EXCEPTION_DETAIL": doctest.IGNORE_EXCEPTION_DETAIL,
        "COMPARISON_FLAGS": doctest.COMPARISON_FLAGS,
        "ALLOW_UNICODE": _get_allow_unicode_flag(),
        "ALLOW_BYTES": _get_allow_bytes_flag(),
    }
def get_optionflags(parent):
    """OR together the doctest option flags named in the ini setting."""
    lookup = _get_flag_lookup()
    flags = 0
    for flag_name in parent.config.getini("doctest_optionflags"):
        flags |= lookup[flag_name]
    return flags
def _get_continue_on_failure(config):
| continue_on_failure = config.getvalue("doctest_continue_on_failure")
if continue_on_failure:
# We need to turn off this if we use pdb since we should stop at
# the first failure
if config.getvalue("usepdb"):
continue_on_failure = False
return continue_on_failure
class DoctestTextfile(pytest.Module):
    """Collects doctests from a plain text file (e.g. *.rst)."""
    obj = None
    def collect(self):
        import doctest
        # inspired by doctest.testfile; ideally we would use it directly,
        # but it doesn't support passing a custom checker
        encoding = self.config.getini("doctest_encoding")
        text = self.fspath.read_text(encoding)
        filename = str(self.fspath)
        name = self.fspath.basename
        globs = {"__name__": "__main__"}
        optionflags = get_optionflags(self)
        runner = _get_runner(
            verbose=0,
            optionflags=optionflags,
            checker=_get_checker(),
            continue_on_failure=_get_continue_on_failure(self.config),
        )
        _fix_spoof_python2(runner, encoding)
        parser = doctest.DocTestParser()
        test = parser.get_doctest(text, globs, name, filename, 0)
        # Files without any examples yield no items.
        if test.examples:
            yield DoctestItem(test.name, self, runner, test)
def _check_all_skipped(test):
    """raises pytest.skip() if all examples in the given DocTest have the SKIP
    option set.
    """
    import doctest

    if all(example.options.get(doctest.SKIP, False) for example in test.examples):
        pytest.skip("all tests skipped by +SKIP option")
def _is_mocked(obj):
    """
    returns if a object is possibly a mock object by checking the existence of a highly improbable attribute
    """
    probe = safe_getattr(
        obj, "pytest_mock_example_attribute_that_shouldnt_exist", None
    )
    return probe is not None
@contextmanager
def _patch_unwrap_mock_aware():
    """
    contextmanager which replaces ``inspect.unwrap`` with a version
    that's aware of mock objects and doesn't recurse on them
    """
    real_unwrap = getattr(inspect, "unwrap", None)
    if real_unwrap is None:
        # Nothing to patch on interpreters without inspect.unwrap.
        yield
        return

    def _unwrap_skipping_mocks(obj, stop=None):
        if stop is None:
            return real_unwrap(obj, stop=_is_mocked)
        return real_unwrap(obj, stop=lambda o: _is_mocked(o) or stop(o))

    inspect.unwrap = _unwrap_skipping_mocks
    try:
        yield
    finally:
        # Always restore the genuine implementation.
        inspect.unwrap = real_unwrap
class DoctestModule(pytest.Module):
    """Collects doctests from the docstrings of an importable Python module."""
    def collect(self):
        import doctest
        class MockAwareDocTestFinder(doctest.DocTestFinder):
            """
            a hackish doctest finder that overrides stdlib internals to fix a stdlib bug
            https://github.com/pytest-dev/pytest/issues/3456
            https://bugs.python.org/issue25532
            """
            def _find(self, tests, obj, name, module, source_lines, globs, seen):
                # Never descend into mock objects; unwrap is also patched so
                # the stdlib finder does not recurse on them.
                if _is_mocked(obj):
                    return
                with _patch_unwrap_mock_aware():
                    doctest.DocTestFinder._find(
                        self, tests, obj, name, module, source_lines, globs, seen
                    )
        # conftest.py must be imported through the plugin manager so it is
        # registered exactly once.
        if self.fspath.basename == "conftest.py":
            module = self.config.pluginmanager._importconftest(self.fspath)
        else:
            try:
                module = self.fspath.pyimport()
            except ImportError:
                if self.config.getvalue("doctest_ignore_import_errors"):
                    pytest.skip("unable to import module %r" % self.fspath)
                else:
                    raise
        # uses internal doctest module parsing mechanism
        finder = MockAwareDocTestFinder()
        optionflags = get_optionflags(self)
        runner = _get_runner(
            verbose=0,
            optionflags=optionflags,
            checker=_get_checker(),
            continue_on_failure=_get_continue_on_failure(self.config),
        )
        for test in finder.find(module, module.__name__):
            if test.examples:  # skip empty doctests
                yield DoctestItem(test.name, self, runner, test)
def _setup_fixtures(doctest_item):
    """
    Used by DoctestTextfile and DoctestItem to setup fixture information.

    Returns the filled FixtureRequest for *doctest_item*.
    """
    def func():
        # Dummy callable: gives the fixture manager an empty argspec to inspect.
        pass
    doctest_item.funcargs = {}
    fm = doctest_item.session._fixturemanager
    doctest_item._fixtureinfo = fm.getfixtureinfo(
        node=doctest_item, func=func, cls=None, funcargs=False
    )
    fixture_request = FixtureRequest(doctest_item)
    fixture_request._fillfixtures()
    return fixture_request
def _get_checker():
    """
    Returns a doctest.OutputChecker subclass that takes in account the
    ALLOW_UNICODE option to ignore u'' prefixes in strings and ALLOW_BYTES
    to strip b'' prefixes.
    Useful when the same doctest should run in Python 2 and Python 3.
    An inner class is used to avoid importing "doctest" at the module
    level.
    """
    # The class is cached as an attribute on this function so it is defined
    # only once per process.
    if hasattr(_get_checker, "LiteralsOutputChecker"):
        return _get_checker.LiteralsOutputChecker()
    import doctest
    import re
    class LiteralsOutputChecker(doctest.OutputChecker):
        """
        Copied from doctest_nose_plugin.py from the nltk project:
        https://github.com/nltk/nltk
        Further extended to also support byte literals.
        """
        # Match a u/U (resp. b/B) string-literal prefix, keeping the
        # preceding character and the quote for reinsertion via \1\2.
        _unicode_literal_re = re.compile(r"(\W|^)[uU]([rR]?[\'\"])", re.UNICODE)
        _bytes_literal_re = re.compile(r"(\W|^)[bB]([rR]?[\'\"])", re.UNICODE)
        def check_output(self, want, got, optionflags):
            # Fast path: standard comparison already succeeds.
            res = doctest.OutputChecker.check_output(self, want, got, optionflags)
            if res:
                return True
            allow_unicode = optionflags & _get_allow_unicode_flag()
            allow_bytes = optionflags & _get_allow_bytes_flag()
            if not allow_unicode and not allow_bytes:
                return False
            else:  # pragma: no cover
                def remove_prefixes(regex, txt):
                    return re.sub(regex, r"\1\2", txt)
                # Strip the literal prefixes from both sides and re-compare.
                if allow_unicode:
                    want = remove_prefixes(self._unicode_literal_re, want)
                    got = remove_prefixes(self._unicode_literal_re, got)
                if allow_bytes:
                    want = remove_prefixes(self._bytes_literal_re, want)
                    got = remove_prefixes(self._bytes_literal_re, got)
                res = doctest.OutputChecker.check_output(self, want, got, optionflags)
                return res
    _get_checker.LiteralsOutputChecker = LiteralsOutputChecker
    return _get_checker.LiteralsOutputChecker()
def _get_allow_unicode_flag():
"""
Registers and returns the ALLOW_UNICODE flag.
"""
import doctest
return doctest.register_optionflag("ALLOW_UNICODE")
def _get_allow_bytes_flag():
"""
Registers and returns the ALLOW_BYTES flag.
"""
import doctest
return doctest.register_optionflag("ALLOW_BYTES")
def _get_report_choice(key):
"""
This function returns the actual `doctest` module flag value, we want to do it as late as possible to avoid
|
# coding=utf-8
# Greedy car-pool assignment: the person farthest from the airport starts a
# car, then each seat is filled with the nearest remaining person; after
# CARSEATS people a new car is started.
import numpy as np
from numpy import random
# a=np.array([[2,3,4,1,2],[1,3,4,5,4],[4,2,3,10,2],[3,5,6,7,9],[10,3,4,2,9]])
# # for i in xrange(5):
# # a[i,i]=1000
# #b=a.argmin(axis=1)
# print a
# a=random.randint(1,100,size=(8,8))
MAX = 1000
MIN = 0
CARSEATS = 6
# Pairwise travel distances between the 8 people.
distMat = np.array([[0, 1, 15, 15, 26, 72, 10, 57],
                    [73, 0, 44, 98, 5, 22, 31, 89],
                    [46, 84, 0, 13, 28, 58, 42, 32],
                    [ 9, 37, 45, 0, 40, 39, 4, 49],
                    [53, 9, 2, 34, 0, 39, 26, 28],
                    [12, 93, 97, 74, 37, 0, 85, 84],
                    [38, 63, 83, 59, 40, 74, 0, 88],
                    [ 6, 86, 71, 48, 70, 20, 87, 0]])
# Distance from each person to the airport.
airportDist = np.array([67, 72, 96, 96, 96, 9, 25, 25])
#random.randint(1,100,size=(distMat.shape[0]))
print airportDist
# Records the boarding order of everyone.
idxMat = np.zeros([airportDist.shape[0]], dtype=int)
# First person to board: the one farthest from the airport.
initialIdx = np.argmax(airportDist)
idxMat[0] = initialIdx
# Mark the first person as boarded.
airportDist[initialIdx] = MIN
print initialIdx
# Exclude self-to-self distances by setting the diagonal to MAX.
for k in xrange(distMat.shape[0]):
    distMat[k][k] = MAX
# print("airport distance matrix: ")
# print airportDist
#
# print("distance matrix:")
# print distMat
for i in range(1,distMat.shape[0]):
    if i%CARSEATS == 0:
        print("new cars")
        # Car is full: start a new car with the remaining person farthest
        # from the airport.
        distMat[:, idxMat[i-1]] = MAX
        initialIdx = np.argmax(airportDist)
        idxMat[i] = initialIdx
        print initialIdx
        # Mark this person as boarded.
        airportDist[initialIdx] = MIN
        # print idxMat
        # print distMat
    else:
        # Fill the next seat with the person nearest to the previous pick.
        tmpIdx = np.argmin(distMat[initialIdx,:])
        idxMat[i] = tmpIdx
        distMat[:,idxMat[i-1]] = MAX
        initialIdx = tmpIdx
        airportDist[tmpIdx] = MIN
        print tmpIdx
print airportDist
|
# -*- coding: utf-8 -*-
"""
***************************************************************************
FixedDistanceBuffer.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* | *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
from qgis.PyQt.QtGui import QIcon
from qgis.core import (QgsWkbTypes,
QgsProcessing,
QgsProcessingParameterFeatureSource,
QgsProcessingParameterNumber,
QgsProcessingParameterBoolean,
QgsProcessingParameterEnum,
QgsProcessingParameterFeatureSink)
from processing.algs.qgis.QgisAlgorithm import QgisAlgorithm
from . import Buffer as buff
pluginPath = os.path.split(os.path.split(os.path.dirname(__file__))[0])[0]
class FixedDistanceBuffer(QgisAlgorithm):
    """QGIS processing algorithm: buffer all input features by a single,
    fixed distance, optionally dissolving the resulting polygons."""

    # Parameter-name constants used by the processing framework.
    INPUT = 'INPUT'
    OUTPUT = 'OUTPUT'
    FIELD = 'FIELD'
    DISTANCE = 'DISTANCE'
    SEGMENTS = 'SEGMENTS'
    DISSOLVE = 'DISSOLVE'
    END_CAP_STYLE = 'END_CAP_STYLE'
    JOIN_STYLE = 'JOIN_STYLE'
    MITER_LIMIT = 'MITER_LIMIT'
    def icon(self):
        # Reuse the classic ftools buffer icon shipped with the plugin.
        return QIcon(os.path.join(pluginPath, 'images', 'ftools', 'buffer.png'))
    def group(self):
        return self.tr('Vector geometry')
    def __init__(self):
        super().__init__()
    def initAlgorithm(self, config=None):
        # Declare inputs/outputs; declaration order determines dialog order.
        self.addParameter(QgsProcessingParameterFeatureSource(self.INPUT,
                                                              self.tr('Input layer')))
        self.addParameter(QgsProcessingParameterNumber(self.DISTANCE,
                                                       self.tr('Distance'), type=QgsProcessingParameterNumber.Double,
                                                       defaultValue=10.0))
        self.addParameter(QgsProcessingParameterNumber(self.SEGMENTS,
                                                       self.tr('Segments'), type=QgsProcessingParameterNumber.Integer,
                                                       minValue=1, defaultValue=5))
        self.addParameter(QgsProcessingParameterBoolean(self.DISSOLVE,
                                                        self.tr('Dissolve result'), defaultValue=False))
        self.end_cap_styles = [self.tr('Round'),
                               'Flat',
                               'Square']
        self.addParameter(QgsProcessingParameterEnum(
            self.END_CAP_STYLE,
            self.tr('End cap style'),
            options=self.end_cap_styles, defaultValue=0))
        self.join_styles = [self.tr('Round'),
                            'Miter',
                            'Bevel']
        self.addParameter(QgsProcessingParameterEnum(
            self.JOIN_STYLE,
            self.tr('Join style'),
            options=self.join_styles, defaultValue=0))
        self.addParameter(QgsProcessingParameterNumber(self.MITER_LIMIT,
                                                       self.tr('Miter limit'), type=QgsProcessingParameterNumber.Double,
                                                       minValue=0, defaultValue=2))
        self.addParameter(QgsProcessingParameterFeatureSink(self.OUTPUT, self.tr('Buffer'), QgsProcessing.TypeVectorPolygon))
    def name(self):
        return 'fixeddistancebuffer'
    def displayName(self):
        return self.tr('Fixed distance buffer')
    def processAlgorithm(self, parameters, context, feedback):
        """Read the parameter values and delegate the actual buffering
        to the shared Buffer helper module."""
        source = self.parameterAsSource(parameters, self.INPUT, context)
        distance = self.parameterAsDouble(parameters, self.DISTANCE, context)
        dissolve = self.parameterAsBool(parameters, self.DISSOLVE, context)
        segments = self.parameterAsInt(parameters, self.SEGMENTS, context)
        # +1: the enum parameters are 0-based, while the style values passed
        # to the buffering helper start at 1 — presumably matching the QGIS
        # cap/join style constants; TODO confirm against Buffer.buffering.
        end_cap_style = self.parameterAsEnum(parameters, self.END_CAP_STYLE, context) + 1
        join_style = self.parameterAsEnum(parameters, self.JOIN_STYLE, context) + 1
        miter_limit = self.parameterAsDouble(parameters, self.MITER_LIMIT, context)
        (sink, dest_id) = self.parameterAsSink(parameters, self.OUTPUT, context,
                                               source.fields(), QgsWkbTypes.Polygon, source.sourceCrs())
        buff.buffering(feedback, context, sink, distance, None, False, source, dissolve, segments, end_cap_style,
                       join_style, miter_limit)
        return {self.OUTPUT: dest_id}
| |
from JumpScale import j
descr = "" | "
remove old redis cache from system
"""
organization = "jumpscale"
author = "deboeckj@codescalers.com"
license = "bsd"
version = "1.0"
category = "redis.cleanup"
period = 300 # always in sec
timeout = period * 0.2 # max runtime = 20% of period
order = 1
enable = True
async = True
log = False
roles = ['master']
def action():
import time
EXTRATIME = 120
now = time.time()
try:
import ujson as json
e | xcept:
import json
import JumpScale.grid.agentcontroller
acl = j.clients.agentcontroller.get()
rcl = j.clients.redis.getRedisClient('127.0.0.1', 9999)
for jobkey in rcl.keys('jobs:*'):
if jobkey == 'jobs:last':
continue
jobs = rcl.hgetall(jobkey)
for jobguid, jobstring in jobs.iteritems():
job = json.loads(jobstring)
if job['state'] in ['OK', 'ERROR', 'TIMEOUT']:
rcl.hdel(jobkey, jobguid)
elif job['timeStart'] + job['timeout'] + EXTRATIME < now:
rcl.hdel(jobkey, jobguid)
job['state'] = 'TIMEOUT'
eco = j.errorconditionhandler.getErrorConditionObject(msg='Job timed out')
j.errorconditionhandler.raiseOperationalCritical(eco=eco,die=False)
eco.tb = None
eco.jid = job['guid']
eco.type = str(eco.type)
job['result'] = json.dumps(eco.__dict__)
acl.saveJob(job)
if __name__ == '__name__':
action()
|
""" Run target checks using snactor """
from generic_runner import run, pprint, get_actor
d | ef check_target():
""" Run multiple checks at target machine """
targetinfo = {}
get_actor('check_target_group').execute(targetinfo)
get_actor('check_target').execute(targetinfo)
pprint(t | argetinfo['targetinfo'])
if __name__ == '__main__':
run(check_target, tags=['check_target'])
|
# -*- coding: utf-8 -*-
#
# Copyright © 2014-2017 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions
# of the GNU General Public License v.2, or (at your option) any later
# version. This program is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY expressed or implied, including the
# implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR
# PURPOSE. See the GNU General Public License for more details. You
# should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# Any Red Hat trademarks that are incorporated in the source
# code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission
# of Red Hat, Inc.
"""
Base class for Anitya tests.
"""
from __future__ import print_function
from functools import wraps
import unittest
import os
import vcr
import mock
import anitya.lib
import anitya.lib.model as model
# A file-backed database (not :memory:) is required so integrity can be
# checked between sessions.
#DB_PATH = 'sqlite:///:memory:'
DB_PATH = 'sqlite:////tmp/anitya_test.sqlite'
FAITOUT_URL = 'http://faitout.fedorainfracloud.org/'
if os.environ.get('BUILD_ID'):
    # On jenkins, try to obtain a throw-away postgres database from the
    # faitout service; silently fall back to the sqlite file on any failure
    # (service down, requests missing, ...). Narrowed from a bare `except:`
    # so that SystemExit/KeyboardInterrupt are no longer swallowed.
    try:
        import requests
        req = requests.get('%s/new' % FAITOUT_URL)
        if req.status_code == 200:
            DB_PATH = req.text
            print('Using faitout at: %s' % DB_PATH)
    except Exception:
        pass
def skip_jenkins(function):
    """ Decorator to skip tests if AUTH is set to False """
    @wraps(function)
    def wrapper(*args, **kwargs):
        """ Pass straight through to the wrapped test. """
        # Historically this decorator skipped network-backed tests on
        # jenkins (keyed off BUILD_ID); with vcrpy replaying recorded
        # request/response pairs from disk, the skip is no longer needed.
        return function(*args, **kwargs)
    return wrapper
class Modeltests(unittest.TestCase):
    """ Base class for model tests: creates a fresh database, patches the
    model query property, loads plugins and wraps each test in a vcr
    cassette so recorded HTTP interactions are replayed from disk. """
    maxDiff = None
    def __init__(self, method_name='runTest'):
        """ Constructor. """
        unittest.TestCase.__init__(self, method_name)
        self.session = None
    # pylint: disable=C0103
    def setUp(self):
        """ Set up the environment, ran before every test. """
        # Remove any leftover sqlite file so each test starts clean.
        if ':///' in DB_PATH:
            dbfile = DB_PATH.split(':///')[1]
            if os.path.exists(dbfile):
                os.unlink(dbfile)
        self.session = anitya.lib.init(DB_PATH, create=True, debug=False)
        # Route model.BASE.query through this test session.
        mock_query = mock.patch.object(
            model.BASE, 'query', self.session.query_property(query_cls=model.BaseQuery))
        mock_query.start()
        self.addCleanup(mock_query.stop)
        anitya.lib.plugins.load_plugins(self.session)
        # Replay recorded HTTP request/response pairs named after the test id.
        cwd = os.path.dirname(os.path.realpath(__file__))
        self.vcr = vcr.use_cassette(os.path.join(cwd, 'request-data/', self.id()))
        self.vcr.__enter__()
    # pylint: disable=C0103
    def tearDown(self):
        """ Remove the test.db database if there is one. """
        self.vcr.__exit__()
        if '///' in DB_PATH:
            dbfile = DB_PATH.split('///')[1]
            if os.path.exists(dbfile):
                os.unlink(dbfile)
        self.session.rollback()
        self.session.close()
        if DB_PATH.startswith('postgres'):
            # NOTE(review): `requests` is only imported at module level when
            # BUILD_ID is set; this branch assumes a faitout DB, in which
            # case that import already happened -- verify.
            db_name = DB_PATH.rsplit('/', 1)[1]
            req = requests.get(
                '%s/clean/%s' % (FAITOUT_URL, db_name))
            print(req.text)
def create_distro(session):
    """ Create some basic distro for testing. """
    # Insert both distros, then commit once.
    for distro_name in ('Fedora', 'Debian'):
        session.add(model.Distro(name=distro_name))
    session.commit()
def create_project(session):
    """ Create some basic projects to work with. """
    # All projects share the same user; per-project details vary.
    project_specs = [
        dict(
            name='geany',
            homepage='http://www.geany.org/',
            version_url='http://www.geany.org/Download/Releases',
            regex='DEFAULT',
        ),
        dict(
            name='subsurface',
            homepage='http://subsurface.hohndel.org/',
            version_url='http://subsurface.hohndel.org/downloads/',
            regex='DEFAULT',
        ),
        dict(
            name='R2spec',
            homepage='https://fedorahosted.org/r2spec/',
        ),
    ]
    for spec in project_specs:
        anitya.lib.create_project(
            session,
            user_id='noreply@fedoraproject.org',
            **spec
        )
def create_ecosystem_projects(session):
    """ Create some fake projects from particular upstream ecosystems

    Each project name is used in two different ecosystems
    """
    ecosystem_specs = (
        ('pypi_and_npm', 'https://example.com/not-a-real-pypi-project', 'PyPI'),
        ('pypi_and_npm', 'https://example.com/not-a-real-npmjs-project', 'npmjs'),
        ('rubygems_and_maven', 'https://example.com/not-a-real-rubygems-project', 'Rubygems'),
        ('rubygems_and_maven', 'https://example.com/not-a-real-maven-project', 'Maven Central'),
    )
    for project_name, homepage, backend in ecosystem_specs:
        anitya.lib.create_project(
            session,
            name=project_name,
            homepage=homepage,
            backend=backend,
            user_id='noreply@fedoraproject.org'
        )
def create_package(session):
    """ Create some basic packages to work with. """
    # Both packages live in the Fedora distro; commit once at the end.
    for project_id, package_name in ((1, 'geany'), (2, 'subsurface')):
        session.add(model.Packages(
            project_id=project_id,
            distro='Fedora',
            package_name=package_name,
        ))
    session.commit()
def create_flagged_project(session):
    """ Create and flag a project. Returns the ProjectFlag. """
    project_kwargs = dict(
        name='geany',
        homepage='http://www.geany.org/',
        version_url='http://www.geany.org/Download/Releases',
        regex='DEFAULT',
        user_id='noreply@fedoraproject.org',
    )
    project = anitya.lib.create_project(session, **project_kwargs)
    session.add(project)
    # Flag the freshly-created project as a duplicate.
    flag = anitya.lib.flag_project(
        session,
        project,
        "This is a duplicate.",
        "dgay@redhat.com",
        "user_openid_id",
    )
    session.add(flag)
    session.commit()
    return flag
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main(verbosity=2)
|
This file is part of Autobump.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
"""
Convert a Python codebase into a list of Units.
"""
import os
import ast
import sys
import codecs
import logging
import traceback
from autobump import config
from autobump.capir import Type, Field, Parameter, Signature, Function, Unit
logger = logging.getLogger(__name__)
_source_file_ext = ".py"
class _PythonType(Type):
pass
class _Dynamic(_PythonType):
    """Sentinel type used when no type information is available."""
    def __str__(self):
        return self.__repr__()
    def __repr__(self):
        return "dynamic"
class _StructuralType(_PythonType):
    """Structural (duck) type: the set of attribute names used on a value."""
    def __init__(self, attr_set):
        self.name = str(attr_set)
        self.attr_set = attr_set
    def is_compatible(self, other):
        # Compatible when `other` provides at least the attributes we need.
        if not isinstance(other, _StructuralType):
            return False
        return self.attr_set.issubset(other.attr_set)
    def __str__(self):
        return str(self.attr_set)
class _HintedType(_PythonType):
    """Nominal type taken from a PEP 484 type-hint annotation."""
    def __init__(self, name):
        self.name = name
    def is_compatible(self, other):
        # Hinted types are only compatible when they are equal.
        return self.__eq__(other)
    def __eq__(self, other):
        # BUG FIX: guard against objects without a `name` attribute
        # (e.g. _Dynamic), which previously raised AttributeError.
        return isinstance(other, _HintedType) and self.name == other.name
    def __hash__(self):
        # Defining __eq__ without __hash__ makes instances unhashable in
        # Python 3; hash on the name to stay consistent with __eq__.
        return hash(self.name)
    def __str__(self):
        return self.name
    def __repr__(self):
        return self.__str__()
# Shared singleton meaning "type unknown".
_dynamic = _Dynamic()
def _is_public(member_name):
    """Determine visibility of a member based on its name.

    `__init__` is treated as public even though it starts with an underscore.
    """
    if member_name == "__init__":
        return True
    return not member_name.startswith("_")
def _get_type_of_parameter(function, parameter):
    """Return the type of a parameter used in a function AST node.

    In this case, 'type' means structural instead of nominal type.
    Because Python is dynamically typed, it would be very hard to guess
    what type a parameter is without looking at usage. Instead of doing that,
    this walks the AST node describing the function and considers the type to be
    the set of all methods called on the parameter.

    Precedence: an explicit type hint wins; otherwise, if structural typing
    is disabled in the config, the parameter is simply `_dynamic`.
    """
    assert isinstance(function, ast.FunctionDef), "Tried to get usage of parameter in a non-function."
    # Check if there is a type hint for this parameter
    if config.type_hinting():
        for arg in function.args.args:
            if arg.arg == parameter:
                if arg.annotation:
                    return _HintedType(arg.annotation.id)
    if not config.structural_typing():
        return _dynamic
    # TODO: Don't completely omit 'self' in class methods,
    # it can be used to identify addition or removal of fields.
    if parameter == "self":
        return _StructuralType(set())
    # Generators to filter out AST
    def gen_no_inner_definitions(node):
        """Recursively yield all descendant nodes
        without walking any function or class definitions."""
        yield node
        for n in ast.iter_child_nodes(node):
            if isinstance(n, ast.FunctionDef) or \
               isinstance(n, ast.ClassDef):
                continue
            yield from gen_no_inner_definitions(n)
    def gen_only_attributes(node):
        """Yield only descendant nodes that represent attribute access,
        without traversing any function or class definitions."""
        for n in gen_no_inner_definitions(node):
            if isinstance(n, ast.Attribute) and \
               isinstance(n.value, ast.Name):
                yield n
    # Find the set of attributes for that parameter
    attr_set = set()
    for attr in gen_only_attributes(function):
        name = attr.value.id
        method = attr.attr
        if name == parameter:
            # TODO: Also consider method signature.
            attr_set.add(method)
    # Convert set of attributes to structural type
    return _StructuralType(attr_set)
def _get_signature(function):
    """Return the signature of a function AST node.

    Default values are right-aligned to the argument list (as in Python),
    so missing defaults are padded with None on the left. A literal `None`
    default is mapped to a TrueNone marker, because plain None already means
    "no default value" in this representation.
    """
    parameters = []
    args = function.args.args
    # Map all None parameters to a "TrueNone" object
    # because None indicates the absense of a default value.
    class TrueNone(object):
        pass
    defaults = [TrueNone
                if isinstance(a, ast.NameConstant) and a.value is None
                else a
                for a in function.args.defaults]
    # Prepend no default values.
    defaults = [None] * (len(args) - len(defaults)) + defaults
    args_with_defaults = list(zip(args, defaults))
    for arg_with_default in args_with_defaults:
        arg, default = arg_with_default
        # Reduce the AST default node to a plain Python value.
        if isinstance(default, ast.Name):
            # TODO: This does not differentiate between
            # "abc" and abc.
            default = default.id
        elif isinstance(default, ast.NameConstant):
            default = default.value
        elif isinstance(default, ast.Num):
            default = default.n
        elif isinstance(default, ast.Str):
            default = default.s
        type = _get_type_of_parameter(function, arg.arg)
        parameters.append(Parameter(arg.arg, type, default))
    # Note: we need to return a list with the signature inside
    # because the common representation allows for overloading,
    # which Python doesn't.
    return [Signature(parameters)]
def _container_to_unit(name, container):
    """Convert a Python AST module or class to a Unit.

    Only public members are considered; nested classes become nested Units,
    functions become Function entries, and simple assignments become Fields.
    """
    fields = dict()
    functions = dict()
    units = dict()
    for node in container.body:
        if hasattr(node, "name") and not _is_public(node.name):
            # Completely ignore any private things -
            # they are irrelevant to the API.
            continue
        if isinstance(node, ast.ClassDef):
            units[node.name] = _container_to_unit(node.name, node)
        elif isinstance(node, ast.FunctionDef):
            # Return type is always dynamic - it cannot be inferred here.
            functions[node.name] = Function(node.name, _dynamic, _get_signature(node))
        elif isinstance(node, ast.Assign):
            # TODO: Handle other forms of assignment.
            for target in [t for t in node.targets if isinstance(t, ast.Name) and _is_public(t.id)]:
                fields[target.id] = Field(target.id, _dynamic)
    return Unit(name, fields, functions, units)
def _module_to_unit(name, module):
    """Convert a Python AST module to a Unit.

    Thin wrapper: a module is handled exactly like any other container.
    """
    return _container_to_unit(name, module)
def python_codebase_to_units(location):
"""Returns a list of Units representing a Python codebase in 'location'."""
if config.type_hinting():
# When the handler is invoked, the 'ast' module needs to start
# pointing to 'ast35' from 'typed_ast' if type hinting is to be used.
# Note that 'ast' must be changed globally, as the other functions in this
# module rely on it as well.
global ast
from typed_ast import ast35
ast = ast35
units = dict()
for root, dirs, files in os.walk(location):
dirs[:] = [d for d in dirs if not config.dir_ignored(d)]
pyfiles = [f for f in files if f.endswith(_source_file_ext) and not config.file_ignored(f)]
for pyfile in pyfiles:
pymodule = pyfile[:-(len(_source_file_ext))] # Strip extension
with codecs.open(os.path.join(root, pyfile),
"r",
encoding="utf-8",
errors="replace") as f:
try:
units[pymodule] = _module_to_unit(pymodule, ast.parse(f.read()))
except Exception:
print(traceback.format_exc(), file=sys.stderr)
msg = "Failed to parse file {}".format(os.path.join(root, pyfile))
if config.python_omit_on_error():
logger.warning(msg)
else:
|
""" Helpers for making some things in PIL easier """
from PIL import ImageDraw, ImageFont, ImageStat
from math import ceil
def drawTextWithBorder(draw, text, coords,
                       fontname="Impact", fontsize=80,
                       color="#fff", strokecolor="#000"):
    """ Draw text with a border. Although PIL doesn't support this, it can be
    faked by drawing the text in the border color, with offsets, and then
    drawing the text in the center on top.
    See http://stackoverflow.com/a/8050556/4414003 """
    # 3 looks good for 80px font. This allows for adjusting proportionally.
    strokewidth = ceil(3 * fontsize / 80.0)  # Use ceiling to prevent 0
    font = ImageFont.truetype(fontname, fontsize)
    x, y = coords
    # Paint the stroke color at the four diagonal offsets first.
    corner_offsets = ((-strokewidth, -strokewidth),
                      (strokewidth, -strokewidth),
                      (-strokewidth, strokewidth),
                      (strokewidth, strokewidth))
    for dx, dy in corner_offsets:
        draw.text((x + dx, y + dy), text, font=font, fill=strokecolor)
    # Then the fill color on top, centered.
    draw.text(coords, text, font=font, fill=color)
def labelImage(im, text):
    """ Label an image with a string in the bottom right, using a text color
    that will ensure appropriate contrast.

    (Docstring fix: the label is placed at width - textwidth - 5, i.e. the
    bottom-RIGHT corner, not the bottom left as previously documented.) """
    d = ImageDraw.Draw(im)
    textsize = ImageFont.load_default().getsize(text)
    # 5px padding from the right and bottom edges.
    coords = im.size[0] - textsize[0] - 5, im.size[1] - textsize[1] - 5
    # Check color of image where the text would go: black text on light
    # areas, white text on dark areas.
    textarea = im.crop(coords + im.size)
    textareabrightness = ImageStat.Stat(textarea.convert("L")).mean[0]
    color = (0, 0, 0) if textareabrightness > 128 else (255, 255, 255)
    # Draw text
    d.text(coords, text, fill=color)
def proportionalResize(im, width):
    """ Resize an image to be a specified width while keeping aspect ratio """
    orig_width, orig_height = im.size
    aspect = float(orig_height) / float(orig_width)
    # Height follows from the requested width and the original aspect ratio.
    return im.resize((width, int(width * aspect)))
def findFontSize(text, width, font="Impact", margin=10):
""" Find the largest font size that will fit `text` onto `im`, given a
margin (in percent) that must be left around an image border. """
w = int(width * (1 - margin / 100.0 * 2)) # Width accounting for margin
wAt40 = ImageFont.truetype(font, 40).getsize(text)[0] # find size at 40px
return 40 * w / wAt40 # Use a proportion to adjust that for image size
if __name__ == "__main__":
from PIL import Image, ImageDraw
# Blank test image
i = Image.new("RGB", (1024, 768), "#abcdef")
d = ImageDraw.Draw(i)
# Calculate font size
text = "OMG SUCH FONT"
fontsize = findFontSize(text, 1024)
# Render font onto canvas (102px is 10% margin)
drawTextWithBorder(d, text, (102, 102), fontsize=fontsize)
# Test proportional resizing to a larger width
proportionalResize(i, 2000).show()
# Test proportional resizing to a smaller widthAt40
proportionalResize(i, 400).show()
|
import bpy
from bpy.props import *
from mathutils import Vector
from ... base_types.node import AnimationNode
from . spline_evaluation_base import SplineEvaluationBase
class EvaluateSplineNode(bpy.types.Node, AnimationNode, SplineEvaluationBase):
    """Animation node that samples a spline at a parameter and outputs the
    location and tangent at that point."""
    bl_idname = "an_EvaluateSplineNode"
    bl_label = "Evaluate Spline"
    def create(self):
        # Declare the node's input/output sockets.
        self.newInput("Spline", "Spline", "spline", defaultDrawType = "PROPERTY_ONLY")
        self.newInput("Float", "Parameter", "parameter", value = 0.0)
        self.newOutput("Vector", "Location", "location")
        self.newOutput("Vector", "Tangent", "tangent")
    def draw(self, layout):
        # parameterType presumably comes from SplineEvaluationBase -- TODO confirm
        layout.prop(self, "parameterType", text = "")
    def drawAdvanced(self, layout):
        # Resolution only matters for the uniform-parameter conversion.
        col = layout.column()
        col.active = self.parameterType == "UNIFORM"
        col.prop(self, "resolution")
    def execute(self, spline, parameter):
        """Evaluate the spline; returns zero vectors when it cannot be
        evaluated (e.g. not enough points)."""
        spline.update()
        if spline.isEvaluable:
            if self.parameterType == "UNIFORM":
                # Remap the parameter so it advances uniformly along arc length.
                spline.ensureUniformConverter(self.resolution)
                parameter = spline.toUniformParameter(parameter)
            return spline.evaluate(parameter), spline.evaluateTangent(parameter)
        else:
            return Vector((0, 0, 0)), Vector((0, 0, 0))
|
self.name = name
self.tmp_name = "kernelconfig_tmp_{!s}".format(self.name)
self.masters = None
self.packages = {}
self.categories = set()
kwargs.setdefault("logger_name", self.name)
super().__init__(overlay_dir, **kwargs)
# --- end of __init__ (...) ---
def get_masters_str(self):
return (
" ".join(self.masters) if self.masters is not None
else self.name
)
# --- end of get_masters_str (...) ---
def is_empty(self):
return not bool(self.packages)
def add_package(self, package_info):
cpv = package_info.cpv
if cpv in self.packages:
raise KeyError("duplicate entry for package {}".format(cpv))
self.categories.add(package_info.category)
self.packages[cpv] = package_info
self.logger.debug("packaged added: %s", package_info.cpv)
return True
# --- end of add_package (...) ---
def iter_packages(self):
return self.packages.values()
# --- end of iter_packages (...) ---
# --- end of AbstractTemporaryOverlay ---
class _TemporaryOverlay(AbstractTemporaryOverlay):
    """Temporary on-disk overlay: imports ebuilds (symlink with copy
    fallback) and writes the minimal profiles/metadata layout."""
    def populate(self):
        """Import each registered package's ebuild into the overlay tree."""
        # initially, try to symlink ebuilds,
        # and fall back to copying if symlinks are not supported
        copy_or_symlink = os.symlink
        copy_method_name = "symlink"
        for pkg_info in self.iter_packages():
            # if pkg_info.tmp_ebuild_file is not None: ignored
            pkg_dir = self.get_filepath(
                fspath.join_relpath(pkg_info.category, pkg_info.name)
            )
            ebuild_dst = fspath.join_relpath(pkg_dir, pkg_info.ebuild_name)
            self.logger.debug(
                "Importing ebuild for %s as %s",
                pkg_info.cpv, copy_method_name
            )
            self.logger.debug("ebuild file: %s", pkg_info.orig_ebuild_file)
            fs.dodir(pkg_dir)
            # unnecessary rmfile,
            # except for running mkoverlays on the same dir again
            fs.rmfile(ebuild_dst)
            try:
                copy_or_symlink(pkg_info.orig_ebuild_file, ebuild_dst)
            except OSError as oserr:
                # EPERM from symlink => filesystem does not support
                # symlinks; switch to file copying for this and all
                # subsequent packages.
                if (
                    copy_or_symlink is os.symlink
                    and oserr.errno == errno.EPERM
                ):
                    self.logger.debug(
                        (
                            'symlinks seem to be unsupported by the fs,'
                            ' falling back to copying'
                        )
                    )
                    copy_or_symlink = shutil.copyfile
                    copy_method_name = "file"
                    self.logger.debug(
                        "Trying to import ebuild for %s as %s",
                        pkg_info.cpv, copy_method_name
                    )
                    # raises:
                    copy_or_symlink(pkg_info.orig_ebuild_file, ebuild_dst)
                else:
                    raise
            # -- end try
            pkg_info.tmp_ebuild_file = ebuild_dst
        # -- end for
    # --- end of populate (...) ---
    def fs_init(self, eclass_importer=None):
        """Create the overlay's directory skeleton (root, profiles,
        metadata, eclass); logs and re-raises on filesystem errors."""
        self.logger.debug("Initializing overlay directory")
        try:
            self.fs_init_base()
            self.fs_init_profiles()
            self.fs_init_metadata()
            self.fs_init_eclass(eclass_importer=eclass_importer)
        except (OSError, IOError):
            self.logger.error("Failed to initialize overlay!")
            raise
    # --- end of fs_init (...) ---
    def fs_init_base(self):
        # reinit() or init(), i.e. mkdir with exists_ok=True or plain mkdir?
        fs.dodir(self.root)
    # ---
    def fs_init_profiles(self):
        """Write profiles/repo_name and profiles/categories."""
        profiles_dir = self.get_filepath("profiles")
        fs.dodir(profiles_dir)
        # "/repo_name"
        self.logger.debug("Creating profiles/repo_name")  # overly verbose
        with open(fspath.join_relpath(profiles_dir, "repo_name"), "wt") as fh:
            fh.write("{!s}\n".format(self.tmp_name))
        # --
        # "/categories"
        # dedup and sort categories
        self.logger.debug("Creating profiles/categories")  # overly verbose
        categories = sorted(self.categories)
        with open(fspath.join_relpath(profiles_dir, "categories"), "wt") as fh:
            if categories:
                fh.write("\n".join(categories))
                fh.write("\n")
        # --
    # --- end of fs_init_profiles (...) ---
    def fs_init_metadata(self):
        """Write metadata/layout.conf with repo_name and masters."""
        metadata_dir = self.get_filepath("metadata")
        fs.dodir(metadata_dir)
        # "/layout.conf"
        self.logger.debug("Creating metadata/layout.conf")  # overly verbose
        with open(
            fspath.join_relpath(metadata_dir, "layout.conf"), "wt"
        ) as fh:
            fh.write("repo_name = {!s}\n".format(self.tmp_name))
            # trailing whitespace in absence of "masters": don't care
            fh.write("masters = {!s}\n".format(self.get_masters_str()))
        # --
    # --- end of fs_init_metadata (...) ---
    @abc.abstractmethod
    def fs_init_eclass(self, eclass_importer):
        # Subclasses decide how eclasses are brought into the overlay.
        raise NotImplementedError()
    # --- end of fs_init_eclass (...) ---
# --- end of _TemporaryOverlay ---
class TemporaryOverlay(_TemporaryOverlay):
"""
@ivar linux_info_eclass_src: path to the linux-info eclass file
in the original repo
(or in one of its master repos)
@type linux_info_eclass_src: C{str} (initially C{None}
"""
    def __init__(self, overlay_dir, name, **kwargs):
        super().__init__(overlay_dir, name, **kwargs)
        # Filled in later by assign_repo_config().
        self.linux_info_eclass_src = None
    # --- end of __init__ (...) ---
    def assign_repo_config(self, port_iface, fallback_repo_config=True):
        """Look up this overlay's repo config (falling back to the main repo
        if allowed), set self.masters, and locate linux-info.eclass.

        Raises KeyError when neither the repo config nor a fallback is
        available, or when linux-info.eclass cannot be found.
        """
        # pylint: disable=E1101
        if fallback_repo_config is True:
            # True (the default) means "resolve the fallback now".
            fallback_repo_config = self.get_fallback_repo_config(port_iface)
        try:
            repo_config = port_iface.get_repo_config(self.name)
        except KeyError:
            self.logger.warning("Repo config for '%s' not found", self.name)
            if not fallback_repo_config:  # None, False
                raise
            # use the fallback repo config
            self.logger.warning(
                "Using main repo '%s' as fallback", fallback_repo_config.name
            )
            repo_config = fallback_repo_config
        else:
            self.logger.debug("Found repo config for '%s'", repo_config.name)
        # --
        # set masters
        self.masters = [repo_config.name]
        self.masters.extend((r.name for r in repo_config.masters))
        eclasses = repo_config.eclass_db.eclasses
        try:
            linux_info_eclass_src_info = eclasses["linux-info"]
        except KeyError:
            # eclass not found
            # That means, eclass not found in the original overlay
            # nor in one of its master repos ([possibly] including "gentoo").
            #
            # This strongly contradicts the fact that only packages
            # that have been successfully built in the past
            # and whose ebuild inherits linux-info.eclass
            # are added to *this* overlay.
            #
            # So, basically linux-info was present at pkg build time,
            # but cannot be found now.
            #
            self.logger.error(
                "linux-info.eclass not found - pm-integration cannot operate!"
            )
            raise  # FIXME: raise a more specific exception
        else:
            self.linux_info_eclass_src = linux_info_eclass_src_info.location
    # --- end of assign_repo_config (...) ---
def fs_init_eclass(self, eclass_importer):
eclass_dir = self.get_filepath("eclass")
fs.dodir(eclass_dir)
if not self.linux_info_eclass_src:
raise AssertionError("linux-info.eclass src is not set.")
linux_info_eclass_dst = fspath.join_relpath(
eclass_dir,
os.path.basename(self.linux_info_eclass_src) # "linux-info.eclass"
)
|
#!/usr/bin/env python3
# This file is part of BenchExec, a framework for reliable benchmarking:
# https://github.com/sosy-lab/benchexec
#
# SPDX-FileCopyrightText: 2007-2020 Dirk Beyer <https://www.sosy-lab.org>
#
# SPDX-License-Identifier: Apache-2.0
import argparse
import itertools
import sys
from benchexec import result, tablegenerator
from benchexec.tablegenerator import util
sys.dont_write_bytecode = True # prevent creation of .pyc files
def get_extract_value_function(column_identifier):
    """
    Returns a function that extracts the value of the column with title
    `column_identifier` from a run result, converted to Decimal.
    """

    def extract_value(run_result):
        """Find the column by title and convert its value; exit if absent."""
        pos = None
        for i, column in enumerate(run_result.columns):
            if column.title == column_identifier:
                pos = i
                break
        if pos is None:
            # BUG FIX: the message used to hard-code "CPU time", but any
            # column can be selected via --sort-by.
            sys.exit(
                f"Column '{column_identifier}' missing for task {run_result.task_id}."
            )
        return util.to_decimal(run_result.values[pos])

    return extract_value
def main(args=None):
    """Parse CLI arguments, load benchexec result XML files, and print
    sorted quantile-plot data (tab-separated) to stdout."""
    if args is None:
        args = sys.argv
    parser = argparse.ArgumentParser(
        fromfile_prefix_chars="@",
        description="""Create CSV tables for quantile plots with the results of a benchmark execution.
           The CSV tables are similar to those produced with table-generator,
           but have an additional first column with the index for the quantile plot,
           and they are sorted.
           The output is written to stdout.
           Part of BenchExec: https://github.com/sosy-lab/benchexec/""",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )
    parser.add_argument(
        "result",
        metavar="RESULT",
        type=str,
        nargs="+",
        help="XML files with result produced by benchexec",
    )
    parser.add_argument(
        "--correct-only",
        action="store_true",
        dest="correct_only",
        help="only use correct results (recommended, implied if --score-based is used)",
    )
    parser.add_argument(
        "--score-based",
        action="store_true",
        dest="score_based",
        help="create data for score-based quantile plot",
    )
    parser.add_argument(
        "--sort-by",
        metavar="SORT",
        default="cputime",
        dest="column_identifier",
        type=str,
        help="column identifier for sorting the values, e.g. 'cputime' or 'walltime'",
    )
    options = parser.parse_args(args[1:])
    # load results: merge all given XML files into a single run-set result
    run_set_result = tablegenerator.RunSetResult.create_from_xml(
        options.result[0], tablegenerator.parse_results_file(options.result[0])
    )
    for results_file in options.result[1:]:
        run_set_result.append(
            results_file, tablegenerator.parse_results_file(results_file)
        )
    run_set_result.collect_data(options.correct_only or options.score_based)
    # select appropriate results
    if options.score_based:
        # Score-based mode: only correct results are plotted; wrong results
        # shift the starting index by their (negative) score.
        start_index = 0
        index_increment = lambda run_result: run_result.score  # noqa: E731
        results = []
        for run_result in run_set_result.results:
            if run_result.score is None:
                sys.exit(
                    f"No score available for task {run_result.task_id}, "
                    f"cannot produce score-based quantile data."
                )
            if run_result.category == result.CATEGORY_WRONG:
                start_index += run_result.score
            elif run_result.category == result.CATEGORY_MISSING:
                sys.exit(
                    f"Property missing for task {run_result.task_id}, "
                    f"cannot produce score-based quantile data."
                )
            elif run_result.category == result.CATEGORY_CORRECT:
                results.append(run_result)
            else:
                assert run_result.category in {
                    result.CATEGORY_ERROR,
                    result.CATEGORY_UNKNOWN,
                }
    else:
        # Plain mode: every selected result advances the index by one.
        start_index = 0
        index_increment = lambda run_result: 1  # noqa: E731
        if options.correct_only:
            results = [
                run_result
                for run_result in run_set_result.results
                if run_result.category == result.CATEGORY_CORRECT
            ]
        else:
            results = run_set_result.results
    # sort data for quantile plot
    results.sort(key=get_extract_value_function(options.column_identifier))
    # extract information which id columns should be shown
    for run_result in run_set_result.results:
        run_result.id = run_result.task_id
    relevant_id_columns = tablegenerator.select_relevant_id_columns(results)
    # write output: one tab-separated line per result, prefixed by the
    # cumulative quantile index
    index = start_index
    for run_result in results:
        index += index_increment(run_result)
        task_ids = (
            task_id for task_id, show in zip(run_result.id, relevant_id_columns) if show
        )
        result_values = (util.remove_unit(value or "") for value in run_result.values)
        print(*itertools.chain([index], task_ids, result_values), sep="\t")
if __name__ == "__main__":
try:
sys.exit(main())
except KeyboardInterrupt:
sys.exit("Script was interrupted by user.")
|
    def __unicode__(self):
        # Python 2 readable representation: the theme name field.
        return self.csa_theme
    def __str__(self):
        # Python 3 readable representation: the theme name field.
        return self.csa_theme
    def get_api_url(self):
        """
        Get CSA theme URL as a reverse from model
        :return: URL
        :rtype: String
        """
        # Resolved from the API URL namespace using this record's unique slug.
        return reverse('csa_practice_api:csa_theme_detail', kwargs={'slug': self.slug})
    class Meta:
        # Newest records first; plural label used in the admin UI.
        ordering = ['-time_created', '-last_update']
        verbose_name_plural = 'CSA Practice Themes'
@property
def csa_practice_relation(self):
"""
Get related CSA practice
:return: Query result from the CSA practice model
:rtype: object/record
"""
| instance = self
qs = CsaPractice.objects.filter_by_model_type(instance)
return qs
@receiver(pre_save, sender=CsaTheme)
def pre_save_csa_theme_receiver(sender, instance, *args, **kwargs):
    """
    Generate a slug from the theme name just before the record is saved.
    :param sender: Signal sending object
    :param instance: Object instance
    :param args: Any other argument
    :param kwargs: Keyword arguments
    :return: None
    :rtype: None
    """
    if instance.slug:
        return
    instance.slug = create_slug(instance, CsaTheme, instance.csa_theme)
class PracticeLevel(AuthUserDetail, CreateUpdateTime):
    """
    CSA level of practice model. Creates CSA practice level entity.
    """
    slug = models.SlugField(max_length=150, unique=True, blank=True)
    practice_level = models.CharField(max_length=150, unique=True)
    def __unicode__(self):
        # Python 2 readable representation: the level name.
        return self.practice_level
    def __str__(self):
        # Python 3 readable representation: the level name.
        return self.practice_level
    def get_api_url(self):
        """
        Build the detail API endpoint for this practice level.
        :return: URL
        :rtype: String
        """
        return reverse('csa_practice_api:practice_level_detail', kwargs={'slug': self.slug})
    class Meta:
        # Newest records first; plural label used in the admin UI.
        ordering = ['-time_created', '-last_update']
        verbose_name_plural = 'CSA Practice Levels'
    @property
    def csa_practice_relation(self):
        """
        CSA practices linked to this level.
        :return: Query result from the CSA practice model
        :rtype: object/record
        """
        return CsaPractice.objects.filter_by_model_type(self)
@receiver(pre_save, sender=PracticeLevel)
def pre_save_practice_level_receiver(sender, instance, *args, **kwargs):
    """
    Generate a slug from the level name just before the record is saved.
    :param sender: Signal sending object
    :param instance: Object instance
    :param args: Any other argument
    :param kwargs: Keyword arguments
    :return: None
    :rtype: None
    """
    if instance.slug:
        return
    instance.slug = create_slug(instance, PracticeLevel, instance.practice_level)
class PracticeType(AuthUserDetail, CreateUpdateTime):
    """
    CSA practice type model. Creates CSA practice type entity.
    """
    slug = models.SlugField(max_length=120, unique=True, blank=True)
    practice_type = models.CharField(max_length=120, unique=True, verbose_name='Practice category')
    def __unicode__(self):
        # Python 2 readable representation: the type name.
        return self.practice_type
    def __str__(self):
        # Python 3 readable representation: the type name.
        return self.practice_type
    def get_api_url(self):
        """
        Build the detail API endpoint for this practice type.
        :return: URL
        :rtype: String
        """
        return reverse('csa_practice_api:practice_type_detail', kwargs={'slug': self.slug})
    class Meta:
        # Newest records first; plural label used in the admin UI.
        ordering = ['-time_created', '-last_update']
        verbose_name_plural = 'CSA Practice Types'
    @property
    def csa_practice_relation(self):
        """
        CSA practices linked to this type.
        :return: Query result from the CSA practice model
        :rtype: object/record
        """
        return CsaPractice.objects.filter_by_model_type(self)
@receiver(pre_save, sender=PracticeType)
def pre_save_practice_type_receiver(sender, instance, *args, **kwargs):
    """
    Generate a slug from the type name just before the record is saved.
    :param sender: Signal sending object
    :param instance: Object instance
    :param args: Any other argument
    :param kwargs: Keyword arguments
    :return: None
    :rtype: None
    """
    if instance.slug:
        return
    instance.slug = create_slug(instance, PracticeType, instance.practice_type)
class CsaPracticeManager(models.Manager):
    """
    CSA practice model manager
    """
    def filter_by_model_type(self, instance):
        """
        Query related objects/model type
        :param instance: Object instance
        :return: Matching object else none
        :rtype: Object/record
        """
        related_qs = model_foreign_key_qs(instance, self, CsaPracticeManager)
        if not related_qs.exists():
            return None
        return model_type_filter(self, related_qs, CsaPracticeManager)
class CsaPractice(AuthUserDetail, CreateUpdateTime):
    """
    CSA practice model. Creates CSA practice entity.
    """
    slug = models.SlugField(unique=True, blank=True)
    practice_code = models.CharField(max_length=6, unique=True, help_text='User defined CSA practice code')
    csatheme = models.ForeignKey(CsaTheme, on_delete=models.PROTECT, verbose_name='CSA theme')
    practicelevel = models.ForeignKey(PracticeLevel, on_delete=models.PROTECT, verbose_name='Practice level')
    sub_practice_level = models.TextField(blank=True, null=True)
    sub_subpractice_level = models.TextField(blank=True, null=True)
    definition = models.TextField(blank=True, null=True)
    practicetype = models.ForeignKey(PracticeType, on_delete=models.PROTECT, verbose_name='Practice category')
    objects = CsaPracticeManager()
    def __unicode__(self):
        # Python 2 readable representation: the sub-practice level text.
        return self.sub_practice_level
    def __str__(self):
        # Python 3 readable representation: the sub-practice level text.
        return self.sub_practice_level
    def get_api_url(self):
        """
        Build the detail API endpoint for this practice.
        :return: URL
        :rtype: String
        """
        return reverse('csa_practice_api:csa_practice_detail', kwargs={'slug': self.slug})
    class Meta:
        # Newest records first; plural label used in the admin UI.
        ordering = ['-time_created', '-last_update']
        verbose_name_plural = 'CSA Practices'
    @property
    def research_csa_practice(self):
        """
        Research CSA practice records linked to this practice.
        :return: Query result from the research CSA practice model
        :rtype: object/record
        """
        return ResearchCsaPractice.objects.filter_by_model_type(self)
@receiver(pre_save, sender=CsaPractice)
def pre_save_csa_practice_receiver(sender, instance, *args, **kwargs):
    """
    Generate a slug from the practice code just before the record is saved.
    :param sender: Signal sending object
    :param instance: Object instance
    :param args: Any other argument
    :param kwargs: Keyword arguments
    :return: None
    :rtype: None
    """
    if instance.slug:
        return
    instance.slug = create_slug(instance, CsaPractice, instance.practice_code)
class ResearchCsaPracticeManager(models.Manager):
    """
    Research CSA practice model manager
    """
    def filter_by_instance(self, instance):
        """
        Query a related research CSA practice object/record from another model's object
        :param instance: Object instance
        :return: Query result from content type/model
        :rtype: object/record
        """
        return model_instance_filter(instance, self, ResearchCsaPracticeManager)
    def filter_by_model_type(self, instance):
        """
        Query related objects/model type
        :param instance: Object instance
        :return: Matching object else none
        :rtype: Object/record
        """
        related_qs = model_foreign_key_qs(instance, self, ResearchCsaPracticeManager)
        if not related_qs.exists():
            return None
        return model_type_filter(self, related_qs, ResearchCsaPracticeManager)
    def create_by_model_type(self, model_type, pk, **kwargs):
        """
        Create object by model type
        :param model_type: Content/model type
        :param pk: Primary key
        :param kwargs: Fields to be created
        :return: Data object
        :rtype: Object
        """
        return create_model_type(self, model_type, pk, slugify=False, **kwargs)
class ResearchCs |
"IsWeight",
"Weight",
tooltip).IsWeight = True
# Add the mass property for puntual weights
tooltip = str(QtGui.QApplication.translate(
"ship_weight",
"Mass [kg]",
None))
obj.addProperty("App::PropertyFloat",
"Mass",
"Weight",
tooltip).Mass = 0.0
# Add the density property for linear elements
tooltip = str(QtGui.QApplication.translate(
"ship_weight",
"Linear density [kg / m]",
None))
obj.addProperty("App::PropertyFloat",
"LineDens",
"Weight",
tooltip).LineDens = 0.0
# Add the area density property for surface elements
tooltip = str(QtGui.QApplication.translate(
"ship_weight",
"Area density [kg / m^2]",
None))
obj.addProperty("App::PropertyFloat",
"AreaDens",
"Weight",
tooltip).AreaDens = 0.0
# Add the density property for volumetric elements
tooltip = str(QtGui.QApplication.translate(
"ship_weight",
"Density [kg / m^3]",
None))
obj.addProperty("App::PropertyFloat",
"Dens",
"Weight",
tooltip).Dens = 0.0
# Set the subshapes
obj.Shape = Part.makeCompound(shapes)
obj.Proxy = self
    def onChanged(self, fp, prop):
        """Detects the ship data changes.
        Position arguments:
        fp -- Part::FeaturePython object affected.
        prop -- Modified property name.
        """
        if prop == "Mass":
            # No special handling is currently implemented for mass changes.
            pass
    def execute(self, fp):
        """Detects the entity recomputations.
        Position arguments:
        fp -- Part::FeaturePython object affected.
        """
        # Recomputation requires no action for this object.
        pass
def _getPuntualMass(self, fp, shape):
"""Compute the mass of a puntual element.
Position arguments:
fp -- Part::FeaturePython object affected.
shape -- Vertex shape object.
"""
return Units.parseQuantity('{0} kg'.format(fp.Mass))
def _getLinearMass(self, fp, shape):
"""Compute the mass of a linear element.
Position arguments:
fp -- Part::FeaturePython object affected.
shape -- Edge shape object.
"""
rho = Units.parseQuantity('{0} kg/m'.format(fp.LineDens))
l = Units.Quantity(shape.Length, Units.Length)
return rho * l
def _getAreaMass(self, fp, shape):
"""Compute the mass of an area element.
Position arguments:
fp -- Part::FeaturePython object affected.
shape -- Face shape object.
"""
|
rho = Units.parseQuantity('{0} kg/m^2'. | format(fp.AreaDens))
a = Units.Quantity(shape.Area, Units.Area)
return rho * a
def _getVolumetricMass(self, fp, shape):
"""Compute the mass of a volumetric element.
Position arguments:
fp -- Part::FeaturePython object affected.
shape -- Solid shape object.
"""
rho = Units.parseQuantity('{0} kg/m^3'.format(fp.Dens))
v = Units.Quantity(shape.Volume, Units.Volume)
return rho * v
def getMass(self, fp):
"""Compute the mass of the object, already taking into account the
type of subentities.
Position arguments:
fp -- Part::FeaturePython object affected.
Returned value:
Object mass
"""
m = Units.parseQuantity('0 kg')
for s in fp.Shape.Solids:
m += self._getVolumetricMass(fp, s)
for f in fp.Shape.Faces:
m += self._getAreaMass(fp, f)
for e in fp.Shape.Edges:
m += self._getLinearMass(fp, e)
for v in fp.Shape.Vertexes:
m += self._getPuntualMass(fp, v)
return m
def _getPuntualMoment(self, fp, shape):
"""Compute the moment of a puntual element (respect to 0, 0, 0).
Position arguments:
fp -- Part::FeaturePython object affected.
shape -- Vertex shape object.
"""
m = self._getPuntualMass(fp, shape)
x = Units.Quantity(shape.X, Units.Length)
y = Units.Quantity(shape.Y, Units.Length)
z = Units.Quantity(shape.Z, Units.Length)
return (m * x, m * y, m * z)
def _getLinearMoment(self, fp, shape):
"""Compute the mass of a linear element (respect to 0, 0, 0).
Position arguments:
fp -- Part::FeaturePython object affected.
shape -- Edge shape object.
"""
m = self._getLinearMass(fp, shape)
cog = shape.CenterOfMass
x = Units.Quantity(cog.x, Units.Length)
y = Units.Quantity(cog.y, Units.Length)
z = Units.Quantity(cog.z, Units.Length)
return (m * x, m * y, m * z)
def _getAreaMoment(self, fp, shape):
"""Compute the mass of an area element (respect to 0, 0, 0).
Position arguments:
fp -- Part::FeaturePython object affected.
shape -- Face shape object.
"""
m = self._getAreaMass(fp, shape)
cog = shape.CenterOfMass
x = Units.Quantity(cog.x, Units.Length)
y = Units.Quantity(cog.y, Units.Length)
z = Units.Quantity(cog.z, Units.Length)
return (m * x, m * y, m * z)
def _getVolumetricMoment(self, fp, shape):
"""Compute the mass of a volumetric element (respect to 0, 0, 0).
Position arguments:
fp -- Part::FeaturePython object affected.
shape -- Solid shape object.
"""
m = self._getVolumetricMass(fp, shape)
cog = shape.CenterOfMass
x = Units.Quantity(cog.x, Units.Length)
y = Units.Quantity(cog.y, Units.Length)
z = Units.Quantity(cog.z, Units.Length)
return (m * x, m * y, m * z)
def getMoment(self, fp):
"""Compute the mass of the object, already taking into account the
type of subentities.
Position arguments:
fp -- Part::FeaturePython object affected.
Returned value:
List of moments toward x, y and z
"""
m = [Units.parseQuantity('0 kg*m'),
Units.parseQuantity('0 kg*m'),
Units.parseQuantity('0 kg*m')]
for s in fp.Shape.Solids:
mom = self._getVolumetricMoment(fp, s)
for i in range(len(m)):
m[i] = m[i] + mom[i]
for f in fp.Shape.Faces:
mom = self._getAreaMoment(fp, f)
for i in range(len(m)):
m[i] = m[i] + mom[i]
for e in fp.Shape.Edges:
mom = self._getLinearMoment(fp, e)
for i in range(len(m)):
m[i] = m[i] + mom[i]
for v in fp.Shape.Vertexes:
mom = self._getPuntualMoment(fp, v)
for i in range(len(m)):
m[i] = m[i] + mom[i]
return m
def getCenterOfMass(self, fp):
"""Compute the mass of the object, already taking into account the
type of subentities.
Position arguments:
fp -- Part::FeaturePython object affected.
Returned value:
Center of Mass vector
"""
mass = self.getMass(fp)
moment = self.getMoment(fp)
cog = []
for i in range(len(moment)):
cog.append(moment[i] / mass)
return Vector(cog[0].Value, cog[1].Value, cog[2].Value)
class ViewProviderWeight:
    def __init__(self, obj):
        """Add this view provider to the selected object.
        Keyword arguments:
        obj -- Object which must be modified.
        """
        # Register this instance as the object's view-provider proxy.
        obj.Proxy = self
def attach(self, obj):
"""Setup the scene sub-graph of the view provider, this m |
citable')
# Continue registering the adjective word list; the tuple preserves the
# original one-call-per-word ordering of ins() invocations.
for _adjective in (
    'excited', 'exciting', 'exotic', 'expensive', 'experienced', 'expert',
    'extraneous', 'extroverted', 'extra-large', 'extra-small', 'fabulous',
    'failing', 'faint', 'fair', 'faithful', 'fake', 'false', 'familiar',
    'famous', 'fancy', 'fantastic', 'far', 'faraway', 'far-flung', 'far-off',
    'fast', 'fat', 'fatal', 'fatherly', 'favorable', 'favorite', 'fearful',
    'fearless', 'feisty', 'feline', 'female', 'feminine', 'few', 'fickle',
    'filthy', 'fine', 'finished', 'firm', 'first', 'firsthand', 'fitting',
    'fixed', 'flaky', 'flamboyant', 'flashy', 'flat', 'flawed', 'flawless',
    'flickering', 'flimsy', 'flippant', 'flowery', 'fluffy', 'fluid',
    'flustered', 'focused', 'fond', 'foolhardy', 'foolish', 'forceful',
    'forked', 'formal', 'forsaken', 'forthright', 'fortunate', 'fragrant',
    'frail', 'frank', 'frayed', 'free', 'French', 'fresh', 'frequent',
    'friendly', 'frightened', 'frightening', 'frigid', 'frilly', 'frizzy',
    'frivolous', 'front', 'frosty', 'frozen', 'frugal', 'fruitful', 'full',
    'fumbling', 'functional', 'funny', 'fussy', 'fuzzy', 'gargantuan',
    'gaseous', 'general', 'generous', 'gentle', 'genuine', 'giant', 'giddy',
    'gigantic', 'gifted', 'giving', 'glamorous', 'glaring', 'glass',
    'gleaming', 'gleeful', 'glistening', 'glittering', 'gloomy', 'glorious',
    'glossy', 'glum', 'golden', 'good', 'good-natured', 'gorgeous',
    'graceful', 'gracious', 'grand', 'grandiose', 'granular', 'grateful',
    'grave', 'gray', 'great', 'greedy', 'green', 'gregarious', 'grim',
    'grimy', 'gripping', 'grizzled', 'gross', 'grotesque', 'grouchy',
    'grounded', 'growing', 'growling', 'grown', 'grubby', 'gruesome',
    'grumpy', 'guilty', 'gullible', 'gummy', 'hairy', 'half', 'handmade',
    'handsome', 'handy', 'happy', 'happy-go-lucky', 'hard', 'hard-to-find',
    'harmful', 'harmless', 'harmonious', 'harsh', 'hasty', 'hateful',
    'haunting', 'healthy', 'heartfelt', 'hearty', 'heavenly', 'heavy',
    'hefty', 'helpful', 'helpless', 'hidden', 'hideous', 'high',
    'high-level', 'hilarious', 'hoarse', 'hollow', 'homely', 'honest',
    'honorable', 'honored', 'hopeful', 'horrible', 'hospitable', 'hot',
    'huge', 'humble', 'humiliating', 'humming', 'humongous', 'hungry',
    'hurtful', 'husky', 'icky', 'icy', 'ideal', 'idealistic', 'identical',
    'idle', 'idiotic', 'idolized', 'ignorant', 'ill', 'illegal', 'ill-fated',
    'ill-informed', 'illiterate', 'illustrious', 'imaginary', 'imaginative',
    'immaculate', 'immaterial', 'immediate', 'immense', 'impassioned',
    'impeccable', 'impartial', 'imperfect', 'imperturbable', 'impish',
    'impolite', 'important', 'impossible', 'impractical', 'impressionable',
    'impressive', 'improbable', 'impure', 'inborn', 'incomparable',
    'incompatible', 'incomplete', 'inconsequential', 'incredible',
    'indelible', 'inexperienced', 'indolent', 'infamous', 'infantile',
    'infatuated', 'inferior', 'infinite', 'informal', 'innocent', 'insecure',
    'insidious', 'insignificant', 'insistent', 'instructive',
    'insubstantial', 'intelligent', 'intent', 'intentional', 'interesting',
    'internal', 'international', 'intrepid', 'ironclad', 'irresponsible',
    'irritating', 'itchy', 'jaded', 'jagged', 'jam-packed', 'jaunty',
    'jealous', 'jittery', 'joint', 'jolly', 'jovial', 'joyful', 'joyous',
    'jubilant', 'judicious', 'juicy', 'jumbo', 'junior', 'jumpy', 'juvenile',
    'kaleidoscopic', 'keen', 'key', 'kind', 'kindhearted', 'kindly',
    'klutzy', 'knobby', 'knotty', 'knowledgeable', 'knowing', 'known',
    'kooky', 'kosher', 'lame', 'lanky', 'large', 'last', 'lasting', 'late',
    'lavish', 'lawful', 'lazy', 'leading', 'lean', 'leafy', 'left', 'legal',
    'legitimate', 'light', 'lighthearted', 'likable', 'likely', 'limited',
    'limp', 'limping', 'linear', 'lined', 'liquid', 'little', 'live',
    'lively', 'livid', 'loathsome', 'lone', 'lonely', 'long', 'long-term',
    'loose', 'lopsided', 'lost', 'loud', 'lovable', 'lovely', 'loving',
    'low', 'loyal', 'lucky', 'lumbering', 'luminous', 'lumpy', 'lustrous',
    'luxurious', 'mad', 'made-up', 'magnificent', 'majestic', 'major',
    'male', 'mammoth', 'married', 'marvelous', 'masculine', 'massive',
    'mature', 'meager', 'mealy', 'mean', 'measly', 'meaty', 'medical',
    'mediocre', 'medium', 'meek', 'mellow', 'melodic', 'memorable',
    'menacing', 'merry', 'messy', 'metallic', 'mild', 'milky', 'mindless',
    'miniature', 'minor', 'minty', 'miserable', 'miserly', 'misguided',
    'misty', 'mixed', 'modern', 'modest', 'moist', 'monstrous', 'monthly',
    'monumental', 'moral', 'mortified', 'motherly', 'motionless',
    'mountainous', 'muddy', 'muffled', 'multicolored', 'mundane', 'murky',
    'mushy', 'musty', 'muted', 'mysterious', 'naive', 'narrow', 'nasty',
    'natural', 'naughty', 'nautical', 'near', 'neat', 'necessary', 'needy',
    'negative', 'neglected', 'negligible', 'neighboring', 'nervous', 'new',
    'next', 'nice', 'nifty', 'nimble', 'nippy', 'nocturnal', 'noisy',
    'nonstop', 'normal', 'notable', 'noted', 'noteworthy', 'novel',
    'noxious', 'numb', 'nutritious', 'nutty', 'obedient', 'obese', 'oily',
    'oblong', 'obvious', 'occasional', 'odd', 'oddball', 'offbeat',
    'offensive', 'official', 'old', 'old-fashioned', 'only', 'open',
    'optimal', 'optimistic', 'opulent', 'orange', 'orderly', 'organic',
    'ornate', 'ornery', 'ordinary', 'original', 'other', 'our', 'outlying',
    'outgoing', 'outlandish', 'outrageous', 'outstanding', 'oval',
    'overcooked', 'overdue', 'overjoyed', 'overlooked', 'palatable', 'pale',
    'paltry', 'parallel', 'parched', 'partial', 'passionate', 'past',
    'pastel', 'peaceful', 'peppery', 'perfect', 'perfumed', 'periodic',
    'perky', 'personal', 'pertinent', 'pesky', 'pessimistic', 'petty',
    'phony', 'physical', 'piercing', 'pink', 'pitiful', 'plain',
    'plaintive', 'plastic', 'playful', 'pleasant', 'pleased', 'pleasing',
    'plump', 'plush', 'polished', 'polite', 'political', 'pointed',
    'pointless', 'poised', 'poor', 'popular', 'portly', 'posh', 'positive',
    'possible', 'potable', 'powerful', 'powerless', 'practical', 'precious',
    'present', 'prestigious', 'pretty', 'previous', 'pricey', 'prickly',
    'primary', 'prime', 'pristine', 'private', 'prize', 'probable',
    'productive', 'profitable', 'profuse',
):
    ins(_adjective)
i |
oring of completion configurations."""
from __future__ import annotations
import abc
import dataclasses
import os
import typing as t
from .constants import (
CONTROLLER_PYTHON_VERSIONS,
SUPPORTED_PYTHON_VERSIONS,
)
from .util import (
ANSIBLE_TEST_DATA_ | ROOT,
read_lines_without_comments,
)
from .data impo | rt (
data_context,
)
@dataclasses.dataclass(frozen=True)
class CompletionConfig(metaclass=abc.ABCMeta):
    """Base class for completion configuration."""
    name: str  # entry name as it appears in the completion data file
    @property
    @abc.abstractmethod
    def is_default(self):
        """True if the completion entry is only used for defaults, otherwise False."""
@dataclasses.dataclass(frozen=True)
class PosixCompletionConfig(CompletionConfig, metaclass=abc.ABCMeta):
    """Base class for completion configuration of POSIX environments."""
    @property
    @abc.abstractmethod
    def supported_pythons(self):  # type: () -> t.List[str]
        """Return a list of the supported Python versions."""
    @abc.abstractmethod
    def get_python_path(self, version):  # type: (str) -> str
        """Return the path of the requested Python version."""
    def get_default_python(self, controller):  # type: (bool) -> str
        """Return the default Python version for a controller or target as specified."""
        allowed = CONTROLLER_PYTHON_VERSIONS if controller else SUPPORTED_PYTHON_VERSIONS
        # First supported version valid in the requested context wins.
        matching = [candidate for candidate in self.supported_pythons if candidate in allowed]
        return matching[0]
    @property
    def controller_supported(self):  # type: () -> bool
        """True if at least one Python version is provided which supports the controller, otherwise False."""
        return any(version in CONTROLLER_PYTHON_VERSIONS for version in self.supported_pythons)
@dataclasses.dataclass(frozen=True)
class PythonCompletionConfig(PosixCompletionConfig, metaclass=abc.ABCMeta):
    """Base class for completion configuration of Python environments."""
    python: str = ''
    python_dir: str = '/usr/bin'
    @property
    def supported_pythons(self):  # type: () -> t.List[str]
        """Return a list of the supported Python versions."""
        if not self.python:
            return []
        declared = self.python.split(',')
        return [version for version in declared if version in SUPPORTED_PYTHON_VERSIONS]
    def get_python_path(self, version):  # type: (str) -> str
        """Return the path of the requested Python version."""
        return os.path.join(self.python_dir, f'python{version}')
@dataclasses.dataclass(frozen=True)
class RemoteCompletionConfig(CompletionConfig):
    """Base class for completion configuration of remote environments provisioned through Ansible Core CI."""
    provider: t.Optional[str] = None
    @property
    def platform(self):
        """The name of the platform."""
        # Names follow the "platform/version" convention.
        platform, _sep, _version = self.name.partition('/')
        return platform
    @property
    def version(self):
        """The version of the platform."""
        _platform, _sep, version = self.name.partition('/')
        return version
    @property
    def is_default(self):
        """True if the completion entry is only used for defaults, otherwise False."""
        return not self.version
    def __post_init__(self):
        if not self.provider:
            raise Exception(f'Remote completion entry "{self.name}" must provide a "provider" setting.')
@dataclasses.dataclass(frozen=True)
class InventoryCompletionConfig(CompletionConfig):
    """Configuration for inventory files."""
    def __init__(self):  # type: () -> None
        # Fixed name: inventory entries are not read from a completion file.
        super().__init__(name='inventory')
    @property
    def is_default(self):  # type: () -> bool
        """True if the completion entry is only used for defaults, otherwise False."""
        return False
@dataclasses.dataclass(frozen=True)
class PosixSshCompletionConfig(PythonCompletionConfig):
    """Configuration for a POSIX host reachable over SSH."""
    def __init__(self, user, host):  # type: (str, str) -> None
        # SSH targets advertise every supported Python; the actual version is
        # resolved later from what the host provides.
        super().__init__(
            name=f'{user}@{host}',
            python=','.join(SUPPORTED_PYTHON_VERSIONS),
        )
    @property
    def is_default(self):  # type: () -> bool
        """True if the completion entry is only used for defaults, otherwise False."""
        return False
@dataclasses.dataclass(frozen=True)
class DockerCompletionConfig(PythonCompletionConfig):
    """Configuration for Docker containers."""
    image: str = ''  # container image reference (required, enforced below)
    seccomp: str = 'default'  # seccomp profile name passed to the container runtime
    placeholder: bool = False  # when True, the "python" requirement is waived
    @property
    def is_default(self):
        """True if the completion entry is only used for defaults, otherwise False."""
        return False
    def __post_init__(self):
        # Validate required settings as soon as the entry is constructed.
        if not self.image:
            raise Exception(f'Docker completion entry "{self.name}" must provide an "image" setting.')
        if not self.supported_pythons and not self.placeholder:
            raise Exception(f'Docker completion entry "{self.name}" must provide a "python" setting.')
@dataclasses.dataclass(frozen=True)
class NetworkRemoteCompletionConfig(RemoteCompletionConfig):
    """Configuration for remote network platforms."""
    collection: str = ''  # collection providing the platform support
    connection: str = ''  # connection plugin used to reach the platform
@dataclasses.dataclass(frozen=True)
class PosixRemoteCompletionConfig(RemoteCompletionConfig, PythonCompletionConfig):
    """Configuration for remote POSIX platforms."""
    placeholder: bool = False
    def __post_init__(self):
        # Versioned entries need an explicit "python" (unless a placeholder);
        # platform defaults (no version) must not declare one.
        has_pythons = bool(self.supported_pythons)
        if not has_pythons and self.version and not self.placeholder:
            raise Exception(f'POSIX remote completion entry "{self.name}" must provide a "python" setting.')
        if has_pythons and not self.version:
            raise Exception(f'POSIX remote completion entry "{self.name}" is a platform default and cannot provide a "python" setting.')
@dataclasses.dataclass(frozen=True)
class WindowsRemoteCompletionConfig(RemoteCompletionConfig):
    """Configuration for remote Windows platforms."""
    # No additional settings beyond the remote base class.
TCompletionConfig = t.TypeVar('TCompletionConfig', bound=CompletionConfig)  # any CompletionConfig subclass
def load_completion(name, completion_type):  # type: (str, t.Type[TCompletionConfig]) -> t.Dict[str, TCompletionConfig]
    """Load the named completion entries, returning them in dictionary form using the specified completion type."""
    path = os.path.join(ANSIBLE_TEST_DATA_ROOT, 'completion', '%s.txt' % name)
    lines = read_lines_without_comments(path, remove_blank_lines=True)
    # Entries may be scoped to a context; keep only those matching ours
    # (entries without a "context" setting apply everywhere).
    context = 'collection' if data_context().content.collection else 'ansible-core'
    parsed = [parse_completion_entry(line) for line in lines]
    items = {entry_name: data for entry_name, data in parsed if data.get('context', context) == context}
    for data in items.values():
        # These settings are consumed here and are not constructor fields.
        data.pop('context', None)
        data.pop('placeholder', None)
    return {entry_name: completion_type(name=entry_name, **data) for entry_name, data in items.items()}
def parse_completion_entry(value):  # type: (str) -> t.Tuple[str, t.Dict[str, str]]
    """Parse the given completion entry, returning the entry name and a dictionary of key/value settings."""
    parts = value.split()
    name = parts[0]
    data = {}
    for setting in parts[1:]:
        # "key=value" becomes key -> value; a bare "key" becomes key -> ''.
        key, _sep, setting_value = setting.partition('=')
        data[key] = setting_value
    return name, data
def filter_completion(
        completion,  # type: t.Dict[str, TCompletionConfig]
        controller_only=False,  # type: bool
        include_defaults=False,  # type: bool
):  # type: (...) -> t.Dict[str, TCompletionConfig]
    """Return the given completion dictionary, filtering out configs which do not support the controller if controller_only is specified."""
    # (Docstring typo "Return a the given" fixed.)
    if controller_only:
        completion = {name: config for name, config in completion.items() if config.controller_supported}
    if not include_defaults:
        # Default-only entries are hidden unless explicitly requested.
        completion = {name: config for name, config in completion.items() if not config.is_default}
    return completion
# Completion entries are loaded once at import time from the bundled data files.
DOCKER_COMPLETION = load_completion('docker', DockerCompletionConfig)
REMOTE_COMPLETION = load_completion('remote', PosixRemoteCompletionConfig)
WINDOWS_COMPLETION = load_completion('windows', WindowsRemoteCompletionConfig)
NETWORK_COMPLETION = load_completion('network', NetworkRemoteComplet |
#!../../../.env/bin/python
"""Sanity-check numpy's column-wise argmax/amax against hand-computed values."""
import os
import numpy as np
import time

# 3x3 test matrix with a known max per column.
a = np.array([
    [1, 0, 3],
    [0, 2, 1],
    [0.1, 0, 0],
])
# Python-2 `print x` statements ported to the print() function; the printed
# output is unchanged, and the script now runs on Python 3.
print(a)
row = 1
col = 2
print(a[row][col])
assert a[row][col] == 1
# Per column (axis=0): row index of the max, and the max value itself.
expected_max_rows = [0, 1, 0]
expected_max_values = [1, 2, 3]
print('expected_max_rows:', expected_max_rows)
print('expected_max_values:', expected_max_values)
t0 = time.time()
actual_max_rows = list(np.argmax(a, axis=0))
td = time.time() - t0
actual_max_values = list(np.amax(a, axis=0))
print('td:', round(td, 4))
print('actual_max_rows:', actual_max_rows)
print('actual_max_values:', actual_max_values)
assert actual_max_rows == expected_max_rows
assert actual_max_values == expected_max_values
|
"""
pyN | EAT
Copyright (C) 2007-2008 Brian Greer
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Fre | e Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
class Mutator:
    """Enumeration-style constants naming weight-mutation distribution types."""
    # NOTE(review): presumably selects how connection weights are perturbed
    # (normal vs. "cold" reinitialising gaussian) — confirm against the
    # mutation code that consumes these constants.
    GAUSSIAN = 0
    COLDGAUSSIAN = 1
|
import math
import sys

from OpenGL.GL import *
from OpenGL.GLUT import *
from OpenGL.GLU import *
from OpenGL.GLUT.freeglut import *

import GlutWrapper
ESCAPE = b'\033'
class GlutViewController(GlutWrapper.GlutWrapper):
    """Demo GLUT view: draws a spinning teapot with FPS and corner overlays."""
    def __init__(self):
        super(GlutViewController, self).__init__()
        self.count = 0.0  # accumulated rotation angle, degrees

    def display(self, deltaTime):
        """Render one frame; deltaTime is seconds since the previous frame."""
        self.drawAxis(50)
        self.count += 1.0
        glRotate(self.count, 0, 1, 0)
        glutSolidTeapot(10)
        if deltaTime > 0.0:
            fpsString = "FPS: %.1f" % (1.0/deltaTime)
            self.overlayString(fpsString, 0.0, 0.0)
        self.overlayString("LB", 0.0, -1.0)
        self.overlayString("RT", -20.0, 0.0)
        self.overlayString("RB", -20.0, -1.0)

    # User interface -----------------------------------
    def mouse(self, button, state, x, y):
        """Track button state; wheel events (buttons 3/4) zoom the camera."""
        self.mouseState.button = button
        # BUG FIX: was `~state`, which is truthy for both GLUT_DOWN (0) and
        # GLUT_UP (1); store a real boolean instead.
        self.mouseState.pressed = state == GLUT_DOWN
        self.mouseState.x = x
        self.mouseState.y = y
        if button == 3:
            self.camera.distance *= 0.875
        elif button == 4:
            self.camera.distance *= 1.125

    def motion(self, x, y):
        """Orbit (pan/tilt) the camera while the left button is held."""
        movedX = x - self.mouseState.x
        movedY = y - self.mouseState.y
        # BUG FIX: `&` binds tighter than `==`, so the original condition
        # `button == 0 & pressed` reduced to `button == 0` and ignored the
        # pressed flag entirely.
        if self.mouseState.button == 0 and self.mouseState.pressed:
            self.camera.pan += float(-movedX)/100.0
            self.camera.tilt += float(movedY)/100.0
            # Clamp tilt just short of the poles to avoid gimbal flip.
            if self.camera.tilt > math.pi/2.0:
                self.camera.tilt = math.pi/2.0-0.01
            if self.camera.tilt < -math.pi/2.0:
                self.camera.tilt = -(math.pi/2.0-0.01)
        self.mouseState.x = x
        self.mouseState.y = y

    def keyboard(self, key, x, y):
        """Handle key presses: ESC quits, p/n zoom in/out."""
        print("KeyboardPress: %s" % key)
        if key == ESCAPE:
            sys.exit()  # `import sys` added at module top; was a NameError
        elif key == b'p':
            self.camera.distance *= 0.875
        elif key == b'n':
            self.camera.distance *= 1.125

    def setColor(self, color):
        """Apply color as both immediate color and front-face material."""
        glColor(color[0], color[1], color[2])
        glMaterial(GL_FRONT, GL_AMBIENT, color)
        glMaterial(GL_FRONT, GL_DIFFUSE, color)
if __name__ == '__main__':
    # Run the demo, targeting ~60 frames per second, until the user quits.
    print("Hit ESC key to quit.")
    view = GlutViewController()
    view.frameTime = 1.0/60.0
    view.startFramework()
|
, operator,
"operator should be %r, got %r" % (operator, audit_operator))
eq_(audit_notes, notes,
"notes should be %r, got %r" % (notes, audit_notes))
# Check we've only got one key left (audit_changes):
expected_keys = ['audit_changes']
found_keys = entry.keys()
eq_(expected_keys, found_keys, "Expected to find keys: %r, gor %r" %
(expected_keys, found_keys))
# Ensure that the new values were correctly recorded:
changes= entry['audit_changes']
eq_(changes['last_name'], (orig_name, self.longshot.last_name))
eq_(changes['age'], (orig_age, self.longshot.age))
eq_(changes['fastest_landing'], (orig_fastest_landing,
self.longshot.fastest_landing))
def test_dual_update(self):
"""Test that two log entries are generated for dual updates"""
self.apollo.age = 40
self.apollo.save()
self.apollo.age = 30
self.apollo.save()
log = list(self.apollo.get_audit_log())
eq_(len(log), 3, "There should be three entries in the log, got %d" %
len(log))
expected_ages = [(28, 40), (40, 30)]
for entry, age in zip(log[1:], expected_ages):
eq_(entry['audit_changes']['age'], age,
"Expected age to be %r, got %r" % (entry['audit_changes']['age'], age))
    def test_delete(self):
        """Check that delete() records the final state of the model prior to deletion"""
        # Define the lookup key we'll need parameters to look up the record:
        pk = self.starbuck.pk
        self.starbuck.delete()
        # Delete another to make sure we don't get log cross-over:
        apollo_pk = self.apollo.pk
        self.apollo.set_audit_info(notes="Extra note")
        self.apollo.delete()
        # Get hold of the delete log:
        log = list(Pilot.get_deleted_log(pk))
        # Make sure there's only one entry:
        eq_(len(log), 1,
            "There should only be one deleted item for this pk (found %d)" %
            len(log))
        entry = log[0]
        # Every logged field must hold the value the model had at delete time:
        for field in Pilot.log_fields:
            expected = getattr(PilotData.Starbuck, field)
            found = entry[field]
            eq_(expected, found,
                "For field %r, expected %r, got %r" % (field, expected, found))
        # The standard delete note is attached automatically:
        delete_note = "Object deleted. These are the attributes at delete time."
        eq_(entry['audit_notes'], delete_note,
            "Expected to find notes as: %r, got %r" %
            (delete_note, entry['audit_notes']))
        # Get hold of the delete log for apollo to check the delete note:
        # (user-supplied notes are appended after the standard note)
        entry = list(Pilot.get_deleted_log(apollo_pk))[0]
        got = entry['audit_notes']
        expected = "%s\nExtra note" % delete_note
        eq_(expected, got, "Expected note: %r, got %r" % (expected, got))
        # Since we've deleted two items we can check that we've got the log for
        # both of these:
        log = list(Pilot.get_deleted_log())
        eq_(len(log), 2,
            "There should be two deleted log entries for this class (found %d)"
            % len(log))
    def test_arbitrary_audit(self):
        """Test the arbitrary auditing of data against a model"""
        # Attach free-form key/value audit data to the next save:
        data = dict(hair_colour="Blond",
                    children=0,
                    kill_percentage=Decimal('98.7'))
        self.starbuck.set_audit_info(**data)
        self.starbuck.save()
        log = list(self.starbuck.get_audit_log())
        eq_(len(log), 2,
            "There should only be two entries in the log (found %d)" % len(log))
        entry = log[-1]
        # Pop the bookkeeping fields so only the arbitrary data remains:
        object_app = entry.pop('object_app')
        object_model = entry.pop('object_model')
        object_pk = entry.pop('object_pk')
        id = entry.pop('_id')
        audit_date_stamp = entry.pop('audit_date_stamp')
        eq_(object_app, "bsg",
            "object_app should be 'bsg', got %r" % object_app)
        eq_(object_model, "Pilot",
            "object_model should be 'Pilot', got %r" % object_model)
        eq_(object_pk, self.starbuck.pk, "object_pk should be %r, got %r" %
            (self.starbuck.pk, object_pk))
        # Mongo stores Decimals as floats, so coerce what we expect:
        data['kill_percentage'] = float(data['kill_percentage'])
        eq_(entry, data, "Expecting %r, got %r" % (data, entry))
    def test_foreign_keys(self):
        """Test the foreign keyed fields don't interfere with AuditedModel"""
        # Due to a call in the metaclass of AuditedModel, the
        # _meta.get_all_field_names does not behave correctly unless the cache
        # is cleared after this call. Aggregation is one area where this
        # manifests itself - here we're ensuring this doesn't fail:
        field_names = Pilot._meta.get_all_field_names()
        # 'vessels' is presumably a reverse relation from a related model —
        # it must still show up on the audited model's field names:
        ok_("vessels" in field_names,
            "The field names for the Pilot model should contain 'vessels', got "
            "%s" % field_names)
        # Now verify in aggregation this works:
        vessel_sum = Pilot.objects.aggregate(Sum('vessels'))['vessels__sum']
        eq_(vessel_sum, 1, "There should only be one vessel, got %r"
            % vessel_sum)
    def test_get_creation_log(self):
        """Test that the creation log can be retrieved correctly"""
        # Create a new object:
        # NOTE: the minute literal `01` below is Python 2 syntax; it is a
        # SyntaxError under Python 3.
        hot_dog = Pilot(
            first_name="Brendan",
            last_name="Costanza",
            call_sign="Hot Dog",
            age=25,
            last_flight=datetime(2000, 6, 4, 23, 01),
            craft=1,
            is_cylon=False,
            fastest_landing=Decimal("101.67")
        )
        hot_dog.set_audit_info(operator="Admin",
                               flight_deck="Port side")
        hot_dog.save()
        # Retrieve the log as a check:
        initial_log = hot_dog.get_creation_log()
        # Make another entry:
        hot_dog.fastest_landing = Decimal("99.98")
        hot_dog.save()
        # Check we've got two items in the log now:
        found_logs = len(list(hot_dog.get_audit_log()))
        eq_(2, found_logs, "Expected to find 2 logs, got %d" % found_logs)
        # Now check the creation log:
        # (it must be stable: the same entry as before the second save)
        creation_log = hot_dog.get_creation_log()
        eq_(creation_log, initial_log, "Expecting initial log entry to be the "
            "same as the creation log. Expected:\n%r,\n\ngot\n%r" %
            (initial_log, creation_log))
        # Test that fail gracefully when no creation log exists:
        # (remove every audit entry directly from the backing collection)
        for item in hot_dog.get_audit_log():
            self.auditing_collection.remove(item['_id'])
        empty_log = hot_dog.get_creation_log()
        eq_(empty_log, None, "The creation log should be None")
def test_get_deletion_log(self):
"""Test that deleted data can be retrieved"""
pre_delete_data = {}
for field in self.apollo.log_fields:
pre_delete_data[field] = getattr(self.apollo, field)
pk = self.apollo.pk
self.apollo.delete()
# Get the deletion log:
entry = list(Pilot.get_deleted_log(pk))[0]
object_app = entry.pop('object_app')
object_model = entry.pop('object_model')
object_pk = entry.pop('object_pk')
id = entry.pop('_id')
audit_date_stamp = entry.pop('audit_date_stamp')
audit_is_delete = entry.pop('audit_is_delete')
audit_notes = entry.pop('audit_notes')
ok_(audit_is_delete, "Should have audit_is_delete is True")
eq_(audit_notes,
'Object deleted. These are the attributes at delete time.')
eq_(pre_del |
import numpy as np
from pySDC.core.Sweeper import sweeper
from pySDC.implementations.collocation_classes.gauss_lobatto import CollGaussLobatto
class verlet(sweeper):
    """
    Custom sweeper class, implements Sweeper.py

    Second-order sweeper using velocity-Verlet as base integrator

    Attributes:
        QQ: 0-to-node collocation matrix (second order)
        QT: 0-to-node trapezoidal matrix
        Qx: 0-to-node Euler half-step for position update
        qQ: update rule for final value (if needed)
    """

    def __init__(self, params):
        """
        Initialization routine for the custom sweeper

        Args:
            params: parameters for the sweeper
        """
        # default to implicit/explicit Euler preconditioners when not supplied
        if 'QI' not in params:
            params['QI'] = 'IE'
        if 'QE' not in params:
            params['QE'] = 'EE'
        # call parent's initialization routine
        super(verlet, self).__init__(params)
        # Trapezoidal rule, Qx and Double-Q as in the Boris-paper
        [self.QT, self.Qx, self.QQ] = self.__get_Qd()
        # weights applied to the double integral for the end-point update
        self.qQ = np.dot(self.coll.weights, self.coll.Qmat[1:, 1:])

    def __get_Qd(self):
        """
        Get integration matrices for 2nd-order SDC

        Returns:
            QT: 0-to-node trapezoidal matrix
            Qx: 0-to-node Euler half-step for position update
            QQ: 0-to-node collocation matrix (second order)
        """
        # (docstring fixed: the previous version documented node-to-node
        # matrices S/SQ/ST/Sx that this method never returned)
        # set implicit and explicit Euler matrices
        QI = self.get_Qdelta_implicit(self.coll, self.params.QI)
        QE = self.get_Qdelta_explicit(self.coll, self.params.QE)
        # trapezoidal rule
        QT = 0.5 * (QI + QE)
        # QT = QI
        # Qx as in the paper
        Qx = np.dot(QE, QT) + 0.5 * QE * QE
        QQ = np.zeros(np.shape(self.coll.Qmat))
        # if we have Gauss-Lobatto nodes, we can do a magic trick from the Book
        # this takes Gauss-Lobatto IIIB and create IIIA out of this
        if isinstance(self.coll, CollGaussLobatto):
            for m in range(self.coll.num_nodes):
                for n in range(self.coll.num_nodes):
                    QQ[m + 1, n + 1] = self.coll.weights[n] * (1.0 - self.coll.Qmat[n + 1, m + 1] /
                                                               self.coll.weights[m])
            QQ = np.dot(self.coll.Qmat, QQ)
        # if we do not have Gauss-Lobatto, just multiply Q (will not get a symplectic method, they say)
        else:
            QQ = np.dot(self.coll.Qmat, self.coll.Qmat)
        return [QT, Qx, QQ]

    def update_nodes(self):
        """
        Update the u- and f-values at the collocation nodes -> corresponds to a single sweep over all nodes

        Returns:
            None
        """
        # get current level and problem description
        L = self.level
        P = L.prob
        # only if the level has been touched before
        assert L.status.unlocked
        # get number of collocation nodes for easier access
        M = self.coll.num_nodes
        # gather all terms which are known already (e.g. from the previous iteration)
        # get QF(u^k)
        integral = self.integrate()
        for m in range(M):
            # get -QdF(u^k)_m
            for j in range(1, M + 1):
                integral[m].pos -= L.dt * (L.dt * self.Qx[m + 1, j] * L.f[j])
                integral[m].vel -= L.dt * self.QT[m + 1, j] * L.f[j]
            # add initial value
            integral[m].pos += L.u[0].pos
            integral[m].vel += L.u[0].vel
            # add tau if associated
            if L.tau[m] is not None:
                integral[m] += L.tau[m]
        # do the sweep
        for m in range(0, M):
            # build rhs, consisting of the known values from above and new values from previous nodes (at k+1)
            L.u[m + 1] = P.dtype_u(integral[m])
            for j in range(1, m + 1):
                # add QxF(u^{k+1})
                L.u[m + 1].pos += L.dt * (L.dt * self.Qx[m + 1, j] * L.f[j])
                L.u[m + 1].vel += L.dt * self.QT[m + 1, j] * L.f[j]
            # get RHS with new positions
            L.f[m + 1] = P.eval_f(L.u[m + 1], L.time + L.dt * self.coll.nodes[m])
            # implicit diagonal update of the velocity with the new RHS value
            L.u[m + 1].vel += L.dt * self.QT[m + 1, m + 1] * L.f[m + 1]
        # indicate presence of new values at this level
        L.status.updated = True
        return None

    def integrate(self):
        """
        Integrates the right-hand side

        Returns:
            list of dtype_u: containing the integral as values
        """
        # get current level and problem description
        L = self.level
        P = L.prob
        p = []
        for m in range(1, self.coll.num_nodes + 1):
            # create new instance of dtype_u, initialize values with 0
            p.append(P.dtype_u(P.init, val=0.0))
            # BUG FIX: this inner loop must run once per node m. It previously
            # sat outside the m-loop (reusing the stale loop variable), so only
            # the last node's integral was filled and all others stayed zero.
            # integrate RHS over all collocation nodes, RHS is here only f(x)!
            for j in range(1, self.coll.num_nodes + 1):
                p[-1].pos += L.dt * (L.dt * self.QQ[m, j] * L.f[j]) + L.dt * self.coll.Qmat[m, j] * L.u[0].vel
                p[-1].vel += L.dt * self.coll.Qmat[m, j] * L.f[j]
            # we need to set mass and charge here, too, since the code uses the integral to create new particles
            p[-1].m = L.u[0].m
            p[-1].q = L.u[0].q
        return p

    def compute_end_point(self):
        """
        Compute u at the right point of the interval

        The value uend computed here is a full evaluation of the Picard formulation (always!)

        Returns:
            None
        """
        # get current level and problem description
        L = self.level
        P = L.prob
        # start with u0 and add integral over the full interval (using coll.weights)
        if (self.coll.right_is_node and not self.params.do_coll_update):
            # a copy is sufficient
            L.uend = P.dtype_u(L.u[-1])
        else:
            L.uend = P.dtype_u(L.u[0])
            for m in range(self.coll.num_nodes):
                L.uend.pos += L.dt * (L.dt * self.qQ[m] * L.f[m + 1]) + L.dt * self.coll.weights[m] * L.u[0].vel
                L.uend.vel += L.dt * self.coll.weights[m] * L.f[m + 1]
            # remember to set mass and charge here, too
            L.uend.m = L.u[0].m
            L.uend.q = L.u[0].q
        # add up tau correction of the full interval (last entry)
        if L.tau[-1] is not None:
            L.uend += L.tau[-1]
        return None
|
#!/usr/bin/env python
import numpy as np
import scipy.optimize as spo
def integer_optimize():
    """Return the integer x in [1, 100] that maximizes ((x % 6)**2 % 7) - sin(x)."""
    candidates = np.arange(1, 101)
    scores = (candidates % 6) ** 2 % 7 - np.sin(candidates)
    best_index = int(np.argmax(scores))
    return candidates[best_index]
def f(x):
    """Return the quartic -x**4 + 1000*x**3 - 20*x**2 + 4*x - 6 at x."""
    return -x**4 + 1000 * x**3 - 20 * x**2 + 4*x -6
if __name__ == '__main__':
    # Brute-force optimum over the integers 1..100:
    best_integer = integer_optimize()
    print("Integer optimium: x = {}\n".format(best_integer))
    # Continuous optimum of f via Nelder-Mead (fmin minimizes, so negate f):
    negated_f = lambda x: -f(x)
    max_x = spo.fmin(negated_f, 0)
    print("Rational optimum: x = {}\n".format(max_x))
|
from rest_framework import permissions
from . import models
class IsOwnerOrPublic(permissions.IsAuthenticatedOrReadOnly):
    message = "Not object owner or public"

    def has_object_permission(self, request, view, obj):
        """Owners get full access; others get read access only if public."""
        is_owner = request.user == obj.owner
        if is_owner:
            return True
        if request.method not in permissions.SAFE_METHODS:
            return False
        return obj.public
class IsWidgetOwnerOrPublic(permissions.IsAuthenticatedOrReadOnly):
    message = "Not object owner or public"

    def has_permission(self, request, view):
        """View-level check: apply the IsOwnerOrPublic rule to the parent widget."""
        widget_pk = view.kwargs["widget_pk"]
        widget = models.Widget.objects.get(pk=widget_pk)
        return IsOwnerOrPublic.has_object_permission(self, request, view, widget)

    def has_object_permission(self, request, view, obj):
        """Widget owners get full access; others read access iff the widget is public."""
        parent = obj.widget
        if request.user == parent.owner:
            return True
        if request.method not in permissions.SAFE_METHODS:
            return False
        return parent.public
class CanSubscribe(permissions.IsAuthenticated):
    def has_object_permission(self, request, view, obj):
        """Allow subscribing when the requester owns the object or it is public."""
        return bool(request.user == obj.owner or obj.public)
class IsOwner(permissions.IsAuthenticated):
    def has_object_permission(self, request, view, obj):
        """Grant object access only to the owner."""
        is_owner = request.user == obj.owner
        return is_owner
|
#!/usr/bin/env python3.4
# -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os, sys, re, urllib, json, subprocess
import time
import urllib.request
import smtplib
from email.mime.text import MIMEText
# Function for fetching JSON via HTTPS
def getJSON(url, creds = None, cookie = None):
    """Fetch a URL and decode the response body as JSON.

    Args:
        url: the URL to fetch.
        creds: optional "user:password" string sent as HTTP Basic auth.
        cookie: accepted but currently unused.
            NOTE(review): presumably intended to be sent as a Cookie
            header — confirm against the callers before wiring it up.

    Returns:
        The decoded JSON value.
    """
    import base64  # BUG FIX: base64 was used below but never imported anywhere
    headers = {}
    if creds and len(creds) > 0:
        xcreds = creds.encode(encoding='ascii', errors='replace')
        auth = base64.encodebytes(xcreds).decode('ascii', errors='replace').replace("\n", '')
        headers = {"Content-type": "application/json",
                   "Accept": "*/*",
                   "Authorization": "Basic %s" % auth
                   }
    request = urllib.request.Request(url, headers = headers)
    result = urllib.request.urlopen(request)
    return json.loads(result.read().decode('utf-8', errors = 'replace'))
# Get the current queue
js = getJSON("https://reporeq.apache.org/queue.json")
created = 0
# If queue is valid:
if js:
    print("analysing %u items" % len(js))
    # For each item:
    # - Check that it hasn't been mirrored yet
    # - Check that a repo with this name doesn't exist already
    # - Check that name is valid
    # - Mirror repo if all is okay
    for item in js:
        # Make sure this is a GH integration request AND it's been mirrored more than a day ago, so GH caught up.
        if not 'githubbed' in item and item['github'] == True and 'mirrordate' in item and item['mirrordate'] < (time.time()-86400):
            reponame = item['name']
            # Check valid name (reject short names and path traversal chars):
            if len(reponame) < 5 or reponame.find("..") != -1 or reponame.find("/") != -1:
                print("Invalid repo name!")
                continue
            # Set some vars
            notify = item['notify']
            description = item['description'] if 'description' in item else "Unknown"
            # Make sure the repo exists!
            if os.path.exists("/x1/git/mirrors/%s" % reponame):
                print("%s is there, adding web hooks" % reponame)
                try:
                    xreponame = reponame.replace(".git", "") # Cut off the .git part, so GH will not bork
                    inp = subprocess.check_output("/usr/local/etc/git_self_serve/add-webhook.sh %s" % xreponame, shell = True).decode('ascii', 'replace')
                except subprocess.CalledProcessError as err:
                    print("Borked: %s" % err.output)
                    continue
            else:
                print("Repo doesn't exist, ignoring this request...sort of")
            # NOTE(review): there is no `continue` in the else-branch above, so
            # the notification and email below still run when the local mirror
            # is missing — the "...sort of" message suggests this is known, but
            # confirm it is intended.
            # Notify reporeq that we've GH'ed this repository!
            print("Notifying https://reporeq.apache.org/ss.lua?githubbed=%s" % reponame)
            request = urllib.request.Request("https://reporeq.apache.org/ss.lua?githubbed=%s" % reponame)
            result = urllib.request.urlopen(request)
            # Inform infra@ and private@$pmc that the mirror has been set up
            msg = MIMEText("New repository %s has now had GitHub integration enabled!\n\nWith regards,\nApache Infrastructure." % (reponame))
            msg['Subject'] = 'Github integration set up: %s' % reponame
            msg['From'] = "git@apache.org"
            msg['Reply-To'] = "users@infra.apache.org"
            msg['To'] = "users@infra.apache.org, private@%s.apache.org" % item['pmc']
            s = smtplib.SMTP(host='mail.apache.org', port=2025)
            s.send_message(msg)
            s.quit()
            # We made a thing!
            created += 1
print("All done for today! Made %u new repos" % created)
|
have an even sum.
See Also
--------
is_valid_degree_sequence
Notes
-----
As described by Newman [1]_.
A non-graphical degree sequence (not realizable by some simple
graph) is allowed since this function returns graphs with self
loops and parallel edges. An exception is raised if the degree
sequence does not have an even sum.
This configuration model construction process can lead to
duplicate edges and loops. You can remove the self-loops and
parallel edges (see below) which will likely result in a graph
that doesn't have the exact degree sequence specified.
The density of self-loops and parallel edges tends to decrease
as the number of nodes increases. However, typically the number
of self-loops will approach a Poisson distribution with a nonzero
mean, and similarly for the number of parallel edges. Consider a
node with k stubs. The probability of being joined to another stub of
the same node is basically (k-1)/N where k is the degree and N is
the number of nodes. So the probability of a self-loop scales like c/N
for some constant c. As N grows, this means we expect c self-loops.
Similarly for parallel edges.
References
----------
.. [1] M.E.J. Newman, "The structure and function of complex n | etworks",
SIAM REVIEW 45-2, pp 167-256, 2003.
Exampl | es
--------
>>> from networkx.utils import powerlaw_sequence
>>> z=nx.utils.create_degree_sequence(100,powerlaw_sequence)
>>> G=nx.configuration_model(z)
To remove parallel edges:
>>> G=nx.Graph(G)
To remove self loops:
>>> G.remove_edges_from(G.selfloop_edges())
"""
if sum(deg_sequence) % 2 != 0:
msg = 'Invalid degree sequence: sum of degrees must be even, not odd'
raise nx.NetworkXError(msg)
if create_using is None:
create_using = nx.MultiGraph()
elif create_using.is_directed():
raise nx.NetworkXError("Directed Graph not supported")
if not seed is None:
random.seed(seed)
# start with empty N-node graph
N=len(deg_sequence)
# allow multiedges and selfloops
G=nx.empty_graph(N,create_using)
if N==0 or max(deg_sequence)==0: # done if no edges
return G
# build stublist, a list of available degree-repeated stubs
# e.g. for deg_sequence=[3,2,1,1,1]
# initially, stublist=[1,1,1,2,2,3,4,5]
# i.e., node 1 has degree=3 and is repeated 3 times, etc.
stublist=[]
for n in G:
for i in range(deg_sequence[n]):
stublist.append(n)
# shuffle stublist and assign pairs by removing 2 elements at a time
random.shuffle(stublist)
while stublist:
n1 = stublist.pop()
n2 = stublist.pop()
G.add_edge(n1,n2)
G.name="configuration_model %d nodes %d edges"%(G.order(),G.size())
return G
def directed_configuration_model(in_degree_sequence,
                                 out_degree_sequence,
                                 create_using=None,seed=None):
    """Return a directed_random graph with the given degree sequences.

    Builds a directed pseudograph (self loops and parallel edges allowed)
    by randomly pairing out-degree "stubs" with in-degree "stubs", so each
    node ends up with exactly the requested in- and out-degree.

    Parameters
    ----------
    in_degree_sequence : list of integers
       Each list entry corresponds to the in-degree of a node.
    out_degree_sequence : list of integers
       Each list entry corresponds to the out-degree of a node.
    create_using : graph, optional (default MultiDiGraph)
       Return graph of this type. The instance will be cleared.
    seed : hashable object, optional
        Seed for random number generator.

    Returns
    -------
    G : MultiDiGraph
        A graph with the specified degree sequences. Nodes are labeled
        starting at 0 with an index corresponding to the position in the
        degree sequences.

    Raises
    ------
    NetworkXError
        If the degree sequences do not have the same sum.

    See Also
    --------
    configuration_model

    Notes
    -----
    Algorithm as described by Newman [1]_. The construction can produce
    duplicate edges and self loops; removing them afterwards (see below)
    will likely change the exact degree sequence. This "finite-size
    effect" decreases as the graph grows.

    References
    ----------
    .. [1] Newman, M. E. J. and Strogatz, S. H. and Watts, D. J.
       Random graphs with arbitrary degree distributions and their applications
       Phys. Rev. E, 64, 026118 (2001)

    Examples
    --------
    >>> D=nx.DiGraph([(0,1),(1,2),(2,3)]) # directed path graph
    >>> din=list(d for n, d in D.in_degree())
    >>> dout=list(d for n, d in D.out_degree())
    >>> din.append(1)
    >>> dout[0]=2
    >>> D=nx.directed_configuration_model(din,dout)

    To remove parallel edges:

    >>> D=nx.DiGraph(D)

    To remove self loops:

    >>> D.remove_edges_from(D.selfloop_edges())
    """
    if sum(in_degree_sequence) != sum(out_degree_sequence):
        raise nx.NetworkXError('Invalid degree sequences. '
                               'Sequences must have equal sums.')
    if create_using is None:
        create_using = nx.MultiDiGraph()
    if seed is not None:
        random.seed(seed)
    # Pad the shorter sequence with zero-degree nodes so lengths match.
    n_in = len(in_degree_sequence)
    n_out = len(out_degree_sequence)
    if n_in > n_out:
        out_degree_sequence.extend((n_in - n_out) * [0])
    else:
        in_degree_sequence.extend((n_out - n_in) * [0])
    # Start from an empty N-node graph that allows multiedges and selfloops.
    N = len(in_degree_sequence)
    G = nx.empty_graph(N, create_using)
    if N == 0 or max(in_degree_sequence) == 0:  # nothing to wire up
        return G
    # Each node appears in a stublist once per unit of (in/out) degree,
    # e.g. degrees [3,2,...] yield stubs [0,0,0,1,1,...].
    in_stublist = [n for n in G for _ in range(in_degree_sequence[n])]
    out_stublist = [n for n in G for _ in range(out_degree_sequence[n])]
    # Shuffle both stublists, then pair stubs off two at a time.
    random.shuffle(in_stublist)
    random.shuffle(out_stublist)
    while in_stublist and out_stublist:
        source = out_stublist.pop()
        target = in_stublist.pop()
        G.add_edge(source, target)
    G.name = "directed configuration_model %d nodes %d edges" % (G.order(), G.size())
    return G
def expected_degree_graph(w, seed=None, selfloops=True):
r"""Return a random graph with given expected degrees.
Given a sequence of expected degrees `W=(w_0,w_1,\ldots,w_{n-1}`)
of length `n` this algorithm assigns an edge between node `u` and
node `v` with probability
.. math::
p_{uv} = \frac{w_u w_v}{\sum_k w_k} .
Parameters
----------
w : list
The list of expected degrees.
selfloops: bool (default=True)
Set to False to remove the possibility of self-loop edges.
seed : hashable object, optional
The seed for the random number generator.
Returns
-------
Graph
Examples
--------
>>> z=[10 for i in range(100)]
>>> G=nx.expected_degree_graph(z)
Notes
-----
The nodes have integer labels corresponding to index of expected degrees
input sequence.
The complexity of this algorithm is `\mathcal{O}(n+m)` where `n` is the
number of nodes and `m` is the expected number of edges.
The model in [1]_ includes the possibility of self-loop edges.
Set selfloops=False to produce a graph without self loops.
For fini |
import sys
import warnings
from setuptools import setup, find_packages, Extension
import numpy
# Refuse plain installs: Pylearn2 only supports development-mode installs
# (see the detailed rationale in the comment block below).
if 'develop' not in sys.argv:
    raise NotImplementedError("since Pylearn2 is under rapid, active "
                              "development, `python setup.py install` is "
                              "intentionally disabled to prevent other "
                              "problems. Run `python setup.py develop` to "
                              "install Pylearn2.")
# Detailed notes:
# This modification of setup.py is designed to prevent two problems
# novice users frequently encountered:
# 1) Novice users frequently used "git clone" to get a copy of Pylearn2,
#    then ran setup.py install, then would use "git pull" to get a bug fix
#    but would forget to run "setup.py install" again.
# 2) Novice users frequently used "sudo" to make an "installed" copy of
#    Pylearn2, then try to use the tutorials in the "scripts" directory in
#    the "installed" copy. Since the tutorials are then in a directory owned
#    by root and need to create files in the local directory, some users
#    would run the tutorials using "sudo". Besides being dangerous, this
#    created additional problems because "sudo" does not just run the script
#    with root privileges, it actually changes the user to root, and thus
#    pylearn2-related environment variables configured in the user's
#    .bashrc would no longer be available.
# Installing only in development mode avoids both problems because there
# is now only a single copy of the code and it is stored in a directory
# editable by the user.
# Note that none of the Pylearn2 installation documentation recommends
# using setup.py install or pip. Most of the Pylearn2 developers just
# obtain Pylearn2 via git clone and then add it to their PYTHONPATH
# manually.
# Because many people neglected to run the pylearn2/utils/setup.py script
# separately, we compile the necessary Cython extensions here but because
# Cython is not a strict dependency, we issue a warning when it is not
# available.
try:
    from Cython.Distutils import build_ext
    cython_available = True
except ImportError:
    warnings.warn("Cython was not found and hence pylearn2.utils._window_flip "
                  "and pylearn2.utils._video and classes that depend on them "
                  "(e.g. pylearn2.train_extensions.window_flip) will not be "
                  "available")
    cython_available = False
# Only register the Cython build step and extension modules when Cython is
# importable; otherwise fall back to a pure-Python install.
if cython_available:
    cmdclass = {'build_ext': build_ext}
    ext_modules = [Extension("pylearn2.utils._window_flip",
                             ["pylearn2/utils/_window_flip.pyx"],
                             include_dirs=[numpy.get_include()]),
                   Extension("pylearn2.utils._video",
                             ["pylearn2/utils/_video.pyx"],
                             include_dirs=[numpy.get_include()])]
else:
    cmdclass = {}
    ext_modules = []
# Read the long description up front with a context manager so the file
# handle is closed promptly (the previous bare open(...).read() leaked it).
with open('README.rst') as readme_file:
    long_description = readme_file.read()

setup(
    cmdclass=cmdclass,
    ext_modules=ext_modules,
    name='pylearn2',
    version='0.1dev',
    packages=find_packages(),
    description='A machine learning library built on top of Theano.',
    license='BSD 3-clause license',
    long_description=long_description,
    install_requires=['numpy>=1.5', 'theano', 'pyyaml', 'argparse'],
    package_data={
        '': ['*.cu', '*.cuh', '*.h'],
    },
)
|
from matchers import (
linear_match as html_match,
prune_unmatched_elements
)
from spec import (
a,
accordion,
acc_body,
acc_group,
acc_heading,
div,
elem,
heading,
html,
img,
input,
option,
option_xhtml,
sele | ct,
text,
)
from formatters import (
pretty_html | ,
pretty_spec
)
|
# Copyright 2015 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# Yo | u may obtain a copy of the License at
#
# http://www.a | pache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from django import http
from django.test import TestCase
from . import views
# Tests go here
|
import uuid
from google.appengine.api import memcache
|
cla | ss CollectionCache:
def __init__(self, timeout=480, hash=None):
self.contents = [];
if hash:
self.contents = memcache.get(hash)
self.timeout = timeout
def add(self, item):
hash = uuid.uuid1().hex
memcache.add(hash, item, time = self.timeout)
self.contents.append(hash)
return hash
def commit(self):
hash = uuid.uuid1().hex
memcache.add(hash, self.contents, time = self.timeout)
return hash
def fetchAll(self):
if not self.contents:
return []
return [[key,memcache.get(key)] for key in self.contents]
def fetch(self):
for key in self.contents:
item = memcache.get(key)
if item:
yield key,item |
from pygame import *
'''
The music must be in the same folder/project to work
You have to install pygame
command: pip install pygame
'''
# Initialise the pygame audio mixer before any music calls.
mixer.init()
# Ask the user for a track name; "<name>.mp3" must sit next to this script.
msc = input('Song Name: ')
mixer.music.load('{}.mp3'.format(msc))
mixer.music.play()

# Keep the script alive while the track plays (get_busy() turns False at end).
while mixer.music.get_busy():
    time.Clock().tick(10)  # cap the polling loop at ~10 ticks per second
    # NOTE(review): input() blocks until Enter is pressed, so the busy check
    # above only reruns after each line of input; typing "pause" stops the
    # loop (it breaks rather than calling mixer.music.pause) — confirm intent.
    if input() == 'pause':
        break;
the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
# Module maturity/support markers consumed by Ansible's documentation tooling.
ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ce_mlag_interface
version_added: "2.4"
short_description: Manages MLAG interfaces on HUAWEI CloudEngine switches.
description:
- Manages MLAG interface attributes on HUAWEI CloudEngine switches.
author:
- Li Yanfeng (@QijunPan)
notes:
- This module requires the netconf system service be enabled on the remote device being managed.
- Recommended connection is C(netconf).
- This module also works with C(local) connections for legacy playbooks.
options:
eth_trunk_id:
description:
- Name of the local M-LAG interface. The value is ranging from 0 to 511.
dfs_group_id:
description:
- ID of a DFS group.The value is 1.
default: present
mlag_id:
description:
- ID of the M-LAG. The value is an integer that ranges from 1 to 2048.
mlag_system_id:
description:
- M-LAG global LACP system MAC address. The value is a string of 0 to 255 characters. The default value
is the MAC address of the Ethernet port of MPU.
mlag_priority_id:
description:
- M-LAG global LACP system priority. The value is an integer ranging from 0 to 65535.
The default value is 32768.
interface:
description:
- Name of the interface that enters the Error-Down state when the peer-link fails.
The value is a string of 1 to 63 characters.
mlag_error_down:
description:
- Configure the interface on the slave device to enter the Error-Down state.
choices: ['enable','disable']
state:
| description:
- Specify desired state of the resource.
default: present
choices: ['present','absent']
'''
EXAMPLES = '''
- name: mlag interface module test
hosts: cloud | engine
connection: local
gather_facts: no
vars:
cli:
host: "{{ inventory_hostname }}"
port: "{{ ansible_ssh_port }}"
username: "{{ username }}"
password: "{{ password }}"
transport: cli
tasks:
- name: Set interface mlag error down
ce_mlag_interface:
interface: 10GE2/0/1
mlag_error_down: enable
provider: "{{ cli }}"
- name: Create mlag
ce_mlag_interface:
eth_trunk_id: 1
dfs_group_id: 1
mlag_id: 4
provider: "{{ cli }}"
- name: Set mlag global attribute
ce_mlag_interface:
mlag_system_id: 0020-1409-0407
mlag_priority_id: 5
provider: "{{ cli }}"
- name: Set mlag interface attribute
ce_mlag_interface:
eth_trunk_id: 1
mlag_system_id: 0020-1409-0400
mlag_priority_id: 3
provider: "{{ cli }}"
'''
RETURN = '''
changed:
description: check to see if a change was made on the device
returned: always
type: bool
sample: true
proposed:
description: k/v pairs of parameters passed into module
returned: always
type: dict
sample: { "interface": "eth-trunk1",
"mlag_error_down": "disable",
"state": "present"
}
existing:
description: k/v pairs of existing aaa server
returned: always
type: dict
sample: { "mlagErrorDownInfos": [
{
"dfsgroupId": "1",
"portName": "Eth-Trunk1"
}
]
}
end_state:
description: k/v pairs of aaa params after module execution
returned: always
type: dict
sample: {}
updates:
description: command sent to the device
returned: always
type: list
sample: { "interface eth-trunk1",
"undo m-lag unpaired-port suspend"}
'''
import re
from xml.etree import ElementTree
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.cloudengine.ce import load_config
from ansible.module_utils.network.cloudengine.ce import get_nc_config, set_nc_config, ce_argument_spec
CE_NC_GET_MLAG_INFO = """
<filter type="subtree">
<mlag xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<mlagInstances>
<mlagInstance>
%s
</mlagInstance>
</mlagInstances>
</mlag>
</filter>
"""
CE_NC_CREATE_MLAG_INFO = """
<config>
<mlag xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<mlagInstances>
<mlagInstance operation="merge">
<dfsgroupId>%s</dfsgroupId>
<mlagId>%s</mlagId>
<localMlagPort>%s</localMlagPort>
</mlagInstance>
</mlagInstances>
</mlag>
</config>
"""
CE_NC_DELETE_MLAG_INFO = """
<config>
<mlag xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<mlagInstances>
<mlagInstance operation="delete">
<dfsgroupId>%s</dfsgroupId>
<localMlagPort>%s</localMlagPort>
</mlagInstance>
</mlagInstances>
</mlag>
</config>
"""
CE_NC_GET_LACP_MLAG_INFO = """
<filter type="subtree">
<ifmtrunk xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<TrunkIfs>
<TrunkIf>
<ifName>%s</ifName>
<lacpMlagIf>
<lacpMlagSysId></lacpMlagSysId>
<lacpMlagPriority></lacpMlagPriority>
</lacpMlagIf>
</TrunkIf>
</TrunkIfs>
</ifmtrunk>
</filter>
"""
CE_NC_SET_LACP_MLAG_INFO_HEAD = """
<config>
<ifmtrunk xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<TrunkIfs>
<TrunkIf>
<ifName>%s</ifName>
<lacpMlagIf operation="merge">
"""
CE_NC_SET_LACP_MLAG_INFO_TAIL = """
</lacpMlagIf>
</TrunkIf>
</TrunkIfs>
</ifmtrunk>
</config>
"""
CE_NC_GET_GLOBAL_LACP_MLAG_INFO = """
<filter type="subtree">
<ifmtrunk xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<lacpSysInfo>
<lacpMlagGlobal>
<lacpMlagSysId></lacpMlagSysId>
<lacpMlagPriority></lacpMlagPriority>
</lacpMlagGlobal>
</lacpSysInfo>
</ifmtrunk>
</filter>
"""
CE_NC_SET_GLOBAL_LACP_MLAG_INFO_HEAD = """
<config>
<ifmtrunk xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<lacpSysInfo>
<lacpMlagGlobal operation="merge">
"""
CE_NC_SET_GLOBAL_LACP_MLAG_INFO_TAIL = """
</lacpMlagGlobal>
</lacpSysInfo>
</ifmtrunk>
</config>
"""
CE_NC_GET_MLAG_ERROR_DOWN_INFO = """
<filter type="subtree">
<mlag xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<errordowns>
<errordown>
<dfsgroupId></dfsgroupId>
<portName></portName>
<portState></portState>
</errordown>
</errordowns>
</mlag>
</filter>
"""
CE_NC_CREATE_MLAG_ERROR_DOWN_INFO = """
<config>
<mlag xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<errordowns>
<errordown operation="merge">
<dfsgroupId>1</dfsgroupId>
<portName>%s</portName>
</errordown>
</errordowns>
</mlag>
</config>
"""
CE_NC_DELETE_MLAG_ERROR_DOWN_INFO = """
<config>
<mlag xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<errordowns>
<errordown operation="delete">
<dfsgroupId>1</dfsgroupId>
<portName>%s</portName>
</errordown>
</errordowns>
</mlag>
</config>
"""
def get_interface_type(interface):
"""Gets the type of interface, such as 10GE, ETH-TRUNK, VLANIF..."""
if interface is None:
return None
iftype = None
if interface.upper().startswith('GE'):
iftype = 'ge'
elif interface.upper().startswith('10GE'):
iftype = '10ge'
elif interface.upper().startswith('25GE'):
iftype = '25ge'
elif interface.upper().startswith('40GE'):
iftype = '40ge'
elif interface.upper().startswith('100GE'):
iftype = '100ge'
elif interface.upper().startswith('ETH-TRUNK'):
iftype |
"""
Support for particulate matter sensors connected to a serial port.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/sensor.serial_pm/
"""
import logging
import voluptuous as vol
from homeassistant.const import CONF_NAME
from homeassistant.helpers.entity import Entity
import homeassistant.helpers.config_validation as cv
from homeassistant.components.sensor import PLATFORM_SCHEMA
REQUIREMENTS = ['pmsensor==0.4']
_LOGGER = logging.getLogger(__name__)
# Configuration keys accepted by this platform.
CONF_SERIAL_DEVICE = 'serial_device'
CONF_BRAND = 'brand'

# Extend the base sensor schema: sensor brand and serial device path are
# mandatory; the friendly-name prefix is optional.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Required(CONF_BRAND): cv.string,
    vol.Required(CONF_SERIAL_DEVICE): cv.string,
    vol.Optional(CONF_NAME): cv.string,
})
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up the available PM sensors."""
    from pmsensor import serial_pm as pm

    brand = config.get(CONF_BRAND)
    serial_device = config.get(CONF_SERIAL_DEVICE)
    try:
        collector = pm.PMDataCollector(
            serial_device, pm.SUPPORTED_SENSORS[brand])
    except KeyError:
        # Unknown brand: pm.SUPPORTED_SENSORS has no entry for it.
        _LOGGER.error("Brand %s not supported\n supported brands: %s",
                      brand, pm.SUPPORTED_SENSORS.keys())
        return
    except OSError as err:
        _LOGGER.error("Could not open serial connection to %s (%s)",
                      serial_device, err)
        return

    # One entity per value (e.g. PM2.5, PM10) the collector can report.
    name_prefix = config.get(CONF_NAME)
    entities = []
    for pmname in collector.supported_values():
        if name_prefix is not None:
            entity_name = '{} PM{}'.format(name_prefix, pmname)
        else:
            entity_name = 'PM{}'.format(pmname)
        entities.append(ParticulateMatterSensor(collector, entity_name, pmname))
    add_entities(entities)
class ParticulateMatterSensor(Entity):
    """Representation of an Particulate matter sensor."""

    def __init__(self, pmDataCollector, name, pmname):
        """Initialize a new PM sensor."""
        self._name = name
        # Key under which the collector's read_data() reports this value.
        self._pmname = pmname
        self._state = None
        self._collector = pmDataCollector

    @property
    def name(self):
        """Return the name of the sensor."""
        return self._name

    @property
    def state(self):
        """Return the state of the sensor."""
        return self._state

    @property
    def unit_of_measurement(self):
        """Return the unit of measurement of this entity, if any."""
        return "µg/m³"

    def update(self):
        """Read from sensor and update the state."""
        _LOGGER.debug("Reading data from PM sensor")
        try:
            self._state = self._collector.read_data()[self._pmname]
        except KeyError:
            _LOGGER.error("Could not read PM%s value", self._pmname)

    @property
    def should_poll(self):
        """Sensor needs polling."""
        # BUG FIX: the base Entity exposes should_poll as a property.
        # Overriding it with a plain method made `entity.should_poll`
        # evaluate to a bound method (always truthy) instead of a bool;
        # restore the property contract.
        return True
|
from django.http import HttpResponse
from django.conf import settings
import json
def javascript_variables(request):
    """Serve selected Django settings as global JavaScript assignments."""
    exported = {
        'STATIC_PREFIX': settings.STATIC_URL
    }
    # One `NAME = <json>;` statement per exported setting.
    assignments = "\n".join("%s = %s;" % (name, json.dumps(value))
                            for name, value in exported.items())
    body = "// VARIABLE CATALOG FOR DJANGO VARIABLES\n\n" + assignments
    return HttpResponse(body, content_type='text/javascript')
# Copyright 2017 Kevin Howell
#
# This file is part of sixoclock.
#
# sixoclock is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the | Free Soft | ware Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# sixoclock is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with sixoclock. If not, see <http://www.gnu.org/licenses/>.
import argparse
import humanize
import logging
import os.path
import time
from sixoclock.config import Configuration
from sixoclock.backends.file import FileBackend
from sixoclock.file import File
class Cli:
    """Command line interface for sixoclock.

    Builds the argparse command tree from the user's ~/.sixoclock.yml
    configuration and dispatches to the sub-command handlers below.
    """

    def __init__(self):
        config = os.path.join(os.path.expanduser('~'), '.sixoclock.yml')
        self.configuration = Configuration(config)

        parser = argparse.ArgumentParser(description='Simple personal backups.')
        parser.add_argument('--no-log', action='store_true', help='do not log')
        parser.add_argument('--log-file', help='log file')
        # Default action: print usage when no sub-command is given.
        parser.set_defaults(function=lambda args: parser.print_usage(), log_file=None)
        subparsers = parser.add_subparsers(title='commands')

        backup_parser = subparsers.add_parser('backup', help='perform a backup')
        backup_parser.add_argument('-c', '--collection', help='backup a specific collection')
        backup_parser.add_argument('--dry-run', action='store_true', help='do not backup, show what would happen')
        backup_parser.set_defaults(function=self.backup)

        query_parser = subparsers.add_parser('query', help='find a file in configured sources or mirrors')
        query_parser.add_argument('-c', '--collection', help='look only in a specific collection')
        query_parser.add_argument('-m', '--mirror', help='look only in a specific mirror')
        query_parser.add_argument('--path', help='relative path of the file')
        query_parser.add_argument('--filename', help='base filename (ex. foo.txt)')
        query_parser.add_argument('--file', help='file to use as a basis')
        query_parser.add_argument('--md5', help='md5 hash')
        query_parser.add_argument('--sha1', help='sha1 hash')
        query_parser.add_argument('--sha256', help='sha256 hash')
        query_parser.add_argument('--size', help='file size in bytes')
        query_parser.set_defaults(function=self.query)

        status_parser = subparsers.add_parser('status', help='show backup status')
        status_parser.add_argument('-c', '--collection', help='show status of a specific collection')
        status_parser.set_defaults(function=self.status)

        refresh_parser = subparsers.add_parser('refresh-cache', help='refresh cache')
        refresh_parser.add_argument('-c', '--collection', help='refresh mirror caches for a specific collection')
        refresh_parser.add_argument('-m', '--mirror', help='refresh mirror caches for a specific mirror')
        refresh_parser.add_argument('--rebuild', action='store_true', help='remove entries and rebuild the cache')
        refresh_parser.set_defaults(function=self.refresh_cache)

        # Let storage backends contribute their own sub-commands.
        for name, backend in self.configuration.backends.items():
            if backend.has_subparser():
                backend_parser = subparsers.add_parser(name, help='{} backend subcommands'.format(name))
                backend.contribute_to_subparser(backend_parser)

        self.parser = parser

    def main(self):
        """Parse argv, configure logging, and dispatch to the sub-command."""
        args = self.parser.parse_args()
        log_filename = args.log_file or 'sixoclock.{}.log'.format(int(time.time()))
        if not args.no_log:
            logging.basicConfig(filename=log_filename, level=logging.INFO)
        args.function(args)

    def backup(self, args):
        """Back up every collection, or only `args.collection` if given."""
        for name, collection in self.configuration.collections.items():
            # BUG FIX: previously compared `name != collection` (the
            # collection object itself), so the -c filter never matched.
            if args.collection and name != args.collection:
                continue
            print('Backing up collection: {}'.format(name))
            actions = collection.backup(args.dry_run)
            if args.dry_run:
                for action in actions:
                    print('Would back up {} to {}'.format(action.file, action.destination))

    def query(self, args):
        """Search configured sources/mirrors for files matching the filters."""
        filters = []
        if args.path:
            filters.append(File.path == args.path)
        if args.file:
            # Use the given file's hash and basename as the search basis.
            filebackend = FileBackend()
            file = filebackend.get(args.file)
            filters.append(File.sha1 == file.sha1)
            filters.append(File.path.like('%/{}'.format(os.path.basename(args.file))))
        if args.filename:
            filters.append(File.path.like('%/{}'.format(args.filename)))
        if args.md5:
            filters.append(File.md5 == args.md5)
        if args.sha1:
            filters.append(File.sha1 == args.sha1)
        if args.sha256:
            filters.append(File.sha256 == args.sha256)
        if args.size:
            filters.append(File.size == args.size)
        collections = self.configuration.collections.values()
        if args.collection:
            collections = [self.configuration.collections[args.collection]]
        if args.mirror:
            filters.append(File.mirror_uri == args.mirror)
        for collection in collections:
            collection.refresh_cache()
            for match in collection.query(*filters):
                print('Match: {}'.format(match.uri))

    def status(self, args):
        """Print per-collection backup statistics."""
        for name, collection in self.configuration.collections.items():
            if args.collection and name != args.collection:
                continue
            print('Collection: {}'.format(name))
            stats = collection.stats()
            print(' # Source files: {}'.format(stats.source_file_count))
            size = humanize.naturalsize(stats.size)
            percentage = 100.0
            if stats.size > 0:
                # BUG FIX: scale the ratio to a percentage; it was printed
                # as a 0..1 fraction next to a '%' sign.
                percentage = 100.0 * stats.backed_up_size / stats.size
            print(' Total size: {}, {}% backed up'.format(size, percentage))

    def refresh_cache(self, args):
        """Refresh (or rebuild, with --rebuild) mirror caches."""
        for name, collection in self.configuration.collections.items():
            if args.collection and name != args.collection:
                continue
            # BUG FIX: the option is --rebuild, so argparse stores it as
            # args.rebuild; args.refresh raised AttributeError.
            collection.refresh_cache(mirror=args.mirror, reset=args.rebuild)
|
import os
import random
import time
from flask import Flask, request, render_template, session, flash, redirect, \
url_for, jsonify
from flask.ext.mail import Mail, Message
from flask.ext.sqlalchemy import SQLAlchemy
from celery import Celery
app = Flask(__name__)
app.config['SECRET_KEY'] = 'top-secret!'

# Flask-Mail configuration
# SECURITY NOTE(review): live Gmail credentials are hard-coded below; they
# should be moved into the APP_SETTINGS object / environment variables.
app.config['MAIL_SERVER'] = 'smtp.googlemail.com'
app.config['MAIL_PORT'] = 587
app.config['MAIL_USE_TLS'] = True
app.config['MAIL_USERNAME'] = "bdh931101@gmail.com"
app.config['MAIL_PASSWORD'] = "1Alzkdpf*^^*go"
app.config['MAIL_DEFAULT_SENDER'] = 'bdh931101@gmail.com'

# Celery configuration: Redis is used both as broker and result backend.
app.config['CELERY_BROKER_URL'] = 'redis://localhost:6379/0'
app.config['CELERY_RESULT_BACKEND'] = 'redis://localhost:6379/0'

# Environment-selected settings are applied last and may override the above.
app.config.from_object(os.environ['APP_SETTINGS'])
db = SQLAlchemy(app)

from .models import MapInfo

# Initialize extensions
mail = Mail(app)

# Initialize Celery
celery = Celery(app.name, broker=app.config['CELERY_BROKER_URL'])
celery.conf.update(app.config)
@celery.task
def send_async_email(msg):
    """Deliver *msg* via Flask-Mail from a Celery worker.

    Flask-Mail needs an active application context, so push one explicitly.
    """
    ctx = app.app_context()
    with ctx:
        mail.send(msg)
@celery.task(bind=True)
def long_task(self):
    """Background task that runs a long function with progress reports."""
    verb = ['Starting up', 'Booting', 'Repairing', 'Loading', 'Checking']
    adjective = ['master', 'radiant', 'silent', 'harmonic', 'fast']
    noun = ['solar array', 'particle reshaper', 'cosmic ray', 'orbiter', 'bit']
    total = random.randint(10, 50)
    message = ''
    for step in range(total):
        # Rotate the status message at the start and then ~25% of the time.
        refresh = not message or random.random() < 0.25
        if refresh:
            message = '{0} {1} {2}...'.format(random.choice(verb),
                                              random.choice(adjective),
                                              random.choice(noun))
        self.update_state(state='PROGRESS',
                          meta={'current': step, 'total': total,
                                'status': message})
        time.sleep(1)
    return {'current': 100, 'total': 100, 'status': 'Task completed!',
            'result': 42}
@app.route('/', methods=['GET', 'POST'])
def index():
    """Render the form on GET; on POST queue a test email and redirect."""
    if request.method == 'GET':
        return render_template('index.html', email=session.get('email', ''))

    email = request.form['email']
    session['email'] = email

    # Build the test message.
    msg = Message('Hello from Flask',
                  recipients=[request.form['email']])
    msg.body = 'This is a test email sent from a background Celery task.'

    if request.form['submit'] == 'Send':
        # Queue for immediate delivery.
        send_async_email.delay(msg)
        flash('Sending email to {0}'.format(email))
    else:
        # Queue with a sixty second countdown.
        send_async_email.apply_async(args=[msg], countdown=60)
        flash('An email will be sent to {0} in one minute'.format(email))
    return redirect(url_for('index'))
@app.route('/longtask', methods=['POST'])
def longtask():
    """Kick off a background task; point the client at its status URL."""
    task = long_task.apply_async()
    status_url = url_for('taskstatus', task_id=task.id)
    # 202 Accepted with a Location header the client can poll.
    return jsonify({}), 202, {'Location': status_url}
@app.route('/status/<task_id | >')
def taskstatus(task_id):
task = long_task.AsyncResult(task_id)
if task.state == 'PENDING':
response = {
'state': task.state,
'current': 0,
'total': 1,
'status': 'Pending...'
}
elif task.state != | 'FAILURE':
response = {
'state': task.state,
'current': task.info.get('current', 0),
'total': task.info.get('total', 1),
'status': task.info.get('status', '')
}
if 'result' in task.info:
response['result'] = task.info['result']
else:
# something went wrong in the background job
response = {
'state': task.state,
'current': 1,
'total': 1,
'status': str(task.info), # this is the exception raised
}
return jsonify(response)
if __name__ == '__main__':
    # Development entry point only; use a real WSGI server in production.
    app.run(debug=True)
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# li | mitations under the License.
#
# pytype: skip-file
import logging
import math
import time
from apache_beam.metrics.metric import Metrics
from apitools.base.py import exceptions
from apitools.base.py import http_wrapper
from apitools.base.py import util
_LOGGER = logging.getLogger(__name__)
class GcsIOOverrides(ob | ject):
"""Functions for overriding Google Cloud Storage I/O client."""
_THROTTLED_SECS = Metrics.counter('StorageV1', "cumulativeThrottlingSeconds")
@classmethod
def retry_func(cls, retry_args):
# handling GCS download throttling errors (BEAM-7424)
if (isinstance(retry_args.exc, exceptions.BadStatusCodeError) and
retry_args.exc.status_code == http_wrapper.TOO_MANY_REQUESTS):
_LOGGER.debug(
'Caught GCS quota error (%s), retrying.', retry_args.exc.status_code)
else:
return http_wrapper.HandleExceptionsAndRebuildHttpConnections(retry_args)
http_wrapper.RebuildHttpConnections(retry_args.http)
_LOGGER.debug(
'Retrying request to url %s after exception %s',
retry_args.http_request.url,
retry_args.exc)
sleep_seconds = util.CalculateWaitForRetry(
retry_args.num_retries, max_wait=retry_args.max_retry_wait)
cls._THROTTLED_SECS.inc(math.ceil(sleep_seconds))
time.sleep(sleep_seconds)
|
#!/usr/bin/python
from fsm import parse_automaton, accept
import re
__author__ = 'Roland'
import sys
# Reserved words of the toy language being lexed.
keywords = ['float', 'char', 'print', 'input', 'break', 'continue', 'return', 'def', 'if', 'elif',
            'else', 'while', 'or', 'and', 'not']
operators = ['=', '<', '>', '==', '>=', '<=', '!=', '+', '-', '*', '/', '%']
separators = ['[', ']', '(', ')', ',', ':']
# Codification table: a token's code is its index in this list.
# Index 0 is 'var' (identifiers), index 1 is 'const' (constants).
codif = ['var', 'const', '\n', 'indent', 'dedent'] + keywords + operators + separators
def error(line_nr, msg):
    """
    Show an error message `msg` found at line number `line_nr`
    """
    report = "Lexical error at line %d: %s" % (line_nr, msg)
    print(report)
def value_or_none(tree):
    """
    Helper function to return string, even if given a tree, string or None
    """
    # Guard clauses instead of nested if/else.
    if tree is None:
        return 'None'
    if type(tree) == str:
        return tree
    # Anything else is assumed to be a tree node carrying a `value`.
    return str(tree.value)
class binary_tree(object):
    """
    Binary search tree. It remembers the order in which elements were added.
    """

    def __init__(self, value):
        """Create a node; a falsy root value starts an empty tree."""
        self.value = value
        # Insertion order, maintained for the root node's table view.
        self.elements = [value] if self.value else []
        self.left = None
        self.right = None

    def add(self, value):
        """
        Add `value` to the tree to the correct place
        """
        if self.value is None:
            self.value = value
            return
        # Smaller values go left, everything else (ties included) right.
        side = 'left' if value < self.value else 'right'
        child = getattr(self, side)
        if child:
            child.add(value)
        else:
            setattr(self, side, binary_tree(value))

    def __contains__(self, value):
        """
        Search for `value` in the tree.
        """
        if value == self.value:
            return True
        if self.left and value in self.left:
            return True
        return self.right and value in self.right

    def index(self, value):
        """
        Return the parent and sibling node of `value`. Return None if it is not found,
        and (None, None) for root node.
        """
        if self.value == value:
            return (None, None)
        # Direct child? Report this node as parent and the other child
        # as sibling (right child checked first, as before).
        for child, sibling in ((self.right, self.left), (self.left, self.right)):
            if child and value == child.value:
                return self.value, sibling
        # Otherwise recurse into whichever subtree holds the value.
        for child in (self.left, self.right):
            if child and value in child:
                return child.index(value)
        return None

    def __str__(self):
        """
        String representation of the tree, using a table with parent and sibling relations.
        """
        rows = []
        for position, element in enumerate(self.elements):
            parent, sibling = self.index(element)
            rows.append(str(position) + " | " + str(element) + " | " +
                        value_or_none(parent) + " | " + value_or_none(sibling) + "\n")
        return "".join(rows)
def get_poz(atom, ts):
    """
    Get the position of `atom` in the tree `ts`, and insert it if it's not in the tree.

    Returns the insertion-order index of `atom` in `ts.elements`.
    """
    if atom not in ts:
        ts.add(atom)
        ts.elements.append(atom)
    # CLEANUP: a discarded `parent, sibling = ts.index(atom)` lookup was
    # removed here; `index` is a pure search, so dropping it is behavior-safe.
    return ts.elements.index(atom)
# Finite-automaton description for identifiers and indexed accesses: each
# entry is "state input next-state [flags]", where B marks the begin state
# and F the final states (parsed by fsm.parse_automaton).
var_lang = ["i a-z s B",
            "i A-Z s B",
            "s a-z s F",
            "s A-z s F",
            "s 0-9 s F",
            "s [ t",
            "t 0-9 f",
            "f 0-9 f",
            "f ] l F"]
var_aut = parse_automaton(var_lang)

# Automaton for numeric constants (integers and decimals).
num_lang = ["i 0 s B",
            "i 1-9 t B",
            "s . n",
            "t 0-9 f", "t . n", "f 0-9 f", "f . n", "n 0-9 n F"]
num_aut = parse_automaton(num_lang)
def lexer(program):
    """
    Function to do the actual lexing.

    Returns a tuple (fip, ts_const, ts_ident): `fip` is the list of
    (token-code, symbol-table-position) pairs, `ts_const` and `ts_ident`
    are the constant and identifier symbol tables.
    """
    ts_const = binary_tree(None)
    ts_ident = binary_tree(None)
    fip = []
    # Stack of active indentation widths, Python-style INDENT/DEDENT.
    indentation = [0]
    for i, line in enumerate(program.splitlines()):
        indent_level = len(line) - len(line.lstrip())
        if indent_level != indentation[-1]:
            if indent_level > indentation[-1]:
                indentation.append(indent_level)
                fip.append((codif.index('indent'), 0))
            else:
                # Pop until we land back on a previously seen indent width.
                while len(indentation) and indentation[-1] != indent_level:
                    fip.append((codif.index('dedent'), 0))
                    indentation.pop()
                if len(indentation) == 0:
                    error(i, "incorrect indentation")
        in_string = ""
        # Split on every operator/separator while keeping the delimiters.
        for atom in re.split("( |=|<|>|==|>=|<=|!=|\+|-|\*|/|%|\[|\]|\(|\)|,|:)", line):
            if len(atom.strip()) == 0 and not in_string:
                continue
            if '"' in atom:
                if in_string:
                    # Closing quote: validate and record the string constant.
                    in_string += atom
                    if re.search('[^ "a-zA-Z0-9]', in_string):
                        # NOTE(review): `in_string` is not reset on this error
                        # path, so the rest of the line stays in string mode
                        # -- verify this is intended.
                        error(i, " invalid character in string constant")
                        continue
                    fip.append((1, get_poz(in_string, ts_const)))
                    in_string = ""
                    continue
                else:
                    # Opening quote: start accumulating the string constant.
                    in_string = atom
                    continue
            if in_string:
                in_string += atom
                continue
            if atom in keywords or atom in operators or atom in separators:
                fip.append((codif.index(atom), 0))
            else:
                # Not a fixed token: classify with the finite automata.
                if accept(*var_aut, string=atom) == True:
                    fip.append((0, get_poz(atom, ts_ident)))
                elif accept(*num_aut, string=atom) == True:
                    fip.append((1, get_poz(atom, ts_const)))
                else:
                    error(i, " unidentified expression " + atom)
        if in_string:
            error(i, " unterminated string constant ")
        fip.append((codif.index('\n'), 0))
    return fip, ts_const, ts_ident
if __name__ == "__main__":
    if len(sys.argv) == 1:
        print("You must give file to analyze as argument")
        # BUG FIX: previously execution fell through to sys.argv[1] and
        # crashed with an IndexError after printing the message.
        sys.exit(1)
    file = sys.argv[1]
    # BUG FIX: open in text mode -- lexer() regex-splits and compares `str`
    # lines, which fails on bytes from mode "rb" under Python 3.  Also use
    # a context manager so the file handle is closed.
    with open(file, "r") as f:
        fip, ts_const, ts_ident = lexer(f.read())
    print(fip)
    print(ts_const)
    print(ts_ident)
|
import logging
from ..direc | tives import directives_by_section
logger = logging.getLogger(__name__)
class Stanza(object):
"""
Subclass for config file stanzas.
In an HAProxy config file, a stanza is in the form of::
stanza header
directive
directive
directive
Stanza instances have a `header` attribute for the header and a list of
`lines`, one for each directive line.
"""
def __init__(self, section_name):
self.section_name = s | ection_name
self.header = section_name
self.lines = []
def add_lines(self, lines):
"""
Simple helper method for adding multiple lines at once.
"""
for line in lines:
self.add_line(line)
def add_line(self, line):
"""
Adds a given line string to the list of lines, validating the line
first.
"""
if not self.is_valid_line(line):
logger.warn(
"Invalid line for %s section: '%s'",
self.section_name, line
)
return
self.lines.append(line)
def is_valid_line(self, line):
"""
Validates a given line against the associated "section" (e.g. 'global'
or 'frontend', etc.) of a stanza.
If a line represents a directive that shouldn't be within the stanza
it is rejected. See the `directives.json` file for a condensed look
at valid directives based on section.
"""
adjusted_line = line.strip().lower()
return any([
adjusted_line.startswith(directive)
for directive in directives_by_section[self.section_name]
])
def __str__(self):
"""
Returns the string representation of a Stanza, meant for use in
config file content.
if no lines are defined an empty string is returned.
"""
if not self.lines:
return ""
return self.header + "\n" + "\n".join([
"\t" + line
for line in self.lines
])
|
ssh:// from url
url = url.lstrip("ssh://")
# get port
port, url = _extract_ssh_port_url(url)
args = (_rsync_exec, "--no-motd", "--compress", "--progress",
"--stats", "--inplace", "--timeout=%d" % (self.__timeout,),
"-e", "ssh -p %s" % (port,))
if self.__speedlimit:
args += ("--bwlimit=%d" % (self.__speedlimit,),)
if not self.__resume:
args += ("--whole-file",)
else:
args += ("--partial",)
args += (url, self.__path_to_save,)
# args to rsync to get remote file size
list_args = (_rsync_exec, "--no-motd", "--list-only", "-e",
"ssh -p %s" % (port,), url)
return list_args, args
    def _rsync_download(self):
        """
        rsync based downloader. It uses rsync executable.

        Runs two rsync invocations: a --list-only pass to learn the remote
        size, then the actual transfer; both feed line callbacks that update
        the progress state on self.
        """
        list_args, args = self._setup_rsync_args()

        # rsync executable environment
        rsync_environ = {}
        # setup proxy support
        proxy_data = self.__system_settings['system']['proxy']
        if proxy_data['rsync']:
            rsync_environ['RSYNC_PROXY'] = proxy_data['rsync']

        def rsync_stats_extractor(output_line):
            # Parse one progress line of the transfer; expected fields are
            # "<bytes> <pct%> <speed> <eta>" -- anything else is ignored.
            const_debug_write(__name__,
                "rsync_stats_extractor(%s): %s" % (self.__th_id, output_line,))
            data = output_line.split()
            if len(data) != 4:
                # it's just garbage here
                self._update_speed()
                return
            bytes_read, pct, speed_kbsec, eta = data
            try:
                bytes_read = int(bytes_read)
            except ValueError:
                bytes_read = 0
            try:
                average = int(pct.strip("%"))
            except ValueError:
                average = 0

            # update progress info
            # _rsync_commit
            self.__downloadedsize = bytes_read
            if average > 100:
                average = 100
            self.__average = average
            self._update_speed()

            if self.__show_speed:
                self.handle_statistics(self.__th_id, self.__downloadedsize,
                    self.__remotesize, self.__average, self.__oldaverage,
                    self.__updatestep, self.__show_speed, self.__datatransfer,
                    self.__time_remaining, self.__time_remaining_secs
                )
                self.update()
                self.__oldaverage = self.__average

        def rsync_list_extractor(output_line):
            # --list-only output: capture the remote file size (field 2).
            data = output_line.split()
            if len(data) == 5:
                try:
                    # perms, size, date, time, file name
                    self.__remotesize = float(data[1])/1000
                except ValueError:
                    pass

        const_debug_write(__name__,
            "spawning rsync fetch(%s): %s, %s, %s" % (
                self.__th_id, list_args, rsync_environ, rsync_list_extractor,))
        sts = self.__fork_cmd(list_args, rsync_environ, rsync_list_extractor)
        const_debug_write(__name__,
            "spawned rsync fetch(%s): status: %s" % (self.__th_id, sts,))
        if sts != 0:
            # Listing failed: clean up any partial file and report the error.
            self.__rsync_close(True)
            self.__status = UrlFetcher.GENERIC_FETCH_ERROR
            return self.__status

        const_debug_write(__name__,
            "spawning rsync fetch(%s): %s, %s, %s" % (
                self.__th_id, args, rsync_environ, rsync_stats_extractor,))
        sts = self.__fork_cmd(args, rsync_environ, rsync_stats_extractor)
        const_debug_write(__name__,
            "spawned rsync fetch(%s): status: %s" % (self.__th_id, sts,))
        if sts != 0:
            self.__rsync_close(True)
            self.__status = UrlFetcher.GENERIC_FETCH_ERROR
            return self.__status

        # kill thread
        self.__rsync_close(False)

        return self.__prepare_return()
def __rsync_close(self, errored):
if (not self.__existed_before) and errored:
try:
os.remove(self.__path_to_save)
except OSError:
pass
def _setup_urllib_proxy(self):
"""
Setup urllib proxy data
"""
mydict = {}
proxy_data = self.__system_settings['system']['proxy']
if proxy_data['ftp']:
mydict['ftp'] = proxy_data['ftp']
if proxy_data['http']:
mydict['http'] = proxy_data['http']
if mydict:
mydict['username'] = proxy_data['username']
mydict['password'] = proxy_data['password']
add_proxy_opener(urlmod, mydict)
else:
# unset
urlmod._opener = None
def _urllib_download(self):
"""
urrlib2 based downloader. This is the default for HTTP and FTP urls.
"""
self._setup_urllib_proxy()
self.__setup_urllib_resume_support()
# we're going to feed the md5 digestor on the way.
self.__use_md5_checksum = True
url = self.__encode_url(self.__url)
url_protocol = UrlFetcher._get_url_protocol(self.__url)
uname = os.uname()
user_agent = "Entropy/%s (compatible; %s; %s: %s %s %s)" % (
etpConst['entropyversion'],
"Entropy",
os.path.basename(url),
uname[0],
uname[4],
uname[2],
)
if url_protocol in ("http", "https"):
# Handle HTTP Basic auth
if self.__http_basic_user and self.__http_basic_pwd:
basic_header = base64.encodestring('%s:%s' % (
self.__http_basic_user, self.__http_basic_pwd)).replace('\n', '')
headers = {
'User-Agent': user_agent,
'Authorization': 'Basic %s' % basic_header,
}
else:
headers = {'User-Agent': user_agent,}
req = urlmod.Request(url, headers = headers)
else:
req = url
u_agent_error = False
do_return = False
while True:
# get file size if available
try:
if url_protocol in ("https") and \
not self.__https_validate_cert:
ctx = ssl.create_default_context()
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
self.__remotefile = urlmod.urlopen(req, None, self.__timeout,
context=ctx)
else:
self.__remotefile = urlmod.urlopen(req, None, self.__timeout)
except KeyboardInterrupt:
self.__urllib_close(False)
raise
except httplib.InvalidURL:
# malformed url!
self.__urllib_close(True)
self.__status = UrlFetcher.GENERIC_FETCH_ERROR
do_return = True
except urlmod_error.HTTPError as e:
if (e.code == 405) and not u_agent_error:
# server doesn't like our user agent
req = url
u_agent_error = True
continue
self.__urllib_close(True)
self.__status = UrlFetcher.GENERIC_FETCH_ERROR
do_return = True
except urlmod_error.URLError as err: # timeout error
self.__urllib_close(True)
self.__status = UrlFetcher.GENERIC_FETCH_ERROR
do_return = True
except httplib.BadStatusLine:
# obviously, something to cope with
self.__urllib_close(True)
self.__status = UrlFetcher.GENERIC_FETCH_ERROR
do_return = True
except socket.timeout:
# arghv!!
self.__urllib_close(True)
self.__status = UrlFetcher.TIMEOUT_FETCH_ERROR
do_return = True
except socket.error:
# connection reset by peer?
self.__urllib_close(True)
self.__status = |
"""
KaraCos - web platform engine - http://karacos.org/
Copyright (C) 2009-2010 Nicolas Karageuzian - Cyril Gratecis
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
| the Free Software Foundation, either version 3 of the License, or
(at your option) an | y later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
__license__ = 'AGPL'
import smtplib
import karacos
import random
import string
from email.MIMEMultipart import MIMEMultipart
from email.MIMEText import MIMEText
def valid_email(email):
    """Return a regex match object if `email` looks valid, else None."""
    import re
    # local-part "@", optional "[", dotted host, 2-3 letter TLD or numeric
    # octet, optional "]".
    pattern = r"^.+\@(\[?)[a-zA-Z0-9\-\.]+\.([a-zA-Z]{2,3}|[0-9]{1,3})(\]?)$"
    return re.match(pattern, email)
def send_mail(destmail, msg):
    """
    Send `msg` to `destmail` via the SMTP server configured in the [mail]
    section of the KaraCos configuration.

    NOTE(review): Python 2 code (print statements, `except Exception,e`).
    """
    try:
        server = smtplib.SMTP(karacos.config.get('mail','smtp_server'),
                              karacos.config.get('mail','smtp_server_port'))
        server.ehlo()
        if karacos.config.has_option('mail', 'smtp_ssl'):
            # The string "True" or any truthy smtp_ssl value enables TLS.
            if karacos.config.get('mail', 'smtp_ssl') == "True" or karacos.config.get('mail', 'smtp_ssl'):
                server.starttls()
                server.ehlo()
        if karacos.config.has_option('mail', 'smtp_password'):
            # Authenticated SMTP: log in as the configured sender address.
            src = karacos.config.get('mail','from_addr')
            password = karacos.config.get('mail','smtp_password')
            server.login(src, password)
        server.sendmail(karacos.config.get('mail','from_addr'), destmail, msg)
        print "mail sent"
        server.close()
    except Exception,e:
        # Log full exception info before re-raising to the caller.
        import sys
        print sys.exc_info()
        raise e
def send_domain_mail(domain, destmail, msg):
    """
    Like send_mail(), but SMTP settings come from the per-site `domain`
    mapping instead of the global KaraCos configuration.
    """
    server = smtplib.SMTP(domain['site_email_service_host'],
                          domain['site_email_service_port'])
    server.ehlo()
    if 'site_email_service_secure' in domain:
        # A truthy "secure" flag switches the session to TLS.
        if domain['site_email_service_secure'] or domain['site_email_service_secure'] == True:
            server.starttls()
            server.ehlo()
    if 'site_email_service_password' in domain:
        server.login(domain['site_email_service_username'], domain['site_email_service_password'])
    server.sendmail(domain['site_email_from'], destmail, msg)
    server.close()
s['msg'] = 'One or more items failed'
else:
res['msg'] = 'All items completed'
else:
res = dict(changed=False, skipped=True, skipped_reason='No items in the list', results=[])
else:
display.debug("calling self._execute()")
res = self._execute()
display.debug("_execute() done")
# make sure changed is set in the result, if it's not present
if 'changed' not in res:
res['changed'] = False
def _clean_res(res):
if isinstance(res, dict):
for k in res.keys():
res[k] = _clean_res(res[k])
elif isinstance(res, list):
for idx,item in enumerate(res):
res[idx] = _clean_res(item)
elif isinstance(res, UnsafeProxy):
return res._obj
elif isinstance(res, binary_type):
return to_unicode(res, errors='strict')
return res
display.debug("dumping result to json")
res = _clean_res(res)
display.debug("done dumping result, returning")
return res
except AnsibleError as e:
return dict(failed=True, msg=to_unicode(e, nonstring='simplerepr'))
except Exception as e:
return dict(failed=True, msg='Unexpected failure during module execution.', exception=to_unicode(traceback.format_exc()), stdout='')
finally:
try:
self._connection.close()
except AttributeError:
pass
except Exception as e:
display.debug(u"error closing connection: %s" % to_unicode(e))
    def _get_loop_items(self):
        '''
        Loads a lookup plugin to handle the with_* portion of a task (if specified),
        and returns the items result.

        Returns None when there is no loop or when templating the loop terms
        hit an undefined variable (deprecation path).
        '''
        # save the play context variables to a temporary dictionary,
        # so that we can modify the job vars without doing a full copy
        # and later restore them to avoid modifying things too early
        play_context_vars = dict()
        self._play_context.update_vars(play_context_vars)
        old_vars = dict()
        for k in play_context_vars.keys():
            if k in self._job_vars:
                old_vars[k] = self._job_vars[k]
            self._job_vars[k] = play_context_vars[k]
        # templar resolves variables in the loop definition against the
        # temporarily augmented job vars
        templar = Templar(loader=self._loader, shared_loader_obj=self._shared_loader_obj, variables=self._job_vars)
        items = None
        if self._task.loop:
            if self._task.loop in self._shared_loader_obj.lookup_loader:
                #TODO: remove convert_bare true and deprecate this in with_
                if self._task.loop == 'first_found':
                    # first_found loops are special. If the item is undefined
                    # then we want to fall through to the next value rather
                    # than failing.
                    loop_terms = listify_lookup_plugin_terms(terms=self._task.loop_args, templar=templar, loader=self._loader, fail_on_undefined=False, convert_bare=True)
                    # drop terms that still contain unresolved variables
                    loop_terms = [t for t in loop_terms if not templar._contains_vars(t)]
                else:
                    try:
                        loop_terms = listify_lookup_plugin_terms(terms=self._task.loop_args, templar=templar, loader=self._loader, fail_on_undefined=True, convert_bare=True)
                    except AnsibleUndefinedVariable as e:
                        # currently only a deprecation warning, not a failure
                        display.deprecated("Skipping task due to undefined Error, in the future this will be a fatal error.: %s" % to_bytes(e))
                        return None
                items = self._shared_loader_obj.lookup_loader.get(self._task.loop, loader=self._loader, templar=templar).run(terms=loop_terms, variables=self._job_vars, wantlist=True)
            else:
                raise AnsibleError("Unexpected failure in finding the lookup named '%s' in the available lookup plugins" % self._task.loop)
        # now we restore any old job variables that may have been modified,
        # and delete them if they were in the play context vars but not in
        # the old variables dictionary
        for k in play_context_vars.keys():
            if k in old_vars:
                self._job_vars[k] = old_vars[k]
            else:
                del self._job_vars[k]
        if items:
            # NOTE(review): wrapping appears to mark loop results so they are
            # not re-templated downstream -- confirm against UnsafeProxy docs
            from ansible.vars.unsafe_proxy import UnsafeProxy
            for idx, item in enumerate(items):
                if item is not None and not isinstance(item, UnsafeProxy):
                    items[idx] = UnsafeProxy(item)
        return items
    def _run_loop(self, items):
        '''
        Runs the task with the loop items specified and collates the result
        into an array named 'results' which is inserted into the final result
        along with the item for which the loop ran.
        '''
        results = []
        # make copies of the job vars and task so we can add the item to
        # the variables and re-validate the task with the item variable
        #task_vars = self._job_vars.copy()
        task_vars = self._job_vars
        loop_var = 'item'
        if self._task.loop_control:
            # the value may be 'None', so we still need to default it back to 'item'
            loop_var = self._task.loop_control.loop_var or 'item'
        if loop_var in task_vars:
            raise AnsibleError("the loop variable '%s' is already in use. You should set the `loop_var` value in the `loop_control` option for the task to something else to avoid variable collisions" % loop_var)
        # certain modules (e.g. package managers) can accept all items at once
        items = self._squash_items(items, loop_var, task_vars)
        for item in items:
            task_vars[loop_var] = item
            try:
                tmp_task = self._task.copy()
                tmp_play_context = self._play_context.copy()
            except AnsibleParserError as e:
                results.append(dict(failed=True, msg=to_unicode(e)))
                continue
            # now we swap the internal task and play context with their copies,
            # execute, and swap them back so we can do the next iteration cleanly
            (self._task, tmp_task) = (tmp_task, self._task)
            (self._play_context, tmp_play_context) = (tmp_play_context, self._play_context)
            res = self._execute(variables=task_vars)
            (self._task, tmp_task) = (tmp_task, self._task)
            (self._play_context, tmp_play_context) = (tmp_play_context, self._play_context)
            # now update the result with the item info, and append the result
            # to the list of results
            res[loop_var] = item
            # marker lets callbacks distinguish per-item results from the
            # final aggregated result
            res['_ansible_item_result'] = True
            self._rslt_q.put(TaskResult(self._host, self._task, res), block=False)
            results.append(res)
            del task_vars[loop_var]
        return results
def _squash_items(self, items, loop_var, variables):
'''
Squash items down to a comma-separated list for certain modules which support it
(typically package management modules).
'''
# _task.action could contain templatable strings (via action: and
# local_action:) Template it before comparing. If we don't end up
# optimizing it here, the templatable string might use template vars
# that aren't available until later (it could even use vars from the
# with_items loop) so don't make the templated string permanent yet.
templar = Templar(loader=self._loader, shared_loader_obj=self._shared_loader_obj, variables=variables)
task_action = self._task.action
if templar._contains_vars(task_action):
task_action = templar.template(task_action, fail_on_undefined=False)
if len(items) > 0 and task_action in self.SQUASH_ACTIONS:
if all(isinstance(o, string_types) for o in items):
final_items = []
name = None
for allowed in ['name', 'pkg', 'package']:
name |
__au | thor__ = 'sandro'
from distutils.core import setup
setup(
author='Sandro Covo',
author_email="sandro@covo.ch",
packages=['brainfuck'],
scripts=['scripts/pyfuck'],
name="Pyfuc | k",
description="Brainfuck interpreter written in python"
) |
#-*- coding: utf- | 8 -*-
| |
#!/usr/bin/env python3
# -*- encoding: utf-8 -*-
__author__ = 'Florents Tselai'
from datetime import datetime
from urllib.request import urlopen, Request
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
from pycargr.model import Car
class SearchResultPageParser:
    """Parses a car.gr search-results page.

    The page at *search_page_url* is fetched once at construction time;
    parse() yields the classified ids found on it and len() reports the
    advertised number of results.
    """
    def __init__(self, search_page_url):
        self.search_page_url = search_page_url
        # Spoof a desktop Chrome user agent (presumably to avoid bot
        # filtering by the site -- TODO confirm).
        req = Request(
            search_page_url,
            data=None,
            headers={
                'User-Agent': UserAgent().chrome
            }
        )
        self.html = urlopen(req).read().decode('utf-8')
        self.soup = BeautifulSoup(self.html, 'html.parser')
        # Total result count, scraped from the "<N> αγγελίες" label.
        # NOTE(review): stays None when the label is absent, which makes
        # len() raise TypeError -- confirm whether callers rely on this.
        self.num_results = None
        for f in self.soup.find_all('strong'):
            if 'αγγελίες' in f.text:
                if f.text.split()[0].isdigit():
                    self.num_results = int(f.text.split()[0])
    def parse(self):
        """Yield the classified id (as a string) of each result row."""
        for a in self.soup.find_all('a', class_='vehicle list-group-item clsfd_list_row'):
            yield str(int(a.get('href').replace('/', '').split('-')[0].replace('classifiedscarsview', '')))
    def __len__(self):
        return self.num_results
class CarItemParser:
    """Scrapes a single car.gr classified page into a Car object.

    The page for *car_id* is fetched once at construction time.  Every
    parse_* helper extracts one field from the live HTML and returns None
    when the field is missing or malformed (deliberate best-effort
    scraping -- the broad excepts keep one bad field from aborting the
    whole listing).
    """
    def __init__(self, car_id):
        self.car_id = car_id
        # Spoof a desktop Chrome user agent (presumably to avoid bot
        # filtering -- TODO confirm).
        self.req = Request(
            'https://www.car.gr/%s' % self.car_id,
            data=None,
            headers={
                'User-Agent': UserAgent().chrome
            }
        )
        self.html = urlopen(self.req).read().decode('utf-8')
        self.soup = BeautifulSoup(self.html, 'html.parser')
    def parse_km(self):
        """Return the mileage as a float, or None."""
        try:
            for td in self.soup.find_all('td'):
                if 'χλμ' in td.text:
                    # e.g. '1.500 χλμ' -> 1500.0 ('.' is a thousands separator)
                    return float(td.text.replace('.', '').replace('χλμ', ''))
        except Exception:
            return None
        return None
    def parse_bhp(self):
        """Return the horsepower as an int, or None."""
        try:
            for td in self.soup.find_all('td'):
                if 'bhp' in td.text:
                    return int(td.text.replace(' bhp', ''))
        except Exception:
            return None
        return None
    def parse_title(self):
        """Return the page <title> text, or None."""
        try:
            return self.soup.find('title').text
        except Exception:
            return None
    def parse_price(self):
        """Return the price as a float, or None."""
        try:
            return float(self.soup.find(itemprop='price').text.replace('.', '').replace('€ ', ''))
        except Exception:
            return None
    def parse_release_date(self):
        """Return the release date formatted like 'Jan 2017', or None."""
        try:
            date_str = self.soup.find(itemprop='releaseDate').text.strip()
            return datetime.strptime(date_str, "%m / %Y").strftime("%b %Y")
        except Exception:
            return None
    def parse_engine(self):
        """Return the engine displacement (cc) as an int, or None."""
        try:
            return int(self.soup.find(id='clsfd_engine_%s' % self.car_id).text.replace(' cc', '').replace('.', ''))
        except Exception:
            return None
    def parse_color(self):
        """Return the color string, or None."""
        try:
            return self.soup.find(itemprop='color').text
        except Exception:
            return None
    def parse_fueltype(self):
        """Return the fuel-type string, or None."""
        try:
            return self.soup.find(id='clsfd_fueltype_%s' % self.car_id).text
        except Exception:
            return None
    def parse_description(self):
        """Return the free-text description, or None."""
        try:
            return self.soup.find(itemprop='description').text
        except Exception:
            return None
    def parse_city(self):
        """Return the seller's city, or None."""
        try:
            return self.soup.find('span', itemprop='addressLocality').text
        except Exception:
            return None
    def parse_region(self):
        """Return the seller's region, or None."""
        try:
            return self.soup.find('span', itemprop='addressRegion').text
        except Exception:
            return None
    def parse_postal_code(self):
        """Return the postal code as an int, or None."""
        try:
            return int(self.soup.find('span', itemprop='postalCode').text)
        except Exception:
            return None
    def parse_transmission(self):
        """Return the transmission string, or None."""
        try:
            return self.soup.find(id='clsfd_transmision_%s' % self.car_id).text
        except Exception:
            return None
    def parse_images(self):
        """Return a list of full-size image URLs, or None on error."""
        try:
            images_urls = []
            for img in self.soup.find_all('img', class_='bigphoto'):
                # normalize protocol-relative URLs and swap the thumbnail
                # suffix (_v) for the big-photo suffix (_b)
                images_urls.append(img.get('src').replace(r'//', 'https://').replace('_v', '_b'))
            return images_urls
        except Exception:
            return None
    def parse(self):
        """Run every field parser and return the populated Car."""
        c = Car(self.car_id)
        c.title = self.parse_title()
        c.price = self.parse_price()
        c.release_date = self.parse_release_date()
        c.engine = self.parse_engine()
        c.km = self.parse_km()
        c.bhp = self.parse_bhp()
        c.url = self.req.full_url
        c.color = self.parse_color()
        c.fueltype = self.parse_fueltype()
        c.description = self.parse_description()
        c.city = self.parse_city()
        c.region = self.parse_region()
        c.postal_code = self.parse_postal_code()
        c.transmission = self.parse_transmission()
        c.images = self.parse_images()
        # keep the raw page and scrape timestamp for auditing/re-parsing
        c.html = self.html
        c.scraped_at = datetime.now().isoformat()
        return c
# Utility methods
def parse_search_results(search_url):
    """Yield a fully parsed Car for every listing found on *search_url*."""
    for listing_id in SearchResultPageParser(search_url).parse():
        yield parse_car_page(listing_id)
def parse_car_page(car_id):
    """Fetch and parse the classified page for *car_id* into a Car."""
    return CarItemParser(car_id).parse()
|
import requests
import time
from selenium import webdriver
# file path
import os
# Resolve the PhantomJS launcher relative to this file so the script works
# regardless of the current working directory.
BASE_DIR = os.path.dirname(__file__)
phjs_path = os.path.join(BASE_DIR,'login.phjs.js')
print ('*******'+phjs_path+'*******')
driver = webdriver.PhantomJS(executable_path=phjs_path)
try:
    driver.get('http://autoinsights.autodmp.com/user/login')
    time.sleep(3)  # crude wait for the login page to render
    print(driver.find_element_by_tag_name('form').text)
finally:
    # quit() terminates the PhantomJS subprocess; the previous close() only
    # closed the window and leaked the process, and nothing was cleaned up
    # when an exception occurred mid-script.
    driver.quit()
# Copyright 2016 Casey Jaymes
# This file is part of PySCAP.
#
# PySCAP is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PySCAP is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with PySCAP. If not, see <http://www.gnu.org/licenses/>.
import logging
from scap.Model import Model
logger = logging.getLogger(__name__)
class ModelType(Model):
    """An XCCDF 1.1 <model> element: selects and applies a scoring model.

    The required *system* attribute is the URI of one of the four XCCDF
    scoring models; score() computes the benchmark score for a profile and
    appends it to the host's facts.
    """
    MODEL_MAP = {
        'attributes': {
            'system': {'required': True, 'type': 'AnyUriType'},
        },
        'elements': [
            {'tag_name': 'param', 'class': 'ParamType', 'dict': 'params', 'key': 'name', 'min': 0, 'max': None},
        ],
    }

    def _collect_rule_scores(self, host, benchmark, profile_id):
        """Collect the per-rule score dicts from every Group/Rule item.

        Shared by the flat, flat-unweighted and absolute models, which all
        start from the same rule-level results (previously triplicated).
        """
        from scap.model.xccdf_1_1.GroupType import GroupType
        from scap.model.xccdf_1_1.RuleType import RuleType
        scores = {}
        for item_id in benchmark.items:
            item = benchmark.items[item_id]
            if not isinstance(item, GroupType) \
            and not isinstance(item, RuleType):
                continue
            # just pass the scores upstream for processing
            scores.update(item.score(host, benchmark, profile_id, self.system))
        return scores

    def score(self, host, benchmark, profile_id):
        """Score *benchmark* for *profile_id* using this model's system URI.

        The result dict is appended to
        host.facts['checklist'][benchmark.id]['profile'][profile_id]['scores'].

        Raises:
            NotImplementedError: for an unrecognized scoring system URI.
            ValueError: if default-model normalization is inconsistent.
        """
        from scap.model.xccdf_1_1.GroupType import GroupType
        from scap.model.xccdf_1_1.RuleType import RuleType
        if self.system == 'urn:xccdf:scoring:default':
            ### Score.Group.Init
            # If the node is a Group or the Benchmark, assign a count of 0, a
            # score s of 0.0, and an accumulator a of 0.0.
            count = 0
            score = 0.0
            accumulator = 0.0
            ### Score.Group.Recurse
            # For each selected child of this Group or Benchmark, do the following:
            # (1) compute the count and weighted score for the child using this
            # algorithm,
            # (2) if the child's count value is not 0, then add the child's
            # weighted score to this node's score s, add 1 to this node's count,
            # and add the child's weight value to the accumulator a.
            for item_id in benchmark.items:
                item = benchmark.items[item_id]
                if not isinstance(item, GroupType) \
                and not isinstance(item, RuleType):
                    continue
                if not item.selected:
                    continue
                item_score = item.score(host, benchmark, profile_id, self.system)
                if item_score[item_id]['score'] is None:
                    continue
                if item_score[item_id]['count'] != 0:
                    score += item_score[item_id]['score'] * item_score[item_id]['weight']
                    count += 1
                    accumulator += item_score[item_id]['weight']
            ### Score.Group.Normalize
            # Normalize this node's score: compute s = s / a.
            if accumulator == 0.0:
                if score != 0.0:
                    raise ValueError('Got to score normalization with score ' + str(score) + ' / ' + str(accumulator))
                else:
                    score = 0.0
            else:
                score = score / accumulator
            logger.debug(self.system + ' score: ' + str(score))
            host.facts['checklist'][benchmark.id]['profile'][profile_id]['scores'].append({'score': score, 'system': self.system})
        elif self.system == 'urn:xccdf:scoring:flat':
            # Flat model: score is the summed weight of passed rules out of
            # the summed weight of all applicable rules.
            scores = self._collect_rule_scores(host, benchmark, profile_id)
            score = 0.0
            max_score = 0.0
            for rule_id in scores:
                if scores[rule_id]['result'] in ['notapplicable', 'notchecked', 'informational', 'notselected']:
                    continue
                max_score += scores[rule_id]['weight']
                if scores[rule_id]['result'] in ['pass', 'fixed']:
                    score += scores[rule_id]['weight']
            logger.debug(self.system + ' score: ' + str(score) + ' / ' + str(max_score))
            host.facts['checklist'][benchmark.id]['profile'][profile_id]['scores'].append({'score': score, 'max_score': max_score, 'system': self.system})
        elif self.system == 'urn:xccdf:scoring:flat-unweighted':
            # Flat-unweighted model: like flat, but every applicable rule
            # counts 1.0 regardless of its weight.
            scores = self._collect_rule_scores(host, benchmark, profile_id)
            score = 0.0
            max_score = 0.0
            for rule_id in scores:
                if scores[rule_id]['result'] in ['notapplicable', 'notchecked', 'informational', 'notselected']:
                    continue
                max_score += 1.0
                if scores[rule_id]['result'] in ['pass', 'fixed']:
                    score += 1.0
            logger.debug(self.system + ' score: ' + str(score) + ' / ' + str(max_score))
            host.facts['checklist'][benchmark.id]['profile'][profile_id]['scores'].append({'score': score, 'max_score': max_score, 'system': self.system})
        elif self.system == 'urn:xccdf:scoring:absolute':
            # Absolute model: 1.0 only when every applicable rule passed,
            # otherwise 0.0.
            scores = self._collect_rule_scores(host, benchmark, profile_id)
            score = 0.0
            max_score = 0.0
            for rule_id in scores:
                if scores[rule_id]['result'] in ['notapplicable', 'notchecked', 'informational', 'notselected']:
                    continue
                max_score += scores[rule_id]['weight']
                if scores[rule_id]['result'] in ['pass', 'fixed']:
                    score += scores[rule_id]['weight']
            if score == max_score:
                score = 1.0
            else:
                score = 0.0
            logger.debug(self.system + ' score: ' + str(score))
            host.facts['checklist'][benchmark.id]['profile'][profile_id]['scores'].append({'score': score, 'system': self.system})
        else:
            raise NotImplementedError('Scoring model ' + self.system + ' is not implemented')
|
__author__ = 'Archana V | Menon, | Sujith V'
|
"""This file implements the gym environment of minitaur.
"""
import math
import random
import os, inspect
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(os.path.dirname(currentdir))
os.sys.path.insert(0, parentdir)
from gym import spaces
import numpy as np
from pybullet_envs.minitaur.envs import minitaur_gym_env
import pybullet_data
# The episode reaches its goal state when the observed ball distance drops
# below this threshold (see MinitaurBallGymEnv._goal_state).
GOAL_DISTANCE_THRESHOLD = 0.8
# Bonus added to the reward when the goal state is reached.
GOAL_REWARD = 1000.0
# All rewards are multiplied by this factor before being returned.
REWARD_SCALING = 1e-3
# The ball spawns at a random angle within +/- this value (radians).
INIT_BALL_ANGLE = math.pi / 3
# Initial distance from the minitaur's base to the ball.
INIT_BALL_DISTANCE = 5.0
# Tolerance used when validating that actions are within the action bound.
ACTION_EPS = 0.01
class MinitaurBallGymEnv(minitaur_gym_env.MinitaurGymEnv):
  """The gym environment for the minitaur and a ball.
  It simulates a minitaur (a quadruped robot) and a ball. The state space
  includes the angle and distance of the ball relative to minitaur's base.
  The action space is a steering command. The reward function is based
  on how far the ball is relative to the minitaur's base.
  """

  def __init__(self,
               urdf_root=pybullet_data.getDataPath(),
               self_collision_enabled=True,
               pd_control_enabled=False,
               leg_model_enabled=True,
               on_rack=False,
               render=False):
    """Initialize the minitaur and ball gym environment.
    Args:
      urdf_root: The path to the urdf data folder.
      self_collision_enabled: Whether to enable self collision in the sim.
      pd_control_enabled: Whether to use PD controller for each motor.
      leg_model_enabled: Whether to use a leg motor to reparameterize the action
        space.
      on_rack: Whether to place the minitaur on rack. This is only used to debug
        the walking gait. In this mode, the minitaur's base is hanged midair so
        that its walking gait is clearer to visualize.
      render: Whether to render the simulation.
    """
    super(MinitaurBallGymEnv, self).__init__(urdf_root=urdf_root,
                                             self_collision_enabled=self_collision_enabled,
                                             pd_control_enabled=pd_control_enabled,
                                             leg_model_enabled=leg_model_enabled,
                                             on_rack=on_rack,
                                             render=render)
    # Camera placement used when rendering the environment.
    self._cam_dist = 2.0
    self._cam_yaw = -70
    self._cam_pitch = -30
    # Single steering command in [-1, 1]; observation is [angle, distance]
    # of the ball relative to the base.
    self.action_space = spaces.Box(np.array([-1]), np.array([1]))
    self.observation_space = spaces.Box(np.array([-math.pi, 0]), np.array([math.pi, 100]))

  def reset(self):
    """Resets the simulation and respawns the ball at a random angle.

    Returns:
      The initial observation [relative angle, distance to ball].
    """
    self._ball_id = 0
    super(MinitaurBallGymEnv, self).reset()
    self._init_ball_theta = random.uniform(-INIT_BALL_ANGLE, INIT_BALL_ANGLE)
    self._init_ball_distance = INIT_BALL_DISTANCE
    self._ball_pos = [
        self._init_ball_distance * math.cos(self._init_ball_theta),
        self._init_ball_distance * math.sin(self._init_ball_theta), 1
    ]
    self._ball_id = self._pybullet_client.loadURDF(
        "%s/sphere_with_restitution.urdf" % self._urdf_root, self._ball_pos)
    return self._get_observation()

  def _get_observation(self):
    """Computes the ball's angle and distance in the minitaur's base frame."""
    world_translation_minitaur, world_rotation_minitaur = (
        self._pybullet_client.getBasePositionAndOrientation(self.minitaur.quadruped))
    world_translation_ball, world_rotation_ball = (
        self._pybullet_client.getBasePositionAndOrientation(self._ball_id))
    # Invert the base pose so the ball position can be expressed in the
    # minitaur's local frame.
    minitaur_translation_world, minitaur_rotation_world = (self._pybullet_client.invertTransform(
        world_translation_minitaur, world_rotation_minitaur))
    minitaur_translation_ball, _ = (self._pybullet_client.multiplyTransforms(
        minitaur_translation_world, minitaur_rotation_world, world_translation_ball,
        world_rotation_ball))
    distance = math.sqrt(minitaur_translation_ball[0]**2 + minitaur_translation_ball[1]**2)
    angle = math.atan2(minitaur_translation_ball[0], minitaur_translation_ball[1])
    self._observation = [angle - math.pi / 2, distance]
    return self._observation

  def _transform_action_to_motor_command(self, action):
    """Validates the steering action and expands it to 8 motor commands.

    Raises:
      ValueError: If any action component is outside the action bound
        (plus ACTION_EPS tolerance).
    """
    if self._leg_model_enabled:
      for i, action_component in enumerate(action):
        if not (-self._action_bound - ACTION_EPS <= action_component <=
                self._action_bound + ACTION_EPS):
          raise ValueError("{}th action {} out of bounds.".format(i, action_component))
      action = self._apply_steering_to_locomotion(action)
      action = self.minitaur.ConvertFromLegModel(action)
    return action

  def _apply_steering_to_locomotion(self, action):
    # A hardcoded feedforward walking controller based on sine functions.
    # The steering command adds to the swing amplitude on one diagonal leg
    # pair and subtracts from the other, which turns the robot.
    amplitude_swing = 0.5
    amplitude_extension = 0.5
    speed = 200
    steering_amplitude = 0.5 * action[0]
    t = self.minitaur.GetTimeSinceReset()
    a1 = math.sin(t * speed) * (amplitude_swing + steering_amplitude)
    a2 = math.sin(t * speed + math.pi) * (amplitude_swing - steering_amplitude)
    a3 = math.sin(t * speed) * amplitude_extension
    a4 = math.sin(t * speed + math.pi) * amplitude_extension
    action = [a1, a2, a2, a1, a3, a4, a4, a3]
    return action

  def _distance_to_ball(self):
    """Returns the planar (x, y) distance between the base and the ball."""
    world_translation_minitaur, _ = (self._pybullet_client.getBasePositionAndOrientation(
        self.minitaur.quadruped))
    world_translation_ball, _ = (self._pybullet_client.getBasePositionAndOrientation(
        self._ball_id))
    distance = math.sqrt((world_translation_ball[0] - world_translation_minitaur[0])**2 +
                         (world_translation_ball[1] - world_translation_minitaur[1])**2)
    return distance

  def _goal_state(self):
    """True when the last observed distance is within the goal threshold."""
    return self._observation[1] < GOAL_DISTANCE_THRESHOLD

  def _reward(self):
    """Negative ball distance, plus GOAL_REWARD on success, scaled down."""
    reward = -self._observation[1]
    if self._goal_state():
      reward += GOAL_REWARD
    return reward * REWARD_SCALING

  def _termination(self):
    """The episode terminates as soon as the goal state is reached."""
    if self._goal_state():
      return True
    return False
|
import os
import pytest
from pyleus.cli.storm_cluster import _get_storm_cmd_env
from pyleus.cli.storm_cluster import STORM_JAR_JVM_OPTS
from pyleus.cli.storm_cluster import StormCluster
from pyleus.cli.storm_cluster import TOPOLOGY_BUILDER_CLASS
from pyleus.testing import mock
class TestGetStormCmdEnd(object):
    """Tests for _get_storm_cmd_env."""

    @pytest.fixture(autouse=True)
    def mock_os_environ(self, monkeypatch):
        # Isolate every test from the caller's real environment.
        monkeypatch.setattr(os, 'environ', {})

    def test_jvm_opts_unset(self):
        assert _get_storm_cmd_env(None) is None

    def test_jvm_opts_set(self):
        opts = "-Dfoo=bar"
        env = _get_storm_cmd_env(opts)
        assert env[STORM_JAR_JVM_OPTS] == opts
class TestStormCluster(object):
    """Tests for StormCluster command construction and job submission."""

    @pytest.fixture
    def cluster(self):
        # Sentinel arguments make pass-through wiring verifiable.
        return StormCluster(
            mock.sentinel.storm_cmd_path,
            mock.sentinel.nimbus_host,
            mock.sentinel.nimbus_port,
            mock.sentinel.verbose,
            mock.sentinel.jvm_opts,
        )

    def test__build_storm_cmd_no_port(self, cluster):
        cluster.nimbus_host = "test-host"
        cluster.nimbus_port = None
        cmd = cluster._build_storm_cmd(["a", "cmd"])
        expected = [mock.sentinel.storm_cmd_path, "a", "cmd", "-c",
                    "nimbus.host=test-host"]
        assert cmd == expected

    def test__build_storm_cmd_with_port(self, cluster):
        cluster.nimbus_host = "test-host"
        cluster.nimbus_port = 4321
        cmd = cluster._build_storm_cmd(["another", "cmd"])
        expected = [mock.sentinel.storm_cmd_path, "another", "cmd", "-c",
                    "nimbus.host=test-host", "-c",
                    "nimbus.thrift.port=4321"]
        assert cmd == expected

    def test_submit(self, cluster):
        with mock.patch.object(cluster, '_exec_storm_cmd', autospec=True) as mock_exec:
            cluster.submit(mock.sentinel.jar_path)
        mock_exec.assert_called_once_with(["jar", mock.sentinel.jar_path, TOPOLOGY_BUILDER_CLASS])
|
#
# The Multiverse Platform is made available under the MIT License.
#
# Copyright (c) 2012 The Multiverse Foundation
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated d | ocumentation
# files (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify,
# merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software
# is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
# OR OTHER DEALINGS IN THE SOFTWARE.
#
#
#!/usr/bin/python
import socket
import sys
import os
import xml.dom
import xml.dom.minidom
import httplib
import time
def parse_spooler(spooler_node):
    """Return the id attribute of the <task> under <answer><ok> in *spooler_node*."""
    node = spooler_node
    # Descend one level at a time, always taking the first matching element.
    for tag in ('answer', 'ok', 'task'):
        node = node.getElementsByTagName(tag)[0]
    return node.getAttribute('id')
def sendJob(filename):
    """
    Send a job to the render system, and parse the job number from the reply
    @param filename: the name of the xml file with the request data
    @return: the job id from the render machine or None if there was an error
    @rtype: string
    """
    addOrderStart = "<add_order job_chain=\"renderscenechain\"><xml_payload>"
    addOrderEnd = "</xml_payload></add_order>"
    hostname = "render1"
    port = 4446
    # open() instead of the deprecated builtin file(); close the handle
    # promptly instead of leaking it for the life of the call.
    f = open(filename)
    try:
        command = addOrderStart + f.read() + addOrderEnd
    finally:
        f.close()
    #create an INET, STREAMing socket
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        s.connect((hostname, port))
        s.send(command)
        reply = ""
        # accumulate until the closing </spooler> tag arrives
        while reply.find('</spooler>') == -1:
            reply = reply + s.recv(4096)
    finally:
        # previously the socket leaked if connect/recv raised
        s.close()
    # Right now, the reply contains a null
    # do some hackery to remove it
    null_index = reply.find('\0')
    if null_index != -1:
        reply = reply[0:null_index]
    print(reply)
    try:
        replyDom = xml.dom.minidom.parseString(reply)
        spooler_node = replyDom.getElementsByTagName('spooler')[0]
        answer_node = spooler_node.getElementsByTagName('answer')[0]
        ok_node = answer_node.getElementsByTagName('ok')[0]
        task_node = ok_node.getElementsByTagName('order')[0]
        task_id = task_node.getAttribute('id')
        return task_id
    except Exception:
        # narrowed from a bare except so KeyboardInterrupt/SystemExit
        # are no longer swallowed
        print('Unable to parse reply:')
        print(reply)
        return None
def sendRequests(folder, output_folder):
    """
    Submit every .xml job file in *folder* to the render system, wait for
    the renders to complete, then download the resulting PNGs into
    *output_folder* (named after the source xml files).
    """
    result_hostname = 'facebook.multiverse.net'
    result_port = 8087
    result_folder = 'machinima'
    files = os.listdir(folder)
    tasks = {}
    for filename in files:
        if filename.endswith('.xml'):
            task_id = sendJob(os.path.join(folder, filename))
            if task_id is not None:
                tasks[task_id] = filename[0:-4]  # strip off the .xml
                print('Render job %s submitted with task id %s' % (filename, task_id))
    # TODO: Automatically check the status of the postcards, and when they are ready,
    # pull them locally
    # sleep for 30 seconds, plus 20 seconds per postcard
    sleep_time = 30 + 20 * len(tasks)
    print('Sleeping for %d seconds' % sleep_time)
    time.sleep(sleep_time)
    conn = httplib.HTTPConnection(result_hostname, result_port)
    conn.connect()
    try:
        for key, value in tasks.items():
            conn.request('GET', '/%s/%s.png' % (result_folder, key))
            response = conn.getresponse()
            if response.status == 200:
                output_file = os.path.join(output_folder, '%s.png' % value)
                imgData = response.read()
                # PNG data is binary: write in 'wb' mode -- the previous 'w'
                # mode corrupts the image on platforms that translate
                # line endings.
                out = open(output_file, 'wb')
                out.write(imgData)
                out.close()
                print('Wrote image: %s' % output_file)
            else:
                print('Status = %d' % response.status)
                print(response.reason)
    finally:
        # close the connection even when a download fails
        conn.close()
# Command-line entry point: argv[1] is the folder of .xml job files,
# optional argv[2] is the output folder for the rendered images.
source_folder = ''
dest_folder = ''
if len(sys.argv) >= 2:
    source_folder = sys.argv[1]
    # default to setting dest folder to source folder
    dest_folder = sys.argv[1]
if len(sys.argv) >= 3:
    dest_folder = sys.argv[2]
# To generate sample poses:
#  sendXmlJobs.py sample_poses
# To generate sample postcards:
#  sendXmlJobs.py sample_postcards
sendRequests(source_folder, dest_folder)
|
# Ant
#
# Copyright (c) 2012, Gustav Tiger <gustav@tiger.name>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import collections
import threading
import logging
import Queue
from ant.base.ant import Ant
from ant.base.message import Message
from ant.easy.channel import Channel
from ant.easy.filter import wait_for_event, wait_for_response, wait_for_special
_logger = logging.getLogger("garmin.ant.easy.node")
class Node():
    """High-level wrapper around an ANT node.

    Owns the low-level Ant driver, runs it on a background thread, and
    routes responses, channel events and broadcast/burst data to the
    per-channel handlers and to callers blocked in the wait_for_* helpers.
    """
    def __init__(self):
        # condition + deque pairs act as simple blocking queues for
        # command responses and channel events
        self._responses_cond = threading.Condition()
        self._responses = collections.deque()
        self._event_cond = threading.Condition()
        self._events = collections.deque()
        # broadcast/burst payloads go through a thread-safe FIFO consumed
        # by _main()
        self._datas = Queue.Queue()
        self.channels = {}
        self.ant = Ant()
        self._running = True
        # background thread that pumps messages from the ANT driver
        self._worker_thread = threading.Thread(target=self._worker, name="ant.easy")
        self._worker_thread.start()
    def new_channel(self, ctype):
        """Create, register and assign a channel of type *ctype*.

        NOTE(review): the channel number is hard-coded to 0, so only one
        channel is effectively supported -- confirm intent.
        """
        channel = Channel(0, self, self.ant)
        self.channels[0] = channel
        channel._assign(ctype, 0x00)
        return channel
    def request_message(self, messageId):
        """Request *messageId* from the device and block for the reply."""
        _logger.debug("requesting message %#02x", messageId)
        self.ant.request_message(0, messageId)
        _logger.debug("done requesting message %#02x", messageId)
        return self.wait_for_special(messageId)
    def set_network_key(self, network, key):
        """Set the network key and block until the device acknowledges."""
        self.ant.set_network_key(network, key)
        return self.wait_for_response(Message.ID.SET_NETWORK_KEY)
    def wait_for_event(self, ok_codes):
        """Block until a channel event with one of *ok_codes* arrives."""
        return wait_for_event(ok_codes, self._events, self._event_cond)
    def wait_for_response(self, event_id):
        """Block until the response for *event_id* arrives."""
        return wait_for_response(event_id, self._responses, self._responses_cond)
    def wait_for_special(self, event_id):
        """Block until the special (requested) message *event_id* arrives."""
        return wait_for_special(event_id, self._responses, self._responses_cond)
    def _worker_response(self, channel, event, data):
        # called from the driver thread: enqueue and wake any waiter
        self._responses_cond.acquire()
        self._responses.append((channel, event, data))
        self._responses_cond.notify()
        self._responses_cond.release()
    def _worker_event(self, channel, event, data):
        # broadcast/burst data goes to the data queue; everything else is
        # treated as a channel event
        if event == Message.Code.EVENT_RX_BURST_PACKET:
            self._datas.put(('burst', channel, data))
        elif event == Message.Code.EVENT_RX_BROADCAST:
            self._datas.put(('broadcast', channel, data))
        else:
            self._event_cond.acquire()
            self._events.append((channel, event, data))
            self._event_cond.notify()
            self._event_cond.release()
    def _worker(self):
        # runs on the background thread: wire up callbacks and start the driver
        self.ant.response_function = self._worker_response
        self.ant.channel_event_function = self._worker_event
        # TODO: check capabilities
        self.ant.start()
    def _main(self):
        # dispatch loop: deliver queued broadcast/burst data to channels
        # until stop() clears _running; the 1s timeout keeps the loop
        # responsive to shutdown
        while self._running:
            try:
                (data_type, channel, data) = self._datas.get(True, 1.0)
                self._datas.task_done()
                if data_type == 'broadcast':
                    self.channels[channel].on_broadcast_data(data)
                elif data_type == 'burst':
                    self.channels[channel].on_burst_data(data)
                else:
                    _logger.warning("Unknown data type '%s': %r", data_type, data)
            except Queue.Empty as e:
                pass
    def start(self):
        """Run the dispatch loop on the calling thread until stop() is called."""
        self._main()
    def stop(self):
        """Stop the dispatch loop, the driver, and join the worker thread."""
        if self._running:
            _logger.debug("Stoping ant.easy")
            self._running = False
            self.ant.stop()
            self._worker_thread.join()
|
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in | the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
import re
import datetime
import sickbeard
import generic
import cookielib
import urllib
import requests
from sickbeard.bs4_parser import BS4Parser
from sickbeard.common import Quality
from sickbeard import logger
from sickbeard import show_name_helpers
from sickbeard import db
from sickbeard import helpers
from unidecode import unidecode
from sickbeard import classes
from sickbeard.helpers import sanitizeSceneName
class XthorProvider(generic.TorrentProvider):
def __init__(self):
    # "Xthor" is the provider name used in the UI and log messages.
    generic.TorrentProvider.__init__(self, "Xthor")
    self.supportsBacklog = True
    # Private tracker: an account is required to search or download.
    self.public = False
    self.cj = cookielib.CookieJar()
    self.url = "https://xthor.bz"
    # Placeholders: the quoted search term, then extra filter parameters.
    self.urlsearch = "https://xthor.bz/browse.php?search=\"%s\"%s"
    self.categories = "&searchin=title&incldead=0"
    # The remaining attributes are filled in from user configuration.
    self.enabled = False
    self.username = None
    self.password = None
    self.ratio = None
def isEnabled(self):
    """Return whether the user has enabled this provider."""
    return self.enabled
def imageName(self):
    """Return the provider icon filename used by the web UI."""
    return 'xthor.png'
def _get_season_search_strings(self, ep_obj):
    """Build season-level search strings for every known show name.

    The suffix depends on indexing style: air-date year for daily and
    sports shows, absolute number for anime, SXX otherwise.
    Returns a single-element list holding a {'Season': [...]} dict.
    """
    season_strings = []
    for show_name in set(show_name_helpers.allPossibleShowNames(self.show)):
        if ep_obj.show.air_by_date or ep_obj.show.sports:
            suffix = str(ep_obj.airdate).split('-')[0]
        elif ep_obj.show.anime:
            suffix = "%d" % ep_obj.scene_absolute_number
        else:
            suffix = 'S%02d' % int(ep_obj.scene_season)  # showName.SXX
        season_strings.append(show_name + '.' + suffix)
    return [{'Season': season_strings}]
def _get_episode_search_strings(self, ep_obj, add_string=''):
    """Build per-episode search strings for every known show name.

    Format depends on indexing style: air date for daily shows, air date
    plus month abbreviation for sports, absolute number for anime, and
    the configured SxxExx pattern otherwise.  *add_string* (e.g.
    'PROPER|REPACK') is appended only in the standard-episode branch.
    """
    search_string = {'Episode': []}
    if not ep_obj:
        return []
    if self.show.air_by_date:
        # Daily show: '-' replaced by '|' so any date separator matches.
        for show_name in set(show_name_helpers.allPossibleShowNames(self.show)):
            ep_string = sanitizeSceneName(show_name) + '.' + \
                str(ep_obj.airdate).replace('-', '|')
            search_string['Episode'].append(ep_string)
    elif self.show.sports:
        # Sports: air date plus abbreviated month name as alternates.
        for show_name in set(show_name_helpers.allPossibleShowNames(self.show)):
            ep_string = sanitizeSceneName(show_name) + '.' + \
                str(ep_obj.airdate).replace('-', '|') + '|' + \
                ep_obj.airdate.strftime('%b')
            search_string['Episode'].append(ep_string)
    elif self.show.anime:
        # Anime: scene absolute episode number.
        for show_name in set(show_name_helpers.allPossibleShowNames(self.show)):
            ep_string = sanitizeSceneName(show_name) + '.' + \
                "%i" % int(ep_obj.scene_absolute_number)
            search_string['Episode'].append(ep_string)
    else:
        # Standard show: SxxExx via the user's configured naming pattern.
        for show_name in set(show_name_helpers.allPossibleShowNames(self.show)):
            ep_string = sanitizeSceneName(show_name) + '.' + \
                sickbeard.config.naming_ep_type[2] % {'seasonnumber': ep_obj.scene_season,
                                                      'episodenumber': ep_obj.scene_episode} + ' %s' % add_string
            # Collapse whitespace to '.' per scene-naming convention.
            search_string['Episode'].append(re.sub('\s+', '.', ep_string))
    return [search_string]
def _get_title_and_url(self, item):
title, url = item
if title:
title = u'' + title
title = title.replace(' ', '.')
if url:
url = str(url).replace('&', '&')
return (title, url)
def getQuality(self, item, anime=False):
    """Derive a Quality value from a result's title (item[0])."""
    quality = Quality.sceneQuality(item[0], anime)
    return quality
def _doLogin(self):
    """Authenticate against Xthor, reusing an existing session cookie.

    :returns: True if already logged in or login succeeded, else False.
    """
    # A non-empty session cookie jar means a prior login is still valid.
    if any(requests.utils.dict_from_cookiejar(self.session.cookies).values()):
        return True
    login_params = {'username': self.username,
                    'password': self.password,
                    'submitme': 'X'
                    }
    logger.log('Performing authentication to Xthor', logger.DEBUG)
    response = self.getURL(self.url + '/takelogin.php', post_data=login_params, timeout=30)
    if not response:
        logger.log(u'Unable to connect to ' + self.name + ' provider.', logger.ERROR)
        return False
    # 'donate.php' only appears on pages served to a logged-in user.
    # (Removed the unreachable trailing 'return True' the original had
    # after this if/else: both branches already return.)
    if re.search('donate.php', response):
        logger.log(u'Login to ' + self.name + ' was successful.', logger.DEBUG)
        return True
    else:
        logger.log(u'Login to ' + self.name + ' was unsuccessful.', logger.DEBUG)
        return False
def _doSearch(self, search_params, search_mode='eponly', epcount=0, age=0, epObj=None):
    """Run the given search strings against the tracker and collect results.

    :param search_params: list of {'Season'|'Episode'|'RSS': [strings]} dicts
    :returns: list of (title, download_url) tuples
    """
    logger.log(u"_doSearch started with ..." + str(search_params), logger.DEBUG)
    results = []
    items = {'Season': [], 'Episode': [], 'RSS': []}
    # check for auth
    if not self._doLogin():
        return results
    for mode in search_params.keys():
        for search_string in search_params[mode]:
            # Python 2: fold accented characters to ASCII for the query.
            if isinstance(search_string, unicode):
                search_string = unidecode(search_string)
            searchURL = self.urlsearch % (urllib.quote(search_string), self.categories)
            logger.log(u"Search string: " + searchURL, logger.DEBUG)
            data = self.getURL(searchURL)
            if not data:
                continue
            with BS4Parser(data, features=["html5lib", "permissive"]) as html:
                resultsTable = html.find("table", { "class" : "table2 table-bordered2" })
                if resultsTable:
                    rows = resultsTable.findAll("tr")
                    for row in rows:
                        # A details.php link marks a real result row.
                        link = row.find("a",href=re.compile("details.php"))
                        if link:
                            title = link.text
                            logger.log(u"Xthor title : " + title, logger.DEBUG)
                            downloadURL = self.url + '/' + row.find("a",href=re.compile("download.php"))['href']
                            logger.log(u"Xthor download URL : " + downloadURL, logger.DEBUG)
                            item = title, downloadURL
                            items[mode].append(item)
        results += items[mode]
    return results
def seedRatio(self):
    """Return the user-configured seed ratio for this provider."""
    return self.ratio
def findPropers(self, search_date=datetime.datetime.today()):
results = []
myDB = db.DBConnection()
sqlResults = myDB.select(
'SELECT s.show_name, e.showid, e.season, e.episode, e.status, e.airdate FROM tv_episodes AS e' +
' INNER JOIN tv_shows AS s ON (e.showid = s.indexer_id)' +
' WHERE e.airdate >= ' + str(search_date.toordinal()) +
' AND (e.status IN (' + ','.join([str(x) for x in Quality.DOWNLOADED]) + ')' +
' OR (e.status IN (' + ','.join([str(x) for x in Quality.SNATCHED]) + ')))'
)
if not sqlResults:
return results
for sqlshow in sqlResults:
self.show = helpers.findCertainShow(sickbeard.showList, int(sqlshow["showid"]))
if self.show:
curEp = self.show.getEpisode(int(sqlshow["season"]), int(sqlshow["episode"]))
search_params = self._get_episode_search_strings(curEp, add_string='PROPER|REPACK')
for item in self._doSearc |
cense for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""Tests for qutebrowser.utils.urlmatch.
The tests are mostly inspired by Chromium's:
https://cs.chromium.org/chromium/src/extensions/common/url_pattern_unittest.cc
Currently not tested:
- The match_effective_tld attribute as it doesn't exist yet.
- Nested filesystem:// URLs as we don't have those.
- Unicode matching because QUrl doesn't like those URLs.
- Any other features we don't need, such as .GetAsString() or set operations.
"""
import re
import sys
import string
import pytest
import hypothesis
import hypothesis.strategies as hst
from PyQt5.QtCore import QUrl
from qutebrowser.utils import urlmatch
@pytest.mark.parametrize('pattern, error', [
    # Chromium: PARSE_ERROR_MISSING_SCHEME_SEPARATOR
    # ("http", "No scheme given"),
    ("http:", "Invalid port: Port is empty"),
    ("http:/", "Invalid port: Port is empty"),
    ("about://", "Pattern without path"),
    ("http:/bar", "Invalid port: Port is empty"),
    # Chromium: PARSE_ERROR_EMPTY_HOST
    ("http://", "Pattern without host"),
    ("http:///", "Pattern without host"),
    ("http:// /", "Pattern without host"),
    ("http://:1234/", "Pattern without host"),
    # Chromium: PARSE_ERROR_EMPTY_PATH
    # We deviate from Chromium and allow this for ease of use
    # ("http://bar", "..."),
    # Chromium: PARSE_ERROR_INVALID_HOST
    ("http://\0www/", "May not contain NUL byte"),
    # Chromium: PARSE_ERROR_INVALID_HOST_WILDCARD
    ("http://*foo/bar", "Invalid host wildcard"),
    ("http://foo.*.bar/baz", "Invalid host wildcard"),
    ("http://fo.*.ba:123/baz", "Invalid host wildcard"),
    ("http://foo.*/bar", "TLD wildcards are not implemented yet"),
    # Chromium: PARSE_ERROR_INVALID_PORT
    ("http://foo:/", "Invalid port: Port is empty"),
    ("http://*.foo:/", "Invalid port: Port is empty"),
    ("http://foo:com/",
     "Invalid port: invalid literal for int() with base 10: 'com'"),
    pytest.param("http://foo:123456/",
                 "Invalid port: Port out of range 0-65535",
                 marks=pytest.mark.skipif(
                     sys.hexversion < 0x03060000,
                     reason="Doesn't show an error on Python 3.5")),
    ("http://foo:80:80/monkey",
     "Invalid port: invalid literal for int() with base 10: '80:80'"),
    ("chrome://foo:1234/bar", "Ports are unsupported with chrome scheme"),
    # Additional tests
    ("http://[", "Invalid IPv6 URL"),
])
def test_invalid_patterns(pattern, error):
    """Each malformed pattern must raise ParseError with the exact message."""
    with pytest.raises(urlmatch.ParseError, match=re.escape(error)):
        urlmatch.UrlPattern(pattern)
@pytest.mark.parametrize('pattern, port', [
    ("http://foo:1234/", 1234),
    ("http://foo:1234/bar", 1234),
    ("http://*.foo:1234/", 1234),
    ("http://*.foo:1234/bar", 1234),
    ("http://*:1234/", 1234),
    ("http://*:*/", None),
    ("http://foo:*/", None),
    ("file://foo:1234/bar", None),
    # Port-like strings in the path should not trigger a warning.
    ("http://*/:1234", None),
    ("http://*.foo/bar:1234", None),
    ("http://foo/bar:1234/path", None),
    # We don't implement ALLOW_WILDCARD_FOR_EFFECTIVE_TLD yet.
    # ("http://*.foo.*/:1234", None),
])
def test_port(pattern, port):
    """The parsed pattern must expose the expected port (or None)."""
    parsed = urlmatch.UrlPattern(pattern)
    assert parsed._port == port
@pytest.mark.parametrize('pattern, path', [
    ("http://foo/", '/'),
    ("http://foo/*", None),
])
def test_parse_path(pattern, path):
    """A '/*' path parses to None (match anything); '/' stays literal."""
    parsed = urlmatch.UrlPattern(pattern)
    assert parsed._path == path
@pytest.mark.parametrize('pattern, scheme, host, path', [
    ("http://example.com", 'http', 'example.com', None),  # no path
    ("example.com/path", None, 'example.com', '/path'),  # no scheme
    ("example.com", None, 'example.com', None),  # no scheme and no path
    ("example.com:1234", None, 'example.com', None),  # no scheme/path but port
    ("data:monkey", 'data', None, 'monkey'),  # existing scheme
])
def test_lightweight_patterns(pattern, scheme, host, path):
    """Make sure we can leave off parts of an URL.

    This is a deviation from Chromium to make patterns more user-friendly.
    Omitted components parse to None rather than being an error.
    """
    up = urlmatch.UrlPattern(pattern)
    assert up._scheme == scheme
    assert up._host == host
    assert up._path == path
class TestMatchAllPagesForGivenScheme:

    """Pattern ``http://*/*``: any host, any path, http scheme only."""

    @pytest.fixture
    def up(self):
        return urlmatch.UrlPattern("http://*/*")

    def test_attrs(self, up):
        """Check the parsed internal attributes."""
        assert up._scheme == 'http'
        assert up._host is None
        assert up._match_subdomains
        assert not up._match_all
        assert up._path is None

    @pytest.mark.parametrize('url, expected', [
        ("http://google.com", True),
        ("http://yahoo.com", True),
        ("http://google.com/foo", True),
        ("https://google.com", False),
        ("http://74.125.127.100/search", True),
    ])
    def test_urls(self, up, url, expected):
        assert up.matches(QUrl(url)) == expected
class TestMatchAllDomains:

    """Pattern ``https://*/foo*``: any host, https only, path must start /foo."""

    @pytest.fixture
    def up(self):
        return urlmatch.UrlPattern("https://*/foo*")

    def test_attrs(self, up):
        """Check the parsed internal attributes."""
        assert up._scheme == 'https'
        assert up._host is None
        assert up._match_subdomains
        assert not up._match_all
        assert up._path == '/foo*'

    @pytest.mark.parametrize('url, expected', [
        ("https://google.com/foo", True),
        ("https://google.com/foobar", True),
        ("http://google.com/foo", False),
        ("https://google.com/", False),
    ])
    def test_urls(self, up, url, expected):
        assert up.matches(QUrl(url)) == expected
class TestMatchSubdomains:

    """Pattern ``http://*.google.com/foo*bar``: subdomain + glob path match."""

    @pytest.fixture
    def up(self):
        return urlmatch.UrlPattern("http://*.google.com/foo*bar")

    def test_attrs(self, up):
        """Check the parsed internal attributes."""
        assert up._scheme == 'http'
        assert up._host == 'google.com'
        assert up._match_subdomains
        assert not up._match_all
        assert up._path == '/foo*bar'

    @pytest.mark.parametrize('url, expected', [
        ("http://google.com/foobar", True),
        # FIXME The ?bar seems to be treated as path by GURL but as query by
        # QUrl.
        # ("http://www.google.com/foo?bar", True),
        ("http://monkey.images.google.com/foooobar", True),
        ("http://yahoo.com/foobar", False),
    ])
    def test_urls(self, up, url, expected):
        assert up.matches(QUrl(url)) == expected
class TestMatchGlobEscaping:

    """A backslash-escaped ``\\*`` in the path must match a literal '*'... or,
    as tested here, behave as written rather than as a glob wildcard."""

    @pytest.fixture
    def up(self):
        return urlmatch.UrlPattern(r"file:///foo-bar\*baz")

    def test_attrs(self, up):
        """Check the parsed internal attributes."""
        assert up._scheme == 'file'
        assert up._host is None
        assert not up._match_subdomains
        assert not up._match_all
        assert up._path == r'/foo-bar\*baz'

    @pytest.mark.parametrize('url, expected', [
        # We use - instead of ? so it doesn't get treated as query
        (r"file:///foo-bar\hellobaz", True),
        (r"file:///fooXbar\hellobaz", False),
    ])
    def test_urls(self, up, url, expected):
        assert up.matches(QUrl(url)) == expected
class TestMatchIpAddresses:

    """IP-address hosts: literal match works, subdomain wildcards do not."""

    @pytest.mark.parametrize('pattern, host, match_subdomains', [
        ("http://127.0.0.1/*", "127.0.0.1", False),
        ("http://*.0.0.1/*", "0.0.1", True),
    ])
    def test_attrs(self, pattern, host, match_subdomains):
        up = urlmatch.UrlPattern(pattern)
        assert up._scheme == 'http'
        assert up._host == host
        assert up._match_subdomains == match_subdomains
        assert not up._match_all
        assert up._path is None

    @pytest.mark.parametrize('pattern, expected', [
        ("http://127.0.0.1/*", True),
        # No subdomain matching is done with IPs
        ("http://*.0.0.1/*", False),
    ])
    def test_urls(self, pattern, expected):
        up = urlmatch.UrlPattern(pattern)
        assert up.matches(QUrl("http://127.0.0.1")) == expected
class TestMatchChromeUrls:
@pytest.fixture
def up(self):
return urlmatch.UrlPattern("chrome://favicon/*")
def test_attrs(self, up):
assert up._scheme == 'chrome'
assert up._host == 'favicon'
assert not up._ |
"""
PASSIVE Plugin for Tes | ting_for_SSL-TLS_(OWASP-CM-001)
"""
from framework.dependency_management.dependency_resolver import ServiceLocator
DESCRIPTION = "Third party resources"
def run(PluginInfo):
    """Render the list of third-party SSL/TLS online resources.

    Looks up the 'PassiveSSL' resources and hands them to the plugin
    helper for link-list formatting.
    """
    # Vuln search box to be built in core and reused in different plugins.
    passive_ssl_resources = ServiceLocator.get_component("resource").GetResources('PassiveSSL')
    helper = ServiceLocator.get_component("plugin_helper")
    return helper.ResourceLinkList('Online Resources', passive_ssl_resources)
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
This file is part of XBMC Mega Pack Addon.
Copyright (C) 2014 Wolverine (xbmcmegapack@gmail.com)
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Publi | c License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU | General Public License along
with this program. If not, see http://www.gnu.org/licenses/gpl-3.0.html
"""
class Countries_Greece():
    '''Class that manages this specific menu context.'''

    def open(self, plugin, menu):
        '''Fill the menu with every Greece-related xplugin entry.'''
        xplugin_kinds = ["Channels", "Events", "Live", "Movies", "Sports",
                         "TVShows"]
        greece_xplugins = plugin.get_xplugins(dictionaries=xplugin_kinds,
                                              countries=["Greece"])
        menu.add_xplugins(greece_xplugins)
# -*- coding: utf-8 -*-
# vim: set ts=4 et
import cgi
import requests
from six.moves.html_parser import HTMLParser
from plugin import *
content_types = (
'text/html',
'text/xml',
'application/xhtml+xml',
'application/xml'
)
class TitleParser(HTMLParser):
    """Extract a page title, preferring an og:title meta tag over <title>."""

    def __init__(self):
        HTMLParser.__init__(self)
        self.match = False   # next data chunk is the <title> text
        self.title = ''      # best title found so far

    def handle_starttag(self, tag, attrs):
        # og:title wins: once set, a later <title> never overrides it.
        if tag == 'meta' and ('property', 'og:title') in attrs:
            for name, value in attrs:
                if name == 'content':
                    self.title = value
        # Only capture <title> text while no title has been found yet.
        self.match = (tag == 'title' and not self.title)

    def handle_data(self, data):
        if not self.match:
            return
        self.title = data.strip()
        self.match = False
class Plugin(BasePlugin):
    """Reply with the page title (og:title preferred) of any URL posted."""

    default_priority = 1

    @hook
    def any_url(self, msg, domain, url):
        """Fetch *url*, parse out a title, and reply with it.

        Non-HTML/XML responses are ignored silently; timeouts get a
        'URL Timeout' reply.
        """
        # Fix: the original literal was malformed (mixed quote styles with
        # backslash continuations embedded literal '"' characters and
        # indentation whitespace into the header value).  Rebuilt with
        # implicit string concatenation.  Mimics Googlebot so sites serve
        # a plain, crawlable page.
        default_ua = ('Mozilla/5.0 AppleWebKit/537.36 (KHTML, like Gecko; '
                      'compatible; Googlebot/2.1; +http://www.google.com/bot.html) '
                      'Safari/537.36')
        user_agent = self.bot.config.get(self.name, 'user-agent', fallback=default_ua)
        headers = {
            'User-Agent': user_agent
        }
        try:
            r = requests.get(url, stream=True, headers=headers, timeout=10)
        except requests.exceptions.Timeout:
            # Broadened from ReadTimeout so connect timeouts are also
            # reported instead of crashing the hook.
            msg.reply('URL Timeout')
            return
        # .get() instead of [] so a missing Content-Type header does not
        # raise KeyError; an empty type simply fails the filter below.
        content_type, params = cgi.parse_header(r.headers.get('Content-Type', ''))
        if content_type not in content_types:
            return
        r.encoding = 'utf-8'
        if 'charset' in params:
            r.encoding = params['charset'].strip("'\"")
        parser = TitleParser()
        # Stream line by line and stop as soon as a title shows up.
        for line in r.iter_lines(chunk_size=1024, decode_unicode=True):
            parser.feed(line)
            if parser.title:
                break
        msg.reply('\x031,0URL\x03 %s' % parser.title)
|
# Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# https://developers.google.com/protocol-buffers/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS | IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Determine which implementation of the protobuf API is used in this process.
"""
import os
import sys
try:
  # pylint: disable=g-import-not-at-top
  from google.protobuf.internal import _api_implementation
  # The compile-time constants in the _api_implementation module can be used to
  # switch to a certain implementation of the Python API at build time.
  _api_version = _api_implementation.api_version
  _proto_extension_modules_exist_in_build = True
except ImportError:
  _api_version = -1  # Unspecified by compiler flags.
  _proto_extension_modules_exist_in_build = False
if _api_version == 1:
  raise ValueError('api_version=1 is no longer supported.')
if _api_version < 0:  # Still unspecified?
  try:
    # The presence of this module in a build allows the proto implementation to
    # be upgraded merely via build deps rather than a compiler flag or the
    # runtime environment variable.
    # pylint: disable=g-import-not-at-top
    from google.protobuf import _use_fast_cpp_protos
    # Work around a known issue in the classic bootstrap .par import hook.
    if not _use_fast_cpp_protos:
      raise ImportError('_use_fast_cpp_protos import succeeded but was None')
    del _use_fast_cpp_protos
    _api_version = 2
  except ImportError:
    if _proto_extension_modules_exist_in_build:
      if sys.version_info[0] >= 3:  # Python 3 defaults to C++ impl v2.
        _api_version = 2
      # TODO(b/17427486): Make Python 2 default to C++ impl v2.
_default_implementation_type = (
    'python' if _api_version <= 0 else 'cpp')
# This environment variable can be used to switch to a certain implementation
# of the Python API, overriding the compile-time constants in the
# _api_implementation module. Right now only 'python' and 'cpp' are valid
# values. Any other value will be ignored.
_implementation_type = os.getenv('PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION',
                                 _default_implementation_type)
# NOTE: intentional normalization — any value other than 'python'
# (including typos) selects the 'cpp' implementation.
if _implementation_type != 'python':
  _implementation_type = 'cpp'
# This environment variable can be used to switch between the two
# 'cpp' implementations, overriding the compile-time constants in the
# _api_implementation module. Right now only 1 and 2 are valid values. Any other
# value will be ignored.
_implementation_version_str = os.getenv(
    'PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION_VERSION', '2')
if _implementation_version_str != '2':
  raise ValueError(
      'unsupported PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION_VERSION: "' +
      _implementation_version_str + '" (supported versions: 2)'
      )
_implementation_version = int(_implementation_version_str)
# Usage of this function is discouraged. Clients shouldn't care which
# implementation of the API is in use. Note that there is no guarantee
# that differences between APIs will be maintained.
# Please don't use this function if possible.
def Type():
  """Return the selected protobuf API backend: 'python' or 'cpp'."""
  return _implementation_type
# See comment on 'Type' above.
def Version():
  """Return the C++ implementation version in use (currently always 2)."""
  return _implementation_version
|
ENTITY_CONDITIONS
from homeassistant.const import STATE_ON, STATE_OFF, CONF_PLATFORM
from homeassistant.setup import async_setup_component
import homeassistant.components.automation as automation
from homeassistant.helpers import device_registry
import homeassistant.util.dt as dt_util
from tests.common import (
MockConfigEntry,
async_mock_service,
mock_device_registry,
mock_registry,
async_get_device_automations,
async_get_device_automation_capabilities,
)
@pytest.fixture
def device_reg(hass):
    """Return an empty, loaded, device registry."""
    return mock_device_registry(hass)
@pytest.fixture
def entity_reg(hass):
    """Return an empty, loaded, entity registry."""
    return mock_registry(hass)
@pytest.fixture
def calls(hass):
    """Track calls to a mock service."""
    # Fixed docstring typo: "serivce" -> "service".
    return async_mock_service(hass, "test", "automation")
async def test_get_conditions(hass, device_reg, entity_reg):
    """Test we get the expected conditions from a binary_sensor."""
    platform = getattr(hass.components, f"test.{DOMAIN}")
    platform.init()
    config_entry = MockConfigEntry(domain="test", data={})
    config_entry.add_to_hass(hass)
    device_entry = device_reg.async_get_or_create(
        config_entry_id=config_entry.entry_id,
        connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
    )
    # Register one entity per supported device class on the same device.
    for device_class in DEVICE_CLASSES:
        entity_reg.async_get_or_create(
            DOMAIN,
            "test",
            platform.ENTITIES[device_class].unique_id,
            device_id=device_entry.id,
        )
    assert await async_setup_component(hass, DOMAIN, {DOMAIN: {CONF_PLATFORM: "test"}})
    # One condition dict is expected per (device class, condition type) pair.
    expected_conditions = [
        {
            "condition": "device",
            "domain": DOMAIN,
            "type": condition["type"],
            "device_id": device_entry.id,
            "entity_id": platform.ENTITIES[device_class].entity_id,
        }
        for device_class in DEVICE_CLASSES
        for condition in ENTITY_CONDITIONS[device_class]
    ]
    conditions = await async_get_device_automations(hass, "condition", device_entry.id)
    assert conditions == expected_conditions
async def test_get_condition_capabilities(hass, device_reg, entity_reg):
    """Test we get the expected capabilities from a binary_sensor condition."""
    config_entry = MockConfigEntry(domain="test", data={})
    config_entry.add_to_hass(hass)
    device_entry = device_reg.async_get_or_create(
        config_entry_id=config_entry.entry_id,
        connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
    )
    entity_reg.async_get_or_create(DOMAIN, "test", "5678", device_id=device_entry.id)
    # Every condition should offer only the optional "for" duration field.
    expected_capabilities = {
        "extra_fields": [
            {"name": "for", "optional": True, "type": "positive_time_period_dict"}
        ]
    }
    conditions = await async_get_device_automations(hass, "condition", device_entry.id)
    for condition in conditions:
        capabilities = await async_get_device_automation_capabilities(
            hass, "condition", condition
        )
        assert capabilities == expected_capabilities
async def test_if_state(hass, calls):
    """Test for turn_on and turn_off conditions."""
    platform = getattr(hass.components, f"test.{DOMAIN}")
    platform.init()
    assert await async_setup_component(hass, DOMAIN, {DOMAIN: {CONF_PLATFORM: "test"}})
    sensor1 = platform.ENTITIES["battery"]
    # Two automations on the same sensor: one gated on is_bat_low,
    # the other on is_not_bat_low.
    assert await async_setup_component(
        hass,
        automation.DOMAIN,
        {
            automation.DOMAIN: [
                {
                    "trigger": {"platform": "event", "event_type": "test_event1"},
                    "condition": [
                        {
                            "condition": "device",
                            "domain": DOMAIN,
                            "device_id": "",
                            "entity_id": sensor1.entity_id,
                            "type": "is_bat_low",
                        }
                    ],
                    "action": {
                        "service": "test.automation",
                        "data_template": {
                            "some": "is_on {{ trigger.%s }}"
                            % "}} - {{ trigger.".join(("platform", "event.event_type"))
                        },
                    },
                },
                {
                    "trigger": {"platform": "event", "event_type": "test_event2"},
                    "condition": [
                        {
                            "condition": "device",
                            "domain": DOMAIN,
                            "device_id": "",
                            "entity_id": sensor1.entity_id,
                            "type": "is_not_bat_low",
                        }
                    ],
                    "action": {
                        "service": "test.automation",
                        "data_template": {
                            "some": "is_off {{ trigger.%s }}"
                            % "}} - {{ trigger.".join(("platform", "event.event_type"))
                        },
                    },
                },
            ]
        },
    )
    await hass.async_block_till_done()
    assert hass.states.get(sensor1.entity_id).state == STATE_ON
    assert len(calls) == 0
    # Sensor is on: only the is_bat_low automation may fire.
    hass.bus.async_fire("test_event1")
    await hass.async_block_till_done()
    assert len(calls) == 1
    assert calls[0].data["some"] == "is_on event - test_event1"
    # Sensor turned off: now only the is_not_bat_low automation fires.
    hass.states.async_set(sensor1.entity_id, STATE_OFF)
    hass.bus.async_fire("test_event1")
    hass.bus.async_fire("test_event2")
    await hass.async_block_till_done()
    assert len(calls) == 2
    assert calls[1].data["some"] == "is_off event - test_event2"
async def test_if_fires_on_for_condition(hass, calls):
"""Test for firing if condition is on with delay."""
point1 = dt_util.utcnow()
point2 = point1 + timedelta(seconds=10)
point3 = point2 + timedelta(seconds=10)
platform = getattr(hass.components, f"test.{DOMAIN}")
platform.init()
assert await async_setup_component(hass, DOMAIN, {DOMAIN: {CONF_PLATFORM: "test"}})
sensor1 = platform.ENTITIES["battery"]
with patch("homeassistant.core.dt_util.utcnow") as mock_utcnow:
mock_utcnow.return_value = point1
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: [
{
"trigger": {"platform": "event", "event_type": "test_event1"},
"condition": {
"condition": "device",
"domain": DOMAIN,
"device_id": "",
"entity_id": sensor1.entity_id,
"type": "is_not_bat_low",
"for": {"seconds": 5},
},
"action": {
"service": "test.automation",
"data_template": {
"some": "is_off {{ trigger.%s }}"
% "}} - {{ trigger.".join(
("platform", "event.event_type")
)
},
},
}
]
},
)
await hass.async_block_till_done()
assert hass.states.get(sensor1.entity_id).state == STATE_ON
assert len(calls) == 0
hass.bus.async_fire("test_event1")
await h | ass.async_block_till_done()
assert len(calls) == 0
# Time travel 10 secs into the future
mock_utcnow.return_value = point2
hass.bus.async_fire("test_event1")
await hass.async_block_till_done()
assert len(calls) == 0
hass.states.async_set(sensor1.entity_id, STATE_OFF)
hass.bus.async_fire("test_event1")
await hass.asy | nc_block_till_done()
assert len(calls) == 0
# Time travel 20 secs into the future
mock_utcnow.ret |
from PyQt4.QtGui import QFileDialog
def get_pcv_filename():
    """Open the PCV file with a QFileDialog."""
    caption = "Open Pcoords graph"
    name_filter = "Pcoords Files (*.pgdl *.pcv)"
    return QFileDialog.getOpenFileName(None, caption, "", name_filter)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.