prefix stringlengths 0 918k | middle stringlengths 0 812k | suffix stringlengths 0 962k |
|---|---|---|
from __future__ import unicode_literals
from django.forms import MediaDefiningClass, Media
from django.forms.utils import flatatt
from django.utils.text import slugify
from django.utils.safestring import mark_safe
from django.utils.six import text_type
from django.utils.six import with_metaclass
from wagtail.utils.compat import render_to_string
from wagtail.wagtailcore import hooks
class MenuItem(with_metaclass(MediaDefiningClass)):
    """One entry in an admin menu; renders itself from a template."""
    template = 'wagtailadmin/shared/menu_item.html'

    def __init__(self, label, url, name=None, classnames='', attrs=None, order=1000):
        self.label = label
        self.url = url
        self.classnames = classnames
        self.name = name if name else slugify(text_type(label))
        self.order = order
        # Pre-render any extra HTML attributes once, up front.
        self.attr_string = flatatt(attrs) if attrs else ""

    def is_shown(self, request):
        """
        Whether this menu item should be shown for the given request; permission
        checks etc should go here. By default, menu items are shown all the time
        """
        return True

    def is_active(self, request):
        # Active whenever the current path lives under this item's URL.
        return request.path.startswith(self.url)

    def render_html(self, request):
        """Render this menu item to an HTML fragment for the given request."""
        context = {
            'name': self.name,
            'url': self.url,
            'classnames': self.classnames,
            'attr_string': self.attr_string,
            'label': self.label,
            'active': self.is_active(request),
        }
        return render_to_string(self.template, context, request=request)
class Menu(object):
    """A collection of MenuItems gathered via hooks and rendered as HTML.

    :param register_hook_name: name of the hook whose callables each return
        a MenuItem to include in this menu
    :param construct_hook_name: optional hook name; its callables are given
        (request, menu_items) and may modify the list before rendering
    """

    def __init__(self, register_hook_name, construct_hook_name=None):
        self.register_hook_name = register_hook_name
        self.construct_hook_name = construct_hook_name
        # _registered_menu_items will be populated on first access to the
        # registered_menu_items property. We can't populate it in __init__ because
        # we can't rely on all hooks modules to have been imported at the point that
        # we create the admin_menu and settings_menu instances
        self._registered_menu_items = None

    @property
    def registered_menu_items(self):
        if self._registered_menu_items is None:
            self._registered_menu_items = [fn() for fn in hooks.get_hooks(self.register_hook_name)]
        return self._registered_menu_items

    def menu_items_for_request(self, request):
        """Return the registered items that should be shown for this request."""
        return [item for item in self.registered_menu_items if item.is_shown(request)]

    def active_menu_items(self, request):
        return [item for item in self.menu_items_for_request(request) if item.is_active(request)]

    @property
    def media(self):
        # Combined form media of every registered menu item.
        media = Media()
        for item in self.registered_menu_items:
            media += item.media
        return media

    def render_html(self, request):
        """Render the visible menu items, sorted by their order attribute."""
        menu_items = self.menu_items_for_request(request)
        # provide a hook for modifying the menu, if construct_hook_name has been set
        if self.construct_hook_name:
            for fn in hooks.get_hooks(self.construct_hook_name):
                fn(request, menu_items)
        rendered_menu_items = []
        for item in sorted(menu_items, key=lambda i: i.order):
            try:
                rendered_menu_items.append(item.render_html(request))
            except TypeError:
                # fallback for older render_html methods that don't accept a request arg
                rendered_menu_items.append(item.render_html())
        return mark_safe(''.join(rendered_menu_items))
class SubmenuMenuItem(MenuItem):
    """A MenuItem which wraps an inner Menu object."""
    template = 'wagtailadmin/shared/menu_submenu_item.html'

    def __init__(self, label, menu, **kwargs):
        self.menu = menu
        super(SubmenuMenuItem, self).__init__(label, '#', **kwargs)

    @property
    def media(self):
        # The submenu toggle script plus whatever the wrapped menu needs.
        return Media(js=['wagtailadmin/js/submenu.js']) + self.menu.media

    def is_shown(self, request):
        # show the submenu if one or more of its children is shown
        return bool(self.menu.menu_items_for_request(request))

    def is_active(self, request):
        return bool(self.menu.active_menu_items(request))

    def render_html(self, request):
        context = {
            'name': self.name,
            'url': self.url,
            'classnames': self.classnames,
            'attr_string': self.attr_string,
            'menu_html': self.menu.render_html(request),
            'label': self.label,
            'request': request,
            'active': self.is_active(request),
        }
        return render_to_string(self.template, context, request=request)
# Shared Menu instances: the main admin menu (modifiable per-request via the
# construct_main_menu hook) and the settings submenu.
admin_menu = Menu(register_hook_name='register_admin_menu_item', construct_hook_name='construct_main_menu')
settings_menu = Menu(register_hook_name='register_settings_menu_item')
|
from django.db import models
class ThingItem(object):
    """A (value, display) pair that iterates and measures like a 2-tuple."""

    def __init__(self, value, display):
        self.value = value
        self.display = display

    def __iter__(self):
        # Yield value then display so the pair unpacks like a 2-tuple.
        return (x for x in [self.value, self.display])

    def __len__(self):
        return 2
class Things(object):
    """An iterable yielding a fixed pair of ThingItem choices."""

    def __iter__(self):
        choices = [ThingItem(1, 2), ThingItem(3, 4)]
        return iter(choices)
class ThingWithIterableChoices(models.Model):
    # Testing choices = Iterable of Iterables
    # See: https://code.djangoproject.com/ticket/20430
    thing = models.CharField(max_length=100, blank=True, choices=Things())
|
------------End of Important Marks-------------------------
#Library import
import subprocess
import socket
import os
import sys
import time
import random
import threading
import json
import Queue
import ipaddress
import resource
from jsoncomment import JsonComment
#Custom import
from SonarPulse import Pulse, PulseTarget
#-------------------------------------------------------------------
# Variables and Setting
#
#Error Exit Value
ERR = 1
# SVN keyword expansion; reported at startup.
Revision = "$Revision: 18 $"
# Define Default Configuration File
# Note that avoiding to use point symbol(meaning current directory in this context) in file
# path to assure the portability(we found Python scripts called by NEPI do not recognize this
# symbol)
# Refer to https://infohost.nmt.edu/tcc/help/pubs/python/web/new-str-format.html to
# get more information about Python string format's usage.
CURRENTDIR = os.path.dirname(os.path.realpath(__file__))+'/' # for example : /Users/qipengsong/Documents/First_LISP_measurement
ConfigFile = '{0}LISP-Sonar-Config.json'.format(CURRENTDIR)
#-------------------------------------------------------------------
# SubRoutines
#
######
# Logs Directory & Files Verification
#
def BootstrapFilesCheck(TimeStamp):
    """Ensure the root log directory and its date-based subtree exist.

    Creates LogRootDirectory/<year>/<month>/<day>/ for the given UNIX
    TimeStamp and returns that path. Exits the program with ERR if a
    directory cannot be created. Reads the module-global LogRootDirectory,
    which is populated from the configuration file at startup.
    """
    #Check if the root log directory exists, if not create it.
    itexists = os.path.isdir(LogRootDirectory)
    if itexists == False :
        try:
            os.makedirs(LogRootDirectory)
        except os.error:
            print '=====> Critical Error: Creating ' + LogRootDirectory
            sys.exit(ERR)
        print '\tRoot Log Dir. [Created]\t: ' + LogRootDirectory
    else:
        print '\tRoot Log Dir. [Found]\t: ' + LogRootDirectory
    #Get Date to check/create date-based directory tree
    rundate = time.gmtime(TimeStamp)
    DateDirectory = str(rundate.tm_year) + '/' + str(rundate.tm_mon) + '/' + str(rundate.tm_mday) +'/'
    #Check if the date-based sub-directory exists, if not create it.
    itexists = os.path.isdir(LogRootDirectory + DateDirectory)
    if itexists == False :
        try:
            os.makedirs(LogRootDirectory + DateDirectory)
        except os.error:
            print '=====> Critical Error: Creating ' + LogRootDirectory + DateDirectory
            sys.exit(ERR)
        print '\tDate Directory [Created]: ' + LogRootDirectory + DateDirectory
    else:
        print '\tDate Directory [Found]\t: ' + LogRootDirectory + DateDirectory
    return LogRootDirectory + DateDirectory
######
# Read a list from file shuffle the order and return it
#
def LoadList(FILE):
    """Read newline-separated entries from FILE, shuffle them, return the list.

    Exits the program with ERR if the file cannot be opened.
    """
    try:
        F = open( FILE, "r" )
    except IOError:
        print '=====> Critical Error:' + FILE + ' Not Found!!!'
        sys.exit(ERR)
    LLIST = F.read().split('\n')
    F.close()
    if LLIST.count('') > 0:
        #If closing empty line exists remove it
        # NOTE(review): remove('') drops only the FIRST empty entry; a file
        # with blank lines in the middle would keep the later ones — confirm
        # input files never contain interior blank lines.
        LLIST.remove('')
    # Randomize List so to not follow the same order at each experiment
    random.shuffle(LLIST)
    return LLIST
######
# Pulse Thread Class
#
class SonarThread (threading.Thread):
def __init__(self, threadID, tname, p | rqueue):
threading.Thread.__init__(self)
self.threadID = threadID
self.name = tname
self.prqueue = prqueue
def run(self):
while True:
item = self.prqueue.get()
if item is None:
break # End Loop and finish thread
#print 'Thread ' + self.name + ' Working on: ' + str(item.eid) + '\n'
Evalue = Pulse(item)
if not (Evalue is None):
print '\tError \t(!)\t\t: ' + st | r(Evalue)
print >> sys.stderr, 'LISP-Sonar Error: ' + str(Evalue)
#-------------------------------------------------------------------
# Main
#
# Timestamp identifying this run; also selects the date-based log directory.
TimeStamp = int(time.time())
print 'LISP-Sonar \t\t\t: ' + Revision
print '\tRun \t\t\t: '+ time.strftime("%d.%m.%Y %H:%M:%S")
# Identify Machine and Date to Mark Logs
HOST = socket.gethostname()
print '\tHost Name \t\t: ' + HOST
# Read Configuration File
if (len(sys.argv) > 2):
    print '=====> Exiting! Too many arguments... \n'
    sys.exit(ERR)
if (len(sys.argv) == 2):
    #Always take the first argument as configuration file
    ConfigFile = str(sys.argv[1])
try:
    JsonFile = open(ConfigFile)
except:
    print '=====> Exiting! Error opening configuration file: '+ConfigFile+'\n'
    sys.exit(ERR)
Cfg = json.load(JsonFile)
JsonFile.close()
try:
    # Remember to replace "CURRENTDIR" with real current directory path
    # for example, for item "DirsConfig"
    # "DirsConfig":
    # {
    #     "LogRootDirectory":"CURRENTDIR/SonarOutput/",
    #     "MRListDirectory":"CURRENTDIR",
    #     "MRListFile":"MR-Current-List.txt",
    #     "EIDListDirectory":"CURRENTDIR",
    #     "EIDListFile":"EID-Current-List.txt"
    # },
    # Replace "CURRENTDIR" with variable CURRENTDIR defined at the beginning
    LogRootDirectory = Cfg["DirsConfig"]["LogRootDirectory"].replace("$CURRENTDIR", CURRENTDIR)
    MRListDirectory = Cfg["DirsConfig"]["MRListDirectory"].replace("$CURRENTDIR", CURRENTDIR)
    MRListFile = Cfg["DirsConfig"]["MRListFile"]
    EIDListDirectory = Cfg["DirsConfig"]["EIDListDirectory"].replace("$CURRENTDIR", CURRENTDIR)
    EIDListFile = Cfg["DirsConfig"]["EIDListFile"]
    SpawnTimeGap = Cfg["ThreadSpawn"]["TimeGap"]
    SpawnRandomization = Cfg["ThreadSpawn"]["Randomization"]
    SpawnMaxThreads = Cfg["ThreadSpawn"]["MaxThreads"]
    LIGRequestTimeOut = Cfg["Lig"]["TimeOut"]
    LIGMaxRetries = Cfg["Lig"]["MaxTries"]
    LIGSrcAddr = Cfg["Lig"]["SourceAddress"]
except KeyError:
    print '=====> Exiting! Configuration Error for '+str(sys.exc_value)+' in file '+ConfigFile+'\n'
    sys.exit(ERR)
# Final directory where results of this instance will be written
InstanceDirectory = BootstrapFilesCheck(TimeStamp)
#Load and shuffle list of Map-Resolvers
MRList = LoadList(MRListDirectory + MRListFile)
print '\tMR List File \t\t: ' + MRListDirectory + MRListFile
print '\tMR Loaded \t\t: ' + str(len(MRList))
#Load and shuffle list of EID to lookup
EIDList = LoadList(EIDListDirectory + EIDListFile)
print '\tEID List File \t\t: ' + EIDListDirectory + EIDListFile
print '\tEID Loaded \t\t: ' + str(len(EIDList))
# CHeck Valid Source Address
if (LIGSrcAddr != "None"):
    try:
        LIGSrcIP = ipaddress.ip_address(LIGSrcAddr)
    except ValueError:
        print 'Not Valid Source Address: ' + LIGSrcAddr
        sys.exit(ERR)
else:
    LIGSrcIP = None
print '\tQuery Source Address \t: ' + str(LIGSrcIP)
# Spawn sonar threads
threads = []
threadID = 1
# Raise the open-file soft limit: each worker may hold several descriptors.
resource.setrlimit(resource.RLIMIT_NOFILE,(SpawnMaxThreads*4+256, resource.getrlimit(resource.RLIMIT_NOFILE)[1]))
PulseRequestQueue = Queue.Queue(SpawnMaxThreads)
for t in range(SpawnMaxThreads):
    # Create the pool of threads
    tName = 'Sonar Thread ' + `threadID`
    thread = SonarThread(threadID, tName, PulseRequestQueue)
    thread.start()
    threads.append(thread)
    threadID += 1
print '\tThreads [Now Working]\t: ' + str(SpawnMaxThreads) + ' [' + str(SpawnTimeGap) + ' +/- ' + str(SpawnRandomization) + ']'
# NOTE(review): no None sentinel is enqueued after the loop below, so worker
# threads never leave run() once the work is done — confirm whether the
# script is expected to be terminated externally.
for EID in EIDList:
    for MR in MRList:
        # Validate Addresses
        try:
            EIDIP = ipaddress.ip_address(EID)
        except ValueError:
            print 'Not Valid EID address: ' + str(EID)
            print >> sys.stderr, 'Not Valid EID address: ' + str(EID)
            continue
        try:
            MRIP = ipaddress.ip_address(MR)
        except ValueError:
            print 'Not Valid MR address: ' + str(MR)
            print >> sys.stderr, 'Not Valid MR address: ' + str(MR)
            continue
        # Put Metadata for Pulse Request in the queue only if
        # LIGSrcIP and MR are in the same family.
        if (LIGSrcIP and (LIGSrcIP.version != MRIP.version)):
            continue
        Target = PulseTarget(HOST, TimeStamp, EIDIP, MRIP, InstanceDirectory, LIGRequestTimeOut, LIGMaxRetries, LIGSrcIP)
        PulseRequestQueue.put(Target)
        # Let's put some more randomization just avoiding threads to trigger
        # requests at the same time
        time.sleep(SpawnTimeGap + random.uniform(-SpawnRandomization, SpawnRandomization))
|
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import os
import warnings
from pymatgen.core.structure import Structure
from pymatgen.core.units import Ha_to_eV, bohr_to_ang
from pymatgen.io.abinit.abiobjects import *
from pymatgen.util.testing import PymatgenTest
class LatticeFromAbivarsTest(PymatgenTest):
    """Tests for lattice_from_abivars / structure_to_abivars conversions."""

    def test_rprim_acell(self):
        """Lattices built from (acell, rprim) and (acell, angdeg) agree."""
        # acell is in Bohr, so volumes convert by bohr_to_ang**3.
        l1 = lattice_from_abivars(acell=3 * [10], rprim=np.eye(3))
        self.assertAlmostEqual(l1.volume, bohr_to_ang ** 3 * 1000)
        assert l1.angles == (90, 90, 90)
        l2 = lattice_from_abivars(acell=3 * [10], angdeg=(90, 90, 90))
        assert l1 == l2
        l2 = lattice_from_abivars(acell=3 * [8], angdeg=(60, 60, 60))
        # Reference rprimd matrix produced by Abinit (values in Bohr).
        abi_rprimd = (
            np.reshape(
                [
                    4.6188022,
                    0.0000000,
                    6.5319726,
                    -2.3094011,
                    4.0000000,
                    6.5319726,
                    -2.3094011,
                    -4.0000000,
                    6.5319726,
                ],
                (3, 3),
            )
            * bohr_to_ang
        )
        self.assertArrayAlmostEqual(l2.matrix, abi_rprimd)
        l3 = lattice_from_abivars(acell=[3, 6, 9], angdeg=(30, 40, 50))
        abi_rprimd = (
            np.reshape(
                [
                    3.0000000,
                    0.0000000,
                    0.0000000,
                    3.8567257,
                    4.5962667,
                    0.0000000,
                    6.8944000,
                    4.3895544,
                    3.7681642,
                ],
                (3, 3),
            )
            * bohr_to_ang
        )
        self.assertArrayAlmostEqual(l3.matrix, abi_rprimd)
        # Giving both rprim and angdeg, or invalid angles, must raise.
        with self.assertRaises(ValueError):
            lattice_from_abivars(acell=[1, 1, 1], angdeg=(90, 90, 90), rprim=np.eye(3))
        with self.assertRaises(ValueError):
            lattice_from_abivars(acell=[1, 1, 1], angdeg=(-90, 90, 90))

    def test_znucl_typat(self):
        """Test the order of typat and znucl in the Abinit input and enforce_typat, enforce_znucl."""
        # Ga Ga1 1 0.33333333333333 0.666666666666667 0.500880 1.0
        # Ga Ga2 1 0.66666666666667 0.333333333333333 0.000880 1.0
        # N N3 1 0.333333333333333 0.666666666666667 0.124120 1.0
        # N N4 1 0.666666666666667 0.333333333333333 0.624120 1.0
        gan = Structure.from_file(os.path.join(PymatgenTest.TEST_FILES_DIR, "abinit", "gan.cif"))
        # By default, znucl is filled using the first new type found in sites.
        def_vars = structure_to_abivars(gan)
        def_znucl = def_vars["znucl"]
        self.assertArrayEqual(def_znucl, [31, 7])
        def_typat = def_vars["typat"]
        self.assertArrayEqual(def_typat, [1, 1, 2, 2])
        # But it's possible to enforce a particular value of typat and znucl.
        enforce_znucl = [7, 31]
        enforce_typat = [2, 2, 1, 1]
        enf_vars = structure_to_abivars(gan, enforce_znucl=enforce_znucl, enforce_typat=enforce_typat)
        self.assertArrayEqual(enf_vars["znucl"], enforce_znucl)
        self.assertArrayEqual(enf_vars["typat"], enforce_typat)
        self.assertArrayEqual(def_vars["xred"], enf_vars["xred"])
        assert [s.symbol for s in species_by_znucl(gan)] == ["Ga", "N"]
        # Both orderings must map each site to the same atomic number.
        for itype1, itype2 in zip(def_typat, enforce_typat):
            assert def_znucl[itype1 - 1] == enforce_znucl[itype2 - 1]
        # enforce_znucl and enforce_typat must be given together.
        with self.assertRaises(Exception):
            structure_to_abivars(gan, enforce_znucl=enforce_znucl, enforce_typat=None)
class SpinModeTest(PymatgenTest):
    """Checks SpinMode construction, instance caching and serialization."""

    def test_base(self):
        pol = SpinMode.as_spinmode("polarized")
        pol_again = SpinMode.as_spinmode("polarized")
        unpol = SpinMode.as_spinmode("unpolarized")
        pol.to_abivars()
        # as_spinmode caches instances, so equal modes are the same object.
        self.assertTrue(pol is pol_again)
        self.assertTrue(pol == pol_again)
        self.assertTrue(pol != unpol)
        # Test pickle
        self.serialize_with_pickle(pol)
        # Test dict methods
        self.assertMSONable(pol)
        self.assertMSONable(unpol)
class SmearingTest(PymatgenTest):
    """Checks Smearing parsing, equality, truthiness and serialization."""

    def test_base(self):
        fd1ev = Smearing.as_smearing("fermi_dirac:1 eV")
        fd1ev.to_abivars()
        self.assertTrue(fd1ev)
        # The same smearing expressed in Hartree must compare equal.
        fd_ha = Smearing.as_smearing("fermi_dirac:" + str(1.0 / Ha_to_eV))
        self.assertTrue(fd_ha == fd1ev)
        nosmear = Smearing.nosmearing()
        assert nosmear == Smearing.as_smearing("nosmearing")
        # "nosmearing" is falsy; any real smearing is truthy.
        self.assertFalse(nosmear)
        self.assertTrue(nosmear != fd1ev)
        self.assertMSONable(nosmear)
        restored = Smearing.from_dict(fd1ev.as_dict())
        self.assertTrue(restored == fd1ev)
        # Test pickle
        self.serialize_with_pickle(fd1ev)
        # Test dict methods
        self.assertMSONable(fd1ev)
class ElectronsAlgorithmTest(PymatgenTest):
    """Checks ElectronsAlgorithm abivars export and serialization."""

    def test_base(self):
        algo = ElectronsAlgorithm(nstep=70)
        # Exercise the conversion; the returned dict is not inspected.
        algo.to_abivars()
        # Test pickle
        self.serialize_with_pickle(algo)
        # Test dict methods
        self.assertMSONable(algo)
|
class ElectronsTest(PymatgenTest):
    """Checks Electrons defaults, abivars export and serialization."""

    def test_base(self):
        default_electrons = Electrons()
        # The default is a spin-polarized calculation.
        self.assertTrue(default_electrons.nsppol == 2)
        self.assertTrue(default_electrons.nspinor == 1)
        self.assertTrue(default_electrons.nspden == 2)
        # Exercise the conversion; the returned dict is not inspected.
        default_electrons.to_abivars()
        # new = Electron.from_dict(default_electrons.as_dict())
        # Test pickle
        self.serialize_with_pickle(default_electrons, test_eq=False)
        custom_electrons = Electrons(
            spin_mode="unpolarized",
            smearing="marzari4:0.2 eV",
            algorithm=ElectronsAlgorithm(nstep=70),
            nband=10,
            charge=1.0,
            comment="Test comment",
        )
        # Test dict methods
        self.assertMSONable(custom_electrons)
class KSamplingTest(PymatgenTest):
    """Checks k-point sampling factory methods and serialization."""

    def test_base(self):
        mp_grid = KSampling.monkhorst((3, 3, 3), (0.5, 0.5, 0.5), 0, False, False)
        gamma_grid = KSampling.gamma_centered((3, 3, 3), False, False)
        mp_grid.to_abivars()
        # Test dict methods
        self.assertMSONable(mp_grid)
        self.assertMSONable(gamma_grid)
class RelaxationTest(PymatgenTest):
    """Checks RelaxationMethod factory methods and serialization."""

    def test_base(self):
        relax_all = RelaxationMethod.atoms_and_cell()
        relax_ions = RelaxationMethod.atoms_only()
        relax_all.to_abivars()
        # Test dict methods
        self.assertMSONable(relax_all)
        self.assertMSONable(relax_ions)
class PPModelTest(PymatgenTest):
    """Checks PPModel parsing, equality, truthiness and serialization."""

    def test_base(self):
        godby = PPModel.as_ppmodel("godby:12 eV")
        godby.to_abivars()
        self.assertTrue(godby)
        # The same model expressed in Hartree must compare equal.
        same_godby = PPModel.as_ppmodel("godby:" + str(12.0 / Ha_to_eV))
        self.assertTrue(same_godby == godby)
        # The "no plasmon-pole model" sentinel is falsy and differs from real models.
        noppm = PPModel.get_noppmodel()
        self.assertFalse(noppm)
        self.assertTrue(noppm != godby)
        new_godby = PPModel.from_dict(godby.as_dict())
        self.assertTrue(new_godby == godby)
        # Test pickle
        self.serialize_with_pickle(godby)
        # Test dict methods
        self.assertMSONable(godby)
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
{
    'name': 'Point of Sale',
    'version': '1.0.1',
    'category': 'Point Of Sale',
    'sequence': 20,
    'summary': 'Touchscreen Interface for Shops',
    'description': """
Quick and Easy sale process
===========================
This module allows you to manage your shop sales very easily with a fully web based touchscreen interface.
It is compatible with all PC tablets and the iPad, offering multiple payment methods.
Product selection can be done in several ways:
* Using a barcode reader
* Browsing through categories of products or via a text search.
Main Features
-------------
* Fast encoding of the sale
* Choose one payment method (the quick way) or split the payment between several payment methods
* Computation of the amount of money to return
* Create and confirm the picking list automatically
* Allows the user to create an invoice automatically
* Refund previous sales
""",
    'depends': ['stock_account', 'barcodes'],
    'data': [
        'security/point_of_sale_security.xml',
        'security/ir.model.access.csv',
        'data/default_barcode_patterns.xml',
        'wizard/pos_box.xml',
        'wizard/pos_details.xml',
        'wizard/pos_discount.xml',
        'wizard/pos_open_statement.xml',
        'wizard/pos_payment.xml',
        'views/pos_templates.xml',
        'views/point_of_sale_template.xml',
        'views/point_of_sale_report.xml',
        'views/point_of_sale_view.xml',
        'views/pos_order_view.xml',
        'views/product_view.xml',
        'views/pos_category_view.xml',
        'views/account_journal_view.xml',
        'views/pos_config_view.xml',
        'views/pos_session_view.xml',
        'views/point_of_sale_sequence.xml',
        'data/point_of_sale_data.xml',
        'views/pos_order_report_view.xml',
        'views/account_statement_view.xml',
        'views/account_statement_report.xml',
        'views/res_users_view.xml',
        'views/res_partner_view.xml',
        'views/res_config_view.xml',
        'views/report_statement.xml',
        'views/report_userlabel.xml',
        'views/report_saledetails.xml',
        'views/point_of_sale.xml',
        'views/point_of_sale_dashboard.xml',
    ],
    'demo': [
        'data/point_of_sale_demo.xml',
    ],
    'installable': True,
    'application': True,
    'qweb': ['static/src/xml/pos.xml'],
    'website': 'https://www.odoo.com/page/point-of-sale',
}
|
dFound import settings
from codex.baseview import BaseView
from wechat.models import Lost, Found, User
__author__ = "Epsirom"
class WeChatHandler(object):
    """Base class for WeChat message handlers.

    Subclasses implement check() (does this handler match the incoming
    message?) and handle() (produce the reply XML).
    """
    logger = logging.getLogger('WeChat')

    def __init__(self, view, msg, user):
        """
        :type view: WeChatView
        :type msg: dict
        :type user: User or None
        """
        self.input = msg
        self.user = user
        self.view = view

    def check(self):
        raise NotImplementedError('You should implement check() in sub-class of WeChatHandler')

    def handle(self):
        raise NotImplementedError('You should implement handle() in sub-class of WeChatHandler')

    def get_context(self, **extras):
        # Swap From/To so the reply goes back to the message sender.
        return dict(
            FromUserName=self.input['ToUserName'],
            ToUserName=self.input['FromUserName'],
            **extras
        )

    def reply_text(self, content):
        """Render a plain-text reply."""
        return get_template('text.xml').render(self.get_context(
            Content=content
        ))

    def reply_news(self, articles):
        """Render a news (article list) reply; WeChat caps it at 10 articles."""
        if len(articles) > 10:
            self.logger.warn('Reply with %d articles, keep only 10', len(articles))
        return get_template('news.xml').render(self.get_context(
            Articles=articles[:10]
        ))

    def reply_single_news(self, article):
        return self.reply_news([article])

    def get_message(self, name, **data):
        """Render messages/<name>.html with the handler, user and extra data."""
        if name.endswith('.html'):
            name = name[: -5]
        return get_template('messages/' + name + '.html').render(dict(
            handler=self, user=self.user, **data
        ))

    def is_msg_type(self, check_type):
        return self.input['MsgType'] == check_type

    def is_text(self, *texts):
        # Case-insensitive exact match against any of the given texts.
        return self.is_msg_type('text') and (self.input['Content'].lower() in texts)

    def is_event_click(self, *event_keys):
        return self.is_msg_type('event') and (self.input['Event'] == 'CLICK') and (self.input['EventKey'] in event_keys)

    def is_event(self, *events):
        return self.is_msg_type('event') and (self.input['Event'] in events)

    def is_text_command(self, *commands):
        # Match on the first whitespace-separated word of the message.
        return self.is_msg_type('text') and ((self.input['Content'].split() or [None])[0] in commands)

    def url_help(self):
        return settings.get_url('u/help')

    def url_lost_list(self):
        return settings.get_url('u/lost/list', {'user': self.user.open_id})

    def url_lost_new(self):
        return settings.get_url('u/lost/new', {'user': self.user.open_id})

    def url_found_list(self):
        return settings.get_url('u/found/list', {'user': self.user.open_id})

    def url_mine(self):
        return settings.get_url('u/mine', {'user': self.user.open_id})
class WeChatEmptyHandler(WeChatHandler):
    """Fallback handler: matches everything and replies with a busy notice."""

    def check(self):
        return True

    def handle(self):
        return self.reply_text('The server is busy')
class WeChatError(Exception):
    """Error returned by the WeChat API, carrying its errcode and errmsg."""

    def __init__(self, errcode, errmsg, *args, **kwargs):
        self.errcode = errcode
        self.errmsg = errmsg
        super(WeChatError, self).__init__(errmsg, *args, **kwargs)

    def __repr__(self):
        template = '[errcode=%d] %s'
        return template % (self.errcode, self.errmsg)
class Sign:
    """Compute the WeChat JS-SDK signature for a jsapi_ticket/url pair."""

    def __init__(self, jsapi_ticket, url):
        self.ret = {
            'jsapi_ticket': jsapi_ticket,
            'nonceStr': self.__create_nonce_str(),
            'timestamp': self.__create_timestamp(),
            'url': url,
        }

    def __create_nonce_str(self):
        # 15 random alphanumeric characters.
        alphabet = string.ascii_letters + string.digits
        return ''.join(random.choice(alphabet) for _ in range(15))

    def __create_timestamp(self):
        return int(time.time())

    def sign(self):
        # Fields are joined as key=value pairs, lowercase keys, sorted by key,
        # then SHA1-hashed per the JS-SDK signing scheme.
        query = '&'.join('%s=%s' % (key.lower(), self.ret[key]) for key in sorted(self.ret))
        print(query)
        self.ret['signature'] = hashlib.sha1(query.encode('utf-8')).hexdigest()
        return self.ret
class WeChatLib(object):
    """Thin client for the WeChat HTTP API: signature check, access-token and
    jsapi-ticket caching, JS-SDK config, and menu get/set."""
    logger = logging.getLogger('wechatlib')
    # Class-level caches shared by every instance; refreshed lazily once the
    # stored expiry time has passed.
    access_token = ''
    access_token_expire = datetime.datetime.fromtimestamp(0)
    jsapi_ticket = ''
    jsapi_ticket_expire = datetime.datetime.fromtimestamp(0)
    token = WECHAT_TOKEN
    appid = WECHAT_APPID
    secret = WECHAT_SECRET

    def __init__(self, token, appid, secret):
        super(WeChatLib, self).__init__()
        self.token = token
        self.appid = appid
        self.secret = secret

    def check_signature(self, signature, timestamp, nonce):
        # Server-verification check: sha1 of the sorted concatenation of
        # token, timestamp and nonce must match the given signature.
        tmp_list = sorted([self.token, timestamp, nonce])
        tmpstr = hashlib.sha1(''.join(tmp_list).encode('utf-8')).hexdigest()
        return tmpstr == signature

    @classmethod
    def _http_get(cls, url):
        req = urllib.request.Request(url=url)
        res_data = urllib.request.urlopen(req)
        res = res_data.read()
        return res.decode()

    @classmethod
    def _http_post(cls, url, data):
        req = urllib.request.Request(
            url=url, data=data if isinstance(data, bytes) else data.encode()
        )
        res_data = urllib.request.urlopen(req)
        res = res_data.read()
        return res.decode()

    @classmethod
    def _http_post_dict(cls, url, data):
        # POST a dict as a JSON body (non-ASCII preserved for WeChat).
        return cls._http_post(url, json.dumps(data, ensure_ascii=False))

    @classmethod
    def get_wechat_access_token(cls):
        """Return a cached access token, refreshing it from the API if expired.

        Raises WeChatError if the API answers with an errcode.
        """
        if datetime.datetime.now() >= cls.access_token_expire:
            print("appid=%s secret=%s" %(cls.appid, cls.secret))
            res = cls._http_get(
                'https://api.weixin.qq.com/cgi-bin/token?grant_type=client_credential&appid=%s&secret=%s' % (
                    cls.appid, cls.secret
                )
            )
            rjson = json.loads(res)
            if rjson.get('errcode'):
                raise WeChatError(rjson['errcode'], rjson['errmsg'])
            cls.access_token = rjson['access_token']
            # Renew 300 seconds before the server-side expiry to stay safe.
            cls.access_token_expire = datetime.datetime.now() + datetime.timedelta(seconds=rjson['expires_in'] - 300)
            cls.logger.info('Got access token %s', cls.access_token)
        return cls.access_token

    @classmethod
    def get_wechat_jsapi_ticket(cls):
        """Return a cached jsapi ticket, refreshing it from the API if expired."""
        if datetime.datetime.now() >= cls.jsapi_ticket_expire:
            at = cls.get_wechat_access_token()
            print("access token=%s" %(at))
            res = cls._http_get(
                'https://api.weixin.qq.com/cgi-bin/ticket/getticket?access_token=%s&type=jsapi' % (at)
            )
            rjson = json.loads(res)
            if rjson.get('errcode'):
                raise WeChatError(rjson['errcode'], rjson['errmsg'])
            cls.jsapi_ticket = rjson['ticket']
            cls.jsapi_ticket_expire = datetime.datetime.now() + datetime.timedelta(seconds=rjson['expires_in'] - 300)
            cls.logger.info('Got jsapi ticket %s', cls.jsapi_ticket)
        return cls.jsapi_ticket

    @classmethod
    def get_wechat_wx_config(cls, url):
        """Build the wx.config dict (appId/timestamp/nonceStr/signature) for url."""
        sign = Sign(cls.get_wechat_jsapi_ticket(), url)
        config = sign.sign()
        wx_config = {
            'appId': settings.WECHAT_APPID,
            'timestamp': config['timestamp'],
            'nonceStr': config['nonceStr'],
            'signature': config['signature']
        }
        return wx_config

    def get_wechat_menu(self):
        """Fetch the current custom menu; returns its button list (or [])."""
        res = self._http_get(
            'https://api.weixin.qq.com/cgi-bin/menu/get?access_token=%s' % (
                self.get_wechat_access_token()
            )
        )
        rjson = json.loads(res)
        return rjson.get('menu', {}).get('button', [])

    def set_wechat_menu(self, data):
        """Create/replace the custom menu; raises WeChatError on API error."""
        res = self._http_post_dict(
            'https://api.weixin.qq.com/cgi-bin/menu/create?access_token=%s' % (
                self.get_wechat_access_token()
            ), data
        )
        rjson = json.loads(res)
        if rjson.get('errcode'):
            raise WeChatError(rjson['errcode'], rjson['errmsg'])
class WeChatView(BaseView):
logger = logging.getLogger('WeChat')
lib = WeChatLib('', '', '')
handlers = []
error_message_handler = WeChatEmptyHandler
default_handler = WeChatEmptyHandler
def _check_signature(self):
query = self.request.GET
return self.lib.check_signature(query['signature'], query['timestamp'], query['no |
from yowsup.layers import YowLayer, YowLayerEvent, YowProtocolLayer
from .protocolentities import *
class YowIbProtocolLayer(YowProtocolLayer):
    """Yowsup protocol layer handling "ib" nodes (dirty/offline) and the
    clean-iq request sent downstream."""

    def __init__(self):
        # Map node tags to (receive handler, send handler) pairs.
        handleMap = {
            "ib": (self.recvIb, self.sendIb),
            "iq": (None, self.sendIb)
        }
        super(YowIbProtocolLayer, self).__init__(handleMap)

    def __str__(self):
        return "Ib Layer"

    def sendIb(self, entity):
        # Only clean-iq entities are serialized and passed down the stack.
        if entity.__class__ == CleanIqProtocolEntity:
            self.toLower(entity.toProtocolTreeNode())

    def recvIb(self, node):
        """Dispatch an incoming ib node to the matching protocol entity."""
        if node.getChild("dirty"):
            self.toUpper(DirtyIbProtocolEntity.fromProtocolTreeNode(node))
        elif node.getChild("offline"):
            self.toUpper(OfflineIbProtocolEntity.fromProtocolTreeNode(node))
        else:
            raise ValueError("Unkown ib node %s" % node)
|
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial migration creating the Bar model (id + name)."""

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Bar',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=255)),
            ],
        ),
    ]
|
import datetime
import release
def test_release():
    """Release splits a labelled version string and ISO date into its fields."""
    rel = release.Release("mysql-3.23.22-beta", "1234-05-06")
    print(vars(rel))
    assert vars(rel) == {
        "raw_label": "mysql-3.23.22-beta",
        "raw_date": "1234-05-06",
        "majormin": "3.23",
        "pre": "mysql-",
        "post": ".22-beta",
        "date": datetime.datetime(1234, 5, 6, 0, 0),
    }
|
# -*- coding: utf-8 -*-
class Solution(object):
    ''' https://leetcode.com/problems/count-primes/
    '''

    def countPrimes(self, n):
        """Return the number of primes strictly less than n.

        Sieve of Eratosthenes: only sieve factors up to sqrt(n), starting
        each pass at i*i since smaller multiples were crossed off earlier.
        """
        if n <= 2:
            return 0
        is_prime = [True] * n
        is_prime[0] = is_prime[1] = False
        for i in range(2, int(n ** 0.5) + 1):
            if is_prime[i]:
                for m in range(i * i, n, i):
                    is_prime[m] = False
        return sum(is_prime)
|
#=======================================================================
# RegIncrSC.py
#=======================================================================
from pymtl import *
|
class RegIncrSC( SystemCModel ):
  """PyMTL wrapper for the RegIncr SystemC model: one 32-bit input port and
  one 32-bit output port mapped onto the SystemC module's ports."""

  # sclinetrace: presumably makes PyMTL use the SystemC model's own line
  # trace output — confirm against the pymtl SystemCModel docs.
  sclinetrace = True

  def __init__( s ):
    s.in_ = InPort ( Bits(32) )
    s.out = OutPort( Bits(32) )
    # Map PyMTL signals onto the SystemC module's port names.
    s.set_ports({
      "clk" : s.clk,
      "rst" : s.reset,
      "in_" : s.in_,
      "out" : s.out,
    })
|
import time, os
from autotest.client import test, os_dep, utils
from autotest.client.shared import error
class btreplay(test.test):
    """Autotest wrapper around blktrace/btreplay: builds the tools, replays a
    recorded block-IO trace against a device, and reports elapsed/system time
    and the average Q2C (queue-to-complete) latency from btt."""
    version = 1

    # http://brick.kernel.dk/snaps/blktrace-git-latest.tar.gz
    def setup(self, tarball = 'blktrace-git-latest.tar.gz'):
        """Unpack blktrace, set up the libaio dependency and build the tools."""
        tarball = utils.unmap_url(self.bindir, tarball, self.tmpdir)
        utils.extract_tarball_to_dir(tarball, self.srcdir)
        self.job.setup_dep(['libaio'])
        libs = '-L' + self.autodir + '/deps/libaio/lib -laio'
        cflags = '-I ' + self.autodir + '/deps/libaio/include'
        var_libs = 'LIBS="' + libs + '"'
        var_cflags = 'CFLAGS="' + cflags + '"'
        self.make_flags = var_libs + ' ' + var_cflags
        os.chdir(self.srcdir)
        utils.system('patch -p1 < ../Makefile.patch')
        utils.system(self.make_flags + ' make')

    def initialize(self):
        self.job.require_gcc()
        self.ldlib = 'LD_LIBRARY_PATH=%s/deps/libaio/lib'%(self.autodir)
        self.results = []

    def run_once(self, dev="", devices="", extra_args='', tmpdir=None):
        # @dev: The device against which the trace will be replayed.
        #       e.g. "sdb" or "md_d1"
        # @devices: A space-separated list of the underlying devices
        #       which make up dev, e.g. "sdb sdc". You only need to set
        #       devices if dev is an MD, LVM, or similar device;
        #       otherwise leave it as an empty string.
        if not tmpdir:
            tmpdir = self.tmpdir
        os.chdir(self.srcdir)
        alldevs = "-d /dev/" + dev
        alldnames = dev
        for d in devices.split():
            alldevs += " -d /dev/" + d
            alldnames += " " + d
        # convert the trace (assumed to be in this test's base
        # directory) into btreplay's required format
        #
        # TODO: The test currently halts here as there is no trace in the
        # test's base directory.
        cmd = "./btreplay/btrecord -d .. -D %s %s" % (tmpdir, dev)
        self.results.append(utils.system_output(cmd, retain_output=True))
        # time a replay that omits "thinktime" between requests
        # (by use of the -N flag)
        cmd = self.ldlib + " /usr/bin/time ./btreplay/btreplay -d "+\
              tmpdir+" -N -W "+dev+" "+extra_args+" 2>&1"
        self.results.append(utils.system_output(cmd, retain_output=True))
        # trace a replay that reproduces inter-request delays, and
        # analyse the trace with btt to determine the average request
        # completion latency
        utils.system("./blktrace -D %s %s >/dev/null &" % (tmpdir, alldevs))
        cmd = self.ldlib + " ./btreplay/btreplay -d %s -W %s %s" %\
              (tmpdir, dev, extra_args)
        self.results.append(utils.system_output(cmd, retain_output=True))
        utils.system("killall -INT blktrace")
        # wait until blktrace is really done
        slept = 0.0
        while utils.system("ps -C blktrace > /dev/null",
                           ignore_status=True) == 0:
            time.sleep(0.1)
            slept += 0.1
            if slept > 30.0:
                utils.system("killall -9 blktrace")
                raise error.TestError("blktrace failed to exit in 30 seconds")
        utils.system("./blkparse -q -D %s -d %s/trace.bin -O %s >/dev/null" %
                     (tmpdir, tmpdir, alldnames))
        cmd = "./btt/btt -i %s/trace.bin" % tmpdir
        self.results.append(utils.system_output(cmd, retain_output=True))

    def postprocess(self):
        """Extract system/elapsed time and avg Q2C latency from the outputs."""
        # The /usr/bin/time line sits two lines above btt's "All Devices"
        # banner; strip() removes the 'system'/'elapsed' suffix characters,
        # leaving colon-separated time fields.
        for n in range(len(self.results)):
            if self.results[n].strip() == "==================== All Devices ====================":
                words = self.results[n-2].split()
                s = words[1].strip('sytem').split(':')
                e = words[2].strip('elapsd').split(':')
                break
        # Fold h:m:s-style fields into seconds (least-significant last).
        systime = 0.0
        for n in range(len(s)):
            i = (len(s)-1) - n
            systime += float(s[i]) * (60**n)
        elapsed = 0.0
        for n in range(len(e)):
            i = (len(e)-1) - n
            elapsed += float(e[i]) * (60**n)
        # Average Q2C latency reported by btt.
        q2c = 0.0
        for line in self.results:
            words = line.split()
            if len(words) < 3:
                continue
            if words[0] == 'Q2C':
                q2c = float(words[2])
                break
        self.write_perf_keyval({'time':elapsed, 'systime':systime,
                                'avg_q2c_latency':q2c})
|
self.domain_dn) not in str(res1[0].dn))
self.assertTrue(self._lost_and_found_dn(self.ldb_dc2, self.domain_dn) not in str(res2[0].dn))
self.assertEqual(str(res1[0]["name"][0]), res1[0].dn.get_rdn_value())
self.assertEqual(str(res2[0]["name"][0]), res2[0].dn.get_rdn_value())
# Delete both objects by GUID on DC2
self.ldb_dc2.delete('<GUID=%s>' % self.ou1)
self.ldb_dc2.delete('<GUID=%s>' % self.ou2)
self._net_drs_replicate(DC=self.dnsname_dc1, fromDC=self.dnsname_dc2, forced=True, full_sync=True)
self._check_deleted(self.ldb_dc1, self.ou1)
self._check_deleted(self.ldb_dc1, self.ou2)
# Check deleted on DC2
self._check_deleted(self.ldb_dc2, self.ou1)
self._check_deleted(self.ldb_dc2, self.ou2)
    def test_ReplConflictsRemoteWin(self):
        """Tests that objects created in conflict become conflict DNs.

        DC1's OU is created first, so the later DC2 object wins the
        conflict and DC1's original OU is renamed to a CNF: DN.
        """
        self._disable_inbound_repl(self.dnsname_dc1)
        self._disable_inbound_repl(self.dnsname_dc2)
        # Create conflicting objects on DC1 and DC2, with DC1 object created first
        self.ou1 = self._create_ou(self.ldb_dc1, "OU=Test Remote Conflict")
        # We have to sleep to ensure that the two objects have different timestamps
        time.sleep(1)
        self.ou2 = self._create_ou(self.ldb_dc2, "OU=Test Remote Conflict")
        self._net_drs_replicate(DC=self.dnsname_dc1, fromDC=self.dnsname_dc2, forced=True, full_sync=False)
        # Check that DC1 got the DC2 object, and ou1 was made into a conflict
        res1 = self.ldb_dc1.search(base="<GUID=%s>" % self.ou1,
                                   scope=SCOPE_BASE, attrs=["name"])
        res2 = self.ldb_dc1.search(base="<GUID=%s>" % self.ou2,
                                   scope=SCOPE_BASE, attrs=["name"])
        print res1[0]["name"][0]
        print res2[0]["name"][0]
        # The losing (older) object carries a CNF:<GUID> mangled name and
        # neither object may land in LostAndFound.
        self.assertTrue('CNF:%s' % self.ou1 in str(res1[0]["name"][0]))
        self.assertTrue(self._lost_and_found_dn(self.ldb_dc1, self.domain_dn) not in str(res1[0].dn))
        self.assertTrue(self._lost_and_found_dn(self.ldb_dc1, self.domain_dn) not in str(res2[0].dn))
        self.assertEqual(str(res1[0]["name"][0]), res1[0].dn.get_rdn_value())
        self.assertEqual(str(res2[0]["name"][0]), res2[0].dn.get_rdn_value())
        # Delete both objects by GUID on DC1
        self.ldb_dc1.delete('<GUID=%s>' % self.ou1)
        self.ldb_dc1.delete('<GUID=%s>' % self.ou2)
        self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, forced=True, full_sync=False)
        self._check_deleted(self.ldb_dc1, self.ou1)
        self._check_deleted(self.ldb_dc1, self.ou2)
        # Check deleted on DC2
        self._check_deleted(self.ldb_dc2, self.ou1)
        self._check_deleted(self.ldb_dc2, self.ou2)
    def test_ReplConflictsLocalWin(self):
        """Tests that objects created in conflict become conflict DNs.

        DC2's OU is created first, so the later local DC1 object wins and
        the replicated-in DC2 object is renamed to a CNF: DN.
        """
        self._disable_inbound_repl(self.dnsname_dc1)
        self._disable_inbound_repl(self.dnsname_dc2)
        # Create conflicting objects on DC1 and DC2, with DC2 object created first
        self.ou2 = self._create_ou(self.ldb_dc2, "OU=Test Local Conflict")
        # We have to sleep to ensure that the two objects have different timestamps
        time.sleep(1)
        self.ou1 = self._create_ou(self.ldb_dc1, "OU=Test Local Conflict")
        self._net_drs_replicate(DC=self.dnsname_dc1, fromDC=self.dnsname_dc2, forced=True, full_sync=False)
        # Check that DC1 kept its own object, and ou2 was made into a conflict
        res1 = self.ldb_dc1.search(base="<GUID=%s>" % self.ou1,
                                   scope=SCOPE_BASE, attrs=["name"])
        res2 = self.ldb_dc1.search(base="<GUID=%s>" % self.ou2,
                                   scope=SCOPE_BASE, attrs=["name"])
        print res1[0]["name"][0]
        print res2[0]["name"][0]
        self.assertTrue('CNF:%s' % self.ou2 in str(res2[0]["name"][0]), "Got %s for %s" % (str(res2[0]["name"][0]), self.ou2))
        self.assertTrue(self._lost_and_found_dn(self.ldb_dc1, self.domain_dn) not in str(res1[0].dn))
        self.assertTrue(self._lost_and_found_dn(self.ldb_dc1, self.domain_dn) not in str(res2[0].dn))
        self.assertEqual(str(res1[0]["name"][0]), res1[0].dn.get_rdn_value())
        self.assertEqual(str(res2[0]["name"][0]), res2[0].dn.get_rdn_value())
        # Delete both objects by GUID on DC1
        self.ldb_dc1.delete('<GUID=%s>' % self.ou1)
        self.ldb_dc1.delete('<GUID=%s>' % self.ou2)
        self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, forced=True, full_sync=False)
        self._check_deleted(self.ldb_dc1, self.ou1)
        self._check_deleted(self.ldb_dc1, self.ou2)
        # Check deleted on DC2
        self._check_deleted(self.ldb_dc2, self.ou1)
        self._check_deleted(self.ldb_dc2, self.ou2)
    def test_ReplConflictsRemoteWin_with_child(self):
        """Tests that objects created in conflict become conflict DNs.

        Same as the plain RemoteWin case, but each conflicting OU also has
        a child OU, so deletion must use tree_delete and the children must
        replicate away too.
        """
        self._disable_inbound_repl(self.dnsname_dc1)
        self._disable_inbound_repl(self.dnsname_dc2)
        # Create conflicting objects on DC1 and DC2, with DC1 object created first
        self.ou1 = self._create_ou(self.ldb_dc1, "OU=Test Parent Remote Conflict")
        # We have to sleep to ensure that the two objects have different timestamps
        time.sleep(1)
        self.ou2 = self._create_ou(self.ldb_dc2, "OU=Test Parent Remote Conflict")
        # Create a child under each conflicting parent (one per DC)
        ou1_child = self._create_ou(self.ldb_dc1, "OU=Test Child,OU=Test Parent Remote Conflict")
        ou2_child = self._create_ou(self.ldb_dc2, "OU=Test Child,OU=Test Parent Remote Conflict")
        self._net_drs_replicate(DC=self.dnsname_dc1, fromDC=self.dnsname_dc2, forced=True, full_sync=False)
        # Check that DC1 got the DC2 object, and self.ou1 was made into a conflict
        res1 = self.ldb_dc1.search(base="<GUID=%s>" % self.ou1,
                                   scope=SCOPE_BASE, attrs=["name"])
        res2 = self.ldb_dc1.search(base="<GUID=%s>" % self.ou2,
                                   scope=SCOPE_BASE, attrs=["name"])
        print res1[0]["name"][0]
        print res2[0]["name"][0]
        self.assertTrue('CNF:%s' % self.ou1 in str(res1[0]["name"][0]))
        self.assertTrue(self._lost_and_found_dn(self.ldb_dc1, self.domain_dn) not in str(res1[0].dn))
        self.assertTrue(self._lost_and_found_dn(self.ldb_dc1, self.domain_dn) not in str(res2[0].dn))
        self.assertEqual(str(res1[0]["name"][0]), res1[0].dn.get_rdn_value())
        self.assertEqual(str(res2[0]["name"][0]), res2[0].dn.get_rdn_value())
        # Delete both objects by GUID on DC1 (tree_delete removes the children)
        self.ldb_dc1.delete('<GUID=%s>' % self.ou1, ["tree_delete:1"])
        self.ldb_dc1.delete('<GUID=%s>' % self.ou2, ["tree_delete:1"])
        self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, forced=True, full_sync=False)
        self._check_deleted(self.ldb_dc1, self.ou1)
        self._check_deleted(self.ldb_dc1, self.ou2)
        # Check deleted on DC2
        self._check_deleted(self.ldb_dc2, self.ou1)
        self._check_deleted(self.ldb_dc2, self.ou2)
        self._check_deleted(self.ldb_dc1, ou1_child)
        self._check_deleted(self.ldb_dc1, ou2_child)
        # Check deleted on DC2
        self._check_deleted(self.ldb_dc2, ou1_child)
        self._check_deleted(self.ldb_dc2, ou2_child)
def test_ReplConflictsRenamedVsNewRemoteWin(self):
"""Tests resolving a DN conflict between a renamed object and a new object"""
self._disable_inbound_repl(self.dnsname_dc1)
self._disable_inbound_repl(self.dnsname_dc2)
# Create an OU and rename it on DC1
self.ou1 = self._create_ou(self.ldb_dc1, "OU=Test Remote Rename Conflict orig")
self.ldb_dc1.rename("<GUID=%s>" % self.ou1, "OU=Test Remote Rename Conflict,%s" % self.domain_dn)
# We have to sleep to ensure that the two objects have different timestamps
time.sleep(1)
# create a conflicting object with the same DN on DC2
self.ou2 = self._create_ou(self.ldb_dc2, "OU=Test Remote Rename Conflict")
s |
if path[1:2] != ":" or b[1:2] == ":":
# Path doesn't start with a drive letter, or cases 4 and 5.
b_wins = 1
# Else path has a drive letter, and b doesn't but is absolute.
elif len(path) > 3 or (len(path) == 3 and
path[-1] not in "/\\"):
# case 3
b_wins = 1
if b_wins:
path = b
else:
# Join, and ensure there's a separator.
assert len(path) > 0
if path[-1] in "/\\":
if b and b[0] in "/\\":
path += b[1:]
else:
path += b
elif path[-1] == ":":
path += b
elif b:
if b[0] in "/\\":
path += b
else:
path += "\\" + b
else:
# path is not empty and does not end with a backslash,
# but b is empty; since, e.g., split('a/') produces
# ('a', ''), it's best if join() adds a backslash in
# this case.
path += '\\'
return path
# Split a path in a drive specification (a drive letter followed by a
# colon) and the path specification.
# It is always true that drivespec + pathspec == p
def splitdrive(p):
    """Split a pathname into a drive specifier (a letter plus ':') and the
    rest of the path.  Returns a 2-tuple (drive, path); either part may be
    empty, and drive + path always equals the input."""
    drive, rest = '', p
    if p[1:2] == ':':
        drive, rest = p[:2], p[2:]
    return drive, rest
# Parse UNC paths
def splitunc(p):
    """Split a pathname into UNC mount point and relative path specifiers.
    Return a 2-tuple (unc, rest); either part may be empty.
    If unc is not empty, it has the form '//host/mount' (or similar
    using backslashes). unc+rest is always the input path.
    Paths containing drive letters never have an UNC part.
    """
    # A drive letter rules out a UNC prefix entirely.
    if p[1:2] == ':':
        return '', p
    if p[:2] not in ('//', '\\\\'):
        return '', p
    # Looks like \\machine\mountpoint\...: the host+mount pair plays the
    # role of a drive letter.  Search on the case-normalized form so both
    # slash styles are found, but slice the original string.
    normp = normcase(p)
    host_end = normp.find('\\', 2)
    if host_end == -1:
        # No mount component -> not a legal UNC path; leave it whole.
        return ("", p)
    mount_end = normp.find('\\', host_end + 1)
    if mount_end == -1:
        mount_end = len(p)
    return p[:mount_end], p[mount_end:]
# Split a path in head (everything up to the last '/') and tail (the
# rest). After the trailing '/' is stripped, the invariant
# join(head, tail) == p holds.
# The resulting head won't end in '/' unless it is the root.
def split(p):
    """Split a pathname.
    Return tuple (head, tail) where tail is everything after the final
    slash; either part may be empty.  Trailing slashes are stripped from
    head unless head consists only of slashes (the root)."""
    drive, rest = splitdrive(p)
    # Find the index just past the last slash of either flavour.
    cut = len(rest)
    while cut and rest[cut - 1] not in '/\\':
        cut = cut - 1
    head, tail = rest[:cut], rest[cut:]
    # Drop trailing slashes from head, but keep an all-slash head intact.
    trimmed = head.rstrip('/\\')
    head = trimmed or head
    return drive + head, tail
# Split a path in root and extension.
# The extension is everything starting at the last dot in the last
# pathname component; the root is everything before that.
# It is always true that root + ext == p.
def splitext(p):
    # Delegate to the shared generic implementation, parameterised with
    # this path flavour's separators; docstring is borrowed from it below.
    return genericpath._splitext(p, sep, altsep, extsep)
splitext.__doc__ = genericpath._splitext.__doc__
# Return the tail (basename) part of a path.
def basename(p):
    """Return the final component (the tail) of pathname p."""
    head, tail = split(p)
    return tail
# Return the head (dirname) part of a path.
def dirname(p):
    """Return the directory component (the head) of pathname p."""
    head, tail = split(p)
    return head
# Is a path a symbolic link?
# This will always return false on systems where posix.lstat doesn't exist.
def islink(path):
    """Test for symbolic link.
    On WindowsNT/95 and OS/2 always returns false
    """
    # These platforms have no symlink support, so the answer is constant.
    return False
# alias exists to lexists
# (without symlinks, lstat- and stat-based existence are the same thing)
lexists = exists
# Is a path a mount point? Either a root (with or without drive letter)
# or an UNC path with at most a / or \ after the mount point.
def ismount(path):
    """Test whether a path is a mount point (defined as root of drive)."""
    unc, rest = splitunc(path)
    if unc:
        # A UNC mount point may be followed by at most one separator.
        return rest in ("", "/", "\\")
    tail = splitdrive(path)[1]
    return len(tail) == 1 and tail[0] in '/\\'
# Directory tree walk.
# For each directory under top (including top itself, but excluding
# '.' and '..'), func(arg, dirname, filenames) is called, where
# dirname is the name of the directory and filenames is the list
# of files (and subdirectories etc.) in the directory.
# The func may modify the filenames list, to implement a filter,
# or to impose a different order of visiting.
def walk(top, func, arg):
    """Directory tree walk with callback function.
    For each directory in the directory tree rooted at top (including top
    itself, but excluding '.' and '..'), call func(arg, dirname, fnames).
    dirname is the name of the directory, and fnames a list of the names of
    the files and subdirectories in dirname (excluding '.' and '..'). func
    may modify the fnames list in-place (e.g. via del or slice assignment),
    and walk will only recurse into the subdirectories whose names remain in
    fnames; this can be used to implement a filter, or to impose a specific
    order of visiting. No semantics are defined for, or required of, arg,
    beyond that arg is always passed to func. It can be used, e.g., to pass
    a filename pattern, or a mutable object designed to accumulate
    statistics. Passing None for arg is common."""
    # Deprecated in Python 2: removed in 3.x in favour of os.walk.
    warnings.warnpy3k("In 3.x, os.path.walk is removed in favor of os.walk.",
                      stacklevel=2)
    try:
        names = os.listdir(top)
    except os.error:
        # Unreadable directory: silently skip, matching os.path.walk.
        return
    # Call first, then recurse: func may prune names in place.
    func(arg, top, names)
    for name in names:
        name = join(top, name)
        if isdir(name):
            walk(name, func, arg)
# Expand paths beginning with '~' or '~user'.
# '~' means $HOME; '~user' means that user's home directory.
# If the path doesn't begin with '~', or if the user or $HOME is unknown,
# the path is returned unchanged (leaving error reporting to whatever
# function is called with the expanded path as argument).
# See also module 'glob' for expansion of *, ? and [...] in pathnames.
# (A function should also be defined to do full *sh-style environment
# variable expansion.)
def expanduser(path):
    """Expand ~ and ~user constructs.
    If user or $HOME is unknown, do nothing."""
    if not path.startswith('~'):
        return path
    # Find the end of the ~user prefix (up to the first separator).
    end, length = 1, len(path)
    while end < length and path[end] not in '/\\':
        end = end + 1
    env = os.environ
    # Resolve the home directory: HOME, then USERPROFILE, then
    # HOMEDRIVE+HOMEPATH; give up if none is available.
    if 'HOME' in env:
        userhome = env['HOME']
    elif 'USERPROFILE' in env:
        userhome = env['USERPROFILE']
    elif 'HOMEPATH' not in env:
        return path
    else:
        drive = env.get('HOMEDRIVE', '')
        userhome = join(drive, env['HOMEPATH'])
    if end != 1:  # ~user: sibling directory of the current user's home
        userhome = join(dirname(userhome), path[1:end])
    return userhome + path[end:]
# Expand paths containing shell variable substitutions.
# The following rules apply:
# - no expansion within single quotes
# - '$$' is translated into '$'
# - '%%' is translated into '%' if '%%' are not seen in %var1%%var2%
# - ${varname} is accepted.
# - $varname is accepted.
# - %varname% is accepted.
# - varnames can be made out of letters, digits and the characters '_-'
# (though is not verified in the ${varname} and %varname% cases)
# XXX With COMMAND.COM you can use any charact |
#!/usr/bin/python
# Quick interactive probe of the (Python 2) urllib module: list its
# public names, then page the documentation for urlopen.
import urllib
print dir(urllib)

help(urllib.urlopen)
|
#!/usr/bin/python
#
# Copyright (c) 2017 Zim Kalinowski, <zikalino@microsoft.com>
# Copyright (c) 2019 Matti Ranta, (@techknowlogick)
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: azure_rm_mariadbdatabase_info
version_added: "2.9"
short_description: Get Azure MariaDB Database facts
description:
- Get facts of MariaDB Database.
options:
resource_group:
description:
- The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal.
required: True
type: str
server_name:
description:
- The name of the server.
required: True
type: str
name:
description:
- The name of the database.
type: str
extends_documentation_fragment:
- azure
author:
- Zim Kalinowski (@zikalino)
- Matti Ranta (@techknowlogick)
'''
EXAMPLES = '''
- name: Get instance of MariaDB Database
azure_rm_mariadbdatabase_info:
resource_group: myResourceGroup
server_name: server_name
name: database_name
- name: List instances of MariaDB Database
azure_rm_mariadbdatabase_info:
resource_group: myResourceGroup
server_name: server_name
'''
RETURN = '''
databases:
description:
- A list of dictionaries containing facts for MariaDB Databases.
returned: always
type: complex
contains:
id:
description:
- Resource ID.
returned: always
type: str |
sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.DBforMariaDB/servers/testser
ver/databases/db1"
resource_group:
description:
- Resource group name.
returned: | always
type: str
sample: testrg
server_name:
description:
- Server name.
returned: always
type: str
sample: testserver
name:
description:
- Resource name.
returned: always
type: str
sample: db1
charset:
description:
- The charset of the database.
returned: always
type: str
sample: UTF8
collation:
description:
- The collation of the database.
returned: always
type: str
sample: English_United States.1252
'''
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
try:
from msrestazure.azure_exceptions import CloudError
from azure.mgmt.rdbms.mariadb import MariaDBManagementClient
from msrest.serialization import Model
except ImportError:
# This is handled in azure_rm_common
pass
class AzureRMMariaDbDatabaseInfo(AzureRMModuleBase):
    """Ansible info module returning facts about Azure MariaDB databases.

    With resource_group, server_name and name set it returns facts for a
    single database; without name it lists every database on the server.
    """
    def __init__(self):
        # define user inputs into argument
        self.module_arg_spec = dict(
            resource_group=dict(
                type='str',
                required=True
            ),
            server_name=dict(
                type='str',
                required=True
            ),
            name=dict(
                type='str'
            )
        )
        # store the results of the module operation
        self.results = dict(
            changed=False
        )
        self.resource_group = None
        self.server_name = None
        self.name = None
        super(AzureRMMariaDbDatabaseInfo, self).__init__(self.module_arg_spec, supports_tags=False)

    def exec_module(self, **kwargs):
        """Entry point called by the base class; fills self.results['databases']."""
        # Warn when invoked under the legacy *_facts alias.
        is_old_facts = self.module._name == 'azure_rm_mariadbdatabase_facts'
        if is_old_facts:
            self.module.deprecate("The 'azure_rm_mariadbdatabase_facts' module has been renamed to 'azure_rm_mariadbdatabase_info'", version='2.13')

        for key in self.module_arg_spec:
            setattr(self, key, kwargs[key])

        # name present -> single database; otherwise list the whole server.
        if (self.resource_group is not None and
                self.server_name is not None and
                self.name is not None):
            self.results['databases'] = self.get()
        elif (self.resource_group is not None and
              self.server_name is not None):
            self.results['databases'] = self.list_by_server()
        return self.results

    def get(self):
        """Fetch facts for the single named database; returns a list with
        zero or one formatted items (lookup failures are only logged)."""
        response = None
        results = []
        try:
            response = self.mariadb_client.databases.get(resource_group_name=self.resource_group,
                                                         server_name=self.server_name,
                                                         database_name=self.name)
            self.log("Response : {0}".format(response))
        except CloudError as e:
            # Missing database is not fatal for an info module.
            self.log('Could not get facts for Databases.')

        if response is not None:
            results.append(self.format_item(response))

        return results

    def list_by_server(self):
        """List all databases on the server; a listing failure fails the module."""
        response = None
        results = []
        try:
            response = self.mariadb_client.databases.list_by_server(resource_group_name=self.resource_group,
                                                                    server_name=self.server_name)
            self.log("Response : {0}".format(response))
        except CloudError as e:
            self.fail("Error listing for server {0} - {1}".format(self.server_name, str(e)))

        if response is not None:
            for item in response:
                results.append(self.format_item(item))

        return results

    def format_item(self, item):
        """Flatten an SDK Database object into the documented fact dict."""
        d = item.as_dict()
        d = {
            'resource_group': self.resource_group,
            'server_name': self.server_name,
            'name': d['name'],
            'charset': d['charset'],
            'collation': d['collation']
        }
        return d
def main():
    """Module entry point; AzureRMModuleBase runs exec_module on construction."""
    AzureRMMariaDbDatabaseInfo()

if __name__ == '__main__':
    main()
|
#
# Copyright (C) 2013 Uninett AS
#
# This file is part of Network Administration Visualized (NAV).
#
# NAV is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License version 3 as published by
# the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details. You should have received a copy of the GNU General Public
# License along with NAV. If not, see < | http://www.gnu.org/licenses/>.
#
"""Django URL configuration for messages tool"""
from django.conf.urls import url
from nav.web.messages import views
from nav.web.messages.feeds import ActiveMessagesFeed
# NOTE: '^active/$' is registered twice on purpose: the same view is
# reversible both as 'messages-home' (the tool's landing page) and as
# 'messages-active'; URL resolution always uses the first match.
urlpatterns = [
    url(r'^$', views.redirect_to_active),
    url(r'^active/$', views.active, name='messages-home'),
    url(r'^create/$', views.save, name='messages-create'),
    url(r'^edit/(?P<message_id>\d+)$', views.save, name='messages-edit'),
    url(r'^active/$', views.active, name='messages-active'),
    url(r'^scheduled/$', views.planned, name='messages-planned'),
    url(r'^archive/$', views.historic, name='messages-historic'),
    url(r'^view/(?P<message_id>\d+)$', views.view, name='messages-view'),
    url(r'^expire/(?P<message_id>\d+)$', views.expire, name='messages-expire'),
    url(r'^followup/(?P<message_id>\d+)$', views.followup, name='messages-followup'),
    url(r'^rss/$', ActiveMessagesFeed(), name='messages-rss'),
]
|
be properties (simplifies serializing)
self.lock2 = 0
self.verbose = 0
for key in kw.keys():
self.__dict__[key] = kw[ke | y]
def __str__(self):
attrs = []
for item in self:
if isinstance(item,Node):
attrs.append( str(item) )
else:
attrs.append( repr(item) )
attrs = ','.join(attrs)
return "%s(%s)"%(self.__class__.__name__,attrs)
    def safe_repr( self, tank ):
        """Cycle-safe repr.  *tank* maps str(node) -> None for every node
        already rendered; attribute sub-trees seen before are skipped so a
        cyclic graph cannot recurse forever."""
        tank[ str(self) ] = None
        attrs = []
        for item in self:
            if isinstance(item,Node):
                attrs.append( item.safe_repr(tank) ) # can we use repr here ?
            else:
                attrs.append( repr(item) )
        # this is the dangerous bit: instance attributes may point back
        # into the tree, so only render Node attrs not already in tank.
        for key, val in self.__dict__.items():
            if isinstance(val,Node):
                if str(val) not in tank:
                    attrs.append( '%s=%s'%(key,val.safe_repr(tank)) )
            else:
                attrs.append( '%s=%s'%(key,repr(val)) )
        attrs = ','.join(attrs)
        return "%s(%s)"%(self.__class__.__name__,attrs)
def __repr__(self):
#attrs = ','.join( [repr(item) for item in self] + \
# [ '%s=%s'%(key,repr(val)) for key,val in self.__dict__.items() ] )
#return "%s%s"%(self.__class__.__name__,tuple(attrs))
return self.safe_repr({})
def __eq__(self,other):
if not isinstance(other,Node):
return 0
if len(self)!=len(other):
return 0
for i in range(len(self)):
if not self[i]==other[i]:
return 0
return 1
def __ne__(self,other):
return not self==other
def filter(self,cls):
return [x for x in self if isinstance(x,cls)]
#return filter( lambda x:isinstance(x,cls), self )
def deepfilter(self,cls):
" bottom-up "
return [x for x in self.nodes() if isinstance(x,cls)]
def find(self,cls):
for x in self:
if isinstance(x,cls):
return x
return None
    def deepfind(self,cls):
        " bottom-up isinstance search "
        # Children are preferred over self: a matching child (or a match
        # anywhere in its subtree) is returned before self is considered.
        for x in self:
            if isinstance(x,Node):
                if isinstance(x,cls):
                    return x
                node = x.deepfind(cls)
                if node is not None:
                    return node
        # No match below; self itself is the last candidate.
        if isinstance(self,cls):
            return self
        return None
def leaves(self):
for i in self:
if isinstance( i, Node ):
for j in i.leaves():
yield j
else:
yield i
def nodes(self):
" bottom-up iteration "
for i in self:
if isinstance( i, Node ):
for j in i.nodes():
yield j
yield self
    def deeplen(self):
        """Total number of items in the subtree.  lock2 guards against
        infinite recursion on cyclic trees: a node already being counted
        contributes 1 instead of recursing."""
        i=0
        if not self.lock2:
            self.lock2=1
            for item in self:
                i+=1
                if isinstance(item,Node):
                    i+=item.deeplen()
            self.lock2=0
        else:
            # Already on the counting stack (cycle): count as a single item.
            i+=1
        return i
    def deepstr(self,level=0,comment=False,nl='\n',indent=' '):
        """Pretty multi-line rendering of the tree.  Small trees
        (deeplen < 4) collapse onto a single line; lock1 guards against
        infinite recursion, printing '<recursion...>' for cyclic children.
        With comment=True every output line is '#'-prefixed."""
        if self.deeplen() < 4:
            nl = ""; indent = ""
        #else:
            #nl="\n"; indent = " "
        s = []
        if not self.lock1:
            self.lock1=1
            for item in self:
                if isinstance(item,Node):
                    s.append( indent*(level+1)+item.deepstr(level+1,False,nl,indent) )
                else:
                    s.append( indent*(level+1)+repr(item) )
            self.lock1=0
        else:
            # Re-entered while already rendering: cycle detected.
            for item in self:
                if isinstance(item,Node):
                    s.append( indent*(level+1)+"<recursion...>" )
                else:
                    s.append( indent*(level+1)+"%s"%repr(item) )
        s = "%s(%s)"%(self.__class__.__name__,nl+string.join(s,","+nl))
        if comment:
            s = '#' + s.replace('\n','\n#')
        return s
def clone(self):
items = []
for item in self:
if isinstance(item,Node):
item = item.clone()
items.append(item)
# we skip any attributes...
return self.__class__(*items)
    def fastclone(self):
        """Iterative deep copy using explicit stacks (avoids Python's
        recursion overhead/limit).  Unlike clone(), this also copies
        instance attributes via __dict__.update.
        XX is it faster ???"""
        #print "clone"
        # Parallel stacks: the node being copied, the index of the next
        # child to process, and the copied children accumulated so far.
        nodes = [self]
        idxs = [0]
        itemss = [ [] ]
        while nodes:
            assert len(nodes)==len(idxs)==len(itemss)
            node = nodes[-1]
            items = itemss[-1]
            assert idxs[-1] == len(items)
            while idxs[-1]==len(node):
                # pop: all children copied, build the replacement node
                _node = node.__class__( *items )
                _node.__dict__.update( node.__dict__ )
                nodes.pop(-1)
                idxs.pop(-1)
                itemss.pop(-1)
                if not nodes:
                    #for node0 in self.nodes():
                        #for node1 in _node.nodes():
                            #assert node0 is not node1
                    #assert _node == self
                    return _node # Done !!
                node = nodes[-1]
                items = itemss[-1]
                items.append(_node) # set: hand the copy to the parent frame
                idxs[-1] += 1
                assert idxs[-1] == len(items)
            #assert idxs[-1] < len(node), str( (node,nodes,idxs,itemss) )
            _node = node[ idxs[-1] ]
            # while idxs[-1]<len(node):
            if isinstance(_node,Node):
                # push: descend into a child Node
                nodes.append( _node )
                idxs.append( 0 )
                itemss.append( [] )
            else:
                # next: plain item, copy by reference
                items.append(_node)
                idxs[-1] += 1
                assert idxs[-1] == len(items)
    def expose(self,cls):
        ' expose children of any <cls> instance '
        # children first, so nested <cls> nodes are flattened bottom-up
        for x in self:
            if isinstance(x,Node):
                x.expose(cls)
        # now the tricky bit: splice each <cls> child's items into self,
        # in place, while walking the (mutating) list by index
        i=0
        while i < len(self):
            if isinstance(self[i],cls):
                node=self.pop(i)
                for x in node:
                    assert not isinstance(x,cls)
                    # pass on some attributes (source location info)
                    if hasattr(node,'lines') and not hasattr(x,'lines'):
                        x.lines=node.lines
                    if hasattr(node,'file') and not hasattr(x,'file'):
                        x.file=node.file
                    self.insert(i,x) # expose
                    i=i+1
                    assert i<=len(self)
            else:
                i=i+1
def get_parent( self, item ): # XX 25% CPU time here XX
assert self != item
if item in self:
return self
for child in self:
if isinstance(child, Node):
parent = child.get_parent(item)
if parent is not None:
return parent
return None
def expose_node( self, item ):
assert self != item
parent = self.get_parent(item)
idx = parent.index( item )
parent[idx:idx+1] = item[:]
def delete(self,cls):
' delete any <cls> subtree '
for x in self:
if isinstance(x,Node):
x.delete(cls)
# now the tricky bit
i=0
while i < len(self):
if isinstance(self[i],cls):
self.pop(i)
else:
i=i+1
def deeprm(self,item):
' remove any items matching <item> '
for x in self:
if isinstance(x,Node):
x.deeprm(item)
# now the tricky bit
i=0
while i < len(self):
if self[i] == item:
self.pop(i)
else:
i=i+1
def idem(self,cls):
" <cls> is made idempotent "
# children first
for x in self:
if isinstance(x,Node):
x.idem(cls)
if isinstance(self,cls):
# now the tricky bit
i=0
while i < len(self):
if isinstance(self[i],cls): |
import json, logging, os, re, subprocess, shlex
from tools import get_category_by_status
log = logging.getLogger()
# Attachment-name fragments that are pure crash metadata; files whose
# names contain any of these are skipped when scanning a bug directory.
# NOTE: the original list was missing a comma after "XorgLog", so Python
# silently concatenated it with "log" into the bogus entry "XorgLoglog",
# losing both intended entries.
meta_files = ['Disassembly', 'Stacktrace', 'Registers',
              'SegvAnalysis', 'ProcMaps', "BootLog", "CoreDump",
              "BootDmesg", "syslog", "UbiquityDebug.gz", "Casper.gz",
              "UbiquityPartman.gz", "UbiquityDm.gz", "GdmLog", "XorgLog",
              "log", "Log"]
def get(metadata, bugdir):
    """Collect crash 'indicators' from bug metadata and attachments.

    Returns a dict with the parsed process command line ('cmdline'), a
    URI-looking final argument if any ('cmdline_uri'), and the names of
    attachments that look interesting ('files').
    # NOTE(review): assumes metadata['description'] is the full bug
    # description text — confirm against the caller.
    """
    indicators = {}
    # look for file arg; this needs work TODO
    cmdline = None
    uri = None
    for line in metadata['description'].splitlines():
        if "proccmdline" in line.lower():
            # Everything after the first ':' is the recorded command line.
            cmdline = ":".join(line.split(":")[1:]).strip()
            try:
                toks = shlex.split(cmdline)
            except ValueError as e:
                log.error("error while parsing cmdline: %s" % cmdline)
                log.exception(e)
                continue
            if len(toks) > 1:
                # A final argument containing '//' or '.' is treated as a
                # URI/path the process was handed.
                if ("//" in toks[-1]) or ("." in toks[-1]):
                    uri = toks[-1].strip()
    indicators['cmdline'] = cmdline
    indicators['cmdline_uri'] = uri
    # look for interesting attachments; ugly
    interesting_files = []
    for f in os.listdir(bugdir):
        fpath = os.path.join(bugdir, f)
        if not os.path.isfile(fpath):
            continue
        # Skip anything whose name matches a known metadata attachment.
        for fn in meta_files:
            if fn.lower() in f.lower():
                break
        else:
            # no break in loop above, i.e. still interested
            out = subprocess.check_output(["file", fpath])
            ftype = out.split(":")[-1]
            if ftype.strip() == "empty":
                continue
            # Plain text and core files are routine; everything else may
            # be worth a look.
            for tstr in ["ASCII", "text", "core file"]:
                if tstr in ftype:
                    break
            else:
                # only runs if we didn't break, i.e., this might be interesting
                interesting_files.append(f)
    indicators['files'] = interesting_files
    # TODO: look for recv, etc. in stacks (should this be in exploitability maybe (remote?))
    return indicators
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from ..config import BaseAnsibleContainerConfig
from ..utils.visibility import getLogger
logger = getLogger(__name__)
class K8sBaseConfig(BaseAnsibleContainerConfig):
    """Configuration shared by the Kubernetes-flavoured engines."""

    @property
    def image_namespace(self):
        """Namespace for built images: the settings.k8s_namespace.name
        override when present, otherwise the project name."""
        namespace = self.project_name
        override = self._config.get('settings', {}).get('k8s_namespace', {}).get('name')
        return override or namespace

    def set_env(self, env):
        """Apply the base environment, then strip per-volume settings that
        belong to other engines."""
        super(K8sBaseConfig, self).set_env(env)
        volumes = self._config.get('volumes')
        if not volumes:
            return
        for vol_key in volumes:
            # Remove settings not meant for this engine
            for engine_name in self.remove_engines:
                if engine_name in volumes[vol_key]:
                    del volumes[vol_key][engine_name]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from . import NeuralLayer
from deepy.utils import build_activation, FLOATX
import numpy as np
import theano
import theano.tensor as T
from collections import OrderedDict
OUTPUT_TYPES = ["sequence", "one"]
INPUT_TYPES = ["sequence", "one"]
class RNN(NeuralLayer):
"""
Recurrent neural network layer.
"""
def __init__(self, hidden_size, input_type="sequence", output_type="sequence", vector_core=None,
hidden_activation="tanh", hidden_init=None, input_init=None, steps=None,
persistent_state=False, reset_state_for_input=None, batch_size=None,
go_backwards=False, mask=None, second_input_size=None, second_input=None):
super(RNN, self).__init__("rnn")
self._hidden_size = hidden_size
self.output_dim = self._hidden_size
self._input_type = input_type
self._output_type = output_type
self._hidden_activation = hidden_activation
self._hidden_init = hidden_init
self._vector_core = vector_core
self._input_init = input_init
self.persistent_state = persistent_state
self.reset_state_for_input = reset_state_for_input
self.batch_size = batch_size
self._steps = steps
self._go_backwards = go_backwards
self._mask = mask.dimshuffle((1,0)) if mask else None
self._second_input_size = second_input_size
self._second_input = second_input
self._sequence_map = OrderedDict()
if input_type not in INPUT_TYPES:
raise Exception("Input type of RNN is wrong: %s" % input_type)
if output_type not in OUTPUT_TYPES:
raise Exception("Output type of RNN is wrong: %s" % output_type)
if self.persistent_state and not self.batch_size:
raise Exception("Batch size must be set for persistent state mode")
if mask and input_type == "one":
raise Exception("Mask only works with sequence input")
def _hidden_preact(self, h):
return T.dot(h, self.W_h) if not self._vector_core else h * self.W_h
def step(self, *vars):
# Parse sequence
sequence_map = dict(zip(self._sequence_map.keys(), vars[:len(self._sequence_map)]))
if self._input_type == "sequence":
x = sequence_map["x"]
h = vars[-1]
# Reset part of the state on condition
if self.reset_state_for_input != None:
h = h * T.neq(x[:, self.reset_state_for_input], 1).dimshuffle(0, 'x')
# RNN core step
z = x + self._hidden_preact(h) + self.B_h
else:
h = vars[-1]
z = self._hidden_preact(h) + self.B_h
# Second input
if "second_input" in sequence_map:
z += sequence_map["second_input"]
new_h = self._hidden_act(z)
# Apply mask
if "mask" in sequence_map:
mask = sequence_ | map["mask"].dimshuffle(0, 'x')
new_h = mask * new_h + (1 - mask) * h
return new_h
def produce_input_sequences(self, x, mask=None, second_input=None):
self._sequence_map.clear()
if self._input_type == "sequence":
self._sequence_map["x"] = T.dot(x, self.W_i)
# Mask
if mask:
# (batch)
self._sequence_map["mask"] = mask
| elif self._mask:
# (time, batch)
self._sequence_map["mask"] = self._mask
# Second input
if second_input:
self._sequence_map["second_input"] = T.dot(second_input, self.W_i2)
elif self._second_input:
self._sequence_map["second_input"] = T.dot(self._second_input, self.W_i2)
return self._sequence_map.values()
def produce_initial_states(self, x):
h0 = T.alloc(np.cast[FLOATX](0.), x.shape[0], self._hidden_size)
if self._input_type == "sequence":
if self.persistent_state:
h0 = self.state
else:
h0 = x
return [h0]
def output(self, x):
if self._input_type == "sequence":
# Move middle dimension to left-most position
# (sequence, batch, value)
sequences = self.produce_input_sequences(x.dimshuffle((1,0,2)))
else:
sequences = self.produce_input_sequences(None)
step_outputs = self.produce_initial_states(x)
hiddens, _ = theano.scan(self.step, sequences=sequences, outputs_info=step_outputs,
n_steps=self._steps, go_backwards=self._go_backwards)
# Save persistent state
if self.persistent_state:
self.register_updates((self.state, hiddens[-1]))
if self._output_type == "one":
return hiddens[-1]
elif self._output_type == "sequence":
return hiddens.dimshuffle((1,0,2))
def setup(self):
if self._input_type == "one" and self.input_dim != self._hidden_size:
raise Exception("For RNN receives one vector as input, "
"the hidden size should be same as last output dimension.")
self._setup_params()
self._setup_functions()
    def _setup_functions(self):
        # Resolve the configured activation name into a callable once.
        self._hidden_act = build_activation(self._hidden_activation)
    def _setup_params(self):
        """Create and register the layer's weights, biases and state."""
        if not self._vector_core:
            # Full hidden-to-hidden weight matrix.
            self.W_h = self.create_weight(self._hidden_size, self._hidden_size, suffix="h", initializer=self._hidden_init)
        else:
            # "Vector core": the recurrence weight is a vector (created via
            # create_bias) offset by the scalar _vector_core value --
            # presumably used element-wise in _hidden_preact; confirm there.
            self.W_h = self.create_bias(self._hidden_size, suffix="h")
            self.W_h.set_value(self.W_h.get_value() + self._vector_core)
        self.B_h = self.create_bias(self._hidden_size, suffix="h")
        self.register_parameters(self.W_h, self.B_h)
        if self.persistent_state:
            # Hidden state carried across calls; sized for a fixed batch.
            self.state = self.create_matrix(self.batch_size, self._hidden_size, "rnn_state")
            self.register_free_parameters(self.state)
        else:
            self.state = None
        if self._input_type == "sequence":
            # Input projection only exists for sequence inputs.
            self.W_i = self.create_weight(self.input_dim, self._hidden_size, suffix="i", initializer=self._input_init)
            self.register_parameters(self.W_i)
        if self._second_input_size:
            self.W_i2 = self.create_weight(self._second_input_size, self._hidden_size, suffix="i2", initializer=self._input_init)
            self.register_parameters(self.W_i2)
|
"""
Utility Mixins for unit tests
"""
import json
import sys
from django.conf import settings
from django.urls import clear_url_caches, resolve
from django.test import TestCase
from mock import patch
from util.db import CommitOnSuccessManager, OuterAtomic
class UrlResetMixin(object):
    """Mixin to reset urls.py before and after a test

    Django memoizes the function that reads the urls module (whatever module
    urlconf names). The module itself is also stored by python in sys.modules.
    To fully reload it, we need to reload the python module, and also clear django's
    cache of the parsed urls.

    However, the order in which we do this doesn't matter, because neither one will
    get reloaded until the next request

    Doing this is expensive, so it should only be added to tests that modify settings
    that affect the contents of urls.py
    """
    URLCONF_MODULES = None

    def reset_urls(self, urlconf_modules=None):
        """Reset `urls.py` for a set of Django apps."""
        # `reload` is a builtin on Python 2 only; on Python 3 it lives in
        # importlib. Resolve it here so this mixin works on both.
        try:
            reload_module = reload
        except NameError:
            from importlib import reload as reload_module

        if urlconf_modules is None:
            urlconf_modules = [settings.ROOT_URLCONF]
        if self.URLCONF_MODULES is not None:
            urlconf_modules.extend(self.URLCONF_MODULES)

        for urlconf in urlconf_modules:
            if urlconf in sys.modules:
                reload_module(sys.modules[urlconf])
        clear_url_caches()

        # Resolve a URL so that the new urlconf gets loaded
        resolve('/')

    def setUp(self):
        """Reset Django urls before tests and after tests

        If you need to reset `urls.py` from a particular Django app (or apps),
        specify these modules by setting the URLCONF_MODULES class attribute.

        Examples:

            # Reload only the root urls.py
            URLCONF_MODULES = None

            # Reload urls from my_app
            URLCONF_MODULES = ['myapp.url']

            # Reload urls from my_app and another_app
            URLCONF_MODULES = ['myapp.url', 'another_app.urls']

        """
        super(UrlResetMixin, self).setUp()
        self.reset_urls()
        self.addCleanup(self.reset_urls)
class EventTestMixin(object):
    """
    Generic mixin for verifying that events were emitted during a test.

    Subclasses call ``setUp`` with the dotted path of the tracker to patch;
    the patched mock is exposed as ``self.mock_tracker``.
    """
    def setUp(self, tracker):
        super(EventTestMixin, self).setUp()
        tracker_patcher = patch(tracker)
        self.mock_tracker = tracker_patcher.start()
        self.addCleanup(tracker_patcher.stop)

    def assert_no_events_were_emitted(self):
        """
        Ensures no events were emitted since the last event related assertion.
        """
        self.assertFalse(self.mock_tracker.emit.called)  # pylint: disable=maybe-no-member

    def assert_event_emitted(self, event_name, **kwargs):
        """
        Verify that an event was emitted with the given parameters.
        """
        # pylint: disable=maybe-no-member
        self.mock_tracker.emit.assert_any_call(event_name, kwargs)

    def assert_event_emission_count(self, event_name, expected_count):
        """
        Verify that the event with the given name was emitted
        a specific number of times.
        """
        actual_count = sum(
            1 for call_args in self.mock_tracker.emit.call_args_list
            if call_args[0][0] == event_name
        )
        self.assertEqual(actual_count, expected_count)

    def reset_tracker(self):
        """
        Reset the mock tracker in order to forget about old events.
        """
        self.mock_tracker.reset_mock()

    def get_latest_call_args(self):
        """
        Return the arguments of the latest call to emit.
        """
        return self.mock_tracker.emit.call_args[0]
class PatchMediaTypeMixin(object):
    """
    Generic mixin for verifying unsupported media type in PATCH
    """
    def test_patch_unsupported_media_type(self):
        """A PATCH with an unsupported content type must return HTTP 415."""
        empty_payload = json.dumps({})
        response = self.client.patch(
            self.url,
            empty_payload,
            content_type=self.unsupported_media_type,
        )
        self.assertEqual(response.status_code, 415)
def patch_testcase():
    """
    Disable commit_on_success decorators for tests in TestCase subclasses.

    Since tests in TestCase classes are wrapped in an atomic block, we
    cannot use transaction.commit() or transaction.rollback().
    https://docs.djangoproject.com/en/1.8/topics/testing/tools/#django.test.TransactionTestCase
    """
    def enter_atomics_wrapper(wrapped_func):
        """
        Wrapper for TestCase._enter_atomics
        """
        wrapped_func = wrapped_func.__func__
        def _wrapper(*args, **kwargs):
            """
            Method that performs atomic-entering accounting.
            """
            # While inside a TestCase's atomic block, commit_on_success is a
            # no-op and nested OuterAtomic blocks are permitted.
            CommitOnSuccessManager.ENABLED = False
            OuterAtomic.ALLOW_NESTED = True
            # Count how many TestCase-level atomics are open; initialized
            # lazily on the first wrapped call.
            if not hasattr(OuterAtomic, 'atomic_for_testcase_calls'):
                OuterAtomic.atomic_for_testcase_calls = 0
            OuterAtomic.atomic_for_testcase_calls += 1
            return wrapped_func(*args, **kwargs)
        return classmethod(_wrapper)
    def rollback_atomics_wrapper(wrapped_func):
        """
        Wrapper for TestCase._rollback_atomics
        """
        wrapped_func = wrapped_func.__func__
        def _wrapper(*args, **kwargs):
            """
            Method that performs atomic-rollback accounting.
            """
            # Leaving the TestCase atomic block: restore normal behaviour
            # and decrement the bookkeeping counter (mirrors the increment
            # in enter_atomics_wrapper).
            CommitOnSuccessManager.ENABLED = True
            OuterAtomic.ALLOW_NESTED = False
            OuterAtomic.atomic_for_testcase_calls -= 1
            return wrapped_func(*args, **kwargs)
        return classmethod(_wrapper)
    # Monkey-patch the two TestCase hooks in place.
    # pylint: disable=protected-access
    TestCase._enter_atomics = enter_atomics_wrapper(TestCase._enter_atomics)
    TestCase._rollback_atomics = rollback_atomics_wrapper(TestCase._rollback_atomics)
def patch_sessions():
    """
    Override the Test Client's session and login to support safe cookies.
    """
    # Imported lazily so merely importing this utils module does not pull
    # in the safe-sessions app unless the patch is actually requested.
    from openedx.core.djangoapps.safe_sessions.testing import safe_cookie_test_session_patch
    safe_cookie_test_session_patch()
|
#! /usr/bin/env python2
# Minimal driver: load main.so through rift and invoke its main().
import rift
rift.init("main.so")
# NOTE(review): `lib` is never defined in this script; presumably
# rift.init() is expected to return/expose the loaded library (e.g.
# `lib = rift.init("main.so")`). As written this line raises NameError --
# confirm against the rift API.
print(rift.call(lib.main, rift.c_int))
|
# -*- coding: utf-8 -*-
# --------------------------------------------------
# Задача 1
# --------------------------------------------------
"""
Напишите функцию-генератор, которая будет принимать
последовательность, где каждый элемент кортеж с двумя
значениями (длинна катетов треугольника) и возвращать
длинну гипотенузы.
"""
l = [(8, 4), (5, 7), (9, 2), (5, 4), (6, 4)]
# --------------------------------------------------
# Задача 2
# --------------------------------------------------
"""
Н | апишите генератор-выражение, которое будет вычислять
и возвращать длинну окружности. Каждый элемент является
радиусом.
"""
l = [7, 9.06, 44, 21. | 3, 6, 10.00001, 53]
# --------------------------------------------------
# Задача 3
# --------------------------------------------------
"""
Напишите пример реализации встроенной функции filter.
"""
def myfilter1(fun, l):
    """Reference implementation of the builtin ``filter`` (task 3).

    :param fun: predicate applied to each item; like the builtin, ``None``
        means "keep truthy items".
    :param l: any iterable.
    :returns: list of items for which the predicate is truthy.
    """
    result = []
    for item in l:
        keep = fun(item) if fun is not None else item
        if keep:
            result.append(item)
    return result
# --------------------------------------------------
# Задача 4
# --------------------------------------------------
"""
Напишите пример реализации встроенной функции reduce.
"""
def myreduce1(fun, l):
    """Reference implementation of ``reduce`` without initializer (task 4).

    Folds *l* left-to-right with the binary callable *fun* and returns the
    accumulated value.

    :raises TypeError: if *l* is empty, mirroring the builtin.
    """
    iterator = iter(l)
    try:
        acc = next(iterator)
    except StopIteration:
        raise TypeError("reduce() of empty sequence with no initial value")
    for item in iterator:
        acc = fun(acc, item)
    return acc
# --------------------------------------------------
# Задача 5
# --------------------------------------------------
"""
Перепишите функции из задач 3 и 4 так, чтобы они
стали генераторами.
"""
def myfilter2(fun, l):
    """Generator version of ``myfilter1`` (task 5).

    Lazily yields the items of *l* for which ``fun(item)`` is truthy;
    ``fun=None`` keeps truthy items, like the builtin ``filter``.
    """
    for item in l:
        keep = fun(item) if fun is not None else item
        if keep:
            yield item
def myreduce21(fun, l):
    """Generator version of ``myreduce1`` (task 5).

    Yields each intermediate accumulation (like itertools.accumulate);
    the final yielded value equals ``myreduce1(fun, l)``. An empty input
    yields nothing instead of raising.
    """
    iterator = iter(l)
    try:
        acc = next(iterator)
    except StopIteration:
        return
    yield acc
    for item in iterator:
        acc = fun(acc, item)
        yield acc
# --------------------------------------------------
# Задача 6
# --------------------------------------------------
"""
Перепишите вашу реализацию функций filter и map из
урока так, чтоб вторым аргументом принималось любое
количество последовательностей.
"""
|
from __future__ import annotations
import contextlib
import os
from typing import Generator
from typing import Sequence
from pre_commit.envcontext import envcontext
from pre_commit.envcontext import PatchesT
from pre_commit.en | vcontext import Var
from pre_commit.hook import Hook
from pre_commit.languages import helpers
from pre_commit.prefix import Prefix
from pre_commit.util import clean_path_on_failure
ENVIRONMENT_DIR = 'coursier'
get_default_version = helpers.basic_get_default_version
healthy = helpers.basic_healthy
def install_environment(
        prefix: Prefix,
        version: str,
        additional_dependencies: Sequence[str],
) -> None:  # pragma: win32 no cover
    """Install every app listed in the repo's coursier channel via `cs`."""
    helpers.assert_version_default('coursier', version)
    helpers.assert_no_additional_deps('coursier', additional_dependencies)

    envdir = prefix.path(helpers.environment_dir(ENVIRONMENT_DIR, version))
    channel = prefix.path('.pre-commit-channel')
    with clean_path_on_failure(envdir):
        for app_descriptor in os.listdir(channel):
            # Strip any directory component and the extension to obtain
            # the application name understood by coursier.
            app = os.path.splitext(os.path.basename(app_descriptor))[0]
            install_cmd = (
                'cs',
                'install',
                '--default-channels=false',
                f'--channel={channel}',
                app,
                f'--dir={envdir}',
            )
            helpers.run_setup_cmd(prefix, install_cmd)
def get_env_patch(target_dir: str) -> PatchesT:  # pragma: win32 no cover
    """Return a patch that prepends *target_dir* to PATH."""
    path_entry = ('PATH', (target_dir, os.pathsep, Var('PATH')))
    return (path_entry,)
@contextlib.contextmanager
def in_env(
        prefix: Prefix,
) -> Generator[None, None, None]:  # pragma: win32 no cover
    """Context manager: run with the coursier env dir prepended to PATH."""
    target_dir = prefix.path(
        helpers.environment_dir(ENVIRONMENT_DIR, get_default_version()),
    )
    with envcontext(get_env_patch(target_dir)):
        yield
def run_hook(
        hook: Hook,
        file_args: Sequence[str],
        color: bool,
) -> tuple[int, bytes]:  # pragma: win32 no cover
    """Execute *hook* over *file_args* inside the coursier environment."""
    with in_env(hook.prefix):
        return helpers.run_xargs(hook, hook.cmd, file_args, color=color)
|
import time
import threading
import logging
import serial
import io
import sim900
import sys
if __name__ == "__main__":
#this is a bad file for recording the diode temps and voltages
#eventually it will be merged with recording the resistance bridges
#and actually use the sim900 file functions
#create an instance of the sim900 commands
sim = sim900.sim900()
#main function to records temps
try:
timestr = time.strftime("%Y%m%d-%H%M%S")
filename = "/home/heather/SRS/%s.txt" % timestr
f = open(filename, 'w+')
# The column headers for rox 3 were the opposite of the written data until 2014-10-10:
f.write("time, diode ch1 temp, dio ch 2 temp, dio 3 temp, dio 4 temp, dio 1 volts, dio 2 volts, dio 3 volts, dio 4 volts, rox 1 temp, rox 1 res, rox 2 temp, rox 2 res, rox 3 temp, rox 3 res\n")
while 1:
#get diode info
sim.connect_sim922()
dio_temps = sim.get_sim922_temp()
dio_temps = dio_temps.rstrip()
time.sleep(1)
dio_volts = sim.get_sim922_volts()
dio_v | olts = dio_volts.rstrip()
sim.close_sim922()
print "diode"
time.sleep(1)
#get rox1 info
sim.connect_sim921_1()
rox1_res = sim.get_resistance()
rox1_temp = sim.get_temp()
sim.close_sim921_1()
print "rox1"
time.sleep(1)
sim.connect_sim921()
rox2_res = sim.get_resistance()
rox2_temp = sim.get_temp()
sim.close_sim921()
| #get rox3 info
sim.connect_sim921_6()
rox3_res = sim.get_resistance()
rox3_temp = sim.get_temp()
sim.close_sim921_6()
print "rox2"
time.sleep(1)
#write it all to file
current_time = time.strftime("%Y%m%d-%H%M%S")
f.write("%s, %s, %s, %s, %s, %s, %s, %s, %s\n" % (current_time, dio_temps, dio_volts, rox1_temp, rox1_res, rox2_temp, rox2_res, rox3_temp, rox3_res))
f.flush()
except KeyboardInterrupt:
f.close()
print "done writing"
sim.close_sim922()
sim.close_sim900()
print "ports closed"
|
import virtool.subtractions.files
from sqlalchemy import select
from virtool.subtractions.models import SubtractionFile
async def test_create_subtraction_files(snapshot, tmp_path, pg, pg_session):
    """Rows should be created in SQL for the files of a subtraction dir."""
    test_dir = tmp_path / "subtractions" / "foo"
    test_dir.mkdir(parents=True)
    test_dir.joinpath("subtraction.fa.gz").write_text("FASTA file")
    test_dir.joinpath("subtraction.1.bt2").write_text("Bowtie2 file")

    subtraction_files = ["subtraction.fa.gz", "subtraction.1.bt2"]

    await virtool.subtractions.files.create_subtraction_files(
        pg, "foo", subtraction_files, test_dir
    )

    # (removed unused `rows = list()` local from the original test)
    async with pg_session as session:
        assert (
            await session.execute(select(SubtractionFile))
        ).scalars().all() == snapshot
|
import _plotly_utils.basevalidators
class ViolinmodeValidator(_plotly_utils.basevalidators.EnumeratedValidator):
    """Enumerated validator for the ``layout.violinmode`` property."""

    def __init__(self, plotly_name="violinmode", parent_name="layout", **kwargs):
        # Fill in defaults only when the caller did not supply them, then
        # forward everything to the enumerated-validator base class.
        kwargs.setdefault("edit_type", "calc")
        kwargs.setdefault("role", "info")
        kwargs.setdefault("values", ["group", "overlay"])
        super(ViolinmodeValidator, self).__init__(
            plotly_name=plotly_name, parent_name=parent_name, **kwargs
        )
|
se, pylab mode should not import any names into the user namespace.
# c.IPKernelApp.pylab_import_all = True
# The name of the IPython directory. This directory is used for logging
# configuration (through profiles), history storage, etc. The default is usually
# $HOME/.ipython. This options can also be specified through the environment
# variable IPYTHONDIR.
# c.IPKernelApp.ipython_dir = u'/home/devin/.config/ipython'
# Configure matplotlib for interactive use with the default matplotlib backend.
# c.IPKernelApp.matplotlib = None
# ONLY USED ON WINDOWS Interrupt this process when the parent is signaled.
# c.IPKernelApp.interrupt = 0
# Whether to install the default config files into the profile dir. If a new
# profile is being created, and IPython contains config files for that profile,
# then they will be staged into the new directory. Otherwise, default config
# files will be automatically generated.
# c.IPKernelApp.copy_config_files = False
# List of files to run at IPython startup.
# c.IPKernelApp.exec_files = []
# Enable GUI event loop integration with any of ('glut', 'gtk', 'gtk3', 'none',
# 'osx', 'pyglet', 'qt', 'qt4', 'tk', 'wx').
# c.IPKernelApp.gui = None
# A list of dotted module names of IPython extensions to load.
# c.IPKernelApp.extensions = []
# redirect stderr to the null device
# c.IPKernelApp.no_stderr = False
# The Logging format template
# c.IPKernelApp.log_format = '[%(name)s]%(highlevel)s %(message)s'
# set the iopub (PUB) port [default: random]
# c.IPKernelApp.iopub_port = 0
#------------------------------------------------------------------------------
# ZMQInteractiveShell configuration
#------------------------------------------------------------------------------
# A subclass of InteractiveShell for ZMQ.
# ZMQInteractiveShell will inherit config from: InteractiveShell
# Use colors for displaying information about objects. Because this information
# is passed through a pager (like 'less'), and some pagers get confused with
# color codes, this capability can be turned off.
# c.ZMQInteractiveShell.color_info = True
# A list of ast.NodeTransformer subclass instances, which will be applied to
# user input before code is run.
# c.ZMQInteractiveShell.ast_transformers = []
#
# c.ZMQInteractiveShell.history_length = 10000
# Don't call post-execute functions that have failed in the past.
# c.ZMQInteractiveShell.disable_failing_post_execute = False
# Show rewritten input, e.g. for autocall.
# c.ZMQInteractiveShel | l.show_rewritten_input = True
# Set the color scheme | (NoColor, Linux, or LightBG).
# c.ZMQInteractiveShell.colors = 'Linux'
#
# c.ZMQInteractiveShell.separate_in = '\n'
# Deprecated, use PromptManager.in2_template
# c.ZMQInteractiveShell.prompt_in2 = ' .\\D.: '
#
# c.ZMQInteractiveShell.separate_out = ''
# Deprecated, use PromptManager.in_template
# c.ZMQInteractiveShell.prompt_in1 = 'In [\\#]: '
# Enable deep (recursive) reloading by default. IPython can use the deep_reload
# module which reloads changes in modules recursively (it replaces the reload()
# function, so you don't need to change anything to use it). deep_reload()
# forces a full reload of modules whose code may have changed, which the default
# reload() function does not. When deep_reload is off, IPython will use the
# normal reload(), but deep_reload will still be available as dreload().
# c.ZMQInteractiveShell.deep_reload = False
# Make IPython automatically call any callable object even if you didn't type
# explicit parentheses. For example, 'str 43' becomes 'str(43)' automatically.
# The value can be '0' to disable the feature, '1' for 'smart' autocall, where
# it is not applied if there are no more arguments on the line, and '2' for
# 'full' autocall, where all callable objects are automatically called (even if
# no arguments are present).
# c.ZMQInteractiveShell.autocall = 0
#
# c.ZMQInteractiveShell.separate_out2 = ''
# Deprecated, use PromptManager.justify
# c.ZMQInteractiveShell.prompts_pad_left = True
#
# c.ZMQInteractiveShell.readline_parse_and_bind = ['tab: complete', '"\\C-l": clear-screen', 'set show-all-if-ambiguous on', '"\\C-o": tab-insert', '"\\C-r": reverse-search-history', '"\\C-s": forward-search-history', '"\\C-p": history-search-backward', '"\\C-n": history-search-forward', '"\\e[A": history-search-backward', '"\\e[B": history-search-forward', '"\\C-k": kill-line', '"\\C-u": unix-line-discard']
# Enable magic commands to be called without the leading %.
# c.ZMQInteractiveShell.automagic = True
#
# c.ZMQInteractiveShell.debug = False
#
# c.ZMQInteractiveShell.object_info_string_level = 0
#
# c.ZMQInteractiveShell.ipython_dir = ''
#
# c.ZMQInteractiveShell.readline_remove_delims = '-/~'
# Start logging to the default log file.
# c.ZMQInteractiveShell.logstart = False
# The name of the logfile to use.
# c.ZMQInteractiveShell.logfile = ''
#
# c.ZMQInteractiveShell.wildcards_case_sensitive = True
# Save multi-line entries as one entry in readline history
# c.ZMQInteractiveShell.multiline_history = True
# Start logging to the given file in append mode.
# c.ZMQInteractiveShell.logappend = ''
#
# c.ZMQInteractiveShell.xmode = 'Context'
#
# c.ZMQInteractiveShell.quiet = False
# Deprecated, use PromptManager.out_template
# c.ZMQInteractiveShell.prompt_out = 'Out[\\#]: '
# Set the size of the output cache. The default is 1000, you can change it
# permanently in your config file. Setting it to 0 completely disables the
# caching system, and the minimum value accepted is 20 (if you provide a value
# less than 20, it is reset to 0 and a warning is issued). This limit is
# defined because otherwise you'll spend more time re-flushing a too small cache
# than working
# c.ZMQInteractiveShell.cache_size = 1000
# 'all', 'last', 'last_expr' or 'none', specifying which nodes should be run
# interactively (displaying output from expressions).
# c.ZMQInteractiveShell.ast_node_interactivity = 'last_expr'
# Automatically call the pdb debugger after every exception.
# c.ZMQInteractiveShell.pdb = False
#------------------------------------------------------------------------------
# KernelManager configuration
#------------------------------------------------------------------------------
# Manages a single kernel in a subprocess on this host.
#
# This version starts kernels with Popen.
# KernelManager will inherit config from: ConnectionFileMixin
# The Popen Command to launch the kernel. Override this if you have a custom
# kernel command.
# c.KernelManager.kernel_cmd = []
# Set the kernel's IP address [default localhost]. If the IP address is
# something other than localhost, then Consoles on other machines will be able
# to connect to the Kernel, so be careful!
# c.KernelManager.ip = '127.0.0.1'
#
# c.KernelManager.transport = 'tcp'
# Should we autorestart the kernel if it dies.
# c.KernelManager.autorestart = False
#------------------------------------------------------------------------------
# ProfileDir configuration
#------------------------------------------------------------------------------
# An object to manage the profile directory and its resources.
#
# The profile directory is used by all IPython applications, to manage
# configuration, logging and security.
#
# This object knows how to find, create and manage these directories. This
# should be used by any code that wants to handle profiles.
# Set the profile location directly. This overrides the logic used by the
# `profile` option.
# c.ProfileDir.location = u''
#------------------------------------------------------------------------------
# Session configuration
#------------------------------------------------------------------------------
# Object for handling serialization and sending of messages.
#
# The Session object handles building messages and sending them with ZMQ sockets
# or ZMQStream objects. Objects can communicate with each other over the
# network via Session objects, and only need to work with the dict-based IPython
# message spec. The Session will handle serialization/deserialization, security,
# and metadata.
#
# Sessions support configurable serialization via packer/unpacker traits, and
# |
import logging
import socket
from functools import wraps
from django.conf import settings
from django.http import (
Http404,
HttpResponse,
HttpResponseForbidden,
HttpResponseRedirect
)
from django.shortcuts import render
from django.utils.http import is_safe_url
from django.views.decorators.cache import never_cache
from celery.messaging import establish_connection
from elasticsearch.exceptions import ConnectionError, NotFoundError
from mobility.decorators import mobile_template
from fjord.base.models import Profile
from fjord.base.urlresolvers import reverse
from fjord.search.index import get_index, get_index_stats
log = logging.getLogger('i.services')
@mobile_template('{mobile/}new_user.html')
def new_user_view(request, template=None):
    """Greet a newly arrived user, creating their profile if missing."""
    user = request.user
    if user.is_anonymous():
        # The AnonymousUser has no business here; send it to the dashboard.
        return HttpResponseRedirect(reverse('dashboard'))

    try:
        # A successful profile lookup means they've been here before; showing
        # the new-user page again is harmless, just uninteresting.
        user.profile
    except Profile.DoesNotExist:
        # Authenticated but profile-less: create the profile now.
        # We could do more with this, but we're not at the moment.
        Profile.objects.create(user=user)

    next_url = request.GET.get('next', reverse('dashboard'))
    if not is_safe_url(next_url):
        next_url = reverse('dashboard')

    return render(request, template, {'next_url': next_url})
@mobile_template('{mobile/}login_failure.html')
def login_failure(request, template=None):
    """Render the (mobile-aware) login-failure page."""
    return render(request, template)
@mobile_template('{mobile/}csrf_failure.html')
def csrf_failure(request, reason='', template=None):
    """Render the CSRF-failure page with a 403 status."""
    body = render(request, template)
    return HttpResponseForbidden(body, content_type='text/html')
def about_view(request):
    """Render the static "about" page."""
    return render(request, 'about.html')
def robots_view(request):
    """Generate a robots.txt."""
    rendered = render(request, 'robots.txt')
    return HttpResponse(rendered, content_type='text/plain')
def contribute_view(request):
    """Generate a contribute.json."""
    rendered = render(request, 'contribute.json')
    return HttpResponse(rendered, content_type='application/json')
def test_memcached(host, port):
"""Connect to memcached.
:returns: True if test passed, False if test failed.
"""
try:
s = socket.socket()
s.connect((host, port))
return True
except Exception as exc:
| log.critical('Failed to connect to memcached (%r): %s' %
((host, port), exc))
return False
finally:
s.close()
def dev_or_authorized(func):
    """Show view for admin and developer instances, else 404"""
    @wraps(func)
    def _dev_or_authorized(request, *args, **kwargs):
        allowed = (
            request.user.is_superuser
            or settings.SHOW_STAGE_NOTICE
            or settings.DEBUG
        )
        if not allowed:
            raise Http404
        return func(request, *args, **kwargs)
    return _dev_or_authorized
# Severity labels used in the monitor view's per-component output tuples.
ERROR = 'ERROR'
INFO = 'INFO'
@dev_or_authorized
@never_cache
def monitor_view(request):
    """View for services monitor."""
    # Dict of infrastructure name -> list of output tuples of (INFO,
    # msg) or (ERROR, msg)
    status = {}
    # Note: To add a new component, do your testing and then add a
    # name -> list of output tuples map to status.
    # Check memcached.
    memcache_results = []
    try:
        for cache_name, cache_props in settings.CACHES.items():
            result = True
            backend = cache_props['BACKEND']
            location = cache_props['LOCATION']
            # LOCATION can be a string or a list of strings
            # (basestring: this module is Python 2 code)
            if isinstance(location, basestring):
                location = location.split(';')
            if 'memcache' in backend:
                for loc in location:
                    # TODO: this doesn't handle unix: variant
                    ip, port = loc.split(':')
                    result = test_memcached(ip, int(port))
                    memcache_results.append(
                        (INFO, '%s:%s %s' % (ip, port, result)))
        if not memcache_results:
            memcache_results.append((ERROR, 'memcache is not configured.'))
        elif len(memcache_results) < 2:
            memcache_results.append(
                (ERROR, ('You should have at least 2 memcache servers. '
                         'You have %s.' % len(memcache_results))))
        else:
            memcache_results.append((INFO, 'memcached servers look good.'))
    except Exception as exc:
        memcache_results.append(
            (ERROR, 'Exception while looking at memcached: %s' % str(exc)))
    status['memcached'] = memcache_results
    # Check ES.
    es_results = []
    try:
        get_index_stats()
        es_results.append(
            (INFO, ('Successfully connected to ElasticSearch and index '
                    'exists.')))
    except ConnectionError as exc:
        es_results.append(
            (ERROR, 'Cannot connect to ElasticSearch: %s' % str(exc)))
    except NotFoundError:
        es_results.append(
            (ERROR, 'Index "%s" missing.' % get_index()))
    except Exception as exc:
        es_results.append(
            (ERROR, 'Exception while looking at ElasticSearch: %s' % str(exc)))
    status['ElasticSearch'] = es_results
    # Check RabbitMQ.
    rabbitmq_results = []
    try:
        rabbit_conn = establish_connection(connect_timeout=2)
        rabbit_conn.connect()
        rabbitmq_results.append(
            (INFO, 'Successfully connected to RabbitMQ.'))
    except (socket.error, IOError) as exc:
        rabbitmq_results.append(
            (ERROR, 'Error connecting to RabbitMQ: %s' % str(exc)))
    except Exception as exc:
        rabbitmq_results.append(
            (ERROR, 'Exception while looking at RabbitMQ: %s' % str(exc)))
    status['RabbitMQ'] = rabbitmq_results
    # Any single ERROR tuple anywhere flips the HTTP status to 500 so
    # external monitoring can alert on this endpoint.
    status_code = 200
    status_summary = {}
    for component, output in status.items():
        if ERROR in [item[0] for item in output]:
            status_code = 500
            status_summary[component] = False
        else:
            status_summary[component] = True
    return render(request, 'services/monitor.html',
                  {'component_status': status,
                   'status_summary': status_summary},
                  status=status_code)
class IntentionalException(Exception):
    """Raised deliberately by the error-testing view below."""
@dev_or_authorized
def throw_error(request):
    """Throw an error for testing purposes."""
    # Deliberately left unhandled so the full error-handling stack above
    # this view gets exercised.
    raise IntentionalException("Error raised for testing purposes.")
|
# Copyright 2012 Free Software Foundation, Inc.
#
# This fil | e is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more det | ails.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
'''
Blocks and utilities for Video SDL module
'''
# The presence of this file turns this directory into a Python package
import os
try:
    from video_sdl_swig import *
except ImportError:
    # In-tree (non-installed) builds keep the swig artifacts under
    # ../../swig relative to this file: extend the package search path
    # and retry the star import.
    dirname, filename = os.path.split(os.path.abspath(__file__))
    __path__.append(os.path.join(dirname, "..", "..", "swig"))
    from video_sdl_swig import *
|
#!/usr/bin/env python
#encoding:utf-8
import os
import sys
import requests
import MySQLdb
from bs4 import BeautifulSoup
from bs4 import SoupStrainer
# Required args: category index, start page id, start post-within-page id.
if len(sys.argv) != 4:
    print 'Invalid parameters!'
    exit(1)
print '=' * 60
print 'start:', sys.argv
# Which top-level forum category to crawl, and where to resume:
# (page number, post index within that page).
aim_category_id = int(sys.argv[1])
start_point = (int(sys.argv[2]), int(sys.argv[3]))
# True: download attachments to disk immediately;
# False: only record their URLs to MySQL for later download.
immediate_download = False
base_url = 'http://www.3che.com'
session = requests.Session()
# Forum credentials (intentionally blank in the repository).
username = ''
password = ''
# Mutable record describing the attachment currently being processed.
record = {
    'category': '',
    'detail_category': '',
    'post_url': '',
    'filename': '',
    'url': ''
}
# MySQL batching state: the connection is committed/recreated every 20 inserts.
sql_cnt = 0
connection = None
cursor = None
def record_to_mysql():
    """Insert the current global `record` into MySQL.

    Every 20 inserts the open connection is committed, closed, and replaced
    with a fresh one (also creates the initial connection on first call).
    """
    global sql_cnt, connection, cursor
    if sql_cnt % 20 == 0:
        if connection:
            connection.commit()
            # Close the cursor BEFORE its connection; the original closed
            # the connection first and then touched the dead cursor.
            cursor.close()
            connection.close()
        connection = MySQLdb.connect(host='', user='', passwd='', db='', port=3306, charset='utf8')
        cursor = connection.cursor()
    sql_cnt += 1
    cursor.execute('insert into san_che(`category`, `detail_category`, `post_url`, `filename`, `url`) values (%s, %s, %s, %s, %s)',
                   (record['category'], record['detail_category'], record['post_url'], record['filename'], record['url']))
def login():
    # Log the shared requests session in via the forum's ajax login endpoint
    # (looks like a Discuz!-style URL -- confirm against the target site).
    login_path = '/member.php?mod=logging&action=login&loginsubmit=yes&infloat=yes&lssubmit=yes&inajax=1'
    session.post(base_url + login_path, {'username': username, 'password': password})
def enter_directory(name):
    """Create (if needed) and chdir into *name* -- only in download mode."""
    if not immediate_download:
        return
    if not os.path.exists(name):
        os.mkdir(name)
    os.chdir(name)
def get_soup(url, parse_only=None):
    # Fetch with the authenticated session; parse_only (a SoupStrainer)
    # restricts parsing to the interesting fragment for speed.
    text = session.get(url).text
    return BeautifulSoup(text, 'lxml', parse_only=parse_only)
def download_file(url, filename):
print 'Downloading:', filename, '=>', url
record['url'] = url
record['filename'] = filename
if immediate_download:
with open(filename, 'w') as fp:
res = requests.get(url)
fp.write(res.content)
fp.close()
else:
record_to_mysql()
def crawl_file(url, filename):
    # The attachment page hides the real file URL inside form#attachpayform;
    # the last <a> in its table is the download link.
    try:
        soup = get_soup(url, SoupStrainer(id='attachpayform'))
        attach_form = soup.find('form', id='attachpayform')
        link = attach_form.table.find_all('a')[-1]
    except Exception as e:
        # Missing or unexpected markup: log and skip this attachment.
        print 'Error! file url:', url
    else:
        download_file(link['href'], filename)
# Crawl detail data of one post.
def crawl_detail(detail_category, title, detail_url):
    print '-' * 100
    print 'Crawling Post:', detail_category, title, '=>', detail_url
    record['detail_category'] = detail_category
    record['post_url'] = detail_url
    # Enter detail directory.
    enter_directory(detail_category)
    # Prefix the directory with the id-like token from the URL so directory
    # names stay unique across posts with identical titles.
    prefix = detail_url.rsplit('/', 1)[-1].split('.', 1)[0]
    enter_directory(prefix + title)
    # Attachment names live in <p class="attnm"> elements of the post page.
    soup = get_soup(detail_url, SoupStrainer('p', {'class': 'attnm'}))
    attnms = soup.find_all('p', {'class': 'attnm'})
    for attnm in attnms:
        url = '{0}/{1}'.format(base_url, attnm.a['href'])
        crawl_file(url, attnm.a.text.strip(u'[下载]'))
    # Leave detail directory.
    if immediate_download:
        os.chdir('../..')
# Crawl data of one category.
def crawl_category(category, list_url):
    print '=' * 100
    print 'Crawling category:', category, '=>', list_url
    record['category'] = category
    # Create corresponding directory and enter.
    enter_directory(category)
    cur_page_id = 0
    url = list_url
    # Walk the paginated thread list until there is no "next page" link.
    while url is not None:
        cur_page_id += 1
        print 'Crawling page url:', url
        soup = get_soup(url, SoupStrainer('span'))
        xsts = soup.find_all('span', {'class': 'xst'})
        # Skip pages/posts before the resume point from the command line.
        if cur_page_id >= start_point[0]:
            cur_in_page_id = 0
            for xst in xsts:
                cur_in_page_id += 1
                detail = xst.find('a', {'class': 'xst'})
                if cur_page_id > start_point[0] or cur_in_page_id >= start_point[1]:
                    crawl_detail(xst.em and xst.em.a.text or '', detail.text, detail['href'])
        # The pager's next-page anchor sits right after its <label>.
        page_footer = soup.find('span', id='fd_page_top')
        next_link = page_footer.label.next_sibling
        if next_link is not None:
            url = next_link['href']
        else:
            url = None
    # Leave the directory.
    if immediate_download:
        os.chdir('..')
if __name__ == '__main__':
    login()
    # Extract categories from home page.
    soup = get_soup(base_url, SoupStrainer(id='nv'))
    category_lis = soup.find('div', id='nv').ul.find_all('li')
    categories = map(lambda x: (x.a.text, x.a['href']), category_lis)
    # Drop the "home" entry, whose link is just '/'.
    categories = filter(lambda x: x[1] != '/', categories)
    # Crawl only the category selected on the command line.
    crawl_category(categories[aim_category_id][0], categories[aim_category_id][1])
    # for category in categories:
    #     crawl_category(category[0], category[1])
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJA | NGO_SETTINGS_MODULE", "huntnet.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.a | rgv)
|
# Copyright | (C) 2016 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
from ggrc import db
from ggrc.models.mixins import CustomAttributable, BusinessO | bject, Timeboxed
from ggrc.models.object_document import Documentable
from ggrc.models.object_person import Personable
from ggrc.models.object_owner import Ownable
from ggrc.models.relationship import Relatable
from ggrc.models.track_object_state import HasObjectState, track_state_for_class
class Threat(
    HasObjectState, CustomAttributable, Documentable, Personable,
    Relatable, Timeboxed, Ownable, BusinessObject, db.Model):
  """Threat business object, persisted in the 'threats' table.

  All behavior comes from the mixins: object state tracking, custom
  attributes, document/person mappings, relationships, time-boxing and
  ownership.
  """
  __tablename__ = 'threats'
  # NOTE(review): _aliases appears to define alternate column names/filters
  # for this model (e.g. import/export or search) -- confirm against the
  # mixin/converter code that consumes it.
  _aliases = {
      "contact": {
          "display_name": "Contact",
          "filter_by": "_filter_by_contact",
      },
      "secondary_contact": None,
      "url": "Threat URL",
  }
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Add Member.avatar (URL field with a default image) and give
    Member.description an empty-string default."""
    dependencies = [
        ('member', '0007_auto_20150501_2124'),
    ]
    operations = [
        migrations.AddField(
            model_name='member',
            name='avatar',
            # verbose_name is the Chinese label for "avatar".
            field=models.URLField(default='https://dn-tmp.qbox.me/chendian/cat_mouse_reading.jpg', verbose_name='\u5934\u50cf', blank=True),
        ),
        migrations.AlterField(
            model_name='member',
            # verbose_name is the Chinese label for "personal introduction".
            name='description',
            field=models.TextField(default='', verbose_name='\u4e2a\u4eba\u4ecb\u7ecd', blank=True),
        ),
    ]
|
"""
Draw a grid of boxes on stdout.

Prompts for the number of box columns/rows and the interior height/width
of each box, then prints the grid with '+', '-' and '|' characters.

(Original author's note: "I came up with this the first try. So, that's
why this is posted in duplicate.")
"""
import sys


def build_grid(rows, columns, tall, wide):
    """Return the grid as a list of text lines (no line terminators).

    rows/columns: number of boxes vertically/horizontally.
    tall/wide: interior height/width of each box.
    A negative ``rows`` yields no lines, matching the original loop.
    """
    if rows < 0:
        return []
    # Pre-build the two repeating row patterns instead of printing
    # character by character as the original nested while-loops did.
    border = "+" + ("-" * wide + "+") * columns
    interior = "|" + (" " * wide + "|") * columns
    lines = []
    for _ in range(rows):
        lines.append(border)
        lines.extend([interior] * tall)
    lines.append(border)  # closing border below the last row of boxes
    return lines


def main():
    try:
        columns = int(input("How many columns? "))
        rows = int(input("How many rows? "))
        tall = int(input("How tall should the boxes be? "))
        wide = int(input("How wide should the boxes be? "))
    except Exception as e:
        # Broad on purpose: any bad input (including EOF) ends the
        # program with a hint, as in the original.
        print(e)
        print("You have fail")
        print("Try type valid integer")
        sys.exit(1)
    # The original emitted "\r" before each newline; keep that byte-for-byte.
    for line in build_grid(rows, columns, tall, wide):
        print(line + "\r")
    sys.exit(0)


if __name__ == "__main__":
    main()
|
def extractTranslasiSanusiMe(item):
    """
    Parser for 'translasi.sanusi.me'
    """
    title = item['title']
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(title)
    # Drop items with no chapter/volume info and preview posts.
    if "preview" in title.lower() or not (chp or vol):
        return None
    known_groups = (
        ('PRC', 'PRC', 'translated'),
        ('Loiterous', 'Loiterous', 'oel'),
    )
    for tag, group_name, release_type in known_groups:
        if tag not in item['tags']:
            continue
        return buildReleaseMessageWithType(item, group_name, vol, chp, frag=frag, postfix=postfix, tl_type=release_type)
    return False
|
HT),
MultiContentEntryText(pos=(0, 40), size=(250, 20), font=0, text=_("Uphops: ") + uphops),
MultiContentEntryText(pos=(250, 40), size=(250, 20), font=0, text=_("Maxdown: ") + maxdown, flags=RT_HALIGN_RIGHT)]
return res
def CCcamShareViewListEntry(caidprovider, providername, numberofcards, numberofreshare):
    """Build one share-view list row (provider name, card count, reshare
    count), using larger coordinates/fonts on a 1920px-wide desktop."""
    is_full_hd = getDesktop(0).size().width() == 1920
    if is_full_hd:
        entry = [
            (caidprovider, providername, numberofcards),
            MultiContentEntryText(pos=(10, 5), size=(800, 35), font=1, text=providername),
            MultiContentEntryText(pos=(1050, 5), size=(50, 35), font=1, text=numberofcards, flags=RT_HALIGN_RIGHT),
            MultiContentEntryText(pos=(1100, 5), size=(50, 35), font=1, text=numberofreshare, flags=RT_HALIGN_RIGHT),
        ]
    else:
        entry = [
            (caidprovider, providername, numberofcards),
            MultiContentEntryText(pos=(0, 0), size=(430, 20), font=0, text=providername),
            MultiContentEntryText(pos=(430, 0), size=(50, 20), font=0, text=numberofcards, flags=RT_HALIGN_RIGHT),
            MultiContentEntryText(pos=(480, 0), size=(50, 20), font=0, text=numberofreshare, flags=RT_HALIGN_RIGHT),
        ]
    return entry
def CCcamConfigListEntry(file):
    """Build one config-file list row.

    Shows *file* with a lock icon: lock_on when its content matches the
    currently active CCcam config (CFG), lock_off otherwise.
    """
    screenwidth = getDesktop(0).size().width()
    res = [file]
    # Read the active config; a missing/unreadable CFG just means "no match"
    # rather than a crash.  (Original leaked the handle if read() raised and
    # used a bare except.)
    try:
        with open(CFG, "r") as f:
            org = f.read()
    except (IOError, OSError):
        org = ""
    (name, content) = getConfigNameAndContent(file)
    if content == org:
        png = lock_on
    else:
        png = lock_off
    if screenwidth and screenwidth == 1920:
        res.append(MultiContentEntryPixmapAlphaBlend(pos=(5, 5), size=(50, 50), png=png))
        res.append(MultiContentEntryText(pos=(85, 5), size=(800, 35), font=1, text=name))
    else:
        res.append(MultiContentEntryPixmapAlphaBlend(pos=(2, 2), size=(25, 25), png=png))
        res.append(MultiContentEntryText(pos=(35, 2), size=(550, 25), font=0, text=name))
    return res
def CCcamMenuConfigListEntry(name, blacklisted):
    """Build one menu-config row: *name* plus a lock icon that is off for
    blacklisted entries and on for active ones."""
    entry = [name]
    png = lock_off if blacklisted else lock_on
    if getDesktop(0).size().width() == 1920:
        entry.append(MultiContentEntryPixmapAlphaBlend(pos=(5, 5), size=(50, 50), png=png))
        entry.append(MultiContentEntryText(pos=(85, 5), size=(800, 35), font=1, text=name))
    else:
        entry.append(MultiContentEntryPixmapAlphaBlend(pos=(2, 2), size=(25, 25), png=png))
        entry.append(MultiContentEntryText(pos=(35, 2), size=(550, 25), font=0, text=name))
    return entry
#############################################################
class CCcamInfoMain(Screen):
    def __init__(self, session):
        """Main CCcam Info screen: builds the menu widget and the key map."""
        Screen.__init__(self, session)
        Screen.setTitle(self, _("CCcam Info"))
        self.session = session
        # Menu widget; populated later by updateMenuList().
        self["menu"] = CCcamList([])
        # Re-entrancy guard: key handlers are ignored while a request runs.
        self.working = False
        self.Console = Console()
        if config.cccaminfo.profile.value == "":
            # No saved profile -> derive the URL from the local CCcam config.
            self.readConfig()
        else:
            self.url = config.cccaminfo.profile.value
        # Digit keys select menu entries directly; color/menu/info keys map
        # to fixed entries via their own handlers below.
        self["actions"] = NumberActionMap(["CCcamInfoActions"],
            {
                "1": self.keyNumberGlobal,
                "2": self.keyNumberGlobal,
                "3": self.keyNumberGlobal,
                "4": self.keyNumberGlobal,
                "5": self.keyNumberGlobal,
                "6": self.keyNumberGlobal,
                "7": self.keyNumberGlobal,
                "8": self.keyNumberGlobal,
                "9": self.keyNumberGlobal,
                "0": self.keyNumberGlobal,
                "red": self.red,
                "green": self.green,
                "yellow": self.yellow,
                "blue": self.blue,
                "menu": self.menu,
                "info": self.info,
                "ok": self.okClicked,
                "cancel": self.close,
                "up": self.up,
                "down": self.down,
                "left": self.left,
                "right": self.right
            }, -2)
        self.onLayoutFinish.append(self.updateMenuList)
def updateMenuList(self):
self.working = True
self.menu_list = []
for x in self.menu_list:
del self.menu_list[0]
list = []
idx = 0
for x in menu_list:
if notBlackListed(x):
list.append(CCcamListEntry(x, idx))
self.menu_list.append(x)
idx += 1
self["menu"].setList(list)
self.working = False
def readConfig(self):
self.url = "http://127.0.0.1:16001"
username = None
password = None
try:
f = open(CFG, 'r')
for l in f:
if l.startswith('WEBINFO LISTEN PORT :'):
port = getConfigValue(l)
if port != "":
self.url = self.url.replace('16001', port)
elif l.startswith('WEBINFO USERNAME :'):
username = getConfigValue(l)
elif l.startswith('WEBINFO PASSWORD :'):
password = getConfigValue(l)
f.close()
except:
pass
if (username is not None) and (password is not None) and (username != "") and (password != ""):
self.url = self.url.replace('http://', ("http://%s:%s@" % (username, password)))
config.cccaminfo.profile.value = ""
config.cccaminfo.profile.save()
def profileSelected(self, url=None):
if url is not None:
self.url = url
config.cccaminfo.profile.value = self.url
config.cccaminfo.profile.save()
self.showInfo(_("New profile: ") + url, _("Profile"))
else:
self.showInfo(_("Using old profile: ") + self.url, _("Profile"))
def keyNumberGlobal(self, idx):
if self.working == False and (idx < len(self.menu_list)):
self.working = True
sel = self.menu_list[idx]
if sel == _("General"):
getPage(self.url).addCallback(self.showCCcamGeneral).addErrback(self.getWebpageError)
elif sel == _("Clients"):
getPage(self.url + "/clients").addCallback(self.showCCcamClients).addErrback(self.getWebpageError)
elif sel == _("Active clients"):
getPage(self.url + "/activeclients").addCallback(self.showCCcamClients).addErrback(self.getWebpageError)
elif sel == _("Servers"):
getPage(self.url + "/servers").addCallback(self.showCCcamServers).addErrback(self.getWebpageError)
elif sel == _("Shares"):
getPage(self.url + "/shares").addCallback(self.showCCcamShares).addErrback(self.getWebpageError)
elif sel == _("Share View"):
self.session.openWithCallback(self.workingFinished, CCcamShareViewMenu, self.url)
elif sel == _("Extended Shares"):
self.session.openWithCallback(self.workingFinished, CCcamInfoShareInfo, "None", self.url)
elif sel == _("Providers"):
getPage(self.url + "/providers").addCallback(self.showCCcamProviders).addErrback(self.getWebpageError)
elif sel == _("Entitlements"):
getPage(self.url + "/entitlements").addCallback(self.showCCcamEntitlements).addErrback(self.getWebpageError)
elif sel == _("ecm.info"):
self.session.openWithCallback(self.showEcmInfoFile, CCcamInfoEcmInfoSelection)
elif sel == _("Menu config"):
self.session.openWithCallback(self.updateMenuList, CCcamInfoMenuConfig)
elif sel == _("Local box"):
self.readConfig()
self.showInfo(_("Profile: Local box"), _("Local box"))
elif sel == _("Remote box"):
self.session.openWithCallback(self.profileSelected, CCcamInfoRemoteBoxMenu)
elif sel == _("Free memory"):
if not self.Console:
self.Console = Console()
self.working = True
self.Console.ePopen("free", self.showFreeMemory)
elif sel == _("Switch config"):
self.session.openWithCallback(self.workingFinished, CCcamInfoConfigSwitcher)
else:
self.showInfo(_("CCcam Info %s\nby AliAbdul %s\n\nThis plugin shows you the status of your CCcam.") % (VERSION, DATE), _("About"))
    def red(self):
        # Color, menu and info buttons select fixed menu positions (10-15).
        self.keyNumberGlobal(10)
    def green(self):
        self.keyNumberGlobal(11)
    def yellow(self):
        self.keyNumberGlobal(12)
    def blue(self):
        self.keyNumberGlobal(13)
    def menu(self):
        self.keyNumberGlobal(14)
    def info(self):
        self.keyNumberGlobal(15)
    def okClicked(self):
        # OK activates the currently highlighted menu entry.
        self.keyNumberGlobal(self["menu"].getSelectedIndex())
def up(self):
if not self.working:
self["menu"].up()
def down(self):
if not self.working:
self["menu"].down()
def left(self):
if not self.working:
self["menu"].pageUp()
def right(self):
if not self.working:
self["menu"].pageDown()
    def getWebpageError(self, error=""):
        # Errback for getPage(): log the failure and show an error dialog;
        # workingFinished re-enables key handling when the dialog closes.
        print str(error)
        self.session.openWithCallback(self.workingFinished, MessageBox, _("Error reading webpage!"), MessageBox.TYPE_ERROR)
def showFile(self, file):
try:
f = open(file, "r")
content = f.read()
f.close()
except:
content = _("Could not open the file %s!") % file
self.showInfo(translateBlock(content), " ")
def showEcmInfoFile(self, file=None):
if file is not None:
self.showFile("/t |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.11 on 2016-11-28 13:41
# flake8: noqa
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
import normandy.recipes.validators
class Migration(migrations.Migration):
    """Replace the Approval/ApprovalRequest/ApprovalRequestComment models
    with RecipeRevision and rewire Recipe to track its latest revision
    (generated schema migration; companion to 0033_migrate_surveys)."""
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('recipes', '0033_migrate_surveys'),
    ]
    operations = [
        migrations.CreateModel(
            name='RecipeRevision',
            fields=[
                ('id', models.CharField(max_length=64, primary_key=True, serialize=False)),
                ('created', models.DateTimeField(default=django.utils.timezone.now)),
                ('updated', models.DateTimeField(default=django.utils.timezone.now)),
                ('comment', models.TextField()),
                ('name', models.CharField(max_length=255)),
                ('arguments_json', models.TextField(default='{}', validators=[normandy.recipes.validators.validate_json])),
                ('filter_expression', models.TextField()),
                ('action', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='recipe_revisions', to='recipes.Action')),
                # Revisions form a chain: each revision may have one parent
                # and at most one child.
                ('parent', models.OneToOneField(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='child', to='recipes.RecipeRevision')),
            ],
        ),
        # Drop all FK fields of the approval models before deleting them.
        migrations.RemoveField(
            model_name='approval',
            name='creator',
        ),
        migrations.RemoveField(
            model_name='approvalrequest',
            name='approval',
        ),
        migrations.RemoveField(
            model_name='approvalrequest',
            name='creator',
        ),
        migrations.RemoveField(
            model_name='approvalrequest',
            name='recipe',
        ),
        migrations.RemoveField(
            model_name='approvalrequestcomment',
            name='approval_request',
        ),
        migrations.RemoveField(
            model_name='approvalrequestcomment',
            name='creator',
        ),
        migrations.AlterModelOptions(
            name='recipe',
            options={'ordering': ['-enabled', '-latest_revision__updated']},
        ),
        migrations.RemoveField(
            model_name='recipe',
            name='approval',
        ),
        migrations.DeleteModel(
            name='Approval',
        ),
        migrations.DeleteModel(
            name='ApprovalRequest',
        ),
        migrations.DeleteModel(
            name='ApprovalRequestComment',
        ),
        migrations.AddField(
            model_name='reciperevision',
            name='recipe',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='revisions', to='recipes.Recipe'),
        ),
        migrations.AddField(
            model_name='reciperevision',
            name='user',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL,related_name='recipe_revisions', to=settings.AUTH_USER_MODEL)
        ),
        migrations.AddField(
            model_name='recipe',
            name='latest_revision',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='latest_for_recipe', to='recipes.RecipeRevision'),
        ),
        # Recipe fields become nullable: they now live on RecipeRevision.
        migrations.AlterField(
            model_name='recipe',
            name='action',
            field=models.ForeignKey(to='recipes.Action', null=True),
        ),
        migrations.AlterField(
            model_name='recipe',
            name='name',
            field=models.CharField(max_length=255, unique=False, null=True),
        ),
    ]
|
"""
Creates a list of studies currently being used for synthesis.
"""
import re
#from stephen_desktop_conf import *
from microbes import studytreelist as microbelist
from plants import studytreelist as plantslist
from metazoa import studytreelist as metalist
from fungi impor | t studytreelist as fungilist
# Merge the per-group study/tree lists into one synthesis list.
studytreelist = []
studytreelist.extend(plantslist)
studytreelist.extend(metalist)
studytreelist.extend(fungilist)
studytreelist.extend(microbelist)
# Each entry looks like '<studyid>_<treeid>'; print one JSON filename per
# study (duplicate study ids will print more than once).
for i in studytreelist:
    studyid=i.split('_')[0]
    print studyid+".json"
|
# before we break json, grab a copy of the orig_dumps function
_orig_dumps = json.dumps
delattr(json, 'loads')
reload(c)
if _orig_dumps:
# basic test of swift.common.client.json_loads using json.loads
data = {
'string': 'value',
'int': 0,
'bool': True,
'none': None,
}
json_string = _orig_dumps(data)
else:
# even more basic test using a hand encoded json string
data = ['value1', 'value2']
json_string = "['value1', 'value2']"
self.assertEquals(data, c.json_loads(json_string))
self.assertRaises(AttributeError, c.json_loads, self)
class MockHttpTest(unittest.TestCase):
    """Base class for client tests.

    setUp() installs self.fake_http_connection, a factory that a test can
    assign over c.http_connection; *args/**kwargs (e.g. a status code) are
    forwarded to fake_http_connect to produce canned responses.
    """
    def setUp(self):
        def fake_http_connection(*args, **kwargs):
            # Bind the real factory now, before the test replaces
            # c.http_connection with the fake.
            _orig_http_connection = c.http_connection
            def wrapper(url, proxy=None):
                # Parse the URL with the real code, but return a fake
                # connection object for the actual traffic.
                parsed, _conn = _orig_http_connection(url, proxy=proxy)
                conn = fake_http_connect(*args, **kwargs)()
                def request(*args, **kwargs):
                    return
                conn.request = request
                # Record whether the response body was consumed; some
                # client paths must read the body even on errors.
                conn.has_been_read = False
                _orig_read = conn.read
                def read(*args, **kwargs):
                    conn.has_been_read = True
                    return _orig_read(*args, **kwargs)
                conn.read = read
                return parsed, conn
            return wrapper
        self.fake_http_connection = fake_http_connection
    def tearDown(self):
        # Undo whatever the test monkeypatched on the client module.
        reload(c)
# TODO: following tests are placeholders, need more tests, better coverage
class TestGetAuth(MockHttpTest):
    def test_ok(self):
        # With the fake connection, get_auth yields (None, None) on a 200.
        c.http_connection = self.fake_http_connection(200)
        url, token = c.get_auth('http://www.test.com', 'asdf', 'asdf')
        self.assertEquals(url, None)
        self.assertEquals(token, None)
class TestGetAccount(MockHttpTest):
    def test_no_content(self):
        # 204 No Content -> empty container listing.
        c.http_connection = self.fake_http_connection(204)
        value = c.get_account('http://www.test.com', 'asdf')[1]
        self.assertEquals(value, [])
class TestHeadAccount(MockHttpTest):
    def test_ok(self):
        c.http_connection = self.fake_http_connection(200)
        value = c.head_account('http://www.tests.com', 'asdf')
        # TODO: Hmm. This doesn't really test too much as it uses a fake that
        # always returns the same dict. I guess it "exercises" the code, so
        # I'll leave it for now.
        self.assertEquals(type(value), dict)
    def test_server_error(self):
        # Any 5xx must surface as ClientException.
        c.http_connection = self.fake_http_connection(500)
        self.assertRaises(c.ClientException, c.head_account,
                          'http://www.tests.com', 'asdf')
class TestGetContainer(MockHttpTest):
    def test_no_content(self):
        # 204 No Content -> empty object listing.
        c.http_connection = self.fake_http_connection(204)
        value = c.get_container('http://www.test.com', 'asdf', 'asdf')[1]
        self.assertEquals(value, [])
class TestHeadContainer(MockHttpTest):
    def test_server_error(self):
        c.http_connection = self.fake_http_connection(500)
        self.assertRaises(c.ClientException, c.head_container,
                          'http://www.test.com', 'asdf', 'asdf',
                          )
class TestPutContainer(MockHttpTest):
    def test_ok(self):
        c.http_connection = self.fake_http_connection(200)
        value = c.put_container('http://www.test.com', 'asdf', 'asdf')
        self.assertEquals(value, None)
class TestDeleteContainer(MockHttpTest):
    def test_ok(self):
        c.http_connection = self.fake_http_connection(200)
        value = c.delete_container('http://www.test.com', 'asdf', 'asdf')
        self.assertEquals(value, None)
class TestGetObject(MockHttpTest):
    def test_server_error(self):
        c.http_connection = self.fake_http_connection(500)
        self.assertRaises(c.ClientException, c.get_object,
                          'http://www.test.com', 'asdf', 'asdf', 'asdf')
class TestHeadObject(MockHttpTest):
    def test_server_error(self):
        c.http_connection = self.fake_http_connection(500)
        self.assertRaises(c.ClientException, c.head_object,
                          'http://www.test.com', 'asdf', 'asdf', 'asdf')
class TestPutObject(MockHttpTest):
    def test_ok(self):
        # put_object returns the object's etag (a string).
        c.http_connection = self.fake_http_connection(200)
        args = ('http://www.test.com', 'asdf', 'asdf', 'asdf', 'asdf')
        value = c.put_object(*args)
        self.assertTrue(isinstance(value, basestring))
    def test_server_error(self):
        c.http_connection = self.fake_http_connection(500)
        args = ('http://www.test.com', 'asdf', 'asdf', 'asdf', 'asdf')
        self.assertRaises(c.ClientException, c.put_object, *args)
class TestPostObject(MockHttpTest):
    def test_ok(self):
        c.http_connection = self.fake_http_connection(200)
        args = ('http://www.test.com', 'asdf', 'asdf', 'asdf', {})
        value = c.post_object(*args)
    def test_server_error(self):
        c.http_connection = self.fake_http_connection(500)
        self.assertRaises(c.ClientException, c.post_object,
                          'http://www.test.com', 'asdf', 'asdf', 'asdf', {})
class TestDeleteObject(MockHttpTest):
    def test_ok(self):
        c.http_connection = self.fake_http_connection(200)
        value = c.delete_object('http://www.test.com', 'asdf', 'asdf', 'asdf')
    def test_server_error(self):
        c.http_connection = self.fake_http_connection(500)
        self.assertRaises(c.ClientException, c.delete_object,
                          'http://www.test.com', 'asdf', 'asdf', 'asdf')
class TestConnection(MockHttpTest):
    def test_instance(self):
        # A fresh Connection defaults to 5 retries.
        conn = c.Connection('http://www.test.com', 'asdf', 'asdf')
        self.assertEquals(conn.retries, 5)
    def test_retry(self):
        # A persistent 500 should be retried: retries + 1 total attempts.
        c.http_connection = self.fake_http_connection(500)
        def quick_sleep(*args):
            pass
        # Replace the backoff sleep so the test runs instantly.
        c.sleep = quick_sleep
        conn = c.Connection('http://www.test.com', 'asdf', 'asdf')
        self.assertRaises(c.ClientException, conn.head_account)
        self.assertEquals(conn.attempts, conn.retries + 1)
    def test_resp_read_on_server_error(self):
        # Every API method must read the response body on a server error
        # (tracked via the has_been_read flag set up in MockHttpTest) so
        # the connection can be safely reused.
        c.http_connection = self.fake_http_connection(500)
        conn = c.Connection('http://www.test.com', 'asdf', 'asdf', retries=0)
        def get_auth(*args, **kwargs):
            return 'http://www.new.com', 'new'
        conn.get_auth = get_auth
        self.url, self.token = conn.get_auth()
        # One entry per public method: (bound method, positional args).
        method_signatures = (
            (conn.head_account, []),
            (conn.get_account, []),
            (conn.head_container, ('asdf',)),
            (conn.get_container, ('asdf',)),
            (conn.put_container, ('asdf',)),
            (conn.delete_container, ('asdf',)),
            (conn.head_object, ('asdf', 'asdf')),
            (conn.get_object, ('asdf', 'asdf')),
            (conn.put_object, ('asdf', 'asdf', 'asdf')),
            (conn.post_object, ('asdf', 'asdf', {})),
            (conn.delete_object, ('asdf', 'asdf')),
        )
        for method, args in method_signatures:
            self.assertRaises(c.ClientException, method, *args)
            try:
                self.assertTrue(conn.http_conn[1].has_been_read)
            except AssertionError:
                msg = '%s did not read resp on server error' % method.__name__
                self.fail(msg)
            except Exception, e:
                # Re-raise with the failing method's name for diagnosis.
                raise e.__class__("%s - %s" % (method.__name__, e))
def test_reauth(self):
c.http_connection = self.fake_http_connection(401)
def get_auth(*args, **kwargs):
return 'http://www.new.com', 'new'
def swap_sleep(*args):
self.swap_sleep_called = True
c.get_auth = get_auth
c.http_connection = self.fake_http_connection(200)
c.sleep = swap_sleep
self.swap_sleep_called = False
conn = c.Connection('http://www.test.com', 'asdf', 'asdf',
preauthurl='http:/ |
#!/usr/bin/python
# *****************************************************************************
#
# Copyright (c) 2016, EPAM SYSTEMS INC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ******************************************************************************
from fabric.api import *
import argparse
import os
import sys
import time
from fabric.api import lcd
from fabric.contrib.files import exists
from fabvenv import virtualenv
from dlab.notebook_lib import *
from dlab.actions_lib import *
from dlab.fab import *
from dlab.common_lib import *
# Command-line interface: parameters passed in by the code that invokes
# this kernel-configuration script on the notebook node.
parser = argparse.ArgumentParser()
parser.add_argument('--cluster_name', type=str, default='')
parser.add_argument('--dry_run', type=str, default='false')
parser.add_argument('--spark_version', type=str, default='')
parser.add_argument('--hadoop_version', type=str, default='')
parser.add_argument('--os_user', type=str, default='')
parser.add_argument('--spark_master', type=str, default='')
parser.add_argument('--region', type=str, default='')
parser.add_argument('--datalake_enabled', type=str, default='')
parser.add_argument('--r_enabled', type=str, default='')
args = parser.parse_args()
# Derived locations used throughout the kernel installers below.
kernels_dir = '/home/' + args.os_user + '/.local/share/jupyter/kernels/'
cluster_dir = '/opt/' + args.cluster_name + '/'
local_jars_dir = '/opt/jars/'
spark_version = args.spark_version
hadoop_version = args.hadoop_version
scala_link = "http://www.scala-lang.org/files/archive/"
# Official Apache archive URL for the requested Spark/Hadoop combination.
spark_link = "https://archive.apache.org/dist/spark/spark-" + spark_version + "/spark-" + spark_version + \
             "-bin-hadoop" + hadoop_version + ".tgz"
def r_kernel(args):
    """Install the R kernel spec for the cluster by filling in the
    placeholders of the r_dataengine kernel.json template."""
    cluster = args.cluster_name
    spark_path = '/opt/{}/spark/'.format(cluster)
    local('mkdir -p {}/r_{}/'.format(kernels_dir, cluster))
    kernel_path = "{}/r_{}/kernel.json".format(kernels_dir, cluster)
    template_file = "/tmp/{}/r_dataengine_template.json".format(cluster)
    r_version = local("R --version | awk '/version / {print $3}'", capture = True)
    with open(template_file, 'r') as fh:
        kernel_spec = fh.read()
    # Placeholder -> value substitutions applied to the template.
    substitutions = (
        ('CLUSTER_NAME', cluster),
        ('SPARK_PATH', spark_path),
        ('SPARK_VERSION', 'Spark-' + args.spark_version),
        ('R_KERNEL_VERSION', 'R-{}'.format(str(r_version))),
        ('SPARK_ACTION', 'init()'),
        ('MASTER', args.spark_master),
    )
    for placeholder, value in substitutions:
        kernel_spec = kernel_spec.replace(placeholder, value)
    with open(kernel_path, 'w') as fh:
        fh.write(kernel_spec)
def toree_kernel(args):
    """Install the Apache Toree (Scala) kernel for the cluster: unpack the
    kernel archive, fill in the kernel.json and run.sh templates."""
    spark_path = '/opt/' + args.cluster_name + '/spark/'
    scala_version = local('scala -e "println(scala.util.Properties.versionNumberString)"', capture=True)
    local('mkdir -p ' + kernels_dir + 'toree_' + args.cluster_name + '/')
    local('tar zxvf /tmp/{}/toree_kernel.tar.gz -C '.format(args.cluster_name) + kernels_dir + 'toree_' + args.cluster_name + '/')
    kernel_path = kernels_dir + "toree_" + args.cluster_name + "/kernel.json"
    template_file = "/tmp/{}/toree_dataengine_template.json".format(args.cluster_name)
    with open(template_file, 'r') as f:
        text = f.read()
    # Fill in the placeholders of the kernel.json template.
    text = text.replace('CLUSTER_NAME', args.cluster_name)
    text = text.replace('SPARK_VERSION', 'Spark-' + args.spark_version)
    text = text.replace('SPARK_PATH', spark_path)
    text = text.replace('OS_USER', args.os_user)
    text = text.replace('MASTER', args.spark_master)
    text = text.replace('SCALA_VERSION', scala_version)
    with open(kernel_path, 'w') as f:
        f.write(text)
    local('touch /tmp/{}/kernel_var.json'.format(args.cluster_name))
    # Substitute the PY4J placeholder with the cluster's py4j zip path via
    # shell, then move the patched file into place.
    local(
        "PYJ=`find /opt/" + args.cluster_name +
        "/spark/ -name '*py4j*.zip' | tr '\\n' ':' | sed 's|:$||g'`; cat " + kernel_path +
        " | sed 's|PY4J|'$PYJ'|g' > /tmp/{}/kernel_var.json".format(args.cluster_name))
    local('sudo mv /tmp/{}/kernel_var.json '.format(args.cluster_name) + kernel_path)
    # Also generate the kernel's run.sh launcher from its template.
    run_sh_path = kernels_dir + "toree_" + args.cluster_name + "/bin/run.sh"
    template_sh_file = '/tmp/{}/run_template.sh'.format(args.cluster_name)
    with open(template_sh_file, 'r') as f:
        text = f.read()
    text = text.replace('CLUSTER_NAME', args.cluster_name)
    text = text.replace('OS_USER', args.os_user)
    with open(run_sh_path, 'w') as f:
        f.write(text)
def pyspark_kernel(args):
    """Install the PySpark Jupyter kernels (python 2.7 and python 3.5)
    for the cluster.

    The two halves of the original were copy-pasted duplicates differing
    only in kernel directory prefix and python version/path; the shared
    logic now lives in _install_pyspark_kernel().
    """
    spark_path = '/opt/' + args.cluster_name + '/spark/'
    _install_pyspark_kernel(args, spark_path, 'pyspark_', '2.7', '2.7', '/usr/bin/python2.7')
    _install_pyspark_kernel(args, spark_path, 'py3spark_', '3.5', '3.5', '/usr/bin/python3.5')
def _install_pyspark_kernel(args, spark_path, kernel_prefix, python_short_version, python_full_version, python_path):
    """Write one kernel.json from the pyspark template, then patch the
    PY4J placeholder (plus caffe/pytorch PYTHONPATH entries) via shell."""
    local('mkdir -p ' + kernels_dir + kernel_prefix + args.cluster_name + '/')
    kernel_path = kernels_dir + kernel_prefix + args.cluster_name + "/kernel.json"
    template_file = "/tmp/{}/pyspark_dataengine_template.json".format(args.cluster_name)
    with open(template_file, 'r') as f:
        text = f.read()
    text = text.replace('CLUSTER_NAME', args.cluster_name)
    text = text.replace('SPARK_VERSION', 'Spark-' + spark_version)
    text = text.replace('SPARK_PATH', spark_path)
    text = text.replace('PYTHON_SHORT_VERSION', python_short_version)
    text = text.replace('PYTHON_FULL_VERSION', python_full_version)
    text = text.replace('MASTER', args.spark_master)
    text = text.replace('PYTHON_PATH', python_path)
    with open(kernel_path, 'w') as f:
        f.write(text)
    local('touch /tmp/{}/kernel_var.json'.format(args.cluster_name))
    local(
        "PYJ=`find /opt/{0}/spark/ -name '*py4j*.zip' | tr '\\n' ':' | sed 's|:$||g'`; cat {1} | sed 's|PY4J|'$PYJ'|g' | sed \'/PYTHONPATH\"\:/s|\(.*\)\"|\\1/home/{2}/caffe/python:/home/{2}/pytorch/build:\"|\' > /tmp/{0}/kernel_var.json".
        format(args.cluster_name, kernel_path, args.os_user))
    local('sudo mv /tmp/{}/kernel_var.json '.format(args.cluster_name) + kernel_path)
if __name__ == "__main__":
    # --dry_run=true only prints usage; otherwise install and configure
    # Spark for the cluster and register the Jupyter kernels.
    if args.dry_run == 'true':
        parser.print_help()
    else:
        dataengine_dir_prepare('/opt/{}/'.format(args.cluster_name))
        install_dataengine_spark(args.cluster_name, spark_link, spark_version, hadoop_version, cluster_dir, args.os_user,
                                 args.datalake_enabled)
        configure_dataengine_spark(args.cluster_name, local_jars_dir, cluster_dir, args.region, args.datalake_enabled)
        pyspark_kernel(args)
        toree_kernel(args)
        # The R kernel is optional.
        if args.r_enabled == 'true':
            r_kernel(args)
|
#!/usr/bin/env python
from nose.tools import ok_
from nose.tools import eq_
import networkx as nx
from networkx.algorithms.approximation import min_weighted_dominating_set
from networkx.algorithms.approximation import min_edge_dominating_set
class TestMinWeightDominatingSet:
    """Tests for the dominating-set approximation algorithms."""
    def test_min_weighted_dominating_set(self):
        """Every vertex outside the returned set must have a neighbor in it."""
        graph = nx.Graph()
        graph.add_edge(1, 2)
        graph.add_edge(1, 5)
        graph.add_edge(2, 3)
        graph.add_edge(2, 5)
        graph.add_edge(3, 4)
        graph.add_edge(3, 6)
        graph.add_edge(5, 6)
        vertices = set([1, 2, 3, 4, 5, 6])
        # due to ties, this might be hard to test tight bounds
        dom_set = min_weighted_dominating_set(graph)
        for vertex in vertices - dom_set:
            neighbors = set(graph.neighbors(vertex))
            ok_(len(neighbors & dom_set) > 0, "Non dominating set found!")
    def test_star_graph(self):
        """Tests that an approximate dominating set for the star graph,
        even when the center node does not have the smallest integer
        label, gives just the center node.
        For more information, see #1527.
        """
        # Create a star graph in which the center node has the highest
        # label instead of the lowest.
        G = nx.star_graph(10)
        G = nx.relabel_nodes(G, {0: 9, 9: 0})
        eq_(min_weighted_dominating_set(G), {9})
    def _check_edge_dominating(self, graph, dom_set):
        """Assert every edge of *graph* shares an endpoint with some edge
        in *dom_set* (the definition of an edge dominating set).

        Bug fix: the original (duplicated) check only tested endpoint u
        and never v, so an edge dominated solely through its second
        endpoint would have failed the assertion incorrectly.
        """
        for edge in graph.edges_iter():
            if edge in dom_set:
                continue
            u, v = edge
            found = any(u in dom_edge or v in dom_edge for dom_edge in dom_set)
            ok_(found, "Non adjacent edge found!")
    def test_min_edge_dominating_set(self):
        # this is a crappy way to test, but good enough for now.
        graph = nx.path_graph(5)
        self._check_edge_dominating(graph, min_edge_dominating_set(graph))
        graph = nx.complete_graph(10)
        self._check_edge_dominating(graph, min_edge_dominating_set(graph))
|
#!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test RPCs related to blockchainstate.
Test the following RPCs:
- gettxoutsetinfo
- getdifficulty
- getbestblockhash
- getblockhash
- getblockheader
- getchaintxstats
- getnetworkhashps
- verifychain
Tests correspond to code in rpc/blockchain.cpp.
"""
from decimal import Decimal
import http.client
import subprocess
from test_framework.test_framework import (BitcoinTestFramework, BITCOIND_PROC_WAIT_TIMEOUT)
from test_framework.util import (
assert_equal,
assert_raises,
assert_raises_jsonrpc,
assert_is_hex_string,
assert_is_hash_string,
)
class BlockchainTest(BitcoinTestFramework):
def __init__(self):
super().__init__()
self.setup_clean_chain = False
self.num_nodes = 1
self.extra_args = [['-stopatheight=207']]
def run_test(self):
self._test_getchaintxstats()
self._test_gettxoutsetinfo()
self._test_getblockheader()
self._test_getdifficulty()
self._test_getnetworkhashps()
self._test_stopatheight()
assert self.nodes[0].verifychain(4, 0)
def _test_getchaintxstats(self):
chaintxstats = self.nodes[0].getchaintxstats(1)
# 200 txs plus genesis tx
assert_equal(chaintxstats['txcount'], 201)
# tx rate should be 1 per 10 minutes, or 1/600
# we have to round because of binary math
assert_equal(round(chaintxstats['txrate'] * 600, 10), Decimal(1))
def _test_gettxoutsetinfo(self):
node = self.nodes[0]
res = node.gettxoutsetinfo()
assert_equal(res['total_amount'], Decimal('8725.00000000'))
assert_equal(res['transactions'], 200)
assert_equal(res['height'], 200)
assert_equal(res['txouts'], 200)
assert_equal(res['bogosize'], 17000),
assert_equal(res['bestblock'], node.getblockhash(200))
size = res['disk_size']
assert size > 6400
assert size < 64000
assert_equal(len(res['bestblock']), 64)
assert_equal(len(res['hash_serialized_2']), 64)
self.log.info("Test that gettxoutsetinfo() works for blockchain with just the genesis block")
b1hash = node.getblockhash(1)
node.invalidateblock(b1hash)
res2 = node.gettxoutsetinfo()
assert_equal(res2['transactions'], 0)
assert_equal(res2['total_amount'], Decimal('0'))
assert_equal(res2['height'], 0)
assert_equal(res2['txouts'], 0)
assert_equal(res2['bogosize'], 0),
assert_equal(res2['bestblock'], node.getblockhash(0))
assert_equal(len(res2['hash_serialized_2']), 64)
self.log.info("Test that gettxoutsetinfo() returns the same result after invalidate/reconsider block")
node.reconsiderblock(b1hash)
res3 = node.gettxoutsetinfo()
assert_equal(res['total_amount'], res3['total_amount'])
assert_equal(res['transactions'], res3['transactions'])
assert_equal(res['height'], res3['height'])
assert_equal(res['txouts'], res3['txouts'])
assert_equal(res['bogosize'], res3['bogosize'])
assert_equal(res['bestblock'], res3['bestblock'])
assert_ | equal(res['hash_serialized_2'], res3['hash_serialized_2'])
def _test_getblockheader(self):
    """Validate getblockheader field types/values and its error handling."""
    node = self.nodes[0]

    # Unknown hashes are rejected with RPC error -5.
    assert_raises_jsonrpc(-5, "Block not found",
                          node.getblockheader, "nonsense")

    best_hash = node.getbestblockhash()
    second_best = node.getblockhash(199)
    header = node.getblockheader(best_hash)

    assert_equal(header['hash'], best_hash)
    assert_equal(header['height'], 200)
    assert_equal(header['confirmations'], 1)
    assert_equal(header['previousblockhash'], second_best)
    assert_is_hex_string(header['chainwork'])
    assert_is_hash_string(header['hash'])
    assert_is_hash_string(header['previousblockhash'])
    assert_is_hash_string(header['merkleroot'])
    # 'bits' is hex but not a fixed-length hash, so skip the length check.
    assert_is_hash_string(header['bits'], length=None)
    for int_field in ('time', 'mediantime', 'nonce', 'version'):
        assert isinstance(header[int_field], int)
    assert isinstance(int(header['versionHex'], 16), int)
    assert isinstance(header['difficulty'], Decimal)
def _test_getdifficulty(self):
    """Regtest difficulty should be 1/2**31, within float tolerance."""
    # binary => decimal => binary conversions introduce tiny errors,
    # so compare with an epsilon instead of strict equality.
    difficulty = self.nodes[0].getdifficulty()
    assert abs(difficulty * 2 ** 31 - 1) < 0.0001
def _test_getnetworkhashps(self):
    """Network hash rate should be 2 hashes / 10 min, i.e. 1/300 per second."""
    hps = self.nodes[0].getnetworkhashps()
    # Epsilon comparison to absorb floating-point rounding.
    assert abs(hps * 300 - 1) < 0.0001
def _test_stopatheight(self):
    """Check that -stopatheight shuts the node down once the chain reaches it."""
    assert_equal(self.nodes[0].getblockcount(), 200)
    self.nodes[0].generate(6)
    assert_equal(self.nodes[0].getblockcount(), 206)
    self.log.debug('Node should not stop at this height')
    # Below the stop height the node must keep running, so waiting for the
    # process to exit should time out.
    assert_raises(subprocess.TimeoutExpired, lambda: self.nodes[0].process.wait(timeout=3))
    try:
        # This block crosses the stop height; the node may close the RPC
        # connection before sending a response.
        self.nodes[0].generate(1)
    except (ConnectionError, http.client.BadStatusLine):
        pass # The node already shut down before response
    self.log.debug('Node should stop at this height...')
    # Now the process is expected to exit on its own.
    self.nodes[0].process.wait(timeout=BITCOIND_PROC_WAIT_TIMEOUT)
    # Restart and confirm the chain stopped exactly at the target height.
    self.nodes[0] = self.start_node(0, self.options.tmpdir)
    assert_equal(self.nodes[0].getblockcount(), 207)
if __name__ == '__main__':
    # Run the functional test when executed as a script.
    BlockchainTest().main()
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from cinder import flags
FLAGS = flags.FLAGS

# Pull in flag definitions from the modules under test so the defaults
# below can be applied even if those modules have not been imported yet.
flags.DECLARE('iscsi_num_targets', 'cinder.volume.driver')
flags.DECLARE('policy_file', 'cinder.policy')
flags.DECLARE('volume_driver', 'cinder.volume.manager')
flags.DECLARE('xiv_proxy', 'cinder.volume.xiv')
# Name of the fake volume type installed as the test default.
def_vol_type = 'fake_vol_type'


def set_defaults(conf):
    """Install the standard fake/test configuration defaults onto *conf*.

    *conf* only needs to expose ``set_default(name, value)``.
    """
    test_defaults = (
        ('default_volume_type', def_vol_type),
        ('volume_driver', 'cinder.volume.driver.FakeISCSIDriver'),
        ('connection_type', 'fake'),
        ('fake_rabbit', True),
        ('rpc_backend', 'cinder.openstack.common.rpc.impl_fake'),
        ('iscsi_num_targets', 8),
        ('verbose', True),
        ('sql_connection', "sqlite://"),
        ('sqlite_synchronous', False),
        ('policy_file', 'cinder/tests/policy.json'),
        ('xiv_proxy', 'cinder.tests.test_xiv.XIVFakeProxyDriver'),
    )
    for option, value in test_defaults:
        conf.set_default(option, value)
|
# -*- coding: utf-8 -*-
"""Define the base module for server test."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import sys
from influxdb.tests import using_pypy
from influxdb.tests.server_tests.influxdb_instance import InfluxDbInstance
from influxdb.client import InfluxDBClient
if not using_pypy:
from influxdb.dataframe_client import DataFrameClient
def _setup_influxdb_server(inst):
    """Start a fresh influxd instance and attach API clients to *inst*.

    Used both as ``setUp`` and ``setUpClass``, so *inst* may be a TestCase
    instance or the TestCase class itself. Requires the
    ``influxdb_template_conf`` attribute to be set on *inst*.
    """
    inst.influxd_inst = InfluxDbInstance(
        inst.influxdb_template_conf,
        udp_enabled=getattr(inst, 'influxdb_udp_enabled', False),
    )

    # Plain HTTP client pointed at the freshly started server.
    inst.cli = InfluxDBClient('localhost',
                              inst.influxd_inst.http_port,
                              'root',
                              '',
                              database='db')
    # The pandas-based client is unavailable under PyPy.
    if not using_pypy:
        inst.cliDF = DataFrameClient('localhost',
                                     inst.influxd_inst.http_port,
                                     'root',
                                     '',
                                     database='db')
def _teardown_influxdb_server(inst):
remove_tree = sys.exc_info() == (None, None, None)
inst.influxd_inst.close(remove_tree=remove_tree)
class SingleTestCaseWithServerMixin(object):
    """Define the single testcase with server mixin.

    A mixin for unittest.TestCase to start an influxdb server instance
    in a temporary directory **for each test function/case**
    """

    # 'influxdb_template_conf' attribute must be set
    # on the TestCase class or instance.

    # Bind the module-level helpers directly as fixtures: each test gets a
    # fresh server in setUp and tears it down afterwards.
    setUp = _setup_influxdb_server
    tearDown = _teardown_influxdb_server
class ManyTestCasesWithServerMixin(object):
    """Define the many testcase with server mixin.

    Same as the SingleTestCaseWithServerMixin but this module creates
    a single instance for the whole class. Also pre-creates a fresh
    database: 'db'.
    """

    # 'influxdb_template_conf' attribute must be set on the class itself !

    @classmethod
    def setUpClass(cls):
        """Set up an instance of the ManyTestCasesWithServerMixin."""
        # One server process for the whole class; clients attach to cls.
        _setup_influxdb_server(cls)

    def setUp(self):
        """Set up an instance of the ManyTestCasesWithServerMixin."""
        # Fresh database per test; the server itself is reused.
        self.cli.create_database('db')

    @classmethod
    def tearDownClass(cls):
        """Deconstruct an instance of ManyTestCasesWithServerMixin."""
        _teardown_influxdb_server(cls)

    def tearDown(self):
        """Deconstruct an instance of ManyTestCasesWithServerMixin."""
        # Drop per-test state so the next test starts clean.
        self.cli.drop_database('db')
|
#!/usr/bin/env python
#
# Copyright (c) 2013 Juniper Networks, Inc. All rights reserved.
#
#
# mockredis
#
# This module helps start and stop redis instances for unit-testing
# redis must be pre-installed for this to work
#
import os
import signal
import subprocess
import logging
import socket
import time
import redis
# Configure root logging once at import time; this module doubles as a
# command-line self-test (see __main__ block below).
logging.basicConfig(level=logging.INFO,
                    format='%(asctime)s %(levelname)s %(message)s')

# Version of redis that is downloaded and built when no binary is present.
redis_ver = '2.6.13'
# Per-user scratch area where the tarball is fetched and built.
redis_bdir = '/tmp/cache/' + os.environ['USER'] + '/systemless_test'
# NOTE: despite the name, redis_url is the *local* path of the tarball.
redis_url = redis_bdir + '/redis-'+redis_ver+'.tar.gz'
redis_exe = redis_bdir + '/bin/redis-server'
def install_redis():
    """Download, unpack and build redis under redis_bdir.

    Each step is skipped if its output already exists, so repeated calls
    are cheap. Raises SystemError when any step exits non-zero.
    """
    if not os.path.exists(redis_url):
        process = subprocess.Popen(['wget', '-P', redis_bdir,
                                    'https://redis.googlecode.com/files/redis-'
                                    + redis_ver + '.tar.gz'],
                                   cwd=redis_bdir)
        process.wait()
        # Fixed: 'is not 0' compared object identity, not the exit code value.
        if process.returncode != 0:
            raise SystemError('wget '+redis_url)
    if not os.path.exists(redis_bdir + '/redis-'+redis_ver):
        process = subprocess.Popen(['tar', 'xzvf', redis_url],
                                   cwd=redis_bdir)
        process.wait()
        if process.returncode != 0:
            raise SystemError('untar '+redis_url)
    if not os.path.exists(redis_exe):
        process = subprocess.Popen(['make', 'PREFIX=' + redis_bdir, 'install'],
                                   cwd=redis_bdir + '/redis-'+redis_ver)
        process.wait()
        if process.returncode != 0:
            raise SystemError('install '+redis_url)
def get_redis_path():
    """Return the path of the redis-server binary, building it if absent."""
    if os.path.exists(redis_exe):
        return redis_exe
    install_redis()
    return redis_exe
def redis_version():
    """Return the redis-server version used to select a config template.

    Hard-coded to 2.6 because only the 2.6 tarball is ever installed by
    install_redis(). The previous subprocess-based detection sat unreachable
    after the return statement and has been removed as dead code.
    """
    return 2.6
def start_redis(port, password=None):
    '''
    Client uses this function to start an instance of redis

    Arguments:
        port     : An unused TCP port for redis to use as the client port
        password : optional requirepass value written into the config

    Returns True once the server answers PING, False if it never comes up
    within the configured wait time.
    '''
    exe = get_redis_path()
    version = redis_version()
    if version == 2.6:
        redis_conf = "redis.26.conf"
    else:
        redis_conf = "redis.24.conf"
    conftemplate = os.path.dirname(os.path.abspath(__file__)) + "/" +\
        redis_conf
    # Per-user, per-port scratch directory; recreated from scratch each run.
    redisbase = "/tmp/redis.%s.%d/" % (os.getenv('USER', 'None'), port)
    output, _ = call_command_("rm -rf " + redisbase)
    output, _ = call_command_("mkdir " + redisbase)
    output, _ = call_command_("mkdir " + redisbase + "cache")
    logging.info('Redis Port %d' % port)
    output, _ = call_command_("cp " + conftemplate + " " + redisbase +
                              redis_conf)
    # Point the stock config template at our scratch directory and port.
    replace_string_(redisbase + redis_conf,
                    [("/var/run/redis_6379.pid", redisbase + "pid"),
                     ("port 6379", "port " + str(port)),
                     ("/var/log/redis_6379.log", redisbase + "log"),
                     ("/var/lib/redis/6379", redisbase + "cache")])
    if password:
        replace_string_(redisbase + redis_conf,
                        [("# requirepass foobared",
                          "requirepass " + password)])
    command = exe + " " + redisbase + redis_conf
    subprocess.Popen(command.split(' '),
                     stdout=subprocess.PIPE,
                     stderr=subprocess.PIPE)
    r = redis.StrictRedis(host='localhost', port=port, db=0, password=password)
    done = False
    # Fixed: environment variables are strings, so the 'cnt > start_wait'
    # comparison below needs an explicit int conversion when the variable
    # is set. NOTE(review): 'CONTRIAL' looks like a typo for 'CONTRAIL',
    # but renaming it would break existing environments -- confirm first.
    start_wait = int(os.getenv('CONTRIAL_ANALYTICS_TEST_MAX_START_WAIT_TIME',
                               15))
    cnt = 0
    while not done:
        try:
            r.ping()
        # Fixed: was a bare 'except:', which also swallowed KeyboardInterrupt
        # and SystemExit while waiting for the server.
        except Exception:
            cnt += 1
            if cnt > start_wait:
                logging.info('Redis Failed. Logs below: ')
                with open(redisbase + "log", 'r') as fin:
                    logging.info(fin.read())
                return False
            logging.info('Redis not ready')
            time.sleep(1)
        else:
            done = True
    logging.info('Redis ready')
    return True
def stop_redis(port, password=None):
    '''
    Client uses this function to stop an instance of redis

    This will only work for redis instances that were started by this module
    Arguments:
        port     : The Client Port for the instance of redis to be stopped
        password : password the instance was started with, if any
    '''
    r = redis.StrictRedis(host='localhost', port=port, db=0, password=password)
    # Ask the server to shut itself down.
    r.shutdown()
    del r
    # Remove the per-port scratch directory created by start_redis().
    redisbase = "/tmp/redis.%s.%d/" % (os.getenv('USER', 'None'), port)
    output, _ = call_command_("rm -rf " + redisbase)
def replace_string_(filePath, findreplace):
    """Replace all (findStr, repStr) pairs from *findreplace* in the file
    at *filePath*, in place.

    The result is written to a temporary sibling file first and then
    renamed over the original, so the file is never left half-written.
    """
    tempName = filePath + '~~~'
    with open(filePath) as infile:
        text = infile.read()
    # Apply every replacement to the accumulated text. (The old code wrote
    # the loop variable after the loop, which raised NameError for an empty
    # findreplace list; it also leaked both file handles and left a py2
    # debug 'print' statement behind.)
    for find_str, rep_str in findreplace:
        text = text.replace(find_str, rep_str)
    with open(tempName, 'w') as outfile:
        outfile.write(text)
    os.rename(tempName, filePath)
def call_command_(command):
    """Run *command* (split on single spaces) and return (stdout, stderr).

    Blocks until the process finishes; both streams are captured.
    """
    args = command.split(' ')
    proc = subprocess.Popen(args,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE)
    return proc.communicate()
if __name__ == "__main__":
    # Self-test: ask the OS for an ephemeral port, then start redis on it.
    cs = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    cs.bind(("", 0))
    cport = cs.getsockname()[1]
    # NOTE(review): closing the socket before reusing the port leaves a small
    # race where another process could grab it first.
    cs.close()
    start_redis(cport)
|
import web
from gothonweb import map
# URL routing table: path pattern -> handler class name.
urls = (
    '/game', 'GameEngine',
    '/', 'Index',
)

app = web.application(urls, globals())

# little hack so that debug mode works with sessions: web.py's debug reloader
# re-imports this module, which would otherwise recreate the session store
# (and lose the current session) on every reload.
if web.config.get('_session') is None:
    store = web.session.DiskStore('sessions')
    session = web.session.Session(app, store,
                                  initializer={'room':None})
    web.config._session = session
else:
    session = web.config._session

# All templates render inside templates/layout.html.
render = web.template.render('templates/', base="layout")
class Index(object):
    """Entry point: seeds the session and hands the player to the game."""

    def GET(self):
        """Reset the player to the starting room, then redirect to /game."""
        session.room = map.START
        web.seeother("/game")
class GameEngine(object):
    """Shows the player's current room and applies submitted actions."""

    def GET(self):
        """Render the current room.

        Returns None (an empty page) when the session has no room; the
        commented-out you_died() branch that used to sit here was dead code
        and has been removed.
        """
        if session.room:
            return render.show_room(room=session.room)

    def POST(self):
        """Advance the game with the submitted action, then redirect to GET."""
        form = web.input(action=None)
        if session.room:
            session.room = session.room.go(form.action)
        web.seeother("/game")
if __name__ == "__main__":
    # Start the web.py development server.
    app.run()
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Provides a web interface for dumping graph data as JSON.
This is meant to be used with /load_from_prod in order to easily grab
data for a graph to a local server for testing.
"""
import base64
import json
from google.appengine.ext import ndb
from google.appengine.ext.ndb import model
from dashboard.common import request_handler
from dashboard.common import utils
from dashboard.models import anomaly
from dashboard.models import graph_data
# Default cap on the number of Row entities returned per test.
_DEFAULT_MAX_POINTS = 500
# This is about the limit we want to return since we fetch many associated
# entities for each anomaly.
_DEFAULT_MAX_ANOMALIES = 30
class DumpGraphJsonHandler(request_handler.RequestHandler):
  """Handler for extracting entities from datastore."""

  def get(self):
    """Handles dumping dashboard data."""
    if self.request.get('sheriff'):
      self._DumpAnomalyDataForSheriff()
    elif self.request.get('test_path'):
      self._DumpTestData()
    else:
      self.ReportError('No parameters specified.')

  def _DumpTestData(self):
    """Dumps data for the requested test.

    Request parameters:
      test_path: A single full test path, including master/bot.
      num_points: Max number of Row entities (optional).
      end_rev: Ending revision number, inclusive (optional).

    Outputs:
      JSON array of encoded protobuf messages, which encode all of
      the datastore entities relating to one test (including Master, Bot,
      TestMetadata, Row, Anomaly and Sheriff entities).
    """
    test_path = self.request.get('test_path')
    num_points = int(self.request.get('num_points', _DEFAULT_MAX_POINTS))
    end_rev = self.request.get('end_rev')
    test_key = utils.TestKey(test_path)
    if not test_key or test_key.kind() != 'TestMetadata':
      # Bad test_path passed in.
      self.response.out.write(json.dumps([]))
      return

    # List of datastore entities that will be dumped.
    entities = []
    entities.extend(self._GetTestAncestors([test_key]))

    # Get the Row entities. (A leftover debug 'print test_key' statement
    # was removed here.)
    q = graph_data.Row.query()
    q = q.filter(graph_data.Row.parent_test == utils.OldStyleTestKey(test_key))
    if end_rev:
      q = q.filter(graph_data.Row.revision <= int(end_rev))
    q = q.order(-graph_data.Row.revision)
    entities += q.fetch(limit=num_points)

    # Get the Anomaly and Sheriff entities.
    alerts = anomaly.Anomaly.GetAlertsForTest(test_key)
    sheriff_keys = {alert.sheriff for alert in alerts}
    sheriffs = [sheriff.get() for sheriff in sheriff_keys]
    entities += alerts
    entities += sheriffs

    # Convert the entities to protobuf message strings and output as JSON.
    # list() so json.dumps also works under Python 3, where map() is lazy.
    protobuf_strings = list(map(EntityToBinaryProtobuf, entities))
    self.response.out.write(json.dumps(protobuf_strings))

  def _DumpAnomalyDataForSheriff(self):
    """Dumps Anomaly data for all sheriffs.

    Request parameters:
      sheriff: Sheriff name.
      num_points: Max number of Row entities (optional).
      num_alerts: Max number of Anomaly entities (optional).

    Outputs:
      JSON array of encoded protobuf messages, which encode all of
      the datastore entities relating to one test (including Master, Bot,
      TestMetadata, Row, Anomaly and Sheriff entities).
    """
    sheriff_name = self.request.get('sheriff')
    num_points = int(self.request.get('num_points', _DEFAULT_MAX_POINTS))
    num_anomalies = int(self.request.get('num_alerts', _DEFAULT_MAX_ANOMALIES))
    sheriff = ndb.Key('Sheriff', sheriff_name).get()
    if not sheriff:
      self.ReportError('Unknown sheriff specified.')
      return

    anomalies = self._FetchAnomalies(sheriff, num_anomalies)
    test_keys = [a.GetTestMetadataKey() for a in anomalies]

    # List of datastore entities that will be dumped.
    entities = []
    entities.extend(self._GetTestAncestors(test_keys))

    # Get the Row entities.
    entities.extend(self._FetchRowsAsync(test_keys, num_points))

    # Add the Anomaly and Sheriff entities.
    entities += anomalies
    entities.append(sheriff)

    # Convert the entities to protobuf message strings and output as JSON.
    protobuf_strings = list(map(EntityToBinaryProtobuf, entities))
    self.response.out.write(json.dumps(protobuf_strings))

  def _GetTestAncestors(self, test_keys):
    """Gets the TestMetadata, Bot, and Master entities preceding in path."""
    entities = []
    added_parents = set()
    for test_key in test_keys:
      if test_key.kind() != 'TestMetadata':
        continue
      parts = utils.TestPath(test_key).split('/')
      # Stray trailing comma after '_' removed (it made the target a tuple).
      for index, _ in enumerate(parts):
        test_path = '/'.join(parts[:index + 1])
        if test_path in added_parents:
          continue
        added_parents.add(test_path)
        if index == 0:
          entities.append(ndb.Key('Master', parts[0]).get())
        elif index == 1:
          entities.append(ndb.Key('Master', parts[0], 'Bot', parts[1]).get())
        else:
          entities.append(ndb.Key('TestMetadata', test_path).get())
    return [e for e in entities if e is not None]

  def _FetchRowsAsync(self, test_keys, num_points):
    """Fetches recent Row asynchronously across all 'test_keys'."""
    rows = []
    futures = []
    for test_key in test_keys:
      q = graph_data.Row.query()
      q = q.filter(
          graph_data.Row.parent_test == utils.OldStyleTestKey(test_key))
      q = q.order(-graph_data.Row.revision)
      futures.append(q.fetch_async(limit=num_points))
    ndb.Future.wait_all(futures)
    for future in futures:
      rows.extend(future.get_result())
    return rows

  def _FetchAnomalies(self, sheriff, num_anomalies):
    """Fetches recent anomalies for 'sheriff'."""
    q = anomaly.Anomaly.query(
        anomaly.Anomaly.sheriff == sheriff.key)
    q = q.order(-anomaly.Anomaly.timestamp)
    return q.fetch(limit=num_anomalies)
def EntityToBinaryProtobuf(entity):
  """Converts an ndb entity to a protobuf message in binary format."""
  # Serialize to the binary protobuf wire format, then base64 the bytes so
  # the result survives json.dumps as text.
  pb_message = ndb.ModelAdapter().entity_to_pb(entity)
  return base64.b64encode(pb_message.Encode())
def BinaryProtobufToEntity(pb_str):
  """Converts a protobuf message in binary format to an ndb entity.

  Args:
    pb_str: Binary encoded protocol buffer which is encoded as text.

  Returns:
    A ndb Entity.
  """
  # Undo the base64 layer first, then parse the protobuf wire bytes.
  raw_bytes = base64.b64decode(pb_str)
  proto = model.entity_pb.EntityProto(raw_bytes)
  return ndb.ModelAdapter().pb_to_entity(proto)
|
# Copyright 2016 Casey Jaymes
# This file is part of PySCAP.
#
# PySCAP is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Fou | ndation, either version 3 of the License, or
# (at your option) any later version.
#
# PySCAP is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with PySCAP. If not, | see <http://www.gnu.org/licenses/>.
import logging
from scap.model.oval_5.sc.EntityItemType import EntityItemType
logger = logging.getLogger(__name__)
class EntityItemEncryptMethodType(EntityItemType):
    """OVAL SC entity item restricted to the encrypt-method value enumeration."""

    # No elements or attributes beyond the EntityItemType base model.
    MODEL_MAP = {
        'elements': [
        ],
        'attributes': {
        },
    }

    # Allowed values; the trailing empty string permits an unset entity.
    _ENCRYPT_METHODS = (
        'DES',
        'BSDi',
        'MD5',
        'Blowfish',
        'Sun MD5',
        'SHA-256',
        'SHA-512',
        '',
    )

    def get_value_enum(self):
        """Return the list of valid values for this entity item."""
        return list(self._ENCRYPT_METHODS)
|
from setuptools import setup, find_packages

setup(
    name='zeit.content.gallery',
    version='2.9.2.dev0',
    author='gocept, Zeit Online',
    author_email='zon-backend@zeit.de',
    url='http://www.zeit.de/',
    # NOTE(review): the description says "Portraitbox" but this package is
    # zeit.content.gallery -- looks like a copy/paste from another setup.py;
    # confirm and correct the text.
    description="vivi Content-Type Portraitbox",
    packages=find_packages('src'),
    package_dir={'': 'src'},
    include_package_data=True,
    zip_safe=False,
    license='BSD',
    namespace_packages=['zeit', 'zeit.content'],
    install_requires=[
        'cssselect',
        'Pillow',
        'gocept.form',
        'setuptools',
        'zeit.cms >= 3.0.dev0',
        'zeit.connector>=2.4.0.dev0',
        'zeit.imp>=0.15.0.dev0',
        'zeit.content.image',
        'zeit.push>=1.21.0.dev0',
        'zeit.wysiwyg',
        'zope.app.appsetup',
        'zope.app.testing',
        'zope.component',
        'zope.formlib',
        'zope.interface',
        'zope.publisher',
        'zope.security',
        'zope.testing',
    ],
    entry_points={
        'fanstatic.libraries': [
            'zeit_content_gallery=zeit.content.gallery.browser.resources:lib',
        ],
    },
)
|
icCursor
from superdesk.upload import url_for_media
from superdesk.errors import SuperdeskApiError, ProviderError
from superdesk.media.media_operations import process_file_from_stream, decode_metadata
from superdesk.media.renditions import generate_renditions, delete_file_on_error, get_renditions_spec
from superdesk.metadata.item import ITEM_TYPE, CONTENT_TYPE
from superdesk.utc import utcnow, get_date, local_to_utc
import mimetypes
# scanpix preview size to use (if available) for superdesk rendition
# preview sizes are in order of preference, first found is used
# scanpix preview size to use (if available) for superdesk rendition
# preview sizes are in order of preference, first found is used
REND2PREV = {
    'thumbnail': ('generated_jpg', 'thumbnail', 'thumbnail_big'),
    'viewImage': ('preview', 'thumbnail_big', 'thumbnail', 'preview_big'),
    'baseImage': ('mp4_preview', 'mp4_thumbnail', 'preview_big', 'preview', 'thumbnail_big', 'thumbnail')}

# Named logger so scanpix messages can be filtered separately in config.
logger = logging.getLogger('ntb:scanpix')

# Default timezone used to convert datetimes from scanpix api results to utc
SCANPIX_TZ = 'Europe/Oslo'
def extract_params(query, names):
    """Pull ``name:(value)`` filters out of *query*.

    Only filters whose name appears in *names* are returned, but every
    recognised filter expression is stripped from the free text. Dashes in
    returned values are escaped; any remaining free text is returned under
    the ``q`` key.
    """
    if isinstance(names, str):
        names = [names]
    matches = re.findall(r'([\w]+):\(([-\w\s*]+)\)', query)
    params = {}
    for name, value in matches:
        if name in names:
            params[name] = value
    # Remove every matched filter expression from the free-text query,
    # including ones whose name was not requested.
    for name, value in matches:
        query = query.replace('%s:(%s)' % (name, value), '')
    query = query.strip()
    # escape dashes (only in filter values, not in the free text)
    params = {name: value.replace('-', r'\-') for name, value in params.items()}
    if query:
        params['q'] = query
    return params
class ScanpixDatalayer(DataLayer):
def set_credentials(self, user, password):
    """Remember the Scanpix API credentials used for authenticated requests."""
    self._user = user
    self._password = password
def init_app(self, app):
    """Initialise the datalayer from the app config.

    Credentials stay unset until set_credentials() is called.
    """
    app.config.setdefault('SCANPIX_SEARCH_URL', 'http://api.scanpix.no/v2')
    self._app = app
    self._user = None
    self._password = None
    # All API calls send JSON bodies.
    self._headers = {
        'Content-Type': 'application/json',
    }
def fetch_file(self, url):
    """Get file stream for given image url.

    It will fetch the file using predefined auth token.

    :param url: pa image api url
    :raises NotImplementedError: always; subclasses must provide this.
    """
    raise NotImplementedError
def find(self, resource, req, lookup):
    """
    Called to execute a search against the Scanpix API. It attempts to translate the search request
    passed in req to a suitable form for a search request against the API. It parses the response into a
    suitable ElasticCursor.

    :param resource: provider resource name; also used for the SDNTB-250 hack
    :param req: elastic-style request dict ('query', 'post_filter', 'from', 'size')
    :param lookup: unused here
    :return: ElasticCursor over the parsed hits
    """
    url = self._app.config['SCANPIX_SEARCH_URL'] + '/search'
    data = {
        'mainGroup': 'any'
    }

    if 'query' in req['query']['filtered']:
        # Map superdesk field names onto their scanpix equivalents.
        query = req['query']['filtered']['query']['query_string']['query'] \
            .replace('slugline:', 'keywords:') \
            .replace('description:', 'caption:')

        # Black & White
        try:
            bw = bool(int(extract_params(query, 'bw')['bw']))
        except KeyError:
            pass
        else:
            if bw:
                data['saturation'] = {'max': 1}

        # Clear Edge
        try:
            clear_edge = bool(int(extract_params(query, 'clear_edge')['clear_edge']))
        except KeyError:
            pass
        else:
            if clear_edge:
                data['clearEdge'] = True

        text_params = extract_params(query, ('headline', 'keywords', 'caption', 'text'))
        # combine all possible text params to use the q field.
        data['searchString'] = ' '.join(text_params.values())

        try:
            ids = extract_params(query, 'id')['id'].split()
        except KeyError:
            pass
        else:
            data['refPtrs'] = ids

    # subscription
    data['subscription'] = 'subscription'  # this is requested as a default value
    # data['subscription'] is always equal to 'subscription', but we keep the test in case
    # of the behaviour is changed again in the future.
    if 'ntbtema' in resource and data['subscription'] == 'subscription':
        # small hack for SDNTB-250
        data['subscription'] = 'punchcard'

    # Translate elastic post_filter criteria (date ranges, content type).
    for criterion in req.get('post_filter', {}).get('and', {}):
        if 'range' in criterion:
            start = None
            end = None
            filter_data = criterion.get('range', {})

            if 'firstcreated' in filter_data:
                created = criterion['range']['firstcreated']
                if 'gte' in created:
                    start = created['gte'][0:10]
                if 'lte' in created:
                    end = created['lte'][0:10]

            # if there is a special start and no end it's one of the date buttons
            if start and not end:
                if start == 'now-24H':
                    data['timeLimit'] = 'last24'
                if start == 'now-1w':
                    data['timeLimit'] = 'lastweek'
                if start == 'now-1M':
                    data['timeLimit'] = 'lastmonth'
            elif start or end:
                data['archived'] = {
                    'min': '',
                    'max': ''
                }
                if start:
                    data['archived']['min'] = start
                if end:
                    data['archived']['max'] = end

        if 'terms' in criterion:
            if 'type' in criterion.get('terms', {}):
                type_ = criterion['terms']['type']
                if type_ == CONTENT_TYPE.VIDEO:
                    data['mainGroup'] = 'video'

    # Pagination: offset from 'from', page size capped to at least 10.
    offset, limit = int(req.get('from', '0')), max(10, int(req.get('size', '25')))
    data['offset'] = offset
    data['showNumResults'] = limit

    r = self._request(url, data, resource)
    hits = self._parse_hits(r.json())
    return ElasticCursor(docs=hits['docs'], hits={'hits': hits})
def _request(self, url, data, resource):
    """Perform POST request to given url.

    It adds predefined headers and basic auth credentials if available.

    :param url: full API endpoint URL
    :param data: dict payload, sent JSON-encoded
    :param resource: resource name, used for error reporting only
    :raises ProviderError: on any non-2xx response
    """
    r = requests.post(url, data=json.dumps(data), headers=self._headers, auth=(self._user, self._password))
    if r.status_code < 200 or r.status_code >= 300:
        logger.error('error fetching url=%s status=%s content=%s' % (url, r.status_code, r.content or ''))
        raise ProviderError.externalProviderError("Scanpix request can't be performed", provider={'name': resource})
    return r
def _parse_doc(self, doc):
new_doc = {}
new_doc['_id'] = doc['refPtr']
new_doc['guid'] = doc['refPtr']
try:
new_doc['description_text'] = doc['caption']
except KeyError:
pass
try:
new_doc['headline'] = doc['headline']
except KeyError:
pass
try:
new_doc['original_source'] = new_doc['source'] = doc['credit']
except KeyError:
pass
new_doc['versioncreated'] = new_doc['firstcreated'] = self._datetime(
local_to_utc(SCANPIX_TZ, get_date(doc['archivedTime']))
)
new_doc['pubstatus'] = 'usable'
# This must match the action
new_doc['_type'] = 'externalsource'
# entry that the client can use to identify the fetch endpoint
new_doc['fetch_endpoint'] = 'scanpix'
# mimetype is not directly found in Scanpix API
# so we use original filename to guess it
mimetype = mimetypes.guess_type("_{}".format(splitext(doc.get('originalFileName', ''))[1]))[0]
if mimetype is None:
# nothing found with filename, we try out luck with fileFormat
try:
format_ = doc['fileFormat'].split()[0]
except (KeyError, IndexError):
mimetype = None
else:
mimetype = mimetypes.guess_type('_.{}'.format(format_))[0]
if mimety |
"""Code for CLI base"""
import logging
import pathlib
import click
import coloredlogs
import yaml
from flask.cli import FlaskGroup, with_appcontext
# General, logging
from scout import __version__
from scout.commands.convert import convert
from scout.commands.delete import delete
from scout.commands.download import download as download_command
from scout.commands.export import export
from scout.commands.index_command import index as index_command
# Commands
from scout.commands.load import load as load_command
from scout.commands.serve import serve
from scout.commands.setup import setup as setup_command
from scout.commands.update import update as update_command
from scout.commands.view import view as view_command
from scout.commands.wipe_database import wipe
from scout.server.app import create_app
# Log levels accepted by the --loglevel CLI option.
LOG_LEVELS = ["DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"]
LOG = logging.getLogger(__name__)
@click.pass_context
def loglevel(ctx):
    """Set app cli log level"""
    # Read the --loglevel option off the root command's parsed parameters.
    log_level = ctx.find_root().params.get("loglevel")
    log_format = None
    # coloredlogs installs a colored handler on the root logger.
    coloredlogs.install(level=log_level, fmt=log_format)
    LOG.info("Running scout version %s", __version__)
    LOG.debug("Debug logging enabled.")
@click.pass_context
def get_app(ctx=None):
    """Create an app with the correct config or with default app params"""
    loglevel()  # Set up log level even before creating the app object
    # store provided params into a options variable
    options = ctx.find_root()
    cli_config = {}
    # if a .yaml config file was provided use its params to intiate the app
    if options.params.get("config"):
        with open(options.params["config"], "r") as in_handle:
            cli_config = yaml.load(in_handle, Loader=yaml.SafeLoader)

    flask_conf = None
    if options.params.get("flask_config"):
        flask_conf = pathlib.Path(options.params["flask_config"]).absolute()

    # --demo overrides the database name (picked up first in MONGO_DBNAME
    # below because cli_config.get("demo") is checked before "mongodb").
    if options.params.get("demo"):
        cli_config["demo"] = "scout-demo"

    try:
        app = create_app(
            config=dict(
                # CLI flags win over the YAML config; "scout" is the fallback.
                MONGO_DBNAME=options.params.get("mongodb")
                or cli_config.get("demo")
                or cli_config.get("mongodb")
                or "scout",
                MONGO_HOST=options.params.get("host") or cli_config.get("host"),
                MONGO_PORT=options.params.get("port") or cli_config.get("port"),
                MONGO_USERNAME=options.params.get("username") or cli_config.get("username"),
                MONGO_PASSWORD=options.params.get("password") or cli_config.get("password"),
                MONGO_URI=options.params.get("mongo_uri") or cli_config.get("mongo_uri"),
                OMIM_API_KEY=cli_config.get("omim_api_key"),
            ),
            config_file=flask_conf,
        )
    except SyntaxError as err:
        LOG.error(err)
        raise click.Abort

    return app
# Root command group. FlaskGroup builds the Flask app via get_app() and makes
# an application context available to all subcommands.
@click.version_option(__version__)
@click.group(
    cls=FlaskGroup,
    create_app=get_app,
    invoke_without_command=True,
    add_default_commands=False,
    add_version_option=False,
)
@click.option(
    "-c",
    "--config",
    type=click.Path(exists=True),
    help="Path to a YAML config file with database info.",
)
@click.option(
    "--loglevel",
    default="DEBUG",
    type=click.Choice(LOG_LEVELS),
    help="Set the level of log output.",
    show_default=True,
)
@click.option("--demo", is_flag=True, help="If the demo database should be used")
@click.option("-db", "--mongodb", help="Name of mongo database [scout]")
@click.option("-uri", "--mongo-uri", help="MongoDB connection string")
@click.option("-u", "--username")
@click.option("-p", "--password")
@click.option("-a", "--authdb", help="database to use for authentication")
@click.option("-port", "--port", help="Specify on what port to listen for the mongod")
@click.option("-h", "--host", help="Specify the host for the mongo database.")
@click.option(
    "-f",
    "--flask-config",
    type=click.Path(exists=True),
    help="Path to a PYTHON config file",
)
@with_appcontext
def cli(**_):
    """scout: manage interactions with a scout instance."""
    # Intentionally empty: options are consumed by get_app() via the context,
    # and subcommands are registered on the group below.
# Register all subcommands on the root group at import time so that
# `scout --help` lists every command.
cli.add_command(load_command)
cli.add_command(wipe)
cli.add_command(setup_command)
cli.add_command(delete)
cli.add_command(export)
cli.add_command(convert)
cli.add_command(index_command)
cli.add_command(view_command)
cli.add_command(update_command)
cli.add_command(download_command)
cli.add_command(serve)
|
from distutils.core import setup

# Read the long description up front so the file handle is closed promptly;
# the previous bare open("README.md").read() inside the setup() call leaked
# the handle.
with open("README.md") as readme_file:
    long_description = readme_file.read()

setup(
    name="kafka-python",
    version="0.1-alpha",
    author="David Arthur",
    author_email="mumrah@gmail.com",
    url="https://github.com/mumrah/kafka-python",
    packages=["kafka"],
    license="Copyright 2012, David Arthur under Apache License, v2.0",
    description="Pure Python client for Apache Kafka",
    long_description=long_description,
)
| |
self.call_backs[system_socket.fileno()] = (
system_socket, self.process_message)
self.system_socket = system_socket
def process_player_command(self, a_socket):
    """ Process a command from the scenario player.

    Receives a pickled command object over the zmq socket and forwards it
    to the system after serialising it with the configured message class.
    """
    # receive the command
    command = a_socket.recv_pyobj()
    self.logger.info('received command from scenario player: {0}'
                     .format(type(command)))
    # Serialise and forward to the system-facing socket.
    self.system_socket.send(self.message.to_message(command))
def process_message(self, a_socket):
    """ Receive and forward a message from the system.

    An empty recv() result means the peer closed the connection, in which
    case the socket is unregistered and closed.
    """
    self.logger.info('Data from the system')
    # We do not know beforehand how big the blob is.
    data = a_socket.recv(2048)
    # Fixed: the old check was `data == ""`, which is never true for the
    # b"" that a TCP socket returns on Python 3, so closed connections were
    # never detected. `not data` handles both str and bytes.
    if not data:
        # Connection was closed, so unregister and close the socket.
        self.poller.unregister(a_socket)
        del self.call_backs[a_socket.fileno()]
        a_socket.close()
        self.system_socket = None
    else:
        # NOTE(review): this uses the module-level `message` while
        # process_player_command uses self.message -- confirm which is
        # intended before unifying.
        a_message = message.from_message(data)
        self.logger.info('Copying data to player')
        self.repeater_socket.send_pyobj(a_message)
def run(self):
    """Main event loop: poll registered sockets and dispatch their callbacks."""
    # Catch any Control-C
    signal.signal(signal.SIGINT, self.control_c_handler)
    self.create_sockets()
    while self.go_on:
        # Note that poller uses fileno() as the key for non-zmq sockets.
        socks = dict(self.poller.poll(60000))  # Timeout in ms, 1 minute
        for socket_key in self.call_backs.copy():
            # Need copy here cause we might modify the call_backs
            # while in the call back functions.
            if socket_key in socks and socks[socket_key] == zmq.POLLIN:
                if socket_key in self.call_backs:
                    a_socket, callback = self.call_backs[socket_key]
                    callback(a_socket)
        self.logger.info("Still alive")
        # Fixed: a stray `self.run(socks)` used to sit here; run() takes no
        # arguments, so that call raised TypeError on the first pass. The
        # while loop already repeats the polling.
    self.logger.info("Stopping")
    self.context.term()
#------------------------------------------------------------------------------
class TCPDispatcher(Dispatcher):
    """ Dispatcher subclass for TCP connections"""

    def __init__(self, dispatcher_type, dispatcher_id):
        # Fixed: the base initializer was called with the undefined name
        # 'name'; the dispatcher type is what identifies this dispatcher.
        Dispatcher.__init__(self, dispatcher_type, dispatcher_id)
        config = configparser.ConfigParser()
        config.read('simulator.conf')
        dispatcher_section = ('dispatcher-{0}-{1}'
                              .format(dispatcher_type, dispatcher_id))
        if dispatcher_section in config.sections():
            entries = config[dispatcher_section]
            # path to the message class
            self.message_path = entries['MessagePath']
            # Fixed: the old code read the bare name 'message_path' (a
            # NameError), passed a *string* to loader.exec_module() (which
            # takes a module object) and dropped the Message instance in a
            # local, even though process_player_command uses self.message.
            if self.message_path is not None:
                import importlib.util
                loader = importlib.machinery.SourceFileLoader(
                    'message', self.message_path)
                spec = importlib.util.spec_from_loader('message', loader)
                message_module = importlib.util.module_from_spec(spec)
                loader.exec_module(message_module)
                self.message = message_module.Message()
            # address and port to listen on for messages from the system
            self.accept_address = entries['AcceptAddress']
            self.listen_port = entries['ListenPort']
            # port to listen on for commands from the player.
            self.command_listen_port = entries['CommandListenPort']
            # port to forward messages to the player.
            self.message_forward_port = entries['MessageForwardPort']
        else:
            self.logger.critical('no valid tcp section found in config file')

    def create_sockets(self):
        """ Create the TCP sockets between the system and the
        Scenario player
        """
        self.logger.info('Creating sockets for {0} {1}'
                         .format(self.name, self.dispatcher_id))
        # Open a tcp socket to listen for new connections
        # from the system.
        self.logger.info("Listening on address {0}"
                         .format(str(self.accept_address)))
        self.logger.info("Listening on port {0}".format(str(self.listen_port)))
        accept_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        # Fixed: the attribute set in __init__ is listen_port (self.port was
        # never assigned), and configparser values are strings while bind()
        # needs an integer port.
        accept_socket.bind((self.accept_address, int(self.listen_port)))
        # Only handle one connection at a time.
        accept_socket.listen(1)
        # Let the superclass finish the creation of the rest of the
        # sockets, because it is the same.
        Dispatcher.create_sockets(self, accept_socket)

    def run(self):
        # TCP dispatcher has no extra steps to add to the default loop.
        # NOTE(review): this overrides Dispatcher.run() entirely instead of
        # extending it, so the base polling loop never executes for TCP
        # dispatchers -- confirm whether Dispatcher.run(self) should be
        # called here.
        pass
#------------------------------------------------------------------------------
class SerialDispatcher(Dispatcher):
""" Dispatcher subclass for Serial connections"""
SERIAL_PARITY = {'none':serial.PARITY_NONE , 'even':serial.PARITY_EVEN ,
'odd':serial.PARITY_ODD , 'mark':serial.PARITY_MARK ,
'space':serial.PARITY_SPACE}
SERIAL_STOPBITS= {'one':serial.STOPBITS_ONE ,
'onePointFive': serial.STOPBITS_ONE_POINT_FIVE,
'two':serial.STOPBITS_TWO }
default_timeout = 60000
def __init__(self, dispatcher_type, dispatcher_id):
Dispatcher.__init__(self, dispatcher_type, dispatcher_id)
self.repeater_socket = None
self.poller = None
self.call_backs = None
self.serial_link = None
self.timeout = default_timeout
self.receiving = False
self.blob = ""
config = configparser.ConfigParser()
config.read('simulator.conf')
dispatcher_section = ('dispatcher-{0}-{1}'
.format(dispatcher_type, dispatcher_id))
if (dispatcher_section) in config.sections():
entries = config[dispatcher_section]
# path to the message class
self.message_path = entries['MessagePath']
if message_path is not None:
loader = importlib.machinery.SourceFileLoader('message',
message_path)
message_module = loader.exec_module('message')
message = message_module.Message()
# Settings for the serial link to the system.
self.serial_device = entries['Device']
self.serial_baudrate = int(entries['BaudRate'])
self.serial_bytesize = int(entries['ByteSize'])
self.serial_parity = SERIAL_PARITY.get(entries['Parity'])
self.serial_stopbits = SERIAL_STOPBITS.get(entries['StopBits'])
# port to listen on for commands from the player.
self.command_listen_port = entries['CommandListenPort']
# port to forward messages to the player.
self.message_forward_port = entries['MessageForwardPort']
else:
self.logger.critical('no valid serial section '
'found in config file')
def create_sockets(self):
""" Create the socket to the scenario player and set up the
serial link to the system
"""
self.logger.info('Creating sockets for {0} {1}'
.format(self.name, self.dispatcher_id))
# Setup a serial link to listen to the system
self.logger.info("Opening serial device {0} ".format(serial_device))
self.serial_link = serial.Serial(serial_device, serial_baudrate,
serial_parity, serial_bytes | ize,
serial_stopbits)
# Open a socket to listen for commands from the scenario player
address = "tcp://*:{0}".format(self.command_listen_port)
self.logger.info("Command subscription at {0}".format(address))
command_socket = self.context.socket(zmq.SUB)
command_socket.bind(address)
command | _socket.setsockopt( |
import os
# Locate the SBML model file that ships alongside this script and read its
# full contents into memory as one string.
path = os.path.dirname(os.path.realpath(__file__))
sbmlFilePath = os.path.join(path, 'BIOMD0000000045.xml')
with open(sbmlFilePath,'r') as f:
    sbmlString = f.read()
def module_exists(module_name):
    """Return True when *module_name* can be imported, False otherwise."""
    try:
        __import__(module_name)
    except ImportError:
        return False
    return True
# Parse the SBML document only when the optional libsbml package is
# importable; otherwise only the raw model string is available.
if module_exists('libsbml'):
    import libsbml
    sbml = libsbml.readSBMLFromString(sbmlString)
#-*- coding:utf-8 -*-
'''
显示命令的输出结果。
'''
import threading
from gi.repository import Gtk, Gdk, GObject, GLib, GtkSource, Pango
from VcEventPipe import *
class ViewLog:
    '''
    Display the log output of commands (Python 2 / GTK3 via GObject
    introspection).
    Two pieces of state drive the behaviour:
    1. When a new command starts, should the view switch to its log?
    2. After new log text is appended, should the view keep scrolling?
    '''
    # Enumeration constants for the columns.
    (
    COLUMN_TAG_LINE_NO, # line number
    COLUMN_TAG_NAME, # tag name
    NUM_COLUMNS) = range(3)
    def __init__(self, vc_cmd_grp):
        # Build a read-only GtkSource view inside a scrolled window and
        # subscribe to the log events of the event pipe.
        self.vc_cmd_grp = vc_cmd_grp # the command group currently executing
        self.vc_cmd = None # the command currently executing
        sw = Gtk.ScrolledWindow()
        sw.set_shadow_type(Gtk.ShadowType.ETCHED_IN)
        sw.set_policy(Gtk.PolicyType.AUTOMATIC, Gtk.PolicyType.AUTOMATIC)
        editor = GtkSource.View()
        editor.set_cursor_visible(True)
        editor.set_show_line_numbers(True) # show line numbers
        editor.set_auto_indent(True) # auto-indent
        #editor.set_insert_spaces_instead_of_tabs(True) # spaces instead of tabs
        editor.set_tab_width(4) # tab width 4
        editor.set_highlight_current_line(True) # highlight the current line
        editor.set_editable(False) # read-only
        self._ide_set_font(editor, "Ubuntu mono 12") # set the font
        src_buffer = self.create_buffer()
        editor.set_buffer(src_buffer)
        sw.add(editor)
        self.view = sw
        self.taglistview = editor
        VcEventPipe.register_event(VcEventPipe.EVENT_LOG_COMMAND_START, self.sm_start_new_cmd)
        VcEventPipe.register_event(VcEventPipe.EVENT_LOG_APPEND_TEXT, self.sm_append_log)
        self.set_scrollable(True)
        self.set_show_new_cmd_log(True)
    def layout(self):
        # Make the editor and its container visible.
        self.taglistview.show()
        self.view.show()
    def unregister(self):
        # Detach from the event pipe; must be called before discarding
        # this view, otherwise the pipe keeps it alive.
        VcEventPipe.unregister_event(VcEventPipe.EVENT_LOG_COMMAND_START, self.sm_start_new_cmd)
        VcEventPipe.unregister_event(VcEventPipe.EVENT_LOG_APPEND_TEXT, self.sm_append_log)
    def _ide_set_font(self, widget, str_font_desc):
        ''' Set the widget's font.
            widget Gtk.Widget the widget
            str_font_desc String font description ("name size")
        '''
        font_desc = Pango.FontDescription.from_string(str_font_desc)
        widget.modify_font(font_desc)
    def create_buffer(self):
        # TODO: find a syntax definition better suited to log output.
        # Supported languages:
        # ada awk boo c c-sharp changelog chdr cpp css d def desktop diff
        # docbook dot dpatch dtd eiffel erlang forth fortran gap gettext-translation
        # gtk-doc gtkrc haddock haskell haskell-literate html idl ini java js latex
        # libtool lua m4 makefile msil nemerle objc objective-caml ocl octave pascal
        # perl php pkgconfig python r rpmspec ruby scheme sh sql tcl texinfo vala vbnet
        # verilog vhdl xml xslt yacc
        src_buffer = GtkSource.Buffer()
        manager = GtkSource.LanguageManager()
        language = manager.get_language("sh") # choose the syntax type
        src_buffer.set_language(language)
        src_buffer.set_highlight_syntax(True) # syntax highlighting
        return src_buffer
    def set_scrollable(self, is_scrollable):
        # Toggle auto-scrolling after log updates.
        self.is_scrollable = is_scrollable
        if is_scrollable: # scrolling requested
            self._scroll_to_end() # jump to the end immediately
        else: # no scrolling wanted
            pass # nothing to do
    def get_scrollable(self, is_scrollable):
        # Query whether the newest log content is auto-scrolled into view.
        # NOTE(review): the is_scrollable parameter is unused -- confirm
        # callers, it probably should take no argument.
        return self.is_scrollable
    def set_show_new_cmd_log(self, show):
        # Toggle whether the view follows the most recently started command.
        self.is_show_new_cmd_log = show
        if show:
            # When following new commands, sync to the latest selected
            # command that has already started (process > 0).
            lastest_cmd = None
            for cmd in self.vc_cmd_grp.commands:
                if cmd.is_selected and cmd.process > 0:
                    lastest_cmd = cmd
            if lastest_cmd is not None:
                self.vc_cmd = lastest_cmd
                self.set_log(lastest_cmd)
        else:
            # When not following new commands, nothing needs to happen.
            pass
    def get_show_new_cmd_log(self):
        return self.is_show_new_cmd_log
    def sm_start_new_cmd(self, vc_cmd):
        # Ignore commands that do not belong to this command group.
        if vc_cmd not in self.vc_cmd_grp.commands:
            return
        # If it is not the current command and we are not following new
        # commands, stop accepting its output.
        if not self.is_show_new_cmd_log and self.vc_cmd != vc_cmd:
            return
        self.vc_cmd = vc_cmd
        # Buffer mutation must happen on the GTK main loop.
        Gdk.threads_add_idle(GLib.PRIORITY_DEFAULT_IDLE, self.clean_log)
    def clean_log(self):
        ''' Clear all current text from the buffer. '''
        print "clean text"
        editor = self.taglistview
        src_buf = editor.get_buffer()
        src_buf.delete(src_buf.get_start_iter(), src_buf.get_end_iter())
    def sm_append_log(self, vc_cmd, text):
        # Ignore commands that do not belong to this command group.
        if vc_cmd not in self.vc_cmd_grp.commands:
            return
        # If it is not the current command and we are not following new
        # commands, stop accepting its output.
        if not self.is_show_new_cmd_log and self.vc_cmd != vc_cmd:
            return
        # Append on the GTK main loop, passing the text through.
        Gdk.threads_add_idle(GLib.PRIORITY_DEFAULT_IDLE, self.append_log, text)
    def append_log(self, text):
        # thrd = threading.currentThread()
        # print "append text : %s" % ( thrd.getName() )
        #print "append " + text,
        ''' Append one chunk of text to the end of the log buffer. '''
        editor = self.taglistview
        src_buf = editor.get_buffer()
        iter_ = src_buf.get_end_iter()
        src_buf.insert(iter_, text)
        if self.is_scrollable:
            self._scroll_to_end()
    def set_log(self, vc_cmd):
        # Show the complete stored log of *vc_cmd*, replacing the current
        # content and pinning the view to that command.
        self.vc_cmd = vc_cmd
        self.set_show_new_cmd_log(False)
        self.clean_log()
        self.append_log(vc_cmd.get_log())
    def _scroll_to_end(self):
        editor = self.taglistview
        src_buf = editor.get_buffer()
        iter_ = src_buf.get_end_iter()
        # Move to the end. (TODO: does not actually reach the very end.)
        editor.scroll_to_iter(iter_, 0.25, False, 0.0, 0.5)
from vkapp.bot.models import Blogger, News, AdminReview, Publication
from .usersDAO import get_or_create_blogger
from datetime import datetime, timedelta, time
def new_news(link, media, uid, pic):
    """Create, persist and return a News row for the blogger behind *uid*."""
    author = get_or_create_blogger(uid)
    item = News(link=link, media=media, pic=pic, blogger=author)
    item.save()
    return item
def get_news_proposed_today(uid):
    """News proposed by vk user *uid* between today's midnight and the next."""
    midnight = datetime.combine(datetime.now().date(), time())
    next_midnight = midnight + timedelta(1)
    queryset = News.objects.filter(blogger__vk_user__vk_id=uid)
    return queryset.filter(date_time__gte=midnight, date_time__lte=next_midnight)
def news_by_blogger(uid):
    """All News rows authored by the blogger mapped to vk user *uid*."""
    return News.objects.filter(blogger=get_or_create_blogger(uid))
def get_news_review_rating(news):
    """Return the admin-review rating for *news*, or 0 when none exists.

    Uses QuerySet.first() so a single row is fetched, instead of the
    original len(queryset) which evaluated the whole queryset.
    """
    review = AdminReview.objects.filter(news=news).first()
    return 0 if review is None else review.rating
def is_news_published(news):
    """True if at least one Publication references *news*.

    QuerySet.exists() issues an EXISTS query instead of fetching every
    matching row as the original len()-based check did.
    """
    return Publication.objects.filter(news=news).exists()
|
from modes import *
# mode_traffic
field_rate_down = 'rate_down'
field_bw_down = 'bw_down'
field_rate_up = 'rate_up'
field_bw_up = 'bw_up'
# mode_temp
field_cpum = 'cpum'
field_cpub = 'cpub'
field_sw = 'sw'
field_hdd = 'hdd'
# mode_fan_speed
field_fan_speed = 'fan_speed'
# mode_xdsl
field_snr_down = 'snr_down'
field_snr_up = 'snr_up'
# mode_xdsl_errors
field_fec = 'fec'
field_crc = 'crc'
field_hec = 'hec'
field_es = 'es'
field_ses = 'ses'
# mode_switch1
field_rx1 = 'rx_1'
field_tx1 = 'tx_1'
# mode_switch2
field_rx2 = 'rx_2'
field_tx2 = 'tx_2'
# mode_switch3
field_rx3 = 'rx_3'
field_tx3 = 'tx_3'
# mode_switch4
field_rx4 = 'rx_4'
field_tx4 = 'tx_4'
# mode_transmission_tasks
field_nb_tasks_stopped = 'nb_tasks_stopped'
field_nb_tasks_checking = 'nb_tasks_checking'
field_nb_tasks_queued = 'nb_tasks_queued'
field_nb_tasks_extracting = 'nb_tasks_extracting'
field_nb_tasks_done = 'nb_tasks_done'
field_nb_tasks_repairing = 'nb_tasks_repairing'
field_nb_tasks_downloading = 'nb_tasks_downloading'
field_nb_tasks_error = 'nb_tasks_error'
field_nb_tasks_stopping = 'nb_tasks_stopping'
field_nb_tasks_seeding = 'nb_tasks_seeding'
# field_nb_tasks_active = 'nb_tasks_active' # Total active
# nb_tasks = 'nb_tasks' # Total
# mode_transmission_rate
field_rx_throttling = 'throttling_rate.rx_rate'
field_tx_throttling = 'throttling_rate.tx_rate'
field_rx_rate = 'rx_rate'
field_tx_rate = 'tx_rate'
# mode connection
field_bytes_up = 'bytes_up'
field_bytes_down = 'bytes_down'
# mode ftth
field_has_sfp = 'has_sfp'
field_link = 'link'
field_sfp_alim_ok = 'sfp_alim_ok'
field_sfp_has_signal = 'sfp_has_signal'
field_sfp_present = 'sfp_present'
# mode switch-bytes
field_rx_bytes = 'rx_good_bytes'
field_tx_bytes = 'tx_bytes'
# mode switch-packets
field_rx_packets = 'rx_good_packets'
field_tx_packets = 'tx_packets'
field_rx_unicast_packets = 'rx_unicast_packets'
field_tx_unicast_packets = 'tx_unicast_packets'
field_rx_broadcast_packets = 'rx_broadcast_packets'
field_tx_broadcast_packets = 'tx_broadcast_packets'
# mode wifi-stations
field_stations = 'stations'
# mode wifi-bytes
field_wifi_rx_bytes = 'rx_bytes'
field_wifi_tx_bytes = 'tx_bytes'
fields = {
mode_traffic: [
field_rate_down,
field_bw_down,
field_rate_up,
field_bw_up
],
mode_temp: [
field_cpum,
field_cpub,
field_sw,
field_hdd
],
mode_fan_speed: [
field_fan_ | speed
],
mode_xdsl: [
field_snr_down,
field_snr_up
],
mode_xdsl_errors: [
field_fec,
field_crc,
field_hec,
field_es,
field_ses
| ],
mode_switch1: [
field_rx1,
field_tx1
],
mode_switch2: [
field_rx2,
field_tx2
],
mode_switch3: [
field_rx3,
field_tx3
],
mode_switch4: [
field_rx4,
field_tx4
],
mode_switch_bytes: [
field_rx_bytes,
field_tx_bytes,
],
mode_switch_packets: [
field_rx_packets,
field_tx_packets,
field_rx_unicast_packets,
field_tx_unicast_packets,
field_rx_broadcast_packets,
field_tx_broadcast_packets,
],
mode_transmission_tasks: [
field_nb_tasks_stopped,
field_nb_tasks_checking,
field_nb_tasks_queued,
field_nb_tasks_extracting,
field_nb_tasks_done,
field_nb_tasks_repairing,
field_nb_tasks_downloading,
field_nb_tasks_error,
field_nb_tasks_stopping,
field_nb_tasks_seeding
],
mode_transmission_traffic: [
field_rx_throttling,
field_tx_throttling,
field_rx_rate,
field_tx_rate,
],
mode_connection: [
field_bytes_up,
field_bytes_down,
],
mode_connection_log: [
field_bytes_up,
field_bytes_down,
],
mode_ftth: [
field_has_sfp,
field_link,
field_sfp_alim_ok,
field_sfp_has_signal,
field_sfp_present,
],
mode_wifi_stations: [
field_stations,
],
mode_wifi_bytes: [
field_wifi_rx_bytes,
field_wifi_tx_bytes,
],
mode_wifi_bytes_log: [
field_wifi_rx_bytes,
field_wifi_tx_bytes,
],
}
xdsl_errors_fields_descriptions = {
field_fec: 'FEC (Forward Error Connection)',
field_crc: 'CRC (Cyclic Redundancy Check)',
field_hec: 'HEC (Header Error Control)',
field_es: 'ES (Errored Seconds)',
field_ses: 'SES (Severely Errored Seconds)'
}
def get_fields(mode):
    """Return the list of field names polled for *mode*.

    Raises KeyError for unknown modes after logging them; the original
    printed the warning and then fell through to the same implicit
    KeyError, which made the failure look accidental.
    """
    if mode not in fields:
        print('Unknown mode {}'.format(mode))
        raise KeyError(mode)
    return fields[mode]
|
ion values from a file which it wrote to in the previous run of the program",default=0)
mutex_parser_IC_THRESH.add_argument('--info_threshold_Wyatt_Clark_percentile','-WCTHRESHp',help="Provide the percentile p. All annotations having information content below p will be discarded")
mutex_parser_IC_THRESH.add_argument('--info_threshold_Wyatt_Clark','-WCTHRESH',help="Provide a threshold value t. All annotations having information content below t will be discarded")
mutex_parser_IC_THRESH.add_argument('--info_threshold_Phillip_Lord_percentile','-PLTHRESHp',help="Provide the percentile p. All annotations having information content below p will be discarded")
mutex_parser_IC_THRESH.add_argument('--info_threshold_Phillip_Lord','-PLTHRESH',help="Provide a threshold value t. All annotations having information content below t will be discarded")
parser.add_argument('--verbose','-v',help="Set this argument to 1 if you wish to view the outcome of each operation on console",default=0)
parser.add_argument('--date_before','-dbfr',help="The date entered here will be parsed by the parser from dateutil package. For more information on acceptable date formats please visit https://github.com/dateutil/dateutil/. All annotations made prior to this date will be picked up")
parser.add_argument('--date_after','-daftr',help="The date entered here will be parsed by the parser from dateutil package. For more information on acceptable date formats please visit ht | tps://github.com/dateutil/dateutil/. All annotations made after this date will be picked up")
parser.add_argument('--single_file','-single',default=0,help="Set to 1 in order to output the results of each individual species in a single file.")
mutex_parser_select_references.add_argument('--select_references','-selref',nargs='+',help='Provide the paths to files which contain references you wish to select. I | t is possible to include references in case you wish to select annotations made by a few references. This will prompt the program to interpret string which have the keywords \'GO_REF\',\'PMID\' and \'Reactome\' as a GO reference. Strings which do not contain that keyword will be interpreted as a file path which the program will except to contain a list of GO references. The program will accept a mixture of GO_REF and file names. It is also possible to choose all references of a particular category and a handful of references from another. For example if you wish to choose all PMID references, just put PMID. The program will then select all PMID references. Currently the program can accept PMID, GO_REF and Reactome')
mutex_parser_select_references.add_argument('--select_references_inverse','-selrefinv',nargs='+',help='Works like -selref but does not select the references which have been provided as input')
parser.add_argument('--report','-r',help="Provide the path where the report file will be stored. If you are providing a path please make sure your path ends with a '/'. Otherwise the program will assume the last string after the final '/' as the name of the report file. A single report file will be generated. Information for each species will be put into individual worksheets.")
parser.add_argument('--histogram','-hist',help="Set this option to 1 if you wish to view the histogram of GO_TERM frequency before and after debiasing is performed with respect to cutoffs based on number of proteins or annotations. If you wish to save the file then please enter a filepath. If you are providing a path please make sure your path ends with a '/'. Otherwise the program will assume the last string after the final '/' as the name of the image file. Separate histograms will be generated for each species.")
args = parser.parse_args()
return args
def createProteinToGOMapping( data ):
    """
    Build {protein_id: [[GO_term, aspect], ...]} from raw annotation rows
    and return it together with the deduplicated list of GO terms seen.

    Alternate/secondary GO ids are normalised through the pickled
    alt-id-to-id map. The original's duplicated new-protein/known-protein
    branches (both ended in the same membership-checked append) are
    collapsed into a single setdefault-based path; behaviour is unchanged.
    """
    prot_to_go = dict()
    all_GO = []
    alt_id_to_id_map = cp.load( open( FILE_ALTERNATE_ID_TO_ID_MAPPING, "rb" ) )
    for attnid in data:
        annotation = data[attnid]
        prot_id = annotation['DB'] + '_' + annotation['DB_Object_ID']
        GO_term = annotation['GO_ID']
        # Map alternate ids onto their primary GO id.
        if GO_term in alt_id_to_id_map:
            GO_term = alt_id_to_id_map[GO_term]
        all_GO.append( GO_term )
        entry = [GO_term, annotation['Aspect']]
        terms = prot_to_go.setdefault(prot_id, [])
        if entry not in terms:
            terms.append(entry)
    return prot_to_go, list( set( all_GO ) )
def propagateOntologies( Prot_to_GO_Map ):
    """
    Replace each protein's [GO_term, aspect] list with the deduplicated
    union of the ancestor sets of its terms, looked up per aspect
    (F -> MFO, P -> BPO, C -> CCO).

    The original also unpickled the three ontology graphs and the
    alternate-id map without ever using them; those dead loads are
    removed here.
    """
    mf_ancestors = cp.load(open(FILE_MFO_ONTOLOGY_ANCESTORS_GRAPH, "rb"))
    bp_ancestors = cp.load(open(FILE_BPO_ONTOLOGY_ANCESTORS_GRAPH, "rb"))
    cc_ancestors = cp.load(open(FILE_CCO_ONTOLOGY_ANCESTORS_GRAPH, "rb"))
    Prot_to_GO_Map_new = dict()
    for eachprotein in Prot_to_GO_Map:
        ancestors = []
        for GO_term, aspect in Prot_to_GO_Map[eachprotein]:
            if aspect == 'F':
                ancestors.extend(mf_ancestors[GO_term])
            elif aspect == 'P':
                ancestors.extend(bp_ancestors[GO_term])
            elif aspect == 'C':
                ancestors.extend(cc_ancestors[GO_term])
        Prot_to_GO_Map_new[eachprotein] = list(set(ancestors))
    return Prot_to_GO_Map_new
def findFrequency( annotations, Prot_to_GO_Map ):
    """
    Count the proteins whose GO-term collection contains every term in
    *annotations*.

    Returns 0 when *annotations* is None. An empty list is a subset of
    everything, so it counts every protein (original semantics kept).
    Fixes the `== None` comparison (PEP 8: identity check) and hoists the
    set construction out of the loop.
    """
    if annotations is None:
        return 0
    wanted = set(annotations)
    return sum(1 for prot in Prot_to_GO_Map
               if wanted.issubset(Prot_to_GO_Map[prot]))
def assignProbabilitiesToOntologyTree( g, Prot_to_GO_Map, all_GO_Terms, ontology_to_ia_map,aspect ):
    """
    Fill ontology_to_ia_map[node] = [probability, information content]
    for every node of ontology graph *g* (in place; returns None).

    The probability is P(node | parent terms), estimated by counting
    proteins annotated with the parent set with vs. without the node.
    NOTE(review): g.successors(node) is treated as the parent-term set,
    which implies edges point child -> parent in these pickled graphs --
    confirm. NOTE(review): when prob == 0 (denom == 0, or num == 0 with
    a nonzero denom) the -log2 below raises ValueError -- confirm such
    terms cannot occur here.
    """
    for node_num, node in enumerate( g.nodes() ):
        # Terms never observed in the annotation data get a zero entry.
        if( node not in all_GO_Terms ):
            ontology_to_ia_map[node] = [0, 0]
            continue
        # Progress logging every 100 nodes.
        if node_num % 100 == 0:
            vprint( node_num , " proteins processed for ",aspect )
        predecessor = g.successors( node )
        # vprint(node,predecessor)
        predecessor_with_node = []
        predecessor_with_node.extend( predecessor )
        predecessor_with_node.append( node )
        denom = findFrequency( predecessor, Prot_to_GO_Map )
        num = findFrequency( predecessor_with_node, Prot_to_GO_Map )
        # vprint(node,g.successors(node))
        """vprint(predecessor_with_node,num)
        vprint(predecessor,denom)"""
        if( denom == 0 ):
            prob = 0
        else:
            prob = num / denom
        ontology_to_ia_map[node] = [prob, -math.log( prob, 2 )]
def assignProbabilitiesToOntologyGraphs( Prot_to_GO_Map, all_GO_Terms,aspects ):
    """
    Compute the information-content map over all three GO ontologies
    (molecular function, biological process, cellular component) and
    return a single {GO_term: [probability, IC]} dict.
    """
    ontology_to_ia_map = dict()
    ontology_sources = (
        (FILE_MFO_ONTOLOGY_GRAPH, 'MFO'),
        (FILE_BPO_ONTOLOGY_GRAPH, 'BPO'),
        (FILE_CCO_ONTOLOGY_GRAPH, 'CCO'),
    )
    for graph_file, aspect_name in ontology_sources:
        ontology_graph = cp.load( open( graph_file, "rb" ) )
        assignProbabilitiesToOntologyTree( ontology_graph, Prot_to_GO_Map,
                                           all_GO_Terms, ontology_to_ia_map,
                                           aspect_name )
    return ontology_to_ia_map
def calculateInformationAccretionFo |
import argparse
from models import Service
from models import Base
import helpers
import traceback
import sys
import os
import importlib
import shutil
@helpers.handle_dbsession()
def prepare_service_db(sqlsession, name, desc, models, uses_blueprint):
    """Register a Service row and create tables for its models.

    Returns False when a service with the same name already exists,
    True on success. `sqlsession` is injected by the decorator.
    """
    s = sqlsession.query(Service).filter_by(name=name).first()
    if s:
        print('Service %s exists yet. Aborting.' % name)
        return False
    # BUGFIX: the original had a dead `if models: pass` and then called
    # create_all unconditionally -- iterating over models=None raised
    # TypeError. Only touch the schema when models are declared.
    if models:
        Base.metadata.create_all(helpers.engine, [m.__table__ for m in models])
    s = Service(name=name, uses_blueprint=uses_blueprint)
    sqlsession.add(s)
    sqlsession.commit()
    print('Successfully prepared DB new service %s: %s' % (name, desc))
    if models:
        print('%s contains the following fields:' % name)
        for model in models:
            print(str(model.__name__))
    else:
        print('%s contains no fields.' % name)
    return True
def validate_service(path):
    """Check that *path* is a directory containing an ``__init__.py``.

    Blueprint-level validation is currently disabled (kept in VCS
    history); only the package layout is verified. Returns True when the
    layout is valid, otherwise prints the reason and returns False.
    """
    if not os.path.isdir(path):
        print('%s is not a directory. Please check your input' % path)
        return False
    if not os.path.isfile(os.path.join(path, '__init__.py')):
        print('Service contains no __init__.py.')
        return False
    return True
def register_service(path):
    """Copy the service package at *path* into services/ and register it.

    Validates the package layout, refuses name collisions, copies the
    tree (rolling back on failure), then imports the module and prepares
    its database rows. Returns True on success, False otherwise.
    """
    print('Importing service from %s.' % path)
    if validate_service(path):
        servicename = os.path.basename(path)
        if os.path.isdir(os.path.join('services/', servicename)):
            print('Service could not be imported due to a service using the same name existing yet.')
            return False
        else:
            destination = os.path.join('services/', servicename)
            try:
                shutil.copytree(path, destination)
            except Exception as e:
                # Roll back the partial copy before giving up.
                print(e)
                traceback.print_tb(sys.exc_info()[2])
                shutil.rmtree(destination)
                return False
    else:
        print('Service is faulty, please consult the errors.')
        return False
    print('Preparing the DB for service %s' % servicename)
    try:
        m = importlib.import_module('.%s' % servicename, 'services')
        # sqlsession is injected by the handle_dbsession decorator.
        if prepare_service_db(m.__service_name__, m.__description__, m.__models__, m.__uses_blueprint__):
            print('Successfully prepared DB for service %s' % servicename)
        else:
            # BUGFIX: the original used print('... %s', servicename) --
            # a comma instead of %-formatting -- and the typo "fro".
            print('Failed to prepare the DB for service %s' % servicename)
            return False
    except Exception as e:
        print(e)
        traceback.print_tb(sys.exc_info()[2])
        print('Failed to load service %s due to a faulty module' % servicename)
        return False
    return True
if __name__ == "__main__":
    # CLI entry point: accept one or more service paths and try to
    # register each of them independently.
    parser = argparse.ArgumentParser(description='Service importer')
    parser.add_argument('--path',
                        metavar='url',
                        type=str,
                        nargs='+',
                        help='Path to the service to import')
    args = parser.parse_args()
    if not args.path or len(args.path) < 1:
        print('Please specify at least one service to import')
    else:
        for p in args.path:
            if register_service(p):
                print('Successfully registered new service %s' % p)
            else:
                print('Failed to register service %s' % p)
    # prepare_service_db('basics', 'Basic services and commands', (
    #     ('text', 'txt', Type.text, '.', (('2345', 'adsd'), ('2345', 'adsd'), ('2345', 'adsd'))),
    #     ('int', 'd', Type.int, '', ()),
    #     ('bool', 'truefalse', Type.bool, '', ())
    # ))
import sys
def suite(n, s):
    """Run-length encode the first *n* characters of *s*.

    For each maximal run, "<count><char>" is appended (look-and-say
    style), e.g. suite(4, '1211') -> '111221'. The encoding is printed
    and, as a backward-compatible generalization, also returned (the
    original returned None).

    The original carried an if/else at end-of-string whose two branches
    were identical, and whose else-branch would have raised IndexError
    (s[n]) had it ever been reachable; both are removed.
    """
    fin = ''
    current = s[0]
    count = 1
    for ch in s[1:n]:
        if ch == current:
            count += 1
        else:
            fin = fin + str(count) + str(current)
            current = ch
            count = 1
    # Flush the final run.
    fin = fin + str(count) + str(current)
    print(fin)
    return fin
if __name__ == '__main__':
    # Python 2 entry point: read the length and the digit string from
    # stdin, then print the run-length encoding.
    n = int(raw_input())
    s = raw_input()
    suite(n,s)
|
from django import forms
class PutForm(forms.Form):
    """Form for submitting a job into a queue.

    NOTE(review): the field names (tube, priority, delay, ttr) match
    beanstalkd's put command; the 2147483648 (2**31) default priority
    presumably mirrors the conventional beanstalkd default -- confirm
    against the consuming view.
    """
    body = forms.CharField(widget=forms.Textarea())  # job payload
    tube = forms.CharField(initial='default')  # queue name
    priority = forms.IntegerField(initial=2147483648)  # lower = more urgent
    delay = forms.IntegerField(initial=0)  # seconds before job becomes ready
    ttr = forms.IntegerField(initial=120)  # time-to-run in seconds, presumably
|
#!/usr/bin/env python3
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import re
import requests
import subprocess
import sys
import time
# ---- command-line handling -------------------------------------------------
if len(sys.argv) != 5:
    sys.stderr.write('usage: program <github-username> <upstream-remote> <previous-release-branch> <current-release-branch>\n')
    # BUGFIX: the next two writes lacked trailing newlines, so the three
    # usage lines ran together on the terminal.
    sys.stderr.write(" e.g., program myusername upstream 0.13.0-incubating 0.14.0-incubating\n")
    sys.stderr.write(" It is also necessary to set a GIT_TOKEN environment variable containing a personal access token.\n")
    sys.exit(1)

github_username = sys.argv[1]
upstream_remote = sys.argv[2]
previous_branch = sys.argv[3]
release_branch = sys.argv[4]

master_branch = "master"
upstream_master = "{}/{}".format(upstream_remote, master_branch)
upstream_previous = "{}/{}".format(upstream_remote, previous_branch)
upstream_release = "{}/{}".format(upstream_remote, release_branch)

command = "git log {}..{} --oneline | tail -1".format(upstream_master, upstream_previous)

# Find the commit where the previous release branch was cut from master
previous_branch_first_commit = subprocess.check_output(command, shell=True).decode('UTF-8')
# BUGFIX: raw string -- '\w' in a plain string is an invalid escape
# (DeprecationWarning since Python 3.6).
match_result = re.match(r"(\w+) .*", previous_branch_first_commit)
previous_branch_first_commit = match_result.group(1)

print("Previous branch: {}, first commit: {}".format(upstream_previous, previous_branch_first_commit))

# Find all commits between that commit and the current release branch
command = "git rev-list {}..{}".format(previous_branch_first_commit, upstream_release)
all_release_commits = subprocess.check_output(command, shell=True).decode('UTF-8')

for commit_id in all_release_commits.splitlines():
    try:
        # wait 3 seconds between calls to avoid hitting the rate limit
        time.sleep(3)
        search_url = "https://api.github.com/search/issues?q=type:pr+is:merged+is:closed+repo:apache/incubator-druid+SHA:{}"
        resp = requests.get(search_url.format(commit_id), auth=(github_username, os.environ["GIT_TOKEN"]))
        resp_json = resp.json()
        milestone_found = False
        closed_pr_nums = []
        if (resp_json.get("items") is None):
            print("Could not get PRs for commit ID {}, resp: {}".format(commit_id, resp_json))
            continue
        for pr in resp_json["items"]:
            closed_pr_nums.append(pr["number"])
            milestone = pr["milestone"]
            if milestone is not None:
                milestone_found = True
                print("COMMIT: {}, PR#: {}, MILESTONE: {}".format(commit_id, pr["number"], pr["milestone"]["url"]))
        if not milestone_found:
            print("NO MILESTONE FOUND FOR COMMIT: {}, CLOSED PRs: {}".format(commit_id, closed_pr_nums))
    except Exception as e:
        # Best-effort scan: log and keep going with the next commit.
        print("Got exception for commitID: {} ex: {}".format(commit_id, e))
        continue
|
equencies = atwork_subtour_frequency.isna()
logger.warning("WARNING Bad atwork subtour frequencies for %s work tours" % bad_tour_frequencies.sum())
logger.warning("WARNING Bad atwork subtour frequencies: num_tours\n%s" %
tour_counts[bad_tour_frequencies])
logger.warning("WARNING Bad atwork subtour frequencies: num_tours\n%s" %
subtours[subtours.parent_tour_id.isin(tour_counts[bad_tour_frequencies].index)].
sort_values('parent_tour_id'))
bug
atwork_subtour_frequency = reindex(atwork_subtour_frequency, tours[ASIM_TOUR_ID]).fillna('')
return atwork_subtour_frequency
def patch_trip_ids(tours, trips):
    """
    replace survey trip_ids with asim standard trip_id
    replace survey tour_id foreign key with asim standard tour_id
    """
    # tour_id is a column, not index
    assert ASIM_TOUR_ID in tours
    # patch tour_id foreign key
    # tours['household_id'] = reindex(persons.household_id, tours.person_id)
    # Lookup series: survey tour id -> asim tour id, then map it over the
    # trips' survey tour ids.
    asim_tour_id = pd.Series(tours[ASIM_TOUR_ID].values, index=tours[SURVEY_TOUR_ID].values)
    trips[ASIM_TOUR_ID] = reindex(asim_tour_id, trips[SURVEY_TOUR_ID])
    # person_is_university = persons.pstudent == constants.PSTUDENT_UNIVERSITY
    # tour_is_university = reindex(person_is_university, tours.person_id)
    # tour_primary_purpose = tours.tour_type.where((tours.tour_type != 'school') | ~tour_is_university, 'univ')
    # tour_primary_purpose = tour_primary_purpose.where(tours.tour_category!='atwork', 'atwork')
    #
    # trips['primary_purpose'] = reindex(tour_primary_purpose, trips.tour_id)
    # if order is ambiguous if trips depart in same time slot - order by SURVEY_TRIP_ID hoping that increases with time
    if 'trip_num' not in trips:
        # Sequence trips within each (tour, direction) leg: 1, 2, 3, ...
        trips['trip_num'] = \
            trips.sort_values(by=['tour_id', 'outbound', 'depart', SURVEY_TRIP_ID]).\
            groupby(['tour_id', 'outbound']).\
            cumcount() + 1
    # Build the canonical asim trip index in place, then move it back out
    # into a regular column under the asim trip-id name.
    cid.set_trip_index(trips)
    assert trips.index.name == ASIM_TRIP_ID
    trips = trips.reset_index().rename(columns={'trip_id': ASIM_TRIP_ID})
    return trips
def infer_stop_frequency(configs_dir, tours, trips):
    """Infer each tour's stop_frequency alternative (e.g. '1out_0in')
    from the observed trips, using the configured alternatives table.

    Returns a Series of alternative labels aligned with *tours*.
    """
    # alt,out,in
    # 0out_0in,0,0
    # 0out_1in,0,1
    # ...
    alts = pd.read_csv(os.path.join(configs_dir, 'stop_frequency_alternatives.csv'), comment='#')
    assert 'alt' in alts
    assert 'in' in alts
    assert 'out' in alts
    freq = pd.DataFrame(index=tours[SURVEY_TOUR_ID])
    # number of trips is one less than number of stops
    freq['out'] = trips[trips.outbound].groupby(SURVEY_TOUR_ID).trip_num.max() - 1
    freq['in'] = trips[~trips.outbound].groupby(SURVEY_TOUR_ID).trip_num.max() - 1
    # Left-join so tours with no matching (out, in) combination get NaN
    # rather than being dropped.
    freq = pd.merge(freq.reset_index(), alts, on=['out', 'in'], how='left')
    assert (freq[SURVEY_TOUR_ID] == tours[SURVEY_TOUR_ID]).all()
    return freq.alt
def read_tables(input_dir, tables):
    """
    Load each survey table described in `tables` from `input_dir`.

    Each entry of `tables` maps a table name to a dict with 'file_name' and
    (optionally) 'index'; the loaded DataFrame is stored back into the entry
    under 'table'.  String columns have NaN coerced to empty strings.

    Returns the (households, persons, tours, joint_tour_participants, trips)
    DataFrames in that order.
    """
    for name, info in tables.items():
        df = pd.read_csv(os.path.join(input_dir, info['file_name']),
                         index_col=info.get('index'))
        # read_csv converts empty string to NaN, even if all non-empty
        # values are strings -- normalize object columns back to str
        for col in df.columns:
            if df[col].dtype == 'object':
                print("##### converting", col, df[col].dtype)
                df[col] = df[col].fillna('').astype(str)
        info['table'] = df
    return (tables['households'].get('table'),
            tables['persons'].get('table'),
            tables['tours'].get('table'),
            tables['joint_tour_participants'].get('table'),
            tables['trips'].get('table'))
def check_controls(table_name, column_name):
    """
    Compare one column (or the index, when column_name == 'index') of a survey
    table against the corresponding control table.

    Prints a mismatch report and returns False when any rows differ;
    returns True when everything matches.
    """
    survey_df = survey_tables[table_name].get('table')
    control_df = control_tables[table_name].get('table')
    if column_name == 'index':
        mismatch = (survey_df.index != control_df.index)
    else:
        mismatch = (survey_df[column_name] != control_df[column_name])
    if not mismatch.any():
        return True
    print("check_controls %s.%s: %s out of %s do not match" %
          (table_name, column_name, mismatch.sum(), len(survey_df)))
    print("control\n%s" % control_df[mismatch][[column_name]])
    print("survey\n%s" % survey_df[mismatch][[column_name]])
    print("control\n%s" % control_df[mismatch][survey_df.columns])
    print("survey\n%s" % survey_df[mismatch][survey_df.columns])
    return False
def infer(configs_dir, input_dir, output_dir):
households, persons, tours, joint_tour_participants, trips = read_tables(input_dir, survey_tables)
# be explicit about all tour_ids to avoid confusion between asim and survey ids
tours = tours.rename(columns={'tour_id': SURVEY_TOUR_ID, 'parent_tour_id': SURVEY_PARENT_TOUR_ID})
joint_tour_participants = \
joint_tour_participants.rename(columns={'tour_id': SURVEY_TOUR_ID, 'participant_id': SURVEY_PARTICIPANT_ID})
trips = trips.rename(columns={'trip_id': SURVEY_TRIP_ID, 'tour_id': SURVEY_TOUR_ID})
# mangle survey tour ids to keep us honest
tours[SURVEY_TOUR_ID] = mangle_ids(tours[SURVEY_TOUR_ID])
tours[SURVEY_PARENT_TOUR_ID] = mangle_ids(tours[SURVEY_PARENT_TOUR_ID])
joint_tour_participants[SURVEY_TOUR_ID] = mangle_ids(joint_tour_participants[SURVEY_TOUR_ID])
joint_tour_participants[SURVEY_PARTICIPANT_ID] = mangle_ids(joint_tour_participants[SURVEY_PARTICIPANT_ID])
trips[SURVEY_TRIP_ID] = mangle_ids(trips[SURVEY_TRIP_ID])
trips[SURVEY_TOUR_ID] = mangle_ids(trips[SURVEY_TOUR_ID])
# persons.cdap_activity
persons['cdap_activity'] = infer_cdap_activity(persons, tours, joint_tour_participants)
# check but don't assert as this is not deterministic
skip_controls or check_controls('persons', 'cdap_activity')
# persons.mandatory_tour_frequency
persons['mandatory_tour_frequency'] = infer_mandatory_tour_frequency(persons, tours)
assert skip_controls or check_controls('persons', 'mandatory_tour_frequency')
# persons.non_mandatory_tour_frequency
tour_frequency = infer_non_mandatory_tour_frequency(configs_dir, persons, tours)
for c in tour_frequency.columns:
print("assigning persons", c)
persons[c] = tour_frequency[c]
assert skip_controls or check_controls('persons', 'non_mandatory_tour_frequency')
# patch_tour_ids
tours, joint_tour_participants = patch_tour_ids(persons, tours, joint_tour_participants)
survey_tables['tours']['table'] = tours
survey_tables['joint_tour_participants']['table'] = joint_tour_participants
assert skip_controls or check_controls('tours', 'index')
assert skip_controls or check_controls('joint_tour_participants', 'index')
# patch_tour_ids
trips = patch_trip_ids(tours, trips)
survey_tables['trips']['table'] = trips # so we can check_controls
assert skip_controls or check_controls('trips', 'index')
# households.joint_tour_frequency
households['joint_tour_frequency'] = infer_joint_tour_frequency(configs_dir, households, tours)
assert skip_controls or check_controls('househo | lds', 'joint_tour_frequency')
# tours.composition
tours['composition'] = infer_joint_tour_composition(persons, tours, joint_tour_participants)
assert skip_controls or check_controls('tours', 'composition')
# tours.tdd
tours['tdd'] = infer_tour_scheduling(configs_dir, tours)
assert skip_controls or check_controls('tours', 'tdd')
tours['atwork_subtour_frequency'] = infer_atwork_subtour_frequency(configs_dir, tours)
| assert skip_controls or check_controls('tours', 'atwork_subtour_frequency')
tours['stop_frequency'] = infer_stop_frequency(configs_dir, tours, trips)
assert skip_controls or check_controls('tours', 'stop_frequency')
# write output files
households.to_csv(os.path.join(output_dir, outputs['households']), index=True)
persons.to_csv(os.path.join(output_dir, outputs['persons']), index=True)
tours.to_csv(os.path.join(output_dir, outputs['tours']), index=False)
joint_tour_participants.to_c |
# -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law o | r agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
You can run this example like this:
.. code:: console
$ rm -rf '/tmp/bar'
$ luigi --module examples.foo examples.Foo --workers 2 --local-scheduler
"""
from __future__ import print_functio | n
import time
import luigi
class Foo(luigi.WrapperTask):
    """Wrapper task that fans out to ten Bar tasks."""
    task_namespace = 'examples'

    def run(self):
        print("Running Foo")

    def requires(self):
        """Yield the ten Bar dependencies of this wrapper."""
        for index in range(10):
            yield Bar(index)
class Bar(luigi.Task):
    """Task that sleeps briefly, then touches its /tmp/bar/<num> target."""
    task_namespace = 'examples'
    num = luigi.IntParameter()

    def run(self):
        time.sleep(1)
        # creating (and immediately closing) the target file marks us done
        handle = self.output().open('w')
        handle.close()

    def output(self):
        """
        Returns the target output for this task.

        :return: the target output for this task.
        :rtype: object (:py:class:`~luigi.target.Target`)
        """
        time.sleep(1)
        return luigi.LocalTarget('/tmp/bar/%d' % self.num)
|
from __future__ import absolute_import

import re
import string
import sys
import time
from datetime import datetime
from sys import getsizeof

from scrapy import Request
from scrapy.selector import Selector
from scrapy.spiders import Spider

from Schoogle.items import O_Item
def reduce(text):
    """
    Strip `text` down to ASCII letters and spaces, dropping everything else.

    NOTE: the name shadows the builtin ``reduce``; kept for compatibility
    with existing callers in this module.

    :param text: string to filter
    :return: filtered string containing only A-Z, a-z and ' '
    """
    # string.letters was removed in Python 3; use the explicit ASCII alphabet.
    # A set makes the per-character membership test O(1).
    allowed = set(string.ascii_letters)
    allowed.add(" ")
    return "".join([c for c in text if c in allowed])
#@params:
#@html_list: a list ("vector") of HTML text fragments; we strip out all of the
# annoying \t and \n noise from clunky HTML and return one string with the
# page's entire text content. This string is later used by PostgreSQL for a
# full-text search.
def prune(html_list):
    """
    Collapse a list of text fragments extracted from HTML into one
    space-separated string of ASCII letters and spaces.

    :param html_list: iterable of (unicode) text fragments
    :return: single string suitable for full-text indexing
    """
    kept = []
    for fragment in html_list:
        # Force ASCII, dropping anything non-encodable.  decode() keeps this
        # a text string on Python 3, where encode() alone yields bytes.
        text = fragment.encode('ascii', 'ignore').decode('ascii')
        text = reduce(text)
        # BUG FIX: the original test `if t != '' or ' ':` was always true
        # (a non-empty string literal is truthy), so blank fragments were
        # appended anyway.  Skip fragments that are empty or whitespace-only.
        if text.strip():
            kept.append(text)
    return " ".join(kept)
class O_Spider(Spider):
    """Scrapy spider (Python 2) that crawls owu.edu and yields one O_Item per page."""
    name = 'O_Spider'
    allowed_domains = ['owu.edu']
    start_urls = ['http://www.owu.edu']
    # @params
    # @response: this is a Scrapy.Response object containing much of the website information
    #            attributes of this object will be used to flesh out our O_Item object
    # @yield(1): this returns a single object each time next( this object ) is called
    #            first parse yields all items
    # @yield(2): this is completed only after we have yielded an object from this webpage, it will
    #            recursively call parse on all links in a web page
    def parse(self,response):
        # here we use scrapy's request object to catch all invalid links when parsing our document
        # NOTE(review): this first loop builds Request objects but never yields
        # them -- it appears to be dead work; verify before removing.
        try:
            links = response.xpath('//@href').extract()
            for link in links:
                try:
                    req = Request(link,callback = self.parse)
                except ValueError:
                    pass # might want to log these eventually
        except AttributeError:
            pass # log these eventually
        # fill up item with statistics
        current_item = O_Item()
        current_item['url'] = response.url
        try:
            current_item['title'] = response.xpath('//title/text()').extract()
            current_item['timestamp'] = datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S')
            current_item['page_size'] = getsizeof(response.body)
            current_item['full_html'] = response.body_as_unicode() # not sure if we really want this..
            # NOTE(review): prune() already returns one joined string, so this
            # outer " ".join inserts a space between every character -- confirm
            # this is the intended full_text format.
            current_item['full_text'] = " ".join(prune(response.xpath('//text()').extract()))
            current_item['secure'] = 'https' in str(response.request)
            current_item['links'] = links
            yield current_item
        except Exception as e:
            print "______________________________________________________________"
            print " ERROR THROW ON ITEM YIELD"
            print e
            pass
        # recursive page search is below, this must happen after the item is pipelined to postgresql
        # this is where we yield a requests object with parse as the callback and the real recursion kicks in
        try:
            for link in response.xpath('//@href').extract():
                try:
                    req = Request(link,callback = self.parse)
                    yield req
                except ValueError:
                    pass # might want to log these eventually
        except AttributeError:
            pass # log these eventually
|
import math
# Python 2 script (xrange, print statement on the last line).
# Appears to compute the 1,000,000th lexicographic permutation of the digits
# 0-9 via the factorial number system (cf. the SOLVED answer below): each
# leading digit accounts for (i-1)! permutations of the remaining digits.
curr = 0                                            # permutations counted so far
goal = 1000000                                      # target permutation rank
potential_nums = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]     # digits not yet placed
output_num = []                                     # answer digits, most significant first
if __name__ == '__main__':
    for i in xrange(10, 0, -1):
        print (curr, i, "outer loop")
        for j in xrange(i + 1):
            print (curr, j, "inner loop")
            # find the smallest j whose prefix count overshoots the goal,
            # then commit digit j-1 and add (j-1)*(i-1)! to the running count
            temp = math.factorial(i - 1) * j + curr
            if temp >= goal:
                print (temp)
                curr += (math.factorial(i - 1) * (j-1))
                print (curr, goal, i, j)
                output_num.append(potential_nums[j-1])
                potential_nums.remove(potential_nums[j-1])
                break
    print output_num
    # SOLVED : 2783915460
|
yml" in config_file_paths and not app_config:
with open(config_file_paths["app_yml"]) as app_yml:
self._app_config = load(app_yml, self._YAMLLoader)
# If the overrides file exists, override the app config values
# with ones from app.override.yml
if "app_override_yml" in config_file_paths:
app_override_config = {}
with open(config_file_paths["app_override_yml"]) as app_override_yml:
app_override_config = load(app_override_yml, self._YAMLLoader)
self._app_config = self.__class__.merge_dicts(
self._app_config,
app_override_config
)
if "logging_yml" in config_file_paths and not logging_config:
with open(config_file_paths["logging_yml"]) as logging_yml:
self._logging_config = load(logging_yml, self._YAMLLoader)
if "bundles_yml" in config_file_paths and not webassets_env:
from webassets.loaders import YAMLLoader
self._webassets_env = YAMLLoader(config_file_paths["bundles_yml"]).load_environment()
if app_config:
self._app_config = dict(app_config)
try:
# Merge JSON from environment variable
self._app_config = self.__class__.merge_dicts(self._app_config, ENV_CONFIG)
except AttributeError:
if ENV_CONFIG: # not an empty dict
self._app_config = ENV_CONFIG
# Don't re-raise exception, self.validate() will do this later
if logging_config:
self._logging_config = dict(logging_config)
if webassets_env is not None:
self._webassets_env = webassets_env
self.validate() # Checks that all attributes are pre-populated
# Convert relative paths to absolute where needed
# self.validate() will fail if there's no app_config['controllers']
for _ in self._app_config['controllers']:
section = self._app_config['controllers'][_]
for r in section:
if isinstance(section[r], dict):
for __ in ['tools.staticdir.root',
'tools.staticfile.root']:
pth = section[r].get(__)
if pth is not None and not pth.startswith('/'):
self._app_config['controllers'][_][r][__] = \
os.path.join(CWD, pth)
# Convert relative paths of logs in handlers
# self.validate() will fail if there's no self._logging_config
for handler_name, handler_config in (getattr(self, '_logging_config', {}) or {}).get('handlers', {}).viewitems():
pth = handler_config.get('filename')
if pth is not None and not pth.startswith('/'):
self._logging_config['handlers'][handler_name]['filename'] = \
os.path.join(CWD, pth)
if environment == "backlash":
self.setup_backlash_environment()
    @property
    def config_file_paths(self):
        """
        Namedtuple of the configured config-file paths, with one field per key
        sorted by name.  Falls through (returning None) when no paths were
        configured.
        """
        if self._config_file_paths:
            # NOTE: viewkeys() is Python 2 only (dict.keys() view on Python 3)
            sorted_kv_pairs = tuple(((k, self._config_file_paths[k])
                                     for k in sorted(self._config_file_paths.viewkeys())))
            paths = collections.namedtuple("config_file_paths", [e[0] for e in sorted_kv_pairs])
            return paths(*[e[1] for e in sorted_kv_pairs])
@property
def project_metadata(self):
return self.app_config["project_metadata"]
@property
def use_logging(self):
return self.app_config.get("global", {}).get("engine.logging.on", False)
    @property
    def use_redis(self):
        """
        True when any controller path section configures redis-backed sessions
        (tools.sessions.storage_type == "redis"); False otherwise.
        """
        if self.controllers_config:
            # viewitems()/viewvalues() are Python 2 dict views
            for _, controller_config in self.controllers_config.viewitems():
                # copy so popping "controller" does not mutate the real config
                # NOTE(review): pop without a default raises KeyError if a
                # section has no "controller" key -- confirm it is guaranteed.
                controller_config = controller_config.copy()
                controller_config.pop("controller")
                for path_config in controller_config.viewvalues():
                    if path_config.get("tools.sessions.storage_type") == "redis":
                        return True
        return False
@property
def use_sqlalchemy(self):
return self.app_config.get("globa | l", {}).get("engine.sqlalchemy.on", False)
@property
def use_jinja2(self):
return "jinja2" in self.app_config
@property
def use_webassets(self):
return self.use_jinja2 and self.app_config["jinja2"].get("use_webassets", False)
@property
def use_email(self):
return "email" in self.app_config
@property
def controllers_config(self):
return self.app_config.get("controllers")
    @property
    def app_config(self):
        """The merged application configuration mapping (set during __init__)."""
        return self._app_config
    @property
    def logging_config(self):
        """The logging configuration mapping, or None when never configured."""
        return getattr(self, "_logging_config", None)
    @property
    def webassets_env(self):
        """The webassets Environment, or None when never configured."""
        return getattr(self, "_webassets_env", None)
@property
def jinja2_config(self):
if self.use_jinja2:
conf = self.app_config["jinja2"].copy()
conf.pop("use_webassets", None)
return conf
    @property
    def sqlalchemy_config(self):
        """
        SQLAlchemy engine settings when engine.sqlalchemy.on is set: either
        the single "sqlalchemy_engine" section, or every section whose key
        starts with "sqlalchemy_engine" (multi-engine setups).  Falls through
        (returning None) when SQLAlchemy is disabled.
        """
        if self.use_sqlalchemy:
            if "sqlalchemy_engine" in self.app_config:
                saconf = self.app_config["sqlalchemy_engine"].copy()
                return {"sqlalchemy_engine": saconf}
            else:
                # viewitems() is Python 2 only
                return dict([(k, v) for k, v in self.app_config.viewitems()
                             if k.startswith("sqlalchemy_engine")])
@property
def email_config(self):
return self.app_config.get("email")
    def setup_backlash_environment(self):
        """
        Configure this application to run under the backlash debugger:
        registers a "backlash" environment with cherrypy, strips custom
        error-handler options from the app config, and prepends
        DebuggedApplication to the WSGI pipeline.  Mutates self.app_config in
        place and returns nothing; no-op (with a warning) when backlash is
        not installed.
        """
        try:
            from backlash import DebuggedApplication
        except ImportError:
            warnings.warn("backlash not installed")
            return
        cherrypy._cpconfig.environments["backlash"] = {
            "log.wsgi": True,
            "request.throw_errors": True,
            "log.screen": False,
            "engine.autoreload_on": False
        }
        def remove_error_options(section):
            # drop every custom error handler/page so backlash sees raw errors
            section.pop("request.handler_error", None)
            section.pop("request.error_response", None)
            section.pop("tools.err_redirect.on", None)
            section.pop("tools.log_headers.on", None)
            section.pop("tools.log_tracebacks.on", None)
            # iterate a copy because we pop from the section while iterating
            # (viewkeys() is Python 2 only)
            for k in section.copy().viewkeys():
                if k.startswith("error_page.") or \
                        k.startswith("request.error_page."):
                    section.pop(k)
        for section_name, section in self.app_config.viewitems():
            if section_name.startswith("/") or section_name == "global":
                remove_error_options(section)
        wsgi_pipeline = []
        if "/" in self.app_config:
            wsgi_pipeline = self.app_config["/"].get("wsgi.pipeline", [])
        else:
            self.app_config["/"] = {}
        wsgi_pipeline.insert(0, ("backlash", DebuggedApplication))
        self.app_config["/"]["wsgi.pipeline"] = wsgi_pipeline
def validate(self):
# no need to check for cp config, which will be checked on startup
if not hasattr(self, "_app_config") or not self.app_config:
raise BlueberryPyNotConfiguredError("BlueberryPy application configuration not found.")
if self.use_sqlalchemy and not self.sqlalchemy_config:
raise BlueberryPyNotConfiguredError("SQLAlchemy configuration not found.")
if self.use_webassets:
if self.webassets_env is None:
raise BlueberryPyNotConfiguredError("Webassets configuration not found.")
elif len(self.webassets_env) == 0:
raise BlueberryPyNotConfiguredError("No bundles found in webassets env.")
if self.use_jinja2 and not self.jinja2_config:
raise BlueberryPyNotConfiguredError("Jinja2 configuration not foun |
import argparse
import sys
import os
from annotated_set import loadData
from data_structures import CanonicalDerivation
from canonical_parser import CanonicalParser
from derivation_tree import DerivationTree
from conversion.ghkm2tib import ghkm2tib
#from lib.amr.dag import Dag
class ExtractorCanSem:
    """
    CanSem extraction pipeline (Python 2): parses aligned NL/MR data into
    canonical derivations, then runs Stanford GHKM rule extraction and
    converts the rules to Tiburon format.
    """
    def __init__(self):
        pass
    @classmethod
    def help(self):
        """
        Returns CanSem help message.
        (Runs main with --help on a fresh instance; argparse prints usage.)
        """
        return ExtractorCanSem.main(ExtractorCanSem(),"--help")
    def main(self, *args):
        """Parse CLI args, extract canonical derivations, emit GHKM files."""
        parser = argparse.ArgumentParser(description='CanSem Extraction Algorithm for SHRG',
                                         fromfile_prefix_chars='@',
                                         prog='%s extract-cansem'%sys.argv[0])
        parser.add_argument('nl_file', type=str, help="Natural Language File")
        parser.add_argument('mr_file', type=str, help="Meaning Representation File")
        parser.add_argument('alignment_file', type=str, help="Alignment File")
        parser.add_argument('--ghkmDir', nargs='?', default='/home/kmh/Files/Tools/stanford-ghkm-2010-03-08', help="GHKM directory")
        parser.add_argument('--tiburonLoc', nargs='?', default='/home/kmh/Files/Tools/newtib/tiburon', help="Tiburon executable file")
        parser.add_argument('--prefix', nargs='?', default=False, help="Suffix for temporary and output files")
        args = parser.parse_args(args=args)
        if args.prefix == False:
            args.prefix = "test"
        # derived file names for intermediate and output artifacts
        args.parse_path = "%s.ptb"%args.prefix
        args.align_path = "%s.a"%args.prefix
        args.text_path = "%s.f"%args.prefix
        args.ghkm_path = "%s.ghkm"%args.prefix
        args.tib_path = "%s.tib"%args.prefix
        # load input data into AnnotatedSet
        data = loadData(args.nl_file,args.mr_file,args.alignment_file)
        derivations = []
        for sentence in data:
            # Extraction -- note: rebinds `parser` (previously the argparse
            # parser) to the CanonicalParser; keep only the first derivation
            parser = CanonicalParser(sentence)
            if len(parser.derivs_done) > 0:
                derivations.append((sentence,parser.derivs_done[0]))
        print len(derivations)
        self.genGHKMfiles(args,derivations)
    def genGHKMfiles(self,args,derivations):
        """Write parse/align/text triples, then run GHKM + ghkm2tib conversion."""
        parse_file = open(args.parse_path,'w')
        align_file = open(args.align_path,'w')
        text_file = open(args.text_path,'w')
        for s,d in derivations:
            x = DerivationTree.fromDerivation(d)
            parse,align = x.getGHKMtriple_Java()
            text = s["nl"].strip(' \t\n\r')
            parse_file.write("%s\n"%parse)
            align_file.write("%s\n"%align)
            text_file.write("%s\n"%text)
        parse_file.close()
        align_file.close()
        text_file.close()
        print "Running GHKM Java rule extraction"
        mem = "2g"
        ghkm_opts = "-fCorpus %s -eParsedCorpus %s -align %s -joshuaFormat false -maxLHS 200 -maxRHS 15 -MaxUnalignedRHS 15" % (args.text_path,args.parse_path,args.align_path)
        java_opts="-Xmx%s -Xms%s -cp %s/ghkm.jar:%s/lib/fastutil.jar -XX:+UseCompressedOops"%(mem,mem,args.ghkmDir,args.ghkmDir)
        # NOTE(review): os.system builds a shell command from user-supplied
        # paths -- fine for a local tool, but unsafe on untrusted input.
        os.system("java %s edu.stanford.nlp.mt.syntax.ghkm.RuleExtractor %s > %s" % (java_opts,ghkm_opts,args.ghkm_path))
        print "Converting GHKM rules to Tiburon format"
        ghkm2tib(args.ghkm_path,args.tib_path)
|
#!/usr/bin/env python
# vim:fileencoding=utf-8
from __future__ import (unicode_literals, division, absolute_import,
print_function)
__license__ = 'GPL v3'
__copyright__ = '2013, Kovid Goyal <kovid at kovidgoyal.net>'
from calibre.gui2.complete2 import LineEdit
from calibre.gui2.widgets import history
class HistoryLineEdit2(LineEdit):
    """Line edit with completion backed by a persistent history store."""
    # Maximum number of history entries kept; None means unlimited.
    max_history_items = None
    def __init__(self, parent=None, completer_widget=None, sort_func=lambda x:None):
        LineEdit.__init__(self, parent=parent, completer_widget=completer_widget, sort_func=sort_func)
    @property
    def store_name(self):
        """Key under which this widget's history is persisted."""
        return 'lineedit_history_'+self._name
    def initialize(self, name):
        """Load stored history for `name` and save it again when editing finishes."""
        self._name = name
        self.history = history.get(self.store_name, [])
        self.set_separator(None)
        self.update_items_cache(self.history)
        self.setText('')
        self.editingFinished.connect(self.save_history)
    def save_history(self):
        """Move the current text to the front of the history and persist it."""
        # NOTE: unicode() is Python 2 only (str() on Python 3)
        ct = unicode(self.text())
        # entries shorter than 3 characters are not recorded
        if len(ct) > 2:
            try:
                self.history.remove(ct)
            except ValueError:
                pass
            self.history.insert(0, ct)
            if self.max_history_items is not None:
                # trim to the configured cap
                del self.history[self.max_history_items:]
            history.set(self.store_name, self.history)
            self.update_items_cache(self.history)
    def clear_history(self):
        """Forget all stored history for this widget."""
        self.history = []
        history.set(self.store_name, self.history)
        self.update_items_cache(self.history)
|
import re
import traceback
from urllib.parse import quote
from requests.utils import dict_from_cookiejar
from sickchill import logger
from sickchill.helper.common import convert_size, try_int
from sickchill.oldbeard import tvcache
from sickchill.oldbeard.bs4_parser import BS4Parser
from sickchill.providers.torrent.TorrentProvider import TorrentProvider
class Provider(TorrentProvider):
    """Torrent provider for the private tracker pretome.info."""
    def __init__(self):
        super().__init__("Pretome")
        # credentials (set by the UI); the tracker also requires a login pin
        self.username = None
        self.password = None
        self.pin = None
        # minimum seeders/leechers a result must have to be kept
        self.minseed = 0
        self.minleech = 0
        self.urls = {
            "base_url": "https://pretome.info",
            "login": "https://pretome.info/takelogin.php",
            "detail": "https://pretome.info/details.php?id=%s",
            "search": "https://pretome.info/browse.php?search=%s%s",
            "download": "https://pretome.info/download.php/%s/%s.torrent",
        }
        self.url = self.urls["base_url"]
        # query-string suffix appended to every search URL
        self.categories = "&st=1&cat%5B%5D=7"
        self.proper_strings = ["PROPER", "REPACK"]
        self.cache = tvcache.TVCache(self)
    def _check_auth(self):
        """Warn when credentials are incomplete.

        NOTE(review): returns True even when username/password/pin are
        missing -- it only logs a warning; confirm callers expect that.
        """
        if not self.username or not self.password or not self.pin:
            logger.warning("Invalid username or password or pin. Check your settings")
        return True
    def login(self):
        """Log in to the tracker, reusing an existing session cookie if set."""
        if any(dict_from_cookiejar(self.session.cookies).values()):
            # already have a session cookie
            return True
        login_params = {"username": self.username, "password": self.password, "login_pin": self.pin}
        response = self.get_url(self.urls["login"], post_data=login_params, returns="text")
        if not response:
            logger.warning("Unable to connect to provider")
            return False
        if re.search("Username or password incorrect", response):
            logger.warning("Invalid username or password. Check your settings")
            return False
        return True
    def search(self, search_params, age=0, ep_obj=None):
        """Scrape the browse page for each search string; return result dicts
        (title/link/size/seeders/leechers/hash), sorted by seeders per mode."""
        results = []
        if not self.login():
            return results
        for mode in search_params:
            items = []
            logger.debug(_("Search Mode: {mode}".format(mode=mode)))
            for search_string in search_params[mode]:
                if mode != "RSS":
                    logger.debug(_("Search String: {search_string}".format(search_string=search_string)))
                search_url = self.urls["search"] % (quote(search_string), self.categories)
                data = self.get_url(search_url, returns="text")
                if not data:
                    continue
                try:
                    with BS4Parser(data, "html5lib") as html:
                        # Continue only if one Release is found
                        empty = html.find("h2", text="No .torrents fit this filter criteria")
                        if empty:
                            logger.debug("Data returned from provider does not contain any torrents")
                            continue
                        torrent_table = html.find("table", style="border: none; width: 100%;")
                        if not torrent_table:
                            logger.exception("Could not find table of torrents")
                            continue
                        torrent_rows = torrent_table("tr", class_="browse")
                        for result in torrent_rows:
                            cells = result("td")
                            size = None
                            link = cells[1].find("a", style="font-size: 1.25em; font-weight: bold;")
                            # NOTE(review): if `link` is None this raises
                            # TypeError outside the except below -- confirm
                            # every browse row contains the styled anchor.
                            torrent_id = link["href"].replace("details.php?id=", "")
                            try:
                                if link.get("title", ""):
                                    title = link["title"]
                                else:
                                    title = link.contents[0]
                                download_url = self.urls["download"] % (torrent_id, link.contents[0])
                                seeders = int(cells[9].contents[0])
                                leechers = int(cells[10].contents[0])
                                # Need size for failed downloads handling
                                if size is None:
                                    torrent_size = cells[7].text
                                    size = convert_size(torrent_size) or -1
                            except (AttributeError, TypeError):
                                continue
                            if not all([title, download_url]):
                                continue
                            # Filter unseeded torrent
                            if seeders < self.minseed or leechers < self.minleech:
                                if mode != "RSS":
                                    logger.debug(
                                        "Discarding torrent because it doesn't meet the minimum seeders or leechers: {0} (S:{1} L:{2})".format(
                                            title, seeders, leechers
                                        )
                                    )
                                continue
                            item = {"title": title, "link": download_url, "size": size, "seeders": seeders, "leechers": leechers, "hash": ""}
                            if mode != "RSS":
                                logger.debug("Found result: {0} with {1} seeders and {2} leechers".format(title, seeders, leechers))
                            items.append(item)
                except Exception:
                    logger.exception("Failed parsing provider. Traceback: {0}".format(traceback.format_exc()))
            # For each search mode sort all the items by seeders if available
            items.sort(key=lambda d: try_int(d.get("seeders", 0)), reverse=True)
            results += items
        return results
|
Ival:
self.transitionIval = finishIval
self.transitionIval.start()
else:
# Create a sequence that lerps the color out, then
# parents the fade to hidden
self.transitionIval = self.getFadeOutIval(t,finishIval)
self.transitionIval.start()
def fadeOutActive(self):
return self.fade and self.fade.getColor()[3] > 0
    def fadeScreen(self, alpha=0.5):
        """
        Put a semitransparent screen over the camera plane
        to darken out the world. Useful for drawing attention to
        a dialog box for instance

        :param alpha: opacity of the overlay, 0..1
        """
        #print "transitiosn: fadeScreen"
        # cancel any running transition, then show the fade polygon at the
        # configured RGB with the requested alpha
        self.noTransitions()
        self.loadFade()
        self.fade.reparentTo(aspect2d, FADE_SORT_INDEX)
        self.fade.setColor(self.alphaOn[0],
                           self.alphaOn[1],
                           self.alphaOn[2],
                           alpha)
    def fadeScreenColor(self, color):
        """
        Put a semitransparent screen over the camera plane
        to darken out the world. Useful for drawing attention to
        a dialog box for instance

        :param color: full RGBA color applied to the overlay
                      (presumably a 4-component VBase4 -- confirm)
        """
        #print "transitiosn: fadeScreenColor"
        self.noTransitions()
        self.loadFade()
        self.fade.reparentTo(aspect2d, FADE_SORT_INDEX)
        self.fade.setColor(color)
    def noFade(self):
        """
        Removes any current fade tasks and parents the fade polygon away
        """
        #print "transitiosn: noFade"
        # stop the running interval first so it cannot re-show the fade
        if self.transitionIval:
            self.transitionIval.pause()
            self.transitionIval = None
        if self.fade:
            # Make sure to reset the color, since fadeOutActive() is looking at it
            self.fade.setColor(self.alphaOff)
            self.fade.detachNode()
    def setFadeColor(self, r, g, b):
        """Set the RGB used by subsequent fades (alpha is driven separately)."""
        self.alphaOn.set(r, g, b, 1)
        self.alphaOff.set(r, g, b, 0)
##################################################
# Iris
##################################################
def loadIris(self):
if self.iris == None:
self.iris = loader.loadModel(self.IrisModelName)
self.iris.setPos(0, 0, 0)
    def irisIn(self, t=0.5, finishIval=None):
        """
        Play an iris in transition over t seconds.
        Places a polygon on the aspect2d plane then lerps the scale
        of the iris polygon up so it looks like we iris in. When the
        scale lerp is finished, it parents the iris polygon to hidden.

        :param t: duration in seconds; 0 skips the animation entirely
        :param finishIval: optional interval appended after the transition
        """
        self.noTransitions()
        self.loadIris()
        if (t == 0):
            self.iris.detachNode()
        else:
            self.iris.reparentTo(aspect2d, FADE_SORT_INDEX)
            self.transitionIval = Sequence(LerpScaleInterval(self.iris, t,
                                                             scale = 0.18,
                                                             startScale = 0.01),
                                           Func(self.iris.detachNode),
                                           name = self.irisTaskName,
                                           )
            if finishIval:
                self.transitionIval.append(finishIval)
            self.transitionIval.start()
    def irisOut(self, t=0.5, finishIval=None):
        """
        Play an iris out transition over t seconds.
        Places a polygon on the aspect2d plane then lerps the scale
        of the iris down so it looks like we iris out. When the scale
        lerp is finished, it leaves the iris polygon covering the
        aspect2d plane until you irisIn or call noIris.

        :param t: duration in seconds; 0 skips the animation entirely
        :param finishIval: optional interval appended after the transition
        """
        self.noTransitions()
        self.loadIris()
        self.loadFade()  # we need this to cover up the hole.
        if (t == 0):
            self.iris.detachNode()
            self.fadeOut(0)
        else:
            self.iris.reparentTo(aspect2d, FADE_SORT_INDEX)
            self.transitionIval = Sequence(LerpScaleInterval(self.iris, t,
                                                             scale = 0.01,
                                                             startScale = 0.18),
                                           Func(self.iris.detachNode),
                                           # Use the fade to cover up the hole that the iris would leave
                                           Func(self.fadeOut, 0),
                                           name = self.irisTaskName,
                                           )
            if finishIval:
                self.transitionIval.append(finishIval)
            self.transitionIval.start()
def noIris(self):
"""
Removes any current iris tasks and parents the iris polygon away
"""
if self.transitionIval:
self.transitionIval.pause()
self.transitionIval = None
if self.iris != None:
self.iris.detachNode()
# Actually we need to remove the fade too,
# because the iris effect uses it.
self.noFade()
    def noTransitions(self):
        """
        This call should immediately remove any and all transitions running
        (both fade and iris; noIris also clears the fade it relies on)
        """
        self.noFade()
        self.noIris()
        # Letterbox is not really a transition, it is a screen overlay
        # self.noLetterbox()
##################################################
# Letterbox
##################################################
    def loadLetterbox(self):
        """Lazily build the letterbox (top and bottom bar) geometry, hidden."""
        if not self.letterbox:
            # We create a DirectFrame for the fade polygon, instead of
            # simply loading the polygon model and using it directly,
            # so that it will also obscure mouse events for objects
            # positioned behind it.
            self.letterbox = NodePath("letterbox")
            # Allow fade in and out of the bars
            self.letterbox.setTransparency(1)
            # Allow DirectLabels to be parented to the letterbox sensibly
            self.letterbox.setBin('unsorted', 0)
            # Allow a custom look to the letterbox graphic.
            # TODO: This model isn't available everywhere. We should
            # pass it in as a parameter.
            button = loader.loadModel('models/gui/toplevel_gui',
                                      okMissing = True)
            barImage = None
            if button:
                barImage = button.find('**/generic_button')
            # top bar, anchored just below the upper screen edge
            self.letterboxTop = DirectFrame(
                parent = self.letterbox,
                guiId = 'letterboxTop',
                relief = DGG.FLAT,
                state = DGG.NORMAL,
                frameColor = (0, 0, 0, 1),
                borderWidth = (0, 0),
                frameSize = (-1, 1, 0, 0.2),
                pos = (0, 0, 0.8),
                image = barImage,
                image_scale = (2.25,1,.5),
                image_pos = (0,0,.1),
                image_color = (0.3,0.3,0.3,1),
                sortOrder = 0,
                )
            # bottom bar, anchored at the lower screen edge
            self.letterboxBottom = DirectFrame(
                parent = self.letterbox,
                guiId = 'letterboxBottom',
                relief = DGG.FLAT,
                state = DGG.NORMAL,
                frameColor = (0, 0, 0, 1),
                borderWidth = (0, 0),
                frameSize = (-1, 1, 0, 0.2),
                pos = (0, 0, -1),
                image = barImage,
                image_scale = (2.25,1,.5),
                image_pos = (0,0,.1),
                image_color = (0.3,0.3,0.3,1),
                sortOrder = 0,
                )
            # masad: always place these at the bottom of render
            self.letterboxTop.setBin('sorted',0)
            self.letterboxBottom.setBin('sorted',0)
            self.letterbox.reparentTo(render2d, -1)
            self.letterboxOff(0)
    def noLetterbox(self):
        """
        Removes any current letterbox tasks and parents the letterbox polygon away
        """
        if self.letterboxIval:
            self.letterboxIval.pause()
            self.letterboxIval = None
        if self.letterbox:
            # stash (rather than destroy) the bars so a later letterboxOn can
            # reuse them -- loadLetterbox only builds them once
            self.letterbox.stash()
def letterboxOn(self, t=0.25, finishIval=None):
"""
Move black bars in over t seconds.
|
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import re
from resource_management.core.logger import Logger
class GenericManagerProperties(object):
  """
  Class to keep all Package-manager depended properties. Each non-generic implementation should override properties
  declared here.

  All values below are placeholders (None) to be overridden by concrete
  managers (e.g. yum/apt/zypper implementations).
  """
  empty_file = "/dev/null"
  locked_output = None
  repo_error = None
  repo_manager_bin = None
  pkg_manager_bin = None
  repo_update_cmd = None
  available_packages_cmd = None
  installed_packages_cmd = None
  all_packages_cmd = None
  repo_definition_location = None
  # keyed by a boolean flag -- TODO confirm key semantics with a concrete manager
  install_cmd = {
    True: None,
    False: None
  }
  remove_cmd = {
    True: None,
    False: None
  }
  verify_dependency_cmd = None
class GenericManager(object):
"""
Interface for all custom implementations. Provides the required base for any custom manager, to be smoothly integrated
"""
  @property
  def properties(self):
    """Package-manager property bundle; concrete managers return their own."""
    return GenericManagerProperties
  def install_package(self, name, context):
    """
    Install package

    :type name str
    :type context ambari_commons.shell.RepoCallContext
    :raise ValueError if name is empty
    :raise NotImplementedError always; concrete managers must override
    """
    raise NotImplementedError()
  def remove_package(self, name, context, ignore_dependencies=False):
    """
    Remove package

    :type name str
    :type context ambari_commons.shell.RepoCallContext
    :type ignore_dependencies bool
    :raise ValueError if name is empty
    :raise NotImplementedError always; concrete managers must override
    """
    raise NotImplementedError()
  def upgrade_package(self, name, context):
    """
    Upgrade package (docstring previously said "Install" -- this is the upgrade hook)

    :type name str
    :type context ambari_commons.shell.RepoCallContext
    :raise ValueError if name is empty
    :raise NotImplementedError always; concrete managers must override
    """
    raise NotImplementedError()
def check_uncompleted_transactions(self):
| """
Check package manager against uncompleted transactions.
:rtype bool
"""
return False
  def print_uncompleted_transaction_hint(self):
    """
    Print friendly message about the way to fix the issue
    (no-op in the generic base; concrete managers may override)
    """
    pass
def get_available_packages_in_repos(self, repositories):
"""
Gets all (both installed and available) packages that are available at given repositories.
:type repositories resource_management.libraries.functions.repository_util.CommandRepository
:return: installed and available packages from these repositories
"""
raise NotImplementedError()
def installed_packages(self, pkg_names=None, repo_filter=None):
raise NotImplementedError()
def available_packages(self, pkg_names=None, repo_filter=None):
raise NotImplementedError()
def all_packages(self, pkg_names=None, repo_filter=None):
raise NotImplementedError()
def get_installed_repos(self, hint_packages, all_packages, ignore_repos):
"""
Gets all installed repos by name based on repos that provide any package
contained in hintPackages
Repos starting with value in ignoreRepos will not be returned
hintPackages must be regexps.
"""
all_repos = []
repo_list = []
for hintPackage in hint_packages:
for item in all_packages:
if re.match(hintPackage, item[0]) and not item[2] in all_repos:
all_repos.append(item[2])
for repo in all_repos:
ignore = False
for ignoredRepo in ignore_repos:
if self.name_match(ignoredRepo, repo):
ignore = True
if not ignore:
repo_list.append(repo)
return repo_list
def get_installed_pkgs_by_repo(self, repos, ignore_packages, installed_packages):
"""
Get all the installed packages from the repos listed in repos
"""
packages_from_repo = []
packages_to_remove = []
for repo in repos:
sub_result = []
for item in installed_packages:
if repo == item[2]:
sub_result.append(item[0])
packages_from_repo = list(set(packages_from_repo + sub_result))
for package in packages_from_repo:
keep_package = True
for ignorePackage in ignore_packages:
if self.name_match(ignorePackage, package):
keep_package = False
break
if keep_package:
packages_to_remove.append(package)
return packages_to_remove
def get_installed_pkgs_by_names(self, pkg_names, all_packages_list=None):
"""
Gets all installed packages that start with names in pkgNames
:type pkg_names list[str]
:type all_packages_list list[str]
"""
return self.installed_packages(pkg_names)
def get_package_details(self, installed_packages, found_packages):
"""
Gets the name, version, and repoName for the packages
:type installed_packages list[tuple[str,str,str]]
:type found_packages list[str]
"""
package_details = []
for package in found_packages:
pkg_detail = {}
for installed_package in installed_packages:
if package == installed_package[0]:
pkg_detail['name'] = installed_package[0]
pkg_detail['version'] = installed_package[1]
pkg_detail['repoName'] = installed_package[2]
package_details.append(pkg_detail)
return package_details
def get_repos_to_remove(self, repos, ignore_list):
repos_to_remove = []
for repo in repos:
add_to_remove_list = True
for ignore_repo in ignore_list:
if self.name_match(ignore_repo, repo):
add_to_remove_list = False
continue
if add_to_remove_list:
repos_to_remove.append(repo)
return repos_to_remove
def get_installed_package_version(self, package_name):
raise NotImplementedError()
def verify_dependencies(self):
"""
Verify that we have no dependency issues in package manager. Dependency issues could appear because of aborted or terminated
package installation process or invalid packages state after manual modification of packages list on the host
:return True if no dependency issues found, False if dependency issue present
:rtype bool
"""
raise NotImplementedError()
def name_match(self, lookup_name, actual_name):
tokens = actual_name.strip().lower()
lookup_name = lookup_name.lower()
return " " not in lookup_name and lookup_name in tokens
def _executor_error_handler(self, command, error_log, exit_code):
"""
Error handler for ac_shell.process_executor
:type command list|str
:type error_log list
:type exit_code int
"""
if isinstance(command, (list, tuple)):
command = " ".join(command)
Logger.error("Command execution error: command = \"{0}\", exit code = {1}, stderr = {2}".format(
command, exit_code, "\n".join(error_log)))
|
# -*- coding: utf-8 -*-
'''
Rupture
version 1.4.0
build 5
'''
from bs4 import BeautifulSoup
import datetime
import requests
import socket
import pickle
import time
import ssl
from .utils import six
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.poolmanager import PoolManager
class Rupture(object):
    """
    Thin convenience wrapper around requests.Session adding BeautifulSoup
    parsing (`response.soup`), retry-on-SSL/socket-error, proxy handling and
    session (de)serialization.
    """
    parser = None  # None or html.parser or lxml
    encoding = None

    def __init__(self, proxies=None, parser='html.parser', timeout=None, headers=None):
        self.session = requests.Session()
        if headers:
            self.session.headers.update(headers)
        self.proxies = proxies  # single proxy URL applied to both http and https
        self.parser = parser
        self.timeout = timeout

    def _wrap_response(self, obj, parser):
        """Attach a lazy `.soup` property and a text-bearing __repr__ to the response."""
        def get_soup(self):
            if not hasattr(self, '_soup'):
                start_time = datetime.datetime.now()
                from_encoding = None if self.encoding == 'utf-8' else self.encoding
                if isinstance(self.text, six.text_type):
                    from_encoding = None  # Prevent UserWarning
                self._soup = BeautifulSoup(self.text, self.parser, from_encoding=from_encoding)
                self._soup.elapsed = datetime.datetime.now() - start_time
                if self.parser == 'lxml':
                    import lxml
                    lxml.etree.clear_error_log()
            return self._soup

        def get__repr__(self):
            if hasattr(self, 'text'):
                return '<Response [%s]: %s>' % (self.status_code, self.text)
            return '<Response [%s]>' % (self.status_code)

        obj.__class__.parser = parser
        obj.__class__.soup = property(get_soup)
        obj.__class__.__repr__ = get__repr__
        return obj

    @staticmethod
    def _merge_xml_headers(headers):
        """Default AJAX/JSON headers, overridden by any caller-supplied headers."""
        merged = {
            'X-Requested-With': 'XMLHttpRequest',
            'Content-Type': 'application/json; charset=utf-8'
        }
        if headers:
            # BUG FIX: dict(a.items() + b.items()) raises TypeError on Python 3.
            merged.update(headers)
        return merged

    def http_request(self, method, url, params=None, data=None, timeout=None, proxies=None, encoding=None, parser=None, retries=None, retries_interval=None, **kwargs):
        """
        Perform an HTTP request, retrying up to `retries` times (sleeping
        `retries_interval` seconds in between) on SSL or socket errors.
        Per-call arguments fall back to the instance-level defaults.
        """
        timeout = self.timeout if timeout is None else timeout
        proxies = self.proxies if proxies is None else proxies
        encoding = self.encoding if encoding is None else encoding
        parser = self.parser if parser is None else parser
        if not retries:
            retries = 0
        # BUG FIX: build the proxy mapping once, before the retry loop.
        # Previously it was rebuilt inside the loop, so every retry re-wrapped
        # the already-built dict into {'http': {...}}, an invalid value.
        proxies = {'http': proxies, 'https': proxies} if proxies else None
        while True:
            try:
                start_time = datetime.datetime.now()
                r = self.session.request(method, url, params=params, data=data, timeout=timeout, proxies=proxies, **kwargs)
                r.elapsed_all = datetime.datetime.now() - start_time
                if encoding:
                    r.encoding = encoding
                return self._wrap_response(r, parser)
            except (ssl.SSLError) as e:
                if retries > 0:
                    retries = retries - 1
                    if retries_interval:
                        time.sleep(retries_interval)
                    continue
                raise requests.exceptions.RequestException('SSLError %s' % e)
            except (socket.error) as e:
                if retries > 0:
                    retries = retries - 1
                    if retries_interval:
                        time.sleep(retries_interval)
                    continue
                raise requests.exceptions.RequestException('Socket Error %s' % e)

    def http_get(self, url, params=None, **kwargs):
        """GET shortcut."""
        return self.http_request('GET', url, params=params, **kwargs)

    def xml_get(self, url, params=None, headers=None, **kwargs):
        """GET with AJAX/JSON default headers."""
        return self.http_get(url, params=params, headers=self._merge_xml_headers(headers), **kwargs)

    def http_post(self, url, data=None, **kwargs):
        """POST shortcut."""
        return self.http_request('POST', url, data=data, **kwargs)

    def xml_post(self, url, data=None, headers=None, **kwargs):
        """POST with AJAX/JSON default headers."""
        return self.http_post(url, data=data, headers=self._merge_xml_headers(headers), **kwargs)

    def http_download(self, url, filepath, method='get', **kwargs):
        """Stream a GET/POST response body to `filepath`; returns `filepath`."""
        if method.lower() == 'get':
            response = self.http_get(url, stream=True, **kwargs)
        elif method.lower() == 'post':
            response = self.http_post(url, stream=True, **kwargs)
        else:
            raise NotImplementedError()
        if not response.ok:
            raise requests.exceptions.RequestException('Response not okay')
        with open(filepath, 'wb') as handle:
            for block in response.iter_content(1024):
                if not block:
                    break
                handle.write(block)
        return filepath

    def http_get_image(self, url, filepath, **kwargs):
        """Alias of http_download kept for backward compatibility."""
        return self.http_download(url, filepath, **kwargs)

    def parse_float_or_none(self, s):
        """Parse '1,234+'-style numbers to float; falsy input is returned unchanged."""
        if s:
            return float(str(s).strip().replace(',', '').replace('+', ''))
        return s

    def new_session(self):
        """Discard all cookies/state by starting a fresh requests session."""
        self.session = requests.Session()

    def serialize(self):
        """Pickle the session for later deserialize()."""
        return pickle.dumps([self.session])

    @classmethod
    def _deserialize_key(cls, data, keys):
        raw_results = pickle.loads(data)
        entity = cls()
        for i in range(len(keys)):
            setattr(entity, keys[i], raw_results[i])
        return entity

    @classmethod
    def deserialize(cls, data):
        # NOTE(review): pickle.loads executes arbitrary code on malicious
        # input -- only feed this data produced locally by serialize().
        return cls._deserialize_key(data, ['session'])

    def patch_ssl(self):
        """Force TLSv1 for this session's HTTPS connections (applied once)."""
        class SSLAdapter(HTTPAdapter):
            def init_poolmanager(self, connections, maxsize, block=False):
                self.poolmanager = PoolManager(num_pools=connections,
                                               maxsize=maxsize,
                                               block=block,
                                               ssl_version=ssl.PROTOCOL_TLSv1)
        if not getattr(self.session, 'is_patch', False):
            self.session.is_patch = True
            self.session.mount('https://', SSLAdapter())
|
from django.conf.urls.defaults import *
from django_de.apps.authors.models import Author

# Single generic list view over all authors, ordered by name then slug.
# NOTE(review): `django.conf.urls.defaults` and string-based generic views
# belong to the legacy (pre-1.5) Django API; this urlconf targets that era.
urlpatterns = patterns('django.views.generic.list_detail',
    (r'^$', 'object_list',
        dict(
            queryset = Author.objects.order_by('name', 'slug'),
            template_object_name = 'author',  # template sees 'author_list'
            allow_empty=True,  # render the page even with no authors
        ),
    )
)
|
from seleniumbase import BaseCase
class GitHubTests(BaseCase):
    """Browser smoke test navigating the SeleniumBase repository on GitHub."""

    def test_github(self):
        # Selenium can trigger GitHub's anti-automation system:
        # "You have triggered an abuse detection mechanism."
        # "Please wait a few minutes before you try again."
        # To avoid this automation blocker, two steps are being taken:
        # 1. self.slow_click() is being used to slow down Selenium actions.
        # 2. The browser's User Agent is modified to avoid Selenium-detection
        #    when running in headless mode.
        if self.headless:
            self.get_new_driver(
                agent="""Mozilla/5.0 """
                """AppleWebKit/537.36 (KHTML, like Gecko) """
                """Chrome/Version 96.0.4664.55 Safari/537.36"""
            )
        self.open("https://github.com/search?q=SeleniumBase")
        self.slow_click('a[href="/seleniumbase/SeleniumBase"]')
        # Dismiss GitHub's sign-up prompt if it appears.
        self.click_if_visible('[data-action="click:signup-prompt#dismiss"]')
        self.assert_element("div.repository-content")
        self.assert_text("SeleniumBase", "h1")
        # Drill into seleniumbase/fixtures/base_case.py via the file browser.
        self.slow_click('a[title="seleniumbase"]')
        self.slow_click('a[title="fixtures"]')
        self.slow_click('a[title="base_case.py"]')
        self.assert_text("Code", "nav a.selected")
|
from spacewiki.app import create_app
from spa | cewiki import model
from spacewiki.test import create_test_app
import unittest
class UiTestCase(unittest.TestCase):
    """Smoke tests hitting the main wiki pages through the Flask test client."""

    def setUp(self):
        self._app = create_test_app()
        with self._app.app_context():
            model.syncdb()
        self.app = self._app.test_client()

    def _get_status(self, path):
        """Return the HTTP status code for a GET of *path*."""
        return self.app.get(path).status_code

    def test_index(self):
        self.assertEqual(self._get_status('/'), 200)

    def test_no_page(self):
        self.assertEqual(self._get_status('/missing-page'), 200)

    def test_all_pages(self):
        self.assertEqual(self._get_status('/.all-pages'), 200)

    def test_edit(self):
        self.assertEqual(self._get_status('/index/edit'), 200)
|
#######################################################################
# This file is part of Pyblosxom.
#
# Copyright (C) 2010-2011 by the Pyblosxom team. See AUTHORS.
#
# Pyblosxom is distributed under the MIT license. See the file
# LICENSE for distribution details.
#######################################################################
import tempfile
import shutil
import os
from Pyblosxom.tests import PluginTest
from Pyblosxom.plugins import tags
from Pyblosxom.pyblosxom import Request
class TagsTest(PluginTest):
    """Tests for the tags plugin: tags-file location and tag-cloud rendering."""

    def setUp(self):
        PluginTest.setUp(self, tags)
        self.tmpdir = tempfile.mkdtemp()

    def get_datadir(self):
        return os.path.join(self.tmpdir, "datadir")

    def tearDown(self):
        PluginTest.tearDown(self)
        # ignore_errors replaces the old try/except OSError: pass.
        shutil.rmtree(self.tmpdir, ignore_errors=True)

    def test_get_tagsfile(self):
        cfg = {"datadir": self.get_datadir()}
        # default: tags.index lives next to (one level above) the datadir
        # (assertEqual instead of the deprecated assertEquals alias;
        # an unused Request(...) local was removed)
        self.assertEqual(tags.get_tagsfile(cfg),
                         os.path.join(self.get_datadir(), os.pardir,
                                      "tags.index"))

        # an explicit tags_filename overrides the default location
        tags_filename = os.path.join(self.get_datadir(), "tags.db")
        cfg = {"datadir": self.get_datadir(), "tags_filename": tags_filename}
        self.assertEqual(tags.get_tagsfile(cfg), tags_filename)

    def test_tag_cloud_no_tags(self):
        # no tags -> empty paragraph
        self.request.get_data()["tagsdata"] = {}

        tags.cb_head(self.args)

        self.assertEqual(
            str(self.args["entry"]["tagcloud"]),
            "\n".join(
                ["<p>",
                 "</p>"]))

    def test_tag_cloud_one_tag(self):
        # a single tag is always rendered with the biggest size class
        self.request.get_data()["tagsdata"] = {
            "tag2": ["a"],
        }

        tags.cb_head(self.args)

        self.assertEqual(
            str(self.args["entry"]["tagcloud"]),
            "\n".join(
                ["<p>",
                 '<a class="biggestTag" href="http://bl.og//tag/tag2">tag2</a>',
                 "</p>"]))

    def test_tag_cloud_many_tags(self):
        # tag sizes scale with the number of entries per tag
        self.request.get_data()["tagsdata"] = {
            "tag1": ["a", "b", "c", "d", "e", "f"],
            "tag2": ["a", "b", "c", "d"],
            "tag3": ["a"]
        }

        tags.cb_head(self.args)

        self.assertEqual(
            str(self.args["entry"]["tagcloud"]),
            "\n".join(
                ["<p>",
                 '<a class="biggestTag" href="http://bl.og//tag/tag1">tag1</a>',
                 '<a class="biggestTag" href="http://bl.og//tag/tag2">tag2</a>',
                 '<a class="smallestTag" href="http://bl.og//tag/tag3">tag3</a>',
                 "</p>"]))
|
"""
This page is in the table of contents.
Winding is a script to set the winding profile for the skeinforge chain.
The displayed craft sequence is the sequence in which the tools craft the model and export the output.
On the winding dialog, clicking the 'Add Profile' button will duplicate the selected profile and give it the name in the input field. For example, if laser is selected and the name laser_10mm is in the input field, clicking the 'Add Profile' button will duplicate laser and save it as laser_10mm. The 'Delete Profile' button d | eletes the selected profile.
The profile selection is the setting. If you hit 'Save and Close' the selection will be saved, if you hit 'Cancel' the selection will not be saved. However; adding and deleting a profile is a permanent action, for example 'Cancel' will not bring back any deleted profiles.
To change the winding profile, in a shell in the profile_plugins folder type:
> python winding.py
"""
from __future__ import absolute_imp | ort
import __init__
from fabmetheus_utilities import settings
from skeinforge_application.skeinforge_utilities import skeinforge_profile
import sys
__author__ = 'Enrique Perez (perez_enrique@yahoo.com)'
__date__ = '$Date: 2008/21/04 $'
__license__ = 'GNU Affero General Public License http://www.gnu.org/licenses/agpl.html'
def getCraftSequence():
    "Get the winding craft sequence."
    craft_names = ('cleave preface coil flow feed home lash fillet limit '
                   'unpause alteration export')
    return craft_names.split()
def getNewRepository():
    'Get new repository.'
    # Factory hook used by the skeinforge settings framework.
    return WindingRepository()
class WindingRepository:
    "A class to handle the winding settings."
    def __init__(self):
        "Set the default settings, execute title & settings fileName."
        # Registers this profile ('free_wire') with the craft sequence and
        # binds the repository to its documentation page.
        skeinforge_profile.addListsSetCraftProfile( getCraftSequence(), 'free_wire', self, 'skeinforge_application.skeinforge_plugins.profile_plugins.winding.html')
def main():
    "Display the export dialog."
    if len(sys.argv) > 1:
        # NOTE(review): writeOutput is neither defined nor imported in this
        # module, so this branch raises NameError when arguments are passed --
        # confirm the intended import (other skeinforge plugins obtain it
        # from a craft/utility module).
        writeOutput(' '.join(sys.argv[1 :]))
    else:
        settings.startMainLoopFromConstructor(getNewRepository())

if __name__ == "__main__":
    main()
|
# coding: utf-8
"""
Qc API
Qc API # noqa: E501
The version of the OpenAPI document: 3.0.0
Contact: cloudsupport@telestream.net
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from telestream_cloud_qc.configuration import Configuration
class HeaderByteCountTest(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    # openapi_types: attribute name -> attribute type
    # attribute_map: attribute name -> json key in definition
    # (was a stray bare string literal; converted to a comment since a bare
    # string after the docstring is a no-op statement, not documentation)
    openapi_types = {
        'header_bytes': 'int',
        'reject_on_error': 'bool',
        'checked': 'bool'
    }

    attribute_map = {
        'header_bytes': 'header_bytes',
        'reject_on_error': 'reject_on_error',
        'checked': 'checked'
    }

    def __init__(self, header_bytes=None, reject_on_error=None, checked=None, local_vars_configuration=None):  # noqa: E501
        """HeaderByteCountTest - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        self._header_bytes = None
        self._reject_on_error = None
        self._checked = None
        self.discriminator = None

        if header_bytes is not None:
            self.header_bytes = header_bytes
        if reject_on_error is not None:
            self.reject_on_error = reject_on_error
        if checked is not None:
            self.checked = checked

    @property
    def header_bytes(self):
        """Gets the header_bytes of this HeaderByteCountTest.  # noqa: E501

        :return: The header_bytes of this HeaderByteCountTest.  # noqa: E501
        :rtype: int
        """
        return self._header_bytes

    @header_bytes.setter
    def header_bytes(self, header_bytes):
        """Sets the header_bytes of this HeaderByteCountTest.

        :param header_bytes: The header_bytes of this HeaderByteCountTest.  # noqa: E501
        :type: int
        """
        self._header_bytes = header_bytes

    @property
    def reject_on_error(self):
        """Gets the reject_on_error of this HeaderByteCountTest.  # noqa: E501

        :return: The reject_on_error of this HeaderByteCountTest.  # noqa: E501
        :rtype: bool
        """
        return self._reject_on_error

    @reject_on_error.setter
    def reject_on_error(self, reject_on_error):
        """Sets the reject_on_error of this HeaderByteCountTest.

        :param reject_on_error: The reject_on_error of this HeaderByteCountTest.  # noqa: E501
        :type: bool
        """
        self._reject_on_error = reject_on_error

    @property
    def checked(self):
        """Gets the checked of this HeaderByteCountTest.  # noqa: E501

        :return: The checked of this HeaderByteCountTest.  # noqa: E501
        :rtype: bool
        """
        return self._checked

    @checked.setter
    def checked(self, checked):
        """Sets the checked of this HeaderByteCountTest.

        :param checked: The checked of this HeaderByteCountTest.  # noqa: E501
        :type: bool
        """
        self._checked = checked

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        # six.iteritems dropped: dict.items() works on both Python 2 and 3.
        for attr, _ in self.openapi_types.items():
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [x.to_dict() if hasattr(x, "to_dict") else x
                                for x in value]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {k: v.to_dict() if hasattr(v, "to_dict") else v
                                for k, v in value.items()}
            else:
                result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, HeaderByteCountTest):
            return False

        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        # Defined via __eq__ so the two can never disagree.
        return not self == other
|
_params:
for k, v in self.breakfast_data.items():
# All POST parameters' names are shown.
self.assertContains(response, k, status_code=500)
# Non-sensitive POST parameters' values are shown.
self.assertContains(response, 'baked-beans-value', status_code=500)
self.assertContains(response, 'hash-brown-value', status_code=500)
# Sensitive POST parameters' values are not shown.
self.assertNotContains(response, 'sausage-value', status_code=500)
self.assertNotContains(response, 'bacon-value', status_code=500)
def verify_paranoid_response(self, view, check_for_vars=True,
check_for_POST_params=True):
"""
Asserts that no variables or POST parameters are displayed in the response.
"""
request = self.rf.post('/some_url/', self.breakfast_data)
response = view(request)
if check_for_vars:
# Show variable names but not their values.
self.assertContains(response, 'cooked_eggs', status_code=500)
self.assertNotContains(response, 'scrambled', status_code=500)
self.assertContains(response, 'sauce', status_code=500)
self.assertNotContains(response, 'worcestershire', status_code=500)
if check_for_POST_params:
for k, v in self.breakfast_data.items():
# All POST parameters' names are shown.
self.assertContains(response, k, status_code=500)
# No POST parameters' values are shown.
self.assertNotContains(response, v, status_code=500)
def verify_unsafe_email(self, view, check_for_POST_params=True):
"""
Asserts that potentially sensitive info are displayed in the email report.
"""
with self.settings(ADMINS=(('Admin', 'admin@fattie-breakie.com'),)):
mail.outbox = [] # Empty outbox
request = self.rf.post('/some_url/', self.breakfast_data)
response = view(request)
self.assertEqual(len(mail.outbox), 1)
email = mail.outbox[0]
# Frames vars are never shown in plain text email reports.
body = force_text(email.body)
self.assertNotIn('cooked_eggs', body)
self.assertNotIn('scrambled', body)
se | lf.assertNotIn('sauce', body)
self.assertNotIn('worcestershire', body)
if check_for_POST_params:
for k, v in self.brea | kfast_data.items():
# All POST parameters are shown.
self.assertIn(k, body)
self.assertIn(v, body)
def verify_safe_email(self, view, check_for_POST_params=True):
"""
Asserts that certain sensitive info are not displayed in the email report.
"""
with self.settings(ADMINS=(('Admin', 'admin@fattie-breakie.com'),)):
mail.outbox = [] # Empty outbox
request = self.rf.post('/some_url/', self.breakfast_data)
response = view(request)
self.assertEqual(len(mail.outbox), 1)
email = mail.outbox[0]
# Frames vars are never shown in plain text email reports.
body = force_text(email.body)
self.assertNotIn('cooked_eggs', body)
self.assertNotIn('scrambled', body)
self.assertNotIn('sauce', body)
self.assertNotIn('worcestershire', body)
if check_for_POST_params:
for k, v in self.breakfast_data.items():
# All POST parameters' names are shown.
self.assertIn(k, body)
# Non-sensitive POST parameters' values are shown.
self.assertIn('baked-beans-value', body)
self.assertIn('hash-brown-value', body)
# Sensitive POST parameters' values are not shown.
self.assertNotIn('sausage-value', body)
self.assertNotIn('bacon-value', body)
def verify_paranoid_email(self, view):
"""
Asserts that no variables or POST parameters are displayed in the email report.
"""
with self.settings(ADMINS=(('Admin', 'admin@fattie-breakie.com'),)):
mail.outbox = [] # Empty outbox
request = self.rf.post('/some_url/', self.breakfast_data)
response = view(request)
self.assertEqual(len(mail.outbox), 1)
email = mail.outbox[0]
# Frames vars are never shown in plain text email reports.
body = force_text(email.body)
self.assertNotIn('cooked_eggs', body)
self.assertNotIn('scrambled', body)
self.assertNotIn('sauce', body)
self.assertNotIn('worcestershire', body)
for k, v in self.breakfast_data.items():
# All POST parameters' names are shown.
self.assertIn(k, body)
# No POST parameters' values are shown.
self.assertNotIn(v, body)
class ExceptionReporterFilterTests(TestCase, ExceptionReportTestMixin):
"""
Ensure that sensitive information can be filtered out of error reports.
Refs #14614.
"""
rf = RequestFactory()
def test_non_sensitive_request(self):
"""
Ensure that everything (request info and frame variables) can bee seen
in the default error reports for non-sensitive requests.
"""
with self.settings(DEBUG=True):
self.verify_unsafe_response(non_sensitive_view)
self.verify_unsafe_email(non_sensitive_view)
with self.settings(DEBUG=False):
self.verify_unsafe_response(non_sensitive_view)
self.verify_unsafe_email(non_sensitive_view)
    def test_sensitive_request(self):
        """
        Ensure that sensitive POST parameters and frame variables cannot be
        seen in the default error reports for sensitive requests.
        """
        # With DEBUG=True the full report is rendered (nothing redacted).
        with self.settings(DEBUG=True):
            self.verify_unsafe_response(sensitive_view)
            self.verify_unsafe_email(sensitive_view)

        # With DEBUG=False sensitive values must be filtered out.
        with self.settings(DEBUG=False):
            self.verify_safe_response(sensitive_view)
            self.verify_safe_email(sensitive_view)
    def test_paranoid_request(self):
        """
        Ensure that no POST parameters and frame variables can be seen in the
        default error reports for "paranoid" requests.
        """
        # With DEBUG=True the full report is rendered (nothing redacted).
        with self.settings(DEBUG=True):
            self.verify_unsafe_response(paranoid_view)
            self.verify_unsafe_email(paranoid_view)

        # With DEBUG=False all variables and POST data must be hidden.
        with self.settings(DEBUG=False):
            self.verify_paranoid_response(paranoid_view)
            self.verify_paranoid_email(paranoid_view)
def test_custom_exception_reporter_filter(self):
"""
Ensure that it's possible to assign an exception reporter filter to
the request to bypass the one set in DEFAULT_EXCEPTION_REPORTER_FILTER.
"""
with self.settings(DEBUG=True):
self.verify_unsafe_response(custom_exception_reporter_filter_view)
self.verify_unsafe_email(custom_exception_reporter_filter_view)
with self.settings(DEBUG=False):
self.verify_unsafe_response(custom_exception_reporter_filter_view)
self.verify_unsafe_email(custom_exception_reporter_filter_view)
def test_sensitive_method(self):
"""
Ensure that the sensitive_variables decorator works with object
methods.
Refs #18379.
"""
with self.settings(DEBUG=True):
self.verify_unsafe_response(sensitive_method_view,
check_for_POST_params=False)
self.verify_unsafe_email(sensitive_method_view,
check_for_POST_params=False)
with self.settings(DEBUG=False):
self.verify_safe_response(sensitive_method_view,
check_for_POST_params=False)
self.verify_safe_email(sensitive_method_view,
check_fo |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file '/home/gugu/w/calibre/src/calibre/gui2/dialogs/quickview.ui'
#
# Created: Thu Jul 19 23:32:31 2012
# by: PyQt4 UI code generator 4.9.1
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
_fromUtf8 = lambda s: s
class Ui_Quickview(object):
    """Generated UI for the Quickview dialog (PyQt4 uic output; regenerated
    from quickview.ui -- manual edits will be lost)."""

    def setupUi(self, Quickview):
        Quickview.setObjectName(_fromUtf8("Quickview"))
        Quickview.resize(768, 342)
        sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.MinimumExpanding)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(Quickview.sizePolicy().hasHeightForWidth())
        Quickview.setSizePolicy(sizePolicy)
        self.gridlayout = QtGui.QGridLayout(Quickview)
        self.gridlayout.setObjectName(_fromUtf8("gridlayout"))
        # Left column: items label + list (stretch 1).
        self.items_label = QtGui.QLabel(Quickview)
        self.items_label.setObjectName(_fromUtf8("items_label"))
        self.gridlayout.addWidget(self.items_label, 0, 0, 1, 1)
        self.items = QtGui.QListWidget(Quickview)
        sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding)
        sizePolicy.setHorizontalStretch(1)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.items.sizePolicy().hasHeightForWidth())
        self.items.setSizePolicy(sizePolicy)
        self.items.setObjectName(_fromUtf8("items"))
        self.gridlayout.addWidget(self.items, 1, 0, 1, 1)
        # Right column: books label + table (stretch 4, i.e. wider).
        self.books_label = QtGui.QLabel(Quickview)
        self.books_label.setObjectName(_fromUtf8("books_label"))
        self.gridlayout.addWidget(self.books_label, 0, 1, 1, 1)
        self.books_table = QtGui.QTableWidget(Quickview)
        sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding)
        sizePolicy.setHorizontalStretch(4)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.books_table.sizePolicy().hasHeightForWidth())
        self.books_table.setSizePolicy(sizePolicy)
        self.books_table.setColumnCount(0)
        self.books_table.setRowCount(0)
        self.books_table.setObjectName(_fromUtf8("books_table"))
        self.gridlayout.addWidget(self.books_table, 1, 1, 1, 1)
        # Bottom row: search button, spacer, Close button box.
        self.hboxlayout = QtGui.QHBoxLayout()
        self.hboxlayout.setObjectName(_fromUtf8("hboxlayout"))
        self.search_button = QtGui.QPushButton(Quickview)
        self.search_button.setObjectName(_fromUtf8("search_button"))
        self.hboxlayout.addWidget(self.search_button)
        spacerItem = QtGui.QSpacerItem(0, 0, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
        self.hboxlayout.addItem(spacerItem)
        self.buttonBox = QtGui.QDialogButtonBox(Quickview)
        self.buttonBox.setStandardButtons(QtGui.QDialogButtonBox.Close)
        self.buttonBox.setCenterButtons(False)
        self.buttonBox.setObjectName(_fromUtf8("buttonBox"))
        self.hboxlayout.addWidget(self.buttonBox)
        self.gridlayout.addLayout(self.hboxlayout, 3, 0, 1, 2)

        self.retranslateUi(Quickview)
        QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL(_fromUtf8("rejected()")), Quickview.reject)
        QtCore.QMetaObject.connectSlotsByName(Quickview)

    def retranslateUi(self, Quickview):
        # Window/widget captions; _() hooks into the app's translation system.
        Quickview.setWindowTitle(_("Quickview"))
        self.items_label.setText(_("Items"))
        self.search_button.setText(_("Search"))
        self.search_button.setToolTip(_("Search in the library view for the selected item"))
|
base_location=req.httprequest.url_root.rstrip('/'),
HTTP_HOST=wsgienv['HTTP_HOST'],
REMOTE_ADDR=wsgienv['REMOTE_ADDR'],
)
req.session.authenticate(db, login, key, env)
return set_cookie_and_redirect(req, redirect_url)
def set_cookie_and_redirect(req, redirect_url):
    """Return a 303 redirect carrying the session id in a cookie."""
    response = werkzeug.utils.redirect(redirect_url, 303)
    # Keep the Location header exactly as given (no absolutization).
    response.autocorrect_location_header = False
    session_payload = urllib2.quote(simplejson.dumps(req.session_id))
    response.set_cookie('instance0|session_id', session_payload)
    return response
def load_actions_from_ir_values(req, key, key2, models, meta):
    """Fetch actions from ir.values and normalize each via clean_action."""
    values_model = req.session.model('ir.values')
    raw_actions = values_model.get(key, key2, models, meta, req.context)
    return [(action_id, name, clean_action(req, action))
            for action_id, name, action in raw_actions]
def clean_action(req, action):
    """Ensure an action has 'flags' and 'type'; fix view modes for window actions."""
    action.setdefault('flags', {})
    if action.setdefault('type', 'ir.actions.act_window_close') == 'ir.actions.act_window':
        return fix_view_modes(action)
    return action
# I think generate_views,fix_view_modes should go into js ActionManager
def generate_views(action):
    """
    Build the ``views`` key of an action descriptor from ``view_mode`` and
    ``view_id`` when the server did not provide one (e.g. for custom view
    dictionaries returned on the fly by buttons).

    Currently handles two different cases:

    * no view_id, multiple view_mode
    * single view_id, single view_mode

    :param dict action: action descriptor dictionary to generate a views key for
    """
    view_id = action.get('view_id') or False
    if isinstance(view_id, (list, tuple)):
        view_id = view_id[0]

    # providing at least one view mode is a requirement, not an option
    view_modes = action['view_mode'].split(',')

    if len(view_modes) == 1:
        action['views'] = [(view_id, view_modes[0])]
        return

    if view_id:
        raise ValueError('Non-db action dictionaries should provide '
                         'either multiple view modes or a single view '
                         'mode and an optional view id.\n\n Got view '
                         'modes %r and view id %r for action %r' % (
                             view_modes, view_id, action))
    action['views'] = [(False, mode) for mode in view_modes]
def fix_view_modes(action):
    """ For historical reasons, OpenERP overloads ``tree`` in window actions:
    it stands for both list views and tree views, disambiguated by
    ``view_type`` (``form`` means the ``tree`` mode is really a list).

    This folds ``view_type`` into ``view_mode`` by rewriting ``tree`` to the
    new ``list`` mode whenever ``view_type`` is ``form``.

    :param dict action: an action descriptor
    :returns: nothing, the action is modified in place
    """
    if not action.get('views'):
        generate_views(action)

    # A non-'form' view_type marks a genuine tree view: leave modes alone.
    if action.pop('view_type', 'form') != 'form':
        return action

    if 'view_mode' in action:
        renamed = ['list' if mode == 'tree' else mode
                   for mode in action['view_mode'].split(',')]
        action['view_mode'] = ','.join(renamed)
    action['views'] = [
        [view, 'list' if mode == 'tree' else mode]
        for view, mode in action['views']
    ]

    return action
def _local_web_translations(trans_file):
    """Read an openerp-web .po file into [{'id', 'string'}] entries.

    Returns None when the file cannot be opened or parsed (best-effort).
    """
    try:
        with open(trans_file) as t_file:
            po = babel.messages.pofile.read_po(t_file)
    except Exception:
        return
    return [{'id': entry.id, 'string': entry.string}
            for entry in po
            if entry.id and entry.string and "openerp-web" in entry.auto_comments]
def xml2json_from_elementtree(el, preserve_whitespaces=False):
    """ xml2json-direct
    Simple and straightforward XML-to-JSON converter in Python
    New BSD Licensed
    http://code.google.com/p/xml2json-direct/

    Returns a dict with keys ``tag``, ``attrs``, ``children`` and, for
    namespace-qualified elements, ``namespace``.  Whitespace-only text and
    tail nodes are dropped unless ``preserve_whitespaces`` is set.
    """
    node = {}
    tag = el.tag
    if tag[0] == "{":
        # Qualified tag "{uri}name": split into local name and namespace.
        ns, local_name = tag.rsplit("}", 1)
        node["tag"] = local_name
        node["namespace"] = ns[1:]
    else:
        node["tag"] = tag
    node["attrs"] = dict(el.items())
    children = []
    if el.text and (preserve_whitespaces or el.text.strip() != ''):
        children.append(el.text)
    for child in el:
        children.append(xml2json_from_elementtree(child, preserve_whitespaces))
        # A child's tail text belongs to the parent's children list.
        if child.tail and (preserve_whitespaces or child.tail.strip() != ''):
            children.append(child.tail)
    node["children"] = children
    return node
def content_disposition(filename, req):
    """Build a Content-Disposition header value for ``filename``.

    Old IE (< 9) only copes with a URL-quoted plain ``filename=``; Safari
    gets the raw UTF-8 bytes (it mishandles the RFC 5987 form); every other
    browser receives the standard ``filename*=UTF-8''...`` encoding.
    """
    filename = filename.encode('utf8')
    quoted = urllib2.quote(filename)
    agent = req.httprequest.user_agent
    major_version = int((agent.version or '0').split('.')[0])
    if agent.browser == 'msie' and major_version < 9:
        return "attachment; filename=%s" % quoted
    if agent.browser == 'safari':
        return "attachment; filename=%s" % filename
    return "attachment; filename*=UTF-8''%s" % quoted
#----------------------------------------------------------
# OpenERP Web web Controllers
#----------------------------------------------------------
html_template = """<!DOCTYPE html>
<html style="height: 100%%">
<head>
<meta http-equiv="X-UA-Compatible" content="IE=edge,chrome=1"/>
<meta http-equiv="content-type" content="text/html; charset=utf-8" />
<title>OpenERP</title>
<link rel="shortcut icon" href="/web/static/src/img/favicon.ico" type="image/x-icon"/>
<link rel="stylesheet" href="/web/static/src/css/full.css" />
%(css)s
%(js)s
<script type="text/javascript">
$(function() {
var s = new openerp.init(%(modules)s);
%(init)s
});
</script>
</head>
<body>
<!--[if lte IE 8]>
<script src="//ajax.googleapis.com/ajax/libs/chrome-frame/1/CFInstall.min.js"></script>
<script>CFInstall.check({mode: "overlay"});</script>
<![endif]-->
</body>
</html>
"""
class Home(openerpweb.Controller):
    """Root URL controller: serves the web client shell and URL login."""
    _cp_path = '/'
    @openerpweb.httprequest
    def index(self, req, s_action=None, db=None, **kw):
        """Serve the web client page, redirecting to the right db first."""
        db, redir = db_monodb_redirect(req)
        if redir:
            return redirect_with_hash(req, redir)
        # Build the <script>/<link> tags for every js/css asset of the db.
        js = "\n        ".join('<script type="text/javascript" src="%s"></script>' % i for i in manifest_list(req, 'js', db=db))
        css = "\n        ".join('<link rel="stylesheet" href="%s">' % i for i in manifest_list(req, 'css', db=db))
        r = html_template % {
            'js': js,
            'css': css,
            'modules': simplejson.dumps(module_boot(req, db=db)),
            'init': 'var wc = new s.web.WebClient();wc.appendTo($(document.body));'
        }
        return r
    @openerpweb.httprequest
    def login(self, req, db, login, key):
        """Log ``login`` in on ``db`` using token ``key`` and redirect."""
        if db not in db_list(req, True):
            # Unknown database: bounce back to the root page.
            return werkzeug.utils.redirect('/', 303)
        return login_and_redirect(req, db, login, key)
class WebClient(openerpweb.Controller):
_cp_path = "/web/webclient"
    @openerpweb.jsonrequest
    def csslist(self, req, mods=None):
        """Return the list of css asset paths for ``mods``."""
        return manifest_list(req, 'css', mods=mods)
    @openerpweb.jsonrequest
    def jslist(self, req, mods=None):
        """Return the list of javascript asset paths for ``mods``."""
        return manifest_list(req, 'js', mods=mods)
    @openerpweb.jsonrequest
    def qweblist(self, req, mods=None):
        """Return the list of qweb template paths for ``mods``."""
        return manifest_list(req, 'qweb', mods=mods)
@openerpweb.httprequest
def css(self, req, mods=None, db=None):
files = list(manifest_glob(req, 'css', addons=mods, db=db))
last_modified = get_last_modified(f[0] for f in fil |
"""Test that arguments passed to a script Menu.main(loop=True) execute
properly."""
##==============================================================#
## SECTION: Imports #
##==============================================================#
from testlib import *
##================================= | =============================#
## SECTION: Global Definitions #
##==============================================================#
# Script under test; each test below runs ``python script_1.py <args>``.
SCRIPT = "script_1.py"
##==============================================================#
## SECTION: Class Definitions #
##=================== | ===========================================#
class TestCase(unittest.TestCase):
    """Runs SCRIPT with different menu arguments and verifies exactly which
    of the marker files (foo/bar/caz) each invocation creates.

    Refactored: the original repeated the same run-and-assert triads in
    every test method; the shared logic now lives in private helpers.
    """

    def _cleanup(self):
        # Remove marker files possibly left over from a previous run.
        rmfile("foo")
        rmfile("bar")
        rmfile("caz")

    def _run_ok(self, args):
        # Run SCRIPT with the given argument string; it must exit with 0.
        result = os.system("python %s %s" % (SCRIPT, args))
        self.assertEqual(0, result)

    def _check_files(self, foo, bar, caz):
        # Each flag states whether the corresponding marker file must exist.
        self.assertEqual(foo, op.exists("foo"))
        self.assertEqual(bar, op.exists("bar"))
        self.assertEqual(caz, op.exists("caz"))

    def setUp(self):
        self._cleanup()
        self._check_files(False, False, False)

    def tearDown(self):
        self._cleanup()

    def test_script_1(self):
        # 'x' must create no files.
        self._run_ok("x")
        self._check_files(False, False, False)

    def test_script_2(self):
        self._run_ok("f")
        self._check_files(True, False, False)

    def test_script_3(self):
        self._run_ok("b")
        self._check_files(False, True, False)

    def test_script_4(self):
        self._run_ok("f b")
        self._check_files(True, True, False)

    def test_script_5(self):
        self._run_ok("c")
        self._check_files(False, False, True)

    def test_script_6(self):
        self._run_ok("c f")
        self._check_files(True, False, True)

    def test_script_7(self):
        # '-d' must create only bar (presumably the default menu entry —
        # confirm against script_1.py).
        self._run_ok("-d")
        self._check_files(False, True, False)
##==============================================================#
## SECTION: Main Body #
##==============================================================#
# Allow running this test module directly: ``python <thisfile>``.
if __name__ == '__main__':
    unittest.main()
|
tests.factories import WhitelistFactory, BlacklistFactory, \
RegistrationCenterFactory, RegistrationFactory
from register.tests.test_center_csv import CenterFileTestMixin
from libya_elections.phone_numbers import get_random_phone_number, format_phone_number
from libya_elections.tests.utils import ResponseCheckerMixin
from libya_site.tests.factories import UserFactory, DEFAULT_USER_PASSWORD
from polling_reports.models import StaffPhone
from polling_reports.tests.factories import StaffPhoneFactory
from staff.tests.base import StaffUserMixin
class ImportBlackWhitelistViewMixin(StaffUserMixin, ResponseCheckerMixin):
    """Base class for TestImportBlacklistView and TestImportWhitelistView.
    This doesn't inherit from TestCase, so it isn't executed by itself.

    Concrete subclasses provide ``url`` (the upload view), ``model``
    (Blacklist or Whitelist) and ``factory`` (the matching factory).
    """
    def setUp(self):
        super(ImportBlackWhitelistViewMixin, self).setUp()
        # Set by subclasses before calling this setUp:
        # self.url = None
        # self.model = None
        # self.factory = None
    def test_staff_can_see_form(self):
        # The upload form asks for a file only, never a password.
        rsp = self.client.get(self.url, follow=False)
        form = rsp.context['form']
        self.assertNotIn('password', form.fields)
        self.assertIn('import_file', form.fields)
    def test_nonstaff_cant_see_form(self):
        # A logged-in but non-staff user must be refused.
        self.client.logout()
        self.nonstaff_user = UserFactory(username='joe', password='puppy')
        self.client.login(username='joe', password='puppy')
        self.assertForbidden(self.client.get(self.url))
    def test_valid_form(self):
        # Upload four punctuated numbers, including a blank line and
        # with all combinations of line endings (\r\n, \n, \r)
        numbers = [get_random_phone_number() for i in range(4)]
        punctuated_numbers = [format_phone_number(number)
                              for number in numbers]
        file_content = ("""%s\r\n%s\n \n%s\r%s""" % (
            punctuated_numbers[0],
            punctuated_numbers[1],
            punctuated_numbers[2],
            punctuated_numbers[3],
        )).encode()
        blackwhitelist_file = ContentFile(file_content, name='bw.txt')
        data = {'import_file': blackwhitelist_file}
        rsp = self.client.post(self.url, data=data)
        # Assert that we redirect
        self.assertEqual(302, rsp.status_code)
        # All four numbers must be stored, unpunctuated.
        bwlist = self.model.objects.values_list('phone_number', flat=True)
        for number in numbers:
            self.assertIn(number, bwlist)
        self.assertEqual(len(bwlist), 4)
    def test_import_number_twice_works(self):
        "Importing a number that is already in list shouldn't cause an error"
        number = get_random_phone_number()
        self.factory(phone_number=number)
        file_content = number.encode()
        blackwhitelist_file = ContentFile(file_content, name='bw.txt')
        data = {'import_file': blackwhitelist_file}
        rsp = self.client.post(self.url, data=data)
        # Assert that we redirect
        self.assertEqual(302, rsp.status_code)
        # The number must not be duplicated in the list.
        bwlist = self.model.objects.values_list('phone_number', flat=True)
        self.assertEqual(len(bwlist), 1)
        self.assertIn(number, bwlist)
    def test_import_number_cant_start_with_2180(self):
        "Ensures that the number doesn't start with 2180"
        number = '218091234123'
        file_content = number.encode()
        blackwhitelist_file = ContentFile(file_content, name='bw.txt')
        data = {'import_file': blackwhitelist_file}
        rsp = self.client.post(self.url, data=data, follow=True)
        self.assertEqual(200, rsp.status_code)
        # Nothing imported; the page reports the rejected line number.
        bwlist = self.model.objects.values_list('phone_number', flat=True)
        self.assertEqual(len(bwlist), 0)
        self.assertContains(rsp, 'Numbers on these lines not imported because '
                            'they are not valid phone numbers: 1.')
class TestImportBlacklistView(ImportBlackWhitelistViewMixin, LibyaTest):
    """Exercise uploading a list of blacklisted numbers."""

    def setUp(self):
        # Point the shared mixin at the blacklist model/url/factory
        # before it logs the staff user in.
        self.factory = BlacklistFactory
        self.url = reverse('blacklisted-numbers-upload')
        self.model = Blacklist
        self.permissions = ('add_blacklist', 'browse_blacklist')
        super(TestImportBlacklistView, self).setUp()
class TestImportWhitelistView(ImportBlackWhitelistViewMixin, LibyaTest):
    """Exercise uploading a list of whitelisted numbers."""

    def setUp(self):
        # Point the shared mixin at the whitelist model/url/factory
        # before it logs the staff user in.
        self.factory = WhitelistFactory
        self.url = reverse('whitelisted-numbers-upload')
        self.model = Whitelist
        self.permissions = ('add_whitelist', 'browse_whitelist')
        super(TestImportWhitelistView, self).setUp()
class BlackWhitelistEditFormMixin(StaffUserMixin, ResponseCheckerMixin):
    """Base class for TestBlacklistChangeForm and TestWhitelistChangeForm.
    This doesn't inherit from TestCase, so it isn't executed by itself.

    Concrete subclasses provide ``factory`` and ``form``.
    """
    def setUp(self):
        super(BlackWhitelistEditFormMixin, self).setUp()
        # Set by subclasses:
        # self.factory = None
        # self.form = None
    def test_cleans_phone_number(self):
        # The form must strip punctuation back to the bare number.
        number = get_random_phone_number()
        punctuated_number = format_phone_number(number)
        form = self.form(data={'phone_number': punctuated_number})
        self.assertTrue(form.is_valid(), form.errors)
        self.assertEqual(form.cleaned_data['phone_number'], number)
    def test_add_dupe_shows_form_error(self):
        # Adding an already-listed number is a validation error, not a crash.
        number = get_random_phone_number()
        self.factory(phone_number=number)
        form = self.form(data={'phone_number': number})
        self.assertFalse(form.is_valid())
        self.assertIn('Duplicate value for phone number', list(form.errors.values())[0])
    def test_phone_number_cant_start_with_2180(self):
        "Ensures the local prefix '0' isn't accidentally included in the phone number"
        number = '218091234124'
        form = self.form(data={'phone_number': number})
        self.assertFalse(form.is_valid())
        self.assertIn('Please enter a valid phone number', list(form.errors.values())[0][0])
class TestBlacklistChangeForm(BlackWhitelistEditFormMixin, TestCase):
    """Exercise Blacklist number editing."""

    def setUp(self):
        super(TestBlacklistChangeForm, self).setUp()
        # Bind the shared tests to the blacklist form and factory.
        self.form = BlacklistedNumberEditForm
        self.factory = BlacklistFactory
class TestWhitelistChangeForm(BlackWhitelistEditFormMixin, TestCase):
    """Exercise Whitelist number editing."""

    def setUp(self):
        super(TestWhitelistChangeForm, self).setUp()
        # Bind the shared tests to the whitelist form and factory.
        self.form = WhitelistedNumberEditForm
        self.factory = WhitelistFactory
class BlacklistDownload(StaffUserMixin, ResponseCheckerMixin, TestCase):
    """Staff with read permission can download the blacklist as a file."""
    permissions = ['read_blacklist']
    model = Blacklist

    def test_download_blacklist_file(self):
        entry = BlacklistFactory()
        response = self.client.get(reverse('blacklisted-numbers-download'))
        self.assertOK(response)
        # The downloaded payload must include the listed number.
        self.assertIn(entry.phone_number, response.content.decode())
class WhitelistDownload(StaffUserMixin, ResponseCheckerMixin, TestCase):
    """Staff with read permission can download the whitelist as a file."""
    permissions = ['read_whitelist']
    model = Whitelist

    def test_download_whitelist_file(self):
        entry = WhitelistFactory()
        response = self.client.get(reverse('whitelisted-numbers-download'))
        self.assertOK(response)
        # The downloaded payload must include the listed number.
        self.assertIn(entry.phone_number, response.content.decode())
class DeleteBlacklist(StaffUserMixin, ResponseCheckerMixin, TestCase):
    """Bulk deletion of every blacklisted number."""
    permissions = ['delete_blacklist', 'browse_blacklist']
    model = Blacklist

    def setUp(self):
        super(DeleteBlacklist, self).setUp()
        self.url = reverse('blacklisted-numbers-delete')
        # Three entries so the confirmation page has something to count.
        BlacklistFactory.create_batch(size=3)

    def test_get_deleted_page(self):
        # GET shows a confirmation page mentioning the entry count.
        response = self.client.get(self.url)
        self.assertOK(response)
        self.assertIn('Are you sure you want to delete all 3', response.content.decode())

    def test_post_deleted_page(self):
        # Confirming wipes the table and redirects to the browse view.
        response = self.client.post(self.url, data={'ok': True})
        self.assertRedirects(response, reverse('browse_blacklistednumbers'))
        self.assertEqual(Blacklist.objects.count(), 0)
class DeleteWhitelist(StaffUserMixin, ResponseCheckerMixin, TestCase):
permissions = ['delete_whitelist', 'browse_whitelist']
model = Whitelist
def setUp(self):
super(DeleteWhitelist, self).setUp()
self.url = reverse('white |
h00020'},
(
(176, 0), (176, 1556), (295, 0), (330, 141), (334, 950),
(342, 141), (342, 318), (342, 780), (342, 950), (342, 1051),
(342, 1178), (342, 1556), (402, 59), (458, 1114), (492, 975),
(496, 119), (579, -20), (662, 975), (666, 119), (686, -20),
(686, 1114), (819, 119), (819, 975), (900, -20), (902, 1114),
(969, 342), (969, 551), (969, 765), (1141, 279), (1141, 819),
)
: {'char' : 'b', 'name' : 'glyph00021'},
(
(63, -264), (90, -160), (155, 146), (168, 238), (188, -264),
(241, -150), (339, 115), (350, 238), (365, 215),
)
: {'char' : ',', 'name' : 'glyph00022'},
(
(176, 0), (176, 1556), (332, 561), (340, 0), (340, 410),
(340, 561), (340, 676), (340, 731), (340, 1556), (383, 622),
(465, 518), (471, 721), (578, 629), (825, 1096), (852, 0),
(1022, 1096), (1053, 0),
)
: {'char' : 'k', 'name' : 'glyph00023'},
(
(0, 1096), (178, 1096), (414, 446), (416, 0), (494, 218),
(508, 150), (516, 150), (527, 203), (610, 0), (644, 536),
(848, 1096), (1026, 1096),
)
: {'char' : 'v', 'name' : 'glyph00024'},
(
(125, 372), (125, 733), (125, 959), (294, 1299), (307, 444),
(307, 731), (307, 1010), (476, -20), (576, 131), (586, 1331),
(613, 1483), (799, -20), (825, 131), (827, 1331), (829, 1483),
(978, 131), (993, 1331), (1022, -20), (1059, 1483), (1159, 1253),
(1174, 37), (1174, 186), (1231, 1399),
)
: {'char' : 'C', 'name' : 'glyph00025'},
(
(133, 1462), (174, 934), (279, 934), (319, 1462), (502, 1462),
(543, 934), (6 | 47, 934), (688, 1462),
)
: {'char' : '"', 'name' : 'glyph00026'},
(
(18, 1311), (18, 1462), (481, 0), (481, 1311), (651, 0),
(651, 1311), (1114, 1311), (1114, 1462),
)
: {'char' : 'T', 'name' : 'glyph00027'},
(
(201, 0), (201, 1462), (371, 147), (371, 1315), (578, 147),
(606, 0), (618, 1315), (649, 1462), (882, 147), (901, 1315),
(975, 0), (990, 1462), (1188, 446), (1188, 739), (1188, 1025),
(1368, 383), (1368, 745), (1368, 1084),
)
: {'char' : 'D' | , 'name' : 'glyph00028'},
(
(82, 0), (82, 133), (106, 1309), (106, 1462), (289, 154),
(858, 1309), (1065, 1329), (1065, 1462), (1087, 0), (1087, 154),
)
: {'char' : 'Z', 'name' : 'glyph00029'},
(
(201, 0), (201, 1462), (371, 0), (371, 1462),
)
: {'char' : 'l', 'name' : 'glyph00030'},
(
(133, 1462), (174, 934), (279, 934), (319, 1462),
)
: {'char' : "'", 'name' : 'glyph00031'},
(
(115, 278), (115, 814), (287, 336), (287, 750), (353, -20),
(355, 1116), (440, 119), (442, 977), (565, -20), (569, 1116),
(588, 977), (590, 119), (756, 119), (757, 977), (794, 1116),
(796, -20), (908, 297), (911, 147), (913, -492), (913, -23),
(913, 77), (913, 508), (913, 545), (913, 775), (915, 946),
(924, 147), (924, 946), (948, 1096), (1079, -492), (1079, 1096),
)
: {'char' : 'q', 'name' : 'glyph00032'},
(
(201, 0), (201, 1462), (342, 1227), (350, 1227), (358, 0),
(358, 831), (358, 1011), (393, 1462), (1149, 0), (1182, 560),
(1184, 623), (1184, 1462), (1190, 240), (1196, 267), (1198, 240),
(1343, 0), (1343, 1462),
)
: {'char' : 'N', 'name' : 'glyph00033'},
(
(201, 0), (201, 1462), (344, 1296), (352, 1296), (358, 0),
(358, 930), (358, 1142), (457, 1462), (848, 0), (920, 256),
(928, 256), (985, 0), (1395, 1462), (1479, 0), (1479, 942),
(1479, 1104), (1485, 1294), (1493, 1294), (1649, 0), (1649, 1462),
)
: {'char' : 'M', 'name' : 'glyph00034'},
(
(0, 0), (172, 0), (352, 465), (412, 618), (578, 1468),
(584, 1071), (625, 1186), (647, 1282), (682, 1157), (715, 1071),
(721, 1468), (885, 618), (938, 465), (1120, 0), (1296, 0),
)
: {'char' : 'A', 'name' : 'glyph00035'},
(
(27, 1384), (86, 1249), (183, 1298), (216, 1483), (240, 34),
(240, 106), (240, 242), (289, 403), (289, 457), (289, 574),
(308, -29), (333, 1335), (360, -29), (360, 242), (361, 725),
(412, 1335), (417, -29), (418, 242), (418, 403), (418, 436),
(418, 531), (422, 1483), (459, 809), (481, 42), (481, 172),
(483, 645), (535, 1335), (584, 731), (595, 924), (613, 1483),
(666, 1040), (666, 1122), (666, 1224), (710, 841), (786, 940),
(825, 1051), (825, 1124), (825, 1295),
)
: {'char' : '?', 'name' : 'glyph00036'},
(
(-111, -467), (-111, -332), (-52, -492), (-42, -352), (25, -352),
(43, -492), (103, -352), (162, 1337), (162, 1393), (162, 1450),
(176, -267), (176, -180), (176, 1096), (218, 1282), (218, 1503),
(300, 1282), (300, 1503), (342, -492), (342, -168), (342, 1096),
(358, 1337), (358, 1449),
)
: {'char' : 'j', 'name' : 'glyph00037'},
(
(27, 1462), (207, 1462), (416, 0), (438, 559), (486, 369),
(508, 215), (535, 398), (584, 0), (588, 573), (850, 1462),
(870, 973), (918, 1130), (940, 1262), (941, 1242), (993, 1044),
(1014, 979), (1030, 1462), (1305, 565), (1309, 0), (1353, 410),
(1386, 215), (1405, 357), (1458, 561), (1477, 0), (1688, 1462),
(1868, 1462),
)
: {'char' : 'W', 'name' : 'glyph00038'},
(
(201, 0), (201, 1462), (371, 145), (371, 692), (371, 836),
(371, 1315), (614, 1462), (621, 1315), (651, 836), (662, 692),
(676, 145), (711, 0), (813, 1315), (831, 836), (849, 692),
(853, 145), (881, 766), (881, 776), (905, 1462), (949, 0),
(989, 949), (989, 1083), (989, 1206), (1020, 801), (1032, 282),
(1032, 428), (1032, 564), (1165, 970), (1165, 1100), (1165, 1288),
(1214, 220), (1214, 416), (1214, 709),
)
: {'char' : 'B', 'name' : 'glyph00039'},
(
(201, 0), (201, 1462), (371, 0), (371, 688), (371, 840),
(371, 1462), (1141, 0), (1141, 688), (1141, 840), (1141, 1462),
(1311, 0), (1311, 1462),
)
: {'char' : 'H', 'name' : 'glyph00040'},
(
(106, 47), (106, 211), (125, 953), (125, 1114), (125, 1283),
(196, 173), (246, -20), (297, 1036), (297, 1112), (297, 1215),
(300, 753), (353, 939), (379, 1483), (408, 129), (449, 1331),
(486, 858), (504, 680), (506, -20), (512, 129), (584, 1331),
(588, 1483), (623, 809), (650, 628), (682, 129), (746, -20),
(755, 1331), (793, 546), (806, 1483), (853, 727), (854, 258),
(854, 373), (854, 449), (936, 1255), (989, 1403), (1026, 196),
(1026, 389), (1026, 539),
)
: {'char' : 'S', 'name' : 'glyph00041'},
(
(201, 0), (201, 1462), (371, 152), (371, 690), (371, 840),
(371, 1311), (977, 690), (977, 840), (1016, 0), (1016, 152),
(1016, 1311), (1016, 1462),
)
: {'char' : 'E', 'name' : 'glyph00042'},
(
(152, 34), (152, 106), (152, 242), (170, 1462), (220, -29),
(221, 403), (272, -29), (272, 242), (326, 403), (329, -29),
(330, 242), (377, 1462), (393, 42), (393, 172),
)
: {'char' : '!', 'name' : 'glyph00043'},
(
(201, 0), (201, 1462), (371, 0), (371, 625), (371, 776),
(371, 1311), (977, 625), (977, 776), (1016, 1311), (1016, 1462),
)
: {'char' : 'F', 'name' : 'glyph00044'},
(
(125, 375), (125, 735), (125, 1092), (305, 436), (305, 733),
(305, 1026), (476, -20), (477, 1485), (558, 129), (558, 1333),
(799, -20), (801, 1485), (1042, 129), (1043, 1333), (1115, -20),
(1116, 1485), (1290, 435), (1290, 733), (1290, 1028), (1470, 382),
(1470, 733), (1470, 1085),
)
: {'char' : 'O', 'name' : 'glyph00045'},
(
(-160, -358), (-160, -213), (-106, -385), (-89, -233), (-12, -385),
(-12, -233), (87, -233), (168, -385), (190, -113), (190, 0),
(190, 1462), (360, -176), (360, 14), (360, 1462),
)
: {'char' : 'J', 'name' : 'glyph00046'},
(
(86, 1090), (115, 1272), (221, 733), (397, 639), (463, 1059),
(463, 1556), (506, 1161), (557, 1001), (614, 1161), (657, 1059),
(657, 1556), (733, 639), (905, 733), (1012, 1272), (1038, 1090),
)
: {'char' : '*', 'name' : 'glyph00047'},
(
(39, 0), (59, 1096), (227, 0), (248, 1096), (440, 561),
(537, 444), (537, 676), (631, 561), (825, 1096), (844, 0),
(1012, 1096), (1032, 0),
)
: {'char' : 'x', 'name' : 'glyph00048'},
(
(94, 59), (94, 217), (102, 1331), (185, 15), (186, 1219),
(189, 170), (192, 1402), (295, 1289), (317, 684), (317, 827),
(3 |
"""Two different implementations of merge sort. First one is the standard sort
that creates the result to new list on each level. Second one is an in-place
sort that uses two alternating buffers and offsets to limit memory usage
to O(2n).
"""
def sort(lst):
    """Standard merge sort.

    Args:
        lst: List to sort

    Returns:
        Sorted copy of the list
    """
    if len(lst) <= 1:
        return lst
    half = len(lst) // 2
    left = sort(lst[:half])
    right = sort(lst[half:])
    merged = []
    li = ri = 0
    # Repeatedly take the smaller head element until one half runs out.
    while li < len(left) and ri < len(right):
        if left[li] < right[ri]:
            merged.append(left[li])
            li += 1
        else:
            merged.append(right[ri])
            ri += 1
    # At most one of these slices is non-empty.
    merged += left[li:]
    merged += right[ri:]
    return merged
def helper(lst, buf, start, stop, to_buf):
    """Recursive worker for the in-place merge sort.

    Sorts the slice ``[start:stop)`` using the two buffers alternately so
    each merge copies between them instead of allocating.

    Args:
        lst: List to sort
        buf: Buffer to store the results
        start: Start index
        stop: Stop index
        to_buf: If True the sorted run must end up in ``buf``;
            if False it must end up in ``lst``.
    """
    span = stop - start
    if span <= 1:
        # A run of length 0/1 is already sorted; mirror the single
        # element into the other buffer when it belongs there.
        if to_buf and span == 1:
            buf[start] = lst[start]
        return
    mid = start + span // 2
    helper(lst, buf, start, mid, not to_buf)
    helper(lst, buf, mid, stop, not to_buf)
    # The halves now live in the *other* buffer; swap the names so the
    # merge below always reads from buf and writes into lst.
    if to_buf:
        lst, buf = buf, lst
    left = start
    right = mid
    out = start
    while left < mid and right < stop:
        if buf[left] < buf[right]:
            lst[out] = buf[left]
            left += 1
        else:
            lst[out] = buf[right]
            right += 1
        out += 1
    # Copy whichever half has leftovers (only one of these loops runs).
    while left < mid:
        lst[out] = buf[left]
        left += 1
        out += 1
    while right < stop:
        lst[out] = buf[right]
        right += 1
        out += 1
def sort_in_place(lst):
    """In-place merge sort.

    Sorts ``lst`` in place, using one scratch buffer of equal length
    (O(n) extra memory in total).

    Args:
        lst: List to sort
    """
    scratch = [None] * len(lst)
    helper(lst, scratch, 0, len(lst), False)
|
#!/usr/bin/en | v python
from distutils.core import setup
setup(name='Ajax Select',
version='1.0',
description='Django-jQuery jQuery-powered auto-complete fields for ForeignKey and ManyToMany fields',
author='Crucial Felix',
author_email='crucialfelix@gmail.com',
url='http://code.google.com/p/django-ajax-selects/',
packages=['ajax_select', ],
) | |
# Pin the persistence (serialisation) schema version of each idevice class
# and of Package itself.  NOTE(review): presumably consumed by the
# doUpgrade()/version-upgrade machinery when loading packages saved by
# older releases — confirm against the persistence framework.
ListaIdevice.persistenceVersion = 5
MultichoiceIdevice.persistenceVersion = 9
GenericIdevice.persistenceVersion = 11
MultiSelectIdevice.persistenceVersion = 1
OrientacionesalumnadofpdIdevice.persistenceVersion = 9
OrientacionestutoriafpdIdevice.persistenceVersion = 9
ParasabermasfpdIdevice.persistenceVersion = 9
QuizTestIdevice.persistenceVersion = 10
RecomendacionfpdIdevice.persistenceVersion = 9
ReflectionfpdIdevice.persistenceVersion = 9
ReflectionfpdmodifIdevice.persistenceVersion = 9
ReflectionIdevice.persistenceVersion = 8
SeleccionmultiplefpdIdevice.persistenceVersion = 2
TrueFalseIdevice.persistenceVersion = 11
VerdaderofalsofpdIdevice.persistenceVersion = 12
WikipediaIdevice.persistenceVersion = 9
Package.persistenceVersion = 13
def extractNode(self):
"""
Clones and extracts the currently selected node into a new package.
"""
newPackage = Package('NoName') # Name will be set once it is saved..
newPackage.title = self.currentNode.title
newPackage.style = self.style
newPackage.author = self.author
newPackage._nextNodeId = self._nextNodeId
# Copy the nodes from the original package
# and merge into the root of the new package
self.currentNode.copyToPackage(newPackage)
return newPackage
    @staticmethod
    def load(filename, newLoad=True, destinationPackage=None, fromxml=None):
        """
        Load package from disk, returns a package.

        ``filename`` may hold the XML serialisation (contentv3.xml), the
        legacy jellied payload (content.data), or a Common Cartridge /
        IMS Content Package.  ``fromxml`` forces decoding from the given
        XML string; when ``newLoad`` is False the loaded data is merged
        into ``destinationPackage`` by doUpgrade instead of upgraded
        stand-alone.  Returns None when ``filename`` is not a zip file.
        """
        if not zipfile.is_zipfile(filename):
            return None
        zippedFile = zipfile.ZipFile(filename, "r")
        xml = None
        # Prefer the newer XML serialisation when present.
        try:
            xml = zippedFile.read(u"contentv3.xml")
        except:
            pass
        if not xml:
            try:
                # Get the jellied package data
                toDecode = zippedFile.read(u"content.data")
            except KeyError:
                # Neither format found: treat the zip as CC/IMS content.
                log.info("no content.data, trying Common Cartridge/Content Package")
                newPackage = loadCC(zippedFile, filename)
                newPackage.tempFile = False
                newPackage.isChanged = False
                newPackage.filename = Path(filename)
                return newPackage
        # Need to add a TempDirPath because it is a nonpersistant member
        resourceDir = TempDirPath()
        # Extract resource files from package to temporary directory
        for fn in zippedFile.namelist():
            if unicode(fn, 'utf8') not in [u"content.data", u"content.xml", u"contentv2.xml", u"contentv3.xml", u"content.xsd" ]:
                #JR: run the checks needed in case the entry contains directories
                if ("/" in fn):
                    dir = fn[:fn.index("/")]
                    Dir = Path(resourceDir/dir)
                    if not Dir.exists():
                        Dir.mkdir()
                Fn = Path(resourceDir/fn)
                if not Fn.isdir():
                    outFile = open(resourceDir/fn, "wb")
                    outFile.write(zippedFile.read(fn))
                    outFile.flush()
                    outFile.close()
        try:
            validxml = False
            if fromxml:
                newPackage, validxml = decodeObjectFromXML(fromxml)
            elif xml:
                xmlinfo = zippedFile.getinfo(u"contentv3.xml")
                if u"content.data" not in zippedFile.NameToInfo:
                    newPackage, validxml = decodeObjectFromXML(xml)
                else:
                    # Both formats present: use the XML only when it is at
                    # least as recent as the jellied payload.
                    datainfo = zippedFile.getinfo(u"content.data")
                    if xmlinfo.date_time >= datainfo.date_time:
                        newPackage, validxml = decodeObjectFromXML(xml)
            if not validxml:
                # Fall back to the legacy jellied payload.
                toDecode = zippedFile.read(u"content.data")
                newPackage = decodeObjectRaw(toDecode)
            # Attach LOM / LOM-ES metadata when the package carries it.
            try:
                lomdata = zippedFile.read(u'imslrm.xml')
                if 'LOM-ES' in lomdata:
                    importType = 'lomEs'
                else:
                    importType = 'lom'
                setattr(newPackage, importType, lomsubs.parseString(lomdata))
            except:
                pass
            G.application.afterUpgradeHandlers = []
            newPackage.resourceDir = resourceDir
            G.application.afterUpgradeZombies2Delete = []
            if not validxml and (xml or fromxml or "content.xml" in zippedFile.namelist()):
                # Prune empty resource entries left by older serialisations.
                for key, res in newPackage.resources.items():
                    if len(res) < 1:
                        newPackage.resources.pop(key)
                    else:
                        if (hasattr(res[0], 'testForAndDeleteZombieResources')):
                            res[0].testForAndDeleteZombieResources()
            if newLoad:
                # provide newPackage to doUpgrade's versionUpgrade() to
                # correct old corrupt extracted packages by setting the
                # any corrupt package references to the new package:
                #JR: sanitise the package name to avoid problematic names
                import string
                validPackagenameChars = "-_. %s%s" % (string.ascii_letters, string.digits)
                newPackage._name = ''.join(c for c in newPackage._name if c in validPackagenameChars).replace(' ','_')
                #JR: if it happens to end up empty, fall back to a default name
                if newPackage._name == "":
                    newPackage._name = "invalidpackagename"
                log.debug("load() about to doUpgrade newPackage \""
                        + newPackage._name + "\" " + repr(newPackage) )
                if hasattr(newPackage, 'resourceDir'):
                    log.debug("newPackage resourceDir = "
                            + newPackage.resourceDir)
                else:
                    # even though it was just set above? should not get here:
                    log.error("newPackage resourceDir has NO resourceDir!")
                doUpgrade(newPackage)
                # after doUpgrade, compare the largest found field ID:
                if G.application.maxFieldId >= Field.nextId:
                    Field.nextId = G.application.maxFieldId + 1
                if hasattr(newPackage,'_docType'):
                    common.setExportDocType(newPackage.docType)
                else:
                    newPackage.set_docType(toUnicode('XHTML'))
            else:
                # and when merging, automatically set package references to
                # the destinationPackage, into which this is being merged:
                log.debug("load() about to merge doUpgrade newPackage \""
                        + newPackage._name + "\" " + repr(newPackage)
                        + " INTO destinationPackage \""
                        + destinationPackage._name + "\" "
                        + repr(destinationPackage))
                log.debug("using their resourceDirs:")
                if hasattr(newPackage, 'resourceDir'):
                    log.debug(" newPackage resourceDir = "
                            + newPackage.resourceDir)
                else:
                    log.error("newPackage has NO resourceDir!")
                if hasattr(destinationPackage, 'resourceDir'):
                    log.debug(" destinationPackage resourceDir = "
                            + destinationPackage.resourceDir)
                else:
                    log.error("destinationPackage has NO resourceDir!")
                doUpgrade(destinationPackage,
                        isMerge=True, preMergePackage=newPackage)
                # after doUpgrade, compare the largest found field ID:
                if G.application.maxFieldId >= Field.nextId:
                    Field.nextId = G.application.maxFieldId + 1
        except:
            # Log the full traceback before propagating the failure.
            import traceback
            traceback.print_exc()
            raise
|
"""Defines the URL routes for the Team API."""
from django.conf import settings
from django.conf.urls import patterns, url
from .views import (
MembershipDetailView,
MembershipListView,
TeamsDetailView,
TeamsListView,
TopicDetailView,
TopicListView
)
# Team ids: lowercase letters, digits, underscore and hyphen.
TEAM_ID_PATTERN = r'(?P<team_id>[a-z\d_-]+)'
# Topic ids additionally allow uppercase letters and dots.
TOPIC_ID_PATTERN = r'(?P<topic_id>[A-Za-z\d_.-]+)'
# v0 routes; the detail routes key on comma-separated composite ids
# (topic_id,course_id and team_id,username respectively).
urlpatterns = patterns(
    '',
    url(
        r'^v0/teams/$',
        TeamsListView.as_view(),
        name="teams_list"
    ),
    url(
        r'^v0/teams/{team_id_pattern}$'.format(
            team_id_pattern=TEAM_ID_PATTERN,
        ),
        TeamsDetailView.as_view(),
        name="teams_detail"
    ),
    url(
        r'^v0/topics/$',
        TopicListView.as_view(),
        name="topics_list"
    ),
    url(
        r'^v0/topics/{topic_id_pattern},{course_id_pattern}$'.format(
            topic_id_pattern=TOPIC_ID_PATTERN,
            course_id_pattern=settings.COURSE_ID_PATTERN,
        ),
        TopicDetailView.as_view(),
        name="topics_detail"
    ),
    url(
        r'^v0/team_membership/$',
        MembershipListView.as_view(),
        name="team_membership_list"
    ),
    url(
        r'^v0/team_membership/{team_id_pattern},{username_pattern}$'.format(
            team_id_pattern=TEAM_ID_PATTERN,
            username_pattern=settings.USERNAME_PATTERN,
        ),
        MembershipDetailView.as_view(),
        name="team_membership_detail"
    )
)
|
import unittest
from pytba import VERSION
from pytba import api as client
class TestApiMethods(unittest.TestCase):
    """Integration tests for the PyTBA client.

    NOTE(review): these hit the live The Blue Alliance API and assert on
    historical 2016 data — they need network access and stable remote data.
    """
    def setUp(self):
        # Register the app id used for every request in these tests.
        client.set_api_key("WesJordan", "PyTBA-Unit-Test", VERSION)
    def test__tba_get(self):
        # Query with proper key should succeed
        team = client.tba_get('team/frc2363')
        self.assertEqual(team['key'], 'frc2363')
        # Query with invalid key should fail
        with self.assertRaises(TypeError):
            client.tba_get('team/frc2363', app_id='invalid key')
    def test__event_get(self):
        # Fixed event: 2016 Tesla Division.
        event = client.event_get('2016tes')
        self.assertEqual(len(event.teams), 75)
        self.assertEqual(event.info['name'], 'Tesla Division')
        self.assertEqual(len(event.matches), 140)
        self.assertEqual(event.rankings[1][1], '2056')
    def test__team_matches(self):
        matches = client.team_matches('frc2363', 2016)
        self.assertEqual(len(matches), 62)
        self.assertEqual(matches[-1]['alliances']['opponent']['score'], 89)
# Allow running this test module directly: ``python <thisfile>``.
if __name__ == '__main__':
    unittest.main()
|
input = """
a(1).
a( | 2) | a(3).
ok1 :- #max{V:a(V)} = 3.
b(3).
b(1) | b(2).
ok2 :- #max{V:b(V)} = 3.
"""
output = """
a(1).
a(2) | a(3).
ok1 :- #max{V:a(V)} = 3.
b(3).
b(1) | b(2).
ok2 :- | #max{V:b(V)} = 3.
"""
|
'''
we all know the classic "guessing game" with higher or lower prompts. lets do a role reversal; you create a program that will guess numbers between 1-100, and respond appropriately based on whether users say that the number is too high or too low. Try to make a program that can guess your number based on user input and great code!
'''
import random  # NOTE: unused here; kept in case other chunks rely on it.
import numpy

got_answer = False
# Remaining search interval; previously named ``max``/``min``, which
# shadowed the builtins.
high = 100
low = 0
try_count = 0
while not got_answer:
    try_count += 1
    # Rejection-sample a position in [0, 1] from N(0.5, 0.125) so guesses
    # cluster near the middle of the remaining interval.
    num = -1
    while (num > 1) or (num < 0):
        num = .125 * numpy.random.randn() + 0.5
    print(num)
    guess = int(((high - low) * num) + low)
    print('1. Higher')
    print('2. Correct!')
    print('3. Lower')
    print('\nIs your number {}'.format(guess))
    response = input('> ')
    if response == '2':
        got_answer = True
        if try_count > 1:
            print('\nHurray! I guessed {} in {} tries!!!'.format(guess, try_count))
        else:
            # Fixed: the original passed a second, unused format argument.
            print('\nHurray! I guessed {} in the first try!!! WOOHOO!'.format(guess))
    elif response == '1':
        low = guess + 1
    elif response == '3':
        high = guess - 1
    # Contradictory answers leave an empty interval: bail out.
    if low > high:
        got_answer = True
        print('ERROR! ERROR! ERROR! Master did not answer the questions properly!')
# Manual smoke test for the SHT21 temperature/humidity sensor driver on
# I2C bus 1.  Python 2 syntax (print statements).
import sht21
# NOTE(review): ``as sht21`` rebinds the module name to the device handle
# inside the block — works, but shadows the import.
with sht21.SHT21(1) as sht21:
    print "temp: %s"%sht21.read_temperature()
    print "humi: %s"%sht21.read_humidity()
import os
import sys
from src import impl as rlcs
import utils as ut
import analysis as anls
import matplotlib.pyplot as plt
import logging
import pickle as pkl
import time
# Python 2 script: load the folder layout from the project config file.
config = ut.loadConfig('config')
sylbSimFolder=config['sylbSimFolder']
transFolder=config['transFolder']
lblDir=config['lblDir']
onsDir=config['onsDir']
resultDir=config['resultDir']
# Syllable-sequence queries to search for in the transcriptions.
queryList = [['DHE','RE','DHE','RE','KI','TA','TA','KI','NA','TA','TA','KI','TA','TA','KI','NA'],['TA','TA','KI','TA','TA','KI','TA','TA','KI','TA','TA','KI','TA','TA','KI','TA'], ['TA','KI','TA','TA','KI','TA','TA','KI'], ['TA','TA','KI','TA','TA','KI'], ['TA', 'TA','KI', 'TA'],['KI', 'TA', 'TA', 'KI'], ['TA','TA','KI','NA'], ['DHA','GE','TA','TA']]
# Only queries of these lengths are accepted; anything else aborts.
queryLenCheck = [4,6,8,16]
for query in queryList:
    if len(query) not in queryLenCheck:
        print 'The query is not of correct length!!'
        sys.exit()
# Collect per-composition syllable data, then locate the query patterns
# (see anls.getPatternsInTransInGTPos for the matching semantics).
masterData = ut.getAllSylbData(tPath = transFolder, lblDir = lblDir, onsDir = onsDir)
res = anls.getPatternsInTransInGTPos(masterData, queryList)
# additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Abstraction for array data structures."""
from numbers import Integral
import tvm._ffi
from tvm._ffi.base import string_types
from tvm.runtime import Object, convert
from tvm.ir import PrimExpr, PointerType, PrimType
from . import _ffi_api
@tvm._ffi.register_object("tir.Buffer")
class Buffer(Object):
    """Symbolic data buffer in TVM.

    A Buffer represents a data-layout specialization of an array-like
    data structure in TVM.

    Do not construct directly; use :py:func:`~decl_buffer` instead, and
    see that function's documentation for details.

    See Also
    --------
    decl_buffer : Declare a buffer
    """

    # Bit flags for access_ptr's access_mask argument.
    READ = 1
    WRITE = 2

    def access_ptr(self, access_mask, ptr_type="handle", content_lanes=1, offset=0):
        """Return an access pointer to the head of this buffer.

        This is the recommended way to obtain the buffer's data address
        when interacting with external functions.

        Parameters
        ----------
        access_mask : int or str
            The access pattern mask, indicating whether the access will
            read and/or write the data content.  May also be given as a
            string made of the flags "r" and/or "w".
        ptr_type : str, optional
            The data type of the resulting pointer.  Only specify this
            to cast the pointer to a specific type.
        content_lanes: int, optional
            The number of lanes of the data type; greater than one for
            vector types.
        offset: Expr, optional
            Offset of the pointer, in elements, from the buffer's base
            address.

        Examples
        --------
        .. code-block:: python

            buffer.access_ptr("r")                          # read
            buffer.access_ptr(Buffer.READ | Buffer.WRITE)   # bitmask
            buffer.access_ptr("rw")                         # string flags
            buffer.access_ptr("r", offset = 100)            # read + offset
        """
        if isinstance(access_mask, string_types):
            # Translate an "r"/"w" flag string into the bitmask form.
            bits = 0
            for flag in access_mask:
                if flag == "r":
                    bits |= Buffer.READ
                elif flag == "w":
                    bits |= Buffer.WRITE
                else:
                    raise ValueError("Unknown access_mask %s" % access_mask)
            access_mask = bits
        return _ffi_api.BufferAccessPtr(
            self, access_mask, ptr_type, content_lanes, convert(offset)
        )

    def vload(self, begin, dtype=None):
        """Build an Expr that loads `dtype` from index `begin`.

        Parameters
        ----------
        begin : Array of Expr
            The start index, in units of Buffer.dtype.
        dtype : str
            The type to load; may be a vector type whose lanes are a
            multiple of Buffer.dtype's.  Defaults to Buffer.dtype.

        Returns
        -------
        load : Expr
            The corresponding load expression.
        """
        if isinstance(begin, (int, PrimExpr)):
            begin = (begin,)
        if not dtype:
            dtype = self.dtype
        return _ffi_api.BufferVLoad(self, begin, dtype)

    def vstore(self, begin, value):
        """Build a Stmt that stores `value` at index `begin`.

        Parameters
        ----------
        begin : Array of Expr
            The start index, in units of Buffer.dtype.
        value : Expr
            The value to store.

        Returns
        -------
        store : Stmt
            The corresponding store statement.
        """
        if isinstance(begin, (int, PrimExpr)):
            begin = (begin,)
        return _ffi_api.BufferVStore(self, begin, value)
def decl_buffer(
shape,
dtype=None,
name="buffer",
data=None,
strides=None,
elem_offset=None,
scope="",
data_alignment=-1,
offset_factor=0,
buffer_type="",
):
"""Declare a new symbolic buffer.
Normally buffer is created automatically during lower and build.
This is only needed if user want to specify their own buffer layout.
See the note below for detailed discussion on usage of buffer.
Parameters
----------
shape : tuple of Expr
The shape of the buffer.
dtype : str, optional
The data type of the buffer.
name : str, optional
The name of the buffer.
data : Var, optional
The data pointer in the buffer.
strides: array of Expr
The stride of the buffer.
elem_offset: Expr, optional
The beginning offset of the array to data.
In terms of number of elements of dtype.
scope: str, optional
The storage scope of the buffer, if not global.
If scope equals empty string, it means it is global memory.
data_alignment: int, optional
The alignment of data pointer in bytes.
If -1 is passed, the alignment will be set to TVM's internal default.
offset_factor: int, optional
The factor of elem_offset field, when set,
elem_offset is required to be multiple of offset_factor.
If 0 is pssed, the alignment will be set to 1.
if non-zero is passed, we will created a Var for elem_offset if elem_offset is not None.
buffer_type: str, optional, {"", "auto_broadcast"}
auto_broadcast buffer allows one to implement broadcast computation
without considering whether dimension size equals to one.
TVM maps buffer[i][j][k] -> buffer[i][0][k] if dimension j's shape equals 1.
Returns
-------
buffer : Buffer
The created buffer
Example
-------
Here's an example of how broadcast buffer can be used to define a symbolic broadcast operation,
.. code-block:: python
m0, m1, m2 = te.var("m0"), te.var("m1"), te.var("m2")
n0, n1, n2 = te.var("n0"), te.var("n1"), te.var("n2")
o0, o1, o2 = te.var("o0"), te.var("o1"), te.var("o2")
A = te.placeholder((m0, m1, m2), name='A')
B = te.placeholder((n0, n1, n2), name='B')
C = te.compute((o0, o1, o2), lambda i, j, k: A[i, j, k] + B[i, j, k], name='C')
Ab = tvm.tir.decl_buffer(A.shape, A.dtype, name="Ab", buffer_type="auto_broadcast")
Bb = tvm.tir.decl_buffer(B.shape, B.dtype, name="Bb", buffer_type="auto_broadcast")
s = te.create_schedule(C.op)
fadd = tvm.build(s, [A, B, C], target='llvm', name='bcast_add', binds={A:Ab, B:Bb})
ctx = tvm.cpu(0)
a = tvm.nd.array(np.random.uniform(size=(2, 4, 3)).astype(A.dtype), ctx)
b = tvm.nd.array(np.random.uniform(size=(2, 1, 3)).astype(B.dtype), ctx)
c = tvm.nd.array(np.zeros((2, 4, 3), dtype=C.dtype), ctx)
fadd(a, b, c)
tvm.testing.assert_allclose(c.asnumpy(), a.asnumpy() + b.asnumpy())
Note
----
Buffer data structure reflects the DLTensor structure in dlpack.
While DLTensor data structure is very general, it is usually helpful
to create function that only handles specific case of data structure
and make compiled function benefit from it.
If user pass strides and elem_offset is passed as None
when constructing the function, then the function will be specialized
for the DLTensor that is compact and aligned.
If user pass a fully generic symbolic array to the strides,
then the resulting function becomes fully generic.
"""
# pylint: disable=import-outsi | de-toplevel
from .expr import Var
shape = (shape,) | if isinstance(shape, (PrimExpr, Integral)) else sh |
#-*- coding: utf-8 -*-
'''
Created on 23 mar 2014
@author: mariusz
@author: tomasz
'''
import unittest
from selearea import get_ast, get_workareas
class seleareaTest(unittest.TestCase):
    """Tests for selearea's page fetching (get_ast) and work-area
    detection (get_workareas).

    NOTE: the fixture helpers fetch live pages over HTTP.
    """

    def get_fc_pages(self):
        # A list, not a set: set iteration order is arbitrary, which made
        # the index-based assertions on the resulting workareas
        # nondeterministic.
        urls = [
            "http://fc.put.poznan.pl",
            "http://fc.put.poznan.pl/rekrutacja/post-powanie-kwalifikacyjne%2C29.html",
            "http://fc.put.poznan.pl/o-wydziale/witamy%2C39.html"
        ]
        return [get_ast(url) for url in urls]

    def get_fce_pages(self):
        # List for deterministic order (see get_fc_pages).
        urls = [
            "http://www.bis.put.poznan.pl/",
            "http://www.bis.put.poznan.pl/evPages/show/id/182"
        ]
        return [get_ast(url) for url in urls]

    def get_identical_pages(self):
        # Must be a list: the original set literal collapsed the duplicate
        # URL into a single element, so only ONE ast was ever produced and
        # the "identical pages" scenario was never actually exercised.
        urls = [
            "http://www.bis.put.poznan.pl/",
            "http://www.bis.put.poznan.pl/"
        ]
        return [get_ast(url) for url in urls]

    def test_get_wrong_page(self):
        # A URL without a scheme must be rejected.
        url = "putpoznan.pl"
        with self.assertRaises(ValueError):
            get_ast(url)

    def test_get_none_page(self):
        with self.assertRaises(ValueError):
            get_ast(None)

    def test_get_workarea_identical_pages(self):
        # Two identical pages differ nowhere, so no work area is expected.
        asts = self.get_identical_pages()
        workareas = get_workareas(asts)
        self.assertEqual(0, len(workareas), "AssertionFailed: work area found on identical pages.")

    def test_get_ast_fc_count(self):
        asts = self.get_fc_pages()
        self.assertEqual(3, len(asts), "AssertionFailed: count for fc pages.")

    def test_get_workarea_fc_content(self):
        asts = self.get_fc_pages()
        workareas = get_workareas(asts)
        xpath = str("//html[@class='js']/body/div[@id='right']/div[@id='content']")
        self.assertEqual(xpath, workareas[0], "AssertionFailed: xpaths for fc pages.")

    def test_get_ast_fce_count(self):
        asts = self.get_fce_pages()
        self.assertEqual(2, len(asts), "AssertionFailed: count for fc pages.")

    def test_get_workarea_fce_content(self):
        asts = self.get_fce_pages()
        workareas = get_workareas(asts)
        xpath = str("//html/body/div[@id='main']/div/div[@id='left_menu']/div[@id='left_menu_box']")
        self.assertEqual(xpath, workareas[1], "AssertionFailed: xpaths for fc pages.")
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
|
"""
Copyright (c) 2011, 2012, Regents of the University of California
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
- Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
- Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
OF THE POSSIBILITY OF SUCH DAMAGE.
"""
"""
@author Stephen Dawson-Haggerty <stevedh@eecs.berkeley.edu>
"""
import sys
import os
import logging
from twisted.python import log
# default configuration
# HTTP server settings; 'docroot' is filled in further below when the
# config does not set one.
SERVER = {
    'port' : 8080,
}
# Logging sinks consumed by start_logging(); each top-level key names a
# handler type ("raven" or "console") with its own options.
LOGGING = {
    # configure logging to sentry via raven
    # 'raven': {
    # 'dsn': 'twisted+http://a888206fd60f4307a7b1a880d1fe04fe:15ecf70787b0490880c712d8469459bd@localhost:9000/2'
    # },
    'console': {
        'level': 'INFO'
    }
}
# guess where the html might be...
# If the config did not set a docroot, default to the "data" directory
# that sits next to this module; on any failure fall back to None
# (best-effort only — e.g. a frozen deployment may not expose __file__).
try:
    if 'docroot' not in SERVER:
        path = os.path.dirname(sys.modules[__name__].__file__)
        path = os.path.join(path, "data")
        SERVER['docroot'] = path
except Exception:
    # was a bare `except:`, which also swallowed SystemExit/KeyboardInterrupt
    SERVER['docroot'] = None
class InverseFilter(logging.Filter):
    """A logging filter that admits exactly the records its base Filter
    would reject, and vice versa.

    Used to keep a handler from seeing its own subsystem's records,
    e.g. so errors raised while talking to Sentry are never sent back
    to Sentry.
    """

    def filter(self, record):
        # Delegate the logger-name prefix match to logging.Filter, then invert it.
        matched = super(InverseFilter, self).filter(record)
        return not matched
def start_logging():
    """Route twisted log events into stdlib logging and attach the
    handlers described by the module-level LOGGING dict.

    Supported sink types: "raven" (Sentry; requires the raven package)
    and "console" (a StreamHandler with a simple formatter).
    """
    # Forward twisted's log messages into the stdlib logging tree.
    observer = log.PythonLoggingObserver()
    observer.start()
    for logtype, config in LOGGING.iteritems():
        if logtype == "raven":
            # Imported lazily so raven is only required when configured.
            from raven.handlers.logging import SentryHandler
            lvl = getattr(logging, config.get('level', 'info').upper())
            handler = SentryHandler(config["dsn"])
            handler.setLevel(lvl)
            # don't try to log sentry errors with sentry
            handler.addFilter(InverseFilter('sentry'))
            logging.getLogger().addHandler(handler)
            print "Starting sentry logging [%s] with destination %s"% (
                config.get('level', 'info').upper(), config["dsn"])
        elif logtype == 'console':
            console = logging.StreamHandler()
            lvl = getattr(logging, config.get('level', 'info').upper())
            console.setLevel(lvl)
            formatter = logging.Formatter('%(name)-12s: %(levelname)-8s %(message)s')
            console.setFormatter(formatter)
            logging.getLogger().addHandler(console)
            print "Starting console logging [%s]" % config.get('level', 'info').upper()
|
# -*- coding: utf-8 -*-
'''
Created on 17/2/16.
@author: love
'''
import paho.mqtt.client as mqtt
import json
import ssl
def on_connect(client, userdata, flags, rc):
    """CONNACK callback: report the broker's result code, then send the
    demo login request to the HD_Login handler.

    Relies on the module-level `user` read from stdin at startup.
    """
    print("Connected with result code %d" % rc)
    credentials = {"userName": user, "passWord": "Hello,anyone!"}
    client.publish("Login/HD_Login/1", json.dumps(credentials), qos=0, retain=False)
def on_message(client, userdata, msg):
    """PUBLISH callback: dump the incoming message, then (as a demo)
    subscribe to the "chat" and "aaa" topics and immediately
    unsubscribe from "chat" again.
    """
    separator = '---------------'
    print (separator)
    print ("topic :"+msg.topic)
    print ("payload :"+msg.payload)
    demo_subscriptions = [("chat", 2), ("aaa", 2)]
    client.subscribe(demo_subscriptions)
    client.unsubscribe(["chat"])
def mylog(self,userdata,level, buf):
    """MQTT logging callback: print the client library's log line.

    Parameters mirror paho-mqtt's on_log signature (client, userdata,
    level, buf); only the message text `buf` is used.
    """
    # Fixed: this was a Python-2-only `print buf` statement, while the
    # rest of the callbacks in this file use print() calls; the function
    # form behaves identically under Python 2 for a single argument.
    print(buf)
if __name__ == '__main__':
    client = mqtt.Client(protocol=mqtt.MQTTv31)
    client.username_pw_set("admin", "password") # must be set; otherwise the broker replies "Connected with result code 4"
    client.on_connect = on_connect
    client.on_message = on_message
    # Connecting to the test server requires TLS; Python's TLS support
    # here is fairly limited.
    # A CA certificate is needed; this uses the bundle published at
    # https://curl.haxx.se/docs/caextract.html
    HOST = "mqant.com"
    # client.tls_set(ca_certs="caextract.pem", certfile=None, keyfile=None, cert_reqs=ssl.CERT_REQUIRED,
    # tls_version=ssl.PROTOCOL_TLSv1, ciphers=None)
    client.connect(HOST, 3563, 60)
    #client.loop_forever()

    # Prompt (Chinese): "enter a user name".  Also stored as MQTT
    # userdata; on_connect reads the module-level `user`.
    user = raw_input("请输入用户名:")
    client.user_data_set(user)
    client.loop_start()
    while True:
        # Prompt (Chinese): type 'join' to enter the room first, then any text to chat.
        s = raw_input("请先输入'join'加入房间,然后输入任意聊天字符:\n")
        if s:
            if s=="join":
                client.publish("Chat/HD_JoinChat/2", json.dumps({"roomName": "mqant"}),qos=0,retain=False)
            elif s=="start":
                client.publish("Master/HD_Start_Process/2", json.dumps({"ProcessID": "001"}),qos=0,retain=False)
            elif s=="stop":
                client.publish("Master/HD_Stop_Process/2", json.dumps({"ProcessID": "001"}),qos=0,retain=False)
            else:
                # Broadcast the typed line to everyone in the room.
                client.publish("Chat/HD_Say/2", json.dumps({"roomName": "mqant","from":user,"target":"*","content": s}),qos=0,retain=False)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.