id stringlengths 1 265 | text stringlengths 6 5.19M | dataset_id stringclasses 7 values |
|---|---|---|
/Locker_Project-1.1.3-py3-none-any.whl/Locker_Project/CMD_Process.py | import threading
import time
import socket
from Locker_Project import Func, MyTask_Finger, MyTask_Tag
from Locker_Project.Func import TaiCauTruc
class Class_Thread:
    """Small holder pairing a worker thread with a status name.

    ``Name``/``Object`` are also exposed through the ``ThreadName`` /
    ``Thread_Object`` properties; both access paths are kept because callers
    (e.g. ``Cmd_Process``) use them interchangeably.
    """

    def __init__(self, name, ObjectThread):
        self.Name = name            # role label, e.g. 'finger_print' or 'Cused'
        self.Object = ObjectThread  # the worker thread/task instance (may be None)

    @property
    def ThreadName(self):
        return self.Name

    @ThreadName.setter
    def ThreadName(self, name):
        self.Name = name

    @property
    def Thread_Object(self):
        return self.Object

    @Thread_Object.setter
    def Thread_Object(self, thread):
        self.Object = thread
class Cmd_Process(threading.Thread):
    # Worker thread for the locker cabinet: pops ';'-separated command
    # strings off the shared ``Cmd`` list and drives the fingerprint reader,
    # NFC tag reader and IO-board relay outputs accordingly.
    # NOTE(review): indentation of this block was reconstructed from a
    # whitespace-mangled source; the nesting below is the most plausible
    # reading and should be confirmed against the original package.

    # NOTE(review): these class attributes are shadowed by instance state
    # (``condition`` is reassigned in __init__); ``threading.Thread()`` as a
    # placeholder for a Condition looks accidental but is harmless.
    exit_event = threading.Event()
    condition = threading.Thread()

    def __init__(self, finger, pn532, Cmd, condition, lst_input, lstLock, exitEvent, input1, input2, output1, output2,
                 host, Port, uart, tinhieuchot):
        threading.Thread.__init__(self)
        self.finger = finger            # fingerprint sensor handle
        self.pn532 = pn532              # NFC (PN532) reader handle
        self.Cmd = Cmd                  # shared list of pending command strings
        self.condition = condition      # condition variable guarding ``Cmd``
        self.ListThread = []
        self.lstinput = lst_input       # shared dict: locker id -> state flag
        self.lstLock = lstLock          # lock protecting ``lstinput``
        self._Exit = exitEvent          # set to request shutdown
        self._input1 = input1           # IO-board inputs for lockers 1-16
        self._input2 = input2           # IO-board inputs for lockers 17+
        self._output1 = output1         # IO-board outputs for lockers 1-16
        self._output2 = output2         # IO-board outputs for lockers 17+
        self.host = host                # status-server address
        self.Port = Port                # status-server port
        self.uart = uart                # serial link for the fingerprint task
        self.tinhieuchot = tinhieuchot  # latch feedback signal

    @property
    def Exit(self):
        return self._Exit

    @Exit.setter
    def Exit(self, exitEvent):
        self._Exit = exitEvent

    @property
    def Host(self):
        return self.host

    @Host.setter
    def Host(self, host):
        self.host = host

    # Holders for the currently active worker threads.
    ThreadFinger = Class_Thread(None, None)  # used to hold the fingerprint worker thread
    ThreadTag = Class_Thread(None, None)     # used to hold the NFC tag worker thread
    KiemsoatSoLanDocSaiThe = 0               # counter of consecutive bad tag reads

    def doConnect(self):
        # Open a TCP socket to the status server and announce this client
        # with a fixed handshake (4-byte big-endian length prefix followed
        # by the payload).  On failure the socket is closed but still
        # returned; callers reconnect via doConnect() after send errors.
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        try:
            sock.connect((self.host, self.Port))
            ThreadMain = '<id>121</id><type>socket</type><data>send</data>'  # <type>socket</type><data>send</data>
            ThreadMain = ThreadMain.encode('utf-8')
            size = len(ThreadMain)
            sock.sendall(size.to_bytes(4, byteorder='big'))
            sock.sendall(ThreadMain)
        except socket.error:
            sock.close()
            pass
        return sock

    def run(self):
        sock_send = self.doConnect()
        # Pre-built (not yet started) worker tasks, handed to the holders on
        # the first matching command.
        t1 = MyTask_Finger.MyTask_Finger(
            finger=self.finger,
            namefileImg="fingerprint.jpg",
            lstInput=self.lstinput,
            lstLock=self.lstLock,
            input1=self._input1,
            input2=self._input2,
            output1=self._output1,
            output2=self._output2,
            host=self.host,
            Port=self.Port,
            uart=self.uart,
            main=self
        )
        t2 = MyTask_Tag.MyTask_Tag(
            lstInput=self.lstinput,
            lstLock=self.lstLock,
            host=self.host,
            Port=self.Port,
            input1=self._input1,
            input2=self._input2,
            output1=self._output1,
            output2=self._output2,
            Pn532=self.pn532,
            main=self
        )
        while 1:
            if self._Exit.is_set():
                break
            self.condition.acquire()
            # Inner loop runs a single pass (note the unconditional break at
            # the bottom); every command branch also breaks out explicitly.
            while 1:
                if len(self.Cmd) > 0:
                    # Command record layout: id;verb;status;locker (e.g.
                    # "<id>;Fused;OK;3").
                    dta = self.Cmd.pop().split(";")
                    if (dta[1] == 'Fused' or dta[1] == 'Cused') and dta[2] == "OK":
                        # Locker confirmed in use: mark it, raise its relay,
                        # then close it on a helper thread.
                        self.lstLock.acquire()
                        sic1 = {dta[3]: 1}
                        Func.UpdateDict(sic1, self.lstinput)
                        self.lstLock.release()
                        try:
                            # Lockers above 16 live on the second IO board.
                            if int(dta[3]) > 16:
                                self._output2[int(dta[3]) - 17].value = True
                            else:
                                self._output1[int(dta[3]) - 1].value = True
                            t10 = threading.Thread(
                                target=Func.CloseLocker,
                                args=[dta, self.host, self.Port, self._output1, self._output2, self._input1,
                                      self._input2, self.tinhieuchot]
                            )
                            t10.start()
                        except Exception as Loi3:
                            # "Loi Chua co Board Io" = "error: no IO board yet"
                            print('Loi Chua co Board Io', str(Loi3))
                        break
                    if (dta[1] == 'Fused' and dta[2] != "OK") or dta[1] == 'Fopen' or dta[1] == 'FDK':
                        # Fingerprint-related command: hand it to the finger
                        # worker, (re)creating the worker as necessary.
                        if self.ThreadFinger.ThreadName is None:
                            self.ThreadFinger.ThreadName = 'th'
                            self.ThreadFinger.Thread_Object = t1
                            break
                        elif self.ThreadFinger.Name != 'finger_print':
                            # Stop any tag worker plus the stale finger
                            # worker, then start a fresh finger task.
                            if self.ThreadTag.ThreadName is not None:
                                self.ThreadTag.Thread_Object.raise_exception()
                            self.ThreadFinger.Thread_Object.raise_exception()
                            t11 = MyTask_Finger.MyTask_Finger(
                                finger=self.finger, namefileImg="fingerprint.jpg",
                                lstInput=self.lstinput, lstLock=self.lstLock,
                                input1=self._input1, input2=self._input2,
                                output1=self._output1, output2=self._output2,
                                host=self.host, Port=self.Port,
                                uart=self.uart, main=self
                            )
                            self.ThreadFinger.ThreadName = 'finger_print'
                            self.ThreadFinger.Thread_Object = t11
                            self.ThreadFinger.Thread_Object.mes = dta
                            self.ThreadFinger.Thread_Object.TypeRead = dta[1]
                            self.ThreadFinger.Thread_Object.start()
                            break
                        elif self.ThreadFinger.Thread_Object.is_alive():
                            # Worker already running: just forward the message.
                            self.ThreadFinger.Thread_Object.mes = dta
                            self.ThreadFinger.Thread_Object.TypeRead = dta[1]
                    if (dta[1] == 'Cused' and dta[2] != "OK") or dta[1] == 'Copen':
                        # Card/tag-related command: mirror of the finger logic.
                        if self.ThreadTag.ThreadName is None:
                            self.ThreadTag.ThreadName = 'te'
                            self.ThreadTag.Thread_Object = t2
                            break
                        elif self.ThreadTag.ThreadName != 'Cused':
                            if self.ThreadFinger.ThreadName is not None:
                                self.ThreadFinger.Thread_Object.raise_exception()
                            self.ThreadTag.Thread_Object.raise_exception()
                            t21 = MyTask_Tag.MyTask_Tag(
                                lstInput=self.lstinput, lstLock=self.lstLock,
                                host=self.host, Port=self.Port,
                                input1=self._input1, input2=self._input2,
                                output1=self._output1, output2=self._output2,
                                Pn532=self.pn532, main=self
                            )
                            self.ThreadTag.ThreadName = 'Cused'
                            self.ThreadTag.Thread_Object = t21
                            self.ThreadTag.Thread_Object.mes = dta
                            self.ThreadTag.Thread_Object.TypeRead = dta[1]
                            self.ThreadTag.Thread_Object.start()
                            break
                        elif self.ThreadTag.Thread_Object.is_alive():
                            self.ThreadTag.Thread_Object.mes = dta
                            self.ThreadTag.Thread_Object.TypeRead = dta[1]
                    if dta[1] == 'Cancel':
                        # Cancel: mark the locker free again.
                        self.lstLock.acquire()
                        sic1 = {dta[2]: 0}
                        Func.UpdateDict(sic1, self.lstinput)
                        self.lstLock.release()
                        break
                    if dta[1] == 'Pused':
                        # Pin-based use: same relay/close sequence as Fused/Cused
                        # but the locker id is in field 2.
                        self.lstLock.acquire()
                        sic1 = {dta[2]: 1}
                        Func.UpdateDict(sic1, self.lstinput)
                        self.lstLock.release()
                        try:
                            if int(dta[2]) > 16:
                                self._output2[int(dta[2]) - 17].value = True
                            else:
                                self._output1[int(dta[2]) - 1].value = True
                            t5 = threading.Thread(
                                target=Func.CloseLocker,
                                args=[dta, self.host, self.Port, self._output1, self._output2, self._input1,
                                      self._input2, self.tinhieuchot]
                            )
                            t5.start()
                        except Exception as Loi2:
                            print('Loi Chua co Board Io', str(Loi2))
                        break
                    if dta[1] == "Dooropen":
                        # Door-open request: free the locker, pulse its relay
                        # for 0.3s, then report back to the status server.
                        self.lstLock.acquire()
                        sic1 = {dta[2]: 0}
                        Func.UpdateDict(sic1, self.lstinput)
                        self.lstLock.release()
                        try:
                            if int(dta[2]) > 16:
                                self._output2[int(dta[2]) - 17].value = True
                                time.sleep(0.3)
                                self._output2[int(dta[2]) - 17].value = False
                            else:
                                self._output1[int(dta[2]) - 1].value = True
                                time.sleep(0.3)
                                self._output1[int(dta[2]) - 1].value = False
                            Chuoi = bytes(TaiCauTruc(dta[0], 'Dooropen', dta[2], GetData=3), 'utf-8')
                            print(Chuoi)
                            # Length-prefixed send, matching doConnect().
                            sock_send.sendall(len(Chuoi).to_bytes(4, 'big'))
                            sock_send.sendall(Chuoi)
                        except socket.error:
                            # Server unreachable: back off and reconnect.
                            time.sleep(3)
                            sock_send = self.doConnect()
                        except Exception as Loi1:
                            print('Loi Chua co Board Io', str(Loi1))
                            sock_send = self.doConnect()
                        break
                break
            # Sleep until a producer notifies the condition with new commands.
            self.condition.wait()
            self.condition.release()
/Kuyruk-9.4.1.tar.gz/Kuyruk-9.4.1/kuyruk/signals.py | from blinker import Signal
#: Sent when the task decorator is applied.
#:
#: Arguments:
#: * sender: Kuyruk object
#: * task: Task object
task_init = Signal()

#: Sent before the task is applied.
#:
#: Arguments:
#: * sender: Kuyruk object
#: * task: Task object
#: * args: Positional arguments to the task
#: * kwargs: Keyword arguments to the task
task_preapply = Signal()

#: Sent after the task is applied.
#:
#: Arguments:
#: * sender: Kuyruk object
#: * task: Task object
#: * args: Positional arguments to the task
#: * kwargs: Keyword arguments to the task
task_postapply = Signal()

#: Sent before the wrapped function is executed.
#:
#: Arguments:
#: * sender: Kuyruk object
#: * task: Task object
#: * args: Positional arguments to the task
#: * kwargs: Keyword arguments to the task
task_prerun = Signal()

#: Sent after the wrapped function is executed.
#:
#: Arguments:
#: * sender: Kuyruk object
#: * task: Task object
#: * args: Positional arguments to the task
#: * kwargs: Keyword arguments to the task
task_postrun = Signal()

#: Sent when the wrapped function is returned.
#:
#: Arguments:
#: * sender: Kuyruk object
#: * task: Task object
#: * args: Positional arguments to the task
#: * kwargs: Keyword arguments to the task
task_success = Signal()

#: Sent when the wrapped function raises an exception.
#:
#: Arguments:
#: * sender: Kuyruk object
#: * task: Task object
#: * args: Positional arguments to the task
#: * kwargs: Keyword arguments to the task
#: * exc_info: Return value of ``sys.exc_info()``
task_error = Signal()

#: Sent when the task fails after all retries (if any).
#:
#: Arguments:
#: * sender: Kuyruk object
#: * task: Task object
#: * args: Positional arguments to the task
#: * kwargs: Keyword arguments to the task
#: * exc_info: Return value of ``sys.exc_info()``
task_failure = Signal()

#: Sent before the task is sent to queue.
#:
#: Arguments:
#: * sender: Kuyruk object
#: * task: Task object
#: * args: Positional arguments to the task
#: * kwargs: Keyword arguments to the task
#: * description: dict representation of the task
task_presend = Signal()

#: Sent after the task is sent to queue.
#:
#: Arguments:
#: * sender: Kuyruk object
#: * task: Task object
#: * args: Positional arguments to the task
#: * kwargs: Keyword arguments to the task
#: * description: dict representation of the task
task_postsend = Signal()

#: Sent when a task fails while being processed by a worker.
#:
#: Arguments:
#: * sender: Kuyruk object
#: * worker: The Worker object
#: * task: Task object
#: * args: Positional arguments to the task
#: * kwargs: Keyword arguments to the task
#: * description: dict representation of the task
#: * exc_info: Return value of ``sys.exc_info()``
worker_failure = Signal()

#: Sent when the worker is initialized.
#:
#: Arguments:
#: * sender: Kuyruk object
#: * worker: The Worker object
worker_init = Signal()

#: Sent when the worker is started.
#:
#: Arguments:
#: * sender: Kuyruk object
#: * worker: The Worker object
worker_start = Signal()

#: Sent when the worker shuts down.
#:
#: Arguments:
#: * sender: Kuyruk object
#: * worker: The Worker object
worker_shutdown = Signal()
define(
    [ 'aloha', 'aloha/jquery' ],
    function ( Aloha, jQuery ) {
        'use strict'

        /**
         * Internal data as array with following format:
         *
         * [
         *   { name: 'Aloha Editor - The HTML5 Editor', url:'http://aloha-editor.com', type:'website' },
         *   { name: 'Aloha Logo', url:'http://www.aloha-editor.com/images/aloha-editor-logo.png', type:'image' }
         * ];
         *
         * @private
         */
        var urlset = [
            { name: 'Aloha Test', url: '#alohatest', type: 'website' },
            { name: 'Test One', url: '#test1', type: 'website' },
            { name: 'Test Two', url: '#test2', type: 'website' },
            { name: 'Test Three', url: '#test3', type: 'website' },
            { name: 'Test Four', url: '#test4', type: 'image' }
        ];

        // Demo Aloha repository serving the hard-coded link list above.
        // Every query/getChildren answer is delayed by 2 seconds (see the
        // window.setTimeout calls) to exercise asynchronous UI code paths —
        // hence "slowlinklist".
        new ( Aloha.AbstractRepository.extend( {

            _constructor: function () {
                this._super( 'slowlinklist' );
            },

            /**
             * Internal folder structure
             * @hide
             */
            folder: [],

            /**
             * initalize LinkList, parse all links, build folder structure and add
             * additional properties to the items
             */
            init: function () {
                // Add ECMA262-5 Array method filter if not supported natively.
                // But we will be very conservative and add to this single array
                // object so that we do not tamper with the native Array prototype
                // object
                if ( !( 'filter' in Array.prototype ) ) {
                    urlset.filter = function ( filter, that /*opt*/ ) {
                        var other = [],
                            v,
                            i = 0,
                            n = this.length;
                        for ( ; i < n; i++ ) {
                            if ( i in this && filter.call( that, v = this[ i ], i, this ) ) {
                                other.push( v );
                            }
                        }
                        return other;
                    };
                }

                var l = urlset.length;

                // generate folder structure
                for ( var i = 0; i < l; ++i ) {
                    var e = urlset[ i ];
                    e.repositoryId = this.repositoryId;
                    e.id = e.id ? e.id : e.url;

                    var u = e.uri = this.parseUri( e.url ),
                        // add hostname as root folder
                        path = this.addFolder( '', u.host ),
                        pathparts = u.path.split( '/' );

                    for ( var j = 0; j < pathparts.length; j++ ) {
                        if ( pathparts[ j ] &&
                             // It's a file because it has an extension.
                             // Could improve this one :)
                             pathparts[ j ].lastIndexOf( '.' ) < 0 ) {
                            path = this.addFolder( path, pathparts[ j ] );
                        }
                    }

                    e.parentId = path;
                    // wrap each raw entry in a RepositoryDocument
                    urlset[ i ] = new Aloha.RepositoryDocument( e );
                }

                this.repositoryName = 'Linklist';
            },

            /**
             * Register (once) and return the folder id for name under path.
             *
             * @param {String} path
             * @param {String} name
             * @return {String}
             */
            addFolder: function ( path, name ) {
                // NOTE(review): the local 'type' variable is computed but never
                // used — the folder is always created with type 'host'.
                var type = path ? 'folder' : 'hostname',
                    p = path ? path + '/' + name : name;

                if ( name && !this.folder[ p ] ) {
                    this.folder[ p ] = new Aloha.RepositoryFolder( {
                        id: p,
                        name: name || p,
                        parentId: path,
                        type: 'host',
                        repositoryId: this.repositoryId
                    } );
                }

                return p;
            },

            /**
             * Searches a repository for object items matching query if
             * objectTypeFilter. If none is found it returns null.
             *
             * @param {Object} p
             * @param {Function} callback
             */
            query: function ( p, callback ) {
                // Not supported; filter, orderBy, maxItems, skipcount, renditionFilter
                var r = new RegExp( p.queryString, 'i' );

                var d = urlset.filter( function ( e, i, a ) {
                    return (
                        ( !p.queryString || e.name.match( r ) || e.url.match( r ) ) &&
                        ( !p.objectTypeFilter || ( !p.objectTypeFilter.length ) || jQuery.inArray( e.type, p.objectTypeFilter ) > -1 ) &&
                        true //( !p.inFolderId || p.inFolderId == e.parentId )
                    );
                } );

                // artificial 2s delay before answering
                window.setTimeout( function () {
                    callback.call( this, d );
                }, 2000 );
            },

            /**
             * returns the folder structure as parsed at init
             *
             * @param {Object} p
             * @param {Function} callback
             */
            getChildren: function ( p, callback ) {
                var d = [],
                    e;

                for ( e in this.folder ) {
                    // NOTE(review): 'l' is assigned but never used.
                    var l = this.folder[ e ].parentId;

                    if ( typeof this.folder[ e ] != 'function' && ( // extjs prevention
                         this.folder[ e ].parentId == p.inFolderId || // all subfolders
                         ( !this.folder[ e ].parentId && p.inFolderId == this.repositoryId ) // the hostname
                    ) ) {
                        d.push( this.folder[ e ] );
                    }
                }

                // artificial 2s delay before answering
                window.setTimeout( function () {
                    callback.call( this, d );
                }, 2000 );
            },

            //parseUri 1.2.2
            //(c) Steven Levithan <stevenlevithan.com>
            //MIT License
            //http://blog.stevenlevithan.com/archives/parseuri
            parseUri: function(str) {
                var o = {
                        strictMode: false,
                        key: [ "source","protocol","authority","userInfo","user","password","host","port","relative","path","directory","file","query","anchor"],
                        q: {
                            name: "queryKey",
                            parser: /(?:^|&)([^&=]*)=?([^&]*)/g
                        },
                        parser: {
                            strict: /^(?:([^:\/?#]+):)?(?:\/\/((?:(([^:@]*)(?::([^:@]*))?)?@)?([^:\/?#]*)(?::(\d*))?))?((((?:[^?#\/]*\/)*)([^?#]*))(?:\?([^#]*))?(?:#(.*))?)/,
                            loose: /^(?:(?![^:@]+:[^:@\/]*@)([^:\/?#.]+):)?(?:\/\/)?((?:(([^:@]*)(?::([^:@]*))?)?@)?([^:\/?#]*)(?::(\d*))?)(((\/(?:[^?#](?![^?#\/]*\.[^?#\/.]+(?:[?#]|$)))*\/?)?([^?#\/]*))(?:\?([^#]*))?(?:#(.*))?)/
                        }
                    },
                    m = o.parser[o.strictMode ? "strict" : "loose"].exec(str),
                    uri = {},
                    i = 14;

                while (i--) uri[o.key[i]] = m[i] || "";

                uri[o.q.name] = {};
                uri[o.key[12]].replace(o.q.parser, function ($0, $1, $2) {
                    if ($1) uri[o.q.name][$1] = $2;
                });

                return uri;
            },

            /**
             * Get the repositoryItem with given id
             * Callback: {GENTICS.Aloha.Repository.Object} item with given id
             * @param itemId {String} id of the repository item to fetch
             * @param callback {function} callback function
             */
            getObjectById: function ( itemId, callback ) {
                var i = 0,
                    l = urlset.length,
                    d = [];

                for ( ; i < l; i++ ) {
                    if ( urlset[ i ].id == itemId ) {
                        d.push( urlset[ i ] );
                    }
                }

                // no artificial delay here, unlike query/getChildren
                callback.call( this, d );
            }
        } ) )();
    } );
/MusicOnPolytopes-0.1.0-py3-none-any.whl/polytopes/voiceleading_utilities.py | import random
_VERYLARGENUMBER = 1000000 # effectively infinity
_MODULUS = 12 # size of the octave
_HALFMODULUS = int(0.5 + _MODULUS/2.0)
"""
voiceleading_utilities version 1.0, (c) 2015 by Dmitri Tymoczko
Voiceleading_utilities is free software: you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License
as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. Voiceleading_utilities
is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser
General Public License along with Voiceleading_utilities. If not, see <http://www.gnu.org/licenses/>.
A set of routines that identify the minimal voice leadings between sets of pitches and pitch classes.
1. bijective_vl finds the best bijective voice leading between pitch-class sets assuming a fixed number of voices
- use this if you want to control the number of voices exactly,
e.g., a 3-voice voice leading from [C, E, G] to [F, A, C], or
a 4-voice voice leading from [G, B, D, F] to [C, C, E, G]
- this routine will also rank all the voice-leadings by size, so that, e.g. you can use the second-most efficient voice leading if you want
NB: this routine passes back pairs of the form [startPC, path]
2. voicelead takes an input set of pitches and a target set of PCs, and outputs a set of pitches;
- this is useful if you are generating music, and have a specific C-major chord in register; it will tell you where each voice should go
there is an option here to randomly choose one of the N most efficient voice leadings, so you are not always restricted to the most efficient ones
3. nonbijective_vl allows notes to be doubled; sometimes this produces a more efficient voice leading than a bijective voice leading
- for this reason, you cannot always control the number of voices
NB: this routine passes back pairs of PCs, from which you may need to calculate paths
(For details on the nonbijective_vl algorithm, see Tymoczko, D., "The Geometry of Musical Chords", Science, 2006.)
Sometimes, you want something in between, e.g. the best 4-voice voice leading between triads or from a 4-voice seventh to a triad; in this case,
you need to iterate bijective_vl over all possible doublings of the chords. This can be time consuming.
TODO: allow different choices of metric
"""
"""==============================================================================================================================
bijective_vl expects two SORTED equal-length sets of integers representing PCs (in any modulus).
the sort parameter sorts the possible bijective VLs by size; by default it is set to False. Set it to true only if you want to choose from
among the n most efficient VLs"""
def bijective_vl(firstPCs, secondPCs, sort=False):
    """Find the minimal bijective voice leading between two sorted,
    equal-length PC lists.

    Returns a list of ``[startPC, path]`` pairs describing the smallest
    voice leading, or ``False`` if the inputs differ in length.  Side
    effects are kept for backward compatibility (``voicelead`` relies on
    them):

    * ``bijective_vl.fullList`` -- every rotation's ``[pairs, size]``,
      sorted by size when ``sort`` is True.
    * ``bijective_vl.size`` -- total size of the best voice leading.
    """
    if len(firstPCs) != len(secondPCs):
        return False
    bijective_vl.fullList = []          # collects all the bijective VLs along with their size
    currentBest = []                    # best VL found so far
    currentBestSize = _VERYLARGENUMBER  # its size (starts at infinity)
    # Each rotation of the target chord induces one bijective pairing.
    # (Fix: the outer loop previously reused ``i``, shadowed by the inner
    # loop variable -- renamed for clarity; behavior is unchanged.)
    for _rotation in range(len(firstPCs)):
        secondPCs = secondPCs[-1:] + secondPCs[:-1]
        newSize = 0
        newPaths = []
        for i in range(len(firstPCs)):
            # Most efficient path for the pair, folded into
            # (-_HALFMODULUS, _HALFMODULUS]: negative = descending.
            path = (secondPCs[i] - firstPCs[i]) % _MODULUS
            if path > _HALFMODULUS:
                path -= _MODULUS
            newPaths.append([firstPCs[i], path])
            newSize += abs(path)
        bijective_vl.fullList.append([newPaths, newSize])
        if newSize < currentBestSize:   # record the current best
            currentBestSize = newSize
            currentBest = newPaths
    bijective_vl.size = currentBestSize
    if sort:
        bijective_vl.fullList = sorted(bijective_vl.fullList, key=lambda x: x[1])
    return currentBest
"""==============================================================================================================================
voicelead expects a source list of PITCHES and a target list of PCs, both should be the same length; it outputs one of the topN most efficient voice leadings
from the source pitches to the target PCs.
if topN is 1, it gives you the most efficient voice leading"""
def voicelead(inPitches, targetPCs, topN=1):
    """Map concrete source pitches onto target PCs.

    Picks one of the ``topN`` most efficient bijective voice leadings
    (the single best when ``topN == 1``) and returns the resulting
    pitches, preserving the register of each input pitch.
    """
    sourcePCs = sorted(p % _MODULUS for p in inPitches)
    paths = bijective_vl(sourcePCs, sorted(targetPCs), topN != 1)
    if topN != 1:
        # Randomly choose among the N smallest candidates.
        candidates = min(len(bijective_vl.fullList), topN)
        paths = bijective_vl.fullList[random.randrange(0, candidates)][0]
    remaining = paths[:]
    result = []
    for pitch in inPitches:
        pc = pitch % _MODULUS
        # Consume one matching path per pitch so duplicated PCs each get
        # their own voice.
        for pair in remaining:
            if pair[0] == pc:
                result.append(pitch + pair[1])
                remaining.remove(pair)
                break
    return result
"""==============================================================================================================================
nonbijective_vl expects a source list of PCs or pitches and a target list of PCs or pitches, of any lengths; it outputs the most efficient voice leading from
source to target. Voices can be arbitrarily doubled.
To see why this is interesting, compare bijective_vl([0, 4, 7, 11], [4, 8, 11, 3]) to nonbijective_vl([0, 4, 7, 11], [4, 8, 11, 3])
for PCs, nonbijective_vl iterates over every inversion of the target chord; for each inversion it builds a matrix showing the most efficient voice leading
such that the first note of source goes to the first note of target (see Tymoczko "The Geometry of Musical Chords" for details)
TODO: choose the smaller of source and target to iterate over??
"""
def nonbijective_vl(source, target, pcs=True):
    """Most efficient, possibly non-bijective voice leading from source
    to target (notes may be doubled).

    Returns ``(size, voice_leading)`` where ``voice_leading`` is a list
    of ``[sourceNote, targetNote]`` pairs.  With ``pcs=True`` the inputs
    are reduced to pitch classes and every rotation of the target is
    tried; with ``pcs=False`` the inputs are treated as pitches.
    """
    curVL = []
    curSize = _VERYLARGENUMBER
    if pcs:
        source = [x % _MODULUS for x in source]
        target = [x % _MODULUS for x in target]
    # Remove duplicates; the algorithm reintroduces doublings itself.
    source = sorted(list(set(source)))
    target = sorted(list(set(target)))
    if pcs:
        for i in range(len(target)):  # for PCs, iterate over every inversion of the target
            tempTarget = target[i:] + target[:i]
            newSize = build_matrix(source, tempTarget)  # generate the matrix for this pairing
            if newSize < curSize:  # save it if it is the most efficient we've found
                curSize = newSize
                curVL = find_matrix_vl()
        # drop the wrap-around duplicate appended by build_matrix
        curVL = curVL[:-1]
    else:
        # BUG FIX: this branch previously called
        # build_matrix(source, tempTarget) -- ``tempTarget`` is only bound
        # in the PC branch, so pitch input raised NameError; it also needs
        # pcs=False so distances are linear rather than circular.
        curSize = build_matrix(source, target, pcs=False)  # no need to iterate for pitches
        curVL = find_matrix_vl()
    return curSize, curVL
def build_matrix(source, target, pcs=True):  # requires sorted source and target chords
    # Dynamic-programming table for the minimal (possibly doubled) voice
    # leading that sends source[0] to target[0]; see Tymoczko, "The
    # Geometry of Musical Chords" (Science, 2006).  Results are left in
    # module globals so find_matrix_vl() can backtrack the pairing.
    global theMatrix
    global outputMatrix
    global globalSource
    global globalTarget
    if pcs:
        # Duplicate the first note so the mapping wraps around the octave.
        source = source + [source[0]]
        target = target + [target[0]]
        # circular (pitch-class) distance
        distanceFunction = lambda x, y: min((x - y) % _MODULUS, (y - x) % _MODULUS)  # add **2 for Euclidean distance
    else:
        # linear (pitch) distance
        distanceFunction = lambda x, y: abs(x - y)
    globalSource = source
    globalTarget = target
    # theMatrix[i][j] = cost of pairing target[i] with source[j]
    theMatrix = []
    for targetItem in target:
        theMatrix.append([])
        for sourceItem in source:
            theMatrix[-1].append(distanceFunction(targetItem, sourceItem))
    # outputMatrix[i][j] = cheapest cumulative cost of a monotone path from
    # the top-left cell to (i, j); allowed moves are right, down, diagonal.
    outputMatrix = [x[:] for x in theMatrix]
    for i in range(1, len(outputMatrix[0])):
        outputMatrix[0][i] += outputMatrix[0][i-1]
    for i in range(1, len(outputMatrix)):
        outputMatrix[i][0] += outputMatrix[i-1][0]
    for i in range(1, len(outputMatrix)):
        for j in range(1, len(outputMatrix[i])):
            outputMatrix[i][j] += min([outputMatrix[i][j-1], outputMatrix[i-1][j], outputMatrix[i-1][j-1]])
    # NOTE(review): relies on i and j keeping their final loop values; a
    # 1x1 matrix would raise NameError here -- confirm inputs always have
    # at least two rows and columns.
    return outputMatrix[i][j] - theMatrix[i][j]
def find_matrix_vl():  # identifies the voice leading for each matrix
    # Backtrack from the bottom-right cell of outputMatrix (built by
    # build_matrix) to the top-left, always stepping to the cheapest
    # neighboring cell, and collect the [source, target] pairs visited.
    theVL = []
    i = len(outputMatrix) - 1
    j = len(outputMatrix[i-1]) - 1
    theVL.append([globalSource[j], globalTarget[i]])
    while (i > 0 or j > 0):
        newi = i
        newj = j
        myMin = _VERYLARGENUMBER
        if i > 0 and j > 0:
            # prefer the diagonal, then up, then left, on ties
            newi = i - 1
            newj = j - 1
            myMin = outputMatrix[i-1][j-1]
            if outputMatrix[i-1][j] < myMin:
                myMin = outputMatrix[i-1][j]
                newj = j
            if outputMatrix[i][j - 1] < myMin:
                myMin = outputMatrix[i][j-1]
                newi = i
            i = newi
            j = newj
        elif i > 0:
            # left edge: can only move up
            i = i - 1
        elif j > 0:
            # top edge: can only move left
            j = j - 1
        theVL.append([globalSource[j], globalTarget[i]])
    # pairs were collected end-to-start; reverse into forward order
    return theVL[::-1]
"""==============================================================================================================================
A simple routine to put voice leadings in 'normal form.' Essentially, we just apply the standard "left-packing" algorithm to the first element
in a list of [startPC, path] pairs.
"""
def vl_normal_form(inList):  # list of [PC, path] pairs
    # Put a voice leading into "normal form" by applying the standard
    # left-packing algorithm to the first element of each [startPC, path]
    # pair: try every rotation, transpose so the first PC is 0, and keep
    # the lexicographically most packed result.  The winning transposition
    # is stored on vl_normal_form.transposition as a side effect.
    myList = sorted([[k[0] % _MODULUS] + k[1:] for k in inList])
    currentBest = [[(k[0] - myList[0][0]) % _MODULUS] + k[1:] for k in myList]
    vl_normal_form.transposition = myList[0][0] * -1
    for i in range(1, len(myList)):
        newChallenger = myList[-i:] + myList[:-i]
        transp = newChallenger[0][0] * -1
        newChallenger = sorted([[(k[0] - newChallenger[0][0]) % _MODULUS] + k[1:] for k in newChallenger])
        # Compare against the current best from the largest element down;
        # smaller top values mean a more tightly packed form.
        for j in reversed(range(len(myList))):
            if newChallenger[j][0] < currentBest[j][0]:
                currentBest = newChallenger
                vl_normal_form.transposition = transp
            else:
                if newChallenger[j][0] > currentBest[j][0]:
                    break
    return currentBest
/AnkiServer-2.0.6.tar.gz/AnkiServer-2.0.6/anki-bundled/oldanki/sound.py | __docformat__ = 'restructuredtext'
import re, sys, threading, time, subprocess, os, signal, errno, atexit
import tempfile, shutil
from oldanki.hooks import addHook, runHook
# Shared utils
##########################################################################
def playFromText(text):
    """Queue every [sound:...] reference embedded in *text* for playback."""
    for filename in re.findall(r"\[sound:(.*?)\]", text):
        play(filename)
def stripSounds(text):
    """Return *text* with every [sound:...] tag removed."""
    return re.sub(r"\[sound:.*?\]", "", text)
def hasSound(text):
    """Return True when *text* contains at least one [sound:...] tag."""
    return bool(re.search(r"\[sound:.*?\]", text))
##########################################################################
# Recording / processing configuration (values are sox/lame CLI strings)
# the amount of noise to cancel
NOISE_AMOUNT = "0.1"
# the amount of amplification
NORM_AMOUNT = "-3"
# the amount of bass
BASS_AMOUNT = "+0"
# the amount to fade at end
FADE_AMOUNT = "0.25"

noiseProfile = ""           # path of the sox noise profile; "" = none yet
processingSrc = "rec.wav"   # raw microphone capture
processingDst = "rec.mp3"   # final encoded output
processingChain = []
recFiles = ["rec2.wav", "rec3.wav"]  # intermediate processing files
# NOTE(review): this module-level ``cmd`` and the empty ``processingChain``
# above appear redundant -- both are superseded by the assignments below
# and by checkForNoiseProfile().
cmd = ["sox", processingSrc, "rec2.wav"]
# Steps: [0] noise reduction (filled in by checkForNoiseProfile),
# [1] normalize/bass/fade, [2] mp3 encode.
processingChain = [
    None, # placeholder
    ["sox", "rec2.wav", "rec3.wav", "norm", NORM_AMOUNT,
     "bass", BASS_AMOUNT, "fade", FADE_AMOUNT],
    ["lame", "rec3.wav", processingDst, "--noreplaygain", "--quiet"],
]
tmpdir = None

# don't show box on windows
if sys.platform == "win32":
    si = subprocess.STARTUPINFO()
    try:
        si.dwFlags |= subprocess.STARTF_USESHOWWINDOW
    except:
        # python2.7+
        si.dwFlags |= subprocess._subprocess.STARTF_USESHOWWINDOW
    # tmp dir for non-hashed media
    tmpdir = unicode(
        tempfile.mkdtemp(prefix="oldanki"), sys.getfilesystemencoding())
else:
    si = None

if sys.platform.startswith("darwin"):
    # make sure lame, which is installed in /usr/local/bin, is in the path
    os.environ['PATH'] += ":" + "/usr/local/bin"
    dir = os.path.dirname(os.path.abspath(__file__))
    dir = os.path.abspath(dir + "/../../../..")
    os.environ['PATH'] += ":" + dir + "/audio"
def retryWait(proc):
    """wait() on *proc* until it succeeds, retrying on OSError.

    OS X frequently interrupts wait() with EINTR-style OSErrors; simply
    try again until the child is actually reaped.
    """
    while True:
        try:
            return proc.wait()
        except OSError:
            pass
# Noise profiles
##########################################################################
def checkForNoiseProfile():
    """Point the head of the processing chain at sox, applying noise
    reduction when a profile has been recorded.

    On OS X the whole chain is collapsed to a plain mp3 encode, since
    noise reduction is not currently supported there.
    """
    global processingChain
    if sys.platform.startswith("darwin"):
        # not currently supported
        processingChain = [
            ["lame", "rec.wav", "rec.mp3", "--noreplaygain", "--quiet"]]
        return
    cmd = ["sox", processingSrc, "rec2.wav"]
    if os.path.exists(noiseProfile):
        cmd = cmd + ["noisered", noiseProfile, NOISE_AMOUNT]
    processingChain[0] = cmd
def generateNoiseProfile():
    # Build a sox noise profile from the recording in processingSrc:
    # extract 1.5s of audio starting at 1.5s, feed it to "sox ... noiseprof",
    # then rewire the first processing step to apply noise reduction with
    # the new profile.
    try:
        os.unlink(noiseProfile)
    except OSError:
        # no previous profile to remove
        pass
    retryWait(subprocess.Popen(
        ["sox", processingSrc, recFiles[0], "trim", "1.5", "1.5"],
        startupinfo=si))
    retryWait(subprocess.Popen(["sox", recFiles[0], recFiles[1],
                                "noiseprof", noiseProfile],
                               startupinfo=si))
    processingChain[0] = ["sox", processingSrc, "rec2.wav",
                          "noisered", noiseProfile, NOISE_AMOUNT]
# Mplayer settings
##########################################################################
if sys.platform.startswith("win32"):
    mplayerCmd = ["mplayer.exe", "-ao", "win32", "-really-quiet"]
    dir = os.path.dirname(os.path.abspath(sys.argv[0]))
    os.environ['PATH'] += ";" + dir
    os.environ['PATH'] += ";" + dir + "\\..\\win\\top" # for testing
else:
    mplayerCmd = ["mplayer", "-really-quiet"]

# Mplayer in slave mode
##########################################################################
mplayerQueue = []               # pending encoded file paths to play
mplayerManager = None           # MplayerMonitor singleton (lazy-started)
mplayerReader = None            # MplayerReader singleton (lazy-started)
mplayerEvt = threading.Event()  # wakes the monitor/reader threads
mplayerClear = False            # request: replace rather than extend playlist
class MplayerReader(threading.Thread):
    "Read any debugging info to prevent mplayer from blocking."
    def run(self):
        # Daemon loop: whenever the event is set, drain mplayer's stdout so
        # the pipe buffer never fills up and stalls the player.
        while 1:
            mplayerEvt.wait()
            try:
                mplayerManager.mplayer.stdout.read()
            except:
                # manager/process may be gone or mid-restart; just retry
                pass
class MplayerMonitor(threading.Thread):
    # Owns the slave-mode mplayer process: starts it on demand, feeds it
    # "loadfile" commands from mplayerQueue, restarts it if it dies, and
    # reaps exited processes without blocking.
    def run(self):
        global mplayerClear
        self.mplayer = None      # current subprocess.Popen, or None
        self.deadPlayers = []    # exited/killed processes awaiting wait()
        while 1:
            mplayerEvt.wait()
            if mplayerQueue:
                # ensure started
                if not self.mplayer:
                    self.startProcess()
                # loop through files to play
                while mplayerQueue:
                    item = mplayerQueue.pop(0)
                    if mplayerClear:
                        # a clear was requested: replace the playlist
                        mplayerClear = False
                        extra = ""
                    else:
                        # " 1" = append to the existing playlist
                        extra = " 1"
                    cmd = 'loadfile "%s"%s\n' % (item, extra)
                    try:
                        self.mplayer.stdin.write(cmd)
                    except:
                        # mplayer has quit and needs restarting
                        self.deadPlayers.append(self.mplayer)
                        self.mplayer = None
                        self.startProcess()
                        self.mplayer.stdin.write(cmd)
            # wait() on finished processes. we don't want to block on the
            # wait, so we keep trying each time we're reactivated
            def clean(pl):
                if pl.poll() is not None:
                    pl.wait()
                    return False
                else:
                    return True
            self.deadPlayers = [pl for pl in self.deadPlayers if clean(pl)]
            mplayerEvt.clear()

    def kill(self):
        # Ask the slave process to quit; it is reaped later via clean().
        if not self.mplayer:
            return
        try:
            self.mplayer.stdin.write("quit\n")
            self.deadPlayers.append(self.mplayer)
        except:
            # already dead; nothing to do
            pass
        self.mplayer = None

    def startProcess(self):
        # Spawn mplayer in slave mode, idling until it receives commands.
        try:
            cmd = mplayerCmd + ["-slave", "-idle"]
            self.mplayer = subprocess.Popen(
                cmd, startupinfo=si, stdin=subprocess.PIPE,
                stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
        except OSError:
            mplayerEvt.clear()
            raise Exception("Audio player not found")
def queueMplayer(path):
    # Queue a sound file for playback and wake the monitor thread.
    ensureMplayerThreads()
    # wait for the monitor to finish handling the previous wake-up
    while mplayerEvt.isSet():
        time.sleep(0.1)
    if tmpdir and os.path.exists(path):
        # mplayer on windows doesn't like the encoding, so we create a
        # temporary file instead. oddly, foreign characters in the dirname
        # don't seem to matter.
        (fd, name) = tempfile.mkstemp(suffix=os.path.splitext(path)[1],
                                      dir=tmpdir)
        f = os.fdopen(fd, "wb")
        f.write(open(path, "rb").read())
        f.close()
        # it wants unix paths, too!
        path = name.replace("\\", "/")
        path = path.encode(sys.getfilesystemencoding())
    else:
        path = path.encode("utf-8")
    mplayerQueue.append(path)
    mplayerEvt.set()
    runHook("soundQueued")
def clearMplayerQueue():
    # Ask the monitor to replace (rather than append to) mplayer's playlist
    # with the next queued file.
    global mplayerClear
    mplayerClear = True
    mplayerEvt.set()
def ensureMplayerThreads():
    """Lazily start the mplayer monitor and reader daemon threads."""
    global mplayerManager, mplayerReader
    if mplayerManager:
        return
    mplayerManager = MplayerMonitor()
    mplayerManager.daemon = True
    mplayerManager.start()
    mplayerReader = MplayerReader()
    mplayerReader.daemon = True
    mplayerReader.start()
def stopMplayer():
    # Shut down the slave mplayer process, if one was ever started.
    if not mplayerManager:
        return
    mplayerManager.kill()
def onExit():
    # Remove the temporary media dir created on Windows (see queueMplayer).
    if tmpdir:
        shutil.rmtree(tmpdir)

# stop playback when the deck closes; clean temp files at interpreter exit
addHook("deckClosed", stopMplayer)
atexit.register(onExit)
# PyAudio recording
##########################################################################
try:
    import pyaudio
    import wave

    # capture parameters: 16-bit mono at 44.1kHz, default input device
    PYAU_FORMAT = pyaudio.paInt16
    PYAU_CHANNELS = 1
    PYAU_RATE = 44100
    PYAU_INPUT_INDEX = None
except:
    # pyaudio not installed: recording raises when actually used
    pass
class _Recorder(object):
    # Base class providing the post-recording pipeline: noise reduction,
    # normalize/bass/fade, and an optional mp3 encode (skipped when
    # encode=False).
    def postprocess(self, encode=True):
        self.encode = encode
        for c in processingChain:
            #print c
            if not self.encode and c[0] == 'lame':
                # mp3 encode not wanted; leave the wav as the result
                continue
            ret = retryWait(subprocess.Popen(c, startupinfo=si))
            if ret:
                # non-zero exit status from sox/lame
                raise Exception(_("""
Error processing audio.
If you're on Linux and don't have sox 14.1+, you
need to disable normalization. See the wiki.
Command was:\n""") + u" ".join(c))
class PyAudioThreadedRecorder(threading.Thread):
    # Captures microphone input into processingSrc (rec.wav) until the
    # owner sets self.finish.  NOTE: this module is Python 2 code (see the
    # "except IOError, e" syntax below).
    def __init__(self):
        threading.Thread.__init__(self)
        self.finish = False  # set by the owner to stop recording

    def run(self):
        chunk = 1024  # frames per read
        try:
            p = pyaudio.PyAudio()
        except NameError:
            # the module-level "import pyaudio" failed
            raise Exception(
                "Pyaudio not installed (recording not supported on OSX10.3)")
        stream = p.open(format=PYAU_FORMAT,
                        channels=PYAU_CHANNELS,
                        rate=PYAU_RATE,
                        input=True,
                        input_device_index=PYAU_INPUT_INDEX,
                        frames_per_buffer=chunk)
        all = []
        while not self.finish:
            try:
                data = stream.read(chunk)
            except IOError, e:
                # drop the chunk on input overflow instead of dying
                if e[1] == pyaudio.paInputOverflowed:
                    data = None
                else:
                    raise
            if data:
                all.append(data)
        stream.close()
        p.terminate()
        # flush the captured frames out as a wav file
        data = ''.join(all)
        wf = wave.open(processingSrc, 'wb')
        wf.setnchannels(PYAU_CHANNELS)
        wf.setsampwidth(p.get_sample_size(PYAU_FORMAT))
        wf.setframerate(PYAU_RATE)
        wf.writeframes(data)
        wf.close()
class PyAudioRecorder(_Recorder):
    # Drives PyAudioThreadedRecorder and exposes the processed result file.
    def __init__(self):
        # remove stale artifacts from any previous recording
        for t in recFiles + [processingSrc, processingDst]:
            try:
                os.unlink(t)
            except OSError:
                pass
        self.encode = False  # postprocess() may enable mp3 encoding

    def start(self):
        self.thread = PyAudioThreadedRecorder()
        self.thread.start()

    def stop(self):
        self.thread.finish = True
        self.thread.join()

    def file(self):
        # Return the processed recording; the mp3 gets a timestamped name
        # so successive recordings don't overwrite each other.
        if self.encode:
            tgt = "rec%d.mp3" % time.time()
            os.rename(processingDst, tgt)
            return tgt
        else:
            return recFiles[1]
# Audio interface
##########################################################################
# Indirection points so an alternative audio backend can be swapped in by
# reassigning these module-level callables.
_player = queueMplayer
_queueEraser = clearMplayerQueue

def play(path):
    """Queue the sound file at *path* on the current player backend."""
    _player(path)

def clearAudioQueue():
    """Drop any sounds still waiting to be played."""
    _queueEraser()

# The active recorder implementation.
# NOTE(review): removed a trailing "| PypiClean" artifact that had been
# appended to this assignment; it would raise NameError at import time.
Recorder = PyAudioRecorder
/Mainflux-0.0.1.tar.gz/Mainflux-0.0.1/lib/groups.py | import requests
from lib import response
from lib import errors
class Groups:
    """Thin client for the Mainflux groups HTTP API.

    Every method returns a ``response.Response``: on failure its
    ``error.status`` is set to 1 with a human-readable ``error.message``;
    on success ``value`` carries the result (ID, JSON body, ...).
    """

    def __init__(self, url):
        # Base URL of the Mainflux service, e.g. "http://localhost:8180".
        self.url = url

    def create(self, group, token):
        '''Creates group entity in the database'''
        mf_resp = response.Response()
        http_resp = requests.post(self.url + "/groups", json=group, headers={"Authorization": token})
        if http_resp.status_code != 201:
            mf_resp.error.status = 1
            mf_resp.error.message = errors.handle_error(errors.groups["create"], http_resp.status_code)
        else:
            # The new group's ID is the last segment of the Location header
            # ("/groups/<id>").
            location = http_resp.headers.get("location")
            mf_resp.value = location.split('/')[2]
        return mf_resp

    def get(self, group_id, token):
        '''Gets a group entity'''
        mf_resp = response.Response()
        http_resp = requests.get(self.url + "/groups/" + group_id, headers={"Authorization": token})
        if http_resp.status_code != 200:
            mf_resp.error.status = 1
            mf_resp.error.message = errors.handle_error(errors.groups["get"], http_resp.status_code)
        else:
            mf_resp.value = http_resp.json()
        return mf_resp

    def construct_query(self, params):
        '''Builds a query string from the supported pagination parameters.

        Returns "" when params is None or contains none of the supported
        keys; otherwise "?key=value&..." with keys emitted in a fixed order.
        '''
        if params is None:
            return ""
        # Fixed order keeps generated URLs deterministic.
        param_types = ['offset', 'limit', 'order', 'direction']
        pairs = ["{}={}".format(pt, params[pt]) for pt in param_types if pt in params]
        if not pairs:
            # Previously this returned a bare '?'; an empty string is the
            # correct "no query" form.
            return ""
        return "?" + "&".join(pairs)

    def get_all(self, group_id, token, query_params=None):
        '''Gets all groups from database'''
        query = self.construct_query(query_params)
        url = self.url + '/groups/' + group_id + query
        mf_resp = response.Response()
        http_resp = requests.get(url, headers={"Authorization": token})
        if http_resp.status_code != 200:
            mf_resp.error.status = 1
            mf_resp.error.message = errors.handle_error(errors.groups["get_all"], http_resp.status_code)
        else:
            mf_resp.value = http_resp.json()
        return mf_resp

    def update(self, group_id, token, group):
        '''Updates group entity'''
        http_resp = requests.put(self.url + "/groups/" + group_id, json=group, headers={"Authorization": token})
        mf_resp = response.Response()
        if http_resp.status_code != 200:
            mf_resp.error.status = 1
            mf_resp.error.message = errors.handle_error(errors.groups["update"], http_resp.status_code)
        return mf_resp

    def members(self, group_id, token):
        '''Get list of members ID's from group'''
        # NOTE(review): this sends a POST with no body, treats 204 as
        # success and never fills mf_resp.value -- it looks copy-pasted
        # from assign(). Confirm against the Mainflux API (a GET would be
        # the expected verb for a read). Behavior kept unchanged here.
        http_resp = requests.post(self.url + "/groups/" + group_id + "/members", headers={"Authorization": token})
        mf_resp = response.Response()
        if http_resp.status_code != 204:
            mf_resp.error.status = 1
            mf_resp.error.message = errors.handle_error(errors.groups["members"], http_resp.status_code)
        return mf_resp

    def assign(self, group_id, token, members):
        '''Assigns members to a group'''
        mf_resp = response.Response()
        http_resp = requests.post(self.url + "/groups/" + group_id + "/members", headers={"Authorization": token}, json=members)
        if http_resp.status_code != 200:
            mf_resp.error.status = 1
            mf_resp.error.message = errors.handle_error(errors.groups["assign"], http_resp.status_code)
        return mf_resp

    def unassign(self, group_id, token, members):
        '''Unassigns members from a group'''
        mf_resp = response.Response()
        http_resp = requests.delete(self.url + "/groups/" + group_id + "/members", headers={"Authorization": token}, json=members)
        if http_resp.status_code != 204:
            mf_resp.error.status = 1
            mf_resp.error.message = errors.handle_error(errors.groups["unassign"], http_resp.status_code)
        return mf_resp

    def delete(self, group_id, token):
        '''Deletes a group entity from database'''
        http_resp = requests.delete(self.url + "/groups/" + group_id, headers={"Authorization": token})
        mf_resp = response.Response()
        if http_resp.status_code != 204:
            mf_resp.error.status = 1
            mf_resp.error.message = errors.handle_error(errors.groups["delete"], http_resp.status_code)
        return mf_resp
/Indomielibs-2.0.106.tar.gz/Indomielibs-2.0.106/pyrogram/dispatcher.py |
import asyncio
import inspect
import logging
from collections import OrderedDict
import pyrogram
from pyrogram import utils
from pyrogram.handlers import (
CallbackQueryHandler, MessageHandler, EditedMessageHandler, DeletedMessagesHandler,
UserStatusHandler, RawUpdateHandler, InlineQueryHandler, PollHandler,
ChosenInlineResultHandler, ChatMemberUpdatedHandler, ChatJoinRequestHandler
)
from pyrogram.raw.types import (
UpdateNewMessage, UpdateNewChannelMessage, UpdateNewScheduledMessage,
UpdateEditMessage, UpdateEditChannelMessage,
UpdateDeleteMessages, UpdateDeleteChannelMessages,
UpdateBotCallbackQuery, UpdateInlineBotCallbackQuery,
UpdateUserStatus, UpdateBotInlineQuery, UpdateMessagePoll,
UpdateBotInlineSend, UpdateChatParticipant, UpdateChannelParticipant,
UpdateBotChatInviteRequester
)
log = logging.getLogger(__name__)
class Dispatcher:
    """Routes raw Telegram updates to the registered handler groups.

    Raw updates are parsed into high-level pyrogram types and then
    dispatched, in ascending group order, to the first matching handler of
    each group. Handlers run on a pool of worker tasks fed from
    ``updates_queue``; one lock per worker lets add/remove_handler pause
    all workers while mutating the handler table.
    """

    NEW_MESSAGE_UPDATES = (UpdateNewMessage, UpdateNewChannelMessage, UpdateNewScheduledMessage)
    EDIT_MESSAGE_UPDATES = (UpdateEditMessage, UpdateEditChannelMessage)
    DELETE_MESSAGES_UPDATES = (UpdateDeleteMessages, UpdateDeleteChannelMessages)
    CALLBACK_QUERY_UPDATES = (UpdateBotCallbackQuery, UpdateInlineBotCallbackQuery)
    CHAT_MEMBER_UPDATES = (UpdateChatParticipant, UpdateChannelParticipant)
    USER_STATUS_UPDATES = (UpdateUserStatus,)
    BOT_INLINE_QUERY_UPDATES = (UpdateBotInlineQuery,)
    POLL_UPDATES = (UpdateMessagePoll,)
    CHOSEN_INLINE_RESULT_UPDATES = (UpdateBotInlineSend,)
    CHAT_JOIN_REQUEST_UPDATES = (UpdateBotChatInviteRequester,)

    def __init__(self, client: "pyrogram.Client"):
        self.client = client
        # NOTE(review): get_event_loop() is deprecated outside a running
        # loop on Python 3.10+; kept for compatibility with the callers.
        self.loop = asyncio.get_event_loop()

        self.handler_worker_tasks = []
        self.locks_list = []

        self.updates_queue = asyncio.Queue()
        self.groups = OrderedDict()

        # Each parser converts one raw update (plus the users/chats maps
        # shipped with it) into (parsed_update, handler_type).

        async def message_parser(update, users, chats):
            return (
                await pyrogram.types.Message._parse(self.client, update.message, users, chats,
                                                    isinstance(update, UpdateNewScheduledMessage)),
                MessageHandler
            )

        async def edited_message_parser(update, users, chats):
            # Edited messages are parsed the same way as new messages, but
            # the handler is different.
            parsed, _ = await message_parser(update, users, chats)

            return (
                parsed,
                EditedMessageHandler
            )

        async def deleted_messages_parser(update, users, chats):
            return (
                utils.parse_deleted_messages(self.client, update),
                DeletedMessagesHandler
            )

        async def callback_query_parser(update, users, chats):
            return (
                await pyrogram.types.CallbackQuery._parse(self.client, update, users),
                CallbackQueryHandler
            )

        async def user_status_parser(update, users, chats):
            return (
                pyrogram.types.User._parse_user_status(self.client, update),
                UserStatusHandler
            )

        async def inline_query_parser(update, users, chats):
            return (
                pyrogram.types.InlineQuery._parse(self.client, update, users),
                InlineQueryHandler
            )

        async def poll_parser(update, users, chats):
            return (
                pyrogram.types.Poll._parse_update(self.client, update),
                PollHandler
            )

        async def chosen_inline_result_parser(update, users, chats):
            return (
                pyrogram.types.ChosenInlineResult._parse(self.client, update, users),
                ChosenInlineResultHandler
            )

        async def chat_member_updated_parser(update, users, chats):
            return (
                pyrogram.types.ChatMemberUpdated._parse(self.client, update, users, chats),
                ChatMemberUpdatedHandler
            )

        async def chat_join_request_parser(update, users, chats):
            return (
                pyrogram.types.ChatJoinRequest._parse(self.client, update, users, chats),
                ChatJoinRequestHandler
            )

        self.update_parsers = {
            Dispatcher.NEW_MESSAGE_UPDATES: message_parser,
            Dispatcher.EDIT_MESSAGE_UPDATES: edited_message_parser,
            Dispatcher.DELETE_MESSAGES_UPDATES: deleted_messages_parser,
            Dispatcher.CALLBACK_QUERY_UPDATES: callback_query_parser,
            Dispatcher.USER_STATUS_UPDATES: user_status_parser,
            Dispatcher.BOT_INLINE_QUERY_UPDATES: inline_query_parser,
            Dispatcher.POLL_UPDATES: poll_parser,
            Dispatcher.CHOSEN_INLINE_RESULT_UPDATES: chosen_inline_result_parser,
            Dispatcher.CHAT_MEMBER_UPDATES: chat_member_updated_parser,
            Dispatcher.CHAT_JOIN_REQUEST_UPDATES: chat_join_request_parser
        }

        # Flatten: map each concrete raw update type to its parser so
        # handler_worker can do a single dict lookup by type().
        self.update_parsers = {key: value for key_tuple, value in self.update_parsers.items() for key in key_tuple}

    async def start(self):
        """Spawn one handler worker task (with its own lock) per configured worker."""
        if not self.client.no_updates:
            for i in range(self.client.workers):
                self.locks_list.append(asyncio.Lock())

                self.handler_worker_tasks.append(
                    self.loop.create_task(self.handler_worker(self.locks_list[-1]))
                )

            log.info("Started %s HandlerTasks", self.client.workers)

    async def stop(self):
        """Signal every worker with a ``None`` sentinel and wait for them to exit."""
        if not self.client.no_updates:
            for i in range(self.client.workers):
                self.updates_queue.put_nowait(None)

            for i in self.handler_worker_tasks:
                await i

            self.handler_worker_tasks.clear()
            self.groups.clear()

            log.info("Stopped %s HandlerTasks", self.client.workers)

    def add_handler(self, handler, group: int):
        """Register *handler* in *group*, pausing all workers while the table changes."""
        async def fn():
            for lock in self.locks_list:
                await lock.acquire()

            try:
                if group not in self.groups:
                    self.groups[group] = []
                    # Keep groups sorted so dispatch order is deterministic.
                    self.groups = OrderedDict(sorted(self.groups.items()))

                self.groups[group].append(handler)
            finally:
                for lock in self.locks_list:
                    lock.release()

        self.loop.create_task(fn())

    def remove_handler(self, handler, group: int):
        """Remove *handler* from *group*; raises ValueError if the group is unknown."""
        async def fn():
            for lock in self.locks_list:
                await lock.acquire()

            try:
                if group not in self.groups:
                    raise ValueError(f"Group {group} does not exist. Handler was not removed.")

                self.groups[group].remove(handler)
            finally:
                for lock in self.locks_list:
                    lock.release()

        self.loop.create_task(fn())

    async def handler_worker(self, lock):
        """Consume packets from ``updates_queue`` and dispatch them until a ``None`` sentinel arrives."""
        while True:
            packet = await self.updates_queue.get()

            if packet is None:
                break

            try:
                update, users, chats = packet
                parser = self.update_parsers.get(type(update), None)

                # Unknown raw updates still reach RawUpdateHandlers below;
                # (None, NoneType) simply matches no typed handler.
                parsed_update, handler_type = (
                    await parser(update, users, chats)
                    if parser is not None
                    else (None, type(None))
                )

                async with lock:
                    for group in self.groups.values():
                        for handler in group:
                            args = None

                            if isinstance(handler, handler_type):
                                try:
                                    if await handler.check(self.client, parsed_update):
                                        args = (parsed_update,)
                                except Exception as e:
                                    log.exception(e)
                                    continue

                            elif isinstance(handler, RawUpdateHandler):
                                args = (update, users, chats)

                            if args is None:
                                continue

                            try:
                                if inspect.iscoroutinefunction(handler.callback):
                                    await handler.callback(self.client, *args)
                                else:
                                    # Sync callbacks run on the client's
                                    # executor to avoid blocking the loop.
                                    await self.loop.run_in_executor(
                                        self.client.executor,
                                        handler.callback,
                                        self.client,
                                        *args
                                    )
                            except pyrogram.StopPropagation:
                                raise
                            except pyrogram.ContinuePropagation:
                                continue
                            except Exception as e:
                                log.exception(e)

                            # Only the first matching handler per group runs.
                            break
            except pyrogram.StopPropagation:
                pass
            except Exception as e:
                log.exception(e)
/LabExT_pkg-2.2.0.tar.gz/LabExT_pkg-2.2.0/LabExT/View/MenuListener.py | import datetime
import json
import logging
import sys
import os
import webbrowser
from threading import Thread
from tkinter import filedialog, simpledialog, messagebox, Toplevel, Label, Frame, font
from LabExT.Utils import get_author_list, try_to_lift_window
from LabExT.View.AddonSettingsDialog import AddonSettingsDialog
from LabExT.View.Controls.DriverPathDialog import DriverPathDialog
from LabExT.View.ExperimentWizard.ExperimentWizardController import ExperimentWizardController
from LabExT.View.Exporter import Exporter
from LabExT.View.ExtraPlots import ExtraPlots
from LabExT.View.InstrumentConnectionDebugger import InstrumentConnectionDebugger
from LabExT.View.LiveViewer.LiveViewerController import LiveViewerController
from LabExT.View.ProgressBar.ProgressBar import ProgressBar
from LabExT.View.SearchForPeakPlotsWindow import SearchForPeakPlotsWindow
from LabExT.View.Movement import (
CalibrationWizard,
MoverWizard,
StageWizard,
MoveStagesRelativeWindow,
MoveStagesDeviceWindow,
LoadStoredCalibrationWindow
)
class MListener:
    """Listens to the events triggered by clicks on the menu bar.

    Each ``client_*`` method backs one menu entry. Windows that should be
    singletons keep a ``*_toplevel`` reference so a second click raises the
    existing window instead of opening a new one (see try_to_lift_window).
    """

    def __init__(self, experiment_manager, root):
        """Constructor.

        Parameters
        ----------
        experiment_manager : ExperimentManager
            Instance of current ExperimentManager.
        root : Tk
            Tkinter parent window.
        """
        self.logger = logging.getLogger()
        self.logger.debug('Initialised MenuListener with parent: %s experiment_manager: %s', root, experiment_manager)
        self._experiment_manager = experiment_manager
        self._root = root
        # toplevel tracking to simply raise window if already opened once instead of opening a new one
        self.swept_exp_wizard_toplevel = None
        self.exporter_toplevel = None
        self.stage_configure_toplevel = None
        self.stage_movement_toplevel = None
        self.stage_device_toplevel = None
        self.sfpp_toplevel = None
        self.extra_plots_toplevel = None
        self.live_viewer_toplevel = None
        self.instrument_conn_debuger_toplevel = None
        self.addon_settings_dialog_toplevel = None
        self.stage_driver_settings_dialog_toplevel = None
        self.about_toplevel = None
        # Progress bar shown while importing measurement files.
        self.pgb = None
        # Set by import_runner when the background import thread finishes.
        self.import_done = False

        self.stage_setup_toplevel = None
        self.mover_setup_toplevel = None
        self.calibration_setup_toplevel = None
        self.calibration_restore_toplevel = None

    def client_new_experiment(self):
        """Called when user wants to start new Experiment. Calls the
        ExperimentWizard.
        """
        if try_to_lift_window(self.swept_exp_wizard_toplevel):
            return

        # start the measurement wizard
        self.logger.debug('Opening new device sweep wizard window.')
        self._experiment_wizard = ExperimentWizardController(self._root, self._experiment_manager)
        self.swept_exp_wizard_toplevel = self._experiment_wizard.view.main_window
        self._experiment_wizard.start_wizard()

    def client_load_data(self):
        """Called when user wants to load data. Opens a file dialog
        and then imports selected files.
        """
        self.logger.debug('Client wants to load data.')

        # tk returns this in tuples of strings
        file_names_tuple = filedialog.askopenfilenames(
            title='Select files for import',
            filetypes=(('.json data', '*.json'), ('all files', '*.*')))
        self.file_names = [*file_names_tuple]

        self.logger.debug('Files to import: %s', self.file_names)
        if not self.file_names:
            self.logger.debug('Aborting file import. No files selected.')
            return

        self.import_done = False

        # here we set up the progress bar
        self.pgb = ProgressBar(self._root, 'Importing Files...')

        # now we can start the import thread
        Thread(target=self.import_runner).start()

        # this little loop here updates the progress bar
        # NOTE(review): this is a busy-wait on the Tk main thread; it works
        # because update()/update_idletasks() keep the GUI responsive, but a
        # periodic `after()` callback would be the idiomatic fix.
        while not self.import_done:
            self.pgb.update_idletasks()
            self.pgb.update()

        # finally, we can destroy the progress bar
        self.pgb.destroy()

    def import_runner(self):  # load measurements and append to current experiment's measurement list
        """Background worker: load each selected JSON file into the experiment."""
        loaded_files = []
        for file_name in self.file_names:
            try:
                with open(file_name) as f:
                    raw_data = json.load(f)
                self._experiment_manager.exp.load_measurement_dataset(raw_data, file_name)
                loaded_files.append(file_name)
            except Exception as exc:
                msg = "Could not import file {:s} due to: {:s}".format(file_name, repr(exc))
                self.logger.error(msg)
                messagebox.showerror("Load Data Error", msg)
                continue

        self.logger.info('Finished data import of files: {:s}'.format(str(loaded_files)))
        # Signals the GUI loop in client_load_data to stop polling.
        self.import_done = True

    def client_import_chip(self):
        """Called when user wants to import a new chip. Opens a file
        dialog, asks for a chip name and calls the experiment manager
        to change chip.
        """
        self.logger.debug('Client wants to import chip')

        # open a file dialog and ask for location of chip
        if self._experiment_manager.exp.to_do_list:
            messagebox.showinfo(
                'Error',
                'Please finish your experiment before you import a chip.')
            self.logger.warning('Cannot import new chip: there are still measurements to do.')
            return

        _chip_path = filedialog.askopenfilename(
            title="Select chip layout file",
            filetypes=(("chip layout", "*.txt"),
                       ("chip layout", "*.json"),
                       ("chip layout", "*.csv"),
                       ("all files", "*.*")))

        if _chip_path:
            _chip_name = simpledialog.askstring(
                title="Custom chip name",
                prompt="Set individual chip name",
                initialvalue="Chip_01")
            if _chip_name:
                try:
                    self._experiment_manager.import_chip(_chip_path, _chip_name)
                except Exception as exc:
                    msg = "Could not import chip due to: " + repr(exc)
                    self.logger.error(msg)
                    messagebox.showerror('Chip Import Error', msg)
                    return
                msg = "Chip with name {:s} and description " \
                      "file {:s} successfully imported.".format(_chip_name, _chip_path)
                self.logger.info(msg)
                messagebox.showinfo("Chip Import Success", msg)
            # if user presses cancel when asked for custom name we abort
            else:
                self.logger.info('Chip import aborted by user (cancelled name setting).')
        else:
            self.logger.info('Chip import aborted by user (no file selected).')

    def client_export_data(self):
        """Called when user wants to export data. Starts the Exporter.
        """
        if try_to_lift_window(self.exporter_toplevel):
            return

        self.logger.debug('Client wants to export data')
        exporter = Exporter(self._root, self._experiment_manager)
        self.exporter_toplevel = exporter._meas_window

    def client_quit(self):
        """
        Called when use clicks Quit menu entry. Quit the application.
        """
        sys.exit(0)

    def client_restart(self):
        """
        Called when user wants to restart the applications.
        """
        # Replace the current process with a fresh interpreter running the
        # same command line.
        os.execl(sys.executable, sys.executable, *sys.argv)

    def client_setup_stages(self):
        """
        Open wizard to setup the stages.
        """
        if try_to_lift_window(self.stage_setup_toplevel):
            return

        self.stage_setup_toplevel = StageWizard(
            self._root,
            self._experiment_manager.mover,
            experiment_manager=self._experiment_manager)

    def client_setup_mover(self):
        """
        Open wizard to setup mover.
        """
        if try_to_lift_window(self.mover_setup_toplevel):
            return

        self.mover_setup_toplevel = MoverWizard(self._root, self._experiment_manager.mover)

    def client_calibrate_stage(self):
        """
        Open wizard to calibrate stages.
        """
        if try_to_lift_window(self.calibration_setup_toplevel):
            return

        self.calibration_setup_toplevel = CalibrationWizard(
            self._root,
            self._experiment_manager.mover,
            self._experiment_manager.chip,
            experiment_manager=self._experiment_manager)

    def client_move_stages(self):
        """
        Called when the user wants to move the stages manually.
        Opens a window with parameters for relative movement.
        """
        if try_to_lift_window(self.stage_movement_toplevel):
            return

        self.stage_movement_toplevel = MoveStagesRelativeWindow(
            self._root, self._experiment_manager.mover)

    def client_move_device(self):
        """
        Called when the user wants to move the stages to a specific device.
        Opens a MoveDeviceWindow and uses mover to perform the movement.
        """
        if try_to_lift_window(self.stage_device_toplevel):
            return

        self.stage_device_toplevel = MoveStagesDeviceWindow(
            self._root,
            self._experiment_manager.mover,
            self._experiment_manager.chip)

    def client_restore_calibration(self, chip):
        """
        Opens a window to restore calibrations.
        """
        if try_to_lift_window(self.calibration_restore_toplevel):
            return

        calibration_settings = self._experiment_manager.mover.load_stored_calibrations_for_chip(
            chip=chip)
        if not calibration_settings:
            self.logger.debug(
                f"No stored calibration found for {chip}")
            return

        last_updated_at = datetime.datetime.fromisoformat(
            calibration_settings["last_updated_at"]).strftime("%d.%m.%Y %H:%M:%S")

        if not messagebox.askyesno(
            "Restore calibration",
            f"Found mover calibration for chip: {chip.name}. \n Last updated at: {last_updated_at}. \n"
            "Do you want to restore it?"):
            return

        self.calibration_restore_toplevel = LoadStoredCalibrationWindow(
            self._root,
            self._experiment_manager.mover,
            calibration_settings=calibration_settings)

    def client_search_for_peak(self):
        """Called when user wants to open plotting window for search for peak observation."""
        if try_to_lift_window(self.sfpp_toplevel):
            return

        self.logger.debug('Opening new search for peak window.')
        sfpp = SearchForPeakPlotsWindow(parent=self._root,
                                        experiment_manager=self._experiment_manager)
        self.sfpp_toplevel = sfpp.plot_window

    def client_extra_plots(self):
        """ Called when user wants to open extra plots. """
        if try_to_lift_window(self.extra_plots_toplevel):
            return

        main_window = self._experiment_manager.main_window
        meas_table = main_window.view.frame.measurement_table
        self.logger.debug('Opening new extra plots window.')
        main_window.extra_plots = ExtraPlots(meas_table, main_window.view.frame)
        self.extra_plots_toplevel = main_window.extra_plots.cur_window

    def client_side_windows(self):
        """Deprecated menu entry; kept only to fail loudly if still wired up."""
        raise DeprecationWarning("Open side windows is deprecated. Do not use.")

    def client_live_view(self):
        """Called when user wants to start live view.

        Creates a new instance of LiveViewer, which takes care of
        settings, instruments and plotting.
        """
        if try_to_lift_window(self.live_viewer_toplevel):
            return

        self.logger.debug('Opening new live viewer window.')
        lv = LiveViewerController(self._root, self._experiment_manager)  # blocking call until all settings have been made
        self.live_viewer_toplevel = lv.current_window  # reference to actual toplevel

    def client_instrument_connection_debugger(self):
        """ opens the instrument connection debugger """
        if try_to_lift_window(self.instrument_conn_debuger_toplevel):
            return

        icd = InstrumentConnectionDebugger(self._root, self._experiment_manager)
        self.instrument_conn_debuger_toplevel = icd.wizard_window

    def client_addon_settings(self):
        """ opens the addon settings dialog """
        if try_to_lift_window(self.addon_settings_dialog_toplevel):
            return

        asd = AddonSettingsDialog(self._root, self._experiment_manager)
        self.addon_settings_dialog_toplevel = asd.wizard_window

    def client_stage_driver_settings(self):
        """ opens the stage driver settings dialog """
        if try_to_lift_window(self.stage_driver_settings_dialog_toplevel):
            # Dialog already exists: just block until the user closes it.
            self._root.wait_window(
                self.stage_driver_settings_dialog_toplevel)
        else:
            self.stage_driver_settings_dialog_toplevel = DriverPathDialog(
                self._root,
                settings_file_path="mcsc_module_path.txt",
                title="Stage Driver Settings",
                label="SmarAct MCSControl driver module path",
                hint="Specify the directory where the module MCSControl_PythonWrapper is found.\nThis is external software,"
                     "provided by SmarAct GmbH and is available from them. See https://smaract.com.")

            self._root.wait_window(
                self.stage_driver_settings_dialog_toplevel)

        if self.stage_driver_settings_dialog_toplevel.path_has_changed:
            # Fixed message text: the two implicitly-concatenated string
            # literals were previously missing a separating space.
            if messagebox.askokcancel(
                    "Stage Driver Path changed",
                    "The path to the driver of the SmarAct MCSControl Interface was successfully changed. "\
                    "LabExT must be restarted for the changes to take effect. Do you want to restart LabExT now?",
                    parent=self._root):
                self.client_restart()

    def client_documentation(self):
        """ Opens the documentation in a new browser session. """
        self._experiment_manager.show_documentation(None)

    def client_sourcecode(self):
        """Opens the sourcecode in a new browser session.
        """
        webbrowser.open('https://github.com/LabExT/LabExT')

    def client_load_about(self):
        """Opens an About window.
        """
        if try_to_lift_window(self.about_toplevel):
            return

        self.logger.debug('Client opens about window')

        self.about_toplevel = Toplevel(self._root)
        self.about_toplevel.attributes('-topmost', 'true')
        about_window = Frame(self.about_toplevel)
        about_window.grid(row=0, column=0)

        font_title = font.Font(size=12, weight='bold')
        font_normal = font.Font(size=10)

        label_title = Label(
            about_window, text='LabExT - Laboratory Experiment Tool')
        label_title.configure(font=font_title)
        label_title.grid(row=0, column=0)

        label_description = Label(
            about_window,
            text=
            'a laboratory experiment software environment for performing measurements and visualizing data\n' +
            f'Copyright (C) {datetime.date.today().strftime("%Y"):s} ETH Zurich and Polariton Technologies AG\n'
            'released under GPL v3, see LICENSE file'
        )
        label_description.configure(font=font_normal)
        label_description.grid(row=1, column=0)

        # authors are loaded form AUTHORS.md file
        authors = get_author_list()
        label_credits = Label(
            about_window,
            text='\n'.join(authors)
        )
        label_credits.configure(font=font_normal)
        label_credits.grid(row=9, column=0, rowspan=6)
/CodeChat_Server-0.2.18.tar.gz/CodeChat_Server-0.2.18/CodeChat_Server/render_manager.py | import asyncio
from enum import Enum
import logging
import json
from pathlib import Path
import sys
from typing import (
Any,
cast,
Callable,
Coroutine,
Dict,
Optional,
Union,
)
import urllib.parse
import xml.dom.minidom
# Third-party imports
# -------------------
from lxml import etree as ElementTree
import websockets
import websockets.server
# Local imports
# -------------
from .constants import WEBSOCKET_PORT
from .renderer import is_win, ProjectConfFile, render_file
# RenderManager / render thread
# ==============================
logger = logging.getLogger(__name__)
# .. _GetResultType Py:
#
# These must match the `constants in the client <GetResultType JS>`.
class GetResultType(Enum):
    """Discriminator for messages sent to the CodeChat Client.

    The numeric values are part of the wire protocol and must match the
    constants in the client-side JavaScript.
    """

    # A URL indicating that new rendered content is available.
    url = 0
    # A build output message.
    build = 1
    # Errors from the build.
    errors = 2
    # A command, such as ``shutdown``.
    command = 3
# Utilities
# =========
def GetResultReturn(get_result_type: GetResultType, text: str):
    """Package a message for the CodeChat Client as a JSON-serializable dict."""
    payload = {
        "get_result_type": get_result_type.value,
        "text": text,
    }
    return payload
# Convert a path to a URI component: make it absolute and use forward (POSIX) slashes. If the provided ``file_path`` is falsey, just return it.
def path_to_uri(file_path: str):
    """Return *file_path* made absolute with forward (POSIX) slashes; falsey inputs pass through unchanged."""
    if not file_path:
        return file_path
    return Path(file_path).resolve().as_posix()
# Store data for about each client. This is a combination of data about the editor/IDE client and the associated CodeChat Client.
class ClientState:
    """Per-client bookkeeping shared between an editor/IDE and its CodeChat Client.

    Only ``q`` is safe to touch from other threads; every ``_``-prefixed
    attribute belongs exclusively to the rendering thread.
    """

    def __init__(self):
        # A queue of messages for the CodeChat Client.
        self.q: asyncio.Queue = asyncio.Queue()

        # The remaining data in this class should only be accessed by the rendering thread.
        #
        # The most recent HTML and editor text after rendering the specified file_path.
        self._html: Optional[str] = None
        self._editor_text: Optional[str] = None
        self._file_path: Optional[str] = None

        # The path to the CodeChat project configuration file if this is a project; None otherwise.
        self._project_path: Optional[str] = None

        # A flag to indicate if this has been placed in the renderer's job queue.
        self._in_job_q: bool = False
        # A flag to indicate that this client has work to perform.
        self._needs_processing: bool = True

        # A bucket to hold text and the associated file to render.
        self._to_render_editor_text: Optional[str] = None
        self._to_render_file_path: Optional[str] = None
        self._to_render_is_dirty: Optional[bool] = None

        # A bucket to hold a sync request.
        #
        # The index into either the editor text or HTML converted to text.
        self._to_sync_index: Optional[int] = None
        self._to_sync_from_editor: Optional[int] = None
        # The HTML converted to text.
        self._html_as_text: Optional[str] = None

        # Shutdown is tricky; see `this discussion <shut down an editor client>`.
        #
        # A flag to request the worker to delete this client.
        self._is_deleting: bool = False
# Use the contents of the provided ClientState to perform a render.
async def render_client_state(cs: ClientState) -> None:
    """Render the text/file stashed in *cs* and queue the results for its CodeChat Client.

    Reads the ``_to_render_*`` fields, updates the cached render state on
    success, then pushes the error text and finally the rendered file's URL
    onto ``cs.q``. Returns without queuing anything when the file type is
    not convertible.
    """
    # Provide a coroutine used by converters to write build results.
    def co_build(_str: str) -> Coroutine[Any, Any, None]:
        return cs.q.put(
            GetResultReturn(
                GetResultType.build,
                _str,
            )
        )

    (
        is_converted,
        project_path,
        rendered_file_path,
        html,
        err_string,
    ) = await render_file(
        cast(str, cs._to_render_editor_text),
        cast(str, cs._to_render_file_path),
        Path(cs._file_path) if cs._file_path else None,
        co_build,
        cast(bool, cs._to_render_is_dirty),
    )
    # Nothing to report for file types the renderer cannot convert.
    if not is_converted:
        return

    cs._project_path = project_path
    cs._file_path = rendered_file_path
    cs._html = html
    cs._editor_text = cs._to_render_editor_text

    # Send any errors. An empty error string will clear any errors from a previous build, and should still be sent.
    await cs.q.put(GetResultReturn(GetResultType.errors, err_string))

    # Sending the HTML signals the end of this build.
    #
    # For Windows, make the path contain forward slashes.
    uri = path_to_uri(cs._file_path)
    # Encode this, for Windows paths which contain a colon (or unusual Linux paths).
    await cs.q.put(GetResultReturn(GetResultType.url, urllib.parse.quote(uri)))
class RenderManager:
def __init__(self, shutdown_event):
self.shutdown_event = shutdown_event
self._is_shutdown = False
# Provide a way to perform thread-safe access of methods in this class.
def __getattr__(self, name: str) -> Callable:
if name.startswith("threadsafe_"):
# Strip off ``threadsafe`` and look for the function.
internal_func = self.__getattr__(name[11:])
# Invoke it as an async if needed.
async def async_wrap(*args, **kwargs):
return internal_func(*args, **kwargs)
# See if we need to wrap this in an async.
async_func = (
internal_func
if asyncio.iscoroutinefunction(internal_func)
else async_wrap
)
# Wrap the async func in a threadsafe call.
def threadsafe_async(*args, **kwargs):
future = asyncio.run_coroutine_threadsafe(
async_func(*args, **kwargs), self._loop
)
return future.result()
return threadsafe_async
# Not found. Let Python raise the exception for us.
return self.__getattribute__(name)
# Determine if the provided id exists and is not being deleted. Return the ClientState for the id if so; otherwise, return False.
def get_client_state(self, id: int) -> Union[bool, ClientState]:
cs = self._client_state_dict.get(id)
# Signal an error if this client doesn't exist or is being deleted; otherwise, return it.
return cs if cs and not cs._is_deleting else False
# Add the provided client to the job queue.
def _enqueue(self, id: int) -> None:
# Add to the job queue unless it's already there.
cs = self._client_state_dict[id]
cs._needs_processing = True
if not cs._in_job_q:
self._job_q.put_nowait(id)
cs._in_job_q = True
# Create a new client. Returns the client id on success or False on failure. The client may optionally provide an id for a new client.
def create_client(self, id: Optional[int] = None) -> int:
if self._is_shutdown:
return -1
if id is None:
self._last_id += 1
id = self._last_id
if id in self._client_state_dict:
# Indicate failure if this id exists.
return False
self._client_state_dict[id] = ClientState()
return id
# `<-- <Delete step 3.>` _`Delete step 4.` `--> <Delete step 5.>` _`delete_client`: Request a worker to delete this MultiClient.
def delete_client(self, id: int) -> bool:
cs = self.get_client_state(id)
if not cs:
return False
assert isinstance(cs, ClientState)
# Tell the worker to delete this. We can't simply delete it now, since it may be in the middle of a render. Allowing the worker to delete it ensures it's in a safe (unused) state for deletion.
self._enqueue(id)
# Prevent any new entries in the queue by setting this flag; see ``get_client_state``.
cs._is_deleting = True
return True
# Place the item in the render queue; must be called from another (non-render) thread. Returns True on success, or False if the provided id doesn't exist.
def start_render(
self, editor_text: str, file_path: str, id: int, is_dirty: bool
) -> bool:
cs = self.get_client_state(id)
if not cs:
# Signal an error for an invalid client id.
return False
assert isinstance(cs, ClientState)
# Add to the job queue.
self._enqueue(id)
# Update the job parameters.
cs._to_render_editor_text = editor_text
cs._to_render_file_path = file_path
cs._to_render_is_dirty = is_dirty
# Indicate success
return True
# Get a CodeChat Client's queue.
def get_queue(self, id: int) -> Optional[asyncio.Queue]:
cs = self.get_client_state(id)
return cast(ClientState, cs).q if cs else None
# Return the results of rendering the provided URL:
#
# - If the URL matches with the latest render, return the resulting HTML for a non-project render. Return ``None`` for a project render, indicating that the render was stored to disk and the URL is a path to the rendered file.
# - If there's no match to the URL or the ID doesn't exist, return False. Note that the "HTML" can be None, meaning
def get_render_results(self, id: int, url_path: str) -> Union[None, str, bool]:
cs = self.get_client_state(id)
return (
cast(ClientState, cs)._html
if cs
and path_to_uri(cast(str, cast(ClientState, cs)._file_path)) == url_path
else False
)
# Communicate with a CodeChat Client via a websocket.
    async def websocket_handler(
        self, websocket: websockets.server.WebSocketServerProtocol, path: str
    ):
        """Communicate with a CodeChat Client via a websocket.

        Protocol: the client first sends its integer id as JSON. After the id
        is validated, items from that client's queue are forwarded over the
        websocket until the socket closes, the server shuts down, or a
        ``shutdown`` command is forwarded (which also deletes the client).
        """
        # First, read this client's ID.
        try:
            data = await websocket.recv()
        except websockets.exceptions.WebSocketException:
            # Give up if there's a websocket error.
            return
        # Find the queue for this CodeChat Client.
        try:
            id_ = json.loads(data)
        except json.decoder.JSONDecodeError:
            # Keep a printable description of the malformed id for the error
            # message below; ``get_queue`` will reject it.
            id_ = f"<invalid id {repr(data)}>"
        q = self.get_queue(id_)
        if not q:
            try:
                await websocket.send(
                    json.dumps(
                        GetResultReturn(
                            GetResultType.command, f"error: unknown client {id_}."
                        )
                    )
                )
            except websockets.exceptions.WebSocketException:
                # Ignore any errors here, since we're closing the socket anyway.
                pass
            return
        # Start one task to get read results from the websocket.
        read_websocket_handler = asyncio.create_task(
            self.read_websocket_handler(websocket, id_)
        )
        # Send messages until shutdown. However, this function should typically
        # never exit using this conditional; instead, the shutdown code below
        # should break out of the loop.
        q_task = asyncio.create_task(q.get())
        socket_closed_task = asyncio.create_task(websocket.wait_closed())
        while not self._is_shutdown:
            # Race the queue read against socket closure so a dead socket
            # doesn't leave us blocked on an empty queue.
            done, pending = await asyncio.wait(
                [q_task, socket_closed_task], return_when=asyncio.FIRST_COMPLETED
            )
            # If the socket was closed, wrap up.
            if socket_closed_task in done:
                # Stop waiting on the queue.
                q_task.cancel()
                break
            # The usual case: we have data to send over the websocket.
            if q_task in done:
                ret = q_task.result()
                # Prepare for the next run.
                q_task = asyncio.create_task(q.get())
                try:
                    await websocket.send(json.dumps(ret))
                except websockets.exceptions.WebSocketException:
                    # An error occurred -- close the websocket. The client will
                    # open another, so we can try again.
                    return
                # Delete the client if this was a shutdown command.
                if (ret["get_result_type"] == GetResultType.command.value) and (
                    ret["text"] == "shutdown"
                ):
                    # The MultiClient must be kept working until it sends the
                    # CodeChat Client a shutdown message. The message is sent,
                    # so we can now delete the MultiClient.
                    logger.info(f"Sent shutdown command to CodeChat Client id {id_}.")
                    # Check that the queue is empty.
                    if not q.empty():
                        logger.warning(
                            f"CodeChat warning: CodeChat Client id {id_} shut down with pending commands."
                        )
                    # Request MultiClient deletion.
                    assert self.delete_client(id_)
                    # Shut down this websocket.
                    break
        # Wait for the read to shut down.
        if not read_websocket_handler.done():
            await read_websocket_handler
        logger.info(f"Websocket for CodeChat Client id {id_} exiting.")
# _`read_websocket_handler`: this responds to `messages sent by the CodeChat Client <messages sent by the CodeChat Client>`.
    async def read_websocket_handler(
        self, websocket: websockets.server.WebSocketServerProtocol, id_: int
    ):
        """Respond to messages sent by the CodeChat Client.

        Each message is a JSON-encoded ``[msg, data]`` pair; supported
        messages are ``save_file``, ``navigate_to_error``, and
        ``browser_navigation``.
        """
        while not self._is_shutdown:
            try:
                ret = await websocket.recv()
            except websockets.exceptions.WebSocketException:
                # If anything breaks, exit.
                return
            msg, data = json.loads(ret)
            if msg == "save_file":
                # data: {"xml_node": <xml:id to replace>, "file_contents": <new XML>}
                print(
                    f"Save to {data['xml_node']} values:\n{data['file_contents'][:77]}..."
                )
                # Get the location of the project file.
                csd = self._client_state_dict[id_]
                pp = csd._project_path
                if not pp:
                    print("Unable to save: no project file available.")
                    continue
                # Read the source path from it.
                project_conf = ProjectConfFile(
                    Path(pp), Path(cast(str, csd._file_path))
                )
                # Find the source file which matches this mapping.
                xml_id_to_replace = data["xml_node"]
                for (
                    source_file,
                    xml_id_list,
                ) in project_conf.load_pretext_mapping().items():
                    if xml_id_to_replace in xml_id_list:
                        try:
                            # Load in this source.
                            src_tree = ElementTree.parse(source_file)
                            # Parse the replacement source for it.
                            new_node = ElementTree.fromstring(data["file_contents"])
                        except Exception as e:
                            print(f"Unable to load file or parse new source: {e}")
                            break
                        # Find the node in this source file and replace it. Find
                        # only looks at children, so manually check the root
                        # element.
                        # NOTE(review): ``getparent`` and the absolute ``//``
                        # XPath below exist only in lxml.etree, not stdlib
                        # xml.etree.ElementTree -- confirm the ElementTree
                        # import is lxml.
                        xml_id_attrib = "{http://www.w3.org/XML/1998/namespace}id"
                        xml_node_to_replace = (
                            src_tree.getroot()
                            if src_tree.getroot().get(xml_id_attrib)
                            == xml_id_to_replace
                            else src_tree.find(
                                f"//*[@{xml_id_attrib} = '{xml_id_to_replace}']"
                            )
                        )
                        if xml_node_to_replace is None:
                            print(
                                f"Unable to save: can't find node {xml_node_to_replace} in {source_file}."
                            )
                            break
                        # The correct method to replace this node is either
                        # ``_setroot`` (if this is the root element) or
                        # ``replace`` (otherwise).
                        parent = xml_node_to_replace.getparent()
                        if parent is None:
                            src_tree._setroot(new_node)
                        else:
                            parent.replace(xml_node_to_replace, new_node)
                        # Save the updated file. There's no way using lxml
                        # (including the ``docinfo`` class) to determine if the
                        # loaded document contained an XML declaration (the
                        # ``<?xml version="1.0" encoding="UTF-8"?>`` header) or
                        # not. The following comes from
                        # `SO <https://stackoverflow.com/a/54942605/16038919>`__.
                        has_xml_declaration = bool(
                            xml.dom.minidom.parse(source_file).version
                        )
                        src_tree.write(
                            source_file,
                            encoding="utf-8",
                            xml_declaration=has_xml_declaration,
                        )
                        print(f"Saved to {source_file}.")
                        break
                else:
                    # for/else: no mapping contained the requested xml:id.
                    print(
                        f"Unable to write: can't find source file containing {xml_id_to_replace}."
                    )
            elif msg == "navigate_to_error":
                # TODO
                print(
                    f"TODO: navigate to error on line {data['line']} of file {data['file_path']}."
                )
            elif msg == "browser_navigation":
                # TODO
                print(f"TODO: browser navigation to {data['pathname']}.")
                # Translate the path from a URL to a Path, removing the
                # expected prefix of ``/client/id/``.
                pathname_raw = urllib.parse.unquote(data["pathname"])
                expected_pathname_prefix = f"/client/{id_}/"
                assert pathname_raw.startswith(expected_pathname_prefix)
                pathname = Path(pathname_raw[len(expected_pathname_prefix) :])
                # Update the HTML path.
                cs = self._client_state_dict[id_]
                cs._file_path = str(pathname)
                # Get the location of the project file. TODO
                pp = cs._project_path
                if pp:
                    # For projects, we need to find the source file that maps
                    # to this HTML pathname.
                    pass
            else:
                print(f"Error: unknown message {msg} with data '{data}'.")
# `<-- <Delete step 1.>` _`Delete step 2.` `--> <Delete step 3.>` Begin the MultiClient shutdown process by sending a shutdown message to the CodeChat Client.
async def shutdown_client(self, id: int) -> bool:
q = self.get_queue(id)
# Fail if the ID is unknown.
if not q:
return False
# Send the shutdown command to the CodeChat Client.
await q.put(GetResultReturn(GetResultType.command, "shutdown"))
# In case the CodeChat Client is dead, shut down after a delay.
asyncio.create_task(self._delete_client_later(id))
# Indicate success.
return True
# Delete a CodeChat Client after a delay.
async def _delete_client_later(self, id: int):
await asyncio.sleep(1)
if self.delete_client(id):
logger.warning(f"CodeChat Client {id} not responding -- deleted it.")
# Shut down the render manager, called from another thread.
def threadsafe_shutdown(self):
# If the shutdown is in progress, don't do it again.
if self._is_shutdown:
return
# We can't wait for a result, since this causes the asyncio event loop to exit, but the result must be retrieved from a Future running within the event loop. Therefore, call without waiting.
self._loop.call_soon_threadsafe(asyncio.create_task, self.shutdown())
# Shut down the render manager.
    async def shutdown(self):
        """Shut down the render manager.

        Ordering matters: clients first, then the websocket server (only the
        MultiClients use it), then the workers (only needed while clients
        exist).
        """
        logger.info("Render manager shutting down...")
        assert not self._is_shutdown
        self._is_shutdown = True
        # Request a shutdown for each MultiClient.
        for id in self._client_state_dict.keys():
            # The await doesn't mean the shut down is complete, but only that
            # the request was made.
            await self.shutdown_client(id)
        # Wait for all MultiClients to shut down. Special case: if the server
        # never created a MultiClient, then skip this.
        logger.info("Waiting for client shutdown...")
        if len(self._client_state_dict):
            await self._MultiClients_deleted.wait()
        # Shut down the websocket, since only the MultiClient can use it.
        logger.info("Waiting for websocket server to close...")
        self.websocket_server.close()
        await self.websocket_server.wait_closed()
        # Shut the workers down now that the MultiClients have shut down.
        # One ``None`` sentinel per worker; see ``_worker``.
        logger.info("Shutting down workers...")
        for i in range(self._num_workers):
            await self._job_q.put(None)
# Start the render manager.
    def run(self, *args, debug: bool = True, **kwargs) -> None:
        """Start the render manager's asyncio event loop (blocking).

        Extra positional/keyword arguments are forwarded to ``_run``.
        """
        # The default Windows event loop doesn't support asyncio subprocesses.
        if is_win:
            asyncio.set_event_loop_policy(asyncio.WindowsProactorEventLoopPolicy())
        try:
            asyncio.run(self._run(*args, **kwargs), debug=debug)
        except Exception:
            # If something goes wrong, don't try a clean shutdown of this
            # thread, since the event loop already died. This must precede the
            # event below, so that when the main thread tries calling
            # ``threadsafe_shutdown``, this flag is already set.
            self._is_shutdown = True
            # Shut the server down instead of hanging.
            self.shutdown_event.set()
            raise
        logger.info("Render manager is shut down.")
# Run the rendering thread with the given number of workers.
    async def _run(self, server_host, num_workers: int = 1) -> None:
        """Run the rendering thread with the given number of workers."""
        self._num_workers = num_workers
        # Create a queue of jobs for the renderer to process. This must be
        # created from within the main loop to avoid ``got Future <Future
        # pending> attached to a different loop`` errors.
        self._job_q: asyncio.Queue = asyncio.Queue()
        # Keep a dict of id: ClientState for each client.
        self._client_state_dict: Dict[int, ClientState] = {}
        # The next ID will be 0.
        self._last_id = -1
        self._loop = asyncio.get_running_loop()
        self._is_shutdown = False
        self._MultiClients_deleted = asyncio.Event()
        self.websocket_server = await websockets.serve(  # type:ignore
            self.websocket_handler, server_host, WEBSOCKET_PORT
        )
        # Let the user know that the server is now ready -- this is the last
        # piece of it to start.
        #
        # NOTE: The ``CodeChat_Server start`` CLI command reads this line,
        # then quits. This means (on Windows at least) that all future
        # ``print`` statements will block, preventing the server from shutting
        # down. Outputting info to the logger avoids this problem. Therefore,
        # **do not include print statements after this point in the code**.
        print("The CodeChat Server is ready.\nCODECHAT_READY", file=sys.stderr)
        # Flush this since extension and test code waits for it before
        # connecting to the server/running the rest of a test.
        sys.stderr.flush()
        await asyncio.gather(*[self._worker(i) for i in range(num_workers)])
# Process items in the render queue.
async def _worker(self, worker_index: int) -> None:
while True:
# Get an item to process.
id = await self._job_q.get()
# Check for shutdown.
if id is None:
logger.info(f"Render worker {worker_index} is shut down.")
break
cs = self._client_state_dict[id]
assert cs._in_job_q
# Every item in the queue should have some work to do.
assert cs._needs_processing
# Indicate that the current jobs in this ClientState will all be completed.
cs._needs_processing = False
# If the client should be deleted, ignore all other requests.
if cs._is_deleting:
# `<-- <Delete step 4.>` _`Delete step 5.` Now, the deletion can safely proceed. Done.
del self._client_state_dict[id]
# If there are no more MultiClients, shut down and signal that shutdown can proceed.
if len(self._client_state_dict) == 0:
self.shutdown_event.set()
# Indicate that all MultiClients have been deleted; shutdown can now proceed.
assert not self._MultiClients_deleted.is_set()
self._MultiClients_deleted.set()
else:
# Sync first.
# TODO: sync.
# Render next.
await render_client_state(cs)
# If this client received more work to do while working on the current job, add it back to the queue -- it can't safely be added to the queue while the job is in process. Otherwise, we would potentially allow two workers to render the same job in parallel, which would confuse the renderer.
if cs._needs_processing:
self._job_q.put_nowait(id)
else:
cs._in_job_q = False | PypiClean |
# examples/pilot-api/example-pilot-api-decentral.py (from BigJob2)
import sys
import os
import time
sys.path.append(os.path.join(os.path.dirname(__file__), "../.."))
sys.path.insert(0, os.getcwd() + "/../")
from pilot import PilotComputeService, ComputeDataServiceDecentral, State
COORDINATION_URL = "redis://localhost:6379"
if __name__ == "__main__":
    # Start the pilot framework and launch two pilots on the local machine.
    pilot_compute_service = PilotComputeService(coordination_url=COORDINATION_URL)
    # Create pilot job service and initiate a pilot job.
    pilot_compute_description = {
        "service_url": 'fork://localhost',
        "number_of_processes": 1,
        "working_directory": os.path.join(os.getcwd(), "work"),
        'affinity_datacenter_label': "eu-de-south",
        'affinity_machine_label': "mymachine"
    }
    pilotjob = pilot_compute_service.create_pilot(pilot_compute_description=pilot_compute_description)
    pilotjob2 = pilot_compute_service.create_pilot(pilot_compute_description=pilot_compute_description)
    compute_data_service = ComputeDataServiceDecentral()
    compute_data_service.add_pilot_compute_service(pilot_compute_service)
    # Submit a batch of trivial compute units.
    compute_unit_description = {
        "executable": "/bin/sleep",
        "arguments": ["0"],
        "number_of_processes": 1,
        "output": "stdout.txt",
        "error": "stderr.txt",
    }
    for i in range(0, 14):
        compute_unit = compute_data_service.submit_compute_unit(compute_unit_description)
    print("Finished setup. Waiting for scheduling of CU")
    compute_data_service.wait()
    # Poll the last-submitted compute unit until it reaches the Done state.
    # (The original compared the unit object itself to ``State.Done``, which
    # is always unequal; compare its state instead. It also used Python-2
    # ``print`` statements, a SyntaxError under Python 3.)
    while compute_unit.get_state() != State.Done:
        print("Final state check...")
        state_cu = compute_unit.get_state()
        print("PCS State %s" % pilot_compute_service)
        print("CU: %s State: %s" % (compute_unit, state_cu))
        if state_cu == State.Done:
            break
        time.sleep(2)
    print("Terminate Pilot Compute and Compute Data Service")
    compute_data_service.cancel()
    pilot_compute_service.cancel()
/Caroline-presentation-0.2.4.tar.gz/Caroline-presentation-0.2.4/caroline/html_dist/js/mathjax/input/tex/extensions/unicode.js | !function(a){var n={};function o(t){if(n[t])return n[t].exports;var e=n[t]={i:t,l:!1,exports:{}};return a[t].call(e.exports,e,e.exports,o),e.l=!0,e.exports}o.m=a,o.c=n,o.d=function(t,e,a){o.o(t,e)||Object.defineProperty(t,e,{enumerable:!0,get:a})},o.r=function(t){"undefined"!=typeof Symbol&&Symbol.toStringTag&&Object.defineProperty(t,Symbol.toStringTag,{value:"Module"}),Object.defineProperty(t,"__esModule",{value:!0})},o.t=function(e,t){if(1&t&&(e=o(e)),8&t)return e;if(4&t&&"object"==typeof e&&e&&e.__esModule)return e;var a=Object.create(null);if(o.r(a),Object.defineProperty(a,"default",{enumerable:!0,value:e}),2&t&&"string"!=typeof e)for(var n in e)o.d(a,n,function(t){return e[t]}.bind(null,n));return a},o.n=function(t){var e=t&&t.__esModule?function(){return t.default}:function(){return t};return o.d(e,"a",e),e},o.o=function(t,e){return Object.prototype.hasOwnProperty.call(t,e)},o.p="",o(o.s=0)}([function(t,e,a){"use strict";a(1)},function(t,e,a){"use strict";var n=a(2),o=function(t){{if(t&&t.__esModule)return t;var e={};if(null!=t)for(var a in t)Object.prototype.hasOwnProperty.call(t,a)&&(e[a]=t[a]);return e.default=t,e}}(a(3));(0,n.combineWithMathJax)({_:{input:{tex:{unicode:{UnicodeConfiguration:o}}}}})},function(t,e,a){"use strict";Object.defineProperty(e,"__esModule",{value:!0}),e.combineConfig=MathJax._.components.global.combineConfig,e.combineDefaults=MathJax._.components.global.combineDefaults,e.combineWithMathJax=MathJax._.components.global.combineWithMathJax,e.MathJax=MathJax._.components.global.MathJax},function(t,e,a){"use strict";Object.defineProperty(e,"__esModule",{value:!0});var n=a(4),p=a(5),o=a(6),d=a(7),s=a(8),f=a(9);e.UnicodeMethods={};var M={};e.UnicodeMethods.Unicode=function(t,e){var a=t.GetBrackets(e),n=null,o=null;a&&(o=a.replace(/ /g,"").match(/^(\d+(\.\d*)?|\.\d+),(\d+(\.\d*)?|\.\d+)$/)?(n=a.replace(/ 
/g,"").split(/,/),t.GetBrackets(e)):a);var i=d.default.trimSpaces(t.GetArgument(e)).replace(/^0x/,"x");if(!i.match(/^(x[0-9A-Fa-f]+|[0-9]+)$/))throw new p.default("BadUnicode","Argument to \\unicode must be a number");var r=parseInt(i.match(/^x/)?"0"+i:i);M[r]?o=o||M[r][2]:M[r]=[800,200,o,r],n&&(M[r][0]=Math.floor(1e3*parseFloat(n[0])),M[r][1]=Math.floor(1e3*parseFloat(n[1])));var u=t.stack.env.font,l={};o?(M[r][2]=l.fontfamily=o.replace(/'/g,"'"),u&&(u.match(/bold/)&&(l.fontweight="bold"),u.match(/italic|-mathit/)&&(l.fontstyle="italic"))):u&&(l.mathvariant=u);var c=t.create("token","mtext",l,f.numeric(i));s.default.setProperty(c,"unicode",!0),t.Push(c)},new o.CommandMap("unicode",{unicode:"Unicode"},e.UnicodeMethods),e.UnicodeConfiguration=n.Configuration.create("unicode",{handler:{macro:["unicode"]}})},function(t,e,a){"use strict";Object.defineProperty(e,"__esModule",{value:!0}),e.Configuration=MathJax._.input.tex.Configuration.Configuration,e.ConfigurationHandler=MathJax._.input.tex.Configuration.ConfigurationHandler},function(t,e,a){"use strict";Object.defineProperty(e,"__esModule",{value:!0}),e.default=MathJax._.input.tex.TexError.default},function(t,e,a){"use strict";Object.defineProperty(e,"__esModule",{value:!0}),e.AbstractSymbolMap=MathJax._.input.tex.SymbolMap.AbstractSymbolMap,e.RegExpMap=MathJax._.input.tex.SymbolMap.RegExpMap,e.AbstractParseMap=MathJax._.input.tex.SymbolMap.AbstractParseMap,e.CharacterMap=MathJax._.input.tex.SymbolMap.CharacterMap,e.DelimiterMap=MathJax._.input.tex.SymbolMap.DelimiterMap,e.MacroMap=MathJax._.input.tex.SymbolMap.MacroMap,e.CommandMap=MathJax._.input.tex.SymbolMap.CommandMap,e.EnvironmentMap=MathJax._.input.tex.SymbolMap.EnvironmentMap},function(t,e,a){"use strict";Object.defineProperty(e,"__esModule",{value:!0}),e.default=MathJax._.input.tex.ParseUtil.default},function(t,e,a){"use strict";Object.defineProperty(e,"__esModule",{value:!0}),e.default=MathJax._.input.tex.NodeUtil.default},function(t,e,a){"use 
strict";Object.defineProperty(e,"__esModule",{value:!0}),e.options=MathJax._.util.Entities.options,e.entities=MathJax._.util.Entities.entities,e.add=MathJax._.util.Entities.add,e.remove=MathJax._.util.Entities.remove,e.translate=MathJax._.util.Entities.translate,e.numeric=MathJax._.util.Entities.numeric}]); | PypiClean |
# bin/KingSnake.py (from KingSnake 2.0.0)
import pickle
import sys
import time
from king_snake.player import Player
from king_snake.chessboard import Chessboard
from king_snake.errors import ChessError
class ChessGame(object):
    """A chess game manager.

    Drives an interactive console chess session: the game loop is
    implemented by ``get_move`` recursing into itself after each turn.
    NOTE: this is Python 2 code (``raw_input``).
    """

    def __init__(self):
        # Create both players, wire them to a fresh board, and start playing.
        self.white, self.black = Player(), Player()
        self.chessboard = Chessboard()
        self.chessboard.add_players(self.white, self.black)
        self.greet()

    def greet(self):
        """Greet the players and begin game."""
        print("Welcome to KingSnake.\n"
              "Enter moves directly or press Enter to enter the menu.")
        self.get_move()

    def get_move(self, message=""):
        """Show board and prompt for current move, then execute it.

        *message* is status text (e.g. an error from the previous attempt)
        displayed above the board. After a valid move, recurses to run the
        next turn.
        """
        valid_move = False
        while not valid_move:
            self.show(message)
            move = raw_input("Please enter your move (e.g. E2 E4) or enter to "
                             "access the menu: ")
            if not move:
                # Empty input opens the menu.
                self.menu()
            else:
                try:
                    start_position, end_position = move.split()
                    self.chessboard.current_player.move(start_position.upper(),
                                                        end_position.upper())
                    valid_move = True
                except ValueError:
                    # Raised by ``move.split()`` when the input isn't two
                    # whitespace-separated tokens.
                    message = ("Please enter your move in the following form:"
                               "\n{start position} {end position}\n"
                               "Positions are notated using their letter "
                               "followed by their number.\n"
                               "Example valid move request to move from A1 to "
                               "A2: 'A1 A2'")
                except KeyError:
                    # Raised when a position isn't a real board field.
                    message = "Only valid fields are allowed."
                except ChessError as error:
                    # Illegal chess move; show the engine's explanation.
                    message = error
        # Next turn. NOTE(review): the game loop is recursive, so a very long
        # game could in principle hit Python's recursion limit.
        self.get_move(message)

    def menu(self):
        """Allow user to do something other than move pieces"""

        def quit_game():
            """Quit game."""
            sys.exit()

        def resign():
            """Resign."""
            print("{} resigns. {} is the winner!".format(
                self.chessboard.current_player.color.capitalize(),
                self.chessboard.current_player.opponent.color.capitalize()))
            time.sleep(10)
            quit_game()

        def restart():
            """Start new game."""
            self.__init__()

        def save_game():
            """Save game to file."""
            file_name = raw_input("What file would you like to save to?: ")
            try:
                with open(file_name, "w") as saved_game:
                    pickle.dump(self.chessboard, saved_game)
            except IOError:
                self.get_move("The file you have chosen is invalid. "
                              "Please enter a valid filename.")
            self.get_move("Game saved to {}.".format(file_name))

        def load_game():
            """Load game from file."""
            file_name = raw_input("What file would you like to load from?: ")
            try:
                with open(file_name) as saved_game:
                    self.chessboard = pickle.load(saved_game)
                self.white = self.chessboard.players["white"]
                self.black = self.chessboard.players["black"]
            except IOError:
                self.get_move("The file you have chosen is invalid. "
                              "Please enter a valid filename.")

        def undo_turn():
            """Undo turn."""
            self.chessboard.rollback()
            self.get_move("Move restored.")

        def return_to_game():
            """Resume play."""
            self.get_move()

        # Build the menu from the functions above; each entry's label is the
        # function's own docstring.
        menu_choices = []
        for function in (quit_game, resign, restart, save_game, load_game,
                         undo_turn, return_to_game):
            menu_choices.append((function, function.__doc__))
        valid_choice = False
        while not valid_choice:
            for number, choice in enumerate(menu_choices):
                print("{}. {}".format(number + 1, choice[1]))
            try:
                # Menu is 1-based for the user; convert to a 0-based index.
                # NOTE(review): entering 0 yields index -1, which silently
                # selects the last entry ("Resume play") -- confirm intended.
                decision = int(raw_input("What would you like to do?: ")) - 1
                try:
                    valid_choice = True
                    menu_choices[decision][0]()
                except IndexError:
                    valid_choice = False
                    print("Please enter a valid menu number.")
            except (ValueError, IndexError):
                print("Please enter a valid menu number.")

    def show(self, message=""):
        """Show chessboard and print current player."""
        print(self.chessboard)
        print("{}\n"
              "It's {}'s turn.".format(message,
                                       self.chessboard.current_player.color))
# Launch an interactive game when the script is imported/run.
# (Removed a stray "| PypiClean |" packaging artifact that was fused to this
# line and would be a SyntaxError.)
game = ChessGame()
# aerosandbox/aerodynamics/aero_2D/airfoil_inviscid.py (from AeroSandbox 4.1.1)
from aerosandbox.common import *
from aerosandbox.geometry import Airfoil
from aerosandbox.performance import OperatingPoint
from aerosandbox.aerodynamics.aero_2D.singularities import calculate_induced_velocity_line_singularities
import aerosandbox.numpy as np
from typing import Union, List, Optional
class AirfoilInviscid(ImplicitAnalysis):
    """
    An implicit analysis for inviscid analysis of an airfoil (or family of airfoils).

    Solves a vortex-panel method: a vortex-strength distribution ``gamma`` on
    each airfoil surface is found such that flow tangency holds at every panel
    midpoint, plus the Kutta condition at each trailing edge.

    Key outputs:

        * AirfoilInviscid.Cl

    """

    @ImplicitAnalysis.initialize
    def __init__(self,
                 airfoil: Union[Airfoil, List[Airfoil]],
                 op_point: OperatingPoint,
                 ground_effect: bool = False,
                 ):
        # Accept either a single airfoil or a list; normalize to a list.
        if isinstance(airfoil, Airfoil):
            self.airfoils = [airfoil]
        else:
            self.airfoils = airfoil
        self.op_point = op_point
        # If True, mirror-image singularities across y=0 model a ground plane.
        self.ground_effect = ground_effect
        # Build unknowns, constraints, and outputs on the (decorator-provided)
        # optimization environment ``self.opti``.
        self._setup_unknowns()
        self._enforce_governing_equations()
        self._calculate_forces()

    def __repr__(self):
        return self.__class__.__name__ + "(\n\t" + "\n\t".join([
            f"airfoils={self.airfoils}",
            f"op_point={self.op_point}",
        ]) + "\n)"

    def _setup_unknowns(self):
        """Declare one vortex-strength unknown per surface point; sources are zero."""
        for airfoil in self.airfoils:
            airfoil.gamma = self.opti.variable(
                init_guess=0,
                scale=self.op_point.velocity,
                n_vars=airfoil.n_points()
            )
            airfoil.sigma = np.zeros(airfoil.n_points())

    def calculate_velocity(self,
                           x_field,
                           y_field,
                           ) -> [np.ndarray, np.ndarray]:
        """Return the (u, v) velocity induced at the given field points.

        Sums the freestream, each airfoil's surface vortex/source sheets, a
        source across any open trailing edge, and (optionally) mirror images
        for ground effect.
        """
        ### Analyze the freestream
        u_freestream = self.op_point.velocity * np.cosd(self.op_point.alpha)
        v_freestream = self.op_point.velocity * np.sind(self.op_point.alpha)

        u_field = u_freestream
        v_field = v_freestream

        for airfoil in self.airfoils:
            ### Add in the influence of the vortices and sources on the airfoil surface
            u_field_induced, v_field_induced = calculate_induced_velocity_line_singularities(
                x_field=x_field,
                y_field=y_field,
                x_panels=airfoil.x(),
                y_panels=airfoil.y(),
                gamma=airfoil.gamma,
                sigma=airfoil.sigma,
            )

            u_field = u_field + u_field_induced
            v_field = v_field + v_field_induced

            ### Add in the influence of a source across the open trailing-edge panel.
            if airfoil.TE_thickness() != 0:
                u_field_induced_TE, v_field_induced_TE = calculate_induced_velocity_line_singularities(
                    x_field=x_field,
                    y_field=y_field,
                    x_panels=[airfoil.x()[0], airfoil.x()[-1]],
                    y_panels=[airfoil.y()[0], airfoil.y()[-1]],
                    gamma=[0, 0],
                    sigma=[airfoil.gamma[0], airfoil.gamma[0]]
                )

                u_field = u_field + u_field_induced_TE
                v_field = v_field + v_field_induced_TE

            if self.ground_effect:
                # Mirror-image singularities across y = 0 (negated y and
                # gamma) enforce the ground-plane boundary condition.

                ### Add in the influence of the vortices and sources on the airfoil surface
                u_field_induced, v_field_induced = calculate_induced_velocity_line_singularities(
                    x_field=x_field,
                    y_field=y_field,
                    x_panels=airfoil.x(),
                    y_panels=-airfoil.y(),
                    gamma=-airfoil.gamma,
                    sigma=airfoil.sigma,
                )

                u_field = u_field + u_field_induced
                v_field = v_field + v_field_induced

                ### Add in the influence of a source across the open trailing-edge panel.
                if airfoil.TE_thickness() != 0:
                    u_field_induced_TE, v_field_induced_TE = calculate_induced_velocity_line_singularities(
                        x_field=x_field,
                        y_field=y_field,
                        x_panels=[airfoil.x()[0], airfoil.x()[-1]],
                        y_panels=-1 * np.array([airfoil.y()[0], airfoil.y()[-1]]),
                        gamma=[0, 0],
                        sigma=[airfoil.gamma[0], airfoil.gamma[0]]
                    )

                    u_field = u_field + u_field_induced_TE
                    v_field = v_field + v_field_induced_TE

        return u_field, v_field

    def _enforce_governing_equations(self):
        """Impose flow tangency at panel midpoints and the Kutta condition."""
        for airfoil in self.airfoils:
            ### Compute normal velocities at the middle of each panel
            # NOTE(review): ``np.trapz`` here is aerosandbox.numpy's version,
            # which appears to be used to get panel midpoints
            # ((x[1:] + x[:-1]) / 2); stdlib numpy.trapz would return a scalar
            # integral instead -- confirm against aerosandbox.numpy.
            x_midpoints = np.trapz(airfoil.x())
            y_midpoints = np.trapz(airfoil.y())

            u_midpoints, v_midpoints = self.calculate_velocity(
                x_field=x_midpoints,
                y_field=y_midpoints,
            )

            # Unit tangent (xp_hat) and normal (yp_hat) vectors per panel.
            panel_dx = np.diff(airfoil.x())
            panel_dy = np.diff(airfoil.y())
            panel_length = (panel_dx ** 2 + panel_dy ** 2) ** 0.5

            xp_hat_x = panel_dx / panel_length  # x-coordinate of the xp_hat vector
            xp_hat_y = panel_dy / panel_length  # y-coordinate of the yp_hat vector

            yp_hat_x = -xp_hat_y
            yp_hat_y = xp_hat_x

            normal_velocities = u_midpoints * yp_hat_x + v_midpoints * yp_hat_y

            ### Add in flow tangency constraint
            self.opti.subject_to(normal_velocities == 0)

            ### Add in Kutta condition
            self.opti.subject_to(airfoil.gamma[0] + airfoil.gamma[-1] == 0)

    def _calculate_forces(self):
        """Compute per-airfoil circulation and lift coefficients, and the total."""
        for airfoil in self.airfoils:
            panel_dx = np.diff(airfoil.x())
            panel_dy = np.diff(airfoil.y())
            panel_length = (panel_dx ** 2 + panel_dy ** 2) ** 0.5

            ### Sum up the vorticity on this airfoil by integrating
            # Trapezoidal integration of gamma along the surface.
            airfoil.vorticity = np.sum(
                (airfoil.gamma[1:] + airfoil.gamma[:-1]) / 2 *
                panel_length
            )

            # Kutta-Joukowski: Cl proportional to circulation.
            airfoil.Cl = 2 * airfoil.vorticity  # TODO normalize by chord and freestream velocity etc.

        self.total_vorticity = sum([airfoil.vorticity for airfoil in self.airfoils])
        self.Cl = 2 * self.total_vorticity

    def draw_streamlines(self, res=200, show=True):
        """Plot the velocity field as streamlines colored by relative airspeed.

        *res* is the vertical grid resolution; *show* controls plt.show().
        """
        import matplotlib.pyplot as plt
        fig, ax = plt.subplots(1, 1, figsize=(6.4, 4.8), dpi=200)

        plt.xlim(-0.5, 1.5)
        plt.ylim(-0.5, 0.5)
        xrng = np.diff(np.array(ax.get_xlim()))
        yrng = np.diff(np.array(ax.get_ylim()))

        # Grid matched to the axes' aspect ratio.
        x = np.linspace(*ax.get_xlim(), int(np.round(res * xrng / yrng)))
        y = np.linspace(*ax.get_ylim(), res)

        X, Y = np.meshgrid(x, y)
        shape = X.shape
        X = X.flatten()
        Y = Y.flatten()

        U, V = self.calculate_velocity(X, Y)
        X = X.reshape(shape)
        Y = Y.reshape(shape)
        U = U.reshape(shape)
        V = V.reshape(shape)

        # NaN out any points inside the airfoil
        for airfoil in self.airfoils:
            contains = airfoil.contains_points(X, Y)
            U[contains] = np.nan
            V[contains] = np.nan

        speed = (U ** 2 + V ** 2) ** 0.5
        Cp = 1 - speed ** 2

        ### Draw the airfoils
        for airfoil in self.airfoils:
            plt.fill(airfoil.x(), airfoil.y(), "k", linewidth=0, zorder=4)

        plt.streamplot(
            x,
            y,
            U,
            V,
            color=speed,
            density=2.5,
            arrowsize=0,
            cmap=plt.get_cmap('coolwarm_r'),
        )
        CB = plt.colorbar(
            orientation="horizontal",
            shrink=0.8,
            aspect=40,
        )
        CB.set_label(r"Relative Airspeed ($U/U_\infty$)")
        plt.clim(0.6, 1.4)
        plt.gca().set_aspect('equal', adjustable='box')
        plt.xlabel(r"$x/c$")
        plt.ylabel(r"$y/c$")
        plt.title(rf"Inviscid Airfoil: Flow Field")
        plt.tight_layout()
        if show:
            plt.show()

    def draw_cp(self, show=True):
        """Plot the surface pressure coefficient Cp (inverted y-axis, as is customary)."""
        import matplotlib.pyplot as plt
        fig, ax = plt.subplots(1, 1, figsize=(6.4, 4.8), dpi=200)
        for airfoil in self.airfoils:
            # On the surface, the tangential speed equals gamma.
            surface_speeds = airfoil.gamma
            C_p = 1 - surface_speeds ** 2

            plt.plot(airfoil.x(), C_p)

        plt.ylim(-4, 1.1)
        plt.gca().invert_yaxis()
        plt.xlabel(r"$x/c$")
        plt.ylabel(r"$C_p$")
        plt.title(r"$C_p$ on Surface")
        plt.tight_layout()
        if show:
            plt.show()
if __name__ == '__main__':
    # Demo 1: a two-element configuration (main element + deflected flap).
    # (Removed a stray "| PypiClean |" packaging artifact fused to the final
    # line, which made the file a SyntaxError.)
    a = AirfoilInviscid(
        airfoil=[
            # Airfoil("naca4408")
            # .repanel(50)
            Airfoil("e423")
            .repanel(n_points_per_side=50),
            Airfoil("naca6408")
            .repanel(n_points_per_side=50)
            .scale(0.4, 0.4)
            .rotate(np.radians(-25))
            .translate(0.9, -0.05),
        ],
        op_point=OperatingPoint(
            velocity=1,
            alpha=5,
        )
    )
    a.draw_streamlines()
    a.draw_cp()

    # Demo 2: a single airfoil, solved on an externally supplied Opti instance.
    from aerosandbox import Opti
    opti2 = Opti()
    b = AirfoilInviscid(
        airfoil=Airfoil("naca4408"),
        op_point=OperatingPoint(
            velocity=1,
            alpha=5
        ),
        opti=opti2
    )
/ClueDojo-1.4.3-1.tar.gz/ClueDojo-1.4.3-1/src/cluedojo/static/dojox/highlight/_base.js | if(!dojo._hasResource["dojox.highlight._base"]){
dojo._hasResource["dojox.highlight._base"]=true;
dojo.provide("dojox.highlight._base");
(function(){
var dh=dojox.highlight,_1="\\b(0x[A-Za-z0-9]+|\\d+(\\.\\d+)?)";
dh.constants={IDENT_RE:"[a-zA-Z][a-zA-Z0-9_]*",UNDERSCORE_IDENT_RE:"[a-zA-Z_][a-zA-Z0-9_]*",NUMBER_RE:"\\b\\d+(\\.\\d+)?",C_NUMBER_RE:_1,APOS_STRING_MODE:{className:"string",begin:"'",end:"'",illegal:"\\n",contains:["escape"],relevance:0},QUOTE_STRING_MODE:{className:"string",begin:"\"",end:"\"",illegal:"\\n",contains:["escape"],relevance:0},BACKSLASH_ESCAPE:{className:"escape",begin:"\\\\.",end:"^",relevance:0},C_LINE_COMMENT_MODE:{className:"comment",begin:"//",end:"$",relevance:0},C_BLOCK_COMMENT_MODE:{className:"comment",begin:"/\\*",end:"\\*/"},HASH_COMMENT_MODE:{className:"comment",begin:"#",end:"$"},C_NUMBER_MODE:{className:"number",begin:_1,end:"^",relevance:0}};
function _2(_3){
return _3.replace(/&/gm,"&").replace(/</gm,"<").replace(/>/gm,">");
};
function _4(_5){
return dojo.every(_5.childNodes,function(_6){
return _6.nodeType==3||String(_6.nodeName).toLowerCase()=="br";
});
};
function _7(_8){
var _9=[];
dojo.forEach(_8.childNodes,function(_a){
if(_a.nodeType==3){
_9.push(_a.nodeValue);
}else{
if(String(_a.nodeName).toLowerCase()=="br"){
_9.push("\n");
}else{
throw "Complex markup";
}
}
});
return _9.join("");
};
function _b(_c){
if(!_c.keywordGroups){
for(var _d in _c.keywords){
var kw=_c.keywords[_d];
if(kw instanceof Object){
_c.keywordGroups=_c.keywords;
}else{
_c.keywordGroups={keyword:_c.keywords};
}
break;
}
}
};
function _e(_f){
if(_f.defaultMode&&_f.modes){
_b(_f.defaultMode);
dojo.forEach(_f.modes,_b);
}
};
var _10=function(_11,_12){
this.langName=_11;
this.lang=dh.languages[_11];
this.modes=[this.lang.defaultMode];
this.relevance=0;
this.keywordCount=0;
this.result=[];
if(!this.lang.defaultMode.illegalRe){
this.buildRes();
_e(this.lang);
}
try{
this.highlight(_12);
this.result=this.result.join("");
}
catch(e){
if(e=="Illegal"){
this.relevance=0;
this.keywordCount=0;
this.partialResult=this.result.join("");
this.result=_2(_12);
}else{
throw e;
}
}
};
// Highlighter engine methods, mixed into the _10 constructor's prototype.
dojo.extend(_10,{buildRes:function(){
// Compile each mode's begin/end/illegal patterns into anchored RegExps.
dojo.forEach(this.lang.modes,function(_13){
if(_13.begin){
_13.beginRe=this.langRe("^"+_13.begin);
}
if(_13.end){
_13.endRe=this.langRe("^"+_13.end);
}
if(_13.illegal){
_13.illegalRe=this.langRe("^(?:"+_13.illegal+")");
}
},this);
// The default mode only needs its illegal pattern (it has no begin/end).
this.lang.defaultMode.illegalRe=this.langRe("^(?:"+this.lang.defaultMode.illegal+")");
},subMode:function(_14){
// Return the contained mode whose begin pattern matches the lexeme, if any.
var _15=this.modes[this.modes.length-1].contains;
if(_15){
var _16=this.lang.modes;
for(var i=0;i<_15.length;++i){
var _17=_15[i];
for(var j=0;j<_16.length;++j){
var _18=_16[j];
if(_18.className==_17&&_18.beginRe.test(_14)){
return _18;
}
}
}
}
return null;
},endOfMode:function(_19){
// How many stacked modes does this lexeme close? 0 means none; values >1
// occur when enclosing modes are flagged endsWithParent and close together.
for(var i=this.modes.length-1;i>=0;--i){
var _1a=this.modes[i];
if(_1a.end&&_1a.endRe.test(_19)){
return this.modes.length-i;
}
if(!_1a.endsWithParent){
break;
}
}
return 0;
},isIllegal:function(_1b){
// True when the lexeme is forbidden inside the current (topmost) mode.
var _1c=this.modes[this.modes.length-1].illegalRe;
return _1c&&_1c.test(_1b);
},langRe:function(_1d,_1e){
// Build a RegExp honouring the language's case sensitivity; _1e => global.
var _1f="m"+(this.lang.case_insensitive?"i":"")+(_1e?"g":"");
return new RegExp(_1d,_1f);
},buildTerminators:function(){
// Lazily build the union regexp of everything that can interrupt the
// current mode: begins of contained modes, ends of enclosing modes, and
// the mode's illegal pattern. Object keys are used to de-duplicate.
var _20=this.modes[this.modes.length-1],_21={};
if(_20.contains){
dojo.forEach(this.lang.modes,function(_22){
if(dojo.indexOf(_20.contains,_22.className)>=0){
_21[_22.begin]=1;
}
});
}
for(var i=this.modes.length-1;i>=0;--i){
var m=this.modes[i];
if(m.end){
_21[m.end]=1;
}
if(!m.endsWithParent){
break;
}
}
if(_20.illegal){
_21[_20.illegal]=1;
}
var t=[];
for(i in _21){
t.push(i);
}
_20.terminatorsRe=this.langRe("("+t.join("|")+")");
},eatModeChunk:function(_23,_24){
// Consume text from offset _24 up to (not including) the next terminator.
var _25=this.modes[this.modes.length-1];
if(!_25.terminatorsRe){
this.buildTerminators();
}
_23=_23.substr(_24);
var _26=_25.terminatorsRe.exec(_23);
if(!_26){
// No terminator left: the remainder of the input belongs to this mode.
return {buffer:_23,lexeme:"",end:true};
}
return {buffer:_26.index?_23.substr(0,_26.index):"",lexeme:_26[0],end:false};
},keywordMatch:function(_27,_28){
// Return the keyword group (used as the CSS class) the matched word
// belongs to, or "" when it is not a keyword of mode _27.
var _29=_28[0];
if(this.lang.case_insensitive){
_29=_29.toLowerCase();
}
for(var _2a in _27.keywordGroups){
if(_29 in _27.keywordGroups[_2a]){
return _2a;
}
}
return "";
},buildLexemes:function(_2b){
// Lazily compile the (de-duplicated) union regexp of a mode's lexemes.
var _2c={};
dojo.forEach(_2b.lexems,function(_2d){
_2c[_2d]=1;
});
var t=[];
for(var i in _2c){
t.push(i);
}
_2b.lexemsRe=this.langRe("("+t.join("|")+")",true);
},processKeywords:function(_2e){
// HTML-escape the text, wrapping recognized keywords in <span> tags and
// counting them for the relevance score.
var _2f=this.modes[this.modes.length-1];
if(!_2f.keywords||!_2f.lexems){
return _2(_2e);
}
if(!_2f.lexemsRe){
this.buildLexemes(_2f);
}
_2f.lexemsRe.lastIndex=0;
var _30=[],_31=0,_32=_2f.lexemsRe.exec(_2e);
while(_32){
_30.push(_2(_2e.substr(_31,_32.index-_31)));
var _33=this.keywordMatch(_2f,_32);
if(_33){
++this.keywordCount;
_30.push("<span class=\""+_33+"\">"+_2(_32[0])+"</span>");
}else{
_30.push(_2(_32[0]));
}
_31=_2f.lexemsRe.lastIndex;
_32=_2f.lexemsRe.exec(_2e);
}
// Trailing text after the last lexeme match.
_30.push(_2(_2e.substr(_31,_2e.length-_31)));
return _30.join("");
},processModeInfo:function(_34,_35,end){
// Advance the mode stack given a buffered text chunk and the lexeme that
// terminated it.
var _36=this.modes[this.modes.length-1];
if(end){
// End of input: flush whatever is buffered in the current mode.
this.result.push(this.processKeywords(_36.buffer+_34));
return;
}
if(this.isIllegal(_35)){
// Thrown as a string on purpose; the _10 constructor catches "Illegal".
throw "Illegal";
}
var _37=this.subMode(_35);
if(_37){
// Lexeme opens a nested mode: flush the parent buffer, emit the span.
_36.buffer+=_34;
this.result.push(this.processKeywords(_36.buffer));
if(_37.excludeBegin){
// The begin lexeme stays outside the highlighted span.
this.result.push(_35+"<span class=\""+_37.className+"\">");
_37.buffer="";
}else{
this.result.push("<span class=\""+_37.className+"\">");
_37.buffer=_35;
}
this.modes.push(_37);
this.relevance+=typeof _37.relevance=="number"?_37.relevance:1;
return;
}
var _38=this.endOfMode(_35);
if(_38){
// Lexeme closes _38 mode(s): flush the buffer, emit closing spans.
_36.buffer+=_34;
if(_36.excludeEnd){
this.result.push(this.processKeywords(_36.buffer)+"</span>"+_35);
}else{
this.result.push(this.processKeywords(_36.buffer+_35)+"</span>");
}
while(_38>1){
this.result.push("</span>");
--_38;
this.modes.pop();
}
this.modes.pop();
this.modes[this.modes.length-1].buffer="";
return;
}
},highlight:function(_39){
// Main loop: repeatedly chew off chunks until the input is exhausted.
var _3a=0;
this.lang.defaultMode.buffer="";
do{
var _3b=this.eatModeChunk(_39,_3a);
this.processModeInfo(_3b.buffer,_3b.lexeme,_3b.end);
_3a+=_3b.buffer.length+_3b.lexeme.length;
}while(!_3b.end);
if(this.modes.length>1){
// A nested mode was left unclosed at EOF -> not valid for this language.
throw "Illegal";
}
}});
// Write the highlighted markup back into the DOM. For a <pre><code> pair the
// whole pair is rebuilt (so the new class lands on a fresh <code> node);
// otherwise the node is updated in place.
function _3c(node, className, html){
    var tagName = String(node.tagName).toLowerCase();
    var parentTagName = String(node.parentNode.tagName).toLowerCase();
    if(tagName == "code" && parentTagName == "pre"){
        var container = document.createElement("div");
        var grandParent = node.parentNode.parentNode;
        container.innerHTML = "<pre><code class=\"" + className + "\">" + html + "</code></pre>";
        grandParent.replaceChild(container.firstChild, node.parentNode);
    }else{
        node.className = className;
        node.innerHTML = html;
    }
};
// Highlight "str" as the explicitly named language and return the outcome,
// including any partial markup produced before an "Illegal" fallback.
function _42(langName, str){
    var highlighter = new _10(langName, str);
    return {
        result: highlighter.result,
        langName: langName,
        partialResult: highlighter.partialResult
    };
};
// Highlight a DOM node as a known, explicitly given language (_7 extracts
// the node's source text) and write the markup back with _3c.
function _45(node, langName){
    var outcome = _42(langName, _7(node));
    _3c(node, node.className, outcome.result);
};
// Auto-detect the language of "str": run the highlighter for every known
// language and keep the one with the highest keywordCount + relevance score.
// BUG FIX: the running best score used to be re-declared (reset to 0) inside
// the loop, so the comparison was trivially true and the *last* language with
// any score won instead of the best one; the score now persists across
// iterations. Two dead locals (an unused threshold and an alias of the input)
// were also removed. The function name and return shape are unchanged.
function _49(str){
var _4a="",_4b="",_50=0;
for(var key in dh.languages){
if(!dh.languages[key].defaultMode){
// Skip helper entries that are not full language definitions.
continue;
}
var _4e=new _10(key,str),_4f=_4e.keywordCount+_4e.relevance;
if(!_4a||_4f>_50){
_50=_4f;
_4a=_4e.result;
_4b=_4e.langName;
}
}
return {result:_4a,langName:_4b};
};
// Highlight a DOM node with language auto-detection; when no language yields
// a usable result the node is left untouched.
function _51(node){
    var detected = _49(_7(node));
    if(!detected.result){
        return;
    }
    _3c(node, detected.langName, detected.result);
};
// Public API: highlight a plain string. With an explicit language name the
// string is processed as that language; otherwise it is auto-detected.
dojox.highlight.processString = function(str, langName){
    if(langName){
        return _42(langName, str);
    }
    return _49(str);
};
// Public API: highlight a single DOM node (or node id), unless it opts out
// via the "no-highlight" CSS class. A language may be named by one of the
// node's CSS classes; otherwise the language is auto-detected.
dojox.highlight.init=function(_55){
_55=dojo.byId(_55);
if(dojo.hasClass(_55,"no-highlight")){
return;
}
if(!_4(_55)){
// Node failed the _4 pre-flight predicate (defined earlier in this
// file -- confirm its exact criteria); skip it.
return;
}
// Try each CSS class as a language name; stop at the first known one.
// Classes starting with "_" are reserved and never treated as languages.
var _56=_55.className.split(/\s+/),_57=dojo.some(_56,function(_58){
if(_58.charAt(0)!="_"&&dh.languages[_58]){
_45(_55,_58);
return true;
}
return false;
});
if(!_57){
// No class named a known language: fall back to auto-detection.
_51(_55);
}
};
// Declarative hook so the highlighter can be instantiated from markup by the
// dojo parser: "p" (params) is unused, "n" is the DOM node to highlight.
// NOTE(review): dh appears to alias dojox.highlight (see dh.languages usage
// above), making dh.init the init function defined just before -- confirm.
dh.Code=function(p,n){
dh.init(n);
};
})();
} | PypiClean |
/AyiinXd-0.0.8-cp311-cp311-macosx_10_9_universal2.whl/fipper/node_modules/@types/node/ts4.8/index.d.ts |
// Reference required types from the default lib:
/// <reference lib="es2020" />
/// <reference lib="esnext.asynciterable" />
/// <reference lib="esnext.intl" />
/// <reference lib="esnext.bigint" />
// Base definitions for all NodeJS modules that are not specific to any version of TypeScript:
/// <reference path="assert.d.ts" />
/// <reference path="assert/strict.d.ts" />
/// <reference path="globals.d.ts" />
/// <reference path="async_hooks.d.ts" />
/// <reference path="buffer.d.ts" />
/// <reference path="child_process.d.ts" />
/// <reference path="cluster.d.ts" />
/// <reference path="console.d.ts" />
/// <reference path="constants.d.ts" />
/// <reference path="crypto.d.ts" />
/// <reference path="dgram.d.ts" />
/// <reference path="diagnostics_channel.d.ts" />
/// <reference path="dns.d.ts" />
/// <reference path="dns/promises.d.ts" />
/// <reference path="domain.d.ts" />
/// <reference path="dom-events.d.ts" />
/// <reference path="events.d.ts" />
/// <reference path="fs.d.ts" />
/// <reference path="fs/promises.d.ts" />
/// <reference path="http.d.ts" />
/// <reference path="http2.d.ts" />
/// <reference path="https.d.ts" />
/// <reference path="inspector.d.ts" />
/// <reference path="module.d.ts" />
/// <reference path="net.d.ts" />
/// <reference path="os.d.ts" />
/// <reference path="path.d.ts" />
/// <reference path="perf_hooks.d.ts" />
/// <reference path="process.d.ts" />
/// <reference path="punycode.d.ts" />
/// <reference path="querystring.d.ts" />
/// <reference path="readline.d.ts" />
/// <reference path="readline/promises.d.ts" />
/// <reference path="repl.d.ts" />
/// <reference path="stream.d.ts" />
/// <reference path="stream/promises.d.ts" />
/// <reference path="stream/consumers.d.ts" />
/// <reference path="stream/web.d.ts" />
/// <reference path="string_decoder.d.ts" />
/// <reference path="test.d.ts" />
/// <reference path="timers.d.ts" />
/// <reference path="timers/promises.d.ts" />
/// <reference path="tls.d.ts" />
/// <reference path="trace_events.d.ts" />
/// <reference path="tty.d.ts" />
/// <reference path="url.d.ts" />
/// <reference path="util.d.ts" />
/// <reference path="v8.d.ts" />
/// <reference path="vm.d.ts" />
/// <reference path="wasi.d.ts" />
/// <reference path="worker_threads.d.ts" />
/// <reference path="zlib.d.ts" />
/// <reference path="globals.global.d.ts" /> | PypiClean |
/ESMValCore-2.9.0rc1.tar.gz/ESMValCore-2.9.0rc1/doc/develop/preprocessor_function.rst | .. _preprocessor_function:
Preprocessor function
*********************
Preprocessor functions are located in :py:mod:`esmvalcore.preprocessor`.
To add a new preprocessor function, start by finding a likely looking file to
add your function to in
`esmvalcore/preprocessor <https://github.com/ESMValGroup/ESMValCore/tree/main/esmvalcore/preprocessor>`_.
Create a new file in that directory if you cannot find a suitable place.
The function should look like this:
.. code-block:: python
def example_preprocessor_function(
cube,
example_argument,
example_optional_argument=5,
):
"""Compute an example quantity.
A more extensive explanation of the computation can be added here. Add
references to scientific literature if available.
Parameters
----------
cube: iris.cube.Cube
Input cube.
example_argument: str
Example argument, the value of this argument can be provided in the
recipe. Describe what valid values are here. In this case, a valid
argument is the name of a dimension of the input cube.
example_optional_argument: int, optional
Another example argument, the value of this argument can optionally
be provided in the recipe. Describe what valid values are here.
Returns
-------
iris.cube.Cube
The result of the example computation.
"""
# Replace this with your own computation
cube = cube.collapsed(example_argument, iris.analysis.MEAN)
return cube
The above function needs to be imported in the file
`esmvalcore/preprocessor/__init__.py <https://github.com/ESMValGroup/ESMValCore/tree/main/esmvalcore/preprocessor/__init__.py>`__:
.. code-block:: python
from ._example_module import example_preprocessor_function
__all__ = [
...
'example_preprocessor_function',
...
]
The location in the ``__all__`` list above determines the default order in which
preprocessor functions are applied, so carefully consider where you put it
and ask for advice if needed.
The preprocessor function above can then be used from the :ref:`preprocessors`
like this:
.. code-block:: yaml
preprocessors:
example_preprocessor:
example_preprocessor_function:
example_argument: median
example_optional_argument: 6
The optional argument (in this example: ``example_optional_argument``) can be
omitted in the recipe.
Lazy and real data
==================
Preprocessor functions should support both
:ref:`real and lazy data <iris:real_and_lazy_data>`.
This is vital for supporting the large datasets that are typically used with
the ESMValCore.
If the data of the incoming cube has been realized (i.e. ``cube.has_lazy_data()``
returns ``False`` so ``cube.core_data()`` is a `NumPy <https://numpy.org/>`__
array), the returned cube should also have realized data.
Conversely, if the incoming cube has lazy data (i.e. ``cube.has_lazy_data()``
returns ``True`` so ``cube.core_data()`` is a
`Dask array <https://docs.dask.org/en/latest/array.html>`__), the returned
cube should also have lazy data.
Note that NumPy functions will often call their Dask equivalent if it exists
and if their input array is a Dask array, and vice versa.
Note that preprocessor functions should preferably be small and just call the
relevant :ref:`iris <iris_docs>` code.
Code that is more involved, e.g. lots of work with Numpy and Dask arrays,
and more broadly applicable, should be implemented in iris instead.
Metadata
========
Preprocessor functions may change the metadata of datasets.
An obvious example is :func:`~esmvalcore.preprocessor.convert_units`, which
changes units.
If cube metadata is changed in a preprocessor function, the :ref:`metadata.yml
<metadata_yml>` file is automatically updated with this information.
The following attributes are taken into account:
+------------------------------------+--------------------------------------------+
| Attribute in ``metadata.yml`` file | Updated from |
+====================================+============================================+
| ``standard_name`` | :attr:`iris.cube.Cube.standard_name` |
+------------------------------------+--------------------------------------------+
| ``long_name`` | :attr:`iris.cube.Cube.long_name` |
+------------------------------------+--------------------------------------------+
| ``short_name`` | :attr:`iris.cube.Cube.var_name` |
+------------------------------------+--------------------------------------------+
| ``units`` | :attr:`iris.cube.Cube.units` |
+------------------------------------+--------------------------------------------+
| ``frequency`` | ``iris.cube.Cube.attributes['frequency']`` |
+------------------------------------+--------------------------------------------+
If a given cube property is ``None``, the corresponding attribute is updated
with an empty string (``''``).
If a cube property is not given, the corresponding attribute is not updated.
Documentation
=============
The documentation in the function docstring will be shown in
the :ref:`preprocessor_functions` chapter.
In addition, you should add documentation on how to use the new preprocessor
function from the recipe in
`doc/recipe/preprocessor.rst <https://github.com/ESMValGroup/ESMValCore/tree/main/doc/recipe/preprocessor.rst>`__
so it is shown in the :ref:`preprocessor` chapter.
See the introduction to :ref:`documentation` for more information on how to
best write documentation.
Tests
=====
Tests should be implemented for new or modified preprocessor functions.
For an introduction to the topic, see :ref:`tests`.
Unit tests
----------
To add a unit test for the preprocessor function from the example above, create
a file called
``tests/unit/preprocessor/_example_module/test_example_preprocessor_function.py``
and add the following content:
.. code-block:: python
"""Test function `esmvalcore.preprocessor.example_preprocessor_function`."""
import cf_units
import dask.array as da
import iris
import numpy as np
import pytest
from esmvalcore.preprocessor import example_preprocessor_function
@pytest.mark.parametrize('lazy', [True, False])
def test_example_preprocessor_function(lazy):
"""Test that the computed result is as expected."""
# Construct the input cube
data = np.array([1, 2], dtype=np.float32)
if lazy:
data = da.asarray(data, chunks=(1, ))
cube = iris.cube.Cube(
data,
var_name='tas',
units='K',
)
cube.add_dim_coord(
iris.coords.DimCoord(
np.array([0.5, 1.5], dtype=np.float64),
bounds=np.array([[0, 1], [1, 2]], dtype=np.float64),
standard_name='time',
units=cf_units.Unit('days since 1950-01-01 00:00:00',
calendar='gregorian'),
),
0,
)
# Compute the result
result = example_preprocessor_function(cube, example_argument='time')
# Check that lazy data is returned if and only if the input is lazy
assert result.has_lazy_data() is lazy
# Construct the expected result cube
expected = iris.cube.Cube(
np.array(1.5, dtype=np.float32),
var_name='tas',
units='K',
)
expected.add_aux_coord(
iris.coords.AuxCoord(
np.array([1], dtype=np.float64),
bounds=np.array([[0, 2]], dtype=np.float64),
standard_name='time',
units=cf_units.Unit('days since 1950-01-01 00:00:00',
calendar='gregorian'),
))
expected.add_cell_method(
iris.coords.CellMethod(method='mean', coords=('time', )))
# Compare the result of the computation with the expected result
print('result:', result)
print('expected result:', expected)
assert result == expected
In this test we used the decorator
`pytest.mark.parametrize <https://docs.pytest.org/en/stable/parametrize.html>`_
to test two scenarios, with both lazy and realized data, with a single test.
Sample data tests
-----------------
The idea of adding :ref:`sample data tests <sample_data_tests>` is to check that
preprocessor functions work with realistic data.
This also provides an easy way to add regression tests, though these should
preferably be implemented as unit tests instead, because using the sample data
for this purpose is slow.
To add a test using the sample data, create a file
``tests/sample_data/preprocessor/example_preprocessor_function/test_example_preprocessor_function.py``
and add the following content:
.. code-block:: python
"""Test function `esmvalcore.preprocessor.example_preprocessor_function`."""
from pathlib import Path
import esmvaltool_sample_data
import iris
import pytest
from esmvalcore.preprocessor import example_preprocessor_function
@pytest.mark.use_sample_data
def test_example_preprocessor_function():
"""Regression test to check that the computed result is as expected."""
# Load an example input cube
cube = esmvaltool_sample_data.load_timeseries_cubes(mip_table='Amon')[0]
# Compute the result
result = example_preprocessor_function(cube, example_argument='time')
filename = Path(__file__).with_name('example_preprocessor_function.nc')
if not filename.exists():
# Create the file the expected result if it doesn't exist
iris.save(result, target=str(filename))
raise FileNotFoundError(
f'Reference data was missing, wrote new copy to {filename}')
# Load the expected result cube
expected = iris.load_cube(str(filename))
# Compare the result of the computation with the expected result
print('result:', result)
print('expected result:', expected)
assert result == expected
This will use a file from the sample data repository as input.
The first time you run the test, the computed result will be stored in the file
``tests/sample_data/preprocessor/example_preprocessor_function/example_preprocessor_function.nc``
Any subsequent runs will re-load the data from file and check that it did not
change.
Make sure the stored results are small, i.e. smaller than 100 kilobytes, to
keep the size of the ESMValCore repository small.
Using multiple datasets as input
================================
The name of the first argument of the preprocessor function should in almost all
cases be ``cube``.
Only when implementing a preprocessor function that uses all datasets as input,
the name of the first argument should be ``products``.
If you would like to implement this type of preprocessor function, start by
having a look at the existing functions, e.g.
:py:func:`esmvalcore.preprocessor.multi_model_statistics` or
:py:func:`esmvalcore.preprocessor.mask_fillvalues`.
| PypiClean |
/HybridUI-0.0.1.tar.gz/HybridUI-0.0.1/hybrid/interface.py | from .elements.affix import Affix as affix
from .elements.anchour import Anchor as anchor
from .elements.alert import Alert as alert
from .elements.badge import Badge as badge
from .elements.breadcrumb import Breadcrumb as breadcrumb
from .elements.button import Button as button
from .elements.calendar import Calendar as calendar
from .elements.card import Card as card
from .elements.carousel import Carousel as carousel
from .elements.cascder import Cascder as cascder
from .elements.checkbox import Checkbox as checkbox
from .elements.collapse import Collapse as collapse
from .elements.column import Column as column
from .elements.datepicker import DatePicker as datepicker
from .elements.divider import Divider as divider
from .elements.drawer import Drawer as drawer
from .elements.dropdown import DropDown as dropdown
from .elements.empty import Empty as empty
from .elements.floatbutton import FloatButton as floatbutton
from .elements.form import Form as form
from .elements.icons import Icons as icon
from .elements.image import Image as image
from .elements.input import Input as input
from .elements.input import TextArea as textarea
from .elements.input import INputNummber as inputnummber
from .elements.input import InputSearch as searchinput
from .elements.input import Password as passwordinput
from .elements.layout import Layout as layout
from .elements.content import Content as content
from .elements.footer import Footer as footer
from .elements.list import UIList as list
from .elements.mentions import Mentions as mentions
from .elements.menu import Menu as menu
from .elements.message import Message as message
from .elements.modal import Modal as modal
from .elements.navigation import Navigation as navigation
from .elements.pagination import Pagination as pagination
from .elements.popconfirm import Popconfirm as popconfirm
from .elements.popover import Popover as popover
from .elements.progress import Progress as progress
from .elements.qrcode import QRcode as qrcode
from .elements.radio import RadioGroup as radiogroup
from .elements.radio import ButtonRadio as buttonradio
from .elements.rate import Rate as rate
from .elements.player import Player as player
from .elements.result import Result as result
from .elements.row import Row as row
from .elements.timepicker import RangePicker as rangepicker
from .elements.segmented import Segmented as segmented
from .elements.select import Select as select
#from .elements.skeleton import Skeleton as skeleton
from .elements.slider import Slider as slider
from .elements.space import Space as space
from .elements.spin import Spin as spin
from .elements.statistic import Statistic as statistic
from .elements.steps import Steps as steps
from .elements.switch import Switch as switch
from .elements.table import Table as table
from .elements.tag import Tag as tag
from .elements.timeline import Timeline as timeline
from .elements.tooltip import Tooltip as tooltip
from .elements.tour import Tour as tour
from .elements.transfer import Transfer as transfer
from .elements.tree import Tree as tree
from .elements.treeselect import TreeSelect as treeselect
from .elements.typography import Typography as typography
from .elements.typography import Text as text
from .elements.timepicker import TimePicker as timepicker
from .elements.typography import Title as title
from .elements.typography import Paragraph as paragraph
from .elements.upload import Upload as upload
from .elements.watermark import Watermark as watermark
from .hybrid import UI as compiler
from .element import ReactNode as reactnode
#______________________charts_______________________
from .charts.all_charts import DonutChart as donutChart
from .charts.all_charts import DualmultilineChart as dualmultilineChart
from .charts.all_charts import AreaChart as areaChart
from .charts.all_charts import BarChart as barChart
from .charts.all_charts import BulletChart as bulletChart
from .charts.all_charts import ColumnChart as columnChart
from .charts.all_charts import DualAxesChart as dualAxesChart
from .charts.all_charts import FunnelChart as funnelChart
from .charts.all_charts import LineChart as lineChart
from .charts.all_charts import PieChart as pieChart
from .charts.all_charts import RingProgressChart as ringProgressChart
from .charts.all_charts import RoseChart as roseChart
from .charts.all_charts import ViolinChart as violinChart
from .charts.all_charts import GaugeGradientChart as gaugeGradientChart
from .charts.all_charts import GaugeChart as gaugeChart
from .style import Style
from .hybrid import UI
from socketio import AsyncServer
""" app = UI()
socket:AsyncServer = app.outbox_socketio """ | PypiClean |
/OSlash-0.6.3-py3-none-any.whl/oslash/writer.py | from typing import Callable, Tuple, Any, TypeVar, Generic, Union, cast
from .typing import Functor
from .typing import Monad
from .typing import Monoid
TLog = TypeVar("TLog", str, Monoid)
TSource = TypeVar("TSource")
TResult = TypeVar("TResult")
class Writer(Generic[TSource, TLog]):
    """The writer monad.

    Pairs a computed value with an accumulated log. :meth:`bind` threads
    the value through a function while concatenating the logs with ``+``
    (the monoid operation), so logging happens without side effects.
    """

    def __init__(self, value: TSource, log: TLog) -> None:
        """Initialize a new writer.

        Keyword arguments:
        value -- the wrapped computation result.
        log -- the log accumulated so far (``str`` or any monoid).
        """
        # The whole writer state is simply the (value, log) pair.
        self._value: Tuple[TSource, TLog] = (value, log)

    def map(self, func: Callable[[Tuple[TSource, TLog]], Tuple[TResult, TLog]]) -> 'Writer[TResult, TLog]':
        """Map a function func over the Writer value.

        Haskell:
        fmap f m = Writer $ let (a, w) = runWriter m in (f a, w)

        Keyword arguments:
        func -- Mapper function; note that (unlike Haskell's fmap) it
            receives the whole ``(value, log)`` pair and must return a
            new ``(value, log)`` pair.
        """
        a, w = self.run()
        b, _w = func((a, w))
        return Writer(b, _w)

    def bind(self, func: Callable[[TSource], 'Writer[TResult, TLog]']) -> 'Writer[TResult, TLog]':
        """Flat is better than nested.

        Haskell:
        (Writer (x, v)) >>= f = let
            (Writer (y, v')) = f x in Writer (y, v `append` v')
        """
        a, w = self.run()
        b, w_ = func(a).run()
        # Concatenate the two logs with the monoid operation.
        w__ = w + w_
        return Writer(b, w__)

    @classmethod
    def unit(cls, value: TSource) -> 'Writer[TSource, TLog]':
        """Wrap a single value in a Writer with an empty ``str`` log.

        Use the :meth:`create` factory method to create ``*Writer``
        classes that use a different monoid than ``str``, or use the
        constructor directly.
        """
        return Writer(value, log=cast(TLog, ""))

    def run(self) -> Tuple[TSource, TLog]:
        """Extract value from Writer.

        This is the inverse function of the constructor and converts the
        Writer to a simple ``(value, log)`` tuple.
        """
        return self._value

    @staticmethod
    def apply_log(a: tuple, func: Callable[[Any], Tuple[TSource, TLog]]) -> Tuple[TSource, TLog]:
        """Apply a function to a value with a log.

        Helper: ``a`` is a ``(value, log)`` tuple; ``func`` maps the
        value to ``(new_value, log_entry)``. Returns the new value with
        the entry appended to the log.
        """
        value, log = a
        new, entry = func(value)
        return new, log + entry

    @classmethod
    def create(cls, class_name: str, monoid_type: type = str):
        """Create a Writer subclass using the specified monoid type.

        Lets us create a Writer that uses a different monoid than ``str``
        for the log.

        BUG FIX: the default used to be the ``typing`` object
        ``Union[Monoid, str]``, which has no ``empty`` and cannot be
        instantiated, so ``unit`` on a class created without an explicit
        ``monoid_type`` always raised. ``str`` restores the documented
        default behaviour; explicit callers are unaffected.

        Usage:
            StringWriter = Writer.create("StringWriter", str)
            IntWriter = Writer.create("IntWriter", int)
            ...
        """

        def unit(cls, value):
            # Monoids expose their identity element via ``empty``; plain
            # types (str, int, list, ...) produce it via their no-arg
            # constructor.
            if hasattr(monoid_type, "empty"):
                log = monoid_type.empty()
            else:
                log = monoid_type()
            return cls(value, log)

        return type(class_name, (Writer, ), dict(unit=classmethod(unit)))

    def __eq__(self, other):
        # BUG FIX: comparing against a non-Writer used to raise
        # AttributeError (no ``run``); returning NotImplemented lets
        # Python fall back to the other operand / identity comparison.
        if not isinstance(other, Writer):
            return NotImplemented
        return self.run() == other.run()

    def __str__(self) -> str:
        return "%s :: %s" % self.run()

    def __repr__(self) -> str:
        return str(self)
class MonadWriter(Writer[Any, TLog]):
    """Writer extended with Haskell's ``MonadWriter`` helper operations."""

    @classmethod
    def tell(cls, log: TLog) -> 'MonadWriter':
        """Return a writer carrying no value (``None``) whose only effect
        is to append ``log`` to the accumulated output (Haskell ``tell``)."""
        return cls(None, log)
# Ready-made Writer whose log is a plain ``str`` (monoid under ``+``).
StringWriter = Writer.create("StringWriter", str)

# Sanity check: the Writer class structurally satisfies the Functor protocol.
assert(isinstance(Writer, Functor))
assert(isinstance(Writer, Monad)) | PypiClean |
/Grid2Op-1.9.3-py3-none-any.whl/grid2op/Episode/EpisodeReplay.py | import os
import warnings
import time
import imageio
import argparse
from grid2op.Exceptions import Grid2OpException
from grid2op.PlotGrid.PlotMatplot import PlotMatplot
from grid2op.Episode.EpisodeData import EpisodeData
class EpisodeReplay(object):
    """
    This class allows to see visually what an agent has done during an episode. It uses for now the "PlotMatplot" as the
    method to plot the different states of the system. It reads directly data from the runner.

    Examples
    --------
    It can be used the following manner.

    .. code-block:: python

        import grid2op
        agent_class = grid2op.Agent.DoNothingAgent  # change that for studying other agent
        env = grid2op.make()  # make the default environment
        runner = grid2op.Runner.Runner(**env.get_params_for_runner(), agentClass=agent_class)
        path_log = "agent_log"  # where the runner will output the standardized data when running the agent.
        res = runner.run(nb_episode=1, path_save=path_log)

        # and when it's done, you can visualize it this way:
        episode_replay = EpisodeReplay(agent_path=path_log)
        episode_id = res[0][1]
        episode_replay.plot_episode(episode_id, max_fps=10)

        # you can pause by clicking the "space" key
        # At any time, you can quit by pressing the "esc" key or the "exit" button of the window.

    Attributes
    ----------
    agent_path: ``str``
        The path where the logs of the agent are stored. It is recommended to use a :class:`grid2op.Runner.Runner`
        to save the logs of the agent.

    episode_data: :class:`grid2op.EpisodeData.EpisodeData`, optional
        The last data of the episode inspected.
    """

    def __init__(self, agent_path):
        # Fail fast: everything else in this class assumes the runner logs
        # exist at this location.
        if not os.path.exists(agent_path):
            raise Grid2OpException(
                'Nothing is found at "{}" where an agent path should have been.'.format(
                    agent_path
                )
            )
        self.agent_path = agent_path
        self.episode_data = None

    def replay_episode(
        self,
        episode_id,
        fps=2.0,
        gif_name=None,
        display=True,
        start_step=0,
        end_step=-1,
        line_info="rho",
        load_info="p",
        gen_info="p",
        resolution=(1280, 720),
    ):
        """
        When called, this function will start the display of the episode in a "mini movie" format.

        Parameters
        ----------
        episode_id: ``str``
            ID of the episode to replay

        fps: ``float``
            Frames per second. When it's low, you will have more time to look at each frame, but the episode
            will last longer. When it's high, episode will be faster, but frames will stay less time on the screen.

        gif_name: ``str``
            If provided, a .gif file is saved in the episode folder with the name :gif_name:.
            The .gif extension is happened by this function

        start_step: ``int``
            Default to 0. The step at which to start generating the gif

        end_step: ``int``
            Default to -1. The step at which to stop generating the gif.
            Set to -1 to specify no limit

        load_info: ``str``
            Defaults to "p". What kind of values to show on loads.
            Can be one of `["p", "v", None]`

        gen_info: ``str``
            Defaults to "p". What kind of values to show on generators.
            Can be one of `["p", "v", None]`

        line_info: ``str``
            Defaults to "rho". What kind of values to show on lines.
            Can be one of `["rho", "a", "p", "v", None]`

        resolution: ``tuple``
            Defaults to (1280, 720). The resolution to use for the gif.
        """
        # Check args
        path_ep = os.path.join(self.agent_path, episode_id)
        if not os.path.exists(path_ep):
            raise Grid2OpException('No episode is found at "{}".'.format(path_ep))

        # Load episode observations
        self.episode_data = EpisodeData.from_disk(
            agent_path=self.agent_path, name=episode_id
        )
        all_obs = [el for el in self.episode_data.observations]

        # Create a plotter
        width, height = resolution
        plot_runner = PlotMatplot(
            self.episode_data.observation_space,
            width=width,
            height=height,
            load_name=False,
            gen_name=False,
        )

        # Some vars for gif export if enabled
        frames = []
        gif_path = None
        if gif_name is not None:
            gif_path = os.path.join(path_ep, gif_name + ".gif")

        # Render loop
        figure = None
        time_per_frame = 1.0 / fps
        for step, obs in enumerate(all_obs):
            # Skip up to start_step
            if step < start_step:
                continue
            # Terminate if reached end_step
            if end_step > 0 and step >= end_step:
                break
            # Get a timestamp for current frame
            start_time = time.perf_counter()

            # Render the observation
            fig = plot_runner.plot_obs(
                observation=obs,
                line_info=line_info,
                gen_info=gen_info,
                load_info=load_info,
                figure=figure,
                redraw=True,
            )
            # First frame creates the window; later frames only redraw it.
            if figure is None and display:
                fig.show()
            elif display:
                fig.canvas.draw()

            # Store figure for re-use
            figure = fig
            # Save pixel array if needed
            if gif_name is not None:
                frames.append(plot_runner.convert_figure_to_numpy_HWC(figure))

            # Get the timestamp after frame is rendered
            end_time = time.perf_counter()
            delta_time = end_time - start_time

            # Cap fps for display mode
            if display:
                wait_time = time_per_frame - delta_time
                if wait_time > 0.0:
                    time.sleep(wait_time)

        # Export all frames as gif if enabled
        if gif_name is not None and len(frames) > 0:
            try:
                imageio.mimwrite(gif_path, frames, fps=fps)
                # Try to compress
                try:
                    from pygifsicle import optimize

                    optimize(gif_path, options=["-w", "--no-conserve-memory"])
                except Exception:
                    # BUG FIX: this was a bare ``except:``, which also
                    # swallowed KeyboardInterrupt/SystemExit; only failures
                    # of the optional pygifsicle dependency (missing import
                    # or optimization error) should be tolerated here.
                    warn_msg = (
                        "Failed to optimize .GIF size, but gif is still saved:\n"
                        "Install dependencies to reduce size by ~3 folds\n"
                        "apt-get install gifsicle && pip3 install pygifsicle"
                    )
                    warnings.warn(warn_msg)
            except Exception as e:
                warnings.warn("Impossible to save gif with error :\n{}".format(e))
def episode_replay_cli():
    """Build and evaluate the command-line parser for the replay tool.

    Returns
    -------
    argparse.Namespace
        Parsed options read from ``sys.argv``: required ``--agent_path``
        and ``--episode_id``, plus optional rendering/export settings.
    """
    cli = argparse.ArgumentParser(description="EpisodeReplay")
    # Mandatory: which agent log directory and which episode to replay.
    for required_flag in ("--agent_path", "--episode_id"):
        cli.add_argument(required_flag, required=True, type=str)
    # Optional rendering / gif-export tuning.
    cli.add_argument("--display", required=False, default=False, action="store_true")
    cli.add_argument("--fps", required=False, default=2.0, type=float)
    cli.add_argument("--gif_name", required=False, default=None, type=str)
    cli.add_argument("--gif_start", required=False, default=0, type=int)
    cli.add_argument("--gif_end", required=False, default=-1, type=int)
    return cli.parse_args()
def main(args=None):
    """Entry point: replay one episode described by CLI-style arguments.

    Parameters
    ----------
    args: argparse.Namespace, optional
        Pre-parsed arguments; when ``None`` they are read from the
        command line via :func:`episode_replay_cli`.
    """
    options = episode_replay_cli() if args is None else args
    replayer = EpisodeReplay(options.agent_path)
    replayer.replay_episode(
        options.episode_id,
        fps=options.fps,
        gif_name=options.gif_name,
        start_step=options.gif_start,
        end_step=options.gif_end,
        display=options.display,
    )
# Dev / Test by running this file
if __name__ == "__main__":
args = episode_replay_cli()
main(args) | PypiClean |
/Altair%20Smartworks%20SDK-0.0.1.tar.gz/Altair Smartworks SDK-0.0.1/openapi_client/model/thing_status_response.py | import re # noqa: F401
import sys # noqa: F401
from openapi_client.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
def lazy_import():
    """Import the model classes this schema references only when first
    needed, breaking the circular imports common in generated clients."""
    from openapi_client.model.properties import Properties
    # Publish the class at module scope so openapi_types() can resolve it.
    globals()['Properties'] = Properties
class ThingStatusResponse(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
additional_properties_type = None
_nullable = False
    @cached_property
    def openapi_types():
        """
        This must be a method because a model may have properties that are
        of type self, this must run after the class is loaded

        Returns
            openapi_types (dict): The key is attribute name
                and the value is attribute type.
        """
        # Resolve cross-referenced model classes only now (after class load)
        # to avoid circular imports between generated modules.
        lazy_import()
        return {
            'uid': (str,), # noqa: E501
            'title': (str,), # noqa: E501
            'space': (str,), # noqa: E501
            'collection': (str,), # noqa: E501
            'properties': (Properties,), # noqa: E501
        }
@cached_property
def discriminator():
return None
attribute_map = {
'uid': 'uid', # noqa: E501
'title': 'title', # noqa: E501
'space': 'space', # noqa: E501
'collection': 'collection', # noqa: E501
'properties': 'properties', # noqa: E501
}
_composed_schemas = {}
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""ThingStatusResponse - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
uid (str): [optional] # noqa: E501
title (str): [optional] # noqa: E501
space (str): [optional] # noqa: E501
collection (str): [optional] # noqa: E501
properties (Properties): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value) | PypiClean |
/Nosparser-0.11.tar.gz/Nosparser-0.11/src/nos_parser/template_miner.py | import base64
import logging
import re
import time
import jsonpickle
import zlib
from cachetools import LRUCache
from nos_parser.drain import Drain, LogCluster
from nos_parser.masking import LogMasker
from nos_parser.persistence_handler import PersistenceHandler
from nos_parser.simple_profiler import SimpleProfiler, NullProfiler, Profiler
from nos_parser.template_miner_config import TemplateMinerConfig
logger = logging.getLogger(__name__)
config_filename = 'nos_parser.ini'
class TemplateMiner:
    # High-level wrapper around the Drain clusterer: masks raw log messages,
    # feeds them to Drain, extracts template parameters, and optionally
    # snapshots/restores Drain state through a PersistenceHandler.
    def __init__(self,
                 persistence_handler: PersistenceHandler = None,
                 config: TemplateMinerConfig = None):
        """
        Wrapper for Drain with persistence and masking support
        :param persistence_handler: The type of persistence to use. When None, no persistence is applied.
        :param config: Configuration object. When None, configuration is loaded from the default .ini file (if it exists)
        """
        logger.info("Starting nos_parser template miner")
        if config is None:
            logger.info(f"Loading configuration from {config_filename}")
            config = TemplateMinerConfig()
            config.load(config_filename)
        self.config = config
        # Profiling is a no-op unless explicitly enabled in the config.
        self.profiler: Profiler = NullProfiler()
        if self.config.profiling_enabled:
            self.profiler = SimpleProfiler()
        self.persistence_handler = persistence_handler
        # Wildcard token that marks a parameter position in mined templates
        # (e.g. "<*>" for the default prefix/suffix).
        param_str = self.config.mask_prefix + "*" + self.config.mask_suffix
        self.drain = Drain(
            sim_th=self.config.drain_sim_th,
            depth=self.config.drain_depth,
            max_children=self.config.drain_max_children,
            max_clusters=self.config.drain_max_clusters,
            extra_delimiters=self.config.drain_extra_delimiters,
            profiler=self.profiler,
            param_str=param_str
        )
        self.masker = LogMasker(self.config.masking_instructions, self.config.mask_prefix, self.config.mask_suffix)
        self.last_save_time = time.time()
        if persistence_handler is not None:
            self.load_state()
    def load_state(self):
        # Restore the Drain model from the persistence handler, handling
        # optional compression and legacy snapshot key formats.
        logger.info("Checking for saved state")
        state = self.persistence_handler.load_state()
        if state is None:
            logger.info("Saved state not found")
            return
        if self.config.snapshot_compress_state:
            state = zlib.decompress(base64.b64decode(state))
        drain: Drain = jsonpickle.loads(state, keys=True)
        # json-pickle encoded keys as string by default, so we have to convert those back to int
        # this is only relevant for backwards compatibility when loading a snapshot of drain <= v0.9.1
        # which did not use json-pickle's keys=true
        if len(drain.id_to_cluster) > 0 and isinstance(next(iter(drain.id_to_cluster.keys())), str):
            drain.id_to_cluster = {int(k): v for k, v in list(drain.id_to_cluster.items())}
            if self.config.drain_max_clusters:
                # Re-wrap in an LRU cache so the restored model keeps evicting
                # old clusters beyond the configured maximum.
                cache = LRUCache(maxsize=self.config.drain_max_clusters)
                cache.update(drain.id_to_cluster)
                drain.id_to_cluster = cache
        drain.profiler = self.profiler
        self.drain = drain
        logger.info("Restored {0} clusters with {1} messages".format(
            len(drain.clusters), drain.get_total_cluster_size()))
    def save_state(self, snapshot_reason):
        # Serialize the current Drain model (optionally compressed) and hand
        # it to the persistence handler.
        state = jsonpickle.dumps(self.drain, keys=True).encode('utf-8')
        if self.config.snapshot_compress_state:
            state = base64.b64encode(zlib.compress(state))
        logger.info(f"Saving state of {len(self.drain.clusters)} clusters "
                    f"with {self.drain.get_total_cluster_size()} messages, {len(state)} bytes, "
                    f"reason: {snapshot_reason}")
        self.persistence_handler.save_state(state)
    def get_snapshot_reason(self, change_type, cluster_id):
        # Decide whether a snapshot is due: immediately after any cluster
        # change, otherwise periodically per the configured interval.
        # Returns a human-readable reason string, or None if no snapshot is due.
        if change_type != "none":
            return "{} ({})".format(change_type, cluster_id)
        diff_time_sec = time.time() - self.last_save_time
        if diff_time_sec >= self.config.snapshot_interval_minutes * 60:
            return "periodic"
        return None
    def add_log_message(self, log_message: str) -> dict:
        """Mask *log_message*, feed it to Drain, and return a dict with the
        mined template, its word tokens, the extracted parameter strings,
        and the numeric parameter values. Triggers a state snapshot when a
        persistence handler is configured and a snapshot is due.
        """
        self.profiler.start_section("total")
        self.profiler.start_section("mask")
        masked_content = self.masker.mask(log_message)
        self.profiler.end_section()
        reg = r'\b\w+\b'
        self.profiler.start_section("drain")
        cluster, change_type = self.drain.add_log_message(masked_content)
        self.profiler.end_section("drain")
        result = {
            "template_mined": cluster.get_template(),
            "template_tokens": re.findall(reg,cluster.get_template()),
            "parameter_list": self.get_parameter_list(cluster.get_template(),log_message),
            "msg_val": self.get_value_list(cluster.get_template(),log_message)
        }
        if self.persistence_handler is not None:
            self.profiler.start_section("save_state")
            snapshot_reason = self.get_snapshot_reason(change_type, cluster.cluster_id)
            if snapshot_reason:
                self.save_state(snapshot_reason)
                self.last_save_time = time.time()
            self.profiler.end_section()
        self.profiler.end_section("total")
        self.profiler.report(self.config.profiling_report_sec)
        return result
    def match(self, log_message: str) -> LogCluster:
        """
        Match against an already existing cluster. Match shall be perfect (sim_th=1.0).
        New cluster will not be created as a result of this call, nor any cluster modifications.
        :param log_message: log message to match
        :return: Matched cluster or None if no match found.
        """
        masked_content = self.masker.mask(log_message)
        matched_cluster = self.drain.match(masked_content)
        return matched_cluster
    def get_value_list(self, log_template: str, content: str):
        # Return only the numeric parameters (ints or floats, as strings)
        # extracted from *content* against *log_template*.
        def check_float(x: str):
            try:
                float(x)
                return True
            except ValueError:
                return False
        parameters =self.get_parameter_list(log_template,content)
        value_list = [p for p in parameters if p.isnumeric() or check_float(p)]
        return value_list
    def get_parameter_list(self, log_template, content):
        # Turn the template into a regex (wildcards become non-greedy capture
        # groups) and extract the concrete parameter values from *content*.
        escaped_prefix = re.escape(self.config.mask_prefix)
        escaped_suffix = re.escape(self.config.mask_suffix)
        # NOTE(review): this assumes mask names between prefix and suffix are
        # at most 13 characters long -- confirm against the masking config.
        template_regex = re.sub(escaped_prefix + r".{1,13}" + escaped_suffix, self.drain.param_str, log_template)
        if self.drain.param_str not in template_regex:
            return []
        template_regex = re.escape(template_regex)
        # Collapse runs of spaces into \s+ so whitespace differences between
        # the template and the raw message do not break matching.
        template_regex = re.sub(r'\\ +', r'\\s+', template_regex)
        template_regex = "^" + template_regex.replace(escaped_prefix + r"\*" + escaped_suffix, "(.*?)") + "$"
        for delimiter in self.config.drain_extra_delimiters:
            content = re.sub(delimiter, ' ', content)
        parameter_list = re.findall(template_regex, content)
        # findall returns a list with one tuple (multiple groups) or one
        # string (single group); normalize to a flat list of strings.
        parameter_list = parameter_list[0] if parameter_list else ()
        parameter_list = list(parameter_list) if isinstance(parameter_list, tuple) else [parameter_list]
        def is_mask(p: str):
            return p.startswith(self.config.mask_prefix) and p.endswith(self.config.mask_suffix)
        # Drop captures that are themselves mask tokens rather than values.
        parameter_list = [p for p in list(parameter_list) if not is_mask(p)]
        return parameter_list
/Marketplace-0.9.2.tar.gz/Marketplace-0.9.2/marketplace/connection.py | import json
import logging
import urllib
import urllib.parse

import requests
from oauthlib import oauth1
log = logging.getLogger('marketplace.%s' % __name__)
class NotExpectedStatusCode(requests.exceptions.HTTPError):
    """Raised when the API responds with a status code other than the
    expected one."""
class Connection:
    """Holds the OAuth1 client and provides signed access to the
    Marketplace API.
    """
    def __init__(self, consumer_key, consumer_secret):
        self.set_oauth_client(consumer_key, consumer_secret)

    def set_oauth_client(self, consumer_key, consumer_secret):
        """Sets the oauth_client attribute used to sign every request."""
        self.oauth_client = oauth1.Client(consumer_key, consumer_secret)

    def prepare_request(self, method, url, body=''):
        """Prepare the request body and headers.

        :param method: HTTP method name (e.g. ``'GET'``, ``'POST'``)
        :param url: request URL (used for OAuth signing)
        :param body: optional payload; form-encoded for GET, JSON otherwise
        :returns: dict with ``headers`` and ``data`` of the signed request
        """
        headers = {
            'Content-type': 'application/json',
        }
        # Note: we don't pass body to sign() since it's only for bodies that
        # are form-urlencoded. Similarly, we don't care about the body that
        # sign() returns.
        uri, signed_headers, signed_body = self.oauth_client.sign(
            url, http_method=method, headers=headers)
        if body:
            if method == 'GET':
                # Bug fix: urlencode lives in urllib.parse on Python 3;
                # `urllib.urlencode` does not exist there.
                body = urllib.parse.urlencode(body)
            else:
                body = json.dumps(body)
        headers.update(signed_headers)
        return {"headers": headers, "data": body}

    @staticmethod
    def _get_error_reason(response):
        """Extract error reason from the response. It might be either
        the 'reason' field of the JSON body or the entire response body.
        """
        try:
            body = response.json()
            if body and 'reason' in body:
                return body['reason']
        except ValueError:
            pass
        return response.content

    def fetch(self, method, url, data=None, expected_status_code=None):
        """Sign the request, send it, and return the ``requests`` response.

        :raises requests.exceptions.HTTPError: for status codes >= 400
        :raises NotExpectedStatusCode: when *expected_status_code* is given
            and the response status differs from it
        """
        kwargs = self.prepare_request(method, url, data)
        log.debug(json.dumps(kwargs))
        response = getattr(requests, method.lower())(url, **kwargs)
        # Bug fix: response.content is bytes and json.dumps(bytes) raises
        # TypeError on Python 3 -- log the decoded text instead.
        log.debug(response.text)
        if response.status_code >= 400:
            response.raise_for_status()
        if (expected_status_code
                and response.status_code != expected_status_code):
            raise NotExpectedStatusCode(self._get_error_reason(response))
        return response

    def fetch_json(self, method, url, data=None, expected_status_code=None):
        """Return JSON-decoded data from :meth:`fetch`."""
        return self.fetch(method, url, data, expected_status_code).json()
/HAllA-0.8.20-py3-none-any.whl/halla/utils/similarity.py | from sklearn.metrics import normalized_mutual_info_score, mutual_info_score
from scipy.stats import pearsonr, spearmanr, chi2_contingency
from scipy.spatial.distance import pdist, squareform
from pandas import crosstab
import numpy as np
import rpy2.robjects as robjects
from rpy2.robjects.packages import importr
from rpy2.robjects.vectors import FloatVector
XICOR = importr('XICOR')
def remove_missing_values(x, y):
    '''Return x and y with every pair dropped where either value is missing.

    Relies on the NaN != NaN property to detect missing values, which also
    works on arrays where np.isnan would fail (e.g. object dtype).
    '''
    missing = np.logical_or(x != x, y != y)
    keep = ~missing
    return x[keep], y[keep]
'''Similarity wrapper functions (note: not distance!)
Given x, y, returns a tuple (similarity, p-value);
p-value will be None if not provided
'''
def nmi(x, y, return_pval=False):
    '''Normalized mutual information, ranging from [0 .. 1]
    0: no mutual information; 1: perfect correlation.
    The p-value (when requested) comes from a chi-squared test on the
    contingency table of x and y.
    '''
    x, y = remove_missing_values(x, y)
    # A constant vector carries no information about the other.
    if np.unique(x).shape[0] == 1 or np.unique(y).shape[0] == 1:
        return (0, 1) if return_pval else 0
    score = normalized_mutual_info_score(x, y)
    if not return_pval:
        return score
    _chi2, pval, _dof, _expected = chi2_contingency(crosstab(x, y))
    return score, pval
def mutual_info(x, y, return_pval=False):
    '''Unnormalized mutual information of x and y; the p-value (when
    requested) comes from a chi-squared test on the contingency table.
    '''
    x, y = remove_missing_values(x, y)
    # A constant vector carries no information about the other.
    if np.unique(x).shape[0] == 1 or np.unique(y).shape[0] == 1:
        return (0, 1) if return_pval else 0
    score = mutual_info_score(x, y)
    if not return_pval:
        return score
    _chi2, pval, _dof, _expected = chi2_contingency(crosstab(x, y))
    return score, pval
def pearson(x, y, return_pval=False):
    '''Pearson correlation of x and y (with its p-value when requested).

    Pairs containing missing values are dropped first; a constant input
    yields 0 (p=1).
    '''
    keep = ~np.logical_or(x != x, y != y)  # drop pairs with missing values
    x, y = x[keep], y[keep]
    if np.unique(x).shape[0] == 1 or np.unique(y).shape[0] == 1:
        return (0, 1) if return_pval else 0
    corr, pval = pearsonr(x, y)
    # TODO: enable tuning whether correlation should always be positive or not
    return (corr, pval) if return_pval else corr
def spearman(x, y, return_pval=False):
    '''Spearman rank correlation of x and y (with its p-value when
    requested).

    Pairs containing missing values are dropped first; a constant input
    yields 0 (p=1).
    '''
    keep = ~np.logical_or(x != x, y != y)  # drop pairs with missing values
    x, y = x[keep], y[keep]
    if np.unique(x).shape[0] == 1 or np.unique(y).shape[0] == 1:
        return (0, 1) if return_pval else 0
    corr, pval = spearmanr(x, y)
    # TODO: enable tuning whether correlation should always be positive or not
    return (corr, pval) if return_pval else corr
def distcorr(x, y, return_pval=False):
    '''Perform distance correlation [0 .. 1]
    def src: https://en.wikipedia.org/wiki/Distance_correlation
    distance corr = 0 iif x and y are independent
    distance corr = 1 implies that dimensions of the linear subspaces spanned by x & y respectively are
        almost surely equal and if we assume that these subspaces are equal, then in this subspace
        y = A + bCx for some vector A, scalar b, and orthonormal matrix C
    code src: https://gist.github.com/satra/aa3d19a12b74e9ab7941 - much faster than the library dcor
    No p-value is available; the second tuple element is always None.
    '''
    # Drop pairs with missing values (NaN != NaN is True).
    keep = ~np.logical_or(x != x, y != y)
    x, y = x[keep], y[keep]
    if np.unique(x).shape[0] == 1 or np.unique(y).shape[0] == 1:
        return (0, 1) if return_pval else 0
    x, y = np.atleast_1d(x), np.atleast_1d(y)
    # If 1D, add a dummy feature axis so pdist sees column vectors.
    if np.prod(x.shape) == len(x):
        x = x[:, None]
    if np.prod(y.shape) == len(y):
        y = y[:, None]
    x = np.atleast_2d(x)
    y = np.atleast_2d(y)
    n = x.shape[0]
    if x.shape[0] != y.shape[0]:
        raise ValueError('Number of samples must match')
    a = squareform(pdist(x))
    b = squareform(pdist(y))
    # Doubly-centered distance matrices.
    A = a - a.mean(axis=0)[None, :] - a.mean(axis=1)[:, None] + a.mean()
    B = b - b.mean(axis=0)[None, :] - b.mean(axis=1)[:, None] + b.mean()
    denom = float(n * n)
    dcov2_xy = (A * B).sum() / denom
    dcov2_xx = (A * A).sum() / denom
    dcov2_yy = (B * B).sum() / denom
    dcor_val = np.sqrt(dcov2_xy) / np.sqrt(np.sqrt(dcov2_xx) * np.sqrt(dcov2_yy))
    return (dcor_val, None) if return_pval else dcor_val
def xicor(x, y, return_pval=False):
    '''Xi correlation of x and y computed via the R XICOR package.

    Returns xi, or (xi, p-value) when return_pval is True. Note that xi is
    not symmetric in x and y (see symmetric_xicor).
    '''
    x, y = remove_missing_values(x, y)
    xv, yv = FloatVector(x), FloatVector(y)
    if return_pval:
        xi, _sd, pval = XICOR.xicor(xv, yv, pvalue=True)
        return xi[0], pval[0]
    xi = XICOR.xicor(xv, yv, pvalue=False)
    return xi[0]
def symmetric_xicor(x, y):
    '''
    This is used for computing trees. It isn't used for testing hypotheses
    because there's no closed form expression for the p-value.
    '''
    # xi is asymmetric; take the larger of the two directions.
    return max(xicor(x, y), xicor(y, x))
'''Constants
'''
# Registry of similarity wrappers, keyed by lower-case metric name.
SIM_FUNCS = {
    'nmi': nmi,
    'mi': mutual_info,
    'pearson': pearson,
    'spearman': spearman,
    'dcor': distcorr,
    'xicor': xicor,
    'symmetric_xicor': symmetric_xicor
}
# Whether each wrapper can also return an analytic p-value. Keys must stay
# in sync with SIM_FUNCS: 'symmetric_xicor' was missing, which made
# does_return_pval('symmetric_xicor') raise KeyError; it has no closed-form
# p-value, hence False.
PVAL_PROVIDED = {
    'nmi': True,
    'mi': True,
    'pearson': True,
    'spearman': True,
    'dcor': False,
    'xicor': True,
    'symmetric_xicor': False,
}
def get_similarity_function(metric):
    '''Look up the similarity function registered for *metric*
    (case-insensitive).

    :raises KeyError: if the metric is not available
    '''
    key = metric.lower()
    if key not in SIM_FUNCS:
        raise KeyError('The similarity metric is not available...')
    # only return the similarity scores
    return SIM_FUNCS[key]
def does_return_pval(metric):
    '''Return True when the wrapper for *metric* also provides a p-value.

    The lookup is case-insensitive (consistent with
    get_similarity_function) and defaults to False for metrics without an
    analytic p-value -- notably 'symmetric_xicor', which is absent from
    PVAL_PROVIDED and previously raised KeyError here.
    '''
    return PVAL_PROVIDED.get(metric.lower(), False)
def similarity2distance(scores, set_abs=True, convert_func=None):
    '''Convert similarity scores to distances.

    By default dist = 1 - abs(scores) (or 1 - scores when set_abs is
    False); when convert_func is given it is applied to scores instead.
    reference of converting correlation by 1 - scores:
    https://www.ncbi.nlm.nih.gov/pmc/articles/PMC4498680/

    - scores      : the similarity values to be converted, a numpy array
    - set_abs     : if True, take the absolute value of scores first
    - convert_func: optional custom conversion function
    '''
    if type(scores) is not np.ndarray:
        raise ValueError('scores argument should be a numpy array!')
    if convert_func is not None:
        return convert_func(scores)
    transformed = np.abs(scores) if set_abs else scores
    return 1 - transformed
/NehorayRapid-0.0.1-py3-none-any.whl/mmedit/models/common/aspp.py | import torch
from mmcv.cnn import ConvModule
from torch import nn
from torch.nn import functional as F
from .separable_conv_module import DepthwiseSeparableConvModule
class ASPPPooling(nn.Sequential):
    """Image-level pooling branch of ASPP: global average pool, 1x1 conv,
    then bilinear upsample back to the input's spatial size."""

    def __init__(self, in_channels, out_channels, conv_cfg, norm_cfg, act_cfg):
        super().__init__(
            nn.AdaptiveAvgPool2d(1),
            ConvModule(
                in_channels,
                out_channels,
                1,
                conv_cfg=conv_cfg,
                norm_cfg=norm_cfg,
                act_cfg=act_cfg))

    def forward(self, x):
        out = x
        for layer in self:
            out = layer(out)
        # Restore the original spatial resolution.
        return F.interpolate(
            out, size=x.shape[-2:], mode='bilinear', align_corners=False)
class ASPP(nn.Module):
    """ASPP module from DeepLabV3.
    The code is adopted from
    https://github.com/pytorch/vision/blob/master/torchvision/models/
    segmentation/deeplabv3.py
    For more information about the module:
    `"Rethinking Atrous Convolution for Semantic Image Segmentation"
    <https://arxiv.org/abs/1706.05587>`_.
    Args:
        in_channels (int): Input channels of the module.
        out_channels (int): Output channels of the module.
        mid_channels (int): Output channels of the intermediate ASPP conv
            modules.
        dilations (Sequence[int]): Dilation rate of three ASPP conv module.
            Default: (12, 24, 36).
        conv_cfg (dict): Config dict for convolution layer. If "None",
            nn.Conv2d will be applied. Default: None.
        norm_cfg (dict): Config dict for normalization layer.
            Default: dict(type='BN').
        act_cfg (dict): Config dict for activation layer.
            Default: dict(type='ReLU').
        separable_conv (bool): Whether replace normal conv with depthwise
            separable conv which is faster. Default: False.
    """

    def __init__(self,
                 in_channels,
                 out_channels=256,
                 mid_channels=256,
                 dilations=(12, 24, 36),
                 conv_cfg=None,
                 norm_cfg=dict(type='BN'),
                 act_cfg=dict(type='ReLU'),
                 separable_conv=False):
        super().__init__()
        conv_module = (
            DepthwiseSeparableConvModule if separable_conv else ConvModule)
        # Branch 1: plain 1x1 conv.
        branches = [
            ConvModule(
                in_channels,
                mid_channels,
                1,
                conv_cfg=conv_cfg,
                norm_cfg=norm_cfg,
                act_cfg=act_cfg)
        ]
        # Branches 2-4: 3x3 atrous convs at the configured dilation rates.
        branches.extend(
            conv_module(
                in_channels,
                mid_channels,
                3,
                padding=dilation,
                dilation=dilation,
                conv_cfg=conv_cfg,
                norm_cfg=norm_cfg,
                act_cfg=act_cfg) for dilation in dilations)
        # Branch 5: image-level pooling.
        branches.append(
            ASPPPooling(in_channels, mid_channels, conv_cfg, norm_cfg,
                        act_cfg))
        self.convs = nn.ModuleList(branches)
        # Fuse the five concatenated branches back to out_channels.
        self.project = nn.Sequential(
            ConvModule(
                5 * mid_channels,
                out_channels,
                1,
                conv_cfg=conv_cfg,
                norm_cfg=norm_cfg,
                act_cfg=act_cfg), nn.Dropout(0.5))

    def forward(self, x):
        """Forward function for ASPP module.
        Args:
            x (Tensor): Input tensor with shape (N, C, H, W).
        Returns:
            Tensor: Output tensor.
        """
        feats = [branch(x) for branch in self.convs]
        return self.project(torch.cat(feats, dim=1))
/FALCONN-1.3.1.tar.gz/FALCONN-1.3.1/external/pybind11/docs/advanced/cast/stl.rst | STL containers
##############
Automatic conversion
====================
When including the additional header file :file:`pybind11/stl.h`, conversions
between ``std::vector<>``/``std::list<>``/``std::array<>``,
``std::set<>``/``std::unordered_set<>``, and
``std::map<>``/``std::unordered_map<>`` and the Python ``list``, ``set`` and
``dict`` data structures are automatically enabled. The types ``std::pair<>``
and ``std::tuple<>`` are already supported out of the box with just the core
:file:`pybind11/pybind11.h` header.
The major downside of these implicit conversions is that containers must be
converted (i.e. copied) on every Python->C++ and C++->Python transition, which
can have implications on the program semantics and performance. Please read the
next sections for more details and alternative approaches that avoid this.
.. note::
Arbitrary nesting of any of these types is possible.
.. seealso::
The file :file:`tests/test_stl.cpp` contains a complete
example that demonstrates how to pass STL data types in more detail.
.. _cpp17_container_casters:
C++17 library containers
========================
The :file:`pybind11/stl.h` header also includes support for ``std::optional<>``
and ``std::variant<>``. These require a C++17 compiler and standard library.
In C++14 mode, ``std::experimental::optional<>`` is supported if available.
Various versions of these containers also exist for C++11 (e.g. in Boost).
pybind11 provides an easy way to specialize the ``type_caster`` for such
types:
.. code-block:: cpp
// `boost::optional` as an example -- can be any `std::optional`-like container
namespace pybind11 { namespace detail {
template <typename T>
struct type_caster<boost::optional<T>> : optional_caster<boost::optional<T>> {};
}}
The above should be placed in a header file and included in all translation units
where automatic conversion is needed. Similarly, a specialization can be provided
for custom variant types:
.. code-block:: cpp
// `boost::variant` as an example -- can be any `std::variant`-like container
namespace pybind11 { namespace detail {
template <typename... Ts>
struct type_caster<boost::variant<Ts...>> : variant_caster<boost::variant<Ts...>> {};
// Specifies the function used to visit the variant -- `apply_visitor` instead of `visit`
template <>
struct visit_helper<boost::variant> {
template <typename... Args>
static auto call(Args &&...args) -> decltype(boost::apply_visitor(args...)) {
return boost::apply_visitor(args...);
}
};
}} // namespace pybind11::detail
The ``visit_helper`` specialization is not required if your ``name::variant`` provides
a ``name::visit()`` function. For any other function name, the specialization must be
included to tell pybind11 how to visit the variant.
.. note::
pybind11 only supports the modern implementation of ``boost::variant``
which makes use of variadic templates. This requires Boost 1.56 or newer.
Additionally, on Windows, MSVC 2017 is required because ``boost::variant``
falls back to the old non-variadic implementation on MSVC 2015.
.. _opaque:
Making opaque types
===================
pybind11 heavily relies on a template matching mechanism to convert parameters
and return values that are constructed from STL data types such as vectors,
linked lists, hash tables, etc. This even works in a recursive manner, for
instance to deal with lists of hash maps of pairs of elementary and custom
types, etc.
However, a fundamental limitation of this approach is that internal conversions
between Python and C++ types involve a copy operation that prevents
pass-by-reference semantics. What does this mean?
Suppose we bind the following function
.. code-block:: cpp
void append_1(std::vector<int> &v) {
v.push_back(1);
}
and call it from Python, the following happens:
.. code-block:: pycon
>>> v = [5, 6]
>>> append_1(v)
>>> print(v)
[5, 6]
As you can see, when passing STL data structures by reference, modifications
are not propagated back to the Python side. A similar situation arises when
exposing STL data structures using the ``def_readwrite`` or ``def_readonly``
functions:
.. code-block:: cpp
/* ... definition ... */
class MyClass {
std::vector<int> contents;
};
/* ... binding code ... */
py::class_<MyClass>(m, "MyClass")
.def(py::init<>())
.def_readwrite("contents", &MyClass::contents);
In this case, properties can be read and written in their entirety. However, an
``append`` operation involving such a list type has no effect:
.. code-block:: pycon
>>> m = MyClass()
>>> m.contents = [5, 6]
>>> print(m.contents)
[5, 6]
>>> m.contents.append(7)
>>> print(m.contents)
[5, 6]
Finally, the involved copy operations can be costly when dealing with very
large lists. To deal with all of the above situations, pybind11 provides a
macro named ``PYBIND11_MAKE_OPAQUE(T)`` that disables the template-based
conversion machinery of types, thus rendering them *opaque*. The contents of
opaque objects are never inspected or extracted, hence they *can* be passed by
reference. For instance, to turn ``std::vector<int>`` into an opaque type, add
the declaration
.. code-block:: cpp
PYBIND11_MAKE_OPAQUE(std::vector<int>);
before any binding code (e.g. invocations to ``class_::def()``, etc.). This
macro must be specified at the top level (and outside of any namespaces), since
it instantiates a partial template overload. If your binding code consists of
multiple compilation units, it must be present in every file (typically via a
common header) preceding any usage of ``std::vector<int>``. Opaque types must
also have a corresponding ``class_`` declaration to associate them with a name
in Python, and to define a set of available operations, e.g.:
.. code-block:: cpp
py::class_<std::vector<int>>(m, "IntVector")
.def(py::init<>())
.def("clear", &std::vector<int>::clear)
.def("pop_back", &std::vector<int>::pop_back)
.def("__len__", [](const std::vector<int> &v) { return v.size(); })
.def("__iter__", [](std::vector<int> &v) {
return py::make_iterator(v.begin(), v.end());
}, py::keep_alive<0, 1>()) /* Keep vector alive while iterator is used */
// ....
Please take a look at the :ref:`macro_notes` before using the
``PYBIND11_MAKE_OPAQUE`` macro.
.. seealso::
The file :file:`tests/test_opaque_types.cpp` contains a complete
example that demonstrates how to create and expose opaque types using
pybind11 in more detail.
.. _stl_bind:
Binding STL containers
======================
The ability to expose STL containers as native Python objects is a fairly
common request, hence pybind11 also provides an optional header file named
:file:`pybind11/stl_bind.h` that does exactly this. The mapped containers try
to match the behavior of their native Python counterparts as much as possible.
The following example showcases usage of :file:`pybind11/stl_bind.h`:
.. code-block:: cpp
// Don't forget this
#include <pybind11/stl_bind.h>
PYBIND11_MAKE_OPAQUE(std::vector<int>);
PYBIND11_MAKE_OPAQUE(std::map<std::string, double>);
// ...
// later in binding code:
py::bind_vector<std::vector<int>>(m, "VectorInt");
py::bind_map<std::map<std::string, double>>(m, "MapStringDouble");
When binding STL containers pybind11 considers the types of the container's
elements to decide whether the container should be confined to the local module
(via the :ref:`module_local` feature). If the container element types are
anything other than already-bound custom types bound without
``py::module_local()`` the container binding will have ``py::module_local()``
applied. This includes converting types such as numeric types, strings, Eigen
types; and types that have not yet been bound at the time of the stl container
binding. This module-local binding is designed to avoid potential conflicts
between module bindings (for example, from two separate modules each attempting
to bind ``std::vector<int>`` as a python type).
It is possible to override this behavior to force a definition to be either
module-local or global. To do so, you can pass the attributes
``py::module_local()`` (to make the binding module-local) or
``py::module_local(false)`` (to make the binding global) into the
``py::bind_vector`` or ``py::bind_map`` arguments:
.. code-block:: cpp
py::bind_vector<std::vector<int>>(m, "VectorInt", py::module_local(false));
Note, however, that such a global binding would make it impossible to load this
module at the same time as any other pybind module that also attempts to bind
the same container type (``std::vector<int>`` in the above example).
See :ref:`module_local` for more details on module-local bindings.
.. seealso::
The file :file:`tests/test_stl_binders.cpp` shows how to use the
convenience STL container wrappers.
| PypiClean |
/MaterialDjango-0.2.5.tar.gz/MaterialDjango-0.2.5/materialdjango/static/materialdjango/components/bower_components/prism/components/prism-nsis.min.js | Prism.languages.nsis={comment:{pattern:/(^|[^\\])(\/\*[\s\S]*?\*\/|[#;].*)/,lookbehind:!0},string:{pattern:/("|')(?:\\.|(?!\1)[^\\\r\n])*\1/,greedy:!0},keyword:{pattern:/(^\s*)(?:Abort|Add(?:BrandingImage|Size)|AdvSplash|Allow(?:RootDirInstall|SkipFiles)|AutoCloseWindow|Banner|BG(?:Font|Gradient|Image)|BrandingText|BringToFront|Call(?:InstDLL)?|Caption|ChangeUI|CheckBitmap|ClearErrors|CompletedText|ComponentText|CopyFiles|CRCCheck|Create(?:Directory|Font|ShortCut)|Delete(?:INISec|INIStr|RegKey|RegValue)?|Detail(?:Print|sButtonText)|Dialer|Dir(?:Text|Var|Verify)|EnableWindow|Enum(?:RegKey|RegValue)|Exch|Exec(?:Shell(?:Wait)?|Wait)?|ExpandEnvStrings|File(?:BufSize|Close|ErrorText|Open|Read|ReadByte|ReadUTF16LE|ReadWord|WriteUTF16LE|Seek|Write|WriteByte|WriteWord)?|Find(?:Close|First|Next|Window)|FlushINI|Get(?:CurInstType|CurrentAddress|DlgItem|DLLVersion(?:Local)?|ErrorLevel|FileTime(?:Local)?|FullPathName|Function(?:Address|End)?|InstDirError|LabelAddress|TempFileName)|Goto|HideWindow|Icon|If(?:Abort|Errors|FileExists|RebootFlag|Silent)|InitPluginsDir|Install(?:ButtonText|Colors|Dir(?:RegKey)?)|InstProgressFlags|Inst(?:Type(?:GetText|SetText)?)|Int(?:CmpU?|Fmt|Op)|IsWindow|Lang(?:DLL|String)|License(?:BkColor|Data|ForceSelection|LangString|Text)|LoadLanguageFile|LockWindow|Log(?:Set|Text)|Manifest(?:DPIAware|SupportedOS)|Math|MessageBox|MiscButtonText|Name|Nop|ns(?:Dialogs|Exec)|NSISdl|OutFile|Page(?:Callbacks)?|Pop|Push|Quit|Read(?:EnvStr|INIStr|RegDWORD|RegStr)|Reboot|RegDLL|Rename|RequestExecutionLevel|ReserveFile|Return|RMDir|SearchPath|Section(?:End|GetFlags|GetInstTypes|GetSize|GetText|Group|In|SetFlags|SetInstTypes|SetSize|SetText)?|SendMessage|Set(?:AutoClose|BrandingImage|Compress|Compressor(?:DictSize)?|CtlColors|CurInstType|DatablockOptimize|DateSave|Details(?:Print|View)|ErrorLevel|Errors|FileAttributes|F
ont|OutPath|Overwrite|RebootFlag|RegView|ShellVarContext|Silent)|Show(?:InstDetails|UninstDetails|Window)|Silent(?:Install|UnInstall)|Sleep|SpaceTexts|Splash|StartMenu|Str(?:CmpS?|Cpy|Len)|SubCaption|System|Unicode|Uninstall(?:ButtonText|Caption|Icon|SubCaption|Text)|UninstPage|UnRegDLL|UserInfo|Var|VI(?:AddVersionKey|FileVersion|ProductVersion)|VPatch|WindowIcon|Write(?:INIStr|Reg(?:Bin|DWORD|ExpandStr|MultiStr|None|Str)|Uninstaller)|XPStyle)\b/m,lookbehind:!0},property:/\b(?:admin|all|auto|both|colored|false|force|hide|highest|lastused|leave|listonly|none|normal|notset|off|on|open|print|show|silent|silentlog|smooth|textonly|true|user|ARCHIVE|FILE_(ATTRIBUTE_ARCHIVE|ATTRIBUTE_NORMAL|ATTRIBUTE_OFFLINE|ATTRIBUTE_READONLY|ATTRIBUTE_SYSTEM|ATTRIBUTE_TEMPORARY)|HK((CR|CU|LM)(32|64)?|DD|PD|U)|HKEY_(CLASSES_ROOT|CURRENT_CONFIG|CURRENT_USER|DYN_DATA|LOCAL_MACHINE|PERFORMANCE_DATA|USERS)|ID(ABORT|CANCEL|IGNORE|NO|OK|RETRY|YES)|MB_(ABORTRETRYIGNORE|DEFBUTTON1|DEFBUTTON2|DEFBUTTON3|DEFBUTTON4|ICONEXCLAMATION|ICONINFORMATION|ICONQUESTION|ICONSTOP|OK|OKCANCEL|RETRYCANCEL|RIGHT|RTLREADING|SETFOREGROUND|TOPMOST|USERICON|YESNO)|NORMAL|OFFLINE|READONLY|SHCTX|SHELL_CONTEXT|SYSTEM|TEMPORARY)\b/,constant:/\${[\w\.:\^-]+}|\$\([\w\.:\^-]+\)/i,variable:/\$\w+/i,number:/\b-?(?:0x[\dA-Fa-f]+|\d*\.?\d+(?:[Ee]-?\d+)?)\b/,operator:/--?|\+\+?|<=?|>=?|==?=?|&&?|\|\|?|[?*\/~^%]/,punctuation:/[{}[\];(),.:]/,important:{pattern:/(^\s*)!(?:addincludedir|addplugindir|appendfile|cd|define|delfile|echo|else|endif|error|execute|finalize|getdllversionsystem|ifdef|ifmacrodef|ifmacrondef|ifndef|if|include|insertmacro|macroend|macro|makensis|packhdr|pragma|searchparse|searchreplace|tempfile|undef|verbose|warning)\b/im,lookbehind:!0}}; | PypiClean |
/FlaskCms-0.0.4.tar.gz/FlaskCms-0.0.4/flask_cms/static/js/ckeditor/plugins/a11yhelp/dialogs/lang/hi.js | /*
Copyright (c) 2003-2013, CKSource - Frederico Knabben. All rights reserved.
For licensing, see LICENSE.md or http://ckeditor.com/license
*/
CKEDITOR.plugins.setLang("a11yhelp","hi",{title:"Accessibility Instructions",contents:"Help Contents. To close this dialog press ESC.",legend:[{name:"सामान्य",items:[{name:"Editor Toolbar",legend:"Press ${toolbarFocus} to navigate to the toolbar. Move to the next and previous toolbar group with TAB and SHIFT-TAB. Move to the next and previous toolbar button with RIGHT ARROW or LEFT ARROW. Press SPACE or ENTER to activate the toolbar button."},{name:"Editor Dialog",legend:"Inside a dialog, press TAB to navigate to next dialog field, press SHIFT + TAB to move to previous field, press ENTER to submit dialog, press ESC to cancel dialog. For dialogs that have multiple tab pages, press ALT + F10 to navigate to tab-list. Then move to next tab with TAB OR RIGTH ARROW. Move to previous tab with SHIFT + TAB or LEFT ARROW. Press SPACE or ENTER to select the tab page."},
{name:"Editor Context Menu",legend:"Press ${contextMenu} or APPLICATION KEY to open context-menu. Then move to next menu option with TAB or DOWN ARROW. Move to previous option with SHIFT+TAB or UP ARROW. Press SPACE or ENTER to select the menu option. Open sub-menu of current option with SPACE or ENTER or RIGHT ARROW. Go back to parent menu item with ESC or LEFT ARROW. Close context menu with ESC."},{name:"Editor List Box",legend:"Inside a list-box, move to next list item with TAB OR DOWN ARROW. Move to previous list item with SHIFT + TAB or UP ARROW. Press SPACE or ENTER to select the list option. Press ESC to close the list-box."},
{name:"Editor Element Path Bar",legend:"Press ${elementsPathFocus} to navigate to the elements path bar. Move to next element button with TAB or RIGHT ARROW. Move to previous button with SHIFT+TAB or LEFT ARROW. Press SPACE or ENTER to select the element in editor."}]},{name:"Commands",items:[{name:" Undo command",legend:"Press ${undo}"},{name:" Redo command",legend:"Press ${redo}"},{name:" Bold command",legend:"Press ${bold}"},{name:" Italic command",legend:"Press ${italic}"},{name:" Underline command",
legend:"Press ${underline}"},{name:" Link command",legend:"Press ${link}"},{name:" Toolbar Collapse command",legend:"Press ${toolbarCollapse}"},{name:" Access previous focus space command",legend:"Press ${accessPreviousSpace} to access the closest unreachable focus space before the caret, for example: two adjacent HR elements. Repeat the key combination to reach distant focus spaces."},{name:" Access next focus space command",legend:"Press ${accessNextSpace} to access the closest unreachable focus space after the caret, for example: two adjacent HR elements. Repeat the key combination to reach distant focus spaces."},
{name:" Accessibility Help",legend:"Press ${a11yHelp}"}]}]}); | PypiClean |
/FoLiA-tools-2.5.4.tar.gz/FoLiA-tools-2.5.4/foliatools/xslt.py |
import lxml.etree
import sys
import glob
import getopt
import os.path
import io
def transform(xsltfilename, sourcefilename, targetfilename=None, encoding='utf-8', **kwargs):
    """Apply an XSL stylesheet to an XML source file.

    Parameters:
        xsltfilename: path to the XSL stylesheet; relative paths are resolved
            against the directory containing this module.
        sourcefilename: path to the XML document to transform.
        targetfilename: if given, the result is written to this file;
            otherwise it is printed to standard output.
        encoding: output encoding requested from the serializer (and used for
            the output file).
        **kwargs: stylesheet parameters passed through to the XSLT transformer.

    Raises:
        Exception: if the stylesheet or the source file does not exist.
    """
    xsldir = os.path.dirname(__file__)
    # Resolve stylesheet paths relative to this module's directory.
    # BUGFIX: use os.path.isabs instead of checking for a leading '/', which
    # broke on Windows paths and raised IndexError on an empty string.
    if not os.path.isabs(xsltfilename):
        xsltfilename = os.path.join(xsldir, xsltfilename)
    if not os.path.exists(xsltfilename):
        raise Exception("XSL Stylesheet not found: " + xsltfilename)
    if not os.path.exists(sourcefilename):
        raise Exception("File not found: " + sourcefilename)
    xslt = lxml.etree.parse(xsltfilename)
    transformer = lxml.etree.XSLT(xslt)
    parsedsource = lxml.etree.parse(sourcefilename)
    # lxml requires stylesheet string parameters to be pre-quoted
    kwargs = {k: lxml.etree.XSLT.strparam(v) for k, v in kwargs.items()}
    transformed = transformer(parsedsource, **kwargs)
    output = str(lxml.etree.tostring(transformed, pretty_print=False, encoding=encoding), encoding)
    if targetfilename:
        print("Wrote " + targetfilename, file=sys.stderr)
        # BUGFIX: use a context manager so the file is closed even if the
        # write fails, and honour the requested output encoding (it was
        # previously hard-coded to utf-8 while the bytes were serialized in
        # `encoding`, corrupting output for non-utf8 encodings).
        with io.open(targetfilename, 'w', encoding=encoding) as f:
            f.write(output)
    else:
        print(output)
def usage():
    """Print the tool's usage text and the shared option summary to stderr."""
    lines = [
        settings.usage,
        "",
        "Parameters for output:",
        " -e [encoding] Output encoding (default: utf-8)",
        "Parameters for processing directories:",
        " -r Process recursively",
        " -E [extension] Set extension (default: xml)",
        " -q Ignore errors",
        " -s [url] Associate a CSS Stylesheet (URL, may be relative)",
        " -T Retain tokenisation",
        " -t [textclass] Text class to output",
    ]
    for line in lines:
        print(line, file=sys.stderr)
class settings:
    """Module-level configuration shared by the command-line entry points.

    The attributes are overwritten by main() from the command-line options
    and read by process(), processdir() and usage().
    """
    autooutput = False  # not currently set by any command-line option
    extension = 'xml'  # input extension matched when scanning directories (-E)
    recurse = False  # descend into subdirectories (-r)
    ignoreerrors = False  # report errors instead of aborting (-q)
    encoding = 'utf-8'  # output encoding (-e)
    xsltfilename = "undefined.xsl"  # set by main(): stylesheet to apply
    outputextension = 'UNDEFINED'  # set by main(): files with this extension are skipped as inputs
    usage = "UNDEFINED"  # set by main(): tool-specific usage text
    css = ""  # optional CSS stylesheet URL passed to the transform (-s)
    textclass = "current"  # text class passed to the transform (-t)
def processdir(d):
    """Process every file in *d* with the configured input extension,
    skipping generated output files and recursing if settings.recurse is set."""
    print("Searching in " + d, file=sys.stderr)
    in_suffix = '.' + settings.extension
    out_suffix = '.' + settings.outputextension
    for entry in glob.glob(os.path.join(d, '*')):
        is_input = entry[-len(in_suffix):] == in_suffix
        is_output = entry[-len(out_suffix):] == out_suffix
        if is_input and not is_output:
            process(entry)
        elif settings.recurse and os.path.isdir(entry):
            processdir(entry)
def process(inputfilename):
    """Transform a single input file with the configured stylesheet.

    Errors are printed and swallowed when settings.ignoreerrors is set
    (the -q flag); otherwise the original exception is re-raised.
    """
    try:
        kwargs = {}
        if settings.css:
            kwargs['css'] = settings.css
        if settings.textclass:
            kwargs['textclass'] = settings.textclass
        transform(settings.xsltfilename, inputfilename, None, settings.encoding, **kwargs)
    except Exception as e:
        if settings.ignoreerrors:
            print("ERROR: An exception was raised whilst processing " + inputfilename + ":", e, file=sys.stderr)
        else:
            # BUGFIX: bare `raise` instead of `raise e`, preserving the
            # original traceback and exception chain exactly.
            raise
def main(xsltfilename, outputextension, usagetext):
    """Command-line entry point shared by the concrete conversion tools.

    Parameters:
        xsltfilename: stylesheet passed through to transform()
        outputextension: extension of generated files (such files are skipped
            when scanning directories)
        usagetext: tool-specific usage string shown by usage()
    """
    try:
        # BUGFIX: '-e' (output encoding) is documented in usage() and handled
        # below, but was missing from the getopt specification, so passing
        # -e always raised GetoptError. Added 'e:'.
        opts, args = getopt.getopt(sys.argv[1:], "o:e:E:hrqs:Tt:", ["help"])
    except getopt.GetoptError as err:
        print(str(err), file=sys.stderr)
        usage()
        sys.exit(2)
    settings.xsltfilename = xsltfilename
    settings.outputextension = outputextension
    settings.usage = usagetext
    for o, a in opts:
        if o == '-h' or o == '--help':
            usage()
            sys.exit(0)
        elif o == '-T':
            settings.retaintokenisation = True
        elif o == '-e':
            settings.encoding = a
        elif o == '-E':
            settings.extension = a
        elif o == '-r':
            settings.recurse = True
        elif o == '-q':
            settings.ignoreerrors = True
        elif o == '-s':
            settings.css = a
        elif o == '-t':
            settings.textclass = a
        else:
            # note: '-o' is accepted by getopt for backward compatibility but
            # has no handler, so it ends up here
            raise Exception("No such option: " + o)
    if args:
        for x in args:
            if os.path.isdir(x):
                processdir(x)
            elif os.path.isfile(x):
                process(x)
            else:
                print("ERROR: File or directory not found: " + x, file=sys.stderr)
                sys.exit(3)
    else:
        print("ERROR: Nothing to do, specify one or more files or directories", file=sys.stderr)
/KratosCompressiblePotentialFlowApplication-9.4-cp311-cp311-win_amd64.whl/KratosMultiphysics/CompressiblePotentialFlowApplication/compute_forces_on_nodes_process.py | import KratosMultiphysics
import KratosMultiphysics.CompressiblePotentialFlowApplication as CPFApp
def Factory(settings, Model):
    """Kratos process factory: build a ComputeForcesOnNodesProcess from a
    Parameters object wrapping the json configuration."""
    if isinstance(settings, KratosMultiphysics.Parameters):
        return ComputeForcesOnNodesProcess(Model, settings["Parameters"])
    raise Exception(
        "expected input shall be a Parameters object, encapsulating a json string")
# all the processes python processes should be derived from "python_process"
class ComputeForcesOnNodesProcess(KratosMultiphysics.Process):
    """Accumulate pressure forces on the body surface into the nodal
    REACTION variable and log the resulting total lift/drag/side forces.

    Executed at the end of every solution step via
    ExecuteFinalizeSolutionStep().
    """
    def __init__(self, Model, settings):
        """Read the target model part and output option from *settings*."""
        KratosMultiphysics.Process.__init__(self)
        default_parameters = KratosMultiphysics.Parameters(r'''{
            "model_part_name": "",
            "create_output_file": false
        }''')
        # Fill in missing keys with the defaults above and type-check the rest.
        settings.ValidateAndAssignDefaults(default_parameters)
        self.body_model_part = Model[settings["model_part_name"].GetString()]
        self.create_output_file = settings["create_output_file"].GetBool()
    def ExecuteFinalizeSolutionStep(self):
        """Kratos hook: recompute nodal forces after each solution step."""
        self.Execute()
    def Execute(self):
        """Zero REACTION on the body nodes, then accumulate cp-based forces
        condition by condition and log the force totals."""
        KratosMultiphysics.Logger.PrintInfo('ComputeForcesOnNodesProcess', 'Computing reactions on nodes')
        # Reset REACTION before accumulating this step's contributions.
        KratosMultiphysics.VariableUtils().SetHistoricalVariableToZero(KratosMultiphysics.REACTION, self.body_model_part.Nodes)
        free_stream_velocity = self.body_model_part.ProcessInfo.GetValue(CPFApp.FREE_STREAM_VELOCITY)
        free_stream_density = self.body_model_part.ProcessInfo.GetValue(CPFApp.FREE_STREAM_DENSITY)
        free_stream_velocity_norm = free_stream_velocity.norm_2()
        # q_inf = 1/2 * rho_inf * |V_inf|^2
        dynamic_pressure = 0.5*free_stream_density*free_stream_velocity_norm**2
        for cond in self.body_model_part.Conditions:
            # condition normal from the geometry (NOTE(review): presumably
            # area-weighted, so cp * q_inf * n is a force — confirm against
            # the Kratos Geometry.Normal() contract)
            condition_normal = cond.GetGeometry().Normal()
            pressure_coefficient = cond.GetValue(KratosMultiphysics.PRESSURE_COEFFICIENT)
            for node in cond.GetNodes():
                # NOTE(review): the /2.0 presumably splits the condition force
                # evenly between the two end nodes of a 2D line condition —
                # confirm it is not applied to conditions with another node count.
                added_force = condition_normal*(pressure_coefficient/2.0)*dynamic_pressure
                nodal_force = node.GetSolutionStepValue(KratosMultiphysics.REACTION) + added_force
                node.SetSolutionStepValue(KratosMultiphysics.REACTION, nodal_force)
        total_force = KratosMultiphysics.VariableUtils().SumHistoricalNodeVectorVariable(KratosMultiphysics.REACTION, self.body_model_part, 0)
        KratosMultiphysics.Logger.PrintInfo('ComputeForcesOnNodesProcess','Lift Force = ', total_force[1])
        KratosMultiphysics.Logger.PrintInfo('ComputeForcesOnNodesProcess','Drag Force = ', total_force[0])
        KratosMultiphysics.Logger.PrintInfo('ComputeForcesOnNodesProcess','Side Force = ', total_force[2])
        if self.create_output_file:
            # Write lift / q_inf (a lift-coefficient-like quantity) for post-processing.
            with open("cl_points_with_lift.dat", 'w') as cl_file:
                cl_file.write('{0:15.12f}'.format(total_force[1]/dynamic_pressure))
/Flask-State-1.1.4.tar.gz/Flask-State-1.1.4/src/flask_state/migrations/versions/637920a840f7_.py | import sqlalchemy as sa
from alembic import op
from sqlalchemy.dialects import mysql
from flask_state.conf.config import Config
# revision identifiers, used by Alembic.
revision = "637920a840f7"
down_revision = None
branch_labels = None
depends_on = None
def upgrade(engine_name):
    """Dispatch to the engine-specific upgrade routine (sqlite bind only)."""
    if engine_name == Config.DEFAULT_BIND_SQLITE:
        handler = globals()["upgrade_%s" % engine_name]
        handler()
def downgrade(engine_name):
    """Dispatch to the engine-specific downgrade routine (sqlite bind only)."""
    if engine_name == Config.DEFAULT_BIND_SQLITE:
        handler = globals()["downgrade_%s" % engine_name]
        handler()
def upgrade_flask_state_sqlite():
    """Add the ``cpus`` column to ``flask_state_host`` (sqlite bind).

    NOTE(review): the "[]" string default suggests the column holds a
    JSON-encoded list of per-core CPU percentages — confirm against the
    model definition.
    """
    op.add_column(
        "flask_state_host",
        sa.Column(
            "cpus", sa.String(length=128), nullable=True, server_default="[]"
        ),
    )
def downgrade_flask_state_sqlite():
    """Drop the ``cpus`` column from ``flask_state_host`` (sqlite bind).

    SQLite cannot drop a column in place, so this uses the classic
    recreate-and-copy pattern: create a temporary table without the column,
    copy every row over, drop the old table, rename the copy back, and
    recreate the index.
    """
    def _default_ts(name):
        # timestamp column defaulting to the current time
        return sa.Column(
            name,
            mysql.DATETIME(),
            server_default=sa.text("(CURRENT_TIMESTAMP)"),
            nullable=True,
        )
    def _metric(name, column_type):
        # nullable numeric metric column defaulting to 0
        return sa.Column(
            name,
            column_type,
            server_default=sa.text("0"),
            nullable=True,
        )
    op.create_table(
        "flask_state_host_dg_tmp",
        sa.Column(
            "id",
            mysql.INTEGER(unsigned=True),
            autoincrement=True,
            nullable=False,
        ),
        _default_ts("create_time"),
        _default_ts("update_time"),
        _metric("cpu", mysql.FLOAT(unsigned=True)),
        _metric("memory", mysql.FLOAT(unsigned=True)),
        sa.Column(
            "load_avg", sa.String(length=32), server_default="", nullable=True
        ),
        _metric("disk_usage", mysql.FLOAT(unsigned=True)),
        _metric("boot_seconds", mysql.INTEGER(unsigned=True)),
        _metric("ts", mysql.BIGINT(unsigned=True)),
        _metric("used_memory", mysql.INTEGER(unsigned=True)),
        _metric("used_memory_rss", mysql.INTEGER(unsigned=True)),
        _metric("connected_clients", mysql.SMALLINT(unsigned=True)),
        _metric("uptime_in_seconds", mysql.INTEGER(unsigned=True)),
        _metric("mem_fragmentation_ratio", mysql.FLOAT(unsigned=True)),
        _metric("keyspace_hits", mysql.INTEGER(unsigned=True)),
        _metric("keyspace_misses", mysql.INTEGER(unsigned=True)),
        _metric("hits_ratio", mysql.FLOAT(unsigned=True)),
        _metric("delta_hits_ratio", mysql.FLOAT(unsigned=True)),
        sa.PrimaryKeyConstraint("id"),
    )
    # Copy every row (all columns except the dropped ``cpus``) into the new table.
    op.execute(
        "insert into flask_state_host_dg_tmp(id, create_time, update_time, cpu, memory, load_avg, disk_usage, boot_seconds, ts, used_memory, used_memory_rss, connected_clients, uptime_in_seconds, mem_fragmentation_ratio, keyspace_hits, keyspace_misses, hits_ratio, delta_hits_ratio) select id, create_time, update_time, cpu, memory, load_avg, disk_usage, boot_seconds, ts, used_memory, used_memory_rss, connected_clients, uptime_in_seconds, mem_fragmentation_ratio, keyspace_hits, keyspace_misses, hits_ratio, delta_hits_ratio from flask_state_host;"
    )
    op.drop_table("flask_state_host")
    op.rename_table("flask_state_host_dg_tmp", "flask_state_host")
    op.create_index(
        "idx_host_ts", "flask_state_host", [sa.text("ts DESC")], unique=False
    )
/Mathics_Django-6.0.0-py3-none-any.whl/mathics_django/web/media/js/mathjax/jax/output/HTML-CSS/fonts/STIX/General/Bold/Latin1Supplement.js | MathJax.Hub.Insert(MathJax.OutputJax["HTML-CSS"].FONTDATA.FONTS["STIXGeneral-bold"],{160:[0,0,250,0,0],161:[501,203,333,82,252],162:[588,140,500,53,458],163:[684,16,500,21,477],164:[542,10,500,-26,526],165:[676,0,500,-64,547],166:[691,19,220,66,154],167:[691,132,500,57,443],168:[666,-537,333,-2,337],169:[691,19,747,26,721],170:[688,-397,300,-1,301],171:[415,-36,500,23,473],172:[399,-108,750,65,685],173:[287,-171,333,44,287],174:[691,19,747,26,721],175:[637,-565,333,1,331],176:[688,-402,400,57,343],177:[518,151,770,65,685],178:[688,-275,300,0,300],179:[688,-268,300,3,297],180:[713,-528,333,86,324],181:[461,206,556,33,536],182:[676,186,639,60,579],183:[417,-248,250,41,210],184:[0,218,333,68,294],185:[688,-275,300,28,273],186:[688,-397,330,18,312],187:[415,-36,500,27,477],188:[688,12,750,28,743],189:[688,12,750,-7,775],190:[688,12,750,23,733],191:[501,201,500,55,443],192:[963,0,722,9,689],193:[963,0,722,9,689],194:[954,0,722,9,689],195:[924,0,722,9,689],196:[916,0,722,9,689],197:[1000,0,722,9,689],198:[676,0,1000,4,951],199:[691,218,722,49,687],200:[963,0,667,16,641],201:[963,0,667,16,641],202:[954,0,667,16,641],203:[916,0,667,16,641],204:[963,0,389,20,370],205:[963,0,389,20,370],206:[954,0,389,20,370],207:[916,0,389,20,370],208:[676,0,722,6,690],209:[924,18,722,16,701],210:[963,19,778,35,743],211:[963,19,778,35,743],212:[954,19,778,35,743],213:[924,19,778,35,743],214:[916,19,778,35,743],215:[538,33,702,66,636],216:[737,74,778,35,743],217:[963,19,722,16,701],218:[963,19,722,16,701],219:[954,19,722,16,701],220:[916,19,722,16,701],221:[963,0,722,15,699],222:[676,0,611,16,600],223:[691,12,556,19,517],224:[713,14,500,25,488],225:[713,14,500,25,488],226:[704,14,500,25,488],227:[674,14,500,25,488],228:[666,14,500,25,488],229:[752,14,500,25,488],230:[473,14,722,33,694],231:[473,218,444,25,430],232:[713,14,444,25,427],233:[713,14,444,25,4
27],234:[704,14,444,25,427],235:[666,14,444,25,427],236:[713,0,278,14,257],237:[713,0,278,15,258],238:[704,0,278,-29,308],239:[666,0,278,-29,310],240:[691,14,500,25,476],241:[674,0,556,21,539],242:[713,14,500,25,476],243:[713,14,500,25,476],244:[704,14,500,25,476],245:[674,14,500,25,476],246:[666,14,500,25,476],247:[537,31,570,33,537],248:[549,92,500,25,476],249:[713,14,556,16,538],250:[713,14,556,16,538],251:[704,14,556,16,538],252:[666,14,556,16,538],253:[713,205,500,16,482],254:[676,205,556,19,524],255:[666,205,500,16,482]});MathJax.Ajax.loadComplete(MathJax.OutputJax["HTML-CSS"].fontDir+"/General/Bold/Latin1Supplement.js"); | PypiClean |
/BEATAALU-0.13.1.tar.gz/BEATAALU-0.13.1/econml/iv/nnet/_deepiv.py | import numpy as np
from ..._cate_estimator import BaseCateEstimator
from ...utilities import check_input_arrays, _deprecate_positional, deprecated, MissingModule
try:
import keras
from keras import backend as K
import keras.layers as L
from keras.models import Model
except ImportError as exn:
keras = K = L = Model = MissingModule("keras and tensorflow are no longer dependencies of the main econml "
"package; install econml[tf] or econml[all] to require them, or install "
"them separately, to use DeepIV", exn)
# TODO: make sure to use random seeds wherever necessary
# TODO: make sure that the public API consistently uses "T" instead of "P" for the treatment
# unfortunately with the Theano and Tensorflow backends,
# the straightforward use of K.stop_gradient can cause an error
# because the parameters of the intermediate layers are now disconnected from the loss;
# therefore we add a pointless multiplication by 0 to the values in each of the variables in vs
# so that those layers remain connected but with 0 gradient
def _zero_grad(e, vs):
    """Return ``e`` with gradients blocked, while keeping the tensors in ``vs``
    connected to the graph through a zero-valued term (the Theano and
    TensorFlow backends otherwise error out because the intermediate layers'
    parameters become disconnected from the loss)."""
    if K.backend() == 'cntk':
        return K.stop_gradient(e)
    flattened = [K.batch_flatten(v) for v in vs]
    zero_term = 0 * K.sum(K.concatenate(flattened))
    return K.stop_gradient(e) + zero_term
def mog_model(n_components, d_x, d_t):
    """
    Build a mixture-of-Gaussians head on top of a d_x-dimensional input.

    The head produces the mixture weights ``pi`` (softmax over components),
    the component means ``mu`` (one d_t-dimensional vector per component) and
    the per-component scales ``sigma`` (kept positive via an exp transform).

    Parameters
    ----------
    n_components : int
        The number of components in the mixture model
    d_x : int
        The number of dimensions in the layer used as input
    d_t : int
        The number of dimensions in the output

    Returns
    -------
    A Keras model mapping the d_x-dimensional input to [pi, mu, sigma].
    """
    inp = L.Input((d_x,))
    weights = L.Dense(n_components, activation='softmax')(inp)
    flat_means = L.Dense(n_components * d_t)(inp)
    means = L.Reshape((n_components, d_t))(flat_means)
    scales = L.Lambda(K.exp)(L.Dense(n_components)(inp))
    return Model([inp], [weights, means, scales])
def mog_loss_model(n_components, d_t):
    """
    Create a Keras model that computes the loss of a mixture of Gaussians model on data.

    Parameters
    ----------
    n_components : int
        The number of components in the mixture model
    d_t : int
        The number of dimensions in the output

    Returns
    -------
    A Keras model that takes as inputs pi, mu, sigma, and t and generates a single output containing
    the (per-sample) negative log-likelihood loss.
    """
    pi = L.Input((n_components,))  # mixture weights
    mu = L.Input((n_components, d_t))  # component means
    sig = L.Input((n_components,))  # per-component scale (spherical: one sigma shared over the d_t dims)
    t = L.Input((d_t,))  # observed data point
    # || t - mu_i || ^2
    d2 = L.Lambda(lambda d: K.sum(K.square(d), axis=-1),
                  output_shape=(n_components,))(
        L.Subtract()([L.RepeatVector(n_components)(t), mu])
    )
    # LL = C - log(sum(pi_i/sig^d * exp(-d2/(2*sig^2))))
    # Use logsumexp for numeric stability:
    # LL = C - log(sum(exp(-d2/(2*sig^2) + log(pi_i/sig^d))))
    # TODO: does the numeric stability actually make any difference?
    def make_logloss(d2, sig, pi):
        # negative log-likelihood, up to the constant C dropped above
        return -K.logsumexp(-d2 / (2 * K.square(sig)) + K.log(pi / K.pow(sig, d_t)), axis=-1)
    ll = L.Lambda(lambda dsp: make_logloss(*dsp), output_shape=(1,))([d2, sig, pi])
    m = Model([pi, mu, sig, t], [ll])
    return m
def mog_sample_model(n_components, d_t):
    """
    Create a model that generates samples from a mixture of Gaussians.

    Sampling is done in two stages: a component index is drawn from the
    categorical distribution ``pi`` by inverse-CDF sampling against the
    cumulative sum of the weights, then a draw from the selected Gaussian is
    produced via the reparameterization ``mu + sig * eps`` with
    ``eps ~ N(0, I)``. Gradients are blocked through the sampling step.

    Parameters
    ----------
    n_components : int
        The number of components in the mixture model
    d_t : int
        The number of dimensions in the output

    Returns
    -------
    A Keras model that takes as inputs pi, mu, and sigma, and generates a single output containing a sample.
    """
    pi = L.Input((n_components,))
    mu = L.Input((n_components, d_t))
    sig = L.Input((n_components,))
    # CNTK backend can't randomize across batches and doesn't implement cumsum (at least as of June 2018,
    # see Known Issues on https://docs.microsoft.com/en-us/cognitive-toolkit/Using-CNTK-with-Keras)
    def sample(pi, mu, sig):
        batch_size = K.shape(pi)[0]
        if K.backend() == 'cntk':
            # generate cumulative sum via matrix multiplication
            cumsum = K.dot(pi, K.constant(np.triu(np.ones((n_components, n_components)))))
        else:
            cumsum = K.cumsum(pi, 1)
        # shift right so entry i holds the CDF value *before* component i
        cumsum_shift = K.concatenate([K.zeros_like(cumsum[:, 0:1]), cumsum])[:, :-1]
        if K.backend() == 'cntk':
            import cntk as C
            # Generate standard uniform values in shape (batch_size,1)
            # (since we can't use the dynamic batch_size with random.uniform in CNTK,
            # we use uniform_like instead with an input of an appropriate shape)
            rndSmp = C.random.uniform_like(pi[:, 0:1])
        else:
            rndSmp = K.random_uniform((batch_size, 1))
        # component i is selected iff cumsum_shift[i] <= u < cumsum[i]
        cmp1 = K.less_equal(cumsum_shift, rndSmp)
        cmp2 = K.less(rndSmp, cumsum)
        # convert to floats and multiply to perform equivalent of logical AND
        rndIndex = K.cast(cmp1, K.floatx()) * K.cast(cmp2, K.floatx())
        if K.backend() == 'cntk':
            # Generate standard normal values in shape (batch_size,1,d_t)
            # (since we can't use the dynamic batch_size with random.normal in CNTK,
            # we use normal_like instead with an input of an appropriate shape)
            rndNorms = C.random.normal_like(mu[:, 0:1, :])  # K.random_normal((1,d_t))
        else:
            rndNorms = K.random_normal((batch_size, 1, d_t))
        # reparameterized Gaussian draw for every component
        rndVec = mu + K.expand_dims(sig) * rndNorms
        # exactly one entry should be nonzero for each b,d combination; use sum to select it
        return K.sum(K.expand_dims(rndIndex) * rndVec, 1)
    # prevent gradient from passing through sampling
    samp = L.Lambda(lambda pms: _zero_grad(sample(*pms), pms), output_shape=(d_t,))
    samp.trainable = False
    return Model([pi, mu, sig], samp([pi, mu, sig]))
# three options: biased or upper-bound loss require a single number of samples;
# unbiased can take different numbers for the network and its gradient
def response_loss_model(h, p, d_z, d_x, d_y, samples=1, use_upper_bound=False, gradient_samples=0):
    """
    Create a Keras model that computes the loss of a response model on data.

    Parameters
    ----------
    h : (tensor, tensor) -> Layer
        Method for building a model of y given p and x
    p : (tensor, tensor) -> Layer
        Method for building a model of p given z and x
    d_z : int
        The number of dimensions in z
    d_x : int
        The number of dimensions in x
    d_y : int
        The number of dimensions in y
    samples: int
        The number of samples to use
    use_upper_bound : bool
        Whether to use an upper bound to the true loss
        (equivalent to adding a regularization penalty on the variance of h)
    gradient_samples : int
        The number of separate additional samples to use when calculating the gradient.
        This can only be nonzero if use_upper_bound is False, in which case the gradient of
        the returned loss will be an unbiased estimate of the gradient of the true loss.

    Returns
    -------
    A Keras model that takes as inputs z, x, and y and generates a single output containing the loss.
    """
    # the two loss variants are mutually exclusive
    assert not (use_upper_bound and gradient_samples)
    # sample: (() -> Layer, int) -> Layer
    def sample(f, n):
        # average of n independent draws of f (each call to f re-samples)
        assert n > 0
        if n == 1:
            return f()
        else:
            return L.average([f() for _ in range(n)])
    z, x, y = [L.Input((d,)) for d in [d_z, d_x, d_y]]
    if gradient_samples:
        # we want to separately sample the gradient; we use stop_gradient to treat the sampled model as constant
        # the overall computation ensures that we have an interpretable loss (y-h̅(p,x))²,
        # but also that the gradient is -2(y-h̅(p,x))∇h̅(p,x) with *different* samples used for each average
        diff = L.subtract([y, sample(lambda: h(p(z, x), x), samples)])
        grad = sample(lambda: h(p(z, x), x), gradient_samples)
        def make_expr(grad, diff):
            return K.stop_gradient(diff) * (K.stop_gradient(diff + 2 * grad) - 2 * grad)
        expr = L.Lambda(lambda args: make_expr(*args))([grad, diff])
    elif use_upper_bound:
        # upper bound: average of squared errors (penalizes the variance of h)
        expr = sample(lambda: L.Lambda(K.square)(L.subtract([y, h(p(z, x), x)])), samples)
    else:
        # biased estimate: squared error of the averaged samples
        expr = L.Lambda(K.square)(L.subtract([y, sample(lambda: h(p(z, x), x), samples)]))
    return Model([z, x, y], [expr])
class DeepIV(BaseCateEstimator):
"""
The Deep IV Estimator (see http://proceedings.mlr.press/v70/hartford17a/hartford17a.pdf).
Parameters
----------
n_components : int
Number of components in the mixture density network
m : (tensor, tensor) -> Layer
Method for building a Keras model that featurizes the z and x inputs
h : (tensor, tensor) -> Layer
Method for building a model of y given t and x
n_samples : int
The number of samples to use
use_upper_bound_loss : bool, optional
Whether to use an upper bound to the true loss
(equivalent to adding a regularization penalty on the variance of h).
Defaults to False.
n_gradient_samples : int, optional
The number of separate additional samples to use when calculating the gradient.
This can only be nonzero if user_upper_bound is False, in which case the gradient of
the returned loss will be an unbiased estimate of the gradient of the true loss.
Defaults to 0.
optimizer : string, optional
The optimizer to use. Defaults to "adam"
first_stage_options : dictionary, optional
The keyword arguments to pass to Keras's `fit` method when training the first stage model.
Defaults to `{"epochs": 100}`.
second_stage_options : dictionary, optional
The keyword arguments to pass to Keras's `fit` method when training the second stage model.
Defaults to `{"epochs": 100}`.
"""
def __init__(self, *,
n_components,
m,
h,
n_samples, use_upper_bound_loss=False, n_gradient_samples=0,
optimizer='adam',
first_stage_options={"epochs": 100},
second_stage_options={"epochs": 100}):
self._n_components = n_components
self._m = m
self._h = h
self._n_samples = n_samples
self._use_upper_bound_loss = use_upper_bound_loss
self._n_gradient_samples = n_gradient_samples
self._optimizer = optimizer
self._first_stage_options = first_stage_options
self._second_stage_options = second_stage_options
super().__init__()
    @BaseCateEstimator._wrap_fit
    def fit(self, Y, T, *, X, Z, inference=None):
        """Estimate the counterfactual model from data.

        That is, estimate functions τ(·, ·, ·), ∂τ(·, ·).

        Parameters
        ----------
        Y: (n × d_y) matrix or vector of length n
            Outcomes for each sample
        T: (n × dₜ) matrix or vector of length n
            Treatments for each sample
        X: (n × dₓ) matrix
            Features for each sample
        Z: (n × d_z) matrix
            Instruments for each sample
        inference: string, :class:`.Inference` instance, or None
            Method for performing inference. This estimator supports 'bootstrap'
            (or an instance of :class:`.BootstrapInference`)

        Returns
        -------
        self
        """
        Y, T, X, Z = check_input_arrays(Y, T, X, Z)
        assert 1 <= np.ndim(X) <= 2
        assert 1 <= np.ndim(Z) <= 2
        assert 1 <= np.ndim(T) <= 2
        assert 1 <= np.ndim(Y) <= 2
        assert np.shape(X)[0] == np.shape(Y)[0] == np.shape(T)[0] == np.shape(Z)[0]
        # in case vectors were passed for Y or T, keep track of trailing dims for reshaping effect output
        d_x, d_y, d_z, d_t = [np.shape(a)[1] if np.ndim(a) > 1 else 1 for a in [X, Y, Z, T]]
        x_in, y_in, z_in, t_in = [L.Input((d,)) for d in [d_x, d_y, d_z, d_t]]
        n_components = self._n_components
        # First stage: model the conditional treatment distribution T | Z, X with a
        # mixture density network (featurizer m followed by a mixture-of-Gaussians head).
        treatment_network = self._m(z_in, x_in)
        # the dimensionality of the output of the network
        # TODO: is there a more robust way to do this?
        d_n = K.int_shape(treatment_network)[-1]
        pi, mu, sig = mog_model(n_components, d_n, d_t)([treatment_network])
        ll = mog_loss_model(n_components, d_t)([pi, mu, sig, t_in])
        model = Model([z_in, x_in, t_in], [ll])
        model.add_loss(L.Lambda(K.mean)(ll))
        model.compile(self._optimizer)
        # TODO: do we need to give the user more control over other arguments to fit?
        model.fit([Z, X, T], [], **self._first_stage_options)
        # Second stage: fit the response model h(T, X), sampling treatments from the
        # (now fitted) first-stage mixture inside the response loss.
        lm = response_loss_model(lambda t, x: self._h(t, x),
                                 lambda z, x: Model([z_in, x_in],
                                                    # subtle point: we need to build a new model each time,
                                                    # because each model encapsulates its randomness
                                                    [mog_sample_model(n_components, d_t)([pi, mu, sig])])([z, x]),
                                 d_z, d_x, d_y,
                                 self._n_samples, self._use_upper_bound_loss, self._n_gradient_samples)
        rl = lm([z_in, x_in, y_in])
        response_model = Model([z_in, x_in, y_in], [rl])
        response_model.add_loss(L.Lambda(K.mean)(rl))
        response_model.compile(self._optimizer)
        # TODO: do we need to give the user more control over other arguments to fit?
        response_model.fit([Z, X, Y], [], **self._second_stage_options)
        # Prediction model used by effect()/predict(): h applied to (T, X).
        self._effect_model = Model([t_in, x_in], [self._h(t_in, x_in)])
        # TODO: it seems like we need to sum over the batch because we can only apply gradient to a scalar,
        # not a general tensor (because of how backprop works in every framework)
        # (alternatively, we could iterate through the batch in addition to iterating through the output,
        # but this seems annoying...)
        # Therefore, it's important that we use a batch size of 1 when we call predict with this model
        def calc_grad(t, x):
            # gradient of each output dimension of h w.r.t. the treatment,
            # stacked into a (batch, d_y, d_t) tensor
            h = self._h(t, x)
            all_grads = K.concatenate([g
                                       for i in range(d_y)
                                       for g in K.gradients(K.sum(h[:, i]), [t])])
            return K.reshape(all_grads, (-1, d_y, d_t))
        self._marginal_effect_model = Model([t_in, x_in], L.Lambda(lambda tx: calc_grad(*tx))([t_in, x_in]))
def effect(self, X=None, T0=0, T1=1):
"""
Calculate the heterogeneous treatment effect τ(·,·,·).
The effect is calculated between the two treatment points
conditional on a vector of features on a set of m test samples {T0ᵢ, T1ᵢ, Xᵢ}.
Parameters
----------
T0: (m × dₜ) matrix
Base treatments for each sample
T1: (m × dₜ) matrix
Target treatments for each sample
X: optional (m × dₓ) matrix
Features for each sample
Returns
-------
τ: (m × d_y) matrix
Heterogeneous treatment effects on each outcome for each sample
Note that when Y is a vector rather than a 2-dimensional array, the corresponding
singleton dimension will be collapsed (so this method will return a vector)
"""
X, T0, T1 = check_input_arrays(X, T0, T1)
if np.ndim(T0) == 0:
T0 = np.repeat(T0, 1 if X is None else np.shape(X)[0])
if np.ndim(T1) == 0:
T1 = np.repeat(T1, 1 if X is None else np.shape(X)[0])
if X is None:
X = np.empty((np.shape(T0)[0], 0))
return (self._effect_model.predict([T1, X]) - self._effect_model.predict([T0, X])).reshape((-1,) + self._d_y)
def marginal_effect(self, T, X=None):
"""
Calculate the marginal effect ∂τ(·, ·) around a base treatment point conditional on features.
Parameters
----------
T: (m × dₜ) matrix
Base treatments for each sample
X: optional(m × dₓ) matrix
Features for each sample
Returns
-------
grad_tau: (m × d_y × dₜ) array
Heterogeneous marginal effects on each outcome for each sample
Note that when Y or T is a vector rather than a 2-dimensional array,
the corresponding singleton dimensions in the output will be collapsed
(e.g. if both are vectors, then the output of this method will also be a vector)
"""
T, X = check_input_arrays(T, X)
# TODO: any way to get this to work on batches of arbitrary size?
return self._marginal_effect_model.predict([T, X], batch_size=1).reshape((-1,) + self._d_y + self._d_t)
def predict(self, T, X):
"""Predict outcomes given treatment assignments and features.
Parameters
----------
T: (m × dₜ) matrix
Base treatments for each sample
X: (m × dₓ) matrix
Features for each sample
Returns
-------
Y: (m × d_y) matrix
Outcomes for each sample
Note that when Y is a vector rather than a 2-dimensional array, the corresponding
singleton dimension will be collapsed (so this method will return a vector)
"""
T, X = check_input_arrays(T, X)
return self._effect_model.predict([T, X]).reshape((-1,) + self._d_y) | PypiClean |
/netket-3.9.2.tar.gz/netket-3.9.2/netket/utils/moduletools.py |
import sys
def _hide_submodules(
    module_name, *, remove_self=True, ignore=tuple(), hide_folder=tuple()
):
    """
    Hide all submodules created by files (not folders) in module_name defined
    at module_path.

    Submodules are "hidden" by rebinding them to an underscore-prefixed
    alias (``foo`` -> ``_foo``) and deleting the public attribute, then
    rebuilding ``__all__`` via :func:`auto_export`.

    Args:
        module_name: fully qualified name of the package module to process;
            it must already be present in ``sys.modules``.
        remove_self: if True, also removes this ``_hide_submodules``
            function from the module.
        ignore: submodule names that should remain publicly visible.
        hide_folder: sub-folder (sub-package) names to hide as well; by
            default only ``.py`` files are considered.
    """
    import os

    module = sys.modules[module_name]
    # A package's __path__ lists the directories its submodules live in;
    # only the first entry is scanned here.
    module_path = module.__path__[0]
    for file in os.listdir(module_path):
        if file.endswith(".py") and not file == "__init__.py":
            mod_name = file[:-3]  # strip the ".py" suffix
        elif file in hide_folder:
            mod_name = file
        else:
            mod_name = None  # not a candidate for hiding
        if mod_name is not None:
            # Only rename attributes that exist, are public, and not ignored.
            if (
                hasattr(module, mod_name)
                and mod_name[0] != "_"
                and mod_name not in ignore
            ):
                new_name = "_" + mod_name
                setattr(module, new_name, getattr(module, mod_name))
                delattr(module, mod_name)
    if remove_self and hasattr(module, "_hide_submodules"):
        delattr(module, "_hide_submodules")
    # Rebuild __all__ so documentation paths stay correct.
    auto_export(module)
def rename_class(new_name):
    """Return a class decorator that renames the decorated class.

    Both ``__name__`` and ``__qualname__`` are set to *new_name*; the
    class object itself is returned otherwise unchanged.
    """

    def _apply(cls):
        for attr in ("__name__", "__qualname__"):
            setattr(cls, attr, new_name)
        return cls

    return _apply
def export(fn):
    """
    Add the function `fn` to the list of exported attributes of its
    defining module, `__all__`, creating that list if needed.

    Args:
        fn: the function or class to export.

    Returns:
        `fn` itself, so this is usable as a decorator.
    """
    owner = sys.modules[fn.__module__]
    if not hasattr(owner, "__all__"):
        owner.__all__ = []
    owner.__all__.append(fn.__name__)
    return fn
def auto_export(module):
    """
    Automatically extend ``__all__`` with every public attribute of *module*.

    This is necessary to have correct paths in the documentation.

    Args:
        module: a module object, or the name of a module already imported.
    """
    if isinstance(module, str):
        module = sys.modules[module]
    if not hasattr(module, "__all__"):
        module.__all__ = []
    exported = module.__all__
    # Append public names (no leading underscore) not listed yet.
    for name in dir(module):
        if not name.startswith("_") and name not in exported:
            exported.append(name)
def hide_unexported(module_name):
    """
    Overloads the `__dir__` function of the given module in order to
    only show on autocompletion the attributes inside of `__all__`.

    You can add to `__all__` by using the decorator :ref:`@export`.

    Args:
        module_name: the name of the module to process.
    """
    module = sys.modules[module_name]

    def __dir__():
        return module.__all__

    # PEP 562: a module-level __dir__ customises the result of dir(module).
    # (Fixed: stray packaging residue at the end of this line broke the syntax.)
    setattr(module, "__dir__", __dir__)
/BlueWhale3_Network-1.4.2-cp38-cp38-macosx_10_9_x86_64.whl/orangecontrib/network/network/base.py | from functools import reduce, wraps, partial
from typing import Sequence
import numpy as np
import scipy.sparse as sp
from numpy.lib.stride_tricks import as_strided
class Edges:
    """One set of graph edges stored as a scipy CSR adjacency matrix.

    ``edges[i, j]`` holds the weight of the edge from node ``i`` to node
    ``j``; ``edge_data`` optionally carries extra per-edge payload aligned
    with the stored entries, and ``name`` labels the edge set. The
    degree/neighbour methods form an interface implemented by the
    directed/undirected subclasses.
    """
    directed = None  # overridden with True/False by subclasses

    def __init__(self,
                 edges,  # row=from, column=to
                 edge_data: Sequence = None,
                 name: str = ""):
        self.edges = edges.tocsr(copy=True)
        self.edges.sum_duplicates()
        self.edge_data = edge_data
        self.name = name

    def out_degrees(self, *, weighted=False) -> np.ndarray:
        pass

    def in_degrees(self, *, weighted=False) -> np.ndarray:
        pass

    def degrees(self, *, weighted=False) -> np.ndarray:
        pass

    def out_degree(self, node, *, weighted=False) -> float:
        pass

    def in_degree(self, node, *, weighted=False) -> float:
        pass

    def degree(self, node, *, weighted=False) -> float:
        pass

    def outgoing(self, node, weights=False) -> np.ndarray:
        pass

    def ingoing(self, node, weights=False) -> np.ndarray:
        pass

    def neighbours(self, node, edge_type=None, weights=False) -> np.ndarray:
        pass

    @staticmethod
    def _compose_neighbours(node: int, matrix: sp.csr_matrix, weights: bool):
        """Return neighbours of *node* in *matrix*.

        Without weights: 1-d array of neighbour indices. With weights: a
        (2, k) array stacking neighbour indices over edge weights.
        """
        # CSR row `node` occupies indices[fr:to] / data[fr:to].
        fr, to = matrix.indptr[node], matrix.indptr[node + 1]
        if not weights:
            return matrix.indices[fr:to]
        else:
            # Fixed: np.vstack takes a single sequence of arrays; the
            # original passed two positional arguments, raising TypeError.
            return np.vstack((np.atleast_2d(matrix.indices[fr:to]),
                              np.atleast_2d(matrix.data[fr:to])))

    @staticmethod
    def _compute_degrees(edges, weighted):
        """All nodes' degrees: per-row weight sums or stored-entry counts."""
        if weighted:
            return edges.sum(axis=1).getA1()
        else:
            # CSR row lengths = difference of consecutive row pointers.
            return edges.indptr[1:] - edges.indptr[:-1]

    @staticmethod
    def _compute_degree(edges, node, weighted):
        """One node's degree: weight sum or stored-entry count of its row."""
        fr, to = edges.indptr[node], edges.indptr[node + 1]
        if weighted:
            return edges.data[fr:to].sum()
        else:
            return to - fr

    def subset(self, mask, node_renumeration, shape):
        """Return a new edge set restricted to the nodes selected by *mask*.

        Args:
            mask: boolean array over nodes; an edge survives only if both
                endpoints are selected.
            node_renumeration: maps old node indices to new compacted ones.
            shape: shape of the resulting adjacency matrix.
        """
        edges = self.edges.tocoo()
        edge_mask = np.logical_and(mask[edges.row], mask[edges.col])
        row = node_renumeration[edges.row[edge_mask]]
        col = node_renumeration[edges.col[edge_mask]]
        data = edges.data[edge_mask]
        edge_data = self.edge_data[edge_mask] if self.edge_data is not None \
            else None
        return type(self)(
            sp.csr_matrix(
                (data, (row, col)), shape=shape), edge_data, self.name)
class DirectedEdges(Edges):
    """Directed edge set: `edges` holds out-edges, `in_edges` the transpose."""
    directed = True

    def __init__(self,
                 edges: sp.csr_matrix,  # row=from, column=to
                 edge_data: Sequence = None,
                 name: str = ""):
        super().__init__(edges, edge_data, name)
        # Transposed copy for fast row-wise iteration over incoming edges.
        self.in_edges = self.edges.transpose()

    def out_degrees(self, *, weighted=False):
        return self._compute_degrees(self.edges, weighted)

    def in_degrees(self, *, weighted=False):
        return self._compute_degrees(self.in_edges, weighted)

    def degrees(self, *, weighted=False):
        # Total degree = out-degree + in-degree.
        return self._compute_degrees(self.edges, weighted) \
               + self._compute_degrees(self.in_edges, weighted)

    def out_degree(self, node, *, weighted=False):
        return self._compute_degree(self.edges, node, weighted)

    def in_degree(self, node, *, weighted=False):
        return self._compute_degree(self.in_edges, node, weighted)

    def degree(self, node, *, weighted=False):
        # Fixed: the original referenced a nonexistent `self.out_Edgesnode`
        # (a typo for `self.edges, node`), raising AttributeError.
        return self._compute_degree(self.in_edges, node, weighted) \
               + self._compute_degree(self.edges, node, weighted)

    def outgoing(self, node, weights=False):
        return self._compose_neighbours(node, self.edges, weights)

    def incoming(self, node, weights=False):
        return self._compose_neighbours(node, self.in_edges, weights)

    def neighbours(self, node, edge_type=None, weights=False):
        # Outgoing neighbours first, then incoming.
        return np.hstack(
            (self._compose_neighbours(node, self.edges, weights),
             self._compose_neighbours(node, self.in_edges, weights)))
class UndirectedEdges(Edges):
    # Undirected edge set: a symmetrised copy of the adjacency matrix
    # (`twoway_edges`) backs all degree/neighbour queries.
    directed = False

    def __init__(self,
                 edges: sp.csr_matrix,
                 edge_data: Sequence = None,
                 name: str = ""):
        super().__init__(edges, edge_data, name)
        self.twoway_edges = self._make_twoway_edges()

    def _make_twoway_edges(self):
        """Return ``edges + edges.T`` with zero-weight edges preserved.

        Sparse arithmetic would drop explicit zeros, so they are
        temporarily replaced by ``max_weight + 1`` and restored afterwards.
        """
        edges = self.edges.copy()
        n_edges = len(edges.data)
        # A zero-strided data array marks an "unweighted" matrix whose
        # stored entries all alias a single constant value.
        zero_strided = self.edges.data.strides == (0, )
        if not n_edges:
            return edges

        # Replaces 0s with max + 1 so sparse operations don't remove them
        if zero_strided:
            max_weight = edges.data[0]
            # Save (temporary) memory and CPU time
            # (relies on as_strided passing its input through np.asarray,
            # so the scalar becomes a broadcast constant — TODO confirm
            # for the installed numpy version)
            edges.data = as_strided(1, (n_edges, ), (0,))
        else:
            max_weight = np.max(edges.data)
            edges.data[edges.data == 0] = max_weight + 1
        twe = edges + edges.transpose()
        twe.sum_duplicates()
        if zero_strided:
            # Save memory
            # NOTE(review): twe may hold a different number of stored
            # entries than n_edges when the matrix is not symmetric —
            # confirm against callers.
            twe.data = as_strided(max_weight, (n_edges, ), (0,))
        else:
            twe.data[twe.data > max_weight] = 0
        return twe

    def degrees(self, *, weighted=False):
        return self._compute_degrees(self.twoway_edges, weighted)

    def degree(self, node, *, weighted=False):
        return self._compute_degree(self.twoway_edges, node, weighted)

    def neighbours(self, node, weights=False):
        return self._compose_neighbours(node, self.twoway_edges, weights)

    # For undirected graphs, in/out variants are all the same operation.
    in_degrees = out_degrees = degrees
    in_degree = out_degree = degree
    incoming = outgoing = neighbours
# Lookup table indexed by a "directed" flag: EdgeType[False] -> undirected,
# EdgeType[True] -> directed.
EdgeType = [UndirectedEdges, DirectedEdges]
def aggregate_over_edge_types(aggregate, arg_no=0):
    """Decorator factory handling an absent or None edge-type argument.

    If the positional argument at index *arg_no* (after ``graph``) is
    missing or None, the wrapped function is invoked once per edge type
    and the per-type results are combined with *aggregate*; otherwise the
    call passes straight through.
    """
    def decorate(func):
        @wraps(func)
        def dispatch(graph, *args, **kwargs):
            edge_type_given = len(args) > arg_no and args[arg_no] is not None
            if edge_type_given:
                return func(graph, *args, **kwargs)
            partial_results = []
            for edge_type in range(len(graph.edges)):
                call_args = args[:arg_no] + (edge_type,) + args[arg_no + 1:]
                partial_results.append(func(graph, *call_args, **kwargs))
            return aggregate(partial_results)
        return dispatch
    return decorate
# Combine per-edge-type results by elementwise addition (degree counts).
sum_over_edge_types = \
    partial(aggregate_over_edge_types, lambda x: reduce(np.add, x))
# Combine per-edge-type results by concatenation (neighbour lists).
concatenate_over_edge_types = partial(aggregate_over_edge_types, np.hstack)
class Network:
    """A graph: node data plus one or more Edges sets (edge types)."""

    def __init__(self, nodes: Sequence, edges: Sequence, name: str = "",
                 coordinates: np.ndarray = None):
        """
        Attributes:
            nodes (Sequence): data about nodes; it can also be just range(n)
            edges (List[Edges]): one or more set of edges
            name (str): network name

        Args:
            nodes (Sequence): data about nodes; it can also be just range(n)
            edges (sp.spmatrix or Edges or List[Edges]):
                one or more set of edges
            name (str): network name
        """
        def as_edges(edges):
            # Accept a ready-made Edges instance, or wrap a raw sparse
            # matrix as an undirected edge set.
            if isinstance(edges, Edges):
                return edges
            if sp.issparse(edges):
                return UndirectedEdges(edges)
            # Fixed: the original message lacked a space and a closing
            # quote ("...matrix,not 'Foo").
            raise ValueError(
                "edges must be an instance of 'Edges' or a sparse matrix, "
                f"not '{type(edges).__name__}'")

        self.nodes = nodes
        if isinstance(edges, Sequence):
            self.edges = [as_edges(e) for e in edges]
        else:
            self.edges = [as_edges(edges)]
        self.name = name
        self.coordinates = coordinates

    def copy(self):
        """Constructs a shallow copy of the network"""
        return type(self)(self.nodes, self.edges, self.name, self.coordinates)

    def number_of_nodes(self):
        return len(self.nodes)

    @sum_over_edge_types()
    def number_of_edges(self, edge_type=None):
        # Number of stored entries in this edge type's CSR matrix.
        return len(self.edges[edge_type].edges.indices)

    def links(self, attr, edge_type=0, matrix_type=sp.coo_matrix):
        """Return (adjacency matrix, per-edge column *attr*) for one type.

        NOTE(review): edge_data is assumed to expose ``get_column_view``
        (an Orange Table) — confirm against callers.
        """
        edges = self.edges[edge_type]
        return matrix_type(edges.edges), edges.edge_data.get_column_view(attr)

    # The decorated degree methods sum over all edge types when edge_type
    # is omitted or None; otherwise they delegate to the one edge set.
    @sum_over_edge_types()
    def out_degrees(self, edge_type=None, *, weighted=False):
        return self.edges[edge_type].out_degrees(weighted=weighted)

    @sum_over_edge_types()
    def in_degrees(self, edge_type=None, *, weighted=False):
        return self.edges[edge_type].in_degrees(weighted=weighted)

    @sum_over_edge_types()
    def degrees(self, edge_type=None, *, weighted=False):
        return self.edges[edge_type].degrees(weighted=weighted)

    @sum_over_edge_types(1)
    def out_degree(self, node, edge_type=None, *, weighted=False):
        return self.edges[edge_type].out_degree(node, weighted=weighted)

    @sum_over_edge_types(1)
    def in_degree(self, node, edge_type=None, *, weighted=False):
        return self.edges[edge_type].in_degree(node, weighted=weighted)

    @sum_over_edge_types(1)
    def degree(self, node, edge_type=None, *, weighted=False):
        return self.edges[edge_type].degree(node, weighted=weighted)

    # Neighbour queries concatenate results across edge types.
    @concatenate_over_edge_types(1)
    def outgoing(self, node, edge_type=None, weights=False):
        return self.edges[edge_type].outgoing(node, weights)

    @concatenate_over_edge_types(1)
    def incoming(self, node, edge_type=None, weights=False):
        return self.edges[edge_type].incoming(node, weights)

    @concatenate_over_edge_types(1)
    def neighbours(self, node, edge_type=None, weights=False):
        return self.edges[edge_type].neighbours(node, weights)

    def subgraph(self, mask):
        """Return a new Network restricted to the nodes selected by *mask*.

        *mask* may be a boolean mask or an array of node indices.
        """
        nodes = self.nodes[mask]
        if self.coordinates is not None:
            coordinates = self.coordinates[mask]
        else:
            coordinates = None
        if mask.dtype != bool:
            # Convert an index mask into a boolean mask over all nodes.
            mask1 = np.full((self.number_of_nodes(),), False)
            mask1[mask] = True
            mask = mask1
        # Old node index -> new compacted index for surviving nodes.
        node_renumeration = np.cumsum(mask) - 1
        shape = (len(nodes), len(nodes))
        edge_sets = [edges.subset(mask, node_renumeration, shape)
                     for edges in self.edges]
        # (Fixed: stray packaging residue at the end of this line broke
        # the syntax.)
        return Network(nodes, edge_sets, self.name, coordinates)
/MIAvisual-0.0.6-py3-none-any.whl/matplotlib/docstring.py | import inspect
class Substitution:
    """
    A decorator that performs %-substitution on an object's docstring.

    This decorator should be robust even if ``obj.__doc__`` is None (for
    example, if -OO was passed to the interpreter).

    Usage: construct a Substitution with a sequence or dictionary suitable
    for performing substitution; then decorate a suitable function with
    the constructed object, e.g.::

        sub_author_name = Substitution(author='Jason')

        @sub_author_name
        def some_function(x):
            "%(author)s wrote this function"

        # some_function.__doc__ is now "Jason wrote this function"

    One can also use positional arguments::

        sub_first_last_names = Substitution('Edgar Allen', 'Poe')

        @sub_first_last_names
        def some_function(x):
            "%s %s wrote the Raven"
    """
    def __init__(self, *args, **kwargs):
        if args and kwargs:
            raise TypeError("Only positional or keyword args are allowed")
        self.params = args or kwargs

    def __call__(self, func):
        doc = func.__doc__
        if doc:
            func.__doc__ = inspect.cleandoc(doc) % self.params
        return func

    def update(self, *args, **kwargs):
        """
        Update ``self.params`` (which must be a dict) with the supplied args.
        """
        self.params.update(*args, **kwargs)
def _recursive_subclasses(cls):
yield cls
for subcls in cls.__subclasses__():
yield from _recursive_subclasses(subcls)
class _ArtistKwdocLoader(dict):
def __missing__(self, key):
if not key.endswith(":kwdoc"):
raise KeyError(key)
name = key[:-len(":kwdoc")]
from matplotlib.artist import Artist, kwdoc
try:
cls, = [cls for cls in _recursive_subclasses(Artist)
if cls.__name__ == name]
except ValueError as e:
raise KeyError(key) from e
return self.setdefault(key, kwdoc(cls))
class _ArtistPropertiesSubstitution(Substitution):
    """
    A `.Substitution` with two additional features:

    - Substitutions of the form ``%(classname:kwdoc)s`` (ending with the
      literal ":kwdoc" suffix) trigger lookup of an Artist subclass with the
      given *classname*, and are substituted with the `.kwdoc` of that class.
    - Decorating a class triggers substitution both on the class docstring and
      on the class' ``__init__`` docstring (which is a commonly required
      pattern for Artist subclasses).
    """

    def __init__(self):
        # The lazy loader supplies the kwdoc substitutions on demand.
        self.params = _ArtistKwdocLoader()

    def __call__(self, obj):
        super().__call__(obj)
        # For classes, also substitute into the constructor's docstring
        # (unless the class inherits object.__init__ unchanged).
        is_class = isinstance(obj, type)
        if is_class and obj.__init__ != object.__init__:
            self(obj.__init__)
        return obj
def copy(source):
    """Copy a docstring from another source function (if present)."""
    def do_copy(target):
        doc = source.__doc__
        if doc:
            target.__doc__ = doc
        return target
    return do_copy
# Create a decorator that will house the various docstring snippets reused
# throughout Matplotlib. Both names refer to the same shared instance.
# (Fixed: stray packaging residue at the end of this line broke the syntax.)
dedent_interpd = interpd = _ArtistPropertiesSubstitution()
/CocoRPy27-1.4.1.zip/CocoRPy27-1.4.1/testSuite/TestAny1_scannerBaseline.py | import sys
class Token( object ):
    """One scanned token: its kind, source location, text and list link."""

    def __init__( self ):
        # Token kind (integer code assigned by the scanner).
        self.kind = 0
        # Position in the source text, starting at 0.
        self.pos = 0
        # Column of the token, starting at 0.
        self.col = 0
        # Line of the token, starting at 1.
        self.line = 0
        # The matched text.
        self.val = u''
        # AW 2003-03-07: tokens are kept in a singly linked list.
        self.next = None
class Position( object ):
    """A stretch of source code (e.g. a semantic action or resolver
    expression), described by buffer, offset, length and start column."""

    def __init__( self, buf, beg, len, col ):
        # Validate argument types up front, mirroring the original contract.
        for value, required in ((buf, Buffer), (beg, int), (len, int), (col, int)):
            assert isinstance(value, required)
        self.buf = buf
        self.beg = beg   # start relative to the beginning of the file
        self.len = len   # length of stretch
        self.col = col   # column number of start position

    def getSubstring( self ):
        """Return the text covered by this position from its buffer."""
        return self.buf.readPosition( self )
class Buffer( object ):
    """Random-access wrapper over the source text for the scanner."""
    EOF = u'\u0100'   # 256 — one code point past the 8-bit character range

    def __init__( self, s ):
        self.buf = s
        self.bufLen = len(s)
        self.pos = 0
        self.lines = s.splitlines( True )

    def Read( self ):
        """Return the next character and advance, or Buffer.EOF at end."""
        if self.pos < self.bufLen:
            result = self.buf[self.pos]
            self.pos += 1
            return result
        else:
            return Buffer.EOF

    def ReadChars( self, numBytes=1 ):
        """Return the next numBytes characters and advance past them."""
        result = self.buf[ self.pos : self.pos + numBytes ]
        self.pos += numBytes
        return result

    def Peek( self ):
        """Return the next character without consuming it."""
        if self.pos < self.bufLen:
            return self.buf[self.pos]
        else:
            # Fixed: the original returned `Scanner.buffer.EOF`, but
            # Scanner has no class-level `buffer` attribute, so peeking at
            # end of input raised an error instead of returning EOF.
            return Buffer.EOF

    def getString( self, beg, end ):
        """Return the text between positions beg and end, restoring pos."""
        s = ''
        oldPos = self.getPos( )
        self.setPos( beg )
        while beg < end:
            s += self.Read( )
            beg += 1
        self.setPos( oldPos )
        return s

    def getPos( self ):
        return self.pos

    def setPos( self, value ):
        """Clamp-and-set the read position into [0, bufLen]."""
        if value < 0:
            self.pos = 0
        elif value >= self.bufLen:
            self.pos = self.bufLen
        else:
            self.pos = value

    def readPosition( self, pos ):
        """Return the text covered by a Position object."""
        assert isinstance( pos, Position )
        self.setPos( pos.beg )
        return self.ReadChars( pos.len )

    def __iter__( self ):
        # Iterate over the source line by line (line endings kept).
        return iter(self.lines)
class Scanner(object):
    """Table-driven lexical scanner generated by Coco/R.

    The whole input is tokenised eagerly in the constructor into a linked
    list of Token objects ending in a self-referencing EOF token;
    Scan()/Peek() then walk that list.

    NOTE: this module targets Python 2 (it relies on the `unicode`
    builtin).
    """
    EOL = u'\n'          # canonical end-of-line character
    eofSym = 0           # token kind used for end of file
    charSetSize = 256
    maxT = 10
    noSym = 10           # kind reported for unrecognised input

    # DFA start-state table indexed by character code (0..255); the extra
    # trailing -1 entry (index 256) is the EOF sentinel state.
    start = [
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        -1]

    def __init__( self, s ):
        self.buffer = Buffer( unicode(s) )  # the buffer instance
        self.ch = u'\0'        # current input character
        self.pos = -1          # column number of current character
        self.line = 1          # line number of current character
        self.lineStart = 0     # start position of current line
        self.oldEols = 0       # EOLs that appeared in a comment
        self.NextCh( )
        self.ignore = set( )   # set of characters to be ignored by the scanner
        self.ignore.add( ord(' ') )  # blanks are always white space

        # fill token list: tokenise the whole input into a linked list.
        self.tokens = Token( ) # the complete input token stream
        node = self.tokens

        node.next = self.NextToken( )
        node = node.next
        while node.kind != Scanner.eofSym:
            node.next = self.NextToken( )
            node = node.next

        node.next = node   # the EOF token points to itself
        node.val = u'EOF'
        self.t = self.tokens     # current token
        self.pt = self.tokens    # current peek token

    def NextCh( self ):
        """Advance to the next input character, normalising line endings."""
        if self.oldEols > 0:
            self.ch = Scanner.EOL
            self.oldEols -= 1
        else:
            self.ch = self.buffer.Read( )
            self.pos += 1
            # replace isolated '\r' by '\n' in order to make
            # eol handling uniform across Windows, Unix and Mac
            if (self.ch == u'\r') and (self.buffer.Peek() != u'\n'):
                self.ch = Scanner.EOL
            if self.ch == Scanner.EOL:
                self.line += 1
                self.lineStart = self.pos + 1

    def CheckLiteral( self ):
        # Hook for keyword recognition; this grammar defines no literals.
        lit = self.t.val

    def NextToken( self ):
        """Scan and return the next token from the buffer."""
        while ord(self.ch) in self.ignore:
            self.NextCh( )

        self.t = Token( )
        self.t.pos = self.t.pos = self.pos
        self.t.col = self.pos - self.lineStart + 1
        self.t.line = self.line
        if ord(self.ch) < len(self.start):
            state = self.start[ord(self.ch)]
        else:
            state = 0
        buf = u''
        buf += unicode(self.ch)
        self.NextCh()

        # Run the DFA until a final (or error/EOF) state is reached.
        done = False
        while not done:
            if state == -1:
                self.t.kind = Scanner.eofSym     # NextCh already done
                done = True
            elif state == 0:
                self.t.kind = Scanner.noSym      # NextCh already done
                done = True
            elif state == 1:
                self.t.kind = 1
                done = True
            elif state == 2:
                self.t.kind = 2
                done = True
            elif state == 3:
                self.t.kind = 3
                done = True
            elif state == 4:
                self.t.kind = 4
                done = True
            elif state == 5:
                self.t.kind = 5
                done = True
            elif state == 6:
                self.t.kind = 6
                done = True
            elif state == 7:
                self.t.kind = 7
                done = True
            elif state == 8:
                self.t.kind = 8
                done = True
            elif state == 9:
                self.t.kind = 9
                done = True

        self.t.val = buf
        return self.t

    def Scan( self ):
        """Consume and return the next token."""
        self.t = self.t.next
        self.pt = self.t.next
        return self.t

    def Peek( self ):
        """Return the next token (skipping pragmas) without consuming it."""
        self.pt = self.pt.next
        while self.pt.kind > self.maxT:
            self.pt = self.pt.next
        return self.pt

    def ResetPeek( self ):
        """Make Peek() restart from the current token.

        (Fixed: stray packaging residue at the end of this line broke the
        syntax.)
        """
        self.pt = self.t
/Markdownipy-1.0.0.tar.gz/Markdownipy-1.0.0/README.md | # Markdownipy

Markdownipy is a Python library to generate markdown files using only two operators and some helpers
Markdownipy can be used in jupyter notebooks to generate markdown outputs in python cells or inside any .py file, see below for examples
## `<`(lower than operator) writes text to markdown file, `|`(pipe operator) just gives property to the text
Even this README file is generated by markdownipy!
[Check out the example file](./example.py)
## Motivation
---
Lately, I have been writing markdown documentation for different projects, and I needed to put
in specs, numbers, etc., which meant copy-pasting tables and links.
I was trying to change some predefined strings to speed up my writing process, yet
instead of text manipulation I just wanted to make it as a library.
Sometimes I also forget markdown syntax, so this library requires no memorization (I hope so — there are not even any function calls) :)
Only requirement is remembering the properties of markdownipy and the rest will be handled
---
### Install
`pip install markdownipy`
---
### Quick start - example Jupyter cell
```python
from markdownipy import markdownipy
from IPython.display import display,Markdown
md = markdownipy.markdownipy()
md < "hello there" | md.bold
md < "This should be a italic text" | md.italic
display(Markdown(md.print()))
```
__See the example Jupyter notebook output:__ [Example jupyter file](jupyter_example.ipynb)
---
### Quick start - example Python code
```python
from markdownipy import markdownipy
md = markdownipy.markdownipy()
#Simple line
md < "Hello there!"
#Bold text, numbers are also allowed
md < 3.14159 | md.bold
#Italic text
md < "Above bold text is just some digits of pi" | md.italic
#One line code and codeblock
md < "One line code " | md.code
md < '''
void markdownipy_helper(int x, int y, int z):
return x*y*z
''' | md.codeb("cpp")
#Lists
md < ["Item1","Item2","Item3"]
#Tables
md < {
"Country":["Fran","Kate","Ivan"],
"Age" : [30,48,73]
}
#E-mail, links
md < "fbgencer8@gmail.com" | md.link
md < "fbgencer8@gmail.com" | md.link("My mail")
#Image
md < "./markdownipy_logo.png" | md.image
#Writing to a file, README or README.md both works!
#Even md >> sys.stdout prints the whole markdown document
md >> "README"
```
---
### Library keywords
This table is the whole documentation of markdownipy! (Assuming md is the markdown object in python)
|Keywords|Explanation|Markdown equivalent|
|:---:|:---:|:---:|
|md.bold|Bold text|`__text__`|
|md.italic|Italic text|`_text_`|
|md.strike|Strikethrough text|`~~text~~`|
|md.code|For single line code|` `code` `|
|md.codeb|Code fence, takes optional arg as language name|` ```code``` `|
|md.link or md.link(link_name)|Link text|`[link_name(optional)](link)`|
|md.hline|Horizontal line|`---`|
|md.chapter|Chapter|`Chapter_name\n===`|
|md.h1|Header level 1|`# Header1`|
|md.h2|Header level 2|`## Header2`|
|md.h3|Header level 3|`### Header3`|
|md.h4|Header level 4|`#### Header4`|
|md.h5|Header level 5|`##### Header5`|
|md.h6|Header level 6|`###### Header6`|
|md.image or md.image(image_name)|Image insertion|``|
|md.task|Task entry in a list|`[ ] text`|
|md.task_check|Checked task entry in a list|`[x] text`|
|`object` \| md.`keyword`|Pipes the above defined line property keywords to the object||
|md < `object`|Adds any object to document (str,dict,list,numbers etc.)||
|md > `file_name`|Writes document to a file||
|md > `stdout`|Prints the document to console||
|md.print()|Returns the markdown document as string||
|md.clear()|Clears the markdown document||
---
---
### Headers
```python
md < "This is a chapter" | md.chapter
md < "This is header1" | md.h1
md < "This is header2" | md.h2
md < "This is header3" | md.h3
md < "This is header4" | md.h4
md < "This is header5" | md.h5
md < "This is header6" | md.h6
```
_Output :_
---
This is a chapter
===
# This is header1
## This is header2
### This is header3
#### This is header4
##### This is header5
###### This is header6
---
---
### Bold, italic, strikethrough texts
```python
md < "This should be a bold text" | md.bold
md < "This is an italic text" | md.italic
md < "Strikethrough is banned" | md.strike
```
_Output :_
---
__This should be a bold text__
_This is an italic text_
~~Strikethrough is banned~~
---
---
### Lists
```python
md < [
"Classical mechanics" | md.bold,
[
"Newton's law of motion",
"Lagrangian Mechanics"
],
"Thermodynamics and statistical mechanics" | md.italic,
"Electromagnetism and photonics",
"Relativistic mechanics",
"Quantum mechanics, atomic physics, and molecular physics",
[
"*", #For dot bullet put bullet type
"Schrödinger equation",
"Quantum field theory",
[
"Lists can be nested :)",
"This is another liner",
[
"Oh this is getting serious" | md.strike,
"And now bunch of numbers",
[
3.1415,
2.7176,
99999,
88888
],
"Now another item"
],
],
"Quantum statistical mechanics"
],
"Optics and acoustics",
"Condensed matter physics",
"High-energy particle physics and nuclear physics",
]
```
_Output :_
---
1. __Classical mechanics__
1. Newton's law of motion
2. Lagrangian Mechanics
3. _Thermodynamics and statistical mechanics_
4. Electromagnetism and photonics
5. Relativistic mechanics
6. Quantum mechanics, atomic physics, and molecular physics
* Schrödinger equation
* Quantum field theory
1. Lists can be nested :)
2. This is another liner
1. ~~Oh this is getting serious~~
2. And now bunch of numbers
1. 3.1415
2. 2.7176
3. 99999
4. 88888
4. Now another item
* Quantum statistical mechanics
8. Optics and acoustics
9. Condensed matter physics
10. High-energy particle physics and nuclear physics
---
---
### Table
```python
md < {
"Name" : ["Albert", "Paul" | md.bold, "Richard"],
"Surname" : ["Einstein" | md.italic, "Dirac" , "Feynman" | md.italic],
}
```
_Output :_
---
|Name|Surname|
|:---:|:---:|
|Albert|_Einstein_|
|__Paul__|Dirac|
|Richard|_Feynman_|
---
---
### Links
```python
md < "https://github.com/fbgencer/markdownipy" | md.link("Markdownipy website")
```
_Output :_
---
[Markdownipy website](https://github.com/fbgencer/markdownipy)
---
---
### Image
```python
md < ("markdownipy_logo.png" | md.image("Image name"))
```
_Output :_
---

---
---
### Single line Quote
```python
md << ("With Great Power Comes Great Responsibility" | md.quote)
```
_Output :_
---
> With Great Power Comes Great Responsibility
---
---
### Task lists
```python
md < ["-",
"Take the key" | md.bold | md.task,
"Cat food" | md.bold | md.task_check
]
```
_Output :_
---
- [ ] __Take the key__
- [x] __Cat food__
---
---
| PypiClean |
/Lokai-0.3.tar.gz/Lokai-0.3/lokai/lk_worker/sys_procs/lk_import_node.py |
#-----------------------------------------------------------------------
# Find one or more nodes and print a serialisation to stdout
#-----------------------------------------------------------------------
import yaml
import base64
import StringIO

from lokai.tool_box.tb_database.orm_interface import engine
from lokai.tool_box.tb_common.configuration import handle_ini_declaration
from lokai.tool_box.tb_common.dates import timetostr

from lokai.lk_worker.extensions.extension_manager import (
    get_all_extensions,
    LK_REGISTER_TYPES_AND_MODELS)
from lokai.lk_worker.models import model
from lokai.lk_worker.models import ndNode
from lokai.lk_worker.models.builtin_data_activity import (
    ndHistory,
    ndActivity,
    )
from lokai.lk_worker.models.builtin_data_attachments import make_version
from lokai.lk_worker.models.builtin_data_attachments import NodeAttachment
from lokai.lk_worker.nodes.data_interface import get_node_dataset
from lokai.lk_worker.nodes.search import find_from_string
from lokai.lk_worker.nodes.graph import make_link
#-----------------------------------------------------------------------
# Maps serialised table names (as found in the YAML dump) to ORM classes.
# NOTE(review): ndHistory is imported above but not registered here —
# confirm whether history rows are intentionally skipped on import.
table_map = {'ndNode': ndNode,
             'ndActivity': ndActivity
             }
def populate_row(table, data_set):
    """Instantiate the ORM class registered for *table* and fill it.

    Args:
        table: key into ``table_map`` naming the model class.
        data_set: mapping of column name to value.

    Returns:
        The populated (not yet persisted) table object.
    """
    row = table_map[table]()
    for column, value in data_set.iteritems():
        row[column] = value
    return row
def import_node(source, parent):
    """ Import from source and use parent where needed

    *source* is a file-like object containing a YAML list of node
    dumps. Each node gets a freshly allocated nde_idx; attachments are
    decoded from base64 and stored; links are made either to another
    imported node (via the old idx) or to the given *parent*.
    The session is flushed and committed once at the end.
    """
    # Keep a record of nde_idx values read in from source and mapped
    # to the new idx values as we progress.
    idx_map={}
    node_list = yaml.load(source)
    for node_data in node_list:
        # NOTE(review): debug print (Python 2 syntax) left in — consider
        # removing or routing through logging.
        print node_data
        table_set = node_data['nd_node']
        data_set = table_set['ndNode']
        node_object = populate_row('ndNode', data_set)
        # Replace the serialised idx with a newly allocated one, but
        # remember the mapping so intra-file parent links still resolve.
        old_nde_idx = node_object['nde_idx']
        new_nde_idx = node_object.get_next_in_sequence()
        idx_map[old_nde_idx] = new_nde_idx
        node_object['nde_idx'] = new_nde_idx
        engine.session.add(node_object)
        # Import the remaining per-node tables, re-keyed to the new idx.
        for table, data_set in table_set.iteritems():
            if table != 'ndNode':
                table_object = populate_row(table, data_set)
                table_object['nde_idx'] = new_nde_idx
                engine.session.add(table_object)
        if 'attachments' in node_data:
            # Attachment content travels base64-encoded in the YAML.
            for nda_data in node_data['attachments']:
                nda_data['other_location'] = new_nde_idx
                nda = NodeAttachment('node',
                                     new_nde_idx,
                                     nda_data['file_name'])
                nda.set_from_object(nda_data)
                nda.store(StringIO.StringIO(
                    base64.b64decode(nda_data['file_content'])))
        if 'parent' in node_data:
            parent_idx = node_data['parent']
            if parent_idx:
                # Link to another node imported from the same file.
                make_link(new_nde_idx, idx_map[parent_idx])
            else:
                # Top-level node in the dump: link under the target parent.
                make_link(new_nde_idx, parent)
    engine.session.flush()
    engine.session.commit()
#-----------------------------------------------------------------------
def main_():
from optparse import OptionParser
import sys
parser = OptionParser()
parser.description = ("Import one or more nodes under a parent. "
)
parser.usage = "%prog [options] node_criterion [node_criterion]"
parser.add_option('-p', '--parent',
dest = 'parent_id',
help = 'Client reference, IDX path for the parent.'
' Default None.',
)
parser.add_option('-v', '--verbose',
dest = 'talk_talk',
help = "Request some feed back from the process",
action = 'count'
)
parser.set_defaults(description="Created by make_node from the command line")
handle_ini_declaration(prefix='lk')
(options, args) = parser.parse_args()
if options.talk_talk:
print "Using : %s"%' '.join(args)
print "Options: Parent: %s"%str(options.parent_id)
# configure these models
model.init()
get_all_extensions(LK_REGISTER_TYPES_AND_MODELS)
if options.parent_id is not None:
nde_idx = find_from_string(options.parent_id)
if nde_idx:
existing_parent = get_node_dataset(nde_idx[0])
if not existing_parent['nd_node']:
parser.error("Given parent ID not found")
target_parent = existing_parent['nd_node']['nde_idx']
else:
parser.error("Given parent ID not found")
else:
target_parent = None
for argstring in args:
source_file = argstring.open()
import_data(source_file, target_parent)
return 0 # Success?
# Run the importer only when executed as a script (not on import).
if __name__ == '__main__':
    main_()
#-----------------------------------------------------------------------
/Flask_Admin-1.6.1-py3-none-any.whl/flask_admin/contrib/fileadmin/s3.py | import time
try:
from boto import s3
from boto.s3.prefix import Prefix
from boto.s3.key import Key
except ImportError:
s3 = None
from flask import redirect
from flask_admin.babel import gettext
from . import BaseFileAdmin
class S3Storage(object):
    """
    Storage object representing files on an Amazon S3 bucket.
    Usage::
        from flask_admin.contrib.fileadmin import BaseFileAdmin
        from flask_admin.contrib.fileadmin.s3 import S3Storage
        class MyS3Admin(BaseFileAdmin):
            # Configure your class however you like
            pass
        fileadmin_view = MyS3Admin(storage=S3Storage(...))
    """
    def __init__(self, bucket_name, region, aws_access_key_id,
                 aws_secret_access_key):
        """
        Constructor
        :param bucket_name:
            Name of the bucket that the files are on.
        :param region:
            Region that the bucket is located
        :param aws_access_key_id:
            AWS Access Key ID
        :param aws_secret_access_key:
            AWS Secret Access Key
        Make sure the credentials have the correct permissions set up on
        Amazon or else S3 will return a 403 FORBIDDEN error.
        """
        if not s3:
            raise ValueError('Could not import boto. You can install boto by '
                             'using pip install boto')
        connection = s3.connect_to_region(
            region,
            aws_access_key_id=aws_access_key_id,
            aws_secret_access_key=aws_secret_access_key,
        )
        self.bucket = connection.get_bucket(bucket_name)
        # S3 has no real directories; '/'-delimited key prefixes emulate them.
        self.separator = '/'
    def get_files(self, path, directory):
        """List the entries directly under *path* as
        ``(name, full_key, is_dir, size, last_modified)`` tuples,
        directories first.

        NOTE(review): the ``directory`` argument is unused here; presumably
        it exists only to satisfy the BaseFileAdmin storage interface --
        confirm against the base class.
        """
        def _strip_path(name, path):
            # Drop the listing prefix so only the entry's own name remains.
            if name.startswith(path):
                return name.replace(path, '', 1)
            return name
        def _remove_trailing_slash(name):
            # Directory marker keys end with the separator; cut it off.
            return name[:-1]
        def _iso_to_epoch(timestamp):
            # Convert S3's ISO-8601 ``last_modified`` string to epoch seconds.
            # NOTE(review): time.mktime interprets the tuple as *local* time
            # while S3 reports UTC -- confirm whether the offset matters for
            # display purposes.
            dt = time.strptime(timestamp.split(".")[0], "%Y-%m-%dT%H:%M:%S")
            return int(time.mktime(dt))
        files = []
        directories = []
        if path and not path.endswith(self.separator):
            path += self.separator
        # A delimited listing yields Prefix objects for "subdirectories"
        # and Key objects for files.
        for key in self.bucket.list(path, self.separator):
            if key.name == path:
                continue
            if isinstance(key, Prefix):
                name = _remove_trailing_slash(_strip_path(key.name, path))
                key_name = _remove_trailing_slash(key.name)
                directories.append((name, key_name, True, 0, 0))
            else:
                last_modified = _iso_to_epoch(key.last_modified)
                name = _strip_path(key.name, path)
                files.append((name, key.name, False, key.size, last_modified))
        return directories + files
    def _get_bucket_list_prefix(self, path):
        """Return the listing prefix of *path*'s parent ('' at the root)."""
        parts = path.split(self.separator)
        if len(parts) == 1:
            search = ''
        else:
            search = self.separator.join(parts[:-1]) + self.separator
        return search
    def _get_path_keys(self, path):
        """Return the set of key names among *path*'s siblings (one level)."""
        search = self._get_bucket_list_prefix(path)
        return {key.name for key in self.bucket.list(search, self.separator)}
    def is_dir(self, path):
        """True if *path* exists as a directory marker key."""
        keys = self._get_path_keys(path)
        return path + self.separator in keys
    def path_exists(self, path):
        """True if *path* exists either as a file or as a directory."""
        if path == '':
            return True
        keys = self._get_path_keys(path)
        return path in keys or (path + self.separator) in keys
    def get_base_path(self):
        """The storage root; keys are addressed from the bucket root."""
        return ''
    def get_breadcrumbs(self, path):
        """Return ``(component, accumulated_path)`` pairs for *path*."""
        accumulator = []
        breadcrumbs = []
        for n in path.split(self.separator):
            accumulator.append(n)
            breadcrumbs.append((n, self.separator.join(accumulator)))
        return breadcrumbs
    def send_file(self, file_path):
        """Redirect the client to a one-hour signed URL for *file_path*.

        Raises ValueError if the key does not exist.
        """
        key = self.bucket.get_key(file_path)
        if key is None:
            raise ValueError()
        return redirect(key.generate_url(3600))
    def save_file(self, path, file_data):
        """Upload *file_data* (a werkzeug FileStorage) under *path*."""
        key = Key(self.bucket, path)
        headers = {
            'Content-Type': file_data.content_type,
        }
        key.set_contents_from_file(file_data.stream, headers=headers)
    def delete_tree(self, directory):
        """Delete an *empty* directory; non-empty ones raise ValueError."""
        self._check_empty_directory(directory)
        self.bucket.delete_key(directory + self.separator)
    def delete_file(self, file_path):
        """Delete a single object."""
        self.bucket.delete_key(file_path)
    def make_dir(self, path, directory):
        """Create a directory by writing an empty marker key."""
        dir_path = self.separator.join([path, (directory + self.separator)])
        key = Key(self.bucket, dir_path)
        key.set_contents_from_string('')
    def _check_empty_directory(self, path):
        """Raise ValueError unless the directory at *path* is empty."""
        if not self._is_directory_empty(path):
            raise ValueError(gettext('Cannot operate on non empty '
                                     'directories'))
        return True
    def rename_path(self, src, dst):
        """Rename a file, or an *empty* directory, via copy + delete."""
        if self.is_dir(src):
            self._check_empty_directory(src)
            # Operate on the marker keys, which carry a trailing separator.
            src += self.separator
            dst += self.separator
        self.bucket.copy_key(dst, self.bucket.name, src)
        self.delete_file(src)
    def _is_directory_empty(self, path):
        """True if the directory holds nothing but its own marker key."""
        keys = self._get_path_keys(path + self.separator)
        return len(keys) == 1
    def read_file(self, path):
        """Return the object's contents as a byte string."""
        key = Key(self.bucket, path)
        return key.get_contents_as_string()
    def write_file(self, path, content):
        """Write *content* (a file-like object) to *path*."""
        key = Key(self.bucket, path)
        key.set_contents_from_file(content)
class S3FileAdmin(BaseFileAdmin):
    """
    Simple Amazon Simple Storage Service file-management interface.
    :param bucket_name:
        Name of the bucket that the files are on.
    :param region:
        Region that the bucket is located
    :param aws_access_key_id:
        AWS Access Key ID
    :param aws_secret_access_key:
        AWS Secret Access Key
    Sample usage::
        from flask_admin import Admin
        from flask_admin.contrib.fileadmin.s3 import S3FileAdmin
        admin = Admin()
        admin.add_view(S3FileAdmin('files_bucket', 'us-east-1', 'key_id', 'secret_key'))
    """
    def __init__(self, bucket_name, region, aws_access_key_id,
                 aws_secret_access_key, *args, **kwargs):
        # Build the S3 storage backend and delegate everything else to
        # BaseFileAdmin; extra args/kwargs pass through unchanged.
        storage = S3Storage(bucket_name, region, aws_access_key_id,
                            aws_secret_access_key)
        super(S3FileAdmin, self).__init__(*args, storage=storage, **kwargs)
/DjangoDE-0.2.tar.gz/DjangoDE-0.2/djangode/gui/main_window.py |
import os
from djangode import global_objects
from djangode.project import Project
from djangode.utils import get_config_value, set_config_value
from djangode.utils.qt import QtCore, QtGui
from debug_bar import DebugBar
from dialogs import AboutDialog, DjangoDESettingsDialog, ProjectSettingsDialog
from editor import Editor
from left_bar import LeftBar
from toolbar import create_toolbar
import wizards
class MainWindow(QtGui.QMainWindow):
    """DjangoDE's top-level window.

    Hosts the central Editor widget, the debug and left dock bars, the
    menu/action set, and restores -- then persists in closeEvent() -- the
    window geometry, dock state and last open project across sessions.
    """
    def __init__(self):
        QtGui.QMainWindow.__init__(self)
        self.setWindowTitle('DjangoDE')
        if global_objects.icon is not None:
            self.setWindowIcon(QtGui.QIcon(global_objects.icon))
        self.statusBar().showMessage('Welcome to DjangoDE')
        self.editor = Editor()
        # Actions must exist before the toolbar below references them.
        self.setup_actions()
        # Restore window geometry/dock state saved by closeEvent().
        geometry = get_config_value("geometry")
        if geometry is not None:
            self.restoreGeometry(geometry)
        state = get_config_value("state")
        if state is not None:
            self.restoreState(state)
        self.setCentralWidget(self.editor)
        self.toolbar = create_toolbar(self)
        self.debug_bar = DebugBar(self)
        self.addDockWidget(QtCore.Qt.BottomDockWidgetArea, self.debug_bar)
        self.left_bar = LeftBar(self)
        self.addDockWidget(QtCore.Qt.LeftDockWidgetArea, self.left_bar)
        # Re-open the project from the previous session, if any.
        last_project = get_config_value("last_project")
        if last_project:
            global_objects.project = Project(last_project)
            global_objects.project.runner.start()
            self.reload_project()
    def setup_actions(self):
        """Create every QAction, store them in ``self.actions`` by name,
        and build the menu bar.

        NOTE(review): ``self.actions`` (a dict) shadows the inherited
        ``QWidget.actions()`` method -- confirm nothing relies on the Qt
        method on this class.
        """
        self.actions = {}
        menu = self.menuBar()
        # Note: the local name ``exit`` shadows the builtin; scope is local.
        exit = QtGui.QAction(QtGui.QIcon.fromTheme("application-exit"), 'Exit', self)
        exit.setShortcut('Ctrl+Q')
        exit.setStatusTip('Exit application')
        exit.triggered.connect(self.close)
        self.actions["exit"] = exit
        new_file = QtGui.QAction(QtGui.QIcon.fromTheme("document-new"), 'New File', self)
        new_file.setShortcut('Ctrl+N')
        new_file.setStatusTip('New file')
        new_file.triggered.connect(self.editor.new_file)
        self.actions["new_file"] = new_file
        open_file = QtGui.QAction(QtGui.QIcon.fromTheme("document-open"), 'Open File', self)
        open_file.setStatusTip('Open an existing file')
        open_file.triggered.connect(self.editor.open_file_action)
        self.actions["open_file"] = open_file
        save_file = QtGui.QAction(QtGui.QIcon.fromTheme("document-save"), 'Save File', self)
        save_file.setShortcut('Ctrl+S')
        save_file.setStatusTip('Save file')
        save_file.triggered.connect(self.editor.save_file)
        self.actions["save_file"] = save_file
        file_menu = menu.addMenu("&File")
        file_menu.addAction(new_file)
        file_menu.addAction(open_file)
        file_menu.addAction(save_file)
        file_menu.addAction(exit)
        new_project = QtGui.QAction(QtGui.QIcon.fromTheme("document-new"), 'New Project', self)
        new_project.setStatusTip('Create a new Django site or a DjangoDE project from an existing site')
        new_project.triggered.connect(self.new_project)
        self.actions["new_project"] = new_project
        open_project = QtGui.QAction(QtGui.QIcon.fromTheme("document-open"), 'Open Project', self)
        open_project.setStatusTip('Open an existing DjangoDE project')
        open_project.triggered.connect(self.open_project)
        self.actions["open_project"] = open_project
        close_project = QtGui.QAction(QtGui.QIcon.fromTheme("document-close"), 'Close Project', self)
        close_project.setStatusTip('Close the currently open DjangoDE project')
        close_project.triggered.connect(self.close_project)
        self.actions["close_project"] = close_project
        project_settings = QtGui.QAction('Project Settings', self)
        project_settings.setStatusTip('Configure the currently open DjangoDE project')
        project_settings.triggered.connect(self.project_settings)
        self.actions["project_settings"] = project_settings
        project_menu = menu.addMenu("&Project")
        project_menu.addAction(new_project)
        project_menu.addAction(open_project)
        project_menu.addAction(close_project)
        project_menu.addSeparator()
        project_menu.addAction(project_settings)
        self.manage_commands = project_menu.addMenu("Manage Commands")
        add_new_app = QtGui.QAction('Create A New App', self)
        add_new_app.setStatusTip('Starts a brand new Django application in this project')
        add_new_app.triggered.connect(self.add_new_app)
        self.actions["add_new_app"] = add_new_app
        self.manage_commands.addAction(add_new_app)
        # Debug-bar actions; "Continue" stays disabled until a debug session
        # enables it.
        debug_play = QtGui.QAction(QtGui.QIcon.fromTheme("media-playback-start"), "Continue", self)
        debug_play.triggered.connect(self.debug_play)
        debug_play.setEnabled(False)
        self.actions["debug_play"] = debug_play
        toggle_breakpoint = QtGui.QAction("Toggle Breakpoint", self)
        toggle_breakpoint.setShortcut(QtGui.QKeySequence("F9"))
        toggle_breakpoint.triggered.connect(self.editor.toggle_breakpoint)
        self.actions["toggle_breakpoint"] = toggle_breakpoint
        # Note: ``project_menu`` is re-bound for the Debug and Settings menus.
        project_menu = menu.addMenu("&Debug")
        project_menu.addAction(toggle_breakpoint)
        djangode_settings = QtGui.QAction('DjangoDE Settings', self)
        djangode_settings.setStatusTip('Configure DjangoDE')
        djangode_settings.triggered.connect(self.djangode_settings)
        self.actions["djangode_settings"] = djangode_settings
        project_menu = menu.addMenu("&Settings")
        project_menu.addAction(djangode_settings)
        about = QtGui.QAction('About DjangoDE', self)
        about.setStatusTip('About DjangoDE')
        about.triggered.connect(self.about)
        self.actions["about"] = about
        about_django = QtGui.QAction('About Django', self)
        about_django.setStatusTip('About Django')
        about_django.triggered.connect(self.about_django)
        self.actions["about_django"] = about_django
        help_menu = menu.addMenu("&Help")
        help_menu.addAction(about)
        help_menu.addAction(about_django)
        # NOTE(review): duplicate of the assignment above; harmless but
        # redundant.
        self.actions["about_django"] = about_django
    def about(self):
        """Show the About DjangoDE dialog (kept as an attribute so it is
        not garbage-collected while open)."""
        self.about_dialog = AboutDialog()
        self.about_dialog.show()
    def about_django(self):
        """Open the Django Website in a tab"""
        # TODO: not implemented yet.
        pass
    def new_project(self):
        """Run the new-project wizard modally."""
        wizards.NewProjectWizard().exec_()
    def open_project(self):
        """Ask for a manage.py file and open it as the current project."""
        filename = QtGui.QFileDialog.getOpenFileName(self, "Open a manage.py file.", os.path.expanduser('~'), "manage.py (*.py)")
        # ``filename[0]`` assumes the tuple-returning (path, filter) variant
        # of getOpenFileName -- NOTE(review): confirm for the binding in use.
        global_objects.project = Project(unicode(filename[0]))
        global_objects.project.runner.start()
        self.reload_project()
    def close_project(self):
        """Close the current project (if any) and refresh the UI."""
        if global_objects.project is not None:
            global_objects.project.close()
            global_objects.project = None
        self.reload_project()
    def project_settings(self):
        """Show the project settings dialog."""
        self.project_settings_dialog = ProjectSettingsDialog()
        self.project_settings_dialog.show()
    def djangode_settings(self):
        """Show the application-wide settings dialog."""
        self.djangode_settings_dialog = DjangoDESettingsDialog()
        self.djangode_settings_dialog.show()
    def closeEvent(self, event):
        """Persist geometry/state/current project, then close the project."""
        set_config_value("geometry", self.saveGeometry())
        set_config_value("state", self.saveState())
        set_config_value("last_project", global_objects.project.manage_file if global_objects.project is not None else "")
        if global_objects.project is not None:
            global_objects.project.close()
            global_objects.project = None
        # Drop any open settings dialog (assigning also creates the
        # attribute if the dialog was never shown).
        self.project_settings_dialog = None
        return QtGui.QMainWindow.closeEvent(self, event)
    def reload_project(self):
        """Enable/disable project-bound actions and refresh dependent
        widgets after a project is opened or closed."""
        if global_objects.project is None:
            self.actions["close_project"].setEnabled(False)
            self.actions["project_settings"].setEnabled(False)
            self.actions["add_new_app"].setEnabled(False)
        else:
            self.actions["close_project"].setEnabled(True)
            self.actions["project_settings"].setEnabled(True)
            self.actions["add_new_app"].setEnabled(True)
        self.left_bar.reload_project()
        self.editor.reload_project()
    def add_new_app(self):
        """Prompt for an app name and create it in the current project."""
        # NOTE(review): the dialog titles say 'DjangoED' -- likely a typo for
        # 'DjangoDE', but it is user-visible runtime text, so left unchanged.
        name, ok = QtGui.QInputDialog.getText(self, "DjangoED", "Please enter the name of the new application:")
        if ok and name:
            global_objects.project.add_new_app(str(name))
            QtGui.QMessageBox.information(self, "DjangoED", "Your new application was created.")
    def debug_play(self):
        """Resume execution in the project's debugger."""
        global_objects.project.debug_play()
import base64
import binascii
import imghdr
import inspect
import io
import itertools
import logging
import mimetypes
import os
import pathlib
import re
import struct
from collections import namedtuple
from mimetypes import guess_extension
from types import GeneratorType
import typing
from .extensions import markdown, html
from .helpers import add_surrogate, del_surrogate
from .tl import types
try:
import hachoir
import hachoir.metadata
import hachoir.parser
except ImportError:
hachoir = None
# Register some of the most common mime-types to avoid any issues.
# See https://github.com/LonamiWebs/Telethon/issues/1096.
mimetypes.add_type("image/png", ".png")
mimetypes.add_type("image/jpeg", ".jpeg")
mimetypes.add_type("image/webp", ".webp")
mimetypes.add_type("image/gif", ".gif")
mimetypes.add_type("image/bmp", ".bmp")
mimetypes.add_type("image/x-tga", ".tga")
mimetypes.add_type("image/tiff", ".tiff")
mimetypes.add_type("image/vnd.adobe.photoshop", ".psd")
mimetypes.add_type("video/mp4", ".mp4")
mimetypes.add_type("video/quicktime", ".mov")
mimetypes.add_type("video/avi", ".avi")
mimetypes.add_type("audio/mpeg", ".mp3")
mimetypes.add_type("audio/m4a", ".m4a")
mimetypes.add_type("audio/aac", ".aac")
mimetypes.add_type("audio/ogg", ".ogg")
mimetypes.add_type("audio/flac", ".flac")
mimetypes.add_type("application/x-tgsticker", ".tgs")
# Matches a leading "@", or a telegram.me / telegram.dog / t.me link prefix;
# group 1 captures "@" or "joinchat/" so invite links can be told apart.
USERNAME_RE = re.compile(
    r"@|(?:https?://)?(?:www\.)?(?:telegram\.(?:me|dog)|t\.me)/(@|joinchat/)?"
)
# Matches tg://join?invite= deep links.
TG_JOIN_RE = re.compile(r"tg://(join)\?invite=")
# The only shorter-than-five-characters usernames are those used for some
# special, very well known bots. This list may be incomplete though:
# "[...] @gif, @vid, @pic, @bing, @wiki, @imdb and @bold [...]"
#
# See https://telegram.org/blog/inline-bots#how-does-it-work
VALID_USERNAME_RE = re.compile(
    r"^([a-z](?:(?!__)\w){3,30}[a-z\d]"
    r"|gif|vid|pic|bing|wiki|imdb|bold|vote|like|coub)$",
    re.IGNORECASE,
)
# Lightweight record describing where a file lives on Telegram's servers.
_FileInfo = namedtuple("FileInfo", "dc_id location size")
_log = logging.getLogger(__name__)
def chunks(iterable, size=100):
    """
    Split *iterable* into lazy chunks of at most *size* elements.

    The default of 100 matches the batch size Telegram uses the most.
    Each yielded value is an ``itertools.chain`` iterator over the next
    ``size`` elements; consume each chunk before advancing to the next.
    """
    source = iter(iterable)
    remainder = size - 1
    while True:
        try:
            first = next(source)
        except StopIteration:
            return
        yield itertools.chain([first], itertools.islice(source, remainder))
def get_display_name(entity):
    """
    Gets the display name for the given :tl:`User`,
    :tl:`Chat` or :tl:`Channel`. Returns an empty string otherwise.
    """
    if isinstance(entity, types.User):
        # "first last" when both are set, the single non-empty one
        # otherwise, "" when neither is present.
        parts = [p for p in (entity.first_name, entity.last_name) if p]
        return " ".join(parts)
    if isinstance(entity, (types.Chat, types.ChatForbidden, types.Channel)):
        return entity.title
    return ""
def get_extension(media):
    """Gets the corresponding extension for any Telegram media."""
    # Telegram always compresses photos to JPEG.
    try:
        get_input_photo(media)
    except TypeError:
        # Profile/chat photos cannot be cast to an input photo, yet they
        # are still JPEGs.
        if isinstance(media, (types.UserProfilePhoto, types.ChatPhoto)):
            return ".jpg"
    else:
        return ".jpg"
    # Documents carry an explicit mime type instead.
    if isinstance(media, types.MessageMediaDocument):
        media = media.document
    if not isinstance(
        media, (types.Document, types.WebDocument, types.WebDocumentNoProxy)
    ):
        return ""
    if media.mime_type == "application/octet-stream":
        # Octet stream are just bytes, which have no default extension.
        return ""
    return guess_extension(media.mime_type) or ""
def _raise_cast_fail(entity, target):
raise TypeError(
"Cannot cast {} to any kind of {}.".format(type(entity).__name__, target)
)
def get_input_peer(entity, allow_self=True, check_hash=True):
    """
    Gets the input peer for the given "entity" (user, chat or channel).
    A ``TypeError`` is raised if the given entity isn't a supported type
    or if ``check_hash is True`` but the entity's ``access_hash is None``
    *or* the entity contains ``min`` information. In this case, the hash
    cannot be used for general purposes, and thus is not returned to avoid
    any issues which can derive from invalid access hashes.
    Note that ``check_hash`` **is ignored** if an input peer is already
    passed since in that case we assume the user knows what they're doing.
    This is key to getting entities by explicitly passing ``hash = 0``.
    """
    # NOTE: It is important that this method validates the access hashes,
    #       because it is used when we *require* a valid general-purpose
    #       access hash. This includes caching, which relies on this method.
    #       Further, when resolving raw methods, they do e.g.,
    #           utils.get_input_channel(client.get_input_peer(...))
    #
    #       ...which means that the client's method verifies the hashes.
    #
    # Excerpt from a conversation with official developers (slightly edited):
    #     > We send new access_hash for Channel with min flag since layer 102.
    #     > Previously, we omitted it.
    #     > That one works just to download the profile picture.
    #
    #     < So, min hashes only work for getting files,
    #     < but the non-min hash is required for any other operation?
    #
    #     > Yes.
    #
    # More information: https://core.telegram.org/api/min
    try:
        if entity.SUBCLASS_OF_ID == 0xC91C90B6:  # crc32(b'InputPeer')
            return entity
    except AttributeError:
        # e.g. custom.Dialog (can't cyclic import).
        if allow_self and hasattr(entity, "input_entity"):
            return entity.input_entity
        elif hasattr(entity, "entity"):
            return get_input_peer(entity.entity)
        else:
            _raise_cast_fail(entity, "InputPeer")
    # Full (non-input) entities: convert, validating hashes as documented
    # above unless check_hash is disabled.
    if isinstance(entity, types.User):
        if entity.is_self and allow_self:
            return types.InputPeerSelf()
        elif (entity.access_hash is not None and not entity.min) or not check_hash:
            return types.InputPeerUser(entity.id, entity.access_hash)
        else:
            raise TypeError("User without access_hash or min info cannot be input")
    if isinstance(entity, (types.Chat, types.ChatEmpty, types.ChatForbidden)):
        # Small-group chats have no access hash at all.
        return types.InputPeerChat(entity.id)
    if isinstance(entity, types.Channel):
        if (entity.access_hash is not None and not entity.min) or not check_hash:
            return types.InputPeerChannel(entity.id, entity.access_hash)
        else:
            raise TypeError("Channel without access_hash or min info cannot be input")
    if isinstance(entity, types.ChannelForbidden):
        # "channelForbidden are never min", and since their hash is
        # also not optional, we assume that this truly is the case.
        return types.InputPeerChannel(entity.id, entity.access_hash)
    # Input variants only need re-wrapping as their InputPeer counterpart;
    # their hashes were validated when they were created.
    if isinstance(entity, types.InputUser):
        return types.InputPeerUser(entity.user_id, entity.access_hash)
    if isinstance(entity, types.InputChannel):
        return types.InputPeerChannel(entity.channel_id, entity.access_hash)
    if isinstance(entity, types.InputUserSelf):
        return types.InputPeerSelf()
    if isinstance(entity, types.InputUserFromMessage):
        return types.InputPeerUserFromMessage(
            entity.peer, entity.msg_id, entity.user_id
        )
    if isinstance(entity, types.InputChannelFromMessage):
        return types.InputPeerChannelFromMessage(
            entity.peer, entity.msg_id, entity.channel_id
        )
    if isinstance(entity, types.UserEmpty):
        return types.InputPeerEmpty()
    # Full-info objects wrap the actual entity.
    if isinstance(entity, types.UserFull):
        return get_input_peer(entity.user)
    if isinstance(entity, types.ChatFull):
        return types.InputPeerChat(entity.id)
    if isinstance(entity, types.PeerChat):
        return types.InputPeerChat(entity.chat_id)
    _raise_cast_fail(entity, "InputPeer")
def get_input_channel(entity):
    """
    Similar to :meth:`get_input_peer`, but for :tl:`InputChannel`'s alone.
    .. important::
        This method does not validate for invalid general-purpose access
        hashes, unlike `get_input_peer`. Consider using instead:
        ``get_input_channel(get_input_peer(channel))``.
    """
    try:
        marker = entity.SUBCLASS_OF_ID
    except AttributeError:
        _raise_cast_fail(entity, "InputChannel")
    if marker == 0x40F202FD:  # crc32(b'InputChannel')
        return entity
    if isinstance(entity, (types.Channel, types.ChannelForbidden)):
        return types.InputChannel(entity.id, entity.access_hash or 0)
    if isinstance(entity, types.InputPeerChannel):
        return types.InputChannel(entity.channel_id, entity.access_hash)
    if isinstance(entity, types.InputPeerChannelFromMessage):
        return types.InputChannelFromMessage(
            entity.peer, entity.msg_id, entity.channel_id
        )
    _raise_cast_fail(entity, "InputChannel")
def get_input_user(entity):
    """
    Similar to :meth:`get_input_peer`, but for :tl:`InputUser`'s alone.
    .. important::
        This method does not validate for invalid general-purpose access
        hashes, unlike `get_input_peer`. Consider using instead:
        ``get_input_user(get_input_peer(user))``.
    """
    try:
        if entity.SUBCLASS_OF_ID == 0xE669BF46:  # crc32(b'InputUser'):
            return entity
    except AttributeError:
        _raise_cast_fail(entity, "InputUser")
    if isinstance(entity, types.User):
        if entity.is_self:
            return types.InputUserSelf()
        else:
            # Fall back to hash 0 when missing (files-only usability).
            return types.InputUser(entity.id, entity.access_hash or 0)
    if isinstance(entity, types.InputPeerSelf):
        return types.InputUserSelf()
    if isinstance(entity, (types.UserEmpty, types.InputPeerEmpty)):
        return types.InputUserEmpty()
    if isinstance(entity, types.UserFull):
        return get_input_user(entity.user)
    if isinstance(entity, types.InputPeerUser):
        return types.InputUser(entity.user_id, entity.access_hash)
    if isinstance(entity, types.InputPeerUserFromMessage):
        return types.InputUserFromMessage(entity.peer, entity.msg_id, entity.user_id)
    _raise_cast_fail(entity, "InputUser")
def get_input_dialog(dialog):
    """Similar to :meth:`get_input_peer`, but for dialogs"""
    try:
        subclass = dialog.SUBCLASS_OF_ID
    except AttributeError:
        _raise_cast_fail(dialog, "InputDialogPeer")
    if subclass == 0xA21C9795:  # crc32(b'InputDialogPeer')
        return dialog
    if subclass == 0xC91C90B6:  # crc32(b'InputPeer')
        return types.InputDialogPeer(dialog)
    # Last resort: anything castable to an input peer can be wrapped.
    try:
        return types.InputDialogPeer(get_input_peer(dialog))
    except TypeError:
        pass
    _raise_cast_fail(dialog, "InputDialogPeer")
def get_input_document(document):
    """Similar to :meth:`get_input_peer`, but for documents"""
    try:
        marker = document.SUBCLASS_OF_ID
    except AttributeError:
        _raise_cast_fail(document, "InputDocument")
    else:
        if marker == 0xF33FDB68:  # crc32(b'InputDocument')
            return document
    if isinstance(document, types.Document):
        return types.InputDocument(
            id=document.id,
            access_hash=document.access_hash,
            file_reference=document.file_reference,
        )
    if isinstance(document, types.DocumentEmpty):
        return types.InputDocumentEmpty()
    # Unwrap containers recursively: media -> document, message -> media.
    if isinstance(document, types.MessageMediaDocument):
        return get_input_document(document.document)
    if isinstance(document, types.Message):
        return get_input_document(document.media)
    _raise_cast_fail(document, "InputDocument")
def get_input_photo(photo):
    """Similar to :meth:`get_input_peer`, but for photos"""
    try:
        if photo.SUBCLASS_OF_ID == 0x846363E0:  # crc32(b'InputPhoto'):
            return photo
    except AttributeError:
        _raise_cast_fail(photo, "InputPhoto")
    # Unwrap containers first: message -> media -> photo.
    if isinstance(photo, types.Message):
        photo = photo.media
    if isinstance(photo, (types.photos.Photo, types.MessageMediaPhoto)):
        photo = photo.photo
    if isinstance(photo, types.Photo):
        return types.InputPhoto(
            id=photo.id,
            access_hash=photo.access_hash,
            file_reference=photo.file_reference,
        )
    if isinstance(photo, types.PhotoEmpty):
        return types.InputPhotoEmpty()
    # Chat/user full-info objects carry their photo a level deeper.
    if isinstance(photo, types.messages.ChatFull):
        photo = photo.full_chat
    if isinstance(photo, types.ChannelFull):
        return get_input_photo(photo.chat_photo)
    elif isinstance(photo, types.UserFull):
        return get_input_photo(photo.profile_photo)
    elif isinstance(photo, (types.Channel, types.Chat, types.User)):
        return get_input_photo(photo.photo)
    if isinstance(
        photo,
        (types.UserEmpty, types.ChatEmpty, types.ChatForbidden, types.ChannelForbidden),
    ):
        return types.InputPhotoEmpty()
    _raise_cast_fail(photo, "InputPhoto")
def get_input_chat_photo(photo):
    """Similar to :meth:`get_input_peer`, but for chat photos"""
    try:
        marker = photo.SUBCLASS_OF_ID
    except AttributeError:
        _raise_cast_fail(photo, "InputChatPhoto")
    else:
        if marker == 0xD4EB2D74:  # crc32(b'InputChatPhoto')
            return photo
        if marker == 0xE7655F1F:  # crc32(b'InputFile')
            return types.InputChatUploadedPhoto(photo)
    # Not a chat photo yet: cast to an input photo first, then wrap it.
    converted = get_input_photo(photo)
    if isinstance(converted, types.InputPhoto):
        return types.InputChatPhoto(converted)
    if isinstance(converted, types.InputPhotoEmpty):
        return types.InputChatPhotoEmpty()
    _raise_cast_fail(converted, "InputChatPhoto")
def get_input_geo(geo):
    """Similar to :meth:`get_input_peer`, but for geo points"""
    try:
        marker = geo.SUBCLASS_OF_ID
    except AttributeError:
        _raise_cast_fail(geo, "InputGeoPoint")
    else:
        if marker == 0x430D225:  # crc32(b'InputGeoPoint'):
            return geo
    if isinstance(geo, types.GeoPoint):
        return types.InputGeoPoint(lat=geo.lat, long=geo.long)
    if isinstance(geo, types.GeoPointEmpty):
        return types.InputGeoPointEmpty()
    # Unwrap containers recursively: media -> geo, message -> media.
    if isinstance(geo, types.MessageMediaGeo):
        return get_input_geo(geo.geo)
    if isinstance(geo, types.Message):
        return get_input_geo(geo.media)
    _raise_cast_fail(geo, "InputGeoPoint")
def get_input_media(
    media,
    *,
    is_photo=False,
    attributes=None,
    force_document=False,
    voice_note=False,
    video_note=False,
    supports_streaming=False,
    ttl=None,
):
    """
    Similar to :meth:`get_input_peer`, but for media.
    If the media is :tl:`InputFile` and ``is_photo`` is known to be `True`,
    it will be treated as an :tl:`InputMediaUploadedPhoto`. Else, the rest
    of parameters will indicate how to treat it.
    """
    try:
        if media.SUBCLASS_OF_ID == 0xFAF846F4:  # crc32(b'InputMedia')
            return media
        elif media.SUBCLASS_OF_ID == 0x846363E0:  # crc32(b'InputPhoto')
            return types.InputMediaPhoto(media, ttl_seconds=ttl)
        elif media.SUBCLASS_OF_ID == 0xF33FDB68:  # crc32(b'InputDocument')
            return types.InputMediaDocument(media, ttl_seconds=ttl)
    except AttributeError:
        _raise_cast_fail(media, "InputMedia")
    # An explicit ``ttl`` wins; otherwise the media's own ttl_seconds is kept.
    if isinstance(media, types.MessageMediaPhoto):
        return types.InputMediaPhoto(
            id=get_input_photo(media.photo), ttl_seconds=ttl or media.ttl_seconds
        )
    if isinstance(media, (types.Photo, types.photos.Photo, types.PhotoEmpty)):
        return types.InputMediaPhoto(id=get_input_photo(media), ttl_seconds=ttl)
    if isinstance(media, types.MessageMediaDocument):
        return types.InputMediaDocument(
            id=get_input_document(media.document), ttl_seconds=ttl or media.ttl_seconds
        )
    if isinstance(media, (types.Document, types.DocumentEmpty)):
        return types.InputMediaDocument(id=get_input_document(media), ttl_seconds=ttl)
    if isinstance(media, (types.InputFile, types.InputFileBig)):
        # Freshly-uploaded files: decide between photo and document, and
        # derive the document attributes (file name, audio/video info).
        if is_photo:
            return types.InputMediaUploadedPhoto(file=media, ttl_seconds=ttl)
        attrs, mime = get_attributes(
            media,
            attributes=attributes,
            force_document=force_document,
            voice_note=voice_note,
            video_note=video_note,
            supports_streaming=supports_streaming,
        )
        return types.InputMediaUploadedDocument(
            file=media,
            mime_type=mime,
            attributes=attrs,
            force_file=force_document,
            ttl_seconds=ttl,
        )
    if isinstance(media, types.MessageMediaGame):
        return types.InputMediaGame(
            id=types.InputGameID(id=media.game.id, access_hash=media.game.access_hash)
        )
    if isinstance(media, types.MessageMediaContact):
        return types.InputMediaContact(
            phone_number=media.phone_number,
            first_name=media.first_name,
            last_name=media.last_name,
            vcard="",
        )
    if isinstance(media, types.MessageMediaGeo):
        return types.InputMediaGeoPoint(geo_point=get_input_geo(media.geo))
    if isinstance(media, types.MessageMediaVenue):
        return types.InputMediaVenue(
            geo_point=get_input_geo(media.geo),
            title=media.title,
            address=media.address,
            provider=media.provider,
            venue_id=media.venue_id,
            venue_type="",
        )
    if isinstance(media, types.MessageMediaDice):
        return types.InputMediaDice(media.emoticon)
    # Media kinds that cannot be re-sent map to the empty input media.
    if isinstance(
        media,
        (
            types.MessageMediaEmpty,
            types.MessageMediaUnsupported,
            types.ChatPhotoEmpty,
            types.UserProfilePhotoEmpty,
            types.ChatPhoto,
            types.UserProfilePhoto,
        ),
    ):
        return types.InputMediaEmpty()
    if isinstance(media, types.Message):
        return get_input_media(media.media, is_photo=is_photo, ttl=ttl)
    if isinstance(media, types.MessageMediaPoll):
        if media.poll.quiz:
            if not media.results.results:
                # A quiz has correct answers, which we don't know until answered.
                # If the quiz hasn't been answered we can't reconstruct it properly.
                raise TypeError(
                    "Cannot cast unanswered quiz to any kind of InputMedia."
                )
            correct_answers = [r.option for r in media.results.results if r.correct]
        else:
            correct_answers = None
        return types.InputMediaPoll(
            poll=media.poll,
            correct_answers=correct_answers,
            solution=media.results.solution,
            solution_entities=media.results.solution_entities,
        )
    if isinstance(media, types.Poll):
        return types.InputMediaPoll(media)
    _raise_cast_fail(media, "InputMedia")
def get_input_message(message):
    """Similar to :meth:`get_input_peer`, but for input messages.

    Accepts a plain message ID, an existing :tl:`InputMessage`, or a
    :tl:`Message`; raises ``TypeError`` for anything else.
    """
    try:
        if isinstance(message, int):  # This case is really common too
            return types.InputMessageID(message)
        elif message.SUBCLASS_OF_ID == 0x54B6BCC5:  # crc32(b'InputMessage'):
            return message
        elif message.SUBCLASS_OF_ID == 0x790009E3:  # crc32(b'Message'):
            return types.InputMessageID(message.id)
    except AttributeError:
        pass
    # BUG FIX: the error previously named "InputMedia" (a copy-paste from
    # get_input_media); this function casts to InputMessage.
    _raise_cast_fail(message, "InputMessage")
def get_input_group_call(call):
    """Similar to :meth:`get_input_peer`, but for input calls.

    Accepts an existing :tl:`InputGroupCall` or a :tl:`GroupCall`;
    raises ``TypeError`` for anything else.
    """
    try:
        if call.SUBCLASS_OF_ID == 0x58611AB1:  # crc32(b'InputGroupCall')
            return call
        elif call.SUBCLASS_OF_ID == 0x20B4F320:  # crc32(b'GroupCall')
            return types.InputGroupCall(id=call.id, access_hash=call.access_hash)
    except AttributeError:
        _raise_cast_fail(call, "InputGroupCall")
    # BUG FIX: an object whose SUBCLASS_OF_ID matched neither constant
    # previously fell through and silently returned None; raise the same
    # cast failure as every sibling get_input_* helper instead.
    _raise_cast_fail(call, "InputGroupCall")
def _get_entity_pair(entity_id, entities, cache, get_input_peer=get_input_peer):
    """
    Return the ``(entity, input_entity)`` pair for *entity_id*.

    The full entity comes from *entities* (or ``None``); the input version
    is taken from *cache* when present, otherwise derived from the entity,
    falling back to ``None`` if it cannot be cast.
    """
    full_entity = entities.get(entity_id)
    try:
        input_version = cache[entity_id]
    except KeyError:
        # Cache miss is unlikely; derive the input entity on the fly.
        try:
            input_version = get_input_peer(full_entity)
        except TypeError:
            input_version = None
    return full_entity, input_version
def get_message_id(message):
    """Similar to :meth:`get_input_peer`, but for message IDs."""
    if message is None:
        return None
    if isinstance(message, int):
        return message
    # Anything exposing the Message SUBCLASS_OF_ID carries its own .id.
    subclass = getattr(message, "SUBCLASS_OF_ID", None)
    if subclass == 0x790009E3:  # hex(crc32(b'Message')) = 0x790009e3
        return message.id
    raise TypeError("Invalid message type: {}".format(type(message)))
def _get_metadata(file):
    """Best-effort hachoir metadata extraction for *file*.

    *file* may be a path string, raw ``bytes``, or a binary stream.
    Returns a hachoir metadata object, or ``None`` when hachoir is not
    installed, the stream is not seekable, or parsing fails. Caller-owned
    streams are restored to their original position.
    """
    if not hachoir:
        return
    stream = None
    close_stream = True
    seekable = True
    # The parser may fail and we don't want to crash if
    # the extraction process fails.
    try:
        # Note: aiofiles are intentionally left out for simplicity.
        # `helpers._FileStream` is async only for simplicity too, so can't
        # reuse it here.
        if isinstance(file, str):
            stream = open(file, "rb")
        elif isinstance(file, bytes):
            stream = io.BytesIO(file)
        else:
            # Caller-owned stream: do not close it, and bail out if its
            # position cannot be restored afterwards.
            stream = file
            close_stream = False
            seekable = file.seekable() if getattr(file, "seekable", None) else False
        if not seekable:
            return None
        # Remember the position so the finally block can restore it.
        pos = stream.tell()
        filename = getattr(file, "name", "")
        # NOTE(review): this f-string has no placeholder; upstream passes the
        # actual file path in ``source`` -- looks like a scrubbed value,
        # confirm against the original module.
        parser = hachoir.parser.guess.guessParser(
            hachoir.stream.InputIOStream(
                stream, source=f"file:(unknown)", tags=[], filename=filename
            )
        )
        return hachoir.metadata.extractMetadata(parser)
    except Exception as e:
        _log.warning("Failed to analyze %s: %s %s", file, e.__class__, e)
    finally:
        # Close streams we opened; restore position on caller-owned ones.
        if close_stream:
            if stream:
                stream.close()
        elif seekable:
            if stream:
                stream.seek(pos)
def get_attributes(
    file,
    *,
    attributes=None,
    mime_type=None,
    force_document=False,
    voice_note=False,
    video_note=False,
    supports_streaming=False,
    thumb=None,
):
    """
    Get a list of attributes for the given file and
    the mime type as a tuple ([attribute], mime_type).

    ``file`` may be a path string or a file-like object; audio/video
    attributes are filled from hachoir metadata when available, and
    user-supplied ``attributes`` override the auto-detected ones.
    """
    # Note: ``file.name`` works for :tl:`InputFile` and some `IOBase` streams
    name = file if isinstance(file, str) else getattr(file, "name", "unnamed")
    if mime_type is None:
        mime_type = mimetypes.guess_type(name)[0]
    # Keyed by attribute class so later assignments/overrides replace
    # earlier auto-detected instances of the same kind.
    attr_dict = {
        types.DocumentAttributeFilename: types.DocumentAttributeFilename(
            os.path.basename(name)
        )
    }
    if is_audio(file):
        if m := _get_metadata(file):
            # Prefer the "author" tag; fall back to "artist" for performer.
            if m.has("author"):
                performer = m.get("author")
            elif m.has("artist"):
                performer = m.get("artist")
            else:
                performer = None
            attr_dict[types.DocumentAttributeAudio] = types.DocumentAttributeAudio(
                voice=voice_note,
                title=m.get("title") if m.has("title") else None,
                performer=performer,
                duration=int(m.get("duration").seconds if m.has("duration") else 0),
            )
    if not force_document and is_video(file):
        if m := _get_metadata(file):
            doc = types.DocumentAttributeVideo(
                round_message=video_note,
                w=m.get("width") if m.has("width") else 1,
                h=m.get("height") if m.has("height") else 1,
                duration=int(m.get("duration").seconds if m.has("duration") else 1),
                supports_streaming=supports_streaming,
            )
        elif thumb:
            # No metadata on the video itself; borrow dimensions from the
            # thumbnail when one was provided.
            t_m = _get_metadata(thumb)
            width = 1
            height = 1
            if t_m and t_m.has("width"):
                width = t_m.get("width")
            if t_m and t_m.has("height"):
                height = t_m.get("height")
            doc = types.DocumentAttributeVideo(
                0,
                width,
                height,
                round_message=video_note,
                supports_streaming=supports_streaming,
            )
        else:
            # Unknown duration/dimensions; placeholder values are required.
            doc = types.DocumentAttributeVideo(
                0, 1, 1, round_message=video_note, supports_streaming=supports_streaming
            )
        attr_dict[types.DocumentAttributeVideo] = doc
    if voice_note:
        # Mark an existing audio attribute as a voice note, or create one.
        if types.DocumentAttributeAudio in attr_dict:
            attr_dict[types.DocumentAttributeAudio].voice = True
        else:
            attr_dict[types.DocumentAttributeAudio] = types.DocumentAttributeAudio(
                0, voice=True
            )
    # Now override the attributes if any. As we have a dict of
    # {cls: instance}, we can override any class with the list
    # of attributes provided by the user easily.
    if attributes:
        for a in attributes:
            attr_dict[type(a)] = a
    # Ensure we have a mime type, any; but it cannot be None
    # 'The "octet-stream" subtype is used to indicate that a body
    # contains arbitrary binary data.'
    if not mime_type:
        mime_type = "application/octet-stream"
    return list(attr_dict.values()), mime_type
def sanitize_parse_mode(mode):
    """
    Converts the given parse mode into an object with
    ``parse`` and ``unparse`` callable properties.
    """
    if not mode:
        return None

    if callable(mode):
        # A bare parser callable: wrap it; such modes cannot un-parse.
        class CustomMode:
            @staticmethod
            def unparse(text, entities):
                raise NotImplementedError

        CustomMode.parse = mode
        return CustomMode

    has_both = all(hasattr(mode, attr) for attr in ("parse", "unparse"))
    if has_both and callable(mode.parse) and callable(mode.unparse):
        # Already a full parse-mode object; use it as-is.
        return mode

    if isinstance(mode, str):
        known_modes = {"md": markdown, "markdown": markdown, "htm": html, "html": html}
        try:
            return known_modes[mode.lower()]
        except KeyError:
            raise ValueError("Unknown parse mode {}".format(mode))

    raise TypeError("Invalid parse mode type {}".format(mode))
def get_input_location(location):
    """
    Similar to :meth:`get_input_peer`, but for input messages.
    Note that this returns a tuple ``(dc_id, location)``, the
    ``dc_id`` being present if known.
    """
    file_info = _get_file_info(location)
    return (file_info.dc_id, file_info.location)
def _get_file_info(location):
    """
    Resolve ``location`` (an input file location, message, media wrapper,
    document or photo) into a ``_FileInfo`` carrying the DC ID, the input
    location and the file size where known.

    Raises a cast failure (via ``_raise_cast_fail``) for unsupported input.
    """
    try:
        # crc32(b'InputFileLocation'): already an input location, so the
        # DC and size are unknown here.
        if location.SUBCLASS_OF_ID == 0x1523D462:
            return _FileInfo(None, location, None)  # crc32(b'InputFileLocation'):
    except AttributeError:
        _raise_cast_fail(location, "InputFileLocation")
    # Unwrap messages and media wrappers down to the document/photo inside.
    if isinstance(location, types.Message):
        location = location.media
    if isinstance(location, types.MessageMediaDocument):
        location = location.document
    elif isinstance(location, types.MessageMediaPhoto):
        location = location.photo
    if isinstance(location, types.Document):
        return _FileInfo(
            location.dc_id,
            types.InputDocumentFileLocation(
                id=location.id,
                access_hash=location.access_hash,
                file_reference=location.file_reference,
                thumb_size="",  # Presumably to download one of its thumbnails
            ),
            location.size,
        )
    elif isinstance(location, types.Photo):
        # Use the last (largest) size variant for downloads.
        return _FileInfo(
            location.dc_id,
            types.InputPhotoFileLocation(
                id=location.id,
                access_hash=location.access_hash,
                file_reference=location.file_reference,
                thumb_size=location.sizes[-1].type,
            ),
            _photo_size_byte_count(location.sizes[-1]),
        )
    _raise_cast_fail(location, "InputFileLocation")
def _get_extension(file):
"""
Gets the extension for the given file, which can be either a
str or an ``open()``'ed file (which has a ``.name`` attribute).
"""
if isinstance(file, str):
return os.path.splitext(file)[-1]
elif isinstance(file, pathlib.Path):
return file.suffix
elif isinstance(file, bytes):
kind = imghdr.what(io.BytesIO(file))
return f".{kind}" if kind else ""
elif (
isinstance(file, io.IOBase)
and not isinstance(file, io.TextIOBase)
and file.seekable()
):
kind = imghdr.what(file)
return f".{kind}" if kind is not None else ""
elif getattr(file, "name", None):
# Note: ``file.name`` works for :tl:`InputFile` and some `IOBase`
return _get_extension(file.name)
else:
# Maybe it's a Telegram media
return get_extension(file)
def is_image(file):
    """
    Returns `True` if the file extension looks like an image file to Telegram.
    """
    extension = _get_extension(file)
    if re.match(r"\.(png|jpe?g)", extension, re.IGNORECASE):
        return True
    # Not an image by extension; it may still be a bot file ID for a photo.
    return isinstance(resolve_bot_file_id(file), types.Photo)
def is_gif(file):
    """
    Returns `True` if the file extension looks like a gif file to Telegram.

    Note: the returned value is actually the (truthy) match object or
    ``None``, which callers use in a boolean context.
    """
    extension = _get_extension(file)
    return re.match(r"\.gif", extension, re.IGNORECASE)
def is_audio(file):
    """Returns `True` if the file has an audio mime type."""
    extension = _get_extension(file)
    if not extension:
        # No usable extension: probe the content's metadata instead.
        metadata = _get_metadata(file)
        if metadata and metadata.has("mime_type"):
            return metadata.get("mime_type").startswith("audio/")
        return False
    # Only the extension matters to the guesser, so use a dummy name.
    guessed = mimetypes.guess_type(f"a{extension}")[0] or ""
    return guessed.startswith("audio/")
def is_video(file):
    """Returns `True` if the file has a video mime type."""
    extension = _get_extension(file)
    if not extension:
        # No usable extension: probe the content's metadata instead.
        metadata = _get_metadata(file)
        if metadata and metadata.has("mime_type"):
            return metadata.get("mime_type").startswith("video/")
        return False
    # Only the extension matters to the guesser, so use a dummy name.
    guessed = mimetypes.guess_type(f"a{extension}")[0] or ""
    return guessed.startswith("video/")
def is_list_like(obj):
    """
    Returns `True` if the given object looks like a list.
    Checking ``if hasattr(obj, '__iter__')`` and ignoring ``str/bytes`` is not
    enough. Things like ``open()`` are also iterable (and probably many
    other things), so just support the commonly known list-like objects.
    """
    list_like_types = (list, tuple, set, dict, GeneratorType)
    return isinstance(obj, list_like_types)
def parse_phone(phone):
    """Parses the given phone, or returns `None` if it's invalid."""
    if isinstance(phone, int):
        return str(phone)
    # Strip common phone punctuation; anything left must be digits only.
    digits = re.sub(r"[+()\s-]", "", str(phone))
    return digits if digits.isdigit() else None
def parse_username(username):
    """
    Parses the given username or channel access hash, given
    a string, username or URL. Returns a tuple consisting of
    both the stripped, lowercase username and whether it is
    a joinchat/ hash (in which case is not lowercase'd).
    Returns ``(None, False)`` if the ``username`` or link is not valid.
    """
    username = username.strip()
    link_match = USERNAME_RE.match(username) or TG_JOIN_RE.match(username)
    if link_match:
        remainder = username[link_match.end():]
        if link_match.group(1):
            # Invite/join links keep their hash's original casing.
            return remainder, True
        username = remainder.rstrip("/")
    if VALID_USERNAME_RE.match(username):
        return username.lower(), False
    return None, False
def get_inner_text(text, entities):
    """
    Gets the inner text that's surrounded by the given entities.
    For instance: text = 'hey!', entity = MessageEntityBold(2, 2) -> 'y!'.
    :param text: the original text.
    :param entities: the entity or entities that must be matched.
    :return: a single result or a list of the text surrounded by the entities.
    """
    # Work on the surrogate representation so entity offsets (UTF-16
    # based) index correctly, then convert each slice back.
    text = add_surrogate(text)
    return [
        del_surrogate(text[entity.offset : entity.offset + entity.length])
        for entity in entities
    ]
def get_peer(peer):
    """
    Convert ``peer`` (a marked integer ID or one of the many peer-like TL
    objects) into a plain :tl:`Peer` (``PeerUser``/``PeerChat``/``PeerChannel``).

    Raises a cast failure (via ``_raise_cast_fail``) for objects that
    cannot be interpreted as a peer.
    """
    try:
        if isinstance(peer, int):
            # Marked IDs encode their own type; split it back out.
            pid, cls = resolve_id(peer)
            return cls(pid)
        elif peer.SUBCLASS_OF_ID == 0x2D45687:
            # crc32(b'Peer'): already a Peer instance.
            return peer
        elif isinstance(
            peer,
            (
                types.contacts.ResolvedPeer,
                types.InputNotifyPeer,
                types.TopPeer,
                types.Dialog,
                types.DialogPeer,
            ),
        ):
            # Thin wrappers exposing the peer directly.
            return peer.peer
        elif isinstance(peer, types.ChannelFull):
            return types.PeerChannel(peer.id)
        elif isinstance(peer, types.UserEmpty):
            return types.PeerUser(peer.id)
        elif isinstance(peer, types.ChatEmpty):
            return types.PeerChat(peer.id)
        if peer.SUBCLASS_OF_ID in (0x7D7C6F86, 0xD9C7FC18):
            # ChatParticipant, ChannelParticipant
            return types.PeerUser(peer.user_id)
        # Fall back to converting through the input-peer representation.
        peer = get_input_peer(peer, allow_self=False, check_hash=False)
        if isinstance(peer, (types.InputPeerUser, types.InputPeerUserFromMessage)):
            return types.PeerUser(peer.user_id)
        elif isinstance(peer, types.InputPeerChat):
            return types.PeerChat(peer.chat_id)
        elif isinstance(
            peer, (types.InputPeerChannel, types.InputPeerChannelFromMessage)
        ):
            return types.PeerChannel(peer.channel_id)
    except (AttributeError, TypeError):
        # Fall through to the cast failure below.
        pass
    _raise_cast_fail(peer, "Peer")
def get_peer_id(peer, add_mark=True):
    """
    Convert the given peer into its marked ID by default.
    This "mark" comes from the "bot api" format, and with it the peer type
    can be identified back. User ID is left unmodified, chat ID is negated,
    and channel ID is "prefixed" with -100:
    * ``user_id``
    * ``-chat_id``
    * ``-100channel_id``
    The original ID and the peer type class can be returned with
    a call to :meth:`resolve_id(marked_id)`.
    """
    # First we assert it's a Peer TLObject, or early return for integers
    if isinstance(peer, int):
        return peer if add_mark else resolve_id(peer)[0]
    # Tell the user to use their client to resolve InputPeerSelf if we got one
    if isinstance(peer, types.InputPeerSelf):
        _raise_cast_fail(peer, "int (you might want to use client.get_peer_id)")
    try:
        peer = get_peer(peer)
    except TypeError:
        _raise_cast_fail(peer, "int")
    if isinstance(peer, types.PeerUser):
        # User IDs are used unmarked.
        return peer.user_id
    elif isinstance(peer, types.PeerChat):
        # Check in case the user mixed things up to avoid blowing up
        if not (0 < peer.chat_id <= 9999999999):
            peer.chat_id = resolve_id(peer.chat_id)[0]
        return -peer.chat_id if add_mark else peer.chat_id
    else:  # if isinstance(peer, types.PeerChannel):
        # Check in case the user mixed things up to avoid blowing up
        if not (0 < peer.channel_id <= 9999999999):
            peer.channel_id = resolve_id(peer.channel_id)[0]
        if not add_mark:
            return peer.channel_id
        # Growing backwards from -100_0000_000_000 indicates it's a channel
        return -(1000000000000 + peer.channel_id)
def resolve_id(marked_id):
    """Given a marked ID, returns the original ID and its :tl:`Peer` type."""
    # Positive marked IDs always denote users.
    if marked_id >= 0:
        return marked_id, types.PeerUser

    unmarked = -marked_id
    if unmarked <= 1000000000000:
        return unmarked, types.PeerChat
    # Channel IDs grow backwards from -1000000000000.
    return unmarked - 1000000000000, types.PeerChannel
def _rle_decode(data):
"""
Decodes run-length-encoded `data`.
"""
if not data:
return data
new = b""
last = b""
for cur in data:
if last == b"\0":
new += last * cur
last = b""
else:
new += last
last = bytes([cur])
return new + last
def _rle_encode(string):
new = b""
count = 0
for cur in string:
if not cur:
count += 1
else:
if count:
new += b"\0" + bytes([count])
count = 0
new += bytes([cur])
return new
def _decode_telegram_base64(string):
"""
Decodes a url-safe base64-encoded string into its bytes
by first adding the stripped necessary padding characters.
This is the way Telegram shares binary data as strings,
such as Bot API-style file IDs or invite links.
Returns `None` if the input string was not valid.
"""
try:
return base64.urlsafe_b64decode(string + "=" * (len(string) % 4))
except (binascii.Error, ValueError, TypeError):
return None # not valid base64, not valid ascii, not a string
def _encode_telegram_base64(string):
"""
Inverse for `_decode_telegram_base64`.
"""
try:
return base64.urlsafe_b64encode(string).rstrip(b"=").decode("ascii")
except (binascii.Error, ValueError, TypeError):
return None # not valid base64, not valid ascii, not a string
def resolve_bot_file_id(file_id):
    """
    Given a Bot API-style `file_id <telethon.tl.custom.file.File.id>`,
    returns the media it represents. If the `file_id <telethon.tl.custom.file.File.id>`
    is not valid, `None` is returned instead.
    Note that the `file_id <telethon.tl.custom.file.File.id>` does not have information
    such as image dimensions or file size, so these will be zero if present.
    For thumbnails, the photo ID and hash will always be zero.
    """
    # File IDs are url-safe base64 of run-length-encoded packed structs.
    data = _rle_decode(_decode_telegram_base64(file_id))
    if not data:
        return None
    # This isn't officially documented anywhere, but
    # we assume the last byte is some kind of "version".
    data, version = data[:-1], data[-1]
    if version not in (2, 4):
        return None
    # Short payloads describe documents (audio/video/sticker/gif/...).
    if (version == 2 and len(data) == 24) or (version == 4 and len(data) == 25):
        if version == 2:
            file_type, dc_id, media_id, access_hash = struct.unpack("<iiqq", data)
        # elif version == 4:
        else:
            # TODO Figure out what the extra byte means
            file_type, dc_id, media_id, access_hash, _ = struct.unpack("<iiqqb", data)
        if not (1 <= dc_id <= 5):
            # Valid `file_id`'s must have valid DC IDs. Since this method is
            # called when sending a file and the user may have entered a path
            # they believe is correct but the file doesn't exist, this method
            # may detect a path as "valid" bot `file_id` even when it's not.
            # By checking the `dc_id`, we greatly reduce the chances of this
            # happening.
            return None
        # Map the numeric file type onto the document attribute it implies.
        attributes = []
        if file_type in [3, 9]:
            # 3 = voice note, 9 = plain audio.
            attributes.append(
                types.DocumentAttributeAudio(duration=0, voice=file_type == 3)
            )
        elif file_type in [4, 13]:
            # 4 = video, 13 = round video message.
            attributes.append(
                types.DocumentAttributeVideo(
                    duration=0, w=0, h=0, round_message=file_type == 13
                )
            )
        elif file_type == 8:
            attributes.append(
                types.DocumentAttributeSticker(
                    alt="", stickerset=types.InputStickerSetEmpty()
                )
            )
        elif file_type == 10:
            attributes.append(types.DocumentAttributeAnimated())
        return types.Document(
            id=media_id,
            access_hash=access_hash,
            date=None,
            mime_type="",
            size=0,
            thumbs=None,
            dc_id=dc_id,
            attributes=attributes,
            file_reference=b"",
        )
    # Longer payloads describe photos (several sub-layouts exist).
    elif (version == 2 and len(data) == 44) or version == 4 and len(data) in {49, 77}:
        if version == 2:
            (
                file_type,
                dc_id,
                media_id,
                access_hash,
                volume_id,
                secret,
                local_id,
            ) = struct.unpack("<iiqqqqi", data)
        # else version == 4:
        elif len(data) == 49:
            # TODO Figure out what the extra five bytes mean
            (
                file_type,
                dc_id,
                media_id,
                access_hash,
                volume_id,
                secret,
                local_id,
                _,
            ) = struct.unpack("<iiqqqqi5s", data)
        elif len(data) == 77:
            # See #1613.
            (
                file_type,
                dc_id,
                _,
                media_id,
                access_hash,
                volume_id,
                _,
                local_id,
                _,
            ) = struct.unpack("<ii28sqqq12sib", data)
        else:
            return None
        if not (1 <= dc_id <= 5):
            # Same DC-ID sanity check as for documents above.
            return None
        # Thumbnails (small) always have ID 0; otherwise size 'x'
        photo_size = "s" if media_id or access_hash else "x"
        return types.Photo(
            id=media_id,
            access_hash=access_hash,
            file_reference=b"",
            date=None,
            sizes=[types.PhotoSize(type=photo_size, w=0, h=0, size=0)],
            dc_id=dc_id,
            has_stickers=None,
        )
def pack_bot_file_id(file):
    """
    Inverse operation for `resolve_bot_file_id`.
    The only parameters this method will accept are :tl:`Document` and
    :tl:`Photo`, and it will return a variable-length ``file_id`` string.
    If an invalid parameter is given, it will ``return None``.
    """
    # Unwrap media wrappers to the document/photo inside.
    if isinstance(file, types.MessageMediaDocument):
        file = file.document
    elif isinstance(file, types.MessageMediaPhoto):
        file = file.photo
    if isinstance(file, types.Document):
        # Derive the numeric file type from the first recognised attribute;
        # 5 (generic document) is the fallback.
        file_type = 5
        for attribute in file.attributes:
            if isinstance(attribute, types.DocumentAttributeAudio):
                file_type = 3 if attribute.voice else 9
            elif isinstance(attribute, types.DocumentAttributeVideo):
                file_type = 13 if attribute.round_message else 4
            elif isinstance(attribute, types.DocumentAttributeSticker):
                file_type = 8
            elif isinstance(attribute, types.DocumentAttributeAnimated):
                file_type = 10
            else:
                continue
            break
        # Trailing 2 is the "version" byte expected by resolve_bot_file_id.
        return _encode_telegram_base64(
            _rle_encode(
                struct.pack(
                    "<iiqqb", file_type, file.dc_id, file.id, file.access_hash, 2
                )
            )
        )
    elif isinstance(file, types.Photo):
        # Use the last concrete size variant that carries a location.
        size = next(
            (
                x
                for x in reversed(file.sizes)
                if isinstance(x, (types.PhotoSize, types.PhotoCachedSize))
            ),
            None,
        )
        if not size:
            return None
        size = size.location
        return _encode_telegram_base64(
            _rle_encode(
                struct.pack(
                    "<iiqqqqib",
                    2,
                    file.dc_id,
                    file.id,
                    file.access_hash,
                    size.volume_id,
                    0,
                    size.local_id,
                    2,  # 0 = old `secret`
                )
            )
        )
    else:
        return None
def resolve_invite_link(link):
    """
    Resolves the given invite link. Returns a tuple of
    ``(link creator user id, global chat id, random int)``.
    Note that for broadcast channels or with the newest link format, the link
    creator user ID will be zero to protect their identity. Normal chats and
    megagroup channels will have such ID.
    Note that the chat ID may not be accurate for chats with a link that were
    upgraded to megagroup, since the link can remain the same, but the chat
    ID will be correct once a new link is generated.
    """
    link_hash, is_link = parse_username(link)
    if not is_link:
        # Perhaps the user passed the link hash directly
        link_hash = link
    # Little known fact, but invite links with a
    # hex-string of bytes instead of base64 also works.
    # ``fullmatch`` (rather than ``match``) ensures *every* character is a
    # hex digit; a prefix-only match previously let ``bytes.fromhex`` raise
    # an uncaught ValueError for e.g. 23 hex digits plus one junk character.
    if len(link_hash) in (24, 32) and re.fullmatch(r"[a-fA-F\d]+", link_hash):
        payload = bytes.fromhex(link_hash)
    else:
        payload = _decode_telegram_base64(link_hash)
    try:
        if len(payload) == 12:
            # Newer format: no creator ID is included.
            return (0, *struct.unpack(">LQ", payload))
        elif len(payload) == 16:
            return struct.unpack(">LLQ", payload)
    except (struct.error, TypeError):
        # ``payload`` may be None (bad base64) or of the wrong length.
        pass
    return None, None, None
def resolve_inline_message_id(inline_msg_id):
    """
    Resolves an inline message ID. Returns a tuple of
    ``(message id, peer, dc id, access hash)``
    The ``peer`` may either be a :tl:`PeerUser` referencing
    the user who sent the message via the bot in a private
    conversation or small group chat, or a :tl:`PeerChannel`
    if the message was sent in a channel.
    The ``access_hash`` does not have any use yet.
    """
    try:
        payload = _decode_telegram_base64(inline_msg_id)
        dc_id, message_id, pid, access_hash = struct.unpack("<iiiq", payload)
        # Negative IDs denote channels; positive ones denote users.
        if pid < 0:
            peer = types.PeerChannel(-pid)
        else:
            peer = types.PeerUser(pid)
        return message_id, peer, dc_id, access_hash
    except (struct.error, TypeError):
        return None, None, None, None
def get_appropriated_part_size(file_size):
    """
    Gets the appropriated part size when uploading or downloading files,
    given an initial file size.

    Returns the part size in KB; raises ``ValueError`` above 4000MB.
    """
    # Size thresholds (bytes) mapped to part sizes (KB).  The original
    # 2000MB and 4000MB tiers both used 512KB, so they are merged here.
    tiers = (
        (104857600, 128),   # <= 100MB
        (786432000, 256),   # <= 750MB
        (4194304000, 512),  # <= 4000MB
    )
    for threshold, part_size in tiers:
        if file_size <= threshold:
            return part_size
    raise ValueError("File size too large")
def encode_waveform(waveform):
    """
    Encodes the input `bytes` into a 5-bit byte-string
    to be used as a voice note's waveform. See `decode_waveform`
    for the reverse operation.

    Each input byte contributes its low 5 bits, packed contiguously.

    Example
    .. code-block:: python
        chat = ...
        file = 'my.ogg'
        # Send 'my.ogg' with a ascending-triangle waveform
        await client.send_file(chat, file, attributes=[types.DocumentAttributeAudio(
            duration=7,
            voice=True,
            waveform=utils.encode_waveform(bytes(range(2 ** 5)) # 2**5 because 5-bit
        )]
        # Send 'my.ogg' with a square waveform
        await client.send_file(chat, file, attributes=[types.DocumentAttributeAudio(
            duration=7,
            voice=True,
            waveform=utils.encode_waveform(bytes((31, 31, 15, 15, 15, 15, 31, 31)) * 4)
        )]
    """
    total_bits = len(waveform) * 5
    total_bytes = (total_bits + 7) // 8
    # One extra scratch byte so the 16-bit read-modify-write below never
    # runs past the end; it is trimmed from the final result.
    buffer = bytearray(total_bytes + 1)
    for index, sample in enumerate(waveform):
        byte_index, bit_shift = divmod(index * 5, 8)
        shifted = (sample & 0b00011111) << bit_shift
        current = struct.unpack("<H", buffer[byte_index : byte_index + 2])[0]
        buffer[byte_index : byte_index + 2] = struct.pack("<H", current | shifted)
    return bytes(buffer[:total_bytes])
def decode_waveform(waveform):
    """
    Inverse operation of `encode_waveform`.

    Unpacks contiguous 5-bit values from ``waveform`` into one byte each.
    """
    bit_count = len(waveform) * 8
    value_count = bit_count // 5
    if value_count == 0:
        return b""
    result = bytearray(value_count)
    for i in range(value_count - 1):
        byte_index, bit_shift = divmod(i * 5, 8)
        value = struct.unpack("<H", waveform[byte_index : byte_index + 2])[0]
        result[i] = (value >> bit_shift) & 0b00011111
    # The final value may not have two whole bytes left to read, so it is
    # handled separately.  Its bit offset is ``(value_count - 1) * 5`` —
    # the ``* 5`` was previously missing, which made this read from the
    # wrong byte (the reference implementation multiplies by 5 as well).
    byte_index, bit_shift = divmod((value_count - 1) * 5, 8)
    if byte_index == len(waveform) - 1:
        value = waveform[byte_index]
    else:
        value = struct.unpack("<H", waveform[byte_index : byte_index + 2])[0]
    result[value_count - 1] = (value >> bit_shift) & 0b00011111
    return bytes(result)
def split_text(
    text, entities, *, limit=4096, max_entities=100, split_at=(r"\n", r"\s", ".")
):
    """
    Split a message text and entities into multiple messages, each with their
    own set of entities. This allows sending a very large message as multiple
    messages while respecting the formatting.
    Arguments
        text (`str`):
            The message text.
        entities (List[:tl:`MessageEntity`])
            The formatting entities.
        limit (`int`):
            The maximum message length of each individual message.
        max_entities (`int`):
            The maximum amount of entities that will be present in each
            individual message.
        split_at (Tuple[`str`]):
            The list of regular expressions that will determine where to split
            the text. By default, a newline is searched. If no newline is
            present, a space is searched. If no space is found, the split will
            be made at any character.
            The last expression should always match a character, or else the
            text will stop being split and the resulting text may be larger
            than the limit.
    Yields
        Pairs of ``(str, entities)`` with the split message.
    Example
    .. code-block:: python
        from telethon import utils
        from telethon.extensions import markdown
        very_long_markdown_text = "..."
        text, entities = markdown.parse(very_long_markdown_text)
        for text, entities in utils.split_text(text, entities):
            await client.send_message(chat, text, formatting_entities=entities)
    """
    # TODO add test cases (multiple entities beyond cutoff, at cutoff, splitting at emoji)
    # TODO try to optimize this a bit more? (avoid new_ent, smarter update method)
    # Helper: clone an entity with some of its fields replaced.
    def update(ent, **updates):
        kwargs = ent.to_dict()
        del kwargs["_"]
        kwargs.update(updates)
        return ent.__class__(**kwargs)
    # Work in surrogate space so entity offsets (UTF-16 based) line up.
    text = add_surrogate(text)
    split_at = tuple(map(re.compile, split_at))
    while True:
        # The effective limit also shrinks so that no piece carries more
        # than ``max_entities`` entities.
        if len(entities) > max_entities:
            last_ent = entities[max_entities - 1]
            cur_limit = min(limit, last_ent.offset + last_ent.length)
        else:
            cur_limit = limit
        if len(text) <= cur_limit:
            break
        # Try each split pattern in priority order, scanning backwards
        # from the limit for the right-most split position.
        for split in split_at:
            for i in reversed(range(cur_limit)):
                m = split.match(text, pos=i)
                if m:
                    cur_text, new_text = text[: m.end()], text[m.end() :]
                    cur_ent, new_ent = [], []
                    # Partition entities; ones straddling the cut are
                    # truncated on the left and re-based on the right.
                    for ent in entities:
                        if ent.offset < m.end():
                            if ent.offset + ent.length > m.end():
                                cur_ent.append(update(ent, length=m.end() - ent.offset))
                                new_ent.append(
                                    update(
                                        ent,
                                        offset=0,
                                        length=ent.offset + ent.length - m.end(),
                                    )
                                )
                            else:
                                cur_ent.append(ent)
                        else:
                            new_ent.append(update(ent, offset=ent.offset - m.end()))
                    yield del_surrogate(cur_text), cur_ent
                    text, entities = new_text, new_ent
                    break
            else:
                # This pattern found no split point; try the next one.
                continue
            break
        else:
            # Can't find where to split, just return the remaining text and entities
            break
    yield del_surrogate(text), entities
class AsyncClassWrapper:
    """
    Proxy that exposes an object's methods behind an async interface.

    Attribute access is forwarded to the wrapped object; non-callable
    attributes are returned directly, while callables are wrapped so the
    call always yields an awaitable (awaiting the result when it is
    itself awaitable).
    """

    def __init__(self, wrapped):
        self.wrapped = wrapped

    def __getattr__(self, item):
        attribute = getattr(self.wrapped, item)
        if not callable(attribute):
            return attribute

        async def call_async(*args, **kwargs):
            outcome = attribute(*args, **kwargs)
            if inspect.isawaitable(outcome):
                outcome = await outcome
            return outcome

        return call_async
def stripped_photo_to_jpg(stripped):
    """
    Adds the JPG header and footer to a stripped image.
    Ported from https://github.com/telegramdesktop/tdesktop/blob/bec39d89e19670eb436dc794a8f20b657cb87c71/Telegram/SourceFiles/ui/image/image.cpp#L225

    ``stripped`` payloads start with a 0x01 marker followed by the image's
    height and width bytes; anything else is returned unchanged.
    """
    # NOTE: Changes here should update _photo_size_byte_count
    if len(stripped) < 3 or stripped[0] != 1:
        return stripped
    # Fixed JPEG header template shared by every stripped thumbnail.
    header = bytearray(
        b'\xff\xd8\xff\xe0\x00\x10JFIF\x00\x01\x01\x00\x00\x01\x00\x01\x00\x00\xff\xdb\x00C\x00(\x1c\x1e#\x1e\x19(#!#-+(0<dA<77<{X]Id\x91\x80\x99\x96\x8f\x80\x8c\x8a\xa0\xb4\xe6\xc3\xa0\xaa\xda\xad\x8a\x8c\xc8\xff\xcb\xda\xee\xf5\xff\xff\xff\x9b\xc1\xff\xff\xff\xfa\xff\xe6\xfd\xff\xf8\xff\xdb\x00C\x01+--<5<vAAv\xf8\xa5\x8c\xa5\xf8\xf8\xf8\xf8\xf8\xf8\xf8\xf8\xf8\xf8\xf8\xf8\xf8\xf8\xf8\xf8\xf8\xf8\xf8\xf8\xf8\xf8\xf8\xf8\xf8\xf8\xf8\xf8\xf8\xf8\xf8\xf8\xf8\xf8\xf8\xf8\xf8\xf8\xf8\xf8\xf8\xf8\xf8\xf8\xf8\xf8\xf8\xf8\xff\xc0\x00\x11\x08\x00\x00\x00\x00\x03\x01"\x00\x02\x11\x01\x03\x11\x01\xff\xc4\x00\x1f\x00\x00\x01\x05\x01\x01\x01\x01\x01\x01\x00\x00\x00\x00\x00\x00\x00\x00\x01\x02\x03\x04\x05\x06\x07\x08\t\n\x0b\xff\xc4\x00\xb5\x10\x00\x02\x01\x03\x03\x02\x04\x03\x05\x05\x04\x04\x00\x00\x01}\x01\x02\x03\x00\x04\x11\x05\x12!1A\x06\x13Qa\x07"q\x142\x81\x91\xa1\x08#B\xb1\xc1\x15R\xd1\xf0$3br\x82\t\n\x16\x17\x18\x19\x1a%&\'()*456789:CDEFGHIJSTUVWXYZcdefghijstuvwxyz\x83\x84\x85\x86\x87\x88\x89\x8a\x92\x93\x94\x95\x96\x97\x98\x99\x9a\xa2\xa3\xa4\xa5\xa6\xa7\xa8\xa9\xaa\xb2\xb3\xb4\xb5\xb6\xb7\xb8\xb9\xba\xc2\xc3\xc4\xc5\xc6\xc7\xc8\xc9\xca\xd2\xd3\xd4\xd5\xd6\xd7\xd8\xd9\xda\xe1\xe2\xe3\xe4\xe5\xe6\xe7\xe8\xe9\xea\xf1\xf2\xf3\xf4\xf5\xf6\xf7\xf8\xf9\xfa\xff\xc4\x00\x1f\x01\x00\x03\x01\x01\x01\x01\x01\x01\x01\x01\x01\x00\x00\x00\x00\x00\x00\x01\x02\x03\x04\x05\x06\x07\x08\t\n\x0b\xff\xc4\x00\xb5\x11\x00\x02\x01\x02\x04\x04\x03\x04\x07\x05\x04\x04\x00\x01\x02w\x00\x01\x02\x03\x11\x04\x05!1\x06\x12AQ\x07aq\x13"2\x81\x08\x14B\x91\xa1\xb1\xc1\t#3R\xf0\x15br\xd1\n\x16$4\xe1%\xf1\x17\x18\x19\x1a&\'()*56789:CDEFGHIJSTUVWXYZcdefghijstuvwxyz\x82\x83\x84\x85\x86\x87\x88\x89\x8a\x92\x93\x94\x95\x96\x97\x98\x99\x9a\xa2\xa3\xa4\xa5\xa6\xa7\xa8\xa9\xaa\xb2\xb3\xb4\xb5\xb6\xb7\xb8\xb9\xba\xc2\xc3\xc4\xc5\xc6\xc7\xc8\xc9\xca\xd2\xd3\xd4\xd5\xd6\xd7\xd8\xd9\xda\xe2\xe3\xe4\xe5\xe6\xe7\xe8\xe9\xea\xf2\xf3\xf4\xf5\xf6\xf7\xf8\xf9\xfa\xff\xda\x00\x0c\x03\x01\x00\x02\x11\x03\x11\x00?\x00'
    )
    footer = b"\xff\xd9"
    # Patch the template's height/width bytes from the stripped payload.
    header[164] = stripped[1]
    header[166] = stripped[2]
    return bytes(header) + stripped[3:] + footer
def _photo_size_byte_count(size):
    """
    Best-effort size in bytes for a photo-size object, or ``None`` for
    unrecognised size types.
    """
    if isinstance(size, types.PhotoSize):
        return size.size
    if isinstance(size, types.PhotoStrippedSize):
        # Stripped thumbnails gain a fixed JPEG header/footer on expansion
        # (see `stripped_photo_to_jpg`); malformed payloads stay as-is.
        payload = size.bytes
        if len(payload) < 3 or payload[0] != 1:
            return len(payload)
        return len(payload) + 622
    if isinstance(size, types.PhotoCachedSize):
        return len(size.bytes)
    if isinstance(size, types.PhotoSizeEmpty):
        return 0
    if isinstance(size, types.PhotoSizeProgressive):
        return max(size.sizes)
    return None
def convert_reaction(
    reaction: "typing.Optional[hints.Reaction]" = None,  # type: ignore
) -> typing.Optional[
    typing.Union[
        typing.List[types.ReactionEmoji], typing.List[types.ReactionCustomEmoji]
    ]
]:
    """
    Converts a reaction to a list of :tl:`ReactionEmoji` or :tl:`ReactionCustomEmoji`.

    Accepts a single emoji string, a custom-emoji document ID (int), an
    already-built reaction object, or a list mixing any of those; returns
    ``None`` for falsy input.
    """
    if not reaction:
        return None
    if isinstance(reaction, str):
        reaction = types.ReactionEmoji(reaction)
    if isinstance(reaction, int):
        reaction = types.ReactionCustomEmoji(reaction)
    if isinstance(reaction, (types.ReactionEmoji, types.ReactionCustomEmoji)):
        reaction = [reaction]
    # Convert the remaining raw elements in place by index.  Using
    # ``reaction.index(r)`` here was fragile (it always finds the *first*
    # matching element, which misbehaves for lists with duplicates);
    # ``enumerate`` converts each position exactly once.
    for i, r in enumerate(reaction):
        if isinstance(r, str):
            reaction[i] = types.ReactionEmoji(r)
        elif isinstance(r, int):
            reaction[i] = types.ReactionCustomEmoji(r)
    return reaction
from __future__ import unicode_literals
from django import forms
from django.conf import settings
from django.http import HttpResponse, HttpResponseRedirect, Http404
from django.http import StreamingHttpResponse
from django.http.request import QueryDict
from django.shortcuts import render
from django.contrib import messages
from django.core.urlresolvers import reverse, reverse_lazy, RegexURLPattern
from django.core.exceptions import ObjectDoesNotExist, PermissionDenied
from django.template.loader import TemplateDoesNotExist
from django.forms import ModelForm, ModelChoiceField
from django.forms.models import fields_for_model
from django.utils.decorators import method_decorator
from django.views.decorators.http import require_GET, require_POST
from django.views.decorators.csrf import csrf_exempt
from django.utils.text import slugify
from django.utils.html import mark_safe
from django.db import models
from django.db import router
from django.db import transaction
from django.db import IntegrityError
from django.db.models import Q, Max
from django.db.models import CharField, TextField, EmailField, BooleanField, DateField
from django.db.models import ManyToManyField, IntegerField, AutoField
from django.db.models import Case, When
from django.db.models.functions import Lower
from django.db.models.fields import FieldDoesNotExist
from django.db.models.fields.related import ForeignKey
from django.db.models.fields.reverse_related import ManyToOneRel
from django.db.models.deletion import Collector
from django.contrib.contenttypes.models import ContentType
from cubane.decorators import template
from cubane.decorators import user_has_permission as _user_has_permission
from cubane.decorators import identity, permission_required
from cubane.forms import FormLayout
from cubane.forms import DataImportForm
from cubane.forms import DateInput
from cubane.forms import MultiSelectFormField
from cubane.forms import SectionField
from cubane.models import DateTimeReadOnlyBase, DateTimeBase
from cubane.models.fields import TagsField
from cubane.models.fields import MultiSelectField
from cubane.models.data import Exporter, Importer
from cubane.media.models import Media
from cubane.backend.forms import ModelCollectionField
from cubane.backend.models import ChangeLog
from cubane.lib.libjson import to_json_response
from cubane.lib.file import to_uniform_filename
from cubane.lib.model import *
from cubane.lib.queryset import MaterializedQuerySet
from cubane.lib.url import url_with_arg, url_with_args, parse_query_string
from cubane.lib.ident import headline_from_ident
from cubane.lib.tree import TreeBuilder
from cubane.lib.request import request_int, request_bool, request_int_list
from cubane.lib.url import make_absolute_url
from cubane.lib.text import get_words, pluralize
from cubane.lib.acl import Acl
from cubane.lib.template import get_template
from cubane.signals import before_cms_save, after_cms_save
import re
import os
import copy
import datetime
# Model field types treated as free text (e.g. for search/filtering).
TEXT_FIELDS = (CharField, EmailField, TextField)
# Listing/pagination tuning knobs.  NOTE(review): exact semantics are
# defined by the consuming listing code further below — confirm there.
MAX_RECORDS = 50
PAGINATION_MAX_RECORDS = 48
PAGINATION_PAGES_WINDOW_SIZE = 6
MAX_COLUMNS = 6
MAX_HIERARCHY_LEVELS = 3
def view_url(regex, view, kwargs=None, name=None):
    """
    Encapsulates a url pattern that can be used with views. This is mainly to
    support flexible arguments, so that we do not have to provide all
    arguments all the time.

    Returns the 4-tuple ``(regex, view, kwargs, name)`` in the shape
    consumed by ``View._get_urlpatterns``.
    """
    return (regex, view, kwargs, name)
def get_columns(*args):
    """
    Return a list of columns based on given arguments.

    Each argument is a sequence whose first element is the column title
    and whose second element is the model field name.
    """
    columns = []
    for arg in args:
        columns.append({
            'title': arg[0],
            'fieldname': arg[1]
        })
    return columns
def view(decorator=None):
    """
    Since methods do have an additional argument (self), we need to transform
    a regular decorator (which works on plain view functions) to a method
    decorator.

    Without a (truthy) decorator, the identity transform is returned.
    """
    return method_decorator(decorator) if decorator else identity
class View(object):
"""
A view provides a set of operations that can be performed typically on one
type of entity, for example a CustomerView derived from this class may
implement all features that are necessary to list, filter, search, create,
edit and delete customers in the context of a shop system for example.
"""
@property
def urls(self):
if not hasattr(self, '_urls'):
self._urls = self.get_urls()
return self._urls
@property
def url(self):
return self.get_url()
    def _get_urlpatterns(self, patterns, prefix=None):
        """
        Return url patterns that can be added to the django's url routing
        system by extracting url patterns from the given list of patterns
        and wrapping it into instances of RegexURLPattern.

        Each pattern is a ``(regex, handler_name, kwargs, name)`` tuple as
        produced by ``view_url``.
        """
        urls = []
        for regex, handler_name, kwargs, name in patterns:
            # inject url prefix into patterns (keeping the '^' anchor first)
            if prefix:
                if regex.startswith('^'):
                    regex = '^%s/%s' % (prefix, regex[1:])
                else:
                    regex = '%s/%s' % (prefix, regex)
            # add namespace to the url name, if the view declares one
            if hasattr(self, 'namespace'):
                name = self.namespace + '.' + name
            # setup url route to view method
            handler = self._create_view_handler(handler_name)
            urls.append(RegexURLPattern(regex, handler, kwargs, name))
        return urls
def get_patterns(self):
"""
Return a list of all url pattern that are specified by this class and
its super classes.
"""
_patterns = []
# collect patterns from derived classes
def collect_patterns(cls):
if hasattr(cls, 'patterns'):
_patterns.extend(cls.patterns)
for subcls in cls.__bases__:
collect_patterns(subcls)
collect_patterns(self.__class__)
# collect patterns from instance
if hasattr(self, 'patterns') and isinstance(self.patterns, list):
_patterns.extend(self.patterns)
return _patterns
def get_urls(self, prefix=None):
"""
Return a url patterns structure for all view methods that this view
implements. By convention, url patterns are extracted from the
class property with the name 'patterns'.
"""
# collect patterns from this class and all derived classes
patterns = self.get_patterns()
# generate url patterns
urls = []
if patterns:
urls = self._get_urlpatterns(patterns, prefix)
else:
urls = []
return urls
def get_url(self):
urls = self.get_urls()
if len(urls) > 0:
for p in urls:
if p.name.endswith('.index'):
return reverse(p.name)
return reverse(urls[0].name)
return ''
def run_handler(self, request, handler_name, *args, **kwargs):
"""
Execute given view handler methods. This is mostly intended to be used
by unit tests testing the functionality of the actual view
implementation.
"""
handler = self._create_view_handler(handler_name)
_process_templates = getattr(self, 'process_templates', True)
self.process_templates = False
response = handler(request, *args, **kwargs)
self.process_templates = _process_templates
return response
def _create_view_handler(self, handler_name):
"""
Return a wrapper for handling the view which in itself calls the method
with given name on the given view class. For each request, a new
instance of the view class is created.
"""
def view(request, *args, **kwargs):
# create a new instance for each dispatch, so we do not have any
# shared state. dispatch into method or raise 404
instance = copy.copy(self)
# inject view class into request, some decorators may use this
# in order to obtain access to the view model instance that
# is handeling the request
request.view_instance = instance
# get actual view handler to handler the request
handler = getattr(instance, handler_name, None)
if not handler:
raise Http404(
'method %s is not implemented in view class %s' % (
handler_name,
instance.__class__.__name__
)
)
return instance._dispatch(handler, request, *args, **kwargs)
# cascade CSRF excemption annotation on view handler function
if 'django.middleware.csrf.CsrfViewMiddleware' in settings.MIDDLEWARE_CLASSES:
handler = getattr(self, handler_name, None)
if handler:
if getattr(handler, 'csrf_exempt', False):
view.csrf_exempt = True
return view
def _dispatch(self, handler, request, *args, **kwargs):
"""
Dispatch to given view handler on this view class instance. Any dispatch
calls before() then the actual view handler method and then after().
- before() and after() can be overridden by a deriving view class.
- If before() raises an exception, the actual method handler is not
called nor is after().
- If before() returns something, the actual method handler is not
- called nor is after().
- If after() returns something, it overrides the response from the
actual view handler.
- The response from the actual view handler is passed to after().
"""
# before handler -> if it returns something, we are done with it...
response = self.before(request, handler)
if response: return response
# dispatch into actual view handler
response = handler(request, *args, **kwargs)
# after handler -> may override response from actual view handler...
after_response = self.after(request, handler, response)
if after_response:
return after_response
else:
return response
def before(self, request, handler):
"""
Default before handler. Override your own implementation in your
derived class.
"""
pass
def after(self, request, handler, response):
"""
Default after handler. Override your own implementation in your
derived class.
"""
pass
class ApiView(View):
    """
    API-oriented view for XHR requests: handler return values are
    serialized to JSON and delivered with content type text/javascript.
    """
    def after(self, request, handler, response):
        """
        Serialize the data returned by the actual view handler to a JSON
        HttpResponse (content type text/javascript). Any HttpResponse
        returned by the handler directly is passed through untouched.
        """
        if not isinstance(response, HttpResponse):
            response = to_json_response(response)
        return response
class TemplateView(View):
    """
    View that lets handlers return either an HttpResponse or a template
    context (dict); a context is rendered with a template whose name
    correlates with the name of the view handler.
    """
    # optional base directory for handler template files
    template_path = None
    # when False (e.g. during unit tests), template rendering is skipped
    process_templates = True
    def _get_template_path(self, request, handler, response):
        """
        Resolve the template path for the given handler. May be overridden
        by the derived class to customize the handler to template mapping.
        By default the path is '<template_path>/<handler name>.html',
        unless the response dict overrides it explicitly via
        'cubane_template_view_path'.
        """
        # allow the template path to be overwritten
        path = response.get('cubane_template_view_path')
        if path:
            return path
        # construct path based on handler name
        path = '%s.html' % handler.__name__
        if self.template_path:
            return os.path.join(self.template_path, path)
        return path
    def after(self, request, handler, response):
        """
        Render the template context returned by the actual view handler
        into an HttpResponse by calling into django's render(). Responses
        that already are (streaming) http responses are returned unchanged,
        as are all responses while template processing is disabled.
        """
        # ignore template processing? (unit testing?)
        if not self.process_templates:
            return response
        # directly return response if it is an HttpResponse already
        if isinstance(response, (HttpResponse, StreamingHttpResponse)):
            return response
        # assume a dict-like response; extend with self.context if available
        if hasattr(self, 'context'):
            response.update(self.context)
        # determine the path to the template and hand over to django
        path = self._get_template_path(request, handler, response)
        return render(request, path, response)
class ModelView(TemplateView):
"""
Model view provides full support for listing, creating, updating and
deleting model entities.
"""
    def __init__(self, model=None, namespace_prefix='', related_listing=False, related_instance=None, related_instance_attr=None, *args, **kwargs):
        """
        Create a default instance for a given model.

        model                 -- model class managed by this view (may also
                                 be declared as a class attribute instead)
        namespace_prefix      -- prefix for the auto-generated url namespace
        related_listing       -- True if this view presents an embedded
                                 listing related to another instance
        related_instance      -- the instance the related listing refers to
        related_instance_attr -- name of the foreign key field referring to
                                 the related instance
        """
        # related listing?
        self.related_listing = related_listing
        self.related_instance = related_instance
        self.related_instance_attr = related_instance_attr
        # construct based on given model?
        if model is not None:
            self.model = model
        # no model? -> bail out early; acl, slug and namespace cannot
        # be derived without a model
        if hasattr(self, 'model') and self.model is None:
            return
        # model acl
        if hasattr(self, 'model'):
            self.acl = Acl.of(self.model)
        else:
            self.acl = Acl.default(None)
        # generate slug automatically based on model plural verbose name
        # if no slug has been defined.
        if hasattr(self, 'model') and not hasattr(self, 'slug'):
            self.slug = slugify(self.model._meta.verbose_name_plural)
        # generate namespace automatically based on model plural verbose name
        # if no namespace has been defined
        if hasattr(self, 'model') and not hasattr(self, 'namespace'):
            self.namespace = namespace_prefix + unicode(slugify(self.model._meta.verbose_name_plural))
        if not hasattr(self, 'model'):
            self.model = None
        # override: folder model ('self' refers to the listing model itself)
        folder_model = get_listing_view_option(self.model, 'folder_model')
        if folder_model == 'self':
            self.folder_model = self.model
        elif folder_model is not None:
            self.folder_model = folder_model
        # override: multiple folders
        multiple_folders = get_listing_view_option(self.model, 'multiple_folders')
        if multiple_folders is not None:
            self.multiple_folders = multiple_folders
        # override: list children
        list_children = get_listing_view_option(self.model, 'list_children')
        if list_children is not None:
            self.list_children = list_children
@property
def _model_name(self):
return self.model.__name__.lower()
@property
def is_single_instance(self):
return hasattr(self, 'single_instance') and self.single_instance
@property
def model_is_folder(self):
"""
Return True, if the listing model and the folder model are the same.
"""
return self.model == self.get_folder_model()
@property
def listing_with_image(self):
"""
Return True, if the listing will preview an image alongside each item
in standard listing mode.
"""
return get_listing_option(self.model, 'listing_with_image', False)
    def validate_models(self):
        """
        Validate that the attached model can safely be used as part of the
        backend system.
        """
        validate_model(self.model)
def get_url_for_model(self, model, view='index'):
"""
Return the url for editing the given model and view or None. The view
argument defines the type of view we are interested in, for example
index, edit or create.
"""
if self.model == model:
return reverse(self._get_url_name(view, namespace=True))
else:
return None
def get_url_for_model_instance(self, instance, view='index'):
"""
Return the url for editing the given model and view or None. The view
argument defines the type of view we are interested in, for example
index, edit or create.
"""
if self.model == instance.__class__:
# there might be multiple ModelView instances managing the same
# model, therefore we might have to consider the backend section
# identifier as well...
if self._has_model_backend_sections():
if self._get_model_backend_section(instance) != self.model_attr_value:
return None
return reverse(self._get_url_name(view, namespace=True))
return None
def _get_exclude_columns(self):
"""
Return a list of columns that this view does not want to be presented
within the listing nor the filter form.
"""
if hasattr(self, 'exclude_columns'):
return self.exclude_columns
else:
return []
def _get_model_columns(
self,
model,
view='list',
listing_actions=[],
related_instance_attr=None,
searchable=False
):
"""
Return a list of all model columns that should be presented when
listing entities by using the default listing templates. If the
searchable argument is True, only searchable fields are included.
"""
# columns in edit mode
fieldnames = None
if view == 'edit':
fieldnames = get_listing_option(model, 'edit_columns')
exclude_non_editable = False
# columns in non-edit mode, or no columns specified for edit mode
if fieldnames is None:
fieldnames = get_listing_option(model, 'columns')
exclude_non_editable = False
# fallback: Extract columns from model automatically
if fieldnames is None:
fieldnames = get_model_field_names(model)
exclude_non_editable = True
auto_columns = True
else:
auto_columns = False
# process columns
columns = []
exclude_columns = self._get_exclude_columns()
for fieldname in fieldnames:
# check if field is half column
half_col = fieldname.startswith('/')
if half_col:
fieldname = fieldname[1:]
# check for explicit right-alignment
is_right_aligned = fieldname.startswith('-')
if is_right_aligned:
fieldname = fieldname[1:]
# split into fieldname and label
# format: <column-name>[(<display-column-name)]|<title>|<format> or <action>
# where format might be
# - bool for yes/no
# - url for inline link
# - html for arbitary html content
# - currency for currency format of number
# - percent for percent format
#
# action must point to listing action, such as
# action:foo
p = fieldname.split('|', 2)
is_bool = False
is_url = False
is_currency = False
is_percent = False
is_action = False
is_html = False
action = None
if len(p) == 3:
fieldname = p[0]
title = p[1]
is_bool = p[2] == 'bool'
is_url = p[2] == 'url'
is_currency = p[2] == 'currency'
is_percent = p[2] == 'percent'
is_html = p[2] == 'html'
if p[2].startswith('action:'):
action_view = p[2].replace('action:', '')
for _action in listing_actions:
if _action.get('view') == action_view:
action = _action
break
elif len(p) == 2:
fieldname = p[0]
title = p[1]
else:
title = None
# fieldname may express a different field or property for
# display purposes...
m = re.match(r'^(?P<fieldname>.*?)\((?P<display_fieldname>.*?)\)$', fieldname)
if m:
fieldname = m.group('fieldname')
display_fieldname = m.group('display_fieldname')
else:
display_fieldname = fieldname
# do not present a column that we filter by for an embedded listing
# view, this would be repeated and the same for every instance
# anyhow...
if fieldname == related_instance_attr:
continue
# do not include a column that the view does not want
# to be presented...
if fieldname in exclude_columns:
continue
# get field
try:
field = model._meta.get_field(fieldname)
except FieldDoesNotExist:
# foreign field?
field, related, rel_fieldname, rel_model, title = get_model_related_field(model, fieldname, title)
# generate automatic title
if not title:
title = headline_from_ident(fieldname)
# foreign field, property or callable
if related != None or (not searchable and hasattr(model, fieldname)):
columns.append({
'fieldname': fieldname,
'display_fieldname': display_fieldname,
'title': title,
'sortable': related != None,
'bool': is_bool,
'html': is_html,
'currency': is_currency,
'percent': is_percent,
'choices': False,
'url': fieldname.endswith('_url') or is_url,
'action': action,
'half_col': half_col,
'foreign': False,
'related': related,
'rel_model': rel_model,
'right_aligned': is_right_aligned
})
continue
# no field?
if not field:
continue
# do not include fields that are not editable?
if exclude_non_editable and not field.editable:
continue
# determine verbose name
if isinstance(field, ManyToOneRel):
verbose_name = field.field.verbose_name
else:
verbose_name = field.verbose_name
# determine field title
if not title:
title = ' '.join(x.capitalize() for x in verbose_name.split())
# many to many?
many2many = isinstance(field, (ManyToManyField, ManyToOneRel))
# choices?
has_choices = hasattr(field, 'choices') and field.choices and len(field.choices) > 0
# add to result
columns.append({
'fieldname': fieldname,
'display_fieldname': display_fieldname,
'title': title,
'sortable': True,
'bool': isinstance(field, BooleanField) or is_bool,
'html': is_html,
'currency': is_currency,
'percent': is_percent,
'choices': has_choices,
'many2many': many2many,
'choice_display': 'get_%s_display' % fieldname,
'url': fieldname.endswith('_url') or is_url,
'action': action,
'half_col': half_col,
'related': None,
'foreign': isinstance(field, ForeignKey),
'rel_model': None,
'right_aligned': is_right_aligned
})
# we should not have more than a fixed number of (full) columns
# (including half columns).
if self._count_full_columns(columns) > MAX_COLUMNS:
if auto_columns:
columns = columns[:MAX_COLUMNS]
else:
raise ValueError(
'This view exceeds the maximum number of allowed (full) ' +
' columns of %d.' % MAX_COLUMNS
)
# determine column widths based on full/half columns
self._inject_column_width(columns)
return columns
def _get_related_fields_from_columns(self, columns):
"""
Return list of related model fields (foreign keys) based on the given
list of model columns. The result can be used for select_related() on
the corresponding model queryset in order to fetch related models that
are used. The default listing view may present an image, which is why
the 'image' column is always included (if the model defines it)
"""
result = []
for column in columns:
related = column.get('related')
foreign = column.get('foreign')
fieldname = column.get('fieldname')
if related != None and related not in result:
result.append(related)
elif foreign and not fieldname in result:
result.append(fieldname)
# image field
if 'image' not in result and hasattr(self, 'model') and self.model is not None:
try:
image_field = self.model._meta.get_field('image')
if isinstance(image_field, ForeignKey) and issubclass(image_field.related_model, Media):
result.append('image')
except FieldDoesNotExist:
pass
return result
def _count_full_columns(self, columns):
"""
Return the number of full columns, where one half columns count as
0.5 full columns, therefore this function may yield a multiply of 0.5.
"""
i = 0.0
for c in columns:
if c.get('half_col'):
i += 0.5
else:
i += 1
return i
def _inject_column_width(self, columns):
"""
Determine column width on the basis of full columns and half columns.
"""
try:
# force first column to be full width
columns[0]['half_col'] = False
except:
# no columns available.
return
k = len(filter(lambda col: col.get('half_col', False), columns))
n = len(columns) - 1 - k
denominator = k + 2 * n
for i, col in enumerate(columns):
if i == 0:
col['col_class'] = 't-col-primary'
else:
col['col_class'] = 't-col-%s-%s' % (1 if col.get('half_col', False) else 2, denominator)
def _get_model_column_names(self, model, view='list', searchable=False):
"""
Return a list of field names for the model that are presented by the
listing controller.
"""
fields = self._get_model_columns(model, view, listing_actions=[], searchable=searchable)
return [f['fieldname'] for f in fields]
    def _get_default_view(self):
        """
        Return the default listing view for the model. If no default view
        is defined via the 'default_view' attribute, the default listing
        view is 'list'.
        """
        return get_listing_option(self.model, 'default_view', 'list')
    def _get_filter_by(self):
        """
        Return a list of columns that the model can be filtered by or the empty
        list (based on the 'filter_by' listing option).
        """
        return get_listing_option(self.model, 'filter_by', [])
def _has_model_backend_sections(self):
"""
Return True, if this view supports multiple model types for multiple
backend sections.
"""
try:
return self.model_attr and self.model_attr_value
except:
return False
def _configure_model_backend_section(self, instance):
"""
Make sure that the model has the correct backend section assigned to it.
"""
if self._has_model_backend_sections():
setattr(instance, self.model_attr, self.model_attr_value)
def _get_model_backend_section(self, instance):
"""
Return the backend section of the given model instance.
"""
if self._has_model_backend_sections():
return getattr(instance, self.model_attr, None)
else:
return None
    def _get_filter_form(self, request, args):
        """
        Return the filter form that is used to filter records for this model
        view, or None if there is nothing to filter by. Please note that the
        form is based on the model form or any form that is returned by
        get_filter_form(). All fields are made optional, help texts are
        stripped and field names are prefixed with '_filter_'.

        args -- dict of current filter arguments (used as form initials).
        """
        filter_by = self._get_filter_by()
        # if we do not filter by any fields, we do not need a filter form
        if len(filter_by) == 0:
            return None
        # get form class; the model may not provide a dedicated filter
        # form, in which case we fall back to the regular form
        try:
            formclass = self.model.get_filter_form()
        except:
            formclass = self._get_form()
        # instantiate new form instance
        if formclass:
            form = formclass(initial=args)
        else:
            form = None
        if form:
            # construct an empty instance to configure the form with
            instance = self.model()
            # pre-configure instance model type if available
            self._configure_model_backend_section(instance)
            # configure form
            form.is_duplicate = False
            form.is_embedded = False
            form.parent_form = None
            form.parent_instance = None
            form.view = self
            form.configure(request, instance=instance, edit=False)
            # remove fields that we are not filtering by or are excluded
            # by the view...filter out columns that the view may not want...
            # NOTE: deleting while iterating items() relies on Python 2
            # semantics (items() returns a list copy)
            exclude_columns = self._get_exclude_columns()
            for fieldname, field in form.fields.items():
                if fieldname not in filter_by or fieldname in exclude_columns:
                    del form.fields[fieldname]
            # foreign field?
            for fieldname in filter_by:
                field, related, rel_fieldname, rel_model, title = get_model_related_field(self.model, fieldname)
                if rel_model:
                    # try to get form for related entity
                    try:
                        rel_formclass = rel_model.get_filter_form()
                    except:
                        try:
                            rel_formclass = rel_model.get_form()
                        except:
                            rel_formclass = None
                    # if this fails, try the form we already know about...
                    if not rel_formclass:
                        rel_formclass = formclass
                    if rel_formclass:
                        rel_form = rel_formclass()
                        field = rel_form.fields.get(rel_fieldname)
                        if field:
                            form.fields[fieldname] = field
            # create fields that are not included in the form because they are
            # not editable...Only include those fields if we directly refer
            # to such field via Meta.filter_by
            for fieldname in filter_by:
                if fieldname not in form.fields:
                    if fieldname.startswith(':'):
                        # section field (':' prefix declares a section label)
                        label = fieldname[1:]
                        fname = '__%s' % slugify(label)
                        field = SectionField(label=label)
                        form.fields[fname] = field
                    else:
                        # regular field
                        try:
                            model_field = self.model._meta.get_field(fieldname)
                        except FieldDoesNotExist:
                            continue
                        # only include if the field is non-editable
                        if model_field.editable:
                            continue
                        # determine widgets based on type
                        kwargs = {}
                        if isinstance(model_field, models.DateTimeField):
                            kwargs['widget'] = DateInput()
                        field = model_field.formfield(**kwargs)
                        form.fields[fieldname] = field
            # we do not need to present an empty filter form...
            if len(form.fields) == 0:
                return None
            # make sure that all fields are:
            # - not required and
            # - do not have help text (unless it is a checkbox/radio)
            for fieldname, field in form.fields.items():
                # required
                field.required = False
                # remove initial so that the form does not start filtering.
                if hasattr(field, 'initial'):
                    field.initial = None
                # no help text unless checkbox or radio
                if not isinstance(field.widget, (forms.CheckboxInput, forms.RadioSelect)):
                    field.help_text = None
                # multiple-choices fields must have an empty value, unless
                # they are tags
                if hasattr(field, 'choices') and not hasattr(field, 'queryset') and 'select-tags' not in field.widget.attrs.get('class', ''):
                    # choices may already contain a choice for empty value
                    if not any(map(lambda v: v == '', [value for value, _ in field.choices])):
                        field.choices = [('', '-------')] + field.choices
                        field.initial = ''
                # make sure that choice fields with queryset have
                # an empty choice
                if isinstance(field, ModelChoiceField):
                    field.empty_label = '-------'
                # boolean fields are replaced with 3-value choice fields,
                # e.g. OFF, YES and NO.
                if isinstance(field, forms.BooleanField):
                    field = form.fields[fieldname] = forms.ChoiceField(
                        label=field.label,
                        required=False,
                        widget=forms.RadioSelect,
                        initial=unicode(args.get(fieldname)),
                        choices=(
                            ('None', 'Off'),
                            ('True', 'Yes'),
                            ('False', 'No')
                        )
                    )
                # field with class containing editable-html should be removed.
                # We do not want to have editable html fields in filter panel
                if 'editable-html' in field.widget.attrs.get('class', ''):
                    field.widget.attrs['class'] = field.widget.attrs['class'].replace('editable-html', '')
            # respect the order in which filter columns have been declared
            # (re-inserting moves the field to the end in declaration order)
            for fieldname in filter_by:
                if fieldname.startswith(':'):
                    fieldname = '__%s' % slugify(fieldname[1:])
                if fieldname in form.fields:
                    field = form.fields.get(fieldname)
                    del form.fields[fieldname]
                    form.fields[fieldname] = field
            # if we only have one field per section, then we do not need a
            # label for it...
            _fields = []
            _sections = []
            _second_seen = False
            for fieldname in filter_by:
                if fieldname.startswith(':'):
                    if _fields:
                        _sections.append(_fields)
                        _fields = []
                    _second_seen = True
                elif _second_seen:
                    _field = form.fields.get(fieldname)
                    if _field:
                        _fields.append(_field)
            if _fields:
                _sections.append(_fields)
            for _fields_per_section in _sections:
                if len(_fields_per_section) == 1:
                    for _field in _fields_per_section:
                        _field.no_label = True
            # prefix names with '_filter_' to avoid clashes with other
            # form fields on the page
            # NOTE(review): 'fields' below appears to be unused -- verify
            fields = {}
            for fieldname, field in form.fields.items():
                form.fields['_filter_%s' % fieldname] = field
                del form.fields[fieldname]
                if fieldname in form.initial:
                    form.initial['_filter_%s' % fieldname] = form.initial.get(fieldname)
                    del form.initial[fieldname]
        return form
def _get_url_name(self, name, namespace=False):
"""
Return the full url pattern name based on the url namespace and the
given name of the operation. if no namespace is used, the name of the
model is used as a prefix.
"""
if hasattr(self, 'namespace'):
if namespace:
return '%s.%s' % (self.namespace, name)
else:
return name
else:
return '%s.%s' % (self._model_name, name)
def _get_url(self, request, name, namespace=True, format=None, args=None, pk=None):
"""
Return the full url based on the given name. if no namespace is used,
the name of the model is used as a prefix. If we present in a dialog
window, all urls will contain the browse argument.
"""
# get full url name to lookup
if name.startswith('/'):
name = name[1:]
else:
name = self._get_url_name(name, namespace)
# split query arguments
if '?' in name:
name, query_string = name.split('?', 2)
else:
query_string = None
# resolve url and append browse/create arguments
url = reverse(name, args=args)
# append query string
if query_string:
args = parse_query_string(query_string)
url = url_with_args(url, args)
# are we in dialog mode for browse, create or edit?
index = request.GET.get('index-dialog', 'false') == 'true'
browse = request.GET.get('browse', 'false') == 'true'
create = request.GET.get('create', 'false') == 'true'
edit = request.GET.get('edit', 'false') == 'true'
frontend_editing = request.GET.get('frontend-editing', 'false') == 'true'
# append dialog mode
if index or browse or create or edit:
url = url_with_arg(url, 'dialog', 'true')
if index:
url = url_with_arg(url, 'index-dialog', 'true')
if browse:
url = url_with_arg(url, 'browse', 'true')
if create:
url = url_with_arg(url, 'create', 'true')
if edit:
url = url_with_arg(url, 'edit', 'true')
if frontend_editing:
url = url_with_arg(url, 'frontend-editing', 'true')
if format:
url = url_with_arg(url, 'f', format)
if pk is not None:
url = url_with_arg(url, 'pk', pk)
return url
    def get_urls(self, prefix=None):
        """
        Return a url patterns structure for all CRUD operations that this model
        view supports based on the given model and url namespace.
        Each pattern entry is (regex, handler method name, kwargs, url name).
        """
        # crud urls
        if not self.is_single_instance:
            _patterns = [
                ('', 'index', {}, 'index'),
                ('selector', 'selector', {}, 'selector'),
                ('seq/', 'seq', {}, 'seq'),
                ('create/', 'create_edit', {}, 'create'),
                ('delete/(?P<pk>[^/]+)/', 'delete', {}, 'delete'),
                ('delete/', 'delete', {}, 'delete'),
                ('edit/', 'create_edit', {'edit': True}, 'edit'),
                ('edit/(?P<pk>[^/]+)/', 'create_edit', {'edit': True}, 'edit'),
                ('duplicate/(?P<pk>[^/]+)/', 'duplicate', {}, 'duplicate'),
                ('duplicate/', 'duplicate', {}, 'duplicate'),
                ('disable/(?P<pk>[^/]+)/', 'disable', {}, 'disable'),
                ('disable/', 'disable', {}, 'disable'),
                ('enable/(?P<pk>[^/]+)/', 'enable', {}, 'enable'),
                ('enable/', 'enable', {}, 'enable'),
                ('import/', 'data_import', {}, 'data_import'),
                ('export/', 'data_export', {}, 'data_export'),
                ('save_changes/', 'save_changes', {}, 'save_changes'),
                ('merge/', 'merge', {}, 'merge'),
                ('tree-node-state/', 'tree_node_state', {}, 'tree_node_state'),
                ('move-tree-node/', 'move_tree_node', {}, 'move_tree_node'),
                ('move-to-tree-node/', 'move_to_tree_node', {}, 'move_to_tree_node'),
                ('get-tree/', 'get_tree', {}, 'get_tree'),
                ('delete_empty_folders/', 'delete_empty_folders', {}, 'delete_empty_folders'),
                ('side-panel-resize/', 'side_panel_resize', {}, 'side_panel_resize'),
            ]
        else:
            # single instance views only expose the edit form as the index
            _patterns = [
                ('', 'create_edit', {}, 'index'),
            ]
        # summary info
        _patterns += [
            ('summary-info/', 'summary_info', {}, 'summary_info'),
        ]
        # attach addition url patterns as defined with patterns (if present)
        _patterns += self.get_patterns()
        # generate crud url patterns (anchored and optionally prefixed)
        urls = self._get_urlpatterns([
            view_url(
                '^' + ('%s/' % prefix if prefix else '') + regex + '$',
                method_name,
                kwargs,
                self._get_url_name(name)
            ) for regex, method_name, kwargs, name in _patterns
        ])
        return urls
    def _get_create_url(self, request, session_prefix=''):
        """
        Return the url that is used for creating a new entity. The create url
        may encode the current folder id if folders are presented by this view,
        so that the currently selected folder is pre-selected when creating a
        new entity.
        """
        url = self._get_url(request, 'create', namespace=True)
        # pre-select the currently active folder (if folders are presented)
        if self.has_folders(request):
            url = url_with_arg(url, '%s_id' % self._get_folder_assignment_name(), self._get_active_folder_id(request, session_prefix))
        # embedded related listing: pre-select the related instance
        if self.related_instance and self.related_instance_attr:
            url = url_with_arg(url, '%s_id' % self.related_instance_attr, self.related_instance.pk)
        return url
def _get_object(self, request):
"""
Overridden by derived class in the case that the ModelView is used for
single instances: Returns one model instance that is controlled by this
model view.
"""
raise NotImplementedError(
('The derived class of ModelView \'%s\' must implement _get_object() ' + \
'if single_instance is True.') % self.__class__.__name__
)
def _get_objects(self, request):
"""
Overridden by derived class: Returns a queryset containing all possible
model entities that can be controlled by this model view. This may
be all entities, e.g. ModelClass.objects.all() or a subset that is
restricted perhaps by the current user, for example
ModelClass.objects.filter(owner=request.user).
"""
if hasattr(self, 'model') and hasattr(self.model, 'objects'):
return self.model.objects.all()
else:
raise NotImplementedError(
('A derived class of ModelView \'%s\' must implement ' + \
'_get_objects(). Model is not defined and a default ' + \
'implementation cannot be provided.') % self.__class__.__name__
)
    def _get_objects_or_404(self, request):
        """
        Overridden by derived class: Return a queryset containing all possible
        model entities that can be controlled by this model view whenever a
        particular model instance is requested - for example for the purpose
        of editing or deleting the instance.
        By default, the default get_objects() implementation is called.
        """
        return self._get_objects(request)
    def filter_acl(self, request, objects):
        """
        Return a queryset that conforms to the ACL rules of this model view.
        """
        return self.acl.filter(request, objects)
def _get_objects_base(self, request, related_instance_attr=None, related_instance_pk=None, get_object_or_404=False):
"""
Return a queryset that yields all available objects (base query).
"""
# get user's base query
if get_object_or_404:
objects = self._get_objects_or_404(request)
else:
objects = self._get_objects(request)
# staff members can only see items that belong to them
objects = self.filter_acl(request, objects)
# related listing (filter by instance we are editing)
if related_instance_attr:
if related_instance_pk:
objects = objects.filter(**{related_instance_attr: related_instance_pk})
else:
objects = objects.none()
return objects
    def _get_objects_for_seq(self, request):
        """
        Overridden by derived class: Returns a queryset containing all
        possible model entities that are controlled by this model view for
        the purpose of updating the seq. (sequence/ordering) value.
        """
        return self._get_objects_base(request)
def _get_folders(self, request, parent):
"""
Overridden by derived class: Returns a queryset containing all folders
for this view.
"""
folders = self.folder_model.objects.all()
if parent:
folders = folders.filter(parent=parent)
return folders
    def configure(self, request, instance=None, edit=False):
        """
        Configure this view in the context of a form that is being processed
        (embedded related listing). Determines the foreign key field that
        refers back to the related instance automatically, if it has not
        been declared explicitly.
        """
        if self.related_listing:
            self.related_instance = instance
            # find related instance field name automatically if we are a related
            # listing: the first ForeignKey pointing at the related
            # instance's class wins
            # NOTE(review): field.rel.to is the pre-Django-2.0 API for
            # accessing the target model of a ForeignKey -- verify the
            # Django version in use
            if self.related_instance_attr is None:
                for fieldname in get_model_field_names(self.model):
                    try:
                        field = self.model._meta.get_field(fieldname)
                        if isinstance(field, ForeignKey):
                            if field.rel.to == self.related_instance.__class__:
                                self.related_instance_attr = fieldname
                                break
                    except FieldDoesNotExist:
                        pass
def get_object_or_404(self, request, pk):
"""
Return one single objects with the given primary key pk. Please note
that the given primary key must be in the subset of the queryset that
is returned by self._get_objects().
If no such object exists, 404 is raised.
"""
try:
return self._get_objects_base(request, get_object_or_404=True).get(pk=pk)
except (ObjectDoesNotExist, ValueError):
raise Http404(
'Unknown primary key %s for %s.' % (pk, self._model_name)
)
    def _get_objects_by_ids(self, request, ids):
        """
        Return a queryset of all objects matching the given list of ids.
        """
        return self._get_objects_base(request).filter(pk__in=ids)
    def _get_folder_by_id(self, request, pk, parent=None):
        """
        Return the folder with the given primary key, or None if pk is
        empty or no such folder exists.
        """
        if not pk:
            return None
        try:
            return self._get_folders(request, parent).get(pk=pk)
        except self.folder_model.DoesNotExist:
            return None
def _get_folders_by_ids(self, request, pks, parent=None):
    """
    Return a list of folder instances with the given primary keys in the
    order in which the keys are given. Unknown keys are skipped; an
    empty or None pks yields an empty list.
    """
    if not pks:
        return []
    # in_bulk() fetches all matching folders with a single query,
    # returning a dict keyed by pk
    items = self._get_folders(request, parent).in_bulk(pks)
    # preserve the caller-given key order; drop keys without a match
    # (the previous implementation re-checked 'if pks:' although the
    # early return above already guarantees pks is non-empty)
    return [items[pk] for pk in pks if items.get(pk) is not None]
def _redirect(self, request, name, instance=None, active_tab=None, args=None):
    """
    Build a redirect response to the named sub-view of this model view
    ('index', 'create', 'update' or 'delete'), optionally appending the
    instance pk as a url argument and a hash fragment for the active
    tab.
    """
    # normalise the tab name into a '#'-prefixed fragment
    if active_tab and not active_tab.startswith('#'):
        active_tab = '#%s' % active_tab
    url = self._get_url(request, name, namespace=True, args=args)
    if instance:
        url = url_with_arg(url, 'pk', instance.pk)
    if active_tab:
        url += '%s' % active_tab
    return HttpResponseRedirect(url)
def user_has_permission(self, user, view=None, default=True):
    """
    Return True, if the given user has sufficient permissions to perform
    the given action on the current model; otherwise False.

    Resolution order: (1) the view's own 'can_<view>' attribute or
    callable may veto, (2) the model acl (superusers bypass this),
    (3) the user/model permission system.

    Please note that permissions are only checked if
    settings.CUBANE_BACKEND_PERMISSIONS is True; otherwise we only enforce
    staff membership.
    """
    # return false if the view itself does not allow it to begin with
    if view:
        if hasattr(self, 'can_%s' % view):
            _can = getattr(self, 'can_%s' % view)
            if callable(_can):
                _can = _can(user)
            # deliberately '== False': a result of None means
            # "no opinion" and must not veto the permission check
            if _can == False:
                return False
    # check model acl (superusers bypass the acl entirely)
    if not user.is_superuser:
        if not self.acl.can(view):
            return False
    # check the user/model permission system
    return _user_has_permission(user, self.model, view, default)
def _get_success_message(self, label, completed_task):
"""
Return a plain success message confirming that the given task
was completed on the given instance successfully.
"""
if self.is_single_instance:
return '<em>%s</em> %s successfully.' % (
self.model._meta.verbose_name,
completed_task
)
else:
return '%s <em>%s</em> %s successfully.' % (
self.model._meta.verbose_name,
label,
completed_task
)
def _get_template_path(self, request, handler, response):
"""
Return the full template path based on the base template path
(optional) and the name of the handler. If no template path is given,
the name of the model is used as a template path.
"""
# allow the template path to be overwritten on a per request basis
path = response.get('cubane_template_view_path')
# determine path
if not path:
name = '%s.html' % handler.__name__
if self.template_path:
path = os.path.join(self.template_path, name)
else:
path = os.path.join(self._model_name, name)
return path
def _get_form(self):
"""
Return the form that we are supposed to use for managing the model.
This is usually defined as a class method on the model (get_form) or
as the form property on the derived ModelView class.
"""
if hasattr(self, 'form'):
return self.form
elif hasattr(self.model, 'get_form'):
return self.model.get_form()
else:
raise ValueError(
("We do not know which form to use for processing model " +
"'%(name)s'. Please implement the class method 'get_form()' " +
"in model class '%(name)s' and return the form class " +
"to use for editing.") % {
'name': self.model.__name__
}
)
def _get_request_data(self, request):
"""
Returns request.POST if the request is a post, otherwise request.GET.
"""
return request.POST if request.method == 'POST' else request.GET
def _is_json(self, request):
"""
Return True, if the request is an ajax request or a sepcific argument
has been provided to force the output to be json.
"""
d = self._get_request_data(request)
f = d.get('f', None)
if request.is_ajax() and not f:
f = 'json'
return f == 'json'
def _is_ajax_html(self, request):
"""
Return True, if the request is an ajax request that specifically
defines html as the output format.
"""
d = self._get_request_data(request)
f = d.get('f', None)
return request.is_ajax() and f == 'html'
def _can_import(self):
    """
    Return True, if data import is enabled for this model via its
    'data_import' listing option.
    """
    import_enabled = get_listing_option(self.model, 'data_import', False)
    return import_enabled
def _can_export(self):
    """
    Return True, if data export is enabled for this model via its
    'data_export' listing option.
    """
    export_enabled = get_listing_option(self.model, 'data_export', False)
    return export_enabled
def _can_disable_enable(self):
    """
    Return True, if the model declares a 'disabled' field and records
    can therefore be disabled/enabled from the listing.
    """
    try:
        self.model._meta.get_field('disabled')
    except FieldDoesNotExist:
        return False
    return True
def _can_folder_model_create(self):
"""
Return True, if new folder model instances can be created from within
the folder tree view. By default: True, unless folder_model_create is
set.
"""
try:
return self.folder_model_create
except:
return True
def _supports_grid_view(self):
"""
Return True, if the model supports to be presented in an image-rich,
grid view that requires the media app. This view may override the model.
"""
if hasattr(self, 'grid_view'):
return self.grid_view
else:
return get_listing_option(self.model, 'grid_view', False)
def _supports_edit_view(self):
"""
Return True, if the model supports bulk editing mode.
"""
if hasattr(self, 'edit_view'):
return self.edit_view
else:
return get_listing_option(self.model, 'edit_view', False)
def _is_sortable(self, model):
    """
    Returns True, if the given model is sortable (drag and drop).

    Resolution order: the view's own 'sortable' attribute, then the
    model's 'is_sortable()' method, then the 'sortable' listing option
    (False by default).
    """
    # try view first, which may override model
    try:
        return self.sortable
    except AttributeError:
        pass
    # then ask the model itself
    # NOTE(review): an AttributeError raised *inside* is_sortable() is
    # swallowed here as well and silently falls through to the listing
    # option
    try:
        return model.is_sortable(self.model_attr_value)
    except AttributeError:
        pass
    # otherwise try the default sortable property within the
    # Listing meta class
    return get_listing_option(model, 'sortable', False)
def _update_with_highest_seq(self, request, instance):
    """
    Update given instance with the next available seq. number
    (starting with 1) and save it.
    """
    r = self._get_objects_base(request).aggregate(Max('seq'))
    max_seq = r.get('seq__max')
    # Max() aggregates to None for an empty queryset (or all-None seq
    # values), which previously crashed with 'None + 1'; treat that
    # case as zero so the first instance receives seq = 1
    instance.seq = (max_seq or 0) + 1
    instance.save()
def _get_order_by_arg(self, args, sortable):
    """
    Extract the ordering argument from the request and verify that the
    argument is correct. If no argument is given, return the default
    order as defined by the Meta class of the model. If the model is
    sortable, the default ordering is by 'seq'.

    Returns a tuple (order_by, reverse); order_by is None if no valid
    ordering could be determined.
    """
    # possible candidates for ordering. Only visible columns can be
    # used for sorting.
    candidates = self._get_model_column_names(self.model, 'list', searchable=True)
    if sortable and 'seq' not in candidates:
        candidates.append('seq')
    # extract order from request arguments or fall back to defaults
    order_by = args.get('o', None)
    reverse = args.get('ro', False) in [True, 'true', 'True', '1']
    if not order_by:
        # find the first entry of the model's default ordering that
        # matches the list of candidates we can order by
        found = False
        for _order_by in self.model._meta.ordering:
            order_by = _order_by.strip()
            reverse = order_by.startswith('-')
            # NOTE(review): replace('-', '') strips *all* dashes, not
            # just the leading one -- only safe while field names do
            # not contain dashes
            order_by = order_by.replace('-', '')
            if order_by in candidates:
                found = True
                break
        # if we cannot find anything, order by first visible column
        if not found:
            if sortable:
                order_by = 'seq'
            elif candidates:
                order_by = candidates[0]
            else:
                order_by = None
    # verify that the order we have is a valid one
    if order_by in candidates:
        return (order_by, reverse)
    else:
        return (None, False)
def _order_queryset(self, request, objects, order_by, reverse_order):
    """
    Order the given object queryset by given column (optionally reversed)
    and return a new queryset that expresses the applied order of items.

    When ordering by 'seq' in a folder-based listing, records are
    ordered by the seq. of their folder hierarchy first so that items
    appear in folder tree order.
    """
    if order_by:
        attr = self._get_folder_assignment_name()
        prefix = '-' if reverse_order else ''
        order = []
        if self.has_folders(request) and order_by == 'seq':
            # determine if folders have hierarchie
            if hasattr(self.folder_model, 'parent'):
                max_hierarchie = MAX_HIERARCHY_LEVELS + 1
            else:
                max_hierarchie = 0
            if self.has_multiple_folders():
                # determine the related name of the m2m through model in
                # order to traverse from objects to folders
                field = self.model._meta.get_field(attr)
                through_model = field.rel.through
                object_field_name = field.m2m_field_name()
                target_field_name = field.m2m_reverse_field_name()
                object_field = through_model._meta.get_field(object_field_name)
                related_name = object_field.rel.related_name
                # order by each folder hierarchy level, outermost
                # parent first
                for i in range(0, max_hierarchie):
                    order.append('%s%s__%s%s__%s' % (
                        prefix,
                        related_name,
                        target_field_name,
                        '__parent' * (max_hierarchie - i),
                        order_by
                    ))
                order.append('%s%s__%s' % (prefix, related_name, order_by))
                # materialise the resulting pk order and re-apply it via
                # a CASE expression, keeping the final queryset lazy and
                # filterable
                pks = [x.get('id') for x in objects.order_by(*order).values('id')]
                if pks:
                    preserved_order = Case(*[When(pk=pk, then=pos) for pos, pk in enumerate(pks)])
                    objects = objects.order_by(preserved_order)
            else:
                # single folder: traverse the direct folder assignment
                for i in range(0, max_hierarchie):
                    order.append('%s%s%s__%s' % (
                        prefix,
                        attr,
                        '__parent' * (max_hierarchie - i),
                        order_by
                    ))
                order.append('%s%s' % (prefix, order_by))
                objects = objects.order_by(*order)
        else:
            # arbitary field or no folders
            objects = objects.order_by('%s%s' % (prefix, order_by))
    return objects
def _search_filter_by_words(self, fieldname, words):
    """
    Build a Q expression matching records whose given field contains
    every one of the given words (case-insensitive AND).
    """
    lookup = '%s__icontains' % fieldname
    query = Q()
    for word in words:
        query &= Q(**{lookup: word})
    return query
def _search(self, objects, model, q):
    """
    Filter given queryset objects by given search query q, where all
    model properties are searched (only the ones defined by the model).
    """
    if q:
        fieldnames = self._get_model_column_names(model, 'list', searchable=True)
        # we may add additional field names to this list
        # based on Listing.searchable
        for fieldname in get_listing_option(model, 'searchable', []):
            if fieldname not in fieldnames:
                fieldnames.append(fieldname)
        if len(fieldnames) > 0:
            # split input query into separate words
            words = get_words(unicode(q), min_word_length=3, max_words=5, allow_digits=True)
            # build search query across all searchable fields (OR-ing
            # per-field AND-of-words expressions)
            f = Q()
            filtered = False
            searchable_fields = (CharField, TextField, EmailField)
            for fieldname in fieldnames:
                related = False
                try:
                    field = model._meta.get_field(fieldname)
                except FieldDoesNotExist:
                    # foreign key? resolve the field on the related model
                    field, related, rel_fieldname, rel_model, title = get_model_related_field(model, fieldname)
                    if field and rel_model:
                        try:
                            field = rel_model._meta.get_field(rel_fieldname)
                        except FieldDoesNotExist:
                            continue
                if field:
                    # only text-like fields are searchable
                    if isinstance(field, searchable_fields):
                        # NOTE(review): 'q' (the search query argument) is
                        # re-bound here to a Q expression; safe only
                        # because 'words' was derived from it beforehand
                        q = self._search_filter_by_words(fieldname, words)
                        if q:
                            filtered = True
                            f |= q
            if filtered:
                objects = objects.filter(f)
    return objects
def _get_filter_args(self, args):
"""
Return only arguments that are relevant to the filter form, which
names begin with the prefix 'f_'.
"""
d = dict()
for k, v in args.items():
if k.startswith('f_'):
d[k[2:]] = v
return d
def _filter(self, objects, args, filter_form):
    """
    Perform filter operation on given objects queryset based on given
    GET arguments and filter form. First, filters are added that are
    matching the model. Then the filter method on the model's manager
    is performed (if available) to perform additional custom filtering.
    """
    if filter_form:
        # filter by model fields
        filter_distinct = False
        for fieldname, field in filter_form.fields.items():
            # rewrite fieldname without prefix
            fieldname = fieldname.replace('_filter_', '')
            if fieldname in args:
                related = False
                value = args.get(fieldname)
                try:
                    field = self.model._meta.get_field(fieldname)
                except FieldDoesNotExist:
                    # related field? resolve the field on the target of
                    # the relation (field.rel.to is the pre-django-2.0
                    # api)
                    field, related, rel_fieldname, rel_model, _ = get_model_related_field(self.model, fieldname)
                    if field and related:
                        field = field.rel.to._meta.get_field(rel_fieldname)
                if field:
                    # filtering across a relation may produce duplicate
                    # rows; remember to de-duplicate at the end
                    if related:
                        filter_distinct = True
                    objects = self._filter_by_field(objects, fieldname, field, value)
        # custom filter (model)
        if hasattr(self.model, 'filter_by'):
            objects = self.model.filter_by(objects, args)
        # custom filter (form)
        if hasattr(filter_form, 'filter_by'):
            objects = filter_form.filter_by(objects, args)
        # we always want distinct records in the result
        # many to many may - based filters may inject duplicates
        if filter_distinct:
            objects = objects.distinct()
    return objects
def _filter_by_field(self, objects, fieldname, field, value):
    """
    Apply query filter for filtering the given field with the given value.
    The filter expression used depends on the type of the model field.
    """
    if isinstance(field, (TagsField, MultiSelectField)) and isinstance(value, list):
        # tags are stored as '#tag#'-delimited text; match every
        # selected tag (AND)
        esc = '#' if isinstance(field, TagsField) else ''
        q = Q()
        for tag in value:
            _tag = esc + tag + esc
            q &= Q(**{'%s__icontains' % fieldname: _tag})
        if len(q) > 0:
            objects = objects.filter(q)
    elif isinstance(field, TEXT_FIELDS):
        # free text: case-insensitive containment
        filtername = '%s__icontains' % fieldname
        objects = objects.filter(**{filtername: value})
    elif isinstance(field, ForeignKey):
        # match by primary key of the related record
        filtername = '%s__pk' % fieldname
        objects = objects.filter(**{filtername: value})
    elif isinstance(field, DateField):
        # value is expected in dd/mm/YYYY format; raises ValueError on
        # malformed input
        value = datetime.datetime.strptime(value, '%d/%m/%Y')
        objects = objects.filter(**{
            '%s__year' % fieldname: value.year,
            '%s__month' % fieldname: value.month,
            '%s__day' % fieldname: value.day
        })
    elif isinstance(field, ManyToManyField):
        # match any of the given (numeric) pks, silently ignoring
        # non-numeric input
        if not isinstance(value, list):
            value = [value]
        filtername = '%s__in' % fieldname
        value_arr = []
        for v in value:
            try:
                value_arr.append(int(v))
            except ValueError:
                pass
        value = value_arr
        if len(value) > 0:
            objects = objects.filter(**{filtername: value})
    else:
        # exact match for everything else
        objects = objects.filter(**{fieldname: value})
    return objects
def _get_listing_actions(self, request):
"""
Return a list of additional actions that can be performed on single or
multiple entities from the listing control. Additional buttons are
presented to perform those actions.
"""
try:
listing_actions = self.listing_actions
except:
listing_actions = []
actions = []
for action in listing_actions:
# title, dialog?
dialog = False
small_dialog = False
title = action[0].strip()
if title.startswith('[') and title.endswith(']'):
title = title[1:-1]
dialog = True
if title.startswith('/'):
small_dialog = True
title = title[1:]
# view, external?
view = action[1]
if view.startswith('/'):
external = True
else:
external = False
# url name
url_name = view
if '?' in view:
view, _ = view.split('?', 2)
actions.append({
'title': title,
'view': view,
'url': self._get_url(request, url_name, namespace=True),
'typ': action[2],
'method': action[3] if len(action) >= 4 else 'location',
'confirm': action[4] if len(action) >= 5 else False,
'dialog': dialog,
'small_dialog': small_dialog,
'external': external
})
return actions
def _inject_listing_actions(self, objects, listing_actions):
"""
Set listing action for each object. Some objects may not have an
action depending on its internal state.
"""
for obj in objects:
obj.listing_actions = []
for action in listing_actions:
if self._object_can_execute_listing_action(obj, action):
obj.listing_actions.append(action)
return objects
def _object_can_execute_listing_action(self, obj, action):
    """
    Return True, if the given object instance can execute the given
    action. Objects that do not implement 'can_execute_action' allow
    every action by default.
    """
    # NOTE(review): the bare except deliberately treats any failure
    # (including a missing 'can_execute_action' method) as "allowed";
    # narrowing it to AttributeError would surface genuine errors
    # raised inside can_execute_action -- confirm before changing
    try:
        return obj.can_execute_action(action)
    except:
        return True
def _get_shortcut_actions(self, listing_actions):
"""
Return a list of listing actions that are available for individual
records based on the list of listing actions. Each shortcut action
is presented for each item within the listing.
"""
try:
shortcut_actions = self.shortcut_actions
except:
shortcut_actions = []
actions = []
for action in listing_actions:
if action.get('view') in shortcut_actions and action.get('typ') in ['single', 'multiple', 'any']:
actions.append(action)
return actions
def _get_selector_model(self):
"""
Return the model of the entity that is used as a selector for cross
filtering against the main listing.
"""
if hasattr(self, 'selector_model'):
return self.selector_model
else:
return None
def _get_active_selector_session_name(self, session_prefix):
"""
Return the name of the session variable that is used for storing
the pk of the currently selected selector item. Selector items are
stored by selector model.
"""
selector_model = self._get_selector_model()
return '%sselector_filter_%s' % (
session_prefix,
selector_model.__name__
)
def _get_active_selector_pk(self, request, session_prefix=''):
    """
    Return the pk of the currently selected selector item or None.
    """
    session_name = self._get_active_selector_session_name(session_prefix)
    pk = request.session.get(session_name, None)
    # coerce the pk to int if the selector model's primary key is
    # numeric; unusable values resolve to None
    if pk and isinstance(self.selector_model._meta.pk, (IntegerField, AutoField)):
        try:
            pk = int(pk)
        except ValueError:
            pk = None
    return pk
def _set_active_selector_pk(self, request, pk, session_prefix=''):
"""
Store the given pk of the currently selected selector item in session.
"""
if pk != None:
session_name = self._get_active_selector_session_name(session_prefix)
request.session[session_name] = pk
def _get_active_folders_session_name(self, session_prefix):
"""
Return the name of the session variable that is used for storing
the folder id of the currently selected folder item. Active folders are
stored by model.
"""
return '%sfolders_id_%s' % (
session_prefix,
self.model.__name__
)
def _get_active_folder_ids(self, request, session_prefix=''):
"""
Return the folder_ids of the currently selected folders or None.
"""
session_name = self._get_active_folders_session_name(session_prefix)
folder_ids = request.session.get(session_name, None)
if folder_ids == None:
folder_ids = [-1]
if not isinstance(folder_ids, list):
folder_ids = [folder_ids]
ids = []
for _id in folder_ids:
try:
ids.append(int(_id))
except ValueError:
pass
if len(ids) == 0:
ids = [-1]
return ids
def _get_active_folder_id(self, request, session_prefix):
"""
Return the first folder id of the currently selected list of folders or
None.
"""
folder_ids = self._get_active_folder_ids(request, session_prefix)
if folder_ids:
return folder_ids[0]
else:
return None
def _set_active_folder_ids(self, request, folder_ids, session_prefix=''):
"""
Store the given folder ids of the currently selected folder nodes
in session.
"""
if folder_ids != None:
session_name = self._get_active_folders_session_name(session_prefix)
request.session[session_name] = folder_ids
def _get_open_folders_session_name(self):
"""
Return the name of the session variable that is used for storing
a list of all opoen folder tree nodes.
"""
return 'folder_ids_%s' % self.model.__name__
def _get_open_folders(self, request):
"""
Return a list of ids of open folder tree nodes.
"""
session_name = self._get_open_folders_session_name()
folder_ids = request.session.get(session_name, [])
_ids = []
if folder_ids:
for _id in folder_ids:
try:
_ids.append(int(_id))
except ValueError:
pass
return _ids
def _set_open_folders(self, request, folder_ids):
"""
Store the given list of folder tree node ids in the session.
"""
if folder_ids == None:
folder_ids = []
session_name = self._get_open_folders_session_name()
request.session[session_name] = folder_ids
def _get_model_selector(self, request, session_prefix=''):
    """
    Return template context for the selector side panel (objects plus
    the currently active selection) used for cross-filtering the main
    listing, or None if no selector model is configured.
    """
    selector_model = self._get_selector_model()
    if not selector_model:
        return None
    # derived classes may provide their own selector queryset
    if hasattr(self, '_get_selector_objects'):
        objects = self._get_selector_objects(request)
    else:
        objects = self.selector_model.objects.all()
    # apply the selector search query (argument 'sq')
    objects = self._search(objects, selector_model, request.GET.get('sq', None))
    return {
        'objects': objects,
        'active_pk': self._get_active_selector_pk(request, session_prefix)
    }
def _filter_by_selector(self, request, objects, session_prefix='', update_session=True):
    """
    Filter given main list of objects by selector argument. The selector
    argument is persistent between requests, so that we can select the
    selector filter once and it remains as such.
    """
    if hasattr(self, '_select_by'):
        selector_model = self._get_selector_model()
        # NOTE(review): filter_name is computed but never used below
        filter_name = 'selector_filter_%s' % selector_model.__name__
        pk = request.GET.get('s', None)
        if pk:
            # make sure that the pk of the selector item is an int
            # if the primary key is an integer
            field = self.selector_model._meta.pk
            if isinstance(field, IntegerField) or isinstance(field, AutoField):
                try:
                    pk = int(pk)
                except ValueError:
                    pk = None
        else:
            # try to obtain pk from session
            if update_session:
                pk = self._get_active_selector_pk(request, session_prefix)
            else:
                pk = None
        # store new pk in session
        if update_session:
            self._set_active_selector_pk(request, pk, session_prefix)
        # filter by selector; 0/'0' encodes "no selection"
        if pk and pk != 0 and pk != '0':
            # apply filter
            objects = self._select_by(objects, pk)
    return objects
def _filter_by_folders(self, request, objects, session_prefix='', update_session=True):
    """
    Filter given list of objects by given list of parent folders, if
    provided; otherwise return all objects that are not assigned to a
    folder yet.

    Returns a tuple (objects, folders, pks) of the filtered queryset,
    the matching folder instances and the applied folder pks.
    """
    folder_ids = None
    folders = None
    pks = None
    if self.has_folders(request):
        folder_ids = request_int_list(request.GET, 'folders[]')
        if not folder_ids:
            # try to obtain folder id from session
            if update_session:
                folder_ids = self._get_active_folder_ids(request, session_prefix)
            else:
                folder_ids = None
        # store new pk in session
        if update_session:
            self._set_active_folder_ids(request, folder_ids, session_prefix)
        # filter (-1 encodes "no folder selected")
        pks = folder_ids
        # NOTE(review): pks is a list (or None) at this point, so the
        # scalar comparison 'pks == -1' can never match -- presumably a
        # leftover from an earlier scalar version
        if pks == -1: pks = None
        if pks and pks[0] == -1: pks = None
        objects = self._folder_filter(request, objects, pks)
        folders = self._get_folders_by_ids(request, pks)
    return (objects, folders, pks)
def _folder_filter(self, request, objects, folder_pks):
    """
    Virtual: Filter given object queryset by the given folder primary key(s).
    Derived classes may override this; the default implementation simply
    delegates to _folder_filter_base().
    """
    return self._folder_filter_base(request, objects, folder_pks)
def _folder_filter_base(self, request, objects, folder_pks):
    """
    Filter given object queryset by the given folder primary key(s).
    Without folder pks, only objects without any folder assignment are
    returned (unless the view lists children).
    """
    attr_name = self._get_folder_assignment_name()
    if self.is_listing_children():
        if folder_pks:
            q = Q()
            has_parent_field = self.folder_has_parent_field(request)
            for pk in folder_pks:
                # match direct assignment...
                q |= Q(**{('%s__id' % attr_name): pk})
                if has_parent_field:
                    # ...or assignment anywhere up to five parent
                    # levels within the folder hierarchy
                    q |= Q(**{('%s__parent_id' % attr_name): pk}) | \
                         Q(**{('%s__parent__parent_id' % attr_name): pk}) | \
                         Q(**{('%s__parent__parent__parent_id' % attr_name): pk}) | \
                         Q(**{('%s__parent__parent__parent__parent_id' % attr_name): pk}) | \
                         Q(**{('%s__parent__parent__parent__parent__parent_id' % attr_name): pk})
            objects = objects.filter(q)
    else:
        if folder_pks:
            # match any of the given folders directly
            q = Q()
            for folder_pk in folder_pks:
                q |= Q(**{attr_name: folder_pk})
            objects = objects.filter(q)
        else:
            # no folders given -> objects without folder assignment
            objects = objects.filter(**{attr_name: None})
    return objects
def folder_has_parent_field(self, request):
    """
    Return True, if the folder model has a 'parent' field that can
    (potentially) point to parents.
    """
    if self.has_folders(request):
        # NOTE(review): this looks up the folder *assignment* name
        # (default 'parent') on the folder model -- confirm that a
        # customised assignment name is also meant to double as the
        # folder model's parent field name
        attr_name = self._get_folder_assignment_name()
        try:
            self.folder_model._meta.get_field(attr_name)
            return True
        except FieldDoesNotExist:
            pass
    return False
def _folder_assign(self, request, obj, dst, cur):
    """
    Assign the given destination folder dst to the given object,
    replacing its current folder assignment cur. A dst of None removes
    the assignment.
    """
    if self.has_multiple_folders():
        # get through model of the m2m folder assignment
        # (field.rel.through is the pre-django-2.0 api)
        field = self.model._meta.get_field(self._get_folder_assignment_name())
        through_model = field.rel.through
        object_field_name = field.m2m_field_name()
        target_field_name = field.m2m_reverse_field_name()
        # change existing assignment to given folder
        try:
            assignment = through_model.objects.filter(**{
                object_field_name: obj,
                ('%s__in' % target_field_name): cur
            })[0]
            if dst is not None:
                # already exists?
                try:
                    new_assignment = through_model.objects.filter(**{
                        object_field_name: obj,
                        ('%s__in' % target_field_name): [dst]
                    })[0]
                    # already exists -> delete current once, since we are
                    # moving assignment over...
                    assignment.delete()
                except IndexError:
                    # does not exist, save to change
                    setattr(assignment, target_field_name, dst)
                    assignment.save()
            else:
                # dst is None -> simply remove the current assignment
                assignment.delete()
        except IndexError:
            try:
                # no existing category -> add to the end of all existing items
                seq = self._folder_filter(request, self._get_objects_base(request), [dst.pk]).count() + 1
                with transaction.atomic():
                    item = through_model()
                    setattr(item, object_field_name, obj)
                    setattr(item, target_field_name, dst)
                    setattr(item, 'seq', seq)
                    item.save()
            except IntegrityError:
                # object already in this category, rare case when two people will move the same object to same category at same time
                pass
    else:
        # assign to new folder, keeping seq from old folder
        # NOTE(review): this branch only sets the attribute -- the
        # caller is expected to save obj
        setattr(obj, self._get_folder_assignment_name(), dst)
def _get_folder_assignment_name(self):
    """
    Return the name of the model field that folder assignments are
    stored on. Resolution order: the model's listing view option
    'folder_assignment_name', the view's 'folder_assignment_name'
    attribute, then 'parent' as the default.
    """
    folder_assignment_name = get_listing_view_option(self.model, 'folder_assignment_name')
    if folder_assignment_name is not None:
        return folder_assignment_name
    # narrowed from a bare 'except:' to AttributeError so genuine
    # errors from a property implementation are not masked
    try:
        return self.folder_assignment_name
    except AttributeError:
        return 'parent'
def _get_folder_title_name(self):
    """
    Return the name of the field that folders are sorted/titled by.
    Resolution order: the model's listing view option
    'folder_title_name', the view's 'folder_title_name' attribute, then
    'title' as the default.
    """
    folder_title_name = get_listing_view_option(self.model, 'folder_title_name')
    if folder_title_name is not None:
        return folder_title_name
    # narrowed from a bare 'except:' to AttributeError so genuine
    # errors from a property implementation are not masked
    try:
        return self.folder_title_name
    except AttributeError:
        return 'title'
def _get_index_session_name(self, session_prefix):
    """
    Return the session key used to store view-specific listing state,
    derived from the model's app label and class name.
    """
    name = '%slisting_%s_%s' % (
        session_prefix,
        slugify(self.model._meta.app_label),
        slugify(unicode(self.model.__name__))
    )
    return name.replace('-', '_')
def _get_index_args(self, request, session_prefix='', update_session=True):
"""
Return view arguments for the index view related to search, sorting and
filtering. Arguments are loaded from session, combined with new
arguments via request.GET and finally saved back to session.
"""
session_name = self._get_index_session_name(session_prefix)
if update_session:
args = request.session.get(session_name, {})
else:
args = {}
# update arguments from GET (overwriting data from session)
for k, v in request.GET.items():
if k.endswith('[]'):
args[k[:-2]] = request.GET.getlist(k)
else:
args[k] = v
# remove empty arguments and rewrite boolean values
d = dict()
for k, v in args.items():
if v in ['true', 'True']:
v = True
elif v in ['false', 'False']:
v = False
elif v in ['none', 'None']:
v = None
if v != '' and v != None:
d[k] = v
args = d
# save new arguments in session for next time
if update_session:
request.session[session_name] = args
return args
def _set_session_index_args(self, request, session_prefix, attr, value):
"""
Overwrite a particular attribute value for the index arguments that
may be stored within the session for the current view.
"""
session_name = self._get_index_session_name(session_prefix)
args = request.session.get(session_name, {})
args[attr] = value
request.session[session_name] = args
return args
def _get_objects_page(self, args, objects_total):
    """
    Return pagination state based on the 'page' argument ('all' or a
    1-based page number) as a tuple (current page, total pages,
    zero-based start index of the current page).
    """
    objects_page = args.get('page', '1')
    if objects_page != 'all':
        try:
            objects_page = int(objects_page)
        except ValueError:
            objects_page = 1
    # determine count of total pages; '//' (floor division) keeps the
    # result an integer under python 3 as well, where '/' would yield
    # a float and break the page arithmetic below
    objects_pages = objects_total // PAGINATION_MAX_RECORDS
    if objects_total % PAGINATION_MAX_RECORDS > 0:
        objects_pages += 1
    if objects_pages < 1:
        objects_pages = 1
    # clamp current page index into the valid range
    if objects_page != 'all':
        if objects_page < 1:
            objects_page = 1
        if objects_page > objects_pages:
            objects_page = objects_pages
    # zero-based index of the first record on the current page
    if objects_page != 'all':
        page_index = (objects_page - 1) * PAGINATION_MAX_RECORDS
    else:
        page_index = 0
    return objects_page, objects_pages, page_index
def _get_sidepanel_width(self, request, resize_panel_id):
"""
Return the side panel width.
"""
session_name = ('listing_%s_side_panel_width' % (
resize_panel_id
)).replace('-', '_')
return request.session.get(session_name, 240)
def _set_sidepanel_width(self, request, width, resize_panel_id):
"""
Set the side panel width.
"""
session_name = ('listing_%s_side_panel_width' % (
resize_panel_id
)).replace('-', '_')
try:
width = int(width)
except:
width = 180
request.session[session_name] = width
return width
def _get_form_initial(self, request):
    """
    Build form initial values from matching GET arguments. For every
    model field, either '<field>' or '<field>_id' may be supplied.
    """
    initial = {}
    for fieldname in get_model_field_names(self.model):
        value = request.GET.get(fieldname) or request.GET.get('%s_id' % fieldname)
        if value:
            initial[fieldname] = value
    return initial
def _get_view_identifier(self):
"""
Return a unique view identifier.
"""
try:
return self.view_identifier
except:
return ''
def _create_object_edit_form(self, request, formclass, instance, column_names, queryset_cache):
    """
    Create and return a new instance of an edit form for editing the
    given object (bulk edit mode), restricted to the given columns.
    """
    # create form as assign instance and initial data
    form = formclass()
    form.instance = instance
    form.initial = model_to_dict(instance, fetch_related=False, fields=column_names)
    # initial data
    self.bulk_form_initial(request, form.initial, instance, edit=True)
    # re-use cached querysets for ModelChoiceFields so per-object forms
    # do not re-query the database
    for fieldname, queryset in queryset_cache.items():
        form.fields[fieldname].queryset = queryset
    # configure form for edit
    form.configure(request, instance=instance, edit=True)
    # remove fields that we are not presenting
    # NOTE(review): deleting entries while iterating fields.items() is
    # only safe on python 2, where items() returns a list; on python 3
    # this raises RuntimeError -- confirm the target runtime
    for fieldname, _ in form.fields.items():
        if fieldname not in column_names:
            del form.fields[fieldname]
    return form
def _inject_edit_form(self, request, objects, columns):
    """
    Generate and attach an edit form to each given object (bulk edit
    mode), sharing one queryset cache for all ModelChoiceFields.
    """
    # get form class (the model's own form takes precedence)
    try:
        formclass = self.model.get_form()
    except:
        formclass = self._get_form()
    # construct cache of querysets within ModelChoice fields
    form = formclass()
    column_names = [c.get('fieldname') for c in columns]
    queryset_cache = {}
    for fieldname, field in form.fields.items():
        if fieldname in column_names:
            if isinstance(field, ModelChoiceField):
                # materialise once so every per-object form shares the
                # same result set instead of re-querying per record
                queryset_cache[fieldname] = MaterializedQuerySet(queryset=field.queryset)
    # create form for each object and attach it to the instance
    for instance in objects:
        instance.cubane_view_edit_form = self._create_object_edit_form(
            request,
            formclass,
            instance,
            column_names,
            queryset_cache
        )
def _is_dialog(self, request):
"""
Return True, if this request is made from within a dialog window.
"""
is_browse_dialog = request.GET.get('browse', 'false') == 'true'
is_index_dialog = request.GET.get('index-dialog', 'false') == 'true'
is_external_dialog = request.GET.get('external-dialog', 'false') == 'true'
is_frontend_editing = request.GET.get('frontend-editing', 'false') == 'true'
is_dialog = request.GET.get('dialog', 'false') == 'true'
return is_dialog or is_browse_dialog or is_index_dialog or is_external_dialog or is_frontend_editing
def _get_session_prefix(self, request):
"""
Return the session prefix based on the current request. Usually, we
will use a different session prefix within dialog windows.
"""
return 'dialog-' if self._is_dialog(request) else ''
def _has_actions(self, context):
"""
Determine if there are actions available for this view.
"""
permissions = context.get('permissions')
has_folders = context.get('has_folders')
model_is_folder = context.get('model_is_folder')
duplicate = permissions.get('view') and permissions.get('edit') and permissions.get('create')
_import = context.get('import') and permissions.get('import')
_export = context.get('export') and permissions.get('export')
clean = permissions.get('clean')
merge = permissions.get('merge')
changes = permissions.get('changes')
return duplicate or _import or _export or clean or merge or changes
def _open_in_new_window(self):
"""
Return True, if view/edit actions are suppose to open a new window.
"""
try:
return self.open_in_new_window
except:
return False
@view(permission_required('view'))
def index(self, request):
    """
    List all model instances.

    Builds the backend listing page: applies folder filtering, selector,
    free-text search, filter form, ordering and pagination, then renders
    one of the listing templates (list, list-compact, edit or grid) or
    returns JSON when a JSON response is requested.
    """
    self._detect_frontend_editing(request)
    # get content type
    content_type = ContentType.objects.get_for_model(self.model)
    # we use a different session prefix within dialog windows
    session_prefix = self._get_session_prefix(request)
    # get index listing arguments (session and/or GET)
    args = self._get_index_args(request, session_prefix)
    # related-listing arguments may override the view's defaults via GET
    related_listing = request.GET.get('r_listing', '1' if self.related_listing else '0') == '1'
    related_instance_pk = request.GET.get('r_pk', self.related_instance.pk if self.related_instance else None)
    related_instance_attr = request.GET.get('r_attr', self.related_instance_attr)
    # GET may deliver the literal string 'None'
    if related_instance_pk == 'None': related_instance_pk = None
    # in the context of a dialog window, close the dialog window once
    # we go back to an index page...
    if not related_listing and request.GET.get('index-dialog', 'false') == 'true':
        return {
            'close_index_dialog': True
        }
    # get base list of records
    base_objects = self._get_objects_base(request, related_instance_attr, related_instance_pk)
    # filter by folder (folders do not apply to related listings)
    if not related_instance_pk:
        objects, current_folders, folder_ids = self._filter_by_folders(request, base_objects, session_prefix)
    else:
        objects = base_objects
        current_folders = None
        folder_ids = None
    # count before search/filtering is applied (used for 'objects_filtered')
    objects_count = objects.count()
    # if we have an empty result and the folder model is the same as
    # the entity model, then we present the one folder instead rather
    # than an empty result, which makes working with folders much easier.
    if not related_instance_pk and folder_ids and objects_count == 0 and hasattr(self, 'model') and hasattr(self, 'folder_model') and self.model == self.folder_model:
        objects = base_objects.filter(pk__in=folder_ids)
        objects_count = 1
    # filter by selector
    if not related_instance_pk:
        objects = self._filter_by_selector(request, objects, session_prefix)
    # search (free-text query argument 'q')
    q = args.get('q', None)
    objects = self._search(objects, self.model, q)
    # filter (filter form arguments)
    filter_args = self._get_filter_args(args)
    filter_form = self._get_filter_form(request, filter_args)
    objects = self._filter(objects, filter_args, filter_form)
    # determine order (sortable requires edit permission)
    sortable = \
        self._is_sortable(self.model) and \
        self.user_has_permission(request.user, 'edit')
    reverse_order = False
    order_by, reverse_order = self._get_order_by_arg(args, sortable)
    objects = self._order_queryset(request, objects, order_by, reverse_order)
    # pagination and object count
    objects_total = objects.count()
    objects_page, objects_pages, page_index = self._get_objects_page(args, objects_total)
    # list of pages: a window of page numbers around the current page,
    # always including the first and last page
    if objects_page != 'all':
        objects_pages_list = [x for x in range(objects_page - PAGINATION_PAGES_WINDOW_SIZE, objects_page + PAGINATION_PAGES_WINDOW_SIZE + 1) if x >= 1 and x <= objects_pages]
    else:
        objects_pages_list = [x for x in range(1 - PAGINATION_PAGES_WINDOW_SIZE, 1 + PAGINATION_PAGES_WINDOW_SIZE + 1) if x >= 1 and x <= objects_pages]
    # NOTE(review): assumes objects_pages >= 1, so the window above is
    # never empty — verify _get_objects_page guarantees this.
    if objects_pages_list[0] != 1:
        objects_pages_list = [1] + objects_pages_list
    if objects_pages_list[-1] != objects_pages:
        objects_pages_list = objects_pages_list + [objects_pages]
    # paged result
    if objects_page != 'all':
        objects = objects[page_index:page_index + PAGINATION_MAX_RECORDS]
    # determine view template (list or grid); fall back to 'list' for
    # unknown or unsupported view modes
    default_view = self._get_default_view()
    view = args.get('v', default_view)
    if view not in ['list', 'list-compact', 'edit', 'grid']: view = 'list'
    if not self._supports_grid_view() and view == 'grid': view = 'list'
    if (not self._supports_edit_view() or related_instance_attr) and view == 'edit': view = 'list'
    template = 'cubane/backend/listing/listing_%s.html' % view
    # generate response
    if self._is_json(request):
        return to_json_response(
            objects,
            fields=get_model_field_names(self.model, json=True)
        )
    else:
        listing_actions = self._get_listing_actions(request)
        shortcut_actions = self._get_shortcut_actions(listing_actions)
        model_name = self.model._meta.verbose_name.title()
        selector = self._get_model_selector(request, session_prefix)
        # get columns
        columns = self._get_model_columns(self.model, view, listing_actions, related_instance_attr)
        # update select_related, if we have some foreign columns
        related_fields = self._get_related_fields_from_columns(columns)
        if len(related_fields) > 0:
            objects = objects.select_related(*related_fields)
        # materialise objects
        objects = list(objects)
        objects = self._inject_listing_actions(objects, shortcut_actions)
        # create edit forms for each object in edit mode
        if view == 'edit':
            self._inject_edit_form(request, objects, columns)
        # get folders
        folders = self.get_folders(request)
        has_folders = self.has_folders(request) and not related_listing
        context = {
            'related_listing': related_listing,
            'related_instance_pk': related_instance_pk,
            'related_instance_attr': related_instance_attr,
            'controls_visible': not related_instance_attr or (related_instance_attr and related_instance_pk),
            'q': q,
            'order_by': order_by,
            'reverse_order': reverse_order,
            'view': view,
            'grid_view': self._supports_grid_view(),
            'edit_view': self._supports_edit_view() and not related_listing,
            'template': template,
            'model': self.model,
            'model_name': model_name,
            'model_name_plural': self.model._meta.verbose_name_plural.title(),
            'model_is_folder': self.model_is_folder,
            'related_fields': related_fields,
            'view_identifier': self._get_view_identifier(),
            'has_folders': has_folders,
            'create_folder_url': self.get_folder_url(request, 'create'),
            'edit_folder_url': self.get_folder_url(request, 'edit'),
            'folder_model_name_singular': self.get_folder_model_name_singular(),
            'folder_model_name': self.get_folder_model_name(),
            'single_model_with_folders': self.model_is_folder,
            'folder_model_create': self._can_folder_model_create(),
            'folders': folders,
            'current_folders': current_folders,
            'current_folder': current_folders[0] if current_folders else None,
            'is_leaf_folder_view': self.is_leaf_folder_view(request, current_folders, objects_count),
            'folder_ids': folder_ids,
            'folder_id': folder_ids[0] if folder_ids else -1,
            'folder_assignment_name': '%s_id' % self._get_folder_assignment_name(),
            'objects': objects,
            'objects_count': objects_count,
            'objects_total': objects_total,
            'objects_filtered': objects_count - objects_total,
            'objects_pages': objects_pages,
            'objects_page': objects_page,
            'objects_pages_list': objects_pages_list,
            'import': self._can_import(),
            'export': self._can_export(),
            'disable_enable': self._can_disable_enable(),
            'verbose_name': self.model._meta.verbose_name,
            'verbose_name_plural': self.model._meta.verbose_name_plural,
            'filter_form': filter_form,
            'listing_actions': listing_actions,
            'sidepanel_folder_width': self._get_sidepanel_width(request, 'folders'),
            'sidepanel_filter_width': self._get_sidepanel_width(request, 'filter'),
            'permissions': {
                'create': self.user_has_permission(request.user, 'add'),
                'view': self.user_has_permission(request.user, 'view'),
                'edit': self.user_has_permission(request.user, 'edit'),
                'edit_or_view':
                    self.user_has_permission(request.user, 'edit') or
                    self.user_has_permission(request.user, 'view'),
                'delete': self.user_has_permission(request.user, 'delete'),
                'import': self.user_has_permission(request.user, 'import'),
                'export': self.user_has_permission(request.user, 'export'),
                'clean': (
                    self.user_has_permission(request.user, 'clean', settings.CUBANE_LISTING_DEFAULT_CLEAN) and
                    self.user_has_permission(request.user, 'delete') and
                    has_folders and
                    not self.model_is_folder
                ),
                'merge': self.user_has_permission(request.user, 'merge', settings.CUBANE_LISTING_DEFAULT_MERGE),
                'changes': not isinstance(self.model, ChangeLog)
            },
            'columns': columns,
            'urls': {
                'index': self._get_url(request, 'index', namespace=True, format='html'),
                'create': self._get_create_url(request, session_prefix),
                'edit': self._get_url(request, 'edit', namespace=True),
                'duplicate': self._get_url(request, 'duplicate', namespace=True),
                'delete': self._get_url(request, 'delete', namespace=True),
                'disable': self._get_url(request, 'disable', namespace=True),
                'enable': self._get_url(request, 'enable', namespace=True),
                'import': self._get_url(request, 'data_import', namespace=True),
                'export': self._get_url(request, 'data_export', namespace=True),
                'merge': self._get_url(request, 'merge', namespace=True),
                'selector': self._get_url(request, 'selector', namespace=True, format='html'),
                'seq': self._get_url(request, 'seq', namespace=True),
                'tree_node_state': self._get_url(request, 'tree_node_state', namespace=True),
                'move_tree_node': self._get_url(request, 'move_tree_node', namespace=True),
                'move_to_tree_node': self._get_url(request, 'move_to_tree_node', namespace=True),
                'get_tree': self._get_url(request, 'get_tree', namespace=True),
                'delete_empty_folders': self._get_url(request, 'delete_empty_folders', namespace=True),
                'save_changes': self._get_url(request, 'save_changes', namespace=True),
                'changes': reverse('cubane.backend.changelog.index') + ('?f_content_type=%d' % content_type.pk)
            },
            'sortable': sortable,
            'selector': selector,
            'filter_enabled': args.get('ff', False),
            'search': True,
            'listing_with_image': self.listing_with_image,
            'open_in_new_window': self._open_in_new_window()
        }
        # determine if there are actions available
        context['has_actions'] = self._has_actions(context)
        if self._is_ajax_html(request):
            return render(
                request,
                template,
                context
            )
        else:
            return context
@view(require_GET)
@view(permission_required('view'))
@view(template('cubane/backend/listing/selector.html'))
def selector(self, request):
    """
    List all model instances for the selector listing.

    Returns JSON if requested, otherwise template context for the
    selector template.
    """
    session_prefix = self._get_session_prefix(request)
    sel_model = self._get_selector_model()
    selector = self._get_model_selector(request, session_prefix)
    if not self._is_json(request):
        return {
            'selector': selector
        }
    return to_json_response(
        selector.get('objects', []),
        fields=get_model_field_names(sel_model)
    )
@view(require_POST)
@view(permission_required('edit'))
def seq(self, request):
    """
    Update element sequence (sortable).

    Receives the desired ordering of listing items via the 'item[]'
    POST argument and writes new 'seq' values (and 'updated_on'
    timestamps) to the affected records. Returns a JSON response
    indicating success and whether anything was actually changed.
    """
    # cannot update seq if model is not sortable
    if not self._is_sortable(self.model):
        return to_json_response({
            'success': False,
            'message': 'Model is not sortable.'
        })
    # get list of ids in the order in which they should be
    try:
        ids = [int(x) for x in request.POST.getlist('item[]')]
    except ValueError:
        return to_json_response({
            'success': False,
            'message': 'Unable to parse listing id argument as an integer value.'
        })
    # get current sorting order. We do not support reversed order
    # for 'seq' order
    order_by, reverse_order = self._get_order_by_arg(request.POST, sortable=True)
    if order_by == 'seq':
        reverse_order = False
    # get folder
    folder_ids = request_int_list(request.POST, 'folders')
    # base query
    objects = self._get_objects_base(request)
    objects = self._order_queryset(request, objects, order_by, reverse_order)
    # filter by folder
    folder_pks = folder_ids
    # NOTE(review): folder_pks is a list here, so this comparison with -1
    # is always False — presumably legacy; the list case is handled below.
    if folder_pks == -1: folder_pks = None
    if folder_pks and folder_pks[0] == -1: folder_pks = None
    if folder_pks:
        objects = self._folder_filter(request, objects, folder_pks)
    # pagination and object count
    objects = objects.distinct()
    objects_total = objects.count()
    objects_page, objects_pages, page_index = self._get_objects_page(request.POST, objects_total)
    # if the current order is not by 'seq', then we first apply the
    # general seq. by the current sorting order...
    if order_by != 'seq':
        # get list of all ids in the target seq
        all_ids = list(objects.values_list('id', flat=True))
        # merge existing ids we were given into the result, so that we have
        # a complete list of all identifiers in the required seq. order
        n = min(PAGINATION_MAX_RECORDS, len(ids))
        for j, i in enumerate(range(page_index, page_index + n)):
            all_ids[i] = ids[j]
        ids = all_ids
        start_index = 0
    else:
        # switch view order column back to 'seq'
        session_prefix = self._get_session_prefix(request)
        self._set_session_index_args(request, session_prefix, 'o', 'seq')
        start_index = page_index
    # get all items (seq and last-mod timestamp)
    if self.has_multiple_folders():
        # m2m folder assignment: the seq lives on the assignment
        # (through) record for the current folder, not on the object.
        attr = self._get_folder_assignment_name()
        field = self.model._meta.get_field(attr)
        # NOTE(review): through_model is assigned but never used below.
        through_model = field.rel.through
        target_field_name = field.m2m_reverse_field_name()
        items = {}
        for obj in objects:
            folders = getattr(obj, attr, None)
            if folders:
                for assignment in folders.all():
                    folder = getattr(assignment, target_field_name, None)
                    # NOTE(review): assumes folder_pks is not None in this
                    # branch — verify callers always post a folder list
                    # for m2m folder models.
                    if folder.pk in folder_pks:
                        items[obj.pk] = assignment
    else:
        items = objects.select_related().only('seq', 'updated_on').in_bulk(ids)
    # apply new seq.
    updated_on = datetime.datetime.now()
    updated = False
    for i, _id in enumerate(ids, start=start_index + 1):
        # only update if seq changed...
        item = items.get(_id)
        if item and item.seq != i:
            updated = True
            if self.has_multiple_folders():
                # seq is stored on the assignment record; the object
                # itself only receives a new timestamp
                item.seq = i
                item.save()
                self._get_objects_for_seq(request).filter(pk=_id).update(
                    updated_on=updated_on
                )
            else:
                self._get_objects_for_seq(request).filter(pk=_id).update(
                    seq=i,
                    updated_on=updated_on
                )
    # clear cache if we had to update at least one item
    if updated and 'cubane.cms' in settings.INSTALLED_APPS:
        from cubane.cms.views import get_cms
        cms = get_cms()
        cms.invalidate(verbose=False)
    # json response
    return to_json_response({
        'success': True,
        'updated': updated
    })
def form_initial(self, request, initial, instance, edit):
    """
    Hook: called before a form is created with initial data.

    Subclasses may modify the given ``initial`` dict in place; the
    return value is ignored. ``edit`` is True when editing an existing
    ``instance``.
    """
    pass
def bulk_form_initial(self, request, initial, instance, edit):
    """
    Hook: called before a form is created with initial data in bulk
    editing mode. Subclasses may modify ``initial`` in place.
    """
    pass
def form_configure(self, request, form, edit, instance):
    """
    Hook: called after the form has been configured to perform further
    form configuration if required (runs after ``form.configure``).
    """
    pass
def _instance_form_initial(self, request, initial, instance, edit):
"""
Call instance.form_initial if method is defined.
"""
if hasattr(instance, 'form_initial'):
return instance.form_initial(request, initial, instance, edit)
return False
def before_save(self, request, cleaned_data, instance, edit):
    """
    Hook: called before the given model instance is saved, receiving the
    form's cleaned data. Subclasses may mutate the instance here.
    """
    pass
def before_save_changes(self, request, cleaned_data, instance, changes, edit):
    """
    Hook: called before the given model instance is saved. ``changes``
    is a dict mapping field names to (old, new) value tuples.
    """
    pass
def before_bulk_save(self, request, cleaned_data, instance, edit):
    """
    Hook: called before the given model instance is saved as part of
    bulk editing.
    """
    pass
def after_save(self, request, cleaned_data, instance, edit):
    """
    Hook: called after the given model instance is saved.

    May return an HttpResponse, which (for non-ajax requests) is used
    as the response instead of the default redirect.
    """
    pass
def after_save_changes(self, request, cleaned_data, instance, changes, edit):
    """
    Hook: called after the given model instance is saved. ``changes``
    is a dict mapping field names to (old, new) value tuples. May
    return an HttpResponse to override the default redirect.
    """
    pass
def after_bulk_save(self, request, cleaned_data, instance, edit):
    """
    Hook: called after the given model instance is saved as part of
    bulk editing.
    """
    pass
def before_delete(self, request, instance):
    """
    Hook: called before the given model instance is deleted.
    """
    pass
def after_delete(self, request, instance):
    """
    Hook: called after the given model instance has been deleted.
    """
    pass
def _instance_before_save(self, request, d, instance, edit):
"""
Call instance.before_save if method is defined.
"""
if hasattr(instance, 'before_save'):
return instance.before_save(request, d, instance, edit)
return False
def _instance_before_save_changes(self, request, d, instance, changes, edit):
"""
Call instance.before_save_changes if method is defined.
"""
if hasattr(instance, 'before_save_changes'):
return instance.before_save_changes(request, d, instance, changes, edit)
return False
def _instance_before_bulk_save(self, request, d, instance, edit):
"""
Call instance.before_bulk_save if method is defined.
"""
if hasattr(instance, 'before_bulk_save'):
return instance.before_bulk_save(request, d, instance, edit)
return False
def _instance_after_save(self, request, d, instance, edit):
"""
Call instance.after_save if method is defined.
"""
if hasattr(instance, 'after_save'):
return instance.after_save(request, d, instance, edit)
return False
def _instance_after_save_changes(self, request, d, instance, changes, edit):
"""
Call instance.after_save_changes if method is defined.
"""
if hasattr(instance, 'after_save_changes'):
return instance.after_save_changes(request, d, instance, changes, edit)
return False
def _instance_after_bulk_save(self, request, d, instance, edit):
"""
Call instance.after_bulk_save if method is defined.
"""
if hasattr(instance, 'after_bulk_save'):
return instance.after_bulk_save(request, d, instance, edit)
return False
def _apply_frontend_editing_to_from(self, request, form):
    """
    Configure the given form for frontend editing, if frontend editing
    is configured and requested.
    """
    if not settings.CUBANE_FRONTEND_EDITING:
        return
    if request.GET.get('frontend-editing', 'false') != 'true':
        return
    property_names = request.GET.get('property-names')
    if not property_names:
        return
    # ':'-separated list of field names; ignore empty entries
    names = [p.strip() for p in property_names.split(':')]
    names = [name for name in names if name]
    if not (len(names) == 1 and names[0] == '*'):
        # remove all fields that are not relevant
        for fieldname in form.fields.keys():
            if fieldname not in names:
                form.remove_field(fieldname)
    # update layout and sections
    form.layout = FormLayout.FLAT
    form.remove_tabs()
    form.update_sections()
def _detect_frontend_editing(self, request):
    """
    Detect if the given request is frontend editing; stores the result
    on the request as 'frontend_editing' and returns it.
    """
    requested = request.GET.get('frontend-editing', 'false') == 'true'
    request.frontend_editing = settings.CUBANE_FRONTEND_EDITING and requested
    return request.frontend_editing
def _apply_acl_to_form(self, request, form):
    """
    Ensure that ACL rules for model querysets are applied for the given
    form instance.
    """
    for field in form.fields.values():
        if not hasattr(field, 'queryset'):
            continue
        acl = Acl.of(field.queryset.model)
        if acl:
            field.queryset = acl.filter(request, field.queryset)
def _get_changes(self, changes):
"""
Return a dictionary containing all changes made based on the change
data provided by the change log system.
"""
d = {}
for entry in changes:
name = entry.get('n')
old = entry.get('a')
new = entry.get('b')
d[name] = (old, new)
return d
def create_edit(self, request, pk=None, edit=False):
    """
    Create a new instance or edit an existing model instance with given
    primary key pk.
    """
    if not edit:
        return self._create(request)
    if request.method == 'POST':
        return self._edit(request, pk)
    return self._view(request, pk)
def summary_info(self, request):
    """
    Return summary information for the given instance (identified by
    the 'pk' GET argument).
    """
    instance = self.get_object_or_404(request, request.GET.get('pk'))
    summary_url = self._get_url(request, 'summary_info', namespace=True)
    return {
        'object': instance,
        'urls': {
            'summary_info': summary_url
        },
        'object_summary_items': self.get_object_summary_items(instance),
        'cubane_template_view_path': 'cubane/backend/summary_info.html'
    }
def duplicate(self, request, pk=None):
    """
    Duplicate an existing instance and edit the copy.
    """
    # delegate to the internal handler, which enforces permissions
    return self._duplicate(request, pk)
@view(permission_required('add'))
def _create(self, request):
    # create a new record; 'add' permission is enforced by the decorator
    return self._create_edit(request)
@view(permission_required('view'))
def _view(self, request, pk):
    # open an existing record with only 'view' permission required
    return self._create_edit(request, pk, True)
@view(permission_required('edit'))
def _edit(self, request, pk):
    # edit an existing record; requires 'edit' permission
    return self._create_edit(request, pk, True)
@view(permission_required('add'))
@view(permission_required('view'))
def _duplicate(self, request, pk):
    # duplicate an existing record; requires both 'add' and 'view'
    return self._create_edit(request, pk, False, duplicate=True)
def _create_edit(self, request, pk=None, edit=False, duplicate=False):
    """
    Create a new instance or edit an existing model instance with given
    primary key pk. This is the actual implementation of the view handler.

    Handles form construction and validation, duplication, change
    tracking via the changelog, before/after save hooks and signals,
    and the various response modes (JSON/ajax, frontend editing,
    dialog windows, redirect or template context).
    """
    self._detect_frontend_editing(request)
    # cancel?
    if request.POST.get('cubane_form_cancel', '0') == '1':
        return self._redirect(request, 'index')
    # id argument?
    if (edit or duplicate) and not pk:
        if not 'pk' in request.GET:
            raise Http404("Missing argument 'pk'.")
        pk = request.GET.get('pk')
    # single instance?
    instance = None
    if self.is_single_instance:
        instance = self._get_object(request)
        pk = instance.pk if instance else None
        edit = instance != None
    # get existing or create new object
    if edit or duplicate:
        if not instance:
            instance = self.get_object_or_404(request, pk)
        # ajax GET?
        if request.method == 'GET' and self._is_json(request):
            return to_json_response(
                instance,
                fields=get_model_field_names(self.model, json=True)
            )
        fetch_related = get_listing_option(self.model, 'fetch_related', False)
        initial = model_to_dict(instance, fetch_related)
    else:
        instance = self.model()
        initial = self._get_form_initial(request)
    # pre-configure instance model type if available
    self._configure_model_backend_section(instance)
    # if duplicate set parent
    if duplicate and hasattr(instance, 'parent'):
        initial['parent'] = instance.parent
    # if we have a model form, pass in the current instance
    formclass = self._get_form()
    if edit and issubclass(formclass, ModelForm):
        kwargs = {'instance': instance}
    else:
        kwargs = {}
    # create form
    if request.method == 'POST':
        form = formclass(request.POST, request.FILES, **kwargs)
    else:
        # GET: give view and instance hooks a chance to adjust initial data
        self.form_initial(request, initial, instance, edit)
        self._instance_form_initial(request, initial, instance, edit)
        form = formclass(initial=initial, **kwargs)
    # remove pk and dates if duplication
    if duplicate:
        instance.pk = None
        if hasattr(instance, 'created_on'):
            instance.created_on = None
        if hasattr(instance, 'updated_on'):
            instance.updated_on = None
    # configure form
    if not hasattr(form, 'configure'):
        raise NotImplementedError(
            ('The form %s must implement ' +
            'configure(request, edit, instance) in order to comply with ' +
            'the model view %s.') % (
                self._get_form().__name__,
                self.__class__.__name__
            )
        )
    form.is_duplicate = duplicate
    form.is_embedded = False
    form.parent_form = None
    form.parent_instance = None
    form.view = self
    form.configure(request, edit=edit, instance=instance)
    self.form_configure(request, form, edit, instance)
    # scope form for frontend editing
    self._apply_frontend_editing_to_from(request, form)
    # make sure that ACL rules are enforced on any querysets that are part
    # of the form
    self._apply_acl_to_form(request, form)
    # keep copy of original instance before we are changing it for the
    # purpose of detecting changes made
    previous_instance = request.changelog.get_changes(instance)
    # validate form
    if request.method == 'POST' and form.is_valid():
        # update properties in model instance
        d = form.cleaned_data
        # set creator/updater
        if not request.user.is_anonymous():
            if edit and isinstance(instance, DateTimeBase):
                instance.updated_by = request.user
            if not edit and isinstance(instance, (
                DateTimeReadOnlyBase,
                DateTimeBase
            )):
                instance.created_by = request.user
        # duplication? -> Remove pk to create a copy and tell model instance
        # we also remove created_on and updated_on
        if duplicate:
            if hasattr(instance, 'on_duplicated'):
                instance.on_duplicated()
        # save model instance
        before_cms_save.send_robust(
            sender=self.model,
            request=request,
            cleaned_form_data=d,
            model_instance=instance,
            was_edited=edit
        )
        # before save handlers
        changes = self._get_changes(request.changelog.get_changes(instance, previous_instance))
        self.before_save(request, d, instance, edit)
        self.before_save_changes(request, d, instance, changes, edit)
        self._instance_before_save(request, d, instance, edit)
        self._instance_before_save_changes(request, d, instance, changes, edit)
        # make changes
        save_model(d, instance)
        # create only: update seq if model is sortable
        if not edit and self._is_sortable(self.model):
            self._update_with_highest_seq(request, instance)
        # post save handler; changes are re-computed after saving
        changes = self._get_changes(request.changelog.get_changes(instance, previous_instance))
        custom_response = None
        # after_save()
        _custom_response = self.after_save(request, d, instance, edit)
        if _custom_response: custom_response = _custom_response
        # after_save_changes()
        _custom_response = self.after_save_changes(request, d, instance, changes, edit)
        if _custom_response: custom_response = _custom_response
        # instance after_save()
        _custom_response = self._instance_after_save(request, d, instance, edit)
        if _custom_response: custom_response = _custom_response
        # instance after_save_changes()
        _custom_response = self._instance_after_save_changes(request, d, instance, changes, edit)
        if _custom_response: custom_response = _custom_response
        # send signal
        after_cms_save.send_robust(
            sender=self.model,
            request=request,
            cleaned_form_data=d,
            model_instance=instance,
            was_edited=edit
        )
        # create success message
        message = self._get_success_message(
            unicode(instance),
            'duplicated' if duplicate else 'updated' if edit else 'created'
        )
        # commit changelog
        if edit and not duplicate:
            request.changelog.edit(instance, previous_instance)
        else:
            request.changelog.create(instance)
        change = request.changelog.commit(message, instance)
        # ajax operation, simply return success and message information
        if request.is_ajax():
            return to_json_response({
                'success': True,
                'message': message,
                'change': change,
                'next': self.get_index_url_or(request, 'edit', instance),
                'instance_id': instance.pk,
                'instance_title': unicode(instance)
            })
        # save frontend editing form
        if request.GET.get('frontend-editing', 'false') == 'true':
            return {
                'frontend_editing_id': instance.pk
            }
        # if this is a create operation within a dialog window
        # which was initiated by clicking the '+' button,
        # we pass on information on the entity that was just created
        # which will then generate javascript code in the template that
        # will close the modal dialog window
        if request.GET.get('create', 'false') == 'true':
            return {
                'dialog_created_id': instance.pk,
                'dialog_created_title': unicode(instance)
            }
        # if this is an edit operation within a dialog window
        # which was initiated by clicking an annotated edit button,
        # we pass on information on the entity that was edited
        # which will then generate javascript code in the template that
        # will close the modal dialog window.
        if request.GET.get('edit', 'false') == 'true':
            return {
                'dialog_edited_id': instance.pk
            }
        # return to listing or stay on edit page or return custom response
        # if available
        if custom_response and isinstance(custom_response, HttpResponse):
            return custom_response
        else:
            return self.redirect_to_index_or(request, 'edit', instance)
    elif request.is_ajax():
        # invalid form via ajax -> report form errors as JSON
        return to_json_response({
            'success': False,
            'errors': form.errors
        })
    # summary items
    object_summary_items = self.get_object_summary_items(instance)
    return {
        'edit': edit,
        'create_edit_page': True,
        'object': instance,
        'form': form,
        'verbose_name': self.model._meta.verbose_name,
        'permissions': {
            'create': self.user_has_permission(request.user, 'add'),
            'view': self.user_has_permission(request.user, 'view'),
            'edit': self.user_has_permission(request.user, 'edit')
        },
        'urls': {
            'summary_info': self._get_url(request, 'summary_info', namespace=True)
        },
        'object_summary_items': object_summary_items
    }
def get_object_summary_items(self, instance):
    """
    Return summary items for the given instance, or an empty dict if
    the instance does not provide 'summary_items'.
    """
    try:
        return instance.summary_items
    except AttributeError as e:
        # only swallow the error if the attribute itself is missing;
        # an AttributeError raised from within a summary_items
        # implementation is re-raised.
        if 'summary_items' not in unicode(e):
            raise
        return {}
def get_index_url_or(self, request, name, instance):
    """
    Return the next url which is usually the index url or the url
    based on the given name in case of continuation.

    If "Save and Continue" was used, stay on the view identified by the
    given name (with the instance pk as url argument); otherwise fall
    back to the index page.
    """
    # NOTE: the previous revision assigned a dead local 'active_tag'
    # (typo for active_tab, never read); that line has been removed.
    active_tab = request.POST.get('cubane_save_and_continue', '0')
    if not active_tab == '0':
        # single view may not have edit
        if name == 'edit' and getattr(self, 'single_instance', False):
            name = 'index'
    else:
        name = 'index'
        instance = None
    url = self._get_url(request, name, namespace=True)
    if instance:
        url = url_with_arg(url, 'pk', instance.pk)
    return url
def redirect_to_index_or(self, request, name, instance):
    """
    Redirect to index after we saved or stay on the current page if
    we clicked on "Save and Continue".
    """
    active_tab = request.POST.get('cubane_save_and_continue', '0')
    if active_tab == '0':
        return self._redirect(request, 'index')
    # single view may not have edit
    if name == 'edit' and getattr(self, 'single_instance', False):
        name = 'index'
    return self._redirect(request, name, instance, active_tab)
@view(require_POST)
@view(permission_required('delete'))
def delete(self, request, pk=None):
    """
    Delete existing model instance with given primary key pk or (if no
    primary key is given in the url) attempt to delete multiple entities
    that are given by ids post argument.

    Fires before/after delete hooks, records deletions in the changelog
    and commits a single changelog entry for the whole operation.
    """
    # determine list of pks (url pk, 'pks[]' list or single 'pk' POST arg)
    pks = []
    if pk:
        pks = [pk]
    else:
        pks = request.POST.getlist('pks[]', [])
        if len(pks) == 0:
            pk = request.POST.get('pk')
            if pk:
                pks = [pk]
    # delete actual instance
    folder_model = self.get_folder_model()
    def _delete_instance(instance):
        # deleting a folder should delete all children first
        # (recursive via the 'parent' relation)
        if self.model == folder_model and isinstance(instance, folder_model):
            for child in self._get_objects_base(request).filter(parent=instance):
                _delete_instance(child)
        # delete instance itself
        self.before_delete(request, instance)
        request.changelog.delete(instance)
        instance.delete()
        self.after_delete(request, instance)
    # delete instance(s)...
    if len(pks) == 1:
        instance = self.get_object_or_404(request, pks[0])
        label = instance.__unicode__()
        if not label: label = '<no label>'
        _delete_instance(instance)
        message = self._get_success_message(label, 'deleted')
    else:
        instances = self._get_objects_base(request).filter(pk__in=pks)
        for instance in instances:
            _delete_instance(instance)
        message = '%d %s deleted successfully.' % (
            len(instances),
            self.model._meta.verbose_name_plural
        )
    # commit changelog
    change = request.changelog.commit(message, flash=False)
    # response
    if self._is_json(request):
        return to_json_response({
            'success': True,
            'message': message,
            'change': change
        })
    else:
        request.changelog.add_message(messages.SUCCESS, message, change)
        return self._redirect(request, 'index')
@view(require_POST)
@view(permission_required('edit'))
def disable(self, request, pk=None):
    """
    Disable existing model instance with given primary key pk or (if no
    primary key is given in the url) attempt to disable multiple entities
    that are given by ids post argument.
    """
    # determine list of pks
    pks = [pk] if pk else request.POST.getlist('pks[]', [])
    # disable instance(s)...
    if len(pks) == 1:
        instance = self.get_object_or_404(request, pks[0])
        label = instance.__unicode__() or '<no label>'
        instance.disabled = True
        instance.save()
        message = self._get_success_message(label, 'disabled')
    else:
        instances = self._get_objects_base(request).filter(pk__in=pks)
        for instance in instances:
            instance.disabled = True
            instance.save()
        message = '%d %s disabled successfully.' % (
            len(instances),
            self.model._meta.verbose_name_plural
        )
    # response
    if not self._is_json(request):
        messages.add_message(request, messages.SUCCESS, message)
        return self._redirect(request, 'index')
    return to_json_response({
        'success': True,
        'message': message
    })
@view(require_POST)
@view(permission_required('edit'))
def enable(self, request, pk=None):
    """
    Enable existing model instance with given primary key pk or (if no
    primary key is given in the url) attemt to enable multiple entities
    that are given by ids post argument.
    """
    # determine list of pks
    pks = [pk] if pk else request.POST.getlist('pks[]', [])
    # disable instance(s)...
    if len(pks) == 1:
        instance = self.get_object_or_404(request, pks[0])
        label = instance.__unicode__() or '<no label>'
        instance.disabled = False
        instance.save()
        message = self._get_success_message(label, 'enabled')
    else:
        instances = self._get_objects_base(request).filter(pk__in=pks)
        for instance in instances:
            instance.disabled = False
            instance.save()
        message = '%d %s enabled successfully.' % (
            len(instances),
            self.model._meta.verbose_name_plural
        )
    # response
    if not self._is_json(request):
        messages.add_message(request, messages.SUCCESS, message)
        return self._redirect(request, 'index')
    return to_json_response({
        'success': True,
        'message': message
    })
@view(csrf_exempt)
@view(template('cubane/backend/listing/upload_with_encoding.html'))
@view(permission_required('import'))
def data_import(self, request):
    """
    Provides the ability to import data (CSV file upload with a
    selectable character encoding).
    """
    if request.method == 'POST':
        form = DataImportForm(request.POST, request.FILES)
    else:
        form = DataImportForm()
    form.configure(request)
    if request.method == 'POST' and form.is_valid():
        d = form.cleaned_data
        # import the uploaded CSV stream using the view's model, form
        # and base queryset
        # (NOTE: the previous revision assigned an unused local 'i = 0'
        # here, which has been removed.)
        importer = Importer(
            self.model,
            self._get_form(),
            self._get_objects_base(request),
            request.user,
            encoding=d.get('encoding', 'utf-8')
        )
        i_success, i_error = importer.import_from_stream(
            request,
            request.FILES['csvfile']
        )
        # present general information what happened during import
        typ = messages.SUCCESS if i_error == 0 else messages.ERROR
        message = '<em>%d</em> records imported, <em>%d</em> errors occurred.' % (
            i_success,
            i_error
        )
        messages.add_message(request, typ, message)
        return self._redirect(request, 'index')
    return {
        'form': form
    }
@view(csrf_exempt)
@view(permission_required('export'))
def data_export(self, request):
    """
    Export data and provide a CSV file download.

    If a list of primary keys is posted (``pks[]``), only the matching
    records are exported; otherwise all records of the base queryset
    are exported.
    """
    # determine objects to export (previous version constructed the base
    # queryset twice; the first result was discarded).
    objects = self._get_objects_base(request)
    pks = request.POST.getlist('pks[]', [])
    if len(pks) > 0:
        objects = objects.filter(pk__in=pks)
    # export as CSV download with a timestamped, uniform filename
    exporter = Exporter(self.model, encoding=request.GET.get('encoding', 'utf-8'))
    filename = to_uniform_filename(
        self.model._meta.verbose_name_plural,
        with_timestamp=True,
        ext='.csv'
    )
    return exporter.export_to_response(objects, filename)
@view(require_POST)
@view(permission_required('edit'))
def save_changes(self, request):
    """
    Save all changes made in edit mode at once and return some information
    about form errors if there are any. If there is any error for any record
    then no information is saved at all.
    """
    ids = request.POST.getlist('ids[]')
    listing_actions = self._get_listing_actions(request)
    columns = self._get_model_columns(self.model, 'edit', listing_actions)
    column_names = [c.get('fieldname') for c in columns]
    errors = {}
    records = []
    # first pass: validate every record without saving anything, so a
    # single invalid record rejects the whole batch.
    for pk in ids:
        form_data = request.POST.get('pk-%s' % pk, None)
        if form_data:
            # get instance
            instance = self.model.objects.get(pk=pk)
            # keep original state of instance (for the changelog / undo)
            previous_instance = request.changelog.get_changes(instance)
            # determine form class; model forms receive the instance
            formclass = self._get_form()
            if issubclass(formclass, forms.ModelForm):
                kwargs = {'instance': instance}
            else:
                kwargs = {}
            # parse initial form data for the given record
            initial = parse_query_string(form_data)
            # enforce arrays for certain form fields: multi-value fields
            # that were submitted with a single value
            for k, v in initial.items():
                if not isinstance(v, list):
                    field = formclass.base_fields.get(k)
                    if field and isinstance(field, (ModelCollectionField, MultiSelectFormField)):
                        initial[k] = [v]
            # create form
            form = formclass(initial, **kwargs)
            # configure form
            if hasattr(form, 'configure'):
                form.is_duplicate = False
                form.is_embedded = False
                form.parent_form = None
                form.parent_instance = None
                form.view = self
                form.configure(request, edit=True, instance=instance)
            # remove fields that were not presented in the listing.
            # iterate over a snapshot of the keys: deleting from the dict
            # while iterating ``.items()`` raises RuntimeError on Python 3.
            for fieldname in list(form.fields.keys()):
                if fieldname not in column_names:
                    del form.fields[fieldname]
            # validate form
            if form.is_valid():
                d = form.cleaned_data
                records.append( (d, instance, previous_instance) )
            else:
                errors['pk-%s' % pk] = form.errors
    # second pass: save data if we have no errors and support undo
    if not errors:
        n = 0
        for d, instance, previous_instance in records:
            # before save hooks
            self.before_bulk_save(request, d, instance, edit=True)
            self._instance_before_bulk_save(request, d, instance, edit=True)
            # save
            save_model(d, instance)
            # after save hooks
            self.after_bulk_save(request, d, instance, edit=True)
            self._instance_after_bulk_save(request, d, instance, edit=True)
            # generate changelog entry
            request.changelog.edit(instance, previous_instance)
            n += 1
        # commit changes as one changelog entry
        message = pluralize(n, [self.model._meta.verbose_name, self.model._meta.verbose_name_plural], 'saved successfully.', tag='em')
        change = request.changelog.commit(message, model=self.model, flash=False)
        return to_json_response({
            'success': True,
            'message': message,
            'change': change
        })
    return to_json_response({
        'success': not errors,
        'errors': errors
    })
@view(permission_required('merge'))
def merge(self, request):
    """
    Merge multiple instances together into one instance.

    The first posted pk is the merge target; all remaining pks are
    sources. On GET a confirmation page is rendered (including any
    merge conflicts); on POST all objects referencing a source are
    re-linked to the target, the sources are deleted and one changelog
    entry is committed.
    """
    def get_m2m_field(obj, field):
        """
        Return the other foreign key field as part of a many to many
        relationship that is not the given field.
        """
        for rel_field in obj._meta.get_fields():
            if rel_field != field and isinstance(rel_field, ForeignKey):
                return rel_field
        return None
    def get_existing_relation(obj, field, rel_field, target):
        """
        Try to return an existing many to many relationship between
        the existing entity and the new target.
        """
        try:
            rel_value = getattr(obj, rel_field.name)
            if rel_value is not None:
                return obj.__class__.objects.get(**{
                    rel_field.name: rel_value,
                    field.name: target
                })
        except obj.__class__.DoesNotExist:
            pass
        return None
    def same_m2m_properties(obj, rel_obj):
        """
        Return True, if both m2m relationships are equal excluding foreign
        keys. Also return True, if there are no more properties other than
        foreign keys.

        Returns a tuple (ok, message), where message describes the first
        differing property (or None).
        """
        for field in obj._meta.get_fields():
            if not isinstance(field, ForeignKey) and not field.primary_key:
                a = getattr(obj, field.attname)
                b = getattr(rel_obj, field.attname)
                if a != b:
                    # convert raw values to display values if the field is
                    # a choice field...
                    if field.choices:
                        getter = 'get_%s_display' % field.name
                        a = getattr(obj, getter)()
                        b = getattr(rel_obj, getter)()
                    # generate info message
                    return False, '<em>%s</em> is not the same, should be <em>%s</em> but was <em>%s</em>' % (
                        field.verbose_name, a, b
                    )
        return True, None
    def can_merge(obj, field, target):
        # Returns (ok, message): merging is rejected when a m2m
        # through-record for the target already exists with differing
        # non-foreign-key properties.
        info = field.get_reverse_path_info()
        if info and info[0].m2m:
            # get the other many to many foreign key
            rel_field = get_m2m_field(obj, field)
            if rel_field:
                # get existing m2m relationship
                existing_rel = get_existing_relation(obj, field, rel_field, target)
                if existing_rel:
                    # test if fields are the same, if not we should not
                    # merge...
                    return same_m2m_properties(obj, existing_rel)
        return True, None
    def link_to_target(obj, target):
        """
        try to re-link first, this may fail for ManyToMany due
        to unique-together constraints...
        """
        attr_found = False
        for field in obj._meta.get_fields():
            if isinstance(field, ForeignKey) and issubclass(field.related_model, self.model):
                setattr(obj, field.name, target)
                attr_found = True
        return attr_found
    def get_objects(source, using):
        """
        Return list of related objects from given collector.
        """
        # NOTE(review): collects against the outer ``sources`` list, not
        # the ``source`` argument -- presumably intentional so that the
        # dependencies of all sources are gathered in one pass; verify.
        collector = Collector(using)
        collector.collect(sources, keep_parents=True)
        collector.sort()
        result = []
        for model, obj in collector.instances_with_model():
            # ignore the original source
            if model == self.model and obj.pk == source.pk:
                continue
            result.append( (model, obj) )
        return result
    # abort?
    if request.POST.get('cubane_form_cancel', '0') == '1':
        return self._redirect(request, 'index')
    # get objects to merge (in posted order)
    pks = request_int_list(request.GET, 'pks[]')
    objects = self.model.objects.in_bulk(pks)
    sources = []
    for pk in pks:
        obj = objects.get(pk)
        if obj is not None:
            sources.append(obj)
    # need at least two objects to work with, first object is always the
    # target...
    if len(sources) >= 2:
        target = sources[0]
        sources = sources[1:]
    else:
        target = None
        sources = []
    # we have to have a target
    if target is None:
        messages.add_message(request, messages.ERROR, 'Unable to merge less than two objects.')
        return self._redirect(request, 'index')
    # ask the target model instance if it can merge with the given list
    # of sources
    if hasattr(target, 'can_merge_with'):
        _can_merge = target.can_merge_with(sources)
        if isinstance(_can_merge, tuple):
            _can_merge, _message = _can_merge
        else:
            _message = 'Unable to merge.'
        if not _can_merge:
            messages.add_message(request, messages.ERROR, _message)
            return self._redirect(request, 'index')
    # find all objects that are referencing one of the sources...
    using = router.db_for_write(self.model, instance=target)
    errors = False
    for source in sources:
        # get related objects pointing to source
        objects = get_objects(source, using)
        # determine any errors that may prevent the user from
        # going ahead... (GET only: annotate sources for the template)
        if request.method != 'POST':
            for model, obj in objects:
                for field in obj._meta.get_fields():
                    if isinstance(field, ForeignKey) and issubclass(field.related_model, self.model):
                        _can_merge, info_msg = can_merge(obj, field, target)
                        if not _can_merge:
                            message = mark_safe(info_msg)
                            source.merge_error_message = message
                            errors = True
                            break
        # execute merge on POST, ignore any errors
        if request.method == 'POST':
            # keep original state of target
            previous_target_instance = request.changelog.get_changes(target)
            for model, obj in objects:
                # keep original state of instance
                previous_instance = request.changelog.get_changes(obj)
                previous_instance_label = unicode(obj)
                # try to link...
                if link_to_target(obj, target):
                    try:
                        # try to save re-linked relationship
                        with transaction.atomic():
                            obj.save()
                        request.changelog.edit(obj, previous_instance, instance_label=previous_instance_label)
                    except IntegrityError:
                        pass
            # post merge handler
            self.post_merge(request, sources, target)
            request.changelog.edit(target, previous_target_instance)
            # all dependencies have been eliminated for the given source,
            # so we can physically remove the source, which may fail because
            # we may have already removed dependencies...
            for source in sources:
                request.changelog.delete(source)
                source.delete()
            # success message
            message = pluralize(len(sources), [self.model._meta.verbose_name, self.model._meta.verbose_name_plural], 'merged into <em>%s</em> successfully.' % target, tag='em')
            # commit changes
            request.changelog.commit(message, target)
            # redirect back to index (all work happened in this iteration)
            return self._redirect(request, 'index')
    # GET: render confirmation page including any detected conflicts
    return {
        'cubane_template_view_path': 'cubane/backend/merge.html',
        'verbose_name_plural': self.model._meta.verbose_name_plural,
        'target': target,
        'sources': sources,
        'errors': errors
    }
def post_merge(self, request, sources, target):
    """
    Virtual hook: invoked after the given list of source objects has
    been merged into target. The default implementation does nothing;
    subclasses may override this to run custom post-merge logic.
    """
    pass
def get_folder_model(self):
    """
    Return the model that represents folders used for the folders view
    or None if no folders are supported for the corresponding model.
    """
    # getattr with default replaces the previous bare ``except`` clause,
    # which silently swallowed unrelated errors as well.
    return getattr(self, 'folder_model', None)
def has_folders(self, request):
    """
    Return True, if this view supports folders and the current user has
    read permissions to the folder model.
    """
    # related listings never present a folder tree
    if self.related_listing:
        return False
    folder_model = self.get_folder_model()
    if folder_model is None:
        return False
    # superusers bypass the ACL read check
    if request.user.is_superuser:
        return True
    return bool(Acl.of(folder_model).read)
def has_multiple_folders(self):
    """
    Return True, if this view supports not only folders but also
    allows entities to be assigned to multiple folders at once.
    """
    # getattr with default replaces the previous bare ``except`` clause.
    return getattr(self, 'multiple_folders', False)
def is_listing_children(self):
    """
    Return True, if this view will list children and grand-children
    of a set of folders or not.
    """
    # getattr with default replaces the previous bare ``except`` clause.
    return getattr(self, 'list_children', False)
def get_folder_url(self, request, name):
    """
    Return the backend url with the given name for the folder model,
    or an empty string if this view has no folder model.
    """
    folder_model = self.get_folder_model()
    if not folder_model:
        return ''
    return request.backend.get_url_for_model(folder_model, name)
def get_folder_model_name_singular(self):
    """
    Return the verbose name (singular) of the folder model for this view,
    or an empty string if there is no folder model.
    """
    folder_model = self.get_folder_model()
    return folder_model._meta.verbose_name if folder_model else ''
def get_folder_model_name(self):
    """
    Return the verbose name (plural) of the folder model for this view,
    or an empty string if there is no folder model.
    """
    folder_model = self.get_folder_model()
    return folder_model._meta.verbose_name_plural if folder_model else ''
def is_leaf_folder_view(self, request, current_folders, objects_count):
    """
    Return True, if the given set of folders represents folders of the same
    level - and therefore the view can support sorting.

    A "leaf" view means the visible objects all live within exactly one
    (possibly root) folder; only then is a consistent seq. order possible.
    """
    # no folders? -> no mixed hierarchy possible
    if not self.has_folders(request):
        return True
    # multiple folders selected? -> mixed hierarchy
    if current_folders and len(current_folders) > 1:
        return False
    # folder pks (None represents the root folder)
    if current_folders:
        folder_pks = [folder.pk for folder in current_folders]
    else:
        folder_pks = None
    # count objects we should have within the current (single) folder,
    # which might be the root folder; if this matches the number of
    # objects actually listed, the listing shows exactly one level.
    objects = self._get_objects_base(request)
    objects = self._folder_filter_base(request, objects, folder_pks)
    count = objects.count()
    return count == objects_count
def get_pseudo_root(self, children, open_folder_ids):
    """
    Return the pseudo root folder node ('/', id -1) wrapping the given
    list of top-level children. Its open state is taken from the given
    list of open folder ids.
    """
    folder_model = self.get_folder_model()
    root = folder_model()
    root.id = -1
    root.parent = None
    root.title = '/'
    root.children = children
    root.is_open_folder = root.id in open_folder_ids
    return root
def get_folders(self, request, parent=None):
    """
    Return all folders for this model view as a tree structure, wrapped
    into a single pseudo root node; returns an empty list if this view
    does not support folders.
    """
    if self.has_folders(request):
        # get base queryset
        folders = self._get_folders(request, parent)
        # determine if folders are sortable
        folder_sortable = get_listing_option(self.folder_model, 'sortable', False)
        # determine folder order seq.
        if folder_sortable:
            # sortable folders
            folders = folders.order_by('seq')
        else:
            # sort by folder title, case-insensitively (secondary
            # case-sensitive key keeps the order deterministic)
            folder_title_name = self._get_folder_title_name()
            folders = folders.order_by(Lower(folder_title_name), folder_title_name)
        # materialise database query
        folders = list(folders)
        # open/close states (persisted in the session)
        folder_ids = self._get_open_folders(request)
        for folder in folders:
            folder.is_open_folder = folder.id in folder_ids
        # make tree
        folders = TreeBuilder().make_tree(folders)
        folders = [self.get_pseudo_root(folders, folder_ids)]
        return folders
    else:
        return []
def _get_folder_response(self, request, session_prefix=''):
"""
Return template or json for all folders.
"""
folders = self.get_folders(request)
if self._is_json(request):
return to_json_response(folders)
else:
return {
'folders': folders,
'folder_ids': self._get_active_folder_ids(request, session_prefix)
}
def _set_tree_node_state(self, request, folder_id, folder_open):
"""
Set the open state for the given tree node.
"""
folder_ids = self._get_open_folders(request)
if folder_open:
if folder_id not in folder_ids:
folder_ids.append(folder_id)
else:
if folder_id in folder_ids:
folder_ids.remove(folder_id)
self._set_open_folders(request, folder_ids)
def _is_root_node(self, node):
"""
Return True, if the given node is the root node.
"""
return node == None or node.id == -1
def _is_same_node(self, a, b):
"""
Return True, if a and b are the same nodes.
"""
a_id = a.id if a else -1
b_id = b.id if b else -1
return a_id == b_id
def _is_child_node_of(self, child, parent):
"""
Return True, if child is a child (or indirect child) of parent.
"""
node = child
while node:
try:
node = node.parent
except AttributeError:
return False
if self._is_same_node(node, parent):
return True
return False
def _can_move_node(self, src, dst):
"""
Return True, if the given src node can be moved to dst while the
integrity of the tree stays intact.
"""
return \
not self._is_root_node(src) and \
not self._is_same_node(src, dst) and \
not self._is_child_node_of(dst, src)
@view(require_POST)
@view(permission_required('view'))
def tree_node_state(self, request):
    """
    Store visual state (open|closed) for the given tree node in the session.
    """
    node_id = request_int(request.POST, 'id')
    node_open = request_bool(request.POST, 'open')
    self._set_tree_node_state(request, node_id, node_open)
    return to_json_response({'success': True})
@view(require_POST)
@view(template('cubane/backend/listing/folders.html'))
@view(permission_required('edit'))
def move_tree_node(self, request):
    """
    Move the given source tree node(s) into the given destination tree
    node and return the markup for the entire new tree as a result of
    the operation. The destination tree node is opened automatically.
    """
    source_ids = request_int_list(request.POST, 'src[]')
    source_nodes = self._get_folders_by_ids(request, source_ids)
    target = self._get_folder_by_id(request, request.POST.get('dst'))
    # only move if every single node may be moved to the target
    movable = all(self._can_move_node(node, target) for node in source_nodes)
    if movable:
        for node in source_nodes:
            node.parent = target
            node.save()
        # open the target automatically, unless it is the root node
        if target:
            self._set_tree_node_state(request, target.id, True)
    session_prefix = self._get_session_prefix(request)
    return self._get_folder_response(request, session_prefix)
@view(require_POST)
@view(permission_required('edit'))
def move_to_tree_node(self, request):
    """
    Move a list of model instances to the given destination tree node
    folder. Since the seq. attribute is not changed initially, the moved
    items will place themselves into the order that already exists, unless
    the folder is empty. In any case, we will re-generate the seq. in order
    to guarantee that the sequence begins with 1 and is consistent without
    duplicates or gaps.
    """
    src = self._get_objects_by_ids(request, request_int_list(request.POST, 'src[]'))
    dst = self._get_folder_by_id(request, request.POST.get('dst'))
    cur = self._get_folders_by_ids(request, request_int_list(request.POST, 'cur[]'))
    # move each instance; when the model itself is the folder model,
    # validate tree integrity first
    updated = False
    for obj in src:
        if not self.model_is_folder or self._can_move_node(obj, dst):
            updated = True
            self._folder_assign(request, obj, dst, cur)
            obj.save()
    # re-apply seq. for all items within the target folder
    if updated and self._is_sortable(self.model):
        updated_on = datetime.datetime.now()
        objects = self._get_objects_base(request).order_by('seq')
        if dst:
            objects = self._folder_filter(request, objects, [dst.pk])
        for i, item in enumerate(objects, start=1):
            # only update if seq changed... (queryset update avoids
            # triggering model save() hooks for a seq-only change)
            if item.seq != i:
                self._get_objects_base(request).filter(pk=item.pk).update(
                    seq=i,
                    updated_on=updated_on
                )
    # open target node if we are moving tree nodes
    if dst and self.model_is_folder:
        self._set_tree_node_state(request, dst.id, True)
    # clear cms cache if we had to update at least one item
    if updated and 'cubane.cms' in settings.INSTALLED_APPS:
        from cubane.cms.views import get_cms
        cms = get_cms()
        cms.invalidate(verbose=False)
    return to_json_response({
        'success': True
    })
@view(require_GET)
@view(template('cubane/backend/listing/folders.html'))
@view(permission_required('view'))
def get_tree(self, request):
    """
    Return the current folder tree.
    """
    return self._get_folder_response(request, self._get_session_prefix(request))
def _folder_is_empty(self, request, folder):
"""
Return true if folder is empty.
"""
if self._get_folders(request, folder).count() == 0 and self.model.objects.filter(**{self._get_folder_assignment_name(): folder}).count() == 0:
return True
return False
def _get_folder_children(self, request, folder):
"""
Return folder children otherwise empty.
"""
if folder:
return self._get_folders(request, folder)
else:
return []
def _delete_folder_if_empty(self, request, folder):
    """
    Delete the given folder if the folder is empty.

    Processes children first (depth-first), so folders that only contain
    empty folders are removed as well. Increments ``self._deleted`` for
    every folder removed and records each deletion in the changelog.
    """
    for child in self._get_folder_children(request, folder):
        self._delete_folder_if_empty(request, child)
    if self._folder_is_empty(request, folder):
        request.changelog.delete(folder)
        folder.delete()
        self._deleted += 1
@view(require_POST)
@view(permission_required('delete'))
def delete_empty_folders(self, request):
    """
    Delete ALL empty folders from tree.
    """
    # counter shared with _delete_folder_if_empty() via self
    self._deleted = 0
    # only applicable when this view uses a separate folder model (not
    # when the model itself represents the folders)
    if self.has_folders(request) and not self.model_is_folder:
        # start the depth-first recursion at the root level folders
        for folder in self._get_folders(request, None).filter(parent=None):
            self._delete_folder_if_empty(request, folder)
    if self._deleted == 0:
        msg = '%s are already clean.' % self.get_folder_model_name()
        messages.add_message(request, messages.SUCCESS, msg)
    else:
        # one changelog commit covering all deleted folders
        request.changelog.commit(
            '<em>%s</em> %s deleted.' % (self._deleted, self.get_folder_model_name()),
            model=self.folder_model
        )
    return to_json_response({
        'success': True
    })
@view(require_POST)
def side_panel_resize(self, request):
    """
    Change side panel width for this view.
    """
    width = request.POST.get('width')
    panel_id = request.POST.get('resize_panel_id')
    self._set_sidepanel_width(request, width, panel_id)
    return to_json_response({'success': True})
def robots_txt(request):
    """
    Render robots.txt, preferring a site-specific 'robots.txt' template
    over the default template shipped with cubane.

    Fix: serve the response with the valid MIME type ``text/plain``;
    the previous value ``text`` is not a valid content type and robots.txt
    is conventionally served as plain text.
    """
    try:
        t = get_template('robots.txt')
    except TemplateDoesNotExist:
        t = get_template('cubane/robots.txt')
    return HttpResponse(
        t.render({
            'domainname': make_absolute_url('/sitemap.xml')
        }, request),
        content_type='text/plain'
    )
/CADET-Process-0.7.3.tar.gz/CADET-Process-0.7.3/README.md | # CADET-Process
The [**CADET**](https://cadet.github.io) core simulator is a very powerful numerical engine that can simulate a large variety of physico-chemical models used in chromatography and other biochemical processes.
However, the configuration files of **CADET** can be complex and difficult to work with.
This is especially relevant when multiple unit operations are involved which is often the case for complex integrated processes.
Moreover, the structure of the configuration file may change during process optimization, for example when the order of dynamic events changes, making the direct use of **CADET** impossible without another layer of abstraction.
In this context [**CADET-Process**](https://cadet-process.readthedocs.io/en/latest/) was developed.
The package facilitates modeling processes using an object-oriented model builder.
This interface layer provides convenient access to all model parameters in the system.
It automatically checks validity of the parameter values and sets reasonable default values where possible.
This simplifies the setup of **CADET** simulations and reduces the risk of ill-defined configurations files.
Importantly, **CADET-Process** enables the modelling of elaborate switching schemes and advanced chromatographic operating modes such as complex gradients, recycling systems, or multi-column systems by facilitating the definition of dynamic changes of flow sheet connectivity or any other time dependent parameters.
The package also includes tools to evaluate cyclic stationarity of processes, and routines to determine the optimal fractionation times required to determine common performance indicators such as yield, purity, and productivity.
Moreover, utility functions for calculating reaction equilibria and buffer capacities, as well as convenient functions for plotting simulation results are provided.
Finally, these processes can be optimized by defining an objective function (with constraints) and using one of the integrated optimization algorithms such as NSGA-3.
This can be used to determine any of the physico-chemical model parameters and to improve process performance.
For more information and tutorials, please refer to the [documentation](https://cadet-process.readthedocs.io/en/latest/).
The source code is freely available on [*GitHub*](https://github.com/fau-advanced-separations/CADET-Process), and a scientific paper was published in [*MDPI Processes*](https://doi.org/10.3390/pr8010065).
If **CADET-Process** is useful to you, please cite the following publication:
```
@Article{Schmoelder2020,
author = {Schmölder, Johannes and Kaspereit, Malte},
title = {A {{Modular Framework}} for the {{Modelling}} and {{Optimization}} of {{Advanced Chromatographic Processes}}},
doi = {10.3390/pr8010065},
number = {1},
pages = {65},
volume = {8},
journal = {Processes},
year = {2020},
}
```
## Installation
**CADET-Process** can be installed with the following command:
```
pip install CADET-Process
```
To use **CADET-Process**, make sure, that **CADET** is also installed.
This can for example be done using [conda](https://docs.conda.io/en/latest/):
```
conda install -c conda-forge cadet
```
For more information, see the [CADET Documentation](https://cadet.github.io/master/getting_started/installation.html).
## Free software
CADET-Process is free software: you can redistribute it and/or modify it under the terms of the [GNU General Public License version 3](https://github.com/fau-advanced-separations/CADET-Process/blob/master/LICENSE).
## Note
This software is work in progress and being actively developed.
Breaking changes and extensive restructuring may occur in any commit and release.
If you encounter problems or if you have questions, feel free to ask for support in the [**CADET-Forum**](https://forum.cadet-web.de).
Please report any bugs that you find [here](https://github.com/fau-advanced-separations/CADET-Process/issues).
Pull requests on [GitHub](https://github.com/fau-advanced-separations/CADET-Process) are also welcome.
| PypiClean |
/McStasScript-0.0.63.tar.gz/McStasScript-0.0.63/mcstasscript/instrument_diagram/component_description.py | from libpyvinyl.Parameters.Parameter import Parameter
from mcstasscript.helper.mcstas_objects import DeclareVariable
def component_description(component):
    """
    Returns string of information about the component

    Includes information on required parameters if they are not yet
    specified. Information on the components are added when the
    class is used as a superclass for classes describing each
    McStas component. Uses mathtext for bold and italics.
    """
    string = ""
    # optional raw C code inserted before the component definition
    # NOTE(review): length check ``> 1`` (not ``> 0``) -- presumably to
    # skip single-character placeholder defaults; verify.
    if len(component.c_code_before) > 1:
        string += component.c_code_before + "\n"
    if len(component.comment) > 1:
        string += "// " + component.comment + "\n"
    if component.SPLIT != 0:
        string += "SPLIT " + str(component.SPLIT) + " "
    # component header line; underscores escaped for mathtext
    string += "COMPONENT " + str(component.name)
    string += " = $\\bf{" + str(component.component_name).replace("_", "\_") + "}$\n"
    # list every set parameter (name = value [unit]) in bold
    for key in component.parameter_names:
        val = getattr(component, key)
        parameter_name = key
        if val is not None:
            unit = ""
            if key in component.parameter_units:
                unit = "[" + component.parameter_units[key] + "]"
            # instrument parameters and declare variables are shown by name
            if isinstance(val, Parameter):
                val_string = val.name
            elif isinstance(val, DeclareVariable):
                val_string = val.name
            else:
                val_string = str(val)
            # escape underscores and quotes for mathtext rendering
            value = "$\\bf{" + val_string.replace("_", "\_").replace('\"', "''").replace('"', "\''") + "}$"
            string += " $\\bf{" + parameter_name.replace("_", "\_") + "}$"
            string += " = " + value + " " + unit + "\n"
        else:
            # unset parameter without a default: flag as required
            if component.parameter_defaults[key] is None:
                string += " $\\bf{" + parameter_name.replace("_", "\_") + "}$"
                string += " : $\\bf{Required\ parameter\ not\ yet\ specified}$\n"
    # optional WHEN condition
    if not component.WHEN == "":
        string += component.WHEN + "\n"
    # placement: AT position, ABSOLUTE or RELATIVE to a reference
    string += "AT " + str(component.AT_data)
    if component.AT_reference is None:
        string += " $\\it{ABSOLUTE}$\n"
    else:
        string += " RELATIVE $\\it{" + component.AT_reference.replace("_", "\_") + "}$\n"
    # optional rotation, analogous to the AT clause
    if component.ROTATED_specified:
        string += "ROTATED " + str(component.ROTATED_data)
        if component.ROTATED_reference is None:
            string += " $\\it{ABSOLUTE}$\n"
        else:
            string += " $\\it{" + component.ROTATED_reference.replace("_", "\_") + "}$\n"
    if not component.GROUP == "":
        string += "GROUP " + component.GROUP + "\n"
    # optional EXTEND block with raw C code
    if not component.EXTEND == "":
        string += "EXTEND %{" + "\n"
        string += component.EXTEND + "%}" + "\n"
    if not component.JUMP == "":
        string += "JUMP " + component.JUMP + "\n"
    # optional raw C code inserted after the component definition
    if len(component.c_code_after) > 1:
        string += component.c_code_after + "\n"
    return string.strip()
/AyiinXd-0.0.8-cp311-cp311-macosx_10_9_universal2.whl/fipper/dist/stream.js | "use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.Stream = void 0;
const events_1 = require("events");
const wrtc_1 = require("wrtc");
const binding_1 = require("./binding");
const buffer_optimized_1 = require("./buffer_optimized");
const os = require("os");
class Stream extends events_1.EventEmitter {
/**
 * Media pump: reads raw bytes (PCM audio or I420 video) from a readable
 * source, buffers them and periodically feeds them into WebRTC tracks.
 * Emits 'pause', 'resume', 'restarted' and 'stream_deleted' events.
 *
 * timePulseBuffer defaults to 1.5 only when buffer_length == 4
 * (low-latency mode); otherwise pulsed buffering is disabled (0).
 */
constructor(readable, bitsPerSample = 16, sampleRate = 48000, channelCount = 1, buffer_length = 10, timePulseBuffer = buffer_length == 4 ? 1.5 : 0) {
    super();
    this.readable = readable;
    this.bitsPerSample = bitsPerSample;
    this.sampleRate = sampleRate;
    this.channelCount = channelCount;
    this.buffer_length = buffer_length;
    this.timePulseBuffer = timePulseBuffer;
    // playback / lifecycle flags
    this.paused = false;
    this.finished = true;
    this.stopped = false;
    this.stopped_done = false;
    // source loading bookkeeping
    this.finishedLoading = false;
    this.bytesLoaded = 0;
    this.playedBytes = 0;
    this.bytesSpeed = 0;
    this.lastLag = 0;
    this.equalCount = 0;
    this.lastBytesLoaded = 0;
    this.finishedBytes = false;
    this.lastByteCheck = 0;
    this.lastByte = 0;
    this.runningPulse = false;
    // video parameters (set via createVideoTrack/setVideoParams)
    this.isVideo = false;
    this.videoWidth = 0;
    this.videoHeight = 0;
    this.videoFramerate = 0;
    this.lastDifferenceRemote = 0;
    this.lipSync = false;
    this.bytesLength = 0;
    this.overloadQuiet = false;
    // called by the readable when the whole source has been consumed
    this.endListener = (() => {
        this.finishedLoading = true;
        if (this.readable !== undefined) {
            binding_1.Binding.log('COMPLETED_BUFFERING -> ' + new Date().getTime() +
                ' -> ' + (this.isVideo ? 'VIDEO' : 'AUDIO'), binding_1.Binding.DEBUG);
            binding_1.Binding.log('BYTES_STREAM_CACHE_LENGTH -> ' + this.cache.length +
                ' -> ' + (this.isVideo ? 'VIDEO' : 'AUDIO'), binding_1.Binding.DEBUG);
            binding_1.Binding.log('BYTES_LOADED -> ' +
                this.bytesLoaded +
                'OF -> ' +
                this.readable.fileSize() +
                ' -> ' + (this.isVideo ? 'VIDEO' : 'AUDIO'), binding_1.Binding.DEBUG);
        }
    });
    // called by the readable for every incoming chunk; pauses the
    // source once the internal cache holds enough data
    this.dataListener = ((data) => {
        this.bytesLoaded += data.length;
        this.bytesSpeed = data.length;
        try {
            if (!(this.needsBuffering())) {
                this.readable?.pause();
                this.runningPulse = false;
            }
        }
        catch (e) {
            // readable vanished underneath us
            this.emit('stream_deleted');
            return;
        }
        this.cache.push(data);
    }).bind(this);
    this.audioSource = new wrtc_1.nonstandard.RTCAudioSource();
    this.videoSource = new wrtc_1.nonstandard.RTCVideoSource();
    this.paused = true;
    this.cache = new buffer_optimized_1.BufferOptimized(this.bytesLength);
    if (this.readable !== undefined) {
        this.setReadable(this.readable);
    }
    // kick off the periodic pump loop
    setTimeout(() => this.processData(), 1);
}
/** Enable/disable lip-sync mode (audio/video timing coupling). */
setLipSyncStatus(status) {
    this.lipSync = status;
}
/** When true, CPU-overload warnings are logged at DEBUG instead of WARNING. */
setOverloadQuietStatus(status) {
    this.overloadQuiet = status;
}
/**
 * Attach a (new) readable source and reset all loading/buffering state.
 * If the stream was stopped, the new source is kept but listeners are
 * not attached.
 */
setReadable(readable) {
    this.finished = true;
    this.finishedLoading = false;
    // reset all byte counters and lag detection state
    this.bytesLoaded = 0;
    this.playedBytes = 0;
    this.bytesSpeed = 0;
    this.lastLag = 0;
    this.equalCount = 0;
    this.lastBytesLoaded = 0;
    this.finishedBytes = false;
    this.lastByteCheck = 0;
    this.lastByte = 0;
    this.runningPulse = false;
    this.lastDifferenceRemote = 0;
    this.readable = readable;
    // chunk size depends on current audio/video parameters
    this.bytesLength = this.bytesLengthCalculated();
    this.cache = new buffer_optimized_1.BufferOptimized(this.bytesLength);
    this.readable?.resume();
    if (this.stopped) {
        return;
    }
    if (this.readable != undefined) {
        this.finished = false;
        this.finishedLoading = false;
        // wire up chunk and end-of-stream callbacks
        this.readable.onData = this.dataListener;
        this.readable.onEnd = this.endListener;
    }
}
needed_time() {
return this.isVideo ? 0.30 : 50;
}
/**
 * Return true while more data should be pulled from the readable:
 * the cache is below the configured buffering window AND the source
 * still has unread bytes. With pulsed buffering enabled, the pulse
 * flag additionally gates the result (unless withPulseCheck is false).
 */
needsBuffering(withPulseCheck = true) {
    if (this.finishedLoading || this.readable === undefined) {
        return false;
    }
    // cache below target window?
    let result = this.cache.length < this.bytesLength * this.needed_time() * this.buffer_length;
    // and source not (almost) exhausted -- keep a 2-chunk safety margin
    result =
        result &&
            (this.bytesLoaded <
                this.readable.fileSize() -
                    this.bytesSpeed * 2 ||
                this.finishedBytes);
    if (this.timePulseBuffer > 0 && withPulseCheck) {
        result = result && this.runningPulse;
    }
    return result;
}
checkLag() {
if (this.finishedLoading) {
return false;
}
return this.cache.length < this.bytesLength * this.needed_time();
}
/** Pause playback and emit 'pause'. Throws if the stream was stopped. */
pause() {
    if (this.stopped) {
        throw new Error('Cannot pause when stopped');
    }
    this.paused = true;
    this.emit('pause', this.paused);
}
/** Resume playback and emit 'resume'. Throws if the stream was stopped. */
resume() {
    if (this.stopped) {
        throw new Error('Cannot resume when stopped');
    }
    this.paused = false;
    this.emit('resume', this.paused);
}
/** Stop the readable source and mark this stream as finished. */
finish() {
    this.readable?.stop();
    this.finished = true;
    this.finishedLoading = true;
}
/** Finish the stream and permanently mark it as stopped. */
stop() {
    this.finish();
    this.stopped = true;
}
/**
 * Restart the stream with a new readable: flags the pump loop to stop,
 * then polls every 10 ms until the loop acknowledged (stopped_done),
 * emits 'restarted' and re-enters the pump loop.
 */
restart(readable) {
    this.stopped = true;
    setTimeout(() => {
        if (this.stopped_done) {
            this.stopped = false;
            this.stopped_done = false;
            this.emit('restarted', readable);
            this.processData();
        }
        else {
            // pump loop has not yet observed the stop flag -- retry
            this.restart(readable);
        }
    }, 10);
}
/** Create and return a WebRTC audio track backed by this stream. */
createAudioTrack() {
    return this.audioSource.createTrack();
}
/**
 * Switch this stream to video mode with the given frame geometry and
 * framerate (fps), recompute the per-frame byte length, and return a
 * WebRTC video track backed by this stream.
 */
createVideoTrack(width, height, framerate) {
    this.videoWidth = width;
    this.videoHeight = height;
    this.isVideo = true;
    // store the frame interval in milliseconds
    this.videoFramerate = 1000 / framerate;
    this.bytesLength = this.bytesLengthCalculated();
    this.cache.byteLength = this.bytesLength;
    return this.videoSource.createTrack();
}
/** Update video geometry/framerate (fps) and recompute the frame byte length. */
setVideoParams(width, height, framerate) {
    this.videoWidth = width;
    this.videoHeight = height;
    // store the frame interval in milliseconds
    this.videoFramerate = 1000 / framerate;
    this.bytesLength = this.bytesLengthCalculated();
    this.cache.byteLength = this.bytesLength;
}
/** Update the audio sample rate and recompute the chunk byte length. */
setAudioParams(bitrate) {
    this.sampleRate = bitrate;
    this.bytesLength = this.bytesLengthCalculated();
    this.cache.byteLength = this.bytesLength;
}
bytesLengthCalculated() {
if (this.isVideo) {
return 1.5 * this.videoWidth * this.videoHeight;
}
else {
return ((this.sampleRate * this.bitsPerSample) / 8 / 100) * this.channelCount;
}
}
    // Pumping loop: re-schedules itself via setTimeout and, at most once per
    // tick, pushes one frame worth of bytes to the audio/video source. Also
    // drives buffering, lag diagnostics and end-of-stream detection.
    processData() {
        const oldTime = new Date().getTime();
        if (this.stopped) {
            // Acknowledge the stop request so restart() can proceed.
            this.stopped_done = true;
            return;
        }
        const lagging_remote = this.isLaggingRemote();
        const byteLength = this.bytesLength;
        // Shorten the next tick by the measured remote drift (lip-sync).
        const timeoutWait = this.frameTime() - this.lastDifferenceRemote;
        setTimeout(() => this.processData(), timeoutWait > 0 ? timeoutWait : 0);
        // Run the body unless playback is about to end (loading done, cache
        // drained) or no readable source is attached.
        if (!(!this.finished &&
            this.finishedLoading &&
            this.cache.length < byteLength) && this.readable !== undefined) {
            try {
                if ((this.needsBuffering(false))) {
                    let checkBuff = true;
                    if (this.timePulseBuffer > 0) {
                        // Pulsed buffering: only refill while below the
                        // pulse threshold.
                        this.runningPulse =
                            this.cache.length <
                                byteLength * this.needed_time() * this.timePulseBuffer;
                        checkBuff = this.runningPulse;
                    }
                    if (this.readable !== undefined && checkBuff) {
                        this.readable.resume();
                    }
                }
            }
            catch (e) {
                // The underlying stream vanished while we touched it.
                this.emit('stream_deleted');
                this.stopped = true;
                return;
            }
            const checkLag = this.checkLag();
            let fileSize;
            try {
                // Query the file size at most once per second; reuse the
                // cached value in between.
                if (oldTime - this.lastByteCheck > 1000) {
                    fileSize = this.readable.fileSize();
                    this.lastByte = fileSize;
                    this.lastByteCheck = oldTime;
                }
                else {
                    fileSize = this.lastByte;
                }
            }
            catch (e) {
                this.emit('stream_deleted');
                this.stopped = true;
                return;
            }
            // Emit one frame when playing, not lagging (locally or remotely)
            // and at least one frame of data is available (or loading done).
            if (!this.paused &&
                !this.finished &&
                !lagging_remote &&
                (this.cache.length >= byteLength || this.finishedLoading) &&
                !checkLag) {
                this.playedBytes += byteLength;
                const buffer = this.cache.readBytes();
                if (this.isVideo) {
                    const i420Frame = {
                        width: this.videoWidth,
                        height: this.videoHeight,
                        data: new Uint8ClampedArray(buffer)
                    };
                    this.videoSource.onFrame(i420Frame);
                }
                else {
                    // Reinterpret the raw bytes as 16-bit PCM samples.
                    const samples = new Int16Array(new Uint8Array(buffer).buffer);
                    this.audioSource.onData({
                        bitsPerSample: this.bitsPerSample,
                        sampleRate: this.sampleRate,
                        channelCount: this.channelCount,
                        numberOfFrames: samples.length,
                        samples,
                    });
                }
            }
            else if (checkLag) {
                // Lagging: log diagnostics, escalating to a warning when the
                // sampled CPU utilisation reaches 90%.
                this.notifyOverloadCpu((cpuPercentage) => {
                    if (cpuPercentage >= 90) {
                        binding_1.Binding.log('CPU_OVERLOAD_DETECTED -> ' + new Date().getTime() +
                            ' -> ' + (this.isVideo ? 'VIDEO' : 'AUDIO'), !this.overloadQuiet ? binding_1.Binding.WARNING : binding_1.Binding.DEBUG);
                    }
                    else {
                        binding_1.Binding.log('STREAM_LAG -> ' + new Date().getTime() +
                            ' -> ' + (this.isVideo ? 'VIDEO' : 'AUDIO'), binding_1.Binding.DEBUG);
                    }
                    binding_1.Binding.log('BYTES_STREAM_CACHE_LENGTH -> ' + this.cache.length +
                        ' -> ' + (this.isVideo ? 'VIDEO' : 'AUDIO'), binding_1.Binding.DEBUG);
                    binding_1.Binding.log('BYTES_LOADED -> ' +
                        this.bytesLoaded +
                        'OF -> ' +
                        this.readable?.fileSize() +
                        ' -> ' + (this.isVideo ? 'VIDEO' : 'AUDIO'), binding_1.Binding.DEBUG);
                });
            }
            if (!this.finishedLoading) {
                // Stall detection: if the reported file size stays unchanged
                // across several one-second samples (equalCount >= 4), assume
                // no more bytes are coming and force the reader to resume.
                if (fileSize === this.lastBytesLoaded) {
                    if (this.equalCount >= 4) {
                        this.equalCount = 0;
                        binding_1.Binding.log('NOT_ENOUGH_BYTES -> ' + oldTime +
                            ' -> ' + (this.isVideo ? 'VIDEO' : 'AUDIO'), binding_1.Binding.DEBUG);
                        this.finishedBytes = true;
                        this.readable?.resume();
                    }
                    else {
                        if (oldTime - this.lastLag > 1000) {
                            this.equalCount += 1;
                            this.lastLag = oldTime;
                        }
                    }
                }
                else {
                    // File grew again: reset the stall counters.
                    this.lastBytesLoaded = fileSize;
                    this.equalCount = 0;
                    this.finishedBytes = false;
                }
            }
        }
        // Loading completed and the cache is drained: playback is over.
        if (!this.finished &&
            this.finishedLoading &&
            this.cache.length < byteLength &&
            this.readable !== undefined) {
            this.finish();
            this.emit('finish');
        }
    }
haveEnd() {
if (this.readable != undefined) {
return this.readable.haveEnd;
}
else {
return true;
}
}
    // Lip-sync check against a remote peer's playback clock. Only active
    // when lipSync is enabled and both the remotePlayingTime and
    // remoteLagging callbacks were supplied.
    isLaggingRemote() {
        if (this.remotePlayingTime != undefined && !this.paused && this.lipSync && this.remoteLagging != undefined) {
            const remote_play_time = this.remotePlayingTime().time;
            const local_play_time = this.currentPlayedTime();
            if (remote_play_time != undefined && local_play_time != undefined) {
                if (local_play_time > remote_play_time) {
                    // We are ahead of the remote: record the difference so
                    // processData() slows its next tick accordingly.
                    this.lastDifferenceRemote = (local_play_time - remote_play_time) * 10000;
                    return true;
                }
                else if (this.remoteLagging().isLagging && remote_play_time > local_play_time) {
                    // Remote is ahead but itself lagging: hold without delay.
                    this.lastDifferenceRemote = 0;
                    return true;
                }
            }
        }
        return false;
    }
notifyOverloadCpu(action) {
function cpuAverage() {
let totalIdle = 0, totalTick = 0;
const cpus = os.cpus();
for (let i = 0, len = cpus.length; i < len; i++) {
const cpu = cpus[i];
for (let type in cpu.times) {
totalTick += cpu.times[type];
}
totalIdle += cpu.times.idle;
}
return { idle: totalIdle / cpus.length, total: totalTick / cpus.length };
}
const startMeasure = cpuAverage();
setTimeout(function () {
const endMeasure = cpuAverage();
const idleDifference = endMeasure.idle - startMeasure.idle;
const totalDifference = endMeasure.total - startMeasure.total;
const percentageCPU = 100 - ~~(100 * idleDifference / totalDifference);
action(percentageCPU);
}, 500);
}
frameTime() {
return (this.finished || this.paused || this.checkLag() || this.readable === undefined ? 500 : this.isVideo ? this.videoFramerate : 10);
}
currentPlayedTime() {
if (this.readable === undefined || this.finished) {
return undefined;
}
else {
return Math.ceil((this.playedBytes / this.bytesLength) / (0.0001 / this.frameTime()));
}
}
}
exports.Stream = Stream; | PypiClean |
/GeoSnipe-1.2.5.tar.gz/GeoSnipe-1.2.5/src/geosnipe/argparser.py |
import argparse
main_parser = argparse.ArgumentParser()
service_subparsers = main_parser.add_subparsers(title="GeoSnipe Commands", dest="service_command")
def create_offset_parser() -> None:
    """Register the ``offset`` sub-command used to measure your snipe offset."""
    parser = argparse.ArgumentParser(add_help=False)
    parser.add_argument(
        "--tests", "-t", type=int, help="Amount of times you want to test | DEFAULT: 5", default=5
    )
    service_subparsers.add_parser("offset", help="Find your offset", parents=[parser])
def create_sniper_parser() -> None:
    """Register the ``snipe`` sub-command and its arguments."""
    parser = argparse.ArgumentParser(add_help=False)
    # Positional arguments, in the order the CLI expects them.
    for name, kind, description in (
        ("username", str, "Username you want to snipe"),
        ("drop_time", float, "What time the username drops"),
        ("offset", float, "The offset of the snipe"),
    ):
        parser.add_argument(name, metavar=name, type=kind, help=description)
    parser.add_argument(
        "--bearer", "-b", type=str, help="The bearer token of the account | DEFAULT: None", default=None
    )
    parser.add_argument(
        "--combo", "-c", type=str, help="The EMAIL:PASSWORD of the account | DEFAULT: None", default=None
    )
    parser.add_argument(
        "--auth", "-a", type=str,
        help="Authentication you want to use (mojang or microsoft) | DEFAULT: mojang", default="mojang"
    )
    service_subparsers.add_parser("snipe", help="Snipe a username", parents=[parser])
def create_sshoffset_parser() -> None:
    """Register the ``sshoffset`` sub-command used to measure the offset on a VPS.

    Bug fix: ``--tests`` was copy-pasted from the ``--bearer`` option and
    declared with short flag ``-b`` and ``type=str`` even though its default
    is the int ``5`` and the parallel ``offset`` sub-command uses ``-t`` with
    ``type=int``.  ``-t`` is now the documented short flag and values parse as
    ``int``; ``-b`` is kept as an alias for backward compatibility.
    """
    sshoffset_parser = argparse.ArgumentParser(add_help=False)
    sshoffset_parser.add_argument("vps", metavar="vps", type=str, help="The VPS you want to use")
    sshoffset_parser.add_argument(
        "--tests", "-t", "-b", type=int, help="Amount of times you want to test | DEFAULT: 5", default=5
    )
    service_subparsers.add_parser("sshoffset", help="Find offset on VPS", parents=[sshoffset_parser])
def create_sshsniper_parser() -> None:
    """Register the ``sshsnipe`` sub-command for sniping from a remote VPS."""
    parser = argparse.ArgumentParser(add_help=False)
    # Positional arguments, in the order the CLI expects them.
    positionals = (
        ("vps", str, "The VPS you want to use"),
        ("username", str, "Username you want to snipe"),
        ("drop_time", float, "What time the username drops"),
        ("offset", float, "The offset of the snipe"),
    )
    for name, kind, description in positionals:
        parser.add_argument(name, metavar=name, type=kind, help=description)
    parser.add_argument(
        "--bearer", "-b", type=str, help="The bearer token of the account | DEFAULT: None", default=None
    )
    parser.add_argument(
        "--combo", "-c", type=str, help="The EMAIL:PASSWORD of the account | DEFAULT: None", default=None
    )
    parser.add_argument(
        "--auth", "-a", type=str,
        help="Authentication you want to use (mojang or microsoft) | DEFAULT: mojang", default="mojang"
    )
    service_subparsers.add_parser("sshsnipe", help="Snipe a username on a VPS", parents=[parser])
def create_sshinstall_parser() -> None:
    """Register the ``sshinstall`` sub-command for installing GeoSnipe on a VPS."""
    parser = argparse.ArgumentParser(add_help=False)
    parser.add_argument("vps", metavar="vps", type=str, help="The VPS you want to use")
    service_subparsers.add_parser("sshinstall", help="Install GeoSnipe on a VPS", parents=[parser])
def create_version_parser() -> None:
    """Register the ``version`` sub-command that reports the GeoSnipe version."""
    parser = argparse.ArgumentParser(add_help=False)
    service_subparsers.add_parser("version", help="Check the version of GeoSnipe", parents=[parser])
def parse_args() -> argparse.Namespace:
    """Build every sub-command parser, then parse the command line."""
    registrars = (
        create_offset_parser,
        create_sniper_parser,
        create_sshoffset_parser,
        create_sshsniper_parser,
        create_sshinstall_parser,
        create_version_parser,
    )
    for register in registrars:
        register()
    return main_parser.parse_args()
/GSEIM-1.4.tar.gz/GSEIM-1.4/src/grc/gui/canvas/param.py |
from __future__ import absolute_import
import numbers
from .drawable import Drawable
from .. import ParamWidgets, Utils, Constants
from ...core.params import Param as CoreParam
class Param(CoreParam):
    """The graphical parameter.

    Extends the core ``Param`` with GTK-specific concerns: choosing the
    input widget and rendering label / tooltip / on-canvas markup.
    """

    # Rebind the Drawable class-factory helper onto this subclass so the
    # GUI layer can build graphical param classes from core ones.
    make_cls_with_base = classmethod(Drawable.make_cls_with_base.__func__)

    def get_input(self, *args, **kwargs):
        """
        Get the graphical gtk class to represent this parameter.
        An enum requires a combo parameter.
        A non-enum with options gets a combined entry/combo parameter.
        All others get a standard entry parameter.

        Returns:
            gtk input class
        """
        dtype = self.dtype
        if dtype in ('file_open', 'file_save'):
            input_widget_cls = ParamWidgets.FileParam
        elif dtype == 'enum':
            input_widget_cls = ParamWidgets.EnumParam
        elif self.options:
            input_widget_cls = ParamWidgets.EnumEntryParam
        elif dtype == '_multiline':
            input_widget_cls = ParamWidgets.MultiLineEntryParam
        else:
            input_widget_cls = ParamWidgets.EntryParam
        return input_widget_cls(self, *args, **kwargs)

    def format_label_markup(self, have_pending_changes=False):
        """Return Pango markup for the parameter label.

        Blue foreground marks pending (unapplied) changes, red marks an
        invalid value.
        """
        block = self.parent
        # fixme: using non-public attribute here
        # NOTE(review): 'block' is unused and callback detection appears
        # disabled (has_callback is hard-coded to False) — confirm intent.
        has_callback = False
        return '<span {underline} {foreground} font_desc="Sans 11">{label}</span>'.format(
            underline='underline="low"' if has_callback else '',
            foreground='foreground="blue"' if have_pending_changes else
            'foreground="red"' if not self.is_valid() else '',
            label=Utils.encode(self.name)
        )

    def format_tooltip_text(self):
        """Return the multi-line tooltip: key, type, then value or errors."""
        errors = self.get_error_messages()
        tooltip_lines = ['Key: ' + self.key, 'Type: ' + self.dtype]
        if self.is_valid():
            value = str(self.get_evaluated())
            # Keep very long values readable: show head and tail only.
            if len(value) > 100:
                value = '{}...{}'.format(value[:50], value[-50:])
            tooltip_lines.append('Value: ' + value)
        elif len(errors) == 1:
            tooltip_lines.append('Error: ' + errors[0])
        elif len(errors) > 1:
            tooltip_lines.append('Error:')
            tooltip_lines.extend(' * ' + msg for msg in errors)
        return '\n'.join(tooltip_lines)

    def pretty_print(self):
        """
        Get the repr (nice string format) for this param.

        Returns:
            the string representation
        """
        # Truncate helper method
        def _truncate(string, style=0):
            # Budget shrinks as the param name grows, but never below 3.
            max_len = max(27 - len(self.name), 3)
            if len(string) > max_len:
                if style < 0:  # Front truncate
                    string = '...' + string[3-max_len:]
                elif style == 0:  # Center truncate
                    string = string[:max_len//2 - 3] + '...' + string[-max_len//2:]
                elif style > 0:  # Rear truncate
                    string = string[:max_len-3] + '...'
            return string

        # Simple conditions
        value = self.get_value()
        if not self.is_valid():
            return _truncate(value)
        if value in self.options:
            return self.options[value]  # its name

        # Split up formatting by type
        # Default center truncate
        truncate = 0
        e = self.get_evaluated()
        t = self.dtype
        if isinstance(e, bool):
            return str(e)
        elif isinstance(e, numbers.Complex):
            dt_str = Utils.num_to_str(e)
        elif t in ('file_open', 'file_save'):
            # File paths are most recognizable by their tail: front-truncate.
            dt_str = self.get_value()
            truncate = -1
        else:
            # Other types
            dt_str = str(e)

        # Done
        return _truncate(dt_str, truncate)

    def format_block_surface_markup(self):
        """
        Get the markup for this param.

        Returns:
            a pango markup string
        """
        return '<span {foreground} font_desc="{font}"><b>{label}:</b> {value}</span>'.format(
            foreground='foreground="red"' if not self.is_valid() else '', font=Constants.PARAM_FONT,
            label=Utils.encode(self.name), value=Utils.encode(self.pretty_print().replace('\n', ' '))
        )
/Flask-Statics-Helper-1.0.0.tar.gz/Flask-Statics-Helper-1.0.0/flask_statics/static/angular/i18n/angular-locale_ar-td.js | 'use strict';
// Auto-generated Angular $locale definition for Arabic (Chad), id "ar-td".
// Registers date/time names, number/currency patterns and the Arabic
// plural-category rules. Do not edit the data by hand.
angular.module("ngLocale", [], ["$provide", function($provide) {
var PLURAL_CATEGORY = {ZERO: "zero", ONE: "one", TWO: "two", FEW: "few", MANY: "many", OTHER: "other"};
$provide.value("$locale", {
  "DATETIME_FORMATS": {
    "AMPMS": [
      "\u0635",
      "\u0645"
    ],
    "DAY": [
      "\u0627\u0644\u0623\u062d\u062f",
      "\u0627\u0644\u0627\u062b\u0646\u064a\u0646",
      "\u0627\u0644\u062b\u0644\u0627\u062b\u0627\u0621",
      "\u0627\u0644\u0623\u0631\u0628\u0639\u0627\u0621",
      "\u0627\u0644\u062e\u0645\u064a\u0633",
      "\u0627\u0644\u062c\u0645\u0639\u0629",
      "\u0627\u0644\u0633\u0628\u062a"
    ],
    "MONTH": [
      "\u064a\u0646\u0627\u064a\u0631",
      "\u0641\u0628\u0631\u0627\u064a\u0631",
      "\u0645\u0627\u0631\u0633",
      "\u0623\u0628\u0631\u064a\u0644",
      "\u0645\u0627\u064a\u0648",
      "\u064a\u0648\u0646\u064a\u0648",
      "\u064a\u0648\u0644\u064a\u0648",
      "\u0623\u063a\u0633\u0637\u0633",
      "\u0633\u0628\u062a\u0645\u0628\u0631",
      "\u0623\u0643\u062a\u0648\u0628\u0631",
      "\u0646\u0648\u0641\u0645\u0628\u0631",
      "\u062f\u064a\u0633\u0645\u0628\u0631"
    ],
    "SHORTDAY": [
      "\u0627\u0644\u0623\u062d\u062f",
      "\u0627\u0644\u0627\u062b\u0646\u064a\u0646",
      "\u0627\u0644\u062b\u0644\u0627\u062b\u0627\u0621",
      "\u0627\u0644\u0623\u0631\u0628\u0639\u0627\u0621",
      "\u0627\u0644\u062e\u0645\u064a\u0633",
      "\u0627\u0644\u062c\u0645\u0639\u0629",
      "\u0627\u0644\u0633\u0628\u062a"
    ],
    "SHORTMONTH": [
      "\u064a\u0646\u0627\u064a\u0631",
      "\u0641\u0628\u0631\u0627\u064a\u0631",
      "\u0645\u0627\u0631\u0633",
      "\u0623\u0628\u0631\u064a\u0644",
      "\u0645\u0627\u064a\u0648",
      "\u064a\u0648\u0646\u064a\u0648",
      "\u064a\u0648\u0644\u064a\u0648",
      "\u0623\u063a\u0633\u0637\u0633",
      "\u0633\u0628\u062a\u0645\u0628\u0631",
      "\u0623\u0643\u062a\u0648\u0628\u0631",
      "\u0646\u0648\u0641\u0645\u0628\u0631",
      "\u062f\u064a\u0633\u0645\u0628\u0631"
    ],
    "fullDate": "EEEE\u060c d MMMM\u060c y",
    "longDate": "d MMMM\u060c y",
    "medium": "dd\u200f/MM\u200f/y h:mm:ss a",
    "mediumDate": "dd\u200f/MM\u200f/y",
    "mediumTime": "h:mm:ss a",
    "short": "d\u200f/M\u200f/y h:mm a",
    "shortDate": "d\u200f/M\u200f/y",
    "shortTime": "h:mm a"
  },
  "NUMBER_FORMATS": {
    "CURRENCY_SYM": "FCFA",
    "DECIMAL_SEP": "\u066b",
    "GROUP_SEP": "\u066c",
    // PATTERNS[0] is the decimal pattern, PATTERNS[1] the currency pattern.
    "PATTERNS": [
      {
        "gSize": 3,
        "lgSize": 3,
        "maxFrac": 3,
        "minFrac": 0,
        "minInt": 1,
        "negPre": "-",
        "negSuf": "",
        "posPre": "",
        "posSuf": ""
      },
      {
        "gSize": 3,
        "lgSize": 3,
        "maxFrac": 2,
        "minFrac": 2,
        "minInt": 1,
        "negPre": "\u00a4\u00a0-",
        "negSuf": "",
        "posPre": "\u00a4\u00a0",
        "posSuf": ""
      }
    ]
  },
  "id": "ar-td",
  // Arabic pluralization rules (zero/one/two/few/many/other).
  "pluralCat": function(n, opt_precision) { if (n == 0) { return PLURAL_CATEGORY.ZERO; } if (n == 1) { return PLURAL_CATEGORY.ONE; } if (n == 2) { return PLURAL_CATEGORY.TWO; } if (n % 100 >= 3 && n % 100 <= 10) { return PLURAL_CATEGORY.FEW; } if (n % 100 >= 11 && n % 100 <= 99) { return PLURAL_CATEGORY.MANY; } return PLURAL_CATEGORY.OTHER;}
});
}]);
/DbtPy-3.0.5.5.tar.gz/DbtPy-3.0.5.5/WIKI.md | ## DbtPy: 高级原生扩展模块
API描述表
### DbtPy.active
bool DbtPy.active(IFXConnection connection)
**描述**
> 检查IFXConnection是否处于活动状态
**参数**
> connection - 有效的IFXConnection连接
**返回值**
> True - 资源处于活动状态
> False - 资源处于未激活状态
### DbtPy.autocommit
mixed DbtPy.autocommit ( IFXConnection connection [, bool value] )
**描述**
> 返回并设置指定IFXConnection的AUTOCOMMIT行为
**参数**
> connection - 有效的IFXConnection连接
> value - 以下参数之一:
> > SQL_AUTOCOMMIT_OFF
> > SQL_AUTOCOMMIT_ON
**返回值**
> 非指定参数value时:
> > 0 - AUTOCOMMIT值是关闭
> > 1 - AUTOCOMMIT值是打开
> 指定参数value时:
> > True - AUTOCOMMIT值设置成功
> > False - AUTOCOMMIT值未设置成功
### DbtPy.bind_param
bool DbtPy.bind_param (IFXStatement stmt, int parameter-number, string variable [, int parameter-type [, int data-type [, int precision [, int scale [, int size]]]]] )
**描述**
> 将Python变量绑定到DbtPy.prepare()返回的IFXStatement中的SQL语句参数。与简单地将变量作为可选输入元组的一部分传递给DbtPy.execute()相比,该函数为参数类型、数据类型、精度和参数扩展提供了更多的控制。
**参数**
> stmt - 从DbtPy.prepare()返回的预编译语句
> parameter-number - 从序号1开始的参数
> variable - 绑定到parameter-number指定的参数的Python变量
> parameter-type - 指定参数输入、输出的常量:
> > SQL_PARAM_INPUT - 仅输入参数
> > SQL_PARAM_OUTPUT - 仅输出参数
> > SQL_PARAM_INPUT_OUTPUT - 输入及输出参数
> > PARAM_FILE - 数据存储在变量中指定的文件名中,而不是变量本身中。这可以用来避免在内存中存储大量的LOB数据。
> data-type - 指定Python变量应该绑定为的SQL数据类型常量,仅接受以下值:
> > SQL_BINARY
> > CHAR
> > DOUBLE
> > LONG
> precision - 变量的精度
> scale - 变量的精度
**返回值**
> True - 绑定变量成功
> None - 绑定变量不成功
### DbtPy.callproc
( IFXStatement [, ...] ) DbtPy.callproc( IFXConnection connection, string procname [, parameters] )
**描述**
> 调用存储过程。存储过程调用的每个参数(IN/INOUT/OUT)为parameters一个元组。返回的IFXStatement,包含结果集和输入参数的修改副本。IN参数保持不变,INOUT/OUT参数可能会被更新。存储过程可能会返回0个或者多个结果集。使用DbtPy.fetch_assoc(),DbtPy.fetch_both(),或者DbtPy.fetch_tuple()从IFXStatement获取一行tuple/dict。或者,使用DbtPy.fetch_row()将结果集指针移动到下一行,并使用DbtPy.result()一次获取一列。
> 示例参考: test_146_CallSPINAndOUTParams.py,test_148_CallSPDiffBindPattern_01.py 或者 test_52949_TestSPIntVarcharXml.py。
**参数**
> connection - 有效的IFXConnection
> procname - 有效的存储过程名称
> parameters - 包含存储过程所需的任意多个参数的元组
**返回值**
> 成功,包含IFXStatement对象的元组,后跟传递给过程的参数(如果有的话)
> 不成功,值为none
### DbtPy.client_info
object DbtPy.client_info ( IFXConnection connection )
**描述**
> 返回关于客户的只读对象信息
**参数**
> connection - 有效的IFXConnection
**返回值**
> 成功,对象包括如下信息:
> > APPL_CODEPAGE - 应用程序代码页
> > CONN_CODEPAGE - 当前连接的代码页
> > DATA_SOURCE_NAME - 用于创建到数据库的当前连接的数据源名称(DSN)
> > DRIVER_NAME - 实现调用级别接口(CLI)规范的库的名称
> > DRIVER_ODBC_VER - ODBC驱动程序的版本。这将返回一个字符串“MM.mm”,其中MM是主要版本,mm是次要版本。
> > DRIVER_VER - 客户端的版本,以字符串“MM.mm.uuuu”的形式。MM是主版本,mm是次版本,uuuu是更新版本。例如,“08.02.0001”表示主版本8,次版本2,更新1。
> > ODBC_SQL_CONFORMANCE - 客户机支持三种级别的ODBC SQL语法
> > > MINIMAL -支持最小ODBC SQL语法
> > > CORE - 支持核心ODBC SQL语法
> > > EXTENDED - 支持扩展ODBC SQL语法
> > ODBC_VER - ODBC驱动程序管理器支持的ODBC版本。以字符串“MM.mm.rrrr”的形式。MM是主版本,mm是次版本,rrrr是更新版本。客户端总是返回"03.01.0000"
> 不成功,False
### DbtPy.close
bool DbtPy.close ( IFXConnection connection )
**描述**
> 关闭指定的IFXConnection
**参数**
> connection - 有效的IFXConnection
**返回值**
> True为成功,False为失败
### DbtPy.column_privileges
IFXStatement DbtPy.column_privileges ( IFXConnection connection [, string qualifier [, string schema [, string table-name [, string column-name]]]] )
**描述**
> 返回一个结果集,包含列出表的列和相关权限。
**参数**
> connection - 有效的IFXConnection
> schema - 包含表的模式。如果要匹配所有模式,请传递None或空字符串。
> table-name - 表或视图的名称。如果要匹配数据库中的所有表,请传递None或空字符串。
> column-name - 列的名称。如果要匹配表中的所有列,请传递None或空字符串。
**返回值**
> IFXStatement其结果集包含以下列的行
> > TABLE_CAT - catalog的名称。如果数据库没有catalog,则为Nono。
> > TABLE_SCHEM - schema的名称。
> > TABLE_NAME - 表或者视图的名称。
> > COLUMN_NAME - 字段名称。
> > GRANTOR - 授予权限者。
> > GRANTEE - 被授权者。
> > PRIVILEGE - 字段权限。
> > IS_GRANTABLE - 是否允许授权给他人。
### DbtPy.columns
IFXStatement DbtPy.columns ( IFXConnection connection [, string qualifier [, string schema [, string table-name [, string column-name]]]] )
**描述**
> 返回列出表的列和相关元数据的结果集。
**参数**
> connection - 有效的IFXConnection
> schema - 包含表的模式。如果要匹配所有模式,请传递'%'。
> table-name - 表或视图的名称。如果要匹配数据库中的所有表,请传递None或空字符串。
> column-name - 列的名称。如果要匹配表中的所有列,请传递None或空字符串。
**返回值**
> IFXStatement其结果集包含以下列的行
> > TABLE_CAT - catalog的名称。如果数据库没有catalog,则为Nono。
> > TABLE_SCHEM - schema的名称。
> > TABLE_NAME - 表或者视图的名称。
> > COLUMN_NAME - 字段名称。
> > DATA_TYPE - 表示为整数值的列的SQL数据类型。
> > TYPE_NAME - 表示列的数据类型的字符串。
> > COLUMN_SIZE - 表示列大小的整数值。
> > BUFFER_LENGTH - 存储来自此列的数据所需的最大字节数。
> > DECIMAL_DIGITS - 列的刻度,如果不适用刻度,则为None。
> > NUM_PREC_RADIX - 整数值,可以是10(表示精确的数字数据类型),2(表示近似的数字数据类型),或者None(表示基数不适用的数据类型)。
> > NULLABLE - 整数值,表示列是否可为空。
> > REMARKS - 字段描述信息。
> > COLUMN_DEF - 字段默认值。
> > SQL_DATA_TYPE - 列的SQL数据类型。
> > SQL_DATETIME_SUB - 表示datetime子类型代码的整数值,对于不适用此值的SQL数据类型,则为None。
> > CHAR_OCTET_LENGTH - 字符数据类型列的最大字节长度,对于单字节字符集数据,该长度与列大小匹配,对于非字符数据类型,该长度为None。
> > ORDINAL_POSITION - 列在表中的索引位置(以1开始)。
> > IS_NULLABLE - 字符串值中的“YES”表示该列可为空,“NO”表示该列不可为空。
### DbtPy.commit
bool DbtPy.commit ( IFXConnection connection )
**描述**
> 在指定的IFXConnection上提交一个正在进行的事务,并开始一个新的事务。
> Python应用程序通常默认为自动提交模式,所以没有必要使用DbtPy.commit(),除非在IFXConnection中关闭了自动提交。
> 注意: 如果指定的IFXConnection是一个持久连接,则所有使用该持久连接的应用程序正在进行的所有事务都将被提交。因此,不建议在需要事务的应用程序中使用持久连接。
**参数**
> connection - 有效的IFXConnection
**返回值**
> True为成功,False为失败
### DbtPy.conn_error
string DbtPy.conn_error ( [IFXConnection connection] )
**描述**
> 如果没有传递任何参数,则返回表示上一次数据库连接失败原因的SQLSTATE。
> 当传递一个由DbtPy.connect()返回的有效IFXConnection时,返回SQLSTATE,表示上次使用IFXConnection的操作失败的原因。
**参数**
> connection - 有效的IFXConnection
**返回值**
> 返回包含SQLSTATE值的字符串,如果没有错误,则返回空字符串。
### DbtPy.conn_errormsg
string DbtPy.conn_errormsg ( [IFXConnection connection] )
**描述**
> 如果没有传递任何参数,则返回一个字符串,其中包含SQLCODE和表示上次数据库连接尝试失败的错误消息。
> 当传递一个由DbtPy.connect()返回的有效的IFXConnection时,返回一个字符串,其中包含SQLCODE和错误消息,表示上次使用IFXConnection的操作失败的原因。
**参数**
> connection - 有效的IFXConnection
**返回值**
> 返回包含SQLCODE和错误消息的字符串,如果没有错误,则返回空字符串。
### DbtPy.connect
IFXConnection DbtPy.connect(string ConnectionString, string user, string password [, dict options [, constant replace_quoted_literal])
**描述**
> 创建一个新到GBase 8s数据库的连接
**参数**
> ConnectionString以下格式的连接字符串,"PROTOCOL=onsoctcp;HOST=192.168.0.100;SERVICE=9088;SERVER=gbase01;DATABASE=testdb;DB_LOCALE=zh_CN.utf8;CLIENT_LOCALE=zh_CN.utf8",参考GBase 8s数据库连接参数,其中常用的参数如下:
> > PROTOCOL - 协议类型,常用有onsoctcp, olsoctcp等。
> > HOST - 数据库服务器的主机名或者IP地址。
> > SERVICE - 数据库服务器的侦听端口。
> > SERVER - 数据库服务名称/实例名称。
> > DATABASE - 数据库名称。
> > DB_LOCALE - 数据库服务使用的字符集。
> > CLIENT_LOCALE - 数据库客户端使用的字符集。
> user - 连接到数据库的用户名称。
> password - 用户的密码。
**返回值**
> 成功,返回IFXConnection对象
> 不成功,None
### DbtPy.cursor_type
int DbtPy.cursor_type ( IFXStatement stmt )
**描述**
> 返回IFXStatement使用的游标类型。使用此参数可确定您使用的是只向前游标还是可滚动游标。
**参数**
> stmt - 有效的IFXStatement.
**返回值**
> 以下值之一:
> > SQL_CURSOR_FORWARD_ONLY
> > SQL_CURSOR_KEYSET_DRIVEN
> > SQL_CURSOR_DYNAMIC
> > SQL_CURSOR_STATIC
### DbtPy.dropdb
bool DbtPy.dropdb ( IFXConnection connection, string dbName )
**描述**
> 删除指定的数据库
**参数**
> connection - 有效的IFXConnection
> dbName - 将要删除的数据库名称
**返回值**
> 删除成功返回True,否则返回None。
### DbtPy.exec_immediate
stmt_handle DbtPy.exec_immediate( IFXConnection connection, string statement [, dict options] )
**描述**
> 准备并执行一条SQL语句。
> 如果您计划使用不同的参数重复地执行相同的SQL语句,请考虑调用DbtPy.prepare()和DbtPy.execute(),以使数据库服务器能够复用其访问计划,并提高数据库访问的效率。
> 如果您计划将Python变量插入到SQL语句中,请理解这是一种更常见的安全性暴露。考虑调用DbtPy.prepare()来为输入值准备带有参数标记的SQL语句。然后可以调用DbtPy.execute()传入输入值并避免SQL注入攻击。
**参数**
> connection - 有效的IFXConnection
> statement - 一个SQL语句。语句不能包含任何参数标记。
> options -包含语句选项的dict。
> > SQL_ATTR_CURSOR_TYPE - 将游标类型设置为以下类型之一(并非所有数据库都支持)
> > > SQL_CURSOR_FORWARD_ONLY
> > > SQL_CURSOR_KEYSET_DRIVEN
> > > SQL_CURSOR_DYNAMIC
> > > SQL_CURSOR_STATIC
**返回值**
> 如果成功发出SQL语句,则返回一个stmt句柄资源;如果数据库执行SQL语句失败,则返回False。
### DbtPy.execute
bool DbtPy.execute ( IFXStatement stmt [, tuple parameters] )
**描述**
> DbtPy.execute()执行由DbtPy.prepare()准备的SQL语句。如果SQL语句返回一个结果集,例如,返回一个或多个结果集的SELECT语句,则可以使用DbtPy.fetch_assoc(),DbtPy.fetch_both() 或 DbtPy.fetch_tuple()从stmt资源中检索作为元组或字典的行。
> 或者,您可以使用DbtPy.fetch_row()将结果集指针移动到下一行,并使用DbtPy.result()从该行每次获取一列。有关使用DbtPy.prepare()和DbtPy.execute()而不是使用DbtPy.exec_immediate()的优点的简短讨论,请参阅DbtPy.prepare()。要执行存储过程,参考DbtPy.callproc()。
**参数**
> stmt - 从DbtPy.prepare()返回的预编译语句。
> parameters - 匹配预置语句中包含的任何参数标记的输入参数元组。
**返回值**
> 成功返回Ture,失败返回False
### DbtPy.execute_many
mixed DbtPy.execute_many( IFXStatement stmt, tuple seq_of_parameters )
**描述**
> 对在参数序列找到的所有参数序列或映射执行由DbtPy.prepare()准备的SQL语句。
**参数**
> stmt - 从DbtPy.prepare()返回的预编译语句。
> seq_of_parameters - 一个元组的元组,每个元组都包含与预备语句中包含的参数标记相匹配的输入参数。
**返回值**
> 成功,返回(insert/update/delete)操作的行数
> 不成功,返回None。使用DbtPy.num_rows()查询(inserted/updated/deleted)操作的行数。
### DbtPy.fetch_tuple
tuple DbtPy.fetch_tuple ( IFXStatement stmt [, int row_number] )
**描述**
> 返回按列位置索引的元组,表示结果集中的行。
**参数**
> stmt - 包含结果集的有效stmt资源。
> row_number - 从结果集中请求特定的索引为1开始的行。如果结果集中使用只向前游标,传递此参数将导致警告。
**返回值**
> 返回一个元组,其中包含所有结果集的列值为选定的行,如果没有指定行号则为下一行。
> 如果没有行结果集,或者请求的行结果集的行号不存在,返回False。
### DbtPy.fetch_assoc
dict DbtPy.fetch_assoc ( IFXStatement stmt [, int row_number] )
**描述**
> 返回以列名为索引的dict,表示结果集中的行。
**参数**
> stmt - 包含结果集的有效stmt资源。
> row_number - 从结果集中请求特定的索引为1开始的行。如果结果集中使用只向前游标,传递此参数将导致警告。
**返回值**
> 返回一个元组,其中包含所有结果集的列值为选定的行,如果没有指定行号则为下一行。
> 如果没有行结果集,或者请求的行结果集的行号不存在,返回False。
### DbtPy.fetch_both
dict DbtPy.fetch_both ( IFXStatement stmt [, int row_number] )
**描述**
> 返回按列名称和位置索引的字典,表示结果集中的行。
**参数**
> stmt - 包含结果集的有效stmt资源。
> row_number - 从结果集中请求特定的索引为1开始的行。如果结果集中使用只向前游标,传递此参数将导致警告。
**返回值**
> 返回一个dict,其中包含所有按列名索引的列值,如果未指定行号,则按0索引的列号索引选定行或下一行。
> 如果结果集中没有剩下的行,或者行号请求的行在结果集中不存在,则返回False。
### DbtPy.fetch_row
bool DbtPy.fetch_row ( IFXStatement stmt [, int row_number] )
**描述**
> 将结果集指针设置为下一行或请求的行。
> 使用DbtPy.fetch row()用于遍历结果集,或者在请求可滚动游标时指向结果集中的特定行。
> 要从结果集中检索单个字段,请调用DbtPy.result()函数。而不是调用DbtPy.fetch_row()和DbtPy.result(),大多数应用程序将调用DbtPy.fetch_assoc(),DbtPy.fetch_both() 或 DbtPy.fetch_tuple() 中的一个来推进结果集指针并返回完整的行。
**参数**
> stmt - 包含结果集的有效stmt资源。
> row_number - 从结果集中请求特定的索引为1开始的行。如果结果集中使用只向前游标,传递此参数将导致警告。
**返回值**
> 如果请求的行存在于结果集中,则返回True。
> 如果请求的行不存在于结果集中,则返回False。
### DbtPy.field_display_size
int DbtPy.field_display_size ( IFXStatement stmt, mixed column )
**描述**
> 返回显示结果集中列所需的最大字节数。
**参数**
> stmt - 包含结果集的有效stmt资源。
> column - 指定结果集中的列。可以是表示列的0索引位置的整数,也可以是包含列名称的字符串。
**返回值**
> 返回显示指定列所需的最大字节数的整数值;
> 如果列不存在,则返回False。
### DbtPy.field_name
string DbtPy.field_name ( IFXStatement stmt, mixed column )
**描述**
> 返回结果集中指定列的名称。
**参数**
> stmt - 包含结果集的有效stmt资源。
> column - 指定结果集中的列。可以是表示列的0索引位置的整数,也可以是包含列名称的字符串。
**返回值**
> 返回一个包含指定列名称的字符串;
> 如果列不存在则返回False。
### DbtPy.field_num
int DbtPy.field_num ( IFXStatement stmt, mixed column )
**描述**
> 返回指定列在结果集中的位置。
**参数**
> stmt - 包含结果集的有效stmt资源。
> column - 指定结果集中的列。可以是表示列的0索引位置的整数,也可以是包含列名称的字符串。
**返回值**
> 返回一个整数,其中包含指定列的0索引位置;
> 如果列不存在,则返回False。
### DbtPy.field_precision
int DbtPy.field_precision ( IFXStatement stmt, mixed column )
**描述**
> 返回结果集中指定列的精度。
**参数**
> stmt - 包含结果集的有效stmt资源。
> column - 指定结果集中的列。可以是表示列的0索引位置的整数,也可以是包含列名称的字符串。
**返回值**
> 返回一个包含指定列精度的整数;
> 如果列不存在,则返回False。
### DbtPy.field_scale
int DbtPy.field_scale ( IFXStatement stmt, mixed column )
**描述**
> 返回结果集中指定列的比例。
**参数**
> stmt - 包含结果集的有效stmt资源。
> column - 指定结果集中的列。可以是表示列的0索引位置的整数,也可以是包含列名称的字符串。
**返回值**
> 返回一个包含指定列的比例的整数;
> 如果列不存在则返回False。
### DbtPy.field_type
string DbtPy.field_type ( IFXStatement stmt, mixed column )
**描述**
> 返回结果集中指定列的数据类型。
**参数**
> stmt - 包含结果集的有效stmt资源。
> column - 指定结果集中的列。可以是表示列的0索引位置的整数,也可以是包含列名称的字符串。
**返回值**
> 返回一个字符串,其中包含指定列的定义数据类型;
> 如果列不存在,则返回False。
### DbtPy.field_width
int DbtPy.field_width ( IFXStatement stmt, mixed column )
**描述**
> 返回结果集中指定列的当前值的宽度。对于定长数据类型,这是列的最大宽度;对于变长数据类型,这是列的实际宽度。
**参数**
> stmt - 包含结果集的有效stmt资源。
> column - 指定结果集中的列。可以是表示列的0索引位置的整数,也可以是包含列名称的字符串。
**返回值**
> 返回一个包含指定字符或二进制列宽度的整数;
> 如果列不存在,则为False。
### DbtPy.foreign_keys
IFXStatement DbtPy.foreign_keys ( IFXConnection connection, string qualifier, string schema, string table-name )
**描述**
> 返回列出表的外键的结果集。
**参数**
> connection - 有效的IFXConnection
> schema - 包含表的模式。如果schema为None,则使用连接的当前模式。
> table-name - 表名
**返回值**
> 返回一个IFXStatement,其结果集包含以下列:
> > PKTABLE_CAT - 包含主键的表的catalog名称。如果该表没有catalog,则该值为None。
> > PKTABLE_SCHEM - 包含主键的表的模式名。
> > PKTABLE_NAME - 包含主键的表的名称。
> > PKCOLUMN_NAME - 包含主键的列的名称。
> > KEY_SEQ - 列在键中的1开始的索引位置。
> > UPDATE_RULE - 整数值,表示更新SQL操作时应用于外键的操作。
> > DELETE_RULE - 整数值,表示删除SQL操作时应用于外键的操作。
> > FK_NAME - 外键名称。
> > PK_NAME - 主键名称。
> > DEFERRABILITY - 一个整数值,表示外键可延期性是SQL_INITIALLY_DEFERRED, SQL_INITIALLY_IMMEDIATE 还是 SQL_NOT_DEFERRABLE。
### DbtPy.free_result
bool DbtPy.free_result ( IFXStatement stmt )
**描述**
> 释放与结果集关联的系统和IFXConnections资源。这些资源在脚本结束时被隐式释放,但是您可以在脚本结束前调用DbtPy.free_result()来显式释放结果集资源。
**参数**
> stmt - 包含结果集的有效stmt资源。
**返回值**
> 成功返回True,失败返回False
### DbtPy.free_stmt
bool DbtPy.free_stmt ( IFXStatement stmt ) (DEPRECATED)
**描述**
> 释放与结果集关联的系统和IFXStatement资源。这些资源在脚本结束时被隐式释放,但是您可以在脚本结束前调用DbtPy.free_stmt()来显式释放结果集资源。
> 该API已弃用。应用程序应该使用DbtPy.free_result代替。
**参数**
> stmt - 包含结果集的有效stmt资源。
**返回值**
> 成功返回True,失败返回False
### DbtPy.get_option
mixed DbtPy.get_option ( mixed resc, int options, int type )
**描述**
> 返回连接或语句属性的当前设置的值。
**参数**
> resc - 有效的IFXConnection 或者 IFXStatement
> options - 要检索的选项
> type - 资源类型
> > 0 - IFXStatement
> > 1 - IFXConnection
**返回值**
> 返回所提供的资源属性的当前设置。
### DbtPy.next_result
IFXStatement DbtPy.next_result ( IFXStatement stmt )
**描述**
> 请求存储过程中的下一个结果集。存储过程可以返回零个或多个结果集。
> 虽然您处理第一个结果集的方式与处理简单SELECT语句返回的结果完全相同,但要从存储过程获取第二个和随后的结果集,必须调DbtPy.next_result()函数,并将结果返回给唯一命名的Python变量。
**参数**
> stmt - 从DbtPy.exec_immediate() 或者 DbtPy.execute()返回的预处理语句。
**返回值**
> 如果存储过程返回另一个结果集,则返回包含下一个结果集的新的IFXStatement。
> 如果存储过程没有返回另一个结果集,则返回False。
### DbtPy.num_fields
int DbtPy.num_fields ( IFXStatement stmt )
**描述**
> 返回结果集中包含的字段的数量。这对于处理动态生成的查询返回的结果集或存储过程返回的结果集最有用,否则应用程序无法知道如何检索和使用结果。
**参数**
> stmt - 包含结果集的有效stmt资源。
**返回值**
> 返回一个整数值,表示与指定的IFXStatement相关联的结果集中字段的数量。
> 如果stmt不是一个有效的IFXStatement对象,则返回False。
### DbtPy.num_rows
int DbtPy.num_rows ( IFXStatement stmt )
**描述**
> 返回SQL语句delete,insert或者update的行数。
> 要确定SELECT语句将返回的行数,请使用与预期的SELECT语句相同的谓词发出SELECT COUNT(*)并检索值。
**参数**
> stmt - 包含结果集的有效stmt资源。
**返回值**
> 返回受指定语句句柄发出的最后一条SQL语句影响的行数。
### DbtPy.prepare
IFXStatement DbtPy.prepare ( IFXConnection connection, string statement [, dict options] )
**描述**
> 创建一个预编译的SQL语句,该语句可以包括0个或多个参数标记(?字符)表示输入、输出或输入/输出的参数。您可以使用DbtPy.bind_param()将参数传递给预编译的语句。或仅用于输入值,作为传递给DbtPy.execute()的元组。
> 在应用程序中使用准备好的语句有两个主要优点
> > 性能:预编译一条语句时,数据库服务器会创建一个优化的访问计划,以便使用该语句检索数据。随后使用DbtPy.execute()发出预编译的语句,使语句能够重用该访问计划,并避免为发出的每个语句动态创建新的访问计划的开销。
> > 安全:在预编译语句中,可以为输入值包括参数标记。当使用占位符的输入值执行准备好的语句时,数据库服务器会检查每个输入值,以确保类型与列定义或参数定义匹配。
**参数**
> connection - 有效的IFXConnection
> statement - SQL语句,可选地包含一个或多个参数标记。
> options - 包含语句选项的dict。
> > SQL_ATTR_CURSOR_TYPE - 将游标类型设置为以下类型之一(并非所有数据库都支持)
> > > SQL_CURSOR_FORWARD_ONLY
> > > SQL_CURSOR_KEYSET_DRIVEN
> > > SQL_CURSOR_DYNAMIC
> > > SQL_CURSOR_STATIC
**返回值**
> 如果数据库服务器成功地解析和准备了SQL语句,则返回一个IFXStatement对象;
> 如果数据库服务器返回错误,则返回False。
### DbtPy.primary_keys
IFXStatement DbtPy.primary_keys ( IFXConnection connection, string qualifier, string schema, string table-name )
**描述**
> 返回列出表的主键的结果集。
**参数**
> connection - 有效的IFXConnection
> schema - 包含表的schema。如果schema为None,则使用连接的当前模式。
> table-name - 表名
**返回值**
> 返回一个IFXStatement,其结果集包含以下列:
> > TABLE_CAT - 包含主键的表的catalog名称。如果该表没有catalog,则该值为None。
> > TABLE_SCHEM - 包含主键的schema的名称。
> > TABLE_NAME - 包含主键的表的名称。
> > COLUMN_NAME - 包含主键的列的名称。
> > KEY_SEQ - 列在键中的从1开始索引的位置。
> > PK_NAME - 主键的名称
### DbtPy.procedure_columns
IFXStatement DbtPy.procedure_columns ( IFXConnection connection, string qualifier, string schema, string procedure, string parameter )
**描述**
> 返回一个结果集,列出一个或多个存储过程的参数
**参数**
> connection - 有效的IFXConnection
> schema - 包含过程的模式。该参数接受包含 _ 和 % 作为通配符的搜索模式。
> procedure - 存储过程的名称。该参数接受包含 _ 和 % 作为通配符的搜索模式。
> parameter - 参数名称。该参数接受包含 _ 和 % 作为通配符的搜索模式。如果该参数为None,返回所有的参数。
**返回值**
> 返回一个IFXStatement,其结果集包含以下列:
> > PROCEDURE_CAT - 包含存储过程的catalog名称。如果该存储过程没有catalog,则该值为None。
> > PROCEDURE_SCHEM - 包含存储过程的schema名称
> > PROCEDURE_NAME - 存储过程的名称。
> > COLUMN_NAME - 参数的名称。
> > COLUMN_TYPE - 表示参数类型的整数值:
> > > 1 ( SQL_PARAM_INPUT ) - 输入参数 (IN).
> > > 2 ( SQL_PARAM_INPUT _OUTPUT) - 输入输出参数 (INOUT).
> > > 3 ( SQL_PARAM_OUTPUT ) - 输出参数 (OUT).
> > DATA_TYPE - 表示为整数值的参数的SQL数据类型。
> > TYPE_NAME - 表示参数的数据类型的字符串。
> > COLUMN_SIZE - 表示参数大小的整数值。
> > BUFFER_LENGTH - 存储此参数的数据所需的最大字节数。
> > DECIMAL_DIGITS - 参数的刻度,如果刻度不适用,则为None。
> > NUM_PREC_RADIX - 一个整数值,可以是10(表示精确的数字数据类型),2(表示近似的数字数据类型),或者None(表示基数不适用的数据类型)。
> > NULLABLE - 一个整数值,表示参数是否可为空。
> > REMARKS - 参数的描述。
> > COLUMN_DEF - 参数的默认值。
> > SQL_DATA_TYPE - 表示参数大小的整数值。
> > SQL_DATETIME_SUB - 返回表示datetime子类型代码的整数值,对于不适用此方法的SQL数据类型,则返回None。
> > CHAR_OCTET_LENGTH - 字符数据类型参数的最大字节长度,对于单字节字符集数据,该参数匹配COLUMN_SIZE,对于非字符数据类型,该参数为None。
> > ORDINAL_POSITION - 参数在CALL语句中的以1开始为索引的位置。
> > IS_NULLABLE - 一个字符串值,其中'YES'表示参数接受或返回无值,'NO'表示参数不接受或返回无值。
### DbtPy.procedures
resource DbtPy.procedures ( IFXConnection connection, string qualifier, string schema, string procedure )
**描述**
> 返回一个结果集,列出在数据库中注册的存储过程。
**参数**
> connection - 有效的IFXConnection
> schema - 包含过程的模式。该参数接受包含 _ 和 % 作为通配符的搜索模式。
> procedure - 存储过程的名称。该参数接受包含 _ 和 % 作为通配符的搜索模式。
**返回值**
> 返回一个IFXStatement,其结果集包含以下列:
> > PROCEDURE_CAT - 包含存储过程的catalog名称。如果该存储过程没有catalog,则该值为None。
> > PROCEDURE_SCHEM - 包含存储过程的schema名称
> > PROCEDURE_NAME - 存储过程的名称。
> > NUM_INPUT_PARAMS - 存储过程的输入参数 (IN) 的数目。
> > NUM_OUTPUT_PARAMS - 存储过程的输出参数 (OUT) 的数目。
> > NUM_RESULT_SETS - 存储过程返回的结果集的数目。
> > REMARKS - 存储过程的描述。
> > PROCEDURE_TYPE - 总是返回1,表示存储过程不返回返回值。
### DbtPy.result
mixed DbtPy.result ( IFXStatement stmt, mixed column )
**描述**
> 使用DbtPy.result()返回结果集中当前行的指定列的值。你必须在调用DbtPy.result()之前调用DbtPy.fetch_row()来设置结果集指针的位置。
**参数**
> stmt - 包含结果集的有效stmt资源。
> column - 映射到结果集中以0开始的索引的字段的整数,或者匹配列名称的字符串。
**返回值**
> 如果结果集中存在请求的字段,则返回该字段的值。
> 如果该字段不存在,则返回None,并发出警告。
### DbtPy.rollback
bool DbtPy.rollback ( IFXConnection connection )
**描述**
> 回滚指定的IFXConnection上正在进行的事务,并开始一个新的事务。
> Python应用程序通常默认为自动提交模式,因此DbtPy.rollback()通常没有效果,除非在IFXConnection中关闭了自动提交。
> 注意:如果指定的IFXConnection是一个持久连接,那么使用该持久连接的所有应用程序的所有正在进行的事务都将回滚。因此,不建议在需要事务的应用程序中使用持久连接。
**参数**
> connection - 有效的IFXConnection
**返回值**
> 成功返回True,不成功返回False。
### DbtPy.server_info
IFXServerInfo DbtPy.server_info ( IFXConnection connection )
**描述**
> 返回一个只读对象,其中包含有关GBase 8s服务器的信息。
**参数**
> connection - 有效的IFXConnection
**返回值**
> 成功时,一个包含以下字段的对象:
> > DBMS_NAME - 连接到的数据库服务器的名称。
> > DBMS_VER - 数据库的版本号,格式为"MM.mm.uuuu",其中 MM 是主版本号,mm 是次版本号,uuuu 是更新版本号。例:"08.02.0001"
> > DB_CODEPAGE - 连接到的数据库的代码页。(int)
> > DB_NAME - 连接到的数据库的名称。(string)
> > DFT_ISOLATION - 服务器支持的默认事务隔离级别: (string)
> > > UR - Uncommitted read: 所有并发事务都可以立即看到更改。
> > > CS - Cursor stability: 一个事务读取的行可以被第二个并发事务修改和提交。
> > > RS - Read stability: 事务可以添加或删除匹配搜索条件或待处理事务的行。
> > > RR - Repeatable read: 受待处理事务影响的数据对其他事务不可用。
> > > NC - No commit: 在成功的操作结束时,任何更改都是可见的。不允许显式提交和回滚。
> > IDENTIFIER_QUOTE_CHAR - 用于分隔标识符的字符。 (string)
> > INST_NAME - 包含数据库的数据库服务器上的实例名称。 (string)
> > ISOLATION_OPTION - 数据库服务器支持的隔离级别元组。隔离级别在DFT_ISOLATION属性中进行了描述。 (tuple)
> > KEYWORDS - 数据库服务器保留的关键字的元组。(tuple)
> > LIKE_ESCAPE_CLAUSE - 如果数据库服务器支持使用%和_通配符,则为True。如果数据库服务器不支持这些通配符,则为False。(bool)
> > MAX_COL_NAME_LEN - 数据库服务器支持的列名的最大长度,单位为字节。(int)
> > MAX_IDENTIFIER_LEN - 数据库服务器支持的SQL标识符的最大长度,以字符表示。(int)
> > MAX_INDEX_SIZE - 数据库服务器支持的索引中合并列的最大大小(以字节表示)。(int)
> > MAX_PROC_NAME_LEN - 数据库服务器支持的过程名的最大长度,以字节表示。(int)
> > MAX_ROW_SIZE - 数据库服务器支持的基表中一行的最大长度,以字节表示。(int)
> > MAX_SCHEMA_NAME_LEN - 数据库服务器支持的模式名的最大长度,以字节表示。(int)
> > MAX_STATEMENT_LEN - 数据库服务器支持的SQL语句的最大长度,以字节表示。(int)
> > MAX_TABLE_NAME_LEN - 数据库服务器支持的表名的最大长度,以字节表示。(int)
> > NON_NULLABLE_COLUMNS - 如果数据库服务器支持定义为NOT NULL的列,则为True;如果数据库服务器不支持定义为NOT NULL的列,则为False。(bool)
> > PROCEDURES - 如果数据库服务器支持使用CALL语句调用存储过程,则为True;如果数据库服务器不支持CALL语句,则为False。(bool)
> > SPECIAL_CHARS - 包含除A-z、0-9和下划线之外的所有可用于标识符名称的字符的字符串。(string)
> > SQL_CONFORMANCE - 数据库服务器提供的符合ANSI或ISO SQL-92规范的级别:(string)
> > > ENTRY - 入门级SQL-92兼容性。
> > > FIPS127 - FIPS-127-2过渡兼容性。
> > > FULL - 完全SQL-92兼容。
> > > INTERMEDIATE - 中级SQL-92兼容
> 失败时,返回False
### DbtPy.set_option
bool DbtPy.set_option ( mixed resc, dict options, int type )
**描述**
> 为IFXConnection 或者 IFXStatement设置选项。不能为结果集资源设置选项。
**参数**
> resc - 有效的IFXConnection 或者 IFXStatement.
> options - 要设置的选项
> type - 指定resc类型的字段
> > 0 - IFXStatement
> > 1 - IFXConnection
**返回值**
> 成功返回True,不成功返回False。
### DbtPy.special_columns
IFXStatement DbtPy.special_columns ( IFXConnection connection, string qualifier, string schema, string table_name, int scope )
**描述**
> 返回一个结果集,列出表的唯一行标识符列。
**参数**
> connection - 有效的IFXConnection
> schema - 表所属的schema
> table_name - 表名
> scope - 表示唯一行标识符有效的最小持续时间的整数值。这可以是以下值之一:
> > 0 - 行标识符仅在游标位于行上时有效。(SQL_SCOPE_CURROW)
> > 1 - 行标识符在事务的持续时间内有效。(SQL_SCOPE_TRANSACTION)
> > 2 - 行标识符在连接期间有效。(SQL_SCOPE_SESSION)
**返回值**
> 返回一个IFXStatement,其结果集包含以下列:
> > SCOPE - 表示唯一行标识符有效的最小持续时间的整数值
> > > 0 - 行标识符仅在游标位于行上时有效。(SQL_SCOPE_CURROW)
> > > 1 - 行标识符在事务的持续时间内有效。(SQL_SCOPE_TRANSACTION)
> > > 2 - 行标识符在连接期间有效。(SQL_SCOPE_SESSION)
> > COLUMN_NAME - 唯一列的名称。
> > DATA_TYPE - 列的SQL数据类型。
> > TYPE_NAME - 列的SQL数据类型的字符串表示形式。
> > COLUMN_SIZE - 表示列大小的整数值。
> > BUFFER_LENGTH - 存储此列的数据所需的最大字节数。
> > DECIMAL_DIGITS - 列的刻度,如果不适用刻度,则为None。
> > NUM_PREC_RADIX - 一个整数值,10(表示精确的数字数据类型),2(表示近似的数字数据类型),或None(表示基数不适用的数据类型)。
> > PSEUDO_COLUMN - 总是返回 1。
### DbtPy.statistics
IFXStatement DbtPy.statistics ( IFXConnection connection, string qualifier, string schema, string table_name, bool unique )
**描述**
> 返回一个结果集,列出表的索引和统计信息。
**参数**
> connection - 有效的IFXConnection
> schema - 包含表的schema。如果该参数为None,则返回当前用户模式的统计信息和索引。
> table_name - 表名。
> unique - 一个布尔值,表示要返回的索引信息的类型。
> > False - 只返回表上惟一索引的信息。
> > True - 返回表中所有索引的信息。
**返回值**
> 返回一个IFXStatement,其结果集包含以下列:
> > TABLE_CAT - 包含表格的catalog。如果该表没有catalog,则该值为None。
> > TABLE_SCHEM - 包含表的模式的名称。
> > TABLE_NAME - 表名。
> > NON_UNIQUE - 一个整数值,表示索引是否禁止唯一值,或者行是否表示表本身的统计信息:
> > > 0 (SQL_FALSE) - 索引允许重复的值。
> > > 1 (SQL_TRUE) - 索引值必须唯一。
> > > None - 这一行是表本身的统计信息。
> > INDEX_QUALIFIER - 表示限定符的字符串值,该限定符必须预先添加到INDEX_NAME以完全限定索引。
> > INDEX_NAME - 表示索引名称的字符串。
> > TYPE - 一个整数值,表示结果集中这一行中包含的信息的类型:
> > > 0 (SQL_TABLE_STAT) - 该行包含有关表本身的统计信息。
> > > 1 (SQL_INDEX_CLUSTERED) - 该行包含关于聚集索引的信息。
> > > 2 (SQL_INDEX_HASH) - 该行包含有关散列索引的信息。
> > > 3 (SQL_INDEX_OTHER) - 该行包含有关既没有聚集也没有散列的索引类型的信息。
> > ORDINAL_POSITION - 索引中列的以1为开始的索引位置。如果行包含有关表本身的统计信息,则为None。
> > COLUMN_NAME - 索引中列的名称。如果行包含有关表本身的统计信息,则为None。
> > ASC_OR_DESC - A表示列按升序排序,D表示列按降序排序,如果行包含关于表本身的统计信息,则为None。
> > CARDINALITY - 如果行包含有关索引的信息,则此列包含一个整数值,表示索引中惟一值的数目。如果行包含关于表本身的信息,则此列包含一个整数值,表示表中的行数。
> > PAGES - 如果行包含有关索引的信息,则此列包含一个整数值,表示用于存储索引的页数。如果行包含关于表本身的信息,则此列包含一个整数值,表示用于存储表的页数。
> > FILTER_CONDITION - 总是返回None。
### DbtPy.stmt_error
string DbtPy.stmt_error ( [IFXStatement stmt] )
**描述**
> 当没有传递任何参数时,返回表示上次通过IFXStatement执行DbtPy.prepare(), DbtPy.exec_immediate() 或者 DbtPy.callproc() 返回的SQLSTATE
> 当传递一个有效的IFXStatement时,返回SQLSTATE,表示上次使用资源的操作失败的原因。
**参数**
> stmt - 有效的IFXStatement.
**返回值**
> 返回包含SQLSTATE值的字符串,如果没有错误,则返回空字符串。
### DbtPy.stmt_errormsg
string DbtPy.stmt_errormsg ( [IFXStatement stmt] )
**描述**
> 当没有传递任何参数时,返回表示上次通过IFXStatement执行DbtPy.prepare(), DbtPy.exec_immediate() 或者 DbtPy.callproc() 返回的SQLCODE及错误信息
> 当传递一个有效的IFXStatement时,返回SQLCODE及错误信息,表示上次使用资源的操作失败的原因。
**参数**
> stmt - 有效的IFXStatement.
**返回值**
> 返回包含SQLCODE值的字符串,如果没有错误,则返回空字符串。
### DbtPy.table_privileges
IFXStatement DbtPy.table_privileges ( IFXConnection connection [, string qualifier [, string schema [, string table_name]]] )
**描述**
> 返回一个结果集,列出数据库中的表和相关权限。
**参数**
> connection - 有效的IFXConnection
> schema - 包含表的模式。该参数接受包含_和%作为通配符的搜索模式。
> table_name - 表名。该参数接受包含_和%作为通配符的搜索模式。
**返回值**
> 返回一个IFXStatement,其结果集包含以下列:
> > TABLE_CAT - 包含表的catalog。如果该表没有catalog,则该值为None。
> > TABLE_SCHEM - 包含表的schema。
> > TABLE_NAME - 表名。
> > GRANTOR - 授予权限者。
> > GRANTEE - 被授权者。
> > PRIVILEGE - 被授予的权限。这可以是ALTER、CONTROL、DELETE、INDEX、INSERT、REFERENCES、SELECT或UPDATE之一。
> > IS_GRANTABLE - 字符串值“YES”或“NO”,表示被授权人是否可以将该权限授予其他用户。
### DbtPy.tables
IFXStatement DbtPy.tables ( IFXConnection connection [, string qualifier [, string schema [, string table-name [, string table-type]]]] )
**描述**
> 返回一个结果集,列出数据库中的表和相关元数据
**参数**
> connection - 有效的IFXConnection
> schema - 包含表的模式。该参数接受包含_和%作为通配符的搜索模式。
> table-name - 表名。该参数接受包含_和%作为通配符的搜索模式。
> table-type -以逗号分隔的表类型标识符列表。要匹配所有表类型,请传递None或空字符串。
> > ALIAS
> > HIERARCHY TABLE
> > INOPERATIVE VIEW
> > NICKNAME
> > MATERIALIZED QUERY TABLE
> > SYSTEM TABLE
> > TABLE
> > TYPED TABLE
> > TYPED VIEW
> > VIEW
**返回值**
返回一个IFXStatement,其结果集包含以下列:
> TABLE_CAT - 包含表的catalog。如果该表没有catalog,则该值为None。
> TABLE_SCHEMA - 包含表的模式的名称。
> TABLE_NAME - 表名。
> TABLE_TYPE - 表的表类型标识符。
> REMARKS - 表的描述。
| PypiClean |
/MPT5.0.1.2-0.1.2.tar.gz/MPT5.0.1.2-0.1.2/src/MPT5/Config/Init.py |
import sys
import os
import shutil
#### Main Application Path = MAP , Os name and Slash of OS
# MAP: root directory all resource paths below are resolved against.
# NOTE(review): os.getcwd() is the *current working directory*, not the
# package directory — assumes the app is always launched from its own
# folder; confirm the launch convention.
MAP = os.getcwd()
# 'nt' on Windows, 'posix' elsewhere.
MyOsIs = os.name
# Platform path separator (equivalent to os.sep).
if sys.platform == u'win32':
    SLASH = u'\\'
else:
    SLASH = u'/'
#### Some Utility function
def opj(path):
    """Convert a '/'-separated path to the platform-specific separator.

    :param path: path string that uses '/' as its separator
    :return: the same path re-joined with the native separator; a leading
             '/' is re-attached explicitly because os.path.join drops the
             empty first component produced by split.
    """
    # split() already returns a list, which unpacks directly; the original
    # wrapped it in a redundant tuple() call.
    st = os.path.join(*path.split('/'))
    # HACK: on Linux, a leading / gets lost...
    if path.startswith('/'):
        st = '/' + st
    return st
def _displayHook(obj):
"""
Custom display hook to prevent Python stealing '_'.
"""
if obj is not None:
print(repr(obj))
#### Path of Program source for use inside of program
# Application resource directories, all rooted at MAP and terminated with
# the platform separator so callers can concatenate filenames directly.
DATABASE_PATH = os.path.join(MAP,opj(u'Database')+SLASH)
AI_PATH = os.path.join(MAP,opj(u'AI')+SLASH)
ML_PATH = os.path.join(AI_PATH,opj(u'MLA')+SLASH)
DL_PATH = os.path.join(AI_PATH,opj(u'DL')+SLASH)
DCC_PATH = os.path.join(MAP,opj(u'DCC1')+SLASH)
GUI_PATH = os.path.join(MAP,opj(u'GUI')+SLASH)
RES_PATH = os.path.join(MAP,opj(u'Res')+SLASH)
SRC_PATH = os.path.join(MAP,opj(u'Src')+SLASH)
# Icon sub-directories (by pixel size and by UI area).
ICONS_PATH = os.path.join(RES_PATH,opj(u'Icons')+SLASH)
ICON16_PATH = os.path.join(ICONS_PATH,opj(u'16x16')+SLASH)
ICON32_PATH = os.path.join(ICONS_PATH,opj(u'32x32')+SLASH)
ICONS_MENU = os.path.join(ICONS_PATH,opj(u'Menu')+SLASH)
ICONS_TOOL = os.path.join(ICONS_PATH,opj(u'Toolbar')+SLASH)
PICS_PATH = os.path.join(RES_PATH,opj(u'Pics')+SLASH)
IMAGE_PATH = os.path.join(RES_PATH,opj(u'Images')+SLASH)
# NOTE(review): "SPALSH" is a typo for "Splash"; the name is public API
# of this module, so it is kept as-is to avoid breaking importers.
SPALSH_PATH = os.path.join(RES_PATH,opj(u'Splash')+SLASH)
UTILITY_PATH = os.path.join(MAP,opj(u'Utility')+SLASH)
CONFIG_PATH = os.path.join(MAP,opj(u'Config')+SLASH)
LOGS_PATH = os.path.join(MAP,opj(u'Logs')+SLASH)
LOCALE_PATH = os.path.join(MAP,opj(u'Locale')+SLASH)
TEMPS_PATH = os.path.join(MAP,opj(u'Temps')+SLASH)
#### Source pass for user
# Per-category source directories under SRC_PATH.
Src_api = os.path.join(SRC_PATH,opj(u'API')+SLASH)
Src_aui = os.path.join(SRC_PATH,opj(u'AUI')+SLASH)
Src_dbf = os.path.join(SRC_PATH,opj(u'DBF')+SLASH)
Src_gui = os.path.join(SRC_PATH,opj(u'GUI')+SLASH)
Src_mla = os.path.join(SRC_PATH,opj(u'MLA')+SLASH)
Src_mlp = os.path.join(SRC_PATH,opj(u'MLP')+SLASH)
Src_prg = os.path.join(SRC_PATH,opj(u'PRG')+SLASH)
# Category name -> numeric id (presumably widget/menu ids; verify against callers).
Src_Dir = {'PRG':6111,'API':6122,'MLA':6133,'MLP':6144,'AUI':6155,'DBF':6166,'GUI':6177}
# Reverse lookup: directory path -> category name.
Src_Pth = {Src_prg:'PRG',Src_api:'API',Src_mla:'MLA',Src_mlp:'MLP',Src_aui:'AUI',Src_dbf:'DBF',Src_gui:'GUI'}
#### List of language to use in application
# UI languages and supported database backends, keyed by 1-based index.
LANGUAGE_LIST = {1:"English",2:"Farsi",3:"French",4:"German",5:"Spanish",6:"Turkish"}
Database_type = {1:"sqlite",2:"mysql",3:"postgresql",4:"oracle",5:"sqlserver"}
#### other function to use in application
def thistxt(filename):
    """Read *filename* from LOGS_PATH and return its text.

    Each line of the file is prefixed with a newline in the returned
    string (so the result starts with '\\n' and lines that already end in
    '\\n' appear double-spaced) — this mirrors the historical behavior.
    """
    with open(LOGS_PATH + filename, mode='r', encoding='utf-8') as f:
        return ''.join('\n' + line for line in f.readlines())
def fil2txt(filename):
    """Return the full text content of *filename* decoded as UTF-8.

    :param filename: path of the file to read
    :return: the file's contents as a single string

    The original built the result by concatenating readlines() output in a
    loop (quadratic string growth); joining all lines is exactly f.read().
    """
    with open(filename, mode='r', encoding='utf-8') as f:
        return f.read()
def OpenListML():
    """Parse CONFIG_PATH/MLmethod.ini into two lookup dicts.

    :return: a tuple (MLlst, MLAlg) where
             MLlst maps (id1, id2) int pairs -> method-group name
                   (taken from lines of the form "GroupName: id1;id2"), and
             MLAlg maps (id1, id2) int pairs -> algorithm name
                   (taken from indented lines "AlgName, id1;id2").
    """
    with open(CONFIG_PATH + u'MLmethod.ini', mode='r', encoding='utf-8') as f:
        lines = f.readlines()
    MLlst = {}
    MLAlg = {}
    for line in lines:
        if ':' in line:
            # Group line: "GroupName: id1;id2"
            parts = line.split(':')
            ids = parts[1].split(';')
            MLlst[(int(ids[0]), int(ids[1].rstrip('\n')))] = parts[0].strip(' ')
        # Algorithm line: four-space-indented "AlgName, id1;id2".
        # BUG FIX: the original compared the 4-char slice lines[t][:4]
        # against a marker literal that could not match it, so MLAlg was
        # never populated.  NOTE(review): the original literal looks
        # whitespace-mangled in transit; this assumes the intended marker
        # is a four-space indent — confirm against MLmethod.ini.
        if ',' in line and line[:4] == '    ':
            parts = line.split(',')
            ids = parts[1].split(';')
            MLAlg[(int(ids[0]), int(ids[1].rstrip('\n')))] = parts[0].strip(' ')
    return MLlst, MLAlg
def CopyIcon(iconfile, thssrc=''):
    """Copy *iconfile* into the icon directory selected by *thssrc*.

    'Menu' -> ICONS_MENU, 'Toolbar' -> ICONS_TOOL, anything else (including
    the default empty string) -> ICONS_PATH.
    """
    destination = {'Menu': ICONS_MENU, 'Toolbar': ICONS_TOOL}.get(thssrc, ICONS_PATH)
    shutil.copy(iconfile, destination)
/ASGIWebDAV-1.3.2.tar.gz/ASGIWebDAV-1.3.2/asgi_webdav/property.py | from dataclasses import dataclass, field
from asgi_webdav.constants import DAVPath, DAVPropertyIdentity, DAVTime
from asgi_webdav.helpers import generate_etag
@dataclass
class DAVPropertyBasicData:
    """Basic ("live") WebDAV properties of a single resource.

    Used both to render PROPFIND property values (as_dict) and to build
    the headers of GET/HEAD responses (get_get_head_response_headers).
    """

    is_collection: bool  # True for a collection (directory-like) resource
    display_name: str
    creation_date: DAVTime
    last_modified: DAVTime
    # resource_type: str = field(init=False)
    content_type: str | None = field(default=None)
    content_charset: str | None = None
    content_length: int = field(default=0)
    content_encoding: str | None = None

    def __post_init__(self):
        # Fill in a default MIME type when the caller did not supply one.
        # https://developer.mozilla.org/zh-CN/docs/Web/HTTP/Basics_of_HTTP/MIME_types
        if self.content_type is None:
            if self.is_collection:
                # self.content_type = "httpd/unix-directory"
                self.content_type = "application/index"
            else:
                self.content_type = "application/octet-stream"
        # Normalize an explicitly-passed None length (size unknown) to 0.
        if self.content_length is None:
            self.content_length = 0

    @property
    def etag(self) -> str:
        # Entity tag derived from size + mtime: changes whenever either changes.
        return generate_etag(self.content_length, self.last_modified.timestamp)

    def get_get_head_response_headers(self) -> dict[bytes, bytes]:
        """Build the response headers for a GET/HEAD request on this resource."""
        # Only text/* types carry a charset parameter.
        if self.content_type.startswith("text/") and self.content_charset:
            content_type = "{}; charset={}".format(
                self.content_type, self.content_charset
            )
        else:
            content_type = self.content_type
        headers = {
            b"ETag": self.etag.encode("utf-8"),
            b"Last-Modified": self.last_modified.http_date().encode("utf-8"),
            b"Content-Type": content_type.encode("utf-8"),
        }
        if self.is_collection:
            # Collections have no body, so no length/encoding headers.
            return headers
        headers[b"Content-Length"] = str(self.content_length).encode("utf-8")
        if self.content_encoding:
            # BUG FIX: the standard header name is "Content-Encoding"
            # (RFC 9110); the original emitted a non-standard
            # "Content-Encodings" that clients would ignore.
            headers[b"Content-Encoding"] = self.content_encoding.encode("utf-8")
        return headers

    def as_dict(self) -> dict[str, str | int]:
        """Return the properties as a PROPFIND-style mapping.

        Keys are DAV property names; "getcontentlength" is an int, all
        other values are strings (the original's dict[str, str] annotation
        was inaccurate).
        """
        data: dict[str, str | int] = {
            "displayname": self.display_name,
            "getetag": self.etag,
            "creationdate": self.creation_date.dav_creation_date(),
            "getlastmodified": self.last_modified.http_date(),
            "getcontenttype": self.content_type,
        }
        if self.is_collection:
            return data
        data["getcontentlength"] = self.content_length
        if self.content_encoding:
            data["encoding"] = self.content_encoding  # TODO ???
        return data
@dataclass
class DAVProperty:
    """Complete property bundle for one WebDAV resource, keyed by href path."""

    # href_path = passport.prefix + passport.src_path + child
    # or = request.src_path + child
    # child maybe is empty
    href_path: DAVPath
    # True when the resource is a collection (directory-like).
    is_collection: bool
    # basic_data: dict[str, str]
    # Live properties: display name, timestamps, content metadata.
    basic_data: DAVPropertyBasicData
    # Dead (user-defined) properties, keyed by their (namespace, name) identity.
    extra_data: dict[DAVPropertyIdentity, str] = field(default_factory=dict)
    # Names of requested extra properties that the resource does not carry.
    extra_not_found: list[str] = field(default_factory=list)
/Flask-CKEditor-0.4.6.tar.gz/Flask-CKEditor-0.4.6/flask_ckeditor/static/full/plugins/specialchar/dialogs/lang/no.js | /*
Copyright (c) 2003-2020, CKSource - Frederico Knabben. All rights reserved.
For licensing, see LICENSE.md or https://ckeditor.com/legal/ckeditor-oss-license
*/
// Generated localization table for CKEditor's Special Character dialog (locale "no", Norwegian).
// Machine-built minified file: maps special-character entity names to display labels — do not edit by hand.
CKEDITOR.plugins.setLang("specialchar","no",{euro:"Eurosymbol",lsquo:"Venstre enkelt anførselstegn",rsquo:"Høyre enkelt anførselstegn",ldquo:"Venstre dobbelt anførselstegn",rdquo:"Høyre anførsesltegn",ndash:"Kort tankestrek",mdash:"Lang tankestrek",iexcl:"Omvendt utropstegn",cent:"Centsymbol",pound:"Pundsymbol",curren:"Valutategn",yen:"Yensymbol",brvbar:"Brutt loddrett strek",sect:"Paragraftegn",uml:"Tøddel",copy:"Copyrighttegn",ordf:"Feminin ordensindikator",laquo:"Venstre anførselstegn",not:"Negasjonstegn",
reg:"Registrert varemerke-tegn",macr:"Makron",deg:"Gradsymbol",sup2:"Hevet totall",sup3:"Hevet tretall",acute:"Akutt aksent",micro:"Mikrosymbol",para:"Avsnittstegn",middot:"Midtstilt prikk",cedil:"Cedille",sup1:"Hevet ettall",ordm:"Maskulin ordensindikator",raquo:"Høyre anførselstegn",frac14:"Fjerdedelsbrøk",frac12:"Halvbrøk",frac34:"Tre fjerdedelers brøk",iquest:"Omvendt spørsmålstegn",Agrave:"Stor A med grav aksent",Aacute:"Stor A med akutt aksent",Acirc:"Stor A med cirkumfleks",Atilde:"Stor A med tilde",
Auml:"Stor A med tøddel",Aring:"Stor Å",AElig:"Stor Æ",Ccedil:"Stor C med cedille",Egrave:"Stor E med grav aksent",Eacute:"Stor E med akutt aksent",Ecirc:"Stor E med cirkumfleks",Euml:"Stor E med tøddel",Igrave:"Stor I med grav aksent",Iacute:"Stor I med akutt aksent",Icirc:"Stor I med cirkumfleks",Iuml:"Stor I med tøddel",ETH:"Stor Edd/stungen D",Ntilde:"Stor N med tilde",Ograve:"Stor O med grav aksent",Oacute:"Stor O med akutt aksent",Ocirc:"Stor O med cirkumfleks",Otilde:"Stor O med tilde",Ouml:"Stor O med tøddel",
times:"Multiplikasjonstegn",Oslash:"Stor Ø",Ugrave:"Stor U med grav aksent",Uacute:"Stor U med akutt aksent",Ucirc:"Stor U med cirkumfleks",Uuml:"Stor U med tøddel",Yacute:"Stor Y med akutt aksent",THORN:"Stor Thorn",szlig:"Liten dobbelt-s/Eszett",agrave:"Liten a med grav aksent",aacute:"Liten a med akutt aksent",acirc:"Liten a med cirkumfleks",atilde:"Liten a med tilde",auml:"Liten a med tøddel",aring:"Liten å",aelig:"Liten æ",ccedil:"Liten c med cedille",egrave:"Liten e med grav aksent",eacute:"Liten e med akutt aksent",
ecirc:"Liten e med cirkumfleks",euml:"Liten e med tøddel",igrave:"Liten i med grav aksent",iacute:"Liten i med akutt aksent",icirc:"Liten i med cirkumfleks",iuml:"Liten i med tøddel",eth:"Liten edd/stungen d",ntilde:"Liten n med tilde",ograve:"Liten o med grav aksent",oacute:"Liten o med akutt aksent",ocirc:"Liten o med cirkumfleks",otilde:"Liten o med tilde",ouml:"Liten o med tøddel",divide:"Divisjonstegn",oslash:"Liten ø",ugrave:"Liten u med grav aksent",uacute:"Liten u med akutt aksent",ucirc:"Liten u med cirkumfleks",
uuml:"Liten u med tøddel",yacute:"Liten y med akutt aksent",thorn:"Liten thorn",yuml:"Liten y med tøddel",OElig:"Stor ligatur av O og E",oelig:"Liten ligatur av o og e",372:"Stor W med cirkumfleks",374:"Stor Y med cirkumfleks",373:"Liten w med cirkumfleks",375:"Liten y med cirkumfleks",sbquo:"Enkelt lavt 9-anførselstegn",8219:"Enkelt høyt reversert 9-anførselstegn",bdquo:"Dobbelt lavt 9-anførselstegn",hellip:"Ellipse",trade:"Varemerkesymbol",9658:"Svart høyrevendt peker",bull:"Tykk interpunkt",rarr:"Høyrevendt pil",
rArr:"Dobbel høyrevendt pil",hArr:"Dobbel venstrevendt pil",diams:"Svart ruter",asymp:"Omtrent likhetstegn"});
/MyStudyChat_client-1.0.1-py3-none-any.whl/common/utils.py |
import json
import os
import socket
import sys
# sys.path.append(os.path.join(os.getcwd(), '../..'))
from .decorator import logs
from .variables import MAX_LEN_MSG, ENCODE, ANS_104, ANS_400, ALERT
@logs
def send_message(msg: dict, sock: socket.socket) -> bool:
    """Serialize *msg* to JSON, encode it with ENCODE and send it via *sock*.

    :param msg: dict to serialize into a JSON message
    :param sock: connected socket to write the encoded bytes to
    :return: True when the message was sent; False on invalid arguments,
             serialization errors, or connection errors.
    """
    # Validate up front instead of raising/catching our own TypeError:
    # the original raised TypeError inside its own try block and, in some
    # of its except branches, fell through returning None even though the
    # function is annotated -> bool.
    if not isinstance(msg, dict) or not isinstance(sock, socket.socket):
        return False
    try:
        # sendall() retries partial writes; the original used send(),
        # which may transmit only part of the payload.  The sibling code
        # in this project (doConnect) already uses sendall().
        sock.sendall(json.dumps(msg).encode(ENCODE))
        return True
    except (TypeError, ValueError):
        # msg contained values json cannot serialize
        return False
    except OSError:
        # covers ConnectionResetError / ConnectionAbortedError as well
        return False
@logs
def get_message(sock: socket.socket) -> dict:
    """Receive up to MAX_LEN_MSG bytes from *sock* and decode them into a dict.

    :param sock: socket to read from
    :return: the decoded dict on success;
             ANS_400 ("bad request/JSON object") when *sock* is not a socket,
             the bytes cannot be decoded, or the JSON is not an object;
             ANS_104 (connection error, with ALERT set to the exception
             class) when the connection failed.
    """
    if not isinstance(sock, socket.socket):
        return ANS_400
    try:
        byte_json_msg = sock.recv(MAX_LEN_MSG)
        data = json.loads(byte_json_msg.decode(ENCODE))
        # A valid message must be a JSON object (dict), not a bare value.
        return data if isinstance(data, dict) else ANS_400
    except (TypeError, ValueError):
        # Undecodable bytes or malformed JSON (JSONDecodeError is a
        # ValueError).  The original used a bare `except:` that swallowed
        # every exception and silently returned None for unexpected ones;
        # now only the anticipated failures are handled.
        return ANS_400
    except OSError as err:
        # Covers ConnectionResetError / ConnectionAbortedError too.
        # Preserve the original behavior of reporting the exception class.
        ANS_104[ALERT] = type(err)
        return ANS_104
/FreePyBX-1.0-RC1.tar.gz/FreePyBX-1.0-RC1/freepybx/public/js/dojox/mobile.js | This is an optimized version of Dojo, built for deployment and not for
development. To get sources and documentation, please visit:
http://dojotoolkit.org
*/
//>>built
require({cache:{"dojox/mobile/ViewController":function(){define(["dojo/_base/kernel","dojo/_base/array","dojo/_base/connect","dojo/_base/declare","dojo/_base/lang","dojo/_base/window","dojo/dom","dojo/dom-class","dojo/dom-construct","dojo/on","dojo/ready","dijit/registry","./ProgressIndicator","./TransitionEvent"],function(_1,_2,_3,_4,_5,_6,_7,_8,_9,on,_a,_b,_c,_d){var dm=_5.getObject("dojox.mobile",true);var _e=_4("dojox.mobile.ViewController",null,{constructor:function(){this.viewMap={};this.currentView=null;this.defaultView=null;_a(_5.hitch(this,function(){on(_6.body(),"startTransition",_5.hitch(this,"onStartTransition"));}));},findCurrentView:function(_f,src){if(_f){var w=_b.byId(_f);if(w&&w.getShowingView){return w.getShowingView();}}if(dm.currentView){return dm.currentView;}w=src;while(true){w=w.getParent();if(!w){return null;}if(_8.contains(w.domNode,"mblView")){break;}}return w;},onStartTransition:function(evt){evt.preventDefault();if(!evt.detail||(evt.detail&&!evt.detail.moveTo&&!evt.detail.href&&!evt.detail.url&&!evt.detail.scene)){return;}var w=this.findCurrentView(evt.detail.moveTo,(evt.target&&evt.target.id)?_b.byId(evt.target.id):_b.byId(evt.target));if(!w||(evt.detail&&evt.detail.moveTo&&w===_b.byId(evt.detail.moveTo))){return;}if(evt.detail.href){var t=_b.byId(evt.target.id).hrefTarget;if(t){dm.openWindow(evt.detail.href,t);}else{w.performTransition(null,evt.detail.transitionDir,evt.detail.transition,evt.target,function(){location.href=evt.detail.href;});}return;}else{if(evt.detail.scene){_3.publish("/dojox/mobile/app/pushScene",[evt.detail.scene]);return;}}var _10=evt.detail.moveTo;if(evt.detail.url){var id;if(dm._viewMap&&dm._viewMap[evt.detail.url]){id=dm._viewMap[evt.detail.url];}else{var _11=this._text;if(!_11){if(_b.byId(evt.target.id).sync){_1.xhrGet({url:evt.detail.url,sync:true,load:function(_12){_11=_5.trim(_12);}});}else{var s="dojo/_base/xhr";require([s],_5.hitch(this,function(xhr){var 
_13=_c.getInstance();_6.body().appendChild(_13.domNode);_13.start();var obj=xhr.get({url:evt.detail.url,handleAs:"text"});obj.addCallback(_5.hitch(this,function(_14,_15){_13.stop();if(_14){this._text=_14;new _d(evt.target,{transition:evt.detail.transition,transitionDir:evt.detail.transitionDir,moveTo:_10,href:evt.detail.href,url:evt.detail.url,scene:evt.detail.scene},evt.detail).dispatch();}}));obj.addErrback(function(_16){_13.stop();});}));return;}}this._text=null;id=this._parse(_11,_b.byId(evt.target.id).urlTarget);if(!dm._viewMap){dm._viewMap=[];}dm._viewMap[evt.detail.url]=id;}_10=id;w=this.findCurrentView(_10,_b.byId(evt.target.id))||w;}w.performTransition(_10,evt.detail.transitionDir,evt.detail.transition,null,null);},_parse:function(_17,id){var _18,_19,i,j,len;var _1a=this.findCurrentView();var _1b=_b.byId(id)&&_b.byId(id).containerNode||_7.byId(id)||_1a&&_1a.domNode.parentNode||_6.body();var _1c=null;for(j=_1b.childNodes.length-1;j>=0;j--){var c=_1b.childNodes[j];if(c.nodeType===1){if(c.getAttribute("fixed")==="bottom"){_1c=c;}break;}}if(_17.charAt(0)==="<"){_18=_9.create("DIV",{innerHTML:_17});for(i=0;i<_18.childNodes.length;i++){var n=_18.childNodes[i];if(n.nodeType===1){_19=n;break;}}if(!_19){return;}_19.style.visibility="hidden";_1b.insertBefore(_18,_1c);var ws=_1.parser.parse(_18);_2.forEach(ws,function(w){if(w&&!w._started&&w.startup){w.startup();}});for(i=0,len=_18.childNodes.length;i<len;i++){_1b.insertBefore(_18.firstChild,_1c);}_1b.removeChild(_18);_b.byNode(_19)._visible=true;}else{if(_17.charAt(0)==="{"){_18=_9.create("DIV");_1b.insertBefore(_18,_1c);this._ws=[];_19=this._instantiate(eval("("+_17+")"),_18);for(i=0;i<this._ws.length;i++){var w=this._ws[i];w.startup&&!w._started&&(!w.getParent||!w.getParent())&&w.startup();}this._ws=null;}}_19.style.display="none";_19.style.visibility="visible";return _1.hash?"#"+_19.id:_19.id;},_instantiate:function(obj,_1d,_1e){var _1f;for(var key in obj){if(key.charAt(0)=="@"){continue;}var 
cls=_5.getObject(key);if(!cls){continue;}var _20={};var _21=cls.prototype;var _22=_5.isArray(obj[key])?obj[key]:[obj[key]];for(var i=0;i<_22.length;i++){for(var _23 in _22[i]){if(_23.charAt(0)=="@"){var val=_22[i][_23];_23=_23.substring(1);if(typeof _21[_23]=="string"){_20[_23]=val;}else{if(typeof _21[_23]=="number"){_20[_23]=val-0;}else{if(typeof _21[_23]=="boolean"){_20[_23]=(val!="false");}else{if(typeof _21[_23]=="object"){_20[_23]=eval("("+val+")");}}}}}}_1f=new cls(_20,_1d);if(_1d){_1f._visible=true;this._ws.push(_1f);}if(_1e&&_1e.addChild){_1e.addChild(_1f);}this._instantiate(_22[i],null,_1f);}}return _1f&&_1f.domNode;}});new _e();return _e;});},"dojox/mobile/RoundRect":function(){define(["dojo/_base/array","dojo/_base/declare","dojo/_base/window","dijit/_Contained","dijit/_Container","dijit/_WidgetBase"],function(_24,_25,win,_26,_27,_28){return _25("dojox.mobile.RoundRect",[_28,_27,_26],{shadow:false,buildRendering:function(){this.domNode=this.containerNode=this.srcNodeRef||win.doc.createElement("DIV");this.domNode.className=this.shadow?"mblRoundRect mblShadow":"mblRoundRect";},resize:function(){_24.forEach(this.getChildren(),function(_29){if(_29.resize){_29.resize();}});}});});},"dojox/mobile/RoundRectList":function(){define(["dojo/_base/array","dojo/_base/declare","dojo/_base/window","dijit/_Contained","dijit/_Container","dijit/_WidgetBase"],function(_2a,_2b,win,_2c,_2d,_2e){return 
_2b("dojox.mobile.RoundRectList",[_2e,_2d,_2c],{transition:"slide",iconBase:"",iconPos:"",select:"",stateful:false,buildRendering:function(){this.domNode=this.containerNode=this.srcNodeRef||win.doc.createElement("UL");this.domNode.className="mblRoundRectList";},resize:function(){_2a.forEach(this.getChildren(),function(_2f){if(_2f.resize){_2f.resize();}});},onCheckStateChanged:function(_30,_31){},_setStatefulAttr:function(_32){this.stateful=_32;_2a.forEach(this.getChildren(),function(_33){_33.setArrow&&_33.setArrow();});},deselectItem:function(_34){_34.deselect();},deselectAll:function(){_2a.forEach(this.getChildren(),function(_35){_35.deselect&&_35.deselect();});},selectItem:function(_36){_36.select();}});});},"dojox/mobile/sniff":function(){define(["dojo/_base/window","dojo/_base/sniff"],function(win,has){var ua=navigator.userAgent;has.add("bb",ua.indexOf("BlackBerry")>=0&&parseFloat(ua.split("Version/")[1])||undefined,undefined,true);has.add("android",parseFloat(ua.split("Android ")[1])||undefined,undefined,true);if(ua.match(/(iPhone|iPod|iPad)/)){var p=RegExp.$1.replace(/P/,"p");var v=ua.match(/OS ([\d_]+)/)?RegExp.$1:"1";var os=parseFloat(v.replace(/_/,".").replace(/_/g,""));has.add(p,os,undefined,true);has.add("iphone",os,undefined,true);}if(has("webkit")){has.add("touch",(typeof win.doc.documentElement.ontouchstart!="undefined"&&navigator.appVersion.indexOf("Mobile")!=-1)||!!has("android"),undefined,true);}return has;});},"dojox/mobile/TransitionEvent":function(){define(["dojo/_base/declare","dojo/_base/Deferred","dojo/_base/lang","dojo/on","./transition"],function(_37,_38,_39,on,_3a){return _37("dojox.mobile.TransitionEvent",null,{constructor:function(_3b,_3c,_3d){this.transitionOptions=_3c;this.target=_3b;this.triggerEvent=_3d||null;},dispatch:function(){var _3e={bubbles:true,cancelable:true,detail:this.transitionOptions,triggerEvent:this.triggerEvent};var 
evt=on.emit(this.target,"startTransition",_3e);if(evt){_38.when(_3a,_39.hitch(this,function(_3f){_38.when(_3f.call(this,evt),_39.hitch(this,function(_40){this.endTransition(_40);}));}));}},endTransition:function(_41){on.emit(this.target,"endTransition",{detail:_41.transitionOptions});}});});},"dijit/_WidgetBase":function(){define("dijit/_WidgetBase",["require","dojo/_base/array","dojo/aspect","dojo/_base/config","dojo/_base/connect","dojo/_base/declare","dojo/dom","dojo/dom-attr","dojo/dom-class","dojo/dom-construct","dojo/dom-geometry","dojo/dom-style","dojo/_base/kernel","dojo/_base/lang","dojo/on","dojo/ready","dojo/Stateful","dojo/topic","dojo/_base/window","./registry"],function(_42,_43,_44,_45,_46,_47,dom,_48,_49,_4a,_4b,_4c,_4d,_4e,on,_4f,_50,_51,win,_52){if(!_4d.isAsync){_4f(0,function(){var _53=["dijit/_base/manager"];_42(_53);});}var _54={};function _55(obj){var ret={};for(var _56 in obj){ret[_56.toLowerCase()]=true;}return ret;};function _57(_58){return function(val){_48[val?"set":"remove"](this.domNode,_58,val);this._set(_58,val);};};return _47("dijit._WidgetBase",_50,{id:"",_setIdAttr:"domNode",lang:"",_setLangAttr:_57("lang"),dir:"",_setDirAttr:_57("dir"),textDir:"","class":"",_setClassAttr:{node:"domNode",type:"class"},style:"",title:"",tooltip:"",baseClass:"",srcNodeRef:null,domNode:null,containerNode:null,attributeMap:{},_blankGif:_45.blankGif||_42.toUrl("dojo/resources/blank.gif"),postscript:function(_59,_5a){this.create(_59,_5a);},create:function(_5b,_5c){this.srcNodeRef=dom.byId(_5c);this._connects=[];this._supportingWidgets=[];if(this.srcNodeRef&&(typeof this.srcNodeRef.id=="string")){this.id=this.srcNodeRef.id;}if(_5b){this.params=_5b;_4e.mixin(this,_5b);}this.postMixInProperties();if(!this.id){this.id=_52.getUniqueId(this.declaredClass.replace(/\./g,"_"));}_52.add(this);this.buildRendering();if(this.domNode){this._applyAttributes();var 
_5d=this.srcNodeRef;if(_5d&&_5d.parentNode&&this.domNode!==_5d){_5d.parentNode.replaceChild(this.domNode,_5d);}}if(this.domNode){this.domNode.setAttribute("widgetId",this.id);}this.postCreate();if(this.srcNodeRef&&!this.srcNodeRef.parentNode){delete this.srcNodeRef;}this._created=true;},_applyAttributes:function(){var _5e=this.constructor,_5f=_5e._setterAttrs;if(!_5f){_5f=(_5e._setterAttrs=[]);for(var _60 in this.attributeMap){_5f.push(_60);}var _61=_5e.prototype;for(var _62 in _61){if(_62 in this.attributeMap){continue;}var _63="_set"+_62.replace(/^[a-z]|-[a-zA-Z]/g,function(c){return c.charAt(c.length-1).toUpperCase();})+"Attr";if(_63 in _61){_5f.push(_62);}}}_43.forEach(_5f,function(_64){if(this.params&&_64 in this.params){}else{if(this[_64]){this.set(_64,this[_64]);}}},this);for(var _65 in this.params){this.set(_65,this[_65]);}},postMixInProperties:function(){},buildRendering:function(){if(!this.domNode){this.domNode=this.srcNodeRef||_4a.create("div");}if(this.baseClass){var _66=this.baseClass.split(" ");if(!this.isLeftToRight()){_66=_66.concat(_43.map(_66,function(_67){return _67+"Rtl";}));}_49.add(this.domNode,_66);}},postCreate:function(){},startup:function(){if(this._started){return;}this._started=true;_43.forEach(this.getChildren(),function(obj){if(!obj._started&&!obj._destroyed&&_4e.isFunction(obj.startup)){obj.startup();obj._started=true;}});},destroyRecursive:function(_68){this._beingDestroyed=true;this.destroyDescendants(_68);this.destroy(_68);},destroy:function(_69){this._beingDestroyed=true;this.uninitialize();var c;while(c=this._connects.pop()){c.remove();}var w;while(w=this._supportingWidgets.pop()){if(w.destroyRecursive){w.destroyRecursive();}else{if(w.destroy){w.destroy();}}}this.destroyRendering(_69);_52.remove(this.id);this._destroyed=true;},destroyRendering:function(_6a){if(this.bgIframe){this.bgIframe.destroy(_6a);delete this.bgIframe;}if(this.domNode){if(_6a){_48.remove(this.domNode,"widgetId");}else{_4a.destroy(this.domNode);}delete 
this.domNode;}if(this.srcNodeRef){if(!_6a){_4a.destroy(this.srcNodeRef);}delete this.srcNodeRef;}},destroyDescendants:function(_6b){_43.forEach(this.getChildren(),function(_6c){if(_6c.destroyRecursive){_6c.destroyRecursive(_6b);}});},uninitialize:function(){return false;},_setStyleAttr:function(_6d){var _6e=this.domNode;if(_4e.isObject(_6d)){_4c.set(_6e,_6d);}else{if(_6e.style.cssText){_6e.style.cssText+="; "+_6d;}else{_6e.style.cssText=_6d;}}this._set("style",_6d);},_attrToDom:function(_6f,_70,_71){_71=arguments.length>=3?_71:this.attributeMap[_6f];_43.forEach(_4e.isArray(_71)?_71:[_71],function(_72){var _73=this[_72.node||_72||"domNode"];var _74=_72.type||"attribute";switch(_74){case "attribute":if(_4e.isFunction(_70)){_70=_4e.hitch(this,_70);}var _75=_72.attribute?_72.attribute:(/^on[A-Z][a-zA-Z]*$/.test(_6f)?_6f.toLowerCase():_6f);_48.set(_73,_75,_70);break;case "innerText":_73.innerHTML="";_73.appendChild(win.doc.createTextNode(_70));break;case "innerHTML":_73.innerHTML=_70;break;case "class":_49.replace(_73,_70,this[_6f]);break;}},this);},get:function(_76){var _77=this._getAttrNames(_76);return this[_77.g]?this[_77.g]():this[_76];},set:function(_78,_79){if(typeof _78==="object"){for(var x in _78){this.set(x,_78[x]);}return this;}var _7a=this._getAttrNames(_78),_7b=this[_7a.s];if(_4e.isFunction(_7b)){var _7c=_7b.apply(this,Array.prototype.slice.call(arguments,1));}else{var _7d=this.focusNode&&!_4e.isFunction(this.focusNode)?"focusNode":"domNode",tag=this[_7d].tagName,_7e=_54[tag]||(_54[tag]=_55(this[_7d])),map=_78 in this.attributeMap?this.attributeMap[_78]:_7a.s in this?this[_7a.s]:((_7a.l in _7e&&typeof _79!="function")||/^aria-|^data-|^role$/.test(_78))?_7d:null;if(map!=null){this._attrToDom(_78,_79,map);}this._set(_78,_79);}return _7c||this;},_attrPairNames:{},_getAttrNames:function(_7f){var apn=this._attrPairNames;if(apn[_7f]){return apn[_7f];}var uc=_7f.replace(/^[a-z]|-[a-zA-Z]/g,function(c){return c.charAt(c.length-1).toUpperCase();});return 
(apn[_7f]={n:_7f+"Node",s:"_set"+uc+"Attr",g:"_get"+uc+"Attr",l:uc.toLowerCase()});},_set:function(_80,_81){var _82=this[_80];this[_80]=_81;if(this._watchCallbacks&&this._created&&_81!==_82){this._watchCallbacks(_80,_82,_81);}},on:function(_83,_84){return _44.after(this,this._onMap(_83),_84,true);},_onMap:function(_85){var _86=this.constructor,map=_86._onMap;if(!map){map=(_86._onMap={});for(var _87 in _86.prototype){if(/^on/.test(_87)){map[_87.replace(/^on/,"").toLowerCase()]=_87;}}}return map[_85.toLowerCase()];},toString:function(){return "[Widget "+this.declaredClass+", "+(this.id||"NO ID")+"]";},getChildren:function(){return this.containerNode?_52.findWidgets(this.containerNode):[];},getParent:function(){return _52.getEnclosingWidget(this.domNode.parentNode);},connect:function(obj,_88,_89){var _8a=_46.connect(obj,_88,this,_89);this._connects.push(_8a);return _8a;},disconnect:function(_8b){var i=_43.indexOf(this._connects,_8b);if(i!=-1){_8b.remove();this._connects.splice(i,1);}},subscribe:function(t,_8c){var _8d=_51.subscribe(t,_4e.hitch(this,_8c));this._connects.push(_8d);return _8d;},unsubscribe:function(_8e){this.disconnect(_8e);},isLeftToRight:function(){return this.dir?(this.dir=="ltr"):_4b.isBodyLtr();},isFocusable:function(){return this.focus&&(_4c.get(this.domNode,"display")!="none");},placeAt:function(_8f,_90){if(_8f.declaredClass&&_8f.addChild){_8f.addChild(this,_90);}else{_4a.place(this.domNode,_8f,_90);}return this;},getTextDir:function(_91,_92){return 
_92;},applyTextDir:function(){}});});},"dojox/mobile/View":function(){define("dojox/mobile/View",["dojo/_base/kernel","dojo/_base/array","dojo/_base/config","dojo/_base/connect","dojo/_base/declare","dojo/_base/lang","dojo/_base/sniff","dojo/_base/window","dojo/_base/Deferred","dojo/dom","dojo/dom-class","dojo/dom-geometry","dojo/dom-style","dijit/registry","dijit/_Contained","dijit/_Container","dijit/_WidgetBase","./ViewController","./transition"],function(_93,_94,_95,_96,_97,_98,has,win,_99,dom,_9a,_9b,_9c,_9d,_9e,_9f,_a0,_a1,_a2){var dm=_98.getObject("dojox.mobile",true);return _97("dojox.mobile.View",[_a0,_9f,_9e],{selected:false,keepScrollPos:true,constructor:function(_a3,_a4){if(_a4){dom.byId(_a4).style.visibility="hidden";}this._aw=has("android")>=2.2&&has("android")<3;},buildRendering:function(){this.domNode=this.containerNode=this.srcNodeRef||win.doc.createElement("DIV");this.domNode.className="mblView";this.connect(this.domNode,"webkitAnimationEnd","onAnimationEnd");this.connect(this.domNode,"webkitAnimationStart","onAnimationStart");if(!_95["mblCSS3Transition"]){this.connect(this.domNode,"webkitTransitionEnd","onAnimationEnd");}var id=location.href.match(/#(\w+)([^\w=]|$)/)?RegExp.$1:null;this._visible=this.selected&&!id||this.id==id;if(this.selected){dm._defaultView=this;}},startup:function(){if(this._started){return;}var _a5=[];var _a6=this.domNode.parentNode.childNodes;var _a7=false;for(var i=0;i<_a6.length;i++){var c=_a6[i];if(c.nodeType===1&&_9a.contains(c,"mblView")){_a5.push(c);_a7=_a7||_9d.byNode(c)._visible;}}var _a8=this._visible;if(_a5.length===1||(!_a7&&_a5[0]===this.domNode)){_a8=true;}var _a9=this;setTimeout(function(){if(!_a8){_a9.domNode.style.display="none";}else{dm.currentView=_a9;_a9.onStartView();_96.publish("/dojox/mobile/startView",[_a9]);}if(_a9.domNode.style.visibility!="visible"){_a9.domNode.style.visibility="visible";}var 
_aa=_a9.getParent&&_a9.getParent();if(!_aa||!_aa.resize){_a9.resize();}},has("ie")?100:0);this.inherited(arguments);},resize:function(){_94.forEach(this.getChildren(),function(_ab){if(_ab.resize){_ab.resize();}});},onStartView:function(){},onBeforeTransitionIn:function(_ac,dir,_ad,_ae,_af){},onAfterTransitionIn:function(_b0,dir,_b1,_b2,_b3){},onBeforeTransitionOut:function(_b4,dir,_b5,_b6,_b7){},onAfterTransitionOut:function(_b8,dir,_b9,_ba,_bb){},_saveState:function(_bc,dir,_bd,_be,_bf){this._context=_be;this._method=_bf;if(_bd=="none"){_bd=null;}this._moveTo=_bc;this._dir=dir;this._transition=_bd;this._arguments=_98._toArray(arguments);this._args=[];if(_be||_bf){for(var i=5;i<arguments.length;i++){this._args.push(arguments[i]);}}},_fixViewState:function(_c0){var _c1=this.domNode.parentNode.childNodes;for(var i=0;i<_c1.length;i++){var n=_c1[i];if(n.nodeType===1&&_9a.contains(n,"mblView")){n.className="mblView";}}_c0.className="mblView";},convertToId:function(_c2){if(typeof (_c2)=="string"){_c2.match(/^#?([^&?]+)/);return RegExp.$1;}return _c2;},performTransition:function(_c3,dir,_c4,_c5,_c6){if(_c3==="#"){return;}if(_93.hash){if(typeof (_c3)=="string"&&_c3.charAt(0)=="#"&&!dm._params){dm._params=[];for(var i=0;i<arguments.length;i++){dm._params.push(arguments[i]);}_93.hash(_c3);return;}}this._saveState.apply(this,arguments);var _c7;if(_c3){_c7=this.convertToId(_c3);}else{if(!this._dummyNode){this._dummyNode=win.doc.createElement("DIV");win.body().appendChild(this._dummyNode);}_c7=this._dummyNode;}var _c8=this.domNode;var _c9=_c8.offsetTop;_c7=this.toNode=dom.byId(_c7);if(!_c7){return;}_c7.style.visibility=this._aw?"visible":"hidden";_c7.style.display="";this._fixViewState(_c7);var 
_ca=_9d.byNode(_c7);if(_ca){if(_95["mblAlwaysResizeOnTransition"]||!_ca._resized){dm.resizeAll(null,_ca);_ca._resized=true;}if(_c4&&_c4!="none"){_ca.containerNode.style.paddingTop=_c9+"px";}_ca.movedFrom=_c8.id;}this.onBeforeTransitionOut.apply(this,arguments);_96.publish("/dojox/mobile/beforeTransitionOut",[this].concat(_98._toArray(arguments)));if(_ca){if(this.keepScrollPos&&!this.getParent()){var _cb=win.body().scrollTop||win.doc.documentElement.scrollTop||win.global.pageYOffset||0;_c8._scrollTop=_cb;var _cc=(dir==1)?0:(_c7._scrollTop||0);_c7.style.top="0px";if(_cb>1||_cc!==0){_c8.style.top=_cc-_cb+"px";if(_95["mblHideAddressBar"]!==false){setTimeout(function(){win.global.scrollTo(0,(_cc||1));},0);}}}else{_c7.style.top="0px";}_ca.onBeforeTransitionIn.apply(_ca,arguments);_96.publish("/dojox/mobile/beforeTransitionIn",[_ca].concat(_98._toArray(arguments)));}if(!this._aw){_c7.style.display="none";_c7.style.visibility="visible";}if(dm._iw&&dm.scrollable){var ss=dm.getScreenSize();win.body().appendChild(dm._iwBgCover);_9c.set(dm._iwBgCover,{position:"absolute",top:"0px",left:"0px",height:(ss.h+1)+"px",width:ss.w+"px",backgroundColor:_9c.get(win.body(),"background-color"),zIndex:-10000,display:""});_9c.set(_c7,{position:"absolute",zIndex:-10001,visibility:"visible",display:""});setTimeout(_98.hitch(this,function(){this._doTransition(_c8,_c7,_c4,dir);}),80);}else{this._doTransition(_c8,_c7,_c4,dir);}},_toCls:function(s){return "mbl"+s.charAt(0).toUpperCase()+s.substring(1);},_doTransition:function(_cd,_ce,_cf,dir){var rev=(dir==-1)?" 
mblReverse":"";if(dm._iw&&dm.scrollable){_9c.set(_ce,{position:"",zIndex:""});win.body().removeChild(dm._iwBgCover);}else{if(!this._aw){_ce.style.display="";}}if(!_cf||_cf=="none"){this.domNode.style.display="none";this.invokeCallback();}else{if(_95["mblCSS3Transition"]){_99.when(_a2,_98.hitch(this,function(_d0){var _d1=_9c.get(_ce,"position");_9c.set(_ce,"position","absolute");_99.when(_d0(_cd,_ce,{transition:_cf,reverse:(dir===-1)?true:false}),_98.hitch(this,function(){_9c.set(_ce,"position",_d1);this.invokeCallback();}));}));}else{var s=this._toCls(_cf);_9a.add(_cd,s+" mblOut"+rev);_9a.add(_ce,s+" mblIn"+rev);setTimeout(function(){_9a.add(_cd,"mblTransition");_9a.add(_ce,"mblTransition");},100);var _d2="50% 50%";var _d3="50% 50%";var _d4,_d5,_d6;if(_cf.indexOf("swirl")!=-1||_cf.indexOf("zoom")!=-1){if(this.keepScrollPos&&!this.getParent()){_d4=win.body().scrollTop||win.doc.documentElement.scrollTop||win.global.pageYOffset||0;}else{_d4=-_9b.position(_cd,true).y;}_d6=win.global.innerHeight/2+_d4;_d2="50% "+_d6+"px";_d3="50% "+_d6+"px";}else{if(_cf.indexOf("scale")!=-1){var _d7=_9b.position(_cd,true);_d5=((this.clickedPosX!==undefined)?this.clickedPosX:win.global.innerWidth/2)-_d7.x;if(this.keepScrollPos&&!this.getParent()){_d4=win.body().scrollTop||win.doc.documentElement.scrollTop||win.global.pageYOffset||0;}else{_d4=-_d7.y;}_d6=((this.clickedPosY!==undefined)?this.clickedPosY:win.global.innerHeight/2)+_d4;_d2=_d5+"px "+_d6+"px";_d3=_d5+"px "+_d6+"px";}}_9c.set(_cd,{webkitTransformOrigin:_d2});_9c.set(_ce,{webkitTransformOrigin:_d3});}}dm.currentView=_9d.byNode(_ce);},onAnimationStart:function(e){},onAnimationEnd:function(e){var _d8=e.animationName||e.target.className;if(_d8.indexOf("Out")===-1&&_d8.indexOf("In")===-1&&_d8.indexOf("Shrink")===-1){return;}var 
_d9=false;if(_9a.contains(this.domNode,"mblOut")){_d9=true;this.domNode.style.display="none";_9a.remove(this.domNode,[this._toCls(this._transition),"mblIn","mblOut","mblReverse"]);}else{this.containerNode.style.paddingTop="";}_9c.set(this.domNode,{webkitTransformOrigin:""});if(_d8.indexOf("Shrink")!==-1){var li=e.target;li.style.display="none";_9a.remove(li,"mblCloseContent");}if(_d9){this.invokeCallback();}this.domNode&&(this.domNode.className="mblView");this.clickedPosX=this.clickedPosY=undefined;},invokeCallback:function(){this.onAfterTransitionOut.apply(this,this._arguments);_96.publish("/dojox/mobile/afterTransitionOut",[this].concat(this._arguments));var _da=_9d.byNode(this.toNode);if(_da){_da.onAfterTransitionIn.apply(_da,this._arguments);_96.publish("/dojox/mobile/afterTransitionIn",[_da].concat(this._arguments));_da.movedFrom=undefined;}var c=this._context,m=this._method;if(!c&&!m){return;}if(!m){m=c;c=null;}c=c||win.global;if(typeof (m)=="string"){c[m].apply(c,this._args);}else{m.apply(c,this._args);}},getShowingView:function(){var _db=this.domNode.parentNode.childNodes;for(var i=0;i<_db.length;i++){var n=_db[i];if(n.nodeType===1&&_9a.contains(n,"mblView")&&_9c.get(n,"display")!=="none"){return _9d.byNode(n);}}return null;},show:function(){var _dc=this.getShowingView();if(_dc){_dc.domNode.style.display="none";}this.domNode.style.display="";dm.currentView=this;}});});},"dojox/main":function(){define(["dojo/_base/kernel"],function(_dd){return _dd.dojox;});},"dojox/mobile/transition":function(){define(["dojo/_base/Deferred","dojo/_base/config"],function(_de,_df){if(_df["mblCSS3Transition"]){var _e0=new _de();require([_df["mblCSS3Transition"]],function(_e1){_e0.resolve(_e1);});return _e0;}return null;});},"dojo/Stateful":function(){define(["./_base/kernel","./_base/declare","./_base/lang","./_base/array"],function(_e2,_e3,_e4,_e5){return _e2.declare("dojo.Stateful",null,{postscript:function(_e6){if(_e6){_e4.mixin(this,_e6);}},get:function(_e7){return 
this[_e7];},set:function(_e8,_e9){if(typeof _e8==="object"){for(var x in _e8){this.set(x,_e8[x]);}return this;}var _ea=this[_e8];this[_e8]=_e9;if(this._watchCallbacks){this._watchCallbacks(_e8,_ea,_e9);}return this;},watch:function(_eb,_ec){var _ed=this._watchCallbacks;if(!_ed){var _ee=this;_ed=this._watchCallbacks=function(_ef,_f0,_f1,_f2){var _f3=function(_f4){if(_f4){_f4=_f4.slice();for(var i=0,l=_f4.length;i<l;i++){try{_f4[i].call(_ee,_ef,_f0,_f1);}catch(e){console.error(e);}}}};_f3(_ed["_"+_ef]);if(!_f2){_f3(_ed["*"]);}};}if(!_ec&&typeof _eb==="function"){_ec=_eb;_eb="*";}else{_eb="_"+_eb;}var _f5=_ed[_eb];if(typeof _f5!=="object"){_f5=_ed[_eb]=[];}_f5.push(_ec);return {unwatch:function(){_f5.splice(_e5.indexOf(_f5,_ec),1);}};}});});},"dojox/mobile/Heading":function(){define(["dojo/_base/array","dojo/_base/connect","dojo/_base/declare","dojo/_base/lang","dojo/_base/window","dojo/dom-class","dojo/dom-construct","dojo/dom-style","dijit/registry","dijit/_Contained","dijit/_Container","dijit/_WidgetBase","./View"],function(_f6,_f7,_f8,_f9,win,_fa,_fb,_fc,_fd,_fe,_ff,_100,View){var dm=_f9.getObject("dojox.mobile",true);return _f8("dojox.mobile.Heading",[_100,_ff,_fe],{back:"",href:"",moveTo:"",transition:"slide",label:"",iconBase:"",backProp:{className:"mblArrowButton"},tag:"H1",buildRendering:function(){this.domNode=this.containerNode=this.srcNodeRef||win.doc.createElement(this.tag);this.domNode.className="mblHeading";if(!this.label){_f6.forEach(this.domNode.childNodes,function(n){if(n.nodeType==3){var v=_f9.trim(n.nodeValue);if(v){this.label=v;this.labelNode=_fb.create("SPAN",{innerHTML:v},n,"replace");}}},this);}if(!this.labelNode){this.labelNode=_fb.create("SPAN",null,this.domNode);}this.labelNode.className="mblHeadingSpanTitle";this.labelDivNode=_fb.create("DIV",{className:"mblHeadingDivTitle",innerHTML:this.labelNode.innerHTML},this.domNode);},startup:function(){if(this._started){return;}var _101=this.getParent&&this.getParent();if(!_101||!_101.resize){var 
_102=this;setTimeout(function(){_102.resize();},0);}this.inherited(arguments);},resize:function(){if(this._btn){this._btn.style.width=this._body.offsetWidth+this._head.offsetWidth+"px";}if(this.labelNode){var _103,_104;var _105=this.containerNode.childNodes;for(var i=_105.length-1;i>=0;i--){var c=_105[i];if(c.nodeType===1){if(!_104&&_fa.contains(c,"mblToolBarButton")&&_fc.get(c,"float")==="right"){_104=c;}if(!_103&&(_fa.contains(c,"mblToolBarButton")&&_fc.get(c,"float")==="left"||c===this._btn)){_103=c;}}}if(!this.labelNodeLen&&this.label){this.labelNode.style.display="inline";this.labelNodeLen=this.labelNode.offsetWidth;this.labelNode.style.display="";}var bw=this.domNode.offsetWidth;var rw=_104?bw-_104.offsetLeft+5:0;var lw=_103?_103.offsetLeft+_103.offsetWidth+5:0;var tw=this.labelNodeLen||0;_fa[bw-Math.max(rw,lw)*2>tw?"add":"remove"](this.domNode,"mblHeadingCenterTitle");}_f6.forEach(this.getChildren(),function(_106){if(_106.resize){_106.resize();}});},_setBackAttr:function(back){if(!back){_fb.destroy(this._btn);this._btn=null;this.back="";}else{if(!this._btn){var btn=_fb.create("DIV",this.backProp,this.domNode,"first");var head=_fb.create("DIV",{className:"mblArrowButtonHead"},btn);var body=_fb.create("DIV",{className:"mblArrowButtonBody mblArrowButtonText"},btn);this._body=body;this._head=head;this._btn=btn;this.backBtnNode=btn;this.connect(body,"onclick","onClick");}this.back=back;this._body.innerHTML=this._cv?this._cv(this.back):this.back;}this.resize();},_setLabelAttr:function(_107){this.label=_107;this.labelNode.innerHTML=this.labelDivNode.innerHTML=this._cv?this._cv(_107):_107;},findCurrentView:function(){var w=this;while(true){w=w.getParent();if(!w){return null;}if(w instanceof View){break;}}return w;},onClick:function(e){var h1=this.domNode;_fa.add(h1,"mblArrowButtonSelected");setTimeout(function(){_fa.remove(h1,"mblArrowButtonSelected");},1000);if(this.back&&!this.moveTo&&!this.href&&history){history.back();return;}var 
view=this.findCurrentView();if(view){view.clickedPosX=e.clientX;view.clickedPosY=e.clientY;}this.goTo(this.moveTo,this.href);},goTo:function(_108,href){var view=this.findCurrentView();if(!view){return;}if(href){view.performTransition(null,-1,this.transition,this,function(){location.href=href;});}else{if(dm.app&&dm.app.STAGE_CONTROLLER_ACTIVE){_f7.publish("/dojox/mobile/app/goback");}else{var node=_fd.byId(view.convertToId(_108));if(node){var _109=node.getParent();while(view){var _10a=view.getParent();if(_109===_10a){break;}view=_10a;}}if(view){view.performTransition(_108,-1,this.transition);}}}}});});},"dojox/mobile/Switch":function(){define(["dojo/_base/array","dojo/_base/connect","dojo/_base/declare","dojo/_base/event","dojo/_base/window","dojo/dom-class","dijit/_Contained","dijit/_WidgetBase","./sniff"],function(_10b,_10c,_10d,_10e,win,_10f,_110,_111,has){return _10d("dojox.mobile.Switch",[_111,_110],{value:"on",name:"",leftLabel:"ON",rightLabel:"OFF",_width:53,buildRendering:function(){this.domNode=win.doc.createElement("DIV");var c=(this.srcNodeRef&&this.srcNodeRef.className)||this.className||this["class"];this._swClass=(c||"").replace(/ .*/,"");this.domNode.className="mblSwitch";var _112=this.name?" 
name=\""+this.name+"\"":"";this.domNode.innerHTML="<div class=\"mblSwitchInner\">"+"<div class=\"mblSwitchBg mblSwitchBgLeft\">"+"<div class=\"mblSwitchText mblSwitchTextLeft\"></div>"+"</div>"+"<div class=\"mblSwitchBg mblSwitchBgRight\">"+"<div class=\"mblSwitchText mblSwitchTextRight\"></div>"+"</div>"+"<div class=\"mblSwitchKnob\"></div>"+"<input type=\"hidden\""+_112+"></div>"+"</div>";var n=this.inner=this.domNode.firstChild;this.left=n.childNodes[0];this.right=n.childNodes[1];this.knob=n.childNodes[2];this.input=n.childNodes[3];},postCreate:function(){this.connect(this.domNode,"onclick","onClick");this.connect(this.domNode,has("touch")?"touchstart":"onmousedown","onTouchStart");this._initialValue=this.value;},_changeState:function(_113,anim){var on=(_113==="on");this.left.style.display="";this.right.style.display="";this.inner.style.left="";if(anim){_10f.add(this.domNode,"mblSwitchAnimation");}_10f.remove(this.domNode,on?"mblSwitchOff":"mblSwitchOn");_10f.add(this.domNode,on?"mblSwitchOn":"mblSwitchOff");var _114=this;setTimeout(function(){_114.left.style.display=on?"":"none";_114.right.style.display=!on?"":"none";_10f.remove(_114.domNode,"mblSwitchAnimation");},anim?300:0);},startup:function(){if(this._swClass.indexOf("Round")!=-1){var r=Math.round(this.domNode.offsetHeight/2);this.createRoundMask(this._swClass,r,this.domNode.offsetWidth);}},createRoundMask:function(_115,r,w){if(!has("webkit")||!_115){return;}if(!this._createdMasks){this._createdMasks=[];}if(this._createdMasks[_115]){return;}this._createdMasks[_115]=1;var 
ctx=win.doc.getCSSCanvasContext("2d",_115+"Mask",w,100);ctx.fillStyle="#000000";ctx.beginPath();ctx.moveTo(r,0);ctx.arcTo(0,0,0,2*r,r);ctx.arcTo(0,2*r,r,2*r,r);ctx.lineTo(w-r,2*r);ctx.arcTo(w,2*r,w,r,r);ctx.arcTo(w,0,w-r,0,r);ctx.closePath();ctx.fill();},onClick:function(e){if(this._moved){return;}this.value=this.input.value=(this.value=="on")?"off":"on";this._changeState(this.value,true);this.onStateChanged(this.value);},onTouchStart:function(e){this._moved=false;this.innerStartX=this.inner.offsetLeft;if(!this._conn){this._conn=[];this._conn.push(_10c.connect(this.inner,has("touch")?"touchmove":"onmousemove",this,"onTouchMove"));this._conn.push(_10c.connect(this.inner,has("touch")?"touchend":"onmouseup",this,"onTouchEnd"));}this.touchStartX=e.touches?e.touches[0].pageX:e.clientX;this.left.style.display="";this.right.style.display="";_10e.stop(e);},onTouchMove:function(e){e.preventDefault();var dx;if(e.targetTouches){if(e.targetTouches.length!=1){return false;}dx=e.targetTouches[0].clientX-this.touchStartX;}else{dx=e.clientX-this.touchStartX;}var pos=this.innerStartX+dx;var d=10;if(pos<=-(this._width-d)){pos=-this._width;}if(pos>=-d){pos=0;}this.inner.style.left=pos+"px";if(Math.abs(dx)>d){this._moved=true;}},onTouchEnd:function(e){_10b.forEach(this._conn,_10c.disconnect);this._conn=null;if(this.innerStartX==this.inner.offsetLeft){if(has("touch")){var ev=win.doc.createEvent("MouseEvents");ev.initEvent("click",true,true);this.inner.dispatchEvent(ev);}return;}var 
_116=(this.inner.offsetLeft<-(this._width/2))?"off":"on";this._changeState(_116,true);if(_116!=this.value){this.value=this.input.value=_116;this.onStateChanged(_116);}},onStateChanged:function(_117){},_setValueAttr:function(_118){this._changeState(_118,false);if(this.value!=_118){this.onStateChanged(_118);}this.value=this.input.value=_118;},_setLeftLabelAttr:function(_119){this.leftLabel=_119;this.left.firstChild.innerHTML=this._cv?this._cv(_119):_119;},_setRightLabelAttr:function(_11a){this.rightLabel=_11a;this.right.firstChild.innerHTML=this._cv?this._cv(_11a):_11a;},reset:function(){this.set("value",this._initialValue);}});});},"dojox/mobile/ListItem":function(){define("dojox/mobile/ListItem",["dojo/_base/array","dojo/_base/connect","dojo/_base/declare","dojo/_base/lang","dojo/dom-class","dojo/dom-construct","dojo/has","./common","./_ItemBase","./TransitionEvent"],function(_11b,_11c,_11d,lang,_11e,_11f,has,_120,_121,_122){return _11d("dojox.mobile.ListItem",_121,{rightText:"",rightIcon:"",rightIcon2:"",anchorLabel:false,noArrow:false,selected:false,checked:false,arrowClass:"mblDomButtonArrow",checkClass:"mblDomButtonCheck",variableHeight:false,rightIconTitle:"",rightIcon2Title:"",btnClass:"",btnClass2:"",tag:"li",postMixInProperties:function(){if(this.btnClass){this.rightIcon=this.btnClass;}this._setBtnClassAttr=this._setRightIconAttr;this._setBtnClass2Attr=this._setRightIcon2Attr;},buildRendering:function(){this.domNode=this.srcNodeRef||_11f.create(this.tag);this.inherited(arguments);this.domNode.className="mblListItem"+(this.selected?" 
mblItemSelected":"");var box=this.box=_11f.create("DIV");box.className="mblListItemTextBox";if(this.anchorLabel){box.style.cursor="pointer";}var r=this.srcNodeRef;if(r&&!this.label){this.label="";for(var i=0,len=r.childNodes.length;i<len;i++){var n=r.firstChild;if(n.nodeType===3&&lang.trim(n.nodeValue)!==""){n.nodeValue=this._cv?this._cv(n.nodeValue):n.nodeValue;this.labelNode=_11f.create("SPAN",{className:"mblListItemLabel"});this.labelNode.appendChild(n);n=this.labelNode;}box.appendChild(n);}}if(!this.labelNode){this.labelNode=_11f.create("SPAN",{className:"mblListItemLabel"},box);}if(this.anchorLabel){box.style.display="inline";}var a=this.anchorNode=_11f.create("A");a.className="mblListItemAnchor";this.domNode.appendChild(a);a.appendChild(box);},startup:function(){if(this._started){return;}this.inheritParams();var _123=this.getParent();if(this.moveTo||this.href||this.url||this.clickable||(_123&&_123.select)){this._onClickHandle=this.connect(this.anchorNode,"onclick","onClick");}this.setArrow();if(_11e.contains(this.domNode,"mblVariableHeight")){this.variableHeight=true;}if(this.variableHeight){_11e.add(this.domNode,"mblVariableHeight");setTimeout(lang.hitch(this,"layoutVariableHeight"));}this.set("icon",this.icon);if(!this.checked&&this.checkClass.indexOf(",")!==-1){this.set("checked",this.checked);}this.inherited(arguments);},resize:function(){if(this.variableHeight){this.layoutVariableHeight();}},onClick:function(e){var a=e.currentTarget;var li=a.parentNode;if(_11e.contains(li,"mblItemSelected")){return;}if(this.anchorLabel){for(var p=e.target;p.tagName!==this.tag.toUpperCase();p=p.parentNode){if(p.className=="mblListItemTextBox"){_11e.add(p,"mblListItemTextBoxSelected");setTimeout(function(){_11e.remove(p,"mblListItemTextBoxSelected");},has("android")?300:1000);this.onAnchorLabelClicked(e);return;}}}var 
_124=this.getParent();if(_124.select){if(_124.select==="single"){if(!this.checked){this.set("checked",true);}}else{if(_124.select==="multiple"){this.set("checked",!this.checked);}}}this.select();if(this.href&&this.hrefTarget){_120.openWindow(this.href,this.hrefTarget);return;}var _125;if(this.moveTo||this.href||this.url||this.scene){_125={moveTo:this.moveTo,href:this.href,url:this.url,scene:this.scene,transition:this.transition,transitionDir:this.transitionDir};}else{if(this.transitionOptions){_125=this.transitionOptions;}}if(_125){this.setTransitionPos(e);return new _122(this.domNode,_125,e).dispatch();}},select:function(){var _126=this.getParent();if(_126.stateful){_126.deselectAll();}else{var _127=this;setTimeout(function(){_127.deselect();},has("android")?300:1000);}_11e.add(this.domNode,"mblItemSelected");},deselect:function(){_11e.remove(this.domNode,"mblItemSelected");},onAnchorLabelClicked:function(e){},layoutVariableHeight:function(){var h=this.anchorNode.offsetHeight;if(h===this.anchorNodeHeight){return;}this.anchorNodeHeight=h;_11b.forEach([this.rightTextNode,this.rightIcon2Node,this.rightIconNode,this.iconNode],function(n){if(n){var t=Math.round((h-n.offsetHeight)/2);n.style.marginTop=t+"px";}});},setArrow:function(){if(this.checked){return;}var c="";var _128=this.getParent();if(this.moveTo||this.href||this.url||this.clickable){if(!this.noArrow&&!(_128&&_128.stateful)){c=this.arrowClass;}}if(c){this._setRightIconAttr(c);}},_setIconAttr:function(icon){if(!this.getParent()){return;}this.icon=icon;var a=this.anchorNode;if(!this.iconNode){if(icon){var 
ref=this.rightIconNode||this.rightIcon2Node||this.rightTextNode||this.box;this.iconNode=_11f.create("DIV",{className:"mblListItemIcon"},ref,"before");}}else{_11f.empty(this.iconNode);}if(icon&&icon!=="none"){_120.createIcon(icon,this.iconPos,null,this.alt,this.iconNode);if(this.iconPos){_11e.add(this.iconNode.firstChild,"mblListItemSpriteIcon");}_11e.remove(a,"mblListItemAnchorNoIcon");}else{_11e.add(a,"mblListItemAnchorNoIcon");}},_setCheckedAttr:function(_129){var _12a=this.getParent();if(_12a&&_12a.select==="single"&&_129){_11b.forEach(_12a.getChildren(),function(_12b){_12b.set("checked",false);});}this._setRightIconAttr(this.checkClass);var _12c=this.rightIconNode.childNodes;if(_12c.length===1){this.rightIconNode.style.display=_129?"":"none";}else{_12c[0].style.display=_129?"":"none";_12c[1].style.display=!_129?"":"none";}_11e.toggle(this.domNode,"mblListItemChecked",_129);if(_12a&&this.checked!==_129){_12a.onCheckStateChanged(this,_129);}this.checked=_129;},_setRightTextAttr:function(text){if(!this.rightTextNode){this.rightTextNode=_11f.create("DIV",{className:"mblListItemRightText"},this.box,"before");}this.rightText=text;this.rightTextNode.innerHTML=this._cv?this._cv(text):text;},_setRightIconAttr:function(icon){if(!this.rightIconNode){var ref=this.rightIcon2Node||this.rightTextNode||this.box;this.rightIconNode=_11f.create("DIV",{className:"mblListItemRightIcon"},ref,"before");}else{_11f.empty(this.rightIconNode);}this.rightIcon=icon;var arr=(icon||"").split(/,/);if(arr.length===1){_120.createIcon(icon,null,null,this.rightIconTitle,this.rightIconNode);}else{_120.createIcon(arr[0],null,null,this.rightIconTitle,this.rightIconNode);_120.createIcon(arr[1],null,null,this.rightIconTitle,this.rightIconNode);}},_setRightIcon2Attr:function(icon){if(!this.rightIcon2Node){var 
ref=this.rightTextNode||this.box;this.rightIcon2Node=_11f.create("DIV",{className:"mblListItemRightIcon2"},ref,"before");}else{_11f.empty(this.rightIcon2Node);}this.rightIcon2=icon;_120.createIcon(icon,null,null,this.rightIcon2Title,this.rightIcon2Node);},_setLabelAttr:function(text){this.label=text;this.labelNode.innerHTML=this._cv?this._cv(text):text;}});});},"dijit/registry":function(){define("dijit/registry",["dojo/_base/array","dojo/_base/sniff","dojo/_base/unload","dojo/_base/window","."],function(_12d,has,_12e,win,_12f){var _130={},hash={};var _131={length:0,add:function(_132){if(hash[_132.id]){throw new Error("Tried to register widget with id=="+_132.id+" but that id is already registered");}hash[_132.id]=_132;this.length++;},remove:function(id){if(hash[id]){delete hash[id];this.length--;}},byId:function(id){return typeof id=="string"?hash[id]:id;},byNode:function(node){return hash[node.getAttribute("widgetId")];},toArray:function(){var ar=[];for(var id in hash){ar.push(hash[id]);}return ar;},getUniqueId:function(_133){var id;do{id=_133+"_"+(_133 in _130?++_130[_133]:_130[_133]=0);}while(hash[id]);return _12f._scopeName=="dijit"?id:_12f._scopeName+"_"+id;},findWidgets:function(root){var _134=[];function _135(root){for(var node=root.firstChild;node;node=node.nextSibling){if(node.nodeType==1){var _136=node.getAttribute("widgetId");if(_136){var _137=hash[_136];if(_137){_134.push(_137);}}else{_135(node);}}}};_135(root);return _134;},_destroyAll:function(){_12f._curFocus=null;_12f._prevFocus=null;_12f._activeStack=[];_12d.forEach(_131.findWidgets(win.body()),function(_138){if(!_138._destroyed){if(_138.destroyRecursive){_138.destroyRecursive();}else{if(_138.destroy){_138.destroy();}}}});},getEnclosingWidget:function(node){while(node){var id=node.getAttribute&&node.getAttribute("widgetId");if(id){return hash[id];}node=node.parentNode;}return null;},_hash:hash};if(has("ie")){_12e.addOnWindowUnload(function(){_131._destroyAll();});}_12f.registry=_131;return 
_131;});},"dojox/mobile/common":function(){define(["dojo/_base/kernel","dojo/_base/array","dojo/_base/config","dojo/_base/connect","dojo/_base/lang","dojo/_base/window","dojo/dom-class","dojo/dom-construct","dojo/dom-style","dojo/ready","dijit/registry","./sniff","./uacss"],function(dojo,_139,_13a,_13b,lang,win,_13c,_13d,_13e,_13f,_140,has,_141){var dm=lang.getObject("dojox.mobile",true);dm.getScreenSize=function(){return {h:win.global.innerHeight||win.doc.documentElement.clientHeight,w:win.global.innerWidth||win.doc.documentElement.clientWidth};};dm.updateOrient=function(){var dim=dm.getScreenSize();_13c.replace(win.doc.documentElement,dim.h>dim.w?"dj_portrait":"dj_landscape",dim.h>dim.w?"dj_landscape":"dj_portrait");};dm.updateOrient();dm.tabletSize=500;dm.detectScreenSize=function(_142){var dim=dm.getScreenSize();var sz=Math.min(dim.w,dim.h);var from,to;if(sz>=dm.tabletSize&&(_142||(!this._sz||this._sz<dm.tabletSize))){from="phone";to="tablet";}else{if(sz<dm.tabletSize&&(_142||(!this._sz||this._sz>=dm.tabletSize))){from="tablet";to="phone";}}if(to){_13c.replace(win.doc.documentElement,"dj_"+to,"dj_"+from);_13b.publish("/dojox/mobile/screenSize/"+to,[dim]);}this._sz=sz;};dm.detectScreenSize();dm.setupIcon=function(_143,_144){if(_143&&_144){var arr=_139.map(_144.split(/[ ,]/),function(item){return item-0;});var t=arr[0];var r=arr[1]+arr[2];var b=arr[0]+arr[3];var l=arr[1];_13e.set(_143,{clip:"rect("+t+"px "+r+"px "+b+"px "+l+"px)",top:(_143.parentNode?_13e.get(_143,"top"):0)-t+"px",left:-l+"px"});}};dm.hideAddressBarWait=typeof (_13a["mblHideAddressBarWait"])==="number"?_13a["mblHideAddressBarWait"]:1500;dm.hide_1=function(_145){scrollTo(0,1);var h=dm.getScreenSize().h+"px";if(has("android")){if(_145){win.body().style.minHeight=h;}dm.resizeAll();}else{if(_145||dm._h===h&&h!==win.body().style.minHeight){win.body().style.minHeight=h;dm.resizeAll();}}dm._h=h;};dm.hide_fs=function(){var 
t=win.body().style.minHeight;win.body().style.minHeight=(dm.getScreenSize().h*2)+"px";scrollTo(0,1);setTimeout(function(){dm.hide_1(1);dm._hiding=false;},1000);};dm.hideAddressBar=function(evt){if(dm.disableHideAddressBar||dm._hiding){return;}dm._hiding=true;dm._h=0;win.body().style.minHeight=(dm.getScreenSize().h*2)+"px";setTimeout(dm.hide_1,0);setTimeout(dm.hide_1,200);setTimeout(dm.hide_1,800);setTimeout(dm.hide_fs,dm.hideAddressBarWait);};dm.resizeAll=function(evt,root){if(dm.disableResizeAll){return;}_13b.publish("/dojox/mobile/resizeAll",[evt,root]);dm.updateOrient();dm.detectScreenSize();var _146=function(w){var _147=w.getParent&&w.getParent();return !!((!_147||!_147.resize)&&w.resize);};var _148=function(w){_139.forEach(w.getChildren(),function(_149){if(_146(_149)){_149.resize();}_148(_149);});};if(root){if(root.resize){root.resize();}_148(root);}else{_139.forEach(_139.filter(_140.toArray(),_146),function(w){w.resize();});}};dm.openWindow=function(url,_14a){win.global.open(url,_14a||"_blank");};dm.createDomButton=function(_14b,_14c,_14d){if(!dm._domButtons){if(has("webkit")){var _14e=function(_14f,dic){var i,j;if(!_14f){var dic={};var ss=dojo.doc.styleSheets;for(i=0;i<ss.length;i++){ss[i]&&_14e(ss[i],dic);}return dic;}var _150=_14f.cssRules||[];for(i=0;i<_150.length;i++){var rule=_150[i];if(rule.href&&rule.styleSheet){_14e(rule.styleSheet,dic);}else{if(rule.selectorText){var sels=rule.selectorText.split(/,/);for(j=0;j<sels.length;j++){var sel=sels[j];var n=sel.split(/>/).length-1;if(sel.match(/(mblDomButton\w+)/)){var cls=RegExp.$1;if(!dic[cls]||n>dic[cls]){dic[cls]=n;}}}}}}};dm._domButtons=_14e();}else{dm._domButtons={};}}var s=_14b.className;var node=_14d||_14b;if(s.match(/(mblDomButton\w+)/)&&s.indexOf("/")===-1){var _151=RegExp.$1;var nDiv=4;if(s.match(/(mblDomButton\w+_(\d+))/)){nDiv=RegExp.$2-0;}else{if(dm._domButtons[_151]!==undefined){nDiv=dm._domButtons[_151];}}var 
_152=null;if(has("bb")&&_13a["mblBBBoxShadowWorkaround"]!==false){_152={style:"-webkit-box-shadow:none"};}for(var i=0,p=node;i<nDiv;i++){p=p.firstChild||_13d.create("DIV",_152,p);}if(_14d){setTimeout(function(){_13c.remove(_14b,_151);},0);_13c.add(_14d,_151);}}else{if(s.indexOf(".")!==-1){_13d.create("IMG",{src:s},node);}else{return null;}}_13c.add(node,"mblDomButton");if(_13a["mblAndroidWorkaround"]!==false&&has("android")>=2.2){_13e.set(node,"webkitTransform","translate3d(0,0,0)");}!!_14c&&_13e.set(node,_14c);return node;};dm.createIcon=function(icon,_153,node,_154,_155){if(icon&&icon.indexOf("mblDomButton")===0){if(node&&node.className.match(/(mblDomButton\w+)/)){_13c.remove(node,RegExp.$1);}else{node=_13d.create("DIV");}node.title=_154;_13c.add(node,icon);dm.createDomButton(node);}else{if(icon&&icon!=="none"){if(!node||node.nodeName!=="IMG"){node=_13d.create("IMG",{alt:_154});}node.src=(icon||"").replace("${theme}",dm.currentTheme);dm.setupIcon(node,_153);if(_155&&_153){var arr=_153.split(/[ ,]/);_13e.set(_155,{width:arr[2]+"px",height:arr[3]+"px"});}}}if(_155){_155.appendChild(node);}return node;};dm._iw=_13a["mblIosWorkaround"]!==false&&has("iphone");if(dm._iw){dm._iwBgCover=_13d.create("div");}if(_13a.parseOnLoad){_13f(90,function(){var _156=win.body().getElementsByTagName("*");var 
i,len,s;len=_156.length;for(i=0;i<len;i++){s=_156[i].getAttribute("dojoType");if(s){if(_156[i].parentNode.getAttribute("lazy")=="true"){_156[i].setAttribute("__dojoType",s);_156[i].removeAttribute("dojoType");}}}});}_13f(function(){dm.detectScreenSize(true);if(_13a["mblApplyPageStyles"]!==false){_13c.add(win.doc.documentElement,"mobile");}if(has("chrome")){_13c.add(win.doc.documentElement,"dj_chrome");}if(_13a["mblAndroidWorkaround"]!==false&&has("android")>=2.2){if(_13a["mblAndroidWorkaroundButtonStyle"]!==false){_13d.create("style",{innerHTML:"BUTTON,INPUT[type='button'],INPUT[type='submit'],INPUT[type='reset'],INPUT[type='file']::-webkit-file-upload-button{-webkit-appearance:none;}"},win.doc.head,"first");}if(has("android")<3){_13e.set(win.doc.documentElement,"webkitTransform","translate3d(0,0,0)");_13b.connect(null,"onfocus",null,function(e){_13e.set(win.doc.documentElement,"webkitTransform","");});_13b.connect(null,"onblur",null,function(e){_13e.set(win.doc.documentElement,"webkitTransform","translate3d(0,0,0)");});}else{if(_13a["mblAndroid3Workaround"]!==false){_13e.set(win.doc.documentElement,{webkitBackfaceVisibility:"hidden",webkitPerspective:8000});}}}var f=dm.resizeAll;if(_13a["mblHideAddressBar"]!==false&&navigator.appVersion.indexOf("Mobile")!=-1||_13a["mblForceHideAddressBar"]===true){dm.hideAddressBar();if(_13a["mblAlwaysHideAddressBar"]===true){f=dm.hideAddressBar;}}_13b.connect(null,(win.global.onorientationchange!==undefined&&!has("android"))?"onorientationchange":"onresize",null,f);var _157=win.body().getElementsByTagName("*");var i,len=_157.length,s;for(i=0;i<len;i++){s=_157[i].getAttribute("__dojoType");if(s){_157[i].setAttribute("dojoType",s);_157[i].removeAttribute("__dojoType");}}if(dojo.hash){var _158=function(root){if(!root){return [];}var arr=_140.findWidgets(root);var _159=arr;for(var i=0;i<_159.length;i++){arr=arr.concat(_158(_159[i].containerNode));}return arr;};_13b.subscribe("/dojo/hashchange",null,function(_15a){var 
view=dm.currentView;if(!view){return;}var _15b=dm._params;if(!_15b){var _15c=_15a?_15a:dm._defaultView.id;var _15d=_158(view.domNode);var dir=1,_15e="slide";for(i=0;i<_15d.length;i++){var w=_15d[i];if("#"+_15c==w.moveTo){_15e=w.transition;dir=(w instanceof dm.Heading)?-1:1;break;}}_15b=[_15c,dir,_15e];}view.performTransition.apply(view,_15b);dm._params=null;});}win.body().style.visibility="visible";});_140.getEnclosingWidget=function(node){while(node){var id=node.getAttribute&&node.getAttribute("widgetId");if(id){return _140.byId(id);}node=node._parentNode||node.parentNode;}return null;};return dm;});},"dojox/mobile/uacss":function(){define("dojox/mobile/uacss",["dojo/_base/kernel","dojo/_base/lang","dojo/_base/window","dojox/mobile/sniff"],function(dojo,lang,win,has){win.doc.documentElement.className+=lang.trim([has("bb")?"dj_bb":"",has("android")?"dj_android":"",has("iphone")?"dj_iphone":"",has("ipod")?"dj_ipod":"",has("ipad")?"dj_ipad":""].join(" ").replace(/ +/g," "));return dojo;});},"dojox/mobile/RoundRectCategory":function(){define(["dojo/_base/declare","dojo/_base/window","dijit/_Contained","dijit/_WidgetBase"],function(_15f,win,_160,_161){return _15f("dojox.mobile.RoundRectCategory",[_161,_160],{label:"",buildRendering:function(){this.domNode=this.containerNode=this.srcNodeRef||win.doc.createElement("H2");this.domNode.className="mblRoundRectCategory";if(!this.label){this.label=this.domNode.innerHTML;}},_setLabelAttr:function(_162){this.label=_162;this.domNode.innerHTML=this._cv?this._cv(_162):_162;}});});},"dojox/mobile/ProgressIndicator":function(){define(["dojo/_base/config","dojo/_base/declare","dojo/dom-construct","dojo/dom-style","dojo/has"],function(_163,_164,_165,_166,has){var 
cls=_164("dojox.mobile.ProgressIndicator",null,{interval:100,colors:["#C0C0C0","#C0C0C0","#C0C0C0","#C0C0C0","#C0C0C0","#C0C0C0","#B8B9B8","#AEAFAE","#A4A5A4","#9A9A9A","#8E8E8E","#838383"],constructor:function(){this._bars=[];this.domNode=_165.create("DIV");this.domNode.className="mblProgContainer";if(_163["mblAndroidWorkaround"]!==false&&has("android")>=2.2&&has("android")<3){_166.set(this.domNode,"webkitTransform","translate3d(0,0,0)");}this.spinnerNode=_165.create("DIV",null,this.domNode);for(var i=0;i<this.colors.length;i++){var div=_165.create("DIV",{className:"mblProg mblProg"+i},this.spinnerNode);this._bars.push(div);}},start:function(){if(this.imageNode){var img=this.imageNode;var l=Math.round((this.domNode.offsetWidth-img.offsetWidth)/2);var t=Math.round((this.domNode.offsetHeight-img.offsetHeight)/2);img.style.margin=t+"px "+l+"px";return;}var cntr=0;var _167=this;var n=this.colors.length;this.timer=setInterval(function(){cntr--;cntr=cntr<0?n-1:cntr;var c=_167.colors;for(var i=0;i<n;i++){var idx=(cntr+i)%n;_167._bars[i].style.backgroundColor=c[idx];}},this.interval);},stop:function(){if(this.timer){clearInterval(this.timer);}this.timer=null;if(this.domNode.parentNode){this.domNode.parentNode.removeChild(this.domNode);}},setImage:function(file){if(file){this.imageNode=_165.create("IMG",{src:file},this.domNode);this.spinnerNode.style.display="none";}else{if(this.imageNode){this.domNode.removeChild(this.imageNode);this.imageNode=null;}this.spinnerNode.style.display="";}}});cls._instance=null;cls.getInstance=function(){if(!cls._instance){cls._instance=new cls();}return cls._instance;};return cls;});},"dojox/mobile/EdgeToEdgeList":function(){define(["dojo/_base/declare","./RoundRectList"],function(_168,_169){return 
_168("dojox.mobile.EdgeToEdgeList",_169,{buildRendering:function(){this.inherited(arguments);this.domNode.className="mblEdgeToEdgeList";}});});},"dojox/mobile/EdgeToEdgeCategory":function(){define(["dojo/_base/declare","./RoundRectCategory"],function(_16a,_16b){return _16a("dojox.mobile.EdgeToEdgeCategory",_16b,{buildRendering:function(){this.inherited(arguments);this.domNode.className="mblEdgeToEdgeCategory";}});});},"dojox/mobile/ToolBarButton":function(){define(["dojo/_base/declare","dojo/_base/window","dojo/dom-class","dojo/dom-construct","dojo/dom-style","./common","./_ItemBase"],function(_16c,win,_16d,_16e,_16f,_170,_171){return _16c("dojox.mobile.ToolBarButton",_171,{selected:false,btnClass:"",_defaultColor:"mblColorDefault",_selColor:"mblColorDefaultSel",buildRendering:function(){this.domNode=this.containerNode=this.srcNodeRef||win.doc.createElement("div");this.inheritParams();_16d.add(this.domNode,"mblToolBarButton mblArrowButtonText");var _172;if(this.selected){_172=this._selColor;}else{if(this.domNode.className.indexOf("mblColor")==-1){_172=this._defaultColor;}}_16d.add(this.domNode,_172);if(!this.label){this.label=this.domNode.innerHTML;}if(this.icon&&this.icon!="none"){this.iconNode=_16e.create("div",{className:"mblToolBarButtonIcon"},this.domNode);_170.createIcon(this.icon,this.iconPos,null,this.alt,this.iconNode);if(this.iconPos){_16d.add(this.iconNode.firstChild,"mblToolBarButtonSpriteIcon");}}else{if(_170.createDomButton(this.domNode)){_16d.add(this.domNode,"mblToolBarButtonDomButton");}else{_16d.add(this.domNode,"mblToolBarButtonText");}}this.connect(this.domNode,"onclick","onClick");},select:function(){_16d.toggle(this.domNode,this._selColor,!arguments[0]);this.selected=!arguments[0];},deselect:function(){this.select(true);},onClick:function(e){this.setTransitionPos(e);this.defaultClickAction();},_setBtnClassAttr:function(_173){var 
node=this.domNode;if(node.className.match(/(mblDomButton\w+)/)){_16d.remove(node,RegExp.$1);}_16d.add(node,_173);if(_170.createDomButton(this.domNode)){_16d.add(this.domNode,"mblToolBarButtonDomButton");}},_setLabelAttr:function(text){this.label=text;this.domNode.innerHTML=this._cv?this._cv(text):text;}});});},"dojox/mobile/_ItemBase":function(){define("dojox/mobile/_ItemBase",["dojo/_base/kernel","dojo/_base/config","dojo/_base/declare","dijit/registry","dijit/_Contained","dijit/_Container","dijit/_WidgetBase","./TransitionEvent","./View"],function(_174,_175,_176,_177,_178,_179,_17a,_17b,View){return _176("dojox.mobile._ItemBase",[_17a,_179,_178],{icon:"",iconPos:"",alt:"",href:"",hrefTarget:"",moveTo:"",scene:"",clickable:false,url:"",urlTarget:"",transition:"",transitionDir:1,transitionOptions:null,callback:null,sync:true,label:"",toggle:false,_duration:800,inheritParams:function(){var _17c=this.getParent();if(_17c){if(!this.transition){this.transition=_17c.transition;}if(this.icon&&_17c.iconBase&&_17c.iconBase.charAt(_17c.iconBase.length-1)==="/"){this.icon=_17c.iconBase+this.icon;}if(!this.icon){this.icon=_17c.iconBase;}if(!this.iconPos){this.iconPos=_17c.iconPos;}}},select:function(){},deselect:function(){},defaultClickAction:function(e){if(this.toggle){if(this.selected){this.deselect();}else{this.select();}}else{if(!this.selected){this.select();if(!this.selectOne){var _17d=this;setTimeout(function(){_17d.deselect();},this._duration);}var _17e;if(this.moveTo||this.href||this.url||this.scene){_17e={moveTo:this.moveTo,href:this.href,url:this.url,scene:this.scene,transition:this.transition,transitionDir:this.transitionDir};}else{if(this.transitionOptions){_17e=this.transitionOptions;}}if(_17e){return new _17b(this.domNode,_17e,e).dispatch();}}}},getParent:function(){var ref=this.srcNodeRef||this.domNode;return ref&&ref.parentNode?_177.getEnclosingWidget(ref.parentNode):null;},setTransitionPos:function(e){var w=this;while(true){w=w.getParent();if(!w||w instanceof 
View){break;}}if(w){w.clickedPosX=e.clientX;w.clickedPosY=e.clientY;}},transitionTo:function(_17f,href,url,_180){if(_175.isDebug){var _181=arguments.callee._ach||(arguments.callee._ach={}),_182=(arguments.callee.caller||"unknown caller").toString();if(!_181[_182]){_174.deprecated(this.declaredClass+"::transitionTo() is deprecated."+_182,"","2.0");_181[_182]=true;}}new _17b(this.domNode,{moveTo:_17f,href:href,url:url,scene:_180,transition:this.transition,transitionDir:this.transitionDir}).dispatch();}});});},"dijit/_Contained":function(){define("dijit/_Contained",["dojo/_base/declare","./registry"],function(_183,_184){return _183("dijit._Contained",null,{_getSibling:function(_185){var node=this.domNode;do{node=node[_185+"Sibling"];}while(node&&node.nodeType!=1);return node&&_184.byNode(node);},getPreviousSibling:function(){return this._getSibling("previous");},getNextSibling:function(){return this._getSibling("next");},getIndexInParent:function(){var p=this.getParent();if(!p||!p.getIndexOfChild){return -1;}return p.getIndexOfChild(this);}});});},"dojox/mobile/_base":function(){define(["./common","./View","./Heading","./RoundRect","./RoundRectCategory","./EdgeToEdgeCategory","./RoundRectList","./EdgeToEdgeList","./ListItem","./Switch","./ToolBarButton","./ProgressIndicator"],function(_186,View,_187,_188,_189,_18a,_18b,_18c,_18d,_18e,_18f,_190){return _186;});},"dijit/main":function(){define("dijit/main",["dojo/_base/kernel"],function(dojo){return dojo.dijit;});},"dijit/_Container":function(){define("dijit/_Container",["dojo/_base/array","dojo/_base/declare","dojo/dom-construct","./registry"],function(_191,_192,_193,_194){return _192("dijit._Container",null,{buildRendering:function(){this.inherited(arguments);if(!this.containerNode){this.containerNode=this.domNode;}},addChild:function(_195,_196){var _197=this.containerNode;if(_196&&typeof _196=="number"){var 
_198=this.getChildren();if(_198&&_198.length>=_196){_197=_198[_196-1].domNode;_196="after";}}_193.place(_195.domNode,_197,_196);if(this._started&&!_195._started){_195.startup();}},removeChild:function(_199){if(typeof _199=="number"){_199=this.getChildren()[_199];}if(_199){var node=_199.domNode;if(node&&node.parentNode){node.parentNode.removeChild(node);}}},hasChildren:function(){return this.getChildren().length>0;},_getSiblingOfChild:function(_19a,dir){var node=_19a.domNode,_19b=(dir>0?"nextSibling":"previousSibling");do{node=node[_19b];}while(node&&(node.nodeType!=1||!_194.byNode(node)));return node&&_194.byNode(node);},getIndexOfChild:function(_19c){return _191.indexOf(this.getChildren(),_19c);}});});}}});require(["dojo/i18n"],function(i18n){i18n._preloadLocalizations("dojox/nls/mobile",[]);});define("dojox/mobile",[".","dojo/_base/lang","dojox/mobile/_base"],function(_19d,lang,base){lang.getObject("mobile",true,_19d);return _19d.mobile;}); | PypiClean |
/Corrfunc-2.5.1.tar.gz/Corrfunc-2.5.1/docs/source/modules/weighted_correlations.rst | .. _weighted_correlations:
Computing Weighted Correlation Functions
========================================
Every clustering statistic in ``Corrfunc`` accepts an array
of weights that can be used to compute weighted correlation
functions. The API reference for each clustering statistic
(:py:mod:`Corrfunc.theory.xi`, :py:mod:`Corrfunc.mocks.DDrppi_mocks`,
etc.) contains examples of how to do this. The interface is standard across functions: the
inputs are a ``weights`` array and a ``weight_type`` string
that specifies how to use the "point weights" to compute a "pair weight".
Currently, the only supported ``weight_type`` is ``pair_product``,
in which the pair weight is the product of the point weights
(but see :ref:`custom_weighting` for how to write your own
function).
.. warning::
The computation of the weighted result is susceptible to loss of floating
point precision, especially in single precision. If you are using single
precision, make sure you test double precision as well (by casting all
pos and weight input arrays to type ``np.float64``, for example)
and check that the difference with the single-precision result
is acceptable.
If ``weight_type`` and ``weights`` (or ``weights1`` and ``weights2``
for cross-correlations) are given, the mean pair weight in a
separation bin will be given in the ``weightavg`` field of the
output. This field is 0.0 if weights are disabled.
Pair counts (i.e. the ``npairs`` field in the ``results`` array)
are never affected by weights. For theory functions like
:py:mod:`Corrfunc.theory.xi` and :py:mod:`Corrfunc.theory.wp`
that actually return a clustering statistic, the statistic is weighted.
For ``pair_product``, the distribution used to compute the
expected bin weight from an unclustered particle set (the ``RR`` term)
is taken to be a spatially uniform particle set where every particle
has the mean weight. See :ref:`weighted_rr` for more discussion.
Running with weights incurs a modest performance hit (around
20%, similar to enabling ``ravg``). Weights are supported for
all instruction sets (SSE, AVX, and fallback).
Consider the following simple example adapted from the :py:mod:`Corrfunc.theory.xi`
docstring, in which we assign a weight of 0.5 to every particle and get
the expected average pair weight of 0.25 (last column of the output).
Note that ``xi`` (fourth column) is also weighted, but the case of uniform
weights is equivalent to the unweighted case.
::
>>> from __future__ import print_function
>>> import numpy as np
>>> from os.path import dirname, abspath, join as pjoin
>>> import Corrfunc
>>> from Corrfunc.theory.xi import xi
>>> binfile = pjoin(dirname(abspath(Corrfunc.__file__)),
... "../theory/tests/", "bins")
>>> N = 100000
>>> boxsize = 420.0
>>> nthreads = 4
>>> seed = 42
>>> np.random.seed(seed)
>>> X = np.random.uniform(0, boxsize, N)
>>> Y = np.random.uniform(0, boxsize, N)
>>> Z = np.random.uniform(0, boxsize, N)
>>> weights = np.full_like(X, 0.5)
>>> results = xi(boxsize, nthreads, binfile, X, Y, Z, weights=weights, weight_type='pair_product', output_ravg=True)
>>> for r in results: print("{0:10.6f} {1:10.6f} {2:10.6f} {3:10.6f} {4:10d} {5:10.6f}"
... .format(r['rmin'], r['rmax'],
... r['ravg'], r['xi'], r['npairs'], r['weightavg']))
... # doctest: +NORMALIZE_WHITESPACE
0.167536 0.238755 0.226592 -0.205733 4 0.250000
0.238755 0.340251 0.289277 -0.176729 12 0.250000
0.340251 0.484892 0.426819 -0.051829 40 0.250000
0.484892 0.691021 0.596187 -0.131853 106 0.250000
0.691021 0.984777 0.850100 -0.049207 336 0.250000
0.984777 1.403410 1.225112 0.028543 1052 0.250000
1.403410 2.000000 1.737153 0.011403 2994 0.250000
2.000000 2.850200 2.474588 0.005405 8614 0.250000
2.850200 4.061840 3.532018 -0.014098 24448 0.250000
4.061840 5.788530 5.022241 -0.010784 70996 0.250000
5.788530 8.249250 7.160648 -0.001588 207392 0.250000
8.249250 11.756000 10.207213 -0.000323 601002 0.250000
11.756000 16.753600 14.541171 0.000007 1740084 0.250000
16.753600 23.875500 20.728773 -0.001595 5028058 0.250000
| PypiClean |
/OASYS1-ShadowOui-1.5.210.tar.gz/OASYS1-ShadowOui-1.5.210/orangecontrib/shadow/widgets/preprocessor/dabam_height_profile.py | import os, sys
from PyQt5.QtWidgets import QApplication
import orangecanvas.resources as resources
from oasys.widgets.abstract.error_profile.abstract_dabam_height_profile import OWAbstractDabamHeightProfile
from Shadow import ShadowTools as ST
from orangecontrib.shadow.util.shadow_objects import ShadowPreProcessorData
class OWdabam_height_profile(OWAbstractDabamHeightProfile):
    """OASYS preprocessor widget that builds a mirror height-error profile
    from a DABAM database entry and exports it in SHADOW's surface format.

    The abstract base class implements the GUI and the DABAM retrieval;
    this subclass only supplies SHADOW-specific behavior: workspace-unit
    handling, the SHADOW surface file writer, and the output channel that
    carries a :class:`ShadowPreProcessorData` payload downstream.
    """

    # Orange/OASYS widget metadata (names and ids are part of the public
    # interface of the widget and must not change).
    name = "DABAM Height Profile"
    id = "dabam_height_profile"
    description = "Calculation of mirror surface error profile"
    icon = "icons/dabam.png"
    author = "Luca Rebuffi"
    maintainer_email = "srio@esrf.eu; lrebuffi@anl.gov"
    priority = 6
    category = ""
    keywords = ["dabam_height_profile"]

    # Keep the generic DABAM output of the base class and add the
    # SHADOW-specific PreProcessor_Data channel.
    outputs = [OWAbstractDabamHeightProfile.get_dabam_output(),
               {"name": "PreProcessor_Data",
                "type": ShadowPreProcessorData,
                "doc": "PreProcessor Data",
                "id": "PreProcessor_Data"}]

    usage_path = os.path.join(resources.package_dirname("orangecontrib.shadow.widgets.gui"), "misc", "dabam_height_profile_usage.png")

    def __init__(self):
        super().__init__()

    def after_change_workspace_units(self):
        """Refresh every unit-dependent GUI element after the workspace
        units change: conversion factor, table headers, plot axis labels,
        the unit suffix of the input fields, and the output file extension.
        """
        self.si_to_user_units = 1 / self.workspace_units_to_m

        # Rebuild table headers and plot labels with the new unit symbol.
        self.horHeaders = ["Entry", "Shape", "Length\n[" + self.get_axis_um() + "]", "Heights St.Dev.\n[nm]", "Slopes St.Dev.\n[" + u"\u03BC" + "rad]"]
        self.table.setHorizontalHeaderLabels(self.horHeaders)
        self.plot_canvas[0].setGraphXLabel("Y [" + self.get_axis_um() + "]")
        self.plot_canvas[1].setGraphXLabel("Y [" + self.get_axis_um() + "]")
        self.axis.set_xlabel("X [" + self.get_axis_um() + "]")
        self.axis.set_ylabel("Y [" + self.get_axis_um() + "]")

        # Append the unit symbol to the caption of each dimension/step
        # input field (the caption is the first widget of the field's
        # parent layout). The loop replaces six identical copy-pasted
        # stanzas of the same three lines.
        for line_edit in (self.le_dimension_y_from,
                          self.le_dimension_y_to,
                          self.le_dimension_x,
                          self.le_step_x,
                          self.le_new_length_1,
                          self.le_new_length_2):
            label = line_edit.parent().layout().itemAt(0).widget()
            label.setText(label.text() + " [" + self.workspace_units_label + "]")

        # SHADOW reads plain-text surface files, so an hdf5 extension left
        # over from another preprocessor is rewritten to ".dat"
        # ("name.hdf5"[:-4] + "dat" -> "name.dat").
        if self.heigth_profile_file_name is not None:
            if self.heigth_profile_file_name.endswith("hdf5"):
                self.heigth_profile_file_name = self.heigth_profile_file_name[:-4] + "dat"

    def get_usage_path(self):
        """Return the path of the usage/help image shown by the widget."""
        return self.usage_path

    def get_axis_um(self):
        """Return the label of the current workspace length unit."""
        return self.workspace_units_label

    def write_error_profile_file(self):
        """Write the computed (xx, yy, zz) profile as a SHADOW surface file."""
        ST.write_shadow_surface(self.zz, self.xx, self.yy, self.heigth_profile_file_name)

    def send_data(self, dimension_x, dimension_y):
        """Emit the generated profile on the PreProcessor_Data channel.

        :param dimension_x: profile extent along X, in workspace units.
        :param dimension_y: profile extent along Y, in workspace units.
        """
        self.send("PreProcessor_Data", ShadowPreProcessorData(error_profile_data_file=self.heigth_profile_file_name,
                                                              error_profile_x_dim=dimension_x,
                                                              error_profile_y_dim=dimension_y))
'''
import os, sys
import time
import numpy
import threading
from PyQt5.QtCore import QRect, Qt
from PyQt5.QtWidgets import QApplication, QMessageBox, QScrollArea, QTableWidget, QTableWidgetItem, QHeaderView, QAbstractItemView, QWidget, QLabel, QSizePolicy
from PyQt5.QtGui import QTextCursor,QFont, QPalette, QColor, QPainter, QBrush, QPen, QPixmap
from matplotlib import cm
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg
from matplotlib.figure import Figure
import orangecanvas.resources as resources
from orangewidget import gui, widget
from orangewidget.settings import Setting
from oasys.widgets.widget import OWWidget
from oasys.widgets import gui as oasysgui
from oasys.widgets import congruence
from oasys.widgets.gui import ConfirmDialog
from oasys.util.oasys_util import EmittingStream
try:
from mpl_toolkits.mplot3d import Axes3D # necessario per caricare i plot 3D
except:
pass
from orangecontrib.shadow.util.shadow_objects import ShadowPreProcessorData
from srxraylib.metrology import profiles_simulation, dabam
from Shadow import ShadowTools as ST
class OWdabam_height_profile(OWWidget):
name = "DABAM Height Profile"
id = "dabam_height_profile"
description = "Calculation of mirror surface error profile"
icon = "icons/dabam.png"
author = "Luca Rebuffi"
maintainer_email = "srio@esrf.eu; lrebuffi@anl.gov"
priority = 6
category = ""
keywords = ["dabam_height_profile"]
outputs = [{"name": "PreProcessor_Data",
"type": ShadowPreProcessorData,
"doc": "PreProcessor Data",
"id": "PreProcessor_Data"}]
want_main_area = 1
want_control_area = 1
MAX_WIDTH = 1320
MAX_HEIGHT = 700
IMAGE_WIDTH = 860
IMAGE_HEIGHT = 645
CONTROL_AREA_WIDTH = 405
TABS_AREA_HEIGHT = 618
xx = None
yy = None
zz = None
entry_number = Setting(1)
shape=Setting(0)
slope_error_from = Setting(0.0)
slope_error_to = Setting(1.5)
dimension_y_from = Setting(0.0)
dimension_y_to = Setting(200.0)
use_undetrended = Setting(0)
step_x = Setting(1.0)
dimension_x = Setting(10.0)
center_y = Setting(1)
modify_y = Setting(0)
new_length = Setting(200.0)
filler_value = Setting(0.0)
renormalize_y = Setting(1)
error_type_y = Setting(0)
rms_y = Setting(0.9)
dabam_profile_index = Setting(1)
heigth_profile_file_name = Setting('mirror.dat')
tab=[]
usage_path = os.path.join(resources.package_dirname("orangecontrib.shadow.widgets.gui"), "misc", "dabam_height_profile_usage.png")
def __init__(self):
super().__init__()
self.runaction = widget.OWAction("Calculate Height Profile", self)
self.runaction.triggered.connect(self.calculate_heigth_profile_ni)
self.addAction(self.runaction)
self.runaction = widget.OWAction("Generate Height Profile File", self)
self.runaction.triggered.connect(self.generate_heigth_profile_file_ni)
self.addAction(self.runaction)
geom = QApplication.desktop().availableGeometry()
self.setGeometry(QRect(round(geom.width() * 0.05),
round(geom.height() * 0.05),
round(min(geom.width() * 0.98, self.MAX_WIDTH)),
round(min(geom.height() * 0.95, self.MAX_HEIGHT))))
self.setMaximumHeight(self.geometry().height())
self.setMaximumWidth(self.geometry().width())
# DABAM INITIALIZATION
self.server = dabam.dabam()
self.server.set_input_silent(True)
gui.separator(self.controlArea)
button_box = oasysgui.widgetBox(self.controlArea, "", addSpace=False, orientation="horizontal")
button = gui.button(button_box, self, "Calculate Height\nProfile", callback=self.calculate_heigth_profile)
button.setFixedHeight(45)
button = gui.button(button_box, self, "Generate Height\nProfile File", callback=self.generate_heigth_profile_file)
font = QFont(button.font())
font.setBold(True)
button.setFont(font)
palette = QPalette(button.palette()) # make a copy of the palette
palette.setColor(QPalette.ButtonText, QColor('Dark Blue'))
button.setPalette(palette) # assign new palette
button.setFixedHeight(45)
button.setFixedWidth(150)
button = gui.button(button_box, self, "Reset Fields", callback=self.call_reset_settings)
font = QFont(button.font())
font.setItalic(True)
button.setFont(font)
palette = QPalette(button.palette()) # make a copy of the palette
palette.setColor(QPalette.ButtonText, QColor('Dark Red'))
button.setPalette(palette) # assign new palette
button.setFixedHeight(45)
gui.separator(self.controlArea)
tabs_setting = oasysgui.tabWidget(self.controlArea)
tabs_setting.setFixedHeight(self.TABS_AREA_HEIGHT)
tabs_setting.setFixedWidth(self.CONTROL_AREA_WIDTH-5)
tab_input = oasysgui.createTabPage(tabs_setting, "DABAM Search Setting")
tab_gener = oasysgui.createTabPage(tabs_setting, "DABAM Generation Setting")
tab_out = oasysgui.createTabPage(tabs_setting, "Output")
tab_usa = oasysgui.createTabPage(tabs_setting, "Use of the Widget")
tab_usa.setStyleSheet("background-color: white;")
usage_box = oasysgui.widgetBox(tab_usa, "", addSpace=True, orientation="horizontal")
label = QLabel("")
label.setAlignment(Qt.AlignCenter)
label.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
label.setPixmap(QPixmap(self.usage_path))
usage_box.layout().addWidget(label)
manual_box = oasysgui.widgetBox(tab_input, "Manual Entry", addSpace=True, orientation="vertical")
oasysgui.lineEdit(manual_box, self, "entry_number", "Entry Number",
labelWidth=300, valueType=int, orientation="horizontal")
gui.separator(manual_box)
button = gui.button(manual_box, self, "Retrieve Profile", callback=self.retrieve_profile)
button.setFixedHeight(35)
button.setFixedWidth(self.CONTROL_AREA_WIDTH-35)
input_box = oasysgui.widgetBox(tab_input, "Search Parameters", addSpace=True, orientation="vertical")
gui.comboBox(input_box, self, "shape", label="Mirror Shape", labelWidth=300,
items=["All", "Plane", "Cylindrical", "Elliptical", "Toroidal", "Spherical"],
sendSelectedValue=False, orientation="horizontal")
gui.separator(input_box)
input_box_1 = oasysgui.widgetBox(input_box, "", addSpace=True, orientation="horizontal")
oasysgui.lineEdit(input_box_1, self, "slope_error_from", "Slope Error From (" + u"\u03BC" + "rad)",
labelWidth=150, valueType=float, orientation="horizontal")
oasysgui.lineEdit(input_box_1, self, "slope_error_to", "To (" + u"\u03BC" + "rad)",
labelWidth=60, valueType=float, orientation="horizontal")
input_box_2 = oasysgui.widgetBox(input_box, "", addSpace=True, orientation="horizontal")
self.le_dimension_y_from = oasysgui.lineEdit(input_box_2, self, "dimension_y_from", "Mirror Length From",
labelWidth=150, valueType=float, orientation="horizontal")
self.le_dimension_y_to = oasysgui.lineEdit(input_box_2, self, "dimension_y_to", "To",
labelWidth=60, valueType=float, orientation="horizontal")
table_box = oasysgui.widgetBox(tab_input, "Search Results", addSpace=True, orientation="vertical", height=250)
self.overlay_search = Overlay(table_box, self.search_profiles)
self.overlay_search.hide()
button = gui.button(input_box, self, "Search", callback=self.overlay_search.show)
button.setFixedHeight(35)
button.setFixedWidth(self.CONTROL_AREA_WIDTH-35)
gui.comboBox(table_box, self, "use_undetrended", label="Use Undetrended Profile", labelWidth=300,
items=["No", "Yes"], callback=self.table_item_clicked, sendSelectedValue=False, orientation="horizontal")
gui.separator(table_box)
self.scrollarea = QScrollArea()
self.scrollarea.setMinimumWidth(self.CONTROL_AREA_WIDTH-35)
table_box.layout().addWidget(self.scrollarea, alignment=Qt.AlignHCenter)
self.table = QTableWidget(1, 5)
self.table.setStyleSheet("background-color: #FBFBFB;")
self.table.setAlternatingRowColors(True)
self.table.horizontalHeader().setSectionResizeMode(QHeaderView.Fixed)
self.table.verticalHeader().setVisible(False)
self.table.setColumnWidth(0, 40)
self.table.setColumnWidth(1, 70)
self.table.setColumnWidth(2, 70)
self.table.setColumnWidth(3, 85)
self.table.setColumnWidth(4, 80)
self.table.resizeRowsToContents()
self.table.setSelectionBehavior(QAbstractItemView.SelectRows)
self.table.itemClicked.connect(self.table_item_clicked)
self.scrollarea.setWidget(self.table)
self.scrollarea.setWidgetResizable(1)
output_profile_box = oasysgui.widgetBox(tab_gener, "Surface Generation Parameters", addSpace=True, orientation="vertical", height=320)
self.le_dimension_x = oasysgui.lineEdit(output_profile_box, self, "dimension_x", "Width",
labelWidth=300, valueType=float, orientation="horizontal")
self.le_step_x = oasysgui.lineEdit(output_profile_box, self, "step_x", "Step Width",
labelWidth=300, valueType=float, orientation="horizontal")
gui.comboBox(output_profile_box, self, "center_y", label="Center Profile in the middle of O.E.", labelWidth=300,
items=["No", "Yes"], sendSelectedValue=False, orientation="horizontal")
gui.separator(output_profile_box)
gui.comboBox(output_profile_box, self, "modify_y", label="Modify Length?", labelWidth=150,
items=["No", "Rescale to new length", "Fit to new length (fill or cut)"], callback=self.set_ModifyY, sendSelectedValue=False, orientation="horizontal")
self.modify_box_1 = oasysgui.widgetBox(output_profile_box, "", addSpace=False, orientation="vertical", height=60)
self.modify_box_2 = oasysgui.widgetBox(output_profile_box, "", addSpace=False, orientation="vertical", height=60)
self.le_new_length_1 = oasysgui.lineEdit(self.modify_box_2, self, "new_length", "New Length", labelWidth=300, valueType=float, orientation="horizontal")
self.modify_box_3 = oasysgui.widgetBox(output_profile_box, "", addSpace=False, orientation="vertical", height=60)
self.le_new_length_2 = oasysgui.lineEdit(self.modify_box_3, self, "new_length", "New Length", labelWidth=300, valueType=float, orientation="horizontal")
oasysgui.lineEdit(self.modify_box_3, self, "filler_value", "Filler Value (if new length > profile length) [nm]", labelWidth=300, valueType=float, orientation="horizontal")
self.set_ModifyY()
gui.comboBox(output_profile_box, self, "renormalize_y", label="Renormalize Length Profile to different RMS", labelWidth=300,
items=["No", "Yes"], callback=self.set_RenormalizeY, sendSelectedValue=False, orientation="horizontal")
self.output_profile_box_1 = oasysgui.widgetBox(output_profile_box, "", addSpace=False, orientation="vertical", height=60)
self.output_profile_box_2 = oasysgui.widgetBox(output_profile_box, "", addSpace=False, orientation="vertical", height=60)
gui.comboBox(self.output_profile_box_1, self, "error_type_y", label="Normalization to", labelWidth=270,
items=["Figure Error (nm)", "Slope Error (" + u"\u03BC" + "rad)"],
sendSelectedValue=False, orientation="horizontal")
oasysgui.lineEdit(self.output_profile_box_1, self, "rms_y", "Rms Value",
labelWidth=300, valueType=float, orientation="horizontal")
self.set_RenormalizeY()
output_box = oasysgui.widgetBox(tab_gener, "Outputs", addSpace=True, orientation="vertical")
select_file_box = oasysgui.widgetBox(output_box, "", addSpace=True, orientation="horizontal")
self.le_heigth_profile_file_name = oasysgui.lineEdit(select_file_box, self, "heigth_profile_file_name", "Output File Name",
labelWidth=120, valueType=str, orientation="horizontal")
gui.button(select_file_box, self, "...", callback=self.selectFile)
self.shadow_output = oasysgui.textArea(height=400)
out_box = oasysgui.widgetBox(tab_out, "System Output", addSpace=True, orientation="horizontal", height=500)
out_box.layout().addWidget(self.shadow_output)
gui.rubber(self.controlArea)
self.initializeTabs()
gui.rubber(self.mainArea)
self.overlay_search.raise_()
def resizeEvent(self, event):
self.overlay_search.resize(self.CONTROL_AREA_WIDTH - 15, 290)
event.accept()
def after_change_workspace_units(self):
self.si_to_user_units = 1e2 / self.workspace_units_to_cm
self.horHeaders = ["Entry", "Shape", "Length\n[" + self.workspace_units_label + "]", "Heights St.Dev.\n[nm]", "Slopes St.Dev.\n[" + u"\u03BC" + "rad]"]
self.table.setHorizontalHeaderLabels(self.horHeaders)
self.plot_canvas[0].setGraphXLabel("Y [" + self.workspace_units_label + "]")
self.plot_canvas[1].setGraphXLabel("Y [" + self.workspace_units_label + "]")
self.axis.set_xlabel("X [" + self.workspace_units_label + "]")
self.axis.set_ylabel("Y [" + self.workspace_units_label + "]")
label = self.le_dimension_y_from.parent().layout().itemAt(0).widget()
label.setText(label.text() + " [" + self.workspace_units_label + "]")
label = self.le_dimension_y_to.parent().layout().itemAt(0).widget()
label.setText(label.text() + " [" + self.workspace_units_label + "]")
label = self.le_dimension_x.parent().layout().itemAt(0).widget()
label.setText(label.text() + " [" + self.workspace_units_label + "]")
label = self.le_step_x.parent().layout().itemAt(0).widget()
label.setText(label.text() + " [" + self.workspace_units_label + "]")
label = self.le_new_length_1.parent().layout().itemAt(0).widget()
label.setText(label.text() + " [" + self.workspace_units_label + "]")
label = self.le_new_length_2.parent().layout().itemAt(0).widget()
label.setText(label.text() + " [" + self.workspace_units_label + "]")
def initializeTabs(self):
self.tabs = oasysgui.tabWidget(self.mainArea)
self.tab = [oasysgui.createTabPage(self.tabs, "Info"),
oasysgui.createTabPage(self.tabs, "Heights Profile"),
oasysgui.createTabPage(self.tabs, "Slopes Profile"),
oasysgui.createTabPage(self.tabs, "PSD Heights"),
oasysgui.createTabPage(self.tabs, "CSD Heights"),
oasysgui.createTabPage(self.tabs, "ACF"),
oasysgui.createTabPage(self.tabs, "Generated 2D Profile"),
]
for tab in self.tab:
tab.setFixedHeight(self.IMAGE_HEIGHT)
tab.setFixedWidth(self.IMAGE_WIDTH)
self.plot_canvas = [None, None, None, None, None, None]
self.plot_canvas[0] = oasysgui.plotWindow(roi=False, control=False, position=True)
self.plot_canvas[0].setDefaultPlotLines(True)
self.plot_canvas[0].setActiveCurveColor(color='blue')
self.plot_canvas[0].setGraphYLabel("Z [nm]")
self.plot_canvas[0].setGraphTitle("Heights Profile")
self.plot_canvas[0].setInteractiveMode(mode='zoom')
self.plot_canvas[1] = oasysgui.plotWindow(roi=False, control=False, position=True)
self.plot_canvas[1].setDefaultPlotLines(True)
self.plot_canvas[1].setActiveCurveColor(color='blue')
self.plot_canvas[1].setGraphYLabel("Zp [$\mu$rad]")
self.plot_canvas[1].setGraphTitle("Slopes Profile")
self.plot_canvas[1].setInteractiveMode(mode='zoom')
self.plot_canvas[2] = oasysgui.plotWindow(roi=False, control=False, position=True)
self.plot_canvas[2].setDefaultPlotLines(True)
self.plot_canvas[2].setActiveCurveColor(color='blue')
self.plot_canvas[2].setGraphXLabel("f [m^-1]")
self.plot_canvas[2].setGraphYLabel("PSD [m^3]")
self.plot_canvas[2].setGraphTitle("Power Spectral Density of Heights Profile")
self.plot_canvas[2].setInteractiveMode(mode='zoom')
self.plot_canvas[2].setXAxisLogarithmic(True)
self.plot_canvas[2].setYAxisLogarithmic(True)
self.plot_canvas[3] = oasysgui.plotWindow(roi=False, control=False, position=True)
self.plot_canvas[3].setDefaultPlotLines(True)
self.plot_canvas[3].setActiveCurveColor(color='blue')
self.plot_canvas[3].setGraphXLabel("f [m^-1]")
self.plot_canvas[3].setGraphYLabel("CSD [m^3]")
self.plot_canvas[3].setGraphTitle("Cumulative Spectral Density of Heights Profile")
self.plot_canvas[3].setInteractiveMode(mode='zoom')
self.plot_canvas[3].setXAxisLogarithmic(True)
self.plot_canvas[4] = oasysgui.plotWindow(roi=False, control=False, position=True)
self.plot_canvas[4].setDefaultPlotLines(True)
self.plot_canvas[4].setActiveCurveColor(color='blue')
self.plot_canvas[4].setGraphXLabel("Length [m]")
self.plot_canvas[4].setGraphYLabel("ACF")
self.plot_canvas[4].setGraphTitle("Autocovariance Function of Heights Profile")
self.plot_canvas[4].setInteractiveMode(mode='zoom')
self.figure = Figure(figsize=(self.IMAGE_HEIGHT, self.IMAGE_HEIGHT)) # QUADRATA!
self.figure.patch.set_facecolor('white')
self.axis = self.figure.add_subplot(111, projection='3d')
self.axis.set_zlabel("Z [nm]")
self.plot_canvas[5] = FigureCanvasQTAgg(self.figure)
self.profileInfo = oasysgui.textArea(height=self.IMAGE_HEIGHT-5, width=400)
profile_box = oasysgui.widgetBox(self.tab[0], "", addSpace=True, orientation="horizontal", height = self.IMAGE_HEIGHT, width=410)
profile_box.layout().addWidget(self.profileInfo)
for index in range(0, 6):
self.tab[index+1].layout().addWidget(self.plot_canvas[index])
self.tabs.setCurrentIndex(1)
def plot_dabam_graph(self, plot_canvas_index, curve_name, x_values, y_values, xtitle, ytitle, color='blue', replace=True):
self.plot_canvas[plot_canvas_index].addCurve(x_values, y_values, curve_name, symbol='', color=color, replace=replace) #'+', '^', ','
self.plot_canvas[plot_canvas_index].setGraphXLabel(xtitle)
self.plot_canvas[plot_canvas_index].setGraphYLabel(ytitle)
self.plot_canvas[plot_canvas_index].replot()
def set_ModifyY(self):
self.modify_box_1.setVisible(self.modify_y == 0)
self.modify_box_2.setVisible(self.modify_y == 1)
self.modify_box_3.setVisible(self.modify_y == 2)
def set_RenormalizeY(self):
self.output_profile_box_1.setVisible(self.renormalize_y==1)
self.output_profile_box_2.setVisible(self.renormalize_y==0)
def table_item_clicked(self):
if self.table.selectionModel().hasSelection():
if not self.table.rowCount() == 0:
if not self.table.item(0, 0) is None:
row = self.table.selectionModel().selectedRows()[0].row()
self.entry_number = int(self.table.item(row, 0).text())
self.retrieve_profile()
def retrieve_profile(self):
try:
if self.entry_number is None or self.entry_number <= 0:
raise Exception("Entry number should be a strictly positive integer number")
self.server.load(self.entry_number)
self.profileInfo.setText(self.server.info_profiles())
self.plot_canvas[0].setGraphTitle(
"Heights Profile. St.Dev.=%.3f nm" % (self.server.stdev_profile_heights() * 1e9))
self.plot_canvas[1].setGraphTitle(
"Slopes Profile. St.Dev.=%.3f $\mu$rad" % (self.server.stdev_profile_slopes() * 1e6))
if self.use_undetrended == 0:
self.plot_dabam_graph(0, "heights_profile", self.si_to_user_units * self.server.y,
1e9 * self.server.zHeights, "Y [" + self.workspace_units_label + "]", "Z [nm]")
self.plot_dabam_graph(1, "slopes_profile", self.si_to_user_units * self.server.y, 1e6 * self.server.zSlopes,
"Y [" + self.workspace_units_label + "]", "Zp [$\mu$rad]")
else:
self.plot_dabam_graph(0, "heights_profile", self.si_to_user_units * self.server.y,
1e9 * self.server.zHeightsUndetrended, "Y [" + self.workspace_units_label + "]",
"Z [nm]")
self.plot_dabam_graph(1, "slopes_profile", self.si_to_user_units * self.server.y,
1e6 * self.server.zSlopesUndetrended, "Y [" + self.workspace_units_label + "]",
"Zp [$\mu$rad]")
y = self.server.f ** (self.server.powerlaw["hgt_pendent"]) * 10 ** self.server.powerlaw["hgt_shift"]
i0 = self.server.powerlaw["index_from"]
i1 = self.server.powerlaw["index_to"]
beta = -self.server.powerlaw["hgt_pendent"]
self.plot_canvas[2].setGraphTitle(
"Power Spectral Density of Heights Profile (beta=%.2f,Df=%.2f)" % (beta, (5 - beta) / 2))
self.plot_dabam_graph(2, "psd_heights_2", self.server.f, self.server.psdHeights, "f [m^-1]", "PSD [m^3]")
self.plot_dabam_graph(2, "psd_heights_1", self.server.f, y, "f [m^-1]", "PSD [m^3]", color='green',
replace=False)
self.plot_dabam_graph(2, "psd_heights_3", self.server.f[i0:i1], y[i0:i1], "f [m^-1]", "PSD [m^3]", color='red',
replace=False)
self.plot_dabam_graph(3, "csd", self.server.f, self.server.csd_heights(), "f [m^-1]", "CSD [m^3]")
c1, c2, c3 = dabam.autocorrelationfunction(self.server.y, self.server.zHeights)
self.plot_canvas[4].setGraphTitle(
"Autocovariance Function of Heights Profile.\nAutocorrelation Length (ACF=0.5)=%.3f m" % (c3))
self.plot_dabam_graph(4, "acf", c1[0:-1], c2, "Length [m]", "Heights Autocovariance")
# surface error removal
if not self.zz is None and not self.yy is None and not self.xx is None:
self.xx = None
self.yy = None
self.zz = None
self.axis.set_title("")
self.axis.clear()
self.plot_canvas[5].draw()
if (self.tabs.currentIndex()==6): self.tabs.setCurrentIndex(1)
except Exception as exception:
QMessageBox.critical(self, "Error",
exception.args[0],
QMessageBox.Ok)
if self.IS_DEVELOP: raise exception
def search_profiles(self):
try:
self.table.itemClicked.disconnect(self.table_item_clicked)
self.table.clear()
row_count = self.table.rowCount()
for n in range(0, row_count):
self.table.removeRow(0)
self.table.setHorizontalHeaderLabels(self.horHeaders)
profiles = dabam.dabam_summary_dictionary(surface=self.get_dabam_shape(),
slp_err_from=self.slope_error_from*1e-6,
slp_err_to=self.slope_error_to*1e-6,
length_from=self.dimension_y_from / self.si_to_user_units,
length_to=self.dimension_y_to / self.si_to_user_units)
for index in range(0, len(profiles)):
self.table.insertRow(0)
for index in range(0, len(profiles)):
table_item = QTableWidgetItem(str(profiles[index]["entry"]))
table_item.setTextAlignment(Qt.AlignCenter)
self.table.setItem(index, 0, table_item)
table_item = QTableWidgetItem(str(profiles[index]["surface"]))
table_item.setTextAlignment(Qt.AlignLeft)
self.table.setItem(index, 1, table_item)
table_item = QTableWidgetItem(str(numpy.round(profiles[index]["length"]*self.si_to_user_units, 3)))
table_item.setTextAlignment(Qt.AlignRight)
self.table.setItem(index, 2, table_item)
table_item = QTableWidgetItem(str(numpy.round(profiles[index]["hgt_err"]*1e9, 3)))
table_item.setTextAlignment(Qt.AlignRight)
self.table.setItem(index, 3, table_item)
table_item = QTableWidgetItem(str(numpy.round(profiles[index]["slp_err"]*1e6, 3)))
table_item.setTextAlignment(Qt.AlignRight)
self.table.setItem(index, 4, table_item)
self.table.setHorizontalHeaderLabels(self.horHeaders)
self.table.resizeRowsToContents()
self.table.setSelectionBehavior(QAbstractItemView.SelectRows)
self.table.itemClicked.connect(self.table_item_clicked)
self.overlay_search.hide()
except Exception as exception:
self.overlay_search.hide()
QMessageBox.critical(self, "Error",
exception.args[0],
QMessageBox.Ok)
def get_dabam_shape(self):
if self.shape == 0: return None
elif self.shape == 1: return "plane"
elif self.shape == 2: return "cylindrical"
elif self.shape == 3: return "elliptical"
elif self.shape == 4: return "toroidal"
elif self.shape == 5: return "spherical"
def calculate_heigth_profile_ni(self):
self.calculate_heigth_profile(not_interactive_mode=True)
def calculate_heigth_profile(self, not_interactive_mode=False):
import matplotlib
print (matplotlib.__version__)
try:
if self.server.y is None: raise Exception("No Profile Selected")
sys.stdout = EmittingStream(textWritten=self.writeStdOut)
self.check_fields()
# PREVENTS CRASH WITH PYQT5
if not not_interactive_mode: self.tabs.setCurrentIndex(6)
combination = "EF"
if self.modify_y == 2:
profile_1D_y_x_temp = self.si_to_user_units * self.server.y
if self.use_undetrended == 0: profile_1D_y_y_temp = self.si_to_user_units * self.server.zHeights
else: profile_1D_y_y_temp = self.si_to_user_units * self.server.zHeightsUndetrended
first_coord = profile_1D_y_x_temp[0]
second_coord = profile_1D_y_x_temp[1]
last_coord = profile_1D_y_x_temp[-1]
step = numpy.abs(second_coord - first_coord)
length = numpy.abs(last_coord - first_coord)
n_points_old = len(profile_1D_y_x_temp)
if self.new_length > length:
difference = self.new_length - length
n_added_points = int(difference/step)
if difference % step == 0:
n_added_points += 1
if n_added_points % 2 != 0:
n_added_points += 1
profile_1D_y_x = numpy.arange(n_added_points + n_points_old) * step
profile_1D_y_y = numpy.ones(n_added_points + n_points_old) * self.filler_value * 1e-9 * self.si_to_user_units
profile_1D_y_y[int(n_added_points/2) : n_points_old + int(n_added_points/2)] = profile_1D_y_y_temp
elif self.new_length < length:
difference = length - self.new_length
n_removed_points = int(difference/step)
if difference % step == 0:
n_removed_points -= 1
if n_removed_points % 2 != 0:
n_removed_points -= 1
if n_removed_points >= 2:
profile_1D_y_x = profile_1D_y_x_temp[0 : (n_points_old - n_removed_points)]
profile_1D_y_y = profile_1D_y_y_temp[(int(n_removed_points/2) - 1) : (n_points_old - int(n_removed_points/2) - 1)]
else:
profile_1D_y_x = profile_1D_y_x_temp
profile_1D_y_y = profile_1D_y_y_temp
else:
profile_1D_y_x = profile_1D_y_x_temp
profile_1D_y_y = profile_1D_y_y_temp
else:
if self.modify_y == 0:
profile_1D_y_x = self.si_to_user_units * self.server.y
elif self.modify_y == 1:
scale_factor_y = self.new_length/(self.si_to_user_units * (max(self.server.y)-min(self.server.y)))
profile_1D_y_x = self.si_to_user_units * self.server.y * scale_factor_y
if self.use_undetrended == 0: profile_1D_y_y = self.si_to_user_units * self.server.zHeights
else: profile_1D_y_y = self.si_to_user_units * self.server.zHeightsUndetrended
if self.center_y:
first_coord = profile_1D_y_x[0]
last_coord = profile_1D_y_x[-1]
length = numpy.abs(last_coord - first_coord)
profile_1D_y_x_temp = numpy.linspace(-length/2, length/2, len(profile_1D_y_x))
profile_1D_y_x = profile_1D_y_x_temp
if self.renormalize_y == 0:
rms_y = None
else:
if self.error_type_y == profiles_simulation.FIGURE_ERROR:
rms_y = self.si_to_user_units * self.rms_y * 1e-9 # from nm to user units
else:
rms_y = self.rms_y * 1e-6 # from urad to rad
xx, yy, zz = profiles_simulation.simulate_profile_2D(combination = combination,
error_type_l = self.error_type_y,
rms_l = rms_y,
x_l = profile_1D_y_x,
y_l = profile_1D_y_y,
mirror_width = self.dimension_x,
step_w = self.step_x,
rms_w = 0.0)
self.xx = xx
self.yy = yy
self.zz = zz # in user units
self.axis.clear()
x_to_plot, y_to_plot = numpy.meshgrid(xx, yy)
z_to_plot = zz * 1e9 / self.si_to_user_units #nm
self.axis.plot_surface(x_to_plot, y_to_plot, z_to_plot,
rstride=1, cstride=1, cmap=cm.autumn, linewidth=0.5, antialiased=True)
sloperms = profiles_simulation.slopes(zz.T, xx, yy, return_only_rms=1)
title = ' Slope error rms in X direction: %f $\mu$rad' % (sloperms[0]*1e6) + '\n' + \
' Slope error rms in Y direction: %f $\mu$rad' % (sloperms[1]*1e6) + '\n' + \
' Figure error rms in X direction: %f nm' % (round(zz[0, :].std()*1e9/self.si_to_user_units, 6)) + '\n' + \
' Figure error rms in Y direction: %f nm' % (round(zz[:, 0].std()*1e9/self.si_to_user_units, 6))
self.axis.set_xlabel("X [" + self.workspace_units_label + "]")
self.axis.set_ylabel("Y [" + self.workspace_units_label + "]")
self.axis.set_zlabel("Z [nm]")
self.axis.set_title(title)
self.axis.mouse_init()
if not not_interactive_mode:
try:
import matplotlib
if matplotlib.__version__ == "1.4.3":
self.plot_canvas[5].draw()
except:
pass
QMessageBox.information(self, "QMessageBox.information()",
"Height Profile calculated: if the result is satisfactory,\nclick \'Generate Height Profile File\' to complete the operation ",
QMessageBox.Ok)
except Exception as exception:
QMessageBox.critical(self, "Error",
exception.args[0],
QMessageBox.Ok)
if self.IS_DEVELOP: raise exception
def generate_heigth_profile_file_ni(self):
self.generate_heigth_profile_file(not_interactive_mode=True)
def generate_heigth_profile_file(self, not_interactive_mode=False):
if not self.zz is None and not self.yy is None and not self.xx is None:
try:
congruence.checkDir(self.heigth_profile_file_name)
sys.stdout = EmittingStream(textWritten=self.writeStdOut)
ST.write_shadow_surface(self.zz, self.xx, self.yy, outFile=congruence.checkFileName(self.heigth_profile_file_name))
if not not_interactive_mode:
QMessageBox.information(self, "QMessageBox.information()",
"Height Profile file " + self.heigth_profile_file_name + " written on disk",
QMessageBox.Ok)
if self.modify_y == 0:
dimension_y = self.si_to_user_units * (self.server.y[-1] - self.server.y[0])
if self.modify_y == 1 or self.modify_y == 2:
dimension_y = self.new_length
self.send("PreProcessor_Data", ShadowPreProcessorData(error_profile_data_file=self.heigth_profile_file_name,
error_profile_x_dim=self.dimension_x,
error_profile_y_dim=dimension_y))
except Exception as exception:
QMessageBox.critical(self, "Error",
exception.args[0],
QMessageBox.Ok)
def call_reset_settings(self):
if ConfirmDialog.confirmed(parent=self, message="Confirm Reset of the Fields?"):
try:
self.resetSettings()
except:
pass
def check_fields(self):
self.dimension_x = congruence.checkStrictlyPositiveNumber(self.dimension_x, "Dimension X")
self.step_x = congruence.checkStrictlyPositiveNumber(self.step_x, "Step X")
congruence.checkLessOrEqualThan(self.step_x, self.dimension_x/2, "Step Width", "Width/2")
if self.modify_y == 1 or self.modify_y == 2:
self.new_length = congruence.checkStrictlyPositiveNumber(self.new_length, "New Length")
if self.renormalize_y == 1:
self.rms_y = congruence.checkPositiveNumber(self.rms_y, "Rms Y")
congruence.checkDir(self.heigth_profile_file_name)
def writeStdOut(self, text):
cursor = self.shadow_output.textCursor()
cursor.movePosition(QTextCursor.End)
cursor.insertText(text)
self.shadow_output.setTextCursor(cursor)
self.shadow_output.ensureCursorVisible()
def selectFile(self):
self.le_heigth_profile_file_name.setText(oasysgui.selectFileFromDialog(self, self.heigth_profile_file_name, "Select Output File", file_extension_filter="Data Files (*.dat)"))
class Overlay(QWidget):
def __init__(self, container_widget=None, target_method=None):
QWidget.__init__(self, container_widget)
self.container_widget = container_widget
self.target_method = target_method
palette = QPalette(self.palette())
palette.setColor(palette.Background, Qt.transparent)
self.setPalette(palette)
def paintEvent(self, event):
painter = QPainter()
painter.begin(self)
painter.setRenderHint(QPainter.Antialiasing)
painter.fillRect(event.rect(), QBrush(QColor(255, 255, 255, 127)))
painter.setPen(QPen(Qt.NoPen))
for i in range(1, 7):
if self.position_index == i:
painter.setBrush(QBrush(QColor(255, 165, 0)))
else:
painter.setBrush(QBrush(QColor(127, 127, 127)))
painter.drawEllipse(
self.width()/2 + 30 * numpy.cos(2 * numpy.pi * i / 6.0) - 10,
self.height()/2 + 30 * numpy.sin(2 * numpy.pi * i / 6.0) - 10,
20, 20)
time.sleep(0.005)
painter.end()
def showEvent(self, event):
self.timer = self.startTimer(0)
self.counter = 0
self.position_index = 0
t = threading.Thread(target=self.target_method)
t.start()
def hideEvent(self, QHideEvent):
self.killTimer(self.timer)
def timerEvent(self, event):
self.counter += 1
self.position_index += 1
if self.position_index == 7: self.position_index = 1
self.update()
'''
if __name__ == "__main__":
    # Standalone test harness: run the widget inside a bare Qt application.
    app = QApplication(sys.argv)
    w = OWdabam_height_profile()
    # Pretend the workspace unit is centimeters (1 m = 100 cm) since no
    # OASYS workspace is available to call after_change_workspace_units().
    w.si_to_user_units = 100
    w.show()
    app.exec()
    # Fix: the original last line read ``w.saveSettings() | PypiClean`` —
    # ``| PypiClean`` is packaging residue fused onto the statement and
    # raises NameError at runtime; drop it.
    w.saveSettings()
/Flask-WhooshAlchemy-Redux-0.7.1.tar.gz/Flask-WhooshAlchemy-Redux-0.7.1/flask_whooshalchemy.py | from __future__ import with_statement
from __future__ import absolute_import
import flask.ext.sqlalchemy as flask_sqlalchemy
import sqlalchemy
from whoosh.qparser import OrGroup
from whoosh.qparser import AndGroup
from whoosh.qparser import MultifieldParser
from whoosh.analysis import StemmingAnalyzer
import whoosh.index
from whoosh.fields import Schema
#from whoosh.fields import ID, TEXT, KEYWORD, STORED
import heapq
import os
__searchable__ = '__searchable__'
# Python 2/3 compatibility: on Python 3 the builtin ``unicode`` no longer
# exists, so probe for it and alias it to ``str``. All text handed to the
# Whoosh index below is normalized through this ``unicode`` name.
try:
    unicode
except NameError:
    unicode = str

# Directory name used for the on-disk Whoosh index when the application
# does not configure WHOOSH_BASE explicitly.
DEFAULT_WHOOSH_INDEX_NAME = 'whoosh_index'
class _QueryProxy(flask_sqlalchemy.BaseQuery):
    # We're replacing the model's ``query`` field with this proxy. The main
    # thing this proxy does is override the __iter__ method so that results are
    # returned in the order of the whoosh score to reflect text-based ranking.

    def __init__(self, entities, session=None):
        super(_QueryProxy, self).__init__(entities, session)

        # NOTE(review): ``_mapper_zero()`` is a private SQLAlchemy Query
        # API — presumably it yields the mapper of the primary entity;
        # confirm it still exists on the SQLAlchemy version in use.
        self._modelclass = self._mapper_zero().class_
        # Name of the model attribute that is stored in the Whoosh index as
        # the record's primary key (set up elsewhere in this module).
        self._primary_key_name = self._modelclass.whoosh_primary_key
        # Callable (_Searcher instance) that runs the raw Whoosh query.
        self._whoosh_searcher = self._modelclass.pure_whoosh

        # Stores whoosh results from query. If ``None``, indicates that no
        # whoosh query was performed.
        self._whoosh_rank = None

    def __iter__(self):
        ''' Reorder ORM-db results according to Whoosh relevance score. '''
        super_iter = super(_QueryProxy, self).__iter__()

        # NOTE(review): ``_order_by`` is a private SQLAlchemy attribute;
        # ``is not False`` relies on its sentinel value — verify against the
        # pinned SQLAlchemy version.
        if self._whoosh_rank is None or self._order_by is not False:
            # Whoosh search hasn't been run or caller has explicitly asked
            # for results to be sorted, so behave as normal (no Whoosh
            # relevance score sorting).
            return super_iter

        super_rows = list(super_iter)

        # Iterate through the values and re-order by whoosh relevance.
        ordered_by_whoosh_rank = []

        for row in super_rows:
            # Push items onto heap, where sort value is the rank provided by
            # Whoosh
            if hasattr(row, self._primary_key_name):
                heapq.heappush(ordered_by_whoosh_rank,
                               (self._whoosh_rank[unicode(getattr(row,
                                   self._primary_key_name))], row))
            else:
                # PK column not found in result row
                return iter(super_rows)

        def _inner():
            # Lazily pop rows in ascending rank order (rank 0 = best match).
            # Ranks are unique per row (assigned by enumerate), so heapq
            # never falls through to comparing the row objects themselves.
            while ordered_by_whoosh_rank:
                yield heapq.heappop(ordered_by_whoosh_rank)[1]

        return _inner()

    def whoosh_search(self, query, limit=None, fields=None, or_=False):
        '''
        Execute text query on database. Results have a text-based
        match to the query, ranked by the scores from the underlying Whoosh
        index.

        By default, the search is executed on all of the indexed fields as an
        OR conjunction. For example, if a model has 'title' and 'content'
        indicated as ``__searchable__``, a query will be checked against both
        fields, returning any instance whose title or content are a content
        match for the query. To specify particular fields to be checked,
        populate the ``fields`` parameter with the desired fields.

        By default, results will only be returned if they contain all of the
        query terms (AND). To switch to an OR grouping, set the ``or_``
        parameter to ``True``.
        '''
        if not isinstance(query, unicode):
            query = unicode(query)

        results = self._whoosh_searcher(query, limit, fields, or_)

        if not results:
            # We don't want to proceed with empty results because we get a
            # stderr warning from sqlalchemy when executing 'in_' on empty set.
            # However we cannot just return an empty list because it will not
            # be a query.

            # XXX is this efficient?
            # explicitly set text('null') to avoid a warning in output
            return self.filter(sqlalchemy.text('null'))

        result_set = set()
        result_ranks = {}

        for rank, result in enumerate(results):
            pk = result[self._primary_key_name]
            result_set.add(pk)
            result_ranks[pk] = rank

        # ``filter`` returns a *clone* of this query; stash the rank map on
        # the clone so its __iter__ re-sorts by Whoosh relevance.
        f = self.filter(getattr(self._modelclass,
                                self._primary_key_name).in_(result_set))
        f._whoosh_rank = result_ranks

        return f
class _Searcher(object):
    ''' Assigned to a Model class as ``pure_search``, which enables
    text-querying to whoosh hit list. Also used by ``query.whoosh_search``'''

    def __init__(self, primary, indx):
        self.primary_key_name = primary
        self._index = indx
        # NOTE(review): this searcher is opened here but never used by this
        # class -- ``__call__`` below opens a fresh searcher per query.  It
        # is kept as a public attribute for backward compatibility; confirm
        # nothing external relies on it before removing.
        self.searcher = indx.searcher()
        # Default query fields: every indexed field except the primary key.
        self._all_fields = list(set(indx.schema._fields.keys()) -
                                set([self.primary_key_name]))

    def __call__(self, query, limit=None, fields=None, or_=False):
        # Parse *query* against the selected fields and return the raw
        # whoosh hit list.  ``or_=True`` switches from the default AND
        # grouping to OR grouping.
        if fields is None:
            fields = self._all_fields
        group = OrGroup if or_ else AndGroup
        parser = MultifieldParser(fields, self._index.schema, group=group)
        return self._index.searcher().search(parser.parse(query),
                                             limit=limit)
def whoosh_index(app, model):
    ''' Create whoosh index for ``model``, if one does not exist. If
    the index exists it is opened and cached.

    The cache is a dict of model name -> whoosh index stored on ``app``
    as ``app.whoosh_indexes``.
    '''
    if not hasattr(app, 'whoosh_indexes'):
        app.whoosh_indexes = {}
    # Bug fix: the original passed ``_create_index(app, model)`` as the
    # *default* argument of ``dict.get``, which Python evaluates eagerly --
    # the index was therefore re-created/re-opened on every call even when
    # it was already cached.  Only create the index when it is missing.
    index = app.whoosh_indexes.get(model.__name__)
    if index is None:
        index = _create_index(app, model)
    return index
def _get_analyzer(app, model):
    """Pick the whoosh analyzer for *model*.

    Precedence: the model's own ``__analyzer__`` attribute, then the
    app-level ``WHOOSH_ANALYZER`` config value, then a default
    ``StemmingAnalyzer``.
    """
    analyzer = getattr(model, '__analyzer__', None)
    if analyzer:
        return analyzer
    analyzer = app.config.get('WHOOSH_ANALYZER')
    if analyzer:
        return analyzer
    return StemmingAnalyzer()
def _create_index(app, model):
    """Build (or open) the on-disk whoosh index for *model* and wire the
    model class up for searching.

    Side effects: caches the index in ``app.whoosh_indexes``, installs
    ``pure_whoosh``/``whoosh_primary_key`` on the model, and swaps the
    model's query class for the ranking-aware ``_QueryProxy``.

    Only primary key -> whoosh.ID and sqlalchemy (String, Unicode, Text)
    -> whoosh.TEXT are supported by the generated schema.
    """
    if not app.config.get('WHOOSH_BASE'):
        # No base directory configured; fall back to the default rather
        # than raising -- an exception here would surface inside the
        # after-commit hook, which is worse.
        app.config['WHOOSH_BASE'] = DEFAULT_WHOOSH_INDEX_NAME
    # One index directory per model class.
    index_dir = os.path.join(app.config.get('WHOOSH_BASE'),
                             model.__name__)
    analyzer = _get_analyzer(app, model)
    schema, primary_key = _get_whoosh_schema_and_primary_key(model, analyzer)
    if whoosh.index.exists_in(index_dir):
        index = whoosh.index.open_dir(index_dir)
    else:
        if not os.path.exists(index_dir):
            os.makedirs(index_dir)
        index = whoosh.index.create_in(index_dir, schema)
    app.whoosh_indexes[model.__name__] = index
    model.pure_whoosh = _Searcher(primary_key, index)
    model.whoosh_primary_key = primary_key
    # Route this model's queries through our whoosh-rank-aware query class.
    model.query_class = _QueryProxy
    return index
def _get_whoosh_schema_and_primary_key(model, analyzer):
    """Derive a whoosh ``Schema`` from *model*'s table columns.

    Returns ``(schema, primary_key_name)``.  The primary key column becomes
    a stored, unique ID field; every ``__searchable__`` text-like column
    becomes a TEXT field using *analyzer*.
    """
    fields = {}
    primary = None
    searchable = set(model.__searchable__)
    text_types = (sqlalchemy.types.Text, sqlalchemy.types.String,
                  sqlalchemy.types.Unicode)
    for column in model.__table__.columns:
        if column.primary_key:
            fields[column.name] = whoosh.fields.ID(stored=True, unique=True)
            primary = column.name
        if column.name in searchable and isinstance(column.type, text_types):
            # Deliberately may overwrite the ID entry above when the PK
            # itself is a searchable text column (historical behaviour).
            fields[column.name] = whoosh.fields.TEXT(analyzer=analyzer)
    return Schema(**fields), primary
def _after_flush(app, changes):
    """Keep the whoosh indexes in sync with committed ORM changes.

    *changes* is a sequence of ``(instance, operation)`` pairs from the
    ``models_committed`` signal.  Instances of models declaring
    ``__searchable__`` are (re)indexed on insert/update and removed from
    the index otherwise.  The index is created on first use, which can
    make the initial commit of a model slower.
    """
    # Group changes per model so each model gets a single writer.
    by_model = {}
    for change in changes:
        instance, operation = change[0], change[1]
        if not hasattr(instance.__class__, __searchable__):
            continue
        is_update = operation in ('update', 'insert')
        by_model.setdefault(instance.__class__.__name__, []).append(
            (is_update, instance))
    for model, entries in by_model.items():
        index = whoosh_index(app, entries[0][1].__class__)
        with index.writer() as writer:
            primary_field = entries[0][1].pure_whoosh.primary_key_name
            searchable = entries[0][1].__searchable__
            for is_update, instance in entries:
                if is_update:
                    attrs = {}
                    for key in searchable:
                        try:
                            attrs[key] = unicode(getattr(instance, key))
                        except AttributeError:
                            raise AttributeError('{0} does not have {1} field {2}'
                                    .format(model, __searchable__, key))
                    attrs[primary_field] = unicode(getattr(instance, primary_field))
                    writer.update_document(**attrs)
                else:
                    writer.delete_by_term(
                        primary_field, unicode(getattr(instance, primary_field)))
flask_sqlalchemy.models_committed.connect(_after_flush) | PypiClean |
/Flask-Statics-Helper-1.0.0.tar.gz/Flask-Statics-Helper-1.0.0/flask_statics/static/angular/i18n/angular-locale_tg-cyrl-tj.js | 'use strict';
angular.module("ngLocale", [], ["$provide", function($provide) {
var PLURAL_CATEGORY = {ZERO: "zero", ONE: "one", TWO: "two", FEW: "few", MANY: "many", OTHER: "other"};
function getDecimals(n) {
n = n + '';
var i = n.indexOf('.');
return (i == -1) ? 0 : n.length - i - 1;
}
function getVF(n, opt_precision) {
var v = opt_precision;
if (undefined === v) {
v = Math.min(getDecimals(n), 3);
}
var base = Math.pow(10, v);
var f = ((n * base) | 0) % base;
return {v: v, f: f};
}
$provide.value("$locale", {
"DATETIME_FORMATS": {
"AMPMS": [
"\u043f\u0435. \u0447\u043e.",
"\u043f\u0430. \u0447\u043e."
],
"DAY": [
"\u042f\u043a\u0448\u0430\u043d\u0431\u0435",
"\u0414\u0443\u0448\u0430\u043d\u0431\u0435",
"\u0421\u0435\u0448\u0430\u043d\u0431\u0435",
"\u0427\u043e\u0440\u0448\u0430\u043d\u0431\u0435",
"\u041f\u0430\u043d\u04b7\u0448\u0430\u043d\u0431\u0435",
"\u04b6\u0443\u043c\u044a\u0430",
"\u0428\u0430\u043d\u0431\u0435"
],
"MONTH": [
"\u042f\u043d\u0432\u0430\u0440",
"\u0424\u0435\u0432\u0440\u0430\u043b",
"\u041c\u0430\u0440\u0442",
"\u0410\u043f\u0440\u0435\u043b",
"\u041c\u0430\u0439",
"\u0418\u044e\u043d",
"\u0418\u044e\u043b",
"\u0410\u0432\u0433\u0443\u0441\u0442",
"\u0421\u0435\u043d\u0442\u044f\u0431\u0440",
"\u041e\u043a\u0442\u044f\u0431\u0440",
"\u041d\u043e\u044f\u0431\u0440",
"\u0414\u0435\u043a\u0430\u0431\u0440"
],
"SHORTDAY": [
"\u042f\u0448\u0431",
"\u0414\u0448\u0431",
"\u0421\u0448\u0431",
"\u0427\u0448\u0431",
"\u041f\u0448\u0431",
"\u04b6\u043c\u044a",
"\u0428\u043d\u0431"
],
"SHORTMONTH": [
"\u042f\u043d\u0432",
"\u0424\u0435\u0432",
"\u041c\u0430\u0440",
"\u0410\u043f\u0440",
"\u041c\u0430\u0439",
"\u0418\u044e\u043d",
"\u0418\u044e\u043b",
"\u0410\u0432\u0433",
"\u0421\u0435\u043d",
"\u041e\u043a\u0442",
"\u041d\u043e\u044f",
"\u0414\u0435\u043a"
],
"fullDate": "EEEE, y MMMM dd",
"longDate": "y MMMM d",
"medium": "y MMM d HH:mm:ss",
"mediumDate": "y MMM d",
"mediumTime": "HH:mm:ss",
"short": "yy/MM/dd HH:mm",
"shortDate": "yy/MM/dd",
"shortTime": "HH:mm"
},
"NUMBER_FORMATS": {
"CURRENCY_SYM": "Som",
"DECIMAL_SEP": ".",
"GROUP_SEP": ",",
"PATTERNS": [
{
"gSize": 3,
"lgSize": 3,
"maxFrac": 3,
"minFrac": 0,
"minInt": 1,
"negPre": "-",
"negSuf": "",
"posPre": "",
"posSuf": ""
},
{
"gSize": 3,
"lgSize": 3,
"maxFrac": 2,
"minFrac": 2,
"minInt": 1,
"negPre": "\u00a4\u00a0-",
"negSuf": "",
"posPre": "\u00a4\u00a0",
"posSuf": ""
}
]
},
"id": "tg-cyrl-tj",
"pluralCat": function(n, opt_precision) { var i = n | 0; var vf = getVF(n, opt_precision); if (i == 1 && vf.v == 0) { return PLURAL_CATEGORY.ONE; } return PLURAL_CATEGORY.OTHER;}
});
}]); | PypiClean |
/DLStudio-2.3.0.tar.gz/DLStudio-2.3.0/Examples/object_detection_and_localization_iou.py |
##  object_detection_and_localization_iou.py

"""
This script is for experimenting with IoU-based loss function for the
regression part of object detection and tracking.  These loss functions are
defined in the class

        DIoULoss

that is in the inner class DetectAndLocalize of DLStudio.  See Slides 37
through 42 of my Week 7 presentation on Object Detection and Localization
for an explanation of these loss functions.  This script also uses the

        PurdueShapes5

dataset for training and testing.
"""

import random
import numpy
import torch
import os, sys

##  Seed every source of randomness so that training runs are reproducible.
seed = 0
random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
numpy.random.seed(seed)
torch.backends.cudnn.deterministic = True
##  Bug fix: the cuDNN flag is ``benchmark`` (singular).  The original
##  assigned to a non-existent ``benchmarks`` attribute, which silently did
##  nothing, leaving cuDNN autotuning enabled and runs non-deterministic.
torch.backends.cudnn.benchmark = False
os.environ['PYTHONHASHSEED'] = str(seed)

##  watch -d -n 0.5 nvidia-smi

from DLStudio import *

dls = DLStudio(
    dataroot = "./data/PurdueShapes5/",
    # dataroot = "/mnt/cloudNAS3/Avi/ImageDatasets/PurdueShapes5/",
    # dataroot = "/home/kak/ImageDatasets/PurdueShapes5/",
    image_size = [32,32],
    path_saved_model = "./saved_model",
    momentum = 0.9,
    # learning_rate = 1e-4,        ## when loss_mode is set to d2 or diou1
    learning_rate = 5e-3,          ## when loss_mode is set to diou2 or diou3
    epochs = 4,
    batch_size = 4,
    classes = ('rectangle','triangle','disk','oval','star'),
    use_gpu = True,
)

detector = DLStudio.DetectAndLocalize( dl_studio = dls )

dataserver_train = DLStudio.DetectAndLocalize.PurdueShapes5Dataset(
    train_or_test = 'train',
    dl_studio = dls,
    dataset_file = "PurdueShapes5-10000-train.gz",
)
dataserver_test = DLStudio.DetectAndLocalize.PurdueShapes5Dataset(
    train_or_test = 'test',
    dl_studio = dls,
    dataset_file = "PurdueShapes5-1000-test.gz"
)
detector.dataserver_train = dataserver_train
detector.dataserver_test = dataserver_test
detector.load_PurdueShapes5_dataset(dataserver_train, dataserver_test)

model = detector.LOADnet2(skip_connections=True, depth=8)

number_of_learnable_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
print("\n\nThe number of learnable parameters in the model: %d" % number_of_learnable_params)

num_layers = len(list(model.parameters()))
print("\nThe number of layers in the model: %d\n\n" % num_layers)

##  Exactly one of the loss modes below should be active; match the
##  learning_rate setting above to the mode chosen.
#detector.run_code_for_training_with_iou_regression(model, loss_mode='d2', show_images=True)      ## use learning_rate = 1e-4
#detector.run_code_for_training_with_iou_regression(model, loss_mode='diou1', show_images=True)   ## use learning_rate = 1e-4
#detector.run_code_for_training_with_iou_regression(model, loss_mode='diou2', show_images=True)   ## use learning_rate = 5e-3
detector.run_code_for_training_with_iou_regression(model, loss_mode='diou3', show_images=True)    ## use learning_rate = 5e-3

import pymsgbox
response = pymsgbox.confirm("Finished training. Start testing on unseen data?")
if response == "OK":
    detector.run_code_for_testing_detection_and_localization(model)
/CodeIntel-2.0.0b19-cp34-cp34m-macosx_10_12_x86_64.whl/codeintel/client.py | from __future__ import absolute_import, unicode_literals, print_function
import os
import sys
import json
import time
import threading
import logging
import socket
import weakref
import functools
try:
import queue
except ImportError:
import Queue as queue
# Priorities at which scanning requests can be scheduled.
PRIORITY_CONTROL = 0 # Special sentinal priority to control scheduler
PRIORITY_IMMEDIATE = 1 # UI is requesting info on this file now
PRIORITY_CURRENT = 2 # UI requires info on this file soon
PRIORITY_OPEN = 3 # UI will likely require info on this file soon
PRIORITY_BACKGROUND = 4 # info may be needed sometime
logger_name = 'CodeIntel.codeintel'
logging.getLogger(logger_name).setLevel(logging.INFO) # INFO
class CodeIntel(object):
    """Facade over the out-of-process codeintel manager.

    Owns the single ``CodeIntelManager`` instance, queues requests sent
    while no manager is running, and fans status notifications out to
    weakly-held observers.
    """

    def __init__(self):
        self.log = logging.getLogger(logger_name + '.' + self.__class__.__name__)
        self.mgr = None                     # the CodeIntelManager, when running
        self._mgr_lock = threading.Lock()   # guards creation/teardown of self.mgr
        self.buffers = {}                   # buffers keyed by view id (see buf_from_path)
        self.languages = {}                 # per-language completion metadata, filled by the manager
        self._queue = queue.Queue()         # requests queued while no manager exists
        self._quit_application = False  # app is shutting down, don't try to respawn
        self._observers = weakref.WeakKeyDictionary()
        self._enabled = False

    def add_observer(self, obj):
        # Held weakly, so registration never keeps the observer alive.
        if hasattr(obj, 'observer'):
            self._observers[obj] = True

    def notify_observers(self, topic, data):
        """Observer calls must be called on the main thread"""
        if topic:
            for obj in self._observers.keys():
                obj.observer(topic, data)

    def _on_mgr_progress(self, mgr, message, state=None, response=None):
        """Translate manager state transitions into observer notifications."""
        topic = 'status_message'
        self.log.debug("Progress: %s", message)
        if state is CodeIntelManager.STATE_DESTROYED:
            self.log.debug("startup failed: %s", message)
            topic = 'error_message'
            message = "Startup failed: %s" % message
        elif state is CodeIntelManager.STATE_BROKEN:
            self.log.debug("db is broken, needs manual intervention")
            topic = 'error_message'
            message = "There is an error with your code intelligence database; it must be reset before it can be used."
        elif state is CodeIntelManager.STATE_ABORTED:
            self.log.debug("Got abort message")
            topic = 'error_message'
            message = "Code Intelligence Initialization Aborted"
        elif state is CodeIntelManager.STATE_WAITING:
            self.log.debug("Waiting for CodeIntel")
        elif state is CodeIntelManager.STATE_READY:
            self.log.debug("db is ready")
        if message:
            self.notify_observers(topic, dict(response or {}, message=message))
        else:
            self.log.debug("nothing to report")

    def _on_mgr_shutdown(self, mgr):
        # The codeintel manager is going away, drop the reference to it
        with self._mgr_lock:
            if self.mgr is mgr:
                self.mgr = None

    def activate(self, reset_db_as_necessary=False, codeintel_command=None, oop_mode=None, log_levels=None, env=None, prefs=None):
        """Spawn (or respawn) the manager and flush any queued requests."""
        self.log.debug("activating codeintel service")
        if self._quit_application:
            return  # don't ever restart after quit-application
        # clean up dead managers
        with self._mgr_lock:
            if self.mgr and not self.mgr.is_alive():
                self.mgr = None
            # create a new manager as necessary
            if not self.mgr:
                self.mgr = CodeIntelManager(
                    self,
                    progress_callback=self._on_mgr_progress,
                    shutdown_callback=self._on_mgr_shutdown,
                    codeintel_command=codeintel_command,
                    oop_mode=oop_mode,
                    log_levels=log_levels,
                    env=env,
                    prefs=prefs,
                )
                while True:
                    try:
                        # Tell the manager to deal with it; note that this request
                        # will get queued by the manager for now, since we haven't
                        # actually started the manager.
                        self.mgr.send(**self._queue.get(False))
                    except queue.Empty:
                        break  # no more items
                # new codeintel manager; update all the buffers to use this new one
                for buf in list(self.buffers.values()):
                    buf.mgr = self.mgr
        self._enabled = True
        try:
            # run the new manager
            self.mgr.start(reset_db_as_necessary)
        except RuntimeError:
            # thread already started
            pass

    @property
    def enabled(self):
        return self._enabled

    def deactivate(self):
        """Gracefully shut the manager down and disable the service."""
        with self._mgr_lock:
            if self.mgr:
                self.mgr.shutdown()
                self.mgr = None
        self._enabled = False

    def cancel(self):
        # Abort all in-flight requests without shutting the manager down.
        mgr = self.mgr
        if mgr:
            mgr.abort()

    def is_cpln_lang(self, language):
        # True if *language* supports code completion.
        return language in self.get_cpln_langs()

    def get_cpln_langs(self):
        return self.mgr.cpln_langs if self.mgr else []

    def is_citadel_lang(self, language):
        return language in self.get_citadel_langs()

    def get_citadel_langs(self):
        return self.mgr.citadel_langs if self.mgr else []

    def is_xml_lang(self, language):
        return language in self.get_xml_langs()

    def get_xml_langs(self):
        return self.mgr.xml_langs if self.mgr else []

    @property
    def available_catalogs(self):
        return self.mgr.available_catalogs if self.mgr else []

    def update_catalogs(self, update_callback=None):
        if self.mgr:
            self.mgr.update_catalogs(update_callback=update_callback)

    def send(self, discardable=False, **kwargs):
        """Forward a request to the manager.

        When no manager is running: discard the request if *discardable*,
        otherwise queue it and (re)activate the manager.
        """
        if not self._enabled:
            self.log.warn("send called when not enabled (ignoring command) %r", kwargs)
            return
        if self.mgr:
            self.mgr.send(**kwargs)
        elif not discardable:
            self._queue.put(kwargs)
            self.activate()
        else:
            self.log.debug("discarding request %r", kwargs)

    def collectReports(self, callback, closure):
        """Collect a memory report from the subprocess.

        Blocks the calling thread (polling) until the response arrives;
        *callback* is invoked once per reported path.
        """
        def on_have_report(request, response):
            for path, data in list(response.get('memory', {}).items()):
                amount = data.get('amount')
                if amount is None:
                    continue  # This value was unavailable
                units = data.get('units')  # bytes or count
                if path.startswith('explicit/'):
                    kind = 'heap'
                else:
                    kind = 'other'
                desc = data.get('desc', "No description available.")
                callback(path, kind, units, amount, desc)
            have_response.add(True)
        have_response = set()
        self.send(command='memory-report', callback=on_have_report)
        while not have_response:
            time.sleep(0.1)

    def buf_from_path(self, path):
        """
        Get an existing buffer given the path
        @note Prefer buf_from_view; this might be less accurate.
        (multiple buffers might have the same path.)
        """
        if not self.mgr or not path:
            return None
        path = CodeIntelBuffer.normpath(path)  # Fix case on Windows
        for vid, buf in list(self.buffers.items()):
            if CodeIntelBuffer.normpath(buf.path) == path:
                return buf
        return None
class _Connection(object):
    """Abstract transport between this process and the codeintel child.

    Concrete subclasses supply the command-line arguments that tell the
    child how to connect back, and a read/write stream once it has.
    """

    def get_commandline_args(self):
        """Return list of command line args to pass to child"""
        raise NotImplementedError()

    def get_stream(self):
        """Return file-like object for read/write"""
        raise NotImplementedError()

    def cleanup(self):
        """Do any cleanup required"""
class _TCPConnection(_Connection):
    """Transport over a localhost TCP socket.

    The parent listens on an ephemeral port; the child is told the
    address via ``--tcp host:port`` and connects back.
    """
    _read = None
    _write = None

    def __init__(self):
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self.sock.bind(('127.0.0.1', 0))
        self.sock.listen(0)

    def get_commandline_args(self):
        host, port = self.sock.getsockname()
        return ['--tcp', '%s:%s' % (host, port)]

    def get_stream(self):
        # Block until the child connects, then expose unbuffered
        # read/write file objects over the accepted socket.
        client, _addr = self.sock.accept()
        self._read = client.makefile('rb', 0)
        self._write = client.makefile('wb', 0)
        return self

    def read(self, count):
        return self._read.read(count)

    def write(self, data):
        return self._write.write(data)

    def cleanup(self):
        if self.sock:
            self.sock.close()
class _ServerConnection(_Connection):
    """Transport to an already-running codeintel server over TCP.

    Unlike ``_TCPConnection`` this end dials out: the server is expected
    to be listening at *host*:*port*.
    """
    sock = None
    _read = None
    _write = None

    def __init__(self, host='127.0.0.1', port=9999):
        self.host = host
        self.port = port

    def get_commandline_args(self):
        return ['--server', '{0}:{1}'.format(self.host, self.port)]

    def get_stream(self):
        # Connect out to the server and wrap the socket in unbuffered
        # read/write file objects.
        conn = socket.create_connection((self.host, self.port))
        self._read = conn.makefile('rb', 0)
        self._write = conn.makefile('wb', 0)
        self.sock = conn
        return self

    def read(self, count):
        return self._read.read(count)

    def write(self, data):
        return self._write.write(data)

    def cleanup(self):
        if self.sock:
            self.sock.close()
if sys.platform.startswith("win"):
    from win32_named_pipe import Win32Pipe

    class _PipeConnection(Win32Pipe):
        """This is a wrapper around our Win32Pipe class to expose the expected
        API"""
        pipe_prefix = "codeintel-"

        def get_commandline_args(self):
            return ['--pipe', self.name]

        def get_stream(self):
            self._ensure_stream()
            return self

        def cleanup(self):
            return
    del Win32Pipe
else:
    # posix pipe class
    class _PipeConnection(_Connection):
        # Uses a private temp directory holding two named fifos ('in' and
        # 'out'); the child is given the directory via ``--pipe <dir>``.
        _dir = None
        _read = None
        _write = None

        def get_commandline_args(self):
            import tempfile
            self._dir = tempfile.mkdtemp(prefix='codeintel-', suffix='-oop-pipes')
            os.mkfifo(os.path.join(self._dir, 'in'), 0o600)
            os.mkfifo(os.path.join(self._dir, 'out'), 0o600)
            return ['--pipe', self._dir]

        def get_stream(self):
            # Open the write end first, so that the child doesn't hang
            # NOTE(review): the comment above disagrees with the code, which
            # opens the read end ('out') first -- confirm the intended order
            # against the child's side before changing either.
            self._read = open(os.path.join(self._dir, 'out'), 'rb', 0)
            self._write = open(os.path.join(self._dir, 'in'), 'wb', 0)
            return self

        def read(self, count):
            return self._read.read(count)

        def write(self, data):
            return self._write.write(data)

        def cleanup(self):
            # don't close the streams here, but remove the files. The fds are
            # left open so we can communicate through them, but we no longer
            # need the file names around.
            os.remove(self._read.name)
            os.remove(self._write.name)
            try:
                os.rmdir(self._dir)
            except OSError:
                pass

        def close(self):
            # Full teardown: remove the fifo files (best-effort) and then
            # close both stream ends.
            try:
                self.cleanup()
            except Exception as e:
                pass
            self._read.close()
            self._write.close()
class CodeIntelManager(threading.Thread):
    """Background thread that owns the OOP codeintel child process and the
    pipe/socket connection to it."""

    # Lifecycle states; compared by identity (each is a unique tuple).
    STATE_UNINITIALIZED = ("uninitialized",)  # not initialized
    STATE_CONNECTED = ("connected",)  # child process spawned, connection up; not ready
    STATE_BROKEN = ("broken",)  # database is broken and needs to be reset
    STATE_WAITING = ("waiting",)  # waiting for CodeIntel
    STATE_READY = ("ready",)  # ready for use
    STATE_QUITTING = ("quitting",)  # shutting down
    STATE_DESTROYED = ("destroyed",)  # connection shut down, child process dead
    STATE_ABORTED = ("aborted",)

    # Class-level defaults; may be overridden per-instance via __init__.
    _codeintel_command = '/usr/local/bin/codeintel'
    _oop_mode = 'pipe'          # one of 'pipe', 'tcp', 'server' (see init_child)
    _log_levels = ['WARNING']
    _state = STATE_UNINITIALIZED
    _send_request_thread = None  # background thread to send unsent requests
    _reset_db_as_necessary = False  # whether to reset the db if it's broken
    _watchdog_thread = None  # background thread to watch for process termination
    _memory_error_restart_count = 0
    # NOTE(review): "_cmd_messge" looks like a typo for "_cmd_message"; kept
    # as-is since external code may reference it.
    _cmd_messge = True
    proc = None   # child process handle (or True in 'server' mode)
    pipe = None   # the _Connection stream once connected
    cpln_langs = []
    citadel_langs = []
    xml_langs = []
    stdlib_langs = []  # languages which support standard libraries
    available_catalogs = []  # see get-available-catalogs command
    env = dict(os.environ)
    prefs = [
        {
            'codeintel_max_recursive_dir_depth': 10,
            'codeintel_scan_files_in_project': True,
            'codeintel_selected_catalogs': [],
            'defaultHTML5Decl': '-//W3C//DTD HTML 5//EN',
            'defaultHTMLDecl': '-//W3C//DTD HTML 5//EN',
            'javascriptExtraPaths': '',
            'nodejsDefaultInterpreter': '',
            'nodejsExtraPaths': '',
            'perl': '',
            'perlExtraPaths': '',
            'php': '',
            'phpConfigFile': '',
            'phpExtraPaths': '',
            'python': '',
            'python3': '',
            'python3ExtraPaths': '',
            'pythonExtraPaths': '',
            'ruby': '',
            'rubyExtraPaths': '',
        },
    ]
    def __init__(self, service, progress_callback=None, shutdown_callback=None, codeintel_command=None, oop_mode=None, log_levels=None, env=None, prefs=None):
        """Create the manager (call ``start()`` to actually run it).

        service -- the owning CodeIntel facade.
        progress_callback -- called with (mgr, message[, state, response]).
        shutdown_callback -- called with (mgr) when the manager dies.
        The remaining arguments override the class-level defaults when
        not None.
        """
        self.log = logging.getLogger(logger_name + '.' + self.__class__.__name__)
        self.service = service
        self.languages = service.languages
        self._abort = set()     # ids of requests the caller aborted
        self._next_id = 0
        self._progress_callback = progress_callback
        self._shutdown_callback = shutdown_callback
        if codeintel_command is not None:
            self._codeintel_command = codeintel_command
        if oop_mode is not None:
            self._oop_mode = oop_mode
        if log_levels is not None:
            self._log_levels = log_levels
        if prefs is not None:
            self.prefs = [prefs] if isinstance(prefs, dict) else prefs
        if env is not None:
            self.env = env
        self._state_condvar = threading.Condition()
        self.requests = {}  # keyed by request id; value is tuple (callback, request data, time sent) requests will time out at some point...
        self.unsent_requests = queue.Queue()
        threading.Thread.__init__(self, name="CodeIntel Manager Thread")
    @property
    def state(self):
        return self._state

    @state.setter
    def state(self, state):
        # Wake anyone blocked in _send_queued_requests waiting for READY.
        with self._state_condvar:
            self._state = state
            self._state_condvar.notifyAll()

    def start(self, reset_db_as_necessary=False):
        """Start the manager thread; optionally allow automatic db resets."""
        self._reset_db_as_necessary = reset_db_as_necessary
        threading.Thread.start(self)
    def shutdown(self):
        """Abort any outstanding requests and shut down gracefully"""
        self.abort()
        if self.state is CodeIntelManager.STATE_DESTROYED:
            return  # already dead
        if not self.pipe:
            # not quite dead, but already disconnected... ungraceful shutdown
            self.kill()
            return
        # NOTE(review): ``do_quit`` is not defined in this chunk of the file;
        # confirm it exists elsewhere in the class.
        self._send(command='quit', callback=self.do_quit)
        self.state = CodeIntelManager.STATE_QUITTING

    def abort(self):
        """Abort all running requests"""
        for req in list(self.requests.keys()):
            self._abort.add(req)
            # Tell the child to drop the request; the response is ignored.
            self._send(
                command='abort',
                id=req,
                callback=lambda request, response: None,
            )
    def close(self):
        # Drop the connection; safe to call when the other end is gone.
        try:
            self.pipe.close()
        except Exception as e:
            pass  # The other end is dead, this is kinda pointless
        self.pipe = None

    def kill(self):
        """
        Kill the subprocess. This may be safely called when the process has
        already exited. This should *always* be called no matter how the
        process exits, in order to maintain the correct state.
        """
        with self.service._mgr_lock:
            if self.state == CodeIntelManager.STATE_DESTROYED:
                return
            # It's destroying time.
            self.state = CodeIntelManager.STATE_DESTROYED
        try:
            self.proc.kill()
        except Exception as e:
            pass
        self.close()
        try:
            # Shut down the request sending thread (self._send_request_thread)
            self.unsent_requests.put((None, None))
        except Exception as e:
            pass  # umm... no idea?
        if self._shutdown_callback:
            self._shutdown_callback(self)
    def init_child(self):
        """Spawn the OOP codeintel child (or, in 'server' mode, point at an
        externally started one) and establish the connection.

        On success transitions to STATE_CONNECTED and kicks off the initial
        request sequence; on failure kills the manager and reports the error
        via the progress callback.
        """
        import process
        assert threading.current_thread().name != "MainThread", \
            "CodeIntelManager.init_child should run on background thread!"
        self.log.debug("initializing child process")
        conn = None
        try:
            _codeintel_command = self._codeintel_command
            if not os.path.exists(_codeintel_command):
                # Fall back to PATH lookup when the configured absolute
                # path does not exist.
                _codeintel_command = os.path.basename(_codeintel_command)
            cmd = [_codeintel_command]
            database_dir = os.path.expanduser('~/.codeintel')
            cmd += ['--log-file', os.path.join(database_dir, 'codeintel.log')]
            for log_level in self._log_levels:
                cmd += ['--log-level', log_level]
            cmd += ['oop']
            cmd += ['--database-dir', database_dir]
            _oop_mode = self._oop_mode
            if _oop_mode == 'pipe':
                conn = _PipeConnection()
            elif _oop_mode == 'tcp':
                conn = _TCPConnection()
            elif _oop_mode == 'server':
                conn = _ServerConnection()
            else:
                self.log.warn("Unknown codeintel oop mode %s, falling back to pipes", _oop_mode)
                conn = _PipeConnection()
            cmd += conn.get_commandline_args()
            if _oop_mode == 'server':
                # The user is expected to start the server by hand; only
                # print the hint once per disconnect (_cmd_messge flag).
                if self._cmd_messge:
                    self._cmd_messge = False
                    self.log.warn("Please start OOP server with command: %s", " ".join(cmd))
                self.proc = True
            else:
                self.log.debug("Running OOP: %s", " ".join(cmd))
                self.proc = process.ProcessOpen(cmd, cwd=None, env=None)
                assert self.proc.returncode is None, "Early process death!"
                # Watch for unexpected child death in the background.
                self._watchdog_thread = threading.Thread(
                    target=self._run_watchdog_thread,
                    name="CodeIntel Subprocess Watchdog Thread",
                    args=(self.proc,),
                )
                self._watchdog_thread.start()
            try:
                self.pipe = conn.get_stream()
                self._cmd_messge = True
                self.log.info("Successfully connected with OOP CodeIntel!")
            except Exception:
                self.pipe = None
            conn.cleanup()  # This will remove the filesystem files (it keeps the fds open)
            self.state = CodeIntelManager.STATE_CONNECTED
        except Exception as e:
            self.kill()
            message = "Error initing child: %s" % e
            self.log.error(message)
            self._progress_callback(self, message)
        else:
            self._send_init_requests()
def _run_watchdog_thread(self, proc):
self.log.debug("Watchdog witing for OOP codeintel process to die...")
if hasattr(proc, 'wait'):
proc.wait()
elif hasattr(proc, 'join'):
proc.join()
self.log.info("Child OOP CodeIntel process died!")
self.state = CodeIntelManager.STATE_WAITING
self.close()
    def _send_init_requests(self):
        """Run the post-connect initialization conversation with the child:
        query supported languages, then drive the database through any
        needed preload/upgrade/reset steps until it reports 'ready'.

        All the nested functions below are callbacks invoked (on a
        background thread) with ``(request, response)`` dicts.
        """
        assert threading.current_thread().name != "MainThread", \
            "CodeIntelManager._send_init_requests should run on background thread!"
        self.log.debug("sending internal initial requests")
        outstanding_cpln_langs = set()

        def update(message=None, state=None, response=None):
            # Central progress/state reporter for the init sequence.
            if state in (CodeIntelManager.STATE_DESTROYED, CodeIntelManager.STATE_BROKEN):
                self.kill()
            if state is not None:
                self.state = state
            if response is not None:
                if message:
                    message += "\n"
                else:
                    message = ""
                message += response.get('message', "(No further information available)")
            if any(x is not None for x in (message, state)):
                # don't do anything if everything we have is just none
                self._progress_callback(self, message, state, response)

        def get_citadel_langs(request, response):
            if not response.get('success', False):
                update("Failed to get citadel languages:", state=CodeIntelManager.STATE_DESTROYED, response=response)
                return
            self.citadel_langs = sorted(response.get('languages'))

        def get_xml_langs(request, response):
            if not response.get('success', False):
                update("Failed to get XML languages:", state=CodeIntelManager.STATE_DESTROYED, response=response)
                return
            self.xml_langs = sorted(response.get('languages'))

        def get_stdlib_langs(request, response):
            if not response.get('success', False):
                update("Failed to get languages which support standard libraries:", state=CodeIntelManager.STATE_DESTROYED, response=response)
                return
            self.stdlib_langs = sorted(response.get('languages'))

        def get_cpln_langs(request, response):
            if not response.get('success', False):
                update("Failed to get completion languages:", state=CodeIntelManager.STATE_DESTROYED, response=response)
                return
            self.cpln_langs = sorted(response.get('languages'))
            self.languages.clear()
            # Fan out one get-language-info request per completion language;
            # fixup_db starts once the last one (tracked via
            # outstanding_cpln_langs) has answered.
            for lang in self.cpln_langs:
                outstanding_cpln_langs.add(lang)
                self._send(callback=get_lang_info, command='get-language-info', language=lang)

        def get_lang_info(request, response):
            lang = request['language']
            if not response.get('success', False):
                update("Failed to get information for %s:" % (lang,), state=CodeIntelManager.STATE_DESTROYED, response=response)
                return
            self.languages[lang] = dict(
                cpln_fillup_chars=response['completion-fillup-chars'],
                cpln_stop_chars=response['completion-stop-chars'],
            )
            outstanding_cpln_langs.discard(lang)
            if not outstanding_cpln_langs:
                # All languages answered; start checking the database state.
                fixup_db({}, {'success': True})

        def fixup_db(request, response):
            command = request.get('command')
            previous_command = request.get('previous-command')
            state = response.get('state')
            req_id = response.get('req_id')
            if req_id in self._abort:
                self.log.debug("Aborting startup")
                update("Codeintel startup aborted", state=CodeIntelManager.STATE_ABORTED)
                return
            update(response=response)
            if 'success' not in response:
                # status update
                return
            if command != 'database-info':
                if response.get("abort", False):
                    # The request was aborted, don't retry
                    return
                # We just ran some sort of db-fixing command; check current status
                self._send(callback=fixup_db, command='database-info', previous_command=command)
                return
            # Possible db progression:
            # preload-needed -> (preload) -> ready
            # upgrade-needed -> (upgrade) -> preload-needed -> (preload) -> ready
            # upgrade-blocked -> (reset) -> preload-needed -> (preload) -> ready
            # broken -> (reset) -> preload-needed -> (preload) -> ready
            if state == 'ready':
                # db is fine
                initialization_completed()
                return
            if state == 'preload-needed':
                # database needs preloading
                if previous_command not in (None, 'database-reset'):
                    update("Unexpected empty database after %s" % (previous_command,), state=CodeIntelManager.STATE_BROKEN)
                    return
                langs = {}
                for lang in self.stdlib_langs:
                    ver = None
                    # TODO: resolve the installed interpreter version for
                    # each language ([0-9]+.[0-9]+); currently always None.
                    print("Language %s needs version resolving!" % lang)
                    langs[lang] = ver
                self._send(callback=fixup_db, command='database-preload', languages=langs)
                return
            if state == 'upgrade-needed':
                # database needs to be upgraded
                if previous_command is not None:
                    update("Unexpected database upgrade needed after %s" % (previous_command,), state=CodeIntelManager.STATE_BROKEN)
                self._send(callback=fixup_db, command='database-upgrade')
                return
            if state == 'upgrade-blocked' or state == 'broken':
                # database can't be upgraded but can't be used either
                if previous_command is not None:
                    update("Unexpected database requires wiping after %s" % (previous_command,), state=CodeIntelManager.STATE_BROKEN)
                if self._reset_db_as_necessary:
                    self._send(callback=fixup_db, command='database-reset')
                else:
                    update("Database is broken and must be reset", state=CodeIntelManager.STATE_BROKEN)
                return
            update("Unexpected database state %s" % (state,), state=CodeIntelManager.STATE_BROKEN)

        def initialization_completed():
            self.log.debug("internal initial requests completed")
            if not self._send_request_thread:
                # Start draining the public request queue now that the
                # child is fully ready.
                self._send_request_thread = threading.Thread(
                    target=self._send_queued_requests,
                    name="CodeIntel Manager Request Sending Thread")
                self._send_request_thread.daemon = True
                self._send_request_thread.start()
            update("CodeIntel ready.", state=CodeIntelManager.STATE_READY)

        self._send(callback=get_cpln_langs, command='get-languages', type='cpln')
        self._send(callback=get_citadel_langs, command='get-languages', type='citadel')
        self._send(callback=get_xml_langs, command='get-languages', type='xml')
        self._send(callback=get_stdlib_langs, command='get-languages', type='stdlib-supported')
        self.set_global_environment(self.env, self.prefs)

        def update_callback(response):
            if not response.get("success", False):
                update("Failed to get available catalogs:", state=CodeIntelManager.STATE_DESTROYED, response=response)
        self.update_catalogs(update_callback=update_callback)
        self.send(command="set-xml-catalogs")
def set_global_environment(self, env, prefs):
    """Record the global environment/prefs and push them to the subprocess.

    A single prefs dict is normalized to a one-element list before being
    stored and sent via the ``set-environment`` command.
    """
    if isinstance(prefs, dict):
        prefs = [prefs]
    self.env = env
    self.prefs = prefs
    self._send(command='set-environment',
               env=self.env,
               prefs=self.prefs)
def update_catalogs(self, update_callback=None):
    """Refresh ``self.available_catalogs`` from the codeintel subprocess.

    ``update_callback``, when given, is invoked with the raw response
    whether or not the request succeeded.
    """
    def on_catalog_response(request, response):
        if response.get("success", False):
            self.available_catalogs = response.get('catalogs', [])
        if update_callback:
            update_callback(response)
    self._send(callback=on_catalog_response, command='get-available-catalogs')
def send(self, callback=None, **kwargs):
    """Public API for sending a request.
    Requests are expected to be well-formed (has a command, etc.)
    The callback receives two arguments, the request and the response,
    both as dicts.
    @note The callback is invoked on a background thread; proxy it to
    the main thread if desired."""
    if self.state is CodeIntelManager.STATE_DESTROYED:
        raise RuntimeError("Manager already shut down")
    # Queue only; the worker in _send_queued_requests drains this queue
    # once the manager reaches STATE_READY.
    self.unsent_requests.put((callback, kwargs))
def _send_queued_requests(self):
    """Worker to send unsent requests.

    Runs on the dedicated request-sending thread: blocks on the state
    condition variable until the manager is READY, then drains
    ``self.unsent_requests`` into ``_send()``.  A ``(None, None)``
    sentinel on the queue terminates the worker during shutdown.
    """
    while True:
        with self._state_condvar:
            if self.state is CodeIntelManager.STATE_DESTROYED:
                break  # Manager already shut down
            if self.state is not CodeIntelManager.STATE_READY:
                self._state_condvar.wait()
                continue  # not ready yet; re-check state after wakeup
        callback, kwargs = self.unsent_requests.get()
        if callback is None and kwargs is None:
            # end of queue (shutting down)
            break
        self._send(callback, **kwargs)
def _send(self, callback=None, **kwargs):
    """
    Private API for sending; ignores the current state of the manager
    (other than QUITTING) and writes the frame immediately.  The caller
    is responsible for checking that the manager is in the expected
    state (used during initialization).  This blocks the calling thread
    until the data has been written (though possibly not yet received
    on the other end).
    """
    if not self.pipe or self.state is CodeIntelManager.STATE_QUITTING:
        return  # Nope, eating all commands during quit
    # Each request gets a unique hex id so the reader thread can match
    # the response frame back to its callback via self.requests.
    req_id = hex(self._next_id)
    kwargs['req_id'] = req_id
    text = json.dumps(kwargs, separators=(',', ':'))
    # Keep the request parameters so the handler can examine it; however,
    # drop the text and env, because those are huge and usually useless
    kwargs.pop('text', None)
    kwargs.pop('env', None)
    # Register the request before writing so the reader thread can never
    # observe a response for an id that is not yet in self.requests.
    self.requests[req_id] = (callback, kwargs, time.time())
    self._next_id += 1
    self.log.debug("sending frame: %s", text)
    # Wire format: ASCII decimal byte length immediately followed by the
    # JSON payload; the reader detects the payload start at '{'.
    text = text.encode('utf-8')
    length = "%i" % len(text)
    length = length.encode('utf-8')
    buf = length + text
    try:
        self.pipe.write(buf)
    except Exception as e:
        message = "Error writing data to OOP CodeIntel: %s" % e
        self.log.error(message)
        self._progress_callback(self, message)
        self.close()
def run(self):
    """Event loop for the codeintel manager background thread.

    Spawns the out-of-process codeintel child and reads framed JSON
    responses from its pipe until EOF or error, restarting the child as
    needed.  Frame format: ASCII decimal length digits immediately
    followed by the JSON object (which always starts with '{').
    """
    assert threading.current_thread().name != "MainThread", \
        "CodeIntelManager.run should run on background thread!"
    self.log.debug("CodeIntelManager thread started...")
    while True:
        ok = False
        self.init_child()
        if not self.proc:
            break  # init child failed
        first_buf = True
        discard_time = 0.0
        try:
            buf = b''
            while self.proc and self.pipe:
                # Loop to read from the pipe; accumulate length digits
                # in buf until the opening '{' of the payload appears.
                ch = self.pipe.read(1)
                if not ch:
                    # nothing read, EOF
                    raise IOError("Failed to read from socket")
                ok = True
                if ch == b'{':
                    length = int(buf)
                    buf = ch
                    # The '{' already consumed counts as the payload's
                    # first byte; read until `length` bytes are held.
                    while len(buf) < length:
                        data = self.pipe.read(length - len(buf))
                        if not data:
                            # nothing read, EOF
                            raise IOError("Failed to read from socket")
                        buf += data
                    self.log.debug("Got codeintel response: %r" % buf)
                    # The first frame is an empty handshake object; skip it.
                    if first_buf and buf == b'{}':
                        first_buf = False
                        buf = b''
                        continue
                    response = json.loads(buf.decode('utf-8'))
                    self.handle(response)  # handle runs asynchronously and shouldn't raise exceptions
                    buf = b''
                else:
                    if ch not in b'0123456789':
                        raise ValueError("Invalid frame length character: %r" % ch)
                    buf += ch
                now = time.time()
                # NOTE(review): discard_time is never advanced, so after the
                # first minute of uptime this sweep runs on every frame —
                # confirm whether updating discard_time was intended.
                if now - discard_time > 60:  # discard some stale results
                    for req_id, (callback, request, sent_time) in list(self.requests.items()):
                        if sent_time < now - 5 * 60:
                            # sent 5 minutes ago - it's irrelevant now
                            try:
                                if callback:
                                    callback(request, {})
                            except Exception as e:
                                self.log.error("Failed timing out request")
                            else:
                                self.log.debug("Discarding request %r", request)
                            del self.requests[req_id]
        except Exception as e:
            if self.state in (CodeIntelManager.STATE_QUITTING, CodeIntelManager.STATE_DESTROYED):
                self.log.debug("IOError in codeintel during shutdown; ignoring")
                break  # this is intentional
            message = "Error reading data from OOP CodeIntel: %s" % e
            self.log.error(message)
            self._progress_callback(self, message)
            self.state = CodeIntelManager.STATE_WAITING
            self.close()
        if not ok:
            # The child never produced any data; back off briefly before
            # the outer loop respawns it.
            time.sleep(3)
    self.log.debug("CodeIntelManager thread ended!")
def handle(self, response):
    """Handle a response from the codeintel process.

    Runs on the manager's reader thread (see ``run``).  Solicited
    responses (matching a pending req_id/command) are routed to their
    stored callback; anything else is dispatched to a ``do_<command>``
    handler as an unsolicited response.
    """
    self.log.debug("handling: %r", response)
    req_id = response.get('req_id')
    callback, request, sent_time = self.requests.get(req_id, (None, None, None))
    request_command = request.get('command', '') if request else None
    response_command = response.get('command', request_command)
    if req_id is None or request_command != response_command:
        # unsolicited response, look for a handler
        try:
            if not response_command:
                self.log.error("No 'command' in response %r", response)
                raise ValueError("Invalid response frame %r" % response)
            # Dispatch 'some-command' to self.do_some_command(response).
            meth = getattr(self, 'do_' + response_command.replace('-', '_'), None)
            if not meth:
                self.log.error("Unknown command %r, response %r", response_command, response)
                raise ValueError("Unknown unsolicited response \"%s\"" % response_command)
            meth(response)
        except Exception as e:
            self.log.error("Error handling unsolicited response")
        return
    if not request:
        self.log.error("Discard response for unknown request %s (command %s): have %s",
                       req_id, response_command or '%r' % response, sorted(self.requests.keys()))
        return
    self.log.debug("Request %s (command %s) took %0.2f seconds", req_id, request_command or '<unknown>', time.time() - sent_time)
    if 'success' in response:
        # remove completed request
        self.log.debug("Removing completed request %s", req_id)
        del self.requests[req_id]
    else:
        # unfinished response; update the sent time so it doesn't time out
        self.requests[req_id] = (callback, request, time.time())
    if callback:
        callback(request, response)
def do_scan_complete(self, response):
    """Unsolicited notification that a buffer scan finished; tell observers."""
    scanned_path = response.get('path')
    if not scanned_path:
        return
    buf = self.service.buf_from_path(scanned_path)
    self.service.notify_observers('codeintel_buffer_scanned', buf)
def do_report_message(self, response):
    """Report a message from codeintel (typically scan status); unsolicited.

    Logging messages that look like an out-of-memory traceback trigger a
    kill of the subprocess (it restarts itself) - bug 103067 - capped at
    20 restarts to avoid an endless kill loop.
    """
    if response.get('type') == 'logging':
        message = response.get('message')
        looks_like_oom = (message.strip().endswith("MemoryError")
                          and "Traceback (most recent call last):" in message)
        if looks_like_oom:
            if self._memory_error_restart_count < 20:
                self.log.fatal("Out-of-process ran out of memory - killing process")
                self.kill()
                self._memory_error_restart_count += 1
            return
    self.service.notify_observers('status_message', response)
def do_report_error(self, response):
    """Report a codeintel error into the error log (unsolicited handler)."""
    self.service.notify_observers('error_message', response)
def do_quit(self, request, response=None):
    """Quit successful.

    BUG FIX: this method is also reachable as an unsolicited-response
    handler, where ``handle`` dispatches ``meth(response)`` with a single
    argument; the original mandatory two-argument signature made that
    call raise TypeError.  ``response`` is now optional, keeping the
    original (request, response) callback shape backward-compatible.
    """
    if response is None:
        # Invoked via the unsolicited dispatch: the lone argument is the
        # response frame.
        response = request
    # NOTE(review): handle() runs on the reader thread, so this assert
    # would fire if 'quit' ever arrives unsolicited — confirm intent.
    assert threading.current_thread().name == "MainThread", \
        "CodeIntelManager.activate::do_quit() should run on main thread!"
    self.kill()
    if self.is_alive():
        self.join(1)
class CodeIntelBuffer(object):
    """A buffer-like object for codeintel; this is specific to a
    CodeIntelManager instance.

    Wraps a single editor buffer (identified by ``vid``) and exposes the
    asynchronous codeintel operations (document scanning, trigger lookup,
    completion evaluation, HTML export) that are forwarded to the
    out-of-process service via ``self.service.send``.  Response callbacks
    are invoked on a background thread.

    Fixes over the previous revision: the ``set_call_tip_info`` error log
    used the literal format string ``"...: e"`` instead of ``"...: %s"``,
    several dead ``pass`` statements after log calls were removed, and
    ``to_html_async`` no longer builds throwaway result constants.
    """

    def __init__(self, service, vid, lang=None, path=None, text=None, env=None, prefs=None):
        # service: owning codeintel service (provides .send / .mgr / .languages)
        # vid: unique view/buffer id used to identify this buffer
        self.log = logging.getLogger(logger_name + '.' + self.__class__.__name__)
        self.service = service
        self.vid = vid
        self.lang = lang
        self.path = path
        self.text = text
        self._env = env
        # A single prefs dict is normalized to a one-element list.
        self._prefs = [prefs] if isinstance(prefs, dict) else prefs

    @property
    def env(self):
        """Effective environment: the manager's global env overlaid with
        this buffer's own entries."""
        env = dict(self.service.mgr and self.service.mgr.env or {})
        env.update(self._env or {})
        return env

    @env.setter
    def env(self, env):
        self._env = env

    @property
    def prefs(self):
        """Effective preference sets: the manager's global prefs followed
        by this buffer's own, duplicates skipped, order preserved."""
        prefs = list(self.service.mgr and self.service.mgr.prefs or [])
        for pref in self._prefs or []:
            if pref not in prefs:
                prefs.append(pref)
        return prefs

    @prefs.setter
    def prefs(self, prefs):
        self._prefs = [prefs] if isinstance(prefs, dict) else prefs

    @staticmethod
    def normpath(path):
        """Routine to normalize the path used for codeintel buffers.
        @note See also codeintel/lib/oop/driver.py::Driver.normpath
        """
        return os.path.normcase(path)

    @property
    def cpln_fillup_chars(self):
        """Characters that commit ("fill up") a completion for this language."""
        return self.service.languages[self.lang]['cpln_fillup_chars']

    @property
    def cpln_stop_chars(self):
        """Characters that cancel a completion for this language."""
        return self.service.languages[self.lang]['cpln_stop_chars']

    def scan_document(self, handler, lines_added, file_mtime=False, callback=None):
        """Ask the service to (re)scan this document.

        handler.on_document_scanned / handler.set_status_message are
        notified on success/failure; ``callback`` (if given) receives the
        raw (request, response) afterwards on success.
        """
        def invoke_callback(request, response):
            if not response.get('success'):
                msg = response.get('message')
                if not msg:
                    msg = "scan_document: Can't scan document"
                try:
                    handler.set_status_message(self, msg)
                except Exception as e:
                    self.log.error("Error reporting scan_document error: %s", response.get('message', e))
                return
            try:
                handler.on_document_scanned(self)
            except Exception as e:
                self.log.error("Error calling scan_document callback: %s", e)
            if callback is not None:
                callback(request, response)

        # When file_mtime is true, no mtime is sent — presumably so the
        # service falls back to the file's own mtime; TODO confirm.
        mtime = None if file_mtime else time.time()
        self.service.send(
            command='scan-document',
            path=self.path,
            language=self.lang,
            env={
                'env': self.env,
                'prefs': self.prefs,
            },
            text=self.text,
            encoding='utf-8',
            discardable=True,
            priority=PRIORITY_IMMEDIATE if lines_added else PRIORITY_CURRENT,
            mtime=mtime,
            callback=invoke_callback,
        )

    def _post_trg_from_pos_handler(self, handler, context, request, response):
        """Shared response handler for the trg-from-pos command family."""
        # This needs to be proxied to the main thread for the callback invocation
        if not response.get('success'):
            msg = response.get('message')
            if not msg:
                msg = "%s: Can't get a trigger for position %s" % (context, request.get("pos", "<unknown position>"))
            try:
                handler.set_status_message(self, msg)
            except Exception as e:
                self.log.error("Error reporting scan_document error: %s", response.get('message', e))
            return
        trg = response['trg']
        try:
            if trg:
                handler.on_trg_from_pos(self, context, trg)
        except Exception as e:
            self.log.error("Error calling %s callback: %s", context, e)

    def trg_from_pos(self, handler, implicit, pos=None):
        """Request the completion trigger at a position.

        NOTE(review): ``self.pos`` is not set in __init__; it is assumed
        to be assigned externally by the editor integration — confirm.
        """
        self.service.send(
            command='trg-from-pos',
            path=self.path,
            language=self.lang,
            pos=self.pos if pos is None else pos,
            env={
                'env': self.env,
                'prefs': self.prefs,
            },
            implicit=implicit,
            text=self.text,
            encoding='utf-8',
            callback=functools.partial(self._post_trg_from_pos_handler, handler, 'trg_from_pos')
        )

    def preceding_trg_from_pos(self, handler, curr_pos, pos=None):
        """Request the trigger preceding ``curr_pos`` (explicit completion)."""
        self.service.send(
            command='trg-from-pos',
            path=self.path,
            language=self.lang,
            pos=self.pos if pos is None else pos,
            env={
                'env': self.env,
                'prefs': self.prefs,
            },
            text=self.text,
            encoding='utf-8',
            callback=functools.partial(self._post_trg_from_pos_handler, handler, 'preceding_trg_from_pos'),
            # 'curr-pos' is not a valid identifier, hence the kwargs dict.
            **{'curr-pos': curr_pos}
        )

    def defn_trg_from_pos(self, handler, pos=None):
        """Request a go-to-definition trigger at a position."""
        self.service.send(
            command='trg-from-pos',
            type='defn',
            path=self.path,
            language=self.lang,
            pos=self.pos if pos is None else pos,
            env={
                'env': self.env,
                'prefs': self.prefs,
            },
            text=self.text,
            encoding='utf-8',
            callback=functools.partial(self._post_trg_from_pos_handler, handler, 'defn_trg_from_pos')
        )

    def async_eval_at_trg(self, handler, trg, silent=False, keep_existing=False):
        """Evaluate a trigger asynchronously; route completions, calltips
        or definitions to the matching handler method.  ``handler.done()``
        is always called, success or not."""
        def callback(request, response):
            try:
                if not response.get('success'):
                    try:
                        handler.set_status_message(self, response.get('message', ""), response.get('highlight', False))
                    except Exception as e:
                        self.log.error("Error reporting async_eval_at_trg error: %s", response.get("message", e))
                    return
                if 'retrigger' in response:
                    trg['retriggerOnCompletion'] = response['retrigger']
                if 'cplns' in response:
                    # split into separate lists
                    cplns = response['cplns']
                    try:
                        handler.set_auto_complete_info(self, cplns, trg)
                    except Exception as e:
                        self.log.error("Error calling set_auto_complete_info: %s", e)
                elif 'calltip' in response:
                    try:
                        handler.set_call_tip_info(self, response['calltip'], request.get('explicit', False), trg)
                    except Exception as e:
                        # BUG FIX: format string was the literal "...: e".
                        self.log.error("Error calling set_call_tip_info: %s", e)
                elif 'defns' in response:
                    handler.set_definitions_info(self, response['defns'], trg)
            finally:
                handler.done()
        self.service.send(
            command='eval',
            trg=trg,
            silent=silent,
            keep_existing=keep_existing,
            callback=callback,
        )

    def to_html_async(self, callback, flags=None, title=None):
        """Render this buffer to HTML asynchronously.

        ``callback(success, html_or_none)`` is invoked when the service
        responds; ``flags`` may override the default rendering options.
        """
        def invoke_callback(request, response):
            try:
                if response.get('success'):
                    callback(True, response.get('html'))
                else:
                    callback(False, None)
            except Exception as e:
                self.log.error("Error calling to_html callback: %s", e)

        flag_dict = {
            'include_styling': True,
            'include_html': True,
            'do_trg': True,
            'do_eval': True,
        }
        if flags is not None:
            flag_dict.update(flags)
        self.service.send(
            command='buf-to-html',
            path=self.path,
            language=self.lang,
            text=self.text,
            env={
                'env': self.env,
                'prefs': self.prefs,
            },
            title=title,
            flags=flag_dict,
            callback=invoke_callback,
        )

    def get_calltip_arg_range(self, handler, trg_pos, calltip, curr_pos):
        """Ask which argument of ``calltip`` the caret at ``curr_pos`` is
        in; handler.on_get_calltip_range receives the (start, end) span."""
        def callback(request, response):
            if not response.get('success'):
                msg = response.get('message')
                if not msg:
                    msg = "get_calltip_arg_range: Can't get a calltip at position %d" % curr_pos
                try:
                    handler.set_status_message(self, msg)
                except Exception as e:
                    self.log.error("Error reporting get_calltip_arg_range error: %s", response.get('message', e))
                return
            start = response.get('start', -1)
            end = response.get('end', -1)
            try:
                handler.on_get_calltip_range(self, start, end)
            except Exception as e:
                self.log.error("Error calling get_calltip_arg_range callback: %s", e)
        self.service.send(
            command='calltip-arg-range',
            path=self.path,
            language=self.lang,
            text=self.text,
            encoding='utf-8',
            trg_pos=trg_pos,
            calltip=calltip,
            curr_pos=curr_pos,
            env={
                'env': self.env,
                'prefs': self.prefs,
            },
            callback=callback,
        )
/InowasFlopyAdapter-1.5.0.tar.gz/InowasFlopyAdapter-1.5.0/FlopyAdapter/MfPackages/HobAdapter.py | import flopy.modflow as mf
class HobAdapter:
    """Adapter that builds a flopy ModflowHob (head observation) package
    from a plain configuration dict."""

    # Raw configuration supplied by the caller; merged over default().
    _data = None

    def __init__(self, data):
        self._data = data

    def validate(self):
        """Validation hook; not implemented yet.

        Intended to walk the configuration and return hints about
        problems; currently a no-op returning None.
        """
        pass

    def is_valid(self):
        """Validity hook; currently always reports True (no checks yet)."""
        return True

    def merge(self):
        """Return the default settings overridden by the user-supplied values."""
        merged = self.default()
        merged.update(self._data)
        return merged

    def get_package(self, _mf):
        """Instantiate the flopy ModflowHob package on model ``_mf``."""
        content = self.merge()
        # noinspection PyTypeChecker
        content["obs_data"] = self.map_obs_data(_mf, content["obs_data"])
        return mf.ModflowHob(
            _mf,
            **content
        )

    @staticmethod
    def map_obs_data(model, observations):
        """Convert observation dicts into flopy HeadObservation objects.

        Unnamed observations get generated names HOB.1, HOB.2, ...
        """
        converted = []
        for index, spec in enumerate(observations, start=1):
            converted.append(mf.HeadObservation(
                model,
                obsname=spec.get('obsname', 'HOB.' + str(index)),
                layer=spec['layer'],
                row=spec['row'],
                column=spec['column'],
                time_series_data=spec['time_series_data']
            ))
        return converted

    @staticmethod
    def default():
        """Default keyword arguments for flopy's ModflowHob constructor."""
        return {
            "iuhobsv": 1051,
            "hobdry": 0,
            "tomulth": 1.0,
            "obs_data": None,
            "hobname": None,
            "extension": 'hob',
            "unitnumber": None,
            "filenames": None
        }

    @staticmethod
    def read_package(package):
        """Extract a configuration dict back out of an existing ModflowHob
        package instance."""
        return {
            "iuhobsv": package.iuhobsv,
            "hobdry": package.hobdry,
            "tomulth": package.tomulth,
            "obs_data": package.obs_data,
            "hobname": package.hobname,
            "extension": package.extension[0],
            "unitnumber": package.unit_number[0],
            "filenames": package.filenames
        }
/GeoNode-3.2.0-py3-none-any.whl/geonode/static/geonode/js/ol-2.13/lib/OpenLayers/Util/vendorPrefix.js | * @requires OpenLayers/SingleFile.js
*/
OpenLayers.Util = OpenLayers.Util || {};
/**
* Namespace: OpenLayers.Util.vendorPrefix
* A collection of utility functions to detect vendor prefixed features
*/
OpenLayers.Util.vendorPrefix = (function() {
    "use strict";

    // Candidate prefixes tried in order; "" means the unprefixed name.
    var VENDOR_PREFIXES = ["", "O", "ms", "Moz", "Webkit"],
        divStyle = document.createElement("div").style,
        cssCache = {},
        jsCache = {};

    /**
     * Function: domToCss
     * Convert an upper camel case DOM style property name to its CSS
     * form, e.g. transformOrigin -> transform-origin, or
     * WebkitTransformOrigin -> -webkit-transform-origin.
     *
     * Parameters:
     * prefixedDom - {String} The DOM property to convert (may be falsy)
     *
     * Returns:
     * {String} The CSS property, or null when no property was given
     */
    function domToCss(prefixedDom) {
        if (!prefixedDom) {
            return null;
        }
        var hyphenated = prefixedDom.replace(/([A-Z])/g, function(letter) {
            return "-" + letter.toLowerCase();
        });
        // "ms" is the one vendor whose CSS prefix stays lowercase: -ms-*.
        return hyphenated.replace(/^ms-/, "-ms-");
    }

    /**
     * APIMethod: css
     * Detect which property is used for a CSS property
     *
     * Parameters:
     * property - {String} The standard (unprefixed) CSS property name
     *
     * Returns:
     * {String} The standard CSS property, prefixed property or null if
     *     not supported
     */
    function css(property) {
        if (cssCache[property] === undefined) {
            var domProperty = property.replace(/(-[\s\S])/g, function(c) {
                return c.charAt(1).toUpperCase();
            });
            cssCache[property] = domToCss(style(domProperty));
        }
        return cssCache[property];
    }

    /**
     * APIMethod: js
     * Detect which property is used for a JS property/method
     *
     * Parameters:
     * obj - {Object} The object to test on
     * property - {String} The standard (unprefixed) JS property name
     *
     * Returns:
     * {String} The standard JS property, prefixed property or null if
     *     not supported
     */
    function js(obj, property) {
        if (jsCache[property] === undefined) {
            // Style objects expose cssText; their prefixes keep their
            // original casing while plain JS prefixes are lowercased.
            var isStyleObj = (typeof obj.cssText !== "undefined"),
                capitalized = property.charAt(0).toUpperCase() + property.slice(1),
                found = null,
                i, prefix, candidate;
            for (i = 0; i < VENDOR_PREFIXES.length; i++) {
                prefix = VENDOR_PREFIXES[i];
                if (prefix) {
                    candidate = (isStyleObj ? prefix : prefix.toLowerCase()) + capitalized;
                } else {
                    candidate = property;
                }
                if (obj[candidate] !== undefined) {
                    found = candidate;
                    break;
                }
            }
            jsCache[property] = found;
        }
        return jsCache[property];
    }

    /**
     * APIMethod: style
     * Detect which property is used for a DOM style property
     *
     * Parameters:
     * property - {String} The standard (unprefixed) style property name
     *
     * Returns:
     * {String} The standard style property, prefixed property or null if
     *     not supported
     */
    function style(property) {
        return js(divStyle, property);
    }

    return {
        css: css,
        js: js,
        style: style,
        // exposed for unit tests
        cssCache: cssCache,
        jsCache: jsCache
    };
}());
/Arduinozore-1.1.3.tar.gz/Arduinozore-1.1.3/arduinozore/handlers/device.py | import os
from arduinozore.handlers.crudHandler import CrudHandler
from arduinozore.handlers.tools import get_arduino
from arduinozore.handlers.tools import get_config_name
from arduinozore.models.card import Card
from arduinozore.models.device import Device
from arduinozore.models.sensor import Sensor
from arduinozore.settings import DEVICE_CONFIG_FOLDER
from arduinozore.settings import SSL_PORT
from arduinozore.settings import path
class DevicePageHandler(CrudHandler):
    """CRUD page handler for serial devices (Arduino boards).

    Renders the list/show/create/edit pages and persists per-device
    configuration files on disk.

    Fixes over the previous revision: ``edit`` fell through after
    ``self.redirect(...)`` when the device could not be resolved and then
    crashed on ``device.card``; ``settings['method']`` was assigned twice
    in both ``create`` and ``edit``; ``save`` used a blanket try/except
    around a dict pop.
    """

    default_args = {'enabled': '', 'name': '', 'type': ''}

    def list(self):
        """List all known devices."""
        devices = Device.get_all()
        self.render('device/list.html', devices=devices)

    def show(self, slug):
        """Show a device's live page, fall back to its stored config, or
        redirect to the creation form when nothing is known about it."""
        device = Device.get(Device.get_identifier_from_serial(slug))
        if device is None:
            device = Device.get_config(slug)
            if device is None:
                self.redirect(self.redirect_url + '/create', permanent=False)
            else:
                settings = {'device': device, 'slug': slug}
                self.render('device/show.html', **settings)
        else:
            settings = {'port': SSL_PORT, 'slug': slug, 'device': device}
            self.render('device/communicate.html', **settings)

    def create(self, slug):
        """Show the configuration form for a new device."""
        cards = Card.get_all()
        sensors = Sensor.get_all()
        device = Device.get(slug)
        if 'card' in self.request.arguments:
            card = Card.get(self.get_argument('card'))
        else:
            card = None
        settings = {
            'method': 'post',
            'cards': cards,
            'card': card,
            'sensors': sensors,
            'device': device,
            'slug': slug,
        }
        self.render('device/config.html', **settings)

    def edit(self, slug):
        """Show the configuration form for an existing device."""
        device = Device.get(Device.get_identifier_from_serial(slug))
        cards = Card.get_all()
        sensors = Sensor.get_all()
        if device is None:
            device = Device.get_config(slug)
            if device is None:
                # BUG FIX: the original kept executing after the redirect
                # and crashed on device.card below; stop handling here.
                self.redirect(self.redirect_url + '/create', permanent=False)
                return
        settings = {
            'method': 'put',
            'cards': cards,
            'card': device.card,
            'sensors': sensors,
            'device': device,
        }
        self.render('device/config.html', **settings)

    def store(self, slug):
        """Store a new configuration, then go back to the device list."""
        self.save(slug)
        self.redirect(self.redirect_url, permanent=True)

    def update(self, slug):
        """Update an existing configuration, then go back to the list."""
        self.save(slug)
        self.redirect(self.redirect_url, permanent=True)

    def save(self, slug):
        """Persist the device configuration built from request arguments."""
        # Method-override field injected by HTML forms; not part of the
        # device configuration, so drop it if present.
        self.request.arguments.pop("_method", None)
        device = Device.from_request_args(slug, self.request.arguments)
        device.save()

    def destroy(self, slug):
        """Delete the device's configuration file and return to the list."""
        arduino = get_arduino(slug)
        config_name = get_config_name(arduino)
        config_file = path(DEVICE_CONFIG_FOLDER, config_name)
        os.remove(config_file)
        self.redirect(self.redirect_url, permanent=False)
/IPS-Vagrant-0.4.1.tar.gz/IPS-Vagrant-0.4.1/ips_vagrant/commands/list/__init__.py | import click
from ips_vagrant.cli import pass_context, Context
from ips_vagrant.common import domain_parse, styled_status
from ips_vagrant.models.sites import Domain, Site, Session
@click.command('list', short_help='List all domains, or all installations under a specified domain.')
@click.argument('dname', default=False, metavar='<domain>')
@click.argument('site', default=False, metavar='<site>')
@pass_context
def cli(ctx, dname, site):
    """
    List all domains if no <domain> is provided. If <domain> is provided but <site> is not, lists all sites
    hosted under <domain>. If both <domain> and <site> are provided, lists information on the specified site.
    """
    assert isinstance(ctx, Context)
    # The original split this into two separate `if dname:` blocks, which
    # left `domain` potentially unbound (suppressed with a noinspection
    # comment); a single guarded block removes the hazard, same behavior.
    if dname:
        dname = domain_parse(dname).hostname
        domain = Session.query(Domain).filter(Domain.name == dname).first()
        # No such domain
        if not domain:
            click.secho('No such domain: {dn}'.format(dn=dname), fg='red', bold=True, err=True)
            return
        if site:
            # Detailed view of a single site under the domain.
            site_name = site
            site = Site.get(domain, site)
            if not site:
                click.secho('No such site: {site}'.format(site=site_name), fg='red', bold=True, err=True)
                return
            click.secho('Name: {n}'.format(n=site.name), bold=True)
            click.secho('Domain: {dn}'.format(dn=site.domain.name), bold=True)
            click.secho('Version: {v}'.format(v=site.version), bold=True)
            click.secho('License Key: {lk}'.format(lk=site.license_key), bold=True)
            click.secho('Status: {s}'.format(s=styled_status(site.enabled)), bold=True)
            click.secho('IN_DEV: {id}'.format(id=styled_status(site.in_dev)), bold=True)
            click.secho('SSL: {s}'.format(s=styled_status(site.ssl)), bold=True)
            click.secho('SPDY: {s}'.format(s=styled_status(site.spdy)), bold=True)
            click.secho('GZIP: {g}'.format(g=styled_status(site.gzip)), bold=True)
            click.secho('MySQL Database: {db}'.format(db=site.db_name), bold=True)
            click.secho('MySQL User: {u}'.format(u=site.db_user), bold=True)
            click.secho('MySQL Password: {u}'.format(u=site.db_pass), bold=True)
            return
        # No site given: list every site under the domain.
        sites = Site.all(domain)
        if not sites:
            click.secho('No sites active under domain: {dn}'.format(dn=dname), fg='red', bold=True, err=True)
            return
        for site in sites:
            prefix = '[DEV] ' if site.in_dev else ''
            fg = 'green' if site.enabled else 'white'
            click.secho('{pre}{name} ({ver})'.format(pre=prefix, name=site.name, ver=site.version), fg=fg, bold=True)
        return
    # No domain given: list every domain (with any extra hostnames).
    domains = Domain.all()
    for domain in domains:
        extras = ''
        if domain.extras:
            extras = ' ({dnames})'.format(dnames=str(domain.extras).replace(',', ', '))
        click.secho('{dname}{extras}'.format(dname=domain.name, extras=extras), bold=True)
/MiWork-2021.2.20.20.8.11-py3-none-any.whl/miwork/helper.py |
from __future__ import absolute_import, division, print_function, unicode_literals
import time
from datetime import datetime
from enum import Enum
from io import BytesIO
from typing import TYPE_CHECKING
from six import PY2, string_types
from six.moves.urllib.request import urlretrieve
from miwork.exception import LarkInvalidArguments
if TYPE_CHECKING:
from datetime import tzinfo
def to_timestamp(t):
    """Convert a datetime into an integer Unix timestamp (truncated).

    :param t: the datetime to convert
    :type t: datetime
    :return: seconds since the epoch
    :rtype: int
    """
    try:
        stamp = t.timestamp()
    except AttributeError:
        # Python 2 datetimes lack .timestamp(); rebuild it from the
        # local-time struct plus the microsecond fraction.
        stamp = time.mktime(t.timetuple()) + t.microsecond / 1000000.0
    return int(stamp)
def to_native(s):
    """Coerce *s* to a native text string (bytes are UTF-8 decoded,
    anything else is returned unchanged).

    :type s: Union[str, bytes]
    :rtype: str
    """
    return s.decode('utf-8') if isinstance(s, bytes) else s
def _read_from_url(url):
    """Download *url* to a local temp file and return it opened in
    binary-read mode."""
    local_path = urlretrieve(url)[0]
    return open(local_path, 'rb')
def _read_from_file(file):
return open(file, 'rb')
def to_file_like(image):
    """Normalize *image* into a readable binary file-like object.

    Accepts raw bytes (wrapped in BytesIO), an http(s) URL (downloaded),
    a filesystem path (opened), or an already file-like object, which is
    returned untouched.

    :param image:
    :type image: Union[string_types, bytes, BytesIO]
    """
    if isinstance(image, bytes):
        return BytesIO(image)
    if isinstance(image, string_types):
        is_url = image.startswith(str('http://')) or image.startswith(str('https://'))
        return _read_from_url(image) if is_url else _read_from_file(image)
    return image
def converter_enum(value, ranges=None):
    """Unwrap an Enum member to its value, optionally validating it.

    :param value: an Enum member or a plain value
    :param ranges: optional iterable of allowed values (Enum members are
        unwrapped too); raises LarkInvalidArguments when *value* is not
        among them
    :return: the unwrapped plain value
    """
    plain = value.value if isinstance(value, Enum) else value
    if ranges is not None:
        allowed = [item.value if isinstance(item, Enum) else item for item in ranges]
        if plain not in allowed:
            raise LarkInvalidArguments(msg='enum: %s should be in ranges: %s' % (plain, ' / '.join(map(str, allowed))))
    return plain
def datetime_format_rfc3339(d, default_tz=None):
    """Format a datetime as an RFC 3339 timestamp string.

    :param d: the datetime to format
    :type d: datetime
    :param default_tz: timezone applied when *d* is naive
    :type default_tz: tzinfo
    :return: RFC 3339 formatted time string
    :rtype: str
    """
    # Attach the fallback timezone only when d carries none of its own.
    if default_tz and not d.tzinfo:
        d = d.replace(tzinfo=default_tz)
    return d.astimezone(d.tzinfo).isoformat()
def join_url(base_url, qs, sep='?'):
    """Append the non-empty key/value pairs in *qs* to *base_url*.

    :param base_url: URL to extend
    :param qs: iterable of (key, value) pairs; pairs with a falsy value
        are dropped
    :param sep: separator placed before the query string ('?' or '&')
    :return: the joined URL (unchanged when no pair survives filtering)
    """
    query = '&'.join('{}={}'.format(pair[0], pair[1]) for pair in qs if pair[1])
    if not query:
        return base_url
    return base_url + sep + query
def join_dict(base, d):
    """Merge the (key, value) pairs of *d* into mapping *base* in place.

    Booleans are copied unconditionally (False is meaningful); any other
    value is copied only when truthy.  Returns *base* for chaining.
    """
    for item in d:
        key, val = item[0], item[1]
        if isinstance(val, bool):
            # The original also guarded `val is not None`, but a bool can
            # never be None, so bools are always stored.
            base[key] = val
        elif val:
            base[key] = val
    return base
def pop_or_none(d, key):
    """Remove *key* from mapping *d* and return its value, or None when
    the key is absent.

    Uses ``dict.pop``'s default argument instead of the original
    try/except KeyError — same behavior, single lookup, no exception
    machinery on the miss path.
    """
    return d.pop(key, None)
/Maxis_EA_server-0.0.1.tar.gz/Maxis_EA_server-0.0.1/server/common/metaclasses.py | import dis
class ServerMetaClass(type):
"""
Метакласс, проверяющий что в результирующем классе нет клиентских
вызовов таких как: connect. Также проверяется, что серверный
сокет является TCP и работает по IPv4 протоколу.
"""
def __init__(cls, clsname, bases, clsdict):
methods = []
attrs = []
for func in clsdict:
try:
ret = dis.get_instructions(clsdict[func])
except TypeError:
pass
else:
for i in ret:
if i.opname == 'LOAD_GLOBAL':
if i.argval not in methods:
methods.append(i.argval)
elif i.opname == 'LOAD_ATTR':
if i.argval not in attrs:
attrs.append(i.argval)
if 'connect' in methods:
raise TypeError('Method "connect" is restricted ')
if not ('SOCK_STREAM' in attrs and 'AF_INET' in attrs):
raise TypeError('Incorrect initialization of a socket.')
super().__init__(clsname, bases, clsdict)
class ClientMetaClass(type):
"""
Метакласс, проверяющий что в результирующем классе нет серверных
вызовов таких как: accept, listen. Также проверяется, что сокет не
создаётся внутри конструктора класса.
"""
def __init__(cls, clsname, bases, clsdict):
methods = []
for func in clsdict:
try:
ret = dis.get_instructions(clsdict[func])
except TypeError:
pass
else:
for i in ret:
if i.opname == 'LOAD_GLOBAL':
if i.argval not in methods:
methods.append(i.argval)
for command in ('accept', 'listen', 'socket'):
if command in methods:
raise TypeError(f'{command} is restricted in the method')
if 'get_message' in methods or 'send_message' in methods:
pass
else:
raise TypeError('There are no calls to functions which work with sockets.')
super().__init__(clsname, bases, clsdict) | PypiClean |
/FreeClimb-4.5.0-py3-none-any.whl/freeclimb/model/get_digits.py | import re # noqa: F401
import sys # noqa: F401
from freeclimb.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel
)
from freeclimb.exceptions import ApiAttributeError
def lazy_import():
    """Populate module globals with the freeclimb model classes on demand.

    Auto-generated by OpenAPI Generator (see the class docstring below);
    deferring these imports to call time avoids circular-import problems
    between the mutually-referencing model modules.  Do not edit by hand.
    """
    from freeclimb.model.add_to_conference import AddToConference
    from freeclimb.model.create_conference import CreateConference
    from freeclimb.model.dequeue import Dequeue
    from freeclimb.model.enqueue import Enqueue
    from freeclimb.model.get_digits import GetDigits
    from freeclimb.model.get_digits_all_of import GetDigitsAllOf
    from freeclimb.model.get_speech import GetSpeech
    from freeclimb.model.hangup import Hangup
    from freeclimb.model.out_dial import OutDial
    from freeclimb.model.park import Park
    from freeclimb.model.pause import Pause
    from freeclimb.model.percl_command import PerclCommand
    from freeclimb.model.play import Play
    from freeclimb.model.play_early_media import PlayEarlyMedia
    from freeclimb.model.record_utterance import RecordUtterance
    from freeclimb.model.redirect import Redirect
    from freeclimb.model.reject import Reject
    from freeclimb.model.remove_from_conference import RemoveFromConference
    from freeclimb.model.say import Say
    from freeclimb.model.send_digits import SendDigits
    from freeclimb.model.set_listen import SetListen
    from freeclimb.model.set_talk import SetTalk
    from freeclimb.model.sms import Sms
    from freeclimb.model.start_record_call import StartRecordCall
    from freeclimb.model.terminate_conference import TerminateConference
    from freeclimb.model.unpark import Unpark
    # Re-export each class into this module's namespace so the model
    # machinery can resolve them by name at runtime.
    globals()['AddToConference'] = AddToConference
    globals()['CreateConference'] = CreateConference
    globals()['Dequeue'] = Dequeue
    globals()['Enqueue'] = Enqueue
    globals()['GetDigits'] = GetDigits
    globals()['GetDigitsAllOf'] = GetDigitsAllOf
    globals()['GetSpeech'] = GetSpeech
    globals()['Hangup'] = Hangup
    globals()['OutDial'] = OutDial
    globals()['Park'] = Park
    globals()['Pause'] = Pause
    globals()['PerclCommand'] = PerclCommand
    globals()['Play'] = Play
    globals()['PlayEarlyMedia'] = PlayEarlyMedia
    globals()['RecordUtterance'] = RecordUtterance
    globals()['Redirect'] = Redirect
    globals()['Reject'] = Reject
    globals()['RemoveFromConference'] = RemoveFromConference
    globals()['Say'] = Say
    globals()['SendDigits'] = SendDigits
    globals()['SetListen'] = SetListen
    globals()['SetTalk'] = SetTalk
    globals()['Sms'] = Sms
    globals()['StartRecordCall'] = StartRecordCall
    globals()['TerminateConference'] = TerminateConference
    globals()['Unpark'] = Unpark
class GetDigits(ModelComposed):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech
    Do not edit the class manually.
    Attributes:
      allowed_values (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          with a capitalized key describing the allowed value and an allowed
          value. These dicts store the allowed enum values.
      attribute_map (dict): The key is attribute name
          and the value is json key in definition.
      discriminator_value_class_map (dict): A dict to go from the discriminator
          variable value to the discriminator class name.
      validations (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          that stores validations for max_length, min_length, max_items,
          min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
          inclusive_minimum, and regex.
      additional_properties_type (tuple): A tuple of classes accepted
          as additional properties values.
    """
    # This model declares no enum-restricted and no numerically-validated
    # properties, hence both dicts are empty.
    allowed_values = {
    }
    validations = {
    }
    @cached_property
    def additional_properties_type():
        """
        This must be a method because a model may have properties that are
        of type self, this must run after the class is loaded
        """
        lazy_import()
        return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
    _nullable = False
    @cached_property
    def openapi_types():
        """
        This must be a method because a model may have properties that are
        of type self, this must run after the class is loaded
        Returns
            openapi_types (dict): The key is attribute name
                and the value is attribute type.
        """
        lazy_import()
        return {
            'action_url': (str,), # noqa: E501
            'digit_timeout_ms': (int,), # noqa: E501
            'finish_on_key': (str,), # noqa: E501
            'flush_buffer': (bool,), # noqa: E501
            'initial_timeout_ms': (str,), # noqa: E501
            'max_digits': (int,), # noqa: E501
            'min_digits': (int,), # noqa: E501
            'prompts': ([PerclCommand],), # noqa: E501
            'privacy_mode': (bool,), # noqa: E501
            'command': (str,), # noqa: E501
        }
    @cached_property
    def discriminator():
        return None
    # Maps pythonic attribute names to the camelCase JSON keys of the spec.
    attribute_map = {
        'action_url': 'actionUrl', # noqa: E501
        'digit_timeout_ms': 'digitTimeoutMs', # noqa: E501
        'finish_on_key': 'finishOnKey', # noqa: E501
        'flush_buffer': 'flushBuffer', # noqa: E501
        'initial_timeout_ms': 'initialTimeoutMs', # noqa: E501
        'max_digits': 'maxDigits', # noqa: E501
        'min_digits': 'minDigits', # noqa: E501
        'prompts': 'prompts', # noqa: E501
        'privacy_mode': 'privacyMode', # noqa: E501
        'command': 'command', # noqa: E501
    }
    read_only_vars = {
    }
    @classmethod
    @convert_js_args_to_python_args
    def _from_openapi_data(cls, *args, **kwargs): # noqa: E501
        """GetDigits - a model defined in OpenAPI
        Keyword Args:
            action_url (str): When the Caller has finished entering digits, FreeClimb will make an HTTP POST request to this URL. A PerCL response is expected to continue handling the Call. Make sure to keep “http://“ in the URL.
            _check_type (bool): if True, values for parameters in openapi_types
                                will be type checked and a TypeError will be
                                raised if the wrong type is input.
                                Defaults to True
            _path_to_item (tuple/list): This is a list of keys or values to
                                drill down to the model in received_data
                                when deserializing a response
            _spec_property_naming (bool): True if the variable names in the input data
                                are serialized names, as specified in the OpenAPI document.
                                False if the variable names in the input data
                                are pythonic names, e.g. snake case (default)
            _configuration (Configuration): the instance to use when
                                deserializing a file_type parameter.
                                If passed, type conversion is attempted
                                If omitted no type conversion is done.
            _visited_composed_classes (tuple): This stores a tuple of
                                classes that we have traveled through so that
                                if we see that class again we will not use its
                                discriminator again.
                                When traveling through a discriminator, the
                                composed schema that is
                                is traveled through is added to this set.
                                For example if Animal has a discriminator
                                petType and we pass in "Dog", and the class Dog
                                allOf includes Animal, we move through Animal
                                once using the discriminator, and pick Dog.
                                Then in Dog, we will make an instance of the
                                Animal class but this time we won't travel
                                through its discriminator because we passed in
                                _visited_composed_classes = (Animal,)
            digit_timeout_ms (int): Maximum time in milliseconds that FreeClimb will wait for the Caller to press any digit after the last digit entered, before making a determination that a `timeout` has occurred and moving on to make the request to the actionUrl to submit the results of the `GetDigits` command. This timeout interval begins and resets after each digit entered.. [optional] # noqa: E501
            finish_on_key (str): Digit that causes the input sequence to be deemed complete. This attribute defers to the `timeout` attribute – so, if a `timeout` occurs, then the command terminates regardless of the value of `finishOnKey`.. [optional] # noqa: E501
            flush_buffer (bool): If set to true, the FreeClimb platform starts with an empty DTMF buffer to store the digits entered by the caller. If set to false, FreeClimb will append the user inputs to the end of the existing digits buffer and will return digits from the start of the digits buffer.. [optional] # noqa: E501
            initial_timeout_ms (str): Maximum time in milliseconds that FreeClimb will wait for the Caller to press the first digit before making a determination that a `timeout` has occurred and moving on to make the request to the `actionUrl` to submit the results of the `GetDigits` command. This timeout interval begins when all nested commands have been fully executed.. [optional] # noqa: E501
            max_digits (int): Maximum number of digits expected in the input. If the terminating digit is not entered and the caller has entered the maximum number of digits allowed, the `GetDigits` command terminates regardless of the value of `finishOnKey`.. [optional] # noqa: E501
            min_digits (int): Minimum number of digits expected in the input. If specified, FreeClimb will return the collected digits only if the Caller has entered at least that many digits.. [optional] # noqa: E501
            prompts ([PerclCommand]): JSON array of PerCL commands to nest within the `GetDigits` command. The `Say`, `Play`, and `Pause` commands can be used. The nested actions are executed while FreeClimb is waiting for input from the Caller.. [optional] # noqa: E501
            privacy_mode (bool): Parameter `privacyMode` will not log the `text` as required by PCI compliance.. [optional] # noqa: E501
            command (str): Name of PerCL Command (this is automatically derived from mapping configuration and should not be manually supplied in any arguments). [optional] # noqa: E501
        """
        # Pull the framework-control kwargs out first; everything left in
        # `kwargs` afterwards is treated as model properties.
        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
        self = super(OpenApiModel, cls).__new__(cls)
        if args:
            raise ApiTypeError(
                "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                    args,
                    self.__class__.__name__,
                ),
                path_to_item=_path_to_item,
                valid_classes=(self.__class__,),
            )
        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
        constant_args = {
            '_check_type': _check_type,
            '_path_to_item': _path_to_item,
            '_spec_property_naming': _spec_property_naming,
            '_configuration': _configuration,
            '_visited_composed_classes': self._visited_composed_classes,
        }
        # Resolve the allOf composition (GetDigitsAllOf + PerclCommand, see
        # _composed_schemas) and distribute kwargs across the composed instances.
        composed_info = validate_get_composed_info(
            constant_args, kwargs, self)
        self._composed_instances = composed_info[0]
        self._var_name_to_model_instances = composed_info[1]
        self._additional_properties_model_instances = composed_info[2]
        discarded_args = composed_info[3]
        for var_name, var_value in kwargs.items():
            if var_name in discarded_args and \
                        self._configuration is not None and \
                        self._configuration.discard_unknown_keys and \
                        self._additional_properties_model_instances:
                # discard variable.
                continue
            setattr(self, var_name, var_value)
        return self
    required_properties = set([
        '_data_store',
        '_check_type',
        '_spec_property_naming',
        '_path_to_item',
        '_configuration',
        '_visited_composed_classes',
        '_composed_instances',
        '_var_name_to_model_instances',
        '_additional_properties_model_instances',
    ])
    @convert_js_args_to_python_args
    def __init__(self, *args, **kwargs): # noqa: E501
        """GetDigits - a model defined in OpenAPI
        Keyword Args:
            action_url (str): When the Caller has finished entering digits, FreeClimb will make an HTTP POST request to this URL. A PerCL response is expected to continue handling the Call. Make sure to keep “http://“ in the URL.
            _check_type (bool): if True, values for parameters in openapi_types
                                will be type checked and a TypeError will be
                                raised if the wrong type is input.
                                Defaults to True
            _path_to_item (tuple/list): This is a list of keys or values to
                                drill down to the model in received_data
                                when deserializing a response
            _spec_property_naming (bool): True if the variable names in the input data
                                are serialized names, as specified in the OpenAPI document.
                                False if the variable names in the input data
                                are pythonic names, e.g. snake case (default)
            _configuration (Configuration): the instance to use when
                                deserializing a file_type parameter.
                                If passed, type conversion is attempted
                                If omitted no type conversion is done.
            _visited_composed_classes (tuple): This stores a tuple of
                                classes that we have traveled through so that
                                if we see that class again we will not use its
                                discriminator again.
                                When traveling through a discriminator, the
                                composed schema that is
                                is traveled through is added to this set.
                                For example if Animal has a discriminator
                                petType and we pass in "Dog", and the class Dog
                                allOf includes Animal, we move through Animal
                                once using the discriminator, and pick Dog.
                                Then in Dog, we will make an instance of the
                                Animal class but this time we won't travel
                                through its discriminator because we passed in
                                _visited_composed_classes = (Animal,)
            digit_timeout_ms (int): Maximum time in milliseconds that FreeClimb will wait for the Caller to press any digit after the last digit entered, before making a determination that a `timeout` has occurred and moving on to make the request to the actionUrl to submit the results of the `GetDigits` command. This timeout interval begins and resets after each digit entered.. [optional] # noqa: E501
            finish_on_key (str): Digit that causes the input sequence to be deemed complete. This attribute defers to the `timeout` attribute – so, if a `timeout` occurs, then the command terminates regardless of the value of `finishOnKey`.. [optional] # noqa: E501
            flush_buffer (bool): If set to true, the FreeClimb platform starts with an empty DTMF buffer to store the digits entered by the caller. If set to false, FreeClimb will append the user inputs to the end of the existing digits buffer and will return digits from the start of the digits buffer.. [optional] # noqa: E501
            initial_timeout_ms (str): Maximum time in milliseconds that FreeClimb will wait for the Caller to press the first digit before making a determination that a `timeout` has occurred and moving on to make the request to the `actionUrl` to submit the results of the `GetDigits` command. This timeout interval begins when all nested commands have been fully executed.. [optional] # noqa: E501
            max_digits (int): Maximum number of digits expected in the input. If the terminating digit is not entered and the caller has entered the maximum number of digits allowed, the `GetDigits` command terminates regardless of the value of `finishOnKey`.. [optional] # noqa: E501
            min_digits (int): Minimum number of digits expected in the input. If specified, FreeClimb will return the collected digits only if the Caller has entered at least that many digits.. [optional] # noqa: E501
            prompts ([PerclCommand]): JSON array of PerCL commands to nest within the `GetDigits` command. The `Say`, `Play`, and `Pause` commands can be used. The nested actions are executed while FreeClimb is waiting for input from the Caller.. [optional] # noqa: E501
            privacy_mode (bool): Parameter `privacyMode` will not log the `text` as required by PCI compliance.. [optional] # noqa: E501
            command (str): Name of PerCL Command (this is automatically derived from mapping configuration and should not be manually supplied in any arguments). [optional] # noqa: E501
        """
        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
        if args:
            raise ApiTypeError(
                "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                    args,
                    self.__class__.__name__,
                ),
                path_to_item=_path_to_item,
                valid_classes=(self.__class__,),
            )
        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
        constant_args = {
            '_check_type': _check_type,
            '_path_to_item': _path_to_item,
            '_spec_property_naming': _spec_property_naming,
            '_configuration': _configuration,
            '_visited_composed_classes': self._visited_composed_classes,
        }
        composed_info = validate_get_composed_info(
            constant_args, kwargs, self)
        self._composed_instances = composed_info[0]
        self._var_name_to_model_instances = composed_info[1]
        self._additional_properties_model_instances = composed_info[2]
        discarded_args = composed_info[3]
        for var_name, var_value in kwargs.items():
            if var_name in discarded_args and \
                        self._configuration is not None and \
                        self._configuration.discard_unknown_keys and \
                        self._additional_properties_model_instances:
                # discard variable.
                continue
            setattr(self, var_name, var_value)
            # NOTE: read_only_vars is empty for this model (see class body above),
            # so this branch is unreachable here; it is kept by the generator for
            # models that do declare read-only attributes.
            if var_name in self.read_only_vars:
                raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
                                     f"class with read only attributes.")
    @cached_property
    def command():
        # Reverse-lookup: find the discriminator value that maps to this class
        # in PerclCommand's discriminator table.
        mappings = PerclCommand.discriminator['command']
        mapping = next((mapping for mapping,schema in mappings.items() if schema == GetDigits), None)
        if mapping == None:
            # NOTE(review): `GetDigits.__class__.name` looks wrong -- `GetDigits.__class__`
            # is `type`, which has no `.name` attribute; `GetDigits.__name__` was likely
            # intended. Only reachable when the mapping is missing. Generated code --
            # confirm/fix upstream in the generator template.
            raise ApiAttributeError("{0} has no mapping '{1}'".format(GetDigits.__class__.name, 'command'))
        return mapping
    @cached_property
    def _composed_schemas():
        # we need this here to make our import statements work
        # we must store _composed_schemas in here so the code is only run
        # when we invoke this method. If we kept this at the class
        # level we would get an error because the class level
        # code would be run when this module is imported, and these composed
        # classes don't exist yet because their module has not finished
        # loading
        lazy_import()
        return {
          'anyOf': [
          ],
          'allOf': [
              GetDigitsAllOf,
              PerclCommand,
          ],
          'oneOf': [
          ],
        }
/NESTML-5.3.0-py3-none-any.whl/pynestml/meta_model/ast_comparison_operator.py |
from pynestml.meta_model.ast_node import ASTNode
class ASTComparisonOperator(ASTNode):
    """
    AST node that stores exactly one comparison operator.
    Grammar:
        comparisonOperator : (lt='<' | le='<=' | eq='==' | ne='!=' | ne2='<>' | ge='>=' | gt='>');
    Attributes:
        is_lt = False
        is_le = False
        is_eq = False
        is_ne = False
        is_ne2 = False
        is_ge = False
        is_gt = False
    """
    def __init__(self, is_lt=False, is_le=False, is_eq=False, is_ne=False, is_ne2=False, is_ge=False,
                 is_gt=False, *args, **kwargs):
        """
        Standard constructor. Exactly one of the operator flags must be True.
        Parameters for superclass (ASTNode) can be passed through :python:`*args` and :python:`**kwargs`.
        :param is_lt: is less than operator.
        :type is_lt: bool
        :param is_le: is less equal operator.
        :type is_le: bool
        :param is_eq: is equality operator.
        :type is_eq: bool
        :param is_ne: is not equal operator.
        :type is_ne: bool
        :param is_ne2: is not equal operator (alternative syntax).
        :type is_ne2: bool
        :param is_ge: is greater equal operator.
        :type is_ge: bool
        :param is_gt: is greater than operator.
        :type is_gt: bool
        """
        # Booleans sum as integers, so this enforces "exactly one flag set".
        assert (sum((is_lt, is_le, is_eq, is_ne, is_ne2, is_ge, is_gt)) == 1), \
            '(PyNestML.ASTComparisonOperator) Comparison operator not correctly specified!'
        super(ASTComparisonOperator, self).__init__(*args, **kwargs)
        self.is_lt = is_lt
        self.is_le = is_le
        self.is_eq = is_eq
        self.is_ne = is_ne
        self.is_ne2 = is_ne2
        self.is_ge = is_ge
        self.is_gt = is_gt
    def clone(self):
        """
        Return a clone ("deep copy") of this node.
        :return: new AST node instance
        :rtype: ASTComparisonOperator
        """
        operator_flags = dict(is_lt=self.is_lt,
                              is_le=self.is_le,
                              is_eq=self.is_eq,
                              is_ne=self.is_ne,
                              is_ne2=self.is_ne2,
                              is_ge=self.is_ge,
                              is_gt=self.is_gt)
        # Common ASTNode attributes are carried over alongside the operator flags.
        return ASTComparisonOperator(source_position=self.source_position,
                                     scope=self.scope,
                                     comment=self.comment,
                                     pre_comments=list(self.pre_comments),
                                     in_comment=self.in_comment,
                                     implicit_conversion_factor=self.implicit_conversion_factor,
                                     **operator_flags)
    def get_parent(self, ast):
        """
        A comparison operator is a leaf node, so it can never contain the
        handed over node.
        :param ast: an arbitrary meta_model node.
        :type ast: AST_
        :return: always None, since this node has no children.
        :rtype: None
        """
        return None
    def equals(self, other):
        """
        The equals method.
        :param other: a different object.
        :type other: object
        :return: True if equal, otherwise False.
        :rtype: bool
        """
        if not isinstance(other, ASTComparisonOperator):
            return False
        own_flags = (self.is_lt, self.is_le, self.is_eq, self.is_ne,
                     self.is_ne2, self.is_ge, self.is_gt)
        other_flags = (other.is_lt, other.is_le, other.is_eq, other.is_ne,
                       other.is_ne2, other.is_ge, other.is_gt)
        return own_flags == other_flags
/Brian2-2.5.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl/brian2/input/poissoninput.py | from brian2.core.variables import Variables
from brian2.groups.group import CodeRunner
from brian2.units.fundamentalunits import (
DimensionMismatchError,
check_units,
get_dimensions,
have_same_dimensions,
)
from brian2.units.stdunits import Hz
from .binomial import BinomialFunction
__all__ = ["PoissonInput"]
class PoissonInput(CodeRunner):
    """
    PoissonInput(target, target_var, N, rate, weight, when='synapses', order=0)
    Adds independent Poisson input to a target variable of a `Group`. For large
    numbers of inputs, this is much more efficient than creating a
    `PoissonGroup`. The synaptic events are generated randomly during the
    simulation and are not preloaded and stored in memory. All the inputs must
    target the same variable, have the same frequency and same synaptic weight.
    All neurons in the target `Group` receive independent realizations of
    Poisson spike trains.
    Parameters
    ----------
    target : `Group`
        The group that is targeted by this input.
    target_var : str
        The variable of `target` that is targeted by this input.
    N : int
        The number of inputs
    rate : `Quantity`
        The rate of each of the inputs
    weight : str or `Quantity`
        Either a string expression (that can be interpreted in the context of
        `target`) or a `Quantity` that will be added for every event to
        the `target_var` of `target`. The unit has to match the unit of
        `target_var`
    when : str, optional
        When to update the target variable during a time step. Defaults to
        the `synapses` scheduling slot. See :ref:`scheduling` for possible values.
    order : int, optional
        The priority of of the update compared to other operations occurring at
        the same time step and in the same scheduling slot. Defaults to 0.
    """
    @check_units(N=1, rate=Hz)
    def __init__(self, target, target_var, N, rate, weight, when="synapses", order=0):
        if target_var not in target.variables:
            raise KeyError(f"{target_var} is not a variable of {target.name}")
        # Keep the user-provided weight/variable for the read-only accessors below.
        self._weight = weight
        self._target_var = target_var
        # A string weight is embedded verbatim (parenthesized) into the abstract
        # code; a Quantity is unit-checked first and then embedded via repr().
        if isinstance(weight, str):
            weight = f"({weight})"
        else:
            weight_dims = get_dimensions(weight)
            target_dims = target.variables[target_var].dim
            # This will be checked automatically in the abstract code as well
            # but doing an explicit check here allows for a clearer error
            # message
            if not have_same_dimensions(weight_dims, target_dims):
                raise DimensionMismatchError(
                    "The provided weight does not "
                    "have the same unit as the "
                    f"target variable '{target_var}'",
                    weight_dims,
                    target_dims,
                )
            weight = repr(weight)
        self._N = N
        self._rate = rate
        # Per time step, the number of arriving events is drawn from
        # Binomial(N, rate*dt); note that rate*dt is fixed at construction time
        # (see the dt check in before_run below).
        binomial_sampling = BinomialFunction(
            N, rate * target.clock.dt, name="poissoninput_binomial*"
        )
        # Abstract code executed in the target group's namespace each time step.
        code = f"{target_var} += {binomial_sampling.name}()*{weight}"
        self._stored_dt = target.dt_[:] # make a copy
        # FIXME: we need an explicit reference here for on-the-fly subgroups
        # For example: PoissonInput(group[:N], ...)
        self._group = target
        CodeRunner.__init__(
            self,
            group=target,
            template="stateupdate",
            code=code,
            user_code="",
            when=when,
            order=order,
            name="poissoninput*",
            clock=target.clock,
        )
        # Register the sampling function so the generated code can call it.
        self.variables = Variables(self)
        self.variables._add_variable(binomial_sampling.name, binomial_sampling)
    # Read-only accessors for the construction-time parameters.
    rate = property(fget=lambda self: self._rate, doc="The rate of each input")
    N = property(fget=lambda self: self._N, doc="The number of inputs")
    target_var = property(
        fget=lambda self: self._target_var, doc="The targetted variable"
    )
    weight = property(fget=lambda self: self._weight, doc="The synaptic weight")
    def before_run(self, run_namespace):
        # The binomial distribution was parameterized with rate*dt at
        # construction time, so a changed dt would silently produce wrong
        # statistics -- refuse to run instead.
        if self._group.dt_ != self._stored_dt:
            raise NotImplementedError(
                f"The dt used for simulating {self.group.name} "
                "changed after the PoissonInput source was "
                "created."
            )
        CodeRunner.before_run(self, run_namespace=run_namespace)
/COMPAS-1.17.5.tar.gz/COMPAS-1.17.5/src/compas_rhino/geometry/surfaces/surface.py | from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
from compas.geometry import Surface
from compas_rhino.conversions import point_to_rhino
from compas_rhino.conversions import point_to_compas
from compas_rhino.conversions import vector_to_compas
from compas_rhino.conversions import plane_to_compas_frame
from compas_rhino.conversions import frame_to_rhino_plane
from compas_rhino.conversions import plane_to_rhino
from compas_rhino.conversions import box_to_compas
from compas_rhino.conversions import xform_to_rhino
from compas_rhino.conversions import sphere_to_rhino
from compas_rhino.conversions import cylinder_to_rhino
from compas_rhino.geometry.curves import RhinoCurve
import Rhino.Geometry
class RhinoSurface(Surface):
    """Class representing a general surface object.
    Attributes
    ----------
    u_domain: tuple[float, float]
        The parameter domain in the U direction.
    v_domain: tuple[float, float]
        The parameter domain in the V direction.
    is_u_periodic: bool
        True if the surface is periodic in the U direction.
    is_v_periodic: bool
        True if the surface is periodic in the V direction.
    """
    def __init__(self, name=None):
        super(RhinoSurface, self).__init__(name=name)
        # The wrapped Rhino.Geometry.Surface; assigned through the
        # `rhino_surface` setter (usually via `from_rhino`).
        self._rhino_surface = None
    @property
    def rhino_surface(self):
        # The underlying Rhino.Geometry.Surface, or None if not yet assigned.
        return self._rhino_surface
    @rhino_surface.setter
    def rhino_surface(self, surface):
        self._rhino_surface = surface
    # ==============================================================================
    # Data
    # ==============================================================================
    # ==============================================================================
    # Properties
    # ==============================================================================
    # All four properties below implicitly return None when no Rhino surface
    # has been assigned yet.
    @property
    def u_domain(self):
        if self.rhino_surface:
            return self.rhino_surface.Domain(0)
    @property
    def v_domain(self):
        if self.rhino_surface:
            return self.rhino_surface.Domain(1)
    @property
    def is_u_periodic(self):
        if self.rhino_surface:
            return self.rhino_surface.IsPeriodic(0)
    @property
    def is_v_periodic(self):
        if self.rhino_surface:
            return self.rhino_surface.IsPeriodic(1)
    # ==============================================================================
    # Constructors
    # ==============================================================================
    @classmethod
    def from_corners(cls, corners):
        """Creates a NURBS surface using the given 4 corners.
        The order of the given points determines the normal direction of the generated surface.
        Parameters
        ----------
        corners : list(:class:`~compas.geometry.Point`)
            4 points in 3d space to represent the corners of the planar surface.
        Returns
        -------
        :class:`~compas_rhino.geometry.RhinoNurbsSurface`
        """
        rhino_points = [Rhino.Geometry.Point3d(corner.x, corner.y, corner.z) for corner in corners]
        return cls.from_rhino(Rhino.Geometry.NurbsSurface.CreateFromCorners(*rhino_points))
    @classmethod
    def from_sphere(cls, sphere):
        """Creates a NURBS surface from a sphere.
        Parameters
        ----------
        sphere : :class:`~compas.geometry.Sphere`
            The surface's geometry.
        Returns
        -------
        :class:`~compas_rhino.geometry.RhinoNurbsSurface`
        """
        sphere = sphere_to_rhino(sphere)
        surface = Rhino.Geometry.NurbsSurface.CreateFromSphere(sphere)
        return cls.from_rhino(surface)
    @classmethod
    def from_cylinder(cls, cylinder):
        """Create a NURBS surface from a cylinder.
        Parameters
        ----------
        cylinder : :class:`~compas.geometry.Cylinder`
            The surface's geometry.
        Returns
        -------
        :class:`~compas_rhino.geometry.RhinoNurbsSurface`
        """
        cylinder = cylinder_to_rhino(cylinder)
        surface = Rhino.Geometry.NurbsSurface.CreateFromCylinder(cylinder)
        return cls.from_rhino(surface)
    @classmethod
    def from_torus(cls, torus):
        """Create a NURBS surface from a torus.
        Not implemented yet.
        Parameters
        ----------
        torus : :class:`~compas.geometry.Torus`
            The surface's geometry.
        Returns
        -------
        :class:`~compas_rhino.geometry.RhinoNurbsSurface`
        Raises
        ------
        NotImplementedError
        """
        raise NotImplementedError
    @classmethod
    def from_rhino(cls, rhino_surface):
        """Construct a NURBS surface from an existing Rhino surface.
        Parameters
        ----------
        rhino_surface : :rhino:`Rhino.Geometry.Surface`
            A Rhino surface.
        Returns
        -------
        :class:`~compas_rhino.geometry.RhinoSurface`
        """
        # NOTE(review): the local is named `curve` but holds a surface wrapper
        # (apparently copied from the RhinoCurve counterpart); harmless but misleading.
        curve = cls()
        curve.rhino_surface = rhino_surface
        return curve
    @classmethod
    def from_plane(cls, plane, box):
        """Construct a surface from a plane.
        Parameters
        ----------
        plane : :class:`compas.geometry.Plane`
            The plane.
        box : :class:`compas.geometry.Box`
            A box defining the extents of the surface.
        Returns
        -------
        :class:`~compas_rhino.geometry.RhinoSurface`
        """
        plane = plane_to_rhino(plane)
        box = Rhino.Geometry.BoundingBox(box.xmin, box.ymin, box.zmin, box.xmax, box.ymax, box.zmax)
        rhino_surface = Rhino.Geometry.PlaneSurface.CreateThroughBox(plane, box)
        return cls.from_rhino(rhino_surface)
    @classmethod
    def from_frame(cls, frame, u_interval, v_interval):
        """Creates a planar surface from a frame and parametric domain information.
        Parameters
        ----------
        frame : :class:`~compas.geometry.Frame`
            A frame with point at the center of the wanted planar surface and
            x and y axes the direction of u and v respectively.
        u_interval : tuple(float, float)
            The parametric domain of the U parameter. u_interval[0] => u_interval[1].
        v_interval : tuple(float, float)
            The parametric domain of the V parameter. v_interval[0] => v_interval[1].
        Returns
        -------
        :class:`compas_rhino.geometry.surface.RhinoSurface`
        Raises
        ------
        ValueError
            If the Rhino PlaneSurface constructor fails.
        """
        surface = Rhino.Geometry.PlaneSurface(
            frame_to_rhino_plane(frame),
            Rhino.Geometry.Interval(*u_interval),
            Rhino.Geometry.Interval(*v_interval),
        )
        if not surface:
            msg = "Failed creating PlaneSurface from frame:{} u_interval:{} v_interval:{}"
            raise ValueError(msg.format(frame, u_interval, v_interval))
        return cls.from_rhino(surface)
    # ==============================================================================
    # Conversions
    # ==============================================================================
    # ==============================================================================
    # Methods
    # ==============================================================================
    def copy(self):
        """Make an independent copy of the current surface.
        Returns
        -------
        :class:`~compas_rhino.geometry.RhinoSurface`
        """
        # Duplicate() copies the underlying Rhino geometry, so the copy is
        # independent of this instance.
        cls = type(self)
        surface = cls()
        surface.rhino_surface = self.rhino_surface.Duplicate()
        return surface
    def transform(self, T):
        """Transform this surface.
        The transformation is applied in place; nothing is returned.
        Parameters
        ----------
        T : :class:`~compas.geometry.Transformation`
            A COMPAS transformation.
        Returns
        -------
        None
        """
        self.rhino_surface.Transform(xform_to_rhino(T))
    def u_isocurve(self, u):
        """Compute the isoparametric curve at parameter u.
        Parameters
        ----------
        u : float
        Returns
        -------
        :class:`~compas_rhino.geometry.RhinoCurve`
        """
        # IsoCurve(1, u): curve with U held constant at `u` -- confirm against
        # the RhinoCommon Surface.IsoCurve direction convention.
        curve = self.rhino_surface.IsoCurve(1, u)
        return RhinoCurve.from_rhino(curve)
    def v_isocurve(self, v):
        """Compute the isoparametric curve at parameter v.
        Parameters
        ----------
        v : float
        Returns
        -------
        :class:`~compas_rhino.geometry.RhinoCurve`
        """
        curve = self.rhino_surface.IsoCurve(0, v)
        return RhinoCurve.from_rhino(curve)
    def point_at(self, u, v):
        """Compute a point on the surface.
        Parameters
        ----------
        u : float
        v : float
        Returns
        -------
        :class:`~compas.geometry.Point`
        """
        point = self.rhino_surface.PointAt(u, v)
        return point_to_compas(point)
    def curvature_at(self, u, v):
        """Compute the curvature at a point on the surface.
        Parameters
        ----------
        u : float
        v : float
        Returns
        -------
        tuple[[float, float, float], [float, float, float], float, [float, float, float], float, [float, float, float], float, float] | None
            A tuple containing the point, normal vector, maximum principal curvature value, maximum principal curvature direction,
            minimun principal curvature value, minimun principal curvature direction, gaussian curvature value and mean curvature
            value for the point at UV. None at failure.
        """
        surface_curvature = self.rhino_surface.CurvatureAt(u, v)
        # Implicitly returns None when CurvatureAt fails (no else branch).
        if surface_curvature:
            point, normal, kappa_u, direction_u, kappa_v, direction_v, gaussian, mean = surface_curvature
            cpoint = point_to_compas(point)
            cnormal = vector_to_compas(normal)
            cdirection_u = vector_to_compas(direction_u)
            cdirection_v = vector_to_compas(direction_v)
            return (cpoint, cnormal, kappa_u, cdirection_u, kappa_v, cdirection_v, gaussian, mean)
    def frame_at(self, u, v):
        """Compute the local frame at a point on the curve.
        Returns None implicitly if the underlying Rhino call fails.
        Parameters
        ----------
        u : float
        v : float
        Returns
        -------
        :class:`~compas.geometry.Frame`
        """
        result, plane = self.rhino_surface.FrameAt(u, v)
        if result:
            return plane_to_compas_frame(plane)
    # ==============================================================================
    # Methods continued
    # ==============================================================================
    def closest_point(self, point, return_parameters=False):
        """Compute the closest point on the curve to a given point.
        Returns None implicitly if the underlying Rhino call fails.
        Parameters
        ----------
        point : :class:`~compas.geometry.Point`
            The test point.
        return_parameters : bool, optional
            If True, return the UV parameters of the closest point as tuple in addition to the point location.
        Returns
        -------
        :class:`~compas.geometry.Point`
            If `return_parameters` is False.
        :class:`~compas.geometry.Point`, (float, float)
            If `return_parameters` is True.
        """
        result, u, v = self.rhino_surface.ClosestPoint(point_to_rhino(point))
        if not result:
            return
        # Re-evaluate the surface at the found parameters to get the point.
        point = self.point_at(u, v)
        if return_parameters:
            return point, (u, v)
        return point
    def aabb(self, precision=0.0, optimal=False):
        """Compute the axis aligned bounding box of the surface.
        Parameters
        ----------
        precision : float, optional
        optimal : float, optional
            Flag indicating that the box should be precise.
        Returns
        -------
        :class:`~compas.geometry.Box`
        """
        # NOTE(review): `precision` is accepted but never used; only `optimal`
        # is forwarded to GetBoundingBox. Confirm whether it can be removed
        # or should be wired through.
        box = self.rhino_surface.GetBoundingBox(optimal)
        return box_to_compas(Rhino.Geometry.Box(box))
    def intersections_with_curve(self, curve, tolerance=1e-3, overlap=1e-3):
        """Compute the intersections with a curve.
        Only point-like intersection events are returned; overlap (curve-on-surface)
        events are skipped.
        Parameters
        ----------
        curve : :class:`~compas.geometry.Curve`
        tolerance : float, optional
            Intersection tolerance.
        overlap : float, optional
            Tolerance used by Rhino to detect overlap events.
        Returns
        -------
        list[:class:`~compas.geometry.Point`]
        """
        intersections = Rhino.Geometry.Intersect.Intersection.CurveSurface(
            curve.rhino_curve, self.rhino_surface, tolerance, overlap
        )
        points = []
        for event in intersections:
            if event.IsPoint:
                point = point_to_compas(event.PointA)
                points.append(point)
        return points
/Netfoll_TL-2.0.1-py3-none-any.whl/netfoll_tl/client/auth.py | import getpass
import inspect
import os
import sys
import typing
import warnings
from .. import utils, helpers, errors, password as pwd_mod
from ..tl import types, functions, custom
from .._updates import SessionState
if typing.TYPE_CHECKING:
from .telegramclient import TelegramClient
class AuthMethods:
# region Public methods
def start(
self: 'TelegramClient',
phone: typing.Callable[[], str] = lambda: input('Please enter your phone (or bot token): '),
password: typing.Callable[[], str] = lambda: getpass.getpass('Please enter your password: '),
*,
bot_token: str = None,
force_sms: bool = False,
code_callback: typing.Callable[[], typing.Union[str, int]] = None,
first_name: str = 'New User',
last_name: str = '',
max_attempts: int = 3) -> 'TelegramClient':
"""
Starts the client (connects and logs in if necessary).
By default, this method will be interactive (asking for
user input if needed), and will handle 2FA if enabled too.
If the event loop is already running, this method returns a
coroutine that you should await on your own code; otherwise
the loop is ran until said coroutine completes.
Arguments
phone (`str` | `int` | `callable`):
The phone (or callable without arguments to get it)
to which the code will be sent. If a bot-token-like
string is given, it will be used as such instead.
The argument may be a coroutine.
password (`str`, `callable`, optional):
The password for 2 Factor Authentication (2FA).
This is only required if it is enabled in your account.
The argument may be a coroutine.
bot_token (`str`):
Bot Token obtained by `@BotFather <https://t.me/BotFather>`_
to log in as a bot. Cannot be specified with ``phone`` (only
one of either allowed).
force_sms (`bool`, optional):
Whether to force sending the code request as SMS.
This only makes sense when signing in with a `phone`.
code_callback (`callable`, optional):
A callable that will be used to retrieve the Telegram
login code. Defaults to `input()`.
The argument may be a coroutine.
first_name (`str`, optional):
The first name to be used if signing up. This has no
effect if the account already exists and you sign in.
last_name (`str`, optional):
Similar to the first name, but for the last. Optional.
max_attempts (`int`, optional):
How many times the code/password callback should be
retried or switching between signing in and signing up.
Returns
This `TelegramClient`, so initialization
can be chained with ``.start()``.
Example
.. code-block:: python
client = TelegramClient('anon', api_id, api_hash)
# Starting as a bot account
await client.start(bot_token=bot_token)
# Starting as a user account
await client.start(phone)
# Please enter the code you received: 12345
# Please enter your password: *******
# (You are now logged in)
# Starting using a context manager (this calls start()):
with client:
pass
"""
if code_callback is None:
def code_callback():
return input('Please enter the code you received: ')
elif not callable(code_callback):
raise ValueError(
'The code_callback parameter needs to be a callable '
'function that returns the code you received by Telegram.'
)
if not phone and not bot_token:
raise ValueError('No phone number or bot token provided.')
if phone and bot_token and not callable(phone):
raise ValueError('Both a phone and a bot token provided, '
'must only provide one of either')
coro = self._start(
phone=phone,
password=password,
bot_token=bot_token,
force_sms=force_sms,
code_callback=code_callback,
first_name=first_name,
last_name=last_name,
max_attempts=max_attempts
)
return (
coro if self.loop.is_running()
else self.loop.run_until_complete(coro)
)
    async def _start(
            self: 'TelegramClient', phone, password, bot_token, force_sms,
            code_callback, first_name, last_name, max_attempts):
        """Implementation behind `start()`: connect, then walk the
        interactive sign-in flow (code, then optional 2FA password)."""
        if not self.is_connected():
            await self.connect()
        # Rather than using `is_user_authorized`, use `get_me`. While this is
        # more expensive and needs to retrieve more data from the server, it
        # enables the library to warn users trying to login to a different
        # account. See #1172.
        me = await self.get_me()
        if me is not None:
            # The warnings here are on a best-effort and may fail.
            if bot_token:
                # bot_token's first part has the bot ID, but it may be invalid
                # so don't try to parse as int (instead cast our ID to string).
                if bot_token[:bot_token.find(':')] != str(me.id):
                    warnings.warn(
                        'the session already had an authorized user so it did '
                        'not login to the bot account using the provided '
                        'bot_token (it may not be using the user you expect)'
                    )
            elif phone and not callable(phone) and utils.parse_phone(phone) != me.phone:
                warnings.warn(
                    'the session already had an authorized user so it did '
                    'not login to the user account using the provided '
                    'phone (it may not be using the user you expect)'
                )
            # Already authorized; nothing more to do.
            return self
        if not bot_token:
            # Turn the callable into a valid phone number (or bot token)
            while callable(phone):
                value = phone()
                if inspect.isawaitable(value):
                    value = await value
                if ':' in value:
                    # Bot tokens have 'user_id:access_hash' format
                    bot_token = value
                    break
                phone = utils.parse_phone(value) or phone
        if bot_token:
            await self.sign_in(bot_token=bot_token)
            return self
        me = None
        attempts = 0
        two_step_detected = False
        await self.send_code_request(phone, force_sms=force_sms)
        # Retry loop for the login code; the for/while-else below fires
        # only when max_attempts is exhausted without a break.
        while attempts < max_attempts:
            try:
                value = code_callback()
                if inspect.isawaitable(value):
                    value = await value
                # Since sign-in with no code works (it sends the code)
                # we must double-check that here. Else we'll assume we
                # logged in, and it will return None as the User.
                if not value:
                    raise errors.PhoneCodeEmptyError(request=None)
                # Raises SessionPasswordNeededError if 2FA enabled
                me = await self.sign_in(phone, code=value)
                break
            except errors.SessionPasswordNeededError:
                two_step_detected = True
                break
            except (errors.PhoneCodeEmptyError,
                    errors.PhoneCodeExpiredError,
                    errors.PhoneCodeHashEmptyError,
                    errors.PhoneCodeInvalidError):
                print('Invalid code. Please try again.', file=sys.stderr)
            attempts += 1
        else:
            raise RuntimeError(
                '{} consecutive sign-in attempts failed. Aborting'
                .format(max_attempts)
            )
        if two_step_detected:
            if not password:
                raise ValueError(
                    "Two-step verification is enabled for this account. "
                    "Please provide the 'password' argument to 'start()'."
                )
            if callable(password):
                # Interactive password entry, also bounded by max_attempts.
                for _ in range(max_attempts):
                    try:
                        value = password()
                        if inspect.isawaitable(value):
                            value = await value
                        me = await self.sign_in(phone=phone, password=value)
                        break
                    except errors.PasswordHashInvalidError:
                        print('Invalid password. Please try again',
                              file=sys.stderr)
                else:
                    raise errors.PasswordHashInvalidError(request=None)
            else:
                me = await self.sign_in(phone=phone, password=password)
        # We won't reach here if any step failed (exit by exception)
        signed, name = 'Signed in successfully as', utils.get_display_name(me)
        try:
            print(signed, name)
        except UnicodeEncodeError:
            # Some terminals don't support certain characters
            print(signed, name.encode('utf-8', errors='ignore')
                  .decode('ascii', errors='ignore'))
        return self
def _parse_phone_and_hash(self, phone, phone_hash):
"""
Helper method to both parse and validate phone and its hash.
"""
phone = utils.parse_phone(phone) or self._phone
if not phone:
raise ValueError(
'Please make sure to call send_code_request first.'
)
phone_hash = phone_hash or self._phone_code_hash.get(phone, None)
if not phone_hash:
raise ValueError('You also need to provide a phone_code_hash.')
return phone, phone_hash
async def sign_in(
self: 'TelegramClient',
phone: str = None,
code: typing.Union[str, int] = None,
*,
password: str = None,
bot_token: str = None,
phone_code_hash: str = None) -> 'typing.Union[types.User, types.auth.SentCode]':
"""
Logs in to Telegram to an existing user or bot account.
You should only use this if you are not authorized yet.
This method will send the code if it's not provided.
.. note::
In most cases, you should simply use `start()` and not this method.
Arguments
phone (`str` | `int`):
The phone to send the code to if no code was provided,
or to override the phone that was previously used with
these requests.
code (`str` | `int`):
The code that Telegram sent. Note that if you have sent this
code through the application itself it will immediately
expire. If you want to send the code, obfuscate it somehow.
If you're not doing any of this you can ignore this note.
password (`str`):
2FA password, should be used if a previous call raised
``SessionPasswordNeededError``.
bot_token (`str`):
Used to sign in as a bot. Not all requests will be available.
This should be the hash the `@BotFather <https://t.me/BotFather>`_
gave you.
phone_code_hash (`str`, optional):
The hash returned by `send_code_request`. This can be left as
`None` to use the last hash known for the phone to be used.
Returns
The signed in user, or the information about
:meth:`send_code_request`.
Example
.. code-block:: python
phone = '+34 123 123 123'
await client.sign_in(phone) # send code
code = input('enter code: ')
await client.sign_in(phone, code)
"""
me = await self.get_me()
if me:
return me
if phone and not code and not password:
return await self.send_code_request(phone)
elif code:
phone, phone_code_hash = \
self._parse_phone_and_hash(phone, phone_code_hash)
# May raise PhoneCodeEmptyError, PhoneCodeExpiredError,
# PhoneCodeHashEmptyError or PhoneCodeInvalidError.
request = functions.auth.SignInRequest(
phone, phone_code_hash, str(code)
)
elif password:
pwd = await self(functions.account.GetPasswordRequest())
request = functions.auth.CheckPasswordRequest(
pwd_mod.compute_check(pwd, password)
)
elif bot_token:
request = functions.auth.ImportBotAuthorizationRequest(
flags=0, bot_auth_token=bot_token,
api_id=self.api_id, api_hash=self.api_hash
)
else:
raise ValueError(
'You must provide a phone and a code the first time, '
'and a password only if an RPCError was raised before.'
)
try:
result = await self(request)
except errors.PhoneCodeExpiredError:
self._phone_code_hash.pop(phone, None)
raise
if isinstance(result, types.auth.AuthorizationSignUpRequired):
# Emulate pre-layer 104 behaviour
self._tos = result.terms_of_service
raise errors.PhoneNumberUnoccupiedError(request=request)
return await self._on_login(result.user)
    async def sign_up(
            self: 'TelegramClient',
            code: typing.Union[str, int],
            first_name: str,
            last_name: str = '',
            *,
            phone: str = None,
            phone_code_hash: str = None) -> 'types.User':
        """
        This method can no longer be used, and will immediately raise a ``ValueError``.
        See `issue #4050 <https://github.com/LonamiWebs/Telethon/issues/4050>`_ for context.
        """
        # The parameters are kept only so existing call sites fail with this
        # explicit error instead of a TypeError about the signature.
        raise ValueError('Third-party applications cannot sign up for Telegram. See https://github.com/LonamiWebs/Telethon/issues/4050 for details')
    async def _on_login(self, user):
        """
        Callback called whenever the login or sign up process completes.
        Returns the input user parameter.
        """
        # Cache our own identity so the entity cache can resolve "self".
        self._mb_entity_cache.set_self_user(user.id, user.bot, user.access_hash)
        self._authorized = True
        # Seed the message box with the server's current update state so the
        # client does not try to catch up on updates from before this login.
        state = await self(functions.updates.GetStateRequest())
        self._message_box.load(SessionState(0, 0, 0, state.pts, state.qts, int(state.date.timestamp()), state.seq, 0), [])
        return user
    async def send_code_request(
            self: 'TelegramClient',
            phone: str,
            *,
            force_sms: bool = False,
            _retry_count: int = 0) -> 'types.auth.SentCode':
        """
        Sends the Telegram code needed to login to the given phone number.

        Arguments
            phone (`str` | `int`):
                The phone to which the code will be sent.
            force_sms (`bool`, optional):
                Whether to force sending as SMS. This has been deprecated.
                See `issue #4050 <https://github.com/LonamiWebs/Telethon/issues/4050>`_ for context.
            _retry_count (`int`, internal):
                Bounds the recursive retries below (at most 3 attempts).

        Returns
            An instance of :tl:`SentCode`.

        Example
            .. code-block:: python

                phone = '+34 123 123 123'
                sent = await client.send_code_request(phone)
                print(sent)
        """
        if force_sms:
            warnings.warn('force_sms has been deprecated and no longer works')
            force_sms = False
        result = None
        phone = utils.parse_phone(phone) or self._phone
        phone_hash = self._phone_code_hash.get(phone)
        if not phone_hash:
            # No code was requested for this phone yet: send a fresh one.
            try:
                result = await self(functions.auth.SendCodeRequest(
                    phone, self.api_id, self.api_hash, types.CodeSettings()))
            except errors.AuthRestartError:
                # Server asked us to restart auth; retry a bounded number of times.
                if _retry_count > 2:
                    raise
                return await self.send_code_request(
                    phone, force_sms=force_sms, _retry_count=_retry_count+1)
            # TODO figure out when/if/how this can happen
            if isinstance(result, types.auth.SentCodeSuccess):
                raise RuntimeError('logged in right after sending the code')
            # If we already sent a SMS, do not resend the code (hash may be empty)
            if isinstance(result.type, types.auth.SentCodeTypeSms):
                force_sms = False
            # phone_code_hash may be empty, if it is, do not save it (#1283)
            if result.phone_code_hash:
                self._phone_code_hash[phone] = phone_hash = result.phone_code_hash
        else:
            # A code was already sent for this phone: ask for a resend instead.
            force_sms = True
        self._phone = phone
        if force_sms:
            try:
                result = await self(
                    functions.auth.ResendCodeRequest(phone, phone_hash))
            except errors.PhoneCodeExpiredError:
                # The old hash expired; drop it and request a brand new code.
                if _retry_count > 2:
                    raise
                self._phone_code_hash.pop(phone, None)
                self._log[__name__].info(
                    "Phone code expired in ResendCodeRequest, requesting a new code"
                )
                return await self.send_code_request(
                    phone, force_sms=False, _retry_count=_retry_count+1)
            if isinstance(result, types.auth.SentCodeSuccess):
                raise RuntimeError('logged in right after resending the code')
            self._phone_code_hash[phone] = result.phone_code_hash
        return result
async def qr_login(self: 'TelegramClient', ignored_ids: typing.List[int] = None) -> custom.QRLogin:
"""
Initiates the QR login procedure.
Note that you must be connected before invoking this, as with any
other request.
It is up to the caller to decide how to present the code to the user,
whether it's the URL, using the token bytes directly, or generating
a QR code and displaying it by other means.
See the documentation for `QRLogin` to see how to proceed after this.
Arguments
ignored_ids (List[`int`]):
List of already logged-in user IDs, to prevent logging in
twice with the same user.
Returns
An instance of `QRLogin`.
Example
.. code-block:: python
def display_url_as_qr(url):
pass # do whatever to show url as a qr to the user
qr_login = await client.qr_login()
display_url_as_qr(qr_login.url)
# Important! You need to wait for the login to complete!
await qr_login.wait()
# If you have 2FA enabled, `wait` will raise `telethon.errors.SessionPasswordNeededError`.
# You should except that error and call `sign_in` with the password if this happens.
"""
qr_login = custom.QRLogin(self, ignored_ids or [])
await qr_login.recreate()
return qr_login
    async def log_out(self: 'TelegramClient') -> bool:
        """
        Logs out Telegram and deletes the current ``*.session`` file.
        The client is unusable after logging out and a new instance should be created.

        Returns
            `True` if the operation was successful.

        Example
            .. code-block:: python
                # Note: you will need to login again!
                await client.log_out()
        """
        try:
            await self(functions.auth.LogOutRequest())
        except errors.RPCError:
            # Server rejected the logout; local state is left untouched.
            return False
        # Clear cached identity and authorization, then drop the connection.
        self._mb_entity_cache.set_self_user(None, None, None)
        self._authorized = False
        await self.disconnect()
        # Remove the on-disk session; this client must not be reused.
        self.session.delete()
        self.session = None
        return True
async def edit_2fa(
self: 'TelegramClient',
current_password: str = None,
new_password: str = None,
*,
hint: str = '',
email: str = None,
email_code_callback: typing.Callable[[int], str] = None) -> bool:
"""
Changes the 2FA settings of the logged in user.
Review carefully the parameter explanations before using this method.
Note that this method may be *incredibly* slow depending on the
prime numbers that must be used during the process to make sure
that everything is safe.
Has no effect if both current and new password are omitted.
Arguments
current_password (`str`, optional):
The current password, to authorize changing to ``new_password``.
Must be set if changing existing 2FA settings.
Must **not** be set if 2FA is currently disabled.
Passing this by itself will remove 2FA (if correct).
new_password (`str`, optional):
The password to set as 2FA.
If 2FA was already enabled, ``current_password`` **must** be set.
Leaving this blank or `None` will remove the password.
hint (`str`, optional):
Hint to be displayed by Telegram when it asks for 2FA.
Leaving unspecified is highly discouraged.
Has no effect if ``new_password`` is not set.
email (`str`, optional):
Recovery and verification email. If present, you must also
set `email_code_callback`, else it raises ``ValueError``.
email_code_callback (`callable`, optional):
If an email is provided, a callback that returns the code sent
to it must also be set. This callback may be asynchronous.
It should return a string with the code. The length of the
code will be passed to the callback as an input parameter.
If the callback returns an invalid code, it will raise
``CodeInvalidError``.
Returns
`True` if successful, `False` otherwise.
Example
.. code-block:: python
# Setting a password for your account which didn't have
await client.edit_2fa(new_password='I_<3_Telethon')
# Removing the password
await client.edit_2fa(current_password='I_<3_Telethon')
"""
if new_password is None and current_password is None:
return False
if email and not callable(email_code_callback):
raise ValueError('email present without email_code_callback')
pwd = await self(functions.account.GetPasswordRequest())
pwd.new_algo.salt1 += os.urandom(32)
assert isinstance(pwd, types.account.Password)
if not pwd.has_password and current_password:
current_password = None
if current_password:
password = pwd_mod.compute_check(pwd, current_password)
else:
password = types.InputCheckPasswordEmpty()
if new_password:
new_password_hash = pwd_mod.compute_digest(
pwd.new_algo, new_password)
else:
new_password_hash = b''
try:
await self(functions.account.UpdatePasswordSettingsRequest(
password=password,
new_settings=types.account.PasswordInputSettings(
new_algo=pwd.new_algo,
new_password_hash=new_password_hash,
hint=hint,
email=email,
new_secure_settings=None
)
))
except errors.EmailUnconfirmedError as e:
code = email_code_callback(e.code_length)
if inspect.isawaitable(code):
code = await code
code = str(code)
await self(functions.account.ConfirmPasswordEmailRequest(code))
return True
    # endregion
    # region with blocks
    async def __aenter__(self):
        # ``async with client:`` starts (connects and logs in) the client.
        return await self.start()
    async def __aexit__(self, *args):
        # Leaving the ``async with`` block always disconnects the client.
        await self.disconnect()
    # Synchronous ``with`` support reuses helpers that drive the event loop.
    __enter__ = helpers._sync_enter
    __exit__ = helpers._sync_exit
# endregion | PypiClean |
// Sphinx theme helper: adds a button that collapses/expands the sidebar
// and remembers the chosen state in a 'sidebar' cookie.
$(function() {
  // global elements used by the functions.
  // the 'sidebarbutton' element is defined as global after its
  // creation, in the add_sidebar_button function
  var bodywrapper = $('.bodywrapper');
  var sidebar = $('.sphinxsidebar');
  var sidebarwrapper = $('.sphinxsidebarwrapper');
  // original margin-left of the bodywrapper and width of the sidebar
  // with the sidebar expanded
  var bw_margin_expanded = bodywrapper.css('margin-left');
  var ssb_width_expanded = sidebar.width();
  // margin-left of the bodywrapper and width of the sidebar
  // with the sidebar collapsed
  var bw_margin_collapsed = '.8em';
  var ssb_width_collapsed = '.8em';
  // colors used by the current theme
  var dark_color = '#4686A2';
  var light_color = '#4E9CB5';
  // true when the sidebar content is hidden (collapsed state)
  function sidebar_is_collapsed() {
    return sidebarwrapper.is(':not(:visible)');
  }
  // click handler for the sidebar button: flip between the two states
  function toggle_sidebar() {
    if (sidebar_is_collapsed())
      expand_sidebar();
    else
      collapse_sidebar();
  }
  // hide the sidebar content, shrink its column, widen the body,
  // and persist the state in the 'sidebar' cookie
  function collapse_sidebar() {
    sidebarwrapper.hide();
    sidebar.css('width', ssb_width_collapsed);
    bodywrapper.css('margin-left', bw_margin_collapsed);
    sidebarbutton.css({
        'margin-left': '0',
        'height': bodywrapper.height(),
        'border-radius': '0 5px 5px 0'
    });
    sidebarbutton.find('span').text('»');
    sidebarbutton.attr('title', _('Expand sidebar'));
    document.cookie = 'sidebar=collapsed';
  }
  // restore the expanded layout and persist the state in the cookie
  function expand_sidebar() {
    bodywrapper.css('margin-left', bw_margin_expanded);
    sidebar.css('width', ssb_width_expanded);
    sidebarwrapper.show();
    sidebarbutton.css({
        'margin-left': ssb_width_expanded-12,
        'height': bodywrapper.height(),
        'border-radius': '5px'
    });
    sidebarbutton.find('span').text('«');
    sidebarbutton.attr('title', _('Collapse sidebar'));
    //sidebarwrapper.css({'padding-top':
    //  Math.max(window.pageYOffset - sidebarwrapper.offset().top, 10)});
    document.cookie = 'sidebar=expanded';
  }
  // build the clickable collapse/expand button next to the sidebar
  function add_sidebar_button() {
    sidebarwrapper.css({
        'float': 'left',
        'margin-right': '0',
        'width': ssb_width_expanded - 28
    });
    // create the button
    sidebar.append(
        '<div id="sidebarbutton"><span>«</span></div>'
    );
    // NOTE(review): this local declaration shadows the outer, later-assigned
    // global 'sidebarbutton'; the handlers above use the outer one.
    var sidebarbutton = $('#sidebarbutton');
    // find the height of the viewport to center the '<<' in the page
    var viewport_height;
    if (window.innerHeight)
      viewport_height = window.innerHeight;
    else
      viewport_height = $(window).height();
    var sidebar_offset = sidebar.offset().top;
    var sidebar_height = Math.max(bodywrapper.height(), sidebar.height());
    sidebarbutton.find('span').css({
        'display': 'block',
        'position': 'fixed',
        'top': Math.min(viewport_height/2, sidebar_height/2 + sidebar_offset) - 10
    });
    sidebarbutton.click(toggle_sidebar);
    sidebarbutton.attr('title', _('Collapse sidebar'));
    sidebarbutton.css({
        'border-radius': '5px',
        'color': '#ffffff', //'#444444',
        'background-color': '#4E9CB5',
        'font-size': '1.2em',
        'cursor': 'pointer',
        'height': sidebar_height,
        'padding-top': '1px',
        'padding-left': '1px',
        'margin-left': ssb_width_expanded - 12
    });
    sidebarbutton.hover(
      function () {
          $(this).css('background-color', dark_color);
      },
      function () {
          $(this).css('background-color', light_color);
      }
    );
  }
  // re-apply the collapse state stored in the 'sidebar' cookie (if any)
  // NOTE(review): cookie entries after the first carry a leading space,
  // so only a leading 'sidebar' key is matched here — confirm intended.
  function set_position_from_cookie() {
    if (!document.cookie)
      return;
    var items = document.cookie.split(';');
    for(var k=0; k<items.length; k++) {
      var key_val = items[k].split('=');
      var key = key_val[0];
      if (key == 'sidebar') {
        var value = key_val[1];
        if ((value == 'collapsed') && (!sidebar_is_collapsed()))
          collapse_sidebar();
        else if ((value == 'expanded') && (sidebar_is_collapsed()))
          expand_sidebar();
      }
    }
  }
  add_sidebar_button();
  var sidebarbutton = $('#sidebarbutton');
  set_position_from_cookie();
});
/Nuitka-1.8.tar.gz/Nuitka-1.8/nuitka/build/inline_copy/lib/scons-3.1.2/SCons/Tool/PharLapCommon.py |
__revision__ = "src/engine/SCons/Tool/PharLapCommon.py bee7caf9defd6e108fc2998a2520ddb36a967691 2019-12-17 02:07:09 bdeegan"
import os
import os.path
import SCons.Errors
import SCons.Util
import re
def getPharLapPath():
    """Reads the registry to find the installed path of the Phar Lap ETS
    development kit.

    Returns the normalized installation path as a string.

    Raises InternalError when no Windows registry module is available, and
    UserError if no installed version of Phar Lap can
    be found."""
    if not SCons.Util.can_read_reg:
        raise SCons.Errors.InternalError("No Windows registry module was found")
    try:
        k=SCons.Util.RegOpenKeyEx(SCons.Util.HKEY_LOCAL_MACHINE,
                                  'SOFTWARE\\Pharlap\\ETS')
        # NOTE(review): ``type`` shadows the builtin; the value type is unused.
        val, type = SCons.Util.RegQueryValueEx(k, 'BaseDir')
        # The following is a hack...there is (not surprisingly)
        # an odd issue in the Phar Lap plug in that inserts
        # a bunch of junk data after the phar lap path in the
        # registry. We must trim it.
        idx=val.find('\0')
        if idx >= 0:
            val = val[:idx]
        return os.path.normpath(val)
    except SCons.Util.RegError:
        raise SCons.Errors.UserError("Cannot find Phar Lap ETS path in the registry. Is it installed properly?")
# Matches e.g. '#define ETS_VER 1010' in embkern.h; group 1 is the version.
REGEX_ETS_VER = re.compile(r'#define\s+ETS_VER\s+([0-9]+)')
def getPharLapVersion():
    """Returns the version of the installed ETS Tool Suite as a
    decimal number. This version comes from the ETS_VER #define in
    the embkern.h header. For example, '#define ETS_VER 1010' (which
    is what Phar Lap 10.1 defines) would cause this method to return
    1010. Phar Lap 9.1 does not have such a #define, but this method
    will return 910 as a default.

    Raises UserError if no installed version of Phar Lap can
    be found."""
    include_path = os.path.join(getPharLapPath(), os.path.normpath("include/embkern.h"))
    if not os.path.exists(include_path):
        raise SCons.Errors.UserError("Cannot find embkern.h in ETS include directory.\nIs Phar Lap ETS installed properly?")
    with open(include_path, 'r') as f:
        mo = REGEX_ETS_VER.search(f.read())
    if mo:
        return int(mo.group(1))
    # Default return for Phar Lap 9.1
    return 910
def addPharLapPaths(env):
    """Add the Phar Lap binary, include, and library paths to *env*.

    The paths are appended to the ENV dict's PATH/INCLUDE/LIB variables
    only if not already present, and the PHARLAP_PATH / PHARLAP_VERSION
    construction variables are set.

    Raises UserError (via getPharLapPath) if Phar Lap is not installed.
    """
    ph_path = getPharLapPath()

    try:
        env_dict = env['ENV']
    except KeyError:
        env_dict = {}
        env['ENV'] = env_dict
    SCons.Util.AddPathIfNotExists(env_dict, 'PATH',
                                  os.path.join(ph_path, 'bin'))
    SCons.Util.AddPathIfNotExists(env_dict, 'INCLUDE',
                                  os.path.join(ph_path, 'include'))
    SCons.Util.AddPathIfNotExists(env_dict, 'LIB',
                                  os.path.join(ph_path, 'lib'))
    SCons.Util.AddPathIfNotExists(env_dict, 'LIB',
                                  os.path.join(ph_path, os.path.normpath('lib/vclib')))

    # Reuse the path computed above instead of re-reading the registry
    # (the original called getPharLapPath() a second time here).
    env['PHARLAP_PATH'] = ph_path
    env['PHARLAP_VERSION'] = str(getPharLapVersion())
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4: | PypiClean |
/OBITools-1.2.13.tar.gz/OBITools-1.2.13/distutils.ext/obidistutils/serenity/pip/_vendor/requests/utils.py | import cgi
import codecs
import collections
import io
import os
import platform
import re
import sys
import socket
import struct
from . import __version__
from . import certs
from .compat import parse_http_list as _parse_list_header
from .compat import (quote, urlparse, bytes, str, OrderedDict, unquote, is_py2,
builtin_str, getproxies, proxy_bypass, urlunparse)
from .cookies import RequestsCookieJar, cookiejar_from_dict
from .structures import CaseInsensitiveDict
from .exceptions import InvalidURL
_hush_pyflakes = (RequestsCookieJar,)
NETRC_FILES = ('.netrc', '_netrc')
DEFAULT_CA_BUNDLE_PATH = certs.where()
def dict_to_sequence(d):
    """Return *d* as a sequence of key/value pairs.

    Mappings are unwrapped via ``.items()``; anything else is returned
    unchanged.
    """
    return d.items() if hasattr(d, 'items') else d
def super_len(o):
    """Best-effort length of *o*.

    Tries, in order: ``len(o)``, the ``.len`` attribute, ``os.fstat`` of
    ``o.fileno()``, and ``len(o.getvalue())`` (e.g. BytesIO/StringIO).
    Returns None when none of these apply.
    """
    if hasattr(o, '__len__'):
        return len(o)

    if hasattr(o, 'len'):
        return o.len

    if hasattr(o, 'fileno'):
        try:
            descriptor = o.fileno()
        except io.UnsupportedOperation:
            # In-memory file objects advertise fileno() but do not have one.
            pass
        else:
            return os.fstat(descriptor).st_size

    if hasattr(o, 'getvalue'):
        return len(o.getvalue())
def get_netrc_auth(url):
    """Returns the Requests tuple auth for a given url from netrc.

    Looks for a ``.netrc``/``_netrc`` file in the user's home directory and
    returns a ``(login, password)`` tuple for the URL's host, or None when
    no usable entry (or no netrc support) is found.
    """
    try:
        from netrc import netrc, NetrcParseError
        netrc_path = None
        for f in NETRC_FILES:
            try:
                loc = os.path.expanduser('~/{0}'.format(f))
            except KeyError:
                # os.path.expanduser can fail when $HOME is undefined and
                # getpwuid fails. See http://bugs.python.org/issue20164 &
                # https://github.com/kennethreitz/requests/issues/1846
                return
            if os.path.exists(loc):
                netrc_path = loc
                break
        # Abort early if there isn't one.
        if netrc_path is None:
            return
        ri = urlparse(url)
        # Strip port numbers from netloc
        host = ri.netloc.split(':')[0]
        try:
            _netrc = netrc(netrc_path).authenticators(host)
            if _netrc:
                # Return with login / password; fall back to the "account"
                # field (index 1) when the login field is empty.
                login_i = (0 if _netrc[0] else 1)
                return (_netrc[login_i], _netrc[2])
        except (NetrcParseError, IOError):
            # If there was a parsing error or a permissions issue reading the file,
            # we'll just skip netrc auth
            pass
    # AppEngine hackiness.
    except (ImportError, AttributeError):
        pass
def guess_filename(obj):
    """Return the basename of ``obj.name`` when it looks like a real file
    name; None for missing names or pseudo-names such as ``<stdin>``."""
    name = getattr(obj, 'name', None)
    if not name:
        return None
    if name[0] == '<' or name[-1] == '>':
        # Pseudo file names like '<stdin>' carry no useful filename.
        return None
    return os.path.basename(name)
def from_key_val_list(value):
    """Convert *value* into an OrderedDict when it can represent key/value
    pairs.

    Returns None for None, and raises ValueError for scalar types that
    cannot be interpreted as 2-tuples.

    ::
        >>> from_key_val_list([('key', 'val')])
        OrderedDict([('key', 'val')])
        >>> from_key_val_list('string')
        ValueError: need more than 1 value to unpack
    """
    if isinstance(value, (str, bytes, bool, int)):
        raise ValueError('cannot encode objects that are not 2-tuples')
    return None if value is None else OrderedDict(value)
def to_key_val_list(value):
    """Take an object and test to see if it can be represented as a
    dictionary. If it can be, return a list of tuples, e.g.,

    ::
        >>> to_key_val_list([('key', 'val')])
        [('key', 'val')]
        >>> to_key_val_list({'key': 'val'})
        [('key', 'val')]
        >>> to_key_val_list('string')
        ValueError: cannot encode objects that are not 2-tuples.
    """
    # ``collections.Mapping`` was a deprecated alias removed in Python 3.10;
    # the ABC lives in ``collections.abc``.
    from collections.abc import Mapping

    if value is None:
        return None
    if isinstance(value, (str, bytes, bool, int)):
        raise ValueError('cannot encode objects that are not 2-tuples')
    if isinstance(value, Mapping):
        value = value.items()
    return list(value)
# From mitsuhiko/werkzeug (used with permission).
def parse_list_header(value):
    """Parse a comma-separated list header (RFC 2068 Section 2) into a list.

    Elements may be quoted strings (which may themselves contain commas);
    surrounding quotes are stripped from the parsed items.  Items may
    appear multiple times and case is preserved.

    >>> parse_list_header('token, "quoted value"')
    ['token', 'quoted value']

    :param value: a string with a list header.
    :return: :class:`list`
    """
    items = []
    for raw in _parse_list_header(value):
        if raw[:1] == raw[-1:] == '"':
            raw = unquote_header_value(raw[1:-1])
        items.append(raw)
    return items
# From mitsuhiko/werkzeug (used with permission).
def parse_dict_header(value):
    """Parse a ``key=value`` list header (RFC 2068 Section 2) into a dict.

    Keys without a value map to `None`; quoted values are unquoted.

    >>> sorted(parse_dict_header('foo="is a fish", bar="as well"').items())
    [('bar', 'as well'), ('foo', 'is a fish')]
    >>> parse_dict_header('key_without_value')
    {'key_without_value': None}

    :param value: a string with a dict header.
    :return: :class:`dict`
    """
    parsed = {}
    for item in _parse_list_header(value):
        if '=' not in item:
            parsed[item] = None
            continue
        key, val = item.split('=', 1)
        if val[:1] == val[-1:] == '"':
            val = unquote_header_value(val[1:-1])
        parsed[key] = val
    return parsed
# From mitsuhiko/werkzeug (used with permission).
def unquote_header_value(value, is_filename=False):
    r"""Unquote a header value the way browsers do (reversal of
    :func:`quote_header_value`); this is deliberately not strict RFC
    unquoting.

    :param value: the header value to unquote.
    """
    if not (value and value[0] == value[-1] == '"'):
        return value

    # this is not the real unquoting, but fixing this so that the
    # RFC is met will result in bugs with internet explorer and
    # probably some other browsers as well.  IE for example is
    # uploading files with "C:\foo\bar.txt" as filename
    inner = value[1:-1]

    # A filename that starts like a UNC path is returned as-is: collapsing
    # the leading double backslash would corrupt the path (#458).
    if is_filename and inner[:2] == '\\\\':
        return inner
    return inner.replace('\\\\', '\\').replace('\\"', '"')
def dict_from_cookiejar(cj):
    """Return a name -> value dict for all cookies in the jar.

    :param cj: CookieJar object to extract cookies from.
    """
    # Later cookies with the same name overwrite earlier ones, exactly as
    # the original accumulation loop did.
    return dict((cookie.name, cookie.value) for cookie in cj)
def add_dict_to_cookiejar(cj, cookie_dict):
    """Returns a CookieJar from a key/value dictionary.

    :param cj: CookieJar to insert cookies into.
    :param cookie_dict: Dict of key/values to insert into CookieJar.
    """
    # Build a throwaway jar from the dict and merge it into the target jar.
    cj2 = cookiejar_from_dict(cookie_dict)
    cj.update(cj2)
    return cj
def get_encodings_from_content(content):
    """Return every encoding declared in *content*.

    Checks ``<meta charset=...>`` attributes, ``http-equiv`` content
    pragmas, and XML declarations, in that order.

    :param content: bytestring to extract encodings from.
    """
    finders = (
        re.compile(r'<meta.*?charset=["\']*(.+?)["\'>]', flags=re.I),
        re.compile(r'<meta.*?content=["\']*;?charset=(.+?)["\'>]', flags=re.I),
        re.compile(r'^<\?xml.*?encoding=["\']*(.+?)["\'>]'),
    )
    found = []
    for finder in finders:
        found.extend(finder.findall(content))
    return found
def _parse_content_type_header(header):
    """Split a Content-Type header into ``(value, params_dict)``.

    Lightweight replacement for ``cgi.parse_header``: the ``cgi`` module is
    deprecated since Python 3.11 (PEP 594) and removed in 3.13.  Parameter
    keys are lower-cased and whitespace-stripped; values keep any quotes
    (the caller strips them).
    """
    tokens = header.split(';')
    content_type, raw_params = tokens[0].strip(), tokens[1:]
    params = {}
    for param in raw_params:
        key, _, val = param.strip().partition('=')
        if key:
            params[key.lower()] = val
    return content_type, params


def get_encoding_from_headers(headers):
    """Returns encodings from given HTTP Header Dict.

    :param headers: dictionary to extract encoding from.
    """
    content_type = headers.get('content-type')
    if not content_type:
        return None
    content_type, params = _parse_content_type_header(content_type)
    if 'charset' in params:
        # Charset values are frequently quoted; strip surrounding quotes.
        return params['charset'].strip("'\"")
    if 'text' in content_type:
        # RFC 2616 default charset for text/* when none is declared.
        return 'ISO-8859-1'
def stream_decode_response_unicode(iterator, r):
    """Stream decodes a iterator."""
    if r.encoding is None:
        # Unknown encoding: pass raw chunks through untouched.
        for chunk in iterator:
            yield chunk
        return
    # Incremental decoder keeps partial multi-byte sequences across chunks.
    decoder = codecs.getincrementaldecoder(r.encoding)(errors='replace')
    for chunk in iterator:
        decoded = decoder.decode(chunk)
        if decoded:
            yield decoded
    tail = decoder.decode(b'', final=True)
    if tail:
        yield tail
def iter_slices(string, slice_length):
    """Iterate over slices of a string."""
    # Final slice may be shorter than slice_length.
    for start in range(0, len(string), slice_length):
        yield string[start:start + slice_length]
def get_unicode_from_response(r):
    """Returns the requested content back in unicode.

    :param r: Response object to get unicode content from.

    Tried:
    1. charset from content-type
    2. fall back and replace all unicode characters
    """
    failed_encodings = []
    # 1. charset advertised in the content-type header
    header_encoding = get_encoding_from_headers(r.headers)
    if header_encoding:
        try:
            return str(r.content, header_encoding)
        except UnicodeError:
            failed_encodings.append(header_encoding)
    # Fall back: replace undecodable bytes. When header_encoding is None,
    # str() raises TypeError and the raw content is returned unchanged.
    try:
        return str(r.content, header_encoding, errors='replace')
    except TypeError:
        return r.content
# The unreserved URI characters (RFC 3986)
UNRESERVED_SET = frozenset(
    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
    + "0123456789-._~")


def unquote_unreserved(uri):
    """Un-escape any percent-escape sequences in a URI that are unreserved
    characters. This leaves all reserved, illegal and non-ASCII bytes encoded.
    """
    parts = uri.split('%')
    # parts[0] has no leading escape; every later part starts right after a '%'.
    for i, part in enumerate(parts[1:], start=1):
        hex_pair = part[0:2]
        if len(hex_pair) != 2 or not hex_pair.isalnum():
            parts[i] = '%' + part
            continue
        try:
            char = chr(int(hex_pair, 16))
        except ValueError:
            raise InvalidURL("Invalid percent-escape sequence: '%s'" % hex_pair)
        if char in UNRESERVED_SET:
            parts[i] = char + part[2:]
        else:
            parts[i] = '%' + part
    return ''.join(parts)
def requote_uri(uri):
    """Re-quote the given URI.

    This function passes the given URI through an unquote/quote cycle to
    ensure that it is fully and consistently quoted.
    """
    # Reserved characters plus '%' are left alone; only illegal bytes get quoted.
    safe_chars = "!#$%&'()*+,/:;=?@[]~"
    return quote(unquote_unreserved(uri), safe=safe_chars)
def address_in_network(ip, net):
    """
    This function allows you to check if on IP belongs to a network subnet

    Example: returns True if ip = 192.168.1.1 and net = 192.168.1.0/24
             returns False if ip = 192.168.1.1 and net = 192.168.100.0/24
    """
    netaddr, bits = net.split('/')
    # Pack all three addresses as native-endian unsigned 32-bit ints.
    ip_int = struct.unpack('=L', socket.inet_aton(ip))[0]
    mask_int = struct.unpack('=L', socket.inet_aton(dotted_netmask(int(bits))))[0]
    net_int = struct.unpack('=L', socket.inet_aton(netaddr))[0]
    return (ip_int & mask_int) == (net_int & mask_int)
def dotted_netmask(mask):
    """
    Converts mask from /xx format to xxx.xxx.xxx.xxx

    Example: if mask is 24 function returns 255.255.255.0
    """
    host_bits = 32 - mask
    # All ones except the low host_bits bits.
    bits = 0xffffffff ^ ((1 << host_bits) - 1)
    return socket.inet_ntoa(struct.pack('>I', bits))
def is_ipv4_address(string_ip):
    """Return True if *string_ip* parses as a dotted IPv4 address."""
    try:
        socket.inet_aton(string_ip)
        return True
    except socket.error:
        return False
def is_valid_cidr(string_network):
    """Very simple check of the cidr format in no_proxy variable"""
    if string_network.count('/') != 1:
        return False
    addr, _, mask_str = string_network.partition('/')
    try:
        mask = int(mask_str)
    except ValueError:
        return False
    # Only prefix lengths 1..32 are accepted (no /0).
    if not 1 <= mask <= 32:
        return False
    try:
        socket.inet_aton(addr)
    except socket.error:
        return False
    return True
def should_bypass_proxies(url):
    """
    Returns whether we should bypass proxies or not.

    Consults the no_proxy environment variable first (CIDR match for IPv4
    hosts, hostname-suffix match otherwise), then the platform proxy
    settings via proxy_bypass.

    :param url: URL whose host is tested against the bypass rules.
    :rtype: bool
    """
    # Environment lookup is case-insensitive: try lowercase then uppercase.
    get_proxy = lambda k: os.environ.get(k) or os.environ.get(k.upper())
    # First check whether no_proxy is defined. If it is, check that the URL
    # we're getting isn't in the no_proxy list.
    no_proxy = get_proxy('no_proxy')
    netloc = urlparse(url).netloc
    if no_proxy:
        # We need to check whether we match here. We need to see if we match
        # the end of the netloc, both with and without the port.
        no_proxy = no_proxy.replace(' ', '').split(',')
        # Strip any ":port" suffix before the IPv4 test.
        ip = netloc.split(':')[0]
        if is_ipv4_address(ip):
            # IPv4 host: only CIDR entries in no_proxy are considered.
            for proxy_ip in no_proxy:
                if is_valid_cidr(proxy_ip):
                    if address_in_network(ip, proxy_ip):
                        return True
        else:
            # Hostname: suffix match against each no_proxy entry.
            for host in no_proxy:
                if netloc.endswith(host) or netloc.split(':')[0].endswith(host):
                    # The URL does match something in no_proxy, so we don't want
                    # to apply the proxies on this URL.
                    return True
    # If the system proxy settings indicate that this URL should be bypassed,
    # don't proxy.
    # The proxy_bypass function is incredibly buggy on OS X in early versions
    # of Python 2.6, so allow this call to fail. Only catch the specific
    # exceptions we've seen, though: this call failing in other ways can reveal
    # legitimate problems.
    try:
        bypass = proxy_bypass(netloc)
    except (TypeError, socket.gaierror):
        bypass = False
    if bypass:
        return True
    return False
def get_environ_proxies(url):
    """Return a dict of environment proxies."""
    # An empty mapping means "connect directly" for bypassed hosts.
    return {} if should_bypass_proxies(url) else getproxies()
def default_user_agent(name="python-requests"):
    """Return a string representing the default user agent."""
    implementation = platform.python_implementation()
    if implementation == 'PyPy':
        version_info = sys.pypy_version_info
        implementation_version = '%s.%s.%s' % (version_info.major,
                                               version_info.minor,
                                               version_info.micro)
        if version_info.releaselevel != 'final':
            implementation_version += version_info.releaselevel
    elif implementation in ('CPython', 'Jython', 'IronPython'):
        # For Jython/IronPython this is a complete guess.
        implementation_version = platform.python_version()
    else:
        implementation_version = 'Unknown'
    try:
        system = platform.system()
        release = platform.release()
    except IOError:
        system = 'Unknown'
        release = 'Unknown'
    return " ".join(['%s/%s' % (name, __version__),
                     '%s/%s' % (implementation, implementation_version),
                     '%s/%s' % (system, release)])
def default_headers():
    """Return the default request headers as a CaseInsensitiveDict."""
    headers = CaseInsensitiveDict()
    headers['User-Agent'] = default_user_agent()
    headers['Accept-Encoding'] = ', '.join(('gzip', 'deflate'))
    headers['Accept'] = '*/*'
    return headers
def parse_header_links(value):
    """Return a list of dicts parsed from a Link header.

    i.e. Link: <http:/.../front.jpeg>; rel=front; type="image/jpeg",<http://.../back.jpeg>; rel=back;type="image/jpeg"

    :param value: raw Link header value.
    :returns: list of dicts, each with a "url" key plus one key per parameter.
    """
    links = []
    replace_chars = " '\""
    for val in value.split(","):
        try:
            url, params = val.split(";", 1)
        except ValueError:
            url, params = val, ''
        link = {}
        link["url"] = url.strip("<> '\"")
        for param in params.split(";"):
            try:
                # BUGFIX: split on the first '=' only, so parameter values
                # containing '=' (e.g. title="a=b") are kept intact instead
                # of silently aborting the parameter loop. Also renamed the
                # inner variable so it no longer shadows the *value* argument.
                key, param_value = param.split("=", 1)
            except ValueError:
                break
            link[key.strip(replace_chars)] = param_value.strip(replace_chars)
        links.append(link)
    return links
# Null bytes; no need to recreate these on each call to guess_json_utf
_null = '\x00'.encode('ascii')  # encoding to ASCII for Python 3
_null2 = _null * 2
_null3 = _null * 3


def guess_json_utf(data):
    """Guess the UTF encoding of JSON bytes from BOMs and null-byte layout.

    :param data: raw JSON bytes.
    :returns: a codec name suitable for ``bytes.decode``, or None.
    """
    # JSON always starts with two ASCII characters, so detection is as
    # easy as counting the nulls and from their location and count
    # determine the encoding. Also detect a BOM, if present.
    sample = data[:4]
    # BUGFIX: the second constant here was codecs.BOM32_BE, a deprecated
    # alias for the UTF-16 big-endian BOM, so UTF-32-BE data with a BOM
    # was never detected. Use the proper UTF-32 BOM constants.
    if sample in (codecs.BOM_UTF32_LE, codecs.BOM_UTF32_BE):
        return 'utf-32'     # BOM included
    if sample[:3] == codecs.BOM_UTF8:
        return 'utf-8-sig'  # BOM included, MS style (discouraged)
    if sample[:2] in (codecs.BOM_UTF16_LE, codecs.BOM_UTF16_BE):
        return 'utf-16'     # BOM included
    nullcount = sample.count(_null)
    if nullcount == 0:
        return 'utf-8'
    if nullcount == 2:
        if sample[::2] == _null2:      # 1st and 3rd are null
            return 'utf-16-be'
        if sample[1::2] == _null2:     # 2nd and 4th are null
            return 'utf-16-le'
        # Did not detect 2 valid UTF-16 ascii-range characters
    if nullcount == 3:
        if sample[:3] == _null3:
            return 'utf-32-be'
        if sample[1:] == _null3:
            return 'utf-32-le'
        # Did not detect a valid UTF-32 ascii-range character
    return None
def prepend_scheme_if_needed(url, new_scheme):
    '''Given a URL that may or may not have a scheme, prepend the given scheme.
    Does not replace a present scheme with the one provided as an argument.'''
    parsed = urlparse(url, new_scheme)
    netloc = parsed.netloc
    path = parsed.path
    # urlparse is a finicky beast, and sometimes decides that there isn't a
    # netloc present. Assume that it's being over-cautious, and switch netloc
    # and path if urlparse decided there was no netloc.
    if not netloc:
        netloc, path = path, netloc
    return urlunparse((parsed.scheme, netloc, path,
                       parsed.params, parsed.query, parsed.fragment))
def get_auth_from_url(url):
    """Given a url with authentication components, extract them into a tuple of
    username,password."""
    parsed = urlparse(url)
    try:
        # unquote raises TypeError/AttributeError when a component is None.
        return (unquote(parsed.username), unquote(parsed.password))
    except (AttributeError, TypeError):
        return ('', '')
def to_native_string(string, encoding='ascii'):
    """
    Given a string object, regardless of type, returns a representation of that
    string in the native string type, encoding and decoding where necessary.
    This assumes ASCII unless told otherwise.
    """
    if isinstance(string, builtin_str):
        return string
    # Not native: bytes under py3 (decode), unicode under py2 (encode).
    if is_py2:
        return string.encode(encoding)
    return string.decode(encoding)
# Source: 125softNLP pysoftNLP/ner/kashgari/embeddings/stacked_embedding.py
# author: BrikerMan
# contact: eliyar917@gmail.com
# blog: https://eliyar.biz
# file: stacked_embedding.py
# time: 2019-05-23 09:18
import json
import pydoc
from typing import Union, Optional, Tuple, List, Dict
import numpy as np
import tensorflow as tf
from tensorflow.python import keras
import kashgari
from kashgari.embeddings.base_embedding import Embedding
from kashgari.layers import L
from kashgari.processors.base_processor import BaseProcessor
class StackedEmbedding(Embedding):
    """Embedding layer without pre-training, train embedding layer while training model.

    Concatenates the outputs of several child embeddings into one embed
    model; the first child's processor handles text processing.
    """

    @classmethod
    def _load_saved_instance(cls,
                             config_dict: Dict,
                             model_path: str,
                             tf_model: keras.Model):
        """Restore a StackedEmbedding from its saved info dict and trained model.

        Each child embedding is located by module/class name and restored
        first; the stacked embed model is then rebuilt from its serialized
        JSON and its weights copied from *tf_model* by layer name.
        """
        embeddings = []
        for embed_info in config_dict['embeddings']:
            embed_class = pydoc.locate(f"{embed_info['module']}.{embed_info['class_name']}")
            embedding: Embedding = embed_class._load_saved_instance(embed_info,
                                                                    model_path,
                                                                    tf_model)
            embeddings.append(embedding)
        instance = cls(embeddings=embeddings,
                       from_saved_model=True)
        # BUGFIX: removed two leftover debug print() calls that dumped the
        # embeddings list to stdout on every model load.
        embed_model_json_str = json.dumps(config_dict['embed_model'])
        instance.embed_model = keras.models.model_from_json(embed_model_json_str,
                                                            custom_objects=kashgari.custom_objects)
        # Load Weights from model
        for layer in instance.embed_model.layers:
            layer.set_weights(tf_model.get_layer(layer.name).get_weights())
        return instance

    def info(self):
        """Return the serializable info dict, including every child embedding's info."""
        info = super(StackedEmbedding, self).info()
        info['embeddings'] = [embed.info() for embed in self.embeddings]
        info['config'] = {}
        return info

    def __init__(self,
                 embeddings: List[Embedding],
                 processor: Optional[BaseProcessor] = None,
                 from_saved_model: bool = False):
        """
        Args:
            embeddings: child embeddings to stack; each must have a fixed
                integer ``sequence_length``.
            processor: optional processor passed to the base class; note the
                first child embedding's processor is assigned afterwards.
            from_saved_model: skip building the embed model when restoring.
        """
        task = kashgari.CLASSIFICATION
        if all(isinstance(embed.sequence_length, int) for embed in embeddings):
            sequence_length = [embed.sequence_length for embed in embeddings]
        else:
            raise ValueError('Need to set sequence length for all embeddings while using stacked embedding')
        # NOTE(review): embedding_size=100 is hard-coded; the concatenated
        # output width actually depends on the child embeddings -- confirm.
        super(StackedEmbedding, self).__init__(task=task,
                                               sequence_length=sequence_length[0],
                                               embedding_size=100,
                                               processor=processor,
                                               from_saved_model=from_saved_model)
        self.embeddings = embeddings
        self.processor = embeddings[0].processor
        if not from_saved_model:
            self._build_model()

    def _build_model(self, **kwargs):
        """Concatenate all child embed models into a single tf.keras.Model."""
        if self.embed_model is None and all(embed.embed_model is not None for embed in self.embeddings):
            layer_concatenate = L.Concatenate(name='layer_concatenate')
            inputs = []
            for embed in self.embeddings:
                inputs += embed.embed_model.inputs
            outputs = layer_concatenate([embed.embed_model.output for embed in self.embeddings])
            self.embed_model = tf.keras.Model(inputs, outputs)

    def analyze_corpus(self,
                       x: Union[Tuple[List[List[str]], ...], List[List[str]]],
                       y: Union[List[List[str]], List[str]]):
        """Let child embedding i analyze feature column x[i], then build the model."""
        for index in range(len(x)):
            self.embeddings[index].analyze_corpus(x[index], y)
        self._build_model()

    def process_x_dataset(self,
                          data: Tuple[List[List[str]], ...],
                          subset: Optional[List[int]] = None) -> Tuple[np.ndarray, ...]:
        """
        batch process feature data while training

        Args:
            data: target dataset, one sub-dataset per child embedding
            subset: subset index list

        Returns:
            vectorized feature tensor (flattened across child embeddings)
        """
        result = []
        for index, dataset in enumerate(data):
            x = self.embeddings[index].process_x_dataset(dataset, subset)
            if isinstance(x, tuple):
                result += list(x)
            else:
                result.append(x)
        return tuple(result)

    def process_y_dataset(self,
                          data: List[List[str]],
                          subset: Optional[List[int]] = None) -> np.ndarray:
        """Vectorize the labels via the first child embedding."""
        return self.embeddings[0].process_y_dataset(data, subset)
if __name__ == "__main__":
    pass
# Source: ATS_Library ats/common/assertion/assert_common.py
from ats.common.log.log import LogTest
TAG = "Assertion"
class Assertion(object):
    """Static assertion helpers that log every check through LogTest.

    Every assert_* method logs the comparison at debug level, returns
    silently on success, and on failure logs at error level and raises
    AssertionError(message).
    """

    @staticmethod
    def _check(passed, name, details, message):
        """Shared log-and-raise helper for all assertions.

        :param passed: truthy when the assertion holds
        :param name: assertion name used in the log text (e.g. "AssertEquals")
        :param details: pre-formatted description of the operands
        :param message: description passed to AssertionError on failure
        :return: None
        """
        LogTest.debug(TAG, "{}: {}".format(name, details))
        if passed:
            return
        LogTest.error(TAG, "{} Failed: {}".format(name, details))
        raise AssertionError(message)

    @staticmethod
    def assert_equal(actual, expected, message=None):
        """Assert that actual == expected."""
        Assertion._check(actual == expected, "AssertEquals",
                         "actual = {}, expected = {}".format(actual, expected), message)

    @staticmethod
    def assert_not_equal(actual, expected, message=None):
        """Assert that actual != expected."""
        Assertion._check(actual != expected, "AssertNotEquals",
                         "actual = {}, expected = {}".format(actual, expected), message)

    @staticmethod
    def assert_true(condition, message=None):
        """Assert that the condition is truthy."""
        Assertion._check(condition, "AssertTrue",
                         "condition = {}".format(condition), message)

    @staticmethod
    def assert_false(condition, message=None):
        """Assert that the condition is falsy."""
        Assertion._check(not condition, "AssertFalse",
                         "condition = {}".format(condition), message)

    @staticmethod
    def assert_in(member, container, message=None):
        """Assert that member is in container."""
        Assertion._check(member in container, "AssertIn",
                         "member = {}, container = {}".format(member, container), message)

    @staticmethod
    def assert_not_in(member, container, message=None):
        """Assert that member is not in container."""
        Assertion._check(member not in container, "AssertNotIn",
                         "member = {}, container = {}".format(member, container), message)

    @staticmethod
    def assert_is(var1, var2, message=None):
        """Assert that the two variables are the same object."""
        Assertion._check(var1 is var2, "AssertIs",
                         "var1 = {}, var2 = {}".format(var1, var2), message)

    @staticmethod
    def assert_is_not(var1, var2, message=None):
        """Assert that the two variables are not the same object."""
        Assertion._check(var1 is not var2, "AssertIsNot",
                         "var1 = {}, var2 = {}".format(var1, var2), message)

    @staticmethod
    def assert_is_none(var, message=None):
        """Assert that the variable is None."""
        Assertion._check(var is None, "AssertNone",
                         "var = {}".format(var), message)

    @staticmethod
    def assert_is_not_none(var, message=None):
        """Assert that the variable is not None."""
        Assertion._check(var is not None, "AssertNotNone",
                         "var = {}".format(var), message)

    @staticmethod
    def assert_is_instance(obj, cls, message=None):
        """Assert that obj is an instance of cls."""
        Assertion._check(isinstance(obj, cls), "AssertIsInstance",
                         "obj = {}, cls = {}".format(obj, cls), message)

    @staticmethod
    def assert_not_is_instance(obj, cls, message=None):
        """Assert that obj is not an instance of cls."""
        Assertion._check(not isinstance(obj, cls), "AssertNotIsInstance",
                         "obj = {}, cls = {}".format(obj, cls), message)
// Source: Congo portfolio vendor codemirror/addon/search/search.js
// Replace works a little oddly -- it will do the replace on the next
// Ctrl-G (or whatever is bound to findNext) press. You prevent a
// replace by making sure the match is no longer selected when hitting
// Ctrl-G.
(function() {
  // BUGFIX: the closing line of this IIFE had stray text ("| PypiClean")
  // fused onto it by a broken extraction step, which made the whole file a
  // syntax error; the garbage is removed below. Code is otherwise unchanged.

  // Build a CodeMirror overlay mode highlighting every match of `query`
  // (string or RegExp) with the "searching" style.
  function searchOverlay(query) {
    if (typeof query == "string") return {token: function(stream) {
      if (stream.match(query)) return "searching";
      stream.next();
      stream.skipTo(query.charAt(0)) || stream.skipToEnd();
    }};
    return {token: function(stream) {
      if (stream.match(query)) return "searching";
      while (!stream.eol()) {
        stream.next();
        if (stream.match(query, false)) break;
      }
    }};
  }
  // Per-editor search state: active query, overlay, and last match positions.
  function SearchState() {
    this.posFrom = this.posTo = this.query = null;
    this.overlay = null;
  }
  function getSearchState(cm) {
    return cm.state.search || (cm.state.search = new SearchState());
  }
  function getSearchCursor(cm, query, pos) {
    // Heuristic: if the query string is all lowercase, do a case insensitive search.
    return cm.getSearchCursor(query, pos, typeof query == "string" && query == query.toLowerCase());
  }
  // Use the dialog addon when available; fall back to window.prompt/confirm.
  function dialog(cm, text, shortText, f) {
    if (cm.openDialog) cm.openDialog(text, f);
    else f(prompt(shortText, ""));
  }
  function confirmDialog(cm, text, shortText, fs) {
    if (cm.openConfirm) cm.openConfirm(text, fs);
    else if (confirm(shortText)) fs[0]();
  }
  // "/re/flags" input becomes a RegExp (only the "i" flag is honored);
  // anything else is treated as a literal string.
  function parseQuery(query) {
    var isRE = query.match(/^\/(.*)\/([a-z]*)$/);
    return isRE ? new RegExp(isRE[1], isRE[2].indexOf("i") == -1 ? "" : "i") : query;
  }
  var queryDialog =
    'Search: <input type="text" style="width: 10em"/> <span style="color: #888">(Use /re/ syntax for regexp search)</span>';
  // Prompt for a query (unless one is active), install the highlight
  // overlay, and jump to the first match.
  function doSearch(cm, rev) {
    var state = getSearchState(cm);
    if (state.query) return findNext(cm, rev);
    dialog(cm, queryDialog, "Search for:", function(query) {
      cm.operation(function() {
        if (!query || state.query) return;
        state.query = parseQuery(query);
        cm.removeOverlay(state.overlay);
        state.overlay = searchOverlay(state.query);
        cm.addOverlay(state.overlay);
        state.posFrom = state.posTo = cm.getCursor();
        findNext(cm, rev);
      });
    });
  }
  // Select the next (or previous, when rev) match, wrapping around the
  // start/end of the document.
  function findNext(cm, rev) {cm.operation(function() {
    var state = getSearchState(cm);
    var cursor = getSearchCursor(cm, state.query, rev ? state.posFrom : state.posTo);
    if (!cursor.find(rev)) {
      cursor = getSearchCursor(cm, state.query, rev ? CodeMirror.Pos(cm.lastLine()) : CodeMirror.Pos(cm.firstLine(), 0));
      if (!cursor.find(rev)) return;
    }
    cm.setSelection(cursor.from(), cursor.to());
    state.posFrom = cursor.from(); state.posTo = cursor.to();
  });}
  function clearSearch(cm) {cm.operation(function() {
    var state = getSearchState(cm);
    if (!state.query) return;
    state.query = null;
    cm.removeOverlay(state.overlay);
  });}
  var replaceQueryDialog =
    'Replace: <input type="text" style="width: 10em"/> <span style="color: #888">(Use /re/ syntax for regexp search)</span>';
  var replacementQueryDialog = 'With: <input type="text" style="width: 10em"/>';
  var doReplaceConfirm = "Replace? <button>Yes</button> <button>No</button> <button>Stop</button>";
  // Interactive replace; with `all` set, replaces every match in a single
  // operation. "$1".."$9" in the replacement text refer to capture groups.
  function replace(cm, all) {
    dialog(cm, replaceQueryDialog, "Replace:", function(query) {
      if (!query) return;
      query = parseQuery(query);
      dialog(cm, replacementQueryDialog, "Replace with:", function(text) {
        if (all) {
          cm.operation(function() {
            for (var cursor = getSearchCursor(cm, query); cursor.findNext();) {
              if (typeof query != "string") {
                var match = cm.getRange(cursor.from(), cursor.to()).match(query);
                cursor.replace(text.replace(/\$(\d)/, function(_, i) {return match[i];}));
              } else cursor.replace(text);
            }
          });
        } else {
          clearSearch(cm);
          var cursor = getSearchCursor(cm, query, cm.getCursor());
          var advance = function() {
            var start = cursor.from(), match;
            if (!(match = cursor.findNext())) {
              cursor = getSearchCursor(cm, query);
              if (!(match = cursor.findNext()) ||
                  (start && cursor.from().line == start.line && cursor.from().ch == start.ch)) return;
            }
            cm.setSelection(cursor.from(), cursor.to());
            confirmDialog(cm, doReplaceConfirm, "Replace?",
                          [function() {doReplace(match);}, advance]);
          };
          var doReplace = function(match) {
            cursor.replace(typeof query == "string" ? text :
                           text.replace(/\$(\d)/, function(_, i) {return match[i];}));
            advance();
          };
          advance();
        }
      });
    });
  }
  // Public command bindings used by the standard search keymap.
  CodeMirror.commands.find = function(cm) {clearSearch(cm); doSearch(cm);};
  CodeMirror.commands.findNext = doSearch;
  CodeMirror.commands.findPrev = function(cm) {doSearch(cm, true);};
  CodeMirror.commands.clearSearch = clearSearch;
  CodeMirror.commands.replace = replace;
  CodeMirror.commands.replaceAll = function(cm) {replace(cm, true);};
})();
# Source: FASTrack FAST/motility.py
#Invitro-motility (Actin sliding via myosins) analysis python module
#Tural Aksel
import matplotlib
matplotlib.use('TkAgg')
import os
import sys
import time
import math
import re
import numpy as np
import numpy
import matplotlib.pyplot as py
import matplotlib.cm as cm
import scipy.io
import plotparams as plotparams
from numpy import ma
from scipy.ndimage import label
from scipy.ndimage.morphology import binary_fill_holes,binary_closing,binary_opening
from scipy import stats
from skimage import img_as_uint
from skimage.filters import thresholding,rank,threshold_otsu,gaussian_filter
from skimage.morphology import disk, square, rectangle, skeletonize, dilation
from skimage.morphology.watershed import watershed
from skimage.io import imread_collection
from imageio import imwrite
from scipy.optimize import leastsq
from scipy.stats import kde
import cv2
#Global variables/structures
# Pre-built skimage structuring elements shared by the image-processing code.
sqr_1 = square(1) #Square with a radius of 1 pixel
sqr_2 = square(2) #Square with a radius of 2 pixels
sqr_3 = square(3) #Square with a radius of 3 pixels
disk_1 = disk(1) #Disk with a radius of 1 pixel
disk_2 = disk(2) #Disk with a radius of 2 pixels
disk_3 = disk(3) #Disk with a radius of 3 pixels
#Very small numbers
ZERO = 1E-100 # presumably a numerical floor to avoid division by zero -- usage not shown in this chunk
#Utility functions
def make_N_colors(cmap_name, N):
    """Return N evenly spaced RGBA colors sampled from the named colormap."""
    colormap = cm.get_cmap(cmap_name, N)
    return colormap(np.arange(N))
def stack_to_tiffs(fname, frame_rate=1.0):
    '''
    Read and convert tiff stack file to individual files

    :param fname: path to the multi-frame tiff stack
    :param frame_rate: frames per second, used to synthesize elapsed times
    '''
    # Find the directory the tiff stack file is located
    abs_path = os.path.abspath(fname)
    head, tail = os.path.split(abs_path)
    base, ext = os.path.splitext(tail)
    # Make the new directory (whitespace collapsed to '_', '#' dropped)
    new_dir = head + '/' + ('_'.join(base.split())).replace('#', '')
    if not os.path.isdir(new_dir):
        os.mkdir(new_dir)
    # Read all the frames
    tiff_frames = imread_collection(fname)
    num_frames = len(tiff_frames)
    elapsed_time_ms = 0.0
    # BUGFIX: open the metadata file with a context manager so the handle is
    # closed even if imwrite raises part-way through the export loop.
    with open(new_dir + '/metadata.txt', 'w') as f:
        # Write out the individual image files (micro-manager style names)
        for i in range(num_frames):
            fout = new_dir + '/img_000000%03d' % (i) + '__000.tif'
            imwrite(fout, tiff_frames[i])
            # Write elapsed times
            f.write(' "ElapsedTime-ms": %d,\n' % (elapsed_time_ms))
            elapsed_time_ms += 1000 * 1.0 / frame_rate
#Functions for statistical analysis
def gaussian(X, amp, mu, stdev):
    '''
    Gaussian
    '''
    z = (X - mu) / stdev
    return amp * np.exp(-0.5 * z ** 2)
def fit_gaussian(bin_centers, bin_amps):
    '''
    Fit gaussian to a histogram
    '''
    def residuals(params):
        amp, mu, stdev = params
        return amp * np.exp(-(bin_centers - mu) ** 2 / (2 * stdev ** 2)) - bin_amps
    # Initial guess tuned for typical motility histograms (nm/s scale).
    initial_guess = [50, 700, 100]
    best_params, success = leastsq(residuals, initial_guess, maxfev=1000)
    return best_params[0], best_params[1], best_params[2]
def fit_length_velocity(length, velocity, fil_weights, weighted=False):
    '''
    Function for fitting a length velocity curve
    '''
    # One myosin head per 36 nm of filament length.
    myosin_density = 1.0 / 36.0
    Neff = length * myosin_density
    weights = fil_weights if weighted else np.ones(len(length))
    def weighted_residuals(params):
        max_vel, duty = params
        return weights * (max_vel * (1.0 - (1.0 - duty) ** Neff) - velocity)
    best_params, success = leastsq(weighted_residuals, [700, 0.001], maxfev=1000)
    residuals = np.array(weighted_residuals(best_params) / weights)
    return best_params[0], best_params[1], residuals, success
def length_velocity(length, max_vel, f):
    '''
    Uyeda's simple length-velocity relationship
    '''
    # Effective head count: one myosin per 36 nm of filament.
    Neff = length / 36.0
    return max_vel * (1.0 - (1.0 - f) ** Neff)
def coupling_velocity(length, max_vel, amp, tau):
    '''
    Coupling relationship with single exponential decay
    '''
    decay = np.exp(-length / tau)
    return max_vel - amp * decay
def fit_coupling_velocity(length, velocity, fil_weights, weighted=False):
    '''
    Function for fitting a length velocity curve
    '''
    weights = fil_weights if weighted else np.ones(len(length))
    def weighted_residuals(params):
        max_vel, amp, tau = params
        return weights * (max_vel - amp * np.exp(-length / tau) - velocity)
    best_params, success = leastsq(weighted_residuals, [700, 200, 500], maxfev=1000)
    residuals = np.array(weighted_residuals(best_params) / weights)
    return best_params[0], best_params[1], best_params[2], residuals, success
def bin_length_velocity(length, velocity, dx=100):
    '''
    Bin length vs. velocity profile
    '''
    # Bins are half-open (i*dx, (i+1)*dx]; empty bins are skipped. Note the
    # bin count is int(max/dx), matching the original binning behavior.
    binned = []
    num_bins = int(np.max(length) / dx)
    for i in np.arange(0, num_bins):
        in_bin = (length > i * dx) * (length <= (i + 1) * dx)
        if np.sum(in_bin) > 0:
            binned.append([np.mean(length[in_bin]), np.mean(velocity[in_bin])])
    return np.array(binned)
def contour2contour(contour1, contour2, fil_direction):
    '''
    Find the distance between two contours
    '''
    # Slide the shorter contour along the longer one and average the mean
    # point-to-point distance over every alignment offset.
    if len(contour1) > len(contour2):
        shorter, longer = contour2, contour1
    else:
        shorter, longer = contour1, contour2
    n_short = len(shorter)
    n_long = len(longer)
    num_offsets = n_long - n_short + 1
    total = 0
    for offset in range(num_offsets):
        window = longer[offset:offset + n_short, :][::fil_direction]
        diff = window - shorter
        total += np.mean(np.sqrt(np.sum(diff ** 2, axis=1)))
    return total / num_offsets
#Global functions
def vec_length(vec):
    '''
    Returns the euclidian distance of a numpy array
    '''
    # Row-wise Euclidean norm of an (N, D) array.
    return np.sqrt((vec ** 2).sum(axis=1))
'''
Frame link class
'''
class Link:
    """One frame-to-frame correspondence between two filament observations."""
    def __init__(self):
        # Frame numbers and per-frame filament labels of the two observations
        self.frame1_no = 0
        self.frame2_no = 0
        self.filament1_label = 0
        self.filament2_label = 0
        # Filament geometry in each frame: length, contour points, center of mass
        self.filament1_length = 0
        self.filament2_length = 0
        self.filament1_contour= []
        self.filament2_contour= []
        self.filament1_cm = []
        self.filament2_cm = []
        self.average_length = 0
        # Similarity scores between the two observations
        self.overlap_score = 0
        self.area_score = 0
        self.distance_score = 0
        # +1/-1 orientation flags -- presumably contour traversal and movement
        # direction; TODO confirm against wire_frame_links usage
        self.fil_direction = 1
        self.mov_direction = 1
        # Time between the two frames and the resulting instantaneous velocity
        self.dt = 0
        self.instant_velocity = 0
        # Chaining pointers used to assemble Links into Paths
        self.forward_link = None
        self.reverse_link = None
        self.direct_link = False
class Path:
    """A chain of Links describing one filament tracked across frames."""
    def __init__(self):
        # Ordered Links making up this path
        self.links = []
        self.first_frame_no = 0
        self.path_length = 0
        # Per-path filament length and velocity statistics
        self.ave_fil_length = 0
        self.ave_velocity = 0
        self.std_velocity = 0
        self.max_velocity = 0
        self.min_velocity = 0
        # True if the filament is classified as stuck (not moving)
        self.stuck = False
class Motility:
    def __init__(self):
        """Initialize tracking parameters, thresholds and result containers.

        Distances are in nm unless a comment says otherwise (dx is the
        pixel size in nm); times are in seconds.
        """
        self.elapsed_times = [] #Elapsed times for the movie: read from metadata.txt file generated by micro manager 1.4
        self.dt = 1.0 #Time difference between frames in seconds(s)
        self.dx = 80.65 #Pixel length in nm (from calibration 12/11/13)
        self.frame = None #Single frame
        self.frame1 = None #First frame
        self.frame2 = None #Second frame
        self.frame_links = [] #Container for the frame links
        self.min_velocity = 80 #Minimum average path velocity of the filament in nm/s (Default:80 nm/s)
        self.max_velocity = 25 #Maximum velocity of the filament in pixels/frame (Default:25)
        self.min_fil_length = 0 #Minimum filament length
        self.max_fil_length = 125 #Maximum filament length
        self.max_fil_width = 25 #Maximum filament width
        self.max_length_dif = 5 #Maximum length difference for the same filament in two different frames (Default:5)
        self.max_velocity_dif = 5 #Maximum velocity difference between end-end, center-center, front-front (Default:5)
        self.directory = '' #Directory where all the frames are
        self.header = '' #Header name for the image files
        self.tail = '' #Tail name for the image files
        self.num_frames = 0 #Total number of frames starting from 0
        self.norm_len_vel = [] #Corrected length-velocity after length dependent velocity fit
        self.full_len_vel = [] #Uncorrected (raw) length-velocity profile
        self.max_len_vel = [] #Keeps only the maximum instantaneous velocities for the paths
        self.corr_lens = [] #Correlation length information of the filaments
        #Paths
        self.paths = [] #Paths constructed from frame links
        self.path_img = None #Path image file
        #Default frame widths and heights
        self.width = 1002
        self.height = 1004
        #Cutoff values for accepting a candidate link (see wire_frame_links)
        self.overlap_score_cutoff = 0.4
        self.log_area_score_cutoff = 1.0
        self.dif_log_area_score_cutoff = 0.5
        #Force analysis parameter
        self.force_analysis = False
def min_length_filter(self, min_filament_length):
'''
Applies a minimum length filter (higher than or equal). Length is in nm.
'''
valid_length = np.nonzero(self.full_len_vel[:,0] >= min_filament_length)[0]
self.full_len_vel = self.full_len_vel[valid_length,:]
def read_metadata(self):
'''
Read metadata.txt file to retrieve elapsed time information
'''
fname = self.directory+'/metadata.txt'
#Read metadata file
if os.path.exists(fname):
f = open(fname,'r')
lines = f.readlines()
f.close()
filtered_lines = filter(lambda x:x.find('"ElapsedTime-ms"') > 0,lines)
#Elapsed times
self.elapsed_times = []
for line in filtered_lines:
m = re.search('ElapsedTime-ms":\s+(\d+),', line)
self.elapsed_times.append(float(m.group(1)))
#Elapsed times in seconds
self.elapsed_times = 0.001*np.array(self.elapsed_times)
#Sort the time array
self.elapsed_times = np.sort(self.elapsed_times)
def calc_persistence_len(self):
'''
Calculate length correlation
'''
self.final_corr_len = np.zeros(1000)
self.final_corr_weight = np.zeros(1000)
for corr_len in self.corr_lens:
if len(corr_len) == 0:
continue
new_corr_len = np.zeros(1000)
new_corr_weight = np.zeros(1000)
new_corr_len[np.arange(corr_len.shape[0],dtype=int)] =corr_len[:,1]
new_corr_weight[np.arange(corr_len.shape[0],dtype=int)] =corr_len[:,0]
self.final_corr_len += new_corr_len*new_corr_weight
self.final_corr_weight += new_corr_weight
#Finalize the arrays
valid = self.final_corr_weight > 0
self.final_corr_len[valid] = self.final_corr_len[valid]/self.final_corr_weight[valid]
self.final_corr_len = self.final_corr_len[valid]
self.final_corr_weight = self.final_corr_weight[valid]
    def wire_frame_links(self,depth=5):
        '''
        Wire frame links that are not connected to each other to create contiguous paths.

        For each dangling link (no forward link yet) ending at frame f1, search
        frames f1+1 .. f1+depth for a dangling chain start whose filament is
        close in space and similar in length, and insert a bridging (indirect)
        Link between the two chains.

        Parameters
        ----------
        depth : int
            Maximum number of frames a gap may span (default 5).
        '''
        num_frames = len(self.frame_links)
        for d in range(1,depth+1):
            for f1 in range(len(self.frame_links)):
                #Links at frame f1 whose chains have no continuation yet
                #NOTE(review): Python-2 filter() returns a list here (indexed below);
                #this code is not Python-3 safe as written
                possible_links = filter(lambda link:link.forward_link == None,self.frame_links[f1])
                for l1 in range(len(possible_links)):
                    link1 = possible_links[l1]
                    #Reconstruct the terminal filament of the dangling chain
                    new_filament1 = Filament()
                    new_filament1.contour = link1.filament2_contour
                    new_filament1.fil_length = link1.filament2_length
                    new_filament1.cm = link1.filament2_cm
                    new_filament1.time = link1.filament2_time
                    #Average distance score per time for link 1
                    avg_distance_score_1 = possible_links[l1].distance_score/possible_links[l1].dt
                    if f1+d < num_frames:
                        #Candidate chain starts d frames ahead: unclaimed reverse end,
                        #within reach (d*max_velocity) and of similar length
                        forward_links = filter(lambda link:link.reverse_link == None and np.sqrt(np.sum((link.filament1_cm - new_filament1.cm)**2)) < d*self.max_velocity and np.fabs(link.filament1_length - new_filament1.fil_length) < self.max_length_dif ,self.frame_links[f1+d])
                        for l2 in range(len(forward_links)):
                            link2 = forward_links[l2]
                            #Reconstruct the leading filament of the candidate chain
                            new_filament2 = Filament()
                            new_filament2.contour = link2.filament1_contour
                            new_filament2.fil_length = link2.filament1_length
                            new_filament2.cm = link2.filament1_cm
                            new_filament2.time = link2.filament1_time
                            #Average distance score per time for link 2
                            avg_distance_score_2 = forward_links[l2].distance_score/forward_links[l2].dt
                            #Average distance score for the two links
                            avg_distance_score = 0.5*(avg_distance_score_1+avg_distance_score_2)
                            #Calculate time difference
                            dt = new_filament2.time - new_filament1.time
                            #Calculate similarity scores
                            overlap_score,area_score,distance_score,fil_direction,mov_direction = new_filament1.sim_score(new_filament2)
                            #Accept the bridge only if similarity passes the cutoffs and the
                            #implied velocity stays within a factor of 2 of the chain average
                            if np.fabs(overlap_score) > self.overlap_score_cutoff and np.log10(area_score) < self.log_area_score_cutoff and distance_score/dt < 2*avg_distance_score and distance_score/dt > 0.5*avg_distance_score:
                                #Build the bridging link from chain-end to chain-start
                                new_link = Link()
                                new_link.frame1_no = link1.frame2_no
                                new_link.frame2_no = link2.frame1_no
                                new_link.filament1_label = link1.filament2_label
                                new_link.filament2_label = link2.filament1_label
                                new_link.filament1_cm = link1.filament2_cm
                                new_link.filament2_cm = link2.filament1_cm
                                new_link.filament1_length = link1.filament2_length
                                new_link.filament2_length = link2.filament1_length
                                new_link.filament1_contour = link1.filament2_contour
                                new_link.filament2_contour = link2.filament1_contour
                                new_link.filament1_midpoint = link1.filament2_midpoint
                                new_link.filament2_midpoint = link2.filament1_midpoint
                                new_link.filament1_time = link1.filament2_time
                                new_link.filament2_time = link2.filament1_time
                                new_link.fil_direction = fil_direction
                                new_link.mov_direction = mov_direction
                                new_link.overlap_score = overlap_score
                                new_link.area_score = area_score
                                new_link.distance_score = distance_score
                                new_link.average_length = 0.5*(new_link.filament1_length+new_link.filament2_length)
                                new_link.instant_velocity = new_link.distance_score/dt
                                new_link.dt = dt
                                #This is not a direct connection
                                new_link.direct_link = False
                                #Add new link to frame links
                                self.frame_links[new_link.frame1_no].append(new_link)
                                #Make the link connections
                                link1.forward_link = new_link
                                link2.reverse_link = new_link
                                new_link.reverse_link = link1
                                new_link.forward_link = link2
    def read_frame_links(self):
        '''
        Read frame links previously saved as links.npy in the working directory.

        Returns
        -------
        bool
            True if the links were loaded into self.frame_links; False when
            analysis is forced, the file is missing, or the file was written
            by an incompatible old version of motility.
        '''
        if not self.force_analysis and os.path.exists(self.directory+'/links.npy'):
            try:
                self.frame_links = np.load(self.directory+'/links.npy')
            #If links.npy is the output of an old version of motility
            #NOTE(review): newer numpy raises ValueError (allow_pickle) on pickled
            #object arrays rather than ImportError - confirm against the numpy in use
            except ImportError:
                print'Movie analysed previously with an old version of motility. Links will be regenerated.'
                return False
            return True
        else:
            return False
    def reconstruct_skeleton_images(self):
        '''
        Reconstruct per-frame skeleton overlay images from the reduced
        skeleton representations stored in the frame links, annotate them
        with velocity arrows/labels, and composite each one onto the
        paths_2D.png background via ImageMagick's `composite` tool.
        Writes skeletons_%03d.png files into the working directory.
        '''
        #Check if paths_2D.png figure exists - if not do not construct images
        if not os.path.isfile(self.directory+'/paths_2D.png'):
            return
        #Filament size ratio relative to the 1002-pixel reference width
        ratio = self.width/1002.0
        for i in range(len(self.frame_links)):
            #Start with a frame
            new_frame = Frame()
            #Prepare the processed images
            #NOTE(review): np.bool is removed in numpy>=1.24 - confirm numpy version
            new_frame.img_skeletons = np.zeros((self.width,self.height),dtype=np.bool)
            #Rasterize each linked filament's contour pixels
            for link in self.frame_links[i]:
                new_frame.img_skeletons[link.filament1_contour[:,0],link.filament1_contour[:,1]] = True
            #Dilate skeletons for better visualization
            new_frame.img_skeletons = dilation(new_frame.img_skeletons,selem=disk(ratio*6))
            #Mask zero pixels so they render transparent
            new_frame.img_skeletons = ma.masked_where(new_frame.img_skeletons == 0, new_frame.img_skeletons)
            py.figure()
            py.imshow(new_frame.img_skeletons,cmap=cm.gray,interpolation='nearest',alpha=1.0)
            #Plot velocities and movement direction on skeleton images
            arrow_length = ratio*1.0
            for link in self.frame_links[i]:
                velocity = link.instant_velocity
                mp_1 = link.filament1_midpoint
                mp_2 = link.filament2_midpoint
                mp_diff = mp_2 - mp_1
                #Arrow from filament1 midpoint in the direction of motion
                py.arrow(mp_1[1],mp_1[0],arrow_length*mp_diff[1],arrow_length*mp_diff[0],color='r',head_width=ratio*20,head_length=ratio*30,alpha=1.0)
                py.text(mp_1[1],mp_1[0],"%.f"%(velocity),fontsize=10,color='k',alpha=1.0)
            #Hide axis ticks - the image is a pure overlay
            ax = py.gca()
            ax.xaxis.set_visible(False)
            ax.yaxis.set_visible(False)
            #Skeleton and paths image filename
            skeleton_fname = self.directory+'/skeletons_%03d.png'%(i)
            paths_fname = self.directory+'/paths_2D.png'
            py.savefig(skeleton_fname,dpi=400,transparent=True)
            py.close()
            #Combine with the path image (requires ImageMagick on PATH)
            os.system('composite -compose src-over '+skeleton_fname+' -alpha on '+paths_fname+' '+skeleton_fname)
def make_forward_links(self):
'''
Make the forward links for frame links
'''
#Traverse through all frame links starting from the last frame
#Make the forward link assignments for the frame links
for i in range(len(self.frame_links)-1,-1,-1):
for link in self.frame_links[i]:
prev_link = link.reverse_link
current_link = link
if link.forward_link == None:
while not prev_link == None:
prev_link.forward_link = current_link
current_link = prev_link
prev_link = prev_link.reverse_link
    def create_paths(self):
        '''
        Create paths from frame links.

        Each path collects the links of one chain, starting from the chain
        tail (a link with no forward link) and walking backwards, so
        path.links is ordered from latest link to earliest. Afterwards the
        sign of each link's instant_velocity is corrected for accumulated
        filament-direction flips along the path.
        '''
        self.paths = []
        for i in range(len(self.frame_links)-1,-1,-1):
            for link in self.frame_links[i]:
                new_path = Path()
                prev_link = link.reverse_link
                current_link = link
                #Only chain tails start a path; interior links are skipped
                if link.forward_link == None:
                    #Add the current link to the path
                    new_path.links.append(current_link)
                    #Walk the reverse chain, collecting every link
                    while not prev_link == None:
                        current_link = prev_link
                        #Add the current link to the path
                        new_path.links.append(current_link)
                        prev_link = prev_link.reverse_link
                if len(new_path.links) > 0:
                    self.paths.append(new_path)
        #Correct velocity signs - with respect to accumulated filament direction
        for path in self.paths:
            fil_direction = path.links[0].mov_direction
            for i in range(1,len(path.links)):
                #Accumulate direction flips along the chain
                fil_direction *= path.links[i-1].fil_direction
                mov_direction = fil_direction*path.links[i].mov_direction
                path.links[i].instant_velocity = path.links[i].instant_velocity*mov_direction
    def path_velocities(self,num_points=1):
        '''
        Calculate sliding-window average velocities over each path.

        Parameters
        ----------
        num_points : int
            Window size (number of links) for the running averages.

        Populates self.full_len_vel (every window) and self.max_len_vel
        (the fastest window per path), both as arrays with columns
        [avg length (nm), avg velocity (nm/s), std velocity, path length],
        sorted by filament length.

        NOTE(review): link.instant_velocity and link.average_length are
        scaled by self.dx IN PLACE below - calling this method twice would
        double-scale them. Confirm it is only invoked once per analysis.
        '''
        self.full_len_vel = []
        self.max_len_vel = []
        for path in self.paths:
            #Skip paths shorter than the averaging window
            if len(path.links) < num_points:
                continue
            #Check if the filament has moved or not
            mp_diff = path.links[-1].filament1_midpoint - path.links[0].filament2_midpoint
            time_diff = np.fabs(path.links[-1].filament1_time - path.links[0].filament2_time)
            dist = np.sqrt(np.sum(mp_diff**2))
            #Determine whether a filament is stuck (net speed below threshold)
            if self.dx*dist/time_diff < self.min_velocity:
                path.stuck = True
            #Convert filament length and velocities to nm and nm/s units (in place)
            for link in path.links:
                link.instant_velocity *= self.dx
                link.average_length *= self.dx
            #Complete path analysis
            array_vel = np.array([np.fabs(link.instant_velocity) for link in path.links])
            array_len = np.array([np.fabs(link.average_length) for link in path.links])
            path_length = len(array_vel)
            #Smooth velocity representation
            array_smooth = []
            for i in range(len(path.links)-num_points+1):
                #Average length
                ave_len = np.mean(array_len[i:i+num_points])
                #Stuck filaments are reported with zero velocity
                if path.stuck == True:
                    ave_vel = 0
                    std_vel = 0
                else:
                    ave_vel = np.mean(array_vel[i:i+num_points])
                    std_vel = np.std(array_vel[i:i+num_points])
                array_smooth.append([ave_len, ave_vel, std_vel, path_length])
                self.full_len_vel.append([ave_len, ave_vel, std_vel, path_length])
            #Pick the max velocity along the path
            array_smooth = np.array(array_smooth)
            max_i = np.argmax(array_smooth[:,1])
            self.max_len_vel.append(array_smooth[max_i,:])
        self.full_len_vel = np.array(self.full_len_vel)
        self.max_len_vel = np.array(self.max_len_vel)
        #If there is no path constructed - there won't be any velocity data
        if len(self.full_len_vel) == 0:
            return
        #Sort the lists based on filament length
        sort_i = np.argsort(self.full_len_vel[:,0])
        self.full_len_vel = self.full_len_vel[sort_i,:]
        sort_i = np.argsort(self.max_len_vel[:,0])
        self.max_len_vel = self.max_len_vel[sort_i,:]
    def make_frame_links(self):
        '''
        Make links between two adjacent frames (self.frame1 -> self.frame2).

        For every filament in frame1, candidate matches in frame2 are scored
        by overlap/area/distance similarity; at most one confident link per
        filament is kept, with conflicts on a frame2 filament resolved in
        favor of the lower overlap score. The accepted links are appended as
        one list to self.frame_links.
        '''
        #Storage for link candidates
        link_candidates_major = []
        #Storage for frame links between frame1 and frame2
        new_frame_links = []
        #Pick filaments in frame1 with length higher than min_fil_length
        #NOTE(review): Python-2 filter() returns lists here; not Python-3 safe
        self.frame1.filaments = filter(lambda filament:filament.fil_length > self.min_fil_length,self.frame1.filaments)
        self.frame2.filaments = filter(lambda filament:filament.fil_length > self.min_fil_length,self.frame2.filaments)
        #Reset filament labels
        self.frame1.reset_filament_labels()
        self.frame2.reset_filament_labels()
        #Time difference between frame1 and frame2
        if len(self.elapsed_times) > 0:
            self.dt = self.elapsed_times[self.frame2.frame_no] - self.elapsed_times[self.frame1.frame_no]
            frame1_time = self.elapsed_times[self.frame1.frame_no]
            frame2_time = self.elapsed_times[self.frame2.frame_no]
        else:
            #No metadata: fall back to a constant per-frame interval self.dt
            frame1_time = self.frame1.frame_no*self.dt
            frame2_time = self.frame2.frame_no*self.dt
        for i in range(len(self.frame1.filaments)):
            filament1 = self.frame1.filaments[i]
            #First create two link candidate list
            link_candidates = []
            #Look only the filaments that are close in space with similar lengths (within 5 pixels)
            frame2_filaments = filter(lambda filament:np.sqrt(np.sum((filament.cm - filament1.cm)**2)) < self.max_velocity and np.fabs(filament.fil_length - filament1.fil_length) < self.max_length_dif ,self.frame2.filaments)
            for j in range(len(frame2_filaments)):
                filament2 = frame2_filaments[j]
                #Calculate similarity score filament1 vs. filament2
                overlap_score,area_score,distance_score,fil_direction,mov_direction = filament1.sim_score(filament2)
                link_candidates.append([filament2.label,area_score,overlap_score,distance_score,fil_direction,mov_direction])
            link_candidates = np.array(link_candidates)
            num_candidates = len(link_candidates)
            if num_candidates > 0:
                #Sort candidate list based on overlap score (column 2), ascending
                sorted_i = np.argsort(link_candidates[:,2])
                link_candidates = link_candidates[sorted_i,:]
                #Take log scores
                area_score_list = link_candidates[:,1]
                log_area_score_list = np.log10(area_score_list)
                log_area_score_diff_list = log_area_score_list[1:] - log_area_score_list[:-1]
                overlap_score_list = link_candidates[:,2]
                distance_score_list = link_candidates[:,3]
                fil_direction_list = link_candidates[:,4]
                mov_direction_list = link_candidates[:,5]
                #Acceptance criteria: best candidate passes both cutoffs and, when
                #there are several candidates, clearly beats the runner-up
                if np.fabs(overlap_score_list[0]) > self.overlap_score_cutoff and log_area_score_list[0] < self.log_area_score_cutoff and ((num_candidates > 1 and log_area_score_diff_list[0] >= self.dif_log_area_score_cutoff) or (num_candidates == 1)):
                    #NOTE(review): `filament2`, `fil_direction` etc. below are the values
                    #from the LAST loop iteration, not the sorted-best candidate picked
                    #above - with multiple candidates the link attributes may not match
                    #the selected scores. Confirm whether this is intended.
                    new_link = Link()
                    new_link.frame1_no = filament1.frame_no
                    new_link.frame2_no = filament2.frame_no
                    new_link.filament1_label = filament1.label
                    new_link.filament2_label = filament2.label
                    new_link.filament1_cm = filament1.cm
                    new_link.filament2_cm = filament2.cm
                    new_link.filament1_length = filament1.fil_length
                    new_link.filament2_length = filament2.fil_length
                    new_link.filament1_contour = filament1.contour
                    new_link.filament2_contour = filament2.contour
                    new_link.filament1_midpoint = filament1.midpoint
                    new_link.filament2_midpoint = filament2.midpoint
                    new_link.filament1_time = frame1_time
                    new_link.filament2_time = frame2_time
                    new_link.fil_direction = fil_direction_list[0]
                    new_link.mov_direction = mov_direction_list[0]
                    new_link.overlap_score = overlap_score_list[0]
                    new_link.area_score = area_score_list[0]
                    new_link.distance_score = distance_score_list[0]
                    new_link.average_length = 0.5*(filament1.fil_length+filament2.fil_length)
                    new_link.instant_velocity = new_link.distance_score/self.dt
                    new_link.dt = self.dt
                    #This is a direct connection
                    new_link.direct_link = True
                    new_link.reverse_link = filament1.reverse_link
                    #Pick only one confident link per filament
                    filament1.forward_link = new_link
                    if filament2.reverse_link == None:
                        filament2.reverse_link = new_link
                    elif new_link.overlap_score < filament2.reverse_link.overlap_score:
                        #Pick up the existing reverse link for filament2
                        prev_fil_label = int(filament2.reverse_link.filament1_label)
                        prev_filament1 = self.frame1.filaments[prev_fil_label]
                        prev_filament1.forward_link = None
                        #Assign the reverse link for filament2
                        filament2.reverse_link = new_link
        #Put all the new links in the frame link container
        for i in range(len(self.frame1.filaments)):
            filament1 = self.frame1.filaments[i]
            if not filament1.forward_link == None:
                new_frame_links.append(filament1.forward_link)
        #Finally add the links to the big list
        self.frame_links.append(new_frame_links)
    def plot_2D_path_data(self, num_points, extra_fname=None):
        '''
        Plot the 2D path trajectories (arrows between consecutive midpoints)
        and write paths_2D.png; a second figure with mean-velocity labels is
        written to extra_fname+'_2D.png' when extra_fname is given.

        Parameters
        ----------
        num_points : int
            Minimum number of links a path must have to be plotted.
        extra_fname : str or None
            Optional extra output prefix for the labelled figure.

        Returns
        -------
        list or None
            self.path_data rows [first_frame, stuck, vel_array]; None when
            no path passes the length filter. Also fills self.path_stats.
        '''
        #Filament size ratio relative to the 1002-pixel reference width
        ratio = self.width/1002.0
        #Path data
        self.path_data = []
        self.path_stats = []
        #Prepare the processed images (all-NaN background renders blank)
        #NOTE(review): np.bool is removed in numpy>=1.24 - confirm numpy version
        self.path_img = np.nan*np.ones((self.width,self.height),dtype=np.bool)
        #Filter paths - pick only paths longer-equal to num-points
        filtered_paths = filter(lambda x:len(x.links) >= num_points,self.paths)
        #If there are no filtered paths - continue
        if len(filtered_paths) == 0:
            return
        #Get colors from a colormap
        path_colors = make_N_colors('Accent',len(filtered_paths))
        py.figure(2000)
        py.imshow(self.path_img,cmap=cm.gray,alpha=1.0)
        py.figure(2001)
        py.imshow(self.path_img,cmap=cm.gray,alpha=1.0)
        #Go through each path and write velocities
        for i in range(len(filtered_paths)):
            path = filtered_paths[i]
            #Links are stored tail-first; [::-1] restores chronological order
            mp_mean = np.mean(np.array([[link.filament1_midpoint[1],link.filament1_midpoint[0]] for link in path.links[::-1]]),axis=0)
            len_array = np.array([np.fabs(link.average_length) for link in path.links[::-1]])
            vel_array = np.array([np.fabs(link.instant_velocity) for link in path.links[::-1]])
            first_frame = path.links[-1].frame1_no
            path_length = len(path.links)
            stuck = path.stuck
            #Keep data in the arrays
            self.path_data.append([first_frame ,stuck,vel_array])
            self.path_stats.append([first_frame,stuck,path_length,np.mean(len_array),np.mean(vel_array),np.std(vel_array)])
            #Mean velocity (zeroed for stuck filaments)
            mean_velocity = np.fabs(np.mean(vel_array))
            if stuck:
                mean_velocity = 0
            for j in range(len(path.links)):
                mp_x1 = path.links[j].filament1_midpoint[1]
                mp_y1 = path.links[j].filament1_midpoint[0]
                mp_x2 = path.links[j].filament2_midpoint[1]
                mp_y2 = path.links[j].filament2_midpoint[0]
                #Plot the arrows connecting path
                py.figure(2000)
                py.arrow(mp_x2,mp_y2,mp_x1-mp_x2,mp_y1-mp_y2,color=path_colors[i],head_width=ratio*5,head_length=ratio*10,alpha=1.0)
                py.figure(2001)
                py.arrow(mp_x2,mp_y2,mp_x1-mp_x2,mp_y1-mp_y2,color=path_colors[i],head_width=ratio*5,head_length=ratio*10,alpha=1.0)
            #Write mean velocity for the path
            py.figure(2001)
            py.text(mp_mean[0],mp_mean[1],"%.f"%(mean_velocity),fontsize=10,color='k')
        #Convert path data to numpy array
        self.path_stats = np.array(self.path_stats)
        py.figure(2000)
        ax = py.gca()
        ax.xaxis.set_visible(False)
        ax.yaxis.set_visible(False)
        py.savefig(self.directory+'/paths_2D.png',dpi=400,transparent=False)
        py.figure(2001)
        ax = py.gca()
        ax.xaxis.set_visible(False)
        ax.yaxis.set_visible(False)
        if not extra_fname == None:
            py.figure(2001)
            py.savefig(extra_fname+'_2D.png',dpi=400,transparent=False)
        py.close('all')
        return self.path_data
def write_path_data(self, extra_fname = None):
#Write paths info
f = open(self.directory+'/paths.txt','w')
for data in self.path_data:
f.write('%8d\t%8d'%(data[0],data[1]))
for vel in data[2]:
f.write('\t%8.f'%(vel))
f.write('\n')
f.close()
if not extra_fname == None:
#Write paths info in extra file
f = open(extra_fname+'.txt','w')
for data in self.path_data:
f.write('%8d\t%8d'%(data[0],data[1]))
for vel in data[2]:
f.write('\t%8.f'%(vel))
f.write('\n')
f.close()
    def plot_1D_path_data(self, min_path_length = 10, include_stuck = False, max_velocity = 1000, show_length = False, extra_fname=None):
        '''
        Plot a 2D kernel-density map of mean vs. std path velocity and save
        1D projections plus path statistics; optionally repeat for an extra
        output prefix.

        Parameters
        ----------
        min_path_length : int
            Minimum path length (in links) for inclusion in the histogram.
        include_stuck : bool
            Whether stuck paths contribute to the weighted mean statistics.
        max_velocity : float
            Axis/histogram limit; raised automatically if data exceeds it.
        show_length : bool
            Overlay markers sized by path length.
        extra_fname : str or None
            When given, a second figure and data files are written there.

        Returns
        -------
        (self.path_stats, peak_stats) where peak_stats =
        [pvm,pvs,psm,pss,mvm,mvs,msm,mss] (peak and density-weighted
        mean/std of the velocity distributions).
        '''
        #Prepare two figures with and without velocity values
        py.figure(1000,figsize=(20,10))
        py.figure(1001,figsize=(20,10))
        #Number of bins
        nbins = 20
        py.figure(1000)
        #NOTE(review): here `valid` defaults to ALL paths (no min_path_length
        #filter), unlike the extra_fname branch below which filters - confirm
        #the asymmetry is intended
        valid = np.arange(len(self.path_stats[:,1]))
        hist_valid = np.nonzero((self.path_stats[:,1] == 0)*(self.path_stats[:,2] > min_path_length))[0]
        if not include_stuck:
            valid = np.nonzero(self.path_stats[:,1] == 0)[0]
        mean_vel = np.hstack(self.path_stats[hist_valid,4])
        std_vel = np.hstack(self.path_stats[hist_valid,5])
        #Change maximum velocity if the preassigned value is less than the maximum of the data
        if max_velocity < np.max(mean_vel):
            max_velocity = np.max(mean_vel)
        #Histogram edges
        xedges = np.linspace(0, max_velocity,nbins)
        yedges = np.linspace(0,0.5*max_velocity,nbins)
        #Gaussian kernel-density estimate over (mean_vel, std_vel)
        data = np.vstack((mean_vel,std_vel))
        k = kde.gaussian_kde(data)
        X, Y = np.mgrid[0:max_velocity+1:max_velocity/nbins, 0:0.5*max_velocity+1:max_velocity/(2*nbins)]
        Z = k(np.vstack([X.flatten(), Y.flatten()]))
        x_bin_size = max_velocity/nbins
        y_bin_size = max_velocity/(2*nbins)
        py.pcolormesh(X, Y, Z.reshape(X.shape))
        CS = py.contour(X+0.5*x_bin_size, Y+0.5*y_bin_size, Z.reshape(X.shape))
        #Determine the peak mean values from the highest contour region
        Z = Z.reshape(X.shape)
        high_point = CS.levels[-1]
        high_valid = np.nonzero(Z >= high_point)
        pvm,pvs,psm,pss = np.mean(X[high_valid[0],1]),np.std(X[high_valid[0],1]),np.mean(Y[1,high_valid[1]]),np.std(Y[1,high_valid[1]])
        #Calculate 1D projections - save them
        mean_vel_project = np.hstack((np.vstack(X[:,1]),np.vstack(np.sum(Z,axis=1))))
        std_vel_project = np.hstack((np.vstack(Y[1,:]),np.vstack(np.sum(Z,axis=0))))
        #Show length information on 1D-path-plots
        if show_length:
            #Plot marker-legend
            for i in range(11):
                py.plot(max_velocity*i/11.0,0.5*max_velocity*10.0/11.0,markersize=i*3,mec=(1,1,1,0),mfc=(1, 1, 1, 0.7),marker='o')
            #Plot individual points
            for point in range(len(hist_valid)):
                valid_point = hist_valid[point]
                py.plot(self.path_stats[valid_point,4],self.path_stats[valid_point,5],markersize=30.0*self.path_stats[valid_point,3]/5000,mec=(1,1,1,0),mfc=(1, 1, 1, 0.7),marker='o')
        np.savetxt(self.directory+'/paths_mean_vel_projection.dat',mean_vel_project)
        np.savetxt(self.directory+'/paths_std_vel_projection.dat',std_vel_project)
        #Determine density-weighted mean values
        mvm = np.sum(Z*X)/np.sum(Z)
        mvs = np.sqrt(np.sum(Z*(X-mvm)**2)/np.sum(Z))
        msm = np.sum(Z*Y)/np.sum(Z)
        mss = np.sqrt(np.sum(Z*(Y-msm)**2)/np.sum(Z))
        #Peak value
        peak_stats = [pvm,pvs,psm,pss,mvm,mvs,msm,mss]
        #Path-length-weighted velocity statistics
        path_vel_mean = np.sum(self.path_stats[valid,4]*self.path_stats[valid,2])/np.sum(self.path_stats[valid,2])
        path_std_mean = np.sum(self.path_stats[valid,5]*self.path_stats[valid,2])/np.sum(self.path_stats[valid,2])
        path_vel_std = np.sqrt(np.sum((self.path_stats[valid,4]- path_vel_mean)**2*self.path_stats[valid,2])/np.sum(self.path_stats[valid,2]))
        path_std_std = np.sqrt(np.sum((self.path_stats[valid,5]- path_std_mean)**2*self.path_stats[valid,2])/np.sum(self.path_stats[valid,2]))
        py.xlim([0,max_velocity])
        py.ylim([0,max_velocity*0.5])
        py.grid(color='white',linewidth=5,linestyle='--',alpha=0.5)
        py.savefig(self.directory+'/paths_1D.png',dpi=200,transparent=True)
        #Set tick label sizes
        ax = py.gca()
        py.setp(ax.get_xticklabels() , fontsize=55, visible=True)
        py.setp(ax.get_yticklabels() , fontsize=55, visible=True)
        ax.set_xlabel("Mean path velocity (nm/s)",fontsize=55)
        ax.set_ylabel("Std Path velocity (nm/s)",fontsize=55)
        #Save path stats
        np.savetxt(self.directory+'/paths_stats.dat',self.path_stats,header='first-frame stuck path-length mean-length mean-vel mean-std')
        if not extra_fname == None:
            #Repeat the whole analysis on figure 1001 for the extra output prefix
            py.figure(1001)
            valid = np.nonzero(self.path_stats[:,2] > min_path_length)[0]
            hist_valid = np.nonzero((self.path_stats[:,1] == 0)*(self.path_stats[:,2] > min_path_length))[0]
            if not include_stuck:
                valid = np.nonzero((self.path_stats[:,1] == 0)*(self.path_stats[:,2] > min_path_length))[0]
            mean_vel = np.hstack(self.path_stats[hist_valid,4])
            std_vel = np.hstack(self.path_stats[hist_valid,5])
            data = np.vstack((mean_vel,std_vel))
            k = kde.gaussian_kde(data)
            X, Y = np.mgrid[0:max_velocity+1:max_velocity/nbins, 0:0.5*max_velocity+1:max_velocity/(2*nbins)]
            Z = k(np.vstack([X.flatten(), Y.flatten()]))
            x_bin_size = max_velocity/nbins
            y_bin_size = max_velocity/(2*nbins)
            py.pcolormesh(X, Y, Z.reshape(X.shape))
            CS = py.contour(X+0.5*x_bin_size, Y+0.5*y_bin_size, Z.reshape(X.shape))
            #Determine the peak mean values
            Z = Z.reshape(X.shape)
            high_point = CS.levels[-1]
            high_valid = np.nonzero(Z >= high_point)
            pvm,pvs,psm,pss = np.mean(X[high_valid[0],1]),np.std(X[high_valid[0],1]),np.mean(Y[1,high_valid[1]]),np.std(Y[1,high_valid[1]])
            #Calculate 1D projections - save them
            mean_vel_project = np.hstack((np.vstack(X[:,1]),np.vstack(np.sum(Z,axis=1))))
            std_vel_project = np.hstack((np.vstack(Y[1,:]),np.vstack(np.sum(Z,axis=0))))
            #Show length information on 1D-path-plots
            if show_length:
                #Plot marker-legend
                for i in range(10):
                    py.plot(max_velocity*i/11.0,0.5*max_velocity*10.0/11.0,markersize=i*3,mec=(1,1,1,0),mfc=(1, 1, 1, 0.7),marker='o')
                #Plot individual points
                for point in range(len(hist_valid)):
                    valid_point = hist_valid[point]
                    py.plot(self.path_stats[valid_point,4],self.path_stats[valid_point,5],markersize=30.0*self.path_stats[valid_point,3]/5000,mec=(1,1,1,0),mfc=(1, 1, 1, 0.7),marker='o')
            np.savetxt(extra_fname+'_mean_vel_projection.dat',mean_vel_project)
            np.savetxt(extra_fname+'_std_vel_projection.dat',std_vel_project)
            #Save path stats
            np.savetxt(extra_fname+'_stats.dat',self.path_stats,header='first-frame stuck path-length mean-length mean-vel mean-std')
            #Determine mean-values
            mvm = np.sum(Z*X)/np.sum(Z)
            mvs = np.sqrt(np.sum(Z*(X-mvm)**2)/np.sum(Z))
            msm = np.sum(Z*Y)/np.sum(Z)
            mss = np.sqrt(np.sum(Z*(Y-msm)**2)/np.sum(Z))
            #Peak value
            peak_stats = [pvm,pvs,psm,pss,mvm,mvs,msm,mss]
            #For histgram use only
            len_hist_data = len(self.path_stats[hist_valid,4])
            path_vel_mean = np.sum(self.path_stats[valid,4]*self.path_stats[valid,2])/np.sum(self.path_stats[valid,2])
            path_std_mean = np.sum(self.path_stats[valid,5]*self.path_stats[valid,2])/np.sum(self.path_stats[valid,2])
            path_vel_std = np.sqrt(np.sum((self.path_stats[valid,4]- path_vel_mean)**2*self.path_stats[valid,2])/np.sum(self.path_stats[valid,2]))
            path_std_std = np.sqrt(np.sum((self.path_stats[valid,5]- path_std_mean)**2*self.path_stats[valid,2])/np.sum(self.path_stats[valid,2]))
            py.xlim([0,max_velocity])
            py.ylim([0,max_velocity*0.5])
            py.grid(color='white',linewidth=5,linestyle='--',alpha=0.5)
            #Set tick label sizes
            ax = py.gca()
            py.setp(ax.get_xticklabels() , fontsize=55, visible=True)
            py.setp(ax.get_yticklabels() , fontsize=55, visible=True)
            #Set x and y-labels
            ax.set_xlabel("Mean path velocity (nm/s)",fontsize=55)
            ax.set_ylabel("Std path velocity (nm/s)",fontsize=55)
            #Save figure
            py.savefig(extra_fname+'_1D.png',dpi=200,transparent=True)
        py.close('all')
        return self.path_stats,peak_stats
def process_frame_links(self,num_points=5):
'''
Process frame links to extract length/velocity information
'''
#Make the forward links for frame links
self.make_forward_links()
#Connect disconnected paths
self.wire_frame_links()
#Create paths from frame links
self.create_paths()
#Calculate velocities from paths
self.path_velocities(num_points)
def make_movie(self,extra_fname=None):
'''
Make a movie file using ffmpeg
'''
#Check if paths_2D.png figure exists - if not do not make the movie
if not os.path.isfile(self.directory+'/paths_2D.png'):
return
#Get current working directory
cwd = os.getcwd()
os.chdir(self.directory)
avicommand = "avconv -y -r 1 -i skeletons_%03d.png -r 1 filament_tracks.avi"
os.system(avicommand)
#Copy the movie to output directory
if not extra_fname == None:
copy_command = "cp filament_tracks.avi "+extra_fname+"filament_tracks.avi"
os.system(copy_command)
#Delete all skeleton file
os.system('rm -f skeletons_*.png')
os.chdir(cwd)
    def read_frame(self,num_frame,force_read = False):
        '''
        Read and process a single frame into self.frame.

        Parameters
        ----------
        num_frame : int
            Frame number to read.
        force_read : bool
            When True, reprocess the raw frame even if a cached
            filament-xy file exists.

        Returns
        -------
        1 when the cached filament file was loaded; implicitly None after
        a full raw-frame processing run (NOTE(review): inconsistent return
        values - confirm callers do not rely on the result). Exits the
        process via sys.exit when the raw frame file is missing.
        '''
        #Read the frame
        print 'Reading frame: %d'%(num_frame)
        self.frame = Frame()
        self.frame.directory = self.directory
        self.frame.header = self.header
        self.frame.tail = self.tail
        self.frame.frame_no = num_frame
        #If already exists load the saved array file
        filament_file = self.directory+'/filXYs%03d.npy'%num_frame
        if not force_read and os.path.isfile(filament_file):
            self.frame.read_filXYs()
            self.frame.filXY2filaments()
            return 1
        #If the file does not exist, exit the loop
        if not self.frame.read_frame(num_frame):
            sys.exit('File not found!')
        #Full processing chain: filter, segment, skeletonize, reduce
        self.frame.low_pass_filter()
        self.frame.entropy_clusters()
        self.frame.filter_islands()
        self.frame.skeletonize_islands()
        self.frame.filaments2filXYs()
def save_frame(self):
'''
Save the filament-xy's of frame
'''
self.frame.save_filXYs()
def load_frame1(self, frame_no):
'''
Load frame 1
'''
self.frame1 = Frame()
self.frame1.directory = self.directory
self.frame1.header = self.header
self.frame1.tail = self.tail
self.frame1.frame_no = frame_no
#Read the filament XYs and reconstruct filaments
self.frame1.read_filXYs()
self.frame1.filXY2filaments()
def load_frame2(self, frame_no):
'''
Load frame 2
'''
self.frame2 = Frame()
self.frame2.directory = self.directory
self.frame2.header = self.header
self.frame2.tail = self.tail
self.frame2.frame_no = frame_no
#Read the filament XYs and reconstruct filaments
self.frame2.read_filXYs()
self.frame2.filXY2filaments()
def write_length_velocity(self, header='',extra_fname=None):
'''
Write length vs.velocity in a txt file
'''
np.savetxt(self.directory+'/'+header+'full_length_velocity.txt', self.full_len_vel)
np.savetxt(self.directory+'/'+header+'max_length_velocity.txt', self.max_len_vel)
if not extra_fname == None:
np.savetxt(extra_fname+'full_length_velocity.txt',self.full_len_vel)
np.savetxt(extra_fname+'max_length_velocity.txt',self.max_len_vel)
def save_links(self):
'''
Save the results in working directory
'''
np.save(self.directory+'/links.npy' ,self.frame_links)
def load_links(self):
'''
Load the results that have been saved earlier
'''
self.frame_links = np.load(self.directory+'/links.npy')
def plot_length_velocity(self,header='',extra_fname=None,max_vel=2400, max_length= 10000,nbins=30, min_points=2, min_path_length = 5, weighted=True, percent_tolerance=500, print_plot=True, minimal_plot=False, maxvel_color='b', plot_xlabels = True, plot_ylabels = True, square_plot = True, plot_length_f=False, fit_f = 'exp',dpi_plot=200):
'''
Plot length-vs-velocity data
'''
#Pick only filaments shorter than maximum length
valid = np.nonzero(self.full_len_vel[:,0] < max_length)[0]
self.full_len_vel = self.full_len_vel[valid,:]
#Calculate top 1%, top 5%, mean velocity excluding stuck
tolerance_data = []
tolerance_list = [2.5,5,10,20,40,80]
valid_points = np.nonzero(self.full_len_vel[:,1] >= 0)[0]
for filter_value in tolerance_list[::-1]:
filtered_data = self.full_len_vel[valid_points,:]
if len(valid_points) > 10:
stuck = np.nonzero(filtered_data[:,1] == 0)[0]
non_stuck = np.nonzero(filtered_data[:,1] != 0)[0]
if len(non_stuck) > 0:
#Get filament velocities
fil_vel = filtered_data[non_stuck,1]
#Mean velocity of the moving filaments only
mean_vel_m = np.mean(fil_vel)
std_vel_m = np.std(fil_vel)
velocities_sorted = np.sort(fil_vel)[::-1]
top_1_num = int(np.ceil(0.01*len(velocities_sorted)))
top_5_num = int(np.ceil(0.05*len(velocities_sorted)))
#Number of data points
num_filter_points = len(fil_vel)
top_1_velocity = np.mean(velocities_sorted[:top_1_num])
top_5_velocity = np.mean(velocities_sorted[:top_5_num])
tolerance_data.append([filter_value*2, num_filter_points, top_1_velocity,top_5_velocity,mean_vel_m,std_vel_m])
else:
tolerance_data.append([filter_value*2, 0.0, 0.0 ,0.0, 0.0, 0.0])
else:
tolerance_data.append([filter_value*2, 0.0, 0.0 ,0.0, 0.0, 0.0])
#Valid points for the next iteration
valid_points = np.nonzero(self.full_len_vel[:,2] <= filter_value/100.0*self.full_len_vel[:,1])[0]
#Convert tolerance data to array format
tolerance_data = np.array(tolerance_data)
#Calculate percent stuck
percent_stuck = 100.0*np.sum(self.full_len_vel[:,1] == 0)/len(self.full_len_vel[:,0])
#Text fontsize
text_font_size = 30
#Valid points - only moving, satisfying smooth movement tolerance
valid_filtered = np.nonzero((self.full_len_vel[:,1] > 0)*(self.full_len_vel[:,2] <= percent_tolerance/100.0*self.full_len_vel[:,1])*(self.full_len_vel[:,3] >= min_path_length))[0]
num_points_filtered = len(valid_filtered)
#Valid points - only moving
valid_mobile = np.nonzero((self.full_len_vel[:,1] > 0)*(self.full_len_vel[:,3] >= min_path_length))[0]
num_points_mobile = len(valid_mobile)
#Valid points including stuck
valid_all = np.nonzero(self.full_len_vel[:,3] >= min_path_length)[0]
num_points_all = len(valid_all)
#Valid points - only stuck filaments
valid_stuck = np.nonzero((self.full_len_vel[:,3] >= min_path_length)*(self.full_len_vel[:,1] == 0))[0]
num_points_stuck = len(valid_stuck)
if num_points_filtered < min_points:
#There is no frame-link
print 'Warning: There is not enough velocity data! - %d points'%(num_points_t)
return -1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1
#Statistics data
MVEL_filtered = np.mean(self.full_len_vel[valid_filtered,1])
MVEL = np.mean(self.full_len_vel[valid_mobile,1])
MVIS = np.mean(self.full_len_vel[valid_all,1])
mean_len_filtered = np.mean(self.full_len_vel[valid_filtered,0])
mean_len_mobile = np.mean(self.full_len_vel[valid_mobile,0])
mean_len_stuck = np.mean(self.full_len_vel[valid_stuck,0])
mean_len_all = np.mean(self.full_len_vel[valid_all,0])
#Get filament length and velocities
fil_len = self.full_len_vel[valid_filtered,0]
fil_vel = self.full_len_vel[valid_filtered,1]
#Length histogram bin edges-centers parameters
l_bin_edges = np.linspace(0,max_length*1E-3, nbins)
l_bin_centers = 0.5*(l_bin_edges[:-1]+l_bin_edges[1:])
l_tick_locs = l_bin_centers
l_tick_labels = ['%d'%round(x) for x in l_bin_centers]
#Get length histogram values excluding stuck filaments
l_bin_counts, l_bin_locs = np.histogram(self.full_len_vel[valid_filtered,0],bins=l_bin_edges,normed=False)
#Velocity histogram bin edges-centers parameters
v_bin_edges = np.linspace(0,max_vel,nbins)
v_bin_width = v_bin_edges[1] - v_bin_edges[0]
v_bin_centers = 0.5*(v_bin_edges[:-1]+v_bin_edges[1:])
v_tick_locs = [round(x) for x in v_bin_centers]
v_tick_labels = ['%.0f'%round(x) for x in v_bin_centers]
#Get velocity histogram values excluding stuck filaments
v_bin_counts, v_bin_locs = np.histogram(self.full_len_vel[valid_filtered,1],bins=v_bin_edges,normed=True)
#Determine top 1 and 5 percent velocities
velocities_sorted = np.sort(fil_vel)[::-1]
top_1_num = np.ceil(0.01*len(velocities_sorted))
top_5_num = np.ceil(0.05*len(velocities_sorted))
top_1_velocity = np.mean(velocities_sorted[:top_1_num])
top_5_velocity = np.mean(velocities_sorted[:top_5_num])
#Fit the data
fil_len_digitized = np.digitize(1E-3*fil_len,l_bin_locs)
fil_weights = 1.0/l_bin_counts[fil_len_digitized-1]
mean_vel_u,mean_vel_amp,mean_vel_tau,residuals,success = fit_coupling_velocity(fil_len,fil_vel,fil_weights,weighted=weighted)
std_u = np.sqrt(np.mean(residuals**2))
#Find the treshold filament length for plateou
bound_prob = coupling_velocity(fil_len,0.0,-1.0,mean_vel_tau)
plateu_valid = np.nonzero(bound_prob > 0.95)[0]
plateu_vel = fil_vel[plateu_valid]
mean_plateu = np.mean(plateu_vel)
std_plateu = np.std(plateu_vel)
roof_plateu = mean_plateu+2.0*std_plateu
#Find max probability
max_index_t = np.argmax(v_bin_counts)
#Peak velocity - moving
peak_vel_t = v_bin_centers[max_index_t]
#Perform length velocity analysis on the maximum velocity data
#Minimum path length is required for maximum lenght velocity analysis
max_valid = np.nonzero((self.max_len_vel[:,1] > 0)*(self.max_len_vel[:,2] <= percent_tolerance/100.0*self.max_len_vel[:,1])*(self.max_len_vel[:,3] >= min_path_length))[0]
#Function fitted can be either exponential or uyeda's equation
if fit_f == 'exp':
max_vel_u,max_vel_amp,max_vel_tau,residuals,success = fit_coupling_velocity(self.max_len_vel[max_valid,0],self.max_len_vel[max_valid,1],np.ones(len(self.max_len_vel[max_valid,0])),weighted=False)
std_u = np.sqrt(np.mean(residuals**2))
elif fit_f == 'uyeda':
max_vel_u,max_vel_r,residuals,success = fit_length_velocity(self.max_len_vel[max_valid,0],self.max_len_vel[max_valid,1],np.ones(len(self.max_len_vel[max_valid,0])),weighted=False)
std_u = np.sqrt(np.mean(residuals**2))
#Estimated curve from filament length data
exp_len = np.linspace(np.min(fil_len),15000,1000)
if fit_f == 'exp':
exp_vel = coupling_velocity(exp_len,max_vel_u,max_vel_amp,max_vel_tau)
elif fit_f == 'uyeda':
exp_vel = length_velocity(exp_len,max_vel_u,max_vel_r)
#Determine tolerance word
if percent_tolerance == 500:
tolerance_string = 'none'
else:
tolerance_string = str(percent_tolerance)
if print_plot:
if minimal_plot:
text_font_size = 55
linewidth = 10
#Determine 0.99 probability length: myosin density=1/36
if fit_f == 'exp':
length_f = max_vel_tau
elif fit_f == 'uyeda':
length_f = np.log(0.01)/np.log(1-max_vel_r)*36.0
#Plot square image
x,y = plotparams.get_figsize(1080)
if square_plot:
py.figure(0,figsize=(y,y))
else:
py.figure(0,figsize=(x,y))
py.plot(1E-3*fil_len,fil_vel,'.',markersize=10 ,color='gray')
py.plot(1E-3*self.max_len_vel[max_valid,0],self.max_len_vel[max_valid,1],marker='^',markersize=10,mec=maxvel_color, mfc=maxvel_color,linestyle='None')
#Do not plot plateaue line if user doesn't want data to be fitted
if not fit_f == 'none':
py.plot(1E-3*exp_len,exp_vel,'k-',linewidth=linewidth, alpha=0.7)
py.plot(1E-3*exp_len,np.ones(len(exp_len))*top_5_velocity,'k-.' ,linewidth=linewidth)
if plot_length_f:
#Plot L-0.5
py.plot(1E-3*np.array([length_f,length_f]),[0,top_5_velocity],linestyle='dashed',color='k')
py.text(length_f*1E-3+0.1,10,'%.1f'%(length_f*1E-3),fontsize=text_font_size,color='k')
#X/Y limits
py.ylim([0,max_vel])
py.xlim([0,max_length*1E-3+1.0])
#Place the histogram ticks
ax = py.gca()
vel_ticks = ax.get_yticks()
ax.set_yticks(vel_ticks[::2])
ax.set_yticklabels(vel_ticks[::2]*1E-3)
len_ticks = ax.get_xticks()
ax.set_xticks(len_ticks[::2])
ax.set_xticklabels([int(x) for x in len_ticks[::2]])
#Change the padding between the axis and labels
ax.tick_params(pad=10)
#Set tick label size
py.setp(ax.get_xticklabels(), fontsize=text_font_size, visible=plot_xlabels)
py.setp(ax.get_yticklabels(), fontsize=text_font_size, visible=plot_ylabels)
else:
# definitions for the axes
left, width = 0.1, 0.5
bottom, height = 0.1, 0.5
#Left boundary for velocity histograms
left_h1 = left+width
left_h2 = left_h1 +0.15
#Lower boundary for the length histogram
bottom_v1 = bottom+0.27
rect_scatter = [left , bottom_v1 , width , height ]
rect_tolerance = [left_h1+0.01, bottom+0.02 , 0.29 , 0.24 ]
rect_histy1 = [left_h1 , bottom_v1 , 0.15 , height ]
rect_histy2 = [left_h2 , bottom_v1 , 0.15 , height ]
rect_histx1 = [left , bottom+0.02 , width , 0.25 ]
#Plot the data and fit
py.figure(0,figsize=plotparams.get_figsize(1200))
axScatter = py.axes(rect_scatter)
axHisty1 = py.axes(rect_histy1)
axHisty2 = py.axes(rect_histy2)
axHistx1 = py.axes(rect_histx1)
axTolerance1 = py.axes(rect_tolerance)
axTolerance2 = axTolerance1.twinx()
#Plot tolerance data - top 5% and mean-velocity excluding stuck filaments
max_tol_vel = np.max(tolerance_data[:,3:5])
min_tol_vel = np.min(tolerance_data[:,3:5])
#Plot the tolerance information
#First plot lines
axTolerance2.plot(tolerance_data[:,0],tolerance_data[:,3],color='k',linestyle='--',marker='.',linewidth=5,markersize=15)
axTolerance2.plot(tolerance_data[:,0],tolerance_data[:,4],color='k',linestyle='-' ,marker='.',linewidth=5,markersize=15)
axTolerance2.set_xscale('symlog')
axTolerance1.set_xscale('symlog')
tol_ymin = min_tol_vel-100
tol_ymax = max_tol_vel+100
tol_diff = max_tol_vel-min_tol_vel+200
axTolerance2.set_ylim([tol_ymin, tol_ymax])
axTolerance2.set_xlim([5,200])
#Plot tolerance legend
axTolerance2.plot([6,9],[tol_ymin+0.25*tol_diff,tol_ymin+0.25*tol_diff],color='k',linestyle='--',linewidth=5)
axTolerance2.text(10,tol_ymin+0.20*tol_diff,r"%s"%('TOP5%') ,fontsize=text_font_size,color='k')
axTolerance2.plot([6,9],[tol_ymin+0.10*tol_diff,tol_ymin+0.1*tol_diff],color='k',linestyle='-',linewidth=5)
axTolerance2.text(10,tol_ymin+0.05*tol_diff,r"%s"%('Mean Velocity') ,fontsize=text_font_size,color='k')
axTolerance2.set_xticks(tolerance_data[:,0])
axTolerance2.set_xticklabels(['*']+[int(x) for x in tolerance_data[1:,0]])
axTolerance1.set_xlabel('% Tolerance',fontsize=text_font_size,labelpad=20)
#Set velocity labels
vel_ticks = axTolerance2.get_yticks()
axTolerance2.set_yticks(vel_ticks[1::2])
axTolerance2.set_yticklabels(vel_ticks[1::2]*1E-3)
ylim = axTolerance2.get_ylim()
tol_diff = ylim[1]-ylim[0]
axTolerance2.text(300,ylim[1]+0.1*tol_diff,r'$x10^3$',fontsize=25)
py.setp(axTolerance2.get_yticklabels() , fontsize=text_font_size, visible=True)
py.setp(axTolerance2.get_xticklabels() , fontsize=text_font_size, visible=True)
py.setp(axTolerance1.get_yticklabels() , fontsize=text_font_size, visible=False)
py.setp(axTolerance1.get_xticklabels() , fontsize=text_font_size, visible=True)
#Plot Length histogram
l_bin_counts, l_bin_locs, l_patches = axHistx1.hist(1E-3*self.full_len_vel[valid_filtered,0],bins=l_bin_edges,normed=False,orientation='vertical',color='gray')
max_prob_l = np.max(l_bin_counts)
#Plot histogram with all mobile filaments
v_bin_counts, v_bin_locs, v_patches = axHisty2.hist(self.full_len_vel[valid_mobile,1],bins=v_bin_edges,normed=True,orientation='horizontal',color='gray')
#Get the y-limit for y-histogram-2
max_prob_a = axHisty2.get_xlim()[1]
#Plot histogram without stuck filaments
v_bin_counts, v_bin_locs, v_patches = axHisty1.hist(self.full_len_vel[valid_filtered,1],bins=v_bin_edges,normed=True,orientation='horizontal',color='gray')
#Get the y-limit for y-histogram-1
max_prob_t = axHisty1.get_xlim()[1]
#Plot the data and fit
axScatter.plot(1E-3*fil_len,fil_vel,'.',markersize=5 ,color='gray')
axScatter.plot(1E-3*self.max_len_vel[max_valid,0],self.max_len_vel[max_valid,1],marker='^',markersize=5,mec=maxvel_color, mfc=maxvel_color,linestyle='None')
#Do not plot plateaue line if user doesn't want data to be fitted
if not fit_f == 'none':
axScatter.plot(1E-3*exp_len,exp_vel,'k-',alpha=0.7)
axScatter.plot(1E-3*exp_len,np.ones(len(exp_len))*top_5_velocity,'k--' ,linewidth=5)
axHisty1.plot([0,max_prob_t],np.ones(2)*MVEL_filtered ,color='k',linestyle='-',linewidth=5)
axHisty2.plot([0,max_prob_a],np.ones(2)*MVEL ,'k-' ,linewidth=5)
axHistx1.plot([mean_len_filtered*1E-3,mean_len_filtered*1E-3],[0,max_prob_l],'k-' ,linewidth=5)
#X/Y limits
axScatter.set_ylim([0,max_vel])
axScatter.set_xlim([0,max_length*1E-3])
#Place the histogram ticks
vel_ticks = axScatter.get_yticks()
vel_ticks = vel_ticks[::2]
axScatter.set_yticks(vel_ticks)
axScatter.set_yticklabels(vel_ticks*1E-3)
len_ticks = axScatter.get_xticks()
axScatter.set_xticks(len_ticks[:-1])
axScatter.set_xticklabels([int(x) for x in len_ticks[:-1]])
axHistx1.set_xticks(len_ticks[:-1])
axHistx1.set_xticklabels([int(x) for x in len_ticks[:-1]])
axHisty1.set_yticks(vel_ticks)
axHisty1.set_yticklabels(vel_ticks*1E-3)
axHisty2.set_yticks(vel_ticks)
axHisty2.set_yticklabels(vel_ticks*1E-3)
axScatter.text(0,max_vel,r'$x10^3$',fontsize=25)
#X/Y limits
axScatter.set_ylim([0,max_vel])
axScatter.set_xlim([0,max_length*1E-3])
#Format tick labels
py.setp(axHisty1.get_yticklabels() , fontsize=text_font_size, visible=False)
py.setp(axHistx1.get_xticklabels() , fontsize=text_font_size, visible=True)
py.setp(axScatter.get_xticklabels(), fontsize=text_font_size, visible=False)
py.setp(axScatter.get_yticklabels(), fontsize=text_font_size, visible=True)
axHisty1.set_ylim([0,max_vel])
axHisty2.set_ylim([0,max_vel])
axHistx1.set_xlim([0,max_length*1E-3])
#Remove the xtick labels for the histogram
axHisty1.ticklabel_format(style='sci', axis='x', scilimits=(-5,5))
axHisty2.ticklabel_format(style='sci', axis='x', scilimits=(-5,5))
py.setp(axHisty1.get_xticklabels(), visible=False)
py.setp(axHisty2.get_xticklabels(), visible=False)
py.setp(axHisty2.get_yticklabels(), visible=False)
py.setp(axHistx1.get_yticklabels(), visible=False)
#Labels for the histogram plots
axTolerance2.set_ylabel(r'Velocity (nm/s)',labelpad=20,fontsize=text_font_size)
axScatter.set_ylabel(r'Velocity (nm/s)',labelpad=20,fontsize=text_font_size)
axHistx1.set_xlabel(r'Actin filament length ($\mu m$)',labelpad=20,fontsize=text_font_size)
axHisty1.text(0.1*max_prob_t,1.1*max_vel,'Filtered' ,fontsize=text_font_size)
axHisty2.text(0.1*max_prob_a,1.1*max_vel,'Unfiltered',fontsize=text_font_size)
#Get maximum length for writing velocity on the figure
axScatter.plot(max_length*1E-3*np.array([1,2])/15.0,[2150/2400.0*max_vel,2150/2400.0*max_vel],'k--' ,linewidth=5)
axScatter.text(max_length*1E-3*2.1/15.0 ,2100/2400.0*max_vel,r"%.f$^{TOP5\%%}$"%(top_5_velocity) ,fontsize=text_font_size,color='k')
#Do not write plateaue velocity if user doesn't want data to be fitted
if not fit_f == 'none':
axScatter.plot(max_length*1E-3*np.array([6,7])/15.0,[2150/2400.0*max_vel,2150/2400.0*max_vel],'k-' ,linewidth=10)
axScatter.text(max_length*1E-3*7.1/15.0 ,2100/2400.0*max_vel ,r"%.f$^{PLATEAU}$"%(max_vel_u) ,fontsize=text_font_size,color='k')
axHisty1.text(0.1*max_prob_t,1900/2400.0*max_vel,r"%.f$^{MVEL_{%s}}$"%(MVEL_filtered,tolerance_string) ,fontsize=text_font_size,color='k')
axHisty2.text(0.1*max_prob_a,1900/2400.0*max_vel,r"%.f$^{MVEL}$"%(MVEL) ,fontsize=text_font_size,color='k')
axHisty2.text(0.15*max_prob_a,1600/2400.0*max_vel,r"%.f$^{\%%STUCK}$"%(percent_stuck) ,fontsize=text_font_size,color='k')
axHistx1.text(mean_len_filtered*1E-3,max_prob_l*0.5,r"%.3f$^{<FIL-LENGTH>}$"%(mean_len_filtered*1E-3) ,fontsize=text_font_size,color='k')
py.savefig(self.directory+'/'+header+'length_velocity.png',dpi=dpi_plot,transparent=False)
if not extra_fname == None:
py.savefig(extra_fname,dpi=dpi_plot,transparent=False)
py.close()
#List to be returned
if not fit_f == 'none':
return_list = top_5_velocity, percent_stuck, MVEL, MVEL_filtered, max_vel_u, MVIS, mean_len_stuck, mean_len_filtered, mean_len_mobile, mean_len_all,num_points_filtered
else:
return_list = top_5_velocity, percent_stuck, MVEL, MVEL_filtered, -1 , MVIS, mean_len_stuck, mean_len_filtered, mean_len_mobile, mean_len_all,num_points_filtered
return return_list
def plot_correlation_profile(self, extra_fname=None):
    '''
    Plot the tangent-correlation profile used for the persistence-length
    estimate (top panel) together with its weight profile (bottom panel),
    and save the figure under self.directory.

    Parameters
    ----------
    extra_fname : str or None
        Optional additional path to save a second copy of the figure.

    Returns
    -------
    (length_0_7, mean_corr_1500) : tuple
        The distance (nm, from a linear fit) at which the correlation
        drops to 0.7, and the mean correlation over the first 1500 nm.
    '''
    py.figure(4, figsize=plotparams.get_figsize(1080))
    #Distance axes in nm for the correlation and weight profiles
    array_corr_len = np.arange(len(self.final_corr_len))*self.dx
    array_corr_weight = np.arange(len(self.final_corr_weight))*self.dx
    #Keep only points with correlation above 0.7 within the first 1500 nm
    #NOTE(review): assumes at least two valid points for linregress - confirm upstream
    valid = np.nonzero((self.final_corr_len > 0.7)*(array_corr_len <= 1500))
    slope, intercept, r_value, p_value, std_err = stats.linregress(array_corr_len[valid], 1.0*self.final_corr_len[valid])
    #The distance that leads to 0.7 correlation (extrapolated from the fit)
    length_0_7 = np.round((0.7 - intercept)/slope)
    mean_corr_1500 = np.mean(1.0*self.final_corr_len[valid])
    #Top panel: correlation profile and its linear fit
    py.subplot(211)
    py.plot(array_corr_len, 1.0*self.final_corr_len, 'bo')
    py.plot(array_corr_len, array_corr_len*slope+intercept, 'r-', linewidth=5)
    py.text(1500, 0.9, r'l$_{0.7}$: %d'%(length_0_7), fontsize=50)
    py.xlim(0, 3000)
    py.ylim(0.7, 1.0)
    py.ylabel(r'c($\Delta$ nm)')
    #Bottom panel: correlation weights
    py.subplot(212)
    py.plot(array_corr_weight, 1.0*self.final_corr_weight)
    py.xlim(0, self.max_fil_length*self.dx)
    py.xlabel(r'$\Delta$ nm')
    py.ylabel(r'weight (#)')
    py.savefig(self.directory+'/correlation_length.png', dpi=200)
    #Identity comparison with None (PEP 8) instead of '== None'
    if extra_fname is not None:
        py.savefig(extra_fname, dpi=200)
    py.close()
    return length_0_7, mean_corr_1500
class Frame:
    """
    A single microscopy frame: holds the raw image, the entropy islands and
    filaments segmented from it, plus the file-path bookkeeping needed to
    read and write per-frame data.
    """
    def __init__(self):
        #Global parameters - mainly cutoff-thresholding values
        self.frame_no = 0                        #Frame number
        self.window_island = 15                  #The radius of the disk used for local entropy measurement (default:15)
        self.disk_win = disk(self.window_island) #Disk used for local entropy calculation
        #The images to store
        self.img = None                          #Image in matrix form
        self.img_filaments = None                #Full filaments' image
        self.img_skeletons = None                #Skeletonized filaments' image
        #Frame dimension properties
        self.width = 1002                        #Width of the image in pixels
        self.height = 1004                       #Height of the image in pixels
        #Container for the islands and backward links
        self.backward_links = []                 #Frame links in backward direction
        self.islands = []                        #Array that keeps the islands
        self.filaments = []                      #Array that keeps the accepted filaments
        self.filXYs = []                         #Array that keeps X-Y positions of the filaments
        #File path properties
        self.directory = ''                      #Directory for the frames
        self.header = ''                         #Header name for the tiff images
        self.tail = ''                           #Tail name for the tiff images
        #Filament counter(label)
        self.filament_counter = 0

    def reset_filament_labels(self):
        '''
        Reset filament labels so each label matches the filament's index
        in self.filaments.
        '''
        for i in range(len(self.filaments)):
            self.filaments[i].label = i

    def read_frame(self, frame_no):
        '''
        Read the image file corresponding to a frame.
        Returns False when the file does not exist, True on success.
        '''
        fname = self.directory+'/'+self.header+'%03d'%frame_no+'_'+self.tail+'_000.tif'
        self.frame_no = frame_no
        #Use openCV to read the image as grayscale
        if not os.path.isfile(fname):
            return False
        self.img = cv2.imread(fname, cv2.IMREAD_GRAYSCALE)
        #Convert to unsigned 16-bit (img_as_uint rescales to uint16)
        self.img = img_as_uint(self.img)
        #Get the dimensions of the frame
        #NOTE(review): ndarray.shape is (rows, cols); stored here as
        #(width, height) - consistent within this class, but confirm callers
        self.width, self.height = self.img.shape
        return True

    def filXY2filaments(self):
        '''
        Reconstruct the filaments array from the reduced filXYs representation.
        '''
        self.filaments = []
        #Filament counter for filament label
        fil_counter = 0
        for filXY, width, density, midpoint in self.filXYs:
            filament = Filament()
            filament.frame_no = self.frame_no
            filament.contour = filXY
            filament.fil_width = width
            filament.fil_density = density
            filament.label = fil_counter
            filament.midpoint = midpoint
            #Calculate filament properties
            filament.calc_props()
            #Add filament to list
            self.filaments.append(filament)
            #Update filament label
            fil_counter += 1

    def reconstruct_skeleton_images(self):
        '''
        Rebuild the boolean skeleton image from the filaments' contours.
        '''
        #np.bool was removed in NumPy 1.24 - use the builtin bool dtype
        self.img_skeletons = np.zeros((self.width, self.height), dtype=bool)
        for filament in self.filaments:
            self.img_skeletons[filament.contour[:, 0], filament.contour[:, 1]] = True

    def reconstruct_filament_images(self):
        '''
        Reconstruct the full-frame intensity image from the reduced filament
        representations.
        '''
        #Prepare the processed images
        self.img_filaments = np.zeros((self.width, self.height), dtype=np.uint16)
        for filament in self.filaments:
            x_corrected = filament.xy[0]+filament.island.x_min
            y_corrected = filament.xy[1]+filament.island.y_min
            #Index with a tuple - a list of index arrays is an error in modern NumPy
            self.img_filaments[x_corrected, y_corrected] = filament.img_reduced[tuple(filament.xy_norm)]

    def save_filament_img(self):
        '''
        Save the reconstructed filament image as a png with hidden axes.
        '''
        py.figure()
        py.imshow(self.img_filaments, cmap=cm.gray)
        #Hide the axes - only the image content matters
        ax = py.gca()
        ax.xaxis.set_visible(False)
        ax.yaxis.set_visible(False)
        py.savefig(self.directory+'/filaments_%03d.png'%(self.frame_no), dpi=200)
        py.close()

    def calc_fil_corr_funcs(self):
        '''
        Calculate filament length-correlation profiles for every filament.
        '''
        for filament in self.filaments:
            filament.correlation_function()

    def save_filXYs(self):
        '''
        Save contours of filaments as an npy file.
        '''
        #Save the filament-contours as npy file
        filament_file = self.directory+'/filXYs%03d'%self.frame_no
        np.save(filament_file, self.filXYs)

    def read_filXYs(self):
        '''
        Load the filXYs for filament reconstruction.
        '''
        filament_file = self.directory+'/filXYs%03d.npy'%self.frame_no
        #filXYs is a heterogeneous (object) array, so modern NumPy requires
        #allow_pickle=True; the data is produced by save_filXYs, i.e. trusted
        self.filXYs = np.load(filament_file, allow_pickle=True)

    def check_picture_quality(self):
        '''
        Check picture quality.
        A frame is 'good' when the local 95%-5% percentile-difference image
        has a maximum above 1000 counts and a relative contrast above 0.7;
        otherwise it is 'bad'.
        '''
        #Determine the 95-percentile intensity within a radius of 15 pixels
        img_1 = rank.percentile(self.img, self.disk_win, p0=0.95)
        #Determine the 5-percentile intensity within a radius of 15 pixels
        img_0 = rank.percentile(self.img, self.disk_win, p0=0.05)
        #Subtract the 5-percentile from 95-percentile map
        self.img_diff = img_1 - img_0
        #Maximum of the difference value
        max_diff = np.max(self.img_diff)
        relative_contrast = 1.0*np.max(self.img_diff)/np.max(img_1)
        if relative_contrast > 0.7 and max_diff > 1000:
            return 'good'
        else:
            return 'bad'

    def low_pass_filter(self, sigma=2):
        '''
        Low pass filter (Gaussian) to remove high-frequency noise.
        '''
        self.img = gaussian_filter(self.img, sigma=sigma)

    def entropy_clusters(self):
        '''
        Create entropy clusters: threshold the local percentile-difference
        image, label connected regions, and build one Island per region.
        '''
        #Determine the 95-percentile intensity within a radius of 15 pixels
        img_1 = rank.percentile(self.img, self.disk_win, p0=0.95)
        #Determine the 5-percentile intensity within a radius of 15 pixels
        img_0 = rank.percentile(self.img, self.disk_win, p0=0.05)
        #Subtract the 5-percentile from 95-percentile map
        self.img_diff = img_1 - img_0
        #Cutoff value is the mean intensity of the background subtracted image
        self.cutoff_diff = np.mean(self.img_diff)
        #Define a mask for the pixels with intensity higher than cut-off value
        self.mask_diff = self.img_diff > self.cutoff_diff
        #Label each separate area on the mask with a different number
        self.labels_island, self.num_island = label(self.mask_diff)
        self.img_water = watershed(self.mask_diff, self.labels_island, mask=self.mask_diff)
        #Array to keep all the entropy islands
        self.islands = []
        #Prepare the entropy island objects
        for i in range(1, self.num_island+1):
            xy = np.nonzero(self.img_water == i)
            #If it is a single pixel island - ignore
            if len(xy[0]) < 2:
                continue
            new_island = Island()
            new_island.reduce_image(xy, self.img)
            #Assign the frame for the island
            new_island.frame = self
            #Append island to the list
            self.islands.append(new_island)

    def filter_function(self, island):
        '''
        Filter predicate: keep islands larger than window_island**2 pixels.
        '''
        area_constraint = island.area > self.window_island**2
        return area_constraint

    def filter_islands(self):
        '''
        Filter islands using the filter function.
        '''
        #Wrap in list() so the result can be indexed/iterated repeatedly
        #(Python 3 filter() returns a one-shot iterator)
        self.islands = list(filter(self.filter_function, self.islands))

    def skeletonize_islands(self):
        '''
        Decompose all islands into filaments, filter and skeletonize them,
        drop crossing filaments, and collect the survivors into
        self.filaments (flattened across islands).
        '''
        #Filament labels
        self.filament_counter = 0
        #First decompose to filaments
        [island.decompose_to_filaments() for island in self.islands]
        #Second filter filaments in the islands
        [island.filter_filaments() for island in self.islands]
        #Third skeletonize all the filaments in the islands
        [island.skeletonize_filaments() for island in self.islands]
        #Fourth remove crossing filaments
        [island.remove_crossing_filaments() for island in self.islands]
        #Finally add these filaments to Frame's filament list (flattened)
        self.filaments = [island.filaments for island in self.islands]
        self.filaments = [item for sub in self.filaments for item in sub]

    def filaments2filXYs(self):
        '''
        Convert filaments to filXYs - the reduced representation keeping each
        filament's pixel positions, width, density and midpoint.
        '''
        self.filXYs = []
        for filament in self.filaments:
            self.filXYs.append([filament.contour, filament.fil_width, filament.fil_density, filament.midpoint])
class Island:
    """
    A connected 'entropy island': a candidate region of a frame that may
    contain one or more filaments, stored as a cropped (reduced) image.
    """
    def __init__(self):
        self.area = 0            #Area of the island in pixels
        self.xy = []             #Island x-y coordinates in full-frame space
        self.xy_norm = [[], []]  #Normalized (origin-shifted) x-y coordinates
        self.x_min = []          #Minimum x coordinate of the island
        self.y_min = []          #Minimum y coordinate of the island
        self.x_dim = 0           #Reduced x-dim (bounding-box extent)
        self.y_dim = 0           #Reduced y-dim (bounding-box extent)
        self.img_reduced = None  #Reduced (cropped) image
        self.filaments = []      #Container that keeps filament objects
        self.min_filament = 4    #Minimum filament length accepted in the analysis (default:3)
        self.window_island = 15  #The radius of the disk used for local entropy measurement (default:15)
        self.frame = None        #Pointer to the frame that the Island belongs to

    def reduce_image(self, xy, img):
        '''
        Crop the island to its bounding box.

        Parameters
        ----------
        xy : tuple of arrays
            Pixel coordinates of the island (as returned by np.nonzero).
        img : 2-D array
            The full-frame image to copy intensities from.
        '''
        #Get the area
        self.area = len(xy[0])
        #Get the x,y coordinates
        self.xy = xy
        self.x_min = np.min(self.xy[0])
        self.y_min = np.min(self.xy[1])
        self.x_dim = np.max(self.xy[0]) - np.min(self.xy[0])
        self.y_dim = np.max(self.xy[1]) - np.min(self.xy[1])
        #Find out the normalized x-y dimensions
        self.xy_norm = [[], []]
        self.xy_norm[0] = self.xy[0]-self.x_min
        self.xy_norm[1] = self.xy[1]-self.y_min
        #Get the reduced image
        self.img_reduced = np.zeros((self.x_dim+1, self.y_dim+1))
        #Index with a tuple - indexing with a list of arrays is an error in
        #modern NumPy
        self.img_reduced[tuple(self.xy_norm)] = img[self.xy]

    def decompose_to_filaments(self):
        '''
        Decompose the entropy island into candidate filaments by Otsu
        thresholding and watershed labeling.
        '''
        #Pick only pixels with integer intensity values
        valid = self.img_reduced > 0
        #Determine the Otsu threshold value
        cutoff = threshold_otsu(self.img_reduced[valid])
        #Filament in coarse representation
        self.fil_reduced = self.img_reduced > cutoff
        self.img_fil = self.fil_reduced*self.img_reduced
        #Label the filaments
        fil_labels, fil_features = label(self.fil_reduced)
        #In a cluster there may be more than a single filament - each cluster corresponds to a filament
        fine_clusters = watershed(self.fil_reduced, fil_labels, mask=self.fil_reduced)
        #Start with an empty list of filaments
        self.filaments = []
        for i in range(1, fil_features+1):
            xy_bool = fine_clusters == i
            xy = np.nonzero(xy_bool)
            #If it is a single pixel island - ignore
            if len(xy[0]) < 2:
                continue
            new_filament = Filament()
            #Assign the label
            new_filament.label = self.frame.filament_counter
            #Assign the current island to the filament
            new_filament.island = self
            #Shrink the size of the filament image
            new_filament.reduce_image(xy)
            #Density of the filament in terms of intensity
            new_filament.fil_density = np.sum(1.0*self.img_reduced*xy_bool)/np.sum(xy_bool)
            #Fill in the holes in a filament
            new_filament.img_reduced = binary_fill_holes(new_filament.img_reduced)
            #Binary opening-closing needed for getting rid of extra branches
            new_filament.img_reduced = binary_closing(new_filament.img_reduced, structure=disk_1)
            #Add new filament to the list
            self.filaments.append(new_filament)
            #Increment filament counter
            self.frame.filament_counter += 1

    def filter_function(self, filament):
        '''
        Filter predicate: keep filaments that are large enough and do not
        touch the frame borders.
        '''
        size_constraint = np.sum(filament.img_reduced) > 10
        x_constraint = (filament.x_min+self.x_min > 5) and (filament.x_min+self.x_min+filament.x_dim < self.frame.width)
        y_constraint = (filament.y_min+self.y_min > 5) and (filament.y_min+self.y_min+filament.y_dim < self.frame.height)
        return size_constraint and x_constraint and y_constraint

    def filter_filaments(self):
        '''
        Filter the filaments using the filter function.
        '''
        #list() so the result survives repeated iteration under Python 3
        self.filaments = list(filter(self.filter_function, self.filaments))

    def skeletonize_filaments(self):
        '''
        Separate routine for skeletonizing the filaments.
        '''
        #Skeletonize
        [filament.make_skeleton() for filament in self.filaments]
        #Filter out filaments with length = 0
        self.filaments = list(filter(lambda fil: len(fil.contour) > 0, self.filaments))
        #Calculate filament properties
        [filament.calc_fil_stats() for filament in self.filaments]

    def remove_crossing_filaments(self):
        '''
        Remove crossing filaments (any skeleton with more or fewer than
        exactly two tips).
        '''
        self.filaments = list(filter(lambda filament: filament.num_tips == 2, self.filaments))
class Filament:
    """
    A single filament: its ordered pixel contour, reduced/skeletonized
    images, geometric properties, and links to neighbors in time.
    """
    def __init__(self):
        self.frame_no = 0          #The number of the frame in which the filament is located
        self.label = 0             #Filament label
        #Contour
        self.contour = []          #The x-y positions of the filament pixels in ordered form (tip to tip)
        self.coarse = []           #Coarse N point representation of the filament
        self.cm = []               #Center of mass
        self.midpoint = []         #Filament midpoint
        #Images
        self.edge = 5              #Extra padding in the reduced img representation
        self.img_reduced = []      #Reduced filament image
        self.img_skeleton = []     #Skeletonized image
        #Tips
        self.tips = []             #The coordinates of the tips
        self.num_tips = 0          #Number of tips
        #Filament properties
        self.fil_length = 0        #Filament length in pixel number
        self.fil_density = 0       #Filament intensity density
        self.fil_area = 0          #Filament area
        self.fil_width = 0         #Filament width
        self.end2end = 0           #Filament end-to-end distance
        #Filament links
        self.next_filament = None  #Next filament
        self.pre_filament = None   #Previous filament
        #The Island that the filament belongs to
        self.island = None         #Island that keeps the filament
        #Correlation function for the filament
        self.corr_len = None       #Correlation length profile to calculate persistence length
        #Link to a filament in the next and previous frame
        self.forward_link = None
        self.reverse_link = None
        #Elapsed time
        self.time = 0

    def reduce_image(self, xy):
        '''
        Build a padded, cropped binary image of the filament from its pixel
        coordinates.
        '''
        #Get the x,y coordinates
        self.xy = xy
        self.x_min = np.min(self.xy[0])
        self.y_min = np.min(self.xy[1])
        self.x_dim = np.max(self.xy[0]) - np.min(self.xy[0])
        self.y_dim = np.max(self.xy[1]) - np.min(self.xy[1])
        #Find out the normalized x-y dimensions (shifted by the padding)
        self.xy_norm = [[], []]
        self.xy_norm[0] = self.xy[0]-self.x_min+self.edge
        self.xy_norm[1] = self.xy[1]-self.y_min+self.edge
        #Get the reduced image (uint16 mask; True is stored as 1)
        self.img_reduced = np.zeros((self.x_dim+1+2*self.edge, self.y_dim+1+2*self.edge), dtype=np.uint16)
        #Index with a tuple - indexing with a list of arrays is an error in
        #modern NumPy
        self.img_reduced[tuple(self.xy_norm)] = True

    def find_tips(self):
        '''
        Find the tips of a skeletonized filament: skeleton pixels with
        exactly one skeleton neighbor (population of 2 including itself).
        '''
        neighbours = rank.pop(self.img_skeleton, sqr_3, mask=self.img_skeleton)
        tips = np.nonzero(neighbours*self.img_skeleton == 2)
        if len(tips[0]) == 0:
            self.num_tips = 0
            return np.array([])
        self.tips = np.hstack((np.vstack(tips[0]), np.vstack(tips[1])))
        self.num_tips = len(self.tips)
        return self.tips

    def make_skeleton(self):
        '''
        Skeletonize the reduced image and, for clean two-tip skeletons,
        prune bad pixels and build the ordered contour.
        '''
        #Skeletonize the image
        self.img_skeleton = skeletonize(self.img_reduced)
        #Find the tips
        self.find_tips()
        #Continue only if the number of tips is exactly 2
        if self.num_tips == 2:
            #Remove bad pixels
            self.remove_bad_pixels()
            #Make the links in a filament
            self.make_links()

    def sim_score(self, fil_other):
        '''
        Similarity score of this filament to another one.

        Returns
        -------
        (overlap_score, area_score, distance_score, fil_direction, mov_direction)
            Alignment overlap, swept-area score (with ZERO added so a
            logarithm can be taken), mean tip-to-tip distance, the relative
            contour direction (+1/-1), and the movement direction (+1/-1).
        '''
        short_current = False
        if len(self.contour) < len(fil_other.contour):
            short_current = True
        #Difference (tangent) vectors along each contour
        contour1_diff = self.contour[1:] - self.contour[:-1]
        contour2_diff = fil_other.contour[1:] - fil_other.contour[:-1]
        #Length of the short and long vectors
        contour1_len = len(self.contour)
        contour2_len = len(fil_other.contour)
        #Short contour length
        short_con_len = min((contour1_len, contour2_len))
        #Short filament length
        short_fil_len = min((self.fil_length, fil_other.fil_length))
        #Number of sliding-window comparisons
        multiplicate_measures = abs(contour1_len - contour2_len) + 1
        #Keep the direction of the current filament with respect to each other and move direction
        fil_direction = 1
        mov_direction = 1
        #Calculate overlap score by sliding the short contour along the long one
        overlap_score = 0
        for i in range(multiplicate_measures):
            if short_current:
                len1 = np.sum(np.sqrt(np.sum(contour2_diff[i:i+short_con_len-1, :]**2, axis=1)))
                len2 = np.sum(np.sqrt(np.sum(contour1_diff**2, axis=1)))
                if len1 > 0 and len2 > 0:
                    overlap_score += np.sum(contour2_diff[i:i+short_con_len-1, :]*contour1_diff)
            else:
                len1 = np.sum(np.sqrt(np.sum(contour1_diff[i:i+short_con_len-1, :]**2, axis=1)))
                len2 = np.sum(np.sqrt(np.sum(contour2_diff**2, axis=1)))
                if len1 > 0 and len2 > 0:
                    overlap_score += np.sum(contour1_diff[i:i+short_con_len-1, :]*contour2_diff)
        overlap_score /= 1.0*(multiplicate_measures*short_fil_len)
        #Find the direction: negative overlap means the contours are anti-parallel
        if overlap_score > 0:
            fil_direction = 1
        else:
            fil_direction = -1
        #Calculate area and distance scores
        area_score = 0
        distance_score = 0
        move_score = 0
        for i in range(multiplicate_measures):
            if short_current:
                contour2_1_diff = fil_other.contour[i:i+short_con_len, :][::fil_direction] - self.contour
                dot_prod = contour2_1_diff[:-1, :]*contour1_diff
                cross_prod = contour2_1_diff[:-1, :]*contour1_diff[:, [1, 0]]
                cross_prod = np.fabs(cross_prod[:, 1]-cross_prod[:, 0])
                contour1_diff_len = np.mean(np.sqrt(np.sum(contour1_diff**2, axis=1)))
            else:
                contour2_1_diff = fil_other.contour[::fil_direction] - self.contour[i:i+short_con_len, :]
                dot_prod = contour2_1_diff[:-1, :]*contour1_diff[i:i+short_con_len-1, :]
                cross_prod = contour2_1_diff[:-1, :]*contour1_diff[i:i+short_con_len-1, [1, 0]]
                cross_prod = np.fabs(cross_prod[:, 1]-cross_prod[:, 0])
                contour1_diff_len = np.mean(np.sqrt(np.sum(contour1_diff[i:i+short_con_len-1, :]**2, axis=1)))
            area_score += np.sum(cross_prod)
            distance_length = np.mean(np.sqrt(np.sum(contour2_1_diff[:-1, :]**2, axis=1)))
            distance_score += distance_length
            if distance_length > 0 and contour1_diff_len > 0:
                move_score += np.mean(np.sum(dot_prod, axis=1))/(distance_length*contour1_diff_len)
        #ZERO added for taking logarithm
        area_score = area_score/(short_fil_len*multiplicate_measures) + ZERO
        distance_score /= multiplicate_measures
        move_score /= multiplicate_measures
        #Filament move direction
        if move_score > 0:
            mov_direction = 1
        else:
            mov_direction = -1
        return overlap_score, area_score, distance_score, fil_direction, mov_direction

    #Remove strongly connected paths - fortunately not very often used (suitable for tip systems only)
    def remove_bad_pixels(self):
        tip_s = self.tips[0, :]
        tip_e = self.tips[1, :]
        bad_fil = True
        while bad_fil == True:
            #Neighborhood information of all pixels
            nb_all = rank.pop(self.img_skeleton, disk_1, mask=self.img_skeleton)
            #Find only pixels that are connected to 3 neighboring pixels on the faces
            nb_3 = np.nonzero(nb_all*self.img_skeleton == 4)
            if len(nb_3[0]) > 0:
                bad_1 = [nb_3[0][0], nb_3[1][0]]
                self.img_skeleton[bad_1[0]][bad_1[1]] = 0
                new_tips = self.find_tips()
                #Remove any newly created tips that are not the original ones
                for i in range(self.num_tips):
                    new_tip = new_tips[i, :]
                    if np.all(new_tip != tip_s) and np.all(new_tip != tip_e):
                        self.img_skeleton[new_tip[0]][new_tip[1]] = 0
            else:
                bad_fil = False

    #N point representation of the filament
    def N_point(self, N=3):
        points_floor = np.array([np.floor(x) for x in np.linspace(0, self.num_contour_pixels-1, N)], dtype=int)
        points_ceil = np.array([np.ceil(x) for x in np.linspace(0, self.num_contour_pixels-1, N)], dtype=int)
        self.coarse = 0.5*(1.0*self.contour[points_floor, :]+1.0*self.contour[points_ceil, :])
        return self.coarse

    #Calculate filament length as the sum of inter-pixel distances
    def calc_fil_length(self):
        self.num_contour_pixels = len(self.contour[:, 0])
        dist = self.contour[1:, :] - self.contour[:-1, :]
        self.fil_length = np.sum(np.sqrt(np.sum(dist**2, axis=1)))

    #Calculate filament stats
    def calc_fil_stats(self):
        #Calculate filament length
        self.calc_fil_length()
        #Calculate center of mass
        self.cm = np.mean(1.0*self.contour, axis=0)
        #Filament midpoint - // keeps the index an int under Python 3
        self.midpoint = self.contour[len(self.contour)//2-1]
        #Determine filament area
        self.fil_area = np.sum(self.img_reduced)
        #Calculate the filament width
        self.fil_width = 0.0
        if self.fil_length > 0:
            self.fil_width = 1.0*self.fil_area/self.fil_length
        #Prepare coarse representation
        self.N_point()

    #Calculate filament properties from contour
    def calc_props(self):
        '''
        Calculate filament properties (length, center of mass, midpoint,
        coarse representation) from the contour alone.
        '''
        #Filament length
        self.calc_fil_length()
        #Calculate center of mass
        self.cm = np.mean(self.contour, axis=0)
        #Filament midpoint - // keeps the index an int under Python 3
        self.midpoint = self.contour[len(self.contour)//2-1]
        #Prepare coarse representation
        self.N_point()

    def correlation_function(self, P=3):
        '''
        Calculate the tangent-correlation function used to determine the
        persistence length. P is the tangent-vector span in pixels.
        '''
        #Tangent vectors
        tan_vecs = self.contour[:-P, :] - self.contour[P:, :]
        #Length vectors
        len_vecs = vec_length(tan_vecs)
        num_vecs = len(tan_vecs)
        self.corr_len = [[num_vecs, 1.0]]
        for i in range(1, len(tan_vecs)):
            #Skip lags where any tangent vector has zero length
            if np.sum(len_vecs[:-i]*len_vecs[i:] == 0) == 0:
                self.corr_len.append([num_vecs-i, np.mean(np.sum(tan_vecs[:-i, :]*tan_vecs[i:, :], axis=1)/(len_vecs[:-i]*len_vecs[i:]))])
        #If there is invalid number in the array discard the correlation profile
        if np.sum(np.isnan(self.corr_len)) > 0:
            self.corr_len = []
        self.corr_len = np.array(self.corr_len)

    #Filament linked-list for a filament with two-tips
    def make_links(self):
        '''
        Walk the skeleton from one tip to the other, building the ordered
        contour. Assumes a clean two-tip skeleton (remove_bad_pixels has
        run); NOTE(review): if a pixel ever has no unvisited neighbor the
        walk raises UnboundLocalError - confirm this cannot happen upstream.
        '''
        self.contour = []
        self.num_contour_pixels = int(np.sum(self.img_skeleton))
        img_c = self.img_skeleton.copy()
        tip_s = self.tips[0, :]
        tip_e = self.tips[1, :]
        self.contour.append(tip_s)
        for i in range(self.num_contour_pixels-1):
            #Consume the current pixel, then step to the next 8-connected one
            img_c[tip_s[0], tip_s[1]] = 0
            if img_c[tip_s[0]+1, tip_s[1]] == 1:
                new_tip_s = [tip_s[0]+1, tip_s[1]]
            elif img_c[tip_s[0], tip_s[1]+1] == 1:
                new_tip_s = [tip_s[0], tip_s[1]+1]
            elif img_c[tip_s[0]+1, tip_s[1]+1] == 1:
                new_tip_s = [tip_s[0]+1, tip_s[1]+1]
            elif img_c[tip_s[0]-1, tip_s[1]] == 1:
                new_tip_s = [tip_s[0]-1, tip_s[1]]
            elif img_c[tip_s[0], tip_s[1]-1] == 1:
                new_tip_s = [tip_s[0], tip_s[1]-1]
            elif img_c[tip_s[0]-1, tip_s[1]-1] == 1:
                new_tip_s = [tip_s[0]-1, tip_s[1]-1]
            elif img_c[tip_s[0]+1, tip_s[1]-1] == 1:
                new_tip_s = [tip_s[0]+1, tip_s[1]-1]
            elif img_c[tip_s[0]-1, tip_s[1]+1] == 1:
                new_tip_s = [tip_s[0]-1, tip_s[1]+1]
            tip_s = new_tip_s
            self.contour.append(tip_s)
        self.contour = np.array(self.contour)
        #Correct the indices of contour in the context of full image
        #Determine the offsets
        offset_x = self.x_min + self.island.x_min - self.edge
        offset_y = self.y_min + self.island.y_min - self.edge
        self.contour = self.contour + np.array([offset_x, offset_y])
        return self.contour
/CleanAdminDjango-1.5.3.1.tar.gz/CleanAdminDjango-1.5.3.1/django/forms/util.py | from __future__ import unicode_literals
from django.conf import settings
from django.utils.html import format_html, format_html_join
from django.utils.encoding import force_text, python_2_unicode_compatible
from django.utils.safestring import mark_safe
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
# Import ValidationError so that it can be imported from this
# module to maintain backwards compatibility.
from django.core.exceptions import ValidationError
def flatatt(attrs):
    """
    Render a dict of HTML attributes as a single safe string.

    Attributes are emitted sorted by key, each as a leading space followed
    by a key="value" XML-style pair. Keys are assumed not to need escaping;
    an empty dict yields an empty string. The result is 'mark_safe'd by
    format_html_join.
    """
    pairs = sorted(attrs.items())
    return format_html_join('', ' {0}="{1}"', pairs)
@python_2_unicode_compatible
class ErrorDict(dict):
    """
    Form errors keyed by field name, with HTML and plain-text renderings.

    Keys are field names; values are the corresponding error lists.
    """
    def __str__(self):
        # The default textual form is the HTML <ul> rendering.
        return self.as_ul()

    def as_ul(self):
        """Render the errors as a safe ``<ul class="errorlist">`` fragment."""
        if not self:
            return ''
        items = format_html_join(
            '', '<li>{0}{1}</li>',
            ((field, force_text(errors)) for field, errors in self.items()),
        )
        return format_html('<ul class="errorlist">{0}</ul>', items)

    def as_text(self):
        """Render the errors as an indented plain-text bullet list."""
        blocks = []
        for field, errors in self.items():
            detail = '\n'.join(' * %s' % force_text(error) for error in errors)
            blocks.append('* %s\n%s' % (field, detail))
        return '\n'.join(blocks)
@python_2_unicode_compatible
class ErrorList(list):
    """
    A list of form errors with HTML and plain-text renderings.
    """
    def __str__(self):
        # The default textual form is the HTML <ul> rendering.
        return self.as_ul()

    def as_ul(self):
        """Render the errors as a safe ``<ul class="errorlist">`` fragment."""
        if not self:
            return ''
        items = format_html_join(
            '', '<li>{0}</li>',
            ((force_text(error),) for error in self),
        )
        return format_html('<ul class="errorlist">{0}</ul>', items)

    def as_text(self):
        """Render the errors as a plain-text bullet list."""
        if not self:
            return ''
        return '\n'.join('* %s' % force_text(error) for error in self)

    def __repr__(self):
        # Repr of the force_text'd items, so lazy strings display readably.
        return repr([force_text(error) for error in self])
# Utilities for time zone support in DateTimeField et al.
def from_current_timezone(value):
    """
    When time zone support is enabled, convert naive datetimes
    entered in the current time zone to aware datetimes.

    Values are returned unchanged when USE_TZ is off, when the value is
    ``None``, or when it is already timezone-aware.
    """
    # Guard clause: nothing to convert unless TZ support is on and the
    # value is a naive datetime.
    if not settings.USE_TZ or value is None or not timezone.is_naive(value):
        return value
    current_timezone = timezone.get_current_timezone()
    try:
        return timezone.make_aware(value, current_timezone)
    except Exception:
        # make_aware fails for ambiguous/non-existent local times (DST gaps).
        raise ValidationError(_('%(datetime)s couldn\'t be interpreted '
                                'in time zone %(current_timezone)s; it '
                                'may be ambiguous or it may not exist.')
                              % {'datetime': value,
                                 'current_timezone': current_timezone})
def to_current_timezone(value):
    """
    When time zone support is enabled, convert aware datetimes
    to naive datetimes in the current time zone for display.
    """
    # Aware values are localized then stripped of tzinfo; naive values and
    # None pass through unchanged (also when USE_TZ is off).
    if settings.USE_TZ and value is not None and timezone.is_aware(value):
        current_timezone = timezone.get_current_timezone()
        return timezone.make_naive(value, current_timezone)
    return value
/MAPIE-0.6.5-py3-none-any.whl/mapie/regression/regression.py | from __future__ import annotations
from typing import Iterable, List, Optional, Tuple, Union, cast
import numpy as np
from joblib import Parallel, delayed
from sklearn.base import BaseEstimator, RegressorMixin, clone
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import BaseCrossValidator, ShuffleSplit
from sklearn.pipeline import Pipeline
from sklearn.utils import _safe_indexing, check_random_state
from sklearn.utils.validation import (_check_y, _num_samples, check_is_fitted,
indexable)
from mapie._compatibility import np_nanquantile
from mapie._typing import ArrayLike, NDArray
from mapie.aggregation_functions import aggregate_all, phi2D
from mapie.conformity_scores import ConformityScore
from mapie.utils import (check_alpha, check_alpha_and_n_samples,
check_conformity_score, check_cv,
check_estimator_fit_predict, check_n_features_in,
check_n_jobs, check_nan_in_aposteriori_prediction,
check_null_weight, check_verbose, fit_estimator)
class MapieRegressor(BaseEstimator, RegressorMixin):
"""
Prediction interval with out-of-fold conformity scores.
This class implements the jackknife+ strategy and its variations
for estimating prediction intervals on single-output data. The
idea is to evaluate out-of-fold conformity scores (signed residuals,
absolute residuals, residuals normalized by the predicted mean...)
on hold-out validation sets and to deduce valid confidence intervals
with strong theoretical guarantees.
Parameters
----------
estimator: Optional[RegressorMixin]
Any regressor with scikit-learn API
(i.e. with ``fit`` and ``predict`` methods).
If ``None``, estimator defaults to a ``LinearRegression`` instance.
By default ``None``.
method: str
Method to choose for prediction interval estimates.
Choose among:
- ``"naive"``, based on training set conformity scores,
- ``"base"``, based on validation sets conformity scores,
- ``"plus"``, based on validation conformity scores and
testing predictions,
- ``"minmax"``, based on validation conformity scores and
testing predictions (min/max among cross-validation clones).
By default ``"plus"``.
cv: Optional[Union[int, str, BaseCrossValidator]]
The cross-validation strategy for computing conformity scores.
It directly drives the distinction between jackknife and cv variants.
Choose among:
- ``None``, to use the default 5-fold cross-validation
- integer, to specify the number of folds.
If equal to ``-1``, equivalent to
``sklearn.model_selection.LeaveOneOut()``.
- CV splitter: any ``sklearn.model_selection.BaseCrossValidator``
Main variants are:
- ``sklearn.model_selection.LeaveOneOut`` (jackknife),
- ``sklearn.model_selection.KFold`` (cross-validation),
- ``subsample.Subsample`` object (bootstrap).
- ``"split"``, does not involve cross-validation but a division
of the data into training and calibration subsets. The splitter
used is the following: ``sklearn.model_selection.ShuffleSplit``.
- ``"prefit"``, assumes that ``estimator`` has been fitted already,
and the ``method`` parameter is ignored.
All data provided in the ``fit`` method is then used
for computing conformity scores only.
At prediction time, quantiles of these conformity scores are used
to provide a prediction interval with fixed width.
The user has to take care manually that data for model fitting and
conformity scores estimate are disjoint.
By default ``None``.
test_size: Optional[Union[int, float]]
If ``float``, should be between ``0.0`` and ``1.0`` and represent the
proportion of the dataset to include in the test split. If ``int``,
represents the absolute number of test samples. If ``None``,
it will be set to ``0.1``.
If cv is not ``"split"``, ``test_size`` is ignored.
By default ``None``.
n_jobs: Optional[int]
Number of jobs for parallel processing using joblib
via the "locky" backend.
If ``-1`` all CPUs are used.
If ``1`` is given, no parallel computing code is used at all,
which is useful for debugging.
For ``n_jobs`` below ``-1``, ``(n_cpus + 1 - n_jobs)`` are used.
``None`` is a marker for `unset` that will be interpreted as
``n_jobs=1`` (sequential execution).
By default ``None``.
agg_function: Optional[str]
Determines how to aggregate predictions from perturbed models, both at
training and prediction time.
If ``None``, it is ignored except if ``cv`` class is ``Subsample``,
in which case an error is raised.
If ``"mean"`` or ``"median"``, returns the mean or median of the
predictions computed from the out-of-folds models.
Note: if you plan to set the ``ensemble`` argument to ``True`` in the
``predict`` method, you have to specify an aggregation function.
Otherwise an error would be raised.
The Jackknife+ interval can be interpreted as an interval around the
median prediction, and is guaranteed to lie inside the interval,
unlike the single estimator predictions.
When the cross-validation strategy is ``Subsample`` (i.e. for the
Jackknife+-after-Bootstrap method), this function is also used to
aggregate the training set in-sample predictions.
If ``cv`` is ``"prefit"`` or ``"split"``, ``agg_function`` is ignored.
By default ``"mean"``.
verbose: int
The verbosity level, used with joblib for multiprocessing.
The frequency of the messages increases with the verbosity level.
If it more than ``10``, all iterations are reported.
Above ``50``, the output is sent to stdout.
By default ``0``.
conformity_score: Optional[ConformityScore]
ConformityScore instance.
It defines the link between the observed values, the predicted ones
and the conformity scores. For instance, the default ``None`` value
        corresponds to a conformity score which assumes
y_obs = y_pred + conformity_score.
- ``None``, to use the default ``AbsoluteConformityScore`` conformity
score
- ConformityScore: any ``ConformityScore`` class
By default ``None``.
random_state: Optional[Union[int, RandomState]]
Pseudo random number generator state used for random sampling.
Pass an int for reproducible output across multiple function calls.
By default ``None``.
Attributes
----------
valid_methods_: List[str]
List of all valid methods.
single_estimator_: sklearn.RegressorMixin
Estimator fitted on the whole training set.
estimators_: list
List of out-of-folds estimators.
conformity_scores_: ArrayLike of shape (n_samples_train,)
Conformity scores between ``y_train`` and ``y_pred``.
k_: ArrayLike
- Array of nans, of shape (len(y), 1) if ``cv`` is ``"prefit"``
(defined but not used)
- Dummy array of folds containing each training sample, otherwise.
Of shape (n_samples_train, cv.get_n_splits(X_train, y_train)).
n_features_in_: int
Number of features passed to the ``fit`` method.
References
----------
Rina Foygel Barber, Emmanuel J. Candès,
Aaditya Ramdas, and Ryan J. Tibshirani.
"Predictive inference with the jackknife+."
Ann. Statist., 49(1):486-507, February 2021.
Byol Kim, Chen Xu, and Rina Foygel Barber.
"Predictive Inference Is Free with the Jackknife+-after-Bootstrap."
34th Conference on Neural Information Processing Systems (NeurIPS 2020).
Examples
--------
>>> import numpy as np
>>> from mapie.regression import MapieRegressor
>>> from sklearn.linear_model import LinearRegression
>>> X_toy = np.array([[0], [1], [2], [3], [4], [5]])
>>> y_toy = np.array([5, 7.5, 9.5, 10.5, 12.5, 15])
>>> clf = LinearRegression().fit(X_toy, y_toy)
>>> mapie_reg = MapieRegressor(estimator=clf, cv="prefit")
>>> mapie_reg = mapie_reg.fit(X_toy, y_toy)
>>> y_pred, y_pis = mapie_reg.predict(X_toy, alpha=0.5)
>>> print(y_pis[:, :, 0])
[[ 4.95714286 5.61428571]
[ 6.84285714 7.5 ]
[ 8.72857143 9.38571429]
[10.61428571 11.27142857]
[12.5 13.15714286]
[14.38571429 15.04285714]]
>>> print(y_pred)
[ 5.28571429 7.17142857 9.05714286 10.94285714 12.82857143 14.71428571]
"""
cv_need_agg_function_ = ["Subsample"]
no_agg_cv_ = ["prefit", "split"]
valid_methods_ = ["naive", "base", "plus", "minmax"]
no_agg_methods_ = ["naive", "base"]
valid_agg_functions_ = [None, "median", "mean"]
ensemble_agg_functions_ = ["median", "mean"]
fit_attributes = [
"single_estimator_",
"estimators_",
"k_",
"conformity_scores_",
"conformity_score_function_",
"n_features_in_",
]
    def __init__(
        self,
        estimator: Optional[RegressorMixin] = None,
        method: str = "plus",
        cv: Optional[Union[int, str, BaseCrossValidator]] = None,
        test_size: Optional[Union[int, float]] = None,
        n_jobs: Optional[int] = None,
        agg_function: Optional[str] = "mean",
        verbose: int = 0,
        conformity_score: Optional[ConformityScore] = None,
        random_state: Optional[Union[int, np.random.RandomState]] = None,
    ) -> None:
        # Store constructor arguments untouched (scikit-learn convention:
        # all validation is deferred to ``fit``).
        self.estimator = estimator
        self.method = method
        self.cv = cv
        self.test_size = test_size
        self.n_jobs = n_jobs
        self.agg_function = agg_function
        self.verbose = verbose
        self.conformity_score = conformity_score
        self.random_state = random_state
    def _check_parameters(self) -> None:
        """
        Perform several checks on input parameters.

        Raises
        ------
        ValueError
            If parameters are not valid.
        """
        self._check_method(self.method)
        check_n_jobs(self.n_jobs)
        check_verbose(self.verbose)
        # NOTE(review): check_random_state's return value is discarded here —
        # the call is used purely to validate ``random_state``.
        check_random_state(self.random_state)
def _check_method(
self, method: str
) -> str:
"""
Check if ``method`` is correct.
Parameters
----------
method: str
Method's name to check.
Returns
-------
str
``method`` itself.
Raises
------
ValueError
If ``method`` is not in ``self.valid_methods_``.
"""
if method not in self.valid_methods_:
raise ValueError(
f"Invalid method. Allowed values are {self.valid_methods_}."
)
else:
return method
    def _check_agg_function(
        self, agg_function: Optional[str] = None
    ) -> Optional[str]:
        """
        Check if ``agg_function`` is correct, and consistent with other
        arguments.

        Parameters
        ----------
        agg_function: Optional[str]
            Aggregation function's name to check, by default ``None``.

        Returns
        -------
        Optional[str]
            ``agg_function`` itself, ``None`` (when ``cv`` needs no
            aggregation), or the ``"mean"`` fallback.

        Raises
        ------
        ValueError
            If ``agg_function`` is not in [``None``, ``"mean"``, ``"median"``],
            or is ``None`` while cv class is in ``cv_need_agg_function_``.
        """
        if agg_function not in self.valid_agg_functions_:
            raise ValueError(
                "Invalid aggregation function "
                f"Allowed values are '{self.valid_agg_functions_}'."
            )
        elif (agg_function is None) and (
            type(self.cv).__name__ in self.cv_need_agg_function_
        ):
            # Subsample (Jackknife+-after-Bootstrap) must aggregate in-sample
            # predictions, so an explicit function is mandatory.
            raise ValueError(
                "You need to specify an aggregation function when "
                f"cv's type is in {self.cv_need_agg_function_}."
            )
        elif (agg_function is not None) or (self.cv in self.no_agg_cv_):
            # Note: this branch also returns ``None`` when cv is "prefit" or
            # "split" — no aggregation happens there, so None is acceptable.
            return agg_function
        else:
            # agg_function is None with an aggregating cv: default to "mean".
            return "mean"
def _check_estimator(
self, estimator: Optional[RegressorMixin] = None
) -> RegressorMixin:
"""
Check if estimator is ``None``,
and returns a ``LinearRegression`` instance if necessary.
If the ``cv`` attribute is ``"prefit"``,
check if estimator is indeed already fitted.
Parameters
----------
estimator: Optional[RegressorMixin]
Estimator to check, by default ``None``.
Returns
-------
RegressorMixin
The estimator itself or a default ``LinearRegression`` instance.
Raises
------
ValueError
If the estimator is not ``None``
and has no ``fit`` nor ``predict`` methods.
NotFittedError
If the estimator is not fitted
and ``cv`` attribute is ``"prefit"``.
"""
if estimator is None:
return LinearRegression()
else:
check_estimator_fit_predict(estimator)
if self.cv == "prefit":
if isinstance(estimator, Pipeline):
check_is_fitted(estimator[-1])
else:
check_is_fitted(estimator)
return estimator
    def _check_ensemble(
        self, ensemble: bool,
    ) -> None:
        """
        Check if ``ensemble`` is ``False`` and if ``self.agg_function``
        is ``None``. Else raise error.

        Parameters
        ----------
        ensemble: bool
            ``ensemble`` argument to check the coherence with
            ``self.agg_function``.

        Raises
        ------
        ValueError
            If ``ensemble`` is ``True`` and ``self.agg_function`` is ``None``.
        """
        # Ensembled predictions need a concrete aggregation function.
        if ensemble and (self.agg_function is None):
            raise ValueError(
                "If ensemble is True, the aggregation function has to be "
                f"in '{self.ensemble_agg_functions_}'."
            )
    def _fit_and_predict_oof_model(
        self,
        estimator: RegressorMixin,
        X: ArrayLike,
        y: ArrayLike,
        train_index: ArrayLike,
        val_index: ArrayLike,
        sample_weight: Optional[ArrayLike] = None,
    ) -> Tuple[RegressorMixin, NDArray, ArrayLike]:
        """
        Fit a single out-of-fold model on a given training set and
        perform predictions on a test set.

        Parameters
        ----------
        estimator: RegressorMixin
            Estimator to train.

        X: ArrayLike of shape (n_samples, n_features)
            Input data.

        y: ArrayLike of shape (n_samples,)
            Input labels.

        train_index: ArrayLike of shape (n_samples_train)
            Training data indices.

        val_index: ArrayLike of shape (n_samples_val)
            Validation data indices.

        sample_weight: Optional[ArrayLike] of shape (n_samples,)
            Sample weights. If ``None``, then samples are equally weighted.
            By default ``None``.

        Returns
        -------
        Tuple[RegressorMixin, NDArray, ArrayLike]

        - [0]: RegressorMixin, fitted estimator
        - [1]: NDArray of shape (n_samples_val,),
          estimator predictions on the validation fold.
        - [2]: ArrayLike of shape (n_samples_val,),
          validation data indices.
        """
        X_train = _safe_indexing(X, train_index)
        y_train = _safe_indexing(y, train_index)
        X_val = _safe_indexing(X, val_index)
        # Only pass sample weights through when provided, so estimators that
        # do not accept them keep working.
        if sample_weight is None:
            estimator = fit_estimator(estimator, X_train, y_train)
        else:
            sample_weight_train = _safe_indexing(sample_weight, train_index)
            estimator = fit_estimator(
                estimator, X_train, y_train, sample_weight_train
            )
        # An empty validation fold yields an empty prediction array instead of
        # calling predict on zero samples.
        if _num_samples(X_val) > 0:
            y_pred = estimator.predict(X_val)
        else:
            y_pred = np.array([])
        return estimator, y_pred, val_index
    def _aggregate_with_mask(
        self,
        x: NDArray,
        k: NDArray
    ) -> NDArray:
        """
        Take the array of predictions, made by the refitted estimators,
        on the testing set, and the 1-or-nan array indicating for each training
        sample which one to integrate, and aggregate to produce phi-{t}(x_t)
        for each training sample x_t.

        Parameters:
        -----------
        x: ArrayLike of shape (n_samples_test, n_estimators)
            Array of predictions, made by the refitted estimators,
            for each sample of the testing set.

        k: ArrayLike of shape (n_samples_training, n_estimators)
            1-or-nan array: indicates whether to integrate the prediction
            of a given estimator into the aggregation, for each training
            sample.

        Returns:
        --------
        ArrayLike of shape (n_samples_test,)
            Array of aggregated predictions for each testing sample.

        Raises
        ------
        ValueError
            If called for a non-aggregating method/cv, or if
            ``agg_function`` is unrecognized.
        """
        if self.method in self.no_agg_methods_ \
                or self.cv in self.no_agg_cv_:
            raise ValueError(
                "There should not be aggregation of predictions "
                f"if cv is in '{self.no_agg_cv_}' "
                f"or if method is in '{self.no_agg_methods_}'."
            )
        elif self.agg_function == "median":
            return phi2D(A=x, B=k, fun=lambda x: np.nanmedian(x, axis=1))
        # To aggregate with mean() the aggregation could be done
        # with phi2D(A=x, B=k, fun=lambda x: np.nanmean(x, axis=1).
        # However, phi2D contains a np.apply_along_axis loop which
        # is much slower than the matrices multiplication that can
        # be used to compute the means.
        elif self.agg_function in ["mean", None]:
            # Row-normalized mask turns the matmul into a per-row nan-mean.
            K = np.nan_to_num(k, nan=0.0)
            return np.matmul(x, (K / (K.sum(axis=1, keepdims=True))).T)
        else:
            raise ValueError("The value of self.agg_function is not correct")
    def _pred_multi(
        self,
        X: ArrayLike
    ) -> NDArray:
        """
        Return a prediction per train sample for each test sample, by
        aggregation with matrix ``k_``.

        Parameters
        ----------
        X: NDArray of shape (n_samples_test, n_features)
            Input data

        Returns
        -------
        NDArray of shape (n_samples_test, n_samples_train)
        """
        # One column of predictions per out-of-fold estimator.
        y_pred_multi = np.column_stack(
            [e.predict(X) for e in self.estimators_]
        )
        # At this point, y_pred_multi is of shape
        # (n_samples_test, n_estimators_). The method
        # ``_aggregate_with_mask`` fits it to the right size
        # thanks to the shape of k_.
        y_pred_multi = self._aggregate_with_mask(y_pred_multi, self.k_)
        return y_pred_multi
    def fit(
        self,
        X: ArrayLike,
        y: ArrayLike,
        sample_weight: Optional[ArrayLike] = None,
    ) -> MapieRegressor:
        """
        Fit estimator and compute conformity scores used for
        prediction intervals.

        Fit the base estimator under the ``single_estimator_`` attribute.
        Fit all cross-validated estimator clones
        and rearrange them into a list, the ``estimators_`` attribute.
        Out-of-fold conformity scores are stored under
        the ``conformity_scores_`` attribute.

        Parameters
        ----------
        X: ArrayLike of shape (n_samples, n_features)
            Training data.

        y: ArrayLike of shape (n_samples,)
            Training labels.

        sample_weight: Optional[ArrayLike] of shape (n_samples,)
            Sample weights for fitting the out-of-fold models.
            If ``None``, then samples are equally weighted.
            If some weights are null,
            their corresponding observations are removed
            before the fitting process and hence have no conformity scores.
            If weights are non-uniform,
            conformity scores are still uniformly weighted.

            By default ``None``.

        Returns
        -------
        MapieRegressor
            The model itself.
        """
        # Checks
        self._check_parameters()
        cv = check_cv(
            self.cv, test_size=self.test_size, random_state=self.random_state
        )
        estimator = self._check_estimator(self.estimator)
        agg_function = self._check_agg_function(self.agg_function)
        X, y = indexable(X, y)
        y = _check_y(y)
        sample_weight = cast(Optional[NDArray], sample_weight)
        self.n_features_in_ = check_n_features_in(X, cv, estimator)
        # Null-weight samples are dropped before fitting (see docstring).
        sample_weight, X, y = check_null_weight(sample_weight, X, y)
        self.conformity_score_function_ = check_conformity_score(
            self.conformity_score
        )
        y = cast(NDArray, y)
        n_samples = _num_samples(y)
        # Initialization
        self.estimators_: List[RegressorMixin] = []
        # Work
        if cv == "prefit":
            # Prefit: the user-supplied estimator is used as-is and all data
            # serve only to compute conformity scores. k_ is a dummy array.
            self.single_estimator_ = estimator
            y_pred = self.single_estimator_.predict(X)
            self.k_ = np.full(
                shape=(n_samples, 1), fill_value=np.nan, dtype=float
            )
        else:
            cv = cast(BaseCrossValidator, cv)
            # k_[i, j] == 1 iff sample i was in the validation fold of
            # estimator j; nan otherwise.
            self.k_ = np.full(
                shape=(n_samples, cv.get_n_splits(X, y)),
                fill_value=np.nan,
                dtype=float,
            )
            self.single_estimator_ = fit_estimator(
                clone(estimator), X, y, sample_weight
            )
            if self.method == "naive":
                # Naive method scores on training-set predictions directly.
                y_pred = self.single_estimator_.predict(X)
            else:
                # Fit one clone per fold in parallel; each returns its fitted
                # estimator, validation predictions and validation indices.
                outputs = Parallel(n_jobs=self.n_jobs, verbose=self.verbose)(
                    delayed(self._fit_and_predict_oof_model)(
                        clone(estimator),
                        X,
                        y,
                        train_index,
                        val_index,
                        sample_weight,
                    )
                    for train_index, val_index in cv.split(X)
                )
                self.estimators_, predictions, val_indices = map(
                    list, zip(*outputs)
                )
                # Scatter each fold's predictions into a (n_samples, n_folds)
                # matrix, nan where a sample was not in that fold.
                pred_matrix = np.full(
                    shape=(n_samples, cv.get_n_splits(X, y)),
                    fill_value=np.nan,
                    dtype=float,
                )
                for i, val_ind in enumerate(val_indices):
                    pred_matrix[val_ind, i] = np.array(
                        predictions[i], dtype=float
                    )
                    self.k_[val_ind, i] = 1
                check_nan_in_aposteriori_prediction(pred_matrix)
                # One out-of-fold prediction per training sample.
                y_pred = aggregate_all(agg_function, pred_matrix)
        self.conformity_scores_ = (
            self.conformity_score_function_.get_conformity_scores(y, y_pred)
        )
        # For "split" cv (single ShuffleSplit fold), the lone fold estimator
        # becomes the single estimator.
        if isinstance(cv, ShuffleSplit):
            self.single_estimator_ = self.estimators_[0]
        return self
    def predict(
        self,
        X: ArrayLike,
        ensemble: bool = False,
        alpha: Optional[Union[float, Iterable[float]]] = None,
    ) -> Union[NDArray, Tuple[NDArray, NDArray]]:
        """
        Predict target on new samples with confidence intervals.

        Conformity scores from the training set and predictions
        from the model clones are central to the computation.
        Prediction Intervals for a given ``alpha`` are deduced from either

        - quantiles of conformity scores (``naive`` and ``base`` methods),
        - quantiles of (predictions +/- conformity scores) (``plus`` method),
        - quantiles of (max/min(predictions) +/- conformity scores)
          (``minmax`` method).

        Parameters
        ----------
        X: ArrayLike of shape (n_samples, n_features)
            Test data.

        ensemble: bool
            Boolean determining whether the predictions are ensembled or not.
            If ``False``, predictions are those of the model trained on the
            whole training set.
            If ``True``, predictions from perturbed models are aggregated by
            the aggregation function specified in the ``agg_function``
            attribute.
            If ``cv`` is ``"prefit"`` or ``"split"``, ``ensemble`` is ignored.

            By default ``False``.

        alpha: Optional[Union[float, Iterable[float]]]
            Can be a float, a list of floats, or a ``ArrayLike`` of floats.
            Between ``0`` and ``1``, represents the uncertainty of the
            confidence interval.
            Lower ``alpha`` produce larger (more conservative) prediction
            intervals.
            ``alpha`` is the complement of the target coverage level.

            By default ``None``.

        Returns
        -------
        Union[NDArray, Tuple[NDArray, NDArray]]

        - NDArray of shape (n_samples,) if ``alpha`` is ``None``.
        - Tuple[NDArray, NDArray] of shapes (n_samples,) and
          (n_samples, 2, n_alpha) if ``alpha`` is not ``None``.

            - [:, 0, :]: Lower bound of the prediction interval.
            - [:, 1, :]: Upper bound of the prediction interval.
        """
        # Checks
        check_is_fitted(self, self.fit_attributes)
        self._check_ensemble(ensemble)
        alpha = cast(Optional[NDArray], check_alpha(alpha))
        y_pred = self.single_estimator_.predict(X)
        n = len(self.conformity_scores_)
        # Point predictions only when no coverage level is requested.
        if alpha is None:
            return np.array(y_pred)
        alpha_np = cast(NDArray, alpha)
        check_alpha_and_n_samples(alpha_np, n)
        if self.method in self.no_agg_methods_ \
                or self.cv in self.no_agg_cv_:
            # naive/base or prefit/split: intervals are built around the
            # single-estimator prediction (one column).
            y_pred_multi_low = y_pred[:, np.newaxis]
            y_pred_multi_up = y_pred[:, np.newaxis]
        else:
            y_pred_multi = self._pred_multi(X)
            if self.method == "minmax":
                # minmax: widest interval over the per-train-sample columns.
                y_pred_multi_low = np.min(y_pred_multi, axis=1, keepdims=True)
                y_pred_multi_up = np.max(y_pred_multi, axis=1, keepdims=True)
            else:
                y_pred_multi_low = y_pred_multi
                y_pred_multi_up = y_pred_multi
            if ensemble:
                y_pred = aggregate_all(self.agg_function, y_pred_multi)
        # compute distributions of lower and upper bounds
        if self.conformity_score_function_.sym:
            # Symmetric scores: subtract for the lower bound.
            conformity_scores_low = -self.conformity_scores_
            conformity_scores_up = self.conformity_scores_
        else:
            # Asymmetric scores: same signed scores on both sides, but the
            # miscoverage budget is split between the two tails.
            conformity_scores_low = self.conformity_scores_
            conformity_scores_up = self.conformity_scores_
            alpha_np = alpha_np / 2
        lower_bounds = (
            self.conformity_score_function_.get_estimation_distribution(
                y_pred_multi_low, conformity_scores_low
            )
        )
        upper_bounds = (
            self.conformity_score_function_.get_estimation_distribution(
                y_pred_multi_up, conformity_scores_up
            )
        )
        # get desired confidence intervals according to alpha
        y_pred_low = np.column_stack(
            [
                np_nanquantile(
                    lower_bounds.astype(float),
                    _alpha,
                    axis=1,
                    method="lower",
                )
                for _alpha in alpha_np
            ]
        )
        y_pred_up = np.column_stack(
            [
                np_nanquantile(
                    upper_bounds.astype(float),
                    1 - _alpha,
                    axis=1,
                    method="higher",
                )
                for _alpha in alpha_np
            ]
        )
        return y_pred, np.stack([y_pred_low, y_pred_up], axis=1)
/Henson-2.2.0.tar.gz/Henson-2.2.0/README.rst | #####################
Henson |build status|
#####################
.. |build status| image:: https://travis-ci.org/iheartradio/Henson.svg?branch=master
:target: https://travis-ci.org/iheartradio/Henson
.. image:: docs/_static/logo.png
:width: 400
:height: 400
:align: center
A framework for running a Python service driven by a consumer.
* `Documentation <https://henson.readthedocs.io>`_
* `Installation <https://henson.readthedocs.io/en/latest/#installation>`_
* `Changelog <https://henson.readthedocs.io/en/latest/changes.html>`_
* `Source <https://github.com/iheartradio/Henson>`_
| PypiClean |
/Office365-REST-Python-Client-2.4.3.tar.gz/Office365-REST-Python-Client-2.4.3/office365/sharepoint/sites/sph_site.py | from office365.runtime.client_result import ClientResult
from office365.runtime.queries.service_operation import ServiceOperationQuery
from office365.runtime.paths.resource_path import ResourcePath
from office365.sharepoint.base_entity import BaseEntity
from office365.sharepoint.sites.home_site_reference import SPHSiteReference
class SPHSite(BaseEntity):

    def __init__(self, context, resource_path=None):
        """
        A home site represents a SharePoint communication site.
        It brings together news, events, embedded video and conversations, and other resources to deliver an engaging
        experience that reflects your organization's voice, priorities, and brand.
        It also allows your users to search for content (such as sites, news, and files) across your organization.

        :param office365.sharepoint.client_context.ClientContext context: SharePoint client context
        :param ResourcePath resource_path: optional explicit resource path; defaults to the
            static "SP.SPHSite" service entry point
        """
        if resource_path is None:
            resource_path = ResourcePath("SP.SPHSite")
        super(SPHSite, self).__init__(context, resource_path)

    def details(self):
        """
        Retrieve the home site reference details.

        :return: ClientResult holding a SPHSiteReference once the query executes
        """
        return_type = ClientResult(self.context, SPHSiteReference())
        qry = ServiceOperationQuery(self, "Details", None, None, None, return_type)
        self.context.add_query(qry)
        return return_type

    @staticmethod
    def is_comm_site(context, site_url, return_value=None):
        """
        Determines whether a site is a communication site

        :param office365.sharepoint.client_context.ClientContext context:
        :param str site_url: URL of the site to return status for
        :param ClientResult return_value: optional pre-allocated result container
        """
        if return_value is None:
            return_value = ClientResult(context)
        params = {"siteUrl": site_url}
        qry = ServiceOperationQuery(SPHSite(context), "IsCommSite", params, None, None, return_value, True)
        context.add_query(qry)
        return return_value

    @staticmethod
    def is_modern_site_with_horizontal_nav(context, site_url, return_type=None):
        """
        Determines whether a site is a modern site with horizontal navigation

        :param office365.sharepoint.client_context.ClientContext context:
        :param str site_url: URL of the site to return status for
        :param ClientResult return_type: optional pre-allocated result container
        """
        if return_type is None:
            return_type = ClientResult(context)
        params = {"siteUrl": site_url}
        qry = ServiceOperationQuery(SPHSite(context), "IsModernSiteWithHorizontalNav", params, None, None,
                                    return_type, True)
        context.add_query(qry)
        return return_type

    @staticmethod
    def is_valid_home_site(context, site_url, return_value=None):
        """
        Determines whether a site is the landing site for your intranet.

        :param office365.sharepoint.client_context.ClientContext context:
        :param str site_url: URL of the site to return status for
        :param ClientResult return_value: optional pre-allocated result container
        """
        if return_value is None:
            return_value = ClientResult(context)
        params = {"siteUrl": site_url}
        # Consistency: mark the operation static via the constructor argument,
        # as the sibling static methods do, instead of assigning qry.static
        # after construction (same effect).
        qry = ServiceOperationQuery(SPHSite(context), "IsValidHomeSite", params, None, None, return_value, True)
        context.add_query(qry)
        return return_value

    @staticmethod
    def validate_home_site(context, site_url, validation_action_type):
        """
        Validate a candidate home site.

        :param office365.sharepoint.client_context.ClientContext context:
        :param str site_url: URL of the site to return status for
        :param int validation_action_type: validation action to perform
        """
        sph = SPHSite(context)
        params = {"siteUrl": site_url, "validationActionType": validation_action_type}
        qry = ServiceOperationQuery(sph, "ValidateHomeSite", params, None, None, None, True)
        context.add_query(qry)
        return sph

    @staticmethod
    def set_as_home_site(context, site_url, viva_connections_default_start=None, return_value=None):
        """
        Sets a site as a landing site for your intranet.

        :param ClientResult return_value: optional pre-allocated result container
        :param office365.sharepoint.client_context.ClientContext context:
        :param str site_url: URL of the site to promote
        :param bool viva_connections_default_start: whether the site is the Viva
            Connections default landing experience
        """
        if return_value is None:
            return_value = ClientResult(context)
        sph = SPHSite(context)
        # NOTE(review): unlike the other operations, the parameters are sent
        # as the request payload (4th argument) and the query is not marked
        # static — preserved as-is; confirm against the service contract.
        params = {"siteUrl": site_url, "vivaConnectionsDefaultStart": viva_connections_default_start}
        qry = ServiceOperationQuery(sph, "SetSPHSite", None, params, None, return_value)
        context.add_query(qry)
        return return_value
/Nuitka_fixed-1.1.2-cp310-cp310-win_amd64.whl/nuitka/code_generation/YieldCodes.py | from .CodeHelpers import (
generateChildExpressionsCode,
withObjectCodeTemporaryAssignment,
)
from .ErrorCodes import getErrorExitCode
from .PythonAPICodes import getReferenceExportCode
from .VariableDeclarations import VariableDeclaration
def _getYieldPreserveCode(
    to_name, value_name, preserve_exception, yield_code, resume_code, emit, context
):
    """
    Emit C code that yields from a compiled generator and resumes afterwards.

    Emits, in order: heap-preservation of live temporaries, optional exception
    save, recording of the resume index, the actual ``yield_code``, the resume
    label, optional exception restore, heap restoration, optional
    ``resume_code``, and finally the error check plus assignment of the sent
    value into ``to_name``.
    """
    yield_return_label = context.allocateLabel("yield_return")
    # The label is "yield_return_N"; N doubles as the dispatch index stored in
    # the context object so resumption can jump back here.
    yield_return_index = yield_return_label.split("_")[-1]

    locals_preserved = context.variable_storage.getLocalPreservationDeclarations()

    # Need not preserve it, if we are not going to use it for the purpose
    # of releasing it.
    if type(value_name) is tuple:
        value_names = value_name
    else:
        value_names = (value_name,)

    for name in value_names:
        if not context.needsCleanup(name):
            locals_preserved.remove(name)

    # Target name is not assigned, no need to preserve it.
    if to_name in locals_preserved:
        locals_preserved.remove(to_name)

    if locals_preserved:
        # Lazily allocate a shared scratch buffer for saving C locals across
        # the yield; reused by every yield in this function.
        yield_tmp_storage = context.variable_storage.getVariableDeclarationTop(
            "yield_tmps"
        )

        if yield_tmp_storage is None:
            yield_tmp_storage = context.variable_storage.addVariableDeclarationTop(
                "char[1024]", "yield_tmps", None
            )

        emit(
            "Nuitka_PreserveHeap(%s, %s, NULL);"
            % (
                yield_tmp_storage,
                ", ".join(
                    "&%s, sizeof(%s)" % (local_preserved, local_preserved.c_type)
                    for local_preserved in locals_preserved
                ),
            )
        )

    if preserve_exception:
        # Inside exception handlers, the active exception must survive the
        # suspension; save before yielding, restore after resuming.
        emit(
            "SAVE_%s_EXCEPTION(%s);"
            % (context.getContextObjectName().upper(), context.getContextObjectName())
        )

    emit(
        """\
%(context_object_name)s->m_yield_return_index = %(yield_return_index)s;"""
        % {
            "context_object_name": context.getContextObjectName(),
            "yield_return_index": yield_return_index,
        }
    )

    emit(yield_code)

    emit("%(yield_return_label)s:" % {"yield_return_label": yield_return_label})

    if preserve_exception:
        emit(
            "RESTORE_%s_EXCEPTION(%s);"
            % (context.getContextObjectName().upper(), context.getContextObjectName())
        )

    if locals_preserved:
        emit(
            "Nuitka_RestoreHeap(%s, %s, NULL);"
            % (
                yield_tmp_storage,
                ", ".join(
                    "&%s, sizeof(%s)" % (local_preserved, local_preserved.c_type)
                    for local_preserved in locals_preserved
                ),
            )
        )

    if resume_code:
        emit(resume_code)

    yield_return_name = VariableDeclaration(
        "PyObject *", "yield_return_value", None, None
    )

    getErrorExitCode(check_name=yield_return_name, emit=emit, context=context)

    # Called with object
    emit("%s = %s;" % (to_name, yield_return_name))
def generateYieldCode(to_name, expression, emit, context):
    """Generate C code for a plain "yield" expression.

    The yielded value is handed to the caller through a C "return" after all
    live C locals (and, in handlers, the active exception) have been saved so
    the generator can resume just past the yield with its state restored.
    """
    (value_name,) = generateChildExpressionsCode(
        expression=expression, emit=emit, context=context
    )

    # In handlers, we must preserve/restore the exception.
    preserve_exception = expression.isExceptionPreserving()

    # The yielded object is given away to the caller, so export a reference
    # and drop it from the cleanup set.
    getReferenceExportCode(value_name, emit, context)
    if context.needsCleanup(value_name):
        context.removeCleanupTempName(value_name)

    yield_code = "return %(yielded_value)s;" % {"yielded_value": value_name}

    with withObjectCodeTemporaryAssignment(
        to_name, "yield_result", expression, emit, context
    ) as result_name:

        _getYieldPreserveCode(
            to_name=result_name,
            value_name=value_name,
            yield_code=yield_code,
            resume_code=None,
            preserve_exception=preserve_exception,
            emit=emit,
            context=context,
        )

        # This conversion will not use it, and since it is borrowed, debug mode
        # would otherwise complain.
        if to_name.c_type == "nuitka_void":
            result_name.maybe_unused = True

    # Comes as only borrowed.
    # context.addCleanupTempName(result_name)
def generateYieldFromCode(to_name, expression, emit, context):
    """Generate C code for a "yield from" expression.

    Rather than returning a single value, the generator object is pointed at
    the sub-iterable through "m_yieldfrom" and control returns NULL; the
    resume machinery then delegates iteration to that iterable.
    """
    (value_name,) = generateChildExpressionsCode(
        expression=expression, emit=emit, context=context
    )

    # In handlers, we must preserve/restore the exception.
    preserve_exception = expression.isExceptionPreserving()

    # Ownership of the iterable passes to the generator object, so export a
    # reference and drop it from the cleanup set.
    getReferenceExportCode(value_name, emit, context)
    if context.needsCleanup(value_name):
        context.removeCleanupTempName(value_name)

    yield_code = """\
generator->m_yieldfrom = %(yield_from)s;
return NULL;
""" % {
        "yield_from": value_name
    }

    with withObjectCodeTemporaryAssignment(
        to_name, "yieldfrom_result", expression, emit, context
    ) as result_name:

        _getYieldPreserveCode(
            to_name=result_name,
            value_name=value_name,
            yield_code=yield_code,
            resume_code=None,
            preserve_exception=preserve_exception,
            emit=emit,
            context=context,
        )

        # Unlike plain yield, the delegated result is an owned reference and
        # must be released later.
        context.addCleanupTempName(result_name)
def generateYieldFromWaitableCode(to_name, expression, emit, context):
    """Generate C code for yielding from a waitable (awaited) object.

    Works like "yield from", but additionally sets the context object's
    "m_awaiting" flag while the delegation is in progress and clears it on
    resume.
    """
    # In handlers, we must preserve/restore the exception.
    preserve_exception = expression.isExceptionPreserving()

    (awaited_name,) = generateChildExpressionsCode(
        expression=expression, emit=emit, context=context
    )

    yield_code = """\
%(object_name)s->m_yieldfrom = %(yield_from)s;
%(object_name)s->m_awaiting = true;
return NULL;
""" % {
        "object_name": context.getContextObjectName(),
        "yield_from": awaited_name,
    }

    # On resume, the awaiting flag is reset before execution continues.
    resume_code = """\
%(object_name)s->m_awaiting = false;
""" % {
        "object_name": context.getContextObjectName()
    }

    # Ownership of the awaited object passes to the context object.
    getReferenceExportCode(awaited_name, emit, context)
    if context.needsCleanup(awaited_name):
        context.removeCleanupTempName(awaited_name)

    with withObjectCodeTemporaryAssignment(
        to_name, "await_result", expression, emit, context
    ) as result_name:

        _getYieldPreserveCode(
            to_name=result_name,
            value_name=awaited_name,
            yield_code=yield_code,
            resume_code=resume_code,
            preserve_exception=preserve_exception,
            emit=emit,
            context=context,
        )

        # TODO: Seems to be redundant with and _getYieldPreserveCode doing
        # it and could be removed
        getErrorExitCode(check_name=result_name, emit=emit, context=context)

        context.addCleanupTempName(result_name)
def getYieldReturnDispatchCode(context):
    """Build the C "switch" that jumps back to the label of the last yield.

    Returns a list of C source lines, or an empty list when the context has
    no "yield_return" labels at all.
    """
    dispatch_lines = []
    label_count = context.getLabelCount("yield_return")
    for yield_index in range(label_count, 0, -1):
        dispatch_lines.append(
            "case %(index)d: goto yield_return_%(index)d;" % {"index": yield_index}
        )

    if not dispatch_lines:
        return dispatch_lines

    header = "switch(%s->m_yield_return_index) {" % context.getContextObjectName()
    return [header] + dispatch_lines + ["}"]
/Adytum-PyMonitor-1.0.5.tar.bz2/Adytum-PyMonitor-1.0.5/lib/net/rwhois.py | _version = "1.1"
import os, sys, string, time, getopt, socket, select, re, errno, copy, signal
timeout=5
class WhoisRecord:
defaultserver='whois.networksolutions.com'
whoismap={ 'com' : 'whois.internic.net' , \
'org' : 'whois.internic.net' , \
'net' : 'whois.internic.net' , \
'edu' : 'whois.networksolutions.com' , \
'de' : 'whois.denic.de' , \
'gov' : 'whois.nic.gov' , \
# See http://www.nic.gov/cgi-bin/whois
'mil' : 'whois.nic.mil' , \
# See http://www.nic.mil/cgi-bin/whois
'ca' : 'whois.cdnnet.ca' , \
'uk' : 'whois.nic.uk' , \
'au' : 'whois.aunic.net' , \
'hu' : 'whois.nic.hu' , \
# All the following are unverified/checked.
'be' : 'whois.ripe.net',
'it' : 'whois.ripe.net' , \
# also whois.nic.it
'at' : 'whois.ripe.net' , \
# also www.nic.at, whois.aco.net
'dk' : 'whois.ripe.net' , \
'fo' : 'whois.ripe.net' , \
'lt' : 'whois.ripe.net' , \
'no' : 'whois.ripe.net' , \
'sj' : 'whois.ripe.net' , \
'sk' : 'whois.ripe.net' , \
'tr' : 'whois.ripe.net' , \
# also whois.metu.edu.tr
'il' : 'whois.ripe.net' , \
'bv' : 'whois.ripe.net' , \
'se' : 'whois.nic-se.se' , \
'br' : 'whois.nic.br' , \
# a.k.a. whois.fapesp.br?
'fr' : 'whois.nic.fr' , \
'sg' : 'whois.nic.net.sg' , \
'hm' : 'whois.registry.hm' , \
# see also whois.nic.hm
'nz' : 'domainz.waikato.ac.nz' , \
'nl' : 'whois.domain-registry.nl' , \
# RIPE also handles other countries
# See http://www.ripe.net/info/ncc/rir-areas.html
'ru' : 'whois.ripn.net' , \
'ch' : 'whois.nic.ch' , \
# see http://www.nic.ch/whois_readme.html
'jp' : 'whois.nic.ad.jp' , \
# (use DOM foo.jp/e for english; need to lookup !handles separately)
'to' : 'whois.tonic.to' , \
'nu' : 'whois.nic.nu' , \
'fm' : 'www.dot.fm' , \
# http request http://www.dot.fm/search.html
'am' : 'whois.nic.am' , \
'nu' : 'www.nunames.nu' , \
# http request
# e.g. http://www.nunames.nu/cgi-bin/drill.cfm?domainname=nunames.nu
#'cx' : 'whois.nic.cx' , \ # no response from this server
'af' : 'whois.nic.af' , \
'as' : 'whois.nic.as' , \
'li' : 'whois.nic.li' , \
'lk' : 'whois.nic.lk' , \
'mx' : 'whois.nic.mx' , \
'pw' : 'whois.nic.pw' , \
'sh' : 'whois.nic.sh' , \
# consistently resets connection
'tj' : 'whois.nic.tj' , \
'tm' : 'whois.nic.tm' , \
'pt' : 'whois.dns.pt' , \
'kr' : 'whois.nic.or.kr' , \
# see also whois.krnic.net
'kz' : 'whois.nic.or.kr' , \
# see also whois.krnic.net
'al' : 'whois.ripe.net' , \
'az' : 'whois.ripe.net' , \
'ba' : 'whois.ripe.net' , \
'bg' : 'whois.ripe.net' , \
'by' : 'whois.ripe.net' , \
'cy' : 'whois.ripe.net' , \
'cz' : 'whois.ripe.net' , \
'dz' : 'whois.ripe.net' , \
'ee' : 'whois.ripe.net' , \
'eg' : 'whois.ripe.net' , \
'es' : 'whois.ripe.net' , \
'fi' : 'whois.ripe.net' , \
'gr' : 'whois.ripe.net' , \
'hr' : 'whois.ripe.net' , \
'lu' : 'whois.ripe.net' , \
'lv' : 'whois.ripe.net' , \
'ma' : 'whois.ripe.net' , \
'md' : 'whois.ripe.net' , \
'mk' : 'whois.ripe.net' , \
'mt' : 'whois.ripe.net' , \
'pl' : 'whois.ripe.net' , \
'ro' : 'whois.ripe.net' , \
'si' : 'whois.ripe.net' , \
'sm' : 'whois.ripe.net' , \
'su' : 'whois.ripe.net' , \
'tn' : 'whois.ripe.net' , \
'ua' : 'whois.ripe.net' , \
'va' : 'whois.ripe.net' , \
'yu' : 'whois.ripe.net' , \
# unchecked
'ac' : 'whois.nic.ac' , \
'cc' : 'whois.nic.cc' , \
#'cn' : 'whois.cnnic.cn' , \ # connection refused
'gs' : 'whois.adamsnames.tc' , \
'hk' : 'whois.apnic.net' , \
#'ie' : 'whois.ucd.ie' , \ # connection refused
#'is' : 'whois.isnet.is' , \# connection refused
#'mm' : 'whois.nic.mm' , \ # connection refused
'ms' : 'whois.adamsnames.tc' , \
'my' : 'whois.mynic.net' , \
#'pe' : 'whois.rcp.net.pe' , \ # connection refused
'st' : 'whois.nic.st' , \
'tc' : 'whois.adamsnames.tc' , \
'tf' : 'whois.adamsnames.tc' , \
'th' : 'whois.thnic.net' , \
'tw' : 'whois.twnic.net' , \
'us' : 'whois.isi.edu' , \
'vg' : 'whois.adamsnames.tc' , \
#'za' : 'whois.co.za' # connection refused
}
def __init__(self,domain=None):
self.domain=domain
self.whoisserver=None
self.page=None
return
def whois(self,domain=None, server=None, cache=0):
if domain is not None:
self.domain=domain
pass
if server is not None:
self.whoisserver=server
pass
if self.domain is None:
print "No Domain"
raise "No Domain"
if self.whoisserver is None:
self.chooseserver()
if self.whoisserver is None:
print "No Server"
raise "No Server"
if cache:
fn = "%s.dom" % domainname
if os.path.exists(fn):
return open(fn).read()
pass
self.page=self._whois()
if cache:
open(fn, "w").write(page)
pass
return
def chooseserver(self):
try:
(secondlevel,toplevel)=string.split(self.domain,'.')
self.whoisserver=WhoisRecord.whoismap.get(toplevel)
if self.whoisserver==None:
self.whoisserver=WhoisRecord.defaultserver
return
pass
except:
self.whoisserver=WhoisRecord.defaultserver
return
if(toplevel=='com' or toplevel=='org' or toplevel=='net'):
tmp=self._whois()
m=re.search("Whois Server:(.+)",tmp)
if m:
self.whoisserver=string.strip(m.group(1))
return
self.whoisserver='whois.networksolutions.com'
tmp=self._whois()
m=re.search("Whois Server:(.+)",tmp)
if m:
self.whoisserver=string.strip(m.group(1))
return
pass
return
def _whois(self):
def alrmhandler(signum,frame):
raise "TimedOut", "on connect"
s = None
## try until we timeout
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.setblocking(0)
signal.signal(signal.SIGALRM,alrmhandler)
signal.alarm(timeout)
while 1:
try:
s.connect(self.whoisserver, 43)
except socket.error, (ecode, reason):
if ecode==errno.EINPROGRESS:
continue
elif ecode==errno.EALREADY:
continue
else:
raise socket.error, (ecode, reason)
pass
break
signal.alarm(0)
ret = select.select ([s], [s], [], 30)
if len(ret[1])== 0 and len(ret[0]) == 0:
s.close()
raise TimedOut, "on data"
s.setblocking(1)
s.send("%s\n" % self.domain)
page = ""
while 1:
data = s.recv(8196)
if not data: break
page = page + data
pass
s.close()
if string.find(page, "No match for") != -1:
raise 'NoSuchDomain', self.domain
if string.find(page, "No entries found") != -1:
raise 'NoSuchDomain', self.domain
if string.find(page, "no domain specified") != -1:
raise 'NoSuchDomain', self.domain
if string.find(page, "NO MATCH:") != -1:
raise 'NoSuchDomain', self.domain
return page
##
## ----------------------------------------------------------------------
##
class ContactRecord:
    """Plain record for one whois contact (registrant/admin/tech/...)."""

    # Attribute names, in the order they appear in the printable form.
    _FIELD_ORDER = ('type', 'organization', 'person', 'handle', 'address',
                    'email', 'phone', 'fax', 'lastupdated')

    def __init__(self):
        # Every field starts out unknown; the whois parsers fill them in.
        for attrname in self._FIELD_ORDER:
            setattr(self, attrname, None)

    def __str__(self):
        values = tuple([getattr(self, attrname) for attrname in self._FIELD_ORDER])
        return "Type: %s\nOrganization: %s\nPerson: %s\nHandle: %s\nAddress: %s\nEmail: %s\nPhone: %s\nFax: %s\nLastupdate: %s\n" % values
class DomainRecord(WhoisRecord):
    """WhoisRecord that also parses the fetched page into structured fields.

    Parsing is only implemented for the servers listed in ``parsemap``; each
    entry names the method that understands that server's output format.
    """

    # whois server -> name of the method that can parse its output.
    parsemap = {'whois.networksolutions.com': 'ParseWhois_NetworkSolutions',
                'whois.register.com': 'ParseWhois_RegisterCOM'}

    def __init__(self, domain=None):
        WhoisRecord.__init__(self, domain)
        self.domainid = None
        self.created = None
        self.lastupdated = None
        self.expires = None
        self.databaseupdated = None
        self.servers = None                  # list of (hostname, ip) tuples
        self.registrant = ContactRecord()
        self.registrant.type = 'registrant'
        self.contacts = {}                   # contact type -> ContactRecord
        return

    def __str__(self):
        con = ''
        for (k, v) in self.contacts.items():
            con = con + str(v) + '\n'
        return "%s (%s):\nWhoisServer: %s\nCreated : %s\nLastupdated : %s\nDatabaseupdated : %s\nExpires : %s\nServers : %s\nRegistrant >>\n\n%s\nContacts >>\n\n%s\n" % (self.domain, self.domainid, self.whoisserver, self.created, self.lastupdated, self.databaseupdated, self.expires, self.servers, self.registrant, con)

    def Parse(self):
        """Parse self.page (fetched by whois()) into this record's fields."""
        self._ParseWhois()
        return

    def _ParseWhois(self):
        parser = DomainRecord.parsemap.get(self.whoisserver)
        if parser == None:
            raise 'NoParser'
        # Fixed: dispatch through getattr rather than eval() of a computed
        # string, which was fragile and unsafe.
        getattr(self, parser)()
        return

    ##
    ## ----------------------------------------------------------------------
    ##
    def _ParseContacts_RegisterCOM(self, page):
        """Parse the contacts section of a register.com whois page."""
        # The section alternates between "...Contact(s):" headers and the
        # contact bodies they apply to.
        parts = re.split("((?:(?:Administrative|Billing|Technical|Zone) Contact,?[ ]*)+:)\n", page)
        contacttypes = None
        for part in parts:
            if string.find(part, "Contact:") != -1:
                # Header: remember which contact types the next body fills.
                if part[-1] == ":": part = part[:-1]
                contacttypes = string.split(part, ",")
                continue
            part = string.strip(part)
            if not part: continue

            contact = ContactRecord()
            m = re.search("Email: (.+@.+)", part)
            if m:
                contact.email = string.lower(string.strip(m.group(1)))
            m = re.search("\s+Phone: (.+)", part)
            if m:
                contact.phone = m.group(1)
                end = m.start(0)
            else:
                # Fixed: without a Phone line, 'end' was unbound; let the
                # address run to the end of the body instead.
                end = len(part)
            lines = string.split(part[0:end], "\n")
            lines = map(string.strip, lines)
            # Body layout: organization, person, then the postal address.
            contact.organization = lines.pop(0)
            contact.person = lines.pop(0)
            contact.address = string.join(lines, '\n')

            for contacttype in contacttypes:
                contacttype = string.lower(string.strip(contacttype))
                contacttype = string.replace(contacttype, " contact", "")
                contact.type = contacttype
                self.contacts[contacttype] = copy.copy(contact)
        return

    def ParseWhois_RegisterCOM(self):
        """Parse a whois page in register.com's output format."""
        m = re.search("Record last updated on.*: (.+)", self.page)
        if m: self.lastupdated = m.group(1)
        m = re.search("Created on.*: (.+)", self.page)
        if m: self.created = m.group(1)
        m = re.search("Expires on.*: (.+)", self.page)
        if m: self.expires = m.group(1)
        m = re.search("Phone: (.+)", self.page)
        if m: self.registrant.phone = m.group(1)
        m = re.search("Email: (.+@.+)", self.page)
        if m: self.registrant.email = m.group(1)

        m = re.search("Organization:(.+?)Phone:", self.page, re.S)
        if m:
            registrant = string.strip(self.page[m.start(1):m.end(1)])
            registrant = string.split(registrant, "\n")
            registrant = map(string.strip, registrant)
            self.registrant.organization = registrant[0]
            self.registrant.person = registrant[1]
            self.registrant.address = string.join(registrant[2:], "\n")

        m = re.search("Domain servers in listed order:\n\n(.+?)\n\n", self.page, re.S)
        if m:
            servers = string.strip(self.page[m.start(1):m.end(1)])
            lines = string.split(servers, "\n")
            self.servers = []
            for line in lines:
                # Fixed regex: "(\w|\.)+?" captured only the final character
                # of the hostname; capture the whole name before the IP.
                m = re.search("([\w.]+?)\s+(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})", string.strip(line))
                if m:
                    # Fixed: list.append() takes one argument; store the
                    # (hostname, ip) pair as a tuple.
                    self.servers.append((m.group(1), m.group(2)))

        m = re.search("((?:(?:Administrative|Billing|Technical|Zone) Contact,?[ ]*)+:)\n", self.page)
        if m:
            i = m.start()
            m = re.search("Domain servers in listed order", self.page)
            if m:
                # Fixed: only parse when both section markers were found;
                # 'contacts' used to be referenced while possibly unbound.
                contacts = string.strip(self.page[i:m.start()])
                self._ParseContacts_RegisterCOM(contacts)
        return

    def _ParseContacts_NetworkSolutions(self, page):
        """Parse the contacts section of a Network Solutions whois page."""
        parts = re.split("((?:(?:Administrative|Billing|Technical|Zone) Contact,?[ ]*)+:)\n", page)
        contacttypes = None
        for part in parts:
            if string.find(part, "Contact:") != -1:
                if part[-1] == ":": part = part[:-1]
                contacttypes = string.split(part, ",")
                continue
            part = string.strip(part)
            if not part: continue

            record = ContactRecord()
            lines = string.split(part, "\n")
            # First line: "Person Name (HANDLE) email@host"
            m = re.search("(.+) \((.+)\) (.+@.+)", lines.pop(0))
            if m:
                record.person = string.strip(m.group(1))
                record.handle = string.strip(m.group(2))
                record.email = string.lower(string.strip(m.group(3)))
            record.organization = string.strip(lines.pop(0))

            # The last line holds the phone number; everything in between is
            # treated as the postal address. (The regex that once switched
            # 'flag' was disabled, so flag never changes.)
            flag = 0
            addresslines = []
            phonelines = []
            phonelines.append(string.strip(lines.pop()))
            for line in lines:
                line = string.strip(line)
                if flag == 0:
                    addresslines.append(line)
                else:
                    phonelines.append(line)
            record.phone = string.join(phonelines, "\n")
            record.address = string.join(addresslines, "\n")

            for contacttype in contacttypes:
                contacttype = string.lower(string.strip(contacttype))
                contacttype = string.replace(contacttype, " contact", "")
                record.type = contacttype
                self.contacts.update({contacttype: copy.copy(record)})
        return

    def ParseWhois_NetworkSolutions(self):
        """Parse a whois page in Network Solutions' output format."""
        m = re.search("Record last updated on (.+)\.", self.page)
        if m: self.lastupdated = m.group(1)
        m = re.search("Record created on (.+)\.", self.page)
        if m: self.created = m.group(1)
        m = re.search("Database last updated on (.+)\.", self.page)
        if m: self.databaseupdated = m.group(1)
        m = re.search("Record expires on (.+)\.", self.page)
        if m: self.expires = m.group(1)

        m = re.search("Registrant:(.+?)\n\n", self.page, re.S)
        if m:
            reg = string.strip(self.page[m.start(1):m.end(1)])
            reg = string.split(reg, "\n")
            reg = map(string.strip, reg)
            self.registrant.organization = reg[0]
            self.registrant.address = string.join(reg[1:], '\n')
            # The organization line carries the registry handle in parens.
            m = re.search("(.+) \((.+)\)", self.registrant.organization)
            if m:
                self.domainid = m.group(2)

        m = re.search("Domain servers in listed order:\n\n", self.page)
        if m:
            i = m.end()
            m = re.search("\n\n", self.page[i:])
            if m:
                servers = string.strip(self.page[i:i + m.start()])
                lines = string.split(servers, "\n")
                self.servers = []
                for line in lines:
                    # Fixed regex and append(): see ParseWhois_RegisterCOM.
                    m = re.search("([\w.]+?)\s+(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})", string.strip(line))
                    if m:
                        self.servers.append((m.group(1), m.group(2)))

        m = re.search("((?:(?:Administrative|Billing|Technical|Zone) Contact,?[ ]*)+:)\n", self.page)
        if m:
            i = m.start()
            m = re.search("Record last updated on", self.page)
            if m:
                # Fixed: guard against an unbound 'contacts' when either
                # marker was missing.
                contacts = string.strip(self.page[i:m.start()])
                self._ParseContacts_NetworkSolutions(contacts)
        return
##
## ----------------------------------------------------------------------
##
##
## ----------------------------------------------------------------------
##
def usage(progname):
    # Print the module doc string interpolated with the local variables
    # (program name and version) as the command line help text.
    version = _version
    print __doc__ % vars()
def main(argv, stdout, environ):
    # Command line entry point: fetch and print the raw whois page for each
    # domain given on the command line.
    progname = argv[0]
    list, args = getopt.getopt(argv[1:], "", ["help", "version"])

    for (field, val) in list:
        if field == "--help":
            usage(progname)
            return
        elif field == "--version":
            print progname, _version
            return

    rec=WhoisRecord();
    for domain in args:
        whoisserver=None
        # The "domain@server" form queries an explicitly given whois server.
        if string.find(domain,'@')!=-1:
            (domain,whoisserver)=string.split(domain,'@')
        try:
            rec.whois(domain,whoisserver)
            print rec.page
        # NOTE: these are Python 2 string exceptions, raised by
        # WhoisRecord._whois above.
        except 'NoSuchDomain', reason:
            print "ERROR: no such domain %s" % domain
        except socket.error, (ecode,reason):
            print reason
        except "TimedOut", reason:
            print "Timed out", reason
# Run as a script: "python rwhois.py [--help|--version] domain[@server] ..."
if __name__ == "__main__":
    main(sys.argv, sys.stdout, os.environ)
/MeaningCloud-python-2.0.0.tar.gz/MeaningCloud-python-2.0.0/meaningcloud/SummarizationRequest.py | import meaningcloud.Request
class SummarizationRequest(meaningcloud.Request):
    """Request wrapper for MeaningCloud's Summarization API (summarization-1.0)."""

    endpoint = 'summarization-1.0'
    otherparams = None      # extra API parameters given at construction
    extraheaders = None     # extra HTTP headers given at construction
    type_ = ""

    def __init__(self, key, sentences=5, txt=None, url=None, doc=None, otherparams=None, extraheaders=None, server='https://api.meaningcloud.com/'):
        """
        SummarizationRequest constructor

        :param key:
            License key
        :param sentences:
            Number of sentences for the summary.
        :param txt:
            Text to use in the API calls
        :param url:
            Url to use in the API calls
        :param doc:
            File to use in the API calls
        :param otherparams:
            Dict where other params can be added to be used in the API call
        :param extraheaders:
            Dict where other headers can be added to be used in the request
        :param server:
            String with the server the requests will be sent to
        """
        # Make sure the endpoint can be appended directly.
        if server[-1] != '/':
            server += '/'

        self._params = {}
        meaningcloud.Request.__init__(self, (server + self.endpoint), key)
        # Fixed: this was assigned to a misspelled attribute ("otherarams"),
        # which left self.otherparams permanently None.
        self.otherparams = otherparams
        self.extraheaders = extraheaders
        self._url = server + self.endpoint
        self.addParam('key', key)
        self.addParam('sentences', sentences)

        # Exactly one content source is used, in this priority order; with no
        # source at all, fall back to (empty) text content.
        if txt:
            self.setContentTxt(txt)
        elif doc:
            self.setContentFile(doc)
        elif url:
            self.setContentUrl(url)
        else:
            self.setContentTxt(txt)

        if otherparams:
            # Fixed: the loop variable used to shadow the 'key' license
            # parameter.
            for param_name in otherparams:
                self.addParam(param_name, otherparams[param_name])

    def sendReq(self):
        """Send the request (with any extra headers) and return the response."""
        return self.sendRequest(self.extraheaders)
/CRIkit2-0.4.4.tar.gz/CRIkit2-0.4.4/crikit/ui/dialog_AbstractFactorization.py | import sys as _sys
import numpy as _np
import traceback as _traceback
from PyQt5 import QtWidgets as _QtWidgets
from PyQt5.QtWidgets import (QApplication as _QApplication,
QDialog as _QDialog,
QMessageBox as _QMsg)
import PyQt5.QtCore as _QtCore
# Import from Designer-based GUI
from crikit.ui.qt_Factorization import Ui_Dialog ### EDIT ###
# Generic imports for MPL-incorporation
import matplotlib as _mpl
from sciplot.ui.widget_mpl import MplCanvas as _MplCanvas
_mpl.use('Qt5Agg')
_mpl.rcParams['font.family'] = 'sans-serif'
_mpl.rcParams['font.size'] = 12
class DialogAbstractFactorization(_QDialog):
    """
    Abstract dialog for interactively selecting factors of a matrix
    factorization (e.g. SVD) of a hyperspectral datacube.

    Subclasses must implement max_factors, combiner, mean_spatial,
    mean_spectral, get_spatial_slice and get_spectral_slice, then call
    setup() and setupData().
    """

    def __init__(self, parent=None):
        super(DialogAbstractFactorization, self).__init__(parent=parent)
        # Matplotlib 1.x needs the explicit ax.hold() calls made in setup().
        # (Fixed: the version component is a string; comparing it to the
        # int 1 was always False.)
        self._mpl_v1 = _mpl.__version__.split('.')[0] == '1'

    def ui_changes(self):
        """
        Any changes to ui labels or otherwise for particular implementation
        """
        pass

    def setupData(self, img_shape):
        """Record the (n_y, n_x, n_spectra) cube geometry and reset state."""
        self._img_shape = img_shape
        self._img_size = int(_np.array(img_shape).prod())
        self._img_shape2D = tuple(_np.array(img_shape)[0:2])
        self._img_size2D = int(_np.array(img_shape)[0:2].prod())
        self._n_y = img_shape[0]
        self._n_x = img_shape[1]
        self._n_spectra = img_shape[2]
        self._n_factors = self.max_factors()

        self.selected_factors = set()
        # Full-data reconstruction; subclasses are expected to populate these.
        self.cube_all = None
        self.img_all = None
        self.spect_all = None

    def setup(self, parent=None):
        """Load the designer-based UI, wire signals and create the canvases."""
        self.ui = Ui_Dialog()
        self.ui.setupUi(self)

        # Navigation and action buttons.
        self.ui.pushButtonNext.clicked.connect(self.advance)
        self.ui.pushButtonPrev.clicked.connect(self.advance)
        self.ui.pushButtonGoTo.clicked.connect(self.advance)
        self.ui.pushButtonCancel.clicked.connect(self.reject)
        self.ui.pushButtonOk.clicked.connect(self.accept)
        self.ui.pushButtonClear.clicked.connect(self.clear)
        self.ui.pushButtonApply.clicked.connect(self.applyCheckBoxes)
        self.ui.pushButtonScript.clicked.connect(self.runScript)

        # Factors are shown 6 at a time, starting at index 0.
        self._first_factor_visible = 0
        self._num_factor_visible = 6

        self.ui.lcdSelectedFactors.display(0)
        self.ui.lcdMaxFactors.display(self.max_factors())

        # One canvas (image on top, spectrum below) per visible factor.
        self.factorWins = []
        self.factorLabelCheckBoxes = [self.ui.checkBox,
                                      self.ui.checkBox_2,
                                      self.ui.checkBox_3,
                                      self.ui.checkBox_4,
                                      self.ui.checkBox_5,
                                      self.ui.checkBox_6]

        for count in range(self._num_factor_visible):
            self.factorWins.append(_MplCanvas(subplot=211))
            self.factorWins[count].ax[0].axis('Off')
            if self._mpl_v1:
                self.factorWins[count].ax[1].hold('Off')

        self.ui.gridLayout.addWidget(self.factorWins[0], 1, 0)
        self.ui.gridLayout.addWidget(self.factorWins[1], 1, 1)
        self.ui.gridLayout.addWidget(self.factorWins[2], 1, 2)
        self.ui.gridLayout.addWidget(self.factorWins[3], 3, 0)
        self.ui.gridLayout.addWidget(self.factorWins[4], 3, 1)
        self.ui.gridLayout.addWidget(self.factorWins[5], 3, 2)

        # Reconstruction previews: kept factors and the remainder.
        self.reconCurrent = _MplCanvas(subplot=211)
        self.reconCurrent.ax[0].axis('Off')
        if self._mpl_v1:
            self.reconCurrent.ax[1].hold('Off')

        self.reconRemainder = _MplCanvas(subplot=211)
        self.reconRemainder.ax[0].axis('Off')
        if self._mpl_v1:
            self.reconRemainder.ax[1].hold('Off')

        self.ui.verticalLayout_3.insertWidget(1, self.reconCurrent)
        self.ui.verticalLayout_3.insertWidget(-1, self.reconRemainder)

        for count in range(self._num_factor_visible):
            self.factorLabelCheckBoxes[count].setText('Keep: ' + str(count))

    @property
    def unselected_factors(self):
        """Set of factor indices not currently selected."""
        all_factors = set(_np.arange(self._n_factors))
        return all_factors - self.selected_factors

    def applyCheckBoxes(self):
        """
        Add checked factors (and remove un-checked factors)
        """
        for count, checkBox in enumerate(self.factorLabelCheckBoxes):
            if checkBox.isChecked():
                self.selected_factors.add(self._first_factor_visible + count)
            else:
                # discard() silently ignores factors never selected
                # (replaces the old try/except around remove()).
                self.selected_factors.discard(self._first_factor_visible + count)

        self.ui.lcdSelectedFactors.display(len(self.selected_factors))
        self.updateCurrentRemainder()

    def advance(self):
        """
        View previous/next set of factors, or jump to a given factor
        """
        sender = self.sender().objectName()
        if sender == 'pushButtonPrev':
            self.updatePlots(startnum=self._first_factor_visible - self._num_factor_visible)
        elif sender == 'pushButtonNext':
            self.updatePlots(startnum=self._first_factor_visible + self._num_factor_visible)
        elif sender == 'pushButtonGoTo':
            self.updatePlots(startnum=self.ui.spinBoxGoTo.value())

    def runScript(self):
        """
        Run "script" of factor selection

        Example:
            [1,2,3,5:7] = 1,2,3,5,6,7
        """
        script = self.ui.lineEditSelections.text()
        script = script.strip('[').strip(']')
        script = script.split(',')
        try:
            for count in script:
                if ':' in count:
                    temp = count.split(':')
                    # Fixed: the error message referenced the undefined
                    # attribute self.temp, raising AttributeError instead of
                    # the intended ValueError.
                    if (int(temp[0]) >= self._n_factors) or (int(temp[1]) >= self._n_factors):
                        raise ValueError('{} value greater than maximum factor {}'.format(count, self._n_factors))
                    self.selected_factors.update(set(_np.arange(int(temp[0]), int(temp[1]) + 1)))
                elif count.strip() == '':
                    pass
                else:
                    if int(count) >= self._n_factors:
                        raise ValueError('{} value greater than maximum factor {}'.format(int(count), self._n_factors))
                    self.selected_factors.add(int(count))
        except Exception:
            _traceback.print_exc(limit=1)
            msg_box = _QMsg(_QMsg.Warning, 'Script Error', 'There was a problem with the script command: {}'.format(script), parent=self)
            msg_box.exec()
        self.updatePlots(startnum=self._first_factor_visible)
        self.ui.lcdSelectedFactors.display(len(self.selected_factors))
        self.updateCurrentRemainder()

    # -- Abstract interface: subclasses must implement all of these ---------
    def max_factors(self):
        """Return the total number of factors available."""
        raise NotImplementedError('max_factors method not implemented')

    def combiner(self, factors=None):
        """Reconstruct a datacube from the given factor indices."""
        raise NotImplementedError('combiner method not implemented')

    def mean_spatial(self, cube):
        """Return the spatial (image) mean of a cube."""
        raise NotImplementedError('mean_spatial method not implemented')

    def mean_spectral(self, cube):
        """Return the spectral mean of a cube."""
        raise NotImplementedError('mean_spectral method not implemented')

    def get_spatial_slice(self, num):
        """Return the spatial component (image) of factor num."""
        raise NotImplementedError('get_spatial_slice method not implemented.')

    def get_spectral_slice(self, num):
        """Return the spectral component of factor num."""
        raise NotImplementedError('get_spectral_slice method not implemented.')

    def updateCurrentRemainder(self):
        """
        Update the reconstruction previews built from the selected factors
        and from everything left over.
        """
        cube_select = self.combiner(self.selected_factors)
        img_select = self.mean_spatial(cube_select)
        spect_select = self.mean_spectral(cube_select)

        # The remainder is the full data minus the selection.
        # (Leftover debug print statements removed.)
        img_nonselect = self.img_all - img_select
        spect_nonselect = self.spect_all - spect_select

        self.reconCurrent.ax[0].cla()
        self.reconCurrent.ax[1].cla()
        # Robust color limit: mean + 3 std rather than the absolute max.
        s_lim = _np.abs(img_select.mean() + 3 * img_select.std())
        self.reconCurrent.ax[0].imshow(img_select, interpolation='None',
                                       cmap='bwr', origin='lower', vmin=0, vmax=s_lim)
        self.reconCurrent.ax[1].plot(spect_select)
        self.reconCurrent.draw()

        self.reconRemainder.ax[0].cla()
        self.reconRemainder.ax[1].cla()
        s_lim = _np.abs(img_nonselect.mean() + 3 * img_nonselect.std())
        self.reconRemainder.ax[0].imshow(img_nonselect, interpolation='None',
                                         cmap='bwr', origin='lower', vmin=-s_lim, vmax=s_lim)
        self.reconRemainder.ax[1].plot(spect_nonselect)
        self.reconRemainder.draw()

    def updatePlots(self, startnum=0):
        """
        Update images and spectra of the set of factors starting at factor
        number startnum
        """
        # Clamp to [0, n_factors - visible] and toggle navigation buttons.
        if startnum <= 0:
            startnum = 0
            self.ui.pushButtonPrev.setEnabled(False)
            self.ui.pushButtonNext.setEnabled(True)
        elif startnum > self._n_factors - self._num_factor_visible:
            startnum = self._n_factors - self._num_factor_visible
            self.ui.pushButtonPrev.setEnabled(True)
            self.ui.pushButtonNext.setEnabled(False)
        else:
            self.ui.pushButtonPrev.setEnabled(True)
            self.ui.pushButtonNext.setEnabled(True)

        self._first_factor_visible = startnum

        for count in range(self._num_factor_visible):
            self.factorWins[count].ax[0].clear()
            sl = self.get_spatial_slice(count + self._first_factor_visible)
            sl_lim = _np.abs(sl.mean() + 3 * sl.std())
            self.factorWins[count].ax[0].imshow(sl, vmin=-sl_lim, vmax=sl_lim,
                                                interpolation='none',
                                                cmap='bwr', origin='lower')
            self.factorWins[count].ax[0].axis('Off')

            self.factorWins[count].ax[1].clear()
            self.factorWins[count].ax[1].plot(self.get_spectral_slice(count + self._first_factor_visible))
            self.factorLabelCheckBoxes[count].setText('Keep: ' + str(startnum + count))
            self.factorWins[count].draw()

            # Reflect the current selection state in the checkbox.
            if self._first_factor_visible + count in self.selected_factors:
                self.factorLabelCheckBoxes[count].setChecked(True)
            else:
                self.factorLabelCheckBoxes[count].setChecked(False)

    def clear(self):
        """
        Clear selected factors (i.e., none will be selected)
        """
        self.selected_factors = set()
        self.ui.lcdSelectedFactors.display(len(self.selected_factors))
        self.updateCurrentRemainder()
        self.updatePlots(startnum=self._first_factor_visible)
if __name__ == '__main__':
    # Abstract dialog: nothing runnable at module level. The commented-out
    # demo below shows how a concrete subclass would be exercised.
    pass
#    app = _QApplication(_sys.argv)
#    app.setStyle('Cleanlooks')

#    x = _np.linspace(100,200,50)
#    y = _np.linspace(200,300,50)
#    f = _np.linspace(500,3000,800)
#    Ex = 30*_np.exp((-(f-1750)**2/(200**2)))
#    Spectrum = _np.convolve(_np.flipud(Ex),Ex,mode='same')

#    data = _np.zeros((y.size,x.size,f.size))

#    for count in range(y.size):
#        data[count,:,:] = y[count]*_np.random.poisson(_np.dot(x[:,None],Spectrum[None,:]))
#    win = DialogAbstractFactorization.dialogAbstractFactorization(data, data.shape)  ### EDIT ###

#    print(win)
#    _sys.exit(app.exec_())
#    # _sys.exit()
/NEMO_billing-2.6.7-py3-none-any.whl/NEMO_billing/prepayments/models.py | from __future__ import annotations
import datetime
from _decimal import Decimal
from datetime import date
from logging import getLogger
from typing import Dict, List, Optional, Tuple, Union
from NEMO.models import (
AreaAccessRecord,
BaseCategory,
BaseModel,
EmailNotificationType,
Project,
Reservation,
StaffCharge,
TrainingSession,
UsageEvent,
User,
)
from NEMO.utilities import EmailCategory, get_month_timeframe, send_mail
from dateutil.relativedelta import relativedelta
from django.contrib import messages
from django.core.exceptions import ValidationError
from django.core.validators import MaxValueValidator, MinValueValidator, validate_comma_separated_integer_list
from django.db import models
from django.db.models import Q, QuerySet
from django.dispatch import receiver
from django.utils import timezone
from django.utils.safestring import mark_safe
from django.utils.translation import gettext_lazy as _
from NEMO_billing.invoices.customization import BillingCustomization
from NEMO_billing.invoices.models import BillableItemType, Invoice, InvoiceConfiguration, InvoiceSummaryItem
from NEMO_billing.invoices.processors import BillableItem
from NEMO_billing.invoices.utilities import display_amount
from NEMO_billing.models import CoreFacility
from NEMO_billing.prepayments.exceptions import (
ProjectFundsExpiredException,
ProjectFundsInactiveException,
ProjectInsufficientFundsException,
)
from NEMO_billing.utilities import Months, filter_date_year_month_gt, filter_date_year_month_lte
class ProjectPrepaymentDetail(BaseModel):
    """Prepayment configuration and balance tracking for a single project."""

    # The project this prepayment detail belongs to (one per project).
    project = models.OneToOneField(Project, verbose_name="Project", help_text="The project", on_delete=models.CASCADE)
    # Invoice configuration used to price charges; may be left unset.
    configuration = models.ForeignKey(InvoiceConfiguration, null=True, blank=True, on_delete=models.CASCADE)
    # Comma-separated BillableItemType integer values allowed on this project.
    charge_types = models.CharField(
        validators=[validate_comma_separated_integer_list],
        max_length=100,
        help_text="List of charge types allowed",
    )
    only_core_facilities = models.ManyToManyField(
        CoreFacility,
        blank=True,
        help_text="Limit which core facilities are allowed for this project. Leave blank to allow them all",
    )
    # Date (month granularity) through which fund balances have been updated.
    balance_last_updated = models.DateField(blank=True)
@property
def billable_charge_types(self) -> List[BillableItemType]:
return [BillableItemType(int(value)) for value in self.charge_types.split(",") if value]
def get_charge_types_display(self):
return mark_safe(
"<br>".join([charge_type.friendly_display_name() for charge_type in self.billable_charge_types])
)
def get_only_core_facilities_display(self):
if not self.only_core_facilities.exists():
return "All"
return mark_safe("<br>".join([facility.name for facility in self.only_core_facilities.all()]))
    def active_funds(self, check_date: date) -> QuerySet:
        """Return this project's usable funds as of check_date.

        Raises ProjectFundsExpiredException when every fund has expired, and
        ProjectFundsInactiveException when none has started yet. The result
        contains only funds with a positive balance, ordered by start date
        (then id) so they can be consumed oldest first.
        """
        funds = self.fund_set.all()
        # Funds cannot all have expired
        non_expired_funds = funds.filter(
            Q(expiration_month__isnull=True, expiration_year__isnull=True)
            | filter_date_year_month_gt("expiration", check_date)
        )
        if not non_expired_funds.exists():
            raise ProjectFundsExpiredException(self.project)
        # At least one fund has to be active
        active_funds = non_expired_funds.filter(filter_date_year_month_lte("start", check_date))
        if not active_funds.exists():
            raise ProjectFundsInactiveException(self.project, check_date)
        return active_funds.filter(balance__gt=0).order_by("start_year", "start_month", "id")
def get_charges_amount_between(self, start_date: datetime, end_date: datetime) -> (List[BillableItem], Decimal):
from NEMO_billing.invoices.processors import invoice_data_processor_class as data_processor
project_filter = Q(project_id=self.project_id)
config = self.configuration or InvoiceConfiguration.first_or_default()
charges = data_processor.get_billable_items(
start_date, end_date, config, project_filter, project_filter, project_filter
)
total = sum(charge.amount for charge in charges)
if not self.project.projectbillingdetails.no_tax:
taxes = total * config.tax_amount()
total = total + taxes
return charges, total
def update_balances_with_new_charges(self, new_charges: Decimal, as_of_date: date) -> List[Tuple[Fund, Decimal]]:
fund_and_used_amount = []
# This is used at the end of the month when invoicing
active_funds_sorted_by_date = self.active_funds(as_of_date)
last_item = active_funds_sorted_by_date.last()
# Max out active funds one by one until the last one or no more charges
for fund in active_funds_sorted_by_date:
if new_charges <= 0:
break
if fund == last_item:
# last fund, use all remaining charges
update_amount = new_charges
else:
# max out other funds
update_amount = min(fund.balance, new_charges)
fund.update_balance(update_amount)
fund_and_used_amount.append((fund, update_amount))
new_charges = new_charges - update_amount
self.balance_last_updated = as_of_date
self.save()
return fund_and_used_amount
def get_prepayment_info(
self, until: date, start_in_month: date = None, raise_exception=False
) -> (List[BillableItem], Decimal, Dict[int, Decimal]):
# Returns total charges, total charges amount, and fund balances
total_charges: List[BillableItem] = []
total_charges_amount = Decimal(0)
if not start_in_month:
# balance is always one month behind
start_in_month = (self.balance_last_updated + relativedelta(months=1)).replace(day=1)
months = number_of_months_between_dates(until, start_in_month)
# keep track of fund balances month to month
fund_balances: Dict[int, Decimal] = {}
for month in range(0, months + 1):
month_date = start_in_month + relativedelta(months=month)
# beginning and end of the month
start, end = get_month_timeframe(month_date.isoformat())
monthly_charges, monthly_charges_amount = self.get_charges_amount_between(start, end)
total_charges.extend(monthly_charges)
total_charges_amount = total_charges_amount + monthly_charges_amount
if monthly_charges_amount:
# only need to check funds valid at this date (expired or inactive won't be returned by active_funds)
funds_left = Decimal(0)
new_funds = Fund.objects.none()
try:
new_funds = self.active_funds(end.date())
except Exception:
if raise_exception:
raise
last_fund_checked: Optional[Fund] = None
last_item = new_funds.last()
for fund in new_funds:
balance = max(fund_balances.setdefault(fund.id, fund.balance), Decimal(0))
if fund == last_item:
# last fund, use all remaining charges
update_amount = monthly_charges_amount
else:
# max out other funds
update_amount = min(fund.balance, monthly_charges_amount)
fund_balances[fund.id] = balance - update_amount
monthly_charges_amount = monthly_charges_amount - update_amount
funds_left += fund_balances[fund.id]
last_fund_checked = fund
if last_fund_checked:
last_fund_checked.check_for_low_balance(funds_left)
if not funds_left > 0:
if raise_exception:
raise ProjectInsufficientFundsException(self.project)
return total_charges, total_charges_amount, fund_balances
def invoice_fund_summaries(self, invoice: Invoice) -> list[InvoiceSummaryItem]:
fund_summaries = []
fund_and_amounts = self.update_balances_with_new_charges(invoice.total_amount, invoice.end)
for fund_and_amount in fund_and_amounts:
fund, amount = fund_and_amount
fund_summary = InvoiceSummaryItem(
invoice=invoice, name=f"{fund} balance: {display_amount(fund.balance, invoice.configuration)}"
)
fund_summary.amount = -amount
fund_summary.summary_item_type = InvoiceSummaryItem.InvoiceSummaryItemType.FUND
# Set fund id in details, so we can find it later for voiding/deleting
fund_summary.details = str(fund.id)
fund_summaries.append(fund_summary)
return fund_summaries
def restore_funds(self, invoice: Invoice, request):
previous_balance_date = invoice.end - relativedelta(months=1)
for fund_summary in invoice.invoicesummaryitem_set.filter(
summary_item_type=InvoiceSummaryItem.InvoiceSummaryItemType.FUND
):
# Restore balance on each fund
fund = Fund.objects.get(id=fund_summary.details)
fund.update_balance(fund_summary.amount)
messages.success(
request,
f"the balance for {fund} was successfully credited back {display_amount(-fund_summary.amount, invoice.configuration)}.",
"data-speed=30000",
)
self.balance_last_updated = previous_balance_date
self.save()
def clean(self):
if self.project_id:
if not self.project.projectbillingdetails.no_tax and not self.configuration:
raise ValidationError(
{
"configuration": _(
"Configuration is required for taxed projects. Select a configuration or make the project tax exempt"
)
}
)
def save(self, force_insert=False, force_update=False, using=None, update_fields=None):
if not self.pk and self.balance_last_updated is None:
# Set original balance date to be last day of last month
# Reusing get_month_timeframe since that's what will be used when running invoices
self.balance_last_updated = get_month_timeframe(
(datetime.date.today() - relativedelta(months=1)).isoformat()
)[1]
super().save(force_insert, force_update, using, update_fields)
def __str__(self):
return f"Prepayment details for {self.project.name}"
class Meta:
ordering = ["project"]
class FundType(BaseCategory):
    """Named category used to classify funds; all behavior comes from BaseCategory."""
    pass
class Fund(BaseModel):
    """A prepaid amount of money attached to a project's prepayment details.

    Tracks the original amount, the remaining balance, the active window
    (start/expiration year+month pairs), and optional low-balance warning
    settings used to send a one-time notification email.
    """

    project_prepayment = models.ForeignKey(ProjectPrepaymentDetail, on_delete=models.CASCADE)
    fund_type = models.ForeignKey(FundType, on_delete=models.PROTECT)
    reference = models.CharField(max_length=255, null=True, blank=True)
    # Original amount; balance is initialized to this on first save (see save()).
    amount = models.DecimalField(decimal_places=2, max_digits=14)
    start_month = models.PositiveIntegerField(choices=Months.choices)
    start_year = models.PositiveIntegerField(
        validators=[MinValueValidator(1900), MaxValueValidator(datetime.MAXYEAR)],
    )
    expiration_month = models.PositiveIntegerField(null=True, blank=True, choices=Months.choices)
    expiration_year = models.PositiveIntegerField(
        null=True,
        blank=True,
        validators=[MinValueValidator(1900), MaxValueValidator(datetime.MAXYEAR)],
    )
    balance = models.DecimalField(blank=True, decimal_places=2, max_digits=14)
    balance_warning_percent = models.PositiveIntegerField(
        null=True,
        blank=True,
        help_text="Send a warning email when the balance is below this percent.",
        validators=[MaxValueValidator(100)],
    )
    # Set once the low-balance warning email has been sent, so it is only sent once.
    balance_warning_sent = models.DateTimeField(null=True, blank=True)
    note = models.CharField(max_length=255, null=True, blank=True)

    @property
    def start_date(self) -> date:
        """First day of the start month, or None if year/month are not both set."""
        if self.start_year and self.start_month:
            return date(self.start_year, self.start_month, 1)

    @start_date.setter
    def start_date(self, value: date):
        if value:
            self.start_month, self.start_year = value.month, value.year
        else:
            self.start_month, self.start_year = None, None

    @property
    def expiration_date(self) -> date:
        """First day of the expiration month, or None if year/month are not both set."""
        if self.expiration_year and self.expiration_month:
            return date(self.expiration_year, self.expiration_month, 1)

    @expiration_date.setter
    def expiration_date(self, value: date):
        if value:
            self.expiration_month, self.expiration_year = value.month, value.year
        else:
            self.expiration_month, self.expiration_year = None, None

    def is_active(self, check_date: date):
        """Whether this fund is among its project's active funds on check_date."""
        return self in self.project_prepayment.active_funds(check_date)

    def update_balance(self, new_charges: Decimal):
        """Subtract new_charges from the balance and save (negative values credit the fund)."""
        self.balance = self.balance - new_charges
        self.save()

    def check_for_low_balance(self, balance_left: Decimal, raise_exception=False):
        """Send a one-time low-balance warning email when balance_left is at or
        below the configured percentage of the original amount.

        Errors are logged and swallowed unless raise_exception is True.
        """
        try:
            if not self.balance_warning_sent:
                if self.balance_warning_percent:
                    warning_amount = self.balance_warning_percent / Decimal(100) * self.amount
                    if balance_left <= warning_amount:
                        subject = f"Low fund balance for project {self.project_prepayment.project.name}"
                        # Send to accounting staff, billing accounting email and project email
                        recipients = [
                            email
                            for user in User.objects.filter(is_active=True, is_accounting_officer=True)
                            for email in user.get_emails(EmailNotificationType.BOTH_EMAILS)
                        ]
                        billing_email = BillingCustomization.get("billing_accounting_email_address")
                        if billing_email:
                            recipients.append(billing_email)
                        recipients.extend(self.project_prepayment.project.projectbillingdetails.email_to())
                        message = "Hello,<br><br>\n\n"
                        # Fixed typo in user-facing message: "You project" -> "Your project"
                        message += f"Your project {self.project_prepayment.project.name} has a low fund balance:<br>\n"
                        message += f"Original amount: {self.amount:.2f}<br>\n"
                        message += f"Current balance: {balance_left:.2f}<br>\n"
                        send_mail(
                            subject=subject,
                            content=message,
                            from_email=billing_email,
                            to=recipients,
                            email_category=EmailCategory.GENERAL,
                        )
                        self.balance_warning_sent = timezone.now()
                        self.save(update_fields=["balance_warning_sent"])
        # Catch Exception (not a bare except) so KeyboardInterrupt/SystemExit propagate.
        except Exception:
            getLogger(__name__).exception("Error checking/sending low balance email")
            if raise_exception:
                raise

    def clean(self):
        """Validate balance <= amount and that year/month pairs are complete."""
        errors = {}
        if self.balance and self.balance > self.amount:
            errors["balance"] = _("The balance cannot be greater than the fund amount")
        # "and" binds tighter than "or": each test flags a half-filled year/month pair.
        if self.start_month and not self.start_year or self.start_year and not self.start_month:
            errors["start_year"] = "Both year/month are required together"
            errors["start_month"] = "Both year/month are required together"
        if self.expiration_month and not self.expiration_year or self.expiration_year and not self.expiration_month:
            errors["expiration_year"] = "Both year/month are required together"
            errors["expiration_month"] = "Both year/month are required together"
        if errors:
            raise ValidationError(errors)

    def save(self, force_insert=False, force_update=False, using=None, update_fields=None):
        # Initialize the balance to the full amount on first save.
        if not self.pk and self.balance is None:
            self.balance = self.amount
        super().save(force_insert, force_update, using, update_fields)

    def __str__(self):
        fund_name = f"Fund ref: {self.reference}" if self.reference else f"Fund #{self.id}"
        return f"{fund_name} ({self.fund_type})"
@receiver(models.signals.pre_save)
def auto_check_charge_type_for_prepaid_projects(
    sender, instance: Union[AreaAccessRecord, UsageEvent, TrainingSession, StaffCharge], **kwargs
):
    """Pre-save hook: when a new charge is created for a prepaid project, ask the
    active policy (if it is a PrepaymentPolicy) to validate the charge type."""
    # We only care about area access records and training
    if not issubclass(sender, (AreaAccessRecord, UsageEvent, TrainingSession, StaffCharge)):
        return
    if instance.pk:
        # Only validate on creation, never on updates.
        return
    if not hasattr(instance.project, "projectprepaymentdetail"):
        # Not a prepaid project; nothing to check.
        return
    from NEMO.policy import policy_class as policy
    from NEMO_billing.prepayments.policy import PrepaymentPolicy

    if isinstance(policy, PrepaymentPolicy):
        policy.check_project_prepayment_charge(instance.project, BillableItem(instance, instance.project).item_type)
def number_of_months_between_dates(end_date, start_date):
    """Return the number of whole calendar months from start_date to end_date
    (negative when end_date precedes start_date; day-of-month is ignored)."""
    year_gap = end_date.year - start_date.year
    month_gap = end_date.month - start_date.month
    return 12 * year_gap + month_gap
/Nuitka-1.8.tar.gz/Nuitka-1.8/nuitka/build/inline_copy/lib/scons-2.3.2/SCons/Tool/msvs.py |
# Version-control revision stamp for this SCons tool module.
__revision__ = "src/engine/SCons/Tool/msvs.py 2014/07/05 09:42:21 garyo"
import SCons.compat
import base64
import hashlib
import ntpath
import os
# compat layer imports "cPickle" for us if it's available.
import pickle
import re
import sys
import SCons.Builder
import SCons.Node.FS
import SCons.Platform.win32
import SCons.Script.SConscript
import SCons.PathList
import SCons.Util
import SCons.Warnings
from MSCommon import msvc_exists, msvc_setup_env_once
from SCons.Defaults import processDefines
##############################################################################
# Below here are the classes and functions for generation of
# DSP/DSW/SLN/VCPROJ files.
##############################################################################
def xmlify(s):
    """Escape the XML special characters &, ' and " in *s* so it can be embedded
    in project-file attribute text.

    The source had been corrupted by entity decoding: the replacements were
    no-ops (e.g. replace("&", "&")) and the double-quote line was a syntax
    error. Restored the intended XML entities; '&' must be replaced first so
    the ampersands introduced by the other entities are not re-escaped.
    """
    s = s.replace("&", "&amp;")  # do this first
    s = s.replace("'", "&apos;")
    s = s.replace('"', "&quot;")
    return s
# Process a CPPPATH list in includes, given the env, target and source.
# Returns a tuple of nodes.
def processIncludes(includes, env, target, source):
    # Delegates to SCons.PathList to substitute construction variables and
    # resolve each include path relative to the given target/source.
    return SCons.PathList.PathList(includes).subst_path(env, target, source)
# Well-known Visual Studio project-type GUID for "external makefile" projects.
external_makefile_guid = '{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}'
def _generateGUID(slnfile, name):
    """This generates a dummy GUID for the sln file to use. It is
    based on the MD5 signatures of the sln filename plus the name of
    the project. It basically just needs to be unique, and not
    change with each invocation."""
    m = hashlib.md5()
    # Normalize the slnfile path to a Windows path (\ separators) so
    # the generated file has a consistent GUID even if we generate
    # it on a non-Windows platform.
    # NOTE(review): passing a str to md5.update() relies on Python 2
    # byte-string semantics; under Python 3 this would require bytes.
    m.update(ntpath.normpath(str(slnfile)) + str(name))
    solution = m.hexdigest().upper()
    # convert most of the signature to GUID form (discard the rest)
    solution = "{" + solution[:8] + "-" + solution[8:12] + "-" + solution[12:16] + "-" + solution[16:20] + "-" + solution[20:32] + "}"
    return solution
# Leading numeric version ("7.0") followed by an optional suite suffix ("Exp").
version_re = re.compile(r'(\d+\.\d+)(.*)')

def msvs_parse_version(s):
    """
    Split a Visual Studio version string such as '7.0Exp' into its numeric
    part (returned as a float) and whatever trailing "suite" text follows it.
    """
    parts = version_re.match(s)
    number, suite = parts.group(1), parts.group(2)
    return float(number), suite
# os.path.relpath has been introduced in Python 2.6
# We define it locally for earlier versions of Python
def relpath(path, start=os.path.curdir):
    """Return a relative version of a path"""
    import sys
    if not path:
        raise ValueError("no path specified")
    start_list = os.path.abspath(start).split(os.sep)
    path_list = os.path.abspath(path).split(os.sep)
    if 'posix' in sys.builtin_module_names:
        # Work out how much of the filepath is shared by start and path.
        i = len(os.path.commonprefix([start_list, path_list]))
    else:
        # Windows: drive letters and UNC shares compare case-insensitively.
        if start_list[0].lower() != path_list[0].lower():
            # NOTE(review): os.path.splitunc exists only on Python 2 /
            # ntpath; this branch cannot run on Python 3.
            unc_path, rest = os.path.splitunc(path)
            unc_start, rest = os.path.splitunc(start)
            if bool(unc_path) ^ bool(unc_start):
                raise ValueError("Cannot mix UNC and non-UNC paths (%s and %s)"
                                 % (path, start))
            else:
                raise ValueError("path is on drive %s, start on drive %s"
                                 % (path_list[0], start_list[0]))
        # Work out how much of the filepath is shared by start and path.
        for i in range(min(len(start_list), len(path_list))):
            if start_list[i].lower() != path_list[i].lower():
                break
        else:
            # for-else: every compared component matched, so include it too.
            i += 1
    rel_list = [os.pardir] * (len(start_list)-i) + path_list[i:]
    if not rel_list:
        return os.path.curdir
    return os.path.join(*rel_list)
# Install our fallback as os.path.relpath when the running Python (< 2.6)
# does not already provide one.
if not "relpath" in os.path.__all__:
    os.path.relpath = relpath
# This is how we re-invoke SCons from inside MSVS Project files.
# The problem is that we might have been invoked as either scons.bat
# or scons.py. If we were invoked directly as scons.py, then we could
# use sys.argv[0] to find the SCons "executable," but that doesn't work
# if we were invoked as scons.bat, which uses "python -c" to execute
# things and ends up with "-c" as sys.argv[0]. Consequently, we have
# the MSVS Project file invoke SCons the same way that scons.bat does,
# which works regardless of how we were invoked.
def getExecScriptMain(env, xml=None):
    """Return the "python -c" payload the generated project uses to re-invoke
    SCons: a one-liner that extends sys.path with the SCons library location
    (from SCONS_HOME / SCONS_LIB_DIR when set, otherwise the standard install
    locations) and calls SCons.Script.main(). XML-escaped when *xml* is true.
    """
    scons_home = env.get('SCONS_HOME')
    if not scons_home:
        # Fall back to the environment; keep the current value when unset.
        scons_home = os.environ.get('SCONS_LIB_DIR', scons_home)
    if scons_home:
        payload = "from os.path import join; import sys; sys.path = [ r'%s' ] + sys.path; import SCons.Script; SCons.Script.main()" % scons_home
    else:
        version = SCons.__version__
        payload = "from os.path import join; import sys; sys.path = [ join(sys.prefix, 'Lib', 'site-packages', 'scons-%(version)s'), join(sys.prefix, 'scons-%(version)s'), join(sys.prefix, 'Lib', 'site-packages', 'scons'), join(sys.prefix, 'scons') ] + sys.path; import SCons.Script; SCons.Script.main()" % locals()
    if xml:
        payload = xmlify(payload)
    return payload
# The string for the Python executable we tell the Project file to use
# is either sys.executable or, if an external PYTHON_ROOT environment
# variable exists, $(PYTHON_ROOT)\python.exe (generalized a little to
# pluck the actual executable name from sys.executable).
try:
    python_root = os.environ['PYTHON_ROOT']
except KeyError:
    python_executable = sys.executable
else:
    # Only the *presence* of PYTHON_ROOT matters; the project file embeds
    # the literal $$(PYTHON_ROOT) macro rather than the variable's value.
    python_executable = os.path.join('$$(PYTHON_ROOT)',
                                     os.path.split(sys.executable)[1])
class Config(object):
    """Mutable attribute bag holding one variant's build settings
    (buildtarget, outdir, cmdargs, runfile, variant, platform)."""
    pass
def splitFully(path):
    """Recursively split *path* into the list of all of its components."""
    head, tail = os.path.split(path)
    if head and head != path:
        # Still more directories to peel off; recurse on the head.
        return splitFully(head) + [tail]
    return [tail] if tail else []
def makeHierarchy(sources):
    '''Break a list of files into a hierarchy; for each value, if it is a string,
    then it is a file. If it is a dictionary, it is a folder. The string is
    the original path of the file.'''
    hierarchy = {}
    for src in sources:
        parts = splitFully(src)
        if not parts:
            # Path decomposed to nothing; skip it.
            continue
        node = hierarchy
        for folder in parts[:-1]:
            node = node.setdefault(folder, {})
        node[parts[-1]] = src
    return hierarchy
class _DSPGenerator(object):
    """ Base class for DSP generators """
    # env keyword names carrying extra source files, one per category in
    # the same order as 'sourcenames' below.
    srcargs = [
        'srcs',
        'incs',
        'localincs',
        'resources',
        'misc']
    def __init__(self, dspfile, source, env):
        # Normalizes 'variant', 'buildtarget', 'outdir' and 'runfile' from env
        # into equal-length lists (one entry per variant), collects categorized
        # source files, and registers a Config per variant.
        self.dspfile = str(dspfile)
        try:
            get_abspath = dspfile.get_abspath
        except AttributeError:
            # Plain string path rather than a Node.
            self.dspabs = os.path.abspath(dspfile)
        else:
            self.dspabs = get_abspath()
        if 'variant' not in env:
            raise SCons.Errors.InternalError("You must specify a 'variant' argument (i.e. 'Debug' or " +\
                  "'Release') to create an MSVSProject.")
        elif SCons.Util.is_String(env['variant']):
            variants = [env['variant']]
        elif SCons.Util.is_List(env['variant']):
            variants = env['variant']
        if 'buildtarget' not in env or env['buildtarget'] == None:
            buildtarget = ['']
        elif SCons.Util.is_String(env['buildtarget']):
            buildtarget = [env['buildtarget']]
        elif SCons.Util.is_List(env['buildtarget']):
            if len(env['buildtarget']) != len(variants):
                raise SCons.Errors.InternalError("Sizes of 'buildtarget' and 'variant' lists must be the same.")
            buildtarget = []
            for bt in env['buildtarget']:
                if SCons.Util.is_String(bt):
                    buildtarget.append(bt)
                else:
                    buildtarget.append(bt.get_abspath())
        else:
            buildtarget = [env['buildtarget'].get_abspath()]
        # A single build target is replicated for every variant.
        if len(buildtarget) == 1:
            bt = buildtarget[0]
            buildtarget = []
            for _ in variants:
                buildtarget.append(bt)
        if 'outdir' not in env or env['outdir'] == None:
            outdir = ['']
        elif SCons.Util.is_String(env['outdir']):
            outdir = [env['outdir']]
        elif SCons.Util.is_List(env['outdir']):
            if len(env['outdir']) != len(variants):
                raise SCons.Errors.InternalError("Sizes of 'outdir' and 'variant' lists must be the same.")
            outdir = []
            for s in env['outdir']:
                if SCons.Util.is_String(s):
                    outdir.append(s)
                else:
                    outdir.append(s.get_abspath())
        else:
            outdir = [env['outdir'].get_abspath()]
        # A single output directory is replicated for every variant.
        if len(outdir) == 1:
            s = outdir[0]
            outdir = []
            for v in variants:
                outdir.append(s)
        if 'runfile' not in env or env['runfile'] == None:
            # Default the run file to the last build target.
            runfile = buildtarget[-1:]
        elif SCons.Util.is_String(env['runfile']):
            runfile = [env['runfile']]
        elif SCons.Util.is_List(env['runfile']):
            if len(env['runfile']) != len(variants):
                raise SCons.Errors.InternalError("Sizes of 'runfile' and 'variant' lists must be the same.")
            runfile = []
            for s in env['runfile']:
                if SCons.Util.is_String(s):
                    runfile.append(s)
                else:
                    runfile.append(s.get_abspath())
        else:
            runfile = [env['runfile'].get_abspath()]
        # A single run file is replicated for every variant.
        if len(runfile) == 1:
            s = runfile[0]
            runfile = []
            for v in variants:
                runfile.append(s)
        self.sconscript = env['MSVSSCONSCRIPT']
        cmdargs = env.get('cmdargs', '')
        self.env = env
        # Project name: explicit 'name' argument, or derived from the dsp filename.
        if 'name' in self.env:
            self.name = self.env['name']
        else:
            self.name = os.path.basename(SCons.Util.splitext(self.dspfile)[0])
        self.name = self.env.subst(self.name)
        sourcenames = [
            'Source Files',
            'Header Files',
            'Local Headers',
            'Resource Files',
            'Other Files']
        self.sources = {}
        for n in sourcenames:
            self.sources[n] = []
        self.configs = {}
        self.nokeep = 0
        if 'nokeep' in env and env['variant'] != 0:
            self.nokeep = 1
        # Unless told otherwise, merge configs/sources from an existing project file.
        if self.nokeep == 0 and os.path.exists(self.dspabs):
            self.Parse()
        # Merge any explicitly supplied sources into their categories, skipping duplicates.
        for t in zip(sourcenames,self.srcargs):
            if t[1] in self.env:
                if SCons.Util.is_List(self.env[t[1]]):
                    for i in self.env[t[1]]:
                        if not i in self.sources[t[0]]:
                            self.sources[t[0]].append(i)
                else:
                    if not self.env[t[1]] in self.sources[t[0]]:
                        self.sources[t[0]].append(self.env[t[1]])
        for n in sourcenames:
            #TODO 2.4: compat layer supports sorted(key=) but not sort(key=)
            #TODO 2.4: self.sources[n].sort(key=lambda a: a.lower())
            self.sources[n] = sorted(self.sources[n], key=lambda a: a.lower())
        # Local helper: register one Config; "Variant|Platform" strings are split,
        # otherwise the platform defaults to Win32. (dspfile is captured as a default.)
        def AddConfig(self, variant, buildtarget, outdir, runfile, cmdargs, dspfile=dspfile):
            config = Config()
            config.buildtarget = buildtarget
            config.outdir = outdir
            config.cmdargs = cmdargs
            config.runfile = runfile
            match = re.match('(.*)\|(.*)', variant)
            if match:
                config.variant = match.group(1)
                config.platform = match.group(2)
            else:
                config.variant = variant
                config.platform = 'Win32'
            self.configs[variant] = config
            print "Adding '" + self.name + ' - ' + config.variant + '|' + config.platform + "' to '" + str(dspfile) + "'"
        for i in range(len(variants)):
            AddConfig(self, variants[i], buildtarget[i], outdir[i], runfile[i], cmdargs)
        # Distinct platforms across all configs, in first-seen order.
        self.platforms = []
        for key in self.configs.keys():
            platform = self.configs[key].platform
            if not platform in self.platforms:
                self.platforms.append(platform)
    def Build(self):
        # Overridden by subclasses to actually write the project file.
        pass
# Template for the header of a Visual C++ 6.0 .dsp project file;
# %(name)s and %(confkey)s are filled in via locals() in PrintHeader().
V6DSPHeader = """\
# Microsoft Developer Studio Project File - Name="%(name)s" - Package Owner=<4>
# Microsoft Developer Studio Generated Build File, Format Version 6.00
# ** DO NOT EDIT **
# TARGTYPE "Win32 (x86) External Target" 0x0106
CFG=%(name)s - Win32 %(confkey)s
!MESSAGE This is not a valid makefile. To build this project using NMAKE,
!MESSAGE use the Export Makefile command and run
!MESSAGE
!MESSAGE NMAKE /f "%(name)s.mak".
!MESSAGE
!MESSAGE You can specify a configuration when running NMAKE
!MESSAGE by defining the macro CFG on the command line. For example:
!MESSAGE
!MESSAGE NMAKE /f "%(name)s.mak" CFG="%(name)s - Win32 %(confkey)s"
!MESSAGE
!MESSAGE Possible choices for configuration are:
!MESSAGE
"""
class _GenerateV6DSP(_DSPGenerator):
    """Generates a Project file for MSVS 6.0"""
    def PrintHeader(self):
        # Write the V6DSPHeader template plus one !MESSAGE line per configuration.
        # pick a default config
        confkeys = sorted(self.configs.keys())
        name = self.name
        confkey = confkeys[0]
        self.file.write(V6DSPHeader % locals())
        for kind in confkeys:
            self.file.write('!MESSAGE "%s - Win32 %s" (based on "Win32 (x86) External Target")\n' % (name, kind))
        self.file.write('!MESSAGE \n\n')
    def PrintProject(self):
        # Write the per-configuration PROP sections, the target sections, the
        # source file groups, and finally the pickled SCons cache data.
        name = self.name
        self.file.write('# Begin Project\n'
                        '# PROP AllowPerConfigDependencies 0\n'
                        '# PROP Scc_ProjName ""\n'
                        '# PROP Scc_LocalPath ""\n\n')
        first = 1
        confkeys = sorted(self.configs.keys())
        for kind in confkeys:
            outdir = self.configs[kind].outdir
            buildtarget = self.configs[kind].buildtarget
            if first == 1:
                self.file.write('!IF "$(CFG)" == "%s - Win32 %s"\n\n' % (name, kind))
                first = 0
            else:
                self.file.write('\n!ELSEIF "$(CFG)" == "%s - Win32 %s"\n\n' % (name, kind))
            # Temporarily expose the build target so $MSVSBUILDCOM can expand it.
            env_has_buildtarget = 'MSVSBUILDTARGET' in self.env
            if not env_has_buildtarget:
                self.env['MSVSBUILDTARGET'] = buildtarget
            # have to write this twice, once with the BASE settings, and once without
            for base in ("BASE ",""):
                self.file.write('# PROP %sUse_MFC 0\n'
                                '# PROP %sUse_Debug_Libraries ' % (base, base))
                if kind.lower().find('debug') < 0:
                    self.file.write('0\n')
                else:
                    self.file.write('1\n')
                self.file.write('# PROP %sOutput_Dir "%s"\n'
                                '# PROP %sIntermediate_Dir "%s"\n' % (base,outdir,base,outdir))
                cmd = 'echo Starting SCons && ' + self.env.subst('$MSVSBUILDCOM', 1)
                self.file.write('# PROP %sCmd_Line "%s"\n'
                                '# PROP %sRebuild_Opt "-c && %s"\n'
                                '# PROP %sTarget_File "%s"\n'
                                '# PROP %sBsc_Name ""\n'
                                '# PROP %sTarget_Dir ""\n'\
                                %(base,cmd,base,cmd,base,buildtarget,base,base))
            if not env_has_buildtarget:
                del self.env['MSVSBUILDTARGET']
        self.file.write('\n!ENDIF\n\n'
                        '# Begin Target\n\n')
        for kind in confkeys:
            self.file.write('# Name "%s - Win32 %s"\n' % (name,kind))
        self.file.write('\n')
        first = 0
        for kind in confkeys:
            if first == 0:
                self.file.write('!IF "$(CFG)" == "%s - Win32 %s"\n\n' % (name,kind))
                first = 1
            else:
                self.file.write('!ELSEIF "$(CFG)" == "%s - Win32 %s"\n\n' % (name,kind))
        self.file.write('!ENDIF \n\n')
        self.PrintSourceFiles()
        self.file.write('# End Target\n'
                        '# End Project\n')
        if self.nokeep == 0:
            # now we pickle some data and add it to the file -- MSDEV will ignore it.
            pdata = pickle.dumps(self.configs,1)
            pdata = base64.encodestring(pdata)
            self.file.write(pdata + '\n')
            pdata = pickle.dumps(self.sources,1)
            pdata = base64.encodestring(pdata)
            self.file.write(pdata + '\n')
    def PrintSourceFiles(self):
        # Write one "# Begin Group" section per non-empty category, then the
        # SConscript file itself outside any group.
        categories = {'Source Files': 'cpp|c|cxx|l|y|def|odl|idl|hpj|bat',
                      'Header Files': 'h|hpp|hxx|hm|inl',
                      'Local Headers': 'h|hpp|hxx|hm|inl',
                      'Resource Files': 'r|rc|ico|cur|bmp|dlg|rc2|rct|bin|cnt|rtf|gif|jpg|jpeg|jpe',
                      'Other Files': ''}
        for kind in sorted(categories.keys(), key=lambda a: a.lower()):
            if not self.sources[kind]:
                continue # skip empty groups
            self.file.write('# Begin Group "' + kind + '"\n\n')
            typelist = categories[kind].replace('|', ';')
            self.file.write('# PROP Default_Filter "' + typelist + '"\n')
            for file in self.sources[kind]:
                file = os.path.normpath(file)
                self.file.write('# Begin Source File\n\n'
                                'SOURCE="' + file + '"\n'
                                '# End Source File\n')
            self.file.write('# End Group\n')
        # add the SConscript file outside of the groups
        self.file.write('# Begin Source File\n\n'
                        'SOURCE="' + str(self.sconscript) + '"\n'
                        '# End Source File\n')
    def Parse(self):
        # Recover self.configs and self.sources from the pickled cache blocks
        # that PrintProject appended after "# End Project".
        try:
            dspfile = open(self.dspabs,'r')
        except IOError:
            return # doesn't exist yet, so can't add anything to configs.
        line = dspfile.readline()
        while line:
            if line.find("# End Project") > -1:
                break
            line = dspfile.readline()
        line = dspfile.readline()
        datas = line
        while line and line != '\n':
            line = dspfile.readline()
            datas = datas + line
        # OK, we've found our little pickled cache of data.
        try:
            datas = base64.decodestring(datas)
            data = pickle.loads(datas)
        except KeyboardInterrupt:
            raise
        except:
            return # unable to unpickle any data for some reason
        self.configs.update(data)
        data = None
        line = dspfile.readline()
        datas = line
        while line and line != '\n':
            line = dspfile.readline()
            datas = datas + line
        # OK, we've found our little pickled cache of data.
        # it has a "# " in front of it, so we strip that.
        try:
            datas = base64.decodestring(datas)
            data = pickle.loads(datas)
        except KeyboardInterrupt:
            raise
        except:
            return # unable to unpickle any data for some reason
        self.sources.update(data)
    def Build(self):
        # Open the project file for writing and emit the header and body.
        try:
            self.file = open(self.dspabs,'w')
        except IOError, detail:
            raise SCons.Errors.InternalError('Unable to open "' + self.dspabs + '" for writing:' + str(detail))
        else:
            self.PrintHeader()
            self.PrintProject()
            self.file.close()
# Template for the header of a VS .NET (7.0/7.1) .vcproj file.
V7DSPHeader = """\
<?xml version="1.0" encoding="%(encoding)s"?>
<VisualStudioProject
\tProjectType="Visual C++"
\tVersion="%(versionstr)s"
\tName="%(name)s"
\tProjectGUID="%(project_guid)s"
%(scc_attrs)s
\tKeyword="MakeFileProj">
"""
# Per-configuration <Configuration> element for VS 7.x project files.
V7DSPConfiguration = """\
\t\t<Configuration
\t\t\tName="%(variant)s|%(platform)s"
\t\t\tOutputDirectory="%(outdir)s"
\t\t\tIntermediateDirectory="%(outdir)s"
\t\t\tConfigurationType="0"
\t\t\tUseOfMFC="0"
\t\t\tATLMinimizesCRunTimeLibraryUsage="FALSE">
\t\t\t<Tool
\t\t\t\tName="VCNMakeTool"
\t\t\t\tBuildCommandLine="%(buildcmd)s"
\t\t\t\tReBuildCommandLine="%(rebuildcmd)s"
\t\t\t\tCleanCommandLine="%(cleancmd)s"
\t\t\t\tOutput="%(runfile)s"/>
\t\t</Configuration>
"""
# Template for the header of a VS 2005+ (8.0/9.0) .vcproj file.
V8DSPHeader = """\
<?xml version="1.0" encoding="%(encoding)s"?>
<VisualStudioProject
\tProjectType="Visual C++"
\tVersion="%(versionstr)s"
\tName="%(name)s"
\tProjectGUID="%(project_guid)s"
\tRootNamespace="%(name)s"
%(scc_attrs)s
\tKeyword="MakeFileProj">
"""
# Per-configuration <Configuration> element for VS 8.0/9.0 project files.
V8DSPConfiguration = """\
\t\t<Configuration
\t\t\tName="%(variant)s|%(platform)s"
\t\t\tConfigurationType="0"
\t\t\tUseOfMFC="0"
\t\t\tATLMinimizesCRunTimeLibraryUsage="false"
\t\t\t>
\t\t\t<Tool
\t\t\t\tName="VCNMakeTool"
\t\t\t\tBuildCommandLine="%(buildcmd)s"
\t\t\t\tReBuildCommandLine="%(rebuildcmd)s"
\t\t\t\tCleanCommandLine="%(cleancmd)s"
\t\t\t\tOutput="%(runfile)s"
\t\t\t\tPreprocessorDefinitions="%(preprocdefs)s"
\t\t\t\tIncludeSearchPath="%(includepath)s"
\t\t\t\tForcedIncludes=""
\t\t\t\tAssemblySearchPath=""
\t\t\t\tForcedUsingAssemblies=""
\t\t\t\tCompileAsManaged=""
\t\t\t/>
\t\t</Configuration>
"""
class _GenerateV7DSP(_DSPGenerator):
"""Generates a Project file for MSVS .NET"""
def __init__(self, dspfile, source, env):
_DSPGenerator.__init__(self, dspfile, source, env)
self.version = env['MSVS_VERSION']
self.version_num, self.suite = msvs_parse_version(self.version)
if self.version_num >= 9.0:
self.versionstr = '9.00'
self.dspheader = V8DSPHeader
self.dspconfiguration = V8DSPConfiguration
elif self.version_num >= 8.0:
self.versionstr = '8.00'
self.dspheader = V8DSPHeader
self.dspconfiguration = V8DSPConfiguration
else:
if self.version_num >= 7.1:
self.versionstr = '7.10'
else:
self.versionstr = '7.00'
self.dspheader = V7DSPHeader
self.dspconfiguration = V7DSPConfiguration
self.file = None
def PrintHeader(self):
env = self.env
versionstr = self.versionstr
name = self.name
encoding = self.env.subst('$MSVSENCODING')
scc_provider = env.get('MSVS_SCC_PROVIDER', '')
scc_project_name = env.get('MSVS_SCC_PROJECT_NAME', '')
scc_aux_path = env.get('MSVS_SCC_AUX_PATH', '')
# MSVS_SCC_LOCAL_PATH is kept for backwards compatibility purpose and should
# be deprecated as soon as possible.
scc_local_path_legacy = env.get('MSVS_SCC_LOCAL_PATH', '')
scc_connection_root = env.get('MSVS_SCC_CONNECTION_ROOT', os.curdir)
scc_local_path = os.path.relpath(scc_connection_root, os.path.dirname(self.dspabs))
project_guid = env.get('MSVS_PROJECT_GUID', '')
if not project_guid:
project_guid = _generateGUID(self.dspfile, '')
if scc_provider != '':
scc_attrs = '\tSccProjectName="%s"\n' % scc_project_name
if scc_aux_path != '':
scc_attrs += '\tSccAuxPath="%s"\n' % scc_aux_path
scc_attrs += ('\tSccLocalPath="%s"\n'
'\tSccProvider="%s"' % (scc_local_path, scc_provider))
elif scc_local_path_legacy != '':
# This case is kept for backwards compatibility purpose and should
# be deprecated as soon as possible.
scc_attrs = ('\tSccProjectName="%s"\n'
'\tSccLocalPath="%s"' % (scc_project_name, scc_local_path_legacy))
else:
self.dspheader = self.dspheader.replace('%(scc_attrs)s\n', '')
self.file.write(self.dspheader % locals())
self.file.write('\t<Platforms>\n')
for platform in self.platforms:
self.file.write(
'\t\t<Platform\n'
'\t\t\tName="%s"/>\n' % platform)
self.file.write('\t</Platforms>\n')
if self.version_num >= 8.0:
self.file.write('\t<ToolFiles>\n'
'\t</ToolFiles>\n')
def PrintProject(self):
self.file.write('\t<Configurations>\n')
confkeys = sorted(self.configs.keys())
for kind in confkeys:
variant = self.configs[kind].variant
platform = self.configs[kind].platform
outdir = self.configs[kind].outdir
buildtarget = self.configs[kind].buildtarget
runfile = self.configs[kind].runfile
cmdargs = self.configs[kind].cmdargs
env_has_buildtarget = 'MSVSBUILDTARGET' in self.env
if not env_has_buildtarget:
self.env['MSVSBUILDTARGET'] = buildtarget
starting = 'echo Starting SCons && '
if cmdargs:
cmdargs = ' ' + cmdargs
else:
cmdargs = ''
buildcmd = xmlify(starting + self.env.subst('$MSVSBUILDCOM', 1) + cmdargs)
rebuildcmd = xmlify(starting + self.env.subst('$MSVSREBUILDCOM', 1) + cmdargs)
cleancmd = xmlify(starting + self.env.subst('$MSVSCLEANCOM', 1) + cmdargs)
# This isn't perfect; CPPDEFINES and CPPPATH can contain $TARGET and $SOURCE,
# so they could vary depending on the command being generated. This code
# assumes they don't.
preprocdefs = xmlify(';'.join(processDefines(self.env.get('CPPDEFINES', []))))
includepath_Dirs = processIncludes(self.env.get('CPPPATH', []), self.env, None, None)
includepath = xmlify(';'.join([str(x) for x in includepath_Dirs]))
if not env_has_buildtarget:
del self.env['MSVSBUILDTARGET']
self.file.write(self.dspconfiguration % locals())
self.file.write('\t</Configurations>\n')
if self.version_num >= 7.1:
self.file.write('\t<References>\n'
'\t</References>\n')
self.PrintSourceFiles()
self.file.write('</VisualStudioProject>\n')
if self.nokeep == 0:
# now we pickle some data and add it to the file -- MSDEV will ignore it.
pdata = pickle.dumps(self.configs,1)
pdata = base64.encodestring(pdata)
self.file.write('<!-- SCons Data:\n' + pdata + '\n')
pdata = pickle.dumps(self.sources,1)
pdata = base64.encodestring(pdata)
self.file.write(pdata + '-->\n')
def printSources(self, hierarchy, commonprefix):
sorteditems = sorted(hierarchy.items(), key=lambda a: a[0].lower())
# First folders, then files
for key, value in sorteditems:
if SCons.Util.is_Dict(value):
self.file.write('\t\t\t<Filter\n'
'\t\t\t\tName="%s"\n'
'\t\t\t\tFilter="">\n' % (key))
self.printSources(value, commonprefix)
self.file.write('\t\t\t</Filter>\n')
for key, value in sorteditems:
if SCons.Util.is_String(value):
file = value
if commonprefix:
file = os.path.join(commonprefix, value)
file = os.path.normpath(file)
self.file.write('\t\t\t<File\n'
'\t\t\t\tRelativePath="%s">\n'
'\t\t\t</File>\n' % (file))
def PrintSourceFiles(self):
categories = {'Source Files': 'cpp;c;cxx;l;y;def;odl;idl;hpj;bat',
'Header Files': 'h;hpp;hxx;hm;inl',
'Local Headers': 'h;hpp;hxx;hm;inl',
'Resource Files': 'r;rc;ico;cur;bmp;dlg;rc2;rct;bin;cnt;rtf;gif;jpg;jpeg;jpe',
'Other Files': ''}
self.file.write('\t<Files>\n')
cats = sorted([k for k in categories.keys() if self.sources[k]],
key=lambda a: a.lower())
for kind in cats:
if len(cats) > 1:
self.file.write('\t\t<Filter\n'
'\t\t\tName="%s"\n'
'\t\t\tFilter="%s">\n' % (kind, categories[kind]))
sources = self.sources[kind]
# First remove any common prefix
commonprefix = None
s = list(map(os.path.normpath, sources))
# take the dirname because the prefix may include parts
# of the filenames (e.g. if you have 'dir\abcd' and
# 'dir\acde' then the cp will be 'dir\a' )
cp = os.path.dirname( os.path.commonprefix(s) )
if cp and s[0][len(cp)] == os.sep:
# +1 because the filename starts after the separator
sources = [s[len(cp)+1:] for s in sources]
commonprefix = cp
hierarchy = makeHierarchy(sources)
self.printSources(hierarchy, commonprefix=commonprefix)
if len(cats)>1:
self.file.write('\t\t</Filter>\n')
# add the SConscript file outside of the groups
self.file.write('\t\t<File\n'
'\t\t\tRelativePath="%s">\n'
'\t\t</File>\n' % str(self.sconscript))
self.file.write('\t</Files>\n'
'\t<Globals>\n'
'\t</Globals>\n')
def Parse(self):
try:
dspfile = open(self.dspabs,'r')
except IOError:
return # doesn't exist yet, so can't add anything to configs.
line = dspfile.readline()
while line:
if line.find('<!-- SCons Data:') > -1:
break
line = dspfile.readline()
line = dspfile.readline()
datas = line
while line and line != '\n':
line = dspfile.readline()
datas = datas + line
# OK, we've found our little pickled cache of data.
try:
datas = base64.decodestring(datas)
data = pickle.loads(datas)
except KeyboardInterrupt:
raise
except:
return # unable to unpickle any data for some reason
self.configs.update(data)
data = None
line = dspfile.readline()
datas = line
while line and line != '\n':
line = dspfile.readline()
datas = datas + line
# OK, we've found our little pickled cache of data.
try:
datas = base64.decodestring(datas)
data = pickle.loads(datas)
except KeyboardInterrupt:
raise
except:
return # unable to unpickle any data for some reason
self.sources.update(data)
def Build(self):
try:
self.file = open(self.dspabs,'w')
except IOError, detail:
raise SCons.Errors.InternalError('Unable to open "' + self.dspabs + '" for writing:' + str(detail))
else:
self.PrintHeader()
self.PrintProject()
self.file.close()
# MSBuild (VS2010+) .vcxproj templates.  Each is expanded with '%' against
# a dict -- typically locals() inside _GenerateV10DSP -- so the %(name)s
# placeholders are part of the generator methods' contract.

V10DSPHeader = """\
<?xml version="1.0" encoding="%(encoding)s"?>
<Project DefaultTargets="Build" ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
"""

# One entry per variant|platform pair in the ProjectConfigurations group.
V10DSPProjectConfiguration = """\
\t\t<ProjectConfiguration Include="%(variant)s|%(platform)s">
\t\t\t<Configuration>%(variant)s</Configuration>
\t\t\t<Platform>%(platform)s</Platform>
\t\t</ProjectConfiguration>
"""

# Globals property group; %(scc_attrs)s is removed entirely when no
# source-code-control provider is configured.
V10DSPGlobals = """\
\t<PropertyGroup Label="Globals">
\t\t<ProjectGuid>%(project_guid)s</ProjectGuid>
%(scc_attrs)s\t\t<RootNamespace>%(name)s</RootNamespace>
\t\t<Keyword>MakeFileProj</Keyword>
\t</PropertyGroup>
"""

# Per-configuration property group (SCons projects are NMake/Makefile type).
V10DSPPropertyGroupCondition = """\
\t<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='%(variant)s|%(platform)s'" Label="Configuration">
\t\t<ConfigurationType>Makefile</ConfigurationType>
\t\t<UseOfMfc>false</UseOfMfc>
\t</PropertyGroup>
"""

# Per-configuration import of the user property sheet, if present.
V10DSPImportGroupCondition = """\
\t<ImportGroup Condition="'$(Configuration)|$(Platform)'=='%(variant)s|%(platform)s'" Label="PropertySheets">
\t\t<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
\t</ImportGroup>
"""

# NMake command lines (build/rebuild/clean) plus preprocessor/include
# settings, one set per configuration.
V10DSPCommandLine = """\
\t\t<NMakeBuildCommandLine Condition="'$(Configuration)|$(Platform)'=='%(variant)s|%(platform)s'">%(buildcmd)s</NMakeBuildCommandLine>
\t\t<NMakeReBuildCommandLine Condition="'$(Configuration)|$(Platform)'=='%(variant)s|%(platform)s'">%(rebuildcmd)s</NMakeReBuildCommandLine>
\t\t<NMakeCleanCommandLine Condition="'$(Configuration)|$(Platform)'=='%(variant)s|%(platform)s'">%(cleancmd)s</NMakeCleanCommandLine>
\t\t<NMakeOutput Condition="'$(Configuration)|$(Platform)'=='%(variant)s|%(platform)s'">%(runfile)s</NMakeOutput>
\t\t<NMakePreprocessorDefinitions Condition="'$(Configuration)|$(Platform)'=='%(variant)s|%(platform)s'">%(preprocdefs)s</NMakePreprocessorDefinitions>
\t\t<NMakeIncludeSearchPath Condition="'$(Configuration)|$(Platform)'=='%(variant)s|%(platform)s'">%(includepath)s</NMakeIncludeSearchPath>
\t\t<NMakeForcedIncludes Condition="'$(Configuration)|$(Platform)'=='%(variant)s|%(platform)s'">$(NMakeForcedIncludes)</NMakeForcedIncludes>
\t\t<NMakeAssemblySearchPath Condition="'$(Configuration)|$(Platform)'=='%(variant)s|%(platform)s'">$(NMakeAssemblySearchPath)</NMakeAssemblySearchPath>
\t\t<NMakeForcedUsingAssemblies Condition="'$(Configuration)|$(Platform)'=='%(variant)s|%(platform)s'">$(NMakeForcedUsingAssemblies)</NMakeForcedUsingAssemblies>
"""
class _GenerateV10DSP(_DSPGenerator):
    """Generates a Project file for MSVS 2010.

    Emits an MSBuild-format .vcxproj plus the companion .vcxproj.filters
    file.  The module-level V10DSP* templates are expanded against
    locals(), so the local variable names bound in these methods are part
    of the template contract -- do not rename them.
    """
    def __init__(self, dspfile, source, env):
        _DSPGenerator.__init__(self, dspfile, source, env)
        # Select the MSBuild-era templates defined at module level.
        self.dspheader = V10DSPHeader
        self.dspconfiguration = V10DSPProjectConfiguration
        self.dspglobals = V10DSPGlobals

    def PrintHeader(self):
        """Write the XML prologue, the ProjectConfigurations item group
        and the Globals property group (with optional SCC bindings)."""
        env = self.env
        name = self.name
        encoding = env.subst('$MSVSENCODING')
        project_guid = env.get('MSVS_PROJECT_GUID', '')
        scc_provider = env.get('MSVS_SCC_PROVIDER', '')
        scc_project_name = env.get('MSVS_SCC_PROJECT_NAME', '')
        scc_aux_path = env.get('MSVS_SCC_AUX_PATH', '')
        # MSVS_SCC_LOCAL_PATH is kept for backwards compatibility purpose and should
        # be deprecated as soon as possible.
        scc_local_path_legacy = env.get('MSVS_SCC_LOCAL_PATH', '')
        scc_connection_root = env.get('MSVS_SCC_CONNECTION_ROOT', os.curdir)
        scc_local_path = os.path.relpath(scc_connection_root, os.path.dirname(self.dspabs))
        if not project_guid:
            # Derive a stable GUID from the project file name.
            project_guid = _generateGUID(self.dspfile, '')
        if scc_provider != '':
            scc_attrs = '\t\t<SccProjectName>%s</SccProjectName>\n' % scc_project_name
            if scc_aux_path != '':
                scc_attrs += '\t\t<SccAuxPath>%s</SccAuxPath>\n' % scc_aux_path
            scc_attrs += ('\t\t<SccLocalPath>%s</SccLocalPath>\n'
                          '\t\t<SccProvider>%s</SccProvider>\n' % (scc_local_path, scc_provider))
        elif scc_local_path_legacy != '':
            # This case is kept for backwards compatibility purpose and should
            # be deprecated as soon as possible.
            scc_attrs = ('\t\t<SccProjectName>%s</SccProjectName>\n'
                         '\t\t<SccLocalPath>%s</SccLocalPath>\n' % (scc_project_name, scc_local_path_legacy))
        else:
            # No SCC configured: drop the placeholder from the template.
            self.dspglobals = self.dspglobals.replace('%(scc_attrs)s', '')

        # Templates expand against locals() -- see class docstring.
        self.file.write(self.dspheader % locals())

        self.file.write('\t<ItemGroup Label="ProjectConfigurations">\n')

        confkeys = sorted(self.configs.keys())
        for kind in confkeys:
            variant = self.configs[kind].variant
            platform = self.configs[kind].platform
            self.file.write(self.dspconfiguration % locals())

        self.file.write('\t</ItemGroup>\n')

        self.file.write(self.dspglobals % locals())

    def PrintProject(self):
        """Write the property/import groups, the per-configuration NMake
        command lines, the source item groups (and the .filters file),
        and the trailing pickled SCons data."""
        name = self.name
        confkeys = sorted(self.configs.keys())

        self.file.write('\t<Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" />\n')

        for kind in confkeys:
            variant = self.configs[kind].variant
            platform = self.configs[kind].platform
            self.file.write(V10DSPPropertyGroupCondition % locals())

        self.file.write('\t<Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />\n')
        self.file.write('\t<ImportGroup Label="ExtensionSettings">\n')
        self.file.write('\t</ImportGroup>\n')

        for kind in confkeys:
            variant = self.configs[kind].variant
            platform = self.configs[kind].platform
            self.file.write(V10DSPImportGroupCondition % locals())

        self.file.write('\t<PropertyGroup Label="UserMacros" />\n')
        self.file.write('\t<PropertyGroup>\n')
        self.file.write('\t<_ProjectFileVersion>10.0.30319.1</_ProjectFileVersion>\n')

        for kind in confkeys:
            variant = self.configs[kind].variant
            platform = self.configs[kind].platform
            outdir = self.configs[kind].outdir
            buildtarget = self.configs[kind].buildtarget
            runfile = self.configs[kind].runfile
            cmdargs = self.configs[kind].cmdargs

            # Temporarily expose the build target for the $MSVS*COM
            # substitutions; removed again below if we added it.
            env_has_buildtarget = 'MSVSBUILDTARGET' in self.env
            if not env_has_buildtarget:
                self.env['MSVSBUILDTARGET'] = buildtarget

            starting = 'echo Starting SCons && '
            if cmdargs:
                cmdargs = ' ' + cmdargs
            else:
                cmdargs = ''
            buildcmd = xmlify(starting + self.env.subst('$MSVSBUILDCOM', 1) + cmdargs)
            rebuildcmd = xmlify(starting + self.env.subst('$MSVSREBUILDCOM', 1) + cmdargs)
            cleancmd = xmlify(starting + self.env.subst('$MSVSCLEANCOM', 1) + cmdargs)

            # This isn't perfect; CPPDEFINES and CPPPATH can contain $TARGET and $SOURCE,
            # so they could vary depending on the command being generated.  This code
            # assumes they don't.
            preprocdefs = xmlify(';'.join(processDefines(self.env.get('CPPDEFINES', []))))
            includepath_Dirs = processIncludes(self.env.get('CPPPATH', []), self.env, None, None)
            includepath = xmlify(';'.join([str(x) for x in includepath_Dirs]))

            if not env_has_buildtarget:
                del self.env['MSVSBUILDTARGET']

            self.file.write(V10DSPCommandLine % locals())

        self.file.write('\t</PropertyGroup>\n')

        #filter settings in MSVS 2010 are stored in separate file
        self.filtersabs = self.dspabs + '.filters'
        try:
            self.filters_file = open(self.filtersabs, 'w')
        except IOError, detail:
            raise SCons.Errors.InternalError('Unable to open "' + self.filtersabs + '" for writing:' + str(detail))

        self.filters_file.write('<?xml version="1.0" encoding="utf-8"?>\n'
                                '<Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">\n')

        # PrintSourceFiles writes into both self.file and self.filters_file.
        self.PrintSourceFiles()

        self.filters_file.write('</Project>')
        self.filters_file.close()

        self.file.write('\t<Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />\n'
                        '\t<ImportGroup Label="ExtensionTargets">\n'
                        '\t</ImportGroup>\n'
                        '</Project>\n')

        if self.nokeep == 0:
            # now we pickle some data and add it to the file -- MSDEV will ignore it.
            pdata = pickle.dumps(self.configs,1)
            pdata = base64.encodestring(pdata)
            self.file.write('<!-- SCons Data:\n' + pdata + '\n')
            pdata = pickle.dumps(self.sources,1)
            pdata = base64.encodestring(pdata)
            self.file.write(pdata + '-->\n')

    def printFilters(self, hierarchy, name):
        """Recursively emit <Filter> entries (with per-filter GUIDs) for
        every sub-folder of the source hierarchy into the .filters file."""
        sorteditems = sorted(hierarchy.items(), key = lambda a: a[0].lower())

        for key, value in sorteditems:
            if SCons.Util.is_Dict(value):
                filter_name = name + '\\' + key
                self.filters_file.write('\t\t<Filter Include="%s">\n'
                                        '\t\t\t<UniqueIdentifier>%s</UniqueIdentifier>\n'
                                        '\t\t</Filter>\n' % (filter_name, _generateGUID(self.dspabs, filter_name)))
                self.printFilters(value, filter_name)

    def printSources(self, hierarchy, kind, commonprefix, filter_name):
        """Emit ClCompile/ClInclude/None items for one directory level
        into the project file, and matching <Filter> assignments into the
        .filters file."""
        keywords = {'Source Files': 'ClCompile',
                    'Header Files': 'ClInclude',
                    'Local Headers': 'ClInclude',
                    'Resource Files': 'None',
                    'Other Files': 'None'}

        sorteditems = sorted(hierarchy.items(), key = lambda a: a[0].lower())

        # First folders, then files
        for key, value in sorteditems:
            if SCons.Util.is_Dict(value):
                self.printSources(value, kind, commonprefix, filter_name + '\\' + key)

        for key, value in sorteditems:
            if SCons.Util.is_String(value):
                file = value
                if commonprefix:
                    file = os.path.join(commonprefix, value)
                file = os.path.normpath(file)

                self.file.write('\t\t<%s Include="%s" />\n' % (keywords[kind], file))
                self.filters_file.write('\t\t<%s Include="%s">\n'
                                        '\t\t\t<Filter>%s</Filter>\n'
                                        '\t\t</%s>\n' % (keywords[kind], file, filter_name, keywords[kind]))

    def PrintSourceFiles(self):
        """Write the category filters to the .filters file and the source
        item groups to both the project and filters files."""
        categories = {'Source Files': 'cpp;c;cxx;l;y;def;odl;idl;hpj;bat',
                      'Header Files': 'h;hpp;hxx;hm;inl',
                      'Local Headers': 'h;hpp;hxx;hm;inl',
                      'Resource Files': 'r;rc;ico;cur;bmp;dlg;rc2;rct;bin;cnt;rtf;gif;jpg;jpeg;jpe',
                      'Other Files': ''}

        cats = sorted([k for k in categories.keys() if self.sources[k]],
                      key = lambda a: a.lower())

        # print vcxproj.filters file first
        self.filters_file.write('\t<ItemGroup>\n')
        for kind in cats:
            # NOTE(review): every category filter shares this hard-coded
            # UniqueIdentifier GUID (unlike printFilters, which generates
            # one per filter) -- presumably tolerated by VS, but consider
            # _generateGUID per category; verify against VS behavior.
            self.filters_file.write('\t\t<Filter Include="%s">\n'
                                    '\t\t\t<UniqueIdentifier>{7b42d31d-d53c-4868-8b92-ca2bc9fc052f}</UniqueIdentifier>\n'
                                    '\t\t\t<Extensions>%s</Extensions>\n'
                                    '\t\t</Filter>\n' % (kind, categories[kind]))
            # First remove any common prefix
            sources = self.sources[kind]
            commonprefix = None
            s = list(map(os.path.normpath, sources))
            # take the dirname because the prefix may include parts
            # of the filenames (e.g. if you have 'dir\abcd' and
            # 'dir\acde' then the cp will be 'dir\a' )
            cp = os.path.dirname( os.path.commonprefix(s) )
            if cp and s[0][len(cp)] == os.sep:
                # +1 because the filename starts after the separator
                # NOTE(review): the loop variable shadows the normalized
                # list 's' and the slice is applied to the raw entries;
                # correct only when the inputs are already normalized.
                sources = [s[len(cp)+1:] for s in sources]
                commonprefix = cp

            hierarchy = makeHierarchy(sources)
            self.printFilters(hierarchy, kind)

        self.filters_file.write('\t</ItemGroup>\n')

        # then print files and filters
        for kind in cats:
            self.file.write('\t<ItemGroup>\n')
            self.filters_file.write('\t<ItemGroup>\n')

            # First remove any common prefix
            sources = self.sources[kind]
            commonprefix = None
            s = list(map(os.path.normpath, sources))
            # take the dirname because the prefix may include parts
            # of the filenames (e.g. if you have 'dir\abcd' and
            # 'dir\acde' then the cp will be 'dir\a' )
            cp = os.path.dirname( os.path.commonprefix(s) )
            if cp and s[0][len(cp)] == os.sep:
                # +1 because the filename starts after the separator
                # NOTE(review): same shadowing caveat as above.
                sources = [s[len(cp)+1:] for s in sources]
                commonprefix = cp

            hierarchy = makeHierarchy(sources)
            self.printSources(hierarchy, kind, commonprefix, kind)

            self.file.write('\t</ItemGroup>\n')
            self.filters_file.write('\t</ItemGroup>\n')

        # add the SConscript file outside of the groups
        self.file.write('\t<ItemGroup>\n'
                        '\t\t<None Include="%s" />\n'
                        #'\t\t<None Include="SConstruct" />\n'
                        '\t</ItemGroup>\n' % str(self.sconscript))

    def Parse(self):
        # Debug trace only: unlike the V7 generator, nothing is re-read
        # from an existing .vcxproj.
        print "_GenerateV10DSP.Parse()"

    def Build(self):
        """Open the project file and emit the header and body sections."""
        try:
            self.file = open(self.dspabs, 'w')
        except IOError, detail:
            raise SCons.Errors.InternalError('Unable to open "' + self.dspabs + '" for writing:' + str(detail))
        else:
            self.PrintHeader()
            self.PrintProject()
            self.file.close()
class _DSWGenerator(object):
    """ Base class for DSW generators """
    def __init__(self, dswfile, source, env):
        """Validate env['projects'] and derive solution name and paths."""
        self.dswfile = os.path.normpath(str(dswfile))
        self.dsw_folder_path = os.path.dirname(os.path.abspath(self.dswfile))
        self.env = env

        # 'projects' is mandatory and must be a (possibly nested) list
        # with at least one entry.
        if 'projects' not in env:
            raise SCons.Errors.UserError("You must specify a 'projects' argument to create an MSVSSolution.")
        project_nodes = env['projects']
        if not SCons.Util.is_List(project_nodes):
            raise SCons.Errors.InternalError("The 'projects' argument must be a list of nodes.")
        project_nodes = SCons.Util.flatten(project_nodes)
        if len(project_nodes) < 1:
            raise SCons.Errors.UserError("You must specify at least one project to create an MSVSSolution.")
        self.dspfiles = list(map(str, project_nodes))

        # Solution name: an explicit 'name' wins, otherwise derive it from
        # the solution file's base name; either way run env substitution.
        if 'name' in self.env:
            solution_name = self.env['name']
        else:
            solution_name = os.path.basename(SCons.Util.splitext(self.dswfile)[0])
        self.name = self.env.subst(solution_name)

    def Build(self):
        # Concrete generators override this to write the file.
        pass
class _GenerateV7DSW(_DSWGenerator):
"""Generates a Solution file for MSVS .NET"""
def __init__(self, dswfile, source, env):
_DSWGenerator.__init__(self, dswfile, source, env)
self.file = None
self.version = self.env['MSVS_VERSION']
self.version_num, self.suite = msvs_parse_version(self.version)
self.versionstr = '7.00'
if self.version_num >= 11.0:
self.versionstr = '12.00'
elif self.version_num >= 10.0:
self.versionstr = '11.00'
elif self.version_num >= 9.0:
self.versionstr = '10.00'
elif self.version_num >= 8.0:
self.versionstr = '9.00'
elif self.version_num >= 7.1:
self.versionstr = '8.00'
if 'slnguid' in env and env['slnguid']:
self.slnguid = env['slnguid']
else:
self.slnguid = _generateGUID(dswfile, self.name)
self.configs = {}
self.nokeep = 0
if 'nokeep' in env and env['variant'] != 0:
self.nokeep = 1
if self.nokeep == 0 and os.path.exists(self.dswfile):
self.Parse()
def AddConfig(self, variant, dswfile=dswfile):
config = Config()
match = re.match('(.*)\|(.*)', variant)
if match:
config.variant = match.group(1)
config.platform = match.group(2)
else:
config.variant = variant
config.platform = 'Win32'
self.configs[variant] = config
print "Adding '" + self.name + ' - ' + config.variant + '|' + config.platform + "' to '" + str(dswfile) + "'"
if 'variant' not in env:
raise SCons.Errors.InternalError("You must specify a 'variant' argument (i.e. 'Debug' or " +\
"'Release') to create an MSVS Solution File.")
elif SCons.Util.is_String(env['variant']):
AddConfig(self, env['variant'])
elif SCons.Util.is_List(env['variant']):
for variant in env['variant']:
AddConfig(self, variant)
self.platforms = []
for key in self.configs.keys():
platform = self.configs[key].platform
if not platform in self.platforms:
self.platforms.append(platform)
def GenerateProjectFilesInfo(self):
for dspfile in self.dspfiles:
dsp_folder_path, name = os.path.split(dspfile)
dsp_folder_path = os.path.abspath(dsp_folder_path)
dsp_relative_folder_path = os.path.relpath(dsp_folder_path, self.dsw_folder_path)
if dsp_relative_folder_path == os.curdir:
dsp_relative_file_path = name
else:
dsp_relative_file_path = os.path.join(dsp_relative_folder_path, name)
dspfile_info = {'NAME': name,
'GUID': _generateGUID(dspfile, ''),
'FOLDER_PATH': dsp_folder_path,
'FILE_PATH': dspfile,
'SLN_RELATIVE_FOLDER_PATH': dsp_relative_folder_path,
'SLN_RELATIVE_FILE_PATH': dsp_relative_file_path}
self.dspfiles_info.append(dspfile_info)
self.dspfiles_info = []
GenerateProjectFilesInfo(self)
def Parse(self):
try:
dswfile = open(self.dswfile,'r')
except IOError:
return # doesn't exist yet, so can't add anything to configs.
line = dswfile.readline()
while line:
if line[:9] == "EndGlobal":
break
line = dswfile.readline()
line = dswfile.readline()
datas = line
while line:
line = dswfile.readline()
datas = datas + line
# OK, we've found our little pickled cache of data.
try:
datas = base64.decodestring(datas)
data = pickle.loads(datas)
except KeyboardInterrupt:
raise
except:
return # unable to unpickle any data for some reason
self.configs.update(data)
def PrintSolution(self):
"""Writes a solution file"""
self.file.write('Microsoft Visual Studio Solution File, Format Version %s\n' % self.versionstr)
if self.version_num >= 11.0:
self.file.write('# Visual Studio 11\n')
elif self.version_num >= 10.0:
self.file.write('# Visual Studio 2010\n')
elif self.version_num >= 9.0:
self.file.write('# Visual Studio 2008\n')
elif self.version_num >= 8.0:
self.file.write('# Visual Studio 2005\n')
for dspinfo in self.dspfiles_info:
name = dspinfo['NAME']
base, suffix = SCons.Util.splitext(name)
if suffix == '.vcproj':
name = base
self.file.write('Project("%s") = "%s", "%s", "%s"\n'
% (external_makefile_guid, name, dspinfo['SLN_RELATIVE_FILE_PATH'], dspinfo['GUID']))
if self.version_num >= 7.1 and self.version_num < 8.0:
self.file.write('\tProjectSection(ProjectDependencies) = postProject\n'
'\tEndProjectSection\n')
self.file.write('EndProject\n')
self.file.write('Global\n')
env = self.env
if 'MSVS_SCC_PROVIDER' in env:
scc_number_of_projects = len(self.dspfiles) + 1
slnguid = self.slnguid
scc_provider = env.get('MSVS_SCC_PROVIDER', '').replace(' ', r'\u0020')
scc_project_name = env.get('MSVS_SCC_PROJECT_NAME', '').replace(' ', r'\u0020')
scc_connection_root = env.get('MSVS_SCC_CONNECTION_ROOT', os.curdir)
scc_local_path = os.path.relpath(scc_connection_root, self.dsw_folder_path).replace('\\', '\\\\')
self.file.write('\tGlobalSection(SourceCodeControl) = preSolution\n'
'\t\tSccNumberOfProjects = %(scc_number_of_projects)d\n'
'\t\tSccProjectName0 = %(scc_project_name)s\n'
'\t\tSccLocalPath0 = %(scc_local_path)s\n'
'\t\tSccProvider0 = %(scc_provider)s\n'
'\t\tCanCheckoutShared = true\n' % locals())
sln_relative_path_from_scc = os.path.relpath(self.dsw_folder_path, scc_connection_root)
if sln_relative_path_from_scc != os.curdir:
self.file.write('\t\tSccProjectFilePathRelativizedFromConnection0 = %s\\\\\n'
% sln_relative_path_from_scc.replace('\\', '\\\\'))
if self.version_num < 8.0:
# When present, SolutionUniqueID is automatically removed by VS 2005
# TODO: check for Visual Studio versions newer than 2005
self.file.write('\t\tSolutionUniqueID = %s\n' % slnguid)
for dspinfo in self.dspfiles_info:
i = self.dspfiles_info.index(dspinfo) + 1
dsp_relative_file_path = dspinfo['SLN_RELATIVE_FILE_PATH'].replace('\\', '\\\\')
dsp_scc_relative_folder_path = os.path.relpath(dspinfo['FOLDER_PATH'], scc_connection_root).replace('\\', '\\\\')
self.file.write('\t\tSccProjectUniqueName%(i)s = %(dsp_relative_file_path)s\n'
'\t\tSccLocalPath%(i)d = %(scc_local_path)s\n'
'\t\tCanCheckoutShared = true\n'
'\t\tSccProjectFilePathRelativizedFromConnection%(i)s = %(dsp_scc_relative_folder_path)s\\\\\n'
% locals())
self.file.write('\tEndGlobalSection\n')
if self.version_num >= 8.0:
self.file.write('\tGlobalSection(SolutionConfigurationPlatforms) = preSolution\n')
else:
self.file.write('\tGlobalSection(SolutionConfiguration) = preSolution\n')
confkeys = sorted(self.configs.keys())
cnt = 0
for name in confkeys:
variant = self.configs[name].variant
platform = self.configs[name].platform
if self.version_num >= 8.0:
self.file.write('\t\t%s|%s = %s|%s\n' % (variant, platform, variant, platform))
else:
self.file.write('\t\tConfigName.%d = %s\n' % (cnt, variant))
cnt = cnt + 1
self.file.write('\tEndGlobalSection\n')
if self.version_num <= 7.1:
self.file.write('\tGlobalSection(ProjectDependencies) = postSolution\n'
'\tEndGlobalSection\n')
if self.version_num >= 8.0:
self.file.write('\tGlobalSection(ProjectConfigurationPlatforms) = postSolution\n')
else:
self.file.write('\tGlobalSection(ProjectConfiguration) = postSolution\n')
for name in confkeys:
variant = self.configs[name].variant
platform = self.configs[name].platform
if self.version_num >= 8.0:
for dspinfo in self.dspfiles_info:
guid = dspinfo['GUID']
self.file.write('\t\t%s.%s|%s.ActiveCfg = %s|%s\n'
'\t\t%s.%s|%s.Build.0 = %s|%s\n' % (guid,variant,platform,variant,platform,guid,variant,platform,variant,platform))
else:
for dspinfo in self.dspfiles_info:
guid = dspinfo['GUID']
self.file.write('\t\t%s.%s.ActiveCfg = %s|%s\n'
'\t\t%s.%s.Build.0 = %s|%s\n' %(guid,variant,variant,platform,guid,variant,variant,platform))
self.file.write('\tEndGlobalSection\n')
if self.version_num >= 8.0:
self.file.write('\tGlobalSection(SolutionProperties) = preSolution\n'
'\t\tHideSolutionNode = FALSE\n'
'\tEndGlobalSection\n')
else:
self.file.write('\tGlobalSection(ExtensibilityGlobals) = postSolution\n'
'\tEndGlobalSection\n'
'\tGlobalSection(ExtensibilityAddIns) = postSolution\n'
'\tEndGlobalSection\n')
self.file.write('EndGlobal\n')
if self.nokeep == 0:
pdata = pickle.dumps(self.configs,1)
pdata = base64.encodestring(pdata)
self.file.write(pdata + '\n')
def Build(self):
try:
self.file = open(self.dswfile,'w')
except IOError, detail:
raise SCons.Errors.InternalError('Unable to open "' + self.dswfile + '" for writing:' + str(detail))
else:
self.PrintSolution()
self.file.close()
V6DSWHeader = """\
Microsoft Developer Studio Workspace File, Format Version 6.00
# WARNING: DO NOT EDIT OR DELETE THIS WORKSPACE FILE!
###############################################################################
Project: "%(name)s"="%(dspfile)s" - Package Owner=<4>
Package=<5>
{{{
}}}
Package=<4>
{{{
}}}
###############################################################################
Global:
Package=<5>
{{{
}}}
Package=<3>
{{{
}}}
###############################################################################
"""
class _GenerateV6DSW(_DSWGenerator):
    """Generates a Workspace file for MSVS 6.0"""
    def PrintWorkspace(self):
        """ writes a DSW file """
        # V6DSWHeader is expanded against locals(), so the local names
        # 'name' and 'dspfile' are part of the template contract.
        name = self.name
        dspfile = os.path.relpath(self.dspfiles[0], self.dsw_folder_path)
        self.file.write(V6DSWHeader % locals())

    def Build(self):
        """Open the workspace file and write it."""
        try:
            self.file = open(self.dswfile,'w')
        except IOError, detail:
            raise SCons.Errors.InternalError('Unable to open "' + self.dswfile + '" for writing:' + str(detail))
        else:
            self.PrintWorkspace()
            self.file.close()
def GenerateDSP(dspfile, source, env):
    """Generates a Project file based on the version of MSVS that is being used"""
    version_num, suite = 6.0, None
    if 'MSVS_VERSION' in env:
        version_num, suite = msvs_parse_version(env['MSVS_VERSION'])

    # Pick the generator class matching the project-file format era.
    if version_num >= 10.0:
        generator_cls = _GenerateV10DSP
    elif version_num >= 7.0:
        generator_cls = _GenerateV7DSP
    else:
        generator_cls = _GenerateV6DSP
    generator_cls(dspfile, source, env).Build()
def GenerateDSW(dswfile, source, env):
    """Generates a Solution/Workspace file based on the version of MSVS that is being used"""
    version_num, suite = 6.0, None
    if 'MSVS_VERSION' in env:
        version_num, suite = msvs_parse_version(env['MSVS_VERSION'])

    # VS7 and later use the solution format; older releases get a DSW.
    generator_cls = _GenerateV7DSW if version_num >= 7.0 else _GenerateV6DSW
    generator_cls(dswfile, source, env).Build()
##############################################################################
# Above here are the classes and functions for generation of
# DSP/DSW/SLN/VCPROJ files.
##############################################################################
def GetMSVSProjectSuffix(target, source, env, for_signature):
    """Return the project-file suffix recorded in env['MSVS'] (set up by
    generate() according to the MSVS version in use)."""
    msvs_settings = env['MSVS']
    return msvs_settings['PROJECTSUFFIX']
def GetMSVSSolutionSuffix(target, source, env, for_signature):
    """Return the solution-file suffix recorded in env['MSVS'] (set up by
    generate() according to the MSVS version in use)."""
    msvs_settings = env['MSVS']
    return msvs_settings['SOLUTIONSUFFIX']
def GenerateProject(target, source, env):
# generate the dsp file, according to the version of MSVS.
builddspfile = target[0]
dspfile = builddspfile.srcnode()
# this detects whether or not we're using a VariantDir
if not dspfile is builddspfile:
try:
bdsp = open(str(builddspfile), "w+")
except IOError, detail:
print 'Unable to open "' + str(dspfile) + '" for writing:',detail,'\n'
raise
bdsp.write("This is just a placeholder file.\nThe real project file is here:\n%s\n" % dspfile.get_abspath())
GenerateDSP(dspfile, source, env)
if env.get('auto_build_solution', 1):
builddswfile = target[1]
dswfile = builddswfile.srcnode()
if not dswfile is builddswfile:
try:
bdsw = open(str(builddswfile), "w+")
except IOError, detail:
print 'Unable to open "' + str(dspfile) + '" for writing:',detail,'\n'
raise
bdsw.write("This is just a placeholder file.\nThe real workspace file is here:\n%s\n" % dswfile.get_abspath())
GenerateDSW(dswfile, source, env)
def GenerateSolution(target, source, env):
    """Builder action: generate a standalone solution file for target[0]."""
    GenerateDSW(target[0], source, env)
def projectEmitter(target, source, env):
    """Sets up the DSP dependencies.

    Rewrites target[0] with the version-correct project suffix, appends
    the solution target when auto_build_solution is on, and replaces the
    sources with a single Value node encoding every setting the project
    content depends on -- the exact concatenation order below *is* the
    dependency signature, so it must stay stable.
    """
    # todo: Not sure what sets source to what user has passed as target,
    # but this is what happens. When that is fixed, we also won't have
    # to make the user always append env['MSVSPROJECTSUFFIX'] to target.
    if source[0] == target[0]:
        source = []

    # make sure the suffix is correct for the version of MSVS we're running.
    (base, suff) = SCons.Util.splitext(str(target[0]))
    suff = env.subst('$MSVSPROJECTSUFFIX')
    target[0] = base + suff

    if not source:
        source = 'prj_inputs:'
        source = source + env.subst('$MSVSSCONSCOM', 1)
        source = source + env.subst('$MSVSENCODING', 1)

        # Project file depends on CPPDEFINES and CPPPATH
        preprocdefs = xmlify(';'.join(processDefines(env.get('CPPDEFINES', []))))
        includepath_Dirs = processIncludes(env.get('CPPPATH', []), env, None, None)
        includepath = xmlify(';'.join([str(x) for x in includepath_Dirs]))
        source = source + "; ppdefs:%s incpath:%s"%(preprocdefs, includepath)

        if 'buildtarget' in env and env['buildtarget'] != None:
            if SCons.Util.is_String(env['buildtarget']):
                source = source + ' "%s"' % env['buildtarget']
            elif SCons.Util.is_List(env['buildtarget']):
                for bt in env['buildtarget']:
                    if SCons.Util.is_String(bt):
                        source = source + ' "%s"' % bt
                    else:
                        try: source = source + ' "%s"' % bt.get_abspath()
                        except AttributeError: raise SCons.Errors.InternalError("buildtarget can be a string, a node, a list of strings or nodes, or None")
            else:
                try: source = source + ' "%s"' % env['buildtarget'].get_abspath()
                except AttributeError: raise SCons.Errors.InternalError("buildtarget can be a string, a node, a list of strings or nodes, or None")

        if 'outdir' in env and env['outdir'] != None:
            if SCons.Util.is_String(env['outdir']):
                source = source + ' "%s"' % env['outdir']
            elif SCons.Util.is_List(env['outdir']):
                for s in env['outdir']:
                    if SCons.Util.is_String(s):
                        source = source + ' "%s"' % s
                    else:
                        try: source = source + ' "%s"' % s.get_abspath()
                        except AttributeError: raise SCons.Errors.InternalError("outdir can be a string, a node, a list of strings or nodes, or None")
            else:
                try: source = source + ' "%s"' % env['outdir'].get_abspath()
                except AttributeError: raise SCons.Errors.InternalError("outdir can be a string, a node, a list of strings or nodes, or None")

        if 'name' in env:
            if SCons.Util.is_String(env['name']):
                source = source + ' "%s"' % env['name']
            else:
                raise SCons.Errors.InternalError("name must be a string")

        if 'variant' in env:
            if SCons.Util.is_String(env['variant']):
                source = source + ' "%s"' % env['variant']
            elif SCons.Util.is_List(env['variant']):
                for variant in env['variant']:
                    if SCons.Util.is_String(variant):
                        source = source + ' "%s"' % variant
                    else:
                        raise SCons.Errors.InternalError("name must be a string or a list of strings")
            else:
                raise SCons.Errors.InternalError("variant must be a string or a list of strings")
        else:
            raise SCons.Errors.InternalError("variant must be specified")

        for s in _DSPGenerator.srcargs:
            if s in env:
                if SCons.Util.is_String(env[s]):
                    # BUGFIX: the closing quote was missing here
                    # (' "%s' in the original), unlike the list branch.
                    source = source + ' "%s"' % env[s]
                elif SCons.Util.is_List(env[s]):
                    for t in env[s]:
                        if SCons.Util.is_String(t):
                            source = source + ' "%s"' % t
                        else:
                            raise SCons.Errors.InternalError(s + " must be a string or a list of strings")
                else:
                    raise SCons.Errors.InternalError(s + " must be a string or a list of strings")

        source = source + ' "%s"' % str(target[0])
        source = [SCons.Node.Python.Value(source)]

    targetlist = [target[0]]
    sourcelist = source

    if env.get('auto_build_solution', 1):
        env['projects'] = [env.File(t).srcnode() for t in targetlist]
        t, s = solutionEmitter(target, target, env)
        targetlist = targetlist + t

    # Beginning with Visual Studio 2010 for each project file (.vcxproj) we have additional file (.vcxproj.filters)
    # NOTE(review): this raises KeyError/ValueError when MSVS_VERSION is
    # unset or non-numeric (e.g. '9.0Exp'); other call sites guard with
    # "'MSVS_VERSION' in env" -- confirm whether that can happen here.
    if float(env['MSVS_VERSION']) >= 10.0:
        targetlist.append(targetlist[0] + '.filters')

    return (targetlist, sourcelist)
def solutionEmitter(target, source, env):
    """Sets up the DSW dependencies.

    Rewrites target[0] with the version-correct solution suffix and
    replaces the sources with a single Value node encoding the settings
    the solution content depends on; the concatenation order below is the
    dependency signature and must stay stable.
    """
    # todo: Not sure what sets source to what user has passed as target,
    # but this is what happens. When that is fixed, we also won't have
    # to make the user always append env['MSVSSOLUTIONSUFFIX'] to target.
    if source[0] == target[0]:
        source = []

    # make sure the suffix is correct for the version of MSVS we're running.
    (base, suff) = SCons.Util.splitext(str(target[0]))
    suff = env.subst('$MSVSSOLUTIONSUFFIX')
    target[0] = base + suff

    if not source:
        source = 'sln_inputs:'

    if 'name' in env:
        if SCons.Util.is_String(env['name']):
            source = source + ' "%s"' % env['name']
        else:
            raise SCons.Errors.InternalError("name must be a string")

    if 'variant' in env:
        if SCons.Util.is_String(env['variant']):
            source = source + ' "%s"' % env['variant']
        elif SCons.Util.is_List(env['variant']):
            for variant in env['variant']:
                if SCons.Util.is_String(variant):
                    source = source + ' "%s"' % variant
                else:
                    raise SCons.Errors.InternalError("name must be a string or a list of strings")
        else:
            raise SCons.Errors.InternalError("variant must be a string or a list of strings")
    else:
        raise SCons.Errors.InternalError("variant must be specified")

    if 'slnguid' in env:
        if SCons.Util.is_String(env['slnguid']):
            source = source + ' "%s"' % env['slnguid']
        else:
            raise SCons.Errors.InternalError("slnguid must be a string")

    # Non-string project entries are silently skipped here (no error raised,
    # unlike the cases above).
    if 'projects' in env:
        if SCons.Util.is_String(env['projects']):
            source = source + ' "%s"' % env['projects']
        elif SCons.Util.is_List(env['projects']):
            for t in env['projects']:
                if SCons.Util.is_String(t):
                    source = source + ' "%s"' % t

    source = source + ' "%s"' % str(target[0])
    source = [SCons.Node.Python.Value(source)]

    return ([target[0]], source)
# Actions that write the project/solution files; wired to the builders
# below through $MSVSPROJECTCOM / $MSVSSOLUTIONCOM in generate().
projectAction = SCons.Action.Action(GenerateProject, None)

solutionAction = SCons.Action.Action(GenerateSolution, None)

# Builders installed by generate(); the emitters adjust targets/sources
# and the suffix variables resolve per MSVS version at build time.
projectBuilder = SCons.Builder.Builder(action = '$MSVSPROJECTCOM',
                                       suffix = '$MSVSPROJECTSUFFIX',
                                       emitter = projectEmitter)

solutionBuilder = SCons.Builder.Builder(action = '$MSVSSOLUTIONCOM',
                                        suffix = '$MSVSSOLUTIONSUFFIX',
                                        emitter = solutionEmitter)

# Fallback SConscript node, lazily created by generate() when there is no
# SCons.Script call stack to take it from.
default_MSVS_SConscript = None
def generate(env):
    """Add Builders and construction variables for Microsoft Visual
    Studio project files to an Environment."""
    # Register the MSVS builders only if the Environment doesn't already
    # provide them (a user may have installed custom ones).
    try:
        env['BUILDERS']['MSVSProject']
    except KeyError:
        env['BUILDERS']['MSVSProject'] = projectBuilder
    try:
        env['BUILDERS']['MSVSSolution']
    except KeyError:
        env['BUILDERS']['MSVSSolution'] = solutionBuilder

    env['MSVSPROJECTCOM'] = projectAction
    env['MSVSSOLUTIONCOM'] = solutionAction

    if SCons.Script.call_stack:
        # XXX Need to find a way to abstract this; the build engine
        # shouldn't depend on anything in SCons.Script.
        env['MSVSSCONSCRIPT'] = SCons.Script.call_stack[0].sconscript
    else:
        global default_MSVS_SConscript
        if default_MSVS_SConscript is None:
            default_MSVS_SConscript = env.File('SConstruct')
        env['MSVSSCONSCRIPT'] = default_MSVS_SConscript

    # Command lines that the generated project files will use to invoke
    # SCons for build / rebuild / clean from inside Visual Studio.
    env['MSVSSCONS'] = '"%s" -c "%s"' % (python_executable, getExecScriptMain(env))
    env['MSVSSCONSFLAGS'] = '-C "${MSVSSCONSCRIPT.dir.abspath}" -f ${MSVSSCONSCRIPT.name}'
    env['MSVSSCONSCOM'] = '$MSVSSCONS $MSVSSCONSFLAGS'
    env['MSVSBUILDCOM'] = '$MSVSSCONSCOM "$MSVSBUILDTARGET"'
    env['MSVSREBUILDCOM'] = '$MSVSSCONSCOM "$MSVSBUILDTARGET"'
    env['MSVSCLEANCOM'] = '$MSVSSCONSCOM -c "$MSVSBUILDTARGET"'

    # Set-up ms tools paths for default version
    msvc_setup_env_once(env)

    if 'MSVS_VERSION' in env:
        version_num, suite = msvs_parse_version(env['MSVS_VERSION'])
    else:
        (version_num, suite) = (7.0, None)  # guess at a default
    if 'MSVS' not in env:
        env['MSVS'] = {}
    # Project/solution file extensions changed across Visual Studio releases:
    # VC6 used .dsp/.dsw, VS2002-2008 used .vcproj/.sln, VS2010+ uses
    # .vcxproj/.sln.
    if (version_num < 7.0):
        env['MSVS']['PROJECTSUFFIX'] = '.dsp'
        env['MSVS']['SOLUTIONSUFFIX'] = '.dsw'
    elif (version_num < 10.0):
        env['MSVS']['PROJECTSUFFIX'] = '.vcproj'
        env['MSVS']['SOLUTIONSUFFIX'] = '.sln'
    else:
        env['MSVS']['PROJECTSUFFIX'] = '.vcxproj'
        env['MSVS']['SOLUTIONSUFFIX'] = '.sln'
    if (version_num >= 10.0):
        env['MSVSENCODING'] = 'utf-8'
    else:
        env['MSVSENCODING'] = 'Windows-1252'
    env['GET_MSVSPROJECTSUFFIX'] = GetMSVSProjectSuffix
    env['GET_MSVSSOLUTIONSUFFIX'] = GetMSVSSolutionSuffix
    env['MSVSPROJECTSUFFIX'] = '${GET_MSVSPROJECTSUFFIX}'
    env['MSVSSOLUTIONSUFFIX'] = '${GET_MSVSSOLUTIONSUFFIX}'
    env['SCONS_HOME'] = os.environ.get('SCONS_HOME')
def exists(env):
    """Tool existence probe: report whether MSVC/MSVS tooling is available."""
    return msvc_exists()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4: | PypiClean |
/IMMP-0.12.1.tar.gz/IMMP-0.12.1/immp/hook/identity.py | from asyncio import gather
from collections import defaultdict
import logging
import immp
from immp.hook.command import command, CommandParser
CROSS = "\N{CROSS MARK}"
TICK = "\N{WHITE HEAVY CHECK MARK}"
log = logging.getLogger(__name__)
@immp.pretty_str
class Identity:
    """
    Basic representation of an external identity.

    Attributes:
        name (str):
            Common name used across any linked platforms.
        provider (.IdentityProvider):
            Service hook where the identity information was acquired from.
        links (.User list):
            Physical platform users assigned to this identity.
        roles (str list):
            Optional set of role names, if applicable to the backend.
        profile (str):
            URL to the identity profile page.
    """

    @classmethod
    async def gather(cls, *tasks):
        """
        Helper for fetching users from plugs, filtering out calls with no matches::

            >>> await Identity.gather(plug1.user_from_id(id1), plug2.user_from_id(id2))
            [<Plug1User: '123' 'User'>]

        Args:
            tasks (coroutine list):
                Non-awaited coroutines or tasks.

        Returns:
            .User list:
                Gathered results of those tasks.
        """
        # Drop falsy placeholders before awaiting anything.
        pending = [task for task in tasks if task]
        if not pending:
            return []
        found = []
        for outcome in await gather(*pending, return_exceptions=True):
            if isinstance(outcome, BaseException):
                log.warning("Failed to retrieve user for identity", exc_info=outcome)
                continue
            if outcome:
                found.append(outcome)
        return found

    def __init__(self, name, provider=None, links=(), roles=(), profile=None):
        self.name = name
        self.provider = provider
        self.links = links
        self.roles = roles
        self.profile = profile

    def __eq__(self, other):
        # Identities are equal when both the display name and the backing
        # provider match; any non-Identity compares unequal.
        if not isinstance(other, Identity):
            return False
        return self.name == other.name and self.provider == other.provider

    def __hash__(self):
        return hash((self.name, self.provider))

    def __repr__(self):
        suffix = " ({})".format(" ".join(self.roles)) if self.roles else ""
        return "<{}: {} x{}{}>".format(self.__class__.__name__, repr(self.name),
                                       len(self.links), suffix)
class IdentityProvider:
    """
    Interface for hooks to provide identity information from a backing source.
    Subclasses must implement both lookup coroutines below.

    Attributes:
        provider_name (str):
            Readable name of the underlying service, used when displaying info about this provider.
    """

    provider_name = None

    async def identity_from_name(self, name):
        """
        Look up an identity by the external provider's username for them.

        Args:
            name (str):
                External name to query.

        Returns:
            .Identity:
                Matching identity from the provider, or ``None`` if not found.
        """
        raise NotImplementedError

    async def identity_from_user(self, user):
        """
        Look up an identity by a linked network user.

        Args:
            user (.User):
                Plug user referenced by the identity.

        Returns:
            .Identity:
                Matching identity from the provider, or ``None`` if not found.
        """
        raise NotImplementedError
class WhoIsHook(immp.Hook):
    """
    Hook to provide generic lookup of user profiles across one or more identity providers.
    """

    schema = immp.Schema({"identities": [str],
                          immp.Optional("public", False): bool})

    # Resolves the configured provider hook names into live instances.
    _identities = immp.ConfigProperty([IdentityProvider])

    async def _query_all(self, query, providers=None):
        # Query every provider concurrently, by user or by name depending on
        # the query type; collect successful Identity results, log failures.
        getter = "identity_from_{}".format("user" if isinstance(query, immp.User) else "name")
        providers = providers or self._identities
        tasks = (getattr(provider, getter)(query) for provider in providers)
        identities = []
        for provider, result in zip(providers, await gather(*tasks, return_exceptions=True)):
            if isinstance(result, Identity):
                identities.append(result)
            elif isinstance(result, Exception):
                log.warning("Failed to retrieve identity from %r (%r)",
                            provider.name, provider.provider_name, exc_info=result)
        return identities

    @command("who", parser=CommandParser.none)
    async def who(self, msg, name):
        """
        Recall a known identity and all of its links.
        """
        # In non-public mode, only search providers that recognise the caller.
        if self.config["public"]:
            providers = self._identities
        else:
            providers = [ident.provider for ident in await self._query_all(msg.user)]
        if providers:
            # Prefer a rich mention from the message if present, else the raw text.
            identities = await self._query_all(name[0].mention or str(name), providers)
            if identities:
                identities.sort(key=lambda ident: ident.provider.provider_name)
                # Map each linked platform user to the identities referencing it.
                links = defaultdict(list)
                roles = []
                for ident in identities:
                    for link in ident.links:
                        links[link].append(ident)
                    if ident.roles:
                        roles.append(ident)
                text = name.clone()
                text.prepend(immp.Segment("Info for "))
                for segment in text:
                    segment.bold = True
                text.append(immp.Segment("\nMatching providers:"))
                for i, ident in enumerate(identities):
                    text.append(immp.Segment("\n{}.\t".format(i + 1)),
                                immp.Segment(ident.provider.provider_name, link=ident.profile))
                if links:
                    text.append(immp.Segment("\nIdentity links:"))
                    for user in sorted(links, key=lambda user: user.plug.network_name):
                        text.append(immp.Segment("\n({}) ".format(user.plug.network_name)))
                        if user.link:
                            text.append(immp.Segment(user.real_name or user.username,
                                                     link=user.link))
                        elif user.real_name and user.username:
                            text.append(immp.Segment("{} [{}]".format(user.real_name,
                                                                      user.username)))
                        else:
                            text.append(immp.Segment(user.real_name or user.username))
                        known = links[user]
                        if known != identities:
                            # Annotate with the numbered providers that hold this link.
                            indexes = [identities.index(ident) + 1 for ident in known]
                            text.append(immp.Segment(" {}".format(indexes)))
                if roles:
                    text.append(immp.Segment("\nRoles:"))
                    for ident in roles:
                        text.append(immp.Segment("\n({}) {}".format(ident.provider.provider_name,
                                                                    ", ".join(ident.roles))))
            else:
                text = "{} Name not in use".format(CROSS)
        else:
            text = "{} Not identified".format(CROSS)
        await msg.channel.send(immp.Message(text=text))
/Ancestration-0.1.0.tar.gz/Ancestration-0.1.0/ancestration/_errors.py | from __future__ import unicode_literals
from ancestration import FamilyInheritanceError
# These factories build (but do not raise) FamilyInheritanceError instances.
# They were previously lambda assignments, which PEP 8 (E731) discourages:
# plain defs give proper function names in tracebacks and allow docstrings.
# All message strings are unchanged.

def invalid_adoption_object(obj):
    """Error: *obj* is neither a class nor a function and cannot be adopted."""
    return FamilyInheritanceError(
        'Invalid object to adopt into family, only classes and functions are allowed: {}'.format(obj))


def multiple_family_declarations():
    """Error: a module was declared a family module more than once."""
    return FamilyInheritanceError(
        'A module may be made a family module only once.')


def invalid_family_extends(extends):
    """Error: tried to family-extend something that is not a family module."""
    return FamilyInheritanceError(
        'Only family modules can be family-extented, but is {}.'.format(type(extends)))


def outside_family():
    """Error: a family class was defined outside a family module."""
    return FamilyInheritanceError(
        'A family class may only be defined in a family module.')


def multiple_family_bases():
    """Error: a family class extends more than one family class."""
    return FamilyInheritanceError(
        'A family class may not extend more than one family class.')


def different_family_base():
    """Error: a redefined family class has a different family base class."""
    return FamilyInheritanceError(
        'The redefined family class has a different family base class than the original.')


def no_super_family():
    """Error: "FAMILY_INHERIT" was used but there is no super family."""
    return FamilyInheritanceError(
        '"FAMILY_INHERIT" was given but there is no super family.')


def no_super_family_base(cls_name):
    """Error: "FAMILY_INHERIT" names a class absent from the super family."""
    return FamilyInheritanceError(
        '"FAMILY_INHERIT" contains "{}", but no equally named class was found in the super family.'.format(cls_name))


def missing_attribute(attr_name):
    """Error: a "FAMILY_INHERIT" attribute is missing from the super family base."""
    return FamilyInheritanceError(
        'The "FAMILY_INHERIT" attribute "{}" does not exist in super family base.'.format(attr_name))
def adoption_import_error(family_module, module, import_error):
    """Build a FamilyInheritanceError describing why *module* failed to import
    into *family_module*, including the raising exception's qualified name
    and message.
    """
    error_module = import_error.__class__.__module__
    error_name = import_error.__class__.__name__
    # Builtin exceptions live in '__builtin__' on Python 2 but 'builtins' on
    # Python 3 (the original only checked the Python 2 name). For builtins
    # the module prefix is omitted entirely -- previously an empty prefix
    # still produced a stray leading dot (e.g. ".ValueError").
    if error_module is None or error_module in ('__builtin__', 'builtins'):
        error_qualified_name = error_name
    else:
        error_qualified_name = error_module + '.' + error_name
    message = 'Could not adopt the module "{}" into family "{}", because an exception of type "{}" was raised'.format(
        module, family_module.__name__, error_qualified_name)
    error_message = '{}'.format(import_error)
    if not error_message:
        message += '.'
    else:
        message += ' with message: {}'.format(error_message)
    return FamilyInheritanceError(message)
/DLDummyGen-0.0.2.tar.gz/DLDummyGen-0.0.2/README.md |
# 딥러닝용 더미 데이터 생성 자동화
- Deep-Learning Dummy Data File Generator by csv File
---
## Overview
외부 노출에 민감한 자료가 포함된 데이터는 개인 정보 보호법에 의하여 망분리 PC (인터넷이 차단된 PC) 에서 관리된다
대부분의 망분리 PC 는 터미널의 역할만 하고 성능이 떨어지는 미니 PC 를 사용하게 된다
딥러닝을 효율적으로 수행하기 위해서는 성능 좋은 PC 에서 실제 데이터를 사용하는 것이나 현실적으로 그렇지 못한 상황일 경우 더미 데이터를 사용해야 한다
---
## 망 분리 PC 의 한계점에 따른 더미 데이터 사용

---
## 실제 데이터에서 더미 데이터를 만드는 과정
- 망분리 PC 에서 데이터 특성을 파악한다
- 더미 데이터를 만들기 위해서 실제 데이터의 특성을 발췌한다
- 실제 데이터의 스키마 구조를 발췌한다.<br/>
(이때 망분리 PC 는 클립보드 복사나 파일 복사가 어렵기 때문에 수기로 작성하게 된다)
- 실제 데이터에서 각 필드별 최대값, 최소 값 등도 발췌한다
- 날짜형의 경우 범위를 확인하여 발췌해야 한다
- 코드 값 같은 문자열 상수는 발췌하기 어렵다
- 발췌한 특성 정보를 바탕으로 더미 데이터를 생성하기 위한 코드를 작성한다
- 각 필드별 특성을 작성한다
- 수치, 문자열 (코드형 / 랜덤 문자열), 날짜형의 랜덤 생성 코드를 작성한다
---
## 더미 데이터 생성 자동화 소개

---
## Usage
- Install the prerequisites DLDummyGen
```
> pip install DLDummyGen
```
- Sample Code
```python
from DLDummyGen.DLDummyGen import DLLogger, DLDummyFieldHandler, DLDummyGenerator
if __name__ == '__main__':
# Original csv File (Real Data)
CSV_FILE_NAME = "pima-indians-diabetes.csv"
# Maximum length of data to be generated
GEN_ROW_MAX = 10
# Length of Unique String Field (eg, Code Value) Judgment criteria
UNIQUE_FIELD_COUNT = 1000
# Create Logging Object
logger = DLLogger()
dg = DLDummyGenerator(CSV_FILE_NAME, GEN_ROW_MAX, UNIQUE_FIELD_COUNT, logger=logger)
# Run to Generate python source code
dg.gen_src_from_csv()
```
- With Custom Field Callback Handler Code
```python
from DLDummyGen.DLDummyGen import DLLogger, DLDummyFieldHandler, DLDummyGenerator
class DLDummyFieldAutoIncrement(DLDummyFieldHandler):
"""
Auto Increment ID - Custom Field Callback Handler
"""
def on_custom_field(self, dg, fgen, column, dataset):
fgen.write('gen_df[\"' + column + '\"] = ')
fgen.write('[\'ID{:05d}\'.format(idx+1) for idx in range(GEN_ROW_MAX)]\n\n')
class DLDummyFieldChoiceString(DLDummyFieldHandler):
"""
Choice String - Custom Field Callback Handler
"""
def on_custom_field(self, dg, fgen, column, dataset):
fgen.write('gen_df[\"' + column + '\"] = ')
fgen.write('choice([\"' + '\", \"'.join(['Y', 'N']) + '\"], GEN_ROW_MAX)\n\n')
...
if __name__ == '__main__':
# Original csv File (Real Data)
CSV_FILE_NAME = "pima-indians-diabetes.csv"
# Maximum length of data to be generated
GEN_ROW_MAX = 10
# Length of Unique String Field (eg, Code Value) Judgment criteria
UNIQUE_FIELD_COUNT = 1000
# Create Logging Object
logger = DLLogger()
dg = DLDummyGenerator(CSV_FILE_NAME, GEN_ROW_MAX, UNIQUE_FIELD_COUNT, logger=logger)
# Definition to generate random date/time
# [[Field Name, Start Date, End Date, Input Date Format, Output Date Format]]
DATE_FIELDS = [
[' Glucose', '2019-01', '2019-12', '%Y-%m', '%Y%m']
]
dg.set_date_fields(DATE_FIELDS)
# Definition to custom field handler
# [[Field Name, DLDummyFieldHandler class implement instance]]
CUSTOM_FIELDS = [
['Pregnancies', DLDummyFieldAutoIncrement()]
, [' Outcome', DLDummyFieldChoiceString()]
]
dg.set_custom_fields(CUSTOM_FIELDS)
# Run to Generate python source code
dg.gen_src_from_csv()
```
---
## Generated Python Source Code
- Install the prerequisites numpy, pandas and faker (Python 3.7)
```
> pip install numpy
> pip install pandas
> pip install faker
```
- Generated Python Source Code
```python
import pandas as pd
import numpy as np
from numpy import random
from datetime import datetime
...
gen_df = pd.DataFrame()
# Pregnancies
gen_df["Pregnancies"] = ['ID{:05d}'.format(idx+1) for idx in range(GEN_ROW_MAX)]
# Glucose
gen_df[" Glucose"] = [fake.date_between(
start_date=datetime.strptime('2019-01', '%Y-%m')
, end_date=datetime.strptime('2019-12', '%Y-%m')).strftime('%Y%m')
for _ in range(GEN_ROW_MAX)]
...
# Age
gen_df[" Age"] = random.randint(21, 81 + 1, GEN_ROW_MAX, dtype="int64")
# Outcome
gen_df[" Outcome"] = choice(["Y", "N"], GEN_ROW_MAX)
gen_df.to_csv('gen_pima-indians-diabetes.csv', index=False)
print('\ngen_pima-indians-diabetes.csv File Created...\n')
```
---
## Appendix
- [Numpy](https://numpy.org/doc/stable/) : NumPy is the fundamental package for scientific computing in Python
- [Pandas](https://pandas.pydata.org/docs) : pandas is an open source, BSD-licensed library providing high-performance, easy-to-use data structures and data analysis tools for the Python programming language.
- [Faker](https://github.com/joke2k/faker) : Python package that generates fake data for you
| PypiClean |
/Django-4.2.4.tar.gz/Django-4.2.4/django/dispatch/dispatcher.py | import logging
import threading
import weakref
from django.utils.inspect import func_accepts_kwargs
logger = logging.getLogger("django.dispatch")
def _make_id(target):
if hasattr(target, "__func__"):
return (id(target.__self__), id(target.__func__))
return id(target)
# Dispatch key used when a receiver was connected without a specific sender.
NONE_ID = _make_id(None)

# A marker for caching: stored in sender_receivers_cache to record that a
# sender matched no receivers, distinguishing "cached empty" from "no entry".
NO_RECEIVERS = object()
class Signal:
    """
    Base class for all signals

    Internal attributes:

        receivers
            { receiverkey (id) : weakref(receiver) }
    """

    def __init__(self, use_caching=False):
        """
        Create a new signal.
        """
        self.receivers = []
        # Guards all mutation of self.receivers across threads.
        self.lock = threading.Lock()
        self.use_caching = use_caching
        # For convenience we create empty caches even if they are not used.
        # A note about caching: if use_caching is defined, then for each
        # distinct sender we cache the receivers that sender has in
        # 'sender_receivers_cache'. The cache is cleaned when .connect() or
        # .disconnect() is called and populated on send().
        self.sender_receivers_cache = weakref.WeakKeyDictionary() if use_caching else {}
        # Set lazily by _remove_receiver() when a weakly-referenced receiver
        # is garbage collected; actual cleanup is deferred (see that method).
        self._dead_receivers = False

    def connect(self, receiver, sender=None, weak=True, dispatch_uid=None):
        """
        Connect receiver to sender for signal.

        Arguments:

            receiver
                A function or an instance method which is to receive signals.
                Receivers must be hashable objects.

                If weak is True, then receiver must be weak referenceable.

                Receivers must be able to accept keyword arguments.

                If a receiver is connected with a dispatch_uid argument, it
                will not be added if another receiver was already connected
                with that dispatch_uid.

            sender
                The sender to which the receiver should respond. Must either be
                a Python object, or None to receive events from any sender.

            weak
                Whether to use weak references to the receiver. By default, the
                module will attempt to use weak references to the receiver
                objects. If this parameter is false, then strong references will
                be used.

            dispatch_uid
                An identifier used to uniquely identify a particular instance of
                a receiver. This will usually be a string, though it may be
                anything hashable.
        """
        from django.conf import settings

        # If DEBUG is on, check that we got a good receiver
        if settings.configured and settings.DEBUG:
            if not callable(receiver):
                raise TypeError("Signal receivers must be callable.")
            # Check for **kwargs
            if not func_accepts_kwargs(receiver):
                raise ValueError(
                    "Signal receivers must accept keyword arguments (**kwargs)."
                )

        if dispatch_uid:
            lookup_key = (dispatch_uid, _make_id(sender))
        else:
            lookup_key = (_make_id(receiver), _make_id(sender))

        if weak:
            ref = weakref.ref
            receiver_object = receiver
            # Check for bound methods
            if hasattr(receiver, "__self__") and hasattr(receiver, "__func__"):
                # WeakMethod keeps the bound method alive only as long as its
                # instance; a plain ref to a bound method would die immediately.
                ref = weakref.WeakMethod
                receiver_object = receiver.__self__
            receiver = ref(receiver)
            # Flag this signal for cleanup once the receiver is collected.
            weakref.finalize(receiver_object, self._remove_receiver)

        with self.lock:
            self._clear_dead_receivers()
            if not any(r_key == lookup_key for r_key, _ in self.receivers):
                self.receivers.append((lookup_key, receiver))
            self.sender_receivers_cache.clear()

    def disconnect(self, receiver=None, sender=None, dispatch_uid=None):
        """
        Disconnect receiver from sender for signal.

        If weak references are used, disconnect need not be called. The receiver
        will be removed from dispatch automatically.

        Arguments:

            receiver
                The registered receiver to disconnect. May be none if
                dispatch_uid is specified.

            sender
                The registered sender to disconnect

            dispatch_uid
                the unique identifier of the receiver to disconnect
        """
        # Reconstruct the same dispatch key that connect() stored.
        if dispatch_uid:
            lookup_key = (dispatch_uid, _make_id(sender))
        else:
            lookup_key = (_make_id(receiver), _make_id(sender))

        disconnected = False
        with self.lock:
            self._clear_dead_receivers()
            for index in range(len(self.receivers)):
                (r_key, _) = self.receivers[index]
                if r_key == lookup_key:
                    disconnected = True
                    del self.receivers[index]
                    break
            self.sender_receivers_cache.clear()
        return disconnected

    def has_listeners(self, sender=None):
        return bool(self._live_receivers(sender))

    def send(self, sender, **named):
        """
        Send signal from sender to all connected receivers.

        If any receiver raises an error, the error propagates back through send,
        terminating the dispatch loop. So it's possible that all receivers
        won't be called if an error is raised.

        Arguments:

            sender
                The sender of the signal. Either a specific object or None.

            named
                Named arguments which will be passed to receivers.

        Return a list of tuple pairs [(receiver, response), ... ].
        """
        if (
            not self.receivers
            or self.sender_receivers_cache.get(sender) is NO_RECEIVERS
        ):
            return []

        return [
            (receiver, receiver(signal=self, sender=sender, **named))
            for receiver in self._live_receivers(sender)
        ]

    def send_robust(self, sender, **named):
        """
        Send signal from sender to all connected receivers catching errors.

        Arguments:

            sender
                The sender of the signal. Can be any Python object (normally one
                registered with a connect if you actually want something to
                occur).

            named
                Named arguments which will be passed to receivers.

        Return a list of tuple pairs [(receiver, response), ... ].

        If any receiver raises an error (specifically any subclass of
        Exception), return the error instance as the result for that receiver.
        """
        if (
            not self.receivers
            or self.sender_receivers_cache.get(sender) is NO_RECEIVERS
        ):
            return []

        # Call each receiver with whatever arguments it can accept.
        # Return a list of tuple pairs [(receiver, response), ... ].
        responses = []
        for receiver in self._live_receivers(sender):
            try:
                response = receiver(signal=self, sender=sender, **named)
            except Exception as err:
                logger.error(
                    "Error calling %s in Signal.send_robust() (%s)",
                    receiver.__qualname__,
                    err,
                    exc_info=err,
                )
                responses.append((receiver, err))
            else:
                responses.append((receiver, response))
        return responses

    def _clear_dead_receivers(self):
        # Note: caller is assumed to hold self.lock.
        if self._dead_receivers:
            self._dead_receivers = False
            self.receivers = [
                r
                for r in self.receivers
                if not (isinstance(r[1], weakref.ReferenceType) and r[1]() is None)
            ]

    def _live_receivers(self, sender):
        """
        Filter sequence of receivers to get resolved, live receivers.

        This checks for weak references and resolves them, then returning only
        live receivers.
        """
        receivers = None
        if self.use_caching and not self._dead_receivers:
            receivers = self.sender_receivers_cache.get(sender)
            # We could end up here with NO_RECEIVERS even if we do check this case in
            # .send() prior to calling _live_receivers() due to concurrent .send() call.
            if receivers is NO_RECEIVERS:
                return []
        if receivers is None:
            with self.lock:
                self._clear_dead_receivers()
                senderkey = _make_id(sender)
                receivers = []
                for (receiverkey, r_senderkey), receiver in self.receivers:
                    if r_senderkey == NONE_ID or r_senderkey == senderkey:
                        receivers.append(receiver)
                if self.use_caching:
                    if not receivers:
                        self.sender_receivers_cache[sender] = NO_RECEIVERS
                    else:
                        # Note, we must cache the weakref versions.
                        self.sender_receivers_cache[sender] = receivers
        non_weak_receivers = []
        for receiver in receivers:
            if isinstance(receiver, weakref.ReferenceType):
                # Dereference the weak reference.
                receiver = receiver()
                if receiver is not None:
                    non_weak_receivers.append(receiver)
            else:
                non_weak_receivers.append(receiver)
        return non_weak_receivers

    def _remove_receiver(self, receiver=None):
        # Mark that the self.receivers list has dead weakrefs. If so, we will
        # clean those up in connect, disconnect and _live_receivers while
        # holding self.lock. Note that doing the cleanup here isn't a good
        # idea, _remove_receiver() will be called as side effect of garbage
        # collection, and so the call can happen while we are already holding
        # self.lock.
        self._dead_receivers = True
def receiver(signal, **kwargs):
    """
    A decorator for connecting receivers to signals. Used by passing in the
    signal (or list of signals) and keyword arguments to connect::

        @receiver(post_save, sender=MyModel)
        def signal_receiver(sender, **kwargs):
            ...

        @receiver([post_save, post_delete], sender=MyModel)
        def signals_receiver(sender, **kwargs):
            ...
    """
    # Normalise to a list so the decorator body handles both forms uniformly.
    signals = signal if isinstance(signal, (list, tuple)) else [signal]

    def _decorator(func):
        for sig in signals:
            sig.connect(func, **kwargs)
        return func

    return _decorator
/OctoPrint-Nanny-0.16.0.tar.gz/OctoPrint-Nanny-0.16.0/octoprint_nanny/utils/printnanny_os.py | import os
from typing import Optional, Any, Dict, List, TypedDict
import logging
import json
import subprocess
import printnanny_api_client
from printnanny_api_client.models import Pi
logger = logging.getLogger("octoprint.plugins.octoprint_nanny.utils")
PRINTNANNY_BIN = os.environ.get("PRINTNANNY_BIN", "/usr/bin/printnanny")
PRINTNANNY_DEBUG = os.environ.get("PRINTNANNY_DEBUG", False)
PRINTNANNY_DEBUG = PRINTNANNY_DEBUG in ["True", "true", "1", "yes"]
PRINTNANNY_CLOUD_PI: Optional[Pi] = None
PRINTNANNY_CLOUD_NATS_CREDS: Optional[str] = None
class PrintNannyApiConfig(TypedDict):
    """Connection settings for the PrintNanny Cloud REST API."""
    # REST endpoint base URL (e.g. "https://printnanny.ai/").
    base_path: str
    # Bearer token for authenticated requests; None when not logged in.
    bearer_access_token: Optional[str]


# Cached API config, populated by load_api_config(); None until loaded.
PRINTNANNY_CLOUD_API: Optional[PrintNannyApiConfig] = None
class PrintNannyConfig(TypedDict):
    """Result of invoking the printnanny CLI: the command run, its captured
    output/exit code, and the parsed JSON settings (None on any failure)."""
    cmd: List[str]
    stdout: str
    stderr: str
    returncode: Optional[int]
    config: Optional[Dict[str, Any]]
async def deserialize_pi(pi_dict) -> Pi:
    """Deserialize a plain dict into a printnanny_api_client Pi model."""
    async with printnanny_api_client.api_client.ApiClient() as client:
        # __deserialize is name-mangled (private); accessed via the mangled
        # name because the generated client exposes no public helper for it.
        return client._ApiClient__deserialize(pi_dict, Pi)  # type: ignore
async def load_pi_model(pi_dict: Dict[str, Any]) -> Pi:
    """Deserialize *pi_dict* into a Pi model, cache it in the module-level
    PRINTNANNY_CLOUD_PI global, and return it."""
    result = await deserialize_pi(pi_dict)
    global PRINTNANNY_CLOUD_PI
    PRINTNANNY_CLOUD_PI = result
    return PRINTNANNY_CLOUD_PI
def load_api_config(api_config_dict: Dict[str, str]) -> PrintNannyApiConfig:
    """Build a PrintNannyApiConfig from the CLI's "cloud" settings section,
    cache it in the PRINTNANNY_CLOUD_API global, and return it."""
    global PRINTNANNY_CLOUD_API
    PRINTNANNY_CLOUD_API = PrintNannyApiConfig(
        # Fall back to the public endpoint when the key is absent.
        base_path=api_config_dict.get("api_base_path", "https://printnanny.ai/"),
        bearer_access_token=api_config_dict.get("api_bearer_access_token"),
    )
    return PRINTNANNY_CLOUD_API
def sync_printnanny_cloud_data():
    """Invoke the printnanny CLI to pull the latest PrintNanny Cloud models.

    Failures are logged and swallowed; this function never raises.
    """
    logger.info("Attempting to sync PrintNanny Cloud data...")
    cmd = [PRINTNANNY_BIN, "cloud", "sync-models"]
    try:
        proc = subprocess.run(cmd, capture_output=True)
        out = proc.stdout.decode("utf-8")
        err = proc.stderr.decode("utf-8")
        if proc.returncode != 0:
            logger.error(
                f"Failed to get printnanny settings cmd={cmd} returncode={proc.returncode} stdout={out} stderr={err}"
            )
            return
    except Exception as e:
        logger.error("Error running cmd %s %s", cmd, e)
async def load_printnanny_cloud_data():
    """Run ``printnanny cloud show`` and cache the parsed Pi model.

    Returns the Pi model on success, or None if the CLI exits non-zero or an
    error occurs; errors are logged rather than raised.
    """
    cmd = [PRINTNANNY_BIN, "cloud", "show"]
    # run /usr/bin/printnanny cloud show --format json
    try:
        p = subprocess.run(cmd, capture_output=True)
        stdout = p.stdout.decode("utf-8")
        stderr = p.stderr.decode("utf-8")
        if p.returncode != 0:
            logger.error(
                f"Failed to get printnanny settings cmd={cmd} returncode={p.returncode} stdout={stdout} stderr={stderr}"
            )
            return
        cloud_data = json.loads(stdout)
        # try setting global PRINTNANNY_CLOUD_PI var
        result = await load_pi_model(cloud_data)
        logger.debug("Loaded PrintNanny Cloud pi data %s", result)
        return result
    except Exception as e:
        logger.error("Error running cmd %s %s", cmd, e)
def load_printnanny_settings() -> PrintNannyConfig:
    """Run ``printnanny settings show --format json`` and parse the result.

    Side effects on success: populates the PRINTNANNY_CLOUD_API global (via
    load_api_config) and the PRINTNANNY_CLOUD_NATS_CREDS global.

    Returns:
        PrintNannyConfig with the raw command output. ``config`` is None when
        the CLI is missing, exits non-zero, or emits invalid JSON.
    """
    cmd = [PRINTNANNY_BIN, "settings", "show", "--format", "json"]
    returncode = None
    config = None
    # run /usr/bin/printnanny settings show -F json
    try:
        p = subprocess.run(cmd, capture_output=True)
        stdout = p.stdout.decode("utf-8")
        stderr = p.stderr.decode("utf-8")
        returncode = p.returncode
        if p.returncode != 0:
            logger.error(
                f"Failed to get printnanny settings cmd={cmd} returncode={p.returncode} stdout={stdout} stderr={stderr}"
            )
            return PrintNannyConfig(
                cmd=cmd,
                stdout=stdout,
                stderr=stderr,
                returncode=returncode,
                config=config,
            )
    # FileNotFoundError is raised when PRINTNANNY_BIN is not installed
    except FileNotFoundError:
        logger.warning("%s is not installed", PRINTNANNY_BIN)
        return PrintNannyConfig(
            cmd=cmd,
            stdout="",
            stderr="",
            returncode=1,
            config=config,
        )
    try:
        # parse the JSON settings emitted by the CLI
        config = json.loads(stdout)
        logger.debug("Parsed PrintNanny conf.d, loaded keys: %s", config.keys())
        api_config = config.get("cloud")
        # cache PRINTNANNY_CLOUD_API when the cloud section is present
        if api_config is not None:
            load_api_config(api_config)
        state_dir = config.get("paths", {}).get("state_dir")
        if state_dir is not None:
            global PRINTNANNY_CLOUD_NATS_CREDS
            PRINTNANNY_CLOUD_NATS_CREDS = os.path.join(
                state_dir,
                "creds/printnanny-cloud-nats.creds",
            )
    except json.JSONDecodeError as e:
        # BUG FIX: was `logger.warning(f"Failed to decode printnanny config: %", e)`
        # -- an f-string mixed with a %-style lazy argument and a bare '%'
        # placeholder, which made the logging module raise an internal
        # formatting error instead of logging the exception.
        logger.warning("Failed to decode printnanny config: %s", e)
    return PrintNannyConfig(
        cmd=cmd,
        stdout=stdout,
        stderr=stderr,
        config=config,
        returncode=returncode,
    )
def issue_txt() -> str:
    """
    Capture the contents of /etc/issue as plain text.

    Returns the stripped file contents, or a fallback message when the file
    cannot be read (the failure is also logged).
    """
    try:
        # BUG FIX: the original `open(...).read()` never closed the file,
        # leaking the descriptor until garbage collection.
        with open("/etc/issue", "r") as f:
            result = f.read().strip()
    except Exception as e:
        logger.error("Failed to read /etc/issue %s", e)
        result = "Failed to read /etc/issue"
    return result
def etc_os_release() -> Dict[str, str]:
    """
    Capture the contents of /etc/os-release as a dictionary.

    The path may be overridden via the printnanny settings key
    ``paths.os_release``. Returns at least ``{"ID": "unknown"}`` when the
    file is missing or unparseable (errors are logged, not raised).
    """
    config = load_printnanny_settings()
    os_release_path = "/etc/os-release"
    if config["config"] is not None:
        os_release_path = (
            config["config"].get("paths", {}).get("os_release", os_release_path)
        )
    result = dict(ID="unknown")
    try:
        # BUG FIX: open the file inside the try block (a missing file used to
        # propagate out of this function despite the ID="unknown" fallback),
        # and use a with-statement so the descriptor is always closed.
        with open(os_release_path, "r") as fh:
            contents = fh.read()
        for line in contents.strip().split("\n"):
            if "=" not in line:
                # Skip blank lines instead of raising ValueError on unpack.
                continue
            # maxsplit=1: os-release values may themselves contain '='.
            k, v = line.split("=", 1)
            result[k] = v
    except Exception as e:
        logger.error("Error parsing contents of %s %s", os_release_path, e)
    return result
def is_printnanny_os() -> bool:
    """Return True when running on PrintNanny OS (os-release ID is
    "printnanny") or when the PRINTNANNY_DEBUG env flag is enabled."""
    osrelease = etc_os_release()
    return osrelease.get("ID") == "printnanny" or PRINTNANNY_DEBUG is True
def set_octoprint_api_key(api_key: str):
    """Persist the OctoPrint API key through the printnanny CLI.

    Errors are logged and swallowed; this function never raises.
    """
    cmd = [PRINTNANNY_BIN, "cloud", "set", "pi.octoprint_server.api_key", api_key]
    try:
        proc = subprocess.run(cmd, capture_output=True)
        out = proc.stdout.decode("utf-8")
        err = proc.stderr.decode("utf-8")
        if proc.returncode == 0:
            return
        logger.error(
            f"Failed to run cmd={cmd} returncode={proc.returncode} stdout={out} stderr={err}"
        )
    except Exception as e:
        logger.error(f"Failed to run cmd={cmd} with error={e}")
/Firefly%20III%20API%20Python%20Client-1.5.6.post2.tar.gz/Firefly III API Python Client-1.5.6.post2/firefly_iii_client/model/rule_action_keyword.py | import re # noqa: F401
import sys # noqa: F401
from firefly_iii_client.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
from ..model_utils import OpenApiModel
from firefly_iii_client.exceptions import ApiAttributeError
class RuleActionKeyword(ModelSimple):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
('value',): {
'USER_ACTION': "user_action",
'SET_CATEGORY': "set_category",
'CLEAR_CATEGORY': "clear_category",
'SET_BUDGET': "set_budget",
'CLEAR_BUDGET': "clear_budget",
'ADD_TAG': "add_tag",
'REMOVE_TAG': "remove_tag",
'REMOVE_ALL_TAGS': "remove_all_tags",
'SET_DESCRIPTION': "set_description",
'APPEND_DESCRIPTION': "append_description",
'PREPEND_DESCRIPTION': "prepend_description",
'SET_SOURCE_ACCOUNT': "set_source_account",
'SET_DESTINATION_ACCOUNT': "set_destination_account",
'SET_NOTES': "set_notes",
'APPEND_NOTES': "append_notes",
'PREPEND_NOTES': "prepend_notes",
'CLEAR_NOTES': "clear_notes",
'LINK_TO_BILL': "link_to_bill",
'CONVERT_WITHDRAWAL': "convert_withdrawal",
'CONVERT_DEPOSIT': "convert_deposit",
'CONVERT_TRANSFER': "convert_transfer",
'DELETE_TRANSACTION': "delete_transaction",
},
}
validations = {
}
additional_properties_type = None
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
return {
'value': (str,),
}
@cached_property
def discriminator():
return None
attribute_map = {}
read_only_vars = set()
_composed_schemas = None
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
    @convert_js_args_to_python_args
    def __init__(self, *args, **kwargs):
        """RuleActionKeyword - a model defined in OpenAPI

        Note that value can be passed either in args or in kwargs, but not in both.

        Args:
            args[0] (str): The type of thing this action will do. A limited set is possible.., must be one of ["user_action", "set_category", "clear_category", "set_budget", "clear_budget", "add_tag", "remove_tag", "remove_all_tags", "set_description", "append_description", "prepend_description", "set_source_account", "set_destination_account", "set_notes", "append_notes", "prepend_notes", "clear_notes", "link_to_bill", "convert_withdrawal", "convert_deposit", "convert_transfer", "delete_transaction", ]  # noqa: E501

        Keyword Args:
            value (str): The type of thing this action will do. A limited set is possible.., must be one of ["user_action", "set_category", "clear_category", "set_budget", "clear_budget", "add_tag", "remove_tag", "remove_all_tags", "set_description", "append_description", "prepend_description", "set_source_account", "set_destination_account", "set_notes", "append_notes", "prepend_notes", "clear_notes", "link_to_bill", "convert_withdrawal", "convert_deposit", "convert_transfer", "delete_transaction", ]  # noqa: E501
            _check_type (bool): if True, values for parameters in openapi_types
                will be type checked and a TypeError will be
                raised if the wrong type is input.
                Defaults to True
            _path_to_item (tuple/list): This is a list of keys or values to
                drill down to the model in received_data
                when deserializing a response
            _spec_property_naming (bool): True if the variable names in the input data
                are serialized names, as specified in the OpenAPI document.
                False if the variable names in the input data
                are pythonic names, e.g. snake case (default)
            _configuration (Configuration): the instance to use when
                deserializing a file_type parameter.
                If passed, type conversion is attempted
                If omitted no type conversion is done.
            _visited_composed_classes (tuple): This stores a tuple of
                classes that we have traveled through so that
                if we see that class again we will not use its
                discriminator again.
                When traveling through a discriminator, the
                composed schema that is
                is traveled through is added to this set.
                For example if Animal has a discriminator
                petType and we pass in "Dog", and the class Dog
                allOf includes Animal, we move through Animal
                once using the discriminator, and pick Dog.
                Then in Dog, we will make an instance of the
                Animal class but this time we won't travel
                through its discriminator because we passed in
                _visited_composed_classes = (Animal,)
        """
        # required up here when default value is not given
        _path_to_item = kwargs.pop('_path_to_item', ())
        # `value` may arrive either as a keyword or as the single positional.
        if 'value' in kwargs:
            value = kwargs.pop('value')
        elif args:
            args = list(args)
            value = args.pop(0)
        else:
            raise ApiTypeError(
                "value is required, but not passed in args or kwargs and doesn't have default",
                path_to_item=_path_to_item,
                valid_classes=(self.__class__,),
            )
        # Pop the generated-model control keywords before validating leftovers.
        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
        # Any positional argument beyond `value` is an error.
        if args:
            raise ApiTypeError(
                "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                    args,
                    self.__class__.__name__,
                ),
                path_to_item=_path_to_item,
                valid_classes=(self.__class__,),
            )
        # Standard generated-model bookkeeping state.
        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
        # Attribute assignment is routed through the generated base model,
        # which presumably enforces allowed_values -- defined elsewhere.
        self.value = value
        # Any remaining keyword argument is invalid for this enum model.
        if kwargs:
            raise ApiTypeError(
                "Invalid named arguments=%s passed to %s. Remove those invalid named arguments." % (
                    kwargs,
                    self.__class__.__name__,
                ),
                path_to_item=_path_to_item,
                valid_classes=(self.__class__,),
            )
    @classmethod
    @convert_js_args_to_python_args
    def _from_openapi_data(cls, *args, **kwargs):
        """RuleActionKeyword - a model defined in OpenAPI

        Deserialization-side constructor; mirrors ``__init__`` but builds the
        instance with ``__new__`` so the base-model machinery controls setup.

        Note that value can be passed either in args or in kwargs, but not in both.

        Args:
            args[0] (str): The type of thing this action will do. A limited set is possible.., must be one of ["user_action", "set_category", "clear_category", "set_budget", "clear_budget", "add_tag", "remove_tag", "remove_all_tags", "set_description", "append_description", "prepend_description", "set_source_account", "set_destination_account", "set_notes", "append_notes", "prepend_notes", "clear_notes", "link_to_bill", "convert_withdrawal", "convert_deposit", "convert_transfer", "delete_transaction", ]  # noqa: E501

        Keyword Args:
            value (str): The type of thing this action will do. A limited set is possible.., must be one of ["user_action", "set_category", "clear_category", "set_budget", "clear_budget", "add_tag", "remove_tag", "remove_all_tags", "set_description", "append_description", "prepend_description", "set_source_account", "set_destination_account", "set_notes", "append_notes", "prepend_notes", "clear_notes", "link_to_bill", "convert_withdrawal", "convert_deposit", "convert_transfer", "delete_transaction", ]  # noqa: E501
            _check_type (bool): if True, values for parameters in openapi_types
                will be type checked and a TypeError will be
                raised if the wrong type is input.
                Defaults to True
            _path_to_item (tuple/list): This is a list of keys or values to
                drill down to the model in received_data
                when deserializing a response
            _spec_property_naming (bool): True if the variable names in the input data
                are serialized names, as specified in the OpenAPI document.
                False if the variable names in the input data
                are pythonic names, e.g. snake case (default)
            _configuration (Configuration): the instance to use when
                deserializing a file_type parameter.
                If passed, type conversion is attempted
                If omitted no type conversion is done.
            _visited_composed_classes (tuple): This stores a tuple of
                classes that we have traveled through so that
                if we see that class again we will not use its
                discriminator again.
                When traveling through a discriminator, the
                composed schema that is
                is traveled through is added to this set.
                For example if Animal has a discriminator
                petType and we pass in "Dog", and the class Dog
                allOf includes Animal, we move through Animal
                once using the discriminator, and pick Dog.
                Then in Dog, we will make an instance of the
                Animal class but this time we won't travel
                through its discriminator because we passed in
                _visited_composed_classes = (Animal,)
        """
        # required up here when default value is not given
        _path_to_item = kwargs.pop('_path_to_item', ())
        # Bypass __init__: OpenApiModel (the generated base class, imported
        # elsewhere in this module) allocates the raw instance.
        self = super(OpenApiModel, cls).__new__(cls)
        # `value` may arrive either as a keyword or as the single positional.
        if 'value' in kwargs:
            value = kwargs.pop('value')
        elif args:
            args = list(args)
            value = args.pop(0)
        else:
            raise ApiTypeError(
                "value is required, but not passed in args or kwargs and doesn't have default",
                path_to_item=_path_to_item,
                valid_classes=(self.__class__,),
            )
        # Pop the generated-model control keywords before validating leftovers.
        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
        # Any positional argument beyond `value` is an error.
        if args:
            raise ApiTypeError(
                "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                    args,
                    self.__class__.__name__,
                ),
                path_to_item=_path_to_item,
                valid_classes=(self.__class__,),
            )
        # Standard generated-model bookkeeping state.
        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
        self.value = value
        # Any remaining keyword argument is invalid for this enum model.
        if kwargs:
            raise ApiTypeError(
                "Invalid named arguments=%s passed to %s. Remove those invalid named arguments." % (
                    kwargs,
                    self.__class__.__name__,
                ),
                path_to_item=_path_to_item,
                valid_classes=(self.__class__,),
            )
        return self
import prody
import Bio.PDB as bio
import logging
import numpy as np
import random
import re
import sys
# Getting the name of the module for the log system
logger = logging.getLogger(__name__)
def get_ligand_from_PDB(pdb_file):
    """
    Parse a ligand-only PDB file into a Bio.PDB structure object.

    :param pdb_file: PDB file with only the ligand
    :return: Bio.PDB object of the input PDB
    """
    pdb_parser = bio.PDBParser()
    return pdb_parser.get_structure("structure", pdb_file)
def get_atoms_from_structure(structure):
    """
    Flatten a Bio.PDB structure into a plain list of its atoms.

    :param structure: Bio.PDB object of the input PDB
    :return: list with the atoms that form the input structure
    """
    return list(structure.get_atoms())
def select_atoms_from_list(PDB_atom_name, atoms_list):
    """
    Return the first Bio.PDB atom in `atoms_list` whose name matches
    `PDB_atom_name`, or None if no atom matches.

    :param PDB_atom_name: string with an atom name
    :param atoms_list: list of Bio.Atoms
    :return: Bio.Atom correspondent to the atom name
    """
    return next(
        (candidate for candidate in atoms_list if candidate.name == PDB_atom_name),
        None,
    )
def get_H_bonded_to_grow(PDB_atom_name, prody_complex, PDB_atom_to_replace=None, chain="L"):
    """
    Given a heavy atom name (string) and a complex (prody molecule) it returns the hydrogen atom of the chain L
    placed at bonding distance of the input atom name. If there is more than one, a checking of contacts with the
    protein will be performed. In case of finding a possible contact between the hydrogen and the protein, we will
    reject this hydrogen and we will repeat the check in another one. If all the hydrogens have contacts with the
    protein, the first of them will be selected and a warning will be printed.

    :param PDB_atom_name: heavy atom name (string) of a ligand
    :param prody_complex: prody molecule object
    :param PDB_atom_to_replace: if selected, the name of the specific H atom that you want to bond.
    :param chain: ligand chain identifier, "L" by default.
    :return: hydrogen atom of the ligand placed at bonding distance of the heavy atom
    """
    # Select the hydrogens bonded to the heavy atom 'PDB_atom_name'
    # When non specific atom is selected we search hydrogens automatically
    selected_atom = prody_complex.select("chain {} and hydrogen within 1.70 of name {}".format(chain, PDB_atom_name)) # Replace for 1.53 :)
    # If it is selected, we have to differentiate between hydrogens or heavy atoms
    if PDB_atom_to_replace:
        print("ATOM TO REPLACE: {}".format(PDB_atom_to_replace))
        # A replacement name without "H" is taken to be a heavy atom and is
        # returned as-is, skipping the hydrogen search entirely.
        if not "H" in PDB_atom_to_replace:
            replaceble_pdbatomname = PDB_atom_to_replace
            return replaceble_pdbatomname
    # In case that we found more than one we have to select one of them
    try:
        # select() returns None when nothing matches; len(None) raises TypeError.
        number_of_h = len(selected_atom)
        print("Number of hydrogens bonded to {}: {}".format(PDB_atom_name, number_of_h))
    except TypeError:
        raise TypeError("Check either core or fragment atom to bound when passing parameters")
    if len(selected_atom) > 1:
        for idx, hydrogen in enumerate(selected_atom):
            # We will select atoms of the protein in interaction distance
            select_h_bonds = prody_complex.select("protein and within 2.5 of (name {} and chain {})"
                                                  .format(selected_atom.getNames()[idx], chain))
            if PDB_atom_to_replace:
                print("Forming a bond between {} and {}...".format(PDB_atom_name, PDB_atom_to_replace))
                select_specific_h_bonds = selected_atom.select("name {}".format(PDB_atom_to_replace))
                replaceble_pdbatomname = select_specific_h_bonds.getNames()[0]
                return replaceble_pdbatomname
            elif select_h_bonds is not None and PDB_atom_to_replace is None:
                print("WARNING: {} is forming a close interaction with the protein! We will try to grow"
                      " in another direction.".format(selected_atom.getNames()[idx]))
                # We put this elif to select one of H randomly if all of them have contacts
                # NOTE(review): despite the comment and the docstring ("the
                # first of them will be selected"), this picks getNames()[1]
                # (the SECOND hydrogen) when the last candidate also has
                # contacts -- confirm whether this is intentional.
                if (select_h_bonds is not None) and (int(idx) == int(len(selected_atom)-1)):
                    replaceble_pdbatomname = selected_atom.getNames()[1]
                    return replaceble_pdbatomname
            elif select_h_bonds is None and PDB_atom_to_replace is None:
                # First contact-free hydrogen wins.
                replaceble_pdbatomname = selected_atom.getNames()[idx]
                return replaceble_pdbatomname
    else:
        # NOTE(review): with a single bonded hydrogen this returns the whole
        # getNames() array, not a plain string -- callers must cope with both.
        replaceble_pdbatomname = selected_atom.getNames()
        return replaceble_pdbatomname
def get_H_bonded_to_atom(PDB_atom_name, prody_complex, banned_hydrogen, chain="L"):
    """
    Pick at random a hydrogen bonded to the heavy atom ``PDB_atom_name``,
    excluding ``banned_hydrogen``.

    :param PDB_atom_name: heavy atom name (string) of a ligand
    :param prody_complex: prody molecule object
    :param banned_hydrogen: hydrogen PDB atom name that must not be returned
    :param chain: ligand chain identifier, "L" by default
    :return: a hydrogen PDB atom name (string) different from the banned one
    :raises TypeError: if no atom is found at bonding distance
    :raises ValueError: if the only bonded hydrogen is the banned one
    """
    # When non specific atom is selected we search hydrogens automatically
    selected_atom = prody_complex.select("chain {} and hydrogen within 1.70 of name {}".format(chain, PDB_atom_name))
    try:
        number_of_h = len(selected_atom)
        print("Number of hydrogens bonded to {}: {}".format(PDB_atom_name, number_of_h))
    except TypeError:
        raise TypeError("Check either core or fragment atom to bound when passing parameters")
    # BUGFIX: the previous implementation re-drew random.choice() until the
    # result differed from banned_hydrogen, which loops forever when the
    # banned hydrogen is the only candidate. Filter the candidates first.
    candidates = [name for name in selected_atom.getNames() if name != banned_hydrogen]
    if not candidates:
        raise ValueError("No hydrogen bonded to {} other than {}".format(PDB_atom_name, banned_hydrogen))
    return random.choice(candidates)
# This function is prepared to rename PDB atom names of the repeated names, but is not working currently
def change_repeated_atomnames(list_of_repeated_names, core_names):
    """
    Compute de-duplicated PDB atom names for ``list_of_repeated_names``
    against ``core_names`` by bumping a clashing name's numeric suffix until
    it is unique.

    NOTE(review): this helper is marked "not working currently" upstream; it
    only builds local lists and returns None, so it has no effect on callers.

    :param list_of_repeated_names: atom names that clash with the core.
    :param core_names: atom names already present in the core.
    """
    list_of_lists_repeated = []
    for atom in list_of_repeated_names:
        # Split a PDB atom name into its element letters and numeric suffix.
        find_name_and_number = re.search('([A-Z]*)([0-9]*)', atom)
        element = find_name_and_number.group(1)
        number = find_name_and_number.group(2)
        list_of_lists_repeated.append([element, number])
    list_of_lists_core = []
    for atom in core_names:
        find_name_and_number = re.search('([A-Z]*)([0-9]*)', atom)
        element = find_name_and_number.group(1)
        number = find_name_and_number.group(2)
        list_of_lists_core.append([element, number])
    for repeated_atom in list_of_lists_core and list_of_lists_repeated:
        for core_atom in list_of_lists_core:
            # BUGFIX: the original compared with `is`, which tests object
            # identity, not string equality; use `==` instead.
            if repeated_atom[0] == core_atom[0]:
                # Bump the numeric suffix until the [element, number] pair is
                # not present in the core list, then reserve it.
                new_name = [repeated_atom[0], int(repeated_atom[1]) + 1]
                new_name = [new_name[0], str(new_name[1])]
                switch = False
                while not switch:
                    if new_name not in list_of_lists_core:
                        list_of_lists_core.append(new_name)
                        switch = True
                    new_name = [new_name[0], int(new_name[1])+1]
                    new_name = [new_name[0], str(new_name[1])]
def superimpose(fixed_vector, moving_vector, moving_atom_list):
    """
    Rotate and translate `moving_atom_list` so that `moving_vector` is
    superimposed onto `fixed_vector`.

    :param fixed_vector: vector used as reference.
    :param moving_vector: vector that will rotate and translate.
    :param moving_atom_list: list of atoms to transform along with the moving vector.
    :return: result of applying the transformation to the atom list.
    """
    superimposer = bio.Superimposer()
    # Fixed atoms first (bond of the core), moving atoms second (bond of the fragment).
    superimposer.set_atoms(fixed_vector, moving_vector)
    # Translate and rotate the fragment atoms accordingly.
    return superimposer.apply(moving_atom_list)
def transform_coords(atoms_with_coords):
    """
    Collect the coordinates of a list of Bio.PDB atoms into a numpy array.

    :param atoms_with_coords: list of atoms (from a Bio.PDB) whose coordinates we want.
    :return: numpy array of the coordinates, one row per atom.
    """
    return np.asarray([list(atom.get_coord()) for atom in atoms_with_coords])
def extract_and_change_atomnames(molecule, selected_resname, core_resname, rename=False):
    """
    Rename the PDB atom names of residue ``selected_resname`` so none of them
    clashes with the atom names of residue ``core_resname``.

    If ``rename`` is True the fragment atoms are renamed G0, G1, G2...;
    otherwise a clashing name has its numeric suffix bumped until unique.

    :param molecule: ProDy molecule.
    :param selected_resname: residue name whose atoms you would like to rename.
    :param core_resname: residue name to deconflict against.
    :param rename: force sequential G<N> naming when True.
    :return: ProDy molecule with atoms renamed and dictionary
             {"original atom name": "new atom name"}
    """
    assert selected_resname != core_resname, "core and fragment residue name must be different"
    fragment = molecule.select("resname {}".format(selected_resname))
    core = molecule.select("resname {}".format(core_resname))
    core_atom_names = [atom.getName() for atom in core]
    fragment_atom_names = [atom.getName() for atom in fragment]
    names_dictionary = {}
    for n, atom_name in enumerate(fragment_atom_names):
        if rename:
            names_dictionary[atom_name] = "G{}".format(n)
        else:
            # If the atom name is repeated, bump its numeric suffix until it
            # no longer collides with a core (or already-reserved) name.
            if atom_name in core_atom_names:
                initial_atom_name = atom_name
                while atom_name in core_atom_names:
                    # BUGFIX: use a raw string for the regex -- '\d' is an
                    # invalid escape sequence in a plain string literal.
                    # NOTE(review): names without a digit would raise
                    # IndexError here, as in the original -- confirm all PDB
                    # atom names carry a numeric suffix.
                    atom_name_digit = re.findall(r'\d+', atom_name)[0]
                    new_atom_name_digit = int(atom_name_digit) + 1
                    atom_name = atom_name.replace(atom_name_digit, str(new_atom_name_digit))
                final_atom_name = atom_name
                # Reserve the new name so later fragment atoms cannot take it.
                core_atom_names.append(final_atom_name)
                names_dictionary[initial_atom_name] = final_atom_name
            else:
                names_dictionary[atom_name] = atom_name
    # Apply the renaming to the fragment residue in place.
    for atom in molecule:
        if atom.getResname() == selected_resname:
            if atom.getName() in names_dictionary:
                atom.setName(names_dictionary[atom.getName()])
    return molecule, names_dictionary
def check_overlapping_names(structures_to_bond):
    """
    Find atom-name duplications in the given structure.

    :param structures_to_bond: ProDy molecule
    :return: set with the names that occur more than once (empty set when
             there are no duplications).
    """
    names = list(structures_to_bond.getNames())
    duplicated = {name for name in names if names.count(name) > 1}
    return duplicated
from django import template
from django.contrib.admin.models import LogEntry
register = template.Library()
class AdminLogNode(template.Node):
    """Template node that stores recent admin LogEntry rows in the context."""

    def __init__(self, limit, varname, user):
        self.limit = limit
        self.varname = varname
        self.user = user

    def __repr__(self):
        return "<GetAdminLog Node>"

    def render(self, context):
        # Resolve the queryset: everything, or only one user's entries.
        if self.user is None:
            queryset = LogEntry.objects.all()
        else:
            # `user` may be a literal ID or the name of a context variable
            # holding a user object.
            user_id = self.user if self.user.isdigit() else context[self.user].pk
            queryset = LogEntry.objects.filter(user__pk=user_id)
        limit = int(self.limit)
        context[self.varname] = queryset.select_related("content_type", "user")[:limit]
        return ""
@register.tag
def get_admin_log(parser, token):
    """
    Populate a template variable with the admin log for the given criteria.

    Usage::

        {% get_admin_log [limit] as [varname] for_user [context_var_with_user_obj] %}

    Examples::

        {% get_admin_log 10 as admin_log for_user 23 %}
        {% get_admin_log 10 as admin_log for_user user %}
        {% get_admin_log 10 as admin_log %}

    The user argument may be a hard-coded integer (user ID) or the name of a
    template context variable containing the user object whose ID you want.
    """
    bits = token.contents.split()
    if len(bits) < 4:
        raise template.TemplateSyntaxError(
            "'get_admin_log' statements require two arguments"
        )
    if not bits[1].isdigit():
        raise template.TemplateSyntaxError(
            "First argument to 'get_admin_log' must be an integer"
        )
    if bits[2] != "as":
        raise template.TemplateSyntaxError(
            "Second argument to 'get_admin_log' must be 'as'"
        )
    user = None
    if len(bits) > 4:
        if bits[4] != "for_user":
            raise template.TemplateSyntaxError(
                "Fourth argument to 'get_admin_log' must be 'for_user'"
            )
        if len(bits) > 5:
            user = bits[5]
    return AdminLogNode(limit=bits[1], varname=bits[3], user=user)
import asyncio
import logging
import shlex
from fipper.exception import AlreadyJoinedError
from fipper.exception import InvalidStreamMode
from fipper.exception import NoActiveGroupCall
from fipper.exception import NodeJSNotRunning
from fipper.exception import ClientCallsNotSet
from fipper.exception import TelegramServerError
from fipper.file_manager import FileManager
from fipper.stream_type import StreamType
from fipper.xdcalls import AlreadyJoined
from fipper.xdcalls import ErrorDuringJoin
from fipper.xdcalls.input_stream import AudioPiped
from fipper.xdcalls.input_stream import AudioVideoPiped
from fipper.xdcalls.input_stream import InputStream
from fipper.xdcalls.input_stream import VideoPiped
from fipper.xdcalls.input_stream.audio_image_piped import AudioImagePiped
from fipper.xdcalls.session import Session
from fipper.viper import Viper
py_logger = logging.getLogger('xdcalls')
class JoinGroupCall(Viper):
    @staticmethod
    def _strip_scheme(path):
        """Remove the internal 'fifo://' and 'image:' markers from a path."""
        return path.replace(
            'fifo://',
            '',
        ).replace(
            'image:',
            '',
        )

    async def join_group_call(
        self,
        chat_id: int,
        stream: InputStream,
        invite_hash: str = None,
        join_as=None,
        stream_type: StreamType = None,
    ):
        """Join a group call to stream a file.

        Parameters:
            chat_id (``int``):
                Unique identifier (int) of the target chat.
            stream (:obj:`~fipper.types.InputStream()`):
                Input stream descriptor; also accepts
                :obj:`~fipper.types.AudioPiped()`,
                :obj:`~fipper.types.AudioImagePiped()`,
                :obj:`~fipper.types.AudioVideoPiped()` or
                :obj:`~fipper.types.VideoPiped()`
            invite_hash (``str``, **optional**):
                Unique identifier for the invite in a group call
                in form of a t.me link
            join_as (`InputPeer (P)`_ | `InputPeer (T)`_, **optional**):
                InputPeer of join as channel or profile
            stream_type (:obj:`~fipper.StreamType`, **optional**):
                The type of Stream

        Raises:
            ClientCallsNotSet: no MtProto client was set.
            NodeJSNotRunning: ``start`` was not called first.
            NoActiveGroupCall: the group call has not been started.
            FileNotFoundError: the source file does not exist.
            InvalidStreamMode: a void stream mode was selected.
            FFmpegNotInstalled: piped input requires ffmpeg.
            NoAudioSourceFound: the file has no audio track.
            NoVideoSourceFound: the file has no video track.
            InvalidVideoProportion: the video has bad proportions.
            AlreadyJoinedError: already joined this group call.
            TelegramServerError: Telegram-side error while joining.
        """
        if join_as is None:
            join_as = self._cache_local_peer
        if stream_type is None:
            stream_type = StreamType().local_stream
        if stream_type.stream_mode == 0:
            raise InvalidStreamMode()
        self._cache_user_peer.put(chat_id, join_as)
        # NOTE: the original code repeated the same four-way isinstance()
        # chain twice; isinstance() accepts a tuple of types, so test once.
        is_piped = isinstance(
            stream,
            (AudioImagePiped, AudioPiped, AudioVideoPiped, VideoPiped),
        )
        headers = None
        audio_f_parameters = ''
        video_f_parameters = ''
        if is_piped:
            headers = stream.raw_headers
            # Make sure the referenced media exists before trying to join.
            if stream.stream_video is not None:
                await FileManager.check_file_exist(
                    self._strip_scheme(stream.stream_video.path),
                    headers,
                )
            if stream.stream_audio is not None:
                await FileManager.check_file_exist(
                    self._strip_scheme(stream.stream_audio.path),
                    headers,
                )
            await stream.check_pipe()
            # Build the ffmpeg command-line fragment for each track.
            if stream.stream_audio:
                if stream.stream_audio.header_enabled:
                    audio_f_parameters = stream.headers
                audio_f_parameters += ':_cmd_:'.join(
                    shlex.split(stream.ffmpeg_parameters),
                )
            if stream.stream_video:
                if stream.stream_video.header_enabled:
                    video_f_parameters = stream.headers
                video_f_parameters += ':_cmd_:'.join(
                    shlex.split(stream.ffmpeg_parameters),
                )
        if self.assistant is not None:
            # Wait for the NodeJS side to finish starting up, if needed.
            if self._wait_until_run is not None:
                if not self._wait_until_run.done():
                    await self._wait_until_run
            chat_call = await self.assistant.get_full_chat(
                chat_id,
            )
            stream_audio = stream.stream_audio
            stream_video = stream.stream_video
            if chat_call is not None:
                solver_id = Session.generate_session_id(24)

                async def internal_sender():
                    request = {
                        'action': 'join_call',
                        'chat_id': chat_id,
                        'invite_hash': invite_hash,
                        'buffer_long': stream_type.stream_mode,
                        'lip_sync': stream.lip_sync,
                        'solver_id': solver_id,
                    }
                    if stream_audio is not None:
                        request['stream_audio'] = {
                            'path': stream_audio.path,
                            'bitrate': stream_audio.parameters.bitrate,
                            'ffmpeg_parameters': audio_f_parameters,
                        }
                    if stream_video is not None:
                        video_parameters = stream_video.parameters
                        if video_parameters.frame_rate % 5 != 0 and \
                                not isinstance(stream, AudioImagePiped):
                            py_logger.warning(
                                'For better experience the '
                                'video frame rate must be a multiple of 5',
                            )
                        request['stream_video'] = {
                            'path': stream_video.path,
                            'width': video_parameters.width,
                            'height': video_parameters.height,
                            'framerate': video_parameters.frame_rate,
                            'ffmpeg_parameters': video_f_parameters,
                        }
                    await self._binding.send(request)
                asyncio.ensure_future(internal_sender())
                # Wait for the NodeJS side to answer for this solver id.
                result = await self._wait_result.wait_future_update(
                    solver_id,
                )
                if isinstance(result, AlreadyJoined):
                    raise AlreadyJoinedError()
                elif isinstance(result, ErrorDuringJoin):
                    raise TelegramServerError()
            else:
                raise NoActiveGroupCall()
        else:
            raise NodeJSNotRunning()
import logging
import numpy as np
from divisi2.operators import projection
logger = logging.getLogger(__name__)
EPSILON = 1e-12
# The algorithm is based on:
# Book Series - Lecture Notes in Computer Science
# Book Title - Intelligent Data Engineering and Automated Learning
# Chapter Title - A Fast Algorithm for Incremental Principal Component Analysis
# First Page - 876
# Last Page - 881
# Copyright - 2003
# Author - Juyang Weng
# Author - Yilu Zhang
# Author - Wey-Shiuan Hwang
# DOI -
# Link - http://www.springerlink.com/content/cd8br967h808bw7h
#
# This is a purely dense, unlabeled version of ccipca.py, with the intent that
# it can be much faster as a result.
class CCIPCA(object):
    """A Candid Covariance-free Incremental Principal Component Analysis
    implementation"""

    def __init__(self, matrix, iteration=0, bootstrap=20, amnesia=3.0,
                 remembrance=100000, auto_baseline=True):
        """
        Construct an object that incrementally computes a CCIPCA, given a
        matrix that should hold the eigenvectors. (It can be all zeroes at
        the start.) Each column is an eigenvector, and rows represent the
        different entries an eigenvector can have.

        Parameters:

        - *iteration*: the current time step.
        - *bootstrap*: the actual CCIPCA computation begins after this time
          step. If you are starting from a zero matrix, this should be larger
          than the number of eigenvectors, so they can be initialized.
        - *amnesia*: weights the present more strongly than the past.
          amnesia=1 makes the present count the same as anything else.
        - *remembrance*: inputs older than this many steps begin to decay.
        - *auto_baseline*: if true, the CCIPCA will calculate and subtract a
          moving average of the data. Otherwise, it will subtract the
          constant vector in column 0.
        """
        self.matrix = matrix
        self.iteration = iteration
        self.bootstrap = bootstrap
        self.amnesia = amnesia
        self.remembrance = remembrance
        self.auto_baseline = auto_baseline

    @property
    def shape(self):
        """Shape of the eigenvector matrix: (entries, num_eigenvectors)."""
        return self.matrix.shape

    def zero_column(self):
        """
        Get a vector shaped like a column of the CCIPCA matrix, all of whose
        entries are zero.
        """
        return np.zeros((self.shape[0],), self.matrix.dtype)

    def get_weighted_eigenvector(self, index):
        """
        Get the weighted eigenvector with a specified index. "Weighted" means
        that its magnitude will correspond to its eigenvalue.

        Real eigenvectors start counting at 1. The 0th eigenvector represents
        the moving average of the input data.
        """
        return self.matrix[:, index]

    def get_unit_eigenvector(self, index):
        """
        Get the eigenvector with a specified index, as a unit vector.

        Real eigenvectors start counting at 1. The 0th eigenvector represents
        the moving average of the input data.
        """
        eig = self.get_weighted_eigenvector(index)
        # EPSILON guards against division by zero for all-zero columns.
        return eig / (np.linalg.norm(eig) + EPSILON)

    def set_eigenvector(self, index, vec):
        """Sets eigenvector number `index` to the specified vector."""
        self.matrix[:, index] = vec

    def eigenvectors(self):
        """All eigenvectors, with each column normalized by its eigenvalue."""
        return self.matrix / self.eigenvalues()

    def get_eigenvalue(self, index):
        """Eigenvalue = magnitude of the weighted eigenvector."""
        return np.linalg.norm(self.get_weighted_eigenvector(index))

    def eigenvalues(self):
        """Column-wise magnitudes of the weighted eigenvectors."""
        return np.sqrt(np.sum(self.matrix * self.matrix, axis=0))

    def compute_attractor(self, index, vec):
        """
        Compute the attractor vector for the eigenvector with index `index`
        with the new vector `vec`: the projection of the eigenvector onto
        `vec`.
        """
        if index == 0:
            # special case for the mean vector
            return vec
        eigvec = self.get_unit_eigenvector(index)
        return projection(vec, eigvec)

    def eigenvector_loading(self, index, vec):
        """
        Returns the "loading" (the magnitude of the projection) of `vec` onto
        the eigenvector with index `index`. If `vec` forms an obtuse angle
        with the eigenvector, the loading will be negative.
        """
        return np.dot(self.get_unit_eigenvector(index).conj(), vec)

    def eigenvector_projection(self, index, vec):
        # Do we actually need this?
        return self.get_unit_eigenvector(index) * self.eigenvector_loading(index, vec)

    def eigenvector_residue(self, index, vec):
        """
        Projects `vec` onto the eigenvector with index `index`. Returns the
        projection as a multiple of the unit eigenvector, and the remaining
        component that is orthogonal to the eigenvector.
        """
        loading = self.eigenvector_loading(index, vec)
        orth = vec - (loading * self.get_unit_eigenvector(index))
        if index > 0:
            # sanity check: the residue must be orthogonal to the eigenvector
            assert np.abs(np.dot(orth.conj(), self.get_unit_eigenvector(index))) < 0.001
        return loading, orth

    def update_eigenvector(self, index, vec):
        """
        Performs the learning step of CCIPCA to update an eigenvector toward
        an input vector. Returns the magnitude of the eigenvector component,
        and the residue vector that is orthogonal to the eigenvector.
        """
        if self.iteration < index:
            # there aren't enough eigenvectors yet
            return 0.0, self.zero_column()
        if self.iteration == index:
            # create a new eigenvector
            self.set_eigenvector(index, vec)
            return np.linalg.norm(vec), self.zero_column()
        n = min(self.iteration, self.remembrance)
        if n < self.bootstrap:
            # plain running average during the bootstrap phase
            old_weight = float(n - 1) / n
            new_weight = 1.0 / n
        else:
            # amnesic average: weight the present more strongly than the past
            L = self.amnesia
            old_weight = float(n - L) / n
            new_weight = float(L) / n
        attractor = self.compute_attractor(index, vec)
        new_eig = ((self.get_weighted_eigenvector(index) * old_weight) +
                   (attractor * new_weight))
        self.set_eigenvector(index, new_eig)
        return self.eigenvector_residue(index, vec)

    def sort_vectors(self):
        """
        Sorts the eigenvector table in decreasing order, in place,
        keeping the special eigenvector 0 first. Returns the mapping
        from old to new eigenvectors.
        """
        eigs = self.eigenvalues()
        # keep eigenvector 0 (the mean vector) in front
        eigs[0] = np.inf
        sort_order = np.asarray(np.argsort(-eigs))
        self.matrix[:] = self.matrix[:, sort_order]
        return sort_order

    def learn_vector(self, vec):
        """
        Updates the eigenvectors to account for a new vector. Returns the
        amount of error (the magnitude of the vector outside the space of the
        eigenvectors).
        """
        current_vec = vec.copy()
        self.iteration += 1
        magnitudes = np.zeros((self.shape[1],), self.matrix.dtype)
        for index in range(min(self.shape[1], self.iteration + 1)):
            mag, new_vec = self.update_eigenvector(index, current_vec)
            current_vec = new_vec
            magnitudes[index] = mag
        # NOTE: code that re-sorted the eigenvectors and returned the sorted
        # per-eigenvector magnitudes used to follow this return statement;
        # it was unreachable dead code and has been removed (runtime behavior
        # is unchanged).
        return np.linalg.norm(new_vec)

    def project_vector(self, vec):
        """
        Projects `vec` onto each eigenvector in succession. Returns
        the magnitude of each eigenvector.
        """
        current_vec = vec.copy()
        magnitudes = np.zeros((self.shape[1],), self.matrix.dtype)
        for index in range(min(self.shape[1], self.iteration + 1)):
            mag, new_vec = self.eigenvector_residue(index, current_vec)
            current_vec = new_vec
            magnitudes[index] = mag
        return magnitudes

    def reconstruct(self, weights):
        """Rebuild a vector as a weighted sum of the weighted eigenvectors."""
        total = self.zero_column()
        for index, w in enumerate(weights):
            total += self.get_weighted_eigenvector(index) * w
        return total

    def smooth(self, vec, k_max=None):
        """Project `vec` onto the first `k_max` eigenvectors and rebuild it."""
        mags = self.project_vector(vec)
        if k_max is not None:
            mags = mags[:k_max]
        return self.reconstruct(mags)

    def train_matrix(self, matrix):
        """Learn every column of `matrix` in sequence, printing progress."""
        for col in range(matrix.shape[1]):
            # py3 fix: `print col, ...` and `xrange` were Python-2-only and
            # made this module unimportable on Python 3.
            print(col, '/', matrix.shape[1])
            self.learn_vector(matrix[:, col])
/Amipy-1.0.2.tar.gz/Amipy-1.0.2/amipy/core/scheduler.py | from amipy.log import getLogger
class Scheduler(object):
    """
    Pulls requests off the shared request queue in batches (bounded by the
    CONCURRENCY setting) and exposes them to the crawler via export(), while
    monitoring spider status strings ('RUNNING', 'PAUSE', 'STOP', 'RESUME',
    'RESTART', 'CLOSE') to stop, resume, restart or close spiders.

    NOTE(review): raising StopAsyncIteration is used as an out-of-band
    "crawl finished" signal in receive() and spiders_monitor() -- confirm
    that the driving loop catches it.
    """
    def __init__(self,settings):
        # Max number of requests drained from the queue per receive() call.
        self.req_limits = settings.gets('CONCURRENCY')
        # Requests taken off the queue, waiting to be handed out by export().
        self.recv_req = []
        # True once the queue has been seen empty; suppresses the
        # queue-size debug log until requests appear again.
        self.waiting = False
        # Populated later by spiders_monitor(); receive() assumes it is set
        # before being called -- TODO confirm the caller guarantees that.
        self.spiders = None
        self.logger = getLogger(__name__)
        self.logger.debug('Loaded scheduler.')
    def receive(self,req_queue):
        """
        Drain up to req_limits requests from `req_queue` into recv_req.

        When the queue is empty and every spider is RUNNING/CLOSE, first try
        to re-queue failed pages via _gather_retry(); if none remain, raise
        StopAsyncIteration to end the crawl. If some spider is merely
        paused/stopped, return and keep idling.
        """
        def any_daemon():
            # True if any spider is paused or stopped: the crawl must keep
            # idling instead of terminating.
            return any(i.status in ['PAUSE','STOP'] for i in self.spiders)
        if not self.waiting:
            self.logger.debug(f'Requests Queue Size:{req_queue.qsize()}')
        if not req_queue.empty():
            self.waiting = False
            # Take at most req_limits requests in one batch.
            for _ in range(min(self.req_limits,req_queue.qsize())):
                self.recv_req.append(req_queue.get_nowait())
                req_queue.task_done()
            self.logger.debug(f'Left Requests:{req_queue.qsize()}')
        else:
            self.waiting = True
            # Queue is empty: decide whether the crawl is actually finished.
            if all(i.status in ['RUNNING','CLOSE'] for i in self.spiders):
                if self._gather_retry():
                    self.logger.info(f'Start to retry {len(self.recv_req)}'
                                     f' Error and Exception pages.')
                    return
                print('\n* [Done] No Requests to start the crawling.\n')
                raise StopAsyncIteration
            if any_daemon():
                return
    def export(self):
        """Hand out (and clear) all received requests, preserving order."""
        _export = []
        # pop(0) keeps FIFO order; fine for the small batches capped by
        # req_limits.
        while self.recv_req:
            _export.append(self.recv_req.pop(0))
        if not self.waiting:
            self.logger.debug(f'Exported {len(_export)} Requests.')
        return _export
    def spiders_monitor(self,spiders):
        """
        Watch the spiders' status flags and apply state transitions:
        purge a STOP spider's pending requests, resume/restart/close on
        request, and raise StopAsyncIteration once all spiders are CLOSE.

        NOTE(review): the `while not_running()` loop spins without sleeping
        while every spider is stopped/paused -- presumably acceptable in
        the surrounding event loop; verify.
        """
        self.spiders = spiders
        def not_running():
            # True while every spider is stopped or paused.
            return all([i.status in ['STOP','PAUSE'] for i in spiders])
        while not_running():
            for i in spiders:
                if i.status=='STOP' and not i.stopped:
                    self.logger.debug(f'Stopping spider {i.name}.')
                    # Remove this spider's pending requests directly from the
                    # hub queue's internal deque (reaches into `_queue`).
                    for req in i.binding_hub.requests._queue:
                        if req.spider.name == i.name:
                            i.binding_hub.requests._queue.remove(req)
                            self.logger.debug(f'Removing request {req}.')
                    i.stopped = True
                    continue
        if all(i.status=='CLOSE' for i in spiders):
            self.logger.info('* All spiders closed.')
            raise StopAsyncIteration
        for i in spiders:
            if i.status == 'RESUME':
                self.logger.debug(f'Resuming spider {i.name}.')
                i.resume()
            if i.status == 'RESTART':
                self.logger.debug(f'Restarting spider {i.name}.')
                i.restart()
            if i.status == 'CLOSE':
                self.logger.debug(f'Closing spider {i.name}.')
                i.close(True)
    def _gather_retry(self):
        """
        Move every spider's queued retry requests (its `_retries` list)
        into recv_req. Returns True if anything was gathered.
        """
        for i in self.spiders:
            # NOTE(review): `any(i._retries)` tests element truthiness, not
            # just non-emptiness -- assumes request objects are truthy.
            if any(i._retries):
                while i._retries:
                    _req = i._retries.pop(0)
                    self.recv_req.append(_req)
                self.logger.info(f'Got {len(self.recv_req)} retry Requests of {i.name}.')
        return bool(self.recv_req)
/AdsorptionBreakthroughAnalysis-0.0.2.tar.gz/AdsorptionBreakthroughAnalysis-0.0.2/docs/source/installation.rst | Installation
============
The most recent release can be installed from
`PyPI <https://pypi.org/project/AdsorptionBreakthroughAnalysis>`_ with:
.. code-block:: shell
$ pip install AdsorptionBreakthroughAnalysis
The most recent code and data can be installed directly from GitHub with:
.. code-block:: shell
$ pip install git+https://github.com/RCCS-CaptureTeam/Adsorption_Breakthrough_Analysis.git
To install in development mode, use the following:
.. code-block:: shell
    $ git clone https://github.com/RCCS-CaptureTeam/Adsorption_Breakthrough_Analysis.git
$ cd Adsorption_Breakthrough_Analysis
$ pip install -e .
| PypiClean |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.