repo_name
stringlengths
6
100
path
stringlengths
4
294
copies
stringlengths
1
5
size
stringlengths
4
6
content
stringlengths
606
896k
license
stringclasses
15 values
superclass/superwas
nagios.py
1
12906
#!/usr/bin/python # This file is part of Superwas. # # Superwas is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Superwas is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Superwas. If not, see <http://www.gnu.org/licenses/>. # Classes to create Nagios statistics from WAS PMI data # # Author: Andre van Dijk (SuperClass IT) # Date: $Date: 2013-01-18 16:38:05 +0100 (vr, 18 jan 2013) $ # $Id: nagios.py 428 2013-01-18 15:38:05Z andre $ class NagiosStatus: def __init__(self, code, message, perfdata): self.code=code self.message=message self.perfdata=perfdata def getCode(self): return self.code def getMessage(self): return self.message def getPerformanceData(self): return self.perfdata class NagiosStat: # Nagio Return values OK=0 # indicates a service is working properly. WARNING=1 # indicates a service is in warning state. CRITICAL=2 # indicates a service is in critical state. UNKNOWN=3 # indicates a service is in unknown state. 
STATUS=["OK","WARNING","CRITICAL","UNKOWN"] def __init__(self): self.criticalThreshold=0 self.warningThreshold=0 self.statusinput=[] def setStatus(self, stats): pass def setCriticalThreshold(self, critical): self.criticalThreshold=int(critical) def setWarningThreshold(self, warning): self.warningThreshold=int(warning) class HeapStat(NagiosStat): def __init__(self): NagiosStat.__init__(self) self.current=-1 self.count=-1 def setCurrentHeapSize(self, current): self.current=int(current) def setUsedMemory(self, count): self.count=int(count) def setStatus(self, stats): for stat in stats: pu=stat.getStatistic('HeapSize') if pu is not None: self.setCurrentHeapSize(pu.getCurrent()) pu=stat.getStatistic('UsedMemory') if pu is not None: self.setUsedMemory(pu.getCount()) def getStatus(self): percentage=-1 status=self.UNKNOWN message="HeapStatus unknown" if self.criticalThreshold<0 or self.warningThreshold<0: logger.debug("Heap stats off, returning OK") return NagiosStatus(self.OK, "Heap thresholds unset", "") if self.count!=-1 and self.current!=-1: if self.count!=0: percentage=(float(self.count)/self.current)*100 else: percentage=0 if percentage>=self.criticalThreshold: status=NagiosStat.CRITICAL message="CRITICAL heapSize %d/%d" % (percentage,self.criticalThreshold) elif percentage>=self.warningThreshold: status=NagiosStat.WARNING message="WARNING heapSize %d/%d" % (percentage,self.warningThreshold) else: status=NagiosStat.OK message="OK heapSize %d/%d" % (percentage,self.warningThreshold) logger.debug("Heap stats: %s %s" % (status,message)) return NagiosStatus(status, message,"Heap=%d%%;%d;%d;;;" % (percentage,self.warningThreshold,self.criticalThreshold)) class CPUStat(NagiosStat): def __init__(self): NagiosStat.__init__(self) self.percentage=-1 def setCPUPercentage(self, percentage): self.percentage=int(percentage) def getStatus(self): status=NagiosStat.UNKNOWN message="CPU Usage unknown" if self.criticalThreshold<0 or self.warningThreshold<0: logger.debug("CPU stats off, 
returning OK") return NagiosStatus(self.OK, "CPU thresholds unset", "") if self.percentage!=-1: if self.percentage >=self.criticalThreshold: status=NagiosStat.CRITICAL message="CRITICAL CPU Usage %d/%d" % (self.percentage,self.criticalThreshold) elif self.percentage >=self.warningThreshold: status=NagiosStat.WARNING message="WARNING CPU Usage %d/%d" % (self.percentage,self.warningThreshold) else: status=NagiosStat.OK message="OK CPU Usage %d/%d" % (self.percentage,self.warningThreshold) return NagiosStatus(status, message, "CPU=%d%%;%d;%d;;;" % (self.percentage,self.warningThreshold,self.criticalThreshold)) def setStatus(self, stats): for stat in stats: pu=stat.getStatistic('ProcessCpuUsage') if pu is not None: self.setCPUPercentage(pu.getCount()) class DataSourceUsageStat(NagiosStat): def __init__(self): NagiosStat.__init__(self) self.percentUsed=-1 def setPercentUsed(self, percentUsed): self.percentUsed=float(percentUsed) def setStatus(self, stats): for stat in stats: pu=stat.getStatistic('PercentUsed') if pu is not None: self.setPercentUsed(pu.getCurrent()) def getStatus(self): status=NagiosStat.UNKNOWN message="DataSource connection pool usage unknown" if self.criticalThreshold<0 or self.warningThreshold<0: logger.debug("DataSource usage stats off, returning OK") return NagiosStatus(self.OK, "DataSource usage thresholds unset", "") if self.percentUsed!=-1: if self.percentUsed >=self.criticalThreshold: status=NagiosStat.CRITICAL message="CRITICAL DataSource pool usage %d/%d" % (self.percentUsed,self.criticalThreshold) elif self.percentUsed >=self.warningThreshold: status=NagiosStat.WARNING message="WARNING DataSource pool usage %d/%d" % (self.percentUsed,self.warningThreshold) else: status=NagiosStat.OK message="OK DataSource usage %d/%d" % (self.percentUsed,self.warningThreshold) return NagiosStatus(status, message, "DataSourceUsage=%d%%;%d;%d;;;" % (self.percentUsed,self.warningThreshold,self.criticalThreshold)) class DataSourceWaitStat(NagiosStat): def 
__init__(self): NagiosStat.__init__(self) self.waitTime=-1 def setWaitTime(self, waitTime): self.waitTime=float(waitTime) def setStatus(self, stats): for stat in stats: pu=stat.getStatistic('WaitTime') if pu is not None: self.setWaitTime(pu.getMean()) def getStatus(self): status=NagiosStat.UNKNOWN message="DataSource connection pool wait time unknown" if self.criticalThreshold<0 or self.warningThreshold<0: logger.debug("DataSource wait stats off, returning OK") return NagiosStatus(self.OK, "DataSource wait time thresholds unset", "") if self.waitTime!=-1: if self.waitTime >=self.criticalThreshold: status=NagiosStat.CRITICAL message="CRITICAL DataSource wait time %d/%d" % (self.waitTime,self.criticalThreshold) elif self.waitTime >=self.warningThreshold: status=NagiosStat.WARNING message="WARNING DataSource wait time %d/%d" % (self.waitTime,self.warningThreshold) else: status=NagiosStat.OK message="OK DataSource wait time %d/%d" % (self.waitTime,self.warningThreshold) return NagiosStatus(status, message, "DataSourceWait=%dms;%d;%d;;;" % (self.waitTime,self.warningThreshold,self.criticalThreshold)) class DataSourceUsetimeStat(NagiosStat): def __init__(self): NagiosStat.__init__(self) self.useTime=-1 def setUseTime(self, useTime): self.useTime=float(useTime) def setStatus(self, stats): for stat in stats: pu=stat.getStatistic('UseTime') if pu is not None: self.setUseTime(pu.getMean()) def getStatus(self): status=NagiosStat.UNKNOWN message="DataSource connection pool use time unknown" if self.criticalThreshold<0 or self.warningThreshold<0: logger.debug("DataSource use time stats off, returning OK") return NagiosStatus(self.OK, "DataSource use time thresholds unset", "") if self.useTime!=-1: if self.useTime >=self.criticalThreshold: status=NagiosStat.CRITICAL message="CRITICAL DataSource use time %d/%d" % (self.useTime,self.criticalThreshold) elif self.useTime >=self.warningThreshold: status=NagiosStat.WARNING message="WARNING DataSource use time %d/%d" % 
(self.useTime,self.warningThreshold) else: status=NagiosStat.OK message="OK DataSource use time %d/%d" % (self.useTime,self.warningThreshold) return NagiosStatus(status, message, "DataSourceUsetime=%dms;%d;%d;;;" % (self.useTime,self.warningThreshold,self.criticalThreshold)) class WebContainerConcurrentHungThreadCount(NagiosStat): def __init__(self): NagiosStat.__init__(self) self.hungThreads=-1 self.maxPoolSize=-1 def setHungThreads(self, hungThreads): self.hungThreads=int(hungThreads) def setMaxPoolSize(self, maxpoolsize): self.maxPoolSize=int(maxpoolsize) def setStatus(self, stats): for stat in stats: pu=stat.getStatistic('ConcurrentHungThreadCount') if pu is not None: self.setHungThreads(pu.getCurrent()) pu=stat.getStatistic('PoolSize') if pu is not None: self.setMaxPoolSize(pu.getUpperBound()) def getStatus(self): status=NagiosStat.UNKNOWN message="Webcontainer hung threads unknown" if self.criticalThreshold<0 or self.warningThreshold<0: logger.debug("Webcontainer hung threads stats off, returning OK") return NagiosStatus(self.OK, "WebContainer hung threads thresholds unset", "") if self.hungThreads!=-1 and self.maxPoolSize!=-1: if self.maxPoolSize!=0: percentage=(float(self.hungThreads)/self.maxPoolSize)*100 else: percentage=0 if percentage >=self.criticalThreshold: status=NagiosStat.CRITICAL message="CRITICAL Webcontainer hung threads %d/%d" % (percentage,self.criticalThreshold) elif percentage >=self.warningThreshold: status=NagiosStat.WARNING message="WARNING Webcontainer hung threads %d/%d" % (percentage,self.warningThreshold) else: status=NagiosStat.OK message="OK Webcontainer hung threads %d/%d" % (percentage,self.warningThreshold) return NagiosStatus(status, message, "WebContainerConcurrentHungThreadCount=%d%%;%d;%d;;;" % (self.hungThreads,self.warningThreshold,self.criticalThreshold)) class WebContainerActiveStat(NagiosStat): def __init__(self): NagiosStat.__init__(self) self.active=-1 self.maxPoolSize=-1 def setActive(self, active): 
self.active=int(active) def setMaxPoolSize(self, maxpoolsize): self.maxPoolSize=int(maxpoolsize) def setStatus(self, stats): for stat in stats: pu=stat.getStatistic('ActiveCount') if pu is not None: self.setActive(pu.getCurrent()) pu=stat.getStatistic('PoolSize') if pu is not None: self.setMaxPoolSize(pu.getUpperBound()) def getStatus(self): status=NagiosStat.UNKNOWN message="Webcontainer usage unknown" if self.criticalThreshold<0 or self.warningThreshold<0: logger.debug("Webcontainer stats off, returning OK") return NagiosStatus(self.OK, "WebContainer thresholds unset", "") if self.active!=-1 and self.maxPoolSize!=-1: if self.maxPoolSize!=0: percentage=(float(self.active)/self.maxPoolSize)*100 else: percentage=0 if percentage >=self.criticalThreshold: status=NagiosStat.CRITICAL message="CRITICAL Webcontainer usage %d/%d" % (percentage,self.criticalThreshold) elif percentage >=self.warningThreshold: status=NagiosStat.WARNING message="WARNING Webcontainer usage %d/%d" % (percentage,self.warningThreshold) else: status=NagiosStat.OK message="OK Webcontainer usage %d/%d" % (percentage,self.warningThreshold) return NagiosStatus(status, message, "WebContainerActiveStat=%d%%;%d;%d;;;" % (self.active,self.warningThreshold,self.criticalThreshold)) class LiveSessionStat(NagiosStat): def __init__(self): NagiosStat.__init__(self) self.live=-1 def setLive(self, live): self.live=int(live) def setStatus(self, stats): for stat in stats: pu=stat.getStatistic('LiveCount') if pu is not None: self.setLive(pu.getCurrent()) def getStatus(self): status=NagiosStat.UNKNOWN message="Live sessions unknown" if self.criticalThreshold<0 or self.warningThreshold<0: logger.debug("Live sessions stats off, returning OK") return NagiosStatus(self.OK, "Live sesions thresholds unset", "") if self.live!=-1: if self.live>=self.criticalThreshold: status=NagiosStat.CRITICAL message="CRITICAL Live sessions %d/%d" % (self.live,self.criticalThreshold) elif self.live >=self.warningThreshold: 
status=NagiosStat.WARNING message="WARNING Live sessions %d/%d" % (self.live,self.warningThreshold) else: status=NagiosStat.OK message="OK Live sessions %d/%d" % (self.live,self.warningThreshold) return NagiosStatus(status, message, "LiveSession=%d;%d;%d;;;" % (self.live,self.warningThreshold,self.criticalThreshold))
gpl-2.0
wichert/rest_toolkit
tests/ext/test_colander.py
1
1039
import pytest from pyramid.httpexceptions import HTTPBadRequest from rest_toolkit.abc import EditableResource from rest_toolkit.ext.colander import ColanderSchemaValidationMixin import colander class AccountSchema(colander.Schema): email = colander.SchemaNode(colander.String()) password = colander.SchemaNode(colander.String()) class DummyResource(ColanderSchemaValidationMixin, EditableResource): schema = AccountSchema def to_dict(self): return {} def update_from_dict(self, data, partial): pass def test_valid_request(): resource = DummyResource() resource.validate({'email': 'john@example.com', 'password': 'Jane'}, partial=False) def test_validation_error(): resource = DummyResource() with pytest.raises(HTTPBadRequest): resource.validate({'email': 'john@example.com'}, partial=False) def test_partial_data(): resource = DummyResource() resource.to_dict = lambda: {'password': 'Jane'} resource.validate({'email': 'john@example.com'}, partial=True)
bsd-2-clause
jjmiranda/edx-platform
lms/djangoapps/verify_student/admin.py
7
2230
# encoding: utf-8 """ Admin site configurations for verify_student. """ from config_models.admin import ConfigurationModelAdmin from ratelimitbackend import admin from lms.djangoapps.verify_student.models import ( IcrvStatusEmailsConfiguration, SkippedReverification, SoftwareSecurePhotoVerification, VerificationStatus, ) class SoftwareSecurePhotoVerificationAdmin(admin.ModelAdmin): """ Admin for the SoftwareSecurePhotoVerification table. """ list_display = ('id', 'user', 'status', 'receipt_id', 'submitted_at', 'updated_at') raw_id_fields = ('user', 'reviewing_user') search_fields = ( 'receipt_id', 'user__username' ) class VerificationStatusAdmin(admin.ModelAdmin): """ Admin for the VerificationStatus table. """ list_display = ('timestamp', 'user', 'status', 'checkpoint') readonly_fields = () search_fields = ('checkpoint__checkpoint_location', 'user__username') raw_id_fields = ('user',) def get_readonly_fields(self, request, obj=None): """When editing an existing record, all fields should be read-only. VerificationStatus records should be immutable; to change the user's status, create a new record with the updated status and a more recent timestamp. """ if obj: return self.readonly_fields + ('status', 'checkpoint', 'user', 'response', 'error') return self.readonly_fields class SkippedReverificationAdmin(admin.ModelAdmin): """Admin for the SkippedReverification table. """ list_display = ('created_at', 'user', 'course_id', 'checkpoint') raw_id_fields = ('user',) readonly_fields = ('user', 'course_id') search_fields = ('user__username', 'course_id', 'checkpoint__checkpoint_location') def has_add_permission(self, request): """Skipped verifications can't be created in Django admin. 
""" return False admin.site.register(SoftwareSecurePhotoVerification, SoftwareSecurePhotoVerificationAdmin) admin.site.register(SkippedReverification, SkippedReverificationAdmin) admin.site.register(VerificationStatus, VerificationStatusAdmin) admin.site.register(IcrvStatusEmailsConfiguration, ConfigurationModelAdmin)
agpl-3.0
robinbach/adv-loop-perf
04modelPython/Regression.py
1
4435
from sklearn import svm from sklearn import linear_model from sklearn.kernel_ridge import KernelRidge import numpy as np import sys import random import matplotlib.pyplot as plt numTrain = 11 def readFile(fPath): data = np.genfromtxt(fPath, delimiter=',') random.shuffle(data) performance = data.T[-2] distortion = data.T[-1] numX = len(data.T) - 2 A = data.T[0:numX] for i in range(len(A)): A[i] = A[i] / max(max(A[i]), 1.0) A = A.T ATrain = A[0:numTrain] ATest = A[numTrain + 1:] performanceTrain = performance[0:numTrain] performanceTest = performance[numTrain + 1:] distortionTrain = distortion[0:numTrain] distortionTest = distortion[numTrain + 1:] return ATrain, performanceTrain, distortionTrain, ATest, performanceTest, distortionTest def linearRegression(ATrain, performanceTrain, distortionTrain, ATest, performanceTest, distortionTest): lr = linear_model.LinearRegression() lr.fit(ATrain, performanceTrain) performancePred = lr.predict(ATest) performanceErr = sum(abs(performancePred - performanceTest)) / len(performanceTest) print 'linear regression performance error: ', performanceErr lr.fit(ATrain, distortionTrain) distortionPred = lr.predict(ATest) distortionErr = sum(abs(distortionPred - distortionTest)) / len(distortionTest) print 'linear regression distortion error: ', distortionErr histoPlot(performancePred, performanceTest) histoPlot(distortionPred, distortionTest) def SVR(ATrain, performanceTrain, distortionTrain, ATest, performanceTest, distortionTest): clf = svm.SVR(C=100, epsilon=0.001) clf.fit(ATrain, performanceTrain) performancePred = clf.predict(ATest) performanceErr = sum(abs(performancePred - performanceTest)) / len(performanceTest) print 'SVR performance error: ', performanceErr clf.fit(ATrain, distortionTrain) distortionPred = clf.predict(ATest) distortionErr = sum(abs(distortionPred - distortionTest)) / len(distortionTest) print 'SVR distortion error: ', distortionErr histoPlot(performancePred, performanceTest) histoPlot(distortionPred, 
distortionTest) def ridgeRegression(ATrain, performanceTrain, distortionTrain, ATest, performanceTest, distortionTest): model = KernelRidge(alpha=0.01, kernel='sigmoid') model.fit(ATrain, performanceTrain) performancePred = model.predict(ATest) performanceErr = sum(abs(performancePred - performanceTest)) / len(performanceTest) print 'Kernel ridge performance error: ', performanceErr model.fit(ATrain, distortionTrain) distortionPred = model.predict(ATest) distortionErr = sum(abs(distortionPred - distortionTest)) / len(distortionTest) print 'Kernel ridge distortion error: ', distortionErr histoPlot(performancePred, performanceTest) histoPlot(distortionPred, distortionTest) def robustRegression(ATrain, performanceTrain, distortionTrain, ATest, performanceTest, distortionTest): model_ransac = linear_model.RANSACRegressor(linear_model.LinearRegression()) model_ransac.fit(ATrain, performanceTrain) model_ransac.predict(ATest) temp = model_ransac.predict(ATest) performancePred = [] for data in temp: performancePred.append(data[0]) model_ransac.fit(ATrain, distortionTrain) model_ransac.predict(ATest) temp = model_ransac.predict(ATest) distortionPred = [] for data in temp: distortionPred.append(data[0]) histoPlot(performancePred, performanceTest) histoPlot(distortionPred, distortionTest) def histoPlot(pred, actual): x = np.arange(len(actual)) plt.hold(True) rects1 = plt.bar(x, pred, 0.2, color='r') x = x + 0.2 rects2 = plt.bar(x, actual, 0.2) plt.legend((rects1[0], rects2[0]), ('Prediction', 'Actual'), fontsize=20) plt.xlabel('Data Point', fontsize=30) plt.ylabel('Value', fontsize=30) performanceErr = sum(abs(pred - actual)) / len(actual) print 'Error: ', performanceErr plt.title('Mean error: ' + ('%.3f' % performanceErr), fontsize=30) plt.hold(False) plt.show() def main(): dataPath = sys.argv[1] ATrain, performanceTrain, distortionTrain, ATest, performanceTest, distortionTest = readFile(dataPath) linearRegression(ATrain, performanceTrain, distortionTrain, ATest, 
performanceTest, distortionTest) SVR(ATrain, performanceTrain, distortionTrain, ATest, performanceTest, distortionTest) ridgeRegression(ATrain, performanceTrain, distortionTrain, ATest, performanceTest, distortionTest) robustRegression(ATrain, performanceTrain, distortionTrain, ATest, performanceTest, distortionTest) if __name__ == '__main__': main()
mit
tienfuc/iotivity-democlient-snap
extlibs/GrovePi/Software/Python/grove_moisture_sensor.py
7
2296
#!/usr/bin/env python # # GrovePi Example for using the Grove Moisture Sensor (http://www.seeedstudio.com/wiki/Grove_-_Moisture_sensor) # # The GrovePi connects the Raspberry Pi and Grove sensors. You can learn more about GrovePi here: http://www.dexterindustries.com/GrovePi # # Have a question about this example? Ask on the forums here: http://www.dexterindustries.com/forum/?forum=grovepi # ''' ## License The MIT License (MIT) GrovePi for the Raspberry Pi: an open source platform for connecting Grove Sensors to the Raspberry Pi. Copyright (C) 2015 Dexter Industries Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
''' # NOTE: # The wiki suggests the following sensor values: # Min Typ Max Condition # 0 0 0 sensor in open air # 0 20 300 sensor in dry soil # 300 580 700 sensor in humid soil # 700 940 950 sensor in water # Sensor values observer: # Val Condition # 0 sensor in open air # 18 sensor in dry soil # 425 sensor in humid soil # 690 sensor in water import time import grovepi # Connect the Grove Moisture Sensor to analog port A0 # SIG,NC,VCC,GND sensor = 0 while True: try: print (grovepi.analogRead(sensor)) time.sleep(.5) except KeyboardInterrupt: break except IOError: print ("Error")
apache-2.0
signalfx/Diamond
src/diamond/handler/logentries_diamond.py
19
2499
# coding=utf-8 """ [Logentries: Log Management & Analytics Made Easy ](https://logentries.com/). Send Diamond stats to your Logentries Account where you can monitor and alert based on data in real time. #### Dependencies #### Configuration Enable this handler * handers = diamond.handler.logentries.LogentriesDiamondHandler * log_token = [Your Log Token](https://logentries.com/doc/input-token/) * queue_size = Integer value """ from Handler import Handler import logging import urllib2 import json from collections import deque class LogentriesDiamondHandler(Handler): """ Implements the abstract Handler class """ def __init__(self, config=None): """ New instance of LogentriesDiamondHandler class """ Handler.__init__(self, config) self.log_token = self.config.get('log_token', None) self.queue_size = int(self.config['queue_size']) self.queue = deque([]) if self.log_token is None: raise Exception def get_default_config_help(self): """ Help text """ config = super(LogentriesDiamondHandler, self).get_default_config_help() config.update({ 'log_token': '', 'queue_size': '' }) return config def get_default_config(self): """ Return default config for the handler """ config = super(LogentriesDiamondHandler, self).get_default_config() config.update({ 'log_token': '', 'queue_size': 100 }) return config def process(self, metric): """ Process metric by sending it to datadog api """ self.queue.append(metric) if len(self.queue) >= self.queue_size: logging.debug("Queue is full, sending logs to Logentries") self._send() def _send(self): """ Convert message to a json object and send to Lognetries """ while len(self.queue) > 0: metric = self.queue.popleft() topic, value, timestamp = str(metric).split() msg = json.dumps({"event": {topic: value}}) req = urllib2.Request("https://js.logentries.com/v1/logs/" + self.log_token, msg) try: urllib2.urlopen(req) except urllib2.URLError, e: logging.error("Can't send log message to Logentries %s", e)
mit
archf/ansible
lib/ansible/modules/cloud/cloudstack/cs_securitygroup_rule.py
19
14217
#!/usr/bin/python # -*- coding: utf-8 -*- # # (c) 2015, René Moser <mail@renemoser.net> # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['stableinterface'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: cs_securitygroup_rule short_description: Manages security group rules on Apache CloudStack based clouds. description: - Add and remove security group rules. version_added: '2.0' author: "René Moser (@resmo)" options: security_group: description: - Name of the security group the rule is related to. The security group must be existing. required: true state: description: - State of the security group rule. required: false default: 'present' choices: [ 'present', 'absent' ] protocol: description: - Protocol of the security group rule. required: false default: 'tcp' choices: [ 'tcp', 'udp', 'icmp', 'ah', 'esp', 'gre' ] type: description: - Ingress or egress security group rule. required: false default: 'ingress' choices: [ 'ingress', 'egress' ] cidr: description: - CIDR (full notation) to be used for security group rule. required: false default: '0.0.0.0/0' user_security_group: description: - Security group this rule is based of. required: false default: null start_port: description: - Start port for this rule. Required if C(protocol=tcp) or C(protocol=udp). 
required: false default: null aliases: [ 'port' ] end_port: description: - End port for this rule. Required if C(protocol=tcp) or C(protocol=udp), but C(start_port) will be used if not set. required: false default: null icmp_type: description: - Type of the icmp message being sent. Required if C(protocol=icmp). required: false default: null icmp_code: description: - Error code for this icmp message. Required if C(protocol=icmp). required: false default: null project: description: - Name of the project the security group to be created in. required: false default: null poll_async: description: - Poll async jobs until job has finished. required: false default: true extends_documentation_fragment: cloudstack ''' EXAMPLES = ''' --- # Allow inbound port 80/tcp from 1.2.3.4 added to security group 'default' - local_action: module: cs_securitygroup_rule security_group: default port: 80 cidr: 1.2.3.4/32 # Allow tcp/udp outbound added to security group 'default' - local_action: module: cs_securitygroup_rule security_group: default type: egress start_port: 1 end_port: 65535 protocol: '{{ item }}' with_items: - tcp - udp # Allow inbound icmp from 0.0.0.0/0 added to security group 'default' - local_action: module: cs_securitygroup_rule security_group: default protocol: icmp icmp_code: -1 icmp_type: -1 # Remove rule inbound port 80/tcp from 0.0.0.0/0 from security group 'default' - local_action: module: cs_securitygroup_rule security_group: default port: 80 state: absent # Allow inbound port 80/tcp from security group web added to security group 'default' - local_action: module: cs_securitygroup_rule security_group: default port: 80 user_security_group: web ''' RETURN = ''' --- id: description: UUID of the of the rule. returned: success type: string sample: a6f7a5fc-43f8-11e5-a151-feff819cdc9f security_group: description: security group of the rule. returned: success type: string sample: default type: description: type of the rule. 
returned: success type: string sample: ingress cidr: description: CIDR of the rule. returned: success and cidr is defined type: string sample: 0.0.0.0/0 user_security_group: description: user security group of the rule. returned: success and user_security_group is defined type: string sample: default protocol: description: protocol of the rule. returned: success type: string sample: tcp start_port: description: start port of the rule. returned: success type: int sample: 80 end_port: description: end port of the rule. returned: success type: int sample: 80 ''' # import cloudstack common from ansible.module_utils.cloudstack import * class AnsibleCloudStackSecurityGroupRule(AnsibleCloudStack): def __init__(self, module): super(AnsibleCloudStackSecurityGroupRule, self).__init__(module) self.returns = { 'icmptype': 'icmp_type', 'icmpcode': 'icmp_code', 'endport': 'end_port', 'startport': 'start_port', 'protocol': 'protocol', 'cidr': 'cidr', 'securitygroupname': 'user_security_group', } def _tcp_udp_match(self, rule, protocol, start_port, end_port): return protocol in ['tcp', 'udp'] \ and protocol == rule['protocol'] \ and start_port == int(rule['startport']) \ and end_port == int(rule['endport']) def _icmp_match(self, rule, protocol, icmp_code, icmp_type): return protocol == 'icmp' \ and protocol == rule['protocol'] \ and icmp_code == int(rule['icmpcode']) \ and icmp_type == int(rule['icmptype']) def _ah_esp_gre_match(self, rule, protocol): return protocol in ['ah', 'esp', 'gre'] \ and protocol == rule['protocol'] def _type_security_group_match(self, rule, security_group_name): return security_group_name \ and 'securitygroupname' in rule \ and security_group_name == rule['securitygroupname'] def _type_cidr_match(self, rule, cidr): return 'cidr' in rule \ and cidr == rule['cidr'] def _get_rule(self, rules): user_security_group_name = self.module.params.get('user_security_group') cidr = self.module.params.get('cidr') protocol = self.module.params.get('protocol') 
start_port = self.module.params.get('start_port') end_port = self.get_or_fallback('end_port', 'start_port') icmp_code = self.module.params.get('icmp_code') icmp_type = self.module.params.get('icmp_type') if protocol in ['tcp', 'udp'] and (start_port is None or end_port is None): self.module.fail_json(msg="no start_port or end_port set for protocol '%s'" % protocol) if protocol == 'icmp' and (icmp_type is None or icmp_code is None): self.module.fail_json(msg="no icmp_type or icmp_code set for protocol '%s'" % protocol) for rule in rules: if user_security_group_name: type_match = self._type_security_group_match(rule, user_security_group_name) else: type_match = self._type_cidr_match(rule, cidr) protocol_match = ( self._tcp_udp_match(rule, protocol, start_port, end_port) \ or self._icmp_match(rule, protocol, icmp_code, icmp_type) \ or self._ah_esp_gre_match(rule, protocol) ) if type_match and protocol_match: return rule return None def get_security_group(self, security_group_name=None): if not security_group_name: security_group_name = self.module.params.get('security_group') args = {} args['securitygroupname'] = security_group_name args['projectid'] = self.get_project('id') sgs = self.cs.listSecurityGroups(**args) if not sgs or 'securitygroup' not in sgs: self.module.fail_json(msg="security group '%s' not found" % security_group_name) return sgs['securitygroup'][0] def add_rule(self): security_group = self.get_security_group() args = {} user_security_group_name = self.module.params.get('user_security_group') # the user_security_group and cidr are mutually_exclusive, but cidr is defaulted to 0.0.0.0/0. # that is why we ignore if we have a user_security_group. 
        # --- tail of add_rule(); the method header lies above this chunk ---
        # A rule targets either a user security group or a CIDR; the two are
        # mutually exclusive in the CloudStack API.
        if user_security_group_name:
            args['usersecuritygrouplist'] = []
            user_security_group = self.get_security_group(user_security_group_name)
            args['usersecuritygrouplist'].append({
                'group': user_security_group['name'],
                'account': user_security_group['account'],
            })
        else:
            args['cidrlist'] = self.module.params.get('cidr')

        args['protocol'] = self.module.params.get('protocol')
        args['startport'] = self.module.params.get('start_port')
        # end_port falls back to start_port when not supplied.
        args['endport'] = self.get_or_fallback('end_port', 'start_port')
        args['icmptype'] = self.module.params.get('icmp_type')
        args['icmpcode'] = self.module.params.get('icmp_code')
        args['projectid'] = self.get_project('id')
        args['securitygroupid'] = security_group['id']

        rule = None
        res = None
        sg_type = self.module.params.get('type')

        # Idempotency: only authorize a new rule when no identical rule
        # exists yet; honor check mode by skipping the API call.
        if sg_type == 'ingress':
            if 'ingressrule' in security_group:
                rule = self._get_rule(security_group['ingressrule'])
            if not rule:
                self.result['changed'] = True
                if not self.module.check_mode:
                    res = self.cs.authorizeSecurityGroupIngress(**args)

        elif sg_type == 'egress':
            if 'egressrule' in security_group:
                rule = self._get_rule(security_group['egressrule'])
            if not rule:
                self.result['changed'] = True
                if not self.module.check_mode:
                    res = self.cs.authorizeSecurityGroupEgress(**args)

        if res and 'errortext' in res:
            self.module.fail_json(msg="Failed: '%s'" % res['errortext'])

        # Wait for the async job, then pick the freshly created rule out of
        # the returned security group.
        poll_async = self.module.params.get('poll_async')
        if res and poll_async:
            security_group = self.poll_job(res, 'securitygroup')
            key = sg_type + "rule"  # ingressrule / egressrule
            if key in security_group:
                rule = security_group[key][0]
        return rule

    def remove_rule(self):
        """Revoke the matching ingress/egress rule if it exists.

        Returns the rule dict that was removed, or None when no matching
        rule was found (nothing to do). Honors check mode.
        """
        security_group = self.get_security_group()
        rule = None
        res = None
        sg_type = self.module.params.get('type')
        if sg_type == 'ingress':
            rule = self._get_rule(security_group['ingressrule'])
            if rule:
                self.result['changed'] = True
                if not self.module.check_mode:
                    res = self.cs.revokeSecurityGroupIngress(id=rule['ruleid'])

        elif sg_type == 'egress':
            rule = self._get_rule(security_group['egressrule'])
            if rule:
                self.result['changed'] = True
                if not self.module.check_mode:
                    res = self.cs.revokeSecurityGroupEgress(id=rule['ruleid'])

        if res and 'errortext' in res:
            self.module.fail_json(msg="Failed: '%s'" % res['errortext'])

        poll_async = self.module.params.get('poll_async')
        if res and poll_async:
            res = self.poll_job(res, 'securitygroup')
        return rule

    def get_result(self, security_group_rule):
        """Augment the base result dict with module-specific return facts."""
        super(AnsibleCloudStackSecurityGroupRule, self).get_result(security_group_rule)
        self.result['type'] = self.module.params.get('type')
        self.result['security_group'] = self.module.params.get('security_group')
        return self.result


def main():
    """Ansible entry point: parse arguments and converge the rule state."""
    argument_spec = cs_argument_spec()
    argument_spec.update(dict(
        security_group = dict(required=True),
        type = dict(choices=['ingress', 'egress'], default='ingress'),
        cidr = dict(default='0.0.0.0/0'),
        user_security_group = dict(default=None),
        protocol = dict(choices=['tcp', 'udp', 'icmp', 'ah', 'esp', 'gre'], default='tcp'),
        icmp_type = dict(type='int', default=None),
        icmp_code = dict(type='int', default=None),
        start_port = dict(type='int', default=None, aliases=['port']),
        end_port = dict(type='int', default=None),
        state = dict(choices=['present', 'absent'], default='present'),
        project = dict(default=None),
        poll_async = dict(type='bool', default=True),
    ))

    required_together = cs_required_together()
    required_together.extend([
        ['icmp_type', 'icmp_code'],
    ])

    module = AnsibleModule(
        argument_spec=argument_spec,
        required_together=required_together,
        # ICMP type/code cannot be combined with TCP/UDP port ranges.
        mutually_exclusive = (
            ['icmp_type', 'start_port'],
            ['icmp_type', 'end_port'],
            ['icmp_code', 'start_port'],
            ['icmp_code', 'end_port'],
        ),
        supports_check_mode=True
    )

    try:
        acs_sg_rule = AnsibleCloudStackSecurityGroupRule(module)

        state = module.params.get('state')
        if state in ['absent']:
            sg_rule = acs_sg_rule.remove_rule()
        else:
            sg_rule = acs_sg_rule.add_rule()

        result = acs_sg_rule.get_result(sg_rule)

    except CloudStackException as e:
        module.fail_json(msg='CloudStackException: %s' % str(e))

    module.exit_json(**result)

# import module snippets
from ansible.module_utils.basic import *

if __name__ == '__main__':
    main()
gpl-3.0
tersmitten/ansible
lib/ansible/modules/network/fortimanager/fmgr_secprof_ssl_ssh.py
39
39490
#!/usr/bin/python # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'status': ['preview'], 'supported_by': 'community', 'metadata_version': '1.1'} DOCUMENTATION = ''' --- module: fmgr_secprof_ssl_ssh version_added: "2.8" notes: - Full Documentation at U(https://ftnt-ansible-docs.readthedocs.io/en/latest/). author: - Luke Weighall (@lweighall) - Andrew Welsh (@Ghilli3) - Jim Huber (@p4r4n0y1ng) short_description: Manage SSL and SSH security profiles in FortiManager description: - Manage SSL and SSH security profiles in FortiManager via the FMG API options: adom: description: - The ADOM the configuration should belong to. required: false default: root mode: description: - Sets one of three modes for managing the object. - Allows use of soft-adds instead of overwriting existing values choices: ['add', 'set', 'delete', 'update'] required: false default: add whitelist: description: - Enable/disable exempting servers by FortiGuard whitelist. - choice | disable | Disable setting. - choice | enable | Enable setting. required: false choices: ["disable", "enable"] use_ssl_server: description: - Enable/disable the use of SSL server table for SSL offloading. - choice | disable | Don't use SSL server configuration. - choice | enable | Use SSL server configuration. 
required: false choices: ["disable", "enable"] untrusted_caname: description: - Untrusted CA certificate used by SSL Inspection. required: false ssl_exemptions_log: description: - Enable/disable logging SSL exemptions. - choice | disable | Disable logging SSL exemptions. - choice | enable | Enable logging SSL exemptions. required: false choices: ["disable", "enable"] ssl_anomalies_log: description: - Enable/disable logging SSL anomalies. - choice | disable | Disable logging SSL anomalies. - choice | enable | Enable logging SSL anomalies. required: false choices: ["disable", "enable"] server_cert_mode: description: - Re-sign or replace the server's certificate. - choice | re-sign | Multiple clients connecting to multiple servers. - choice | replace | Protect an SSL server. required: false choices: ["re-sign", "replace"] server_cert: description: - Certificate used by SSL Inspection to replace server certificate. required: false rpc_over_https: description: - Enable/disable inspection of RPC over HTTPS. - choice | disable | Disable inspection of RPC over HTTPS. - choice | enable | Enable inspection of RPC over HTTPS. required: false choices: ["disable", "enable"] name: description: - Name. required: false mapi_over_https: description: - Enable/disable inspection of MAPI over HTTPS. - choice | disable | Disable inspection of MAPI over HTTPS. - choice | enable | Enable inspection of MAPI over HTTPS. required: false choices: ["disable", "enable"] comment: description: - Optional comments. required: false caname: description: - CA certificate used by SSL Inspection. required: false ftps: description: - EXPERTS ONLY! KNOWLEDGE OF FMGR JSON API IS REQUIRED! - List of multiple child objects to be added. Expects a list of dictionaries. - Dictionaries must use FortiManager API parameters, not the ansible ones listed below. - If submitted, all other prefixed sub-parameters ARE IGNORED. - This object is MUTUALLY EXCLUSIVE with its options. 
- We expect that you know what you are doing with these list parameters, and are leveraging the JSON API Guide. - WHEN IN DOUBT, USE THE SUB OPTIONS BELOW INSTEAD TO CREATE OBJECTS WITH MULTIPLE TASKS required: false ftps_allow_invalid_server_cert: description: - When enabled, allows SSL sessions whose server certificate validation failed. - choice | disable | Disable setting. - choice | enable | Enable setting. required: false choices: ["disable", "enable"] ftps_client_cert_request: description: - Action based on client certificate request failure. - choice | bypass | Bypass. - choice | inspect | Inspect. - choice | block | Block. required: false choices: ["bypass", "inspect", "block"] ftps_ports: description: - Ports to use for scanning (1 - 65535, default = 443). required: false ftps_status: description: - Configure protocol inspection status. - choice | disable | Disable. - choice | deep-inspection | Full SSL inspection. required: false choices: ["disable", "deep-inspection"] ftps_unsupported_ssl: description: - Action based on the SSL encryption used being unsupported. - choice | bypass | Bypass. - choice | inspect | Inspect. - choice | block | Block. required: false choices: ["bypass", "inspect", "block"] ftps_untrusted_cert: description: - Allow, ignore, or block the untrusted SSL session server certificate. - choice | allow | Allow the untrusted server certificate. - choice | block | Block the connection when an untrusted server certificate is detected. - choice | ignore | Always take the server certificate as trusted. required: false choices: ["allow", "block", "ignore"] https: description: - EXPERTS ONLY! KNOWLEDGE OF FMGR JSON API IS REQUIRED! - List of multiple child objects to be added. Expects a list of dictionaries. - Dictionaries must use FortiManager API parameters, not the ansible ones listed below. - If submitted, all other prefixed sub-parameters ARE IGNORED. - This object is MUTUALLY EXCLUSIVE with its options. 
- We expect that you know what you are doing with these list parameters, and are leveraging the JSON API Guide. - WHEN IN DOUBT, USE THE SUB OPTIONS BELOW INSTEAD TO CREATE OBJECTS WITH MULTIPLE TASKS required: false https_allow_invalid_server_cert: description: - When enabled, allows SSL sessions whose server certificate validation failed. - choice | disable | Disable setting. - choice | enable | Enable setting. required: false choices: ["disable", "enable"] https_client_cert_request: description: - Action based on client certificate request failure. - choice | bypass | Bypass. - choice | inspect | Inspect. - choice | block | Block. required: false choices: ["bypass", "inspect", "block"] https_ports: description: - Ports to use for scanning (1 - 65535, default = 443). required: false https_status: description: - Configure protocol inspection status. - choice | disable | Disable. - choice | certificate-inspection | Inspect SSL handshake only. - choice | deep-inspection | Full SSL inspection. required: false choices: ["disable", "certificate-inspection", "deep-inspection"] https_unsupported_ssl: description: - Action based on the SSL encryption used being unsupported. - choice | bypass | Bypass. - choice | inspect | Inspect. - choice | block | Block. required: false choices: ["bypass", "inspect", "block"] https_untrusted_cert: description: - Allow, ignore, or block the untrusted SSL session server certificate. - choice | allow | Allow the untrusted server certificate. - choice | block | Block the connection when an untrusted server certificate is detected. - choice | ignore | Always take the server certificate as trusted. required: false choices: ["allow", "block", "ignore"] imaps: description: - EXPERTS ONLY! KNOWLEDGE OF FMGR JSON API IS REQUIRED! - List of multiple child objects to be added. Expects a list of dictionaries. - Dictionaries must use FortiManager API parameters, not the ansible ones listed below. 
- If submitted, all other prefixed sub-parameters ARE IGNORED. - This object is MUTUALLY EXCLUSIVE with its options. - We expect that you know what you are doing with these list parameters, and are leveraging the JSON API Guide. - WHEN IN DOUBT, USE THE SUB OPTIONS BELOW INSTEAD TO CREATE OBJECTS WITH MULTIPLE TASKS required: false imaps_allow_invalid_server_cert: description: - When enabled, allows SSL sessions whose server certificate validation failed. - choice | disable | Disable setting. - choice | enable | Enable setting. required: false choices: ["disable", "enable"] imaps_client_cert_request: description: - Action based on client certificate request failure. - choice | bypass | Bypass. - choice | inspect | Inspect. - choice | block | Block. required: false choices: ["bypass", "inspect", "block"] imaps_ports: description: - Ports to use for scanning (1 - 65535, default = 443). required: false imaps_status: description: - Configure protocol inspection status. - choice | disable | Disable. - choice | deep-inspection | Full SSL inspection. required: false choices: ["disable", "deep-inspection"] imaps_unsupported_ssl: description: - Action based on the SSL encryption used being unsupported. - choice | bypass | Bypass. - choice | inspect | Inspect. - choice | block | Block. required: false choices: ["bypass", "inspect", "block"] imaps_untrusted_cert: description: - Allow, ignore, or block the untrusted SSL session server certificate. - choice | allow | Allow the untrusted server certificate. - choice | block | Block the connection when an untrusted server certificate is detected. - choice | ignore | Always take the server certificate as trusted. required: false choices: ["allow", "block", "ignore"] pop3s: description: - EXPERTS ONLY! KNOWLEDGE OF FMGR JSON API IS REQUIRED! - List of multiple child objects to be added. Expects a list of dictionaries. - Dictionaries must use FortiManager API parameters, not the ansible ones listed below. 
- If submitted, all other prefixed sub-parameters ARE IGNORED. - This object is MUTUALLY EXCLUSIVE with its options. - We expect that you know what you are doing with these list parameters, and are leveraging the JSON API Guide. - WHEN IN DOUBT, USE THE SUB OPTIONS BELOW INSTEAD TO CREATE OBJECTS WITH MULTIPLE TASKS required: false pop3s_allow_invalid_server_cert: description: - When enabled, allows SSL sessions whose server certificate validation failed. - choice | disable | Disable setting. - choice | enable | Enable setting. required: false choices: ["disable", "enable"] pop3s_client_cert_request: description: - Action based on client certificate request failure. - choice | bypass | Bypass. - choice | inspect | Inspect. - choice | block | Block. required: false choices: ["bypass", "inspect", "block"] pop3s_ports: description: - Ports to use for scanning (1 - 65535, default = 443). required: false pop3s_status: description: - Configure protocol inspection status. - choice | disable | Disable. - choice | deep-inspection | Full SSL inspection. required: false choices: ["disable", "deep-inspection"] pop3s_unsupported_ssl: description: - Action based on the SSL encryption used being unsupported. - choice | bypass | Bypass. - choice | inspect | Inspect. - choice | block | Block. required: false choices: ["bypass", "inspect", "block"] pop3s_untrusted_cert: description: - Allow, ignore, or block the untrusted SSL session server certificate. - choice | allow | Allow the untrusted server certificate. - choice | block | Block the connection when an untrusted server certificate is detected. - choice | ignore | Always take the server certificate as trusted. required: false choices: ["allow", "block", "ignore"] smtps: description: - EXPERTS ONLY! KNOWLEDGE OF FMGR JSON API IS REQUIRED! - List of multiple child objects to be added. Expects a list of dictionaries. - Dictionaries must use FortiManager API parameters, not the ansible ones listed below. 
- If submitted, all other prefixed sub-parameters ARE IGNORED. - This object is MUTUALLY EXCLUSIVE with its options. - We expect that you know what you are doing with these list parameters, and are leveraging the JSON API Guide. - WHEN IN DOUBT, USE THE SUB OPTIONS BELOW INSTEAD TO CREATE OBJECTS WITH MULTIPLE TASKS required: false smtps_allow_invalid_server_cert: description: - When enabled, allows SSL sessions whose server certificate validation failed. - choice | disable | Disable setting. - choice | enable | Enable setting. required: false choices: ["disable", "enable"] smtps_client_cert_request: description: - Action based on client certificate request failure. - choice | bypass | Bypass. - choice | inspect | Inspect. - choice | block | Block. required: false choices: ["bypass", "inspect", "block"] smtps_ports: description: - Ports to use for scanning (1 - 65535, default = 443). required: false smtps_status: description: - Configure protocol inspection status. - choice | disable | Disable. - choice | deep-inspection | Full SSL inspection. required: false choices: ["disable", "deep-inspection"] smtps_unsupported_ssl: description: - Action based on the SSL encryption used being unsupported. - choice | bypass | Bypass. - choice | inspect | Inspect. - choice | block | Block. required: false choices: ["bypass", "inspect", "block"] smtps_untrusted_cert: description: - Allow, ignore, or block the untrusted SSL session server certificate. - choice | allow | Allow the untrusted server certificate. - choice | block | Block the connection when an untrusted server certificate is detected. - choice | ignore | Always take the server certificate as trusted. required: false choices: ["allow", "block", "ignore"] ssh: description: - EXPERTS ONLY! KNOWLEDGE OF FMGR JSON API IS REQUIRED! - List of multiple child objects to be added. Expects a list of dictionaries. - Dictionaries must use FortiManager API parameters, not the ansible ones listed below. 
- If submitted, all other prefixed sub-parameters ARE IGNORED. - This object is MUTUALLY EXCLUSIVE with its options. - We expect that you know what you are doing with these list parameters, and are leveraging the JSON API Guide. - WHEN IN DOUBT, USE THE SUB OPTIONS BELOW INSTEAD TO CREATE OBJECTS WITH MULTIPLE TASKS required: false ssh_inspect_all: description: - Level of SSL inspection. - choice | disable | Disable. - choice | deep-inspection | Full SSL inspection. required: false choices: ["disable", "deep-inspection"] ssh_ports: description: - Ports to use for scanning (1 - 65535, default = 443). required: false ssh_ssh_algorithm: description: - Relative strength of encryption algorithms accepted during negotiation. - choice | compatible | Allow a broader set of encryption algorithms for best compatibility. - choice | high-encryption | Allow only AES-CTR, AES-GCM ciphers and high encryption algorithms. required: false choices: ["compatible", "high-encryption"] ssh_ssh_policy_check: description: - Enable/disable SSH policy check. - choice | disable | Disable SSH policy check. - choice | enable | Enable SSH policy check. required: false choices: ["disable", "enable"] ssh_ssh_tun_policy_check: description: - Enable/disable SSH tunnel policy check. - choice | disable | Disable SSH tunnel policy check. - choice | enable | Enable SSH tunnel policy check. required: false choices: ["disable", "enable"] ssh_status: description: - Configure protocol inspection status. - choice | disable | Disable. - choice | deep-inspection | Full SSL inspection. required: false choices: ["disable", "deep-inspection"] ssh_unsupported_version: description: - Action based on SSH version being unsupported. - choice | block | Block. - choice | bypass | Bypass. required: false choices: ["block", "bypass"] ssl: description: - EXPERTS ONLY! KNOWLEDGE OF FMGR JSON API IS REQUIRED! - List of multiple child objects to be added. Expects a list of dictionaries. 
- Dictionaries must use FortiManager API parameters, not the ansible ones listed below. - If submitted, all other prefixed sub-parameters ARE IGNORED. - This object is MUTUALLY EXCLUSIVE with its options. - We expect that you know what you are doing with these list parameters, and are leveraging the JSON API Guide. - WHEN IN DOUBT, USE THE SUB OPTIONS BELOW INSTEAD TO CREATE OBJECTS WITH MULTIPLE TASKS required: false ssl_allow_invalid_server_cert: description: - When enabled, allows SSL sessions whose server certificate validation failed. - choice | disable | Disable setting. - choice | enable | Enable setting. required: false choices: ["disable", "enable"] ssl_client_cert_request: description: - Action based on client certificate request failure. - choice | bypass | Bypass. - choice | inspect | Inspect. - choice | block | Block. required: false choices: ["bypass", "inspect", "block"] ssl_inspect_all: description: - Level of SSL inspection. - choice | disable | Disable. - choice | certificate-inspection | Inspect SSL handshake only. - choice | deep-inspection | Full SSL inspection. required: false choices: ["disable", "certificate-inspection", "deep-inspection"] ssl_unsupported_ssl: description: - Action based on the SSL encryption used being unsupported. - choice | bypass | Bypass. - choice | inspect | Inspect. - choice | block | Block. required: false choices: ["bypass", "inspect", "block"] ssl_untrusted_cert: description: - Allow, ignore, or block the untrusted SSL session server certificate. - choice | allow | Allow the untrusted server certificate. - choice | block | Block the connection when an untrusted server certificate is detected. - choice | ignore | Always take the server certificate as trusted. required: false choices: ["allow", "block", "ignore"] ssl_exempt: description: - EXPERTS ONLY! KNOWLEDGE OF FMGR JSON API IS REQUIRED! - List of multiple child objects to be added. Expects a list of dictionaries. 
- Dictionaries must use FortiManager API parameters, not the ansible ones listed below. - If submitted, all other prefixed sub-parameters ARE IGNORED. - This object is MUTUALLY EXCLUSIVE with its options. - We expect that you know what you are doing with these list parameters, and are leveraging the JSON API Guide. - WHEN IN DOUBT, USE THE SUB OPTIONS BELOW INSTEAD TO CREATE OBJECTS WITH MULTIPLE TASKS required: false ssl_exempt_address: description: - IPv4 address object. required: false ssl_exempt_address6: description: - IPv6 address object. required: false ssl_exempt_fortiguard_category: description: - FortiGuard category ID. required: false ssl_exempt_regex: description: - Exempt servers by regular expression. required: false ssl_exempt_type: description: - Type of address object (IPv4 or IPv6) or FortiGuard category. - choice | fortiguard-category | FortiGuard category. - choice | address | Firewall IPv4 address. - choice | address6 | Firewall IPv6 address. - choice | wildcard-fqdn | Fully Qualified Domain Name with wildcard characters. - choice | regex | Regular expression FQDN. required: false choices: ["fortiguard-category", "address", "address6", "wildcard-fqdn", "regex"] ssl_exempt_wildcard_fqdn: description: - Exempt servers by wildcard FQDN. required: false ssl_server: description: - EXPERTS ONLY! KNOWLEDGE OF FMGR JSON API IS REQUIRED! - List of multiple child objects to be added. Expects a list of dictionaries. - Dictionaries must use FortiManager API parameters, not the ansible ones listed below. - If submitted, all other prefixed sub-parameters ARE IGNORED. - This object is MUTUALLY EXCLUSIVE with its options. - We expect that you know what you are doing with these list parameters, and are leveraging the JSON API Guide. 
- WHEN IN DOUBT, USE THE SUB OPTIONS BELOW INSTEAD TO CREATE OBJECTS WITH MULTIPLE TASKS required: false ssl_server_ftps_client_cert_request: description: - Action based on client certificate request failure during the FTPS handshake. - choice | bypass | Bypass. - choice | inspect | Inspect. - choice | block | Block. required: false choices: ["bypass", "inspect", "block"] ssl_server_https_client_cert_request: description: - Action based on client certificate request failure during the HTTPS handshake. - choice | bypass | Bypass. - choice | inspect | Inspect. - choice | block | Block. required: false choices: ["bypass", "inspect", "block"] ssl_server_imaps_client_cert_request: description: - Action based on client certificate request failure during the IMAPS handshake. - choice | bypass | Bypass. - choice | inspect | Inspect. - choice | block | Block. required: false choices: ["bypass", "inspect", "block"] ssl_server_ip: description: - IPv4 address of the SSL server. required: false ssl_server_pop3s_client_cert_request: description: - Action based on client certificate request failure during the POP3S handshake. - choice | bypass | Bypass. - choice | inspect | Inspect. - choice | block | Block. required: false choices: ["bypass", "inspect", "block"] ssl_server_smtps_client_cert_request: description: - Action based on client certificate request failure during the SMTPS handshake. - choice | bypass | Bypass. - choice | inspect | Inspect. - choice | block | Block. required: false choices: ["bypass", "inspect", "block"] ssl_server_ssl_other_client_cert_request: description: - Action based on client certificate request failure during an SSL protocol handshake. - choice | bypass | Bypass. - choice | inspect | Inspect. - choice | block | Block. 
required: false choices: ["bypass", "inspect", "block"] ''' EXAMPLES = ''' - name: DELETE Profile fmgr_secprof_ssl_ssh: name: Ansible_SSL_SSH_Profile mode: delete - name: CREATE Profile fmgr_secprof_ssl_ssh: name: Ansible_SSL_SSH_Profile comment: "Created by Ansible Module TEST" mode: set mapi_over_https: enable rpc_over_https: enable server_cert_mode: replace ssl_anomalies_log: enable ssl_exemptions_log: enable use_ssl_server: enable whitelist: enable ''' RETURN = """ api_result: description: full API response, includes status code and message returned: always type: str """ from ansible.module_utils.basic import AnsibleModule, env_fallback from ansible.module_utils.connection import Connection from ansible.module_utils.network.fortimanager.fortimanager import FortiManagerHandler from ansible.module_utils.network.fortimanager.common import FMGBaseException from ansible.module_utils.network.fortimanager.common import FMGRCommon from ansible.module_utils.network.fortimanager.common import FMGRMethods from ansible.module_utils.network.fortimanager.common import DEFAULT_RESULT_OBJ from ansible.module_utils.network.fortimanager.common import FAIL_SOCKET_MSG from ansible.module_utils.network.fortimanager.common import prepare_dict from ansible.module_utils.network.fortimanager.common import scrub_dict ############### # START METHODS ############### def fmgr_firewall_ssl_ssh_profile_modify(fmgr, paramgram): """ :param fmgr: The fmgr object instance from fortimanager.py :type fmgr: class object :param paramgram: The formatted dictionary of options to process :type paramgram: dict :return: The response from the FortiManager :rtype: dict """ mode = paramgram["mode"] adom = paramgram["adom"] response = DEFAULT_RESULT_OBJ url = "" datagram = {} # EVAL THE MODE PARAMETER FOR SET OR ADD if mode in ['set', 'add', 'update']: url = '/pm/config/adom/{adom}/obj/firewall/ssl-ssh-profile'.format(adom=adom) datagram = scrub_dict(prepare_dict(paramgram)) # EVAL THE MODE PARAMETER FOR 
DELETE
    elif mode == "delete":
        # SET THE CORRECT URL FOR DELETE
        url = '/pm/config/adom/{adom}/obj/firewall/ssl-ssh-profile/{name}'.format(adom=adom, name=paramgram["name"])
        datagram = {}

    response = fmgr.process_request(url, datagram, paramgram["mode"])

    return response


#############
# END METHODS
#############


def main():
    """Module entry point.

    Builds the FortiManager "paramgram" from the flattened Ansible
    arguments and pushes the SSL/SSH inspection profile change via the
    FMG API handler.
    """
    argument_spec = dict(
        adom=dict(type="str", default="root"),
        mode=dict(choices=["add", "set", "delete", "update"], type="str", default="add"),

        whitelist=dict(required=False, type="str", choices=["disable", "enable"]),
        use_ssl_server=dict(required=False, type="str", choices=["disable", "enable"]),
        untrusted_caname=dict(required=False, type="str"),
        ssl_exemptions_log=dict(required=False, type="str", choices=["disable", "enable"]),
        ssl_anomalies_log=dict(required=False, type="str", choices=["disable", "enable"]),
        server_cert_mode=dict(required=False, type="str", choices=["re-sign", "replace"]),
        server_cert=dict(required=False, type="str"),
        rpc_over_https=dict(required=False, type="str", choices=["disable", "enable"]),
        name=dict(required=False, type="str"),
        mapi_over_https=dict(required=False, type="str", choices=["disable", "enable"]),
        comment=dict(required=False, type="str"),
        caname=dict(required=False, type="str"),
        ftps=dict(required=False, type="list"),
        ftps_allow_invalid_server_cert=dict(required=False, type="str", choices=["disable", "enable"]),
        ftps_client_cert_request=dict(required=False, type="str", choices=["bypass", "inspect", "block"]),
        ftps_ports=dict(required=False, type="str"),
        ftps_status=dict(required=False, type="str", choices=["disable", "deep-inspection"]),
        ftps_unsupported_ssl=dict(required=False, type="str", choices=["bypass", "inspect", "block"]),
        ftps_untrusted_cert=dict(required=False, type="str", choices=["allow", "block", "ignore"]),
        https=dict(required=False, type="list"),
        https_allow_invalid_server_cert=dict(required=False, type="str", choices=["disable", "enable"]),
        https_client_cert_request=dict(required=False, type="str",
                                       choices=["bypass", "inspect", "block"]),
        https_ports=dict(required=False, type="str"),
        https_status=dict(required=False, type="str",
                          choices=["disable", "certificate-inspection", "deep-inspection"]),
        https_unsupported_ssl=dict(required=False, type="str", choices=["bypass", "inspect", "block"]),
        https_untrusted_cert=dict(required=False, type="str", choices=["allow", "block", "ignore"]),
        imaps=dict(required=False, type="list"),
        imaps_allow_invalid_server_cert=dict(required=False, type="str", choices=["disable", "enable"]),
        imaps_client_cert_request=dict(required=False, type="str", choices=["bypass", "inspect", "block"]),
        imaps_ports=dict(required=False, type="str"),
        imaps_status=dict(required=False, type="str", choices=["disable", "deep-inspection"]),
        imaps_unsupported_ssl=dict(required=False, type="str", choices=["bypass", "inspect", "block"]),
        imaps_untrusted_cert=dict(required=False, type="str", choices=["allow", "block", "ignore"]),
        pop3s=dict(required=False, type="list"),
        pop3s_allow_invalid_server_cert=dict(required=False, type="str", choices=["disable", "enable"]),
        pop3s_client_cert_request=dict(required=False, type="str", choices=["bypass", "inspect", "block"]),
        pop3s_ports=dict(required=False, type="str"),
        pop3s_status=dict(required=False, type="str", choices=["disable", "deep-inspection"]),
        pop3s_unsupported_ssl=dict(required=False, type="str", choices=["bypass", "inspect", "block"]),
        pop3s_untrusted_cert=dict(required=False, type="str", choices=["allow", "block", "ignore"]),
        smtps=dict(required=False, type="list"),
        smtps_allow_invalid_server_cert=dict(required=False, type="str", choices=["disable", "enable"]),
        smtps_client_cert_request=dict(required=False, type="str", choices=["bypass", "inspect", "block"]),
        smtps_ports=dict(required=False, type="str"),
        smtps_status=dict(required=False, type="str", choices=["disable", "deep-inspection"]),
        smtps_unsupported_ssl=dict(required=False, type="str", choices=["bypass", "inspect", "block"]),
        smtps_untrusted_cert=dict(required=False, type="str", choices=["allow", "block", "ignore"]),
        ssh=dict(required=False, type="list"),
        ssh_inspect_all=dict(required=False, type="str", choices=["disable", "deep-inspection"]),
        ssh_ports=dict(required=False, type="str"),
        ssh_ssh_algorithm=dict(required=False, type="str", choices=["compatible", "high-encryption"]),
        ssh_ssh_policy_check=dict(required=False, type="str", choices=["disable", "enable"]),
        ssh_ssh_tun_policy_check=dict(required=False, type="str", choices=["disable", "enable"]),
        ssh_status=dict(required=False, type="str", choices=["disable", "deep-inspection"]),
        ssh_unsupported_version=dict(required=False, type="str", choices=["block", "bypass"]),
        ssl=dict(required=False, type="list"),
        ssl_allow_invalid_server_cert=dict(required=False, type="str", choices=["disable", "enable"]),
        ssl_client_cert_request=dict(required=False, type="str", choices=["bypass", "inspect", "block"]),
        ssl_inspect_all=dict(required=False, type="str",
                             choices=["disable", "certificate-inspection", "deep-inspection"]),
        ssl_unsupported_ssl=dict(required=False, type="str", choices=["bypass", "inspect", "block"]),
        ssl_untrusted_cert=dict(required=False, type="str", choices=["allow", "block", "ignore"]),
        ssl_exempt=dict(required=False, type="list"),
        ssl_exempt_address=dict(required=False, type="str"),
        ssl_exempt_address6=dict(required=False, type="str"),
        ssl_exempt_fortiguard_category=dict(required=False, type="str"),
        ssl_exempt_regex=dict(required=False, type="str"),
        ssl_exempt_type=dict(required=False, type="str",
                             choices=["fortiguard-category", "address", "address6", "wildcard-fqdn", "regex"]),
        ssl_exempt_wildcard_fqdn=dict(required=False, type="str"),
        ssl_server=dict(required=False, type="list"),
        ssl_server_ftps_client_cert_request=dict(required=False, type="str",
                                                 choices=["bypass", "inspect", "block"]),
        ssl_server_https_client_cert_request=dict(required=False, type="str",
                                                  choices=["bypass", "inspect", "block"]),
        ssl_server_imaps_client_cert_request=dict(required=False, type="str",
                                                  choices=["bypass", "inspect", "block"]),
        ssl_server_ip=dict(required=False, type="str"),
        ssl_server_pop3s_client_cert_request=dict(required=False, type="str",
                                                  choices=["bypass", "inspect", "block"]),
        ssl_server_smtps_client_cert_request=dict(required=False, type="str",
                                                  choices=["bypass", "inspect", "block"]),
        ssl_server_ssl_other_client_cert_request=dict(required=False, type="str",
                                                      choices=["bypass", "inspect", "block"]),

    )

    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False, )

    # MODULE PARAMGRAM
    # Flattened ansible args mapped onto the hyphenated FMG API field names.
    paramgram = {
        "mode": module.params["mode"],
        "adom": module.params["adom"],
        "whitelist": module.params["whitelist"],
        "use-ssl-server": module.params["use_ssl_server"],
        "untrusted-caname": module.params["untrusted_caname"],
        "ssl-exemptions-log": module.params["ssl_exemptions_log"],
        "ssl-anomalies-log": module.params["ssl_anomalies_log"],
        "server-cert-mode": module.params["server_cert_mode"],
        "server-cert": module.params["server_cert"],
        "rpc-over-https": module.params["rpc_over_https"],
        "name": module.params["name"],
        "mapi-over-https": module.params["mapi_over_https"],
        "comment": module.params["comment"],
        "caname": module.params["caname"],
        "ftps": {
            "allow-invalid-server-cert": module.params["ftps_allow_invalid_server_cert"],
            "client-cert-request": module.params["ftps_client_cert_request"],
            "ports": module.params["ftps_ports"],
            "status": module.params["ftps_status"],
            "unsupported-ssl": module.params["ftps_unsupported_ssl"],
            "untrusted-cert": module.params["ftps_untrusted_cert"],
        },
        "https": {
            "allow-invalid-server-cert": module.params["https_allow_invalid_server_cert"],
            "client-cert-request": module.params["https_client_cert_request"],
            "ports": module.params["https_ports"],
            "status": module.params["https_status"],
            "unsupported-ssl": module.params["https_unsupported_ssl"],
            "untrusted-cert": module.params["https_untrusted_cert"],
        },
        "imaps": {
            "allow-invalid-server-cert": module.params["imaps_allow_invalid_server_cert"],
            "client-cert-request": module.params["imaps_client_cert_request"],
            "ports": module.params["imaps_ports"],
            "status": module.params["imaps_status"],
            "unsupported-ssl": module.params["imaps_unsupported_ssl"],
            "untrusted-cert": module.params["imaps_untrusted_cert"],
        },
        "pop3s": {
            "allow-invalid-server-cert": module.params["pop3s_allow_invalid_server_cert"],
            "client-cert-request": module.params["pop3s_client_cert_request"],
            "ports": module.params["pop3s_ports"],
            "status": module.params["pop3s_status"],
            "unsupported-ssl": module.params["pop3s_unsupported_ssl"],
            "untrusted-cert": module.params["pop3s_untrusted_cert"],
        },
        "smtps": {
            "allow-invalid-server-cert": module.params["smtps_allow_invalid_server_cert"],
            "client-cert-request": module.params["smtps_client_cert_request"],
            "ports": module.params["smtps_ports"],
            "status": module.params["smtps_status"],
            "unsupported-ssl": module.params["smtps_unsupported_ssl"],
            "untrusted-cert": module.params["smtps_untrusted_cert"],
        },
        "ssh": {
            "inspect-all": module.params["ssh_inspect_all"],
            "ports": module.params["ssh_ports"],
            "ssh-algorithm": module.params["ssh_ssh_algorithm"],
            "ssh-policy-check": module.params["ssh_ssh_policy_check"],
            "ssh-tun-policy-check": module.params["ssh_ssh_tun_policy_check"],
            "status": module.params["ssh_status"],
            "unsupported-version": module.params["ssh_unsupported_version"],
        },
        "ssl": {
            "allow-invalid-server-cert": module.params["ssl_allow_invalid_server_cert"],
            "client-cert-request": module.params["ssl_client_cert_request"],
            "inspect-all": module.params["ssl_inspect_all"],
            "unsupported-ssl": module.params["ssl_unsupported_ssl"],
            "untrusted-cert": module.params["ssl_untrusted_cert"],
        },
        "ssl-exempt": {
            "address": module.params["ssl_exempt_address"],
            "address6": module.params["ssl_exempt_address6"],
            "fortiguard-category": module.params["ssl_exempt_fortiguard_category"],
            "regex": module.params["ssl_exempt_regex"],
            "type": module.params["ssl_exempt_type"],
            "wildcard-fqdn": module.params["ssl_exempt_wildcard_fqdn"],
        },
        "ssl-server": {
            "ftps-client-cert-request": module.params["ssl_server_ftps_client_cert_request"],
            "https-client-cert-request": module.params["ssl_server_https_client_cert_request"],
            "imaps-client-cert-request": module.params["ssl_server_imaps_client_cert_request"],
            "ip": module.params["ssl_server_ip"],
            "pop3s-client-cert-request": module.params["ssl_server_pop3s_client_cert_request"],
            "smtps-client-cert-request": module.params["ssl_server_smtps_client_cert_request"],
            "ssl-other-client-cert-request": module.params["ssl_server_ssl_other_client_cert_request"],
        }
    }
    module.paramgram = paramgram
    fmgr = None
    # A persistent httpapi connection is mandatory for FMG modules.
    if module._socket_path:
        connection = Connection(module._socket_path)
        fmgr = FortiManagerHandler(connection, module)
        fmgr.tools = FMGRCommon()
    else:
        module.fail_json(**FAIL_SOCKET_MSG)

    # Child objects (ftps, https, ...) may be supplied either as raw API
    # lists or via the flattened sub-options; the override helper resolves
    # that mutual exclusivity.
    list_overrides = ['ftps', 'https', 'imaps', 'pop3s', 'smtps', 'ssh', 'ssl', 'ssl-exempt', 'ssl-server']
    paramgram = fmgr.tools.paramgram_child_list_override(list_overrides=list_overrides,
                                                         paramgram=paramgram, module=module)

    results = DEFAULT_RESULT_OBJ
    try:
        results = fmgr_firewall_ssl_ssh_profile_modify(fmgr, paramgram)
        fmgr.govern_response(module=module, results=results,
                             ansible_facts=fmgr.construct_ansible_facts(results, module.params, paramgram))

    except Exception as err:
        raise FMGBaseException(err)

    return module.exit_json(**results[1])


if __name__ == "__main__":
    main()
gpl-3.0
rlbabyuk/integration_tests
fixtures/merkyl.py
2
3214
import pytest
from urlparse import urlparse

from fixtures.artifactor_plugin import fire_art_test_hook
from utils.conf import env


class MerkylInspector(object):
    """Adapter around the merkyl artifactor hooks for use inside a test.

    Wraps the pytest ``request`` object so tests can add, fetch and search
    appliance log files tracked by the merkyl process without firing the
    artifactor hooks by hand.
    """

    def __init__(self, request):
        """ A simple adapter to aid in Merkyl Log Inspection during a test.

        This class is really only useful during a test and is designed to
        abstract away accessing the request object. The hooks which are fired
        can be done so during the test without this class/fixture, this is
        merely a convenience and does nothing special.

        Args:
            request: the pytest request object of the running test.
        """
        self.node = request.node
        # netloc (host[:port]) of the appliance the test runs against
        self.ip = urlparse(env['base_url']).netloc

    def get_log(self, log_name):
        """ A simple getter for log files.

        Returns the cached content of a particular log

        Args:
            log_name: Full path to the log file wishing to be received.
        """
        res = fire_art_test_hook(
            self.node, 'get_log_merkyl', ip=self.ip,
            filename=log_name, grab_result=True)
        return res['merkyl_content']

    def add_log(self, log_name):
        """ Adds a log file to the merkyl process.

        This function adds a log file path to the merkyl process on the
        appliance. This is relevant only for the duration of the test. At
        the end of the test, the file is removed from the merkyl tracker.

        Note that this is a blocking call, ie, we ensure that the file is
        being logged by merkyl, before we continue. This is important and
        prevents the file_add operation being queued and processes which
        generate log information activating before the log is being
        monitored. This is achieved using the grab_result switch, but in
        fact, nothing will be received.

        It is worth noting that the file path must be "discoverable" by
        merkyl. This may mean editing the allowed_files prior to deploying
        merkyl.

        Args:
            log_name: Full path to the log file wishing to be monitored.
        """
        fire_art_test_hook(
            self.node, 'add_log_merkyl', ip=self.ip,
            filename=log_name, grab_result=True)

    def search_log(self, needle, log_name):
        """ A simple search, test if needle is in cached log_contents.

        Note that this does not trawl the previous contents of the file,
        but only looks at the log information which has been gathered
        since merkyl was tracking the file.

        Args:
            needle: substring to look for.
            log_name: Full path to the tracked log file.

        Returns:
            True if ``needle`` occurs in the gathered contents, else False.
        """
        # Membership test directly yields the boolean; the original
        # if/else returning True/False explicitly was redundant.
        return needle in self.get_log(log_name)


@pytest.fixture(scope='function')
def merkyl_inspector(request):
    """ Provides a MerkylInspector instance.

    This fixture is used to gain access to a relevant MerkylInspector
    instance.

    Example usage is below:

    .. code-block:: python

        def test_test(merkyl_inspector):
            merkyl_inspector.add_log('/path/to/log/file')
            # Do something
            if merkyl_inspector.search_log('needle', '/path/to/log/file'):
                print(merkyl_inspector.get_log('/path/to/log/file'))
    """
    return MerkylInspector(request)
gpl-2.0
xq262144/hue
desktop/core/ext-py/Pygments-1.3.1/pygments/cmdline.py
75
13055
# -*- coding: utf-8 -*-
"""
    pygments.cmdline
    ~~~~~~~~~~~~~~~~

    Command line interface.

    :copyright: Copyright 2006-2010 by the Pygments team, see AUTHORS.
    :license: BSD, see LICENSE for details.
"""
import sys
import getopt
from textwrap import dedent

from pygments import __version__, highlight
from pygments.util import ClassNotFound, OptionError, docstring_headline
from pygments.lexers import get_all_lexers, get_lexer_by_name, get_lexer_for_filename, \
     find_lexer_class, guess_lexer, TextLexer
from pygments.formatters import get_all_formatters, get_formatter_by_name, \
     get_formatter_for_filename, find_formatter_class, \
     TerminalFormatter  # pylint:disable-msg=E0611
from pygments.filters import get_all_filters, find_filter_class
from pygments.styles import get_all_styles, get_style_by_name


USAGE = """\
Usage: %s [-l <lexer> | -g] [-F <filter>[:<options>]] [-f <formatter>]
          [-O <options>] [-P <option=value>] [-o <outfile>] [<infile>]

       %s -S <style> -f <formatter> [-a <arg>] [-O <options>] [-P <option=value>]
       %s -L [<which> ...]
       %s -N <filename>
       %s -H <type> <name>
       %s -h | -V

Highlight the input file and write the result to <outfile>.

If no input file is given, use stdin, if -o is not given, use stdout.

<lexer> is a lexer name (query all lexer names with -L). If -l is not
given, the lexer is guessed from the extension of the input file name
(this obviously doesn't work if the input is stdin). If -g is passed,
attempt to guess the lexer from the file contents, or pass through as
plain text if this fails (this can work for stdin).

Likewise, <formatter> is a formatter name, and will be guessed from
the extension of the output file name. If no output file is given,
the terminal formatter will be used by default.

With the -O option, you can give the lexer and formatter a comma-
separated list of options, e.g. ``-O bg=light,python=cool``.

The -P option adds lexer and formatter options like the -O option, but
you can only give one option per -P. That way, the option value may
contain commas and equals signs, which it can't with -O, e.g.
``-P "heading=Pygments, the Python highlighter".

With the -F option, you can add filters to the token stream, you can
give options in the same way as for -O after a colon (note: there must
not be spaces around the colon).

The -O, -P and -F options can be given multiple times.

With the -S option, print out style definitions for style <style>
for formatter <formatter>. The argument given by -a is formatter
dependent.

The -L option lists lexers, formatters, styles or filters -- set
`which` to the thing you want to list (e.g. "styles"), or omit it to
list everything.

The -N option guesses and prints out a lexer name based solely on
the given filename. It does not take input or highlight anything.
If no specific lexer can be determined "text" is returned.

The -H option prints detailed help for the object <name> of type <type>,
where <type> is one of "lexer", "formatter" or "filter".

The -h option prints this help.
The -V option prints the package version.
"""


def _parse_options(o_strs):
    """Parse -O style option strings into a dict.

    Each string is a comma-separated list of ``key=value`` pairs; a bare
    key (no ``=``) is stored with the value True.
    """
    opts = {}
    if not o_strs:
        return opts
    for o_str in o_strs:
        if not o_str:
            continue
        o_args = o_str.split(',')
        for o_arg in o_args:
            o_arg = o_arg.strip()
            try:
                o_key, o_val = o_arg.split('=')
                o_key = o_key.strip()
                o_val = o_val.strip()
            except ValueError:
                # no '=' present: treat as a boolean flag
                opts[o_arg] = True
            else:
                opts[o_key] = o_val
    return opts


def _parse_filters(f_strs):
    """Parse -F filter specs ``name[:options]`` into (name, options) tuples."""
    filters = []
    if not f_strs:
        return filters
    for f_str in f_strs:
        if ':' in f_str:
            fname, fopts = f_str.split(':', 1)
            filters.append((fname, _parse_options([fopts])))
        else:
            filters.append((f_str, {}))
    return filters


def _print_help(what, name):
    """Print the docstring of the lexer/formatter/filter called *name*."""
    try:
        if what == 'lexer':
            cls = find_lexer_class(name)
            print "Help on the %s lexer:" % cls.name
            print dedent(cls.__doc__)
        elif what == 'formatter':
            cls = find_formatter_class(name)
            print "Help on the %s formatter:" % cls.name
            print dedent(cls.__doc__)
        elif what == 'filter':
            cls = find_filter_class(name)
            print "Help on the %s filter:" % name
            print dedent(cls.__doc__)
    except AttributeError:
        # find_* returned None; cls.name / cls.__doc__ raised
        print >>sys.stderr, "%s not found!" % what


def _print_list(what):
    """List all known lexers, formatters, filters or styles (for -L)."""
    if what == 'lexer':
        print
        print "Lexers:"
        print "~~~~~~~"
        info = []
        for fullname, names, exts, _ in get_all_lexers():
            tup = (', '.join(names)+':', fullname,
                   exts and '(filenames ' + ', '.join(exts) + ')' or '')
            info.append(tup)
        info.sort()
        for i in info:
            print ('* %s\n %s %s') % i
    elif what == 'formatter':
        print
        print "Formatters:"
        print "~~~~~~~~~~~"
        info = []
        for cls in get_all_formatters():
            doc = docstring_headline(cls)
            tup = (', '.join(cls.aliases) + ':', doc,
                   cls.filenames and '(filenames ' + ', '.join(cls.filenames) + ')' or '')
            info.append(tup)
        info.sort()
        for i in info:
            print ('* %s\n %s %s') % i
    elif what == 'filter':
        print
        print "Filters:"
        print "~~~~~~~~"
        for name in get_all_filters():
            cls = find_filter_class(name)
            print "* " + name + ':'
            print " %s" % docstring_headline(cls)
    elif what == 'style':
        print
        print "Styles:"
        print "~~~~~~~"
        for name in get_all_styles():
            cls = get_style_by_name(name)
            print "* " + name + ':'
            print " %s" % docstring_headline(cls)


def main(args=sys.argv):
    """
    Main command line entry point.

    Returns a process exit code: 0 on success, 1 on a runtime error,
    2 on a usage error.
    """
    # pylint: disable-msg=R0911,R0912,R0915

    usage = USAGE % ((args[0],) * 6)

    try:
        popts, args = getopt.getopt(args[1:], "l:f:F:o:O:P:LS:a:N:hVHg")
    except getopt.GetoptError, err:
        print >>sys.stderr, usage
        return 2
    opts = {}
    O_opts = []
    P_opts = []
    F_opts = []
    # -O/-P/-F are repeatable; collect them separately while the last
    # value of every flag lands in `opts`
    for opt, arg in popts:
        if opt == '-O':
            O_opts.append(arg)
        elif opt == '-P':
            P_opts.append(arg)
        elif opt == '-F':
            F_opts.append(arg)
        opts[opt] = arg

    if not opts and not args:
        print usage
        return 0

    if opts.pop('-h', None) is not None:
        print usage
        return 0

    if opts.pop('-V', None) is not None:
        print 'Pygments version %s, (c) 2006-2008 by Georg Brandl.' % __version__
        return 0

    # handle ``pygmentize -L``
    L_opt = opts.pop('-L', None)
    if L_opt is not None:
        if opts:
            print >>sys.stderr, usage
            return 2

        # print version
        main(['', '-V'])
        if not args:
            args = ['lexer', 'formatter', 'filter', 'style']
        for arg in args:
            # accept plural forms like "styles"
            _print_list(arg.rstrip('s'))
        return 0

    # handle ``pygmentize -H``
    H_opt = opts.pop('-H', None)
    if H_opt is not None:
        if opts or len(args) != 2:
            print >>sys.stderr, usage
            return 2

        what, name = args
        if what not in ('lexer', 'formatter', 'filter'):
            print >>sys.stderr, usage
            return 2

        _print_help(what, name)
        return 0

    # parse -O options
    parsed_opts = _parse_options(O_opts)
    opts.pop('-O', None)

    # parse -P options (one key=value per flag, so values may contain
    # commas and equals signs)
    for p_opt in P_opts:
        try:
            name, value = p_opt.split('=', 1)
        except ValueError:
            parsed_opts[p_opt] = True
        else:
            parsed_opts[name] = value
    opts.pop('-P', None)

    # handle ``pygmentize -N``
    infn = opts.pop('-N', None)
    if infn is not None:
        try:
            lexer = get_lexer_for_filename(infn, **parsed_opts)
        except ClassNotFound, err:
            # fall back to plain text when no lexer matches the filename
            lexer = TextLexer()
        except OptionError, err:
            print >>sys.stderr, 'Error:', err
            return 1

        print lexer.aliases[0]
        return 0

    # handle ``pygmentize -S``
    S_opt = opts.pop('-S', None)
    a_opt = opts.pop('-a', None)
    if S_opt is not None:
        f_opt = opts.pop('-f', None)
        if not f_opt:
            print >>sys.stderr, usage
            return 2
        if opts or args:
            print >>sys.stderr, usage
            return 2

        try:
            parsed_opts['style'] = S_opt
            fmter = get_formatter_by_name(f_opt, **parsed_opts)
        except ClassNotFound, err:
            print >>sys.stderr, err
            return 1

        arg = a_opt or ''
        try:
            print fmter.get_style_defs(arg)
        except Exception, err:
            print >>sys.stderr, 'Error:', err
            return 1
        return 0

    # if no -S is given, -a is not allowed
    if a_opt is not None:
        print >>sys.stderr, usage
        return 2

    # parse -F options
    F_opts = _parse_filters(F_opts)
    opts.pop('-F', None)

    # select formatter
    outfn = opts.pop('-o', None)
    fmter = opts.pop('-f', None)
    if fmter:
        try:
            fmter = get_formatter_by_name(fmter, **parsed_opts)
        except (OptionError, ClassNotFound), err:
            print >>sys.stderr, 'Error:', err
            return 1

    if outfn:
        if not fmter:
            # no -f given: guess the formatter from the output filename
            try:
                fmter = get_formatter_for_filename(outfn, **parsed_opts)
            except (OptionError, ClassNotFound), err:
                print >>sys.stderr, 'Error:', err
                return 1
        try:
            outfile = open(outfn, 'wb')
        except Exception, err:
            print >>sys.stderr, 'Error: cannot open outfile:', err
            return 1
    else:
        if not fmter:
            fmter = TerminalFormatter(**parsed_opts)
        outfile = sys.stdout

    # select lexer
    lexer = opts.pop('-l', None)
    if lexer:
        try:
            lexer = get_lexer_by_name(lexer, **parsed_opts)
        except (OptionError, ClassNotFound), err:
            print >>sys.stderr, 'Error:', err
            return 1

    if args:
        if len(args) > 1:
            print >>sys.stderr, usage
            return 2

        infn = args[0]
        try:
            code = open(infn, 'rb').read()
        except Exception, err:
            print >>sys.stderr, 'Error: cannot read infile:', err
            return 1

        if not lexer:
            try:
                lexer = get_lexer_for_filename(infn, code, **parsed_opts)
            except ClassNotFound, err:
                if '-g' in opts:
                    # -g: guess from contents, else pass through as text
                    try:
                        lexer = guess_lexer(code)
                    except ClassNotFound:
                        lexer = TextLexer()
                else:
                    print >>sys.stderr, 'Error:', err
                    return 1
            except OptionError, err:
                print >>sys.stderr, 'Error:', err
                return 1

    else:
        # reading from stdin: a lexer must be given via -l or guessed via -g
        if '-g' in opts:
            code = sys.stdin.read()
            try:
                lexer = guess_lexer(code)
            except ClassNotFound:
                lexer = TextLexer()
        elif not lexer:
            print >>sys.stderr, 'Error: no lexer name given and reading ' + \
                                'from stdin (try using -g or -l <lexer>)'
            return 2
        else:
            code = sys.stdin.read()

    # No encoding given? Use latin1 if output file given,
    # stdin/stdout encoding otherwise.
    # (This is a compromise, I'm not too happy with it...)
    if 'encoding' not in parsed_opts and 'outencoding' not in parsed_opts:
        if outfn:
            # encoding pass-through
            fmter.encoding = 'latin1'
        else:
            if sys.version_info < (3,):
                # use terminal encoding; Python 3's terminals already do that
                lexer.encoding = getattr(sys.stdin, 'encoding', None) or 'ascii'
                fmter.encoding = getattr(sys.stdout, 'encoding', None) or 'ascii'

    # ... and do it!
    try:
        # process filters
        for fname, fopts in F_opts:
            lexer.add_filter(fname, **fopts)
        highlight(code, lexer, fmter, outfile)
    except Exception, err:
        import traceback
        info = traceback.format_exception(*sys.exc_info())
        msg = info[-1].strip()
        if len(info) >= 3:
            # extract relevant file and position info
            msg += '\n (f%s)' % info[-2].split('\n')[0].strip()[1:]
        print >>sys.stderr
        print >>sys.stderr, '*** Error while highlighting:'
        print >>sys.stderr, msg
        return 1
    return 0
apache-2.0
WhireCrow/openwrt-mt7620
staging_dir/host/lib/python2.7/unittest/suite.py
243
9809
"""TestSuite""" import sys from . import case from . import util __unittest = True def _call_if_exists(parent, attr): func = getattr(parent, attr, lambda: None) func() class BaseTestSuite(object): """A simple test suite that doesn't provide class or module shared fixtures. """ def __init__(self, tests=()): self._tests = [] self.addTests(tests) def __repr__(self): return "<%s tests=%s>" % (util.strclass(self.__class__), list(self)) def __eq__(self, other): if not isinstance(other, self.__class__): return NotImplemented return list(self) == list(other) def __ne__(self, other): return not self == other # Can't guarantee hash invariant, so flag as unhashable __hash__ = None def __iter__(self): return iter(self._tests) def countTestCases(self): cases = 0 for test in self: cases += test.countTestCases() return cases def addTest(self, test): # sanity checks if not hasattr(test, '__call__'): raise TypeError("{} is not callable".format(repr(test))) if isinstance(test, type) and issubclass(test, (case.TestCase, TestSuite)): raise TypeError("TestCases and TestSuites must be instantiated " "before passing them to addTest()") self._tests.append(test) def addTests(self, tests): if isinstance(tests, basestring): raise TypeError("tests must be an iterable of tests, not a string") for test in tests: self.addTest(test) def run(self, result): for test in self: if result.shouldStop: break test(result) return result def __call__(self, *args, **kwds): return self.run(*args, **kwds) def debug(self): """Run the tests without collecting errors in a TestResult""" for test in self: test.debug() class TestSuite(BaseTestSuite): """A test suite is a composite test consisting of a number of TestCases. For use, create an instance of TestSuite, then add test case instances. When all tests have been added, the suite can be passed to a test runner, such as TextTestRunner. It will run the individual test cases in the order in which they were added, aggregating the results. 
When subclassing, do not forget to call the base class constructor. """ def run(self, result, debug=False): topLevel = False if getattr(result, '_testRunEntered', False) is False: result._testRunEntered = topLevel = True for test in self: if result.shouldStop: break if _isnotsuite(test): self._tearDownPreviousClass(test, result) self._handleModuleFixture(test, result) self._handleClassSetUp(test, result) result._previousTestClass = test.__class__ if (getattr(test.__class__, '_classSetupFailed', False) or getattr(result, '_moduleSetUpFailed', False)): continue if not debug: test(result) else: test.debug() if topLevel: self._tearDownPreviousClass(None, result) self._handleModuleTearDown(result) result._testRunEntered = False return result def debug(self): """Run the tests without collecting errors in a TestResult""" debug = _DebugResult() self.run(debug, True) ################################ def _handleClassSetUp(self, test, result): previousClass = getattr(result, '_previousTestClass', None) currentClass = test.__class__ if currentClass == previousClass: return if result._moduleSetUpFailed: return if getattr(currentClass, "__unittest_skip__", False): return try: currentClass._classSetupFailed = False except TypeError: # test may actually be a function # so its class will be a builtin-type pass setUpClass = getattr(currentClass, 'setUpClass', None) if setUpClass is not None: _call_if_exists(result, '_setupStdout') try: setUpClass() except Exception as e: if isinstance(result, _DebugResult): raise currentClass._classSetupFailed = True className = util.strclass(currentClass) errorName = 'setUpClass (%s)' % className self._addClassOrModuleLevelException(result, e, errorName) finally: _call_if_exists(result, '_restoreStdout') def _get_previous_module(self, result): previousModule = None previousClass = getattr(result, '_previousTestClass', None) if previousClass is not None: previousModule = previousClass.__module__ return previousModule def _handleModuleFixture(self, 
test, result): previousModule = self._get_previous_module(result) currentModule = test.__class__.__module__ if currentModule == previousModule: return self._handleModuleTearDown(result) result._moduleSetUpFailed = False try: module = sys.modules[currentModule] except KeyError: return setUpModule = getattr(module, 'setUpModule', None) if setUpModule is not None: _call_if_exists(result, '_setupStdout') try: setUpModule() except Exception, e: if isinstance(result, _DebugResult): raise result._moduleSetUpFailed = True errorName = 'setUpModule (%s)' % currentModule self._addClassOrModuleLevelException(result, e, errorName) finally: _call_if_exists(result, '_restoreStdout') def _addClassOrModuleLevelException(self, result, exception, errorName): error = _ErrorHolder(errorName) addSkip = getattr(result, 'addSkip', None) if addSkip is not None and isinstance(exception, case.SkipTest): addSkip(error, str(exception)) else: result.addError(error, sys.exc_info()) def _handleModuleTearDown(self, result): previousModule = self._get_previous_module(result) if previousModule is None: return if result._moduleSetUpFailed: return try: module = sys.modules[previousModule] except KeyError: return tearDownModule = getattr(module, 'tearDownModule', None) if tearDownModule is not None: _call_if_exists(result, '_setupStdout') try: tearDownModule() except Exception as e: if isinstance(result, _DebugResult): raise errorName = 'tearDownModule (%s)' % previousModule self._addClassOrModuleLevelException(result, e, errorName) finally: _call_if_exists(result, '_restoreStdout') def _tearDownPreviousClass(self, test, result): previousClass = getattr(result, '_previousTestClass', None) currentClass = test.__class__ if currentClass == previousClass: return if getattr(previousClass, '_classSetupFailed', False): return if getattr(result, '_moduleSetUpFailed', False): return if getattr(previousClass, "__unittest_skip__", False): return tearDownClass = getattr(previousClass, 'tearDownClass', None) if 
tearDownClass is not None: _call_if_exists(result, '_setupStdout') try: tearDownClass() except Exception, e: if isinstance(result, _DebugResult): raise className = util.strclass(previousClass) errorName = 'tearDownClass (%s)' % className self._addClassOrModuleLevelException(result, e, errorName) finally: _call_if_exists(result, '_restoreStdout') class _ErrorHolder(object): """ Placeholder for a TestCase inside a result. As far as a TestResult is concerned, this looks exactly like a unit test. Used to insert arbitrary errors into a test suite run. """ # Inspired by the ErrorHolder from Twisted: # http://twistedmatrix.com/trac/browser/trunk/twisted/trial/runner.py # attribute used by TestResult._exc_info_to_string failureException = None def __init__(self, description): self.description = description def id(self): return self.description def shortDescription(self): return None def __repr__(self): return "<ErrorHolder description=%r>" % (self.description,) def __str__(self): return self.id() def run(self, result): # could call result.addError(...) - but this test-like object # shouldn't be run anyway pass def __call__(self, result): return self.run(result) def countTestCases(self): return 0 def _isnotsuite(test): "A crude way to tell apart testcases and suites with duck-typing" try: iter(test) except TypeError: return True return False class _DebugResult(object): "Used by the TestSuite to hold previous class when running in debug." _previousTestClass = None _moduleSetUpFailed = False shouldStop = False
gpl-2.0
joaovitorsilvestre/MongographQL
tests/operators/test_operators.py
2
2901
import graphene


def format_fields(fields):
    """Return the sorted list of 'test__'-prefixed names plus the bare 'test' key."""
    prefixed = ['test__' + name for name in fields]
    prefixed.append('test')
    return sorted(prefixed)


def test_add_operators_to_field_respective_fields():
    from graphene_mongodb.fields.respective import respective_fields
    from graphene_mongodb.operators import (gen_operators_of_field, string_operators,
                                            allowed_operators, fields_string_operators)

    for field_cls, graphene_repr in respective_fields.items():
        mongo_field = field_cls()
        applied = gen_operators_of_field('test', mongo_field, graphene_repr,
                                         allowed_operators(mongo_field))

        # string-typed fields additionally receive the string-only operators
        expected = format_fields(allowed_operators(mongo_field))
        if isinstance(mongo_field, fields_string_operators):
            expected = expected + format_fields(string_operators)
        expected = set(expected)

        assert len(applied.keys()) == len(expected)
        assert sorted(list(applied.keys())) == sorted(expected)


def test_add_operators_to_field_list_field():
    from mongoengine import ListField, SortedListField
    from graphene_mongodb.operators import gen_operators_of_field, allowed_operators
    from graphene_mongodb.fields.respective import respective_special_fields, respective_fields

    for list_cls in (ListField, SortedListField):
        for item_cls, graphene_repr in respective_fields.items():
            list_field = list_cls(item_cls())

            applied = gen_operators_of_field('test', list_field,
                                             respective_special_fields[list_cls],
                                             allowed_operators(list_field))
            expected = format_fields(['size'])

            assert len(applied.keys()) == len(expected)
            assert sorted(list(applied.keys())) == sorted(expected)

            built = applied['test']('listTest', list_field)
            assert isinstance(built, graphene.List)
            # the item type of the generated List must match the graphene
            # representation of the wrapped mongoengine field
            assert isinstance(built.of_type, type(graphene_repr))


def test_add_operators_to_field_reference_field():
    from mongoengine import ReferenceField, Document, StringField
    from graphene_mongodb.operators import gen_operators_of_field, allowed_operators
    from graphene_mongodb.fields import respective_special_fields

    class Other(Document):
        name = StringField()

    class Test(Document):
        test = ReferenceField(Other)

    ref_field = Test.test
    graphene_repr = respective_special_fields[type(ref_field)]

    applied = gen_operators_of_field('test', ref_field,
                                     graphene_repr('test', ref_field),
                                     allowed_operators(ref_field))

    assert sorted(list(applied.keys())) == format_fields(['in', 'nin', 'ne'])

    assert isinstance(applied['test__in'], graphene.List)
    assert isinstance(applied['test__nin'], graphene.List)
    assert isinstance(applied['test__ne'], graphene.ID)
mit
bcoca/ansible
test/units/plugins/lookup/test_password.py
46
20778
# -*- coding: utf-8 -*- # (c) 2015, Toshio Kuratomi <tkuratomi@ansible.com> # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # Make coding more python3-ish from __future__ import (absolute_import, division, print_function) __metaclass__ = type try: import passlib from passlib.handlers import pbkdf2 except ImportError: passlib = None pbkdf2 = None import pytest from units.mock.loader import DictDataLoader from units.compat import unittest from units.compat.mock import mock_open, patch from ansible.errors import AnsibleError from ansible.module_utils.six import text_type from ansible.module_utils.six.moves import builtins from ansible.module_utils._text import to_bytes from ansible.plugins.loader import PluginLoader from ansible.plugins.lookup import password DEFAULT_CHARS = sorted([u'ascii_letters', u'digits', u".,:-_"]) DEFAULT_CANDIDATE_CHARS = u'.,:-_abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789' # Currently there isn't a new-style old_style_params_data = ( # Simple case dict( term=u'/path/to/file', filename=u'/path/to/file', params=dict(length=password.DEFAULT_LENGTH, encrypt=None, chars=DEFAULT_CHARS), candidate_chars=DEFAULT_CANDIDATE_CHARS, ), # Special characters in path dict( term=u'/path/with/embedded spaces and/file', filename=u'/path/with/embedded spaces and/file', params=dict(length=password.DEFAULT_LENGTH, encrypt=None, chars=DEFAULT_CHARS), 
candidate_chars=DEFAULT_CANDIDATE_CHARS, ), dict( term=u'/path/with/equals/cn=com.ansible', filename=u'/path/with/equals/cn=com.ansible', params=dict(length=password.DEFAULT_LENGTH, encrypt=None, chars=DEFAULT_CHARS), candidate_chars=DEFAULT_CANDIDATE_CHARS, ), dict( term=u'/path/with/unicode/くらとみ/file', filename=u'/path/with/unicode/くらとみ/file', params=dict(length=password.DEFAULT_LENGTH, encrypt=None, chars=DEFAULT_CHARS), candidate_chars=DEFAULT_CANDIDATE_CHARS, ), # Mix several special chars dict( term=u'/path/with/utf 8 and spaces/くらとみ/file', filename=u'/path/with/utf 8 and spaces/くらとみ/file', params=dict(length=password.DEFAULT_LENGTH, encrypt=None, chars=DEFAULT_CHARS), candidate_chars=DEFAULT_CANDIDATE_CHARS, ), dict( term=u'/path/with/encoding=unicode/くらとみ/file', filename=u'/path/with/encoding=unicode/くらとみ/file', params=dict(length=password.DEFAULT_LENGTH, encrypt=None, chars=DEFAULT_CHARS), candidate_chars=DEFAULT_CANDIDATE_CHARS, ), dict( term=u'/path/with/encoding=unicode/くらとみ/and spaces file', filename=u'/path/with/encoding=unicode/くらとみ/and spaces file', params=dict(length=password.DEFAULT_LENGTH, encrypt=None, chars=DEFAULT_CHARS), candidate_chars=DEFAULT_CANDIDATE_CHARS, ), # Simple parameters dict( term=u'/path/to/file length=42', filename=u'/path/to/file', params=dict(length=42, encrypt=None, chars=DEFAULT_CHARS), candidate_chars=DEFAULT_CANDIDATE_CHARS, ), dict( term=u'/path/to/file encrypt=pbkdf2_sha256', filename=u'/path/to/file', params=dict(length=password.DEFAULT_LENGTH, encrypt='pbkdf2_sha256', chars=DEFAULT_CHARS), candidate_chars=DEFAULT_CANDIDATE_CHARS, ), dict( term=u'/path/to/file chars=abcdefghijklmnop', filename=u'/path/to/file', params=dict(length=password.DEFAULT_LENGTH, encrypt=None, chars=[u'abcdefghijklmnop']), candidate_chars=u'abcdefghijklmnop', ), dict( term=u'/path/to/file chars=digits,abc,def', filename=u'/path/to/file', params=dict(length=password.DEFAULT_LENGTH, encrypt=None, chars=sorted([u'digits', u'abc', u'def'])), 
candidate_chars=u'abcdef0123456789', ), # Including comma in chars dict( term=u'/path/to/file chars=abcdefghijklmnop,,digits', filename=u'/path/to/file', params=dict(length=password.DEFAULT_LENGTH, encrypt=None, chars=sorted([u'abcdefghijklmnop', u',', u'digits'])), candidate_chars=u',abcdefghijklmnop0123456789', ), dict( term=u'/path/to/file chars=,,', filename=u'/path/to/file', params=dict(length=password.DEFAULT_LENGTH, encrypt=None, chars=[u',']), candidate_chars=u',', ), # Including = in chars dict( term=u'/path/to/file chars=digits,=,,', filename=u'/path/to/file', params=dict(length=password.DEFAULT_LENGTH, encrypt=None, chars=sorted([u'digits', u'=', u','])), candidate_chars=u',=0123456789', ), dict( term=u'/path/to/file chars=digits,abc=def', filename=u'/path/to/file', params=dict(length=password.DEFAULT_LENGTH, encrypt=None, chars=sorted([u'digits', u'abc=def'])), candidate_chars=u'abc=def0123456789', ), # Including unicode in chars dict( term=u'/path/to/file chars=digits,くらとみ,,', filename=u'/path/to/file', params=dict(length=password.DEFAULT_LENGTH, encrypt=None, chars=sorted([u'digits', u'くらとみ', u','])), candidate_chars=u',0123456789くらとみ', ), # Including only unicode in chars dict( term=u'/path/to/file chars=くらとみ', filename=u'/path/to/file', params=dict(length=password.DEFAULT_LENGTH, encrypt=None, chars=sorted([u'くらとみ'])), candidate_chars=u'くらとみ', ), # Include ':' in path dict( term=u'/path/to/file_with:colon chars=ascii_letters,digits', filename=u'/path/to/file_with:colon', params=dict(length=password.DEFAULT_LENGTH, encrypt=None, chars=sorted([u'ascii_letters', u'digits'])), candidate_chars=u'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789', ), # Including special chars in both path and chars # Special characters in path dict( term=u'/path/with/embedded spaces and/file chars=abc=def', filename=u'/path/with/embedded spaces and/file', params=dict(length=password.DEFAULT_LENGTH, encrypt=None, chars=[u'abc=def']), 
candidate_chars=u'abc=def', ), dict( term=u'/path/with/equals/cn=com.ansible chars=abc=def', filename=u'/path/with/equals/cn=com.ansible', params=dict(length=password.DEFAULT_LENGTH, encrypt=None, chars=[u'abc=def']), candidate_chars=u'abc=def', ), dict( term=u'/path/with/unicode/くらとみ/file chars=くらとみ', filename=u'/path/with/unicode/くらとみ/file', params=dict(length=password.DEFAULT_LENGTH, encrypt=None, chars=[u'くらとみ']), candidate_chars=u'くらとみ', ), ) class TestParseParameters(unittest.TestCase): def test(self): for testcase in old_style_params_data: filename, params = password._parse_parameters(testcase['term']) params['chars'].sort() self.assertEqual(filename, testcase['filename']) self.assertEqual(params, testcase['params']) def test_unrecognized_value(self): testcase = dict(term=u'/path/to/file chars=くらとみi sdfsdf', filename=u'/path/to/file', params=dict(length=password.DEFAULT_LENGTH, encrypt=None, chars=[u'くらとみ']), candidate_chars=u'くらとみ') self.assertRaises(AnsibleError, password._parse_parameters, testcase['term']) def test_invalid_params(self): testcase = dict(term=u'/path/to/file chars=くらとみi somethign_invalid=123', filename=u'/path/to/file', params=dict(length=password.DEFAULT_LENGTH, encrypt=None, chars=[u'くらとみ']), candidate_chars=u'くらとみ') self.assertRaises(AnsibleError, password._parse_parameters, testcase['term']) class TestReadPasswordFile(unittest.TestCase): def setUp(self): self.os_path_exists = password.os.path.exists def tearDown(self): password.os.path.exists = self.os_path_exists def test_no_password_file(self): password.os.path.exists = lambda x: False self.assertEqual(password._read_password_file(b'/nonexistent'), None) def test_with_password_file(self): password.os.path.exists = lambda x: True with patch.object(builtins, 'open', mock_open(read_data=b'Testing\n')) as m: self.assertEqual(password._read_password_file(b'/etc/motd'), u'Testing') class TestGenCandidateChars(unittest.TestCase): def _assert_gen_candidate_chars(self, testcase): 
expected_candidate_chars = testcase['candidate_chars'] params = testcase['params'] chars_spec = params['chars'] res = password._gen_candidate_chars(chars_spec) self.assertEqual(res, expected_candidate_chars) def test_gen_candidate_chars(self): for testcase in old_style_params_data: self._assert_gen_candidate_chars(testcase) class TestRandomPassword(unittest.TestCase): def _assert_valid_chars(self, res, chars): for res_char in res: self.assertIn(res_char, chars) def test_default(self): res = password.random_password() self.assertEqual(len(res), password.DEFAULT_LENGTH) self.assertTrue(isinstance(res, text_type)) self._assert_valid_chars(res, DEFAULT_CANDIDATE_CHARS) def test_zero_length(self): res = password.random_password(length=0) self.assertEqual(len(res), 0) self.assertTrue(isinstance(res, text_type)) self._assert_valid_chars(res, u',') def test_just_a_common(self): res = password.random_password(length=1, chars=u',') self.assertEqual(len(res), 1) self.assertEqual(res, u',') def test_free_will(self): # A Rush and Spinal Tap reference twofer res = password.random_password(length=11, chars=u'a') self.assertEqual(len(res), 11) self.assertEqual(res, 'aaaaaaaaaaa') self._assert_valid_chars(res, u'a') def test_unicode(self): res = password.random_password(length=11, chars=u'くらとみ') self._assert_valid_chars(res, u'くらとみ') self.assertEqual(len(res), 11) def test_gen_password(self): for testcase in old_style_params_data: params = testcase['params'] candidate_chars = testcase['candidate_chars'] params_chars_spec = password._gen_candidate_chars(params['chars']) password_string = password.random_password(length=params['length'], chars=params_chars_spec) self.assertEqual(len(password_string), params['length'], msg='generated password=%s has length (%s) instead of expected length (%s)' % (password_string, len(password_string), params['length'])) for char in password_string: self.assertIn(char, candidate_chars, msg='%s not found in %s from chars spect %s' % (char, 
candidate_chars, params['chars'])) class TestParseContent(unittest.TestCase): def test_empty_password_file(self): plaintext_password, salt = password._parse_content(u'') self.assertEqual(plaintext_password, u'') self.assertEqual(salt, None) def test(self): expected_content = u'12345678' file_content = expected_content plaintext_password, salt = password._parse_content(file_content) self.assertEqual(plaintext_password, expected_content) self.assertEqual(salt, None) def test_with_salt(self): expected_content = u'12345678 salt=87654321' file_content = expected_content plaintext_password, salt = password._parse_content(file_content) self.assertEqual(plaintext_password, u'12345678') self.assertEqual(salt, u'87654321') class TestFormatContent(unittest.TestCase): def test_no_encrypt(self): self.assertEqual( password._format_content(password=u'hunter42', salt=u'87654321', encrypt=False), u'hunter42 salt=87654321') def test_no_encrypt_no_salt(self): self.assertEqual( password._format_content(password=u'hunter42', salt=None, encrypt=None), u'hunter42') def test_encrypt(self): self.assertEqual( password._format_content(password=u'hunter42', salt=u'87654321', encrypt='pbkdf2_sha256'), u'hunter42 salt=87654321') def test_encrypt_no_salt(self): self.assertRaises(AssertionError, password._format_content, u'hunter42', None, 'pbkdf2_sha256') class TestWritePasswordFile(unittest.TestCase): def setUp(self): self.makedirs_safe = password.makedirs_safe self.os_chmod = password.os.chmod password.makedirs_safe = lambda path, mode: None password.os.chmod = lambda path, mode: None def tearDown(self): password.makedirs_safe = self.makedirs_safe password.os.chmod = self.os_chmod def test_content_written(self): with patch.object(builtins, 'open', mock_open()) as m: password._write_password_file(b'/this/is/a/test/caf\xc3\xa9', u'Testing Café') m.assert_called_once_with(b'/this/is/a/test/caf\xc3\xa9', 'wb') m().write.assert_called_once_with(u'Testing Café\n'.encode('utf-8')) class 
BaseTestLookupModule(unittest.TestCase): def setUp(self): self.fake_loader = DictDataLoader({'/path/to/somewhere': 'sdfsdf'}) self.password_lookup = password.LookupModule(loader=self.fake_loader) self.os_path_exists = password.os.path.exists self.os_open = password.os.open password.os.open = lambda path, flag: None self.os_close = password.os.close password.os.close = lambda fd: None self.os_remove = password.os.remove password.os.remove = lambda path: None self.makedirs_safe = password.makedirs_safe password.makedirs_safe = lambda path, mode: None def tearDown(self): password.os.path.exists = self.os_path_exists password.os.open = self.os_open password.os.close = self.os_close password.os.remove = self.os_remove password.makedirs_safe = self.makedirs_safe class TestLookupModuleWithoutPasslib(BaseTestLookupModule): @patch.object(PluginLoader, '_get_paths') @patch('ansible.plugins.lookup.password._write_password_file') def test_no_encrypt(self, mock_get_paths, mock_write_file): mock_get_paths.return_value = ['/path/one', '/path/two', '/path/three'] results = self.password_lookup.run([u'/path/to/somewhere'], None) # FIXME: assert something useful for result in results: assert len(result) == password.DEFAULT_LENGTH assert isinstance(result, text_type) @patch.object(PluginLoader, '_get_paths') @patch('ansible.plugins.lookup.password._write_password_file') def test_password_already_created_no_encrypt(self, mock_get_paths, mock_write_file): mock_get_paths.return_value = ['/path/one', '/path/two', '/path/three'] password.os.path.exists = lambda x: x == to_bytes('/path/to/somewhere') with patch.object(builtins, 'open', mock_open(read_data=b'hunter42 salt=87654321\n')) as m: results = self.password_lookup.run([u'/path/to/somewhere chars=anything'], None) for result in results: self.assertEqual(result, u'hunter42') @patch.object(PluginLoader, '_get_paths') @patch('ansible.plugins.lookup.password._write_password_file') def test_only_a(self, mock_get_paths, mock_write_file): 
mock_get_paths.return_value = ['/path/one', '/path/two', '/path/three'] results = self.password_lookup.run([u'/path/to/somewhere chars=a'], None) for result in results: self.assertEqual(result, u'a' * password.DEFAULT_LENGTH) @patch('time.sleep') def test_lock_been_held(self, mock_sleep): # pretend the lock file is here password.os.path.exists = lambda x: True try: with patch.object(builtins, 'open', mock_open(read_data=b'hunter42 salt=87654321\n')) as m: # should timeout here results = self.password_lookup.run([u'/path/to/somewhere chars=anything'], None) self.fail("Lookup didn't timeout when lock already been held") except AnsibleError: pass def test_lock_not_been_held(self): # pretend now there is password file but no lock password.os.path.exists = lambda x: x == to_bytes('/path/to/somewhere') try: with patch.object(builtins, 'open', mock_open(read_data=b'hunter42 salt=87654321\n')) as m: # should not timeout here results = self.password_lookup.run([u'/path/to/somewhere chars=anything'], None) except AnsibleError: self.fail('Lookup timeouts when lock is free') for result in results: self.assertEqual(result, u'hunter42') @pytest.mark.skipif(passlib is None, reason='passlib must be installed to run these tests') class TestLookupModuleWithPasslib(BaseTestLookupModule): def setUp(self): super(TestLookupModuleWithPasslib, self).setUp() # Different releases of passlib default to a different number of rounds self.sha256 = passlib.registry.get_crypt_handler('pbkdf2_sha256') sha256_for_tests = pbkdf2.create_pbkdf2_hash("sha256", 32, 20000) passlib.registry.register_crypt_handler(sha256_for_tests, force=True) def tearDown(self): super(TestLookupModuleWithPasslib, self).tearDown() passlib.registry.register_crypt_handler(self.sha256, force=True) @patch.object(PluginLoader, '_get_paths') @patch('ansible.plugins.lookup.password._write_password_file') def test_encrypt(self, mock_get_paths, mock_write_file): mock_get_paths.return_value = ['/path/one', '/path/two', 
'/path/three'] results = self.password_lookup.run([u'/path/to/somewhere encrypt=pbkdf2_sha256'], None) # pbkdf2 format plus hash expected_password_length = 76 for result in results: self.assertEqual(len(result), expected_password_length) # result should have 5 parts split by '$' str_parts = result.split('$', 5) # verify the result is parseable by the passlib crypt_parts = passlib.hash.pbkdf2_sha256.parsehash(result) # verify it used the right algo type self.assertEqual(str_parts[1], 'pbkdf2-sha256') self.assertEqual(len(str_parts), 5) # verify the string and parsehash agree on the number of rounds self.assertEqual(int(str_parts[2]), crypt_parts['rounds']) self.assertIsInstance(result, text_type) @patch.object(PluginLoader, '_get_paths') @patch('ansible.plugins.lookup.password._write_password_file') def test_password_already_created_encrypt(self, mock_get_paths, mock_write_file): mock_get_paths.return_value = ['/path/one', '/path/two', '/path/three'] password.os.path.exists = lambda x: x == to_bytes('/path/to/somewhere') with patch.object(builtins, 'open', mock_open(read_data=b'hunter42 salt=87654321\n')) as m: results = self.password_lookup.run([u'/path/to/somewhere chars=anything encrypt=pbkdf2_sha256'], None) for result in results: self.assertEqual(result, u'$pbkdf2-sha256$20000$ODc2NTQzMjE$Uikde0cv0BKaRaAXMrUQB.zvG4GmnjClwjghwIRf2gU')
gpl-3.0
astronaut1712/taiga-back
taiga/projects/tasks/models.py
14
4574
# Copyright (C) 2014 Andrey Antukh <niwi@niwi.be> # Copyright (C) 2014 Jesús Espino <jespinog@gmail.com> # Copyright (C) 2014 David Barragán <bameda@dbarragan.com> # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. from django.db import models from django.contrib.contenttypes import generic from django.conf import settings from django.utils import timezone from django.utils.translation import ugettext_lazy as _ from djorm_pgarray.fields import TextArrayField from taiga.projects.occ import OCCModelMixin from taiga.projects.notifications.mixins import WatchedModelMixin from taiga.projects.mixins.blocked import BlockedMixin from taiga.base.tags import TaggedMixin class Task(OCCModelMixin, WatchedModelMixin, BlockedMixin, TaggedMixin, models.Model): user_story = models.ForeignKey("userstories.UserStory", null=True, blank=True, related_name="tasks", verbose_name=_("user story")) ref = models.BigIntegerField(db_index=True, null=True, blank=True, default=None, verbose_name=_("ref")) owner = models.ForeignKey(settings.AUTH_USER_MODEL, null=True, blank=True, default=None, related_name="owned_tasks", verbose_name=_("owner")) status = models.ForeignKey("projects.TaskStatus", null=True, blank=True, related_name="tasks", verbose_name=_("status")) project = models.ForeignKey("projects.Project", null=False, blank=False, related_name="tasks", verbose_name=_("project")) milestone = 
models.ForeignKey("milestones.Milestone", null=True, blank=True, on_delete=models.SET_NULL, default=None, related_name="tasks", verbose_name=_("milestone")) created_date = models.DateTimeField(null=False, blank=False, verbose_name=_("created date"), default=timezone.now) modified_date = models.DateTimeField(null=False, blank=False, verbose_name=_("modified date")) finished_date = models.DateTimeField(null=True, blank=True, verbose_name=_("finished date")) subject = models.TextField(null=False, blank=False, verbose_name=_("subject")) us_order = models.IntegerField(null=False, blank=False, default=1, verbose_name=_("us order")) taskboard_order = models.IntegerField(null=False, blank=False, default=1, verbose_name=_("taskboard order")) description = models.TextField(null=False, blank=True, verbose_name=_("description")) assigned_to = models.ForeignKey(settings.AUTH_USER_MODEL, blank=True, null=True, default=None, related_name="tasks_assigned_to_me", verbose_name=_("assigned to")) attachments = generic.GenericRelation("attachments.Attachment") is_iocaine = models.BooleanField(default=False, null=False, blank=True, verbose_name=_("is iocaine")) external_reference = TextArrayField(default=None, verbose_name=_("external reference")) _importing = None class Meta: verbose_name = "task" verbose_name_plural = "tasks" ordering = ["project", "created_date", "ref"] # unique_together = ("ref", "project") permissions = ( ("view_task", "Can view task"), ) def save(self, *args, **kwargs): if not self._importing or not self.modified_date: self.modified_date = timezone.now() if not self.status: self.status = self.project.default_task_status return super().save(*args, **kwargs) def __str__(self): return "({1}) {0}".format(self.ref, self.subject)
agpl-3.0
mlperf/inference_results_v0.7
closed/Cisco/code/rnnt/tensorrt/preprocessing/dataset.py
12
11741
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ This file contains classes and functions related to data loading """ import torch import numpy as np import math from torch.utils.data import Dataset, Sampler import torch.distributed as dist from parts.manifest import Manifest from parts.features import WaveformFeaturizer class DistributedBucketBatchSampler(Sampler): def __init__(self, dataset, batch_size, num_replicas=None, rank=None): """Distributed sampler that buckets samples with similar length to minimize padding, similar concept as pytorch BucketBatchSampler https://pytorchnlp.readthedocs.io/en/latest/source/torchnlp.samplers.html#torchnlp.samplers.BucketBatchSampler Args: dataset: Dataset used for sampling. batch_size: data batch size num_replicas (optional): Number of processes participating in distributed training. rank (optional): Rank of the current process within num_replicas. 
""" if num_replicas is None: if not dist.is_available(): raise RuntimeError("Requires distributed package to be available") num_replicas = dist.get_world_size() if rank is None: if not dist.is_available(): raise RuntimeError("Requires distributed package to be available") rank = dist.get_rank() self.dataset = dataset self.dataset_size = len(dataset) self.num_replicas = num_replicas self.rank = rank self.epoch = 0 self.batch_size = batch_size self.tile_size = batch_size * self.num_replicas self.num_buckets = 6 self.bucket_size = self.round_up_to(math.ceil(self.dataset_size / self.num_buckets), self.tile_size) self.index_count = self.round_up_to(self.dataset_size, self.tile_size) self.num_samples = self.index_count // self.num_replicas def round_up_to(self, x, mod): return (x + mod - 1) // mod * mod def __iter__(self): g = torch.Generator() g.manual_seed(self.epoch) indices = np.arange(self.index_count) % self.dataset_size for bucket in range(self.num_buckets): bucket_start = self.bucket_size * bucket bucket_end = min(bucket_start + self.bucket_size, self.index_count) indices[bucket_start:bucket_end] = indices[bucket_start:bucket_end][torch.randperm(bucket_end - bucket_start, generator=g)] tile_indices = torch.randperm(self.index_count // self.tile_size, generator=g) for tile_index in tile_indices: start_index = self.tile_size * tile_index + self.batch_size * self.rank end_index = start_index + self.batch_size yield indices[start_index:end_index] def __len__(self): return self.num_samples def set_epoch(self, epoch): self.epoch = epoch class data_prefetcher(): def __init__(self, loader): self.loader = iter(loader) self.stream = torch.cuda.Stream() self.preload() def preload(self): try: self.next_input = next(self.loader) except StopIteration: self.next_input = None return with torch.cuda.stream(self.stream): self.next_input = [ x.cuda(non_blocking=True) for x in self.next_input] def __next__(self): torch.cuda.current_stream().wait_stream(self.stream) input = 
self.next_input self.preload() return input def next(self): return self.__next__() def __iter__(self): return self def seq_collate_fn(batch): """batches samples and returns as tensors Args: batch : list of samples Returns batches of tensors """ batch_size = len(batch) def _find_max_len(lst, ind): max_len = -1 for item in lst: if item[ind].size(0) > max_len: max_len = item[ind].size(0) return max_len max_audio_len = _find_max_len(batch, 0) max_transcript_len = _find_max_len(batch, 2) batched_audio_signal = torch.zeros(batch_size, max_audio_len) batched_transcript = torch.zeros(batch_size, max_transcript_len) audio_lengths = [] transcript_lengths = [] for ind, sample in enumerate(batch): batched_audio_signal[ind].narrow(0, 0, sample[0].size(0)).copy_(sample[0]) audio_lengths.append(sample[1]) batched_transcript[ind].narrow(0, 0, sample[2].size(0)).copy_(sample[2]) transcript_lengths.append(sample[3]) return batched_audio_signal, torch.stack(audio_lengths), batched_transcript, \ torch.stack(transcript_lengths) class AudioToTextDataLayer: """Data layer with data loader """ def __init__(self, **kwargs): self._device = torch.device("cuda") featurizer_config = kwargs['featurizer_config'] pad_to_max = kwargs.get('pad_to_max', False) perturb_config = kwargs.get('perturb_config', None) manifest_filepath = kwargs['manifest_filepath'] dataset_dir = kwargs['dataset_dir'] labels = kwargs['labels'] batch_size = kwargs['batch_size'] drop_last = kwargs.get('drop_last', False) shuffle = kwargs.get('shuffle', True) min_duration = featurizer_config.get('min_duration', 0.1) max_duration = featurizer_config.get('max_duration', None) normalize_transcripts = kwargs.get('normalize_transcripts', True) trim_silence = kwargs.get('trim_silence', False) multi_gpu = kwargs.get('multi_gpu', False) sampler_type = kwargs.get('sampler', 'default') speed_perturbation = featurizer_config.get('speed_perturbation', False) sort_by_duration=sampler_type == 'bucket' self._featurizer = 
WaveformFeaturizer.from_config(featurizer_config, perturbation_configs=perturb_config) self._dataset = AudioDataset( dataset_dir=dataset_dir, manifest_filepath=manifest_filepath, labels=labels, blank_index=len(labels), sort_by_duration=sort_by_duration, pad_to_max=pad_to_max, featurizer=self._featurizer, max_duration=max_duration, min_duration=min_duration, normalize=normalize_transcripts, trim=trim_silence, speed_perturbation=speed_perturbation) print('sort_by_duration', sort_by_duration) if not multi_gpu: self.sampler = None self._dataloader = torch.utils.data.DataLoader( dataset=self._dataset, batch_size=batch_size, collate_fn=lambda b: seq_collate_fn(b), drop_last=drop_last, shuffle=shuffle if self.sampler is None else False, num_workers=4, pin_memory=True, sampler=self.sampler ) elif sampler_type == 'bucket': self.sampler = DistributedBucketBatchSampler(self._dataset, batch_size=batch_size) print("DDBucketSampler") self._dataloader = torch.utils.data.DataLoader( dataset=self._dataset, collate_fn=lambda b: seq_collate_fn(b), num_workers=4, pin_memory=True, batch_sampler=self.sampler ) elif sampler_type == 'default': self.sampler = torch.utils.data.distributed.DistributedSampler(self._dataset) print("DDSampler") self._dataloader = torch.utils.data.DataLoader( dataset=self._dataset, batch_size=batch_size, collate_fn=lambda b: seq_collate_fn(b), drop_last=drop_last, shuffle=shuffle if self.sampler is None else False, num_workers=4, pin_memory=True, sampler=self.sampler ) else: raise RuntimeError("Sampler {} not supported".format(sampler_type)) def __len__(self): return len(self._dataset) @property def data_iterator(self): return self._dataloader class AudioDataset(Dataset): def __init__(self, dataset_dir, manifest_filepath, labels, featurizer, max_duration=None, pad_to_max=False, min_duration=None, blank_index=0, max_utts=0, normalize=True, sort_by_duration=False, trim=False, speed_perturbation=False): """Dataset that loads tensors via a json file containing paths 
to audio files, transcripts, and durations (in seconds). Each entry is a different audio sample. Args: dataset_dir: absolute path to dataset folder manifest_filepath: relative path from dataset folder to manifest json as described above. Can be coma-separated paths. labels: String containing all the possible characters to map to featurizer: Initialized featurizer class that converts paths of audio to feature tensors max_duration: If audio exceeds this length, do not include in dataset min_duration: If audio is less than this length, do not include in dataset pad_to_max: if specified input sequences into dnn model will be padded to max_duration blank_index: blank index for ctc loss / decoder max_utts: Limit number of utterances normalize: whether to normalize transcript text sort_by_duration: whether or not to sort sequences by increasing duration trim: if specified trims leading and trailing silence from an audio signal. speed_perturbation: specify if using data contains speed perburbation """ m_paths = manifest_filepath.split(',') self.manifest = Manifest(dataset_dir, m_paths, labels, blank_index, pad_to_max=pad_to_max, max_duration=max_duration, sort_by_duration=sort_by_duration, min_duration=min_duration, max_utts=max_utts, normalize=normalize, speed_perturbation=speed_perturbation) self.featurizer = featurizer self.blank_index = blank_index self.trim = trim print( "Dataset loaded with {0:.2f} hours. 
Filtered {1:.2f} hours.".format( self.manifest.duration / 3600, self.manifest.filtered_duration / 3600)) def __getitem__(self, index): sample = self.manifest[index] rn_indx = np.random.randint(len(sample['audio_filepath'])) duration = sample['audio_duration'][rn_indx] if 'audio_duration' in sample else 0 offset = sample['offset'] if 'offset' in sample else 0 features = self.featurizer.process(sample['audio_filepath'][rn_indx], offset=offset, duration=duration, trim=self.trim) return features, torch.tensor(features.shape[0]).int(), \ torch.tensor(sample["transcript"]), torch.tensor( len(sample["transcript"])).int() def __len__(self): return len(self.manifest)
apache-2.0
tiborsimko/invenio-utils
tests/test_utils_url.py
2
19982
# -*- coding: utf-8 -*- # # This file is part of Invenio. # Copyright (C) 2009, 2010, 2011, 2013, 2014, 2015 CERN. # # Invenio is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License as # published by the Free Software Foundation; either version 2 of the # License, or (at your option) any later version. # # Invenio is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Invenio; if not, write to the Free Software Foundation, Inc., # 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. """Unit tests for the urlutils library.""" from cgi import parse_qs from flask import current_app from invenio_base.wrappers import lazy_import from invenio_testing import InvenioTestCase HASHLIB_IMPORTED = lazy_import('invenio_utils.url:HASHLIB_IMPORTED') create_AWS_request_url = lazy_import('invenio_utils.url:create_AWS_request_url') create_Indico_request_url = lazy_import('invenio_utils.url:create_Indico_request_url') create_html_link = lazy_import('invenio_utils.url:create_html_link') create_html_mailto = lazy_import('invenio_utils.url:create_html_mailto') create_url = lazy_import('invenio_utils.url:create_url') get_relative_url = lazy_import('invenio_utils.url:get_relative_url') make_canonical_urlargd = lazy_import('invenio_utils.url:make_canonical_urlargd') rewrite_to_secure_url = lazy_import('invenio_utils.url:rewrite_to_secure_url') same_urls_p = lazy_import('invenio_utils.url:same_urls_p') string_to_numeric_char_reference = lazy_import('invenio_utils.url:string_to_numeric_char_reference') wash_url_argument = lazy_import('invenio_utils.url:wash_url_argument') class TestWashUrlArgument(InvenioTestCase): def test_wash_url_argument(self): """urlutils - washing of URL 
arguments""" self.assertEqual(1, wash_url_argument(['1'], 'int')) self.assertEqual("1", wash_url_argument(['1'], 'str')) self.assertEqual(['1'], wash_url_argument(['1'], 'list')) self.assertEqual(0, wash_url_argument('ellis', 'int')) self.assertEqual("ellis", wash_url_argument('ellis', 'str')) self.assertEqual(["ellis"], wash_url_argument('ellis', 'list')) self.assertEqual(0, wash_url_argument(['ellis'], 'int')) self.assertEqual("ellis", wash_url_argument(['ellis'], 'str')) self.assertEqual(["ellis"], wash_url_argument(['ellis'], 'list')) class TestSecureUrlRewrite(InvenioTestCase): def test_to_secure_url(self): self.assertEqual(rewrite_to_secure_url("http://foo.bar", secure_base="https://foo.bar/"), "https://foo.bar") self.assertEqual(rewrite_to_secure_url("http://foo.bar/", secure_base="https://foo.bar"), "https://foo.bar/") self.assertEqual(rewrite_to_secure_url("http://foo.bar/some/path?query=a", secure_base="https://foo.bar"), "https://foo.bar/some/path?query=a") self.assertEqual(rewrite_to_secure_url("http://foo.bar:4000/some/path?query=a", secure_base="https://foo.bar:4001"), "https://foo.bar:4001/some/path?query=a") self.assertEqual(rewrite_to_secure_url("http://foo.bar:80/some/path?query=a", secure_base="https://foo.bar:443"), "https://foo.bar:443/some/path?query=a") self.assertEqual(rewrite_to_secure_url("http://foo.bar/some/path?query=a", secure_base="https://foo.bar:443"), "https://foo.bar:443/some/path?query=a") self.assertEqual(rewrite_to_secure_url("http://foo.bar:80/some/path?query=a&b=d#hd", secure_base="https://foo.bar"), "https://foo.bar/some/path?query=a&b=d#hd") class TestUrls(InvenioTestCase): """Tests on URLs""" def test_url_creation(self): """urlutils - test url creation""" self.assertEqual(create_url('http://www.a.com/search', {'recid':3, 'of':'hb&'}, escape_urlargd=True), 'http://www.a.com/search?of=hb%26&amp;recid=3') self.assertEqual(create_url('http://www.a.com/search', {'recid':3, 'of':'hb&'}, escape_urlargd=False), 
'http://www.a.com/search?of=hb&&amp;recid=3') def test_canonical_urlargd_creation(self): """urlutils - test creation of canonical URLs""" self.assertEqual(make_canonical_urlargd({'a' : 1, 'b' : '2', 'b&': '2=', ':' : '?&'}, {'a': ('int', 1), 'b': ('str', 2)}), "?b%26=2%3D&%3A=%3F%26&b=2") #FIXME removed double escaping of '&' # "?b%26=2%3D&amp;%3A=%3F%26&amp;b=2") if HASHLIB_IMPORTED: def test_signed_aws_request_creation(self): """urlutils - test creation of signed AWS requests""" signed_aws_request_url = create_AWS_request_url("http://webservices.amazon.com/onca/xml", {'AWSAccessKeyId': '00000000000000000000', 'Service': 'AWSECommerceService', 'Operation': 'ItemLookup', 'ItemId': '0679722769', 'ResponseGroup': 'ItemAttributes,Offers,Images,Reviews', 'Version': '2009-01-06'}, "1234567890", _timestamp="2009-01-01T12:00:00Z") # Are we at least acccessing correct base url? self.assert_(signed_aws_request_url.startswith("http://webservices.amazon.com/onca/xml")) # Check that parameters with special characters (, :) get correctly # encoded/decoded ## Note: using parse_qs() url-decodes the string self.assertEqual(parse_qs(signed_aws_request_url)["ResponseGroup"], ['ItemAttributes,Offers,Images,Reviews']) self.assert_('ItemAttributes%2COffers%2CImages%2CReviews' \ in signed_aws_request_url) self.assertEqual(parse_qs(signed_aws_request_url)["Timestamp"], ['2009-01-01T12:00:00Z']) # Check signature exists and is correct self.assertEqual(parse_qs(signed_aws_request_url)["Signature"], ['Nace+U3Az4OhN7tISqgs1vdLBHBEijWcBeCqL5xN9xg=']) self.assert_('Nace%2BU3Az4OhN7tISqgs1vdLBHBEijWcBeCqL5xN9xg%3D&Operation' \ in signed_aws_request_url) # Continute with an additional request signed_aws_request_url_2 = \ create_AWS_request_url("http://ecs.amazonaws.co.uk/onca/xml", {'AWSAccessKeyId': '00000000000000000000', 'Actor': 'Johnny Depp', 'AssociateTag': 'mytag-20', 'Operation': 'ItemSearch', 'ResponseGroup': 'ItemAttributes,Offers,Images,Reviews,Variations', 'SearchIndex': 'DVD', 
'Service': 'AWSECommerceService', 'Sort': 'salesrank', 'Version': '2009-01-01'}, "1234567890", _timestamp="2009-01-01T12:00:00Z") # Check signature exists and is correct self.assertEqual(parse_qs(signed_aws_request_url_2)["Signature"], ['TuM6E5L9u/uNqOX09ET03BXVmHLVFfJIna5cxXuHxiU=']) def test_signed_Indico_request_creation(self): """urlutils - test creation of signed Indico requests""" signed_Indico_request_url = create_Indico_request_url("https://indico.cern.ch", "categ", "", [1, 7], "xml", {'onlypublic': 'yes', 'order': 'title', 'from': 'today', 'to': 'tomorrow'}, '00000000-0000-0000-0000-000000000000', '00000000-0000-0000-0000-000000000000', _timestamp=1234) # Are we at least acccessing correct base url? self.assert_(signed_Indico_request_url.startswith("https://indico.cern.ch/export/categ/1-7.xml?")) # Check parameters self.assertEqual(parse_qs(signed_Indico_request_url)["order"], ['title']) self.assertEqual(parse_qs(signed_Indico_request_url)["timestamp"], ['1234']) # Check signature exists and is correct self.assertEqual(parse_qs(signed_Indico_request_url)["signature"], ['e984e0c683e36ce3544372f23a397fd2400f4954']) def test_same_urls_p(self): """urlutils - test checking URLs equality""" CFG_SITE_URL = current_app.config['CFG_SITE_URL'] self.assertEqual(same_urls_p(CFG_SITE_URL + '?a=b&c=d&e=f', CFG_SITE_URL + '?e=f&c=d&a=b'), True) self.assertEqual(same_urls_p(CFG_SITE_URL + '?a=b&c=d&e=f&ln=fr', CFG_SITE_URL + '?e=f&c=d&a=b&ln=en'), False) class TestHtmlLinks(InvenioTestCase): """Tests on HTML links""" def test_html_link_creation(self): """urlutils - test creation of HTML links""" # Check with various encoding and escaping traps self.assertEqual(create_html_link('http://www.a.com', {'a' : 1, 'b' : '2', 'b&': '2=', ':' : '?'}, 'my label > & better than yours', {'style': 'color:#f00', 'target': "_blank"}), '<a href="http://www.a.com?a=1&amp;%3A=%3F&amp;b%26=2%3D&amp;b=2" style="color:#f00" target="_blank">my label > & better than yours</a>') def 
test_html_link_creation_no_argument_escaping(self): """urlutils - test creation of HTML links, without arguments escaping""" self.assertEqual(create_html_link('http://www.a.com', {'a' : 1, 'b' : '2', 'b&': '2=', ':' : '?'}, 'my label > & better than yours', {'style': 'color:#f00', 'target': "_blank"}, escape_urlargd=False), '<a href="http://www.a.com?a=1&amp;:=?&amp;b&=2=&amp;b=2" style="color:#f00" target="_blank">my label > & better than yours</a>') def test_html_link_creation_no_attribute_escaping(self): """urlutils - test creation of HTML links, without attributes escaping""" self.assertEqual(create_html_link('http://www.a.com', {'a' : 1, 'b' : '2', 'b&': '2=', ':' : '?'}, 'my label > & better than yours', {'style': 'color:#f00', 'target': "_blank"}, escape_linkattrd=False), '<a href="http://www.a.com?a=1&amp;%3A=%3F&amp;b%26=2%3D&amp;b=2" style="color:#f00" target="_blank">my label > & better than yours</a>') def test_string_to_numeric_char_reference(self): """urlutils - test numeric character conversion from string""" self.assertEqual(string_to_numeric_char_reference('abc123'), "&#97;&#98;&#99;&#49;&#50;&#51;") self.assertEqual(string_to_numeric_char_reference('\/&;,#$%~é'), "&#92;&#47;&#38;&#59;&#44;&#35;&#36;&#37;&#126;&#195;&#169;") class TestEmailObfuscationMode(InvenioTestCase): """Tests on HTML mailto links creation and obfuscation modes""" def test_html_mailto_obfuscation_mode_minus1(self): """urlutils - test creation of HTML "mailto" links, obfuscation mode -1""" self.assertEqual(create_html_mailto('juliet@cds.cern.ch', subject='Hey there', body='Lunch at 8pm?\ncu!', bcc='romeo@cds.cern.ch', link_label="Date creator", linkattrd={'style': 'text-decoration: blink'}, email_obfuscation_mode=-1), '') def test_html_mailto_obfuscation_mode_0(self): """urlutils - test creation of HTML "mailto" links, obfuscation mode 0""" self.assertEqual(create_html_mailto('juliet@cds.cern.ch', subject='Hey there', body='Lunch at 8pm?\ncu!', bcc='romeo@cds.cern.ch', 
link_label="Date creator", linkattrd={'style': 'text-decoration: blink'}, email_obfuscation_mode=0), '<a href="mailto:juliet@cds.cern.ch?body=Lunch%20at%208pm%3F%0D%0Acu%21&amp;bcc=romeo%40cds.cern.ch&amp;subject=Hey%20there" style="text-decoration: blink">Date creator</a>') def test_html_mailto_obfuscation_mode_1(self): """urlutils - test creation of HTML "mailto" links, obfuscation mode 1""" self.assertEqual(create_html_mailto('juliet@cds.cern.ch', subject='Hey there', body='Lunch at 8pm?\ncu!', bcc='romeo@cds.cern.ch', link_label="Date creator", linkattrd={'style': 'text-decoration: blink'}, email_obfuscation_mode=1), '<a href="mailto:juliet [at] cds [dot] cern [dot] ch?body=Lunch%20at%208pm%3F%0D%0Acu%21&amp;bcc=romeo%40cds.cern.ch&amp;subject=Hey%20there" style="text-decoration: blink">Date creator</a>') def test_html_mailto_obfuscation_mode_2(self): """urlutils - test creation of HTML "mailto" links, obfuscation mode 2""" self.assertEqual(create_html_mailto('juliet@cds.cern.ch', subject='Hey there', body='Lunch at 8pm?\ncu!', bcc='romeo@cds.cern.ch', link_label="Date creator", linkattrd={'style': 'text-decoration: blink'}, email_obfuscation_mode=2), '<a href="mailto:&#106;&#117;&#108;&#105;&#101;&#116;&#64;&#99;&#100;&#115;&#46;&#99;&#101;&#114;&#110;&#46;&#99;&#104;?body=Lunch%20at%208pm%3F%0D%0Acu%21&amp;bcc=romeo%40cds.cern.ch&amp;subject=Hey%20there" style="text-decoration: blink">Date creator</a>') def test_html_mailto_obfuscation_mode_3(self): """urlutils - test creation of HTML "mailto" links, obfuscation mode 3""" self.assertEqual(create_html_mailto('juliet@cds.cern.ch', subject='Hey there', body='Lunch at 8pm?\ncu!', bcc='romeo@cds.cern.ch', link_label="Date creator", linkattrd={'style': 'text-decoration: blink'}, email_obfuscation_mode=3), '<script language="JavaScript" type="text/javascript">document.write(\'>a/<rotaerc etaD>"knilb :noitaroced-txet"=elyts 
"ereht02%yeH=tcejbus;pma&hc.nrec.sdc04%oemor=ccb;pma&12%ucA0%D0%F3%mp802%ta02%hcnuL=ydob?hc.nrec.sdc@teiluj:otliam"=ferh a<\'.split("").reverse().join(""))</script>') def test_html_mailto_obfuscation_mode_4(self): """urlutils - test creation of HTML "mailto" links, obfuscation mode 4""" CFG_SITE_URL = current_app.config['CFG_SITE_URL'] self.assertEqual(create_html_mailto('juliet@cds.cern.ch', subject='Hey there', body='Lunch at 8pm?\ncu!', bcc='romeo@cds.cern.ch', link_label="Date creator", linkattrd={'style': 'text-decoration: blink'}, email_obfuscation_mode=4), 'juliet<img src="%(CFG_SITE_URL)s/img/at.gif" alt=" [at] " style="vertical-align:baseline" />cds<img src="%(CFG_SITE_URL)s/img/dot.gif" alt=" [dot] " style="vertical-align:bottom" />cern<img src="%(CFG_SITE_URL)s/img/dot.gif" alt=" [dot] " style="vertical-align:bottom" />ch' % \ {'CFG_SITE_URL': CFG_SITE_URL}) class TestRelativeURL(InvenioTestCase): """Tests the get_relative_url function with different input strings""" def test_relative_url(self): """urlutils - test get_relative_url""" url_normal = "http://web.net" self.assertEqual("", get_relative_url(url_normal)) url_normal_trailing = "http://web.net/" self.assertEqual("", get_relative_url(url_normal_trailing)) url_more = "http://web.net/asd" self.assertEqual("/asd", get_relative_url(url_more)) url_more_trailing = "http://web.net/asd/" self.assertEqual("/asd", get_relative_url(url_more_trailing)) url_adv = "http://web.net/asd/qwe" self.assertEqual("/asd/qwe", get_relative_url(url_adv)) url_adv_trailing = "http://web.net/asd/qwe/" self.assertEqual("/asd/qwe", get_relative_url(url_adv_trailing))
gpl-2.0
EricCline/CEM_inc
env/lib/python2.7/site-packages/IPython/external/simplegeneric/_simplegeneric.py
17
3113
"""This is version 0.7 of Philip J. Eby's simplegeneric module (http://pypi.python.org/pypi/simplegeneric), patched to work with Python 3, which doesn't support old-style classes. """ #Name: simplegeneric #Version: 0.7 #Summary: Simple generic functions (similar to Python's own len(), pickle.dump(), etc.) #Home-page: http://pypi.python.org/pypi/simplegeneric #Author: Phillip J. Eby #Author-email: peak@eby-sarna.com #License: PSF or ZPL __all__ = ["generic"] try: from types import ClassType, InstanceType except ImportError: classtypes = type else: classtypes = type, ClassType def generic(func): """Create a simple generic function""" _sentinel = object() def _by_class(*args, **kw): cls = args[0].__class__ for t in type(cls.__name__, (cls,object), {}).__mro__: f = _gbt(t, _sentinel) if f is not _sentinel: return f(*args, **kw) else: return func(*args, **kw) _by_type = {object: func} try: _by_type[InstanceType] = _by_class except NameError: # Python 3 pass _gbt = _by_type.get def when_type(*types): """Decorator to add a method that will be called for the given types""" for t in types: if not isinstance(t, classtypes): raise TypeError( "%r is not a type or class" % (t,) ) def decorate(f): for t in types: if _by_type.setdefault(t,f) is not f: raise TypeError( "%r already has method for type %r" % (func, t) ) return f return decorate _by_object = {} _gbo = _by_object.get def when_object(*obs): """Decorator to add a method to be called for the given object(s)""" def decorate(f): for o in obs: if _by_object.setdefault(id(o), (o,f))[1] is not f: raise TypeError( "%r already has method for object %r" % (func, o) ) return f return decorate def dispatch(*args, **kw): f = _gbo(id(args[0]), _sentinel) if f is _sentinel: for t in type(args[0]).__mro__: f = _gbt(t, _sentinel) if f is not _sentinel: return f(*args, **kw) else: return func(*args, **kw) else: return f[1](*args, **kw) dispatch.__name__ = func.__name__ dispatch.__dict__ = func.__dict__.copy() dispatch.__doc__ = 
func.__doc__ dispatch.__module__ = func.__module__ dispatch.when_type = when_type dispatch.when_object = when_object dispatch.default = func dispatch.has_object = lambda o: id(o) in _by_object dispatch.has_type = lambda t: t in _by_type return dispatch def test_suite(): import doctest return doctest.DocFileSuite( 'README.txt', optionflags=doctest.ELLIPSIS|doctest.REPORT_ONLY_FIRST_FAILURE, )
mit
Hoekz/hackness-monster
venv/lib/python2.7/site-packages/pip/_vendor/requests/packages/chardet/charsetgroupprober.py
2929
3791
######################## BEGIN LICENSE BLOCK ######################## # The Original Code is Mozilla Communicator client code. # # The Initial Developer of the Original Code is # Netscape Communications Corporation. # Portions created by the Initial Developer are Copyright (C) 1998 # the Initial Developer. All Rights Reserved. # # Contributor(s): # Mark Pilgrim - port to Python # # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA # 02110-1301 USA ######################### END LICENSE BLOCK ######################### from . 
import constants import sys from .charsetprober import CharSetProber class CharSetGroupProber(CharSetProber): def __init__(self): CharSetProber.__init__(self) self._mActiveNum = 0 self._mProbers = [] self._mBestGuessProber = None def reset(self): CharSetProber.reset(self) self._mActiveNum = 0 for prober in self._mProbers: if prober: prober.reset() prober.active = True self._mActiveNum += 1 self._mBestGuessProber = None def get_charset_name(self): if not self._mBestGuessProber: self.get_confidence() if not self._mBestGuessProber: return None # self._mBestGuessProber = self._mProbers[0] return self._mBestGuessProber.get_charset_name() def feed(self, aBuf): for prober in self._mProbers: if not prober: continue if not prober.active: continue st = prober.feed(aBuf) if not st: continue if st == constants.eFoundIt: self._mBestGuessProber = prober return self.get_state() elif st == constants.eNotMe: prober.active = False self._mActiveNum -= 1 if self._mActiveNum <= 0: self._mState = constants.eNotMe return self.get_state() return self.get_state() def get_confidence(self): st = self.get_state() if st == constants.eFoundIt: return 0.99 elif st == constants.eNotMe: return 0.01 bestConf = 0.0 self._mBestGuessProber = None for prober in self._mProbers: if not prober: continue if not prober.active: if constants._debug: sys.stderr.write(prober.get_charset_name() + ' not active\n') continue cf = prober.get_confidence() if constants._debug: sys.stderr.write('%s confidence = %s\n' % (prober.get_charset_name(), cf)) if bestConf < cf: bestConf = cf self._mBestGuessProber = prober if not self._mBestGuessProber: return 0.0 return bestConf # else: # self._mBestGuessProber = self._mProbers[0] # return self._mBestGuessProber.get_confidence()
mit
yaelelmatad/EtsyApiTest
findSimilarShopsAllShopsByPopularity.py
1
6336
from __future__ import division import json import sys import math import random #hard coded number of similar stores to spit out since HW said 5, could always add to command line nSimilarStores = 5 maxBonus = 0.0005 class vectors: def __init__(self, featureVectorName, multiplier, shopVectors): self.multiplier= multiplier #this will then hold 1/count self.shopVectors= shopVectors #spare feature vectors that only include features which appear in this particular shop #eventually get normalized so that |shopVector| = 1 self.featureVectorProperty = featureVectorName def getMultiplier(self): '''return the multiplier after training, make sure to train and normalize before calling this function''' return self.multiplier def getShopVectors(self): '''return the shopvectors. make sure to train and normalize before calling this routine''' return self.shopVectors def calculateDistance(self, shop1, shop2): '''given two shop names, calculate the distance for this typeOfVector only''' #check that both of the vectors are in this class, if not use the default empty dictionary vec1 = {} vec2 = {} if shop1 in self.shopVectors: vec1 = self.shopVectors[shop1] if shop2 in self.shopVectors: vec2 = self.shopVectors[shop2] #the vectors are sparse, so not all keys appear in all vectors. 
Figure out which keys are in just one, and which are in both allKeys = vec1.keys() + vec2.keys() sharedKeys = [] justInFirst = [] justInSecond = [] for key in set(allKeys): if key in vec1.keys() and key in vec2.keys(): sharedKeys.append(key) elif key in vec1.keys(): justInFirst.append(key) else: justInSecond.append(key) dist2 = 0 #actually the squared distance #since we used all our store data to train our multiplier, we know that the multiplier contains all keys for key in justInFirst: dist2 += math.pow(vec1[key],2)*(self.multiplier[key]) #dist2 += math.pow(vec1[key],2) for key in justInSecond: dist2 += math.pow(vec2[key],2)*(self.multiplier[key]) #dist2 += math.pow(vec2[key],2) for key in sharedKeys: dist2 += math.pow(vec2[key]-vec1[key],2)*(self.multiplier[key]) #dist2 += math.pow(vec2[key]-vec1[key],2) return math.sqrt(dist2) def main(jsonInputForMultiplier, jsonInputFileForVectors, jsonShopInfo, outputFileName): #read the json input multFile = open(jsonInputForMultiplier,'r') multipliers =json.load(multFile) multFile.close() shopVecFile = open(jsonInputFileForVectors,'r') shopVectors = json.load(shopVecFile) shopVecFile.close() jsonShopFile = open(jsonShopInfo,'r') shopDetails = json.load(jsonShopFile) jsonShopFile.close() #here is where I calculate what "bonus" to give the store if it is very popular maxPopularity = 1 for shop in shopDetails: currPop = shopDetails[shop][0]["num_favorers"] if currPop > maxPopularity: maxPopularity = currPop #max seems to be ~170 for my data #find out how many different things we trained against typesOfVectors = [key for key in multipliers] #initialize the vectorClasses with the trained data vectorClasses = {} for typeVec in typesOfVectors: vectorClasses[typeVec] = vectors(typeVec, multipliers[typeVec],shopVectors[typeVec]) #find all the shop names (not necessarily unique) shopNamesNotSet = [] #so we can get all shops, not all shops appear in all feature sets for typeVec in typesOfVectors: shopNamesNotSet += [shop for shop in 
shopVectors[typeVec]] #now remove duplicates shopNames = set(shopNamesNotSet) outputFile = open(outputFileName, 'wb') for originalShop in shopNames: distances = [] accum = 0 for shop in shopNames: dist = 0 #go through all the shops and calculate the distance if shop == originalShop: #don't waste your time calculating self distance continue for typeVec in typesOfVectors: #there are len(typesOfVectors) different "length" vectors to calculate dist+=vectorClasses[typeVec].calculateDistance(originalShop,shop) #if shop != originalShop: accum += dist #subtract a bit of distance if a store is really popular. dist+= (-1)*maxBonus*float(shopDetails[shop][0]["num_favorers"])/float(maxPopularity) distances.append((shop,dist)) #print "average ", float(accum)/float(len(distances)) #certainly not necessary to keep all the distances and then sort. could just keep the list of "nSimilarStores" currently with lowest distane values, but the sort is quick on only 5000 members sortedDist = sorted(distances, key=lambda t: t[1]) #sort on second element of tuple stringToPrint = originalShop+ ": " + sortedDist[0][0] for i in range(1,nSimilarStores): stringToPrint += ", " + sortedDist[i][0] stringToPrint += "\n" outputFile.write(stringToPrint) outputFile.close() def usage(): sys.stderr.write(""" given a multiplier.json and a shopvectors.json goes through ALL the stores and finds the five most similar stores. This version also gives stores that are more popular a bonus. Avg distance 0.3. Stores can reduce the distance to current store by up to 0.05 if they have most favorers of the list. If there are no favorers, there is no distance reduction. 
\n Third argument should be output file you want to write to like "similarShops.dat" for example you might use: \n python findSimilarShopsALlShopsByPopularity.py multiplier.json vectors.json storeData.json similarShopsByPopularity.dat \n""") if __name__ == "__main__": #check the usage is correct, user can specif 2 or 3 arguments if len(sys.argv) != 5: usage() sys.exit(1) main(sys.argv[1],sys.argv[2], sys.argv[3], sys.argv[4])
gpl-3.0
cntnboys/410Lab6
v1/lib/python2.7/site-packages/django/core/servers/basehttp.py
34
6449
""" HTTP server that implements the Python WSGI protocol (PEP 333, rev 1.21). Based on wsgiref.simple_server which is part of the standard library since 2.5. This is a simple server for use in testing or debugging Django apps. It hasn't been reviewed for security issues. DON'T USE IT FOR PRODUCTION USE! """ from __future__ import unicode_literals from io import BytesIO import socket import sys import traceback from wsgiref import simple_server from wsgiref.util import FileWrapper # NOQA: for backwards compatibility from django.core.exceptions import ImproperlyConfigured from django.core.management.color import color_style from django.core.wsgi import get_wsgi_application from django.utils import six from django.utils.module_loading import import_string from django.utils.six.moves import socketserver __all__ = ('WSGIServer', 'WSGIRequestHandler', 'MAX_SOCKET_CHUNK_SIZE') # If data is too large, socket will choke, so write chunks no larger than 32MB # at a time. The rationale behind the 32MB can be found on Django's Trac: # https://code.djangoproject.com/ticket/5596#comment:4 MAX_SOCKET_CHUNK_SIZE = 32 * 1024 * 1024 # 32 MB def get_internal_wsgi_application(): """ Loads and returns the WSGI application as configured by the user in ``settings.WSGI_APPLICATION``. With the default ``startproject`` layout, this will be the ``application`` object in ``projectname/wsgi.py``. This function, and the ``WSGI_APPLICATION`` setting itself, are only useful for Django's internal servers (runserver, runfcgi); external WSGI servers should just be configured to point to the correct application object directly. If settings.WSGI_APPLICATION is not set (is ``None``), we just return whatever ``django.core.wsgi.get_wsgi_application`` returns. 
""" from django.conf import settings app_path = getattr(settings, 'WSGI_APPLICATION') if app_path is None: return get_wsgi_application() try: return import_string(app_path) except ImportError as e: msg = ( "WSGI application '%(app_path)s' could not be loaded; " "Error importing module: '%(exception)s'" % ({ 'app_path': app_path, 'exception': e, }) ) six.reraise(ImproperlyConfigured, ImproperlyConfigured(msg), sys.exc_info()[2]) class ServerHandler(simple_server.ServerHandler, object): error_status = str("500 INTERNAL SERVER ERROR") def write(self, data): """'write()' callable as specified by PEP 3333""" assert isinstance(data, bytes), "write() argument must be bytestring" if not self.status: raise AssertionError("write() before start_response()") elif not self.headers_sent: # Before the first output, send the stored headers self.bytes_sent = len(data) # make sure we know content-length self.send_headers() else: self.bytes_sent += len(data) # XXX check Content-Length and truncate if too many bytes written? data = BytesIO(data) for chunk in iter(lambda: data.read(MAX_SOCKET_CHUNK_SIZE), b''): self._write(chunk) self._flush() def error_output(self, environ, start_response): super(ServerHandler, self).error_output(environ, start_response) return ['\n'.join(traceback.format_exception(*sys.exc_info()))] # Backport of http://hg.python.org/cpython/rev/d5af1b235dab. See #16241. # This can be removed when support for Python <= 2.7.3 is deprecated. 
def finish_response(self): try: if not self.result_is_file() or not self.sendfile(): for data in self.result: self.write(data) self.finish_content() finally: self.close() class WSGIServer(simple_server.WSGIServer, object): """BaseHTTPServer that implements the Python WSGI protocol""" request_queue_size = 10 def __init__(self, *args, **kwargs): if kwargs.pop('ipv6', False): self.address_family = socket.AF_INET6 super(WSGIServer, self).__init__(*args, **kwargs) def server_bind(self): """Override server_bind to store the server name.""" super(WSGIServer, self).server_bind() self.setup_environ() class WSGIRequestHandler(simple_server.WSGIRequestHandler, object): def __init__(self, *args, **kwargs): self.style = color_style() super(WSGIRequestHandler, self).__init__(*args, **kwargs) def address_string(self): # Short-circuit parent method to not call socket.getfqdn return self.client_address[0] def log_message(self, format, *args): msg = "[%s] %s\n" % (self.log_date_time_string(), format % args) # Utilize terminal colors, if available if args[1][0] == '2': # Put 2XX first, since it should be the common case msg = self.style.HTTP_SUCCESS(msg) elif args[1][0] == '1': msg = self.style.HTTP_INFO(msg) elif args[1] == '304': msg = self.style.HTTP_NOT_MODIFIED(msg) elif args[1][0] == '3': msg = self.style.HTTP_REDIRECT(msg) elif args[1] == '404': msg = self.style.HTTP_NOT_FOUND(msg) elif args[1][0] == '4': msg = self.style.HTTP_BAD_REQUEST(msg) else: # Any 5XX, or any other response msg = self.style.HTTP_SERVER_ERROR(msg) sys.stderr.write(msg) def get_environ(self): # Strip all headers with underscores in the name before constructing # the WSGI environ. This prevents header-spoofing based on ambiguity # between underscores and dashes both normalized to underscores in WSGI # env vars. Nginx and Apache 2.4+ both do this as well. 
for k, v in self.headers.items(): if '_' in k: del self.headers[k] return super(WSGIRequestHandler, self).get_environ() def run(addr, port, wsgi_handler, ipv6=False, threading=False): server_address = (addr, port) if threading: httpd_cls = type(str('WSGIServer'), (socketserver.ThreadingMixIn, WSGIServer), {}) else: httpd_cls = WSGIServer httpd = httpd_cls(server_address, WSGIRequestHandler, ipv6=ipv6) httpd.set_app(wsgi_handler) httpd.serve_forever()
apache-2.0
2947721120/curly-hockeypuck
examples/python/steel_lns.py
32
8547
# Copyright 2010 Pierre Schaus pschaus@gmail.com, lperron@google.com # # Licensed under the Apache License, Version 2.0 (the 'License'); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an 'AS IS' BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from ortools.constraint_solver import pywrapcp from google.apputils import app import gflags import random FLAGS = gflags.FLAGS gflags.DEFINE_string('data', 'data/steel_mill/steel_mill_slab.txt', 'path to data file') gflags.DEFINE_integer('lns_fragment_size', 10, 'size of the random lns fragment') gflags.DEFINE_integer('lns_random_seed', 0, 'seed for the lns random generator') gflags.DEFINE_integer('lns_fail_limit', 30, 'fail limit when exploring fragments') gflags.DEFINE_integer('time_limit', 20000, 'global time limit') # ---------- helper for binpacking posting ---------- def BinPacking(solver, binvars, weights, loadvars): '''post the load constraint on bins. 
constraints forall j: loadvars[j] == sum_i (binvars[i] == j) * weights[i]) ''' pack = solver.Pack(binvars, len(binvars)) pack.AddWeightedSumEqualVarDimension(weights, loadvars) solver.Add(pack) solver.Add(solver.SumEquality(loadvars, sum(weights))) # ---------- data reading ---------- def ReadData(filename): """Read data from <filename>.""" f = open(filename) capacity = [int(nb) for nb in f.readline().split()] capacity.pop(0) capacity = [0] + capacity max_capacity = max(capacity) nb_colors = int(f.readline()) nb_slabs = int(f.readline()) wc = [[int(j) for j in f.readline().split()] for i in range(nb_slabs)] weights = [x[0] for x in wc] colors = [x[1] for x in wc] loss = [min(filter(lambda x: x >= c, capacity)) - c for c in range(max_capacity + 1)] color_orders = [filter(lambda o: colors[o] == c, range(nb_slabs)) for c in range(1, nb_colors + 1)] print 'Solving steel mill with', nb_slabs, 'slabs' return (nb_slabs, capacity, max_capacity, weights, colors, loss, color_orders) # ---------- dedicated search for this problem ---------- class SteelDecisionBuilder(pywrapcp.PyDecisionBuilder): '''Dedicated Decision Builder for steel mill slab. Search for the steel mill slab problem with Dynamic Symmetry Breaking during search is an adaptation (for binary tree) from the paper of Pascal Van Hentenryck and Laurent Michel CPAIOR-2008. The value heuristic comes from the paper Solving Steel Mill Slab Problems with Constraint-Based Techniques: CP, LNS, and CBLS, Schaus et. al. to appear in Constraints 2010 ''' def __init__(self, x, nb_slabs, weights, loss_array, loads): self.__x = x self.__nb_slabs = nb_slabs self.__weights = weights self.__loss_array = loss_array self.__loads = loads self.__max_capacity = len(loss_array) - 1 def Next(self, solver): var, weight = self.NextVar() if var: v = self.MaxBound() if v + 1 == var.Min(): # Symmetry breaking. If you need to assign to a new bin, # select the first one. 
solver.Add(var == v + 1) return self.Next(solver) else: # value heuristic (important for difficult problem): # try first to place the order in the slab that will induce # the least increase of the loss loads = self.getLoads() l, v = min((self.__loss_array[loads[i] + weight], i) for i in range(var.Min(), var.Max() + 1) if var.Contains(i) and loads[i] + weight <= self.__max_capacity) decision = solver.AssignVariableValue(var, v) return decision else: return None def getLoads(self): load = [0] * len(self.__loads) for (w, x) in zip(self.__weights, self.__x): if x.Bound(): load[x.Min()] += w return load def MaxBound(self): """ returns the max value bound to a variable, -1 if no variables bound""" return max([-1] + [self.__x[o].Min() for o in range(self.__nb_slabs) if self.__x[o].Bound()]) def NextVar(self): """ mindom size heuristic with tie break on the weights of orders """ res = [(self.__x[o].Size(), -self.__weights[o], self.__x[o]) for o in range(self.__nb_slabs) if self.__x[o].Size() > 1] if res: res.sort() return (res[0][2], -res[0][1]) # returns the order var and its weight else: return (None, None) def DebugString(self): return 'SteelMillDecisionBuilder(' + str(self.__x) + ')' # ----------- LNS Operator ---------- class SteelRandomLns(pywrapcp.PyLns): """Random LNS for Steel.""" def __init__(self, x, rand, lns_size): pywrapcp.PyLns.__init__(self, x) self.__random = rand self.__lns_size = lns_size def InitFragments(self): pass def NextFragment(self): fragment = [] while len(fragment) < self.__lns_size: pos = self.__random.randint(0, self.Size() - 1) fragment.append(pos) return fragment # ----------- Main Function ----------- def main(unused_argv): # ----- solver and variable declaration ----- (nb_slabs, capacity, max_capacity, weights, colors, loss, color_orders) =\ ReadData(FLAGS.data) nb_colors = len(color_orders) solver = pywrapcp.Solver('Steel Mill Slab') x = [solver.IntVar(0, nb_slabs - 1, 'x' + str(i)) for i in range(nb_slabs)] load_vars = [solver.IntVar(0, 
max_capacity - 1, 'load_vars' + str(i)) for i in range(nb_slabs)] # ----- post of the constraints ----- # Bin Packing. BinPacking(solver, x, weights, load_vars) # At most two colors per slab. for s in range(nb_slabs): solver.Add(solver.SumLessOrEqual( [solver.Max([solver.IsEqualCstVar(x[c], s) for c in o]) for o in color_orders], 2)) # ----- Objective ----- objective_var = \ solver.Sum([load_vars[s].IndexOf(loss) for s in range(nb_slabs)]).Var() objective = solver.Minimize(objective_var, 1) # ----- start the search and optimization ----- assign_db = SteelDecisionBuilder(x, nb_slabs, weights, loss, load_vars) first_solution = solver.Assignment() first_solution.Add(x) first_solution.AddObjective(objective_var) store_db = solver.StoreAssignment(first_solution) first_solution_db = solver.Compose([assign_db, store_db]) print 'searching for initial solution,', solver.Solve(first_solution_db) print 'initial cost =', first_solution.ObjectiveValue() # To search a fragment, we use a basic randomized decision builder. # We can also use assign_db instead of inner_db. inner_db = solver.Phase(x, solver.CHOOSE_RANDOM, solver.ASSIGN_MIN_VALUE) # The most important aspect is to limit the time exploring each fragment. inner_limit = solver.FailuresLimit(FLAGS.lns_fail_limit) continuation_db = solver.SolveOnce(inner_db, [inner_limit]) # Now, we create the LNS objects. 
rand = random.Random() rand.seed(FLAGS.lns_random_seed) local_search_operator = SteelRandomLns(x, rand, FLAGS.lns_fragment_size) # This is in fact equivalent to the following predefined LNS operator: # local_search_operator = solver.RandomLNSOperator(x, # FLAGS.lns_fragment_size, # FLAGS.lns_random_seed) local_search_parameters = solver.LocalSearchPhaseParameters( local_search_operator, continuation_db) local_search_db = solver.LocalSearchPhase(first_solution, local_search_parameters) global_limit = solver.TimeLimit(FLAGS.time_limit) print 'using LNS to improve the initial solution' search_log = solver.SearchLog(100000, objective_var) solver.NewSearch(local_search_db, [objective, search_log, global_limit]) while solver.NextSolution(): print 'Objective:', objective_var.Value(),\ 'check:', sum(loss[load_vars[s].Min()] for s in range(nb_slabs)) solver.EndSearch() if __name__ == '__main__': app.run()
apache-2.0
Plexxi/st2
st2client/dist_utils.py
57
5120
# -*- coding: utf-8 -*- # NOTE: This file is auto-generated - DO NOT EDIT MANUALLY # Instead modify scripts/dist_utils.py and run 'make .sdist-requirements' to # update dist_utils.py files for all components # Copyright 2020 The StackStorm Authors. # Copyright 2019 Extreme Networks, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import absolute_import import os import re import sys from distutils.version import StrictVersion # NOTE: This script can't rely on any 3rd party dependency so we need to use this code here # # TODO: Why can't this script rely on 3rd party dependencies? Is it because it has to import # from pip? # # TODO: Dear future developer, if you are back here fixing a bug with how we parse # requirements files, please look into using the packaging package on PyPI: # https://packaging.pypa.io/en/latest/requirements/ # and specifying that in the `setup_requires` argument to `setuptools.setup()` # for subpackages. # At the very least we can vendorize some of their code instead of reimplementing # each piece of their code every time our parsing breaks. PY3 = sys.version_info[0] == 3 if PY3: text_type = str else: text_type = unicode # noqa # pylint: disable=E0602 GET_PIP = "curl https://bootstrap.pypa.io/get-pip.py | python" __all__ = [ "check_pip_is_installed", "check_pip_version", "fetch_requirements", "apply_vagrant_workaround", "get_version_string", "parse_version_string", ] def check_pip_is_installed(): """ Ensure that pip is installed. 
""" try: import pip # NOQA except ImportError as e: print("Failed to import pip: %s" % (text_type(e))) print("") print("Download pip:\n%s" % (GET_PIP)) sys.exit(1) return True def check_pip_version(min_version="6.0.0"): """ Ensure that a minimum supported version of pip is installed. """ check_pip_is_installed() import pip if StrictVersion(pip.__version__) < StrictVersion(min_version): print( "Upgrade pip, your version '{0}' " "is outdated. Minimum required version is '{1}':\n{2}".format( pip.__version__, min_version, GET_PIP ) ) sys.exit(1) return True def fetch_requirements(requirements_file_path): """ Return a list of requirements and links by parsing the provided requirements file. """ links = [] reqs = [] def _get_link(line): vcs_prefixes = ["git+", "svn+", "hg+", "bzr+"] for vcs_prefix in vcs_prefixes: if line.startswith(vcs_prefix) or line.startswith("-e %s" % (vcs_prefix)): req_name = re.findall(".*#egg=(.+)([&|@]).*$", line) if not req_name: req_name = re.findall(".*#egg=(.+?)$", line) else: req_name = req_name[0] if not req_name: raise ValueError( 'Line "%s" is missing "#egg=<package name>"' % (line) ) link = line.replace("-e ", "").strip() return link, req_name[0] return None, None with open(requirements_file_path, "r") as fp: for line in fp.readlines(): line = line.strip() if line.startswith("#") or not line: continue link, req_name = _get_link(line=line) if link: links.append(link) else: req_name = line if ";" in req_name: req_name = req_name.split(";")[0].strip() reqs.append(req_name) return (reqs, links) def apply_vagrant_workaround(): """ Function which detects if the script is being executed inside vagrant and if it is, it deletes "os.link" attribute. Note: Without this workaround, setup.py sdist will fail when running inside a shared directory (nfs / virtualbox shared folders). """ if os.environ.get("USER", None) == "vagrant": del os.link def get_version_string(init_file): """ Read __version__ string for an init file. 
""" with open(init_file, "r") as fp: content = fp.read() version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]", content, re.M) if version_match: return version_match.group(1) raise RuntimeError("Unable to find version string in %s." % (init_file)) # alias for get_version_string parse_version_string = get_version_string
apache-2.0
ProgressivePlanning/mongoengine
tests/queryset/field_list.py
30
15452
# Tests for mongoengine's QueryFieldList merging logic and for the
# .only() / .exclude() / .fields() / .all_fields() queryset projection API.
import sys
sys.path[0:0] = [""]

import unittest

from mongoengine import *
from mongoengine.queryset import QueryFieldList

__all__ = ("QueryFieldListTest", "OnlyExcludeAllTest")


class QueryFieldListTest(unittest.TestCase):
    # Unit tests for QueryFieldList itself: how successive ONLY / EXCLUDE
    # field lists combine via ``+=`` into a final mongo projection dict.

    def test_empty(self):
        # An empty field list is falsy, even with always_include set.
        q = QueryFieldList()
        self.assertFalse(q)

        q = QueryFieldList(always_include=['_cls'])
        self.assertFalse(q)

    def test_include_include(self):
        # ONLY followed by ONLY is a union of the included fields.
        q = QueryFieldList()
        # NOTE(review): _only_called appears to mark that .only() was used
        # explicitly — confirm against QueryFieldList implementation.
        q += QueryFieldList(fields=['a', 'b'], value=QueryFieldList.ONLY, _only_called=True)
        self.assertEqual(q.as_dict(), {'a': 1, 'b': 1})
        q += QueryFieldList(fields=['b', 'c'], value=QueryFieldList.ONLY)
        self.assertEqual(q.as_dict(), {'a': 1, 'b': 1, 'c': 1})

    def test_include_exclude(self):
        # Excluding after including subtracts from the included set.
        q = QueryFieldList()
        q += QueryFieldList(fields=['a', 'b'], value=QueryFieldList.ONLY)
        self.assertEqual(q.as_dict(), {'a': 1, 'b': 1})
        q += QueryFieldList(fields=['b', 'c'], value=QueryFieldList.EXCLUDE)
        self.assertEqual(q.as_dict(), {'a': 1})

    def test_exclude_exclude(self):
        # EXCLUDE followed by EXCLUDE is a union of the excluded fields.
        q = QueryFieldList()
        q += QueryFieldList(fields=['a', 'b'], value=QueryFieldList.EXCLUDE)
        self.assertEqual(q.as_dict(), {'a': 0, 'b': 0})
        q += QueryFieldList(fields=['b', 'c'], value=QueryFieldList.EXCLUDE)
        self.assertEqual(q.as_dict(), {'a': 0, 'b': 0, 'c': 0})

    def test_exclude_include(self):
        # Including after excluding keeps only the newly included fields
        # that were not previously excluded.
        q = QueryFieldList()
        q += QueryFieldList(fields=['a', 'b'], value=QueryFieldList.EXCLUDE)
        self.assertEqual(q.as_dict(), {'a': 0, 'b': 0})
        q += QueryFieldList(fields=['b', 'c'], value=QueryFieldList.ONLY)
        self.assertEqual(q.as_dict(), {'c': 1})

    def test_always_include(self):
        # always_include fields survive any ONLY/EXCLUDE combination.
        q = QueryFieldList(always_include=['x', 'y'])
        q += QueryFieldList(fields=['a', 'b', 'x'], value=QueryFieldList.EXCLUDE)
        q += QueryFieldList(fields=['b', 'c'], value=QueryFieldList.ONLY)
        self.assertEqual(q.as_dict(), {'x': 1, 'y': 1, 'c': 1})

    def test_reset(self):
        # reset() clears accumulated fields but keeps always_include active
        # for field lists added afterwards.
        q = QueryFieldList(always_include=['x', 'y'])
        q += QueryFieldList(fields=['a', 'b', 'x'], value=QueryFieldList.EXCLUDE)
        q += QueryFieldList(fields=['b', 'c'], value=QueryFieldList.ONLY)
        self.assertEqual(q.as_dict(), {'x': 1, 'y': 1, 'c': 1})
        q.reset()
        self.assertFalse(q)
        q += QueryFieldList(fields=['b', 'c'], value=QueryFieldList.ONLY)
        self.assertEqual(q.as_dict(), {'x': 1, 'y': 1, 'b': 1, 'c': 1})

    def test_using_a_slice(self):
        # A dict value passes straight through as a mongo $slice projection.
        q = QueryFieldList()
        q += QueryFieldList(fields=['a'], value={"$slice": 5})
        self.assertEqual(q.as_dict(), {'a': {"$slice": 5}})


class OnlyExcludeAllTest(unittest.TestCase):
    # Integration tests against a live mongod ('mongoenginetest' db):
    # projection behaviour of .only()/.exclude()/.fields()/.all_fields()
    # on real documents, embedded documents and inheritance.

    def setUp(self):
        # Requires a running MongoDB instance on the default host/port.
        connect(db='mongoenginetest')

        class Person(Document):
            name = StringField()
            age = IntField()
            meta = {'allow_inheritance': True}

        Person.drop_collection()
        self.Person = Person

    def test_mixing_only_exclude(self):

        class MyDoc(Document):
            a = StringField()
            b = StringField()
            c = StringField()
            d = StringField()
            e = StringField()
            f = StringField()

        include = ['a', 'b', 'c', 'd', 'e']
        exclude = ['d', 'e']
        only = ['b', 'c']

        # fields() then only() then exclude()
        qs = MyDoc.objects.fields(**dict(((i, 1) for i in include)))
        self.assertEqual(qs._loaded_fields.as_dict(),
                         {'a': 1, 'b': 1, 'c': 1, 'd': 1, 'e': 1})
        qs = qs.only(*only)
        self.assertEqual(qs._loaded_fields.as_dict(), {'b': 1, 'c': 1})
        qs = qs.exclude(*exclude)
        self.assertEqual(qs._loaded_fields.as_dict(), {'b': 1, 'c': 1})

        # fields() then exclude() then only()
        qs = MyDoc.objects.fields(**dict(((i, 1) for i in include)))
        qs = qs.exclude(*exclude)
        self.assertEqual(qs._loaded_fields.as_dict(), {'a': 1, 'b': 1, 'c': 1})
        qs = qs.only(*only)
        self.assertEqual(qs._loaded_fields.as_dict(), {'b': 1, 'c': 1})

        # exclude() first, then fields() and only()
        qs = MyDoc.objects.exclude(*exclude)
        qs = qs.fields(**dict(((i, 1) for i in include)))
        self.assertEqual(qs._loaded_fields.as_dict(), {'a': 1, 'b': 1, 'c': 1})
        qs = qs.only(*only)
        self.assertEqual(qs._loaded_fields.as_dict(), {'b': 1, 'c': 1})

    def test_slicing(self):

        class MyDoc(Document):
            a = ListField()
            b = ListField()
            c = ListField()
            d = ListField()
            e = ListField()
            f = ListField()

        include = ['a', 'b', 'c', 'd', 'e']
        exclude = ['d', 'e']
        only = ['b', 'c']

        qs = MyDoc.objects.fields(**dict(((i, 1) for i in include)))
        qs = qs.exclude(*exclude)
        qs = qs.only(*only)
        # slice__<field> adds a $slice projection alongside plain includes
        qs = qs.fields(slice__b=5)
        self.assertEqual(qs._loaded_fields.as_dict(),
                         {'b': {'$slice': 5}, 'c': 1})

        qs = qs.fields(slice__c=[5, 1])
        self.assertEqual(qs._loaded_fields.as_dict(),
                         {'b': {'$slice': 5}, 'c': {'$slice': [5, 1]}})

        qs = qs.exclude('c')
        self.assertEqual(qs._loaded_fields.as_dict(), {'b': {'$slice': 5}})

    def test_only(self):
        """Ensure that QuerySet.only only returns the requested fields.
        """
        person = self.Person(name='test', age=25)
        person.save()

        obj = self.Person.objects.only('name').get()
        self.assertEqual(obj.name, person.name)
        self.assertEqual(obj.age, None)

        obj = self.Person.objects.only('age').get()
        self.assertEqual(obj.name, None)
        self.assertEqual(obj.age, person.age)

        obj = self.Person.objects.only('name', 'age').get()
        self.assertEqual(obj.name, person.name)
        self.assertEqual(obj.age, person.age)

        obj = self.Person.objects.only(*('id', 'name',)).get()
        self.assertEqual(obj.name, person.name)
        self.assertEqual(obj.age, None)

        # Check polymorphism still works
        class Employee(self.Person):
            salary = IntField(db_field='wage')

        employee = Employee(name='test employee', age=40, salary=30000)
        employee.save()

        obj = self.Person.objects(id=employee.id).only('age').get()
        self.assertTrue(isinstance(obj, Employee))

        # Check field names are looked up properly
        obj = Employee.objects(id=employee.id).only('salary').get()
        self.assertEqual(obj.salary, employee.salary)
        self.assertEqual(obj.name, None)

    def test_only_with_subfields(self):
        # Dotted paths in only() project into embedded documents.
        class User(EmbeddedDocument):
            name = StringField()
            email = StringField()

        class Comment(EmbeddedDocument):
            title = StringField()
            text = StringField()

        class BlogPost(Document):
            content = StringField()
            author = EmbeddedDocumentField(User)
            comments = ListField(EmbeddedDocumentField(Comment))

        BlogPost.drop_collection()

        post = BlogPost(content='Had a good coffee today...')
        post.author = User(name='Test User')
        post.comments = [Comment(title='I aggree', text='Great post!'),
                         Comment(title='Coffee', text='I hate coffee')]
        post.save()

        obj = BlogPost.objects.only('author.name',).get()
        self.assertEqual(obj.content, None)
        self.assertEqual(obj.author.email, None)
        self.assertEqual(obj.author.name, 'Test User')
        self.assertEqual(obj.comments, [])

        obj = BlogPost.objects.only('content', 'comments.title',).get()
        self.assertEqual(obj.content, 'Had a good coffee today...')
        self.assertEqual(obj.author, None)
        self.assertEqual(obj.comments[0].title, 'I aggree')
        self.assertEqual(obj.comments[1].title, 'Coffee')
        self.assertEqual(obj.comments[0].text, None)
        self.assertEqual(obj.comments[1].text, None)

        obj = BlogPost.objects.only('comments',).get()
        self.assertEqual(obj.content, None)
        self.assertEqual(obj.author, None)
        self.assertEqual(obj.comments[0].title, 'I aggree')
        self.assertEqual(obj.comments[1].title, 'Coffee')
        self.assertEqual(obj.comments[0].text, 'Great post!')
        self.assertEqual(obj.comments[1].text, 'I hate coffee')

        BlogPost.drop_collection()

    def test_exclude(self):
        # Dotted paths in exclude() strip fields inside embedded documents.
        class User(EmbeddedDocument):
            name = StringField()
            email = StringField()

        class Comment(EmbeddedDocument):
            title = StringField()
            text = StringField()

        class BlogPost(Document):
            content = StringField()
            author = EmbeddedDocumentField(User)
            comments = ListField(EmbeddedDocumentField(Comment))

        BlogPost.drop_collection()

        post = BlogPost(content='Had a good coffee today...')
        post.author = User(name='Test User')
        post.comments = [Comment(title='I aggree', text='Great post!'),
                         Comment(title='Coffee', text='I hate coffee')]
        post.save()

        obj = BlogPost.objects.exclude('author', 'comments.text').get()
        self.assertEqual(obj.author, None)
        self.assertEqual(obj.content, 'Had a good coffee today...')
        self.assertEqual(obj.comments[0].title, 'I aggree')
        self.assertEqual(obj.comments[0].text, None)

        BlogPost.drop_collection()

    def test_exclude_only_combining(self):
        class Attachment(EmbeddedDocument):
            name = StringField()
            content = StringField()

        class Email(Document):
            sender = StringField()
            to = StringField()
            subject = StringField()
            body = StringField()
            content_type = StringField()
            attachments = ListField(EmbeddedDocumentField(Attachment))

        Email.drop_collection()
        email = Email(sender='me', to='you', subject='From Russia with Love',
                      body='Hello!', content_type='text/plain')
        email.attachments = [
            Attachment(name='file1.doc', content='ABC'),
            Attachment(name='file2.doc', content='XYZ'),
        ]
        email.save()

        # Chained excludes accumulate.
        obj = Email.objects.exclude('content_type').exclude('body').get()
        self.assertEqual(obj.sender, 'me')
        self.assertEqual(obj.to, 'you')
        self.assertEqual(obj.subject, 'From Russia with Love')
        self.assertEqual(obj.body, None)
        self.assertEqual(obj.content_type, None)

        # exclude() after only() subtracts from the only() set.
        obj = Email.objects.only('sender', 'to').exclude('body', 'sender').get()
        self.assertEqual(obj.sender, None)
        self.assertEqual(obj.to, 'you')
        self.assertEqual(obj.subject, None)
        self.assertEqual(obj.body, None)
        self.assertEqual(obj.content_type, None)

        # Mixing excludes and only with embedded-document subfields.
        obj = Email.objects.exclude('attachments.content').exclude('body').only('to', 'attachments.name').get()
        self.assertEqual(obj.attachments[0].name, 'file1.doc')
        self.assertEqual(obj.attachments[0].content, None)
        self.assertEqual(obj.sender, None)
        self.assertEqual(obj.to, 'you')
        self.assertEqual(obj.subject, None)
        self.assertEqual(obj.body, None)
        self.assertEqual(obj.content_type, None)

        Email.drop_collection()

    def test_all_fields(self):
        # all_fields() discards any previous only()/exclude() projection.
        class Email(Document):
            sender = StringField()
            to = StringField()
            subject = StringField()
            body = StringField()
            content_type = StringField()

        Email.drop_collection()

        email = Email(sender='me', to='you', subject='From Russia with Love',
                      body='Hello!', content_type='text/plain')
        email.save()

        obj = Email.objects.exclude('content_type', 'body').only('to', 'body').all_fields().get()
        self.assertEqual(obj.sender, 'me')
        self.assertEqual(obj.to, 'you')
        self.assertEqual(obj.subject, 'From Russia with Love')
        self.assertEqual(obj.body, 'Hello!')
        self.assertEqual(obj.content_type, 'text/plain')

        Email.drop_collection()

    def test_slicing_fields(self):
        """Ensure that query slicing an array works.
        """
        class Numbers(Document):
            n = ListField(IntField())

        Numbers.drop_collection()

        numbers = Numbers(n=[0, 1, 2, 3, 4, 5, -5, -4, -3, -2, -1])
        numbers.save()

        # first three
        numbers = Numbers.objects.fields(slice__n=3).get()
        self.assertEqual(numbers.n, [0, 1, 2])

        # last three
        numbers = Numbers.objects.fields(slice__n=-3).get()
        self.assertEqual(numbers.n, [-3, -2, -1])

        # skip 2, limit 3
        numbers = Numbers.objects.fields(slice__n=[2, 3]).get()
        self.assertEqual(numbers.n, [2, 3, 4])

        # skip to fifth from last, limit 4
        numbers = Numbers.objects.fields(slice__n=[-5, 4]).get()
        self.assertEqual(numbers.n, [-5, -4, -3, -2])

        # skip to fifth from last, limit 10
        numbers = Numbers.objects.fields(slice__n=[-5, 10]).get()
        self.assertEqual(numbers.n, [-5, -4, -3, -2, -1])

        # skip to fifth from last, limit 10 dict method
        numbers = Numbers.objects.fields(n={"$slice": [-5, 10]}).get()
        self.assertEqual(numbers.n, [-5, -4, -3, -2, -1])

    def test_slicing_nested_fields(self):
        """Ensure that query slicing an embedded array works.
        """
        class EmbeddedNumber(EmbeddedDocument):
            n = ListField(IntField())

        class Numbers(Document):
            embedded = EmbeddedDocumentField(EmbeddedNumber)

        Numbers.drop_collection()

        numbers = Numbers()
        numbers.embedded = EmbeddedNumber(n=[0, 1, 2, 3, 4, 5, -5, -4, -3, -2, -1])
        numbers.save()

        # first three
        numbers = Numbers.objects.fields(slice__embedded__n=3).get()
        self.assertEqual(numbers.embedded.n, [0, 1, 2])

        # last three
        numbers = Numbers.objects.fields(slice__embedded__n=-3).get()
        self.assertEqual(numbers.embedded.n, [-3, -2, -1])

        # skip 2, limit 3
        numbers = Numbers.objects.fields(slice__embedded__n=[2, 3]).get()
        self.assertEqual(numbers.embedded.n, [2, 3, 4])

        # skip to fifth from last, limit 4
        numbers = Numbers.objects.fields(slice__embedded__n=[-5, 4]).get()
        self.assertEqual(numbers.embedded.n, [-5, -4, -3, -2])

        # skip to fifth from last, limit 10
        numbers = Numbers.objects.fields(slice__embedded__n=[-5, 10]).get()
        self.assertEqual(numbers.embedded.n, [-5, -4, -3, -2, -1])

        # skip to fifth from last, limit 10 dict method
        numbers = Numbers.objects.fields(embedded__n={"$slice": [-5, 10]}).get()
        self.assertEqual(numbers.embedded.n, [-5, -4, -3, -2, -1])

    def test_exclude_from_subclasses_docs(self):

        class Base(Document):
            username = StringField()

            meta = {'allow_inheritance': True}

        class Anon(Base):
            anon = BooleanField()

        class User(Base):
            password = StringField()
            wibble = StringField()

        Base.drop_collection()
        User(username="mongodb", password="secret").save()

        user = Base.objects().exclude("password", "wibble").first()
        self.assertEqual(user.password, None)

        # LookUpError is mongoengine's own exception (note the spelling),
        # raised when excluding a field no subclass defines.
        self.assertRaises(LookUpError, Base.objects.exclude, "made_up")

if __name__ == '__main__':
    unittest.main()
mit
aeklant/scipy
scipy/stats/tests/test_contingency.py
9
5982
# Tests for scipy.stats.contingency: margins(), expected_freq() and
# chi2_contingency().  Reference values for the multi-way chi-square
# cases were computed independently with R (the R sessions are kept in
# comments next to the corresponding assertions).
import numpy as np
from numpy.testing import (assert_equal, assert_array_equal,
                           assert_array_almost_equal, assert_approx_equal,
                           assert_allclose)
from pytest import raises as assert_raises
from scipy.special import xlogy
from scipy.stats.contingency import margins, expected_freq, chi2_contingency


def test_margins():
    # Marginal sums for 1-D, 2-D and 3-D tables; margins() returns one
    # array per axis, each keeping the summed axis as size 1.
    a = np.array([1])
    m = margins(a)
    assert_equal(len(m), 1)
    m0 = m[0]
    assert_array_equal(m0, np.array([1]))

    a = np.array([[1]])
    m0, m1 = margins(a)
    expected0 = np.array([[1]])
    expected1 = np.array([[1]])
    assert_array_equal(m0, expected0)
    assert_array_equal(m1, expected1)

    a = np.arange(12).reshape(2, 6)
    m0, m1 = margins(a)
    expected0 = np.array([[15], [51]])
    expected1 = np.array([[6, 8, 10, 12, 14, 16]])
    assert_array_equal(m0, expected0)
    assert_array_equal(m1, expected1)

    a = np.arange(24).reshape(2, 3, 4)
    m0, m1, m2 = margins(a)
    expected0 = np.array([[[66]], [[210]]])
    expected1 = np.array([[[60], [92], [124]]])
    expected2 = np.array([[[60, 66, 72, 78]]])
    assert_array_equal(m0, expected0)
    assert_array_equal(m1, expected1)
    assert_array_equal(m2, expected2)


def test_expected_freq():
    # Expected frequencies under the independence hypothesis.
    assert_array_equal(expected_freq([1]), np.array([1.0]))

    # 3-D table whose margins are all uniform -> all expected counts 1.
    observed = np.array([[[2, 0], [0, 2]], [[0, 2], [2, 0]], [[1, 1], [1, 1]]])
    e = expected_freq(observed)
    assert_array_equal(e, np.ones_like(observed))

    observed = np.array([[10, 10, 20], [20, 20, 20]])
    e = expected_freq(observed)
    correct = np.array([[12., 12., 16.], [18., 18., 24.]])
    assert_array_almost_equal(e, correct)


def test_chi2_contingency_trivial():
    # Some very simple tests for chi2_contingency.

    # A trivial case
    obs = np.array([[1, 2], [1, 2]])
    chi2, p, dof, expected = chi2_contingency(obs, correction=False)
    assert_equal(chi2, 0.0)
    assert_equal(p, 1.0)
    assert_equal(dof, 1)
    assert_array_equal(obs, expected)

    # A *really* trivial case: 1-D data.
    obs = np.array([1, 2, 3])
    chi2, p, dof, expected = chi2_contingency(obs, correction=False)
    assert_equal(chi2, 0.0)
    assert_equal(p, 1.0)
    assert_equal(dof, 0)
    assert_array_equal(obs, expected)


def test_chi2_contingency_R():
    # Some test cases that were computed independently, using R.

    # Rcode = \
    # """
    # # Data vector.
    # data <- c(
    #   12, 34, 23,     4,  47,  11,
    #   35, 31, 11,     34,  10,  18,
    #   12, 32,  9,     18,  13,  19,
    #   12, 12, 14,      9,  33,  25
    #   )
    #
    # # Create factor tags:r=rows, c=columns, t=tiers
    # r <- factor(gl(4, 2*3, 2*3*4, labels=c("r1", "r2", "r3", "r4")))
    # c <- factor(gl(3, 1,   2*3*4, labels=c("c1", "c2", "c3")))
    # t <- factor(gl(2, 3,   2*3*4, labels=c("t1", "t2")))
    #
    # # 3-way Chi squared test of independence
    # s = summary(xtabs(data~r+c+t))
    # print(s)
    # """
    # Routput = \
    # """
    # Call: xtabs(formula = data ~ r + c + t)
    # Number of cases in table: 478
    # Number of factors: 3
    # Test for independence of all factors:
    #         Chisq = 102.17, df = 17, p-value = 3.514e-14
    # """
    obs = np.array(
        [[[12, 34, 23],
          [35, 31, 11],
          [12, 32, 9],
          [12, 12, 14]],
         [[4, 47, 11],
          [34, 10, 18],
          [18, 13, 19],
          [9, 33, 25]]])
    chi2, p, dof, expected = chi2_contingency(obs)
    assert_approx_equal(chi2, 102.17, significant=5)
    assert_approx_equal(p, 3.514e-14, significant=4)
    assert_equal(dof, 17)

    # Rcode = \
    # """
    # # Data vector.
    # data <- c(
    #     #
    #     12, 17,
    #     11, 16,
    #     #
    #     11, 12,
    #     15, 16,
    #     #
    #     23, 15,
    #     30, 22,
    #     #
    #     14, 17,
    #     15, 16
    #     )
    #
    # # Create factor tags:r=rows, c=columns, d=depths(?), t=tiers
    # r <- factor(gl(2, 2,  2*2*2*2, labels=c("r1", "r2")))
    # c <- factor(gl(2, 1,  2*2*2*2, labels=c("c1", "c2")))
    # d <- factor(gl(2, 4,  2*2*2*2, labels=c("d1", "d2")))
    # t <- factor(gl(2, 8,  2*2*2*2, labels=c("t1", "t2")))
    #
    # # 4-way Chi squared test of independence
    # s = summary(xtabs(data~r+c+d+t))
    # print(s)
    # """
    # Routput = \
    # """
    # Call: xtabs(formula = data ~ r + c + d + t)
    # Number of cases in table: 262
    # Number of factors: 4
    # Test for independence of all factors:
    #         Chisq = 8.758, df = 11, p-value = 0.6442
    # """
    obs = np.array(
        [[[[12, 17],
           [11, 16]],
          [[11, 12],
           [15, 16]]],
         [[[23, 15],
           [30, 22]],
          [[14, 17],
           [15, 16]]]])
    chi2, p, dof, expected = chi2_contingency(obs)
    assert_approx_equal(chi2, 8.758, significant=4)
    assert_approx_equal(p, 0.6442, significant=4)
    assert_equal(dof, 11)


def test_chi2_contingency_g():
    # lambda_='log-likelihood' selects the G-test statistic
    # G = 2 * sum(obs * log(obs / expected)); xlogy handles zero counts.
    c = np.array([[15, 60], [15, 90]])
    g, p, dof, e = chi2_contingency(c, lambda_='log-likelihood',
                                    correction=False)
    assert_allclose(g, 2*xlogy(c, c/e).sum())

    # With Yates' continuity correction the observed counts are shifted
    # by +/-0.5 towards the expected counts before computing G.
    g, p, dof, e = chi2_contingency(c, lambda_='log-likelihood',
                                    correction=True)
    c_corr = c + np.array([[-0.5, 0.5], [0.5, -0.5]])
    assert_allclose(g, 2*xlogy(c_corr, c_corr/e).sum())

    # No correction is applied when dof != 1.
    c = np.array([[10, 12, 10], [12, 10, 10]])
    g, p, dof, e = chi2_contingency(c, lambda_='log-likelihood')
    assert_allclose(g, 2*xlogy(c, c/e).sum())


def test_chi2_contingency_bad_args():
    # Test that "bad" inputs raise a ValueError.

    # Negative value in the array of observed frequencies.
    obs = np.array([[-1, 10], [1, 2]])
    assert_raises(ValueError, chi2_contingency, obs)

    # The zeros in this will result in zeros in the array
    # of expected frequencies.
    obs = np.array([[0, 1], [0, 1]])
    assert_raises(ValueError, chi2_contingency, obs)

    # A degenerate case: `observed` has size 0.
    obs = np.empty((0, 8))
    assert_raises(ValueError, chi2_contingency, obs)
bsd-3-clause
Dhivyap/ansible
lib/ansible/module_utils/network/nxos/facts/vlans/vlans.py
12
3922
# # -*- coding: utf-8 -*- # Copyright 2019 Red Hat # GNU General Public License v3.0+ # (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)#!/usr/bin/python """ The nxos vlans fact class It is in this file the configuration is collected from the device for a given resource, parsed, and the facts tree is populated based on the configuration. """ from __future__ import absolute_import, division, print_function __metaclass__ = type import re from copy import deepcopy from ansible.module_utils.network.common import utils from ansible.module_utils.network.common.utils import parse_conf_arg, parse_conf_cmd_arg from ansible.module_utils.network.nxos.argspec.vlans.vlans import VlansArgs class VlansFacts(object): """ The nxos vlans fact class """ def __init__(self, module, subspec='config', options='options'): self._module = module self.argument_spec = VlansArgs.argument_spec spec = deepcopy(self.argument_spec) if subspec: if options: facts_argument_spec = spec[subspec][options] else: facts_argument_spec = spec[subspec] else: facts_argument_spec = spec self.generated_spec = utils.generate_dict(facts_argument_spec) def populate_facts(self, connection, ansible_facts, data=None): """ Populate the facts for vlans :param connection: the device connection :param data: previously collected conf :rtype: dictionary :returns: facts """ objs = [] if not data: data = connection.get('show running-config | section ^vlan') vlans = re.split(r'(,|-)', data.split()[1]) for v in vlans: if not v.isdigit(): vlans.remove(v) config = re.split(r'(^|\n)vlan', data) for conf in config: conf = conf.strip() if conf: if conf[0] in vlans: vlans.remove(conf[0]) obj = self.render_config(self.generated_spec, conf) if obj and len(obj.keys()) > 1: objs.append(obj) for v in vlans: obj = self.render_config(self.generated_spec, v) if obj: objs.append(obj) ansible_facts['ansible_network_resources'].pop('vlans', None) facts = {} if objs: facts['vlans'] = [] params = utils.validate_config(self.argument_spec, 
{'config': objs}) for cfg in params['config']: facts['vlans'].append(utils.remove_empties(cfg)) ansible_facts['ansible_network_resources'].update(facts) return ansible_facts def render_config(self, spec, conf): """ Render config as dictionary structure and delete keys from spec for null values :param spec: The facts tree, generated from the argspec :param conf: The configuration :rtype: dictionary :returns: The generated config """ config = deepcopy(spec) if len(conf) == 1: return utils.remove_empties({'vlan_id': conf}) match = re.search(r'^(\S+)?', conf, re.M) if match: if len(match.group(1)) == 1: config['vlan_id'] = match.group(1) config['name'] = parse_conf_arg(conf, 'name') config['mode'] = parse_conf_arg(conf, 'mode') config['mapped_vni'] = parse_conf_arg(conf, 'vn-segment') config['state'] = parse_conf_arg(conf, 'state') admin_state = parse_conf_cmd_arg(conf, 'shutdown', 'down', 'up') if admin_state == 'up': config['enabled'] = True elif admin_state == 'down': config['enabled'] = False vlans_cfg = utils.remove_empties(config) return vlans_cfg
gpl-3.0
Batch21/pywr
docs/source/conf.py
2
9485
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Sphinx build configuration for the Pywr documentation.
#
# Only the settings that differ from Sphinx's defaults are kept here;
# see the Sphinx configuration reference for everything that can be
# overridden.

import sys
import os
import shlex

import alabaster

# -- General configuration -------------------------------------------------

# Sphinx extensions: API docs from docstrings, LaTeX math rendering,
# matplotlib plot directives, and the alabaster theme package.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.mathjax',
    'matplotlib.sphinxext.plot_directive',
    'alabaster',
]

# Where to look for page templates, relative to this directory.
templates_path = ['_templates']

# Source files are reStructuredText; the root document is index.rst.
source_suffix = '.rst'
master_doc = 'index'

# Project identity used throughout the generated output.
project = 'Pywr'
copyright = '2015, Joshua Arnott'
author = 'Joshua Arnott'

# Short X.Y version and the full release string.
version = '0.1'
release = '0.1'

# Autogenerated-content language (None -> English default).
language = None

# No source files are excluded from the build.
exclude_patterns = []

# Syntax highlighting style for code samples.
pygments_style = 'sphinx'

# Do not render todo:: directives in the output.
todo_include_todos = False

# -- HTML output -----------------------------------------------------------

# Alabaster theme, pointed at the pywr GitHub project.
html_theme = 'alabaster'
html_theme_options = {
    'github_user': 'pywr',
    'github_repo': 'pywr',
}
html_theme_path = [alabaster.get_path()]

# Static assets copied into the build (override builtin files by name).
html_static_path = ['_static']

# Sidebar layout used on every page.
html_sidebars = {
    '**': [
        'about.html',
        'navigation.html',
        'relations.html',
        'searchbox.html',
        'donate.html',
    ]
}

# Base name for the HTML help builder's output file.
htmlhelp_basename = 'Pywrdoc'

# -- LaTeX output ----------------------------------------------------------

# No LaTeX overrides (paper size, fonts, preamble all default).
latex_elements = {
}

# (source start file, target name, title, author, documentclass).
latex_documents = [
    (master_doc, 'Pywr.tex', 'Pywr Documentation',
     'Joshua Arnott', 'manual'),
]

# -- Manual page output ----------------------------------------------------

# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'pywr', 'Pywr Documentation',
     [author], 1)
]

# -- Texinfo output --------------------------------------------------------

# (source start file, target name, title, author, dir entry,
#  description, category).
texinfo_documents = [
    (master_doc, 'Pywr', 'Pywr Documentation',
     author, 'Pywr', 'One line description of project.',
     'Miscellaneous'),
]
gpl-3.0
Francis-Liu/animated-broccoli
nova/tests/unit/test_flavors.py
39
24595
# Copyright 2011 Ken Pepple # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Unit Tests for flavors code """ import time import six from nova.compute import flavors from nova import context from nova import db from nova.db.sqlalchemy import api as sql_session from nova.db.sqlalchemy import models from nova import exception from nova import objects from nova.objects import base as obj_base from nova import test DEFAULT_FLAVORS = [ {'memory_mb': 512, 'root_gb': 1, 'deleted_at': None, 'name': 'm1.tiny', 'deleted': 0, 'created_at': None, 'ephemeral_gb': 0, 'updated_at': None, 'disabled': False, 'vcpus': 1, 'extra_specs': {}, 'swap': 0, 'rxtx_factor': 1.0, 'is_public': True, 'flavorid': '1', 'vcpu_weight': None, 'id': 2}, {'memory_mb': 2048, 'root_gb': 20, 'deleted_at': None, 'name': 'm1.small', 'deleted': 0, 'created_at': None, 'ephemeral_gb': 0, 'updated_at': None, 'disabled': False, 'vcpus': 1, 'extra_specs': {}, 'swap': 0, 'rxtx_factor': 1.0, 'is_public': True, 'flavorid': '2', 'vcpu_weight': None, 'id': 5}, {'memory_mb': 4096, 'root_gb': 40, 'deleted_at': None, 'name': 'm1.medium', 'deleted': 0, 'created_at': None, 'ephemeral_gb': 0, 'updated_at': None, 'disabled': False, 'vcpus': 2, 'extra_specs': {}, 'swap': 0, 'rxtx_factor': 1.0, 'is_public': True, 'flavorid': '3', 'vcpu_weight': None, 'id': 1}, {'memory_mb': 8192, 'root_gb': 80, 'deleted_at': None, 'name': 'm1.large', 'deleted': 0, 'created_at': None, 'ephemeral_gb': 0, 'updated_at': None, 'disabled': False, 'vcpus': 4, 
'extra_specs': {}, 'swap': 0, 'rxtx_factor': 1.0, 'is_public': True, 'flavorid': '4', 'vcpu_weight': None, 'id': 3}, {'memory_mb': 16384, 'root_gb': 160, 'deleted_at': None, 'name': 'm1.xlarge', 'deleted': 0, 'created_at': None, 'ephemeral_gb': 0, 'updated_at': None, 'disabled': False, 'vcpus': 8, 'extra_specs': {}, 'swap': 0, 'rxtx_factor': 1.0, 'is_public': True, 'flavorid': '5', 'vcpu_weight': None, 'id': 4} ] CONTEXT = context.RequestContext('fake', 'fake', is_admin=False) DEFAULT_FLAVOR_OBJS = [ objects.Flavor._obj_from_primitive(CONTEXT, objects.Flavor.VERSION, {'nova_object.data': flavor}) for flavor in DEFAULT_FLAVORS ] class InstanceTypeTestCase(test.TestCase): """Test cases for flavor code.""" def _generate_name(self): """return a name not in the DB.""" nonexistent_flavor = str(int(time.time())) all_flavors = flavors.get_all_flavors() while nonexistent_flavor in all_flavors: nonexistent_flavor += "z" else: return nonexistent_flavor def _generate_flavorid(self): """return a flavorid not in the DB.""" nonexistent_flavor = 2700 flavor_ids = [value.id for key, value in six.iteritems(flavors.get_all_flavors())] while nonexistent_flavor in flavor_ids: nonexistent_flavor += 1 else: return nonexistent_flavor def _existing_flavor(self): """return first flavor name.""" return flavors.get_all_flavors().keys()[0] def test_get_all_instance_types(self): # Ensures that all flavors can be retrieved. session = sql_session.get_session() total_instance_types = session.query(models.InstanceTypes).count() inst_types = flavors.get_all_flavors() self.assertEqual(total_instance_types, len(inst_types)) def test_non_existent_inst_type_should_not_delete(self): # Ensures that flavor creation fails with invalid args. self.assertRaises(exception.FlavorNotFoundByName, flavors.destroy, 'unknown_flavor') def test_will_not_destroy_with_no_name(self): # Ensure destroy said path of no name raises error. 
self.assertRaises(exception.FlavorNotFoundByName, flavors.destroy, None) def test_will_not_get_bad_default_instance_type(self): # ensures error raised on bad default flavor. self.flags(default_flavor='unknown_flavor') self.assertRaises(exception.FlavorNotFound, flavors.get_default_flavor) def test_will_get_flavor_by_id(self): default_instance_type = flavors.get_default_flavor() instance_type_id = default_instance_type.id fetched = flavors.get_flavor(instance_type_id) self.assertIsInstance(fetched, objects.Flavor) self.assertEqual(default_instance_type.flavorid, fetched.flavorid) def test_will_not_get_flavor_by_unknown_id(self): # Ensure get by name returns default flavor with no name. self.assertRaises(exception.FlavorNotFound, flavors.get_flavor, 10000) def test_will_not_get_flavor_with_bad_id(self): # Ensure get by name returns default flavor with bad name. self.assertRaises(exception.FlavorNotFound, flavors.get_flavor, 'asdf') def test_flavor_get_by_None_name_returns_default(self): # Ensure get by name returns default flavor with no name. default = flavors.get_default_flavor() actual = flavors.get_flavor_by_name(None) self.assertIsInstance(default, objects.Flavor) self.assertIsInstance(actual, objects.Flavor) self.assertEqual(default.flavorid, actual.flavorid) def test_will_not_get_flavor_with_bad_name(self): # Ensure get by name returns default flavor with bad name. self.assertRaises(exception.FlavorNotFound, flavors.get_flavor_by_name, 10000) def test_will_not_get_instance_by_unknown_flavor_id(self): # Ensure get by flavor raises error with wrong flavorid. 
self.assertRaises(exception.FlavorNotFound, flavors.get_flavor_by_flavor_id, 'unknown_flavor') def test_will_get_instance_by_flavor_id(self): default_instance_type = flavors.get_default_flavor() flavorid = default_instance_type.flavorid fetched = flavors.get_flavor_by_flavor_id(flavorid) self.assertIsInstance(fetched, objects.Flavor) self.assertEqual(default_instance_type.flavorid, fetched.flavorid) def test_can_read_deleted_types_using_flavor_id(self): # Ensure deleted flavors can be read when querying flavor_id. inst_type_name = "test" inst_type_flavor_id = "test1" inst_type = flavors.create(inst_type_name, 256, 1, 120, 100, inst_type_flavor_id) self.assertEqual(inst_type_name, inst_type.name) # NOTE(jk0): The deleted flavor will show up here because the context # in get_flavor_by_flavor_id() is set to use read_deleted by # default. flavors.destroy(inst_type.name) deleted_inst_type = flavors.get_flavor_by_flavor_id( inst_type_flavor_id) self.assertEqual(inst_type_name, deleted_inst_type.name) def test_read_deleted_false_converting_flavorid(self): """Ensure deleted flavors are not returned when not needed (for example when creating a server and attempting to translate from flavorid to instance_type_id. 
""" flavors.create("instance_type1", 256, 1, 120, 100, "test1") flavors.destroy("instance_type1") flavors.create("instance_type1_redo", 256, 1, 120, 100, "test1") instance_type = flavors.get_flavor_by_flavor_id( "test1", read_deleted="no") self.assertEqual("instance_type1_redo", instance_type.name) def test_get_all_flavors_sorted_list_sort(self): # Test default sort all_flavors = flavors.get_all_flavors_sorted_list() self.assertEqual(len(DEFAULT_FLAVORS), len(all_flavors)) for i in range(len(all_flavors)): f = all_flavors[i] self.assertIsInstance(f, objects.Flavor) self.assertEqual(DEFAULT_FLAVORS[i]['flavorid'], f.flavorid) # Test sorted by name all_flavors = flavors.get_all_flavors_sorted_list(sort_key='name') expected = sorted(DEFAULT_FLAVORS, key=lambda item: item['name']) self.assertEqual(len(expected), len(all_flavors)) for i in range(len(all_flavors)): f = all_flavors[i] self.assertIsInstance(f, objects.Flavor) self.assertEqual(expected[i]['flavorid'], f.flavorid) def test_get_all_flavors_sorted_list_limit(self): limited_flavors = flavors.get_all_flavors_sorted_list(limit=2) self.assertEqual(2, len(limited_flavors)) def test_get_all_flavors_sorted_list_marker(self): all_flavors = flavors.get_all_flavors_sorted_list() # Set the 3rd result as the marker marker_flavorid = all_flavors[2].flavorid marked_flavors = flavors.get_all_flavors_sorted_list( marker=marker_flavorid) # We expect everything /after/ the 3rd result expected_results = all_flavors[3:] self.assertEqual(len(expected_results), len(marked_flavors)) for i in range(len(marked_flavors)): f = marked_flavors[i] self.assertIsInstance(f, objects.Flavor) self.assertEqual(expected_results[i].flavorid, f.flavorid) def test_get_inactive_flavors(self): flav1 = flavors.create('flavor1', 256, 1, 120) flav2 = flavors.create('flavor2', 512, 4, 250) flavors.destroy('flavor1') returned_flavors_ids = flavors.get_all_flavors().keys() self.assertNotIn(flav1.id, returned_flavors_ids) self.assertIn(flav2.id, 
returned_flavors_ids) returned_flavors_ids = flavors.get_all_flavors(inactive=True).keys() self.assertIn(flav1.id, returned_flavors_ids) self.assertIn(flav2.id, returned_flavors_ids) def test_get_inactive_flavors_with_same_name(self): flav1 = flavors.create('flavor', 256, 1, 120) flavors.destroy('flavor') flav2 = flavors.create('flavor', 512, 4, 250) returned_flavors_ids = flavors.get_all_flavors().keys() self.assertNotIn(flav1.id, returned_flavors_ids) self.assertIn(flav2.id, returned_flavors_ids) returned_flavors_ids = flavors.get_all_flavors(inactive=True).keys() self.assertIn(flav1.id, returned_flavors_ids) self.assertIn(flav2.id, returned_flavors_ids) def test_get_inactive_flavors_with_same_flavorid(self): flav1 = flavors.create('flavor', 256, 1, 120, 100, "flavid") flavors.destroy('flavor') flav2 = flavors.create('flavor', 512, 4, 250, 100, "flavid") returned_flavors_ids = flavors.get_all_flavors().keys() self.assertNotIn(flav1.id, returned_flavors_ids) self.assertIn(flav2.id, returned_flavors_ids) returned_flavors_ids = flavors.get_all_flavors(inactive=True).keys() self.assertIn(flav1.id, returned_flavors_ids) self.assertIn(flav2.id, returned_flavors_ids) class InstanceTypeToolsTest(test.TestCase): def _dict_to_metadata(self, data): return [{'key': key, 'value': value} for key, value in data.items()] def _test_extract_flavor(self, prefix): instance_type = flavors.get_default_flavor() instance_type_p = obj_base.obj_to_primitive(instance_type) metadata = {} flavors.save_flavor_info(metadata, instance_type, prefix) instance = {'system_metadata': self._dict_to_metadata(metadata)} _instance_type = flavors.extract_flavor(instance, prefix) _instance_type_p = obj_base.obj_to_primitive(_instance_type) props = flavors.system_metadata_flavor_props.keys() for key in instance_type_p.keys(): if key not in props: del instance_type_p[key] self.assertEqual(instance_type_p, _instance_type_p) def test_extract_flavor(self): self._test_extract_flavor('') def 
test_extract_flavor_no_sysmeta(self): instance = {} prefix = '' result = flavors.extract_flavor(instance, prefix) self.assertIsNone(result) def test_extract_flavor_prefix(self): self._test_extract_flavor('foo_') def test_save_flavor_info(self): instance_type = flavors.get_default_flavor() example = {} example_prefix = {} for key in flavors.system_metadata_flavor_props.keys(): example['instance_type_%s' % key] = instance_type[key] example_prefix['fooinstance_type_%s' % key] = instance_type[key] metadata = {} flavors.save_flavor_info(metadata, instance_type) self.assertEqual(example, metadata) metadata = {} flavors.save_flavor_info(metadata, instance_type, 'foo') self.assertEqual(example_prefix, metadata) def test_delete_flavor_info(self): instance_type = flavors.get_default_flavor() metadata = {} flavors.save_flavor_info(metadata, instance_type) flavors.save_flavor_info(metadata, instance_type, '_') flavors.delete_flavor_info(metadata, '', '_') self.assertEqual(metadata, {}) def test_flavor_numa_extras_are_saved(self): instance_type = flavors.get_default_flavor() instance_type['extra_specs'] = { 'hw:numa_mem.0': '123', 'hw:numa_cpus.0': '456', 'hw:numa_mem.1': '789', 'hw:numa_cpus.1': 'ABC', 'foo': 'bar', } sysmeta = flavors.save_flavor_info({}, instance_type) _instance_type = flavors.extract_flavor({'system_metadata': sysmeta}) expected_extra_specs = { 'hw:numa_mem.0': '123', 'hw:numa_cpus.0': '456', 'hw:numa_mem.1': '789', 'hw:numa_cpus.1': 'ABC', } self.assertEqual(expected_extra_specs, _instance_type['extra_specs']) flavors.delete_flavor_info(sysmeta, '') self.assertEqual({}, sysmeta) class InstanceTypeFilteringTest(test.TestCase): """Test cases for the filter option available for instance_type_get_all.""" def setUp(self): super(InstanceTypeFilteringTest, self).setUp() self.context = context.get_admin_context() def assertFilterResults(self, filters, expected): inst_types = db.flavor_get_all( self.context, filters=filters) inst_names = [i['name'] for i in 
inst_types] self.assertEqual(inst_names, expected) def test_no_filters(self): filters = None expected = ['m1.tiny', 'm1.small', 'm1.medium', 'm1.large', 'm1.xlarge'] self.assertFilterResults(filters, expected) def test_min_memory_mb_filter(self): # Exclude tiny instance which is 512 MB. filters = dict(min_memory_mb=513) expected = ['m1.small', 'm1.medium', 'm1.large', 'm1.xlarge'] self.assertFilterResults(filters, expected) def test_min_root_gb_filter(self): # Exclude everything but large and xlarge which have >= 80 GB. filters = dict(min_root_gb=80) expected = ['m1.large', 'm1.xlarge'] self.assertFilterResults(filters, expected) def test_min_memory_mb_AND_root_gb_filter(self): # Exclude everything but large and xlarge which have >= 80 GB. filters = dict(min_memory_mb=16384, min_root_gb=80) expected = ['m1.xlarge'] self.assertFilterResults(filters, expected) class CreateInstanceTypeTest(test.TestCase): def assertInvalidInput(self, *create_args, **create_kwargs): self.assertRaises(exception.InvalidInput, flavors.create, *create_args, **create_kwargs) def test_create_with_valid_name(self): # Names can contain alphanumeric and [_.- ] flavors.create('azAZ09. -_', 64, 1, 120) # And they are not limited to ascii characters # E.g.: m1.huge in simplified Chinese flavors.create(u'm1.\u5DE8\u5927', 6400, 100, 12000) def test_name_with_special_characters(self): # Names can contain all printable characters flavors.create('_foo.bar-123', 64, 1, 120) # Ensure instance types raises InvalidInput for invalid characters. self.assertInvalidInput('foobar\x00', 64, 1, 120) def test_name_with_non_printable_characters(self): # Names cannot contain printable characters self.assertInvalidInput(u'm1.\u0868 #', 64, 1, 120) def test_name_length_checks(self): MAX_LEN = 255 # Flavor name with 255 characters or less is valid. flavors.create('a' * MAX_LEN, 64, 1, 120) # Flavor name which is more than 255 characters will cause error. 
self.assertInvalidInput('a' * (MAX_LEN + 1), 64, 1, 120) # Flavor name which is empty should cause an error self.assertInvalidInput('', 64, 1, 120) def test_all_whitespace_flavor_names_rejected(self): self.assertInvalidInput(' ', 64, 1, 120) def test_flavorid_with_invalid_characters(self): # Ensure Flavor ID can only contain [a-zA-Z0-9_.- ] self.assertInvalidInput('a', 64, 1, 120, flavorid=u'\u2605') self.assertInvalidInput('a', 64, 1, 120, flavorid='%%$%$@#$#@$@#$^%') def test_flavorid_length_checks(self): MAX_LEN = 255 # Flavor ID which is more than 255 characters will cause error. self.assertInvalidInput('a', 64, 1, 120, flavorid='a' * (MAX_LEN + 1)) def test_memory_must_be_positive_db_integer(self): self.assertInvalidInput('flavor1', 'foo', 1, 120) self.assertInvalidInput('flavor1', -1, 1, 120) self.assertInvalidInput('flavor1', 0, 1, 120) self.assertInvalidInput('flavor1', db.MAX_INT + 1, 1, 120) flavors.create('flavor1', 1, 1, 120) def test_vcpus_must_be_positive_db_integer(self): self.assertInvalidInput('flavor`', 64, 'foo', 120) self.assertInvalidInput('flavor1', 64, -1, 120) self.assertInvalidInput('flavor1', 64, 0, 120) self.assertInvalidInput('flavor1', 64, db.MAX_INT + 1, 120) flavors.create('flavor1', 64, 1, 120) def test_root_gb_must_be_nonnegative_db_integer(self): self.assertInvalidInput('flavor1', 64, 1, 'foo') self.assertInvalidInput('flavor1', 64, 1, -1) self.assertInvalidInput('flavor1', 64, 1, db.MAX_INT + 1) flavors.create('flavor1', 64, 1, 0) flavors.create('flavor2', 64, 1, 120) def test_ephemeral_gb_must_be_nonnegative_db_integer(self): self.assertInvalidInput('flavor1', 64, 1, 120, ephemeral_gb='foo') self.assertInvalidInput('flavor1', 64, 1, 120, ephemeral_gb=-1) self.assertInvalidInput('flavor1', 64, 1, 120, ephemeral_gb=db.MAX_INT + 1) flavors.create('flavor1', 64, 1, 120, ephemeral_gb=0) flavors.create('flavor2', 64, 1, 120, ephemeral_gb=120) def test_swap_must_be_nonnegative_db_integer(self): self.assertInvalidInput('flavor1', 64, 1, 
120, swap='foo') self.assertInvalidInput('flavor1', 64, 1, 120, swap=-1) self.assertInvalidInput('flavor1', 64, 1, 120, swap=db.MAX_INT + 1) flavors.create('flavor1', 64, 1, 120, swap=0) flavors.create('flavor2', 64, 1, 120, swap=1) def test_rxtx_factor_must_be_positive_float(self): self.assertInvalidInput('flavor1', 64, 1, 120, rxtx_factor='foo') self.assertInvalidInput('flavor1', 64, 1, 120, rxtx_factor=-1.0) self.assertInvalidInput('flavor1', 64, 1, 120, rxtx_factor=0.0) flavor = flavors.create('flavor1', 64, 1, 120, rxtx_factor=1.0) self.assertEqual(1.0, flavor.rxtx_factor) flavor = flavors.create('flavor2', 64, 1, 120, rxtx_factor=1.1) self.assertEqual(1.1, flavor.rxtx_factor) def test_rxtx_factor_must_be_within_sql_float_range(self): _context = context.get_admin_context() db.flavor_get_all(_context) # We do * 10 since this is an approximation and we need to make sure # the difference is noticeble. over_rxtx_factor = flavors.SQL_SP_FLOAT_MAX * 10 self.assertInvalidInput('flavor1', 64, 1, 120, rxtx_factor=over_rxtx_factor) flavor = flavors.create('flavor2', 64, 1, 120, rxtx_factor=flavors.SQL_SP_FLOAT_MAX) self.assertEqual(flavors.SQL_SP_FLOAT_MAX, flavor.rxtx_factor) def test_is_public_must_be_valid_bool_string(self): self.assertInvalidInput('flavor1', 64, 1, 120, is_public='foo') flavors.create('flavor1', 64, 1, 120, is_public='TRUE') flavors.create('flavor2', 64, 1, 120, is_public='False') flavors.create('flavor3', 64, 1, 120, is_public='Yes') flavors.create('flavor4', 64, 1, 120, is_public='No') flavors.create('flavor5', 64, 1, 120, is_public='Y') flavors.create('flavor6', 64, 1, 120, is_public='N') flavors.create('flavor7', 64, 1, 120, is_public='1') flavors.create('flavor8', 64, 1, 120, is_public='0') flavors.create('flavor9', 64, 1, 120, is_public='true') def test_flavorid_populated(self): flavor1 = flavors.create('flavor1', 64, 1, 120) self.assertIsNot(None, flavor1.flavorid) flavor2 = flavors.create('flavor2', 64, 1, 120, flavorid='') 
self.assertIsNot(None, flavor2.flavorid) flavor3 = flavors.create('flavor3', 64, 1, 120, flavorid='foo') self.assertEqual('foo', flavor3.flavorid) def test_default_values(self): flavor1 = flavors.create('flavor1', 64, 1, 120) self.assertIsNot(None, flavor1.flavorid) self.assertEqual(flavor1.ephemeral_gb, 0) self.assertEqual(flavor1.swap, 0) self.assertEqual(flavor1.rxtx_factor, 1.0) def test_basic_create(self): # Ensure instance types can be created. original_list = flavors.get_all_flavors() # Create new type and make sure values stick flavor = flavors.create('flavor', 64, 1, 120) self.assertEqual(flavor.name, 'flavor') self.assertEqual(flavor.memory_mb, 64) self.assertEqual(flavor.vcpus, 1) self.assertEqual(flavor.root_gb, 120) # Ensure new type shows up in list new_list = flavors.get_all_flavors() self.assertNotEqual(len(original_list), len(new_list), 'flavor was not created') def test_create_then_delete(self): original_list = flavors.get_all_flavors() flavor = flavors.create('flavor', 64, 1, 120) # Ensure new type shows up in list new_list = flavors.get_all_flavors() self.assertNotEqual(len(original_list), len(new_list), 'instance type was not created') flavors.destroy('flavor') self.assertRaises(exception.FlavorNotFound, flavors.get_flavor, flavor.id) # Deleted instance should not be in list anymore new_list = flavors.get_all_flavors() self.assertEqual(len(original_list), len(new_list)) for k in original_list.keys(): f = original_list[k] self.assertIsInstance(f, objects.Flavor) self.assertEqual(f.flavorid, new_list[k].flavorid) def test_duplicate_names_fail(self): # Ensures that name duplicates raise FlavorCreateFailed. flavors.create('flavor', 256, 1, 120, 200, 'flavor1') self.assertRaises(exception.FlavorExists, flavors.create, 'flavor', 64, 1, 120) def test_duplicate_flavorids_fail(self): # Ensures that flavorid duplicates raise FlavorCreateFailed. 
flavors.create('flavor1', 64, 1, 120, flavorid='flavorid') self.assertRaises(exception.FlavorIdExists, flavors.create, 'flavor2', 64, 1, 120, flavorid='flavorid')
apache-2.0
N3MIS15/maraschino-webcam
lib/sqlalchemy/event.py
21
11430
# sqlalchemy/event.py # Copyright (C) 2005-2011 the SQLAlchemy authors and contributors <see AUTHORS file> # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """Base event API.""" from sqlalchemy import util, exc CANCEL = util.symbol('CANCEL') NO_RETVAL = util.symbol('NO_RETVAL') def listen(target, identifier, fn, *args, **kw): """Register a listener function for the given target. e.g.:: from sqlalchemy import event from sqlalchemy.schema import UniqueConstraint def unique_constraint_name(const, table): const.name = "uq_%s_%s" % ( table.name, list(const.columns)[0].name ) event.listen( UniqueConstraint, "after_parent_attach", unique_constraint_name) """ for evt_cls in _registrars[identifier]: tgt = evt_cls._accept_with(target) if tgt is not None: tgt.dispatch._listen(tgt, identifier, fn, *args, **kw) return raise exc.InvalidRequestError("No such event '%s' for target '%s'" % (identifier,target)) def listens_for(target, identifier, *args, **kw): """Decorate a function as a listener for the given target + identifier. e.g.:: from sqlalchemy import event from sqlalchemy.schema import UniqueConstraint @event.listens_for(UniqueConstraint, "after_parent_attach") def unique_constraint_name(const, table): const.name = "uq_%s_%s" % ( table.name, list(const.columns)[0].name ) """ def decorate(fn): listen(target, identifier, fn, *args, **kw) return fn return decorate def remove(target, identifier, fn): """Remove an event listener. Note that some event removals, particularly for those event dispatchers which create wrapper functions and secondary even listeners, may not yet be supported. 
""" for evt_cls in _registrars[identifier]: for tgt in evt_cls._accept_with(target): tgt.dispatch._remove(identifier, tgt, fn, *args, **kw) return _registrars = util.defaultdict(list) def _is_event_name(name): return not name.startswith('_') and name != 'dispatch' class _UnpickleDispatch(object): """Serializable callable that re-generates an instance of :class:`_Dispatch` given a particular :class:`.Events` subclass. """ def __call__(self, _parent_cls): for cls in _parent_cls.__mro__: if 'dispatch' in cls.__dict__: return cls.__dict__['dispatch'].dispatch_cls(_parent_cls) else: raise AttributeError("No class with a 'dispatch' member present.") class _Dispatch(object): """Mirror the event listening definitions of an Events class with listener collections. Classes which define a "dispatch" member will return a non-instantiated :class:`._Dispatch` subclass when the member is accessed at the class level. When the "dispatch" member is accessed at the instance level of its owner, an instance of the :class:`._Dispatch` class is returned. A :class:`._Dispatch` class is generated for each :class:`.Events` class defined, by the :func:`._create_dispatcher_class` function. The original :class:`.Events` classes remain untouched. This decouples the construction of :class:`.Events` subclasses from the implementation used by the event internals, and allows inspecting tools like Sphinx to work in an unsurprising way against the public API. 
""" def __init__(self, _parent_cls): self._parent_cls = _parent_cls def __reduce__(self): return _UnpickleDispatch(), (self._parent_cls, ) def _update(self, other, only_propagate=True): """Populate from the listeners in another :class:`_Dispatch` object.""" for ls in _event_descriptors(other): getattr(self, ls.name)._update(ls, only_propagate=only_propagate) def _event_descriptors(target): return [getattr(target, k) for k in dir(target) if _is_event_name(k)] class _EventMeta(type): """Intercept new Event subclasses and create associated _Dispatch classes.""" def __init__(cls, classname, bases, dict_): _create_dispatcher_class(cls, classname, bases, dict_) return type.__init__(cls, classname, bases, dict_) def _create_dispatcher_class(cls, classname, bases, dict_): """Create a :class:`._Dispatch` class corresponding to an :class:`.Events` class.""" # there's all kinds of ways to do this, # i.e. make a Dispatch class that shares the '_listen' method # of the Event class, this is the straight monkeypatch. dispatch_base = getattr(cls, 'dispatch', _Dispatch) cls.dispatch = dispatch_cls = type("%sDispatch" % classname, (dispatch_base, ), {}) dispatch_cls._listen = cls._listen dispatch_cls._clear = cls._clear for k in dict_: if _is_event_name(k): setattr(dispatch_cls, k, _DispatchDescriptor(dict_[k])) _registrars[k].append(cls) def _remove_dispatcher(cls): for k in dir(cls): if _is_event_name(k): _registrars[k].remove(cls) if not _registrars[k]: del _registrars[k] class Events(object): """Define event listening functions for a particular target type.""" __metaclass__ = _EventMeta @classmethod def _accept_with(cls, target): # Mapper, ClassManager, Session override this to # also accept classes, scoped_sessions, sessionmakers, etc. 
if hasattr(target, 'dispatch') and ( isinstance(target.dispatch, cls.dispatch) or \ isinstance(target.dispatch, type) and \ issubclass(target.dispatch, cls.dispatch) ): return target else: return None @classmethod def _listen(cls, target, identifier, fn, propagate=False, insert=False): if insert: getattr(target.dispatch, identifier).insert(fn, target, propagate) else: getattr(target.dispatch, identifier).append(fn, target, propagate) @classmethod def _remove(cls, target, identifier, fn): getattr(target.dispatch, identifier).remove(fn, target) @classmethod def _clear(cls): for attr in dir(cls.dispatch): if _is_event_name(attr): getattr(cls.dispatch, attr).clear() class _DispatchDescriptor(object): """Class-level attributes on :class:`._Dispatch` classes.""" def __init__(self, fn): self.__name__ = fn.__name__ self.__doc__ = fn.__doc__ self._clslevel = util.defaultdict(list) def insert(self, obj, target, propagate): assert isinstance(target, type), \ "Class-level Event targets must be classes." stack = [target] while stack: cls = stack.pop(0) stack.extend(cls.__subclasses__()) self._clslevel[cls].insert(0, obj) def append(self, obj, target, propagate): assert isinstance(target, type), \ "Class-level Event targets must be classes." stack = [target] while stack: cls = stack.pop(0) stack.extend(cls.__subclasses__()) self._clslevel[cls].append(obj) def remove(self, obj, target): stack = [target] while stack: cls = stack.pop(0) stack.extend(cls.__subclasses__()) self._clslevel[cls].remove(obj) def clear(self): """Clear all class level listeners""" for dispatcher in self._clslevel.values(): dispatcher[:] = [] def __get__(self, obj, cls): if obj is None: return self obj.__dict__[self.__name__] = result = \ _ListenerCollection(self, obj._parent_cls) return result class _ListenerCollection(object): """Instance-level attributes on instances of :class:`._Dispatch`. Represents a collection of listeners. 
""" _exec_once = False def __init__(self, parent, target_cls): self.parent_listeners = parent._clslevel[target_cls] self.name = parent.__name__ self.listeners = [] self.propagate = set() def exec_once(self, *args, **kw): """Execute this event, but only if it has not been executed already for this collection.""" if not self._exec_once: self(*args, **kw) self._exec_once = True def __call__(self, *args, **kw): """Execute this event.""" for fn in self.parent_listeners: fn(*args, **kw) for fn in self.listeners: fn(*args, **kw) # I'm not entirely thrilled about the overhead here, # but this allows class-level listeners to be added # at any point. # # alternatively, _DispatchDescriptor could notify # all _ListenerCollection objects, but then we move # to a higher memory model, i.e.weakrefs to all _ListenerCollection # objects, the _DispatchDescriptor collection repeated # for all instances. def __len__(self): return len(self.parent_listeners + self.listeners) def __iter__(self): return iter(self.parent_listeners + self.listeners) def __getitem__(self, index): return (self.parent_listeners + self.listeners)[index] def __nonzero__(self): return bool(self.listeners or self.parent_listeners) def _update(self, other, only_propagate=True): """Populate from the listeners in another :class:`_Dispatch` object.""" existing_listeners = self.listeners existing_listener_set = set(existing_listeners) self.propagate.update(other.propagate) existing_listeners.extend([l for l in other.listeners if l not in existing_listener_set and not only_propagate or l in self.propagate ]) def insert(self, obj, target, propagate): if obj not in self.listeners: self.listeners.insert(0, obj) if propagate: self.propagate.add(obj) def append(self, obj, target, propagate): if obj not in self.listeners: self.listeners.append(obj) if propagate: self.propagate.add(obj) def remove(self, obj, target): if obj in self.listeners: self.listeners.remove(obj) self.propagate.discard(obj) def clear(self): 
self.listeners[:] = [] self.propagate.clear() class dispatcher(object): """Descriptor used by target classes to deliver the _Dispatch class at the class level and produce new _Dispatch instances for target instances. """ def __init__(self, events): self.dispatch_cls = events.dispatch self.events = events def __get__(self, obj, cls): if obj is None: return self.dispatch_cls obj.__dict__['dispatch'] = disp = self.dispatch_cls(cls) return disp
mit
atuljain/odoo
addons/hr_recruitment/report/hr_recruitment_report.py
56
5161
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## from openerp import tools from openerp.osv import fields, osv from .. 
import hr_recruitment from openerp.addons.decimal_precision import decimal_precision as dp class hr_recruitment_report(osv.Model): _name = "hr.recruitment.report" _description = "Recruitments Statistics" _auto = False _rec_name = 'date_create' _order = 'date_create desc' _columns = { 'user_id': fields.many2one('res.users', 'User', readonly=True), 'company_id': fields.many2one('res.company', 'Company', readonly=True), 'date_create': fields.datetime('Create Date', readonly=True), 'date_last_stage_update': fields.datetime('Last Stage Update', readonly=True), 'date_closed': fields.date('Closed', readonly=True), 'job_id': fields.many2one('hr.job', 'Applied Job',readonly=True), 'stage_id': fields.many2one ('hr.recruitment.stage', 'Stage'), 'type_id': fields.many2one('hr.recruitment.degree', 'Degree'), 'department_id': fields.many2one('hr.department','Department',readonly=True), 'priority': fields.selection(hr_recruitment.AVAILABLE_PRIORITIES, 'Appreciation'), 'salary_prop' : fields.float("Salary Proposed", digits_compute=dp.get_precision('Account')), 'salary_prop_avg' : fields.float("Avg. Proposed Salary", group_operator="avg", digits_compute=dp.get_precision('Account')), 'salary_exp' : fields.float("Salary Expected", digits_compute=dp.get_precision('Account')), 'salary_exp_avg' : fields.float("Avg. Expected Salary", group_operator="avg", digits_compute=dp.get_precision('Account')), 'partner_id': fields.many2one('res.partner', 'Partner',readonly=True), 'available': fields.float("Availability"), 'delay_close': fields.float('Avg. 
Delay to Close', digits=(16,2), readonly=True, group_operator="avg", help="Number of Days to close the project issue"), 'last_stage_id': fields.many2one ('hr.recruitment.stage', 'Last Stage'), } def init(self, cr): tools.drop_view_if_exists(cr, 'hr_recruitment_report') cr.execute(""" create or replace view hr_recruitment_report as ( select min(s.id) as id, s.create_date as date_create, date_trunc('day',s.date_closed) as date_closed, date_trunc('day',s.date_last_stage_update) as date_last_stage_update, to_char(s.create_date, 'YYYY') as year, to_char(s.create_date, 'MM') as month, to_char(s.create_date, 'YYYY-MM-DD') as day, s.partner_id, s.company_id, s.user_id, s.job_id, s.type_id, sum(s.availability) as available, s.department_id, s.priority, s.stage_id, s.last_stage_id, sum(salary_proposed) as salary_prop, (sum(salary_proposed)/count(*)) as salary_prop_avg, sum(salary_expected) as salary_exp, (sum(salary_expected)/count(*)) as salary_exp_avg, extract('epoch' from (s.write_date-s.create_date))/(3600*24) as delay_close, count(*) as nbr from hr_applicant s group by date_trunc('day',s.create_date), date_trunc('day',s.date_closed), s.date_open, s.create_date, s.write_date, s.date_closed, s.date_last_stage_update, s.partner_id, s.company_id, s.user_id, s.stage_id, s.last_stage_id, s.type_id, s.priority, s.job_id, s.department_id ) """) # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
ZeitOnline/zeit.content.article
src/zeit/content/article/edit/browser/tests/test_rawxml.py
1
1493
import zeit.content.article.edit.browser.testing class Form(zeit.content.article.edit.browser.testing.BrowserTestCase): block_type = 'raw' def test_inline_form_saves_values(self): self.get_article(with_empty_block=True) b = self.browser b.open('editable-body/blockname/@@edit-rawxml?show_form=1') b.getControl('XML source').value = """\ <raw xmlns:ns0="http://namespaces.zeit.de/CMS/cp" ns0:__name__="blockname"> <foo> </foo> </raw> """ b.getControl('Apply').click() b.open('@@edit-rawxml?show_form=1') self.assertEllipsis("""\ <raw...xmlns:ns0="http://namespaces.zeit.de/CMS/cp"...ns0:__name__="blockname"...> <foo> </foo> </raw> """, b.getControl('XML source').value) def test_xml_is_validated_root_must_be_raw_element(self): self.get_article(with_empty_block=True) b = self.browser b.open('editable-body/blockname/@@edit-rawxml?show_form=1') b.getControl('XML source').value = '<foo />' b.getControl('Apply').click() self.assertIn( '<span class="error">The root element must be &lt;raw&gt;.</span>', b.contents) class FormLoader(zeit.content.article.edit.browser.testing.EditorTestCase): def test_rawxml_form_is_loaded(self): s = self.selenium self.add_article() self.create_block('raw') s.assertElementPresent('css=.block.type-raw .inline-form ' '.field.fieldname-xml')
bsd-3-clause
JohnAbel/StochKit
libs/boost_1_53_0/tools/build/v2/tools/pch.py
61
2850
# Status: Being ported by Steven Watanabe # Base revision: 47077 # # Copyright (c) 2005 Reece H. Dunn. # Copyright 2006 Ilya Sokolov # Copyright (c) 2008 Steven Watanabe # # Use, modification and distribution is subject to the Boost Software # License Version 1.0. (See accompanying file LICENSE_1_0.txt or # http://www.boost.org/LICENSE_1_0.txt) ##### Using Precompiled Headers (Quick Guide) ##### # # Make precompiled mypch.hpp: # # import pch ; # # cpp-pch mypch # : # sources # mypch.hpp # : # requiremnts # <toolset>msvc:<source>mypch.cpp # ; # # Add cpp-pch to sources: # # exe hello # : main.cpp hello.cpp mypch # ; from b2.build import type, feature, generators from b2.tools import builtin type.register('PCH', ['pch']) type.register('C_PCH', [], 'PCH') type.register('CPP_PCH', [], 'PCH') # Control precompiled header (PCH) generation. feature.feature('pch', ['on', 'off'], ['propagated']) feature.feature('pch-header', [], ['free', 'dependency']) feature.feature('pch-file', [], ['free', 'dependency']) class PchGenerator(generators.Generator): """ Base PCH generator. The 'run' method has the logic to prevent this generator from being run unless it's being used for a top-level PCH target. """ def action_class(self): return builtin.CompileAction def run(self, project, name, prop_set, sources): if not name: # Unless this generator is invoked as the top-most generator for a # main target, fail. This allows using 'H' type as input type for # this generator, while preventing Boost.Build to try this generator # when not explicitly asked for. # # One bad example is msvc, where pch generator produces both PCH # target and OBJ target, so if there's any header generated (like by # bison, or by msidl), we'd try to use pch generator to get OBJ from # that H, which is completely wrong. By restricting this generator # only to pch main target, such problem is solved. 
pass else: r = self.run_pch(project, name, prop_set.add_raw(['<define>BOOST_BUILD_PCH_ENABLED']), sources) return generators.add_usage_requirements( r, ['<define>BOOST_BUILD_PCH_ENABLED']) # This rule must be overridden by the derived classes. def run_pch(self, project, name, prop_set, sources): pass # NOTE: requirements are empty, default pch generator can be applied when # pch=off. generators.register(builtin.DummyGenerator( "pch.default-c-pch-generator", False, [], ['C_PCH'], [])) generators.register(builtin.DummyGenerator( "pch.default-cpp-pch-generator", False, [], ['CPP_PCH'], []))
gpl-3.0
amith01994/intellij-community
python/lib/Lib/site-packages/django/contrib/admin/models.py
228
2207
from django.db import models from django.contrib.contenttypes.models import ContentType from django.contrib.auth.models import User from django.contrib.admin.util import quote from django.utils.translation import ugettext_lazy as _ from django.utils.encoding import smart_unicode from django.utils.safestring import mark_safe ADDITION = 1 CHANGE = 2 DELETION = 3 class LogEntryManager(models.Manager): def log_action(self, user_id, content_type_id, object_id, object_repr, action_flag, change_message=''): e = self.model(None, None, user_id, content_type_id, smart_unicode(object_id), object_repr[:200], action_flag, change_message) e.save() class LogEntry(models.Model): action_time = models.DateTimeField(_('action time'), auto_now=True) user = models.ForeignKey(User) content_type = models.ForeignKey(ContentType, blank=True, null=True) object_id = models.TextField(_('object id'), blank=True, null=True) object_repr = models.CharField(_('object repr'), max_length=200) action_flag = models.PositiveSmallIntegerField(_('action flag')) change_message = models.TextField(_('change message'), blank=True) objects = LogEntryManager() class Meta: verbose_name = _('log entry') verbose_name_plural = _('log entries') db_table = 'django_admin_log' ordering = ('-action_time',) def __repr__(self): return smart_unicode(self.action_time) def is_addition(self): return self.action_flag == ADDITION def is_change(self): return self.action_flag == CHANGE def is_deletion(self): return self.action_flag == DELETION def get_edited_object(self): "Returns the edited object represented by this log entry" return self.content_type.get_object_for_this_type(pk=self.object_id) def get_admin_url(self): """ Returns the admin URL to edit the object represented by this log entry. This is relative to the Django admin index page. """ if self.content_type and self.object_id: return mark_safe(u"%s/%s/%s/" % (self.content_type.app_label, self.content_type.model, quote(self.object_id))) return None
apache-2.0
shrimpboyho/git.js
emscript/python/2.7.5.1_32bit/Tools/Scripts/classfix.py
96
5953
#! /usr/bin/env python # This script is obsolete -- it is kept for historical purposes only. # # Fix Python source files to use the new class definition syntax, i.e., # the syntax used in Python versions before 0.9.8: # class C() = base(), base(), ...: ... # is changed to the current syntax: # class C(base, base, ...): ... # # The script uses heuristics to find class definitions that usually # work but occasionally can fail; carefully check the output! # # Command line arguments are files or directories to be processed. # Directories are searched recursively for files whose name looks # like a python module. # Symbolic links are always ignored (except as explicit directory # arguments). Of course, the original file is kept as a back-up # (with a "~" attached to its name). # # Changes made are reported to stdout in a diff-like format. # # Undoubtedly you can do this using find and sed or perl, but this is # a nice example of Python code that recurses down a directory tree # and uses regular expressions. Also note several subtleties like # preserving the file's mode and avoiding to even write a temp file # when no changes are needed for a file. # # NB: by changing only the function fixline() you can turn this # into a program for a different change to Python programs... 
import sys import re import os from stat import * err = sys.stderr.write dbg = err rep = sys.stdout.write def main(): bad = 0 if not sys.argv[1:]: # No arguments err('usage: ' + sys.argv[0] + ' file-or-directory ...\n') sys.exit(2) for arg in sys.argv[1:]: if os.path.isdir(arg): if recursedown(arg): bad = 1 elif os.path.islink(arg): err(arg + ': will not process symbolic links\n') bad = 1 else: if fix(arg): bad = 1 sys.exit(bad) ispythonprog = re.compile('^[a-zA-Z0-9_]+\.py$') def ispython(name): return ispythonprog.match(name) >= 0 def recursedown(dirname): dbg('recursedown(%r)\n' % (dirname,)) bad = 0 try: names = os.listdir(dirname) except os.error, msg: err('%s: cannot list directory: %r\n' % (dirname, msg)) return 1 names.sort() subdirs = [] for name in names: if name in (os.curdir, os.pardir): continue fullname = os.path.join(dirname, name) if os.path.islink(fullname): pass elif os.path.isdir(fullname): subdirs.append(fullname) elif ispython(name): if fix(fullname): bad = 1 for fullname in subdirs: if recursedown(fullname): bad = 1 return bad def fix(filename): ## dbg('fix(%r)\n' % (filename,)) try: f = open(filename, 'r') except IOError, msg: err('%s: cannot open: %r\n' % (filename, msg)) return 1 head, tail = os.path.split(filename) tempname = os.path.join(head, '@' + tail) g = None # If we find a match, we rewind the file and start over but # now copy everything to a temp file. 
lineno = 0 while 1: line = f.readline() if not line: break lineno = lineno + 1 while line[-2:] == '\\\n': nextline = f.readline() if not nextline: break line = line + nextline lineno = lineno + 1 newline = fixline(line) if newline != line: if g is None: try: g = open(tempname, 'w') except IOError, msg: f.close() err('%s: cannot create: %r\n' % (tempname, msg)) return 1 f.seek(0) lineno = 0 rep(filename + ':\n') continue # restart from the beginning rep(repr(lineno) + '\n') rep('< ' + line) rep('> ' + newline) if g is not None: g.write(newline) # End of file f.close() if not g: return 0 # No changes # Finishing touch -- move files # First copy the file's mode to the temp file try: statbuf = os.stat(filename) os.chmod(tempname, statbuf[ST_MODE] & 07777) except os.error, msg: err('%s: warning: chmod failed (%r)\n' % (tempname, msg)) # Then make a backup of the original file as filename~ try: os.rename(filename, filename + '~') except os.error, msg: err('%s: warning: backup failed (%r)\n' % (filename, msg)) # Now move the temp file to the original file try: os.rename(tempname, filename) except os.error, msg: err('%s: rename failed (%r)\n' % (filename, msg)) return 1 # Return succes return 0 # This expression doesn't catch *all* class definition headers, # but it's pretty darn close. classexpr = '^([ \t]*class +[a-zA-Z0-9_]+) *( *) *((=.*)?):' classprog = re.compile(classexpr) # Expressions for finding base class expressions. 
baseexpr = '^ *(.*) *( *) *$' baseprog = re.compile(baseexpr) def fixline(line): if classprog.match(line) < 0: # No 'class' keyword -- no change return line (a0, b0), (a1, b1), (a2, b2) = classprog.regs[:3] # a0, b0 = Whole match (up to ':') # a1, b1 = First subexpression (up to classname) # a2, b2 = Second subexpression (=.*) head = line[:b1] tail = line[b0:] # Unmatched rest of line if a2 == b2: # No base classes -- easy case return head + ':' + tail # Get rid of leading '=' basepart = line[a2+1:b2] # Extract list of base expressions bases = basepart.split(',') # Strip trailing '()' from each base expression for i in range(len(bases)): if baseprog.match(bases[i]) >= 0: x1, y1 = baseprog.regs[1] bases[i] = bases[i][x1:y1] # Join the bases back again and build the new line basepart = ', '.join(bases) return head + '(' + basepart + '):' + tail if __name__ == '__main__': main()
gpl-2.0
kostoulhs/android_kernel_samsung_expressltexx
tools/perf/scripts/python/syscall-counts-by-pid.py
11180
1927
# system call counts, by pid # (c) 2010, Tom Zanussi <tzanussi@gmail.com> # Licensed under the terms of the GNU GPL License version 2 # # Displays system-wide system call totals, broken down by syscall. # If a [comm] arg is specified, only syscalls called by [comm] are displayed. import os, sys sys.path.append(os.environ['PERF_EXEC_PATH'] + \ '/scripts/python/Perf-Trace-Util/lib/Perf/Trace') from perf_trace_context import * from Core import * from Util import syscall_name usage = "perf script -s syscall-counts-by-pid.py [comm]\n"; for_comm = None for_pid = None if len(sys.argv) > 2: sys.exit(usage) if len(sys.argv) > 1: try: for_pid = int(sys.argv[1]) except: for_comm = sys.argv[1] syscalls = autodict() def trace_begin(): print "Press control+C to stop and show the summary" def trace_end(): print_syscall_totals() def raw_syscalls__sys_enter(event_name, context, common_cpu, common_secs, common_nsecs, common_pid, common_comm, id, args): if (for_comm and common_comm != for_comm) or \ (for_pid and common_pid != for_pid ): return try: syscalls[common_comm][common_pid][id] += 1 except TypeError: syscalls[common_comm][common_pid][id] = 1 def print_syscall_totals(): if for_comm is not None: print "\nsyscall events for %s:\n\n" % (for_comm), else: print "\nsyscall events by comm/pid:\n\n", print "%-40s %10s\n" % ("comm [pid]/syscalls", "count"), print "%-40s %10s\n" % ("----------------------------------------", \ "----------"), comm_keys = syscalls.keys() for comm in comm_keys: pid_keys = syscalls[comm].keys() for pid in pid_keys: print "\n%s [%d]\n" % (comm, pid), id_keys = syscalls[comm][pid].keys() for id, val in sorted(syscalls[comm][pid].iteritems(), \ key = lambda(k, v): (v, k), reverse = True): print " %-38s %10d\n" % (syscall_name(id), val),
gpl-2.0
TathagataChakraborti/resource-conflicts
PLANROB-2015/seq-sat-lama/py2.5/lib/python2.5/tokenize.py
4
13554
"""Tokenization help for Python programs. generate_tokens(readline) is a generator that breaks a stream of text into Python tokens. It accepts a readline-like method which is called repeatedly to get the next line of input (or "" for EOF). It generates 5-tuples with these members: the token type (see token.py) the token (a string) the starting (row, column) indices of the token (a 2-tuple of ints) the ending (row, column) indices of the token (a 2-tuple of ints) the original line (string) It is designed to match the working of the Python tokenizer exactly, except that it produces COMMENT tokens for comments and gives type OP for all operators Older entry points tokenize_loop(readline, tokeneater) tokenize(readline, tokeneater=printtoken) are the same, except instead of generating tokens, tokeneater is a callback function to which the 5 fields described above are passed as 5 arguments, each time a new token is found.""" __author__ = 'Ka-Ping Yee <ping@lfw.org>' __credits__ = \ 'GvR, ESR, Tim Peters, Thomas Wouters, Fred Drake, Skip Montanaro' import string, re from token import * import token __all__ = [x for x in dir(token) if x[0] != '_'] + ["COMMENT", "tokenize", "generate_tokens", "NL", "untokenize"] del x del token COMMENT = N_TOKENS tok_name[COMMENT] = 'COMMENT' NL = N_TOKENS + 1 tok_name[NL] = 'NL' N_TOKENS += 2 def group(*choices): return '(' + '|'.join(choices) + ')' def any(*choices): return group(*choices) + '*' def maybe(*choices): return group(*choices) + '?' Whitespace = r'[ \f\t]*' Comment = r'#[^\r\n]*' Ignore = Whitespace + any(r'\\\r?\n' + Whitespace) + maybe(Comment) Name = r'[a-zA-Z_]\w*' Hexnumber = r'0[xX][\da-fA-F]*[lL]?' Octnumber = r'0[0-7]*[lL]?' Decnumber = r'[1-9]\d*[lL]?' 
Intnumber = group(Hexnumber, Octnumber, Decnumber) Exponent = r'[eE][-+]?\d+' Pointfloat = group(r'\d+\.\d*', r'\.\d+') + maybe(Exponent) Expfloat = r'\d+' + Exponent Floatnumber = group(Pointfloat, Expfloat) Imagnumber = group(r'\d+[jJ]', Floatnumber + r'[jJ]') Number = group(Imagnumber, Floatnumber, Intnumber) # Tail end of ' string. Single = r"[^'\\]*(?:\\.[^'\\]*)*'" # Tail end of " string. Double = r'[^"\\]*(?:\\.[^"\\]*)*"' # Tail end of ''' string. Single3 = r"[^'\\]*(?:(?:\\.|'(?!''))[^'\\]*)*'''" # Tail end of """ string. Double3 = r'[^"\\]*(?:(?:\\.|"(?!""))[^"\\]*)*"""' Triple = group("[uU]?[rR]?'''", '[uU]?[rR]?"""') # Single-line ' or " string. String = group(r"[uU]?[rR]?'[^\n'\\]*(?:\\.[^\n'\\]*)*'", r'[uU]?[rR]?"[^\n"\\]*(?:\\.[^\n"\\]*)*"') # Because of leftmost-then-longest match semantics, be sure to put the # longest operators first (e.g., if = came before ==, == would get # recognized as two instances of =). Operator = group(r"\*\*=?", r">>=?", r"<<=?", r"<>", r"!=", r"//=?", r"[+\-*/%&|^=<>]=?", r"~") Bracket = '[][(){}]' Special = group(r'\r?\n', r'[:;.,`@]') Funny = group(Operator, Bracket, Special) PlainToken = group(Number, Funny, String, Name) Token = Ignore + PlainToken # First (or only) line of ' or " string. 
ContStr = group(r"[uU]?[rR]?'[^\n'\\]*(?:\\.[^\n'\\]*)*" + group("'", r'\\\r?\n'), r'[uU]?[rR]?"[^\n"\\]*(?:\\.[^\n"\\]*)*' + group('"', r'\\\r?\n')) PseudoExtras = group(r'\\\r?\n', Comment, Triple) PseudoToken = Whitespace + group(PseudoExtras, Number, Funny, ContStr, Name) tokenprog, pseudoprog, single3prog, double3prog = map( re.compile, (Token, PseudoToken, Single3, Double3)) endprogs = {"'": re.compile(Single), '"': re.compile(Double), "'''": single3prog, '"""': double3prog, "r'''": single3prog, 'r"""': double3prog, "u'''": single3prog, 'u"""': double3prog, "ur'''": single3prog, 'ur"""': double3prog, "R'''": single3prog, 'R"""': double3prog, "U'''": single3prog, 'U"""': double3prog, "uR'''": single3prog, 'uR"""': double3prog, "Ur'''": single3prog, 'Ur"""': double3prog, "UR'''": single3prog, 'UR"""': double3prog, 'r': None, 'R': None, 'u': None, 'U': None} triple_quoted = {} for t in ("'''", '"""', "r'''", 'r"""', "R'''", 'R"""', "u'''", 'u"""', "U'''", 'U"""', "ur'''", 'ur"""', "Ur'''", 'Ur"""', "uR'''", 'uR"""', "UR'''", 'UR"""'): triple_quoted[t] = t single_quoted = {} for t in ("'", '"', "r'", 'r"', "R'", 'R"', "u'", 'u"', "U'", 'U"', "ur'", 'ur"', "Ur'", 'Ur"', "uR'", 'uR"', "UR'", 'UR"' ): single_quoted[t] = t tabsize = 8 class TokenError(Exception): pass class StopTokenizing(Exception): pass def printtoken(type, token, (srow, scol), (erow, ecol), line): # for testing print "%d,%d-%d,%d:\t%s\t%s" % \ (srow, scol, erow, ecol, tok_name[type], repr(token)) def tokenize(readline, tokeneater=printtoken): """ The tokenize() function accepts two parameters: one representing the input stream, and one providing an output mechanism for tokenize(). The first parameter, readline, must be a callable object which provides the same interface as the readline() method of built-in file objects. Each call to the function should return one line of input as a string. The second parameter, tokeneater, must also be a callable object. 
It is called once for each token, with five arguments, corresponding to the tuples generated by generate_tokens(). """ try: tokenize_loop(readline, tokeneater) except StopTokenizing: pass # backwards compatible interface def tokenize_loop(readline, tokeneater): for token_info in generate_tokens(readline): tokeneater(*token_info) def untokenize(iterable): """Transform tokens back into Python source code. Each element returned by the iterable must be a token sequence with at least two elements, a token number and token value. Round-trip invariant: # Output text will tokenize the back to the input t1 = [tok[:2] for tok in generate_tokens(f.readline)] newcode = untokenize(t1) readline = iter(newcode.splitlines(1)).next t2 = [tok[:2] for tokin generate_tokens(readline)] assert t1 == t2 """ startline = False indents = [] toks = [] toks_append = toks.append for tok in iterable: toknum, tokval = tok[:2] if toknum in (NAME, NUMBER): tokval += ' ' if toknum == INDENT: indents.append(tokval) continue elif toknum == DEDENT: indents.pop() continue elif toknum in (NEWLINE, COMMENT, NL): startline = True elif startline and indents: toks_append(indents[-1]) startline = False toks_append(tokval) return ''.join(toks) def generate_tokens(readline): """ The generate_tokens() generator requires one argment, readline, which must be a callable object which provides the same interface as the readline() method of built-in file objects. Each call to the function should return one line of input as a string. Alternately, readline can be a callable function terminating with StopIteration: readline = open(myfile).next # Example of alternate readline The generator produces 5-tuples with these members: the token type; the token string; a 2-tuple (srow, scol) of ints specifying the row and column where the token begins in the source; a 2-tuple (erow, ecol) of ints specifying the row and column where the token ends in the source; and the line on which the token was found. 
The line passed is the logical line; continuation lines are included. """ lnum = parenlev = continued = 0 namechars, numchars = string.ascii_letters + '_', '0123456789' contstr, needcont = '', 0 contline = None indents = [0] while 1: # loop over lines in stream try: line = readline() except StopIteration: line = '' lnum = lnum + 1 pos, max = 0, len(line) if contstr: # continued string if not line: raise TokenError, ("EOF in multi-line string", strstart) endmatch = endprog.match(line) if endmatch: pos = end = endmatch.end(0) yield (STRING, contstr + line[:end], strstart, (lnum, end), contline + line) contstr, needcont = '', 0 contline = None elif needcont and line[-2:] != '\\\n' and line[-3:] != '\\\r\n': yield (ERRORTOKEN, contstr + line, strstart, (lnum, len(line)), contline) contstr = '' contline = None continue else: contstr = contstr + line contline = contline + line continue elif parenlev == 0 and not continued: # new statement if not line: break column = 0 while pos < max: # measure leading whitespace if line[pos] == ' ': column = column + 1 elif line[pos] == '\t': column = (column/tabsize + 1)*tabsize elif line[pos] == '\f': column = 0 else: break pos = pos + 1 if pos == max: break if line[pos] in '#\r\n': # skip comments or blank lines yield ((NL, COMMENT)[line[pos] == '#'], line[pos:], (lnum, pos), (lnum, len(line)), line) continue if column > indents[-1]: # count indents or dedents indents.append(column) yield (INDENT, line[:pos], (lnum, 0), (lnum, pos), line) while column < indents[-1]: if column not in indents: raise IndentationError( "unindent does not match any outer indentation level", ("<tokenize>", lnum, pos, line)) indents = indents[:-1] yield (DEDENT, '', (lnum, pos), (lnum, pos), line) else: # continued statement if not line: raise TokenError, ("EOF in multi-line statement", (lnum, 0)) continued = 0 while pos < max: pseudomatch = pseudoprog.match(line, pos) if pseudomatch: # scan for tokens start, end = pseudomatch.span(1) spos, epos, pos = 
(lnum, start), (lnum, end), end token, initial = line[start:end], line[start] if initial in numchars or \ (initial == '.' and token != '.'): # ordinary number yield (NUMBER, token, spos, epos, line) elif initial in '\r\n': yield (parenlev > 0 and NL or NEWLINE, token, spos, epos, line) elif initial == '#': yield (COMMENT, token, spos, epos, line) elif token in triple_quoted: endprog = endprogs[token] endmatch = endprog.match(line, pos) if endmatch: # all on one line pos = endmatch.end(0) token = line[start:pos] yield (STRING, token, spos, (lnum, pos), line) else: strstart = (lnum, start) # multiple lines contstr = line[start:] contline = line break elif initial in single_quoted or \ token[:2] in single_quoted or \ token[:3] in single_quoted: if token[-1] == '\n': # continued string strstart = (lnum, start) endprog = (endprogs[initial] or endprogs[token[1]] or endprogs[token[2]]) contstr, needcont = line[start:], 1 contline = line break else: # ordinary string yield (STRING, token, spos, epos, line) elif initial in namechars: # ordinary name yield (NAME, token, spos, epos, line) elif initial == '\\': # continued stmt continued = 1 else: if initial in '([{': parenlev = parenlev + 1 elif initial in ')]}': parenlev = parenlev - 1 yield (OP, token, spos, epos, line) else: yield (ERRORTOKEN, line[pos], (lnum, pos), (lnum, pos+1), line) pos = pos + 1 for indent in indents[1:]: # pop remaining indent levels yield (DEDENT, '', (lnum, 0), (lnum, 0), '') yield (ENDMARKER, '', (lnum, 0), (lnum, 0), '') if __name__ == '__main__': # testing import sys if len(sys.argv) > 1: tokenize(open(sys.argv[1]).readline) else: tokenize(sys.stdin.readline)
mit
yeleman/snisi
snisi_maint/management/commands/update-cluster-from-std-csv.py
1
3120
#!/usr/bin/env python # -*- coding: utf-8 -*- # vim: ai ts=4 sts=4 et sw=4 nu from __future__ import (unicode_literals, absolute_import, division, print_function) import logging import os from django.core.management.base import BaseCommand from optparse import make_option from py3compat import PY2 from snisi_core.models.Entities import Entity from snisi_core.models.Projects import Cluster, Participation if PY2: import unicodecsv as csv else: import csv logger = logging.getLogger(__name__) class Command(BaseCommand): option_list = BaseCommand.option_list + ( make_option('-f', help='CSV file', action='store', dest='filename'), ) def handle(self, *args, **options): if not os.path.exists(options.get('filename') or ""): logger.error("CSV file `{}` does not exist." .format(options.get('filename'))) return headers = ['action', 'slug', 'cluster', 'include_hc'] input_csv_file = open(options.get('filename'), 'r') csv_reader = csv.DictReader(input_csv_file, fieldnames=headers) for entry in csv_reader: if csv_reader.line_num == 1: continue entity = Entity.get_or_none(entry.get('slug')) if entity is None: logger.warning("Entity `{}` does not exist." .format(entry.get('SNISI'))) continue cluster = Cluster.get_or_none(entry.get('cluster')) if cluster is None: logger.error("Cluster `{}` does not exist." 
.format(options.get('cluster_slug'))) continue include_hc = bool(entry.get('include_hc')) entities = [entity] if include_hc: entities += entity.get_health_centers() if entry.get('action') == 'add': for e in entities: p, created = Participation.objects.get_or_create( cluster=cluster, entity=e, is_active=True) logger.info(p) if entry.get('action') == 'disable': for p in Participation.objects.filter( cluster=cluster, entity__slug__in=[e.slug for e in entities]): p.is_active = False p.save() logger.info(p) if entry.get('action') == 'enable': for p in Participation.objects.filter( cluster=cluster, entity__slug__in=[e.slug for e in entities]): p.is_active = True p.save() logger.info(p) if entry.get('action') == 'remove': Participation.objects.filter( cluster=cluster, entity__slug__in=[e.slug for e in entities]).delete() logger.info("All Done")
mit
dronefly/dronefly.github.io
flask/lib/python2.7/site-packages/whoosh/formats.py
74
16710
# Copyright 2009 Matt Chaput. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # THIS SOFTWARE IS PROVIDED BY MATT CHAPUT ``AS IS'' AND ANY EXPRESS OR # IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF # MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO # EVENT SHALL MATT CHAPUT OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, # INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, # OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF # LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, # EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # # The views and conclusions contained in the software and documentation are # those of the authors and should not be interpreted as representing official # policies, either expressed or implied, of Matt Chaput. """ The classes in this module encode and decode posting information for a field. The field format essentially determines what information is stored about each occurance of a term. 
""" from collections import defaultdict from whoosh.analysis import unstopped, entoken from whoosh.compat import iteritems, dumps, loads, b from whoosh.system import emptybytes from whoosh.system import _INT_SIZE, _FLOAT_SIZE from whoosh.system import pack_uint, unpack_uint, pack_float, unpack_float # Format base class class Format(object): """Abstract base class representing a storage format for a field or vector. Format objects are responsible for writing and reading the low-level representation of a field. It controls what kind/level of information to store about the indexed fields. """ posting_size = -1 textual = True __inittypes__ = dict(field_boost=float) def __init__(self, field_boost=1.0, **options): """ :param field_boost: A constant boost factor to scale to the score of all queries matching terms in this field. """ self.field_boost = field_boost self.options = options def __eq__(self, other): return (other and self.__class__ is other.__class__ and self.__dict__ == other.__dict__) def __repr__(self): return "%s(boost=%s)" % (self.__class__.__name__, self.field_boost) def fixed_value_size(self): if self.posting_size < 0: return None return self.posting_size def word_values(self, value, analyzer, **kwargs): """Takes the text value to be indexed and yields a series of ("tokentext", frequency, weight, valuestring) tuples, where frequency is the number of times "tokentext" appeared in the value, weight is the weight (a float usually equal to frequency in the absence of per-term boosts) and valuestring is encoded field-specific posting value for the token. For example, in a Frequency format, the value string would be the same as frequency; in a Positions format, the value string would encode a list of token positions at which "tokentext" occured. :param value: The unicode text to index. :param analyzer: The analyzer to use to process the text. 
""" raise NotImplementedError def supports(self, name): """Returns True if this format supports interpreting its posting value as 'name' (e.g. "frequency" or "positions"). """ return hasattr(self, "decode_" + name) def decoder(self, name): """Returns the bound method for interpreting value as 'name', where 'name' is for example "frequency" or "positions". This object must have a corresponding Format.decode_<name>() method. """ return getattr(self, "decode_" + name) def decode_as(self, astype, valuestring): """Interprets the encoded value string as 'astype', where 'astype' is for example "frequency" or "positions". This object must have a corresponding decode_<astype>() method. """ return self.decoder(astype)(valuestring) # Concrete field classes # TODO: as a legacy thing most of these formats store the frequency but not the # weight in the value string, so if you use field or term boosts # postreader.value_as("weight") will not match postreader.weight() def tokens(value, analyzer, kwargs): if isinstance(value, (tuple, list)): gen = entoken(value, **kwargs) else: gen = analyzer(value, **kwargs) return unstopped(gen) class Existence(Format): """Only indexes whether a given term occurred in a given document; it does not store frequencies or positions. This is useful for fields that should be searchable but not scorable, such as file path. Supports: frequency, weight (always reports frequency = 1). 
""" posting_size = 0 __inittypes__ = dict(field_boost=float) def __init__(self, field_boost=1.0, **options): self.field_boost = field_boost self.options = options def word_values(self, value, analyzer, **kwargs): fb = self.field_boost wordset = set(t.text for t in tokens(value, analyzer, kwargs)) return ((w, 1, fb, emptybytes) for w in wordset) def encode(self, value): return emptybytes def decode_frequency(self, valuestring): return 1 def decode_weight(self, valuestring): return self.field_boost def combine(self, vs): return emptybytes class Frequency(Format): """Stores frequency information for each posting. Supports: frequency, weight. """ posting_size = _INT_SIZE __inittypes__ = dict(field_boost=float, boost_as_freq=bool) def __init__(self, field_boost=1.0, boost_as_freq=False, **options): """ :param field_boost: A constant boost factor to scale to the score of all queries matching terms in this field. """ assert isinstance(field_boost, float) self.field_boost = field_boost self.options = options def word_values(self, value, analyzer, **kwargs): fb = self.field_boost length = 0 freqs = defaultdict(int) weights = defaultdict(float) kwargs["boosts"] = True for t in tokens(value, analyzer, kwargs): length += 1 freqs[t.text] += 1 weights[t.text] += t.boost wvs = ((w, freq, weights[w] * fb, pack_uint(freq)) for w, freq in iteritems(freqs)) return wvs def decode_frequency(self, valuestring): return unpack_uint(valuestring)[0] def decode_weight(self, valuestring): freq = unpack_uint(valuestring)[0] return freq * self.field_boost def combine(self, vs): return pack_uint(sum(self.decode_value(v) for v in vs)) class Positions(Format): """Stores position information in each posting, to allow phrase searching and "near" queries. Supports: frequency, weight, positions, position_boosts (always reports position boost = 1.0). 
""" def word_values(self, value, analyzer, **kwargs): fb = self.field_boost poses = defaultdict(list) weights = defaultdict(float) kwargs["positions"] = True kwargs["boosts"] = True for t in tokens(value, analyzer, kwargs): poses[t.text].append(t.pos) weights[t.text] += t.boost for w, poslist in iteritems(poses): value = self.encode(poslist) yield (w, len(poslist), weights[w] * fb, value) def encode(self, poslist): deltas = [] base = 0 for pos in poslist: deltas.append(pos - base) base = pos return pack_uint(len(deltas)) + dumps(deltas, -1) def decode_positions(self, valuestring): if not valuestring.endswith(b(".")): valuestring += b(".") codes = loads(valuestring[_INT_SIZE:]) position = 0 positions = [] for code in codes: position += code positions.append(position) return positions def decode_frequency(self, valuestring): return unpack_uint(valuestring[:_INT_SIZE])[0] def decode_weight(self, valuestring): return self.decode_frequency(valuestring) * self.field_boost def decode_position_boosts(self, valuestring): return [(pos, 1) for pos in self.decode_positions(valuestring)] def combine(self, vs): s = set() for v in vs: s.update(self.decode_positions(v)) return self.encode(sorted(s)) class Characters(Positions): """Stores token position and character start and end information for each posting. Supports: frequency, weight, positions, position_boosts (always reports position boost = 1.0), characters. 
""" def word_values(self, value, analyzer, **kwargs): fb = self.field_boost seen = defaultdict(list) weights = defaultdict(float) kwargs["positions"] = True kwargs["chars"] = True kwargs["boosts"] = True for t in tokens(value, analyzer, kwargs): seen[t.text].append((t.pos, t.startchar, t.endchar)) weights[t.text] += t.boost for w, poslist in iteritems(seen): value = self.encode(poslist) yield (w, len(poslist), weights[w] * fb, value) def encode(self, poslist): deltas = [] posbase = 0 charbase = 0 for pos, startchar, endchar in poslist: deltas.append((pos - posbase, startchar - charbase, endchar - startchar)) posbase = pos charbase = endchar return pack_uint(len(deltas)) + dumps(deltas, -1) def decode_characters(self, valuestring): if not valuestring.endswith(b(".")): valuestring += b(".") codes = loads(valuestring[_INT_SIZE:]) position = 0 endchar = 0 posns_chars = [] for code in codes: position = code[0] + position startchar = code[1] + endchar endchar = code[2] + startchar posns_chars.append((position, startchar, endchar)) return posns_chars def decode_positions(self, valuestring): if not valuestring.endswith(b(".")): valuestring += b(".") codes = loads(valuestring[_INT_SIZE:]) position = 0 posns = [] for code in codes: position = code[0] + position posns.append(position) return posns def combine(self, vs): s = {} for v in vs: for pos, sc, ec in self.decode_characters(v): if pos in s: old_sc, old_ec = pos[s] s[pos] = (min(sc, old_sc), max(ec, old_ec)) else: s[pos] = (sc, ec) poses = [(pos, s[pos][0], s[pos][1]) for pos in sorted(s.keys())] return self.encode(poses) class PositionBoosts(Positions): """A format that stores positions and per-position boost information in each posting. Supports: frequency, weight, positions, position_boosts. 
""" def word_values(self, value, analyzer, **kwargs): fb = self.field_boost seen = defaultdict(list) kwargs["positions"] = True kwargs["boosts"] = True for t in tokens(value, analyzer, kwargs): pos = t.pos boost = t.boost seen[t.text].append((pos, boost)) for w, poses in iteritems(seen): value = self.encode(poses) yield (w, len(poses), sum(p[1] for p in poses) * fb, value) def encode(self, poses): codes = [] base = 0 summedboost = 0 for pos, boost in poses: summedboost += boost codes.append((pos - base, boost)) base = pos return (pack_uint(len(poses)) + pack_float(summedboost) + dumps(codes, -1)) def decode_position_boosts(self, valuestring): if not valuestring.endswith(b(".")): valuestring += b(".") codes = loads(valuestring[_INT_SIZE + _FLOAT_SIZE:]) position = 0 posns_boosts = [] for code in codes: position = code[0] + position posns_boosts.append((position, code[1])) return posns_boosts def decode_positions(self, valuestring): if not valuestring.endswith(b(".")): valuestring += b(".") codes = loads(valuestring[_INT_SIZE + _FLOAT_SIZE:]) position = 0 posns = [] for code in codes: position = code[0] + position posns.append(position) return posns def decode_weight(self, v): summedboost = unpack_float(v[_INT_SIZE:_INT_SIZE + _FLOAT_SIZE])[0] return summedboost * self.field_boost def combine(self, vs): s = defaultdict(float) for v in vs: for pos, boost in self.decode_position_boosts(v): s[pos] += boost return self.encode(sorted(s.items())) class CharacterBoosts(Characters): """A format that stores positions, character start and end, and per-position boost information in each posting. Supports: frequency, weight, positions, position_boosts, characters, character_boosts. 
""" def word_values(self, value, analyzer, **kwargs): seen = defaultdict(list) kwargs["positions"] = True kwargs["chars"] = True kwargs["boosts"] = True for t in tokens(value, analyzer, kwargs): seen[t.text].append((t.pos, t.startchar, t.endchar, t.boost)) for w, poses in iteritems(seen): value, summedboost = self.encode(poses) yield (w, len(poses), summedboost, value) def encode(self, poses): fb = self.field_boost # posns_chars_boosts = [(pos, startchar, endchar, boost), ...] codes = [] posbase = 0 charbase = 0 summedboost = 0 for pos, startchar, endchar, boost in poses: codes.append((pos - posbase, startchar - charbase, endchar - startchar, boost)) posbase = pos charbase = endchar summedboost += boost return ((pack_uint(len(poses)) + pack_float(summedboost * fb) + dumps(codes, -1)), summedboost) def decode_character_boosts(self, valuestring): if not valuestring.endswith(b(".")): valuestring += b(".") codes = loads(valuestring[_INT_SIZE + _FLOAT_SIZE:]) position = 0 endchar = 0 posn_char_boosts = [] for code in codes: position = position + code[0] startchar = endchar + code[1] endchar = startchar + code[2] posn_char_boosts.append((position, startchar, endchar, code[3])) return posn_char_boosts def decode_positions(self, valuestring): return [item[0] for item in self.decode_character_boosts(valuestring)] def decode_characters(self, valuestring): return [(pos, startchar, endchar) for pos, startchar, endchar, _ in self.decode_character_boosts(valuestring)] def decode_position_boosts(self, valuestring): return [(pos, boost) for pos, _, _, boost in self.decode_character_boosts(valuestring)] def combine(self, vs): s = {} for v in vs: for pos, sc, ec, boost in self.decode_character_boosts(v): if pos in s: old_sc, old_ec, old_boost = pos[s] s[pos] = (min(sc, old_sc), max(ec, old_ec), old_boost + boost) else: s[pos] = (sc, ec, boost) poses = [(pos, sc, ec, boost) for pos, (sc, ec, boost) in sorted(s.items())] return self.encode(poses)[0] # encode() returns value, 
summedboost
apache-2.0
archf/ansible
lib/ansible/modules/system/gluster_volume.py
21
15494
#!/usr/bin/python # -*- coding: utf-8 -*- # (c) 2014, Taneli Leppä <taneli@crasman.fi> # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = """ module: gluster_volume short_description: Manage GlusterFS volumes description: - Create, remove, start, stop and tune GlusterFS volumes version_added: '1.9' options: name: description: - The volume name required: true state: description: - Use present/absent ensure if a volume exists or not. Use started/stopped to control its availability. required: true choices: ['present', 'absent', 'started', 'stopped'] cluster: description: - List of hosts to use for probing and brick setup host: description: - Override local hostname (for peer probing purposes) replicas: description: - Replica count for volume arbiter: description: - Arbiter count for volume version_added: '2.3' stripes: description: - Stripe count for volume disperses: description: - Disperse count for volume version_added: '2.2' redundancies: description: - Redundancy count for volume version_added: '2.2' transport: description: - Transport type for volume default: 'tcp' choices: ['tcp', 'rdma', 'tcp,rdma'] bricks: description: - Brick paths on servers. Multiple brick paths can be separated by commas. 
aliases: ['brick'] start_on_create: description: - Controls whether the volume is started after creation or not default: 'yes' type: bool rebalance: description: - Controls whether the cluster is rebalanced after changes default: 'no' type: bool directory: description: - Directory for limit-usage options: description: - A dictionary/hash with options/settings for the volume quota: description: - Quota value for limit-usage (be sure to use 10.0MB instead of 10MB, see quota list) force: description: - If brick is being created in the root partition, module will fail. Set force to true to override this behaviour. type: bool notes: - Requires cli tools for GlusterFS on servers - Will add new bricks, but not remove them author: Taneli Leppä (@rosmo) """ EXAMPLES = """ - name: create gluster volume gluster_volume: state: present name: test1 bricks: /bricks/brick1/g1 rebalance: yes cluster: - 192.0.2.10 - 192.0.2.11 run_once: true - name: tune gluster_volume: state: present name: test1 options: performance.cache-size: 256MB - name: start gluster volume gluster_volume: state: started name: test1 - name: limit usage gluster_volume: state: present name: test1 directory: /foo quota: 20.0MB - name: stop gluster volume gluster_volume: state: stopped name: test1 - name: remove gluster volume gluster_volume: state: absent name: test1 - name: create gluster volume with multiple bricks gluster_volume: state: present name: test2 bricks: /bricks/brick1/g2,/bricks/brick2/g2 cluster: - 192.0.2.10 - 192.0.2.11 run_once: true """ import re import socket import time import traceback from ansible.module_utils.basic import AnsibleModule from ansible.module_utils._text import to_native glusterbin = '' def run_gluster(gargs, **kwargs): global glusterbin global module args = [glusterbin, '--mode=script'] args.extend(gargs) try: rc, out, err = module.run_command(args, **kwargs) if rc != 0: module.fail_json(msg='error running gluster (%s) command (rc=%d): %s' % (' '.join(args), rc, out or err), 
exception=traceback.format_exc()) except Exception as e: module.fail_json(msg='error running gluster (%s) command: %s' % (' '.join(args), to_native(e)), exception=traceback.format_exc()) return out def run_gluster_nofail(gargs, **kwargs): global glusterbin global module args = [glusterbin] args.extend(gargs) rc, out, err = module.run_command(args, **kwargs) if rc != 0: return None return out def get_peers(): out = run_gluster([ 'peer', 'status']) peers = {} hostname = None uuid = None state = None shortNames = False for row in out.split('\n'): if ': ' in row: key, value = row.split(': ') if key.lower() == 'hostname': hostname = value shortNames = False if key.lower() == 'uuid': uuid = value if key.lower() == 'state': state = value peers[hostname] = [uuid, state] elif row.lower() == 'other names:': shortNames = True elif row != '' and shortNames is True: peers[row] = [uuid, state] elif row == '': shortNames = False return peers def get_volumes(): out = run_gluster([ 'volume', 'info' ]) volumes = {} volume = {} for row in out.split('\n'): if ': ' in row: key, value = row.split(': ') if key.lower() == 'volume name': volume['name'] = value volume['options'] = {} volume['quota'] = False if key.lower() == 'volume id': volume['id'] = value if key.lower() == 'status': volume['status'] = value if key.lower() == 'transport-type': volume['transport'] = value if value.lower().endswith(' (arbiter)'): if not 'arbiters' in volume: volume['arbiters'] = [] value = value[:-10] volume['arbiters'].append(value) if key.lower() != 'bricks' and key.lower()[:5] == 'brick': if not 'bricks' in volume: volume['bricks'] = [] volume['bricks'].append(value) # Volume options if '.' 
in key: if not 'options' in volume: volume['options'] = {} volume['options'][key] = value if key == 'features.quota' and value == 'on': volume['quota'] = True else: if row.lower() != 'bricks:' and row.lower() != 'options reconfigured:': if len(volume) > 0: volumes[volume['name']] = volume volume = {} return volumes def get_quotas(name, nofail): quotas = {} if nofail: out = run_gluster_nofail([ 'volume', 'quota', name, 'list' ]) if not out: return quotas else: out = run_gluster([ 'volume', 'quota', name, 'list' ]) for row in out.split('\n'): if row[:1] == '/': q = re.split('\s+', row) quotas[q[0]] = q[1] return quotas def wait_for_peer(host): for x in range(0, 4): peers = get_peers() if host in peers and peers[host][1].lower().find('peer in cluster') != -1: return True time.sleep(1) return False def probe(host, myhostname): global module out = run_gluster([ 'peer', 'probe', host ]) if out.find('localhost') == -1 and not wait_for_peer(host): module.fail_json(msg='failed to probe peer %s on %s' % (host, myhostname)) def probe_all_peers(hosts, peers, myhostname): for host in hosts: host = host.strip() # Clean up any extra space for exact comparison if host not in peers: probe(host, myhostname) def create_volume(name, stripe, replica, arbiter, disperse, redundancy, transport, hosts, bricks, force): args = [ 'volume', 'create' ] args.append(name) if stripe: args.append('stripe') args.append(str(stripe)) if replica: args.append('replica') args.append(str(replica)) if arbiter: args.append('arbiter') args.append(str(arbiter)) if disperse: args.append('disperse') args.append(str(disperse)) if redundancy: args.append('redundancy') args.append(str(redundancy)) args.append('transport') args.append(transport) for brick in bricks: for host in hosts: args.append(('%s:%s' % (host, brick))) if force: args.append('force') run_gluster(args) def start_volume(name): run_gluster([ 'volume', 'start', name ]) def stop_volume(name): run_gluster([ 'volume', 'stop', name ]) def 
set_volume_option(name, option, parameter): run_gluster([ 'volume', 'set', name, option, parameter ]) def add_bricks(name, new_bricks, stripe, replica, force): args = [ 'volume', 'add-brick', name ] if stripe: args.append('stripe') args.append(str(stripe)) if replica: args.append('replica') args.append(str(replica)) args.extend(new_bricks) if force: args.append('force') run_gluster(args) def do_rebalance(name): run_gluster([ 'volume', 'rebalance', name, 'start' ]) def enable_quota(name): run_gluster([ 'volume', 'quota', name, 'enable' ]) def set_quota(name, directory, value): run_gluster([ 'volume', 'quota', name, 'limit-usage', directory, value ]) def main(): ### MAIN ### global module module = AnsibleModule( argument_spec=dict( name=dict(required=True, aliases=['volume']), state=dict(required=True, choices=['present', 'absent', 'started', 'stopped']), cluster=dict(default=None, type='list'), host=dict(default=None), stripes=dict(default=None, type='int'), replicas=dict(default=None, type='int'), arbiters=dict(default=None, type='int'), disperses=dict(default=None, type='int'), redundancies=dict(default=None, type='int'), transport=dict(default='tcp', choices=['tcp', 'rdma', 'tcp,rdma']), bricks=dict(default=None, aliases=['brick']), start_on_create=dict(default=True, type='bool'), rebalance=dict(default=False, type='bool'), options=dict(default={}, type='dict'), quota=dict(), directory=dict(default=None), force=dict(default=False, type='bool'), ) ) global glusterbin glusterbin = module.get_bin_path('gluster', True) changed = False action = module.params['state'] volume_name = module.params['name'] cluster= module.params['cluster'] brick_paths = module.params['bricks'] stripes = module.params['stripes'] replicas = module.params['replicas'] arbiters = module.params['arbiters'] disperses = module.params['disperses'] redundancies = module.params['redundancies'] transport = module.params['transport'] myhostname = module.params['host'] start_on_create = 
module.boolean(module.params['start_on_create']) rebalance = module.boolean(module.params['rebalance']) force = module.boolean(module.params['force']) if not myhostname: myhostname = socket.gethostname() # Clean up if last element is empty. Consider that yml can look like this: # cluster="{% for host in groups['glusterfs'] %}{{ hostvars[host]['private_ip'] }},{% endfor %}" if cluster is not None and len(cluster) > 1 and cluster[-1] == '': cluster = cluster[0:-1] if cluster is None or cluster[0] == '': cluster = [myhostname] if brick_paths is not None and "," in brick_paths: brick_paths = brick_paths.split(",") else: brick_paths = [brick_paths] options = module.params['options'] quota = module.params['quota'] directory = module.params['directory'] # get current state info peers = get_peers() volumes = get_volumes() quotas = {} if volume_name in volumes and volumes[volume_name]['quota'] and volumes[volume_name]['status'].lower() == 'started': quotas = get_quotas(volume_name, True) # do the work! 
if action == 'absent': if volume_name in volumes: if volumes[volume_name]['status'].lower() != 'stopped': stop_volume(volume_name) run_gluster([ 'volume', 'delete', volume_name ]) changed = True if action == 'present': probe_all_peers(cluster, peers, myhostname) # create if it doesn't exist if volume_name not in volumes: create_volume(volume_name, stripes, replicas, arbiters, disperses, redundancies, transport, cluster, brick_paths, force) volumes = get_volumes() changed = True if volume_name in volumes: if volumes[volume_name]['status'].lower() != 'started' and start_on_create: start_volume(volume_name) changed = True # switch bricks new_bricks = [] removed_bricks = [] all_bricks = [] for node in cluster: for brick_path in brick_paths: brick = '%s:%s' % (node, brick_path) all_bricks.append(brick) if brick not in volumes[volume_name]['bricks']: new_bricks.append(brick) # this module does not yet remove bricks, but we check those anyways for brick in volumes[volume_name]['bricks']: if brick not in all_bricks: removed_bricks.append(brick) if new_bricks: add_bricks(volume_name, new_bricks, stripes, replicas, force) changed = True # handle quotas if quota: if not volumes[volume_name]['quota']: enable_quota(volume_name) quotas = get_quotas(volume_name, False) if directory not in quotas or quotas[directory] != quota: set_quota(volume_name, directory, quota) changed = True # set options for option in options.keys(): if option not in volumes[volume_name]['options'] or volumes[volume_name]['options'][option] != options[option]: set_volume_option(volume_name, option, options[option]) changed = True else: module.fail_json(msg='failed to create volume %s' % volume_name) if action != 'delete' and volume_name not in volumes: module.fail_json(msg='volume not found %s' % volume_name) if action == 'started': if volumes[volume_name]['status'].lower() != 'started': start_volume(volume_name) changed = True if action == 'stopped': if volumes[volume_name]['status'].lower() != 'stopped': 
stop_volume(volume_name) changed = True if changed: volumes = get_volumes() if rebalance: do_rebalance(volume_name) facts = {} facts['glusterfs'] = { 'peers': peers, 'volumes': volumes, 'quotas': quotas } module.exit_json(changed=changed, ansible_facts=facts) if __name__ == '__main__': main()
gpl-3.0
Patola/Cura
plugins/PostProcessingPlugin/scripts/BQ_PauseAtHeight.py
5
1805
from ..Script import Script class BQ_PauseAtHeight(Script): def __init__(self): super().__init__() def getSettingDataString(self): return """{ "name":"Pause at height (BQ Printers)", "key": "BQ_PauseAtHeight", "metadata":{}, "version": 2, "settings": { "pause_height": { "label": "Pause height", "description": "At what height should the pause occur", "unit": "mm", "type": "float", "default_value": 5.0 } } }""" def execute(self, data): x = 0. y = 0. current_z = 0. pause_z = self.getSettingValueByKey("pause_height") for layer in data: lines = layer.split("\n") for line in lines: if self.getValue(line, 'G') == 1 or self.getValue(line, 'G') == 0: current_z = self.getValue(line, 'Z') if current_z != None: if current_z >= pause_z: prepend_gcode = ";TYPE:CUSTOM\n" prepend_gcode += "; -- Pause at height (%.2f mm) --\n" % pause_z # Insert Pause gcode prepend_gcode += "M25 ; Pauses the print and waits for the user to resume it\n" index = data.index(layer) layer = prepend_gcode + layer data[index] = layer # Override the data of this layer with the modified data return data break return data
lgpl-3.0
EliotBerriot/1flow
oneflow/landing/admin.py
2
2668
# -*- coding: utf-8 -*- """ Copyright 2013-2014 Olivier Cortès <oc@1flow.io> This file is part of the 1flow project. 1flow is free software: you can redistribute it and/or modify it under the terms of the GNU Affero General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. 1flow is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more details. You should have received a copy of the GNU Affero General Public License along with 1flow. If not, see http://www.gnu.org/licenses/ """ from django.conf import settings from django.utils.translation import ugettext_lazy as _ from django.contrib import admin as django_admin import mongoadmin as admin from sparks.django.admin import languages, truncate_field from .models import LandingContent, LandingUser from ..base.admin import CSVAdminMixin class LandingUserAdmin(CSVAdminMixin): list_display = ('id', 'email', 'register_language_display', 'date_joined', ) list_display_links = ('id', 'email', ) list_filter = ('email_announcements', ) ordering = ('-date_joined', ) date_hierarchy = 'date_joined' search_fields = ('email', ) change_list_template = "admin/change_list_filter_sidebar.html" change_list_filter_template = "admin/filter_listing.html" def register_language_display(self, obj): return obj.register_data.get('language', u'—').split(',', 1)[0] register_language_display.short_description = _(u'language') admin.site.register(LandingUser, LandingUserAdmin) if settings.FULL_ADMIN: content_fields_names = tuple(('content_' + code) for code, lang in languages) content_fields_displays = tuple((field + '_display') for field in content_fields_names) class LandingContentAdmin(django_admin.ModelAdmin): list_display = ('name', ) + content_fields_displays search_fields = ('name', ) + 
content_fields_names ordering = ('name', ) #change_list_template = "admin/change_list_filter_sidebar.html" #change_list_filter_template = "admin/filter_listing.html" for attr, attr_name in zip(content_fields_names, content_fields_displays): setattr(LandingContentAdmin, attr_name, truncate_field(LandingContent, attr)) admin.site.register(LandingContent, LandingContentAdmin)
agpl-3.0
mbartling/TAMU_senior_design
Python/get_xbee_servo.py
1
4195
#! /usr/bin/env python import serial import sys import os import MySQLdb from subprocess import call from datetime import date FORCE_WRITE = 0 HORIZONTAL = 0 VERTICAL = 90 today = date.today() try: address_array = [] # open data base db = MySQLdb.connect(host="localhost", user="", passwd="team05", db="xbee_teensy") cur = db.cursor() cur.execute("select version()") data = cur.fetchone() print "Database version: ", data cur.execute("truncate table raw_data") # open serial port xbee = serial.Serial() xbee.baudrate = 57600 if len(sys.argv) > 1: xbee.port = sys.argv[1] else: xbee.port = '/dev/ttyACM0' if xbee.isOpen(): xbee.close() xbee.open() print xbee xbee.write("?") if xbee.isOpen: for line in xbee: line = line.strip() packet = line.split() print line; if len(packet) > 1 and packet[0] == '7E': if len(packet) < 26 or int(packet[11], 16) != 0x64: print "Packet len is: " + "{0}".format(len(packet)) continue; # calling system command for timestamp p = os.popen('date "+%F %T"') timestamp = p.readline() p.close() timestamp = timestamp.rstrip('\n') timestamp = timestamp.rstrip('\0') print "Time is: " + timestamp # parse address addressH = packet[8:11] addressH.append(packet[14]) # convert to dec, then string addressString = '' for item in addressH: x = int(item, 16) addressString += str(x) + '.' 
addressString = addressString[:-1] print "Initial Address: " + addressString # parse rssi rssi = int(packet[15], 16) print "RSSI = ", rssi # parse survo position servoPos = int(packet[16], 16) print "servoPos =", servoPos # parse gps latArray = packet[17:21] latHex = ''.join(latArray) print latHex if latHex == '0000': lat = 0 else: lat = int(latHex, 16) lonArray = packet [21:25] lonHex = ''.join(lonArray) print lonHex if lonHex == '0000': lon = 0; else: lon = int(lonHex, 16) lon = lon ^ 0xFFFFFFFF lon += 1 lon *= -1 print lat, lon if FORCE_WRITE: cmd = "insert into raw_data values(\"%s\",\"%s\", %d, %d, %d, %d)" %(timestamp, addressString, servoPos, rssi, lat, lon) print cmd cur.execute(cmd) db.commit() print "new row added to mysql" if not addressString in address_array: print "Adding address string: " + addressString address_array.append(addressString) else: if lon > -970000000 and lon < -960000000 and lat > 306000000 and lat < 307000000: cmd = "insert into raw_data values(\"%s\",\"%s\", %d, %d, %d, %d)" %(timestamp, addressString, rssi, servoPos, lat, lon) print cmd cur.execute(cmd) db.commit() print "new row added to mysql" if not addressString in address_array: print "Adding address string: " + addressString address_array.append(addressString) print "Closing Xbee Port" finally: print "output data to file" # os.popen('rm -f /home/walter/Code/rawData/*.txt') # os.popen('rm -f /tmp/raw101.txt') for address in address_array: # write horizontal address_split = address.split('.'); filename = '/tmp/raw' + address_split[3] + 'horiz.txt' os.popen('rm ' + filename) print filename cmd = "select row, col, rssi from raw_data where address = \'%s\' and servoPos = %d into outfile \'%s\' fields terminated by ','" %(address, HORIZONTAL, filename) print cmd cur.execute(cmd) cmd = 'cp ' + filename + ' /home/walter/Code/rawData/raw' + address_split[3] + today.strftime("-%y-%m-%d") + 'horiz.out' print cmd os.popen(cmd) filename = '/tmp/raw' + address_split[3] + 'vert.txt' 
os.popen('rm ' + filename) print filename cmd = "select row, col, rssi from raw_data where address = \'%s\' and servoPos = %d into outfile \'%s\' fields terminated by ','" %(address, VERTICAL, filename) print cmd cur.execute(cmd) cmd = 'cp ' + filename + ' /home/walter/Code/rawData/raw' + address_split[3] + today.strftime("-%y-%m-%d") + 'vert.out' print cmd os.popen(cmd) print "closing xbee port and database" db.close() xbee.close()
mit
Syncleus/apex
src/apex/kiss/kiss_serial.py
1
3486
#!/usr/bin/env python # -*- coding: utf-8 -*- """KISS Core Classes.""" # These imports are for python3 compatibility inside python2 from __future__ import absolute_import from __future__ import division from __future__ import print_function import logging import serial import six from apex.kiss import constants as kiss_constants from .kiss import Kiss __author__ = 'Jeffrey Phillips Freeman (WI2ARD)' __maintainer__ = 'Jeffrey Phillips Freeman (WI2ARD)' __email__ = 'jeffrey.freeman@syncleus.com' __license__ = 'Apache License, Version 2.0' __copyright__ = 'Copyright 2016, Syncleus, Inc. and contributors' __credits__ = [] class KissSerial(Kiss): """KISS Serial Object Class.""" logger = logging.getLogger(__name__) logger.setLevel(kiss_constants.LOG_LEVEL) console_handler = logging.StreamHandler() console_handler.setLevel(kiss_constants.LOG_LEVEL) formatter = logging.Formatter(kiss_constants.LOG_FORMAT) console_handler.setFormatter(formatter) logger.addHandler(console_handler) logger.propagate = False def __init__(self, strip_df_start=True, com_port=None, baud=38400, parity=serial.PARITY_NONE, stop_bits=serial.STOPBITS_ONE, byte_size=serial.EIGHTBITS): super(KissSerial, self).__init__(strip_df_start) self.com_port = com_port self.baud = baud self.parity = parity self.stop_bits = stop_bits self.byte_size = byte_size self.serial = None self.logger.info('Using interface_mode=Serial') def __enter__(self): return self def __exit__(self, exc_type, exc_val, exc_tb): self.serial.close() def __del__(self): if self.serial and self.serial.isOpen(): self.serial.close() def _read_interface(self): read_data = self.serial.read(kiss_constants.READ_BYTES) waiting_data = self.serial.inWaiting() if waiting_data: read_data += self.serial.read(waiting_data) return [ord(c) if six.PY2 else c for c in read_data] def _write_interface(self, data): self.serial.write(data) def connect(self, mode_init=None, **kwargs): """ Initializes the KISS device and commits configuration. 
See http://en.wikipedia.org/wiki/KISS_(TNC)#Command_codes for configuration names. :param **kwargs: name/value pairs to use as initial config values. """ self.logger.debug('kwargs=%s', kwargs) self.serial = serial.Serial(port=self.com_port, baudrate=self.baud, parity=self.parity, stopbits=self.stop_bits, bytesize=self.byte_size) self.serial.timeout = kiss_constants.SERIAL_TIMEOUT if mode_init is not None: self.serial.write(mode_init) self.exit_kiss = True else: self.exit_kiss = False # Previous verious defaulted to Xastir-friendly configs. Unfortunately # those don't work with Bluetooth TNCs, so we're reverting to None. if kwargs: for name, value in kwargs.items(): super(KissSerial, self)._write_setting(name, value) def close(self): super(KissSerial, self).close() if not self.serial: raise RuntimeError('Attempting to close before the class has been started.') elif self.serial.isOpen(): self.serial.close()
apache-2.0
khertan/gedit_flake8
gedit_flake8/__init__.py
1
14861
#!/usr/bin/env python # -*- coding: utf-8 -*- """gedit-flake8 : A plugin for gedit to display error and warning from flake8.""" __author__ = "Benoît HERVIER" __copyright__ = "Copyright 2012 " + __author__ __license__ = "GPLv3" __version__ = "0.7.0" __maintainer__ = "Benoît HERVIER" __email__ = "khertan@khertan.net" __status__ = "Beta" try: from gi.repository import GObject, Gtk, Gedit, Pango except ImportError as err: print('GEdit-Flake8 need to be launched by GEdit 3') print(err) import re from subprocess import Popen, PIPE, call import threading GObject.threads_init() def _remove_tags(document, errors_tag): """Remove not anymore used tags""" if errors_tag: start, end = document.get_bounds() document.remove_tag(errors_tag, start, end) def apply_style(style, tag): """Apply a style to a tag from the default theme style This lightly modified code come from the synctext.py gedit plugin""" def apply_style_prop(tag, style, prop): if style.get_property(prop + "-set"): tag.set_property(prop, style.get_property(prop)) else: tag.set_property(prop, None) def apply_style_prop_bool(tag, style, prop, whentrue, whenfalse): if style.get_property(prop + "-set"): prop_value = whentrue if style.get_property(prop) else whenfalse tag.set_property(prop, prop_value) apply_style_prop(tag, style, "foreground") apply_style_prop(tag, style, "background") try: apply_style_prop_bool(tag, style, "weight", Pango.Weight.BOLD, Pango.Weight.NORMAL) except TypeError as err: # Different version of gtk 3 have different properties ... 
:( print(err) apply_style_prop_bool(tag, style, "italic", Pango.Style.ITALIC, Pango.Style.NORMAL) apply_style_prop_bool(tag, style, "underline", Pango.Underline.SINGLE, Pango.Underline.NONE) apply_style_prop(tag, style, "strikethrough") class _IdleObject(GObject.Object): """ Override gobject.GObject to always emit signals in the main thread by emmitting on an idle handler """ def __init__(self): GObject.Object.__init__(self) def emit(self, *args): GObject.idle_add(GObject.Object.emit, self, *args) class Message(object): def __init__(self, document, lineno, column, message): self._doc = document self._lineno = lineno self._column = column self._message = message self._start_iter = None self._end_iter = None self._stock_id = self._get_stock_id(message) def _get_stock_id(self, message): if message.startswith('E'): return Gtk.STOCK_DIALOG_ERROR elif message.startswith('W'): return Gtk.STOCK_DIALOG_WARNING elif message.startswith('C'): return Gtk.STOCK_DIALOG_INFO else: return Gtk.STOCK_DIALOG_INFO def setWordBounds(self, start, end): self._start_iter = start self._end_iter = end doc = property(lambda self: self.__doc) lineno = property(lambda self: self._lineno) column = property(lambda self: self._lineno) message = property(lambda self: self._message) start = property(lambda self: self._start_iter) end = property(lambda self: self._end_iter) stock_id = property(lambda self: self._stock_id) class ResultsModel(Gtk.ListStore): def __init__(self): super(ResultsModel, self).__init__(int, int, str) def add(self, msg): self.append([msg.lineno, msg.column, msg.message]) class ResultsView(Gtk.TreeView): def __init__(self, panel): super(ResultsView, self).__init__() self._panel = panel linha = Gtk.TreeViewColumn("Line") linha_cell = Gtk.CellRendererText() linha.pack_start(linha_cell, True) linha.add_attribute(linha_cell, 'text', 0) linha.set_sort_column_id(0) self.append_column(linha) msgtype = Gtk.TreeViewColumn("Column") msgtype_cell = Gtk.CellRendererText() 
msgtype.pack_start(msgtype_cell, True) msgtype.add_attribute(msgtype_cell, 'text', 1) msgtype.set_sort_column_id(1) self.append_column(msgtype) msg = Gtk.TreeViewColumn("Message") msg_cell = Gtk.CellRendererText() msg.pack_start(msg_cell, True) msg.add_attribute(msg_cell, 'text', 2) msg.set_sort_column_id(2) self.append_column(msg) self.connect("row-activated", self._row_activated_cb) def _row_activated_cb(self, view, row, column): model = view.get_model() iter = model.get_iter(row) window = self._panel.get_window() document = window.get_active_document() line = model.get_value(iter, 0) - 1 document.goto_line(line) view = window.get_active_view() text_iter = document.get_iter_at_line(line) view.scroll_to_iter(text_iter, 0.25, False, 0.5, 0.5) view.grab_focus() class ResultsPanel(Gtk.ScrolledWindow): def __init__(self, window): super(ResultsPanel, self).__init__() self.window = window self.view = ResultsView(self) self.add(self.view) self.view.show() def set_model(self, model): self.view.set_model(model) def get_window(self): return self.window class Worker(threading.Thread, _IdleObject): __gsignals__ = { "completed": ( GObject.SIGNAL_RUN_LAST, GObject.TYPE_NONE, []), } def __init__(self, document, errors_tag): self.document = document threading.Thread.__init__(self) _IdleObject.__init__(self) if errors_tag is None: self._add_tags(document) else: self._errors_tag = errors_tag self._results = [] self._errors = [] self.cancelled = False def _add_tags(self, document): """Register new tags in the sourcebuffer""" style = document.get_style_scheme().get_style('def:error') self._errors_tag = \ document.create_tag("flake8-error", underline=Pango.Underline.ERROR) apply_style(style, self._errors_tag) def _highlight_errors(self, errors): """Colorize error in the sourcebuffer""" document = self.document for err in errors: start = document.get_iter_at_line(err.lineno - 1) end = document.get_iter_at_line(err.lineno - 1) end.forward_to_line_end() # apply tag to entire line 
document.apply_tag(self._errors_tag, start, end) def _flake8_bin(self): """Returns a flake8 valid executable flake8 is the default executable, but in Debian systems, for example, package pyflakes provides a pyflakes binary instead of flake8 """ # list of flake binaries flake8_binaries = ('flake8', 'pyflakes') def cmd_exists(cmd): return call("type " + cmd, shell=True, stdout=PIPE, stderr=PIPE) == 0 for flake8 in flake8_binaries: if cmd_exists(flake8): return flake8 # default return "flake8" def run(self): errors = [] location = self.document.get_location() _remove_tags(self.document, self._errors_tag) if location is None: print('Location not found ...') return path = location.get_path() if path is None: import codecs try: encoding = self.document.get_encoding().get_charset() except Exception as err: encoding = 'utf-8' path = '/tmp/gedit_flake8.py' start, end = self.document.get_bounds() with codecs.open(path, 'w', encoding=encoding) as fh: fh.write(str( self.document.get_text(start, end, include_hidden_chars=True), encoding)) stdout, stderr = Popen([self._flake8_bin(), path], stdout=PIPE, stderr=PIPE).communicate() output = stdout if stdout else stderr line_format = re.compile( '(?P<path>[^:]+):(?P<line>\d+):' + '(?P<character>\d+:)?\s(?P<message>.*$)') self._results = ResultsModel() if not output: if not self.cancelled: self.emit("completed") return for line in output.splitlines(): m = line_format.match(line.decode('utf-8')) if not m: continue groups = m.groupdict() if groups['character']: err = Message(self.document, int(groups['line']), int(groups['character'].strip(':')), groups['message'],) else: err = Message(self.document, int(groups['line']), 0, groups['message'],) errors.append(err) self._results.add(err) _remove_tags(self.document, self._errors_tag) self._errors = errors self._highlight_errors(self._errors) if not self.cancelled: self.emit("completed") class Flake8Plugin(GObject.Object, Gedit.WindowActivatable): __gtype_name__ = "Flake8" window = 
GObject.property(type=Gedit.Window) documents = [] _errors_tag = {} _results = {} _errors = {} _worker = None def __init__(self): GObject.Object.__init__(self) def do_activate(self): # self._insert_panel() self._panel = ResultsPanel(self.window) self._panel.show() bottom = self.window.get_bottom_panel() bottom.add_titled(self._panel, "ResultsPanel", "Flake8 Results") self.window.connect("tab-added", self.on_tab_added) self.window.connect("tab-removed", self.on_tab_removed) self.window.connect("active-tab-changed", self.on_active_tab_changed) def do_deactivate(self): # self._remove_panel() pass def on_notify_style_scheme(self, document, param_object): style = document.get_style_scheme().get_style('def:error') apply_style(style, self._errors_tag[document]) def _insert_panel(self): """Insert bottom GEdit panel""" self._panel = ResultsPanel(self.window) image = Gtk.Image() image.set_from_icon_name('gnome-mime-text-x-python', Gtk.IconSize.MENU) bottom_panel = self.window.get_bottom_panel() bottom_panel.add_item(self._panel, 'ResultsPanel', 'Flake8 Results', image) def display_error_msg(self, document): """Display a statusbar message if the current line have errors""" if document is None: return True try: if document.get_language().get_name() != 'Python': return True except AttributeError as err: return True curline = document.get_iter_at_mark( document.get_insert()).get_line() + 1 for err in self._errors[document]: if err.lineno == curline: statusbar = self.window.get_statusbar() statusbar_ctxtid = statusbar.get_context_id('Flake8') statusbar.push(statusbar_ctxtid, 'Line : %s : %s' % (err.lineno, err.message)) return True return False def _remove_panel(self): """Remove the inserted panel from GEdit""" bottom_panel = self.window.get_bottom_panel() bottom_panel.remove_item(self._panel) def on_active_tab_changed(self, window, tab): self._panel.set_model(self._results[tab.get_document()]) def on_tab_added(self, window, tab): """Initialize the required vars""" document = 
tab.get_document() self._results[document] = ResultsModel() self._errors[document] = [] self._errors_tag[document] = None document.connect('loaded', self.analyse) document.connect('saved', self.analyse) document.connect('cursor-moved', self.display_error_msg) def on_tab_removed(self, window, tab): """Cleaning results not needed anymore""" document = tab.get_document() if document in self._results: self._results[document] = None del self._results[document] self._errors[document] = None del self._errors[document] _remove_tags(document, self._errors_tag[document]) def completedCb(self, *userData): errors = self._worker._errors document = self._worker.document self._errors[document] = errors self._results[document] = self._worker._results self._errors_tag[document] = self._worker._errors_tag if len(errors) > 0: if not self.display_error_msg(document): statusbar = self.window.get_statusbar() statusbar_ctxtid = statusbar.get_context_id('Flake8') statusbar.push(statusbar_ctxtid, 'Line : %s : %s' % (errors[0].lineno, errors[0].message)) else: statusbar = self.window.get_statusbar() statusbar_ctxtid = statusbar.get_context_id('Flake8') statusbar.push(statusbar_ctxtid, "No errors found") try: self._panel.set_model(self._results[document]) except: pass self._worker = None def analyse(self, doc): """Launch a process and populate vars""" document = self.window.get_active_document() if document is None: return True try: if document.get_language().get_name() != 'Python': return True except AttributeError: return True if self._worker is not None: self._worker.cancelled = True self._worker = Worker(document, self._errors_tag[document]) self._worker.connect("completed", self.completedCb) self._worker.start()
gpl-3.0
koobonil/Boss2D
Boss2D/addon/tensorflow-1.2.1_for_boss/tensorflow/python/framework/function.py
14
36606
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================= """Python front-end supports for functions. NOTE: functions are currently experimental and subject to change! """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import hashlib import re from tensorflow.core.framework import attr_value_pb2 from tensorflow.core.framework import function_pb2 from tensorflow.core.framework import op_def_pb2 from tensorflow.python.framework import dtypes from tensorflow.python.framework import op_def_registry from tensorflow.python.framework import ops from tensorflow.python.ops import array_ops from tensorflow.python.ops import resource_variable_ops from tensorflow.python.ops import variable_scope as vs from tensorflow.python.util import compat from tensorflow.python.util import tf_decorator from tensorflow.python.util import tf_inspect def _make_argname_from_tensor_name(name): return re.sub(":0$", "", name).replace(":", "_o") def _tensor_to_argdef(t, name=None, used_names=None): """Convert tensor t to an argdef, with a specified name or a unique name.""" arg = op_def_pb2.OpDef.ArgDef() if name is None: arg.name = _make_argname_from_tensor_name(t.name) if used_names is not None: if arg.name in used_names: i = 0 while True: new_name = "%s_U%d" % (arg.name, i) if new_name not in 
used_names: arg.name = new_name break i += 1 used_names.add(arg.name) else: arg.name = name arg.type = t.dtype.as_datatype_enum return arg def _get_node_def(op): return op._node_def # pylint: disable=protected-access def _get_op_def(op): return op.op_def or op_def_registry.get_registered_ops()[op.type] def _is_in_placeholders(op, func_arg_placeholders): return op.values() and (op.values()[0].name in func_arg_placeholders) def _create_input_dict(function_graph, func_arg_placeholders): """Create a mapping from graph tensor names to function tensor names.""" input_dict = {} for op in function_graph.get_operations(): if _is_in_placeholders(op, func_arg_placeholders): input_dict[op.values()[0].name] = op.values()[0].name input_dict[op.name] = op.name else: op_def = _get_op_def(op) attrs = _get_node_def(op).attr o = 0 for arg_def in op_def.output_arg: if arg_def.number_attr: num = attrs[arg_def.number_attr].i elif arg_def.type_list_attr: num = len(attrs[arg_def.type_list_attr].list.type) else: num = 1 for i in range(num): result = "%s:%s:%d" % (op.name, arg_def.name, i) input_dict[op.values()[o].name] = result if o == 0: input_dict[op.name] = result o += 1 return input_dict def _add_op_node(op, func, input_dict): """Converts an op to a function def node and add it to `func`.""" # Add an entry in func.node_def # Note that extend() makes a copy in this case, see: # https://developers.google.com/protocol-buffers/docs/reference/python-generated#repeated-message-fields func.node_def.extend([_get_node_def(op)]) node_def = func.node_def[-1] for i in range(len(node_def.input)): if not node_def.input[i].startswith("^"): assert node_def.input[i] in input_dict, ("%s missing from %s" % (node_def.input[i], input_dict.items())) node_def.input[i] = input_dict[node_def.input[i]] def _graph_to_function_def(graph, operations, inputs, outputs, out_names=None): """Returns `graph` as a `FunctionDef` protocol buffer. 
This method creates a [`FunctionDef`]( https://www.tensorflow.org/code/tensorflow/core/framework/function.proto) protocol buffer that contains all the ops in `operations`. The operations become the body of the function. The arguments `inputs` and `outputs` will be listed as the inputs and outputs tensors of the function. They must be lists of tensors present in the graph. The lists can optionally be empty. Args: graph: Graph. operations: the operations to put in the function. Must be a subset of the operations in the graph. inputs: List of tensors. Inputs to the function. outputs: List of tensors. Outputs of the function. out_names: Optional list of string names for the outputs. Returns: A FunctionDef protocol buffer. Raises: ValueError: if out_names is specified and the wrong length. """ func = function_pb2.FunctionDef() func.signature.name = "_" used_names = set() func.signature.input_arg.extend( [_tensor_to_argdef(i, used_names=used_names) for i in inputs]) if out_names is None: used_names = set() func.signature.output_arg.extend( [_tensor_to_argdef(o, used_names=used_names) for o in outputs]) elif len(outputs) != len(out_names): raise ValueError( "Length of out_names (%d) does not match number of outputs (%d): %s" % (len(out_names), len(outputs), ", ".join(out_names))) elif len(out_names) != len(set(out_names)): raise ValueError( "Must not have duplicates in out_names: %s" % ", ".join(out_names)) else: func.signature.output_arg.extend( [_tensor_to_argdef(o, name=n) for o, n in zip(outputs, out_names)]) func_arg_placeholders = set([i.name for i in inputs]) input_dict = _create_input_dict(graph, func_arg_placeholders) for op in operations: if _is_in_placeholders(op, func_arg_placeholders): continue _add_op_node(op, func, input_dict) if out_names is None: for index, o in enumerate(outputs): k = func.signature.output_arg[index].name func.ret[k] = input_dict[o.name] else: for o, n in zip(outputs, out_names): func.ret[n] = input_dict[o.name] return func def 
_parse_kwargs_as_attrs(func_name, **kwargs): """Parses **kwargs into a node's attributes.""" attrs = {} noinline = kwargs.pop("noinline", None) if noinline is not None: attrs["_noinline"] = attr_value_pb2.AttrValue(b=bool(noinline)) compiled = kwargs.pop("compiled", None) separate_compiled_gradients = kwargs.pop("separate_compiled_gradients", None) if compiled is not None: attrs["_XlaCompile"] = attr_value_pb2.AttrValue(b=bool(compiled)) attrs["_XlaSeparateCompiledGradients"] = attr_value_pb2.AttrValue( b=bool(separate_compiled_gradients)) attrs["_XlaScope"] = attr_value_pb2.AttrValue( s=("function_%s" % func_name).encode()) if kwargs: raise ValueError("Unknown keyword arguments: %s" % kwargs.keys()) return attrs def _call(sig, *inputs, **kwargs): """Adds a node calling a function. This adds a `call` op to the default graph that calls the function of signature `sig`, passing the tensors in `inputs` as arguments. It returns the outputs of the call, which are one or more tensors. `sig` is OpDefArg.a `_DefinedFunction` object. You can pass an optional keyword parameter `name=string` to name the added operation. You can pass an optional keyword parameter `noinline=True|False` to instruct the runtime not to inline the function body into the call site. Args: sig: OpDefArg. The signature of the function. *inputs: arguments to the function. **kwargs: Optional keyword arguments. Can only contain 'name' or 'noinline'. Returns: A 2-element tuple. First element: a Tensor if the function returns a single value; a list of Tensors if the function returns multiple value; the Operation if the function returns no values. Second element: the Operation. Raises: ValueError: if the arguments are invalid. 
""" if len(inputs) != len(sig.input_arg): raise ValueError("Expected number of arguments: %d, received: %d" % (len(sig.input_arg), len(inputs))) name = kwargs.pop("name", None) g = ops.get_default_graph() func_name = sig.name attrs = _parse_kwargs_as_attrs(func_name, **kwargs) output_types = [dtypes.DType(x.type) for x in sig.output_arg] with ops.name_scope(name, func_name, inputs) as name: op = g.create_op( func_name, list(inputs), output_types, name=name, attrs=attrs, op_def=sig, compute_shapes=False) if op.outputs: if len(op.outputs) == 1: ret = op.outputs[0] else: ret = tuple(op.outputs) else: ret = op return ret, op def _get_func_name(func): _, func = tf_decorator.unwrap(func) if callable(func): if tf_inspect.isfunction(func): return func.__name__ elif tf_inspect.ismethod(func): return "%s.%s" % (func.__self__.__name__, func.__name__) else: # Probably a class instance with __call__ return type(func) else: raise ValueError("Argument must be callable") class _FuncGraph(ops.Graph): """A helper for construction a function. _FuncGraph overrides ops.Graph's create_op() so that we can keep track of every inputs into every op created inside the function. If any input is from other graphs, we keep track of it in self.capture and substitue the input with a place holder. Each captured input's corresponding place holder is converted into a function argument and the caller passes in the captured tensor. 
""" def __init__(self, *args, **kwargs): super(_FuncGraph, self).__init__(*args, **kwargs) self._building_function = True self._outer_graph = ops.get_default_graph() self._vscope = vs.get_variable_scope() self._old_custom_getter = self._vscope.custom_getter self._captured = {} self.extra_inputs = [] self.extra_args = [] self.extra_vars = [] def getvar( self, getter, name, shape=None, dtype=None, initializer=None, reuse=None, trainable=True, collections=None, # pylint: disable=redefined-outer-name use_resource=None, **kwargs): """A custom variable getter.""" # Here, we switch the default graph to the outer graph and ask the # variable scope in which the function is defined to give us the # variable. The variable is stashed in extra_vars and returned to # the caller. # # We capture these variables so that the variable definition is # hoisted upward to the outer most graph. with self._outer_graph.as_default(): # pylint: disable=protected-access var = self._vscope.get_variable( vs._get_default_variable_store(), name, shape=shape, dtype=dtype, initializer=initializer, reuse=reuse, trainable=trainable, collections=collections, use_resource=use_resource) self.extra_vars.append(var) if isinstance(var, resource_variable_ops.ResourceVariable): # For resource-based variables read the variable outside the function # and pass in the value. This ensures that the function is pure and # differentiable. TODO(apassos) this may have performance problems if # the function will only do embedding lookups on the variable. return var.value() return var def create_op(self, op_type, inputs, data_types, **kwargs): for i, x in enumerate(inputs): if x.graph is not self: # Referring to a tensor from other graph. if x in self._captured: # Captured already. inputs[i] = self._captured[x] else: # Substitute with a placeholder. 
self.extra_inputs.append(x) ph = array_ops.placeholder(x.dtype, shape=x.get_shape()) # pylint: disable=protected-access ph._handle_shape = x._handle_shape ph._handle_dtype = x._handle_dtype # pylint: enable=protected-access inputs[i] = ph self._captured[x] = ph self.extra_args.append(ph) return super(_FuncGraph, self).create_op(op_type, inputs, data_types, **kwargs) def get_extra_vars(): """Returns the captured variables by the function. Returns: If the default graph is being used to define a function, the returned list of variables are those created inside the function body so far. Otherwise, returns an empty list. """ g = ops.get_default_graph() if isinstance(g, _FuncGraph): return g.extra_vars else: return [] def get_extra_inputs(): """Returns the captured input tensors by the function. Returns: If the default graph is being used to define a function, the returned list of tensors are those accessed inside the function body but defined outside the function body so far. Otherwise, returns an empty list. """ g = ops.get_default_graph() if isinstance(g, _FuncGraph): return g.extra_inputs else: return [] def get_extra_args(): """Returns the corresponding function arguments for the captured inputs. Returns: If the default graph is being used to define a function, the returned list of place holders are those used inside the function body corresponding those returned by get_extra_inputs(). Otherwise, returns an empty list. """ g = ops.get_default_graph() if isinstance(g, _FuncGraph): return g.extra_args else: return [] class _DefinedFunction(object): """_DefinedFunction encapsulates a function definition and its properties. Attributes: name: The function name. definition: The definition of this function. A FunctionDef proto. grad_func_name: If not None, the name of this function's gradient function. python_grad_func: A python callable implementing the gradient of the function python-side. 
""" def __init__(self, func, argnames, input_types, func_name=None, grad_func=None, python_grad_func=None, out_names=None, shape_func=None, **kwargs): """Creates _DefinedFunction. Args: func: A python callable which constructs a tf function body. argnames: A list of strings for function argument names. input_types: The function's argument types. Can be a tuple, list of tf data types. func_name: The function name. Defaults to None, in which derives from 'func'. grad_func: This function's gradient function, if not None. Defaults to None. python_grad_func: A python callable implementing the gradient of the function python-side. out_names: An optional list of strings for the function return value names. shape_func: An optional function mapping an op to a list of static output shapes. **kwargs: The keyword arguments. **kwargs is passed to every call site of this function. Raises: ValueError: The function definition is invalid. """ self._func = func self._input_types = input_types self._func_name = func_name self._grad_func = grad_func self._python_grad_func = python_grad_func self._out_names = out_names self._shape_func = shape_func self._extra_kwargs = kwargs self._definition = None # Constructed lazily. self._sub_functions = dict() # Constructed with definition. 
self._args = [] assert isinstance(input_types, (list, tuple)) for i in range(len(input_types)): argname = argnames[i] if i < len(argnames) else ("arg%d" % i) argtype = input_types[i] self._args.append((argname, argtype)) @property def name(self): """Function name.""" self._create_definition_if_needed() return self._func_name @property def definition(self): """Function definition proto.""" self._create_definition_if_needed() return self._definition def set_grad_func(self, grad_func): """Specifies the gradient function of this function.""" assert not self._grad_func assert isinstance(grad_func, _DefinedFunction) self._grad_func = grad_func @property def grad_func_name(self): """Its gradient function's name.""" return self._grad_func.name if self._grad_func else None @property def python_grad_func(self): """Python gradient function callable.""" return self._python_grad_func @property def declared_input_types(self): """Returns the list of data types of explicit declared inputs.""" return self._input_types @property def captured_inputs(self): """Returns the list of implicitly captured inputs.""" self._create_definition_if_needed() return self._extra_inputs def _create_definition_if_needed(self): """Creates the function definition if it's not created yet.""" if self._definition is not None: return # Create the func_def object. temp_graph = _FuncGraph() with temp_graph.as_default(): # List of placeholders for the function_def. inputs = [] for (argname, argtype) in self._args: argholder = array_ops.placeholder(argtype, name=argname) inputs.append(argholder) # Call func and gather the output tensors. with vs.variable_scope("", custom_getter=temp_graph.getvar): outputs = self._func(*inputs) # If func only returned one value, make it a tuple. if not isinstance(outputs, (list, tuple)): outputs = (outputs,) if any([_ is None for _ in outputs]): raise ValueError("Function can not return None.") # Ensures each output is a Tensor. 
outputs = [ops.convert_to_tensor(_) for _ in outputs] self._extra_inputs = temp_graph.extra_inputs inputs.extend(temp_graph.extra_args) # pylint: disable=protected-access self._sub_functions = temp_graph._functions # pylint: enable=protected-access # Build the FunctionDef self._definition = _graph_to_function_def( temp_graph, temp_graph.get_operations(), inputs, outputs, out_names=self._out_names) # Extra kwargs are treated as attrs on the function def. sig_pre_func_name = self._func_name or _get_func_name(self._func) kwargs_attr = _parse_kwargs_as_attrs(sig_pre_func_name, **self._extra_kwargs) for k in kwargs_attr: self._definition.attr[k].CopyFrom(kwargs_attr[k]) # Hash the definition and its dependencies. self._hash_str = self._create_hash_str( self._definition.signature.input_arg, self._definition.signature.output_arg, self._definition.node_def) # Finally, we decide the function name to use. If not specified, # make up something which is almost certainly unique (but deterministic). if not self._func_name: self._func_name = "_".join([_get_func_name(self._func), self._hash_str]) self._definition.signature.name = self._func_name if self._func.__doc__: self._definition.signature.description = self._func.__doc__ def _create_hash_str(self, input_arg, output_arg, node_def): """Creates an 8-character string unique to this input. Args: input_arg: the input_arg field of an OpDef (e.g. self._definition.signature.input_arg) output_arg: the output_arg field of an OpDef (e.g. self._definition.signature.output_arg) node_def: the node_def field of a FunctionDef (e.g. 
self._definition.node_def) Returns: The unique string for this input """ hasher = hashlib.sha1() def update_num(n): hasher.update(compat.as_bytes("%x" % n)) def update_str(s): update_num(len(s)) hasher.update(compat.as_bytes(s)) def update_strs(slist): update_num(len(slist)) for s in slist: update_str(s) for adef in input_arg: update_str(adef.SerializeToString()) for adef in output_arg: update_str(adef.SerializeToString()) for n in sorted(node_def, key=lambda n: n.name): update_str(n.name) update_str(n.op) update_strs(n.input) update_num(len(n.attr)) # NOTE: protobuf map serialization does not guarantee ordering. for k in sorted(n.attr): update_str(k) update_str(n.attr[k].SerializeToString()) return hasher.hexdigest()[:8] def add_to_graph(self, g): """Adds this function into the graph g.""" self._create_definition_if_needed() # pylint: disable=protected-access # If 'g' has an identical function already, do nothing. prev = g._get_function(self.name) if prev and (prev._hash_str == self._hash_str): return # Adds this function into 'g'. g._add_function(self) # pylint: enable=protected-access # Ensures related sub-routines are defined in 'g', too. for f in self._sub_functions.values(): f.add_to_graph(g) # Adds its gradient function, too. if self._grad_func: self._grad_func.add_to_graph(g) def __call__(self, *args, **kwargs): self.add_to_graph(ops.get_default_graph()) args = [ops.convert_to_tensor(_) for _ in args] + self._extra_inputs ret, op = _call(self._definition.signature, *args, **kwargs) if self._shape_func is not None: shapes = self._shape_func(op) if len(shapes) != len(op.outputs): raise ValueError("shape_func produced %d shapes for %d outputs" % (len(shapes), len(op.outputs))) for (t, shape) in zip(op.outputs, shapes): t.set_shape(shape) return ret def _from_definition(fdef, grad_func=None): """Creates a _DefinedFunction initialized from a FunctionDef proto. 
Args: fdef: a FunctionDef grad_func: a _DefinedFunction or None Returns: A _DefinedFunction representing fdef """ # The Python callable is only needed to create a FunctionDef. Since we have # the FunctionDef here, we don't need to set _DefinedFunction._func (nor do we # have access to such a callable here). func = None argnames = [arg.name for arg in fdef.signature.input_arg] input_types = tuple( dtypes.as_dtype(arg.type) for arg in fdef.signature.input_arg) func_name = fdef.signature.name # Note: FunctionDefs do not include python gradient functions, so if the # original _DefinedFunction included one it will not be reflected here. python_grad_func = None out_names = [arg.name for arg in fdef.signature.output_arg] result = _DefinedFunction(func, argnames, input_types, func_name, grad_func, python_grad_func, out_names) # pylint: disable=protected-access result._definition = fdef # Captured inputs are added as regular inputs to a function when it's # serialized, i.e. any extra inputs from the original function are now # included in `result`._args result._extra_inputs = [] result._hash_str = result._create_hash_str( result._definition.signature.input_arg, result._definition.signature.output_arg, result._definition.node_def) # pylint: enable=protected-access return result def _from_library(lib): """Creates _DefinedFunctions initialized from a FunctionDefLibrary proto. This method handles assigning the correct gradient functions to each function. 
Args: lib: a FunctionDefLibrary Returns: A list of _DefinedFunctions Raises: ValueError: `lib` is invalid """ if not lib.function and not lib.gradient: return [] # function name -> FunctionDef proto funcs = {fdef.signature.name: fdef for fdef in lib.function} # Validate that all references function names have function defs for g in lib.gradient: if g.function_name not in funcs: raise ValueError("FunctionDefLibrary missing '%s' FunctionDef\n%s" % (g.function_name, str(lib))) if g.gradient_func not in funcs: raise ValueError("FunctionDefLibrary missing '%s' FunctionDef\n%s" % (g.gradient_func, str(lib))) # function name -> gradient function name func_to_grad = collections.defaultdict(lambda: None) # gradient function name -> names of functions having that grad function grad_to_funcs = collections.defaultdict(list) for gdef in lib.gradient: func_to_grad[gdef.function_name] = gdef.gradient_func grad_to_funcs[gdef.gradient_func].append(gdef.function_name) # Start with functions without gradients ready = [ fdef for fdef in lib.function if func_to_grad[fdef.signature.name] is None ] if not ready: raise ValueError("FunctionDefLibrary contains cyclic gradient functions!\n" + str(lib)) # function name -> _DefinedFunction initialized = {} while ready: fdef = ready.pop() name = fdef.signature.name grad = initialized.get(func_to_grad[name]) if func_to_grad[name]: assert grad defined_func = _from_definition(fdef, grad_func=grad) initialized[name] = defined_func ready.extend(funcs[f] for f in grad_to_funcs[name]) return initialized.values() # NOTE: The list needs to be extended when more data types are added. 
_DTYPE_TO_STR = { dtypes.float16: "f16", dtypes.float32: "f32", dtypes.float64: "f64", dtypes.int32: "i32", dtypes.uint8: "i8", dtypes.uint16: "u16", dtypes.int16: "i16", dtypes.int8: "i8", dtypes.string: "s", dtypes.complex64: "c64", dtypes.complex128: "c128", dtypes.int64: "i64", dtypes.bool: "b", dtypes.qint8: "qi8", dtypes.quint8: "qu8", dtypes.qint16: "qi16", dtypes.quint16: "qu16", dtypes.qint32: "qi32", dtypes.bfloat16: "b16" } def _type_list_to_str(types): if any([_ not in _DTYPE_TO_STR for _ in types]): raise ValueError("Unsupported dtypes: %s" % types) return "".join([_DTYPE_TO_STR[_] for _ in types]) class _OverloadedFunction(object): """_OverloadedFunction encapsulates an overloaded function. _OverloadedFunction maintains a mapping from input types to instantiated _DefinedFunction in self._overload. """ def __init__(self, func, argnames, func_name=None, grad_func=None, python_grad_func=None, out_names=None, **kwargs): """Creates _DefinedFunction. Args: func: A python callable which constructs a tf function body. argnames: A list of strings for function argument names. func_name: The function name. Defaults to None, in which derives from 'func'. grad_func: This function's gradient function, if not None. Defaults to None. python_grad_func: A python callable implementing the gradient of the function python-side. out_names: A list of strings for the function return value names. **kwargs: The keyword arguments. **kwargs is passed to every call site of this function. Raises: ValueError: The function definition is invalid. """ self._func = func self._argnames = argnames self._func_name = func_name assert grad_func is None or isinstance(grad_func, _OverloadedFunction) self._grad_func = grad_func self._python_grad_func = python_grad_func self._out_names = out_names self._extra_kwargs = kwargs self._overload = {} def instantiate(self, input_types): """Instantiate this function given input argument types. Args: input_types: A list of data types for the inputs. 
Returns: _DefinedFunction for the given input types. """ # Stringify the type list. key = _type_list_to_str(input_types) defined = self._overload.get(key) if not defined: # If not defined yet, define the function given the input types. name = self._func_name if name is not None: name = "_".join([name, key]) defined = _DefinedFunction( self._func, self._argnames, input_types, name, None, self._python_grad_func, out_names=self._out_names, **self._extra_kwargs) _ = defined.name # Fully instantiate the function definition. if self._grad_func: # If _grad_func is given, it is another # _OverloadedFunction. We need to instantiate it with the # right input types. output_types = [ dtypes.DType(_.type) for _ in defined.definition.signature.output_arg ] # pylint: disable=protected-access defined._grad_func = self._grad_func.instantiate( input_types + output_types) # pylint: enable=protected-access self._overload[key] = defined return defined def __call__(self, *args, **kwargs): input_types = [] args = list(args) for (i, x) in enumerate(args): x = ops.convert_to_tensor(x) if not isinstance(x, ops.Tensor): raise ValueError("Expect a Tensor but get ", x) input_types.append(x.dtype) args[i] = x return self.instantiate(input_types)(*args, **kwargs) class Defun(object): """Decorator used to define TensorFlow functions. Use this decorator to make a Python function usable directly as a TensorFlow function. The decorated function must add ops to the default graph and return zero or more `Tensor` objects. Call the decorator with named arguments, one for each argument of the function to decorate, with the expected type of the argument as value. For example if the function to decorate accepts two `tf.float32` arguments named `x` and `y`, call the decorator with: @Defun(tf.float32, tf.float32) def foo(x, y): ... When you call the decorated function it will add `call` ops to the default graph and adds the definition of the function into the default graph. 
Because the addition of the function into the graph is deferred, the decorator can be used anywhere in the program. Any variables created inside of the function are hoisted into the outer graph. Note that the variables are created in the variable scope that was active during the first call to the function. Subsequent function calls will refer to the same set of variables. Definitions of functions are frozen in a graph as soon as the graph is used to create a session. Therefore, nodes using the function must be created in the graph before the corresponding session is created. Example, but also see the [How To on functions](link_needed). ```python # Defining the function. @tf.Defun(tf.float32, tf.float32) def MyFunc(x, y): return x + y, x - y # Building the graph. a = tf.Constant([1.0]) b = tf.Constant([2.0]) c, d = MyFunc(a, b, name='mycall') ``` """ def __init__(self, *input_types, **kwargs): """Create a `Defun` decorator. Args: *input_types: A list of `tf.DType` **kwargs: Optional keyword arguments, including func_name - (optional). A python string, the name to use to declare this `Function` in the graph. grad_func - (optional). A function implementing the gradient of the function-to-register. This is either a `_DefinedFunction` or a `Declare` object. The gradient function must satisify the criterion defined in function.proto:GradientDef. python_grad_func - (optional). A function implementing the gradient of the function python-side. This function must take the current op and the gradients w.r.t. its outputs, and return the gradients w.r.t. the inputs. That is it must implement the interface expected by `tf.RegisterGradient`). This will be called by tf.gradients to add the gradient ops to the graph. At most one of grad_func and python_grad_func can be specified. out_names = (optional). A list of strings, one per output tensor. shape_func - (optional). A function taking the op and returning a list of static shapes to set for the function's outputs. 
""" self._input_types = input_types self._func_name = kwargs.pop("func_name", None) self._grad_func = kwargs.pop("grad_func", None) self._python_grad_func = kwargs.pop("python_grad_func", None) self._out_names = kwargs.pop("out_names", None) self._extra_kwargs = kwargs def __call__(self, func): # Various sanity checks on the callable func. if not callable(func): raise ValueError("func %s must be callable" % func) # Func should not use kwargs and defaults. argspec = tf_inspect.getargspec(func) if argspec.keywords or argspec.defaults: raise ValueError("Functions with argument defaults or keyword " "arguments are not supported.") # Computes how many arguments 'func' has. min_args = len(argspec.args) max_args = min_args if argspec.varargs: max_args = 1000000 argnames = argspec.args if tf_inspect.ismethod(func): # 1st argument is the "class" type. min_args -= 1 argnames = argnames[1:] if self._input_types: # If Defun is given a list of types for the inputs, the number # of input types should be compatible with 'func'. num = len(self._input_types) if num < min_args or num > max_args: raise ValueError( "The function has fewer arguments than the number of specified " "input types.") return _DefinedFunction( func, argnames, self._input_types, self._func_name, self._grad_func, self._python_grad_func, out_names=self._out_names, **self._extra_kwargs) # 'func' expects no arguments and input types is an empty list. if min_args == 0 and max_args == 0: return _DefinedFunction( func, [], [], self._func_name, self._grad_func, self._python_grad_func, out_names=self._out_names, **self._extra_kwargs) # Input types are unknown. It's an overloaded function and hence # its definition needs to be deferred until it's called. return _OverloadedFunction( func, argnames, self._func_name, self._grad_func, self._python_grad_func, out_names=self._out_names, **self._extra_kwargs) class Declare(object): """Declares a TensorFlow function. 
The object represents a TensorFlow function which will be defined later during a graph construction. For example, # Declares a function Foo, which takes a tf.int32 named "n" and a # tf.float32 named "x" as inputs and returns a tf.float32 named "z" # as its output. foo = Declare("Foo", [("n", tf.int32), ("x", tf.float32)], [("z", tf.float32)]) # Defines a function Bar calls Foo. @tf.Defun(tf.float32) def Bar(x): return foo(6, x) # Defines Foo, with output named "z". @tf.Defun(tf.int32, tf.float32, out_names=["z"]) def Foo(n, x): ... # Calculation. return result """ def __init__(self, func_name, inputs, outputs): """Creates a `Declare` object. Args: func_name: The name of the function. inputs: A list of (name, data type) pairs of function arguments. outputs: A list of (name, data type) pairs of function return values. """ self._sig = op_def_pb2.OpDef() self._sig.name = func_name def _to_argdef_list(args): names = [n for n, t in args] if len(names) != len(set(names)): raise ValueError("Expected names to all be unique: %s" % str(names)) return [ op_def_pb2.OpDef.ArgDef(type=t.as_datatype_enum, name=n) for n, t in args ] self._sig.input_arg.extend(_to_argdef_list(inputs)) self._sig.output_arg.extend(_to_argdef_list(outputs)) def __call__(self, *inputs, **kwargs): inputs = [ops.convert_to_tensor(_) for _ in inputs] return _call(self._sig, *inputs, **kwargs)[0]
mit
dnlm92/chokoretto
main/lib/unidecode/x05e.py
250
4668
data = ( 'Za ', # 0x00 'Bi ', # 0x01 'Shi ', # 0x02 'Bu ', # 0x03 'Ding ', # 0x04 'Shuai ', # 0x05 'Fan ', # 0x06 'Nie ', # 0x07 'Shi ', # 0x08 'Fen ', # 0x09 'Pa ', # 0x0a 'Zhi ', # 0x0b 'Xi ', # 0x0c 'Hu ', # 0x0d 'Dan ', # 0x0e 'Wei ', # 0x0f 'Zhang ', # 0x10 'Tang ', # 0x11 'Dai ', # 0x12 'Ma ', # 0x13 'Pei ', # 0x14 'Pa ', # 0x15 'Tie ', # 0x16 'Fu ', # 0x17 'Lian ', # 0x18 'Zhi ', # 0x19 'Zhou ', # 0x1a 'Bo ', # 0x1b 'Zhi ', # 0x1c 'Di ', # 0x1d 'Mo ', # 0x1e 'Yi ', # 0x1f 'Yi ', # 0x20 'Ping ', # 0x21 'Qia ', # 0x22 'Juan ', # 0x23 'Ru ', # 0x24 'Shuai ', # 0x25 'Dai ', # 0x26 'Zheng ', # 0x27 'Shui ', # 0x28 'Qiao ', # 0x29 'Zhen ', # 0x2a 'Shi ', # 0x2b 'Qun ', # 0x2c 'Xi ', # 0x2d 'Bang ', # 0x2e 'Dai ', # 0x2f 'Gui ', # 0x30 'Chou ', # 0x31 'Ping ', # 0x32 'Zhang ', # 0x33 'Sha ', # 0x34 'Wan ', # 0x35 'Dai ', # 0x36 'Wei ', # 0x37 'Chang ', # 0x38 'Sha ', # 0x39 'Qi ', # 0x3a 'Ze ', # 0x3b 'Guo ', # 0x3c 'Mao ', # 0x3d 'Du ', # 0x3e 'Hou ', # 0x3f 'Zheng ', # 0x40 'Xu ', # 0x41 'Mi ', # 0x42 'Wei ', # 0x43 'Wo ', # 0x44 'Fu ', # 0x45 'Yi ', # 0x46 'Bang ', # 0x47 'Ping ', # 0x48 'Tazuna ', # 0x49 'Gong ', # 0x4a 'Pan ', # 0x4b 'Huang ', # 0x4c 'Dao ', # 0x4d 'Mi ', # 0x4e 'Jia ', # 0x4f 'Teng ', # 0x50 'Hui ', # 0x51 'Zhong ', # 0x52 'Shan ', # 0x53 'Man ', # 0x54 'Mu ', # 0x55 'Biao ', # 0x56 'Guo ', # 0x57 'Ze ', # 0x58 'Mu ', # 0x59 'Bang ', # 0x5a 'Zhang ', # 0x5b 'Jiong ', # 0x5c 'Chan ', # 0x5d 'Fu ', # 0x5e 'Zhi ', # 0x5f 'Hu ', # 0x60 'Fan ', # 0x61 'Chuang ', # 0x62 'Bi ', # 0x63 'Hei ', # 0x64 '[?] 
', # 0x65 'Mi ', # 0x66 'Qiao ', # 0x67 'Chan ', # 0x68 'Fen ', # 0x69 'Meng ', # 0x6a 'Bang ', # 0x6b 'Chou ', # 0x6c 'Mie ', # 0x6d 'Chu ', # 0x6e 'Jie ', # 0x6f 'Xian ', # 0x70 'Lan ', # 0x71 'Gan ', # 0x72 'Ping ', # 0x73 'Nian ', # 0x74 'Qian ', # 0x75 'Bing ', # 0x76 'Bing ', # 0x77 'Xing ', # 0x78 'Gan ', # 0x79 'Yao ', # 0x7a 'Huan ', # 0x7b 'You ', # 0x7c 'You ', # 0x7d 'Ji ', # 0x7e 'Yan ', # 0x7f 'Pi ', # 0x80 'Ting ', # 0x81 'Ze ', # 0x82 'Guang ', # 0x83 'Zhuang ', # 0x84 'Mo ', # 0x85 'Qing ', # 0x86 'Bi ', # 0x87 'Qin ', # 0x88 'Dun ', # 0x89 'Chuang ', # 0x8a 'Gui ', # 0x8b 'Ya ', # 0x8c 'Bai ', # 0x8d 'Jie ', # 0x8e 'Xu ', # 0x8f 'Lu ', # 0x90 'Wu ', # 0x91 '[?] ', # 0x92 'Ku ', # 0x93 'Ying ', # 0x94 'Di ', # 0x95 'Pao ', # 0x96 'Dian ', # 0x97 'Ya ', # 0x98 'Miao ', # 0x99 'Geng ', # 0x9a 'Ci ', # 0x9b 'Fu ', # 0x9c 'Tong ', # 0x9d 'Pang ', # 0x9e 'Fei ', # 0x9f 'Xiang ', # 0xa0 'Yi ', # 0xa1 'Zhi ', # 0xa2 'Tiao ', # 0xa3 'Zhi ', # 0xa4 'Xiu ', # 0xa5 'Du ', # 0xa6 'Zuo ', # 0xa7 'Xiao ', # 0xa8 'Tu ', # 0xa9 'Gui ', # 0xaa 'Ku ', # 0xab 'Pang ', # 0xac 'Ting ', # 0xad 'You ', # 0xae 'Bu ', # 0xaf 'Ding ', # 0xb0 'Cheng ', # 0xb1 'Lai ', # 0xb2 'Bei ', # 0xb3 'Ji ', # 0xb4 'An ', # 0xb5 'Shu ', # 0xb6 'Kang ', # 0xb7 'Yong ', # 0xb8 'Tuo ', # 0xb9 'Song ', # 0xba 'Shu ', # 0xbb 'Qing ', # 0xbc 'Yu ', # 0xbd 'Yu ', # 0xbe 'Miao ', # 0xbf 'Sou ', # 0xc0 'Ce ', # 0xc1 'Xiang ', # 0xc2 'Fei ', # 0xc3 'Jiu ', # 0xc4 'He ', # 0xc5 'Hui ', # 0xc6 'Liu ', # 0xc7 'Sha ', # 0xc8 'Lian ', # 0xc9 'Lang ', # 0xca 'Sou ', # 0xcb 'Jian ', # 0xcc 'Pou ', # 0xcd 'Qing ', # 0xce 'Jiu ', # 0xcf 'Jiu ', # 0xd0 'Qin ', # 0xd1 'Ao ', # 0xd2 'Kuo ', # 0xd3 'Lou ', # 0xd4 'Yin ', # 0xd5 'Liao ', # 0xd6 'Dai ', # 0xd7 'Lu ', # 0xd8 'Yi ', # 0xd9 'Chu ', # 0xda 'Chan ', # 0xdb 'Tu ', # 0xdc 'Si ', # 0xdd 'Xin ', # 0xde 'Miao ', # 0xdf 'Chang ', # 0xe0 'Wu ', # 0xe1 'Fei ', # 0xe2 'Guang ', # 0xe3 'Koc ', # 0xe4 'Kuai ', # 0xe5 'Bi ', # 0xe6 'Qiang ', # 0xe7 'Xie ', # 
0xe8 'Lin ', # 0xe9 'Lin ', # 0xea 'Liao ', # 0xeb 'Lu ', # 0xec '[?] ', # 0xed 'Ying ', # 0xee 'Xian ', # 0xef 'Ting ', # 0xf0 'Yong ', # 0xf1 'Li ', # 0xf2 'Ting ', # 0xf3 'Yin ', # 0xf4 'Xun ', # 0xf5 'Yan ', # 0xf6 'Ting ', # 0xf7 'Di ', # 0xf8 'Po ', # 0xf9 'Jian ', # 0xfa 'Hui ', # 0xfb 'Nai ', # 0xfc 'Hui ', # 0xfd 'Gong ', # 0xfe 'Nian ', # 0xff )
mit
iver333/phantomjs
src/qt/qtwebkit/Tools/Scripts/webkitpy/common/system/outputtee.py
192
3142
# Copyright (c) 2009, Google Inc. All rights reserved. # Copyright (c) 2009 Apple Inc. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import codecs import os import sys # Simple class to split output between multiple destinations class Tee: def __init__(self, *files): self.files = files # Callers should pass an already encoded string for writing. 
def write(self, bytes): for file in self.files: file.write(bytes) class OutputTee: def __init__(self): self._original_stdout = None self._original_stderr = None self._files_for_output = [] def add_log(self, path): log_file = self._open_log_file(path) self._files_for_output.append(log_file) self._tee_outputs_to_files(self._files_for_output) return log_file def remove_log(self, log_file): self._files_for_output.remove(log_file) self._tee_outputs_to_files(self._files_for_output) log_file.close() @staticmethod def _open_log_file(log_path): (log_directory, log_name) = os.path.split(log_path) if log_directory and not os.path.exists(log_directory): os.makedirs(log_directory) return codecs.open(log_path, "a+", "utf-8") def _tee_outputs_to_files(self, files): if not self._original_stdout: self._original_stdout = sys.stdout self._original_stderr = sys.stderr if files and len(files): sys.stdout = Tee(self._original_stdout, *files) sys.stderr = Tee(self._original_stderr, *files) else: sys.stdout = self._original_stdout sys.stderr = self._original_stderr
bsd-3-clause
yahman72/robotframework
src/robot/run.py
4
27219
#!/usr/bin/env python # Copyright 2008-2015 Nokia Solutions and Networks # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Module implementing the command line entry point for executing tests. This module can be executed from the command line using the following approaches:: python -m robot.run python path/to/robot/run.py Instead of ``python`` it is possible to use also other Python interpreters. This module is also used by the installed ``pybot``, ``jybot`` and ``ipybot`` start-up scripts. This module also provides :func:`run` and :func:`run_cli` functions that can be used programmatically. Other code is for internal usage. """ USAGE = """Robot Framework -- A generic test automation framework Version: <VERSION> Usage: pybot|jybot|ipybot [options] data_sources or: python|jython|ipy -m robot.run [options] data_sources or: python|jython|ipy path/to/robot/run.py [options] data_sources or: java -jar robotframework.jar run [options] data_sources Robot Framework is a Python-based keyword-driven test automation framework for acceptance level testing and acceptance test-driven development (ATDD). It has an easy-to-use tabular syntax for creating test cases and its testing capabilities can be extended by test libraries implemented either with Python or Java. Users can also create new keywords from existing ones using the same simple syntax that is used for creating test cases. 
Depending is Robot Framework installed using Python, Jython, or IronPython interpreter, it has a start-up script, `pybot`, `jybot` or `ipybot`, respectively. Alternatively, it is possible to directly execute `robot.run` module (e.g. `python -m robot.run`) or `robot/run.py` script using a selected interpreter. Finally, there is also a standalone JAR distribution. Data sources given to Robot Framework are either test case files or directories containing them and/or other directories. Single test case file creates a test suite containing all the test cases in it and a directory containing test case files creates a higher level test suite with test case files or other directories as sub test suites. If multiple data sources are given, a virtual top level suite containing suites generated from given data sources is created. By default Robot Framework creates an XML output file and a log and a report in HTML format, but this can be configured using various options listed below. Outputs in HTML format are for human consumption and XML output for integration with other systems. XML outputs can also be combined and otherwise further processed with `rebot` tool. Run `rebot --help` for more information. Robot Framework is open source software released under Apache License 2.0. Its copyrights are owned and development supported by Nokia Solutions and Networks. For more information about the framework see http://robotframework.org/. Options ======= -N --name name Set the name of the top level test suite. Underscores in the name are converted to spaces. Default name is created from the name of the executed data source. -D --doc documentation Set the documentation of the top level test suite. Underscores in the documentation are converted to spaces and it may also contain simple HTML formatting (e.g. *bold* and http://url/). -M --metadata name:value * Set metadata of the top level suite. Underscores in the name and value are converted to spaces. 
Value can contain same HTML formatting as --doc. Example: --metadata version:1.2 -G --settag tag * Sets given tag(s) to all executed test cases. -t --test name * Select test cases to run by name or long name. Name is case and space insensitive and it can also be a simple pattern where `*` matches anything and `?` matches any char. If using `*` and `?` in the console is problematic see --escape and --argumentfile. -s --suite name * Select test suites to run by name. When this option is used with --test, --include or --exclude, only test cases in matching suites and also matching other filtering criteria are selected. Name can be a simple pattern similarly as with --test and it can contain parent name separated with a dot. For example `-s X.Y` selects suite `Y` only if its parent is `X`. -i --include tag * Select test cases to run by tag. Similarly as name with --test, tag is case and space insensitive and it is possible to use patterns with `*` and `?` as wildcards. Tags and patterns can also be combined together with `AND`, `OR`, and `NOT` operators. Examples: --include foo --include bar* --include fooANDbar* -e --exclude tag * Select test cases not to run by tag. These tests are not run even if included with --include. Tags are matched using the rules explained with --include. -R --rerunfailed output Select failed tests from an earlier output file to be re-executed. Equivalent to selecting same tests individually using --test option. --runfailed output Deprecated since RF 2.8.4. Use --rerunfailed instead. -c --critical tag * Tests having given tag are considered critical. If no critical tags are set, all tags are critical. Tags can be given as a pattern like with --include. -n --noncritical tag * Tests with given tag are not critical even if they have a tag set with --critical. Tag can be a pattern. -v --variable name:value * Set variables in the test data. Only scalar variables are supported and name is given without `${}`. 
See --escape for how to use special characters and --variablefile for a more powerful variable setting mechanism that allows also list variables. Examples: --variable str:Hello => ${str} = `Hello` -v str:Hi_World -E space:_ => ${str} = `Hi World` -v x: -v y:42 => ${x} = ``, ${y} = `42` -V --variablefile path * File to read variables from (e.g. `path/vars.py`). Example file: | import random | __all__ = [`scalar`, `LIST__var`, `integer`] | scalar = `Hello world!` | LIST__var = [`Hello`, `list`, `world`] | integer = random.randint(1,10) => ${scalar} = `Hello world!` @{var} = [`Hello`,`list`,`world`] ${integer} = <random integer from 1 to 10> -d --outputdir dir Where to create output files. The default is the directory where tests are run from and the given path is considered relative to that unless it is absolute. -o --output file XML output file. Given path, similarly as paths given to --log, --report, --xunit, and --debugfile, is relative to --outputdir unless given as an absolute path. Other output files are created based on XML output files after the test execution and XML outputs can also be further processed with Rebot tool. Can be disabled by giving a special value `NONE`. In this case, also log and report are automatically disabled. Default: output.xml -l --log file HTML log file. Can be disabled by giving a special value `NONE`. Default: log.html Examples: `--log mylog.html`, `-l NONE` -r --report file HTML report file. Can be disabled with `NONE` similarly as --log. Default: report.html -x --xunit file xUnit compatible result file. Not created unless this option is specified. --xunitskipnoncritical Mark non-critical tests on xUnit output as skipped. -b --debugfile file Debug file written during execution. Not created unless this option is specified. -T --timestampoutputs When this option is used, timestamp in a format `YYYYMMDD-hhmmss` is added to all generated output files between their basename and extension. 
For example `-T -o output.xml -r report.html -l none` creates files like `output-20070503-154410.xml` and `report-20070503-154410.html`. --splitlog Split log file into smaller pieces that open in browser transparently. --logtitle title Title for the generated test log. The default title is `<Name Of The Suite> Test Log`. Underscores in the title are converted into spaces in all titles. --reporttitle title Title for the generated test report. The default title is `<Name Of The Suite> Test Report`. --reportbackground colors Background colors to use in the report file. Either `all_passed:critical_passed:failed` or `passed:failed`. Both color names and codes work. Examples: --reportbackground green:yellow:red --reportbackground #00E:#E00 -L --loglevel level Threshold level for logging. Available levels: TRACE, DEBUG, INFO (default), WARN, NONE (no logging). Use syntax `LOGLEVEL:DEFAULT` to define the default visible log level in log files. Examples: --loglevel DEBUG --loglevel DEBUG:INFO --suitestatlevel level How many levels to show in `Statistics by Suite` in log and report. By default all suite levels are shown. Example: --suitestatlevel 3 --tagstatinclude tag * Include only matching tags in `Statistics by Tag` and `Test Details` in log and report. By default all tags set in test cases are shown. Given `tag` can also be a simple pattern (see e.g. --test). --tagstatexclude tag * Exclude matching tags from `Statistics by Tag` and `Test Details`. This option can be used with --tagstatinclude similarly as --exclude is used with --include. --tagstatcombine tags:name * Create combined statistics based on tags. These statistics are added into `Statistics by Tag` and matching tests into `Test Details`. If optional `name` is not given, name of the combined tag is got from the specified tags. Tags are combined using the rules explained in --include. 
Examples: --tagstatcombine requirement-* --tagstatcombine tag1ANDtag2:My_name --tagdoc pattern:doc * Add documentation to tags matching given pattern. Documentation is shown in `Test Details` and also as a tooltip in `Statistics by Tag`. Pattern can contain characters `*` (matches anything) and `?` (matches any char). Documentation can contain formatting similarly as with --doc option. Examples: --tagdoc mytag:My_documentation --tagdoc regression:*See*_http://info.html --tagdoc owner-*:Original_author --tagstatlink pattern:link:title * Add external links into `Statistics by Tag`. Pattern can contain characters `*` (matches anything) and `?` (matches any char). Characters matching to wildcard expressions can be used in link and title with syntax %N, where N is index of the match (starting from 1). In title underscores are automatically converted to spaces. Examples: --tagstatlink mytag:http://my.domain:Link --tagstatlink bug-*:http://tracker/id=%1:Bug_Tracker --removekeywords all|passed|for|wuks|name:<pattern> * Remove keyword data from the generated log file. Keywords containing warnings are not removed except in `all` mode. all: remove data from all keywords passed: remove data only from keywords in passed test cases and suites for: remove passed iterations from for loops wuks: remove all but the last failing keyword inside `BuiltIn.Wait Until Keyword Succeeds` name:<pattern>: remove data from keywords that match the given pattern. The pattern is matched against the full name of the keyword (e.g. 'MyLib.Keyword', 'resource.Second Keyword'), is case, space, and underscore insensitive, and may contain `*` and `?` as wildcards. Examples: --removekeywords name:Lib.HugeKw --removekeywords name:myresource.* --flattenkeywords for|foritem|name:<pattern> * Flattens matching keywords in the generated log file. Matching keywords get all log messages from their child keywords and children are discarded otherwise. 
for: flatten for loops fully foritem: flatten individual for loop iterations name:<pattern>: flatten matched keywords using same matching rules as with `--removekeywords name:<pattern>` --listener class * A class for monitoring test execution. Gets notifications e.g. when a test case starts and ends. Arguments to listener class can be given after class name, using colon as separator. For example: --listener MyListenerClass:arg1:arg2 --warnonskippedfiles If this option is used, skipped test data files will cause a warning that is visible in the console output and the log file. By default skipped files only cause an info level syslog message. --nostatusrc Sets the return code to zero regardless of failures in test cases. Error codes are returned normally. --runemptysuite Executes tests also if the top level test suite is empty. Useful e.g. with --include/--exclude when it is not an error that no test matches the condition. --dryrun Verifies test data and runs tests so that library keywords are not executed. --exitonfailure Stops test execution if any critical test fails. --exitonerror Stops test execution if any error occurs when parsing test data, importing libraries, and so on. --skipteardownonexit Causes teardowns to be skipped if test execution is stopped prematurely. --randomize all|suites|tests|none Randomizes the test execution order. all: randomizes both suites and tests suites: randomizes suites tests: randomizes tests none: no randomization (default) Use syntax `VALUE:SEED` to give a custom random seed. The seed must be an integer. Examples: --randomize all --randomize tests:1234 -W --monitorwidth chars Width of the monitor output. Default is 78. -C --monitorcolors auto|on|ansi|off Use colors on console output or not. auto: use colors when output not redirected (default) on: always use colors ansi: like `on` but use ANSI colors also on Windows off: disable colors altogether Note that colors do not work with Jython on Windows. 
-K --monitormarkers auto|on|off Show `.` (success) or `F` (failure) on console when top level keywords in test cases end. Values have same semantics as with --monitorcolors. -P --pythonpath path * Additional locations (directories, ZIPs, JARs) where to search test libraries from when they are imported. Multiple paths can be given by separating them with a colon (`:`) or using this option several times. Given path can also be a glob pattern matching multiple paths but then it normally must be escaped or quoted. Examples: --pythonpath libs/ --pythonpath /opt/testlibs:mylibs.zip:yourlibs -E star:STAR -P lib/STAR.jar -P mylib.jar -E --escape what:with * Escape characters which are problematic in console. `what` is the name of the character to escape and `with` is the string to escape it with. Note that all given arguments, incl. data sources, are escaped so escape characters ought to be selected carefully. <--------------------ESCAPES------------------------> Examples: --escape space:_ --metadata X:Value_with_spaces -E space:SP -E quot:Q -v var:QhelloSPworldQ -A --argumentfile path * Text file to read more arguments from. Use special path `STDIN` to read contents from the standard input stream. File can have both options and data sources one per line. Contents do not need to be escaped but spaces in the beginning and end of lines are removed. Empty lines and lines starting with a hash character (#) are ignored. Example file: | --include regression | --name Regression Tests | # This is a comment line | my_tests.html | path/to/test/directory/ Examples: --argumentfile argfile.txt --argumentfile STDIN -h -? --help Print usage instructions. --version Print version information. Options that are marked with an asterisk (*) can be specified multiple times. For example, `--test first --test third` selects test cases with name `first` and `third`. If an option accepts a value but is not marked with an asterisk, the last given value has precedence. 
For example, `--log A.html --log B.html` creates log file `B.html`. Options accepting no values can be disabled by using the same option again with `no` prefix added or dropped. The last option has precedence regardless of how many times options are used. For example, `--dryrun --dryrun --nodryrun --nostatusrc --statusrc` would not activate the dry-run mode and would return normal status rc. Long option format is case-insensitive. For example, --SuiteStatLevel is equivalent to but easier to read than --suitestatlevel. Long options can also be shortened as long as they are unique. For example, `--logti Title` works while `--lo log.html` does not because the former matches only --logtitle but the latter matches --log, --loglevel and --logtitle. Environment Variables ===================== ROBOT_OPTIONS Space separated list of default options to be placed in front of any explicit options on the command line. ROBOT_SYSLOG_FILE Path to a file where Robot Framework writes internal information about parsing test case files and running tests. Can be useful when debugging problems. If not set, or set to special value `NONE`, writing to the syslog file is disabled. ROBOT_SYSLOG_LEVEL Log level to use when writing to the syslog file. Available levels are the same as for --loglevel command line option and the default is INFO. Examples ======== # Simple test run with `pybot` without options. $ pybot tests.html # Using options and running with `jybot`. $ jybot --include smoke --name Smoke_Tests path/to/tests.txt # Executing `robot.run` module using Python. $ python -m robot.run --test test1 --test test2 test_directory # Running `robot/run.py` script with Jython. $ jython /path/to/robot/run.py tests.robot # Executing multiple test case files and using case-insensitive long options. $ pybot --SuiteStatLevel 2 /my/tests/*.html /your/tests.html # Setting default options and syslog file before running tests. 
$ export ROBOT_OPTIONS="--critical regression --suitestatlevel 2" $ export ROBOT_SYSLOG_FILE=/tmp/syslog.txt $ pybot tests.tsv """ import sys # Allows running as a script. __name__ check needed with multiprocessing: # http://code.google.com/p/robotframework/issues/detail?id=1137 if 'robot' not in sys.modules and __name__ == '__main__': import pythonpathsetter from robot.conf import RobotSettings from robot.output import LOGGER, pyloggingconf from robot.reporting import ResultWriter from robot.running import TestSuiteBuilder from robot.utils import Application class RobotFramework(Application): def __init__(self): Application.__init__(self, USAGE, arg_limits=(1,), env_options='ROBOT_OPTIONS', logger=LOGGER) def main(self, datasources, **options): settings = RobotSettings(options) LOGGER.register_console_logger(**settings.console_logger_config) LOGGER.info('Settings:\n%s' % unicode(settings)) suite = TestSuiteBuilder(settings['SuiteNames'], settings['WarnOnSkipped'], settings['RunEmptySuite']).build(*datasources) suite.configure(**settings.suite_config) with pyloggingconf.robot_handler_enabled(settings.log_level): result = suite.run(settings) LOGGER.info("Tests execution ended. Statistics:\n%s" % result.suite.stat_message) if settings.log or settings.report or settings.xunit: writer = ResultWriter(settings.output if settings.log else result) writer.write_results(settings.get_rebot_settings()) return result.return_code def validate(self, options, arguments): return self._filter_options_without_value(options), arguments def _filter_options_without_value(self, options): return dict((name, value) for name, value in options.items() if value not in (None, [])) def run_cli(arguments): """Command line execution entry point for running tests. :param arguments: Command line arguments as a list of strings. For programmatic usage the :func:`run` function is typically better. It has a better API for that usage and does not call :func:`sys.exit` like this function. 
Example:: from robot import run_cli run_cli(['--include', 'tag', 'path/to/tests.html']) """ RobotFramework().execute_cli(arguments) def run(*datasources, **options): """Executes given Robot Framework data sources with given options. Data sources are paths to files and directories, similarly as when running `pybot` command from the command line. Options are given as keyword arguments and their names are same as long command line options except without hyphens. Options that can be given on the command line multiple times can be passed as lists like `include=['tag1', 'tag2']`. If such option is used only once, it can be given also as a single string like `include='tag'`. To capture stdout and/or stderr streams, pass open file objects in as special keyword arguments `stdout` and `stderr`, respectively. A return code is returned similarly as when running on the command line. Example:: from robot import run run('path/to/tests.html', include=['tag1', 'tag2']) with open('stdout.txt', 'w') as stdout: run('t1.txt', 't2.txt', report='r.html', log='NONE', stdout=stdout) Equivalent command line usage:: pybot --include tag1 --include tag2 path/to/tests.html pybot --report r.html --log NONE t1.txt t2.txt > stdout.txt """ return RobotFramework().execute(*datasources, **options) if __name__ == '__main__': run_cli(sys.argv[1:])
apache-2.0
gangadhar-kadam/verve-erp
erpnext/controllers/selling_controller.py
3
15620
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors # License: GNU General Public License v3. See license.txt from __future__ import unicode_literals import frappe from frappe.utils import cint, flt, rounded, cstr, comma_or from erpnext.setup.utils import get_company_currency from frappe import _, throw from erpnext.stock.get_item_details import get_available_qty from erpnext.controllers.stock_controller import StockController class SellingController(StockController): def __setup__(self): if hasattr(self, "items"): self.table_print_templates = { "items": "templates/print_formats/includes/item_grid.html", "taxes": "templates/print_formats/includes/taxes.html", } def get_feed(self): return _("To {0} | {1} {2}").format(self.customer_name, self.currency, self.grand_total_export) def onload(self): if self.doctype in ("Sales Order", "Delivery Note", "Sales Invoice"): for item in self.get("items"): item.update(get_available_qty(item.item_code, item.warehouse)) def validate(self): super(SellingController, self).validate() self.validate_max_discount() check_active_sales_items(self) def check_credit_limit(self): from erpnext.selling.doctype.customer.customer import check_credit_limit check_credit_limit(self.customer, self.company) def set_missing_values(self, for_validate=False): super(SellingController, self).set_missing_values(for_validate) # set contact and address details for customer, if they are not mentioned self.set_missing_lead_customer_details() self.set_price_list_and_item_details() if self.get("__islocal"): self.set_taxes("taxes", "taxes_and_charges") def set_missing_lead_customer_details(self): if getattr(self, "customer", None): from erpnext.accounts.party import _get_party_details party_details = _get_party_details(self.customer, ignore_permissions=getattr(self, "ignore_permissions", None)) if not self.meta.get_field("sales_team"): party_details.pop("sales_team") self.update_if_missing(party_details) elif getattr(self, "lead", None): from 
erpnext.selling.doctype.lead.lead import get_lead_details self.update_if_missing(get_lead_details(self.lead)) def set_price_list_and_item_details(self): self.set_price_list_currency("Selling") self.set_missing_item_details() def apply_shipping_rule(self): if self.shipping_rule: shipping_rule = frappe.get_doc("Shipping Rule", self.shipping_rule) value = self.net_total # TODO # shipping rule calculation based on item's net weight shipping_amount = 0.0 for condition in shipping_rule.get("conditions"): if not condition.to_value or (flt(condition.from_value) <= value <= flt(condition.to_value)): shipping_amount = condition.shipping_amount break shipping_charge = { "doctype": "Sales Taxes and Charges", "charge_type": "Actual", "account_head": shipping_rule.account, "cost_center": shipping_rule.cost_center } existing_shipping_charge = self.get("taxes", filters=shipping_charge) if existing_shipping_charge: # take the last record found existing_shipping_charge[-1].rate = shipping_amount else: shipping_charge["rate"] = shipping_amount shipping_charge["description"] = shipping_rule.label self.append("taxes", shipping_charge) self.calculate_taxes_and_totals() def remove_shipping_charge(self): if self.shipping_rule: shipping_rule = frappe.get_doc("Shipping Rule", self.shipping_rule) existing_shipping_charge = self.get("taxes", { "doctype": "Sales Taxes and Charges", "charge_type": "Actual", "account_head": shipping_rule.account, "cost_center": shipping_rule.cost_center }) if existing_shipping_charge: self.get("taxes").remove(existing_shipping_charge[-1]) self.calculate_taxes_and_totals() def set_total_in_words(self): from frappe.utils import money_in_words company_currency = get_company_currency(self.company) disable_rounded_total = cint(frappe.db.get_value("Global Defaults", None, "disable_rounded_total")) if self.meta.get_field("in_words"): self.in_words = money_in_words(disable_rounded_total and self.grand_total or self.rounded_total, company_currency) if 
self.meta.get_field("in_words_export"): self.in_words_export = money_in_words(disable_rounded_total and self.grand_total_export or self.rounded_total_export, self.currency) def calculate_taxes_and_totals(self): super(SellingController, self).calculate_taxes_and_totals() self.calculate_total_advance("Sales Invoice", "advances") self.calculate_commission() self.calculate_contribution() def determine_exclusive_rate(self): if not any((cint(tax.included_in_print_rate) for tax in self.get("taxes"))): # no inclusive tax return for item in self.get("items"): item_tax_map = self._load_item_tax_rate(item.item_tax_rate) cumulated_tax_fraction = 0 for i, tax in enumerate(self.get("taxes")): tax.tax_fraction_for_current_item = self.get_current_tax_fraction(tax, item_tax_map) if i==0: tax.grand_total_fraction_for_current_item = 1 + tax.tax_fraction_for_current_item else: tax.grand_total_fraction_for_current_item = \ self.get("taxes")[i-1].grand_total_fraction_for_current_item \ + tax.tax_fraction_for_current_item cumulated_tax_fraction += tax.tax_fraction_for_current_item if cumulated_tax_fraction and not self.discount_amount_applied and item.qty: item.base_amount = flt((item.amount * self.conversion_rate) / (1 + cumulated_tax_fraction), self.precision("base_amount", item)) item.base_rate = flt(item.base_amount / item.qty, self.precision("base_rate", item)) item.discount_percentage = flt(item.discount_percentage, self.precision("discount_percentage", item)) if item.discount_percentage == 100: item.base_price_list_rate = item.base_rate item.base_rate = 0.0 else: item.base_price_list_rate = flt(item.base_rate / (1 - (item.discount_percentage / 100.0)), self.precision("base_price_list_rate", item)) def get_current_tax_fraction(self, tax, item_tax_map): """ Get tax fraction for calculating tax exclusive amount from tax inclusive amount """ current_tax_fraction = 0 if cint(tax.included_in_print_rate): tax_rate = self._get_tax_rate(tax, item_tax_map) if tax.charge_type == "On Net 
Total": current_tax_fraction = tax_rate / 100.0 elif tax.charge_type == "On Previous Row Amount": current_tax_fraction = (tax_rate / 100.0) * \ self.get("taxes")[cint(tax.row_id) - 1].tax_fraction_for_current_item elif tax.charge_type == "On Previous Row Total": current_tax_fraction = (tax_rate / 100.0) * \ self.get("taxes")[cint(tax.row_id) - 1].grand_total_fraction_for_current_item return current_tax_fraction def calculate_item_values(self): if not self.discount_amount_applied: for item in self.get("items"): self.round_floats_in(item) if item.discount_percentage == 100: item.rate = 0 elif not item.rate: item.rate = flt(item.price_list_rate * (1.0 - (item.discount_percentage / 100.0)), self.precision("rate", item)) item.amount = flt(item.rate * item.qty, self.precision("amount", item)) self._set_in_company_currency(item, "price_list_rate", "base_price_list_rate") self._set_in_company_currency(item, "rate", "base_rate") self._set_in_company_currency(item, "amount", "base_amount") def calculate_net_total(self): self.net_total = self.net_total_export = 0.0 for item in self.get("items"): self.net_total += item.base_amount self.net_total_export += item.amount self.round_floats_in(self, ["net_total", "net_total_export"]) def calculate_totals(self): self.grand_total = flt(self.get("taxes")[-1].total if self.get("taxes") else self.net_total) self.grand_total_export = flt(self.grand_total / self.conversion_rate) self.other_charges_total = flt(self.grand_total - self.net_total, self.precision("other_charges_total")) self.other_charges_total_export = flt(self.grand_total_export - self.net_total_export + flt(self.discount_amount), self.precision("other_charges_total_export")) self.grand_total = flt(self.grand_total, self.precision("grand_total")) self.grand_total_export = flt(self.grand_total_export, self.precision("grand_total_export")) self.rounded_total = rounded(self.grand_total) self.rounded_total_export = rounded(self.grand_total_export) def apply_discount_amount(self): 
if self.discount_amount: self.base_discount_amount = flt(self.discount_amount * self.conversion_rate, self.precision("base_discount_amount")) grand_total_for_discount_amount = self.get_grand_total_for_discount_amount() if grand_total_for_discount_amount: # calculate item amount after Discount Amount for item in self.get("items"): distributed_amount = flt(self.base_discount_amount) * item.base_amount / grand_total_for_discount_amount item.base_amount = flt(item.base_amount - distributed_amount, self.precision("base_amount", item)) self.discount_amount_applied = True self._calculate_taxes_and_totals() else: self.base_discount_amount = 0 def get_grand_total_for_discount_amount(self): actual_taxes_dict = {} for tax in self.get("taxes"): if tax.charge_type == "Actual": actual_taxes_dict.setdefault(tax.idx, tax.tax_amount) elif tax.row_id in actual_taxes_dict: actual_tax_amount = flt(actual_taxes_dict.get(tax.row_id, 0)) * \ flt(tax.rate) / 100 actual_taxes_dict.setdefault(tax.idx, actual_tax_amount) grand_total_for_discount_amount = flt(self.grand_total - sum(actual_taxes_dict.values()), self.precision("grand_total")) return grand_total_for_discount_amount def calculate_outstanding_amount(self): # NOTE: # write_off_amount is only for POS Invoice # total_advance is only for non POS Invoice if self.doctype == "Sales Invoice" and self.docstatus == 0: self.round_floats_in(self, ["grand_total", "total_advance", "write_off_amount", "paid_amount"]) total_amount_to_pay = self.grand_total - self.write_off_amount self.outstanding_amount = flt(total_amount_to_pay - self.total_advance \ - self.paid_amount, self.precision("outstanding_amount")) def calculate_commission(self): if self.meta.get_field("commission_rate"): self.round_floats_in(self, ["net_total", "commission_rate"]) if self.commission_rate > 100.0: throw(_("Commission rate cannot be greater than 100")) self.total_commission = flt(self.net_total * self.commission_rate / 100.0, self.precision("total_commission")) def 
calculate_contribution(self): if not self.meta.get_field("sales_team"): return total = 0.0 sales_team = self.get("sales_team") for sales_person in sales_team: self.round_floats_in(sales_person) sales_person.allocated_amount = flt( self.net_total * sales_person.allocated_percentage / 100.0, self.precision("allocated_amount", sales_person)) total += sales_person.allocated_percentage if sales_team and total != 100.0: throw(_("Total allocated percentage for sales team should be 100")) def validate_order_type(self): valid_types = ["Sales", "Maintenance", "Shopping Cart"] if not self.order_type: self.order_type = "Sales" elif self.order_type not in valid_types: throw(_("Order Type must be one of {0}").format(comma_or(valid_types))) def validate_max_discount(self): for d in self.get("items"): discount = flt(frappe.db.get_value("Item", d.item_code, "max_discount")) if discount and flt(d.discount_percentage) > discount: frappe.throw(_("Maxiumm discount for Item {0} is {1}%").format(d.item_code, discount)) def get_item_list(self): il = [] for d in self.get("items"): reserved_warehouse = "" reserved_qty_for_main_item = 0 if d.qty is None: frappe.throw(_("Row {0}: Qty is mandatory").format(d.idx)) if self.doctype == "Sales Order": if (frappe.db.get_value("Item", d.item_code, "is_stock_item") == 'Yes' or self.has_sales_bom(d.item_code)) and not d.warehouse: frappe.throw(_("Reserved Warehouse required for stock Item {0} in row {1}").format(d.item_code, d.idx)) reserved_warehouse = d.warehouse if flt(d.qty) > flt(d.delivered_qty): reserved_qty_for_main_item = flt(d.qty) - flt(d.delivered_qty) elif self.doctype == "Delivery Note" and d.against_sales_order: # if SO qty is 10 and there is tolerance of 20%, then it will allow DN of 12. 
# But in this case reserved qty should only be reduced by 10 and not 12 already_delivered_qty = self.get_already_delivered_qty(self.name, d.against_sales_order, d.so_detail) so_qty, reserved_warehouse = self.get_so_qty_and_warehouse(d.so_detail) if already_delivered_qty + d.qty > so_qty: reserved_qty_for_main_item = -(so_qty - already_delivered_qty) else: reserved_qty_for_main_item = -flt(d.qty) if self.has_sales_bom(d.item_code): for p in self.get("packed_items"): if p.parent_detail_docname == d.name and p.parent_item == d.item_code: # the packing details table's qty is already multiplied with parent's qty il.append(frappe._dict({ 'warehouse': p.warehouse, 'reserved_warehouse': reserved_warehouse, 'item_code': p.item_code, 'qty': flt(p.qty), 'reserved_qty': (flt(p.qty)/flt(d.qty)) * reserved_qty_for_main_item, 'uom': p.uom, 'batch_no': cstr(p.batch_no).strip(), 'serial_no': cstr(p.serial_no).strip(), 'name': d.name })) else: il.append(frappe._dict({ 'warehouse': d.warehouse, 'reserved_warehouse': reserved_warehouse, 'item_code': d.item_code, 'qty': d.qty, 'reserved_qty': reserved_qty_for_main_item, 'uom': d.stock_uom, 'batch_no': cstr(d.get("batch_no")).strip(), 'serial_no': cstr(d.get("serial_no")).strip(), 'name': d.name })) return il def has_sales_bom(self, item_code): return frappe.db.sql("""select name from `tabSales BOM` where new_item_code=%s and docstatus != 2""", item_code) def get_already_delivered_qty(self, dn, so, so_detail): qty = frappe.db.sql("""select sum(qty) from `tabDelivery Note Item` where so_detail = %s and docstatus = 1 and against_sales_order = %s and parent != %s""", (so_detail, so, dn)) return qty and flt(qty[0][0]) or 0.0 def get_so_qty_and_warehouse(self, so_detail): so_item = frappe.db.sql("""select qty, warehouse from `tabSales Order Item` where name = %s and docstatus = 1""", so_detail, as_dict=1) so_qty = so_item and flt(so_item[0]["qty"]) or 0.0 so_warehouse = so_item and so_item[0]["warehouse"] or "" return so_qty, so_warehouse 
def check_stop_sales_order(self, ref_fieldname): for d in self.get("items"): if d.get(ref_fieldname): status = frappe.db.get_value("Sales Order", d.get(ref_fieldname), "status") if status == "Stopped": frappe.throw(_("Sales Order {0} is stopped").format(d.get(ref_fieldname))) def check_active_sales_items(obj): for d in obj.get("items"): if d.item_code: item = frappe.db.sql("""select docstatus, is_sales_item, is_service_item, income_account from tabItem where name = %s""", d.item_code, as_dict=True)[0] if item.is_sales_item == 'No' and item.is_service_item == 'No': frappe.throw(_("Item {0} must be Sales or Service Item in {1}").format(d.item_code, d.idx)) if getattr(d, "income_account", None) and not item.income_account: frappe.db.set_value("Item", d.item_code, "income_account", d.income_account)
agpl-3.0
emailweixu/Paddle
demo/model_zoo/embedding/extract_para.py
13
3832
#!/bin/env python # Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Example: python extract_para.py --preModel PREMODEL --preDict PREDICT \ --usrModel USRMODEL --usrDict USRDICT -d DIM Options: -h, --help show this help message and exit --preModel PREMODEL the name of pretrained embedding model --preDict PREDICT the name of pretrained dictionary --usrModel usrModel the name of output usr embedding model --usrDict usrDict the name of user specified dictionary -d DIM dimension of parameter """ from optparse import OptionParser import struct def get_row_index(preDict, usrDict): """ Get the row positions for all words in user dictionary from pre-trained dictionary. 
return: a list of row positions Example: preDict='a\nb\nc\n', usrDict='a\nc\n', then return [0,2] """ pos = [] index = dict() with open(preDict, "r") as f: for line_index, line in enumerate(f): word = line.strip().split()[0] index[word] = line_index with open(usrDict, "r") as f: for line in f: word = line.strip().split()[0] pos.append(index[word]) return pos def extract_parameters_by_usrDict(preModel, preDict, usrModel, usrDict, paraDim): """ Extract desired parameters from a pretrained embedding model based on user dictionary """ if paraDim not in [32, 64, 128, 256]: raise RuntimeError("We only support 32, 64, 128, 256 dimensions now") fi = open(preModel, "rb") fo = open(usrModel, "wb") # write filehead rowIndex = get_row_index(preDict, usrDict) newHead = struct.pack("iil", 0, 4, len(rowIndex) * paraDim) fo.write(newHead) bytes = 4 * paraDim for i in range(0, len(rowIndex)): # find the absolute position of input file fi.seek(rowIndex[i] * bytes + 16, 0) fo.write(fi.read(bytes)) print "extract parameters finish, total", len(rowIndex), "lines" fi.close() def main(): """ Main entry for running paraconvert.py """ usage = "usage: \n" \ "python %prog --preModel PREMODEL --preDict PREDICT" \ " --usrModel USRMODEL --usrDict USRDICT -d DIM" parser = OptionParser(usage) parser.add_option( "--preModel", action="store", dest="preModel", help="the name of pretrained embedding model") parser.add_option( "--preDict", action="store", dest="preDict", help="the name of pretrained dictionary") parser.add_option( "--usrModel", action="store", dest="usrModel", help="the name of output usr embedding model") parser.add_option( "--usrDict", action="store", dest="usrDict", help="the name of user specified dictionary") parser.add_option( "-d", action="store", dest="dim", help="dimension of parameter") (options, args) = parser.parse_args() extract_parameters_by_usrDict(options.preModel, options.preDict, options.usrModel, options.usrDict, int(options.dim)) if __name__ == '__main__': main()
apache-2.0
gojira/tensorflow
tensorflow/contrib/distributions/python/ops/relaxed_bernoulli.py
35
9112
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """The RelaxedBernoulli distribution class.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.contrib.distributions.python.ops import logistic from tensorflow.contrib.distributions.python.ops.bijectors.sigmoid import Sigmoid # Bijectors must be directly imported because `remove_undocumented` prevents # individual file imports. from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.ops import array_ops from tensorflow.python.ops import check_ops from tensorflow.python.ops.distributions import transformed_distribution from tensorflow.python.ops.distributions import util as distribution_util from tensorflow.python.util import deprecation class RelaxedBernoulli(transformed_distribution.TransformedDistribution): """RelaxedBernoulli distribution with temperature and logits parameters. The RelaxedBernoulli is a distribution over the unit interval (0,1), which continuously approximates a Bernoulli. 
The degree of approximation is controlled by a temperature: as the temperature goes to 0 the RelaxedBernoulli becomes discrete with a distribution described by the `logits` or `probs` parameters, as the temperature goes to infinity the RelaxedBernoulli becomes the constant distribution that is identically 0.5. The RelaxedBernoulli distribution is a reparameterized continuous distribution that is the binary special case of the RelaxedOneHotCategorical distribution (Maddison et al., 2016; Jang et al., 2016). For details on the binary special case see the appendix of Maddison et al. (2016) where it is referred to as BinConcrete. If you use this distribution, please cite both papers. Some care needs to be taken for loss functions that depend on the log-probability of RelaxedBernoullis, because computing log-probabilities of the RelaxedBernoulli can suffer from underflow issues. In many case loss functions such as these are invariant under invertible transformations of the random variables. The KL divergence, found in the variational autoencoder loss, is an example. Because RelaxedBernoullis are sampled by a Logistic random variable followed by a `tf.sigmoid` op, one solution is to treat the Logistic as the random variable and `tf.sigmoid` as downstream. The KL divergences of two Logistics, which are always followed by a `tf.sigmoid` op, is equivalent to evaluating KL divergences of RelaxedBernoulli samples. See Maddison et al., 2016 for more details where this distribution is called the BinConcrete. An alternative approach is to evaluate Bernoulli log probability or KL directly on relaxed samples, as done in Jang et al., 2016. In this case, guarantees on the loss are usually violated. For instance, using a Bernoulli KL in a relaxed ELBO is no longer a lower bound on the log marginal probability of the observation. Thus care and early stopping are important. 
#### Examples Creates three continuous distributions, which approximate 3 Bernoullis with probabilities (0.1, 0.5, 0.4). Samples from these distributions will be in the unit interval (0,1). ```python temperature = 0.5 p = [0.1, 0.5, 0.4] dist = RelaxedBernoulli(temperature, probs=p) ``` Creates three continuous distributions, which approximate 3 Bernoullis with logits (-2, 2, 0). Samples from these distributions will be in the unit interval (0,1). ```python temperature = 0.5 logits = [-2, 2, 0] dist = RelaxedBernoulli(temperature, logits=logits) ``` Creates three continuous distributions, whose sigmoid approximate 3 Bernoullis with logits (-2, 2, 0). ```python temperature = 0.5 logits = [-2, 2, 0] dist = Logistic(logits/temperature, 1./temperature) samples = dist.sample() sigmoid_samples = tf.sigmoid(samples) # sigmoid_samples has the same distribution as samples from # RelaxedBernoulli(temperature, logits=logits) ``` Creates three continuous distributions, which approximate 3 Bernoullis with logits (-2, 2, 0). Samples from these distributions will be in the unit interval (0,1). Because the temperature is very low, samples from these distributions are almost discrete, usually taking values very close to 0 or 1. ```python temperature = 1e-5 logits = [-2, 2, 0] dist = RelaxedBernoulli(temperature, logits=logits) ``` Creates three continuous distributions, which approximate 3 Bernoullis with logits (-2, 2, 0). Samples from these distributions will be in the unit interval (0,1). Because the temperature is very high, samples from these distributions are usually close to the (0.5, 0.5, 0.5) vector. ```python temperature = 100 logits = [-2, 2, 0] dist = RelaxedBernoulli(temperature, logits=logits) ``` Chris J. Maddison, Andriy Mnih, and Yee Whye Teh. The Concrete Distribution: A Continuous Relaxation of Discrete Random Variables. 2016. Eric Jang, Shixiang Gu, and Ben Poole. Categorical Reparameterization with Gumbel-Softmax. 2016. 
""" @deprecation.deprecated( "2018-10-01", "The TensorFlow Distributions library has moved to " "TensorFlow Probability " "(https://github.com/tensorflow/probability). You " "should update all references to use `tfp.distributions` " "instead of `tf.contrib.distributions`.", warn_once=True) def __init__(self, temperature, logits=None, probs=None, validate_args=False, allow_nan_stats=True, name="RelaxedBernoulli"): """Construct RelaxedBernoulli distributions. Args: temperature: An 0-D `Tensor`, representing the temperature of a set of RelaxedBernoulli distributions. The temperature should be positive. logits: An N-D `Tensor` representing the log-odds of a positive event. Each entry in the `Tensor` parametrizes an independent RelaxedBernoulli distribution where the probability of an event is sigmoid(logits). Only one of `logits` or `probs` should be passed in. probs: An N-D `Tensor` representing the probability of a positive event. Each entry in the `Tensor` parameterizes an independent Bernoulli distribution. Only one of `logits` or `probs` should be passed in. validate_args: Python `bool`, default `False`. When `True` distribution parameters are checked for validity despite possibly degrading runtime performance. When `False` invalid inputs may silently render incorrect outputs. allow_nan_stats: Python `bool`, default `True`. When `True`, statistics (e.g., mean, mode, variance) use the value "`NaN`" to indicate the result is undefined. When `False`, an exception is raised if one or more of the statistic's batch members are undefined. name: Python `str` name prefixed to Ops created by this class. Raises: ValueError: If both `probs` and `logits` are passed, or if neither. 
""" parameters = dict(locals()) with ops.name_scope(name, values=[logits, probs, temperature]) as name: with ops.control_dependencies([check_ops.assert_positive(temperature)] if validate_args else []): self._temperature = array_ops.identity(temperature, name="temperature") self._logits, self._probs = distribution_util.get_logits_and_probs( logits=logits, probs=probs, validate_args=validate_args) super(RelaxedBernoulli, self).__init__( distribution=logistic.Logistic( self._logits / self._temperature, 1. / self._temperature, validate_args=validate_args, allow_nan_stats=allow_nan_stats, name=name + "/Logistic"), bijector=Sigmoid(validate_args=validate_args), validate_args=validate_args, name=name) self._parameters = parameters @staticmethod def _param_shapes(sample_shape): return {"logits": ops.convert_to_tensor(sample_shape, dtype=dtypes.int32)} @property def temperature(self): """Distribution parameter for the location.""" return self._temperature @property def logits(self): """Log-odds of `1`.""" return self._logits @property def probs(self): """Probability of `1`.""" return self._probs
apache-2.0
noxora/flask-base
flask/lib/python3.4/site-packages/sqlalchemy/ext/horizontal_shard.py
32
4814
# ext/horizontal_shard.py # Copyright (C) 2005-2017 the SQLAlchemy authors and contributors # <see AUTHORS file> # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """Horizontal sharding support. Defines a rudimental 'horizontal sharding' system which allows a Session to distribute queries and persistence operations across multiple databases. For a usage example, see the :ref:`examples_sharding` example included in the source distribution. """ from .. import util from ..orm.session import Session from ..orm.query import Query __all__ = ['ShardedSession', 'ShardedQuery'] class ShardedQuery(Query): def __init__(self, *args, **kwargs): super(ShardedQuery, self).__init__(*args, **kwargs) self.id_chooser = self.session.id_chooser self.query_chooser = self.session.query_chooser self._shard_id = None def set_shard(self, shard_id): """return a new query, limited to a single shard ID. all subsequent operations with the returned query will be against the single shard regardless of other state. 
""" q = self._clone() q._shard_id = shard_id return q def _execute_and_instances(self, context): def iter_for_shard(shard_id): context.attributes['shard_id'] = shard_id result = self._connection_from_session( mapper=self._mapper_zero(), shard_id=shard_id).execute( context.statement, self._params) return self.instances(result, context) if self._shard_id is not None: return iter_for_shard(self._shard_id) else: partial = [] for shard_id in self.query_chooser(self): partial.extend(iter_for_shard(shard_id)) # if some kind of in memory 'sorting' # were done, this is where it would happen return iter(partial) def get(self, ident, **kwargs): if self._shard_id is not None: return super(ShardedQuery, self).get(ident) else: ident = util.to_list(ident) for shard_id in self.id_chooser(self, ident): o = self.set_shard(shard_id).get(ident, **kwargs) if o is not None: return o else: return None class ShardedSession(Session): def __init__(self, shard_chooser, id_chooser, query_chooser, shards=None, query_cls=ShardedQuery, **kwargs): """Construct a ShardedSession. :param shard_chooser: A callable which, passed a Mapper, a mapped instance, and possibly a SQL clause, returns a shard ID. This id may be based off of the attributes present within the object, or on some round-robin scheme. If the scheme is based on a selection, it should set whatever state on the instance to mark it in the future as participating in that shard. :param id_chooser: A callable, passed a query and a tuple of identity values, which should return a list of shard ids where the ID might reside. The databases will be queried in the order of this listing. :param query_chooser: For a given Query, returns the list of shard_ids where the query should be issued. Results from all shards returned will be combined together into a single listing. :param shards: A dictionary of string shard names to :class:`~sqlalchemy.engine.Engine` objects. 
""" super(ShardedSession, self).__init__(query_cls=query_cls, **kwargs) self.shard_chooser = shard_chooser self.id_chooser = id_chooser self.query_chooser = query_chooser self.__binds = {} self.connection_callable = self.connection if shards is not None: for k in shards: self.bind_shard(k, shards[k]) def connection(self, mapper=None, instance=None, shard_id=None, **kwargs): if shard_id is None: shard_id = self.shard_chooser(mapper, instance) if self.transaction is not None: return self.transaction.connection(mapper, shard_id=shard_id) else: return self.get_bind( mapper, shard_id=shard_id, instance=instance ).contextual_connect(**kwargs) def get_bind(self, mapper, shard_id=None, instance=None, clause=None, **kw): if shard_id is None: shard_id = self.shard_chooser(mapper, instance, clause=clause) return self.__binds[shard_id] def bind_shard(self, shard_id, bind): self.__binds[shard_id] = bind
mit
andim27/magiccamp
build/lib/django/contrib/gis/measure.py
398
12282
# Copyright (c) 2007, Robert Coup <robert.coup@onetrackmind.co.nz> # All rights reserved. # # Redistribution and use in source and binary forms, with or without modification, # are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # 3. Neither the name of Distance nor the names of its contributors may be used # to endorse or promote products derived from this software without # specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR # ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON # ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # """ Distance and Area objects to allow for sensible and convienient calculation and conversions. Authors: Robert Coup, Justin Bronn Inspired by GeoPy (http://exogen.case.edu/projects/geopy/) and Geoff Biggs' PhD work on dimensioned units for robotics. 
""" __all__ = ['A', 'Area', 'D', 'Distance'] from decimal import Decimal class MeasureBase(object): def default_units(self, kwargs): """ Return the unit value and the default units specified from the given keyword arguments dictionary. """ val = 0.0 for unit, value in kwargs.iteritems(): if not isinstance(value, float): value = float(value) if unit in self.UNITS: val += self.UNITS[unit] * value default_unit = unit elif unit in self.ALIAS: u = self.ALIAS[unit] val += self.UNITS[u] * value default_unit = u else: lower = unit.lower() if lower in self.UNITS: val += self.UNITS[lower] * value default_unit = lower elif lower in self.LALIAS: u = self.LALIAS[lower] val += self.UNITS[u] * value default_unit = u else: raise AttributeError('Unknown unit type: %s' % unit) return val, default_unit @classmethod def unit_attname(cls, unit_str): """ Retrieves the unit attribute name for the given unit string. For example, if the given unit string is 'metre', 'm' would be returned. An exception is raised if an attribute cannot be found. 
""" lower = unit_str.lower() if unit_str in cls.UNITS: return unit_str elif lower in cls.UNITS: return lower elif lower in cls.LALIAS: return cls.LALIAS[lower] else: raise Exception('Could not find a unit keyword associated with "%s"' % unit_str) class Distance(MeasureBase): UNITS = { 'chain' : 20.1168, 'chain_benoit' : 20.116782, 'chain_sears' : 20.1167645, 'british_chain_benoit' : 20.1167824944, 'british_chain_sears' : 20.1167651216, 'british_chain_sears_truncated' : 20.116756, 'cm' : 0.01, 'british_ft' : 0.304799471539, 'british_yd' : 0.914398414616, 'clarke_ft' : 0.3047972654, 'clarke_link' : 0.201166195164, 'fathom' : 1.8288, 'ft': 0.3048, 'german_m' : 1.0000135965, 'gold_coast_ft' : 0.304799710181508, 'indian_yd' : 0.914398530744, 'inch' : 0.0254, 'km': 1000.0, 'link' : 0.201168, 'link_benoit' : 0.20116782, 'link_sears' : 0.20116765, 'm': 1.0, 'mi': 1609.344, 'mm' : 0.001, 'nm': 1852.0, 'nm_uk' : 1853.184, 'rod' : 5.0292, 'sears_yd' : 0.91439841, 'survey_ft' : 0.304800609601, 'um' : 0.000001, 'yd': 0.9144, } # Unit aliases for `UNIT` terms encountered in Spatial Reference WKT. 
ALIAS = { 'centimeter' : 'cm', 'foot' : 'ft', 'inches' : 'inch', 'kilometer' : 'km', 'kilometre' : 'km', 'meter' : 'm', 'metre' : 'm', 'micrometer' : 'um', 'micrometre' : 'um', 'millimeter' : 'mm', 'millimetre' : 'mm', 'mile' : 'mi', 'yard' : 'yd', 'British chain (Benoit 1895 B)' : 'british_chain_benoit', 'British chain (Sears 1922)' : 'british_chain_sears', 'British chain (Sears 1922 truncated)' : 'british_chain_sears_truncated', 'British foot (Sears 1922)' : 'british_ft', 'British foot' : 'british_ft', 'British yard (Sears 1922)' : 'british_yd', 'British yard' : 'british_yd', "Clarke's Foot" : 'clarke_ft', "Clarke's link" : 'clarke_link', 'Chain (Benoit)' : 'chain_benoit', 'Chain (Sears)' : 'chain_sears', 'Foot (International)' : 'ft', 'German legal metre' : 'german_m', 'Gold Coast foot' : 'gold_coast_ft', 'Indian yard' : 'indian_yd', 'Link (Benoit)': 'link_benoit', 'Link (Sears)': 'link_sears', 'Nautical Mile' : 'nm', 'Nautical Mile (UK)' : 'nm_uk', 'US survey foot' : 'survey_ft', 'U.S. Foot' : 'survey_ft', 'Yard (Indian)' : 'indian_yd', 'Yard (Sears)' : 'sears_yd' } LALIAS = dict([(k.lower(), v) for k, v in ALIAS.items()]) def __init__(self, default_unit=None, **kwargs): # The base unit is in meters. 
self.m, self._default_unit = self.default_units(kwargs) if default_unit and isinstance(default_unit, str): self._default_unit = default_unit def __getattr__(self, name): if name in self.UNITS: return self.m / self.UNITS[name] else: raise AttributeError('Unknown unit type: %s' % name) def __repr__(self): return 'Distance(%s=%s)' % (self._default_unit, getattr(self, self._default_unit)) def __str__(self): return '%s %s' % (getattr(self, self._default_unit), self._default_unit) def __cmp__(self, other): if isinstance(other, Distance): return cmp(self.m, other.m) else: return NotImplemented def __add__(self, other): if isinstance(other, Distance): return Distance(default_unit=self._default_unit, m=(self.m + other.m)) else: raise TypeError('Distance must be added with Distance') def __iadd__(self, other): if isinstance(other, Distance): self.m += other.m return self else: raise TypeError('Distance must be added with Distance') def __sub__(self, other): if isinstance(other, Distance): return Distance(default_unit=self._default_unit, m=(self.m - other.m)) else: raise TypeError('Distance must be subtracted from Distance') def __isub__(self, other): if isinstance(other, Distance): self.m -= other.m return self else: raise TypeError('Distance must be subtracted from Distance') def __mul__(self, other): if isinstance(other, (int, float, long, Decimal)): return Distance(default_unit=self._default_unit, m=(self.m * float(other))) elif isinstance(other, Distance): return Area(default_unit='sq_' + self._default_unit, sq_m=(self.m * other.m)) else: raise TypeError('Distance must be multiplied with number or Distance') def __imul__(self, other): if isinstance(other, (int, float, long, Decimal)): self.m *= float(other) return self else: raise TypeError('Distance must be multiplied with number') def __rmul__(self, other): return self * other def __div__(self, other): if isinstance(other, (int, float, long, Decimal)): return Distance(default_unit=self._default_unit, m=(self.m / 
float(other))) else: raise TypeError('Distance must be divided with number') def __idiv__(self, other): if isinstance(other, (int, float, long, Decimal)): self.m /= float(other) return self else: raise TypeError('Distance must be divided with number') def __nonzero__(self): return bool(self.m) class Area(MeasureBase): # Getting the square units values and the alias dictionary. UNITS = dict([('sq_%s' % k, v ** 2) for k, v in Distance.UNITS.items()]) ALIAS = dict([(k, 'sq_%s' % v) for k, v in Distance.ALIAS.items()]) LALIAS = dict([(k.lower(), v) for k, v in ALIAS.items()]) def __init__(self, default_unit=None, **kwargs): self.sq_m, self._default_unit = self.default_units(kwargs) if default_unit and isinstance(default_unit, str): self._default_unit = default_unit def __getattr__(self, name): if name in self.UNITS: return self.sq_m / self.UNITS[name] else: raise AttributeError('Unknown unit type: ' + name) def __repr__(self): return 'Area(%s=%s)' % (self._default_unit, getattr(self, self._default_unit)) def __str__(self): return '%s %s' % (getattr(self, self._default_unit), self._default_unit) def __cmp__(self, other): if isinstance(other, Area): return cmp(self.sq_m, other.sq_m) else: return NotImplemented def __add__(self, other): if isinstance(other, Area): return Area(default_unit=self._default_unit, sq_m=(self.sq_m + other.sq_m)) else: raise TypeError('Area must be added with Area') def __iadd__(self, other): if isinstance(other, Area): self.sq_m += other.sq_m return self else: raise TypeError('Area must be added with Area') def __sub__(self, other): if isinstance(other, Area): return Area(default_unit=self._default_unit, sq_m=(self.sq_m - other.sq_m)) else: raise TypeError('Area must be subtracted from Area') def __isub__(self, other): if isinstance(other, Area): self.sq_m -= other.sq_m return self else: raise TypeError('Area must be subtracted from Area') def __mul__(self, other): if isinstance(other, (int, float, long, Decimal)): return 
Area(default_unit=self._default_unit, sq_m=(self.sq_m * float(other))) else: raise TypeError('Area must be multiplied with number') def __imul__(self, other): if isinstance(other, (int, float, long, Decimal)): self.sq_m *= float(other) return self else: raise TypeError('Area must be multiplied with number') def __rmul__(self, other): return self * other def __div__(self, other): if isinstance(other, (int, float, long, Decimal)): return Area(default_unit=self._default_unit, sq_m=(self.sq_m / float(other))) else: raise TypeError('Area must be divided with number') def __idiv__(self, other): if isinstance(other, (int, float, long, Decimal)): self.sq_m /= float(other) return self else: raise TypeError('Area must be divided with number') def __nonzero__(self): return bool(self.sq_m) # Shortcuts D = Distance A = Area
bsd-3-clause
turbomanage/training-data-analyst
courses/machine_learning/deepdive2/structured/labs/serving/application/lib/pyasn1_modules/rfc5940.py
13
1613
# # This file is part of pyasn1-modules software. # # Created by Russ Housley with assistance from asn1ate v.0.6.0. # Modified by Russ Housley to add map for use with opentypes. # # Copyright (c) 2019, Vigil Security, LLC # License: http://snmplabs.com/pyasn1/license.html # # Additional CMS Revocation Information Choices # # ASN.1 source from: # https://www.rfc-editor.org/rfc/rfc5940.txt # from pyasn1.type import namedtype from pyasn1.type import tag from pyasn1.type import univ from pyasn1_modules import rfc2560 from pyasn1_modules import rfc5652 # RevocationInfoChoice for OCSP response: # The OID is included in otherRevInfoFormat, and # signed OCSPResponse is included in otherRevInfo id_ri_ocsp_response = univ.ObjectIdentifier('1.3.6.1.5.5.7.16.2') OCSPResponse = rfc2560.OCSPResponse # RevocationInfoChoice for SCVP request/response: # The OID is included in otherRevInfoFormat, and # SCVPReqRes is included in otherRevInfo id_ri_scvp = univ.ObjectIdentifier('1.3.6.1.5.5.7.16.4') ContentInfo = rfc5652.ContentInfo class SCVPReqRes(univ.Sequence): pass SCVPReqRes.componentType = namedtype.NamedTypes( namedtype.OptionalNamedType('request', ContentInfo().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))), namedtype.NamedType('response', ContentInfo()) ) # Map of Revocation Info Format OIDs to Revocation Info Format # is added to the ones that are in rfc5652.py _otherRevInfoFormatMapUpdate = { id_ri_ocsp_response: OCSPResponse(), id_ri_scvp: SCVPReqRes(), } rfc5652.otherRevInfoFormatMap.update(_otherRevInfoFormatMapUpdate)
apache-2.0
eestay/edx-platform
lms/djangoapps/student_account/test/test_views.py
7
20001
# -*- coding: utf-8 -*- """ Tests for student account views. """ import re from unittest import skipUnless from urllib import urlencode import json import mock import ddt import markupsafe from django.test import TestCase from django.conf import settings from django.core.urlresolvers import reverse from django.core import mail from django.test.utils import override_settings from util.testing import UrlResetMixin from third_party_auth.tests.testutil import simulate_running_pipeline from embargo.test_utils import restrict_course from openedx.core.djangoapps.user_api.accounts.api import activate_account, create_account from openedx.core.djangoapps.user_api.accounts import EMAIL_MAX_LENGTH from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase from xmodule.modulestore.tests.factories import CourseFactory from student.tests.factories import CourseModeFactory @ddt.ddt class StudentAccountUpdateTest(UrlResetMixin, TestCase): """ Tests for the student account views that update the user's account information. 
""" USERNAME = u"heisenberg" ALTERNATE_USERNAME = u"walt" OLD_PASSWORD = u"ḅḷüëṡḳÿ" NEW_PASSWORD = u"🄱🄸🄶🄱🄻🅄🄴" OLD_EMAIL = u"walter@graymattertech.com" NEW_EMAIL = u"walt@savewalterwhite.com" INVALID_ATTEMPTS = 100 INVALID_EMAILS = [ None, u"", u"a", "no_domain", "no+domain", "@", "@domain.com", "test@no_extension", # Long email -- subtract the length of the @domain # except for one character (so we exceed the max length limit) u"{user}@example.com".format( user=(u'e' * (EMAIL_MAX_LENGTH - 11)) ) ] INVALID_KEY = u"123abc" def setUp(self): super(StudentAccountUpdateTest, self).setUp("student_account.urls") # Create/activate a new account activation_key = create_account(self.USERNAME, self.OLD_PASSWORD, self.OLD_EMAIL) activate_account(activation_key) # Login result = self.client.login(username=self.USERNAME, password=self.OLD_PASSWORD) self.assertTrue(result) @skipUnless(settings.ROOT_URLCONF == 'lms.urls', 'Test only valid in LMS') def test_password_change(self): # Request a password change while logged in, simulating # use of the password reset link from the account page response = self._change_password() self.assertEqual(response.status_code, 200) # Check that an email was sent self.assertEqual(len(mail.outbox), 1) # Retrieve the activation link from the email body email_body = mail.outbox[0].body result = re.search('(?P<url>https?://[^\s]+)', email_body) self.assertIsNot(result, None) activation_link = result.group('url') # Visit the activation link response = self.client.get(activation_link) self.assertEqual(response.status_code, 200) # Submit a new password and follow the redirect to the success page response = self.client.post( activation_link, # These keys are from the form on the current password reset confirmation page. 
{'new_password1': self.NEW_PASSWORD, 'new_password2': self.NEW_PASSWORD}, follow=True ) self.assertEqual(response.status_code, 200) self.assertContains(response, "Your password has been set.") # Log the user out to clear session data self.client.logout() # Verify that the new password can be used to log in result = self.client.login(username=self.USERNAME, password=self.NEW_PASSWORD) self.assertTrue(result) # Try reusing the activation link to change the password again response = self.client.post( activation_link, {'new_password1': self.OLD_PASSWORD, 'new_password2': self.OLD_PASSWORD}, follow=True ) self.assertEqual(response.status_code, 200) self.assertContains(response, "The password reset link was invalid, possibly because the link has already been used.") self.client.logout() # Verify that the old password cannot be used to log in result = self.client.login(username=self.USERNAME, password=self.OLD_PASSWORD) self.assertFalse(result) # Verify that the new password continues to be valid result = self.client.login(username=self.USERNAME, password=self.NEW_PASSWORD) self.assertTrue(result) @ddt.data(True, False) def test_password_change_logged_out(self, send_email): # Log the user out self.client.logout() # Request a password change while logged out, simulating # use of the password reset link from the login page if send_email: response = self._change_password(email=self.OLD_EMAIL) self.assertEqual(response.status_code, 200) else: # Don't send an email in the POST data, simulating # its (potentially accidental) omission in the POST # data sent from the login page response = self._change_password() self.assertEqual(response.status_code, 400) def test_password_change_inactive_user(self): # Log out the user created during test setup self.client.logout() # Create a second user, but do not activate it create_account(self.ALTERNATE_USERNAME, self.OLD_PASSWORD, self.NEW_EMAIL) # Send the view the email address tied to the inactive user response = 
self._change_password(email=self.NEW_EMAIL) # Expect that the activation email is still sent, # since the user may have lost the original activation email. self.assertEqual(response.status_code, 200) self.assertEqual(len(mail.outbox), 1) def test_password_change_no_user(self): # Log out the user created during test setup self.client.logout() # Send the view an email address not tied to any user response = self._change_password(email=self.NEW_EMAIL) self.assertEqual(response.status_code, 400) def test_password_change_rate_limited(self): # Log out the user created during test setup, to prevent the view from # selecting the logged-in user's email address over the email provided # in the POST data self.client.logout() # Make many consecutive bad requests in an attempt to trigger the rate limiter for attempt in xrange(self.INVALID_ATTEMPTS): self._change_password(email=self.NEW_EMAIL) response = self._change_password(email=self.NEW_EMAIL) self.assertEqual(response.status_code, 403) @ddt.data( ('post', 'password_change_request', []), ) @ddt.unpack def test_require_http_method(self, correct_method, url_name, args): wrong_methods = {'get', 'put', 'post', 'head', 'options', 'delete'} - {correct_method} url = reverse(url_name, args=args) for method in wrong_methods: response = getattr(self.client, method)(url) self.assertEqual(response.status_code, 405) def _change_password(self, email=None): """Request to change the user's password. """ data = {} if email: data['email'] = email return self.client.post(path=reverse('password_change_request'), data=data) @ddt.ddt class StudentAccountLoginAndRegistrationTest(UrlResetMixin, ModuleStoreTestCase): """ Tests for the student account views that update the user's account information. 
""" USERNAME = "bob" EMAIL = "bob@example.com" PASSWORD = "password" @mock.patch.dict(settings.FEATURES, {'EMBARGO': True}) def setUp(self): super(StudentAccountLoginAndRegistrationTest, self).setUp('embargo') @ddt.data( ("account_login", "login"), ("account_register", "register"), ) @ddt.unpack def test_login_and_registration_form(self, url_name, initial_mode): response = self.client.get(reverse(url_name)) expected_data = u"data-initial-mode=\"{mode}\"".format(mode=initial_mode) self.assertContains(response, expected_data) @ddt.data("account_login", "account_register") def test_login_and_registration_form_already_authenticated(self, url_name): # Create/activate a new account and log in activation_key = create_account(self.USERNAME, self.PASSWORD, self.EMAIL) activate_account(activation_key) result = self.client.login(username=self.USERNAME, password=self.PASSWORD) self.assertTrue(result) # Verify that we're redirected to the dashboard response = self.client.get(reverse(url_name)) self.assertRedirects(response, reverse("dashboard")) @ddt.data( (False, "account_login"), (False, "account_login"), (True, "account_login"), (True, "account_register"), ) @ddt.unpack def test_login_and_registration_form_signin_preserves_params(self, is_edx_domain, url_name): params = { 'enrollment_action': 'enroll', 'course_id': 'edX/DemoX/Demo_Course' } # The response should have a "Sign In" button with the URL # that preserves the querystring params with mock.patch.dict(settings.FEATURES, {'IS_EDX_DOMAIN': is_edx_domain}): response = self.client.get(reverse(url_name), params) self.assertContains(response, "login?course_id=edX%2FDemoX%2FDemo_Course&enrollment_action=enroll") # Add an additional "course mode" parameter params['course_mode'] = 'honor' # Verify that this parameter is also preserved with mock.patch.dict(settings.FEATURES, {'IS_EDX_DOMAIN': is_edx_domain}): response = self.client.get(reverse(url_name), params) expected_url = ( "login?course_id=edX%2FDemoX%2FDemo_Course" 
"&enrollment_action=enroll" "&course_mode=honor" ) self.assertContains(response, expected_url) @mock.patch.dict(settings.FEATURES, {"ENABLE_THIRD_PARTY_AUTH": False}) @ddt.data("account_login", "account_register") def test_third_party_auth_disabled(self, url_name): response = self.client.get(reverse(url_name)) self._assert_third_party_auth_data(response, None, []) @ddt.data( ("account_login", None, None), ("account_register", None, None), ("account_login", "google-oauth2", "Google"), ("account_register", "google-oauth2", "Google"), ("account_login", "facebook", "Facebook"), ("account_register", "facebook", "Facebook"), ) @ddt.unpack def test_third_party_auth(self, url_name, current_backend, current_provider): # Simulate a running pipeline if current_backend is not None: pipeline_target = "student_account.views.third_party_auth.pipeline" with simulate_running_pipeline(pipeline_target, current_backend): response = self.client.get(reverse(url_name)) # Do NOT simulate a running pipeline else: response = self.client.get(reverse(url_name)) # This relies on the THIRD_PARTY_AUTH configuration in the test settings expected_providers = [ { "name": "Facebook", "iconClass": "fa-facebook", "loginUrl": self._third_party_login_url("facebook", "login"), "registerUrl": self._third_party_login_url("facebook", "register") }, { "name": "Google", "iconClass": "fa-google-plus", "loginUrl": self._third_party_login_url("google-oauth2", "login"), "registerUrl": self._third_party_login_url("google-oauth2", "register") } ] self._assert_third_party_auth_data(response, current_provider, expected_providers) @ddt.data([], ["honor"], ["honor", "verified", "audit"], ["professional"], ["no-id-professional"]) def test_third_party_auth_course_id_verified(self, modes): # Create a course with the specified course modes course = CourseFactory.create() for slug in modes: CourseModeFactory.create( course_id=course.id, mode_slug=slug, mode_display_name=slug ) # Verify that the entry URL for third party 
auth # contains the course ID and redirects to the track selection page. course_modes_choose_url = reverse( "course_modes_choose", kwargs={"course_id": unicode(course.id)} ) expected_providers = [ { "name": "Facebook", "iconClass": "fa-facebook", "loginUrl": self._third_party_login_url( "facebook", "login", course_id=unicode(course.id), redirect_url=course_modes_choose_url ), "registerUrl": self._third_party_login_url( "facebook", "register", course_id=unicode(course.id), redirect_url=course_modes_choose_url ) }, { "name": "Google", "iconClass": "fa-google-plus", "loginUrl": self._third_party_login_url( "google-oauth2", "login", course_id=unicode(course.id), redirect_url=course_modes_choose_url ), "registerUrl": self._third_party_login_url( "google-oauth2", "register", course_id=unicode(course.id), redirect_url=course_modes_choose_url ) } ] # Verify that the login page contains the correct provider URLs response = self.client.get(reverse("account_login"), {"course_id": unicode(course.id)}) self._assert_third_party_auth_data(response, None, expected_providers) def test_third_party_auth_course_id_shopping_cart(self): # Create a course with a white-label course mode course = CourseFactory.create() CourseModeFactory.create( course_id=course.id, mode_slug="honor", mode_display_name="Honor", min_price=100 ) # Verify that the entry URL for third party auth # contains the course ID and redirects to the shopping cart shoppingcart_url = reverse("shoppingcart.views.show_cart") expected_providers = [ { "name": "Facebook", "iconClass": "fa-facebook", "loginUrl": self._third_party_login_url( "facebook", "login", course_id=unicode(course.id), redirect_url=shoppingcart_url ), "registerUrl": self._third_party_login_url( "facebook", "register", course_id=unicode(course.id), redirect_url=shoppingcart_url ) }, { "name": "Google", "iconClass": "fa-google-plus", "loginUrl": self._third_party_login_url( "google-oauth2", "login", course_id=unicode(course.id), redirect_url=shoppingcart_url 
), "registerUrl": self._third_party_login_url( "google-oauth2", "register", course_id=unicode(course.id), redirect_url=shoppingcart_url ) } ] # Verify that the login page contains the correct provider URLs response = self.client.get(reverse("account_login"), {"course_id": unicode(course.id)}) self._assert_third_party_auth_data(response, None, expected_providers) @mock.patch.dict(settings.FEATURES, {'EMBARGO': True}) def test_third_party_auth_enrollment_embargo(self): course = CourseFactory.create() # Start the pipeline attempting to enroll in a restricted course with restrict_course(course.id) as redirect_url: response = self.client.get(reverse("account_login"), {"course_id": unicode(course.id)}) # Expect that the course ID has been removed from the # login URLs (so the user won't be enrolled) and # the ?next param sends users to the blocked message. expected_providers = [ { "name": "Facebook", "iconClass": "fa-facebook", "loginUrl": self._third_party_login_url( "facebook", "login", course_id=unicode(course.id), redirect_url=redirect_url ), "registerUrl": self._third_party_login_url( "facebook", "register", course_id=unicode(course.id), redirect_url=redirect_url ) }, { "name": "Google", "iconClass": "fa-google-plus", "loginUrl": self._third_party_login_url( "google-oauth2", "login", course_id=unicode(course.id), redirect_url=redirect_url ), "registerUrl": self._third_party_login_url( "google-oauth2", "register", course_id=unicode(course.id), redirect_url=redirect_url ) } ] self._assert_third_party_auth_data(response, None, expected_providers) @override_settings(SITE_NAME=settings.MICROSITE_TEST_HOSTNAME) def test_microsite_uses_old_login_page(self): # Retrieve the login page from a microsite domain # and verify that we're served the old page. 
resp = self.client.get( reverse("account_login"), HTTP_HOST=settings.MICROSITE_TEST_HOSTNAME ) self.assertContains(resp, "Log into your Test Microsite Account") self.assertContains(resp, "login-form") def test_microsite_uses_old_register_page(self): # Retrieve the register page from a microsite domain # and verify that we're served the old page. resp = self.client.get( reverse("account_register"), HTTP_HOST=settings.MICROSITE_TEST_HOSTNAME ) self.assertContains(resp, "Register for Test Microsite") self.assertContains(resp, "register-form") def _assert_third_party_auth_data(self, response, current_provider, providers): """Verify that third party auth info is rendered correctly in a DOM data attribute. """ auth_info = markupsafe.escape( json.dumps({ "currentProvider": current_provider, "providers": providers }) ) expected_data = u"data-third-party-auth='{auth_info}'".format( auth_info=auth_info ) self.assertContains(response, expected_data) def _third_party_login_url(self, backend_name, auth_entry, course_id=None, redirect_url=None): """Construct the login URL to start third party authentication. """ params = [("auth_entry", auth_entry)] if redirect_url: params.append(("next", redirect_url)) if course_id: params.append(("enroll_course_id", course_id)) return u"{url}?{params}".format( url=reverse("social:begin", kwargs={"backend": backend_name}), params=urlencode(params) )
agpl-3.0
felixfontein/ansible
lib/ansible/plugins/action/package.py
11
4158
# (c) 2015, Ansible Inc, # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. from __future__ import (absolute_import, division, print_function) __metaclass__ = type from ansible.errors import AnsibleAction, AnsibleActionFail from ansible.executor.module_common import get_action_args_with_defaults from ansible.module_utils.facts.system.pkg_mgr import PKG_MGRS from ansible.plugins.action import ActionBase from ansible.utils.display import Display display = Display() class ActionModule(ActionBase): TRANSFERS_FILES = False BUILTIN_PKG_MGR_MODULES = set([manager['name'] for manager in PKG_MGRS]) def run(self, tmp=None, task_vars=None): ''' handler for package operations ''' self._supports_check_mode = True self._supports_async = True result = super(ActionModule, self).run(tmp, task_vars) del tmp # tmp no longer has any effect module = self._task.args.get('use', 'auto') if module == 'auto': try: if self._task.delegate_to: # if we delegate, we should use delegated host's facts module = self._templar.template("{{hostvars['%s']['ansible_facts']['pkg_mgr']}}" % self._task.delegate_to) else: module = self._templar.template('{{ansible_facts.pkg_mgr}}') except Exception: pass # could not get it from template! 
try: if module == 'auto': facts = self._execute_module( module_name='ansible.legacy.setup', module_args=dict(filter='ansible_pkg_mgr', gather_subset='!all'), task_vars=task_vars) display.debug("Facts %s" % facts) module = facts.get('ansible_facts', {}).get('ansible_pkg_mgr', 'auto') if module != 'auto': if not self._shared_loader_obj.module_loader.has_plugin(module): raise AnsibleActionFail('Could not find a module for %s.' % module) else: # run the 'package' module new_module_args = self._task.args.copy() if 'use' in new_module_args: del new_module_args['use'] # get defaults for specific module context = self._shared_loader_obj.module_loader.find_plugin_with_context(module, collection_list=self._task.collections) new_module_args = get_action_args_with_defaults( module, new_module_args, self._task.module_defaults, self._templar, context.redirect_list ) if module in self.BUILTIN_PKG_MGR_MODULES: # prefix with ansible.legacy to eliminate external collisions while still allowing library/ override module = 'ansible.legacy.' + module display.vvvv("Running %s" % module) result.update(self._execute_module(module_name=module, module_args=new_module_args, task_vars=task_vars, wrap_async=self._task.async_val)) else: raise AnsibleActionFail('Could not detect which package manager to use. Try gathering facts or setting the "use" option.') except AnsibleAction as e: result.update(e.result) finally: if not self._task.async_val: # remove a temporary path we created self._remove_tmp_path(self._connection._shell.tmpdir) return result
gpl-3.0
schocco/mds-web
mds_website/settings.py
1
8687
# -*- coding: utf-8 -*- # Django settings for mds_website project. import os DEBUG = True TEMPLATE_DEBUG = DEBUG PROJECT_DIR = os.path.dirname(__file__) ADMINS = ( # ('Your Name', 'your_email@example.com'), ) MANAGERS = ADMINS DATABASES = { 'default': { 'ENGINE': 'django.contrib.gis.db.backends.postgis', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'. 'NAME': 'mdsdb', # Or path to database file if using sqlite3. # The following settings are not used with sqlite3: 'USER': 'postgres', 'PASSWORD': '', 'HOST': '', # Empty for localhost through domain sockets or '127.0.0.1' for localhost through TCP. 'PORT': '', # Set to empty string for default. } } TEST_RUNNER = 'django.test.runner.DiscoverRunner' # Hosts/domain names that are valid for this site; required if DEBUG is False # See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts ALLOWED_HOSTS = [] # Local time zone for this installation. Choices can be found here: # http://en.wikipedia.org/wiki/List_of_tz_zones_by_name # although not all choices may be available on all operating systems. # In a Windows environment this must be set to your system time zone. TIME_ZONE = 'America/Chicago' # Language code for this installation. All choices can be found here: # http://www.i18nguy.com/unicode/language-identifiers.html LANGUAGE_CODE = 'en-us' SITE_ID = 1 # If you set this to False, Django will make some optimizations so as not # to load the internationalization machinery. USE_I18N = True # If you set this to False, Django will not format dates, numbers and # calendars according to the current locale. USE_L10N = True # If you set this to False, Django will not use timezone-aware datetimes. USE_TZ = True #dont force trailing backslash #APPEND_SLASH = False #TASTYPIE_ALLOW_MISSING_SLASH = APPEND_SLASH TASTYPIE_DEFAULT_FORMATS = ['json'] # Absolute filesystem path to the directory that will hold user-uploaded files. 
# Example: "/var/www/example.com/media/" MEDIA_ROOT = os.path.abspath(os.path.join(PROJECT_DIR, "media")) # URL that handles the media served from MEDIA_ROOT. Make sure to use a # trailing slash. # Examples: "http://example.com/media/", "http://media.example.com/" MEDIA_URL = '' # Absolute path to the directory static files should be collected to. # Don't put anything in this directory yourself; store your static files # in apps' "static/" subdirectories and in STATICFILES_DIRS. # Example: "/var/www/example.com/static/" STATIC_ROOT = os.path.abspath(os.path.join(PROJECT_DIR, "../sitestatic")) # URL prefix for static files. # Example: "http://example.com/static/", "http://static.example.com/" STATIC_URL = '/static/' # Additional locations of static files STATICFILES_DIRS = ( # Put strings here, like "/home/html/static" or "C:/www/django/static". # Always use forward slashes, even on Windows. # Don't forget to use absolute paths, not relative paths. os.path.abspath(os.path.join(PROJECT_DIR, "../mds-web-client/dist")), ) STATICFILES_STORAGE = 'webpack.storage.WebpackHashStorage' WEBPACK_ASSETS_FILE = os.path.abspath(os.path.join(PROJECT_DIR, "../mds-web-client/webpack-assets.json")) # List of finder classes that know how to find static files in # various locations. STATICFILES_FINDERS = ( 'django.contrib.staticfiles.finders.FileSystemFinder', 'django.contrib.staticfiles.finders.AppDirectoriesFinder', # 'django.contrib.staticfiles.finders.DefaultStorageFinder', ) # Make this unique, and don't share it with anybody. SECRET_KEY = 'use your own secret key.' # List of callables that know how to import templates from various sources. 
TEMPLATE_LOADERS = ( 'django.template.loaders.filesystem.Loader', 'django.template.loaders.app_directories.Loader', # 'django.template.loaders.eggs.Loader', ) TEMPLATE_CONTEXT_PROCESSORS = ( 'django.contrib.auth.context_processors.auth', 'django.core.context_processors.debug', 'django.core.context_processors.i18n', 'django.core.context_processors.media', 'django.core.context_processors.static', 'django.core.context_processors.tz', 'django.contrib.messages.context_processors.messages', 'social.apps.django_app.context_processors.backends', 'social.apps.django_app.context_processors.login_redirect', ) # google api console: https://console.developers.google.com/project/api-access-tests/apiui/credential?authuser=0 AUTHENTICATION_BACKENDS = ( 'social.backends.facebook.FacebookOAuth2', 'social.backends.google.GoogleOAuth2', 'social.backends.twitter.TwitterOAuth', 'social.backends.vk.VKOAuth2', 'django.contrib.auth.backends.ModelBackend', ) #SOCIAL_AUTH_GOOGLE_OAUTH2_SCOPE = ['https://www.googleapis.com/auth/profile', 'https://www.googleapis.com/auth/email'] LOGIN_REDIRECT_URL = '/' MIDDLEWARE_CLASSES = ( 'django.middleware.common.CommonMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'apps.mds_auth.middleware.SocialAuthExceptionHandlerMiddleware' # Uncomment the next line for simple clickjacking protection: # 'django.middleware.clickjacking.XFrameOptionsMiddleware', ) SOCIAL_AUTH_PIPELINE = ( 'social.pipeline.social_auth.social_details', 'social.pipeline.social_auth.social_uid', 'social.pipeline.social_auth.auth_allowed', 'social.pipeline.social_auth.social_user', 'social.pipeline.user.get_username', 'social.pipeline.user.create_user', 'apps.mds_auth.auth_pipeline.save_profile', # get profile data from oauth resource 'social.pipeline.social_auth.associate_user', 
'social.pipeline.social_auth.load_extra_data', 'social.pipeline.user.user_details', 'apps.mds_auth.auth_pipeline.device_redirect', # change ?next parameter to provide access token for mobile apps ) ROOT_URLCONF = 'mds_website.urls' # Python dotted path to the WSGI application used by Django's runserver. WSGI_APPLICATION = 'mds_website.wsgi.application' TEMPLATE_DIRS = ( # Put strings here, like "/home/html/django_templates" or "C:/www/django/templates". # Always use forward slashes, even on Windows. # Don't forget to use absolute paths, not relative paths. os.path.join(PROJECT_DIR, 'templates'), ) ############# CELERY SETTINGS ## Using the database to store task state and results. CELERY_RESULT_BACKEND = 'amqp' BROKER_HOST = "localhost" #BROKER_URL = 'amqp://guest:guest@localhost:5672/celeryvhost' CELERY_TIMEZONE = TIME_ZONE INSTALLED_APPS = ( 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.sites', 'django.contrib.messages', 'django.contrib.staticfiles', 'django.contrib.admin', 'django.contrib.admindocs', 'django.contrib.gis', 'social.apps.django_app.default', 'tastypie', 'apps.muni_scales', 'apps.trails', 'apps.mds_auth', ) # A sample logging configuration. The only tangible logging # performed by this configuration is to send an email to # the site admins on every HTTP 500 error when DEBUG=False. # See http://docs.djangoproject.com/en/dev/topics/logging for # more details on how to customize your logging configuration. 
LOGGING = { 'version': 1, 'disable_existing_loggers': True, 'formatters': { 'verbose': { 'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s' }, 'simple': { 'format': '%(levelname)s %(message)s' }, }, 'filters': { 'require_debug_false': { '()': 'django.utils.log.RequireDebugFalse' } }, 'handlers': { 'mail_admins': { 'level': 'ERROR', 'filters': ['require_debug_false'], 'class': 'django.utils.log.AdminEmailHandler' }, 'console':{ 'level': 'DEBUG', 'class': 'logging.StreamHandler', 'formatter': 'simple' }, }, 'loggers': { 'django.request': { 'handlers': ['mail_admins'], 'level': 'ERROR', 'propagate': True, }, 'custom': { 'handlers': ['console', 'mail_admins'], 'level': 'DEBUG', } } } # import local settings file if one exists # apparantly using system environments is the better solution try: from settings_local import * except Exception, e: print("Could not find a local settings file.")
mit
moijes12/oh-mainline
vendor/packages/twisted/twisted/internet/_posixserialport.py
18
1823
# Copyright (c) Twisted Matrix Laboratories. # See LICENSE for details. """ Serial Port Protocol """ # system imports import os, errno # dependent on pyserial ( http://pyserial.sf.net/ ) # only tested w/ 1.18 (5 Dec 2002) import serial from serial import PARITY_NONE, PARITY_EVEN, PARITY_ODD from serial import STOPBITS_ONE, STOPBITS_TWO from serial import FIVEBITS, SIXBITS, SEVENBITS, EIGHTBITS from serialport import BaseSerialPort # twisted imports from twisted.internet import abstract, fdesc, main class SerialPort(BaseSerialPort, abstract.FileDescriptor): """ A select()able serial device, acting as a transport. """ connected = 1 def __init__(self, protocol, deviceNameOrPortNumber, reactor, baudrate = 9600, bytesize = EIGHTBITS, parity = PARITY_NONE, stopbits = STOPBITS_ONE, timeout = 0, xonxoff = 0, rtscts = 0): abstract.FileDescriptor.__init__(self, reactor) self._serial = serial.Serial(deviceNameOrPortNumber, baudrate = baudrate, bytesize = bytesize, parity = parity, stopbits = stopbits, timeout = timeout, xonxoff = xonxoff, rtscts = rtscts) self.reactor = reactor self.flushInput() self.flushOutput() self.protocol = protocol self.protocol.makeConnection(self) self.startReading() def fileno(self): return self._serial.fd def writeSomeData(self, data): """ Write some data to the serial device. """ return fdesc.writeToFD(self.fileno(), data) def doRead(self): """ Some data's readable from serial device. """ return fdesc.readFromFD(self.fileno(), self.protocol.dataReceived) def connectionLost(self, reason): abstract.FileDescriptor.connectionLost(self, reason) self._serial.close()
agpl-3.0
Big-B702/python-for-android
python-modules/twisted/twisted/test/test_text.py
49
5450
# Copyright (c) 2001-2010 Twisted Matrix Laboratories. # See LICENSE for details. from twisted.trial import unittest from twisted.python import text import string from cStringIO import StringIO sampleText = \ """Every attempt to employ mathematical methods in the study of chemical questions must be considered profoundly irrational and contrary to the spirit of chemistry ... If mathematical analysis should ever hold a prominent place in chemistry - an aberration which is happily almost impossible - it would occasion a rapid and widespread degeneration of that science. -- Auguste Comte, Philosophie Positive, Paris, 1838 """ lineWidth = 72 def set_lineWidth(n): global lineWidth lineWidth = n class WrapTest(unittest.TestCase): def setUp(self): self.sampleSplitText = string.split(sampleText) self.output = text.wordWrap(sampleText, lineWidth) def test_wordCount(self): """Compare the number of words.""" words = [] for line in self.output: words.extend(string.split(line)) wordCount = len(words) sampleTextWordCount = len(self.sampleSplitText) self.failUnlessEqual(wordCount, sampleTextWordCount) def test_wordMatch(self): """Compare the lists of words.""" words = [] for line in self.output: words.extend(string.split(line)) # Using failUnlessEqual here prints out some # rather too long lists. self.failUnless(self.sampleSplitText == words) def test_lineLength(self): """Check the length of the lines.""" failures = [] for line in self.output: if not len(line) <= lineWidth: failures.append(len(line)) if failures: self.fail("%d of %d lines were too long.\n" "%d < %s" % (len(failures), len(self.output), lineWidth, failures)) class SplitTest(unittest.TestCase): """Tests for text.splitQuoted()""" def test_oneWord(self): """Splitting strings with one-word phrases.""" s = 'This code "works."' r = text.splitQuoted(s) self.failUnlessEqual(['This', 'code', 'works.'], r) def test_multiWord(self): s = 'The "hairy monkey" likes pie.' 
r = text.splitQuoted(s) self.failUnlessEqual(['The', 'hairy monkey', 'likes', 'pie.'], r) # Some of the many tests that would fail: #def test_preserveWhitespace(self): # phrase = '"MANY SPACES"' # s = 'With %s between.' % (phrase,) # r = text.splitQuoted(s) # self.failUnlessEqual(['With', phrase, 'between.'], r) #def test_escapedSpace(self): # s = r"One\ Phrase" # r = text.splitQuoted(s) # self.failUnlessEqual(["One Phrase"], r) class StrFileTest(unittest.TestCase): def setUp(self): self.io = StringIO("this is a test string") def tearDown(self): pass def test_1_f(self): self.assertEquals(False, text.strFile("x", self.io)) def test_1_1(self): self.assertEquals(True, text.strFile("t", self.io)) def test_1_2(self): self.assertEquals(True, text.strFile("h", self.io)) def test_1_3(self): self.assertEquals(True, text.strFile("i", self.io)) def test_1_4(self): self.assertEquals(True, text.strFile("s", self.io)) def test_1_5(self): self.assertEquals(True, text.strFile("n", self.io)) def test_1_6(self): self.assertEquals(True, text.strFile("g", self.io)) def test_3_1(self): self.assertEquals(True, text.strFile("thi", self.io)) def test_3_2(self): self.assertEquals(True, text.strFile("his", self.io)) def test_3_3(self): self.assertEquals(True, text.strFile("is ", self.io)) def test_3_4(self): self.assertEquals(True, text.strFile("ing", self.io)) def test_3_f(self): self.assertEquals(False, text.strFile("bla", self.io)) def test_large_1(self): self.assertEquals(True, text.strFile("this is a test", self.io)) def test_large_2(self): self.assertEquals(True, text.strFile("is a test string", self.io)) def test_large_f(self): self.assertEquals(False, text.strFile("ds jhfsa k fdas", self.io)) def test_overlarge_f(self): self.assertEquals(False, text.strFile("djhsakj dhsa fkhsa s,mdbnfsauiw bndasdf hreew", self.io)) def test_self(self): self.assertEquals(True, text.strFile("this is a test string", self.io)) def test_insensitive(self): self.assertEquals(True, text.strFile("ThIs is A 
test STRING", self.io, False)) class DeprecationTest(unittest.TestCase): """ Tests for deprecations in L{twisted.python.text} """ def test_docstringLStrip(self): """ L{docstringLStrip} is deprecated as of 10.2.0 """ text.docstringLStrip("") warningsShown = self.flushWarnings([self.test_docstringLStrip]) self.assertEquals(1, len(warningsShown)) self.assertIdentical(warningsShown[0]['category'], DeprecationWarning) self.assertEquals(warningsShown[0]['message'], "twisted.python.text.docstringLStrip was " "deprecated in Twisted 10.2.0: Please use " "inspect.getdoc instead.") testCases = [WrapTest, SplitTest, StrFileTest]
apache-2.0
HybridF5/tempest_debug
tempest/stress/stressaction.py
6
3298
# (c) Copyright 2013 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc import signal import sys import six from oslo_log import log as logging @six.add_metaclass(abc.ABCMeta) class StressAction(object): def __init__(self, manager, max_runs=None, stop_on_error=False): full_cname = self.__module__ + "." + self.__class__.__name__ self.logger = logging.getLogger(full_cname) self.manager = manager self.max_runs = max_runs self.stop_on_error = stop_on_error def _shutdown_handler(self, signal, frame): try: self.tearDown() except Exception: self.logger.exception("Error while tearDown") sys.exit(0) @property def action(self): """This methods returns the action. Overload this if you create a stress test wrapper. """ return self.__class__.__name__ def setUp(self, **kwargs): """Initialize test structures/resources This method is called before "run" method to help the test initialize any structures. kwargs contains arguments passed in from the configuration json file. setUp doesn't count against the time duration. """ self.logger.debug("setUp") def tearDown(self): """Cleanup test structures/resources This method is called to do any cleanup after the test is complete. """ self.logger.debug("tearDown") def execute(self, shared_statistic): """This is the main execution entry point called by the driver. We register a signal handler to allow us to tearDown gracefully, and then exit. We also keep track of how many runs we do. 
""" signal.signal(signal.SIGHUP, self._shutdown_handler) signal.signal(signal.SIGTERM, self._shutdown_handler) while self.max_runs is None or (shared_statistic['runs'] < self.max_runs): self.logger.debug("Trigger new run (run %d)" % shared_statistic['runs']) try: self.run() except Exception: shared_statistic['fails'] += 1 self.logger.exception("Failure in run") finally: shared_statistic['runs'] += 1 if self.stop_on_error and (shared_statistic['fails'] > 1): self.logger.warning("Stop process due to" "\"stop-on-error\" argument") self.tearDown() sys.exit(1) @abc.abstractmethod def run(self): """This method is where the stress test code runs.""" return
apache-2.0
pquerna/cloud-init-debian-pkg-dead
tests/unittests/test_cs_util.py
7
2050
from mocker import MockerTestCase from cloudinit.cs_utils import Cepko SERVER_CONTEXT = { "cpu": 1000, "cpus_instead_of_cores": False, "global_context": {"some_global_key": "some_global_val"}, "mem": 1073741824, "meta": {"ssh_public_key": "ssh-rsa AAAAB3NzaC1yc2E.../hQ5D5 john@doe"}, "name": "test_server", "requirements": [], "smp": 1, "tags": ["much server", "very performance"], "uuid": "65b2fb23-8c03-4187-a3ba-8b7c919e889", "vnc_password": "9e84d6cb49e46379" } class CepkoMock(Cepko): def all(self): return SERVER_CONTEXT def get(self, key="", request_pattern=None): return SERVER_CONTEXT['tags'] class CepkoResultTests(MockerTestCase): def setUp(self): self.mocked = self.mocker.replace("cloudinit.cs_utils.Cepko", spec=CepkoMock, count=False, passthrough=False) self.mocked() self.mocker.result(CepkoMock()) self.mocker.replay() self.c = Cepko() def test_getitem(self): result = self.c.all() self.assertEqual("65b2fb23-8c03-4187-a3ba-8b7c919e889", result['uuid']) self.assertEqual([], result['requirements']) self.assertEqual("much server", result['tags'][0]) self.assertEqual(1, result['smp']) def test_len(self): self.assertEqual(len(SERVER_CONTEXT), len(self.c.all())) def test_contains(self): result = self.c.all() self.assertTrue('uuid' in result) self.assertFalse('uid' in result) self.assertTrue('meta' in result) self.assertFalse('ssh_public_key' in result) def test_iter(self): self.assertEqual(sorted(SERVER_CONTEXT.keys()), sorted([key for key in self.c.all()])) def test_with_list_as_result(self): result = self.c.get('tags') self.assertEqual('much server', result[0]) self.assertTrue('very performance' in result) self.assertEqual(2, len(result))
gpl-3.0
froyobin/horizon
openstack_dashboard/dashboards/project/data_processing/jobs/workflows/launch.py
14
16820
# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import json import logging from django.utils.translation import ugettext_lazy as _ from horizon import exceptions from horizon import forms from horizon import workflows from openstack_dashboard.api import sahara as saharaclient import openstack_dashboard.dashboards.project.data_processing. \ cluster_templates.workflows.create as t_flows import openstack_dashboard.dashboards.project.data_processing. \ clusters.workflows.create as c_flow import openstack_dashboard.dashboards.project.data_processing. 
\ utils.workflow_helpers as whelpers LOG = logging.getLogger(__name__) DATA_SOURCE_CREATE_URL = ("horizon:project:data_processing.data_sources" ":create-data-source") class JobExecutionGeneralConfigAction(workflows.Action): job_input = forms.DynamicChoiceField( label=_("Input"), initial=(None, "None"), add_item_link=DATA_SOURCE_CREATE_URL) job_output = forms.DynamicChoiceField( label=_("Output"), initial=(None, "None"), add_item_link=DATA_SOURCE_CREATE_URL) def __init__(self, request, *args, **kwargs): super(JobExecutionGeneralConfigAction, self).__init__(request, *args, **kwargs) if request.REQUEST.get("job_id", None) is None: self.fields["job"] = forms.ChoiceField( label=_("Job")) self.fields["job"].choices = self.populate_job_choices(request) else: self.fields["job"] = forms.CharField( widget=forms.HiddenInput(), initial=request.REQUEST.get("job_id", None)) def populate_job_input_choices(self, request, context): return self.get_data_source_choices(request, context) def populate_job_output_choices(self, request, context): return self.get_data_source_choices(request, context) def get_data_source_choices(self, request, context): try: data_sources = saharaclient.data_source_list(request) except Exception: data_sources = [] exceptions.handle(request, _("Unable to fetch data sources.")) choices = [(data_source.id, data_source.name) for data_source in data_sources] choices.insert(0, (None, 'None')) return choices def populate_job_choices(self, request): try: jobs = saharaclient.job_list(request) except Exception: jobs = [] exceptions.handle(request, _("Unable to fetch jobs.")) choices = [(job.id, job.name) for job in jobs] return choices class Meta: name = _("Job") help_text_template = ( "project/data_processing.jobs/_launch_job_help.html") class JobExecutionExistingGeneralConfigAction(JobExecutionGeneralConfigAction): cluster = forms.ChoiceField( label=_("Cluster"), initial=(None, "None"), widget=forms.Select(attrs={"class": "cluster_choice"})) def 
populate_cluster_choices(self, request, context): try: clusters = saharaclient.cluster_list(request) except Exception: clusters = [] exceptions.handle(request, _("Unable to fetch clusters.")) choices = [(cluster.id, cluster.name) for cluster in clusters] return choices class Meta: name = _("Job") help_text_template = ( "project/data_processing.jobs/_launch_job_help.html") class JobConfigAction(workflows.Action): MAIN_CLASS = "edp.java.main_class" JAVA_OPTS = "edp.java.java_opts" EDP_MAPPER = "edp.streaming.mapper" EDP_REDUCER = "edp.streaming.reducer" EDP_PREFIX = "edp." property_name = forms.ChoiceField( required=False, ) job_configs = forms.CharField( required=False, widget=forms.HiddenInput()) job_params = forms.CharField( required=False, widget=forms.HiddenInput()) job_args_array = forms.CharField( required=False, widget=forms.HiddenInput()) job_type = forms.CharField( required=False, widget=forms.HiddenInput()) main_class = forms.CharField(label=_("Main Class"), required=False) java_opts = forms.CharField(label=_("Java Opts"), required=False) streaming_mapper = forms.CharField(label=_("Mapper")) streaming_reducer = forms.CharField(label=_("Reducer")) def __init__(self, request, *args, **kwargs): super(JobConfigAction, self).__init__(request, *args, **kwargs) job_ex_id = request.REQUEST.get("job_execution_id") if job_ex_id is not None: job_ex_id = request.REQUEST.get("job_execution_id") job_ex = saharaclient.job_execution_get(request, job_ex_id) job_configs = job_ex.job_configs edp_configs = {} if 'configs' in job_configs: configs, edp_configs = ( self.clean_edp_configs(job_configs['configs'])) self.fields['job_configs'].initial = ( json.dumps(configs)) if 'params' in job_configs: self.fields['job_params'].initial = ( json.dumps(job_configs['params'])) job_args = json.dumps(job_configs['args']) self.fields['job_args_array'].initial = job_args if self.MAIN_CLASS in edp_configs: self.fields['main_class'].initial = ( edp_configs[self.MAIN_CLASS]) if self.JAVA_OPTS 
in edp_configs: self.fields['java_opts'].initial = ( edp_configs[self.JAVA_OPTS]) if self.EDP_MAPPER in edp_configs: self.fields['streaming_mapper'].initial = ( edp_configs[self.EDP_MAPPER]) if self.EDP_REDUCER in edp_configs: self.fields['streaming_reducer'].initial = ( edp_configs[self.EDP_REDUCER]) def clean(self): cleaned_data = super(workflows.Action, self).clean() job_type = cleaned_data.get("job_type", None) if job_type != "MapReduce.Streaming": if "streaming_mapper" in self._errors: del self._errors["streaming_mapper"] if "streaming_reducer" in self._errors: del self._errors["streaming_reducer"] return cleaned_data def populate_property_name_choices(self, request, context): job_id = request.REQUEST.get("job_id") or request.REQUEST.get("job") job_type = saharaclient.job_get(request, job_id).type job_configs = ( saharaclient.job_get_configs(request, job_type).job_config) choices = [(param['value'], param['name']) for param in job_configs['configs']] return choices def clean_edp_configs(self, configs): edp_configs = {} for key, value in configs.iteritems(): if key.startswith(self.EDP_PREFIX): edp_configs[key] = value for rmkey in edp_configs.keys(): del configs[rmkey] return (configs, edp_configs) class Meta: name = _("Configure") help_text_template = ( "project/data_processing.jobs/_launch_job_configure_help.html") class JobExecutionGeneralConfig(workflows.Step): action_class = JobExecutionGeneralConfigAction def contribute(self, data, context): for k, v in data.items(): if k in ["job_input", "job_output"]: context["job_general_" + k] = None if v == "None" else v else: context["job_general_" + k] = v return context class JobExecutionExistingGeneralConfig(workflows.Step): action_class = JobExecutionExistingGeneralConfigAction def contribute(self, data, context): for k, v in data.items(): if k in ["job_input", "job_output"]: context["job_general_" + k] = None if v == "None" else v else: context["job_general_" + k] = v return context class 
JobConfig(workflows.Step): action_class = JobConfigAction template_name = 'project/data_processing.jobs/config_template.html' def contribute(self, data, context): job_config = self.clean_configs( json.loads(data.get("job_configs", '{}'))) job_params = self.clean_configs( json.loads(data.get("job_params", '{}'))) job_args_array = self.clean_configs( json.loads(data.get("job_args_array", '[]'))) job_type = data.get("job_type", '') context["job_type"] = job_type context["job_config"] = {"configs": job_config} context["job_config"]["args"] = job_args_array if job_type in ["Java", "Spark"]: context["job_config"]["configs"][JobConfigAction.MAIN_CLASS] = ( data.get("main_class", "")) context["job_config"]["configs"][JobConfigAction.JAVA_OPTS] = ( data.get("java_opts", "")) elif job_type == "MapReduce.Streaming": context["job_config"]["configs"][JobConfigAction.EDP_MAPPER] = ( data.get("streaming_mapper", "")) context["job_config"]["configs"][JobConfigAction.EDP_REDUCER] = ( data.get("streaming_reducer", "")) else: context["job_config"]["params"] = job_params return context @staticmethod def clean_configs(configs): cleaned_conf = None if isinstance(configs, dict): cleaned_conf = dict([(k.strip(), v.strip()) for k, v in configs.items() if len(v.strip()) > 0 and len(k.strip()) > 0]) elif isinstance(configs, list): cleaned_conf = list([v.strip() for v in configs if len(v.strip()) > 0]) return cleaned_conf class NewClusterConfigAction(c_flow.GeneralConfigAction): persist_cluster = forms.BooleanField( label=_("Persist cluster after job exit"), required=False) class Meta: name = _("Configure Cluster") help_text_template = ( "project/data_processing.clusters/_configure_general_help.html") class ClusterGeneralConfig(workflows.Step): action_class = NewClusterConfigAction contributes = ("hidden_configure_field", ) def contribute(self, data, context): for k, v in data.items(): context["cluster_general_" + k] = v return context class LaunchJob(workflows.Workflow): slug = "launch_job" 
name = _("Launch Job") finalize_button_name = _("Launch") success_message = _("Job launched") failure_message = _("Could not launch job") success_url = "horizon:project:data_processing.job_executions:index" default_steps = (JobExecutionExistingGeneralConfig, JobConfig) def handle(self, request, context): saharaclient.job_execution_create( request, context["job_general_job"], context["job_general_cluster"], context["job_general_job_input"], context["job_general_job_output"], context["job_config"]) return True class SelectHadoopPluginAction(t_flows.SelectPluginAction): def __init__(self, request, *args, **kwargs): super(SelectHadoopPluginAction, self).__init__(request, *args, **kwargs) self.fields["job_id"] = forms.ChoiceField( label=_("Plugin name"), initial=request.GET.get("job_id") or request.POST.get("job_id"), widget=forms.HiddenInput(attrs={"class": "hidden_create_field"})) self.fields["job_configs"] = forms.ChoiceField( label=_("Job configs"), widget=forms.HiddenInput(attrs={"class": "hidden_create_field"})) self.fields["job_args"] = forms.ChoiceField( label=_("Job args"), widget=forms.HiddenInput(attrs={"class": "hidden_create_field"})) self.fields["job_params"] = forms.ChoiceField( label=_("Job params"), widget=forms.HiddenInput(attrs={"class": "hidden_create_field"})) job_ex_id = request.REQUEST.get("job_execution_id") if job_ex_id is not None: self.fields["job_execution_id"] = forms.ChoiceField( label=_("Job Execution ID"), initial=request.REQUEST.get("job_execution_id"), widget=forms.HiddenInput( attrs={"class": "hidden_create_field"})) job_ex_id = request.REQUEST.get("job_execution_id") job_configs = ( saharaclient.job_execution_get(request, job_ex_id).job_configs) if "configs" in job_configs: self.fields["job_configs"].initial = ( json.dumps(job_configs["configs"])) if "params" in job_configs: self.fields["job_params"].initial = ( json.dumps(job_configs["params"])) if "args" in job_configs: self.fields["job_args"].initial = ( 
json.dumps(job_configs["args"])) class Meta: name = _("Select plugin and hadoop version for cluster") help_text_template = ("project/data_processing.clusters/" "_create_general_help.html") class SelectHadoopPlugin(workflows.Step): action_class = SelectHadoopPluginAction class ChosePluginVersion(workflows.Workflow): slug = "lunch_job" name = _("Launch Job") finalize_button_name = _("Create") success_message = _("Created") failure_message = _("Could not create") success_url = "horizon:project:data_processing.cluster_templates:index" default_steps = (SelectHadoopPlugin,) class LaunchJobNewCluster(workflows.Workflow): slug = "launch_job" name = _("Launch Job") finalize_button_name = _("Launch") success_message = _("Job launched") failure_message = _("Could not launch job") success_url = "horizon:project:data_processing.jobs:index" default_steps = (ClusterGeneralConfig, JobExecutionGeneralConfig, JobConfig) def handle(self, request, context): node_groups = None plugin, hadoop_version = ( whelpers.get_plugin_and_hadoop_version(request)) ct_id = context["cluster_general_cluster_template"] or None user_keypair = context["cluster_general_keypair"] or None try: cluster = saharaclient.cluster_create( request, context["cluster_general_cluster_name"], plugin, hadoop_version, cluster_template_id=ct_id, default_image_id=context["cluster_general_image"], description=context["cluster_general_description"], node_groups=node_groups, user_keypair_id=user_keypair, is_transient=not(context["cluster_general_persist_cluster"]), net_id=context.get( "cluster_general_neutron_management_network", None)) except Exception: exceptions.handle(request, _("Unable to create new cluster for job.")) return False try: saharaclient.job_execution_create( request, context["job_general_job"], cluster.id, context["job_general_job_input"], context["job_general_job_output"], context["job_config"]) except Exception: exceptions.handle(request, _("Unable to launch job.")) return False return True
apache-2.0
ryfeus/lambda-packs
pytorch/source/numpy/polynomial/hermite_e.py
4
58237
""" Objects for dealing with Hermite_e series. This module provides a number of objects (mostly functions) useful for dealing with Hermite_e series, including a `HermiteE` class that encapsulates the usual arithmetic operations. (General information on how this module represents and works with such polynomials is in the docstring for its "parent" sub-package, `numpy.polynomial`). Constants --------- - `hermedomain` -- Hermite_e series default domain, [-1,1]. - `hermezero` -- Hermite_e series that evaluates identically to 0. - `hermeone` -- Hermite_e series that evaluates identically to 1. - `hermex` -- Hermite_e series for the identity map, ``f(x) = x``. Arithmetic ---------- - `hermeadd` -- add two Hermite_e series. - `hermesub` -- subtract one Hermite_e series from another. - `hermemulx` -- multiply a Hermite_e series in ``P_i(x)`` by ``x``. - `hermemul` -- multiply two Hermite_e series. - `hermediv` -- divide one Hermite_e series by another. - `hermepow` -- raise a Hermite_e series to a positive integer power. - `hermeval` -- evaluate a Hermite_e series at given points. - `hermeval2d` -- evaluate a 2D Hermite_e series at given points. - `hermeval3d` -- evaluate a 3D Hermite_e series at given points. - `hermegrid2d` -- evaluate a 2D Hermite_e series on a Cartesian product. - `hermegrid3d` -- evaluate a 3D Hermite_e series on a Cartesian product. Calculus -------- - `hermeder` -- differentiate a Hermite_e series. - `hermeint` -- integrate a Hermite_e series. Misc Functions -------------- - `hermefromroots` -- create a Hermite_e series with specified roots. - `hermeroots` -- find the roots of a Hermite_e series. - `hermevander` -- Vandermonde-like matrix for Hermite_e polynomials. - `hermevander2d` -- Vandermonde-like matrix for 2D power series. - `hermevander3d` -- Vandermonde-like matrix for 3D power series. - `hermegauss` -- Gauss-Hermite_e quadrature, points and weights. - `hermeweight` -- Hermite_e weight function. 
- `hermecompanion` -- symmetrized companion matrix in Hermite_e form. - `hermefit` -- least-squares fit returning a Hermite_e series. - `hermetrim` -- trim leading coefficients from a Hermite_e series. - `hermeline` -- Hermite_e series of given straight line. - `herme2poly` -- convert a Hermite_e series to a polynomial. - `poly2herme` -- convert a polynomial to a Hermite_e series. Classes ------- - `HermiteE` -- A Hermite_e series class. See also -------- `numpy.polynomial` """ from __future__ import division, absolute_import, print_function import warnings import numpy as np import numpy.linalg as la from numpy.core.multiarray import normalize_axis_index from . import polyutils as pu from ._polybase import ABCPolyBase __all__ = [ 'hermezero', 'hermeone', 'hermex', 'hermedomain', 'hermeline', 'hermeadd', 'hermesub', 'hermemulx', 'hermemul', 'hermediv', 'hermepow', 'hermeval', 'hermeder', 'hermeint', 'herme2poly', 'poly2herme', 'hermefromroots', 'hermevander', 'hermefit', 'hermetrim', 'hermeroots', 'HermiteE', 'hermeval2d', 'hermeval3d', 'hermegrid2d', 'hermegrid3d', 'hermevander2d', 'hermevander3d', 'hermecompanion', 'hermegauss', 'hermeweight'] hermetrim = pu.trimcoef def poly2herme(pol): """ poly2herme(pol) Convert a polynomial to a Hermite series. Convert an array representing the coefficients of a polynomial (relative to the "standard" basis) ordered from lowest degree to highest, to an array of the coefficients of the equivalent Hermite series, ordered from lowest to highest degree. Parameters ---------- pol : array_like 1-D array containing the polynomial coefficients Returns ------- c : ndarray 1-D array containing the coefficients of the equivalent Hermite series. See Also -------- herme2poly Notes ----- The easy way to do conversions between polynomial basis sets is to use the convert method of a class instance. 
Examples -------- >>> from numpy.polynomial.hermite_e import poly2herme >>> poly2herme(np.arange(4)) array([ 2., 10., 2., 3.]) """ [pol] = pu.as_series([pol]) deg = len(pol) - 1 res = 0 for i in range(deg, -1, -1): res = hermeadd(hermemulx(res), pol[i]) return res def herme2poly(c): """ Convert a Hermite series to a polynomial. Convert an array representing the coefficients of a Hermite series, ordered from lowest degree to highest, to an array of the coefficients of the equivalent polynomial (relative to the "standard" basis) ordered from lowest to highest degree. Parameters ---------- c : array_like 1-D array containing the Hermite series coefficients, ordered from lowest order term to highest. Returns ------- pol : ndarray 1-D array containing the coefficients of the equivalent polynomial (relative to the "standard" basis) ordered from lowest order term to highest. See Also -------- poly2herme Notes ----- The easy way to do conversions between polynomial basis sets is to use the convert method of a class instance. Examples -------- >>> from numpy.polynomial.hermite_e import herme2poly >>> herme2poly([ 2., 10., 2., 3.]) array([ 0., 1., 2., 3.]) """ from .polynomial import polyadd, polysub, polymulx [c] = pu.as_series([c]) n = len(c) if n == 1: return c if n == 2: return c else: c0 = c[-2] c1 = c[-1] # i is the current degree of c1 for i in range(n - 1, 1, -1): tmp = c0 c0 = polysub(c[i - 2], c1*(i - 1)) c1 = polyadd(tmp, polymulx(c1)) return polyadd(c0, polymulx(c1)) # # These are constant arrays are of integer type so as to be compatible # with the widest range of other types, such as Decimal. # # Hermite hermedomain = np.array([-1, 1]) # Hermite coefficients representing zero. hermezero = np.array([0]) # Hermite coefficients representing one. hermeone = np.array([1]) # Hermite coefficients representing the identity x. hermex = np.array([0, 1]) def hermeline(off, scl): """ Hermite series whose graph is a straight line. 
Parameters ---------- off, scl : scalars The specified line is given by ``off + scl*x``. Returns ------- y : ndarray This module's representation of the Hermite series for ``off + scl*x``. See Also -------- polyline, chebline Examples -------- >>> from numpy.polynomial.hermite_e import hermeline >>> from numpy.polynomial.hermite_e import hermeline, hermeval >>> hermeval(0,hermeline(3, 2)) 3.0 >>> hermeval(1,hermeline(3, 2)) 5.0 """ if scl != 0: return np.array([off, scl]) else: return np.array([off]) def hermefromroots(roots): """ Generate a HermiteE series with given roots. The function returns the coefficients of the polynomial .. math:: p(x) = (x - r_0) * (x - r_1) * ... * (x - r_n), in HermiteE form, where the `r_n` are the roots specified in `roots`. If a zero has multiplicity n, then it must appear in `roots` n times. For instance, if 2 is a root of multiplicity three and 3 is a root of multiplicity 2, then `roots` looks something like [2, 2, 2, 3, 3]. The roots can appear in any order. If the returned coefficients are `c`, then .. math:: p(x) = c_0 + c_1 * He_1(x) + ... + c_n * He_n(x) The coefficient of the last term is not generally 1 for monic polynomials in HermiteE form. Parameters ---------- roots : array_like Sequence containing the roots. Returns ------- out : ndarray 1-D array of coefficients. If all roots are real then `out` is a real array, if some of the roots are complex, then `out` is complex even if all the coefficients in the result are real (see Examples below). See Also -------- polyfromroots, legfromroots, lagfromroots, hermfromroots, chebfromroots. 
Examples -------- >>> from numpy.polynomial.hermite_e import hermefromroots, hermeval >>> coef = hermefromroots((-1, 0, 1)) >>> hermeval((-1, 0, 1), coef) array([ 0., 0., 0.]) >>> coef = hermefromroots((-1j, 1j)) >>> hermeval((-1j, 1j), coef) array([ 0.+0.j, 0.+0.j]) """ if len(roots) == 0: return np.ones(1) else: [roots] = pu.as_series([roots], trim=False) roots.sort() p = [hermeline(-r, 1) for r in roots] n = len(p) while n > 1: m, r = divmod(n, 2) tmp = [hermemul(p[i], p[i+m]) for i in range(m)] if r: tmp[0] = hermemul(tmp[0], p[-1]) p = tmp n = m return p[0] def hermeadd(c1, c2): """ Add one Hermite series to another. Returns the sum of two Hermite series `c1` + `c2`. The arguments are sequences of coefficients ordered from lowest order term to highest, i.e., [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``. Parameters ---------- c1, c2 : array_like 1-D arrays of Hermite series coefficients ordered from low to high. Returns ------- out : ndarray Array representing the Hermite series of their sum. See Also -------- hermesub, hermemulx, hermemul, hermediv, hermepow Notes ----- Unlike multiplication, division, etc., the sum of two Hermite series is a Hermite series (without having to "reproject" the result onto the basis set) so addition, just like that of "standard" polynomials, is simply "component-wise." Examples -------- >>> from numpy.polynomial.hermite_e import hermeadd >>> hermeadd([1, 2, 3], [1, 2, 3, 4]) array([ 2., 4., 6., 4.]) """ # c1, c2 are trimmed copies [c1, c2] = pu.as_series([c1, c2]) if len(c1) > len(c2): c1[:c2.size] += c2 ret = c1 else: c2[:c1.size] += c1 ret = c2 return pu.trimseq(ret) def hermesub(c1, c2): """ Subtract one Hermite series from another. Returns the difference of two Hermite series `c1` - `c2`. The sequences of coefficients are from lowest order term to highest, i.e., [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``. 
Parameters ---------- c1, c2 : array_like 1-D arrays of Hermite series coefficients ordered from low to high. Returns ------- out : ndarray Of Hermite series coefficients representing their difference. See Also -------- hermeadd, hermemulx, hermemul, hermediv, hermepow Notes ----- Unlike multiplication, division, etc., the difference of two Hermite series is a Hermite series (without having to "reproject" the result onto the basis set) so subtraction, just like that of "standard" polynomials, is simply "component-wise." Examples -------- >>> from numpy.polynomial.hermite_e import hermesub >>> hermesub([1, 2, 3, 4], [1, 2, 3]) array([ 0., 0., 0., 4.]) """ # c1, c2 are trimmed copies [c1, c2] = pu.as_series([c1, c2]) if len(c1) > len(c2): c1[:c2.size] -= c2 ret = c1 else: c2 = -c2 c2[:c1.size] += c1 ret = c2 return pu.trimseq(ret) def hermemulx(c): """Multiply a Hermite series by x. Multiply the Hermite series `c` by x, where x is the independent variable. Parameters ---------- c : array_like 1-D array of Hermite series coefficients ordered from low to high. Returns ------- out : ndarray Array representing the result of the multiplication. Notes ----- The multiplication uses the recursion relationship for Hermite polynomials in the form .. math:: xP_i(x) = (P_{i + 1}(x) + iP_{i - 1}(x))) Examples -------- >>> from numpy.polynomial.hermite_e import hermemulx >>> hermemulx([1, 2, 3]) array([ 2., 7., 2., 3.]) """ # c is a trimmed copy [c] = pu.as_series([c]) # The zero series needs special treatment if len(c) == 1 and c[0] == 0: return c prd = np.empty(len(c) + 1, dtype=c.dtype) prd[0] = c[0]*0 prd[1] = c[0] for i in range(1, len(c)): prd[i + 1] = c[i] prd[i - 1] += c[i]*i return prd def hermemul(c1, c2): """ Multiply one Hermite series by another. Returns the product of two Hermite series `c1` * `c2`. The arguments are sequences of coefficients, from lowest order "term" to highest, e.g., [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``. 
Parameters ---------- c1, c2 : array_like 1-D arrays of Hermite series coefficients ordered from low to high. Returns ------- out : ndarray Of Hermite series coefficients representing their product. See Also -------- hermeadd, hermesub, hermemulx, hermediv, hermepow Notes ----- In general, the (polynomial) product of two C-series results in terms that are not in the Hermite polynomial basis set. Thus, to express the product as a Hermite series, it is necessary to "reproject" the product onto said basis set, which may produce "unintuitive" (but correct) results; see Examples section below. Examples -------- >>> from numpy.polynomial.hermite_e import hermemul >>> hermemul([1, 2, 3], [0, 1, 2]) array([ 14., 15., 28., 7., 6.]) """ # s1, s2 are trimmed copies [c1, c2] = pu.as_series([c1, c2]) if len(c1) > len(c2): c = c2 xs = c1 else: c = c1 xs = c2 if len(c) == 1: c0 = c[0]*xs c1 = 0 elif len(c) == 2: c0 = c[0]*xs c1 = c[1]*xs else: nd = len(c) c0 = c[-2]*xs c1 = c[-1]*xs for i in range(3, len(c) + 1): tmp = c0 nd = nd - 1 c0 = hermesub(c[-i]*xs, c1*(nd - 1)) c1 = hermeadd(tmp, hermemulx(c1)) return hermeadd(c0, hermemulx(c1)) def hermediv(c1, c2): """ Divide one Hermite series by another. Returns the quotient-with-remainder of two Hermite series `c1` / `c2`. The arguments are sequences of coefficients from lowest order "term" to highest, e.g., [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``. Parameters ---------- c1, c2 : array_like 1-D arrays of Hermite series coefficients ordered from low to high. Returns ------- [quo, rem] : ndarrays Of Hermite series coefficients representing the quotient and remainder. See Also -------- hermeadd, hermesub, hermemulx, hermemul, hermepow Notes ----- In general, the (polynomial) division of one Hermite series by another results in quotient and remainder terms that are not in the Hermite polynomial basis set. 
Thus, to express these results as a Hermite series, it is necessary to "reproject" the results onto the Hermite basis set, which may produce "unintuitive" (but correct) results; see Examples section below. Examples -------- >>> from numpy.polynomial.hermite_e import hermediv >>> hermediv([ 14., 15., 28., 7., 6.], [0, 1, 2]) (array([ 1., 2., 3.]), array([ 0.])) >>> hermediv([ 15., 17., 28., 7., 6.], [0, 1, 2]) (array([ 1., 2., 3.]), array([ 1., 2.])) """ # c1, c2 are trimmed copies [c1, c2] = pu.as_series([c1, c2]) if c2[-1] == 0: raise ZeroDivisionError() lc1 = len(c1) lc2 = len(c2) if lc1 < lc2: return c1[:1]*0, c1 elif lc2 == 1: return c1/c2[-1], c1[:1]*0 else: quo = np.empty(lc1 - lc2 + 1, dtype=c1.dtype) rem = c1 for i in range(lc1 - lc2, - 1, -1): p = hermemul([0]*i + [1], c2) q = rem[-1]/p[-1] rem = rem[:-1] - q*p[:-1] quo[i] = q return quo, pu.trimseq(rem) def hermepow(c, pow, maxpower=16): """Raise a Hermite series to a power. Returns the Hermite series `c` raised to the power `pow`. The argument `c` is a sequence of coefficients ordered from low to high. i.e., [1,2,3] is the series ``P_0 + 2*P_1 + 3*P_2.`` Parameters ---------- c : array_like 1-D array of Hermite series coefficients ordered from low to high. pow : integer Power to which the series will be raised maxpower : integer, optional Maximum power allowed. This is mainly to limit growth of the series to unmanageable size. Default is 16 Returns ------- coef : ndarray Hermite series of power. 
See Also -------- hermeadd, hermesub, hermemulx, hermemul, hermediv Examples -------- >>> from numpy.polynomial.hermite_e import hermepow >>> hermepow([1, 2, 3], 2) array([ 23., 28., 46., 12., 9.]) """ # c is a trimmed copy [c] = pu.as_series([c]) power = int(pow) if power != pow or power < 0: raise ValueError("Power must be a non-negative integer.") elif maxpower is not None and power > maxpower: raise ValueError("Power is too large") elif power == 0: return np.array([1], dtype=c.dtype) elif power == 1: return c else: # This can be made more efficient by using powers of two # in the usual way. prd = c for i in range(2, power + 1): prd = hermemul(prd, c) return prd def hermeder(c, m=1, scl=1, axis=0): """ Differentiate a Hermite_e series. Returns the series coefficients `c` differentiated `m` times along `axis`. At each iteration the result is multiplied by `scl` (the scaling factor is for use in a linear change of variable). The argument `c` is an array of coefficients from low to high degree along each axis, e.g., [1,2,3] represents the series ``1*He_0 + 2*He_1 + 3*He_2`` while [[1,2],[1,2]] represents ``1*He_0(x)*He_0(y) + 1*He_1(x)*He_0(y) + 2*He_0(x)*He_1(y) + 2*He_1(x)*He_1(y)`` if axis=0 is ``x`` and axis=1 is ``y``. Parameters ---------- c : array_like Array of Hermite_e series coefficients. If `c` is multidimensional the different axis correspond to different variables with the degree in each axis given by the corresponding index. m : int, optional Number of derivatives taken, must be non-negative. (Default: 1) scl : scalar, optional Each differentiation is multiplied by `scl`. The end result is multiplication by ``scl**m``. This is for use in a linear change of variable. (Default: 1) axis : int, optional Axis over which the derivative is taken. (Default: 0). .. versionadded:: 1.7.0 Returns ------- der : ndarray Hermite series of the derivative. 
See Also -------- hermeint Notes ----- In general, the result of differentiating a Hermite series does not resemble the same operation on a power series. Thus the result of this function may be "unintuitive," albeit correct; see Examples section below. Examples -------- >>> from numpy.polynomial.hermite_e import hermeder >>> hermeder([ 1., 1., 1., 1.]) array([ 1., 2., 3.]) >>> hermeder([-0.25, 1., 1./2., 1./3., 1./4 ], m=2) array([ 1., 2., 3.]) """ c = np.array(c, ndmin=1, copy=1) if c.dtype.char in '?bBhHiIlLqQpP': c = c.astype(np.double) cnt, iaxis = [int(t) for t in [m, axis]] if cnt != m: raise ValueError("The order of derivation must be integer") if cnt < 0: raise ValueError("The order of derivation must be non-negative") if iaxis != axis: raise ValueError("The axis must be integer") iaxis = normalize_axis_index(iaxis, c.ndim) if cnt == 0: return c c = np.moveaxis(c, iaxis, 0) n = len(c) if cnt >= n: return c[:1]*0 else: for i in range(cnt): n = n - 1 c *= scl der = np.empty((n,) + c.shape[1:], dtype=c.dtype) for j in range(n, 0, -1): der[j - 1] = j*c[j] c = der c = np.moveaxis(c, 0, iaxis) return c def hermeint(c, m=1, k=[], lbnd=0, scl=1, axis=0): """ Integrate a Hermite_e series. Returns the Hermite_e series coefficients `c` integrated `m` times from `lbnd` along `axis`. At each iteration the resulting series is **multiplied** by `scl` and an integration constant, `k`, is added. The scaling factor is for use in a linear change of variable. ("Buyer beware": note that, depending on what one is doing, one may want `scl` to be the reciprocal of what one might expect; for more information, see the Notes section below.) The argument `c` is an array of coefficients from low to high degree along each axis, e.g., [1,2,3] represents the series ``H_0 + 2*H_1 + 3*H_2`` while [[1,2],[1,2]] represents ``1*H_0(x)*H_0(y) + 1*H_1(x)*H_0(y) + 2*H_0(x)*H_1(y) + 2*H_1(x)*H_1(y)`` if axis=0 is ``x`` and axis=1 is ``y``. 
Parameters ---------- c : array_like Array of Hermite_e series coefficients. If c is multidimensional the different axis correspond to different variables with the degree in each axis given by the corresponding index. m : int, optional Order of integration, must be positive. (Default: 1) k : {[], list, scalar}, optional Integration constant(s). The value of the first integral at ``lbnd`` is the first value in the list, the value of the second integral at ``lbnd`` is the second value, etc. If ``k == []`` (the default), all constants are set to zero. If ``m == 1``, a single scalar can be given instead of a list. lbnd : scalar, optional The lower bound of the integral. (Default: 0) scl : scalar, optional Following each integration the result is *multiplied* by `scl` before the integration constant is added. (Default: 1) axis : int, optional Axis over which the integral is taken. (Default: 0). .. versionadded:: 1.7.0 Returns ------- S : ndarray Hermite_e series coefficients of the integral. Raises ------ ValueError If ``m < 0``, ``len(k) > m``, ``np.ndim(lbnd) != 0``, or ``np.ndim(scl) != 0``. See Also -------- hermeder Notes ----- Note that the result of each integration is *multiplied* by `scl`. Why is this important to note? Say one is making a linear change of variable :math:`u = ax + b` in an integral relative to `x`. Then :math:`dx = du/a`, so one will need to set `scl` equal to :math:`1/a` - perhaps not what one would have first thought. Also note that, in general, the result of integrating a C-series needs to be "reprojected" onto the C-series basis set. Thus, typically, the result of this function is "unintuitive," albeit correct; see Examples section below. Examples -------- >>> from numpy.polynomial.hermite_e import hermeint >>> hermeint([1, 2, 3]) # integrate once, value 0 at 0. array([ 1., 1., 1., 1.]) >>> hermeint([1, 2, 3], m=2) # integrate twice, value & deriv 0 at 0 array([-0.25 , 1. 
, 0.5 , 0.33333333, 0.25 ]) >>> hermeint([1, 2, 3], k=1) # integrate once, value 1 at 0. array([ 2., 1., 1., 1.]) >>> hermeint([1, 2, 3], lbnd=-1) # integrate once, value 0 at -1 array([-1., 1., 1., 1.]) >>> hermeint([1, 2, 3], m=2, k=[1, 2], lbnd=-1) array([ 1.83333333, 0. , 0.5 , 0.33333333, 0.25 ]) """ c = np.array(c, ndmin=1, copy=1) if c.dtype.char in '?bBhHiIlLqQpP': c = c.astype(np.double) if not np.iterable(k): k = [k] cnt, iaxis = [int(t) for t in [m, axis]] if cnt != m: raise ValueError("The order of integration must be integer") if cnt < 0: raise ValueError("The order of integration must be non-negative") if len(k) > cnt: raise ValueError("Too many integration constants") if np.ndim(lbnd) != 0: raise ValueError("lbnd must be a scalar.") if np.ndim(scl) != 0: raise ValueError("scl must be a scalar.") if iaxis != axis: raise ValueError("The axis must be integer") iaxis = normalize_axis_index(iaxis, c.ndim) if cnt == 0: return c c = np.moveaxis(c, iaxis, 0) k = list(k) + [0]*(cnt - len(k)) for i in range(cnt): n = len(c) c *= scl if n == 1 and np.all(c[0] == 0): c[0] += k[i] else: tmp = np.empty((n + 1,) + c.shape[1:], dtype=c.dtype) tmp[0] = c[0]*0 tmp[1] = c[0] for j in range(1, n): tmp[j + 1] = c[j]/(j + 1) tmp[0] += k[i] - hermeval(lbnd, tmp) c = tmp c = np.moveaxis(c, 0, iaxis) return c def hermeval(x, c, tensor=True): """ Evaluate an HermiteE series at points x. If `c` is of length `n + 1`, this function returns the value: .. math:: p(x) = c_0 * He_0(x) + c_1 * He_1(x) + ... + c_n * He_n(x) The parameter `x` is converted to an array only if it is a tuple or a list, otherwise it is treated as a scalar. In either case, either `x` or its elements must support multiplication and addition both with themselves and with the elements of `c`. If `c` is a 1-D array, then `p(x)` will have the same shape as `x`. If `c` is multidimensional, then the shape of the result depends on the value of `tensor`. If `tensor` is true the shape will be c.shape[1:] + x.shape. 
If `tensor` is false the shape will be c.shape[1:]. Note that scalars have shape (,). Trailing zeros in the coefficients will be used in the evaluation, so they should be avoided if efficiency is a concern. Parameters ---------- x : array_like, compatible object If `x` is a list or tuple, it is converted to an ndarray, otherwise it is left unchanged and treated as a scalar. In either case, `x` or its elements must support addition and multiplication with with themselves and with the elements of `c`. c : array_like Array of coefficients ordered so that the coefficients for terms of degree n are contained in c[n]. If `c` is multidimensional the remaining indices enumerate multiple polynomials. In the two dimensional case the coefficients may be thought of as stored in the columns of `c`. tensor : boolean, optional If True, the shape of the coefficient array is extended with ones on the right, one for each dimension of `x`. Scalars have dimension 0 for this action. The result is that every column of coefficients in `c` is evaluated for every element of `x`. If False, `x` is broadcast over the columns of `c` for the evaluation. This keyword is useful when `c` is multidimensional. The default value is True. .. versionadded:: 1.7.0 Returns ------- values : ndarray, algebra_like The shape of the return value is described above. See Also -------- hermeval2d, hermegrid2d, hermeval3d, hermegrid3d Notes ----- The evaluation uses Clenshaw recursion, aka synthetic division. 
Examples -------- >>> from numpy.polynomial.hermite_e import hermeval >>> coef = [1,2,3] >>> hermeval(1, coef) 3.0 >>> hermeval([[1,2],[3,4]], coef) array([[ 3., 14.], [ 31., 54.]]) """ c = np.array(c, ndmin=1, copy=0) if c.dtype.char in '?bBhHiIlLqQpP': c = c.astype(np.double) if isinstance(x, (tuple, list)): x = np.asarray(x) if isinstance(x, np.ndarray) and tensor: c = c.reshape(c.shape + (1,)*x.ndim) if len(c) == 1: c0 = c[0] c1 = 0 elif len(c) == 2: c0 = c[0] c1 = c[1] else: nd = len(c) c0 = c[-2] c1 = c[-1] for i in range(3, len(c) + 1): tmp = c0 nd = nd - 1 c0 = c[-i] - c1*(nd - 1) c1 = tmp + c1*x return c0 + c1*x def hermeval2d(x, y, c): """ Evaluate a 2-D HermiteE series at points (x, y). This function returns the values: .. math:: p(x,y) = \\sum_{i,j} c_{i,j} * He_i(x) * He_j(y) The parameters `x` and `y` are converted to arrays only if they are tuples or a lists, otherwise they are treated as a scalars and they must have the same shape after conversion. In either case, either `x` and `y` or their elements must support multiplication and addition both with themselves and with the elements of `c`. If `c` is a 1-D array a one is implicitly appended to its shape to make it 2-D. The shape of the result will be c.shape[2:] + x.shape. Parameters ---------- x, y : array_like, compatible objects The two dimensional series is evaluated at the points `(x, y)`, where `x` and `y` must have the same shape. If `x` or `y` is a list or tuple, it is first converted to an ndarray, otherwise it is left unchanged and if it isn't an ndarray it is treated as a scalar. c : array_like Array of coefficients ordered so that the coefficient of the term of multi-degree i,j is contained in ``c[i,j]``. If `c` has dimension greater than two the remaining indices enumerate multiple sets of coefficients. Returns ------- values : ndarray, compatible object The values of the two dimensional polynomial at points formed with pairs of corresponding values from `x` and `y`. 
See Also -------- hermeval, hermegrid2d, hermeval3d, hermegrid3d Notes ----- .. versionadded:: 1.7.0 """ try: x, y = np.array((x, y), copy=0) except Exception: raise ValueError('x, y are incompatible') c = hermeval(x, c) c = hermeval(y, c, tensor=False) return c def hermegrid2d(x, y, c): """ Evaluate a 2-D HermiteE series on the Cartesian product of x and y. This function returns the values: .. math:: p(a,b) = \\sum_{i,j} c_{i,j} * H_i(a) * H_j(b) where the points `(a, b)` consist of all pairs formed by taking `a` from `x` and `b` from `y`. The resulting points form a grid with `x` in the first dimension and `y` in the second. The parameters `x` and `y` are converted to arrays only if they are tuples or a lists, otherwise they are treated as a scalars. In either case, either `x` and `y` or their elements must support multiplication and addition both with themselves and with the elements of `c`. If `c` has fewer than two dimensions, ones are implicitly appended to its shape to make it 2-D. The shape of the result will be c.shape[2:] + x.shape. Parameters ---------- x, y : array_like, compatible objects The two dimensional series is evaluated at the points in the Cartesian product of `x` and `y`. If `x` or `y` is a list or tuple, it is first converted to an ndarray, otherwise it is left unchanged and, if it isn't an ndarray, it is treated as a scalar. c : array_like Array of coefficients ordered so that the coefficients for terms of degree i,j are contained in ``c[i,j]``. If `c` has dimension greater than two the remaining indices enumerate multiple sets of coefficients. Returns ------- values : ndarray, compatible object The values of the two dimensional polynomial at points in the Cartesian product of `x` and `y`. See Also -------- hermeval, hermeval2d, hermeval3d, hermegrid3d Notes ----- .. versionadded:: 1.7.0 """ c = hermeval(x, c) c = hermeval(y, c) return c def hermeval3d(x, y, z, c): """ Evaluate a 3-D Hermite_e series at points (x, y, z). 
This function returns the values: .. math:: p(x,y,z) = \\sum_{i,j,k} c_{i,j,k} * He_i(x) * He_j(y) * He_k(z) The parameters `x`, `y`, and `z` are converted to arrays only if they are tuples or a lists, otherwise they are treated as a scalars and they must have the same shape after conversion. In either case, either `x`, `y`, and `z` or their elements must support multiplication and addition both with themselves and with the elements of `c`. If `c` has fewer than 3 dimensions, ones are implicitly appended to its shape to make it 3-D. The shape of the result will be c.shape[3:] + x.shape. Parameters ---------- x, y, z : array_like, compatible object The three dimensional series is evaluated at the points `(x, y, z)`, where `x`, `y`, and `z` must have the same shape. If any of `x`, `y`, or `z` is a list or tuple, it is first converted to an ndarray, otherwise it is left unchanged and if it isn't an ndarray it is treated as a scalar. c : array_like Array of coefficients ordered so that the coefficient of the term of multi-degree i,j,k is contained in ``c[i,j,k]``. If `c` has dimension greater than 3 the remaining indices enumerate multiple sets of coefficients. Returns ------- values : ndarray, compatible object The values of the multidimensional polynomial on points formed with triples of corresponding values from `x`, `y`, and `z`. See Also -------- hermeval, hermeval2d, hermegrid2d, hermegrid3d Notes ----- .. versionadded:: 1.7.0 """ try: x, y, z = np.array((x, y, z), copy=0) except Exception: raise ValueError('x, y, z are incompatible') c = hermeval(x, c) c = hermeval(y, c, tensor=False) c = hermeval(z, c, tensor=False) return c def hermegrid3d(x, y, z, c): """ Evaluate a 3-D HermiteE series on the Cartesian product of x, y, and z. This function returns the values: .. math:: p(a,b,c) = \\sum_{i,j,k} c_{i,j,k} * He_i(a) * He_j(b) * He_k(c) where the points `(a, b, c)` consist of all triples formed by taking `a` from `x`, `b` from `y`, and `c` from `z`. 
The resulting points form a grid with `x` in the first dimension, `y` in the second, and `z` in the third. The parameters `x`, `y`, and `z` are converted to arrays only if they are tuples or a lists, otherwise they are treated as a scalars. In either case, either `x`, `y`, and `z` or their elements must support multiplication and addition both with themselves and with the elements of `c`. If `c` has fewer than three dimensions, ones are implicitly appended to its shape to make it 3-D. The shape of the result will be c.shape[3:] + x.shape + y.shape + z.shape. Parameters ---------- x, y, z : array_like, compatible objects The three dimensional series is evaluated at the points in the Cartesian product of `x`, `y`, and `z`. If `x`,`y`, or `z` is a list or tuple, it is first converted to an ndarray, otherwise it is left unchanged and, if it isn't an ndarray, it is treated as a scalar. c : array_like Array of coefficients ordered so that the coefficients for terms of degree i,j are contained in ``c[i,j]``. If `c` has dimension greater than two the remaining indices enumerate multiple sets of coefficients. Returns ------- values : ndarray, compatible object The values of the two dimensional polynomial at points in the Cartesian product of `x` and `y`. See Also -------- hermeval, hermeval2d, hermegrid2d, hermeval3d Notes ----- .. versionadded:: 1.7.0 """ c = hermeval(x, c) c = hermeval(y, c) c = hermeval(z, c) return c def hermevander(x, deg): """Pseudo-Vandermonde matrix of given degree. Returns the pseudo-Vandermonde matrix of degree `deg` and sample points `x`. The pseudo-Vandermonde matrix is defined by .. math:: V[..., i] = He_i(x), where `0 <= i <= deg`. The leading indices of `V` index the elements of `x` and the last index is the degree of the HermiteE polynomial. If `c` is a 1-D array of coefficients of length `n + 1` and `V` is the array ``V = hermevander(x, n)``, then ``np.dot(V, c)`` and ``hermeval(x, c)`` are the same up to roundoff. 
This equivalence is useful both for least squares fitting and for the evaluation of a large number of HermiteE series of the same degree and sample points. Parameters ---------- x : array_like Array of points. The dtype is converted to float64 or complex128 depending on whether any of the elements are complex. If `x` is scalar it is converted to a 1-D array. deg : int Degree of the resulting matrix. Returns ------- vander : ndarray The pseudo-Vandermonde matrix. The shape of the returned matrix is ``x.shape + (deg + 1,)``, where The last index is the degree of the corresponding HermiteE polynomial. The dtype will be the same as the converted `x`. Examples -------- >>> from numpy.polynomial.hermite_e import hermevander >>> x = np.array([-1, 0, 1]) >>> hermevander(x, 3) array([[ 1., -1., 0., 2.], [ 1., 0., -1., -0.], [ 1., 1., 0., -2.]]) """ ideg = int(deg) if ideg != deg: raise ValueError("deg must be integer") if ideg < 0: raise ValueError("deg must be non-negative") x = np.array(x, copy=0, ndmin=1) + 0.0 dims = (ideg + 1,) + x.shape dtyp = x.dtype v = np.empty(dims, dtype=dtyp) v[0] = x*0 + 1 if ideg > 0: v[1] = x for i in range(2, ideg + 1): v[i] = (v[i-1]*x - v[i-2]*(i - 1)) return np.moveaxis(v, 0, -1) def hermevander2d(x, y, deg): """Pseudo-Vandermonde matrix of given degrees. Returns the pseudo-Vandermonde matrix of degrees `deg` and sample points `(x, y)`. The pseudo-Vandermonde matrix is defined by .. math:: V[..., (deg[1] + 1)*i + j] = He_i(x) * He_j(y), where `0 <= i <= deg[0]` and `0 <= j <= deg[1]`. The leading indices of `V` index the points `(x, y)` and the last index encodes the degrees of the HermiteE polynomials. If ``V = hermevander2d(x, y, [xdeg, ydeg])``, then the columns of `V` correspond to the elements of a 2-D coefficient array `c` of shape (xdeg + 1, ydeg + 1) in the order .. math:: c_{00}, c_{01}, c_{02} ... , c_{10}, c_{11}, c_{12} ... and ``np.dot(V, c.flat)`` and ``hermeval2d(x, y, c)`` will be the same up to roundoff. 
This equivalence is useful both for least squares fitting and for the evaluation of a large number of 2-D HermiteE series of the same degrees and sample points. Parameters ---------- x, y : array_like Arrays of point coordinates, all of the same shape. The dtypes will be converted to either float64 or complex128 depending on whether any of the elements are complex. Scalars are converted to 1-D arrays. deg : list of ints List of maximum degrees of the form [x_deg, y_deg]. Returns ------- vander2d : ndarray The shape of the returned matrix is ``x.shape + (order,)``, where :math:`order = (deg[0]+1)*(deg([1]+1)`. The dtype will be the same as the converted `x` and `y`. See Also -------- hermevander, hermevander3d. hermeval2d, hermeval3d Notes ----- .. versionadded:: 1.7.0 """ ideg = [int(d) for d in deg] is_valid = [id == d and id >= 0 for id, d in zip(ideg, deg)] if is_valid != [1, 1]: raise ValueError("degrees must be non-negative integers") degx, degy = ideg x, y = np.array((x, y), copy=0) + 0.0 vx = hermevander(x, degx) vy = hermevander(y, degy) v = vx[..., None]*vy[..., None,:] return v.reshape(v.shape[:-2] + (-1,)) def hermevander3d(x, y, z, deg): """Pseudo-Vandermonde matrix of given degrees. Returns the pseudo-Vandermonde matrix of degrees `deg` and sample points `(x, y, z)`. If `l, m, n` are the given degrees in `x, y, z`, then Hehe pseudo-Vandermonde matrix is defined by .. math:: V[..., (m+1)(n+1)i + (n+1)j + k] = He_i(x)*He_j(y)*He_k(z), where `0 <= i <= l`, `0 <= j <= m`, and `0 <= j <= n`. The leading indices of `V` index the points `(x, y, z)` and the last index encodes the degrees of the HermiteE polynomials. If ``V = hermevander3d(x, y, z, [xdeg, ydeg, zdeg])``, then the columns of `V` correspond to the elements of a 3-D coefficient array `c` of shape (xdeg + 1, ydeg + 1, zdeg + 1) in the order .. math:: c_{000}, c_{001}, c_{002},... , c_{010}, c_{011}, c_{012},... 
and ``np.dot(V, c.flat)`` and ``hermeval3d(x, y, z, c)`` will be the same up to roundoff. This equivalence is useful both for least squares fitting and for the evaluation of a large number of 3-D HermiteE series of the same degrees and sample points. Parameters ---------- x, y, z : array_like Arrays of point coordinates, all of the same shape. The dtypes will be converted to either float64 or complex128 depending on whether any of the elements are complex. Scalars are converted to 1-D arrays. deg : list of ints List of maximum degrees of the form [x_deg, y_deg, z_deg]. Returns ------- vander3d : ndarray The shape of the returned matrix is ``x.shape + (order,)``, where :math:`order = (deg[0]+1)*(deg([1]+1)*(deg[2]+1)`. The dtype will be the same as the converted `x`, `y`, and `z`. See Also -------- hermevander, hermevander3d. hermeval2d, hermeval3d Notes ----- .. versionadded:: 1.7.0 """ ideg = [int(d) for d in deg] is_valid = [id == d and id >= 0 for id, d in zip(ideg, deg)] if is_valid != [1, 1, 1]: raise ValueError("degrees must be non-negative integers") degx, degy, degz = ideg x, y, z = np.array((x, y, z), copy=0) + 0.0 vx = hermevander(x, degx) vy = hermevander(y, degy) vz = hermevander(z, degz) v = vx[..., None, None]*vy[..., None,:, None]*vz[..., None, None,:] return v.reshape(v.shape[:-3] + (-1,)) def hermefit(x, y, deg, rcond=None, full=False, w=None): """ Least squares fit of Hermite series to data. Return the coefficients of a HermiteE series of degree `deg` that is the least squares fit to the data values `y` given at points `x`. If `y` is 1-D the returned coefficients will also be 1-D. If `y` is 2-D multiple fits are done, one for each column of `y`, and the resulting coefficients are stored in the corresponding columns of a 2-D return. The fitted polynomial(s) are in the form .. math:: p(x) = c_0 + c_1 * He_1(x) + ... + c_n * He_n(x), where `n` is `deg`. 
Parameters ---------- x : array_like, shape (M,) x-coordinates of the M sample points ``(x[i], y[i])``. y : array_like, shape (M,) or (M, K) y-coordinates of the sample points. Several data sets of sample points sharing the same x-coordinates can be fitted at once by passing in a 2D-array that contains one dataset per column. deg : int or 1-D array_like Degree(s) of the fitting polynomials. If `deg` is a single integer all terms up to and including the `deg`'th term are included in the fit. For NumPy versions >= 1.11.0 a list of integers specifying the degrees of the terms to include may be used instead. rcond : float, optional Relative condition number of the fit. Singular values smaller than this relative to the largest singular value will be ignored. The default value is len(x)*eps, where eps is the relative precision of the float type, about 2e-16 in most cases. full : bool, optional Switch determining nature of return value. When it is False (the default) just the coefficients are returned, when True diagnostic information from the singular value decomposition is also returned. w : array_like, shape (`M`,), optional Weights. If not None, the contribution of each point ``(x[i],y[i])`` to the fit is weighted by `w[i]`. Ideally the weights are chosen so that the errors of the products ``w[i]*y[i]`` all have the same variance. The default value is None. Returns ------- coef : ndarray, shape (M,) or (M, K) Hermite coefficients ordered from low to high. If `y` was 2-D, the coefficients for the data in column k of `y` are in column `k`. [residuals, rank, singular_values, rcond] : list These values are only returned if `full` = True resid -- sum of squared residuals of the least squares fit rank -- the numerical rank of the scaled Vandermonde matrix sv -- singular values of the scaled Vandermonde matrix rcond -- value of `rcond`. For more details, see `linalg.lstsq`. Warns ----- RankWarning The rank of the coefficient matrix in the least-squares fit is deficient. 
The warning is only raised if `full` = False. The warnings can be turned off by >>> import warnings >>> warnings.simplefilter('ignore', RankWarning) See Also -------- chebfit, legfit, polyfit, hermfit, polyfit hermeval : Evaluates a Hermite series. hermevander : pseudo Vandermonde matrix of Hermite series. hermeweight : HermiteE weight function. linalg.lstsq : Computes a least-squares fit from the matrix. scipy.interpolate.UnivariateSpline : Computes spline fits. Notes ----- The solution is the coefficients of the HermiteE series `p` that minimizes the sum of the weighted squared errors .. math:: E = \\sum_j w_j^2 * |y_j - p(x_j)|^2, where the :math:`w_j` are the weights. This problem is solved by setting up the (typically) overdetermined matrix equation .. math:: V(x) * c = w * y, where `V` is the pseudo Vandermonde matrix of `x`, the elements of `c` are the coefficients to be solved for, and the elements of `y` are the observed values. This equation is then solved using the singular value decomposition of `V`. If some of the singular values of `V` are so small that they are neglected, then a `RankWarning` will be issued. This means that the coefficient values may be poorly determined. Using a lower order fit will usually get rid of the warning. The `rcond` parameter can also be set to a value smaller than its default, but the resulting fit may be spurious and have large contributions from roundoff error. Fits using HermiteE series are probably most useful when the data can be approximated by ``sqrt(w(x)) * p(x)``, where `w(x)` is the HermiteE weight. In that case the weight ``sqrt(w(x[i])`` should be used together with data values ``y[i]/sqrt(w(x[i])``. The weight function is available as `hermeweight`. References ---------- .. 
[1] Wikipedia, "Curve fitting", https://en.wikipedia.org/wiki/Curve_fitting Examples -------- >>> from numpy.polynomial.hermite_e import hermefit, hermeval >>> x = np.linspace(-10, 10) >>> err = np.random.randn(len(x))/10 >>> y = hermeval(x, [1, 2, 3]) + err >>> hermefit(x, y, 2) array([ 1.01690445, 1.99951418, 2.99948696]) """ x = np.asarray(x) + 0.0 y = np.asarray(y) + 0.0 deg = np.asarray(deg) # check arguments. if deg.ndim > 1 or deg.dtype.kind not in 'iu' or deg.size == 0: raise TypeError("deg must be an int or non-empty 1-D array of int") if deg.min() < 0: raise ValueError("expected deg >= 0") if x.ndim != 1: raise TypeError("expected 1D vector for x") if x.size == 0: raise TypeError("expected non-empty vector for x") if y.ndim < 1 or y.ndim > 2: raise TypeError("expected 1D or 2D array for y") if len(x) != len(y): raise TypeError("expected x and y to have same length") if deg.ndim == 0: lmax = deg order = lmax + 1 van = hermevander(x, lmax) else: deg = np.sort(deg) lmax = deg[-1] order = len(deg) van = hermevander(x, lmax)[:, deg] # set up the least squares matrices in transposed form lhs = van.T rhs = y.T if w is not None: w = np.asarray(w) + 0.0 if w.ndim != 1: raise TypeError("expected 1D vector for w") if len(x) != len(w): raise TypeError("expected x and w to have same length") # apply weights. Don't use inplace operations as they # can cause problems with NA. lhs = lhs * w rhs = rhs * w # set rcond if rcond is None: rcond = len(x)*np.finfo(x.dtype).eps # Determine the norms of the design matrix columns. if issubclass(lhs.dtype.type, np.complexfloating): scl = np.sqrt((np.square(lhs.real) + np.square(lhs.imag)).sum(1)) else: scl = np.sqrt(np.square(lhs).sum(1)) scl[scl == 0] = 1 # Solve the least squares problem. 
c, resids, rank, s = la.lstsq(lhs.T/scl, rhs.T, rcond) c = (c.T/scl).T # Expand c to include non-fitted coefficients which are set to zero if deg.ndim > 0: if c.ndim == 2: cc = np.zeros((lmax+1, c.shape[1]), dtype=c.dtype) else: cc = np.zeros(lmax+1, dtype=c.dtype) cc[deg] = c c = cc # warn on rank reduction if rank != order and not full: msg = "The fit may be poorly conditioned" warnings.warn(msg, pu.RankWarning, stacklevel=2) if full: return c, [resids, rank, s, rcond] else: return c def hermecompanion(c): """ Return the scaled companion matrix of c. The basis polynomials are scaled so that the companion matrix is symmetric when `c` is an HermiteE basis polynomial. This provides better eigenvalue estimates than the unscaled case and for basis polynomials the eigenvalues are guaranteed to be real if `numpy.linalg.eigvalsh` is used to obtain them. Parameters ---------- c : array_like 1-D array of HermiteE series coefficients ordered from low to high degree. Returns ------- mat : ndarray Scaled companion matrix of dimensions (deg, deg). Notes ----- .. versionadded:: 1.7.0 """ # c is a trimmed copy [c] = pu.as_series([c]) if len(c) < 2: raise ValueError('Series must have maximum degree of at least 1.') if len(c) == 2: return np.array([[-c[0]/c[1]]]) n = len(c) - 1 mat = np.zeros((n, n), dtype=c.dtype) scl = np.hstack((1., 1./np.sqrt(np.arange(n - 1, 0, -1)))) scl = np.multiply.accumulate(scl)[::-1] top = mat.reshape(-1)[1::n+1] bot = mat.reshape(-1)[n::n+1] top[...] = np.sqrt(np.arange(1, n)) bot[...] = top mat[:, -1] -= scl*c[:-1]/c[-1] return mat def hermeroots(c): """ Compute the roots of a HermiteE series. Return the roots (a.k.a. "zeros") of the polynomial .. math:: p(x) = \\sum_i c[i] * He_i(x). Parameters ---------- c : 1-D array_like 1-D array of coefficients. Returns ------- out : ndarray Array of the roots of the series. If all the roots are real, then `out` is also real, otherwise it is complex. 
See Also -------- polyroots, legroots, lagroots, hermroots, chebroots Notes ----- The root estimates are obtained as the eigenvalues of the companion matrix, Roots far from the origin of the complex plane may have large errors due to the numerical instability of the series for such values. Roots with multiplicity greater than 1 will also show larger errors as the value of the series near such points is relatively insensitive to errors in the roots. Isolated roots near the origin can be improved by a few iterations of Newton's method. The HermiteE series basis polynomials aren't powers of `x` so the results of this function may seem unintuitive. Examples -------- >>> from numpy.polynomial.hermite_e import hermeroots, hermefromroots >>> coef = hermefromroots([-1, 0, 1]) >>> coef array([ 0., 2., 0., 1.]) >>> hermeroots(coef) array([-1., 0., 1.]) """ # c is a trimmed copy [c] = pu.as_series([c]) if len(c) <= 1: return np.array([], dtype=c.dtype) if len(c) == 2: return np.array([-c[0]/c[1]]) m = hermecompanion(c) r = la.eigvals(m) r.sort() return r def _normed_hermite_e_n(x, n): """ Evaluate a normalized HermiteE polynomial. Compute the value of the normalized HermiteE polynomial of degree ``n`` at the points ``x``. Parameters ---------- x : ndarray of double. Points at which to evaluate the function n : int Degree of the normalized HermiteE function to be evaluated. Returns ------- values : ndarray The shape of the return value is described above. Notes ----- .. versionadded:: 1.10.0 This function is needed for finding the Gauss points and integration weights for high degrees. The values of the standard HermiteE functions overflow when n >= 207. """ if n == 0: return np.full(x.shape, 1/np.sqrt(np.sqrt(2*np.pi))) c0 = 0. c1 = 1./np.sqrt(np.sqrt(2*np.pi)) nd = float(n) for i in range(n - 1): tmp = c0 c0 = -c1*np.sqrt((nd - 1.)/nd) c1 = tmp + c1*x*np.sqrt(1./nd) nd = nd - 1.0 return c0 + c1*x def hermegauss(deg): """ Gauss-HermiteE quadrature. 
Computes the sample points and weights for Gauss-HermiteE quadrature. These sample points and weights will correctly integrate polynomials of degree :math:`2*deg - 1` or less over the interval :math:`[-\\inf, \\inf]` with the weight function :math:`f(x) = \\exp(-x^2/2)`. Parameters ---------- deg : int Number of sample points and weights. It must be >= 1. Returns ------- x : ndarray 1-D ndarray containing the sample points. y : ndarray 1-D ndarray containing the weights. Notes ----- .. versionadded:: 1.7.0 The results have only been tested up to degree 100, higher degrees may be problematic. The weights are determined by using the fact that .. math:: w_k = c / (He'_n(x_k) * He_{n-1}(x_k)) where :math:`c` is a constant independent of :math:`k` and :math:`x_k` is the k'th root of :math:`He_n`, and then scaling the results to get the right value when integrating 1. """ ideg = int(deg) if ideg != deg or ideg < 1: raise ValueError("deg must be a non-negative integer") # first approximation of roots. We use the fact that the companion # matrix is symmetric in this case in order to obtain better zeros. c = np.array([0]*deg + [1]) m = hermecompanion(c) x = la.eigvalsh(m) # improve roots by one application of Newton dy = _normed_hermite_e_n(x, ideg) df = _normed_hermite_e_n(x, ideg - 1) * np.sqrt(ideg) x -= dy/df # compute the weights. We scale the factor to avoid possible numerical # overflow. fm = _normed_hermite_e_n(x, ideg - 1) fm /= np.abs(fm).max() w = 1/(fm * fm) # for Hermite_e we can also symmetrize w = (w + w[::-1])/2 x = (x - x[::-1])/2 # scale w to get the right value w *= np.sqrt(2*np.pi) / w.sum() return x, w def hermeweight(x): """Weight function of the Hermite_e polynomials. The weight function is :math:`\\exp(-x^2/2)` and the interval of integration is :math:`[-\\inf, \\inf]`. the HermiteE polynomials are orthogonal, but not normalized, with respect to this weight function. 
Parameters ---------- x : array_like Values at which the weight function will be computed. Returns ------- w : ndarray The weight function at `x`. Notes ----- .. versionadded:: 1.7.0 """ w = np.exp(-.5*x**2) return w # # HermiteE series class # class HermiteE(ABCPolyBase): """An HermiteE series class. The HermiteE class provides the standard Python numerical methods '+', '-', '*', '//', '%', 'divmod', '**', and '()' as well as the attributes and methods listed in the `ABCPolyBase` documentation. Parameters ---------- coef : array_like HermiteE coefficients in order of increasing degree, i.e, ``(1, 2, 3)`` gives ``1*He_0(x) + 2*He_1(X) + 3*He_2(x)``. domain : (2,) array_like, optional Domain to use. The interval ``[domain[0], domain[1]]`` is mapped to the interval ``[window[0], window[1]]`` by shifting and scaling. The default value is [-1, 1]. window : (2,) array_like, optional Window, see `domain` for its use. The default value is [-1, 1]. .. versionadded:: 1.6.0 """ # Virtual Functions _add = staticmethod(hermeadd) _sub = staticmethod(hermesub) _mul = staticmethod(hermemul) _div = staticmethod(hermediv) _pow = staticmethod(hermepow) _val = staticmethod(hermeval) _int = staticmethod(hermeint) _der = staticmethod(hermeder) _fit = staticmethod(hermefit) _line = staticmethod(hermeline) _roots = staticmethod(hermeroots) _fromroots = staticmethod(hermefromroots) # Virtual properties nickname = 'herme' domain = np.array(hermedomain) window = np.array(hermedomain) basis_name = 'He'
mit
micjabbour/AndroidGuard-WebApp
AndroidGuard/models.py
1
3073
from . import db
from .config import AppConfig
from datetime import datetime
from werkzeug.security import generate_password_hash, check_password_hash
from flask_login import UserMixin
from sqlalchemy import desc
from itsdangerous import Serializer, BadSignature


class Location(db.Model):
    """A single GPS fix reported by a device."""

    id = db.Column(db.Integer, primary_key=True)
    # DECIMAL(9, 6) keeps six decimal places of latitude/longitude.
    latitude = db.Column(db.DECIMAL(9, 6), nullable=False)
    longitude = db.Column(db.DECIMAL(9, 6), nullable=False)
    timestamp = db.Column(db.DateTime, default=datetime.utcnow)
    device_id = db.Column(db.Integer, db.ForeignKey('device.id'),
                          nullable=False)

    def serialize(self):
        """Return a JSON-serializable dict view of this fix."""
        return {'latitude': str(self.latitude),
                'longitude': str(self.longitude),
                'timestamp': self.timestamp.isoformat()+'Z'  # HACK
                }


class Device(db.Model):
    """A tracked device owned by exactly one user."""

    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.Text)
    fcm_token = db.Column(db.Text)  # push-notification token
    locations = db.relationship('Location', backref='device', lazy='select')
    user_id = db.Column(db.Integer, db.ForeignKey('user.id'), nullable=False)
    # BUG FIX: this constraint used to be written as a bare
    # ``db.UniqueConstraint('name', 'user_id')`` expression in the class
    # body, which constructs the object but never attaches it to the table
    # (a silent no-op).  Declaring it via ``__table_args__`` makes
    # SQLAlchemy actually emit UNIQUE(name, user_id) in the DDL.
    __table_args__ = (db.UniqueConstraint('name', 'user_id'),)

    @property
    def last_location(self):
        """Most recently inserted Location for this device, or None."""
        return Location.query.filter_by(device_id=self.id).order_by(
            desc('location.id')).first()

    def get_device_dict(self):
        """Return a dict view of the device, including its last fix if any."""
        device_dict = {'id': self.id, 'name': self.name}
        if self.last_location:
            device_dict['last_location'] = self.last_location.serialize()
        return device_dict

    def generate_auth_token(self):
        """Sign this device's id into an opaque authentication token."""
        s = Serializer(AppConfig.SECRET_KEY)
        return s.dumps(self.id)

    @staticmethod
    def verify_auth_token(token):
        """Return the Device encoded in *token*, or None if the signature
        is invalid or no such device exists."""
        s = Serializer(AppConfig.SECRET_KEY)
        try:
            id = s.loads(token)
        except BadSignature:
            return None
        device = Device.query.get(id)
        return device

    @staticmethod
    def get_by_devicename(user, name):
        """Return *user*'s device named *name*, or None."""
        device_list = user.devices
        for device in device_list:
            if device.name == name:
                return device
        return None


class User(db.Model, UserMixin):
    """An application account that owns devices."""

    id = db.Column(db.Integer, primary_key=True)
    username = db.Column(db.Text, unique=True)
    password_hash = db.Column(db.Text)  # only the hash is stored, never the password
    devices = db.relationship('Device', backref='user', lazy='dynamic')

    @property
    def password(self):
        # Reading the password back is deliberately impossible.
        raise AttributeError('password: write-only field')

    @password.setter
    def password(self, password):
        self.password_hash = generate_password_hash(password)

    def check_password(self, password):
        """Return True when *password* matches the stored hash."""
        return check_password_hash(self.password_hash, password)

    @staticmethod
    def get_by_username(username):
        return User.query.filter_by(username=username).first()

    @staticmethod
    def verify_credentials(username, password):
        """Return the User for a valid username/password pair, else None."""
        user = User.get_by_username(username)
        if user is not None and user.check_password(password):
            return user
        return None

    def __repr__(self):
        return "<User '{}'>".format(self.username)
unlicense
peter-jang/ansible
lib/ansible/plugins/action/raw.py
10
1572
# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com> # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. from __future__ import (absolute_import, division, print_function) __metaclass__ = type from ansible.plugins.action import ActionBase class ActionModule(ActionBase): TRANSFERS_FILES = False def run(self, tmp=None, task_vars=None): if task_vars is None: task_vars = dict() if self._task.environment: self._display.warning('raw module does not support the environment keyword') result = super(ActionModule, self).run(tmp, task_vars) if self._play_context.check_mode: # in --check mode, always skip this module execution result['skipped'] = True return result executable = self._task.args.get('executable', False) result.update(self._low_level_execute_command(self._task.args.get('_raw_params'), executable=executable)) return result
gpl-3.0
noironetworks/neutron
neutron/db/rbac_db_mixin.py
1
6467
# Copyright (c) 2015 Mirantis, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from neutron_lib.callbacks import events
from neutron_lib.callbacks import exceptions as c_exc
from neutron_lib.callbacks import registry
from neutron_lib.callbacks import resources
from neutron_lib.db import api as db_api
from neutron_lib.db import utils as db_utils
from neutron_lib import exceptions as n_exc
from oslo_db import exception as db_exc

from neutron.db import common_db_mixin
from neutron.extensions import rbac as ext_rbac
from neutron.objects import base as base_obj
from neutron.objects import rbac as rbac_obj


class RbacPluginMixin(common_db_mixin.CommonDbMixin):
    """Plugin mixin that implements the RBAC DB operations."""

    # Cache of policy id -> object type, populated lazily by
    # _get_object_type() since a policy's type can never change.
    object_type_cache = {}

    supported_extension_aliases = ['rbac-policies']

    @db_api.retry_if_session_inactive()
    def create_rbac_policy(self, context, rbac_policy):
        # Unwrap the API request body.
        e = rbac_policy['rbac_policy']
        # BEFORE_CREATE subscribers may veto the creation; a callback
        # failure is surfaced to the caller as invalid input.
        try:
            registry.notify(resources.RBAC_POLICY, events.BEFORE_CREATE, self,
                            context=context, object_type=e['object_type'],
                            policy=e)
        except c_exc.CallbackFailure as e:
            raise n_exc.InvalidInput(error_message=e)
        # Resolve the versioned-object class that backs this object type.
        rbac_class = (
            rbac_obj.RBACBaseObject.get_type_class_map()[e['object_type']])
        try:
            rbac_args = {'project_id': e['project_id'],
                         'object_id': e['object_id'],
                         'action': e['action'],
                         'target_tenant': e['target_tenant']}
            _rbac_obj = rbac_class(context, **rbac_args)
            _rbac_obj.create()
        except db_exc.DBDuplicateEntry:
            raise ext_rbac.DuplicateRbacPolicy()
        return self._make_rbac_policy_dict(_rbac_obj)

    @staticmethod
    def _make_rbac_policy_dict(entry, fields=None):
        # Flatten an RBAC object into the API dict representation,
        # optionally restricted to the requested fields.
        res = {f: entry[f] for f in ('id', 'project_id', 'target_tenant',
                                     'action', 'object_id')}
        res['object_type'] = entry.db_model.object_type
        return db_utils.resource_fields(res, fields)

    @db_api.retry_if_session_inactive()
    def update_rbac_policy(self, context, id, rbac_policy):
        pol = rbac_policy['rbac_policy']
        entry = self._get_rbac_policy(context, id)
        object_type = entry.db_model.object_type
        # BEFORE_UPDATE subscribers may reject the change (e.g. the policy
        # is still being consumed by the target tenant).
        try:
            registry.notify(resources.RBAC_POLICY, events.BEFORE_UPDATE, self,
                            context=context, policy=entry,
                            object_type=object_type, policy_update=pol)
        except c_exc.CallbackFailure as ex:
            raise ext_rbac.RbacPolicyInUse(object_id=entry.object_id,
                                           details=ex)
        entry.update_fields(pol)
        entry.update()
        return self._make_rbac_policy_dict(entry)

    @db_api.retry_if_session_inactive()
    def delete_rbac_policy(self, context, id):
        entry = self._get_rbac_policy(context, id)
        object_type = entry.db_model.object_type
        # BEFORE_DELETE subscribers may veto the deletion.
        try:
            registry.notify(resources.RBAC_POLICY, events.BEFORE_DELETE, self,
                            context=context, object_type=object_type,
                            policy=entry)
        except c_exc.CallbackFailure as ex:
            raise ext_rbac.RbacPolicyInUse(object_id=entry.object_id,
                                           details=ex)
        # make a dict copy because deleting the entry will nullify its
        # object_id link to network
        entry_dict = entry.to_dict()
        entry.delete()
        registry.notify(resources.RBAC_POLICY, events.AFTER_DELETE, self,
                        context=context, object_type=object_type,
                        policy=entry_dict)
        # Drop the cached type mapping for the removed policy, if present.
        self.object_type_cache.pop(id, None)

    def _get_rbac_policy(self, context, id):
        # Determine which per-type RBAC table holds this id, then load
        # the corresponding object.
        object_type = self._get_object_type(context, id)
        rbac_class = rbac_obj.RBACBaseObject.get_type_class_map()[object_type]
        _rbac_obj = rbac_class.get_object(context, id=id)
        if not _rbac_obj:
            raise ext_rbac.RbacPolicyNotFound(id=id, object_type=object_type)
        return _rbac_obj

    @db_api.retry_if_session_inactive()
    def get_rbac_policy(self, context, id, fields=None):
        return self._make_rbac_policy_dict(
            self._get_rbac_policy(context, id), fields=fields)

    @db_api.retry_if_session_inactive()
    def get_rbac_policies(self, context, filters=None, fields=None,
                          sorts=None, limit=None, page_reverse=False):
        pager = base_obj.Pager(sorts, limit, page_reverse)
        filters = filters or {}
        # 'object_type' is not a real column: pop it off and use it to
        # restrict which per-type RBAC classes are queried.
        object_types = filters.pop('object_type', None)
        rbac_classes_to_query = [
            o for t, o in rbac_obj.RBACBaseObject.get_type_class_map().items()
            if not object_types or t in object_types]
        rbac_objs = []
        for rbac_class in rbac_classes_to_query:
            rbac_objs += rbac_class.get_objects(context, _pager=pager,
                                                **filters)
        return [self._make_rbac_policy_dict(_rbac_obj, fields)
                for _rbac_obj in rbac_objs]

    def _get_object_type(self, context, entry_id):
        """Scans all RBAC tables for an ID to figure out the type.

        This will be an expensive operation as the number of RBAC tables
        grows. The result is cached since object types cannot be updated
        for a policy.
        """
        if entry_id in self.object_type_cache:
            return self.object_type_cache[entry_id]
        for otype, rbac_class in \
                rbac_obj.RBACBaseObject.get_type_class_map().items():
            if rbac_class.count(context, id=entry_id):
                self.object_type_cache[entry_id] = otype
                return otype
        raise ext_rbac.RbacPolicyNotFound(id=entry_id, object_type='unknown')
apache-2.0
VasuAgrawal/tartanHacks2015
site/flask/lib/python2.7/site-packages/sqlalchemy/ext/declarative/base.py
33
20036
# ext/declarative/base.py # Copyright (C) 2005-2014 the SQLAlchemy authors and contributors # <see AUTHORS file> # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """Internal implementation for declarative.""" from ...schema import Table, Column from ...orm import mapper, class_mapper, synonym from ...orm.interfaces import MapperProperty from ...orm.properties import ColumnProperty, CompositeProperty from ...orm.attributes import QueryableAttribute from ...orm.base import _is_mapped_class from ... import util, exc from ...util import topological from ...sql import expression from ... import event from . import clsregistry import collections import weakref def _declared_mapping_info(cls): # deferred mapping if _DeferredMapperConfig.has_cls(cls): return _DeferredMapperConfig.config_for_cls(cls) # regular mapping elif _is_mapped_class(cls): return class_mapper(cls, configure=False) else: return None def _as_declarative(cls, classname, dict_): from .api import declared_attr # dict_ will be a dictproxy, which we can't write to, and we need to! 
dict_ = dict(dict_) column_copies = {} potential_columns = {} mapper_args_fn = None table_args = inherited_table_args = None tablename = None declarative_props = (declared_attr, util.classproperty) for base in cls.__mro__: _is_declarative_inherits = hasattr(base, '_decl_class_registry') if '__declare_last__' in base.__dict__: @event.listens_for(mapper, "after_configured") def go(): cls.__declare_last__() if '__declare_first__' in base.__dict__: @event.listens_for(mapper, "before_configured") def go(): cls.__declare_first__() if '__abstract__' in base.__dict__ and base.__abstract__: if (base is cls or (base in cls.__bases__ and not _is_declarative_inherits)): return class_mapped = _declared_mapping_info(base) is not None for name, obj in vars(base).items(): if name == '__mapper_args__': if not mapper_args_fn and ( not class_mapped or isinstance(obj, declarative_props) ): # don't even invoke __mapper_args__ until # after we've determined everything about the # mapped table. # make a copy of it so a class-level dictionary # is not overwritten when we update column-based # arguments. mapper_args_fn = lambda: dict(cls.__mapper_args__) elif name == '__tablename__': if not tablename and ( not class_mapped or isinstance(obj, declarative_props) ): tablename = cls.__tablename__ elif name == '__table_args__': if not table_args and ( not class_mapped or isinstance(obj, declarative_props) ): table_args = cls.__table_args__ if not isinstance(table_args, (tuple, dict, type(None))): raise exc.ArgumentError( "__table_args__ value must be a tuple, " "dict, or None") if base is not cls: inherited_table_args = True elif class_mapped: if isinstance(obj, declarative_props): util.warn("Regular (i.e. not __special__) " "attribute '%s.%s' uses @declared_attr, " "but owning class %s is mapped - " "not applying to subclass %s." % (base.__name__, name, base, cls)) continue elif base is not cls: # we're a mixin. 
if isinstance(obj, Column): if getattr(cls, name) is not obj: # if column has been overridden # (like by the InstrumentedAttribute of the # superclass), skip continue if obj.foreign_keys: raise exc.InvalidRequestError( "Columns with foreign keys to other columns " "must be declared as @declared_attr callables " "on declarative mixin classes. ") if name not in dict_ and not ( '__table__' in dict_ and (obj.name or name) in dict_['__table__'].c ) and name not in potential_columns: potential_columns[name] = \ column_copies[obj] = \ obj.copy() column_copies[obj]._creation_order = \ obj._creation_order elif isinstance(obj, MapperProperty): raise exc.InvalidRequestError( "Mapper properties (i.e. deferred," "column_property(), relationship(), etc.) must " "be declared as @declared_attr callables " "on declarative mixin classes.") elif isinstance(obj, declarative_props): dict_[name] = ret = \ column_copies[obj] = getattr(cls, name) if isinstance(ret, (Column, MapperProperty)) and \ ret.doc is None: ret.doc = obj.__doc__ # apply inherited columns as we should for k, v in potential_columns.items(): dict_[k] = v if inherited_table_args and not tablename: table_args = None clsregistry.add_class(classname, cls) our_stuff = util.OrderedDict() for k in list(dict_): # TODO: improve this ? all dunders ? if k in ('__table__', '__tablename__', '__mapper_args__'): continue value = dict_[k] if isinstance(value, declarative_props): value = getattr(cls, k) elif isinstance(value, QueryableAttribute) and \ value.class_ is not cls and \ value.key != k: # detect a QueryableAttribute that's already mapped being # assigned elsewhere in userland, turn into a synonym() value = synonym(value.key) setattr(cls, k, value) if (isinstance(value, tuple) and len(value) == 1 and isinstance(value[0], (Column, MapperProperty))): util.warn("Ignoring declarative-like tuple value of attribute " "%s: possibly a copy-and-paste error with a comma " "left at the end of the line?" 
% k) continue if not isinstance(value, (Column, MapperProperty)): if not k.startswith('__'): dict_.pop(k) setattr(cls, k, value) continue if k == 'metadata': raise exc.InvalidRequestError( "Attribute name 'metadata' is reserved " "for the MetaData instance when using a " "declarative base class." ) prop = clsregistry._deferred_relationship(cls, value) our_stuff[k] = prop # set up attributes in the order they were created our_stuff.sort(key=lambda key: our_stuff[key]._creation_order) # extract columns from the class dict declared_columns = set() name_to_prop_key = collections.defaultdict(set) for key, c in list(our_stuff.items()): if isinstance(c, (ColumnProperty, CompositeProperty)): for col in c.columns: if isinstance(col, Column) and \ col.table is None: _undefer_column_name(key, col) if not isinstance(c, CompositeProperty): name_to_prop_key[col.name].add(key) declared_columns.add(col) elif isinstance(c, Column): _undefer_column_name(key, c) name_to_prop_key[c.name].add(key) declared_columns.add(c) # if the column is the same name as the key, # remove it from the explicit properties dict. # the normal rules for assigning column-based properties # will take over, including precedence of columns # in multi-column ColumnProperties. 
if key == c.key: del our_stuff[key] for name, keys in name_to_prop_key.items(): if len(keys) > 1: util.warn( "On class %r, Column object %r named directly multiple times, " "only one will be used: %s" % (classname, name, (", ".join(sorted(keys)))) ) declared_columns = sorted( declared_columns, key=lambda c: c._creation_order) table = None if hasattr(cls, '__table_cls__'): table_cls = util.unbound_method_to_callable(cls.__table_cls__) else: table_cls = Table if '__table__' not in dict_: if tablename is not None: args, table_kw = (), {} if table_args: if isinstance(table_args, dict): table_kw = table_args elif isinstance(table_args, tuple): if isinstance(table_args[-1], dict): args, table_kw = table_args[0:-1], table_args[-1] else: args = table_args autoload = dict_.get('__autoload__') if autoload: table_kw['autoload'] = True cls.__table__ = table = table_cls( tablename, cls.metadata, *(tuple(declared_columns) + tuple(args)), **table_kw) else: table = cls.__table__ if declared_columns: for c in declared_columns: if not table.c.contains_column(c): raise exc.ArgumentError( "Can't add additional column %r when " "specifying __table__" % c.key ) if hasattr(cls, '__mapper_cls__'): mapper_cls = util.unbound_method_to_callable(cls.__mapper_cls__) else: mapper_cls = mapper for c in cls.__bases__: if _declared_mapping_info(c) is not None: inherits = c break else: inherits = None if table is None and inherits is None: raise exc.InvalidRequestError( "Class %r does not have a __table__ or __tablename__ " "specified and does not inherit from an existing " "table-mapped class." % cls ) elif inherits: inherited_mapper = _declared_mapping_info(inherits) inherited_table = inherited_mapper.local_table inherited_mapped_table = inherited_mapper.mapped_table if table is None: # single table inheritance. # ensure no table args if table_args: raise exc.ArgumentError( "Can't place __table_args__ on an inherited class " "with no table." 
) # add any columns declared here to the inherited table. for c in declared_columns: if c.primary_key: raise exc.ArgumentError( "Can't place primary key columns on an inherited " "class with no table." ) if c.name in inherited_table.c: if inherited_table.c[c.name] is c: continue raise exc.ArgumentError( "Column '%s' on class %s conflicts with " "existing column '%s'" % (c, cls, inherited_table.c[c.name]) ) inherited_table.append_column(c) if inherited_mapped_table is not None and \ inherited_mapped_table is not inherited_table: inherited_mapped_table._refresh_for_new_column(c) defer_map = hasattr(cls, '_sa_decl_prepare') if defer_map: cfg_cls = _DeferredMapperConfig else: cfg_cls = _MapperConfig mt = cfg_cls(mapper_cls, cls, table, inherits, declared_columns, column_copies, our_stuff, mapper_args_fn) if not defer_map: mt.map() class _MapperConfig(object): mapped_table = None def __init__(self, mapper_cls, cls, table, inherits, declared_columns, column_copies, properties, mapper_args_fn): self.mapper_cls = mapper_cls self.cls = cls self.local_table = table self.inherits = inherits self.properties = properties self.mapper_args_fn = mapper_args_fn self.declared_columns = declared_columns self.column_copies = column_copies def _prepare_mapper_arguments(self): properties = self.properties if self.mapper_args_fn: mapper_args = self.mapper_args_fn() else: mapper_args = {} # make sure that column copies are used rather # than the original columns from any mixins for k in ('version_id_col', 'polymorphic_on',): if k in mapper_args: v = mapper_args[k] mapper_args[k] = self.column_copies.get(v, v) assert 'inherits' not in mapper_args, \ "Can't specify 'inherits' explicitly with declarative mappings" if self.inherits: mapper_args['inherits'] = self.inherits if self.inherits and not mapper_args.get('concrete', False): # single or joined inheritance # exclude any cols on the inherited table which are # not mapped on the parent class, to avoid # mapping columns specific to 
sibling/nephew classes inherited_mapper = _declared_mapping_info(self.inherits) inherited_table = inherited_mapper.local_table if 'exclude_properties' not in mapper_args: mapper_args['exclude_properties'] = exclude_properties = \ set([c.key for c in inherited_table.c if c not in inherited_mapper._columntoproperty]) exclude_properties.difference_update( [c.key for c in self.declared_columns]) # look through columns in the current mapper that # are keyed to a propname different than the colname # (if names were the same, we'd have popped it out above, # in which case the mapper makes this combination). # See if the superclass has a similar column property. # If so, join them together. for k, col in list(properties.items()): if not isinstance(col, expression.ColumnElement): continue if k in inherited_mapper._props: p = inherited_mapper._props[k] if isinstance(p, ColumnProperty): # note here we place the subclass column # first. See [ticket:1892] for background. properties[k] = [col] + p.columns result_mapper_args = mapper_args.copy() result_mapper_args['properties'] = properties return result_mapper_args def map(self): mapper_args = self._prepare_mapper_arguments() self.cls.__mapper__ = self.mapper_cls( self.cls, self.local_table, **mapper_args ) class _DeferredMapperConfig(_MapperConfig): _configs = util.OrderedDict() @property def cls(self): return self._cls() @cls.setter def cls(self, class_): self._cls = weakref.ref(class_, self._remove_config_cls) self._configs[self._cls] = self @classmethod def _remove_config_cls(cls, ref): cls._configs.pop(ref, None) @classmethod def has_cls(cls, class_): # 2.6 fails on weakref if class_ is an old style class return isinstance(class_, type) and \ weakref.ref(class_) in cls._configs @classmethod def config_for_cls(cls, class_): return cls._configs[weakref.ref(class_)] @classmethod def classes_for_base(cls, base_cls, sort=True): classes_for_base = [m for m in cls._configs.values() if issubclass(m.cls, base_cls)] if not sort: 
return classes_for_base all_m_by_cls = dict( (m.cls, m) for m in classes_for_base ) tuples = [] for m_cls in all_m_by_cls: tuples.extend( (all_m_by_cls[base_cls], all_m_by_cls[m_cls]) for base_cls in m_cls.__bases__ if base_cls in all_m_by_cls ) return list( topological.sort( tuples, classes_for_base ) ) def map(self): self._configs.pop(self._cls, None) super(_DeferredMapperConfig, self).map() def _add_attribute(cls, key, value): """add an attribute to an existing declarative class. This runs through the logic to determine MapperProperty, adds it to the Mapper, adds a column to the mapped Table, etc. """ if '__mapper__' in cls.__dict__: if isinstance(value, Column): _undefer_column_name(key, value) cls.__table__.append_column(value) cls.__mapper__.add_property(key, value) elif isinstance(value, ColumnProperty): for col in value.columns: if isinstance(col, Column) and col.table is None: _undefer_column_name(key, col) cls.__table__.append_column(col) cls.__mapper__.add_property(key, value) elif isinstance(value, MapperProperty): cls.__mapper__.add_property( key, clsregistry._deferred_relationship(cls, value) ) elif isinstance(value, QueryableAttribute) and value.key != key: # detect a QueryableAttribute that's already mapped being # assigned elsewhere in userland, turn into a synonym() value = synonym(value.key) cls.__mapper__.add_property( key, clsregistry._deferred_relationship(cls, value) ) else: type.__setattr__(cls, key, value) else: type.__setattr__(cls, key, value) def _declarative_constructor(self, **kwargs): """A simple constructor that allows initialization from kwargs. Sets attributes on the constructed instance using the names and values in ``kwargs``. Only keys that are present as attributes of the instance's class are allowed. These could be, for example, any mapped columns or relationships. 
""" cls_ = type(self) for k in kwargs: if not hasattr(cls_, k): raise TypeError( "%r is an invalid keyword argument for %s" % (k, cls_.__name__)) setattr(self, k, kwargs[k]) _declarative_constructor.__name__ = '__init__' def _undefer_column_name(key, column): if column.key is None: column.key = key if column.name is None: column.name = key
mit
francbartoli/geonode
geonode/decorators.py
2
11169
# -*- coding: utf-8 -*- ######################################################################### # # Copyright (C) 2016 OSGeo # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ######################################################################### import json import base64 import logging from functools import wraps from django.contrib import auth from django.conf import settings from django.http import HttpResponse from django.contrib.auth import authenticate, login from django.utils.decorators import classonlymethod from django.core.exceptions import PermissionDenied from geonode.utils import (check_ogc_backend, get_client_ip, get_client_host) logger = logging.getLogger(__name__) def on_ogc_backend(backend_package): """Decorator for function specific to a certain ogc backend. This decorator will wrap function so it only gets executed if the specified ogc backend is currently used. If not, the function will just be skipped. Useful to decorate features/tests that only available for specific backend. 
""" def decorator(func): @wraps(func) def wrapper(*args, **kwargs): on_backend = check_ogc_backend(backend_package) if on_backend: return func(*args, **kwargs) return wrapper return decorator def view_or_basicauth(view, request, test_func, realm="", *args, **kwargs): """ This is a helper function used by both 'logged_in_or_basicauth' and 'has_perm_or_basicauth' that does the nitty of determining if they are already logged in or if they have provided proper http-authorization and returning the view if all goes well, otherwise responding with a 401. """ if test_func(request.user): # Already logged in, just return the view. # return view(request, *args, **kwargs) # They are not logged in. See if they provided login credentials # if 'HTTP_AUTHORIZATION' in request.META: auth = request.META['HTTP_AUTHORIZATION'].split() if len(auth) == 2: # NOTE: We are only support basic authentication for now. # if auth[0].lower() == "basic": uname, passwd = base64.b64decode(auth[1]).decode('utf-8').split(':', 1) user = authenticate(username=uname, password=passwd) if user is not None: if user.is_active: login(request, user) request.user = user if test_func(request.user): return view(request, *args, **kwargs) # Either they did not provide an authorization header or # something in the authorization attempt failed. Send a 401 # back to them to ask them to authenticate. # response = HttpResponse() response.status_code = 401 response['WWW-Authenticate'] = 'Basic realm="%s"' % realm return response def view_decorator(fdec, subclass=False): """ Change a function decorator into a view decorator. 
https://github.com/lqc/django/tree/cbvdecoration_ticket14512 """ def decorator(cls): if not hasattr(cls, "as_view"): raise TypeError( "You should only decorate subclasses of View, not mixins.") if subclass: cls = type("%sWithDecorator(%s)" % (cls.__name__, fdec.__name__), (cls,), {}) original = cls.as_view.__func__ @wraps(original) def as_view(current, **initkwargs): return fdec(original(current, **initkwargs)) cls.as_view = classonlymethod(as_view) return cls return decorator def view_or_apiauth(view, request, test_func, *args, **kwargs): """ This is a helper function used by both 'logged_in_or_basicauth' and 'has_perm_or_basicauth' that does the nitty of determining if they are already logged in or if they have provided proper http-authorization and returning the view if all goes well, otherwise responding with a 401. """ if test_func(auth.get_user(request)) or not settings.OAUTH2_API_KEY: # Already logged in, just return the view. # return view(request, *args, **kwargs) # They are not logged in. See if they provided login credentials # if 'HTTP_AUTHORIZATION' in request.META: _auth = request.META['HTTP_AUTHORIZATION'].split() if len(_auth) == 2: # NOTE: We are only support basic authentication for now. # if _auth[0].lower() == "apikey": auth_api_key = _auth[1] if auth_api_key and auth_api_key == settings.OAUTH2_API_KEY: return view(request, *args, **kwargs) # Either they did not provide an authorization header or # something in the authorization attempt failed. Send a 401 # back to them to ask them to authenticate. # response = HttpResponse() response.status_code = 401 return response def has_perm_or_basicauth(perm, realm=""): """ This is similar to the above decorator 'logged_in_or_basicauth' except that it requires the logged in user to have a specific permission. Use: @logged_in_or_basicauth('asforums.view_forumcollection') def your_view: ... 
""" def view_decorator(func): def wrapper(request, *args, **kwargs): return view_or_basicauth(func, request, lambda u: u.has_perm(perm), realm, *args, **kwargs) return wrapper return view_decorator def superuser_only(function): """ Limit view to superusers only. Usage: -------------------------------------------------------------------------- @superuser_only def my_view(request): ... -------------------------------------------------------------------------- or in urls: -------------------------------------------------------------------------- urlpatterns = patterns('', (r'^foobar/(.*)', is_staff(my_view)), ) -------------------------------------------------------------------------- """ def _inner(request, *args, **kwargs): if not auth.get_user(request).is_superuser and not auth.get_user(request).is_staff: raise PermissionDenied return function(request, *args, **kwargs) return _inner def check_keyword_write_perms(function): def _inner(request, *args, **kwargs): keyword_readonly = settings.FREETEXT_KEYWORDS_READONLY and request.method == "POST" \ and not auth.get_user(request).is_superuser request.keyword_readonly = keyword_readonly if keyword_readonly and 'resource-keywords' in request.POST: return HttpResponse( "Unauthorized: Cannot edit/create Free-text Keywords", status=401, content_type="application/json" ) return function(request, *args, **kwargs) return _inner def superuser_protected(function): """Decorator that forces a view to be accessible by SUPERUSERS only. """ def _inner(request, *args, **kwargs): if not auth.get_user(request).is_superuser: return HttpResponse( json.dumps({ 'error': 'unauthorized_request' }), status=403, content_type="application/json" ) return function(request, *args, **kwargs) return _inner def whitelist_protected(function): """Decorator that forces a view to be accessible by WHITE_LISTED IPs only. 
""" def _inner(request, *args, **kwargs): if not settings.AUTH_IP_WHITELIST or \ (get_client_ip(request) not in settings.AUTH_IP_WHITELIST and get_client_host(request) not in settings.AUTH_IP_WHITELIST): return HttpResponse( json.dumps({ 'error': 'unauthorized_request' }), status=403, content_type="application/json" ) return function(request, *args, **kwargs) return _inner def logged_in_or_basicauth(realm=""): """ A simple decorator that requires a user to be logged in. If they are not logged in the request is examined for a 'authorization' header. If the header is present it is tested for basic authentication and the user is logged in with the provided credentials. If the header is not present a http 401 is sent back to the requestor to provide credentials. The purpose of this is that in several django projects I have needed several specific views that need to support basic authentication, yet the web site as a whole used django's provided authentication. The uses for this are for urls that are access programmatically such as by rss feed readers, yet the view requires a user to be logged in. Many rss readers support supplying the authentication credentials via http basic auth (and they do NOT support a redirect to a form where they post a username/password.) Use is simple: @logged_in_or_basicauth() def your_view: ... You can provide the name of the realm to ask for authentication within. 
""" def view_decorator(func): def wrapper(request, *args, **kwargs): return view_or_basicauth(func, request, lambda u: u.is_authenticated, realm, *args, **kwargs) return wrapper return view_decorator def logged_in_or_apiauth(): def view_decorator(func): def wrapper(request, *args, **kwargs): return view_or_apiauth(func, request, lambda u: u.is_authenticated, *args, **kwargs) return wrapper return view_decorator def superuser_or_apiauth(): def view_decorator(func): def wrapper(request, *args, **kwargs): return view_or_apiauth(func, request, lambda u: u.is_superuser, *args, **kwargs) return wrapper return view_decorator def dump_func_name(func): def echo_func(*func_args, **func_kwargs): logger.debug('Start func: {}'.format(func.__name__)) return func(*func_args, **func_kwargs) return echo_func
gpl-3.0
graik/biskit
archive_biskit2/Biskit/deprecated/ChainSeparator.py
1
19962
## numpy-oldnumeric calls replaced by custom script; 09/06/2016 ## Automatically adapted for numpy-oldnumeric Mar 26, 2007 by alter_code1.py ## class ChainSeperator: ## ## Biskit, a toolkit for the manipulation of macromolecular structures ## Copyright (C) 2004-2018 Raik Gruenberg & Johan Leckner ## ## This program is free software; you can redistribute it and/or ## modify it under the terms of the GNU General Public License as ## published by the Free Software Foundation; either version 3 of the ## License, or any later version. ## ## This program is distributed in the hope that it will be useful, ## but WITHOUT ANY WARRANTY; without even the implied warranty of ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ## General Public License for more details. ## ## You find a copy of the GNU General Public License in the file ## license.txt along with this program; if not, write to the Free ## Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. ## ## """ Seperate PDB into continuous peptide chains for XPlor. Remove duplicate peptide chains. Required by pdb2xplor.py This is vintage code. See L{Biskit.PDBCleaner} for a more recent version (though yet lacking some functions). @todo: Create an override for the chain comparison if one wants to keep identical chains (i.e homodimers) """ ## from Blast2Seq import * # compare 2 sequences from molUtils import singleAA import Biskit.tools as T from LogFile import LogFile from Scientific.IO.PDB import * import Biskit.oldnumeric as N0 import string from difflib import SequenceMatcher import re class ChainSeparator: """ Open PDB file; give back one chain whenever next() is called. This class is used by the pdb2xplor script. This class constitutes vintage code. See L{Biskit.PDBCleaner} and L{Biskit.Mod.TemplateCleaner} for a more recent implementation of PDB cleaning. 
@todo: The removal of duplicate chains should be transferred to the PDBCleaner so that this class can be retired """ def __init__(self, fname, outPath='', chainIdOffset=0, capBreaks=0, chainMask=0, log=None ): """ @param fname: pdb filename @type fname: str @param outPath: path for log file @type outPath: str @param chainIdOffset: start chain numbering at this offset @type chainIdOffset: int @param capBreaks: add ACE and NME to N- and C-term. of chain breaks [0] @type capBreaks: 0|1 @param chainMask: chain mask for overriding the default sequence identity [None] @type chainMask: [1|0] @param log: LogFile object @type log: object """ self.pdb = Structure(fname); self.fname = fname self.outPath = T.absfile( outPath ) self.chainIdOffset = chainIdOffset self.capBreaks = capBreaks self.log = LogFile( T.absfile(outPath)+'/' + self.pdbname()+'.log') if log: self.log = log self.chains = self.pdb.peptide_chains self.counter = -1 self.threshold = 0.9 # sequence identity between multiple copies in PDB self._expressionCheck( "[^\n].*[Hh][Oo][Mm][Oo].?[Dd][Ii][Mm][eE][Rr].*\n", 'HOMODIMER') self._expressionCheck("[^\n].*[Tt][Rr][Ii][Mm][Ee][Rr].*\n", 'TRIMER') self._hetatomCheck() self.log.add("Separate chains: \n------------------") self._removeDuplicateChains(chainMask) # keep only one copy of molecule self._separateChainBreaks() self._assign_seg_ids() # new segment id for each chain def pdbname(self): """ Extract pdb code from file name. @return: (assumed) pdb code @rtype: str """ return T.stripFilename(self.pdb.filename) def _expressionCheck(self, findExpression, findClean): """ Check and report if the regular expression 'findExpression' exists in the PDB-file. Use this to locate data in the REMARK section of a pdb file. Prints a warning to stdOut if the regular expression is found. 
@param findExpression: regular expression @type findExpression: str @param findClean: clean name of regular expression @type findClean: str """ pdb = open(self.fname,'r') pdbFile = pdb.read() searchResult = re.findall(findExpression,pdbFile) warningMessage = """ WARNINGR! The text string'%s' was found in the PDB-file. If this PDB-file contains a homodimer one of the chains will be deleted by this script. To avoid this prepare the file for Xplor manualy \n""" %\ ( findClean ) warningMessage2 = """--------------------------------------------\n""" if len(searchResult) != 0: self.log.add(warningMessage) self.log.add("String found in line(s): \n") for i in range(0,len(searchResult)): self.log.add(searchResult[i]) self.log.add(warningMessage2) pdb.close() def _hetatomCheck(self): """ Check and report if there are any none-water HETATMs in the PDB-file """ pdb = open(self.fname,'r') pdbFile = pdb.read() findExpression = "HETATM.*\n" searchResult = re.findall(findExpression,pdbFile) i=0 j = len(searchResult) while i<j: if searchResult[i][17:20] == "HOH" or \ searchResult[i][0:6] != "HETATM" : del searchResult[i] i=i-1 j=j-1 i=i+1 warningMessage = """ WARNING! The PDB-file contains coordinates for none water HETATMs. 
If you want to keep the HETATM - prepare the file for Xplor manualy \n""" warningMessage2 = "\n"+ 80*"-" + "\n" if len(searchResult) != 0: self.log.add(warningMessage) self.log.add("String found in line(s): \n") for i in range(0,len(searchResult)): self.log.add(searchResult[i][0:-1]) self.log.add(warningMessage2) pdb.close() def _compareSequences( self, seq1, seq2 ): """ @param seq1: sequence 1 to compare @type seq1: str @param seq2: sequence 1 to compare @type seq2: str @return: identity (0.0 - 1.0) between the two sequences @rtype : float """ # compare the 2 sequences ## blast = Blast2Seq( seq1, seq2 ) ## id = blast.run() matcher = SequenceMatcher( None, ''.join(seq1) , ''.join(seq2) ) return matcher.ratio() def _removeDuplicateChains(self, chainMask=None): """ Get rid of identical chains by comparing all chains with Blast2seq. @param chainMask: chain mask for overriding the chain identity checking (default: None) @type chainMask: [int] @return: number of chains removed @rtype: int """ chainCount = len(self.chains) matrix = 1.0 * N0.zeros((chainCount,chainCount)) chain_ids = [] ## create identity matrix for all chains against all chains for i in range(0, chainCount): chain_ids = chain_ids + [self.chains[i].chain_id] # collect for log file for j in range(i, len(self.chains)): # convert 3-letter-code res list into 1-letter-code String seq1 = singleAA( self.chains[i].sequence() ) seq2 = singleAA( self.chains[j].sequence() ) ## if len(seq1) > len(seq2): # take shorter sequence ## # aln len at least half the len of the shortest sequence ## alnCutoff = len(seq2) * 0.5 ## else: ## alnCutoff = len(seq1) * 0.5 ## if id['aln_len'] > alnCutoff: ## matrix[i,j] = id['aln_id'] ## else: # aln length too short, ignore ## matrix[i,j] = 0 matrix[i,j] = self._compareSequences( seq1, seq2 ) ## report activity self.log.add("\n Chain ID's of compared chains: "+str(chain_ids)) self.log.add(" Cross-Identity between chains:\n"+str(matrix)) self.log.add(" Identity threshold used: 
"+str(self.threshold)) ## override the automatic chain deletion by supplying a ## chain mask to this function if chainMask: if len(chainMask) == chainCount: self.chains = N0.compress(chainMask, self.chains) self.log.add("NOTE: chain mask %s used for removing chains.\n"%chainMask) else: self.log.add("########## ERROR ###############") self.log.add("# Chain mask is only %i chains long"%len(chainMask)) self.log.add("# when a mask of length %i is needed"%chainCount) self.log.add("# No cleaning will be performed.\n") if not chainMask: ## look at diagonals in "identity matrix" ## (each chain against each) duplicate = len(self.chains) for offset in range(1,chainCount): diag = N0.diagonal(matrix, offset ,0,1) # diagonal of 1's mark begin of duplicate avg = 1.0 * N0.sum(diag)/len(diag) if (avg >= self.threshold): duplicate = offset break self.chains = self.chains[:duplicate] self.log.add("NOTE: Identity matrix will be used for removing identical chains.") ## report activit self.log.add(str(chainCount - len(self.chains))+\ " chains have been removed.\n") # how many chains have been removed? return (chainCount - len(self.chains)) def _assign_seg_ids(self): """ Assign new segment id to each chain. """ counter = self.chainIdOffset for chain in self.chains: ## Assemble segid from pdb code + one letter out of A to Z chain.segment_id = self.pdbname()[:3] + string.uppercase[counter] counter = counter + 1 try: # report changed segement ids chain_id = chain.chain_id self.log.add("changed segment ID of chain "+chain_id+\ " to "+chain.segment_id) except: T.errWriteln("_assign_seg_ids(): logerror") def _sequentialDist(self, chain, cutoff, atom): """ Calculate sequential atom-atom distance, report residues with longer distance than cutoff (chain break positions). @param chain: Scientific.IO.PDB.PeptideChain object @type chain: object @param cutoff: threshold for reporting gap (chain break) @type cutoff: float @param atom: type of atoms to check (i.e. 
'CA') @type atom: str @return: list of chain break positions (residue index for each first residue of two that are too distant) @rtype: list of int """ distanceList = [] v0 = Vector( 0,0,0 ) jump = 1 for res in range(0,len(chain)-2): try: v1 = Vector(chain[res][atom].position.array) ## ignore CA with 0,0,0 coordinate if v1 != v0: jump = 1 v2 = Vector(chain[ res+jump ][atom].position.array) ## look for next CA with non-zero coordinate while v2 == v0 and jump + res < len( chain ): jump += 1 v2 = Vector(chain[ res+jump ][atom].position.array) if (v1 - v2).length() > cutoff * jump: distanceList = distanceList + [res + jump - 1] except: self.log.add( "_sequentialDist():\nError while checking CA-CA distance"+\ " between residues "+str(chain[res].name)+\ str(chain[res].number)+" and "+\ str(chain[res+jump].name)+\ str(chain[res+jump].number)+ " in chain "+chain.chain_id) self.log.add("Error: " + T.lastError() ) return distanceList ## def _sequentialDist(self, chain, cutoff, atom): ## """ ## Calculate sequential atom-atom distance, report residues with ## longer distance than cutoff (chain break positions). ## chain - PDB.PeptideChain ## cutoff - float, threshold for reporting gap (chain break) ## atom - str, type of atoms to check (i.e. 
'CA') ## -> [int, int, ...], list of chain break positions (residue index ## for each first residue of two that are too distant) ## """ ## distanceList = [] ## for residue in range(0,len(chain)-1): ## # iterate through residue 1 to ter-1 ## try: ## vectorAtom1 = Vector(chain[residue][atom].position.array) ## vectorAtom2 = Vector(chain[residue+1][atom].position.array) ## if (vectorAtom1 - vectorAtom2).length() > cutoff: ## distanceList = distanceList + [residue] ## except: ## self.log.add( ## "_sequentialDist():\nError while checking CA-CA distance"+ \ ## " between residues "+str(chain[residue].name)+\ ## str(chain[residue].number)+" and "+str(chain[residue+1].name)+\ ## str(chain[residue+1].number)+ " in chain "+chain.chain_id) ## self.log.add("Error: " + T.lastError() ) ## return distanceList def _separateChainBreaks(self): """ Separate chains with breaks into 2 chains. The new chain(s) is/are added to the internal PDB instance (self.chains). """ fragments = [] for chain in self.chains: # res number of residues before a break breaks = self._sequentialDist(chain, 4.5, 'CA') self.log.add(str(len(breaks)) + " breaks found in chain " +\ "(" + str(len(chain)) \ + " residues) " + chain.chain_id + ": "+str(breaks)) previous = 0 ncap_next = 0 for breakRes in breaks: residues = chain.residues[previous:breakRes+1] previous = breakRes + 1 chainNew = PeptideChain(residues, chain.chain_id, chain.segment_id) if ncap_next: self.__nCap( chainNew ) ncap_next = 0 if self.capBreaks: ## add N-Methyl to c terminal self.__cCap( chainNew ) ncap_next = 1 fragments = fragments + [chainNew] chainNew = PeptideChain(chain.residues[previous:], chain.chain_id, chain.segment_id) if ncap_next: self.__nCap( chainNew ) fragments = fragments + [chainNew] self.chains = fragments def __nCap( self, pep_chain ): """ Add acetyl capping to N-terminal of peptide chain """ n = (pep_chain[0].number or 1) - 1 r = AminoAcidResidue('ACE', number=n, atoms=[Atom('CA', Vector(0,0,0), element='C')]) 
pep_chain.residues = [r] + pep_chain.residues self.log.add('Capping chain break with ACE %i' % n) def __cCap( self, pep_chain ): """ Add methyle amine capping to C-terminal of peptide chain """ n = (pep_chain[-1].number or len(pep_chain)) + 1 r = AminoAcidResidue('NME', number=n, atoms=[Atom('CA', Vector(0,0,0), element='C')]) pep_chain.residues = pep_chain.residues + [r] self.log.add('Capping chain break at with NME %i' % n) def extractWaters(self): """ Write waters into separate pdb file, called |pdbCode|_waters.pdb. """ try: fTarget = self.outPath + '/' +\ self.pdbname()[:4] + '_waters.pdb' pdb = PDBFile( fTarget, mode='w' ) waters = [] for key in ['HOH', 'DOD']: if self.pdb.molecules.has_key( key ): waters += self.pdb.molecules[ key ] pdb.nextChain(chain_id='', segment_id='1XWW') for w in waters: pdb.nextResidue('TIP3') ## XPLOR wants "ATOM" not "HETATM": pdb.het_flag = 0 pdb.writeAtom('OH2', w.atoms['O'].position) ## keep TIP3 waters as well if len(waters) == 0: try: TIP3_waters = self.pdb.molecules[ 'TIP3' ] except: TIP3_waters = [] for w in TIP3_waters: pdb.nextResidue('TIP3') ## XPLOR wants "ATOM" not "HETATM": pdb.het_flag = 0 pdb.writeAtom('OH2', w.atoms['OH2'].position) pdb.writeAtom('H1', w.atoms['H1'].position) pdb.writeAtom('H2', w.atoms['H2'].position) pdb.close() except: T.errWriteln("Error writing waters to %s: " % fTarget ) T.errWriteln( T.lastError() ) def next(self): """ Return next 'clean', non-redundant, non-broken chain from PDB @return: Scientific.IO.PDB.PeptideChain, completed chain OR if no chain is left @rtype: chain object OR None """ self.counter = self.counter + 1 if (len(self.chains) > self.counter): return self.chains[self.counter] else: return None ############# ## TESTING ############# import Biskit.test as BT class Test(BT.BiskitTest): """Test ChainSeparator """ def prepare(self): self.fname = T.testRoot() + '/com/1BGS_original.pdb' self.outPath = T.tempDir() def cleanUp(self): T.tryRemove( self.sep.log.fname ) def 
test_ChainSeparator( self ): """ChainSeparator test""" self.sep = ChainSeparator( self.fname, self.outPath, 1) self.chain = self.sep.next() i=1 all_chains = [] while self.chain <> None: if self.local: print 'Chain %i:'%i, ''.join(singleAA(self.chain.sequence() ) ) all_chains += self.chain.sequence() self.chain = self.sep.next() i += 1 if self.local: print 'ChainSeparator log file written to: %s'%self.sep.log.fname r = ''.join( singleAA( all_chains ) ) self.assertEqual(r, self.EXPECTED) EXPECTED='AQVINTFDGVADYLQTYHKLPDNYITKSEAQALGWVASKGNLADVAPGKSIGGDIFSNREGKLPGKSGRTWREADINYTSGFRNSDRILYSSDWLIYKTTDHYQTFTKIRAQVINTFDGVADYLQTYHKLPDNYITKSEAQALGWVASKGNLADVAPGKSIGGDIFSNREGKLPGKSGRTWREADINYTSGFRNSDRILYSSDWLIYKTTDHYQTFTKIRAQVINTFDGVADYLQTYHKLPDNYITKSEAQALGWVASKGNLADVAPGKSIGGDIFSNREGKLPGKSGRTWREADINYTSGFRNSDRILYSSDWLIYKTTDHYQTFTKIRKKAVINGEQIRSISDLHQTLKKELALPEYYGENLDALWDALTGWVEYPLVLEWRQFEQSKQLTENGAESVLQVFREAKAEGADITIILSKKAVINGEQIRSISDLHQTLKKELALPEYYGENLDALWDALTGWVEYPLVLEWRQFEQSKQLTENGAESVLQVFREAKAEGADITIILSKKAVINGEQIRSISDLHQTLKKELALPEYYGENLDALWDALTGWVEYPLVLEWRQFEQSKQLTENGAESVLQVFREAKAEGADITIILS' if __name__ == '__main__': BT.localTest()
gpl-3.0
eyurtsev/FlowCytometryTools
FlowCytometryTools/core/docstring.py
1
2522
from __future__ import print_function

# FIX: the original did `from matplotlib import inspect`, which only works
# because matplotlib happens to import the stdlib `inspect` module into its
# own namespace. Import it directly from the standard library instead.
import inspect
import string


class FormatDict(dict):
    """dict for str.format that leaves unknown keys as literal "{key}".

    Adapted from
    http://stackoverflow.com/questions/11283961/partial-string-formatting
    """

    def __missing__(self, key):
        # Reproduce the placeholder so partial formatting is lossless.
        return "{" + key + "}"


class DocReplacer(object):
    """Decorator object for replacing patterns in docstrings using string.format."""

    def __init__(self, auto_dedent=True, allow_partial_formatting=False, **doc_dict):
        '''
        Parameters
        -------------
        auto_dedent : bool
            Flag for automatically dedenting (via inspect.cleandoc) the
            docstring before substitution, so replacements align with the
            docstring's indentation level.
        allow_partial_formatting : bool
            Enables partial formatting (i.e., not all keys have to be
            available in the dictionary; missing ones are left as "{key}").
        doc_dict : kwargs
            Pattern in docstring that a key in this dict will be replaced
            by the corresponding value.

        Example
        -------------
        @DocReplacer(p1='p1 : int\n\tFirst parameter')
        def foo(p1):
            """
            Some function.

            Params:
            {p1}
            """

        will result in foo's docstring containing the expanded text for p1.
        '''
        self.doc_dict = doc_dict
        self.auto_dedent = auto_dedent
        self.allow_partial_formatting = allow_partial_formatting

    def __call__(self, func):
        """Decorate func: substitute patterns in its docstring, return func.

        Functions without a docstring are returned unchanged.
        """
        if func.__doc__:
            doc = func.__doc__
            if self.auto_dedent:
                doc = inspect.cleandoc(doc)
            func.__doc__ = self._format(doc)
        return func

    def replace(self):
        """Expand {key} references between values of self.doc_dict itself.

        Each value that contains a format field is formatted against a
        snapshot of the dict, so values can refer to other keys.
        TODO: Make support for partial_formatting
        """
        doc_dict = self.doc_dict.copy()
        for k, v in doc_dict.items():
            # FIX: the original condition was `if '{' and '}' in v:` which
            # evaluates as `'}' in v` ('{' is a truthy constant), so a value
            # containing a lone '}' was fed to str.format and raised
            # ValueError. Require BOTH braces before formatting.
            if '{' in v and '}' in v:
                self.doc_dict[k] = v.format(**doc_dict)

    def update(self, *args, **kwargs):
        """Assume self.doc_dict is a dict and update it with supplied args."""
        self.doc_dict.update(*args, **kwargs)

    def _format(self, doc):
        """Format the docstring using self.doc_dict.

        With allow_partial_formatting, unknown keys are kept as "{key}"
        instead of raising KeyError.
        """
        if self.allow_partial_formatting:
            mapping = FormatDict(self.doc_dict)
        else:
            mapping = self.doc_dict
        formatter = string.Formatter()
        return formatter.vformat(doc, (), mapping)
mit
alphafoobar/intellij-community
python/lib/Lib/distutils/cmd.py
138
19253
"""distutils.cmd Provides the Command class, the base class for the command classes in the distutils.command package. """ # This module should be kept compatible with Python 2.1. __revision__ = "$Id: cmd.py 37828 2004-11-10 22:23:15Z loewis $" import sys, os, string, re from types import * from distutils.errors import * from distutils import util, dir_util, file_util, archive_util, dep_util from distutils import log class Command: """Abstract base class for defining command classes, the "worker bees" of the Distutils. A useful analogy for command classes is to think of them as subroutines with local variables called "options". The options are "declared" in 'initialize_options()' and "defined" (given their final values, aka "finalized") in 'finalize_options()', both of which must be defined by every command class. The distinction between the two is necessary because option values might come from the outside world (command line, config file, ...), and any options dependent on other options must be computed *after* these outside influences have been processed -- hence 'finalize_options()'. The "body" of the subroutine, where it does all its work based on the values of its options, is the 'run()' method, which must also be implemented by every command class. """ # 'sub_commands' formalizes the notion of a "family" of commands, # eg. "install" as the parent with sub-commands "install_lib", # "install_headers", etc. The parent of a family of commands # defines 'sub_commands' as a class attribute; it's a list of # (command_name : string, predicate : unbound_method | string | None) # tuples, where 'predicate' is a method of the parent command that # determines whether the corresponding command is applicable in the # current situation. (Eg. we "install_headers" is only applicable if # we have any C header files to install.) If 'predicate' is None, # that command is always applicable. 
# # 'sub_commands' is usually defined at the *end* of a class, because # predicates can be unbound methods, so they must already have been # defined. The canonical example is the "install" command. sub_commands = [] # -- Creation/initialization methods ------------------------------- def __init__ (self, dist): """Create and initialize a new Command object. Most importantly, invokes the 'initialize_options()' method, which is the real initializer and depends on the actual command being instantiated. """ # late import because of mutual dependence between these classes from distutils.dist import Distribution if not isinstance(dist, Distribution): raise TypeError, "dist must be a Distribution instance" if self.__class__ is Command: raise RuntimeError, "Command is an abstract class" self.distribution = dist self.initialize_options() # Per-command versions of the global flags, so that the user can # customize Distutils' behaviour command-by-command and let some # commands fall back on the Distribution's behaviour. None means # "not defined, check self.distribution's copy", while 0 or 1 mean # false and true (duh). Note that this means figuring out the real # value of each flag is a touch complicated -- hence "self._dry_run" # will be handled by __getattr__, below. # XXX This needs to be fixed. self._dry_run = None # verbose is largely ignored, but needs to be set for # backwards compatibility (I think)? self.verbose = dist.verbose # Some commands define a 'self.force' option to ignore file # timestamps, but methods defined *here* assume that # 'self.force' exists for all commands. So define it here # just to be safe. self.force = None # The 'help' flag is just used for command-line parsing, so # none of that complicated bureaucracy is needed. self.help = 0 # 'finalized' records whether or not 'finalize_options()' has been # called. 
'finalize_options()' itself should not pay attention to # this flag: it is the business of 'ensure_finalized()', which # always calls 'finalize_options()', to respect/update it. self.finalized = 0 # __init__ () # XXX A more explicit way to customize dry_run would be better. def __getattr__ (self, attr): if attr == 'dry_run': myval = getattr(self, "_" + attr) if myval is None: return getattr(self.distribution, attr) else: return myval else: raise AttributeError, attr def ensure_finalized (self): if not self.finalized: self.finalize_options() self.finalized = 1 # Subclasses must define: # initialize_options() # provide default values for all options; may be customized by # setup script, by options from config file(s), or by command-line # options # finalize_options() # decide on the final values for all options; this is called # after all possible intervention from the outside world # (command-line, option file, etc.) has been processed # run() # run the command: do whatever it is we're here to do, # controlled by the command's various option values def initialize_options (self): """Set default values for all the options that this command supports. Note that these defaults may be overridden by other commands, by the setup script, by config files, or by the command-line. Thus, this is not the place to code dependencies between options; generally, 'initialize_options()' implementations are just a bunch of "self.foo = None" assignments. This method must be implemented by all command classes. """ raise RuntimeError, \ "abstract method -- subclass %s must override" % self.__class__ def finalize_options (self): """Set final values for all the options that this command supports. This is always called as late as possible, ie. after any option assignments from the command-line or from other commands have been done. 
Thus, this is the place to code option dependencies: if 'foo' depends on 'bar', then it is safe to set 'foo' from 'bar' as long as 'foo' still has the same value it was assigned in 'initialize_options()'. This method must be implemented by all command classes. """ raise RuntimeError, \ "abstract method -- subclass %s must override" % self.__class__ def dump_options (self, header=None, indent=""): from distutils.fancy_getopt import longopt_xlate if header is None: header = "command options for '%s':" % self.get_command_name() print indent + header indent = indent + " " for (option, _, _) in self.user_options: option = string.translate(option, longopt_xlate) if option[-1] == "=": option = option[:-1] value = getattr(self, option) print indent + "%s = %s" % (option, value) def run (self): """A command's raison d'etre: carry out the action it exists to perform, controlled by the options initialized in 'initialize_options()', customized by other commands, the setup script, the command-line, and config files, and finalized in 'finalize_options()'. All terminal output and filesystem interaction should be done by 'run()'. This method must be implemented by all command classes. """ raise RuntimeError, \ "abstract method -- subclass %s must override" % self.__class__ def announce (self, msg, level=1): """If the current verbosity level is of greater than or equal to 'level' print 'msg' to stdout. """ log.log(level, msg) def debug_print (self, msg): """Print 'msg' to stdout if the global DEBUG (taken from the DISTUTILS_DEBUG environment variable) flag is true. """ from distutils.debug import DEBUG if DEBUG: print msg sys.stdout.flush() # -- Option validation methods ------------------------------------- # (these are very handy in writing the 'finalize_options()' method) # # NB. the general philosophy here is to ensure that a particular option # value meets certain type and value constraints. If not, we try to # force it into conformance (eg. 
if we expect a list but have a string, # split the string on comma and/or whitespace). If we can't force the # option into conformance, raise DistutilsOptionError. Thus, command # classes need do nothing more than (eg.) # self.ensure_string_list('foo') # and they can be guaranteed that thereafter, self.foo will be # a list of strings. def _ensure_stringlike (self, option, what, default=None): val = getattr(self, option) if val is None: setattr(self, option, default) return default elif type(val) is not StringType: raise DistutilsOptionError, \ "'%s' must be a %s (got `%s`)" % (option, what, val) return val def ensure_string (self, option, default=None): """Ensure that 'option' is a string; if not defined, set it to 'default'. """ self._ensure_stringlike(option, "string", default) def ensure_string_list (self, option): """Ensure that 'option' is a list of strings. If 'option' is currently a string, we split it either on /,\s*/ or /\s+/, so "foo bar baz", "foo,bar,baz", and "foo, bar baz" all become ["foo", "bar", "baz"]. 
""" val = getattr(self, option) if val is None: return elif type(val) is StringType: setattr(self, option, re.split(r',\s*|\s+', val)) else: if type(val) is ListType: types = map(type, val) ok = (types == [StringType] * len(val)) else: ok = 0 if not ok: raise DistutilsOptionError, \ "'%s' must be a list of strings (got %r)" % \ (option, val) def _ensure_tested_string (self, option, tester, what, error_fmt, default=None): val = self._ensure_stringlike(option, what, default) if val is not None and not tester(val): raise DistutilsOptionError, \ ("error in '%s' option: " + error_fmt) % (option, val) def ensure_filename (self, option): """Ensure that 'option' is the name of an existing file.""" self._ensure_tested_string(option, os.path.isfile, "filename", "'%s' does not exist or is not a file") def ensure_dirname (self, option): self._ensure_tested_string(option, os.path.isdir, "directory name", "'%s' does not exist or is not a directory") # -- Convenience methods for commands ------------------------------ def get_command_name (self): if hasattr(self, 'command_name'): return self.command_name else: return self.__class__.__name__ def set_undefined_options (self, src_cmd, *option_pairs): """Set the values of any "undefined" options from corresponding option values in some other command object. "Undefined" here means "is None", which is the convention used to indicate that an option has not been changed between 'initialize_options()' and 'finalize_options()'. Usually called from 'finalize_options()' for options that depend on some other command rather than another option of the same command. 'src_cmd' is the other command from which option values will be taken (a command object will be created for it if necessary); the remaining arguments are '(src_option,dst_option)' tuples which mean "take the value of 'src_option' in the 'src_cmd' command object, and copy it to 'dst_option' in the current command object". 
""" # Option_pairs: list of (src_option, dst_option) tuples src_cmd_obj = self.distribution.get_command_obj(src_cmd) src_cmd_obj.ensure_finalized() for (src_option, dst_option) in option_pairs: if getattr(self, dst_option) is None: setattr(self, dst_option, getattr(src_cmd_obj, src_option)) def get_finalized_command (self, command, create=1): """Wrapper around Distribution's 'get_command_obj()' method: find (create if necessary and 'create' is true) the command object for 'command', call its 'ensure_finalized()' method, and return the finalized command object. """ cmd_obj = self.distribution.get_command_obj(command, create) cmd_obj.ensure_finalized() return cmd_obj # XXX rename to 'get_reinitialized_command()'? (should do the # same in dist.py, if so) def reinitialize_command (self, command, reinit_subcommands=0): return self.distribution.reinitialize_command( command, reinit_subcommands) def run_command (self, command): """Run some other command: uses the 'run_command()' method of Distribution, which creates and finalizes the command object if necessary and then invokes its 'run()' method. """ self.distribution.run_command(command) def get_sub_commands (self): """Determine the sub-commands that are relevant in the current distribution (ie., that need to be run). This is based on the 'sub_commands' class attribute: each tuple in that list may include a method that we call to determine if the subcommand needs to be run for the current distribution. Return a list of command names. 
""" commands = [] for (cmd_name, method) in self.sub_commands: if method is None or method(self): commands.append(cmd_name) return commands # -- External world manipulation ----------------------------------- def warn (self, msg): sys.stderr.write("warning: %s: %s\n" % (self.get_command_name(), msg)) def execute (self, func, args, msg=None, level=1): util.execute(func, args, msg, dry_run=self.dry_run) def mkpath (self, name, mode=0777): dir_util.mkpath(name, mode, dry_run=self.dry_run) def copy_file (self, infile, outfile, preserve_mode=1, preserve_times=1, link=None, level=1): """Copy a file respecting verbose, dry-run and force flags. (The former two default to whatever is in the Distribution object, and the latter defaults to false for commands that don't define it.)""" return file_util.copy_file( infile, outfile, preserve_mode, preserve_times, not self.force, link, dry_run=self.dry_run) def copy_tree (self, infile, outfile, preserve_mode=1, preserve_times=1, preserve_symlinks=0, level=1): """Copy an entire directory tree respecting verbose, dry-run, and force flags. """ return dir_util.copy_tree( infile, outfile, preserve_mode,preserve_times,preserve_symlinks, not self.force, dry_run=self.dry_run) def move_file (self, src, dst, level=1): """Move a file respectin dry-run flag.""" return file_util.move_file(src, dst, dry_run = self.dry_run) def spawn (self, cmd, search_path=1, level=1): """Spawn an external command respecting dry-run flag.""" from distutils.spawn import spawn spawn(cmd, search_path, dry_run= self.dry_run) def make_archive (self, base_name, format, root_dir=None, base_dir=None): return archive_util.make_archive( base_name, format, root_dir, base_dir, dry_run=self.dry_run) def make_file (self, infiles, outfile, func, args, exec_msg=None, skip_msg=None, level=1): """Special case of 'execute()' for operations that process one or more input files and generate one output file. 
Works just like 'execute()', except the operation is skipped and a different message printed if 'outfile' already exists and is newer than all files listed in 'infiles'. If the command defined 'self.force', and it is true, then the command is unconditionally run -- does no timestamp checks. """ if exec_msg is None: exec_msg = "generating %s from %s" % \ (outfile, string.join(infiles, ', ')) if skip_msg is None: skip_msg = "skipping %s (inputs unchanged)" % outfile # Allow 'infiles' to be a single string if type(infiles) is StringType: infiles = (infiles,) elif type(infiles) not in (ListType, TupleType): raise TypeError, \ "'infiles' must be a string, or a list or tuple of strings" # If 'outfile' must be regenerated (either because it doesn't # exist, is out-of-date, or the 'force' flag is true) then # perform the action that presumably regenerates it if self.force or dep_util.newer_group (infiles, outfile): self.execute(func, args, exec_msg, level) # Otherwise, print the "skip" message else: log.debug(skip_msg) # make_file () # class Command # XXX 'install_misc' class not currently used -- it was the base class for # both 'install_scripts' and 'install_data', but they outgrew it. It might # still be useful for 'install_headers', though, so I'm keeping it around # for the time being. class install_misc (Command): """Common base class for installing some files in a subdirectory. Currently used by install_data and install_scripts. 
""" user_options = [('install-dir=', 'd', "directory to install the files to")] def initialize_options (self): self.install_dir = None self.outfiles = [] def _install_dir_from (self, dirname): self.set_undefined_options('install', (dirname, 'install_dir')) def _copy_files (self, filelist): self.outfiles = [] if not filelist: return self.mkpath(self.install_dir) for f in filelist: self.copy_file(f, self.install_dir) self.outfiles.append(os.path.join(self.install_dir, f)) def get_outputs (self): return self.outfiles if __name__ == "__main__": print "ok"
apache-2.0
simodalla/django-custom-email-user
setup.py
1
1526
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Packaging script for django-custom-email-user.

Supports the shorthand ``python setup.py publish`` to build an sdist,
upload it, and print the git-tagging commands for the release.
"""

import os
import sys

import custom_email_user

try:
    from setuptools import setup
except ImportError:
    from distutils.core import setup

# Single source of truth for the version: the package itself.
version = custom_email_user.__version__

if sys.argv[-1] == 'publish':
    os.system('python setup.py sdist upload')
    print("You probably want to also tag the version now:")
    print("  git tag -a %s -m 'version %s'" % (version, version))
    print("  git push --tags")
    sys.exit()

# FIX: the original used bare open(...).read(), leaking the file handles
# (ResourceWarning under CPython 3); close them deterministically.
with open('README.rst') as f:
    readme = f.read()
with open('HISTORY.rst') as f:
    history = f.read().replace('.. :changelog:', '')

setup(
    name='django-custom-email-user',
    version=version,
    # TODO(review): "Cus" looks like a truncated description — confirm the
    # intended text before release; left unchanged here to preserve metadata.
    description="""Cus""",
    long_description=readme + '\n\n' + history,
    author='Simone Dalla',
    author_email='simodalla@gmail.com',
    url='https://github.com/simodalla/django-custom-email-user',
    packages=[
        'custom_email_user',
    ],
    include_package_data=True,
    install_requires=[
    ],
    license="BSD",
    zip_safe=False,
    keywords='django-custom-email-user',
    classifiers=[
        'Development Status :: 2 - Pre-Alpha',
        'Framework :: Django',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Natural Language :: English',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
    ],
)
bsd-3-clause
greggian/TapdIn
django/contrib/gis/tests/relatedapp/tests.py
1
15132
import os, unittest from django.contrib.gis.geos import * from django.contrib.gis.db.backend import SpatialBackend from django.contrib.gis.db.models import Collect, Count, Extent, F, Union from django.contrib.gis.tests.utils import no_mysql, no_oracle, no_spatialite from django.conf import settings from models import City, Location, DirectoryEntry, Parcel, Book, Author cities = (('Aurora', 'TX', -97.516111, 33.058333), ('Roswell', 'NM', -104.528056, 33.387222), ('Kecksburg', 'PA', -79.460734, 40.18476), ) class RelatedGeoModelTest(unittest.TestCase): def test01_setup(self): "Setting up for related model tests." for name, state, lon, lat in cities: loc = Location.objects.create(point=Point(lon, lat)) c = City.objects.create(name=name, state=state, location=loc) @no_oracle # TODO: Fix select_related() problems w/Oracle and pagination. def test02_select_related(self): "Testing `select_related` on geographic models (see #7126)." qs1 = City.objects.all() qs2 = City.objects.select_related() qs3 = City.objects.select_related('location') for qs in (qs1, qs2, qs3): for ref, c in zip(cities, qs): nm, st, lon, lat = ref self.assertEqual(nm, c.name) self.assertEqual(st, c.state) self.assertEqual(Point(lon, lat), c.location.point) @no_mysql @no_oracle # Pagination problem is implicated in this test as well. def test03_transform_related(self): "Testing the `transform` GeoQuerySet method on related geographic models." # All the transformations are to state plane coordinate systems using # US Survey Feet (thus a tolerance of 0 implies error w/in 1 survey foot). tol = 0 def check_pnt(ref, pnt): self.assertAlmostEqual(ref.x, pnt.x, tol) self.assertAlmostEqual(ref.y, pnt.y, tol) self.assertEqual(ref.srid, pnt.srid) # Each city transformed to the SRID of their state plane coordinate system. 
transformed = (('Kecksburg', 2272, 'POINT(1490553.98959621 314792.131023984)'), ('Roswell', 2257, 'POINT(481902.189077221 868477.766629735)'), ('Aurora', 2276, 'POINT(2269923.2484839 7069381.28722222)'), ) for name, srid, wkt in transformed: # Doing this implicitly sets `select_related` select the location. # TODO: Fix why this breaks on Oracle. qs = list(City.objects.filter(name=name).transform(srid, field_name='location__point')) check_pnt(GEOSGeometry(wkt, srid), qs[0].location.point) @no_mysql @no_spatialite def test04a_related_extent_aggregate(self): "Testing the `extent` GeoQuerySet aggregates on related geographic models." # This combines the Extent and Union aggregates into one query aggs = City.objects.aggregate(Extent('location__point')) # One for all locations, one that excludes Roswell. all_extent = (-104.528060913086, 33.0583305358887,-79.4607315063477, 40.1847610473633) txpa_extent = (-97.51611328125, 33.0583305358887,-79.4607315063477, 40.1847610473633) e1 = City.objects.extent(field_name='location__point') e2 = City.objects.exclude(name='Roswell').extent(field_name='location__point') e3 = aggs['location__point__extent'] # The tolerance value is to four decimal places because of differences # between the Oracle and PostGIS spatial backends on the extent calculation. tol = 4 for ref, e in [(all_extent, e1), (txpa_extent, e2), (all_extent, e3)]: for ref_val, e_val in zip(ref, e): self.assertAlmostEqual(ref_val, e_val, tol) @no_mysql def test04b_related_union_aggregate(self): "Testing the `unionagg` GeoQuerySet aggregates on related geographic models." # This combines the Extent and Union aggregates into one query aggs = City.objects.aggregate(Union('location__point')) # These are the points that are components of the aggregate geographic # union that is returned. 
p1 = Point(-104.528056, 33.387222) p2 = Point(-97.516111, 33.058333) p3 = Point(-79.460734, 40.18476) # Creating the reference union geometry depending on the spatial backend, # as Oracle will have a different internal ordering of the component # geometries than PostGIS. The second union aggregate is for a union # query that includes limiting information in the WHERE clause (in other # words a `.filter()` precedes the call to `.unionagg()`). if SpatialBackend.oracle: ref_u1 = MultiPoint(p3, p1, p2, srid=4326) ref_u2 = MultiPoint(p3, p2, srid=4326) else: ref_u1 = MultiPoint(p1, p2, p3, srid=4326) ref_u2 = MultiPoint(p2, p3, srid=4326) u1 = City.objects.unionagg(field_name='location__point') u2 = City.objects.exclude(name='Roswell').unionagg(field_name='location__point') u3 = aggs['location__point__union'] self.assertEqual(ref_u1, u1) self.assertEqual(ref_u2, u2) self.assertEqual(ref_u1, u3) def test05_select_related_fk_to_subclass(self): "Testing that calling select_related on a query over a model with an FK to a model subclass works" # Regression test for #9752. l = list(DirectoryEntry.objects.all().select_related()) def test06_f_expressions(self): "Testing F() expressions on GeometryFields." # Constructing a dummy parcel border and getting the City instance for # assigning the FK. b1 = GEOSGeometry('POLYGON((-97.501205 33.052520,-97.501205 33.052576,-97.501150 33.052576,-97.501150 33.052520,-97.501205 33.052520))', srid=4326) pcity = City.objects.get(name='Aurora') # First parcel has incorrect center point that is equal to the City; # it also has a second border that is different from the first as a # 100ft buffer around the City. c1 = pcity.location.point c2 = c1.transform(2276, clone=True) b2 = c2.buffer(100) p1 = Parcel.objects.create(name='P1', city=pcity, center1=c1, center2=c2, border1=b1, border2=b2) # Now creating a second Parcel where the borders are the same, just # in different coordinate systems. 
The center points are also the # the same (but in different coordinate systems), and this time they # actually correspond to the centroid of the border. c1 = b1.centroid c2 = c1.transform(2276, clone=True) p2 = Parcel.objects.create(name='P2', city=pcity, center1=c1, center2=c2, border1=b1, border2=b1) # Should return the second Parcel, which has the center within the # border. qs = Parcel.objects.filter(center1__within=F('border1')) self.assertEqual(1, len(qs)) self.assertEqual('P2', qs[0].name) if not SpatialBackend.mysql: # This time center2 is in a different coordinate system and needs # to be wrapped in transformation SQL. qs = Parcel.objects.filter(center2__within=F('border1')) self.assertEqual(1, len(qs)) self.assertEqual('P2', qs[0].name) # Should return the first Parcel, which has the center point equal # to the point in the City ForeignKey. qs = Parcel.objects.filter(center1=F('city__location__point')) self.assertEqual(1, len(qs)) self.assertEqual('P1', qs[0].name) if not SpatialBackend.mysql: # This time the city column should be wrapped in transformation SQL. qs = Parcel.objects.filter(border2__contains=F('city__location__point')) self.assertEqual(1, len(qs)) self.assertEqual('P1', qs[0].name) def test07_values(self): "Testing values() and values_list() and GeoQuerySets." # GeoQuerySet and GeoValuesQuerySet, and GeoValuesListQuerySet respectively. gqs = Location.objects.all() gvqs = Location.objects.values() gvlqs = Location.objects.values_list() # Incrementing through each of the models, dictionaries, and tuples # returned by the different types of GeoQuerySets. for m, d, t in zip(gqs, gvqs, gvlqs): # The values should be Geometry objects and not raw strings returned # by the spatial database. 
self.failUnless(isinstance(d['point'], SpatialBackend.Geometry)) self.failUnless(isinstance(t[1], SpatialBackend.Geometry)) self.assertEqual(m.point, d['point']) self.assertEqual(m.point, t[1]) def test08_defer_only(self): "Testing defer() and only() on Geographic models." qs = Location.objects.all() def_qs = Location.objects.defer('point') for loc, def_loc in zip(qs, def_qs): self.assertEqual(loc.point, def_loc.point) def test09_pk_relations(self): "Ensuring correct primary key column is selected across relations. See #10757." # Adding two more cities, but this time making sure that their location # ID values do not match their City ID values. loc1 = Location.objects.create(point='POINT (-95.363151 29.763374)') loc2 = Location.objects.create(point='POINT (-96.801611 32.782057)') dallas = City.objects.create(name='Dallas', state='TX', location=loc2) houston = City.objects.create(name='Houston', state='TX', location=loc1) # The expected ID values -- notice the last two location IDs # are out of order. We want to make sure that the related # location ID column is selected instead of ID column for # the city. city_ids = (1, 2, 3, 4, 5) loc_ids = (1, 2, 3, 5, 4) ids_qs = City.objects.order_by('id').values('id', 'location__id') for val_dict, c_id, l_id in zip(ids_qs, city_ids, loc_ids): self.assertEqual(val_dict['id'], c_id) self.assertEqual(val_dict['location__id'], l_id) def test10_combine(self): "Testing the combination of two GeoQuerySets. See #10807." buf1 = City.objects.get(name='Aurora').location.point.buffer(0.1) buf2 = City.objects.get(name='Kecksburg').location.point.buffer(0.1) qs1 = City.objects.filter(location__point__within=buf1) qs2 = City.objects.filter(location__point__within=buf2) combined = qs1 | qs2 names = [c.name for c in combined] self.assertEqual(2, len(names)) self.failUnless('Aurora' in names) self.failUnless('Kecksburg' in names) def test11_geoquery_pickle(self): "Ensuring GeoQuery objects are unpickled correctly. See #10839." 
import pickle from django.contrib.gis.db.models.sql import GeoQuery qs = City.objects.all() q_str = pickle.dumps(qs.query) q = pickle.loads(q_str) self.assertEqual(GeoQuery, q.__class__) # TODO: fix on Oracle -- get the following error because the SQL is ordered # by a geometry object, which Oracle apparently doesn't like: # ORA-22901: cannot compare nested table or VARRAY or LOB attributes of an object type @no_oracle def test12a_count(self): "Testing `Count` aggregate use with the `GeoManager` on geo-fields." # Creating a new City, 'Fort Worth', that uses the same location # as Dallas. dallas = City.objects.get(name='Dallas') ftworth = City.objects.create(name='Fort Worth', state='TX', location=dallas.location) # Count annotation should be 2 for the Dallas location now. loc = Location.objects.annotate(num_cities=Count('city')).get(id=dallas.location.id) self.assertEqual(2, loc.num_cities) def test12b_count(self): "Testing `Count` aggregate use with the `GeoManager` on non geo-fields. See #11087." # Creating some data for the Book/Author non-geo models that # use GeoManager. See #11087. tp = Author.objects.create(name='Trevor Paglen') Book.objects.create(title='Torture Taxi', author=tp) Book.objects.create(title='I Could Tell You But Then You Would Have to be Destroyed by Me', author=tp) Book.objects.create(title='Blank Spots on the Map', author=tp) wp = Author.objects.create(name='William Patry') Book.objects.create(title='Patry on Copyright', author=wp) # Should only be one author (Trevor Paglen) returned by this query, and # the annotation should have 3 for the number of books. Also testing # with a `GeoValuesQuerySet` (see #11489). 
qs = Author.objects.annotate(num_books=Count('books')).filter(num_books__gt=1) vqs = Author.objects.values('name').annotate(num_books=Count('books')).filter(num_books__gt=1) self.assertEqual(1, len(qs)) self.assertEqual(3, qs[0].num_books) self.assertEqual(1, len(vqs)) self.assertEqual(3, vqs[0]['num_books']) # TODO: The phantom model does appear on Oracle. @no_oracle def test13_select_related_null_fk(self): "Testing `select_related` on a nullable ForeignKey via `GeoManager`. See #11381." no_author = Book.objects.create(title='Without Author') b = Book.objects.select_related('author').get(title='Without Author') # Should be `None`, and not a 'dummy' model. self.assertEqual(None, b.author) @no_mysql @no_oracle @no_spatialite def test14_collect(self): "Testing the `collect` GeoQuerySet method and `Collect` aggregate." # Reference query: # SELECT AsText(ST_Collect("relatedapp_location"."point")) FROM "relatedapp_city" LEFT OUTER JOIN # "relatedapp_location" ON ("relatedapp_city"."location_id" = "relatedapp_location"."id") # WHERE "relatedapp_city"."state" = 'TX'; ref_geom = fromstr('MULTIPOINT(-97.516111 33.058333,-96.801611 32.782057,-95.363151 29.763374,-96.801611 32.782057)') c1 = City.objects.filter(state='TX').collect(field_name='location__point') c2 = City.objects.filter(state='TX').aggregate(Collect('location__point'))['location__point__collect'] for coll in (c1, c2): # Even though Dallas and Ft. Worth share same point, Collect doesn't # consolidate -- that's why 4 points in MultiPoint. self.assertEqual(4, len(coll)) self.assertEqual(ref_geom, coll) # TODO: Related tests for KML, GML, and distance lookups. def suite(): s = unittest.TestSuite() s.addTest(unittest.makeSuite(RelatedGeoModelTest)) return s
apache-2.0
Kitware/libkml
third_party/googletest-r108/test/gtest_uninitialized_test.py
15
3254
#!/usr/bin/env python # # Copyright 2008, Google Inc. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
"""Verifies that Google Test warns the user when not initialized properly.""" __author__ = 'wan@google.com (Zhanyong Wan)' import gtest_test_utils import os import sys import unittest IS_WINDOWS = os.name == 'nt' IS_LINUX = os.name == 'posix' if IS_WINDOWS: BUILD_DIRS = [ 'build.dbg\\', 'build.opt\\', 'build.dbg8\\', 'build.opt8\\', ] COMMAND = 'gtest_uninitialized_test_.exe' if IS_LINUX: COMMAND = os.path.join(gtest_test_utils.GetBuildDir(), 'gtest_uninitialized_test_') def Assert(condition): if not condition: raise AssertionError def AssertEq(expected, actual): if expected != actual: print 'Expected: %s' % (expected,) print ' Actual: %s' % (actual,) raise AssertionError def GetOutput(command): """Runs the given command and returns its output.""" stdin, stdout = os.popen2(command, 't') stdin.close() output = stdout.read() stdout.close() return output def TestExitCodeAndOutput(command): """Runs the given command and verifies its exit code and output.""" # Verifies that 'command' exits with code 1. AssertEq(1, gtest_test_utils.GetExitStatus(os.system(command))) output = GetOutput(command) Assert('InitGoogleTest' in output) if IS_WINDOWS: def main(): for build_dir in BUILD_DIRS: command = build_dir + COMMAND print 'Testing with %s . . .' % (command,) TestExitCodeAndOutput(command) return 0 if __name__ == '__main__': main() if IS_LINUX: class GTestUninitializedTest(unittest.TestCase): def testExitCodeAndOutput(self): TestExitCodeAndOutput(COMMAND) if __name__ == '__main__': gtest_test_utils.Main()
bsd-3-clause
Hackplayers/Empire-mod-Hpys-tests
lib/modules/python/collection/linux/sniffer.py
2
11134
class Module: def __init__(self, mainMenu, params=[]): # metadata info about the module, not modified during runtime self.info = { # name for the module that will appear in module menus 'Name': 'PcapSniffer', # list of one or more authors for the module 'Author': ['@Killswitch_GUI'], # more verbose multi-line description of the module 'Description': 'This module will sniff all interfaces on the target, and write in pcap format.', # True if the module needs to run in the background 'Background': False, # File extension to save the file as 'OutputExtension': "pcap", # if the module needs administrative privileges 'NeedsAdmin': True, # True if the method doesn't touch disk/is reasonably opsec safe 'OpsecSafe': False, # the module language 'Language' : 'python', # the minimum language version needed 'MinLanguageVersion' : '2.6', # list of any references/other comments 'Comments': ['For full comments and code: https://gist.github.com/killswitch-GUI/314e79581f2619a18d94c81d53e5466f'] } # any options needed by the module, settable during runtime self.options = { # format: # value_name : {description, required, default_value} 'Agent': { # The 'Agent' option is the only one that MUST be in a module 'Description' : 'Agent to run sniffer on.', 'Required' : True, 'Value' : '' }, 'IpFilter': { 'Description' : 'Set IP to filter on (dst & src).', 'Required' : False, 'Value' : '0' }, 'PortFilter': { 'Description' : 'Set port to filter on (dst & src).', 'Required' : False, 'Value' : '0' }, 'MaxSize': { 'Description' : 'Set max file size to save to disk/memory (MB).', 'Required' : True, 'Value' : '1' }, 'MaxPackets': { 'Description' : 'Set max packets to capture.', 'Required' : True, 'Value' : '100' }, 'InMemory': { 'Description' : 'Store binary data in memory, never drop to disk (WARNING: set MaxSize).', 'Required' : False, 'Value' : 'True' }, 'SavePath': { 'Description' : 'Path of the file to save (Not used if InMemory is True.', 'Required' : True, 'Value' : '/tmp/debug.pcap' } } 
# save off a copy of the mainMenu object to access external functionality # like listeners/agent handlers/etc. self.mainMenu = mainMenu # During instantiation, any settable option parameters # are passed as an object set to the module and the # options dictionary is automatically set. This is mostly # in case options are passed on the command line if params: for param in params: # parameter format is [Name, Value] option, value = param if option in self.options: self.options[option]['Value'] = value def generate(self): savePath = self.options['SavePath']['Value'] inMemory = self.options['InMemory']['Value'] maxPackets = self.options['MaxPackets']['Value'] maxSize = self.options['MaxSize']['Value'] portFilter = self.options['PortFilter']['Value'] ipFilter = self.options['IpFilter']['Value'] if ipFilter != '0': ipFilter = "'" + str(ipFilter) + "'" # the Python script itself, with the command to invoke # for execution appended to the end. Scripts should output # everything to the pipeline for proper parsing. # # the script should be stripped of comments, with a link to any # original reference script included in the comments. 
script = """ import socket, time from datetime import datetime import struct def outputPcapPFile(fileName, inMemory=False): pcapHeader = struct.pack("@IHHIIII",0xa1b2c3d4,2,4,0,0,0x040000,1) if inMemory: return pcapHeader with open(str(fileName), 'wb+') as f: f.write(pcapHeader) def ouputPcapPacket(fileName, pLen, packet, inMemory=False): t0, t1, t2, t3, t4, t5, t6, t7, t8 = time.gmtime() tstamp = time.mktime((t0, t1, t2, t3, t4, t5, 0, 0, 0)) dt = datetime.now() mstamp = dt.microsecond pcapPacket = struct.pack("@IIII",tstamp,mstamp,pLen,pLen) if inMemory: return pcapPacket with open(str(fileName), 'ab+') as f: f.write(pcapPacket) f.write(packet) def parseEthernetHeader(data): dst = struct.unpack('!BBBBBB',data[:6]) # destination host address src = struct.unpack('!BBBBBB',data[6:12]) # source host address nextType = struct.unpack('!H',data[12:14])[0] # IP? ARP? RARP? etc return dst, src, nextType def parseIpHeader(data): ihl = struct.unpack('!B',data[14:15])[0] # 4 bit version 4 bit ihl tos = struct.unpack('!B',data[15:16])[0] # Type of service totalLen = struct.unpack('!H',data[16:18])[0] # IP header length ident = struct.unpack('!H',data[18:20])[0] # IP ident fragFlags = struct.unpack('!H',data[20:22])[0] # Frag_and_flags ttl = struct.unpack('!B',data[22:23])[0] # Packet Time-to-Live proto = struct.unpack('!B',data[23:24])[0] # Next protocol checksum = struct.unpack('!H',data[24:26])[0] # checksum sourceIp = struct.unpack('!I',data[26:30])[0] # Source IP addr destIp = struct.unpack('!I',data[30:34])[0] # Dest IP addr sourceIpStr = parseIpAddr(data[26:30]) # hton ip destIpStr = parseIpAddr(data[30:34]) # hton ip return proto, sourceIpStr, destIpStr def parseTcpHeader(data): sourcePort = struct.unpack('!H',data[34:36])[0] # source port (set pointer to end of IP Header) destPort = struct.unpack('!H',data[36:38])[0] # destination port sequence = struct.unpack('!I',data[38:42])[0] # sequence number - 32 bits acknowledge = struct.unpack('!I',data[42:46])[0] # 
acknowledgement number - 32 bits return sourcePort, destPort def parseUdpHeader(data): sourcePort = struct.unpack('!H',data[34:36])[0] # source port (set pointer to end of IP Header) destPort = struct.unpack('!H',data[36:38])[0] # destination port udpLength = struct.unpack('!H',data[38:40])[0] # Udp packet length udpChecksum = struct.unpack('!H',data[40:42])[0] # Udp checksum (optional) return sourcePort, destPort def parseIcmpHeader(data): typeCode = struct.unpack('!H',data[34:36])[0] # ICMP Error type code = struct.unpack('!H',data[36:38])[0] # Type sub code checksum = struct.unpack('!H',data[38:40])[0] # checksum idCode = struct.unpack('!H',data[40:42])[0] # ICMP ID code seq = struct.unpack('!H',data[42:44])[0] # Seq number def parseIpAddr(data): ipOct = [] ipOct.append(str(struct.unpack('!B', data[0:1])[0])) # octet 1 ipOct.append(str(struct.unpack('!B', data[1:2])[0])) # octet 2 ipOct.append(str(struct.unpack('!B', data[2:3])[0])) # octet 3 ipOct.append(str(struct.unpack('!B', data[3:4])[0])) # octet 4 ipStr = '.'.join(ipOct) return ipStr def socketSniffer(fileName,ipFilter,portFilter,maxSize, maxPackets, inMemory): try: s = socket.socket(socket.AF_PACKET, socket.SOCK_RAW ,socket.ntohs(0x0003)) except socket.error , msg: return # build pcap file header and output memoryPcap = '' if inMemory: memoryPcap += outputPcapPFile(fileName, inMemory=inMemory) else: outputPcapPFile(fileName, inMemory=inMemory) packetCounter = 0 sizeCounter = 0 maxSize = maxSize * 1024 * 1024 while (packetCounter < maxPackets): if (sizeCounter > maxSize): break packet = s.recvfrom(65565) pLen = len(packet[0]) if (ipFilter or portFilter): packetOut = False dst, src, nextType = parseEthernetHeader(packet[0]) if (hex(nextType) == hex(0x800)): proto, sourceIpStr, destIpStr = parseIpHeader(packet[0]) # ICMP (1) # TCP (6) # UDP (17) if (proto == 6): sourcePort, destPort = parseTcpHeader(packet[0]) if ipFilter and portFilter: if (ipFilter == sourceIpStr or ipFilter == destIpStr) and (portFilter 
== sourcePort or portFilter == destPort): packetOut = True elif (ipFilter == sourceIpStr or ipFilter == destIpStr): packetOut = True elif (portFilter == sourcePort or portFilter == destPort): packetOut = True elif (proto == 17): sourcePort, destPort = parseUdpHeader(packet[0]) if ipFilter and portFilter: if (ipFilter == sourceIpStr or ipFilter == destIpStr) and (portFilter == sourcePort or portFilter == destPort): packetOut = True elif (ipFilter == sourceIpStr or ipFilter == destIpStr): packetOut = True elif (portFilter == sourcePort or portFilter == destPort): packetOut = True else: if (ipFilter == sourceIpStr or ipFilter == destIpStr): packetOut = True if packetOut: if inMemory: memoryPcap += ouputPcapPacket(fileName ,pLen, packet[0], inMemory=inMemory) memoryPcap += packet[0] sizeCounter += pLen packetCounter += 1 else: ouputPcapPacket(fileName ,pLen, packet[0], inMemory=inMemory) sizeCounter += pLen packetCounter += 1 else: if inMemory: memoryPcap += ouputPcapPacket(fileName ,pLen, packet[0], inMemory=inMemory) memoryPcap += packet[0] sizeCounter += pLen packetCounter += 1 else: ouputPcapPacket(fileName ,pLen, packet[0], inMemory=inMemory) sizeCounter += pLen packetCounter += 1 try: if inMemory: print memoryPcap else: f = open('%s', 'rb') data = base64.b64encode(f.read()) f.close() run_command('rm -f %s') print data except Exception as e: print e fileNameSave = '%s' ipFilter = %s portFilter = %s maxSize = %s maxPackets = %s inMemory = %s socketSniffer(fileNameSave,ipFilter,portFilter,maxSize,maxPackets, inMemory) """ % (savePath, savePath, savePath, ipFilter, portFilter, maxSize, maxPackets, inMemory) return script
bsd-3-clause
metasmile/awesome-strings
raw/chromium/grit/grit/format/c_format.py
37
2775
#!/usr/bin/env python # Copyright (c) 2012 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Formats as a .C file for compilation. """ import os import re import types from grit import util def _FormatHeader(root, output_dir): """Returns the required preamble for C files.""" # Find the location of the resource header file, so that we can include # it. resource_header = 'resource.h' # fall back to this for output in root.GetOutputFiles(): if output.attrs['type'] == 'rc_header': resource_header = os.path.abspath(output.GetOutputFilename()) resource_header = util.MakeRelativePath(output_dir, resource_header) return """// This file is automatically generated by GRIT. Do not edit. #include "%s" // All strings are UTF-8 """ % (resource_header) # end _FormatHeader() function def Format(root, lang='en', output_dir='.'): """Outputs a C switch statement representing the string table.""" from grit.node import message assert isinstance(lang, types.StringTypes) yield _FormatHeader(root, output_dir) yield 'const char* GetString(int id) {\n switch (id) {' for item in root.ActiveDescendants(): with item: if isinstance(item, message.MessageNode): yield _FormatMessage(item, lang) yield '\n default:\n return 0;\n }\n}' def _HexToOct(match): "Return the octal form of the hex numbers" hex = match.group("hex") result = "" while len(hex): next_num = int(hex[2:4], 16) result += "\\" + '%03d' % int(oct(next_num), 10) hex = hex[4:] return match.group("escaped_backslashes") + result def _FormatMessage(item, lang): """Format a single <message> element.""" message = item.ws_at_start + item.Translate(lang) + item.ws_at_end # output message with non-ascii chars escaped as octal numbers # C's grammar allows escaped hexadecimal numbers to be infinite, # but octal is always of the form \OOO message = message.encode('utf-8').encode('string_escape') # an escaped char is (\xHH)+ but only if the initial # 
backslash is not escaped. not_a_backslash = r"(^|[^\\])" # beginning of line or a non-backslash char escaped_backslashes = not_a_backslash + r"(\\\\)*" hex_digits = r"((\\x)[0-9a-f]{2})+" two_digit_hex_num = re.compile( r"(?P<escaped_backslashes>%s)(?P<hex>%s)" % (escaped_backslashes, hex_digits)) message = two_digit_hex_num.sub(_HexToOct, message) # unescape \ (convert \\ back to \) message = message.replace('\\\\', '\\') message = message.replace('"', '\\"') message = util.LINEBREAKS.sub(r'\\n', message) name_attr = item.GetTextualIds()[0] return '\n case %s:\n return "%s";' % (name_attr, message)
mit
ceph/ceph-deploy
docs/source/conf.py
1
8511
# -*- coding: utf-8 -*- # # ceph-deploy documentation build configuration file, created by # sphinx-quickstart on Mon Oct 21 09:32:42 2013. # # This file is execfile()d with the current directory set to its # containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import sys import os # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. sys.path.append(os.path.abspath('_themes')) sys.path.insert(0, os.path.abspath('../..')) import ceph_deploy # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. #needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ 'sphinx.ext.autodoc', 'sphinx.ext.intersphinx', ] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. #source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'contents' # General information about the project. project = u'ceph-deploy' copyright = u'2013, Inktank' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = ceph_deploy.__version__ # The full version, including alpha/beta/rc tags. release = ceph_deploy.__version__ # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. 
#language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = [] # The reST default role (used for this markup: `text`) to use for all # documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. #show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. #modindex_common_prefix = [] # If true, keep warnings as "system message" paragraphs in the built documents. #keep_warnings = False # -- Options for HTML output ---------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = 'ceph' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. #html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. html_theme_path = ['_themes'] # The name for this set of Sphinx documents. If None, it defaults to # "<project> v<release> documentation". #html_title = None # A shorter title for the navigation bar. Default is the same as html_title. #html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. 
#html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. #html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # Add any extra paths that contain custom files (such as robots.txt or # .htaccess) here, relative to this directory. These files are copied # directly to the root of the documentation. #html_extra_path = [] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. #html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. html_use_smartypants = False # Custom sidebar templates, maps document names to template names. #html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. #html_domain_indices = True # If false, no index is generated. #html_use_index = True # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. #html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. #html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. #html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a <link> tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). 
#html_file_suffix = None # Output file base name for HTML help builder. htmlhelp_basename = 'ceph-deploydoc' # -- Options for LaTeX output --------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). #'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). #'pointsize': '10pt', # Additional stuff for the LaTeX preamble. #'preamble': '', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ ('index', 'ceph-deploy.tex', u'ceph-deploy Documentation', u'Inktank', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. #latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. #latex_use_parts = False # If true, show page references after internal links. #latex_show_pagerefs = False # If true, show URL addresses after external links. #latex_show_urls = False # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. #latex_domain_indices = True # -- Options for manual page output --------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ ('index', 'ceph-deploy', u'ceph-deploy Documentation', [u'Inktank'], 1) ] # If true, show URL addresses after external links. #man_show_urls = False # -- Options for Texinfo output ------------------------------------------- # Grouping the document tree into Texinfo files. 
List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ ('index', 'ceph-deploy', u'ceph-deploy Documentation', u'Inktank', 'ceph-deploy', 'One line description of project.', 'Miscellaneous'), ] # Documents to append as an appendix to all manuals. #texinfo_appendices = [] # If false, no module index is generated. #texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. #texinfo_show_urls = 'footnote' # If true, do not generate a @detailmenu in the "Top" node's menu. #texinfo_no_detailmenu = False # XXX Uncomment when we are ready to link to ceph docs # Example configuration for intersphinx: refer to the Python standard library. #intersphinx_mapping = {'http://docs.python.org/': None}
mit
nitzmahone/ansible
test/units/modules/network/f5/test_bigip_profile_tcp.py
21
3343
# -*- coding: utf-8 -*- # # Copyright: (c) 2017, F5 Networks Inc. # GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import (absolute_import, division, print_function) __metaclass__ = type import os import json import pytest import sys if sys.version_info < (2, 7): pytestmark = pytest.mark.skip("F5 Ansible modules require Python >= 2.7") from ansible.module_utils.basic import AnsibleModule try: from library.modules.bigip_profile_tcp import ApiParameters from library.modules.bigip_profile_tcp import ModuleParameters from library.modules.bigip_profile_tcp import ModuleManager from library.modules.bigip_profile_tcp import ArgumentSpec # In Ansible 2.8, Ansible changed import paths. from test.units.compat import unittest from test.units.compat.mock import Mock from test.units.compat.mock import patch from test.units.modules.utils import set_module_args except ImportError: from ansible.modules.network.f5.bigip_profile_tcp import ApiParameters from ansible.modules.network.f5.bigip_profile_tcp import ModuleParameters from ansible.modules.network.f5.bigip_profile_tcp import ModuleManager from ansible.modules.network.f5.bigip_profile_tcp import ArgumentSpec # Ansible 2.8 imports from units.compat import unittest from units.compat.mock import Mock from units.compat.mock import patch from units.modules.utils import set_module_args fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures') fixture_data = {} def load_fixture(name): path = os.path.join(fixture_path, name) if path in fixture_data: return fixture_data[path] with open(path) as f: data = f.read() try: data = json.loads(data) except Exception: pass fixture_data[path] = data return data class TestParameters(unittest.TestCase): def test_module_parameters(self): args = dict( name='foo', parent='bar', idle_timeout='500' ) p = ModuleParameters(params=args) assert p.name == 'foo' assert p.parent == '/Common/bar' assert p.idle_timeout == 500 def 
test_api_parameters(self): args = load_fixture('load_ltm_profile_tcp_1.json') p = ApiParameters(params=args) assert p.name == 'foo' assert p.idle_timeout == 300 class TestManager(unittest.TestCase): def setUp(self): self.spec = ArgumentSpec() def test_create(self, *args): # Configure the arguments that would be sent to the Ansible module set_module_args(dict( name='foo', parent='bar', idle_timeout=500, password='password', server='localhost', user='admin' )) module = AnsibleModule( argument_spec=self.spec.argument_spec, supports_check_mode=self.spec.supports_check_mode ) mm = ModuleManager(module=module) # Override methods to force specific logic in the module to happen mm.exists = Mock(return_value=False) mm.create_on_device = Mock(return_value=True) results = mm.exec_module() assert results['changed'] is True assert results['idle_timeout'] == 500
gpl-3.0
danielballan/scikit-xray
skbeam/core/recip.py
3
12753
# ###################################################################### # Copyright (c) 2014, Brookhaven Science Associates, Brookhaven # # National Laboratory. All rights reserved. # # # # Redistribution and use in source and binary forms, with or without # # modification, are permitted provided that the following conditions # # are met: # # # # * Redistributions of source code must retain the above copyright # # notice, this list of conditions and the following disclaimer. # # # # * Redistributions in binary form must reproduce the above copyright # # notice this list of conditions and the following disclaimer in # # the documentation and/or other materials provided with the # # distribution. # # # # * Neither the name of the Brookhaven Science Associates, Brookhaven # # National Laboratory nor the names of its contributors may be used # # to endorse or promote products derived from this software without # # specific prior written permission. # # # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS # # FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE # # COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, # # INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES # # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) # # HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, # # STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OTHERWISE) ARISING # # IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # # POSSIBILITY OF SUCH DAMAGE. # ######################################################################## """ This module is for functions and classes specific to reciprocal space calculations. 
""" from __future__ import absolute_import, division, print_function import numpy as np from .utils import verbosedict from collections import namedtuple import time try: from pyFAI import geometry as geo except ImportError: geo = None import logging logger = logging.getLogger(__name__) def process_to_q(setting_angles, detector_size, pixel_size, calibrated_center, dist_sample, wavelength, ub, frame_mode=None): """ This will compute the hkl values for all pixels in a shape specified by detector_size. Parameters ---------- setting_angles : ndarray six angles of all the images - Required shape is [num_images][6] and required type is something that can be cast to a 2D numpy array Angle order: delta, theta, chi, phi, mu, gamma (degrees) detector_size : tuple 2 element tuple defining the number of pixels in the detector. Order is (num_columns, num_rows) pixel_size : tuple 2 element tuple defining the size of each pixel in mm. Order is (column_pixel_size, row_pixel_size). If not in mm, must be in the same units as `dist_sample` calibrated_center : tuple 2 element tuple defining the center of the detector in pixels. Order is (column_center, row_center)(x y) dist_sample : float distance from the sample to the detector (mm). If not in mm, must be in the same units as `pixel_size` wavelength : float wavelength of incident radiation (Angstroms) ub : ndarray UB matrix (orientation matrix) 3x3 matrix frame_mode : str, optional Frame mode defines the data collection mode and thus the desired output from this function. Defaults to hkl mode (frame_mode=4) 'theta' : Theta axis frame. 'phi' : Phi axis frame. 'cart' : Crystal cartesian frame. 'hkl' : Reciprocal lattice units frame. See the `process_to_q.frame_mode` attribute for an exact list of valid options. 
Returns ------- hkl : ndarray (Qx, Qy, Qz) - HKL values shape is [num_images * num_rows * num_columns][3] Notes ----- Six angles of an image: (delta, theta, chi, phi, mu, gamma ) These axes are defined according to the following references. References: text [1]_, text [2]_ .. [1] M. Lohmeier and E.Vlieg, "Angle calculations for a six-circle surface x-ray diffractometer," J. Appl. Cryst., vol 26, pp 706-716, 1993. .. [2] E. Vlieg, "A (2+3)-Type surface diffractometer: Mergence of the z-axis and (2+2)-Type geometries," J. Appl. Cryst., vol 31, pp 198-203, 1998. """ try: from ..ext import ctrans except ImportError: raise NotImplementedError( "ctrans is not available on your platform. See" "https://github.com/scikit-beam/scikit-beam/issues/418" "to follow updates to this problem.") # Set default threads # set default frame_mode if frame_mode is None: frame_mode = 4 else: str_to_int = verbosedict((k, j + 1) for j, k in enumerate(process_to_q.frame_mode)) frame_mode = str_to_int[frame_mode] # ensure the ub matrix is an array ub = np.asarray(ub) # ensure setting angles is a 2-D setting_angles = np.atleast_2d(setting_angles) if setting_angles.ndim != 2: raise ValueError('setting_angles is expected to be a 2-D array with' ' dimensions [num_images][num_angles]. You provided ' 'an array with dimensions {0}' ''.format(setting_angles.shape)) if setting_angles.shape[1] != 6: raise ValueError('It is expected that there should be six angles in ' 'the setting_angles parameter. 
You provided {0}' ' angles.'.format(setting_angles.shape[1])) # *********** Converting to Q ************** # starting time for the process t1 = time.time() # ctrans - c routines for fast data analysis hkl = ctrans.ccdToQ(angles=setting_angles * np.pi / 180.0, mode=frame_mode, ccd_size=(detector_size), ccd_pixsize=(pixel_size), ccd_cen=(calibrated_center), dist=dist_sample, wavelength=wavelength, UBinv=np.matrix(ub).I) # ending time for the process t2 = time.time() logger.info("Processing time for {0} {1} x {2} images took {3} seconds." "".format(setting_angles.shape[0], detector_size[0], detector_size[1], (t2 - t1))) return hkl # Assign frame_mode as an attribute to the process_to_q function so that the # autowrapping knows what the valid options are process_to_q.frame_mode = ['theta', 'phi', 'cart', 'hkl'] def hkl_to_q(hkl_arr): """ This module compute the reciprocal space (q) values from known HKL array for each pixel of the detector for all the images Parameters ---------- hkl_arr : ndarray (Qx, Qy, Qz) - HKL array shape is [num_images * num_rows * num_columns][3] Returns ------- q_val : ndarray Reciprocal values for each pixel for all images shape is [num_images * num_rows * num_columns] """ return np.linalg.norm(hkl_arr, axis=1) def calibrated_pixels_to_q(detector_size, pyfai_kwargs): """ For a given detector and pyfai calibrated geometry give back the q value for each pixel in the detector. Parameters ----------- detector_size : tuple 2 element tuple defining the number of pixels in the detector. 
Order is (num_columns, num_rows) pyfai_kwargs: dict The dictionary of pyfai geometry kwargs, given by pyFAI's calibration Ex: dist, poni1, poni2, rot1, rot2, rot3, splineFile, wavelength, detector, pixel1, pixel2 Returns ------- q_val : ndarray Reciprocal values for each pixel shape is [num_rows * num_columns] """ if geo is None: raise RuntimeError("You must have pyFAI installed to use this " "function.") a = geo.Geometry(**pyfai_kwargs) return a.qArray(detector_size) gisaxs_output = namedtuple( 'gisaxs_output', ['alpha_i', 'theta_f', 'alpha_f', 'tilt_angle', 'qx', 'qy', 'qz', 'qr'] ) def gisaxs(incident_beam, reflected_beam, pixel_size, detector_size, dist_sample, wavelength, theta_i=0.0): """ This function will provide scattering wave vector(q) components(x, y, z), q parallel and incident and reflected angles for grazing-incidence small angle X-ray scattering (GISAXS) geometry. Parameters ---------- incident_beam : tuple x and y co-ordinates of the incident beam in pixels reflected_beam : tuple x and y co-ordinates of the reflected beam in pixels pixel_size : tuple pixel_size in um detector_size: tuple 2 element tuple defining no. 
of pixels(size) in the detector X and Y direction dist_sample : float sample to detector distance, in meters wavelength : float wavelength of the x-ray beam in Angstroms theta_i : float, optional out of plane angle, default 0.0 Returns ------- namedtuple `gisaxs_output` object is returned This `gisaxs_output` object contains, in this order: - alpha_i : float incident angle - theta_f : array out of plane angle shape (detector_size[0], detector_size[1]) - alpha_f : array exit angle shape (detector_size[0], detector_size[1]) - tilt_angle : float tilt angle - qx : array x component of the scattering wave vector shape (detector_size[0], detector_size[1]) - qy : array y component of the scattering wave vector shape (detector_size[0], detector_size[1]) - qz : array z component of the scattering wave vector shape (detector_size[0], detector_size[1]) - qr : array q parallel component shape (detector_size[0], detector_size[1]) Notes ----- This implementation is based on published work. [1]_ References ---------- .. [1] R. Lazzari, "IsGISAXS: a program for grazing-incidence small- angle X-ray scattering analysis of supported islands," J. Appl. Cryst., vol 35, p 406-421, 2002. """ inc_x, inc_y = incident_beam refl_x, refl_y = reflected_beam # convert pixel_size to meters pixel_size = np.asarray(pixel_size) * 10 ** (-6) # tilt angle tilt_angle = np.arctan2((refl_x - inc_x) * pixel_size[0], (refl_y - inc_y) * pixel_size[1]) # incident angle alpha_i = np.arctan2((refl_y - inc_y) * pixel_size[1], dist_sample) / 2. 
y, x = np.indices(detector_size) # exit angle alpha_f = np.arctan2((y - inc_y) * pixel_size[1], dist_sample) - alpha_i # out of plane angle two_theta = np.arctan2((x - inc_x) * pixel_size[0], dist_sample) theta_f = two_theta / 2 - theta_i # wave number wave_number = 2*np.pi/wavelength # x component qx = (np.cos(alpha_f) * np.cos(2*theta_f) - np.cos(alpha_i) * np.cos(2*theta_i)) * wave_number # y component # the variables post-fixed with an underscore are intermediate steps qy_ = (np.cos(alpha_f) * np.sin(2*theta_f) - np.cos(alpha_i) * np.sin(2*theta_i)) qz_ = np.sin(alpha_f) + np.sin(alpha_i) qy = (qz_ * np.sin(tilt_angle) + qy_ * np.cos(tilt_angle)) * wave_number # z component qz = (qz_ * np.cos(tilt_angle) - qy_ * np.sin(tilt_angle)) * wave_number # q parallel qr = np.sqrt(qx**2 + qy**2) return gisaxs_output(alpha_i, theta_f, alpha_f, tilt_angle, qx, qy, qz, qr)
bsd-3-clause
PawarPawan/h2o-v3
py2/testdir_single_jvm/test_import2.py
20
5372
import unittest, sys sys.path.extend(['.','..','../..','py']) import string import h2o2 as h2o import h2o_cmd, h2o_import as h2i, h2o_browse as h2b from h2o_test import find_file, dump_json, verboseprint expectedZeros = [0, 4914, 656, 24603, 38665, 124, 13, 5, 1338, 51, 320216, 551128, 327648, 544044, 577981, 573487, 576189, 568616, 579415, 574437, 580907, 580833, 579865, 548378, 568602, 551041, 563581, 580413, 581009, 578167, 577590, 579113, 576991, 571753, 580174, 547639, 523260, 559734, 580538, 578423, 579926, 580066, 465765, 550842, 555346, 528493, 535858, 579401, 579121, 580893, 580714, 565439, 567206, 572262, 0] DO_2X_SRC = False DO_TEST_BAD_COLNAME = False DO_TEST_BAD_COL_LENGTH = False DO_IMPORT_PARSE = True SINGLE_CSVFILENAME = 'covtype.data.sorted' SINGLE_CSVFILENAME = 'covtype.data' def assertEqualMsg(a, b): assert a == b, "%s %s" % (a, b) def parseKeyIndexedCheck(frames_result, multiplyExpected, expectedColumnNames): # get the name of the frame? print "" frame = frames_result['frames'][0] rows = frame['rows'] columns = frame['columns'] for i,c in enumerate(columns): label = c['label'] stype = c['type'] missing = c['missing_count'] zeros = c['zero_count'] domain = c['domain'] print "column: %s label: %s type: %s missing: %s zeros: %s domain: %s" %\ (i,label,stype,missing,zeros,domain) # files are concats of covtype. so multiply expected # assertEqualMsg(zeros, expectedZeros[i] * multiplyExpected) assertEqualMsg(label, expectedColumnNames[i]) assertEqualMsg(stype,"int") # assertEqualMsg(missing, 0) assertEqualMsg(domain, None) class Basic(unittest.TestCase): def tearDown(self): h2o.check_sandbox_for_errors() @classmethod def setUpClass(cls): h2o.init() @classmethod def tearDownClass(cls): h2o.tear_down_cloud() def test_parse_covtype(self): tryList = [ (['covtype.data', 'covtype.shuffled.data', 'covtype.sorted.data'], 3, 30), ] for (csvFilenameList, multiplyExpected, timeoutSecs) in tryList: # h2o-dev doesn't take ../.. type paths? 
make find_file return absolute pathj a_node = h2o.nodes[0] # import_result = a_node.import_files(path=find_file("smalldata/logreg/prostate.csv")) importFolderPath = "/home/0xdiag/datasets/standard" # keep a list of the keys you import, to feed to parse kList = [] for csvFilename in csvFilenameList: csvPathname = importFolderPath + "/" + csvFilename if not DO_IMPORT_PARSE: import_result = a_node.import_files(path=csvPathname) k = import_result['keys'][0] frames_result = a_node.frames(key=k, row_count=5, timeoutSecs=timeoutSecs) kList.append(k) # print "frames_result from the first import_result key", dump_json(frames_result) print "I think I imported these keys:", kList # what happens if I put the kList in twice? can it touch the same source file without lock issues? if DO_2X_SRC: kList2 = kList + kList multiplyExpected = 2 * multiplyExpected else: kList2 = kList # try passing column names also. # questions to try # what if you pass missing (,,) # what if you pass too many, too few, or some with same name? # let's try all the characters basename = string.printable # remove the ',' in the string (remember strings are immutable..can't use .replace to remove # other characters are illegal? 
[] ' if DO_TEST_BAD_COLNAME: basename = basename.translate(None, ",[]!#$%&'()*+-./:;<=>?@\^_`{|}~" + '"') else: basename = "abcd012345" colLength = 1 if DO_TEST_BAD_COL_LENGTH else 55 expectedColumnNames = map(lambda x: basename + "_" + str(x+1), range(colLength)) # need to quote each column name in the string passed column_names = '[' + ','.join(map((lambda x: '"' + x + '"'), expectedColumnNames)) + ']' kwargs = { 'column_names': column_names, 'intermediateResults': False, } print kwargs if DO_IMPORT_PARSE: multiplyExpected = 1 csvPathname = importFolderPath + "/" + SINGLE_CSVFILENAME parse_result = h2i.import_parse(path=csvPathname, timeoutSecs=timeoutSecs, **kwargs) else: parse_result = a_node.parse(key=kList2, timeoutSecs=timeoutSecs, **kwargs) k = parse_result['frames'][0]['frame_id']['name'] # print "parse_result:", dump_json(parse_result) frames_result = a_node.frames(key=k, row_count=5) # print "frames_result from the first parse_result key", dump_json(frames_result) # we doubled the keyList, from what was in tryList parseKeyIndexedCheck(frames_result, multiplyExpected, expectedColumnNames) h2o.nodes[0].log_download() if __name__ == '__main__': h2o.unit_main()
apache-2.0
tarunkapadia93/kernel_msm
tools/perf/scripts/python/futex-contention.py
11261
1486
# futex contention # (c) 2010, Arnaldo Carvalho de Melo <acme@redhat.com> # Licensed under the terms of the GNU GPL License version 2 # # Translation of: # # http://sourceware.org/systemtap/wiki/WSFutexContention # # to perf python scripting. # # Measures futex contention import os, sys sys.path.append(os.environ['PERF_EXEC_PATH'] + '/scripts/python/Perf-Trace-Util/lib/Perf/Trace') from Util import * process_names = {} thread_thislock = {} thread_blocktime = {} lock_waits = {} # long-lived stats on (tid,lock) blockage elapsed time process_names = {} # long-lived pid-to-execname mapping def syscalls__sys_enter_futex(event, ctxt, cpu, s, ns, tid, comm, nr, uaddr, op, val, utime, uaddr2, val3): cmd = op & FUTEX_CMD_MASK if cmd != FUTEX_WAIT: return # we don't care about originators of WAKE events process_names[tid] = comm thread_thislock[tid] = uaddr thread_blocktime[tid] = nsecs(s, ns) def syscalls__sys_exit_futex(event, ctxt, cpu, s, ns, tid, comm, nr, ret): if thread_blocktime.has_key(tid): elapsed = nsecs(s, ns) - thread_blocktime[tid] add_stats(lock_waits, (tid, thread_thislock[tid]), elapsed) del thread_blocktime[tid] del thread_thislock[tid] def trace_begin(): print "Press control+C to stop and show the summary" def trace_end(): for (tid, lock) in lock_waits: min, max, avg, count = lock_waits[tid, lock] print "%s[%d] lock %x contended %d times, %d avg ns" % \ (process_names[tid], tid, lock, count, avg)
gpl-2.0
fbradyirl/home-assistant
homeassistant/components/zha/core/channels/__init__.py
1
12998
""" Channels module for Zigbee Home Automation. For more details about this component, please refer to the documentation at https://home-assistant.io/components/zha/ """ import asyncio from concurrent.futures import TimeoutError as Timeout from enum import Enum from functools import wraps import logging from random import uniform from homeassistant.core import callback from homeassistant.helpers.dispatcher import async_dispatcher_send from ..const import ( CHANNEL_ATTRIBUTE, CHANNEL_EVENT_RELAY, CHANNEL_ZDO, REPORT_CONFIG_DEFAULT, REPORT_CONFIG_MAX_INT, REPORT_CONFIG_MIN_INT, REPORT_CONFIG_RPT_CHANGE, SIGNAL_ATTR_UPDATED, ) from ..helpers import LogMixin, get_attr_id_by_name, safe_read from ..registries import CLUSTER_REPORT_CONFIGS _LOGGER = logging.getLogger(__name__) def parse_and_log_command(channel, tsn, command_id, args): """Parse and log a zigbee cluster command.""" cmd = channel.cluster.server_commands.get(command_id, [command_id])[0] channel.debug( "received '%s' command with %s args on cluster_id '%s' tsn '%s'", cmd, args, channel.cluster.cluster_id, tsn, ) return cmd def decorate_command(channel, command): """Wrap a cluster command to make it safe.""" @wraps(command) async def wrapper(*args, **kwds): from zigpy.exceptions import DeliveryError try: result = await command(*args, **kwds) channel.debug( "executed command: %s %s %s %s", command.__name__, "{}: {}".format("with args", args), "{}: {}".format("with kwargs", kwds), "{}: {}".format("and result", result), ) return result except (DeliveryError, Timeout) as ex: channel.debug("command failed: %s exception: %s", command.__name__, str(ex)) return ex return wrapper class ChannelStatus(Enum): """Status of a channel.""" CREATED = 1 CONFIGURED = 2 INITIALIZED = 3 class ZigbeeChannel(LogMixin): """Base channel for a Zigbee cluster.""" CHANNEL_NAME = None REPORT_CONFIG = () def __init__(self, cluster, device): """Initialize ZigbeeChannel.""" self._channel_name = cluster.ep_attribute if self.CHANNEL_NAME: 
self._channel_name = self.CHANNEL_NAME self._generic_id = "channel_0x{:04x}".format(cluster.cluster_id) self._cluster = cluster self._zha_device = device self._unique_id = "{}:{}:0x{:04x}".format( str(device.ieee), cluster.endpoint.endpoint_id, cluster.cluster_id ) # this keeps logs consistent with zigpy logging self._log_id = "0x{:04x}:{}:0x{:04x}".format( device.nwk, cluster.endpoint.endpoint_id, cluster.cluster_id ) self._report_config = CLUSTER_REPORT_CONFIGS.get( self._cluster.cluster_id, self.REPORT_CONFIG ) self._status = ChannelStatus.CREATED self._cluster.add_listener(self) @property def generic_id(self): """Return the generic id for this channel.""" return self._generic_id @property def unique_id(self): """Return the unique id for this channel.""" return self._unique_id @property def cluster(self): """Return the zigpy cluster for this channel.""" return self._cluster @property def device(self): """Return the device this channel is linked to.""" return self._zha_device @property def name(self) -> str: """Return friendly name.""" return self._channel_name @property def status(self): """Return the status of the channel.""" return self._status def set_report_config(self, report_config): """Set the reporting configuration.""" self._report_config = report_config async def bind(self): """Bind a zigbee cluster. This also swallows DeliveryError exceptions that are thrown when devices are unreachable. """ from zigpy.exceptions import DeliveryError try: res = await self.cluster.bind() self.debug("bound '%s' cluster: %s", self.cluster.ep_attribute, res[0]) except (DeliveryError, Timeout) as ex: self.debug( "Failed to bind '%s' cluster: %s", self.cluster.ep_attribute, str(ex) ) async def configure_reporting( self, attr, report_config=( REPORT_CONFIG_MIN_INT, REPORT_CONFIG_MAX_INT, REPORT_CONFIG_RPT_CHANGE, ), ): """Configure attribute reporting for a cluster. This also swallows DeliveryError exceptions that are thrown when devices are unreachable. 
""" from zigpy.exceptions import DeliveryError attr_name = self.cluster.attributes.get(attr, [attr])[0] kwargs = {} if self.cluster.cluster_id >= 0xFC00 and self.device.manufacturer_code: kwargs["manufacturer"] = self.device.manufacturer_code min_report_int, max_report_int, reportable_change = report_config try: res = await self.cluster.configure_reporting( attr, min_report_int, max_report_int, reportable_change, **kwargs ) self.debug( "reporting '%s' attr on '%s' cluster: %d/%d/%d: Result: '%s'", attr_name, self.cluster.ep_attribute, min_report_int, max_report_int, reportable_change, res, ) except (DeliveryError, Timeout) as ex: self.debug( "failed to set reporting for '%s' attr on '%s' cluster: %s", attr_name, self.cluster.ep_attribute, str(ex), ) async def async_configure(self): """Set cluster binding and attribute reporting.""" # Xiaomi devices don't need this and it disrupts pairing if self._zha_device.manufacturer != "LUMI": await self.bind() if self.cluster.cluster_id not in self.cluster.endpoint.out_clusters: for report_config in self._report_config: await self.configure_reporting( report_config["attr"], report_config["config"] ) await asyncio.sleep(uniform(0.1, 0.5)) self.debug("finished channel configuration") self._status = ChannelStatus.CONFIGURED async def async_initialize(self, from_cache): """Initialize channel.""" self.debug("initializing channel: from_cache: %s", from_cache) self._status = ChannelStatus.INITIALIZED @callback def cluster_command(self, tsn, command_id, args): """Handle commands received to this cluster.""" pass @callback def attribute_updated(self, attrid, value): """Handle attribute updates on this cluster.""" pass @callback def zdo_command(self, *args, **kwargs): """Handle ZDO commands on this cluster.""" pass @callback def zha_send_event(self, cluster, command, args): """Relay events to hass.""" self._zha_device.hass.bus.async_fire( "zha_event", { "unique_id": self._unique_id, "device_ieee": str(self._zha_device.ieee), "command": 
command, "args": args, }, ) async def async_update(self): """Retrieve latest state from cluster.""" pass async def get_attribute_value(self, attribute, from_cache=True): """Get the value for an attribute.""" manufacturer = None manufacturer_code = self._zha_device.manufacturer_code if self.cluster.cluster_id >= 0xFC00 and manufacturer_code: manufacturer = manufacturer_code result = await safe_read( self._cluster, [attribute], allow_cache=from_cache, only_cache=from_cache, manufacturer=manufacturer, ) return result.get(attribute) def log(self, level, msg, *args): """Log a message.""" msg = "[%s]: " + msg args = (self._log_id,) + args _LOGGER.log(level, msg, *args) def __getattr__(self, name): """Get attribute or a decorated cluster command.""" if hasattr(self._cluster, name) and callable(getattr(self._cluster, name)): command = getattr(self._cluster, name) command.__name__ = name return decorate_command(self, command) return self.__getattribute__(name) class AttributeListeningChannel(ZigbeeChannel): """Channel for attribute reports from the cluster.""" CHANNEL_NAME = CHANNEL_ATTRIBUTE REPORT_CONFIG = [{"attr": 0, "config": REPORT_CONFIG_DEFAULT}] def __init__(self, cluster, device): """Initialize AttributeListeningChannel.""" super().__init__(cluster, device) attr = self._report_config[0].get("attr") if isinstance(attr, str): self.value_attribute = get_attr_id_by_name(self.cluster, attr) else: self.value_attribute = attr @callback def attribute_updated(self, attrid, value): """Handle attribute updates on this cluster.""" if attrid == self.value_attribute: async_dispatcher_send( self._zha_device.hass, "{}_{}".format(self.unique_id, SIGNAL_ATTR_UPDATED), value, ) async def async_initialize(self, from_cache): """Initialize listener.""" await self.get_attribute_value( self._report_config[0].get("attr"), from_cache=from_cache ) await super().async_initialize(from_cache) class ZDOChannel(LogMixin): """Channel for ZDO events.""" def __init__(self, cluster, device): 
"""Initialize ZDOChannel.""" self.name = CHANNEL_ZDO self._cluster = cluster self._zha_device = device self._status = ChannelStatus.CREATED self._unique_id = "{}:{}_ZDO".format(str(device.ieee), device.name) self._cluster.add_listener(self) @property def unique_id(self): """Return the unique id for this channel.""" return self._unique_id @property def cluster(self): """Return the aigpy cluster for this channel.""" return self._cluster @property def status(self): """Return the status of the channel.""" return self._status @callback def device_announce(self, zigpy_device): """Device announce handler.""" pass @callback def permit_duration(self, duration): """Permit handler.""" pass async def async_initialize(self, from_cache): """Initialize channel.""" entry = self._zha_device.gateway.zha_storage.async_get_or_create( self._zha_device ) self.debug("entry loaded from storage: %s", entry) self._status = ChannelStatus.INITIALIZED async def async_configure(self): """Configure channel.""" self._status = ChannelStatus.CONFIGURED def log(self, level, msg, *args): """Log a message.""" msg = "[%s:ZDO](%s): " + msg args = (self._zha_device.nwk, self._zha_device.model) + args _LOGGER.log(level, msg, *args) class EventRelayChannel(ZigbeeChannel): """Event relay that can be attached to zigbee clusters.""" CHANNEL_NAME = CHANNEL_EVENT_RELAY @callback def attribute_updated(self, attrid, value): """Handle an attribute updated on this cluster.""" self.zha_send_event( self._cluster, SIGNAL_ATTR_UPDATED, { "attribute_id": attrid, "attribute_name": self._cluster.attributes.get(attrid, ["Unknown"])[0], "value": value, }, ) @callback def cluster_command(self, tsn, command_id, args): """Handle a cluster command received on this cluster.""" if ( self._cluster.server_commands is not None and self._cluster.server_commands.get(command_id) is not None ): self.zha_send_event( self._cluster, self._cluster.server_commands.get(command_id)[0], args ) # pylint: disable=wrong-import-position from . 
import closures # noqa from . import general # noqa from . import homeautomation # noqa from . import hvac # noqa from . import lighting # noqa from . import lightlink # noqa from . import manufacturerspecific # noqa from . import measurement # noqa from . import protocol # noqa from . import security # noqa from . import smartenergy # noqa
apache-2.0
kenshay/ImageScript
ProgramData/SystemFiles/Python/Lib/site-packages/pyautogui/tweens.py
5
1410
import pytweening # This is just left here for backwards compatibility. I'll be deprecating this in favor of pyautogui.linear, pyautogui.easeInQuad, etc. getPointOnLine = pytweening.getPointOnLine linear = pytweening.linear easeInQuad = pytweening.easeInQuad easeOutQuad = pytweening.easeOutQuad easeInOutQuad = pytweening.easeInOutQuad easeInCubic = pytweening.easeInCubic easeOutCubic = pytweening.easeOutCubic easeInOutCubic = pytweening.easeInOutCubic easeInQuart = pytweening.easeInQuart easeOutQuart = pytweening.easeOutQuart easeInOutQuart = pytweening.easeInOutQuart easeInQuint = pytweening.easeInQuint easeOutQuint = pytweening.easeOutQuint easeInOutQuint = pytweening.easeInOutQuint easeInSine = pytweening.easeInSine easeOutSine = pytweening.easeOutSine easeInOutSine = pytweening.easeInOutSine easeInExpo = pytweening.easeInExpo easeOutExpo = pytweening.easeOutExpo easeInOutExpo = pytweening.easeInOutExpo easeInCirc = pytweening.easeInCirc easeOutCirc = pytweening.easeOutCirc easeInOutCirc = pytweening.easeInOutCirc easeInElastic = pytweening.easeInElastic easeOutElastic = pytweening.easeOutElastic easeInOutElastic = pytweening.easeInOutElastic easeInBack = pytweening.easeInBack easeOutBack = pytweening.easeOutBack easeInOutBack = pytweening.easeInOutBack easeInBounce = pytweening.easeInBounce easeOutBounce = pytweening.easeOutBounce easeInOutBounce = pytweening.easeInOutBounce
gpl-3.0
hnjamba/onaclone
onadata/libs/data/tests/test_tools.py
5
7229
from datetime import datetime, timedelta from django.utils.timezone import utc import os from mock import patch from onadata.apps.logger.models.instance import Instance from onadata.apps.main.tests.test_base import TestBase from onadata.libs.data.query import get_form_submissions_grouped_by_field,\ get_date_fields, get_field_records class TestTools(TestBase): def setUp(self): super(self.__class__, self).setUp() self._create_user_and_login() self._publish_transportation_form() @patch('django.utils.timezone.now') def test_get_form_submissions_grouped_by_field(self, mock_time): mock_time.return_value = datetime.utcnow().replace(tzinfo=utc) self._make_submissions() count_key = 'count' fields = ['_submission_time', '_xform_id_string'] count = len(self.xform.instances.all()) for field in fields: result = get_form_submissions_grouped_by_field( self.xform, field)[0] self.assertEqual([field, count_key], sorted(result.keys())) self.assertEqual(result[count_key], count) @patch('onadata.apps.logger.models.instance.submission_time') def test_get_form_submissions_grouped_by_field_datetime_to_date( self, mock_time): now = datetime(2014, 01, 01, tzinfo=utc) times = [now, now + timedelta(seconds=1), now + timedelta(seconds=2), now + timedelta(seconds=3)] mock_time.side_effect = times self._make_submissions() for i in self.xform.instances.all().order_by('-pk'): i.date_created = times.pop() i.save() count_key = 'count' fields = ['_submission_time'] count = len(self.xform.instances.all()) for field in fields: result = get_form_submissions_grouped_by_field( self.xform, field)[0] self.assertEqual([field, count_key], sorted(result.keys())) self.assertEqual(result[field], str(now.date())) self.assertEqual(result[count_key], count) @patch('django.utils.timezone.now') def test_get_form_submissions_two_xforms(self, mock_time): mock_time.return_value = datetime.utcnow().replace(tzinfo=utc) self._make_submissions() self._publish_xls_file(os.path.join( "fixtures", "gps", "gps.xls")) first_xform 
= self.xform self.xform = self.user.xforms.all().order_by('-pk')[0] self._make_submission(os.path.join( 'onadata', 'apps', 'main', 'tests', 'fixtures', 'gps', 'instances', 'gps_1980-01-23_20-52-08.xml')) count_key = 'count' fields = ['_submission_time', '_xform_id_string'] count = len(self.xform.instances.all()) for field in fields: result = get_form_submissions_grouped_by_field( self.xform, field)[0] self.assertEqual([field, count_key], sorted(result.keys())) self.assertEqual(result[count_key], count) count = len(first_xform.instances.all()) for field in fields: result = get_form_submissions_grouped_by_field( first_xform, field)[0] self.assertEqual([field, count_key], sorted(result.keys())) self.assertEqual(result[count_key], count) @patch('django.utils.timezone.now') def test_get_form_submissions_xform_no_submissions(self, mock_time): mock_time.return_value = datetime.utcnow().replace(tzinfo=utc) self._make_submissions() self._publish_xls_file(os.path.join( "fixtures", "gps", "gps.xls")) self.xform = self.user.xforms.all().order_by('-pk')[0] fields = ['_submission_time', '_xform_id_string'] count = len(self.xform.instances.all()) self.assertEqual(count, 0) for field in fields: result = get_form_submissions_grouped_by_field( self.xform, field) self.assertEqual(result, []) @patch('django.utils.timezone.now') def test_get_form_submissions_grouped_by_field_sets_name(self, mock_time): mock_time.return_value = datetime.utcnow().replace(tzinfo=utc) self._make_submissions() count_key = 'count' fields = ['_submission_time', '_xform_id_string'] name = '_my_name' xform = self.user.xforms.all()[0] count = len(xform.instances.all()) for field in fields: result = get_form_submissions_grouped_by_field( xform, field, name)[0] self.assertEqual([name, count_key], sorted(result.keys())) self.assertEqual(result[count_key], count) def test_get_form_submissions_when_response_not_provided(self): """ Test that the None value is stripped when of the submissions doesnt have a response for 
the specified field """ self._make_submissions() count = Instance.objects.count() # make submission that doesnt have a response for # `available_transportation_types_to_referral_facility` path = os.path.join( self.this_directory, 'fixtures', 'transportation', 'instances', 'transport_no_response', 'transport_no_response.xml') self._make_submission(path, self.user.username) self.assertEqual(Instance.objects.count(), count + 1) field = 'transport/available_transportation_types_to_referral_facility' xform = self.user.xforms.all()[0] results = get_form_submissions_grouped_by_field( xform, field, 'available_transportation_types_to_referral_facility') # we should have a similar number of aggregates as submissions as each # submission has a unique value for the field self.assertEqual(len(results), count + 1) # the count where the value is None should have a count of 1 result = filter( lambda r: r['available_transportation_types_to_referral_facility'] is None, results)[0] self.assertEqual(result['count'], 1) def test_get_date_fields_includes_start_end(self): path = os.path.join( os.path.dirname(__file__), "fixtures", "tutorial", "tutorial.xls") self._publish_xls_file_and_set_xform(path) fields = get_date_fields(self.xform) expected_fields = sorted( ['_submission_time', 'date', 'start_time', 'end_time', 'today', 'exactly']) self.assertEqual(sorted(fields), expected_fields) def test_get_field_records_when_some_responses_are_empty(self): submissions = ['1', '2', '3', 'no_age'] path = os.path.join( os.path.dirname(__file__), "fixtures", "tutorial", "tutorial.xls") self._publish_xls_file_and_set_xform(path) for i in submissions: self._make_submission(os.path.join( 'onadata', 'apps', 'api', 'tests', 'fixtures', 'forms', 'tutorial', 'instances', '{}.xml'.format(i))) field = 'age' records = get_field_records(field, self.xform) self.assertEqual(sorted(records), sorted([23, 23, 35]))
bsd-2-clause
opencobra/cobrapy
src/cobra/test/test_sampling/test_optgp.py
1
1936
"""Test functionalities of OptGPSampler.""" from typing import TYPE_CHECKING import numpy as np import pytest from cobra.sampling import OptGPSampler if TYPE_CHECKING: from cobra import Model from cobra.sampling import ACHRSampler @pytest.fixture(scope="function") def optgp(model: "Model") -> OptGPSampler: """Return OptGPSampler instance for tests.""" sampler = OptGPSampler(model, processes=1, thinning=1) assert (sampler.n_warmup > 0) and (sampler.n_warmup <= 2 * len(model.variables)) assert all(sampler.validate(sampler.warmup) == "v") return sampler def test_optgp_init_benchmark(model: "Model", benchmark) -> None: """Benchmark inital OptGP sampling.""" benchmark(lambda: OptGPSampler(model, processes=2)) def test_optgp_sample_benchmark(optgp: "Model", benchmark) -> None: """Benchmark OptGP sampling.""" benchmark(optgp.sample, 1) def test_sampling(optgp: OptGPSampler) -> None: """Test sampling.""" s = optgp.sample(10) assert all(optgp.validate(s) == "v") def test_batch_sampling(optgp: OptGPSampler) -> None: """Test batch sampling.""" for b in optgp.batch(5, 4): assert all(optgp.validate(b) == "v") def test_variables_samples(achr: "ACHRSampler", optgp: OptGPSampler) -> None: """Test variable samples.""" vnames = np.array([v.name for v in achr.model.variables]) s = optgp.sample(10, fluxes=False) assert s.shape == (10, optgp.warmup.shape[1]) assert (s.columns == vnames).all() assert (optgp.validate(s) == "v").all() def test_reproject(optgp: OptGPSampler) -> None: """Test reprojection of sampling.""" s = optgp.sample(10, fluxes=False).values proj = np.apply_along_axis(optgp._reproject, 1, s) assert all(optgp.validate(proj) == "v") s = np.random.rand(10, optgp.warmup.shape[1]) proj = np.apply_along_axis(optgp._reproject, 1, s) assert all(optgp.validate(proj) == "v")
gpl-2.0
andmos/ansible
test/units/modules/network/nos/nos_module.py
52
2511
# (c) 2018 Extreme Networks Inc. # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # from __future__ import (absolute_import, division, print_function) __metaclass__ = type import os import json from units.modules.utils import AnsibleExitJson, AnsibleFailJson, ModuleTestCase fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures') fixture_data = {} def load_fixture(name): path = os.path.join(fixture_path, name) if path in fixture_data: return fixture_data[path] with open(path) as file_desc: data = file_desc.read() try: data = json.loads(data) except Exception: pass fixture_data[path] = data return data class TestNosModule(ModuleTestCase): def execute_module(self, failed=False, changed=False, commands=None, sort=True, defaults=False): self.load_fixtures(commands) if failed: result = self.failed() self.assertTrue(result['failed'], result) else: result = self.changed(changed) self.assertEqual(result['changed'], changed, result) if commands is not None: if sort: self.assertEqual(sorted(commands), sorted(result['commands']), result['commands']) else: self.assertEqual(commands, result['commands'], result['commands']) return result def failed(self): with self.assertRaises(AnsibleFailJson) as exc: self.module.main() result = exc.exception.args[0] self.assertTrue(result['failed'], result) return result def changed(self, changed=False): with 
self.assertRaises(AnsibleExitJson) as exc: self.module.main() result = exc.exception.args[0] self.assertEqual(result['changed'], changed, result) return result def load_fixtures(self, commands=None): pass
gpl-3.0
JustFixNYC/who-owns-what
wow/tests/conftest.py
1
1024
import pytest import psycopg2 import dbtool @pytest.fixture(scope='session') def django_db_setup(django_db_setup, django_db_blocker): from django.conf import settings wow = settings.DATABASES['wow'] with django_db_blocker.unblock(): db = dbtool.DbContext( host=wow['HOST'], database=wow['NAME'], user=wow['USER'], password=wow['PASSWORD'], port=wow['PORT'] or 5432, ) # If we're run with --reuse-db, the database might already # be scaffolded for us, in which case we don't need to # do anything. is_already_built = False conn = db.connection() with conn: with conn.cursor() as cursor: try: cursor.execute('select * from wow_bldgs limit 1;') is_already_built = True except psycopg2.errors.UndefinedTable: pass if not is_already_built: dbtool.loadtestdata(db)
gpl-3.0
sgerhart/ansible
lib/ansible/modules/network/nxos/nxos_facts.py
5
32486
#!/usr/bin/python # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'network'} DOCUMENTATION = """ --- module: nxos_facts extends_documentation_fragment: nxos version_added: "2.1" short_description: Gets facts about NX-OS switches description: - Collects facts from Cisco Nexus devices running the NX-OS operating system. Fact collection is supported over both Cli and Nxapi transports. This module prepends all of the base network fact keys with C(ansible_net_<fact>). The facts module will always collect a base set of facts from the device and can enable or disable collection of additional facts. author: - Jason Edelman (@jedelman8) - Gabriele Gerbino (@GGabriele) notes: - Tested against NXOSv 7.3.(0)D1(1) on VIRL options: gather_subset: description: - When supplied, this argument will restrict the facts collected to a given subset. Possible values for this argument include all, hardware, config, legacy, and interfaces. Can specify a list of values to include a larger subset. Values can also be used with an initial C(M(!)) to specify that a specific subset should not be collected. 
required: false default: '!config' version_added: "2.2" """ EXAMPLES = """ - nxos_facts: gather_subset: all # Collect only the config and default facts - nxos_facts: gather_subset: - config # Do not collect hardware facts - nxos_facts: gather_subset: - "!hardware" """ RETURN = """ ansible_net_gather_subset: description: The list of fact subsets collected from the device returned: always type: list # default ansible_net_model: description: The model name returned from the device returned: always type: str ansible_net_serialnum: description: The serial number of the remote device returned: always type: str ansible_net_version: description: The operating system version running on the remote device returned: always type: str ansible_net_hostname: description: The configured hostname of the device returned: always type: string ansible_net_image: description: The image file the device is running returned: always type: string # hardware ansible_net_filesystems: description: All file system names available on the device returned: when hardware is configured type: list ansible_net_memfree_mb: description: The available free memory on the remote device in Mb returned: when hardware is configured type: int ansible_net_memtotal_mb: description: The total memory on the remote device in Mb returned: when hardware is configured type: int # config ansible_net_config: description: The current active config from the device returned: when config is configured type: str # interfaces ansible_net_all_ipv4_addresses: description: All IPv4 addresses configured on the device returned: when interfaces is configured type: list ansible_net_all_ipv6_addresses: description: All IPv6 addresses configured on the device returned: when interfaces is configured type: list ansible_net_interfaces: description: A hash of all interfaces running on the system returned: when interfaces is configured type: dict ansible_net_neighbors: description: The list of LLDP/CDP neighbors from the remote device 
returned: when interfaces is configured type: dict # legacy (pre Ansible 2.2) fan_info: description: A hash of facts about fans in the remote device returned: when legacy is configured type: dict hostname: description: The configured hostname of the remote device returned: when legacy is configured type: dict interfaces_list: description: The list of interface names on the remote device returned: when legacy is configured type: dict kickstart: description: The software version used to boot the system returned: when legacy is configured type: str module: description: A hash of facts about the modules in a remote device returned: when legacy is configured type: dict platform: description: The hardware platform reported by the remote device returned: when legacy is configured type: str power_supply_info: description: A hash of facts about the power supplies in the remote device returned: when legacy is configured type: str vlan_list: description: The list of VLAN IDs configured on the remote device returned: when legacy is configured type: list """ import re from ansible.module_utils.network.nxos.nxos import run_commands, get_config from ansible.module_utils.network.nxos.nxos import get_capabilities, get_interface_type from ansible.module_utils.network.nxos.nxos import nxos_argument_spec, check_args from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.connection import ConnectionError from ansible.module_utils.six import string_types, iteritems g_config = None class FactsBase(object): def __init__(self, module): self.module = module self.warnings = list() self.facts = dict() def populate(self): pass def run(self, command, output='text'): command_string = command command = { 'command': command, 'output': output } resp = run_commands(self.module, [command], check_rc='retry_json') try: return resp[0] except IndexError: self.warnings.append('command %s failed, facts for this command will not be populated' % command_string) return None def 
get_config(self): global g_config if not g_config: g_config = get_config(self.module) return g_config def transform_dict(self, data, keymap): transform = dict() for key, fact in keymap: if key in data: transform[fact] = data[key] return transform def transform_iterable(self, iterable, keymap): for item in iterable: yield self.transform_dict(item, keymap) class Default(FactsBase): VERSION_MAP_7K = frozenset([ ('sys_ver_str', 'version'), ('proc_board_id', 'serialnum'), ('chassis_id', 'model'), ('isan_file_name', 'image'), ('host_name', 'hostname') ]) VERSION_MAP = frozenset([ ('kickstart_ver_str', 'version'), ('proc_board_id', 'serialnum'), ('chassis_id', 'model'), ('kick_file_name', 'image'), ('host_name', 'hostname') ]) def populate(self): data = None data = self.run('show version', output='json') if data: if isinstance(data, dict): if data.get('sys_ver_str'): self.facts.update(self.transform_dict(data, self.VERSION_MAP_7K)) else: self.facts.update(self.transform_dict(data, self.VERSION_MAP)) else: self.facts['version'] = self.parse_version(data) self.facts['serialnum'] = self.parse_serialnum(data) self.facts['model'] = self.parse_model(data) self.facts['image'] = self.parse_image(data) self.facts['hostname'] = self.parse_hostname(data) def parse_version(self, data): match = re.search(r'\s+system:\s+version\s*(\S+)', data, re.M) if match: return match.group(1) else: match = re.search(r'\s+kickstart:\s+version\s*(\S+)', data, re.M) if match: return match.group(1) def parse_serialnum(self, data): match = re.search(r'Processor Board ID\s*(\S+)', data, re.M) if match: return match.group(1) def parse_model(self, data): match = re.search(r'Hardware\n\s+cisco\s*(\S+\s+\S+)', data, re.M) if match: return match.group(1) def parse_image(self, data): match = re.search(r'\s+system image file is:\s*(\S+)', data, re.M) if match: return match.group(1) else: match = re.search(r'\s+kickstart image file is:\s*(\S+)', data, re.M) if match: return match.group(1) def 
parse_hostname(self, data): match = re.search(r'\s+Device name:\s*(\S+)', data, re.M) if match: return match.group(1) class Config(FactsBase): def populate(self): super(Config, self).populate() self.facts['config'] = self.get_config() class Features(FactsBase): def populate(self): super(Features, self).populate() data = self.get_config() if data: features = [] for line in data.splitlines(): if line.startswith('feature'): features.append(line.replace('feature', '').strip()) self.facts['features_enabled'] = features class Hardware(FactsBase): def populate(self): data = self.run('dir') if data: self.facts['filesystems'] = self.parse_filesystems(data) data = None data = self.run('show system resources', output='json') if data: if isinstance(data, dict): self.facts['memtotal_mb'] = int(data['memory_usage_total']) / 1024 self.facts['memfree_mb'] = int(data['memory_usage_free']) / 1024 else: self.facts['memtotal_mb'] = self.parse_memtotal_mb(data) self.facts['memfree_mb'] = self.parse_memfree_mb(data) def parse_filesystems(self, data): return re.findall(r'^Usage for (\S+)//', data, re.M) def parse_memtotal_mb(self, data): match = re.search(r'(\S+)K(\s+|)total', data, re.M) if match: memtotal = match.group(1) return int(memtotal) / 1024 def parse_memfree_mb(self, data): match = re.search(r'(\S+)K(\s+|)free', data, re.M) if match: memfree = match.group(1) return int(memfree) / 1024 class Interfaces(FactsBase): INTERFACE_MAP = frozenset([ ('state', 'state'), ('desc', 'description'), ('eth_bw', 'bandwidth'), ('eth_duplex', 'duplex'), ('eth_speed', 'speed'), ('eth_mode', 'mode'), ('eth_hw_addr', 'macaddress'), ('eth_mtu', 'mtu'), ('eth_hw_desc', 'type') ]) INTERFACE_SVI_MAP = frozenset([ ('svi_line_proto', 'state'), ('svi_bw', 'bandwidth'), ('svi_mac', 'macaddress'), ('svi_mtu', 'mtu'), ('type', 'type') ]) INTERFACE_IPV4_MAP = frozenset([ ('eth_ip_addr', 'address'), ('eth_ip_mask', 'masklen') ]) INTERFACE_SVI_IPV4_MAP = frozenset([ ('svi_ip_addr', 'address'), ('svi_ip_mask', 
'masklen') ]) INTERFACE_IPV6_MAP = frozenset([ ('addr', 'address'), ('prefix', 'subnet') ]) def ipv6_structure_op_supported(self): data = get_capabilities(self.module) if data: nxos_os_version = data['device_info']['network_os_version'] unsupported_versions = ['I2', 'F1', 'A8'] for ver in unsupported_versions: if ver in nxos_os_version: return False return True def populate(self): self.facts['all_ipv4_addresses'] = list() self.facts['all_ipv6_addresses'] = list() data = None data = self.run('show interface', output='json') if data: if isinstance(data, dict): self.facts['interfaces'] = self.populate_structured_interfaces(data) else: interfaces = self.parse_interfaces(data) self.facts['interfaces'] = self.populate_interfaces(interfaces) if self.ipv6_structure_op_supported(): data = self.run('show ipv6 interface', output='json') else: data = None if data: if isinstance(data, dict): self.populate_structured_ipv6_interfaces(data) else: interfaces = self.parse_interfaces(data) self.populate_ipv6_interfaces(interfaces) data = self.run('show lldp neighbors') if data: self.facts['neighbors'] = self.populate_neighbors(data) data = self.run('show cdp neighbors detail', output='json') if data: if isinstance(data, dict): self.facts['neighbors'] = self.populate_structured_neighbors_cdp(data) else: self.facts['neighbors'] = self.populate_neighbors_cdp(data) def populate_structured_interfaces(self, data): interfaces = dict() for item in data['TABLE_interface']['ROW_interface']: name = item['interface'] intf = dict() if 'type' in item: intf.update(self.transform_dict(item, self.INTERFACE_SVI_MAP)) else: intf.update(self.transform_dict(item, self.INTERFACE_MAP)) if 'eth_ip_addr' in item: intf['ipv4'] = self.transform_dict(item, self.INTERFACE_IPV4_MAP) self.facts['all_ipv4_addresses'].append(item['eth_ip_addr']) if 'svi_ip_addr' in item: intf['ipv4'] = self.transform_dict(item, self.INTERFACE_SVI_IPV4_MAP) self.facts['all_ipv4_addresses'].append(item['svi_ip_addr']) interfaces[name] 
= intf return interfaces def populate_structured_ipv6_interfaces(self, data): try: data = data['TABLE_intf'] if data: if isinstance(data, dict): data = [data] for item in data: name = item['ROW_intf']['intf-name'] intf = self.facts['interfaces'][name] intf['ipv6'] = self.transform_dict(item, self.INTERFACE_IPV6_MAP) try: addr = item['ROW_intf']['addr'] except KeyError: addr = item['ROW_intf']['TABLE_addr']['ROW_addr']['addr'] self.facts['all_ipv6_addresses'].append(addr) else: return "" except TypeError: return "" def populate_structured_neighbors_cdp(self, data): objects = dict() data = data['TABLE_cdp_neighbor_detail_info']['ROW_cdp_neighbor_detail_info'] if isinstance(data, dict): data = [data] for item in data: local_intf = item['intf_id'] objects[local_intf] = list() nbor = dict() nbor['port'] = item['port_id'] nbor['sysname'] = item['device_id'] objects[local_intf].append(nbor) return objects def parse_interfaces(self, data): parsed = dict() key = '' for line in data.split('\n'): if len(line) == 0: continue elif line.startswith('admin') or line[0] == ' ': parsed[key] += '\n%s' % line else: match = re.match(r'^(\S+)', line) if match: key = match.group(1) if not key.startswith('admin') or not key.startswith('IPv6 Interface'): parsed[key] = line return parsed def populate_interfaces(self, interfaces): facts = dict() for key, value in iteritems(interfaces): intf = dict() if get_interface_type(key) == 'svi': intf['state'] = self.parse_state(key, value, intf_type='svi') intf['macaddress'] = self.parse_macaddress(value, intf_type='svi') intf['mtu'] = self.parse_mtu(value, intf_type='svi') intf['bandwidth'] = self.parse_bandwidth(value, intf_type='svi') intf['type'] = self.parse_type(value, intf_type='svi') if 'Internet Address' in value: intf['ipv4'] = self.parse_ipv4_address(value, intf_type='svi') facts[key] = intf else: intf['state'] = self.parse_state(key, value) intf['description'] = self.parse_description(value) intf['macaddress'] = 
self.parse_macaddress(value) intf['mode'] = self.parse_mode(value) intf['mtu'] = self.parse_mtu(value) intf['bandwidth'] = self.parse_bandwidth(value) intf['duplex'] = self.parse_duplex(value) intf['speed'] = self.parse_speed(value) intf['type'] = self.parse_type(value) if 'Internet Address' in value: intf['ipv4'] = self.parse_ipv4_address(value) facts[key] = intf return facts def parse_state(self, key, value, intf_type='ethernet'): match = None if intf_type == 'svi': match = re.search(r'line protocol is\s*(\S+)', value, re.M) else: match = re.search(r'%s is\s*(\S+)' % key, value, re.M) if match: return match.group(1) def parse_macaddress(self, value, intf_type='ethernet'): match = None if intf_type == 'svi': match = re.search(r'address is\s*(\S+)', value, re.M) else: match = re.search(r'address:\s*(\S+)', value, re.M) if match: return match.group(1) def parse_mtu(self, value, intf_type='ethernet'): match = re.search(r'MTU\s*(\S+)', value, re.M) if match: return match.group(1) def parse_bandwidth(self, value, intf_type='ethernet'): match = re.search(r'BW\s*(\S+)', value, re.M) if match: return match.group(1) def parse_type(self, value, intf_type='ethernet'): match = None if intf_type == 'svi': match = re.search(r'Hardware is\s*(\S+)', value, re.M) else: match = re.search(r'Hardware:\s*(.+),', value, re.M) if match: return match.group(1) def parse_description(self, value, intf_type='ethernet'): match = re.search(r'Description: (.+)$', value, re.M) if match: return match.group(1) def parse_mode(self, value, intf_type='ethernet'): match = re.search(r'Port mode is (\S+)', value, re.M) if match: return match.group(1) def parse_duplex(self, value, intf_type='ethernet'): match = re.search(r'(\S+)-duplex', value, re.M) if match: return match.group(1) def parse_speed(self, value, intf_type='ethernet'): match = re.search(r'duplex, (.+)$', value, re.M) if match: return match.group(1) def parse_ipv4_address(self, value, intf_type='ethernet'): ipv4 = {} match = 
re.search(r'Internet Address is (.+)$', value, re.M) if match: address = match.group(1) addr = address.split('/')[0] ipv4['address'] = address.split('/')[0] ipv4['masklen'] = address.split('/')[1] self.facts['all_ipv4_addresses'].append(addr) return ipv4 def populate_neighbors(self, data): objects = dict() if isinstance(data, str): # if there are no neighbors the show command returns # ERROR: No neighbour information if data.startswith('ERROR'): return dict() regex = re.compile(r'(\S+)\s+(\S+)\s+\d+\s+\w+\s+(\S+)') for item in data.split('\n')[4:-1]: match = regex.match(item) if match: nbor = {'host': match.group(1), 'port': match.group(3)} if match.group(2) not in objects: objects[match.group(2)] = [] objects[match.group(2)].append(nbor) elif isinstance(data, dict): data = data['TABLE_nbor']['ROW_nbor'] if isinstance(data, dict): data = [data] for item in data: local_intf = item['l_port_id'] if local_intf not in objects: objects[local_intf] = list() nbor = dict() nbor['port'] = item['port_id'] nbor['host'] = item['chassis_id'] objects[local_intf].append(nbor) return objects def populate_neighbors_cdp(self, data): facts = dict() for item in data.split('----------------------------------------'): if item == '': continue local_intf = self.parse_lldp_intf(item) if local_intf not in facts: facts[local_intf] = list() fact = dict() fact['port'] = self.parse_lldp_port(item) fact['sysname'] = self.parse_lldp_sysname(item) facts[local_intf].append(fact) return facts def parse_lldp_intf(self, data): match = re.search(r'Interface:\s*(\S+)', data, re.M) if match: return match.group(1).strip(',') def parse_lldp_port(self, data): match = re.search(r'Port ID \(outgoing port\):\s*(\S+)', data, re.M) if match: return match.group(1) def parse_lldp_sysname(self, data): match = re.search(r'Device ID:(.+)$', data, re.M) if match: return match.group(1) def populate_ipv6_interfaces(self, interfaces): facts = dict() for key, value in iteritems(interfaces): intf = dict() intf['ipv6'] = 
self.parse_ipv6_address(value) facts[key] = intf def parse_ipv6_address(self, value): ipv6 = {} match_addr = re.search(r'IPv6 address:\s*(\S+)', value, re.M) if match_addr: addr = match_addr.group(1) ipv6['address'] = addr self.facts['all_ipv6_addresses'].append(addr) match_subnet = re.search(r'IPv6 subnet:\s*(\S+)', value, re.M) if match_subnet: ipv6['subnet'] = match_subnet.group(1) return ipv6 class Legacy(FactsBase): # facts from nxos_facts 2.1 VERSION_MAP = frozenset([ ('host_name', '_hostname'), ('kickstart_ver_str', '_os'), ('chassis_id', '_platform') ]) MODULE_MAP = frozenset([ ('model', 'model'), ('modtype', 'type'), ('ports', 'ports'), ('status', 'status') ]) FAN_MAP = frozenset([ ('fanname', 'name'), ('fanmodel', 'model'), ('fanhwver', 'hw_ver'), ('fandir', 'direction'), ('fanstatus', 'status') ]) POWERSUP_MAP = frozenset([ ('psmodel', 'model'), ('psnum', 'number'), ('ps_status', 'status'), ('actual_out', 'actual_output'), ('actual_in', 'actual_in'), ('total_capa', 'total_capacity') ]) def populate(self): data = None data = self.run('show version', output='json') if data: if isinstance(data, dict): self.facts.update(self.transform_dict(data, self.VERSION_MAP)) else: self.facts['_hostname'] = self.parse_hostname(data) self.facts['_os'] = self.parse_os(data) self.facts['_platform'] = self.parse_platform(data) data = self.run('show interface', output='json') if data: if isinstance(data, dict): self.facts['_interfaces_list'] = self.parse_structured_interfaces(data) else: self.facts['_interfaces_list'] = self.parse_interfaces(data) data = self.run('show vlan brief', output='json') if data: if isinstance(data, dict): self.facts['_vlan_list'] = self.parse_structured_vlans(data) else: self.facts['_vlan_list'] = self.parse_vlans(data) data = self.run('show module', output='json') if data: if isinstance(data, dict): self.facts['_module'] = self.parse_structured_module(data) else: self.facts['_module'] = self.parse_module(data) data = self.run('show environment 
fan', output='json') if data: if isinstance(data, dict): self.facts['_fan_info'] = self.parse_structured_fan_info(data) else: self.facts['_fan_info'] = self.parse_fan_info(data) data = self.run('show environment power', output='json') if data: if isinstance(data, dict): self.facts['_power_supply_info'] = self.parse_structured_power_supply_info(data) else: self.facts['_power_supply_info'] = self.parse_power_supply_info(data) def parse_structured_interfaces(self, data): objects = list() for item in data['TABLE_interface']['ROW_interface']: objects.append(item['interface']) return objects def parse_structured_vlans(self, data): objects = list() data = data['TABLE_vlanbriefxbrief']['ROW_vlanbriefxbrief'] if isinstance(data, dict): objects.append(data['vlanshowbr-vlanid-utf']) elif isinstance(data, list): for item in data: objects.append(item['vlanshowbr-vlanid-utf']) return objects def parse_structured_module(self, data): data = data['TABLE_modinfo']['ROW_modinfo'] if isinstance(data, dict): data = [data] objects = list(self.transform_iterable(data, self.MODULE_MAP)) return objects def parse_structured_fan_info(self, data): objects = list() if data.get('fandetails'): data = data['fandetails']['TABLE_faninfo']['ROW_faninfo'] elif data.get('fandetails_3k'): data = data['fandetails_3k']['TABLE_faninfo']['ROW_faninfo'] else: return objects objects = list(self.transform_iterable(data, self.FAN_MAP)) return objects def parse_structured_power_supply_info(self, data): if data.get('powersup').get('TABLE_psinfo_n3k'): data = data['powersup']['TABLE_psinfo_n3k']['ROW_psinfo_n3k'] else: data = data['powersup']['TABLE_psinfo']['ROW_psinfo'] objects = list(self.transform_iterable(data, self.POWERSUP_MAP)) return objects def parse_hostname(self, data): match = re.search(r'\s+Device name:\s+(\S+)', data, re.M) if match: return match.group(1) def parse_os(self, data): match = re.search(r'\s+system:\s+version\s*(\S+)', data, re.M) if match: return match.group(1) else: match = 
re.search(r'\s+kickstart:\s+version\s*(\S+)', data, re.M) if match: return match.group(1) def parse_platform(self, data): match = re.search(r'Hardware\n\s+cisco\s+(\S+\s+\S+)', data, re.M) if match: return match.group(1) def parse_interfaces(self, data): objects = list() for line in data.split('\n'): if len(line) == 0: continue elif line.startswith('admin') or line[0] == ' ': continue else: match = re.match(r'^(\S+)', line) if match: intf = match.group(1) if get_interface_type(intf) != 'unknown': objects.append(intf) return objects def parse_vlans(self, data): objects = list() for line in data.splitlines(): if line == '': continue if line[0].isdigit(): vlan = line.split()[0] objects.append(vlan) return objects def parse_module(self, data): objects = list() for line in data.splitlines(): if line == '': break if line[0].isdigit(): obj = {} match_port = re.search(r'\d\s*(\d*)', line, re.M) if match_port: obj['ports'] = match_port.group(1) match = re.search(r'\d\s*\d*\s*(.+)$', line, re.M) if match: l = match.group(1).split(' ') items = list() for item in l: if item == '': continue items.append(item.strip()) if items: obj['type'] = items[0] obj['model'] = items[1] obj['status'] = items[2] objects.append(obj) return objects def parse_fan_info(self, data): objects = list() for l in data.splitlines(): if '-----------------' in l or 'Status' in l: continue line = l.split() if len(line) > 1: obj = {} obj['name'] = line[0] obj['model'] = line[1] obj['hw_ver'] = line[-2] obj['status'] = line[-1] objects.append(obj) return objects def parse_power_supply_info(self, data): objects = list() for l in data.splitlines(): if l == '': break if l[0].isdigit(): obj = {} line = l.split() obj['model'] = line[1] obj['number'] = line[0] obj['status'] = line[-1] objects.append(obj) return objects FACT_SUBSETS = dict( default=Default, legacy=Legacy, hardware=Hardware, interfaces=Interfaces, config=Config, features=Features ) VALID_SUBSETS = frozenset(FACT_SUBSETS.keys()) def main(): spec = 
dict( gather_subset=dict(default=['!config'], type='list') ) spec.update(nxos_argument_spec) module = AnsibleModule(argument_spec=spec, supports_check_mode=True) warnings = list() check_args(module, warnings) gather_subset = module.params['gather_subset'] runable_subsets = set() exclude_subsets = set() for subset in gather_subset: if subset == 'all': runable_subsets.update(VALID_SUBSETS) continue if subset.startswith('!'): subset = subset[1:] if subset == 'all': exclude_subsets.update(VALID_SUBSETS) continue exclude = True else: exclude = False if subset not in VALID_SUBSETS: module.fail_json(msg='Bad subset') if exclude: exclude_subsets.add(subset) else: runable_subsets.add(subset) if not runable_subsets: runable_subsets.update(VALID_SUBSETS) runable_subsets.difference_update(exclude_subsets) runable_subsets.add('default') facts = dict() facts['gather_subset'] = list(runable_subsets) instances = list() for key in runable_subsets: instances.append(FACT_SUBSETS[key](module)) for inst in instances: inst.populate() facts.update(inst.facts) warnings.extend(inst.warnings) ansible_facts = dict() for key, value in iteritems(facts): # this is to maintain capability with nxos_facts 2.1 if key.startswith('_'): ansible_facts[key[1:]] = value else: key = 'ansible_net_%s' % key ansible_facts[key] = value module.exit_json(ansible_facts=ansible_facts, warnings=warnings) if __name__ == '__main__': main()
mit
ferdianjovan/strands_exploration
exploration_bid_manager/src/exploration_bid_manager/exploration_bidder.py
3
8148
import rospy from datetime import datetime from dateutil.tz import * from std_srvs.srv import Empty from strands_executive_msgs.srv import AddTasks from strands_executive_msgs.msg import TaskEvent from strands_exploration_msgs.msg import ExplorationTaskStatus from mongodb_store.message_store import MessageStoreProxy def msg_event_to_string(msg_event): if msg_event == TaskEvent.TASK_FAILED: return "TASK_FAILED" if msg_event == TaskEvent.TASK_SUCCEEDED: return "TASK_SUCCEEDED" if msg_event == TaskEvent.TASK_PREEMPTED: return "TASK_PREEMPTED" return str(msg_event) class ExplorationBidder(object): def __init__(self): self.add_task = rospy.ServiceProxy('/robot_routine/add_tasks', AddTasks) self.task_event_sub = rospy.Subscriber('/task_executor/events', TaskEvent, self.process_task_event, queue_size = None) self.get_info_srv = rospy.Service('~get_bid_info', Empty, self.get_info_cb) #REAL self.period = rospy.Duration(rospy.get_param('~period', 60*60*24)) self.tokens_per_period = rospy.get_param('/exploration_bidding/tokens_per_period', float(60*60*12)/3) ##TESTING #self.period = rospy.Duration(rospy.get_param('~period', 60)) #self.tokens_per_period = rospy.get_param('/exploration_bidding/tokens_per_period', 20) self.available_tokens = self.tokens_per_period self.currently_bid_tokens = 0 #total number of tokens that this node has currently in play self.currently_added_tokens = 0 #total number of tokens that this node has added to the executor (can never be more than the available tokens) self.added_tasks = {} self.queued_tasks = [] self.mongo = MessageStoreProxy(collection='exploration_tasks') self.timer=rospy.Timer(self.period, self.update_budget) def update_budget(self, timer_event): self.available_tokens+=self.tokens_per_period self.process_task_queue() def add_task_bid(self, task, bid): if isinstance(bid, float): rospy.logwarn("Float bids are not allowed. Ignoring." + str(task)) return False if bid <= 0: rospy.logwarn("Zero or negative bids are not allowed. Ignoring." 
+ str(task)) self.mongo.insert(ExplorationTaskStatus(result = ExplorationTaskStatus.IGNORED, task = task, bid = bid)) return False if self.available_tokens - bid < 0: rospy.logwarn("Not enough tokens available to bid. Ignoring." + str(task)) self.mongo.insert(ExplorationTaskStatus(result = ExplorationTaskStatus.IGNORED, task = task, bid = bid)) return False else: task.priority = bid self.queued_tasks.append(task) self.currently_bid_tokens+=bid self.process_task_queue() return True def process_task_queue(self): i = 0 rospy.loginfo("Trying to add tasks to the schedule...") while i < len(self.queued_tasks): task = self.queued_tasks[i] if rospy.get_rostime() > task.end_before: rospy.logwarn("Task deadline was surpassed before having budget to add it to the schedule. " + str(task)) self.currently_bid_tokens-=task.priority self.mongo.insert(ExplorationTaskStatus(result = ExplorationTaskStatus.NOT_ADDED, task = task, bid = task.priority)) del self.queued_tasks[i] elif self.available_tokens - self.currently_added_tokens - task.priority >= 0: try: add_response = self.add_task([task]) task_id = add_response.task_ids[0] rospy.loginfo("Added task " + str(task) + " with ID: " + str(task_id)) self.added_tasks[task_id] = task self.currently_added_tokens+=task.priority del self.queued_tasks[i] except Exception, e: rospy.logerr("Error calling add task service: " + str(e)) rospy.sleep(0.1) else: i+=1 def process_task_event(self, msg): task = msg.task if self.added_tasks.has_key(task.task_id): rospy.loginfo("ANALYSING") if msg.event == TaskEvent.DROPPED: rospy.loginfo("Task was dropped by the executor. 
" + str(task)) self.currently_bid_tokens-=task.priority self.currently_added_tokens-=task.priority self.mongo.insert(ExplorationTaskStatus(result = ExplorationTaskStatus.DROPPED, task = task, bid = task.priority)) del self.added_tasks[task.task_id] elif msg.event == TaskEvent.TASK_FAILED or msg.event == TaskEvent.TASK_SUCCEEDED or msg.event == TaskEvent.TASK_PREEMPTED: rospy.loginfo("Task has finished execution with outcome " + msg_event_to_string(msg.event) + ". Retrieving bid of " + str(task.priority) + ". Task: " + str(task)) self.currently_bid_tokens-=task.priority self.available_tokens-=task.priority self.currently_added_tokens-=task.priority self.mongo.insert(ExplorationTaskStatus(result = ExplorationTaskStatus.EXECUTED, task = task, bid = task.priority, execution_status = msg.event)) del self.added_tasks[task.task_id] self.process_task_queue() def get_info_cb(self, req): print "\n\n\n--------------------------------------------------------------------------------------------------------------------" print "ADDED TASKS" for task_id in self.added_tasks: task = self.added_tasks[task_id] print "TASK_ID:", task_id print "TASK:", task.action print "BID:", task.priority print "EXECUTE BETWEEN " + datetime.fromtimestamp(task.start_after.to_sec(), tzlocal()).strftime('%d/%m/%y %H:%M:%S') + " AND " + datetime.fromtimestamp(task.end_before.to_sec(), tzlocal()).strftime('%d/%m/%y %H:%M:%S') print "\n" print "--------------------------------------------------------------------------------------------------------------------" print "QUEUED TASKS: " for task in self.queued_tasks: print "TASK:", task.action print "BID:", task.priority print "EXECUTE BETWEEN " + datetime.fromtimestamp(task.start_after.to_sec(), tzlocal()).strftime('%d/%m/%y %H:%M:%S') + " AND " + datetime.fromtimestamp(task.end_before.to_sec(), tzlocal()).strftime('%d/%m/%y %H:%M:%S') print "\n" print 
"--------------------------------------------------------------------------------------------------------------------" print "AVAILABLE TOKENS: ", self.available_tokens print "--------------------------------------------------------------------------------------------------------------------" print "BID TOKENS: ", self.currently_bid_tokens print "--------------------------------------------------------------------------------------------------------------------" print "ADDED TOKENS: ", self.currently_added_tokens print "--------------------------------------------------------------------------------------------------------------------" print "\n\n\n\n\n\n\n" return []
mit
kenshay/ImageScripter
ProgramData/SystemFiles/Python/Lib/site-packages/pip/_internal/utils/compat.py
7
8015
"""Stuff that differs in different Python versions and platform distributions.""" from __future__ import absolute_import, division import codecs import locale import logging import os import shutil import sys from pip._vendor.six import text_type try: import ipaddress except ImportError: try: from pip._vendor import ipaddress # type: ignore except ImportError: import ipaddr as ipaddress # type: ignore ipaddress.ip_address = ipaddress.IPAddress ipaddress.ip_network = ipaddress.IPNetwork __all__ = [ "ipaddress", "uses_pycache", "console_to_str", "native_str", "get_path_uid", "stdlib_pkgs", "WINDOWS", "samefile", "get_terminal_size", "get_extension_suffixes", ] logger = logging.getLogger(__name__) if sys.version_info >= (3, 4): uses_pycache = True from importlib.util import cache_from_source else: import imp try: cache_from_source = imp.cache_from_source # type: ignore except AttributeError: # does not use __pycache__ cache_from_source = None uses_pycache = cache_from_source is not None if sys.version_info >= (3, 5): backslashreplace_decode = "backslashreplace" else: # In version 3.4 and older, backslashreplace exists # but does not support use for decoding. # We implement our own replace handler for this # situation, so that we can consistently use # backslash replacement for all versions. def backslashreplace_decode_fn(err): raw_bytes = (err.object[i] for i in range(err.start, err.end)) if sys.version_info[0] == 2: # Python 2 gave us characters - convert to numeric bytes raw_bytes = (ord(b) for b in raw_bytes) return u"".join(u"\\x%x" % c for c in raw_bytes), err.end codecs.register_error( "backslashreplace_decode", backslashreplace_decode_fn, ) backslashreplace_decode = "backslashreplace_decode" def console_to_str(data): """Return a string, safe for output, of subprocess output. We assume the data is in the locale preferred encoding. If it won't decode properly, we warn the user but decode as best we can. 
We also ensure that the output can be safely written to standard output without encoding errors. """ # First, get the encoding we assume. This is the preferred # encoding for the locale, unless that is not found, or # it is ASCII, in which case assume UTF-8 encoding = locale.getpreferredencoding() if (not encoding) or codecs.lookup(encoding).name == "ascii": encoding = "utf-8" # Now try to decode the data - if we fail, warn the user and # decode with replacement. try: s = data.decode(encoding) except UnicodeDecodeError: logger.warning( "Subprocess output does not appear to be encoded as %s", encoding, ) s = data.decode(encoding, errors=backslashreplace_decode) # Make sure we can print the output, by encoding it to the output # encoding with replacement of unencodable characters, and then # decoding again. # We use stderr's encoding because it's less likely to be # redirected and if we don't find an encoding we skip this # step (on the assumption that output is wrapped by something # that won't fail). # The double getattr is to deal with the possibility that we're # being called in a situation where sys.__stderr__ doesn't exist, # or doesn't have an encoding attribute. Neither of these cases # should occur in normal pip use, but there's no harm in checking # in case people use pip in (unsupported) unusual situations. output_encoding = getattr(getattr(sys, "__stderr__", None), "encoding", None) if output_encoding: s = s.encode(output_encoding, errors="backslashreplace") s = s.decode(output_encoding) return s if sys.version_info >= (3,): def native_str(s, replace=False): if isinstance(s, bytes): return s.decode('utf-8', 'replace' if replace else 'strict') return s else: def native_str(s, replace=False): # Replace is ignored -- unicode to UTF-8 can't fail if isinstance(s, text_type): return s.encode('utf-8') return s def get_path_uid(path): """ Return path's uid. 
Does not follow symlinks: https://github.com/pypa/pip/pull/935#discussion_r5307003 Placed this function in compat due to differences on AIX and Jython, that should eventually go away. :raises OSError: When path is a symlink or can't be read. """ if hasattr(os, 'O_NOFOLLOW'): fd = os.open(path, os.O_RDONLY | os.O_NOFOLLOW) file_uid = os.fstat(fd).st_uid os.close(fd) else: # AIX and Jython # WARNING: time of check vulnerability, but best we can do w/o NOFOLLOW if not os.path.islink(path): # older versions of Jython don't have `os.fstat` file_uid = os.stat(path).st_uid else: # raise OSError for parity with os.O_NOFOLLOW above raise OSError( "%s is a symlink; Will not return uid for symlinks" % path ) return file_uid if sys.version_info >= (3, 4): from importlib.machinery import EXTENSION_SUFFIXES def get_extension_suffixes(): return EXTENSION_SUFFIXES else: from imp import get_suffixes def get_extension_suffixes(): return [suffix[0] for suffix in get_suffixes()] def expanduser(path): """ Expand ~ and ~user constructions. Includes a workaround for https://bugs.python.org/issue14768 """ expanded = os.path.expanduser(path) if path.startswith('~/') and expanded.startswith('//'): expanded = expanded[1:] return expanded # packages in the stdlib that may have installation metadata, but should not be # considered 'installed'. 
this theoretically could be determined based on # dist.location (py27:`sysconfig.get_paths()['stdlib']`, # py26:sysconfig.get_config_vars('LIBDEST')), but fear platform variation may # make this ineffective, so hard-coding stdlib_pkgs = {"python", "wsgiref", "argparse"} # windows detection, covers cpython and ironpython WINDOWS = (sys.platform.startswith("win") or (sys.platform == 'cli' and os.name == 'nt')) def samefile(file1, file2): """Provide an alternative for os.path.samefile on Windows/Python2""" if hasattr(os.path, 'samefile'): return os.path.samefile(file1, file2) else: path1 = os.path.normcase(os.path.abspath(file1)) path2 = os.path.normcase(os.path.abspath(file2)) return path1 == path2 if hasattr(shutil, 'get_terminal_size'): def get_terminal_size(): """ Returns a tuple (x, y) representing the width(x) and the height(y) in characters of the terminal window. """ return tuple(shutil.get_terminal_size()) else: def get_terminal_size(): """ Returns a tuple (x, y) representing the width(x) and the height(y) in characters of the terminal window. """ def ioctl_GWINSZ(fd): try: import fcntl import termios import struct cr = struct.unpack_from( 'hh', fcntl.ioctl(fd, termios.TIOCGWINSZ, '12345678') ) except Exception: return None if cr == (0, 0): return None return cr cr = ioctl_GWINSZ(0) or ioctl_GWINSZ(1) or ioctl_GWINSZ(2) if not cr: try: fd = os.open(os.ctermid(), os.O_RDONLY) cr = ioctl_GWINSZ(fd) os.close(fd) except Exception: pass if not cr: cr = (os.environ.get('LINES', 25), os.environ.get('COLUMNS', 80)) return int(cr[1]), int(cr[0])
gpl-3.0
openshift/openshift-tools
openshift/installer/vendored/openshift-ansible-3.10.0-0.29.0/roles/lib_openshift/library/oc_serviceaccount.py
8
61081
#!/usr/bin/env python # pylint: disable=missing-docstring # flake8: noqa: T001 # ___ ___ _ _ ___ ___ _ _____ ___ ___ # / __| __| \| | __| _ \ /_\_ _| __| \ # | (_ | _|| .` | _|| / / _ \| | | _|| |) | # \___|___|_|\_|___|_|_\/_/_\_\_|_|___|___/_ _____ # | \ / _ \ | \| |/ _ \_ _| | __| \_ _|_ _| # | |) | (_) | | .` | (_) || | | _|| |) | | | | # |___/ \___/ |_|\_|\___/ |_| |___|___/___| |_| # # Copyright 2016 Red Hat, Inc. and/or its affiliates # and other contributors as indicated by the @author tags. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # -*- -*- -*- Begin included fragment: lib/import.py -*- -*- -*- ''' OpenShiftCLI class that wraps the oc commands in a subprocess ''' # pylint: disable=too-many-lines from __future__ import print_function import atexit import copy import fcntl import json import time import os import re import shutil import subprocess import tempfile # pylint: disable=import-error try: import ruamel.yaml as yaml except ImportError: import yaml from ansible.module_utils.basic import AnsibleModule # -*- -*- -*- End included fragment: lib/import.py -*- -*- -*- # -*- -*- -*- Begin included fragment: doc/serviceaccount -*- -*- -*- DOCUMENTATION = ''' --- module: oc_serviceaccount short_description: Module to manage openshift service accounts description: - Manage openshift service accounts programmatically. options: state: description: - If present, the service account will be created if it doesn't exist or updated if different. 
If absent, the service account will be removed if present. If list, information about the service account will be gathered and returned as part of the Ansible call results. required: false default: present choices: ["present", "absent", "list"] aliases: [] kubeconfig: description: - The path for the kubeconfig file to use for authentication required: false default: /etc/origin/master/admin.kubeconfig aliases: [] debug: description: - Turn on debug output. required: false default: false aliases: [] name: description: - Name of the service account. required: true default: None aliases: [] namespace: description: - Namespace of the service account. required: true default: default aliases: [] secrets: description: - A list of secrets that are associated with the service account. required: false default: None aliases: [] image_pull_secrets: description: - A list of the image pull secrets that are associated with the service account. required: false default: None aliases: [] author: - "Kenny Woodson <kwoodson@redhat.com>" extends_documentation_fragment: [] ''' EXAMPLES = ''' - name: create registry serviceaccount oc_serviceaccount: name: registry namespace: default secrets: - docker-registry-config - registry-secret register: sa_out ''' # -*- -*- -*- End included fragment: doc/serviceaccount -*- -*- -*- # -*- -*- -*- Begin included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*- class YeditException(Exception): # pragma: no cover ''' Exception class for Yedit ''' pass # pylint: disable=too-many-public-methods,too-many-instance-attributes class Yedit(object): # pragma: no cover ''' Class to modify yaml files ''' re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$" re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z{}/_-]+)" com_sep = set(['.', '#', '|', ':']) # pylint: disable=too-many-arguments def __init__(self, filename=None, content=None, content_type='yaml', separator='.', backup_ext=None, backup=False): self.content = content self._separator = separator 
        # (continuation of Yedit.__init__ — signature is above this chunk)
        self.filename = filename
        self.__yaml_dict = content
        self.content_type = content_type
        self.backup = backup
        if backup_ext is None:
            # default backup suffix is a timestamp, e.g. ".20240101T120000"
            self.backup_ext = ".{}".format(time.strftime("%Y%m%dT%H%M%S"))
        else:
            self.backup_ext = backup_ext
        self.load(content_type=self.content_type)
        if self.__yaml_dict is None:
            self.__yaml_dict = {}

    @property
    def separator(self):
        ''' getter method for separator '''
        return self._separator

    @separator.setter
    def separator(self, inc_sep):
        ''' setter method for separator '''
        self._separator = inc_sep

    @property
    def yaml_dict(self):
        ''' getter method for yaml_dict '''
        return self.__yaml_dict

    @yaml_dict.setter
    def yaml_dict(self, value):
        ''' setter method for yaml_dict '''
        self.__yaml_dict = value

    @staticmethod
    def parse_key(key, sep='.'):
        '''parse the key allowing the appropriate separator'''
        # NOTE(review): Yedit.com_sep / Yedit.re_key are class attributes declared
        # above this chunk — assumed to be the separator set and key regex; confirm.
        common_separators = list(Yedit.com_sep - set([sep]))
        return re.findall(Yedit.re_key.format(''.join(common_separators)), key)

    @staticmethod
    def valid_key(key, sep='.'):
        '''validate the incoming key'''
        common_separators = list(Yedit.com_sep - set([sep]))
        if not re.match(Yedit.re_valid_key.format(''.join(common_separators)), key):
            return False

        return True

    # pylint: disable=too-many-return-statements,too-many-branches
    @staticmethod
    def remove_entry(data, key, index=None, value=None, sep='.'):
        ''' remove data at location key

            Returns True when something was removed, False/None otherwise
            (implicitly returns None when the final key matched nothing).
        '''
        # key == '' addresses the root of the document
        if key == '' and isinstance(data, dict):
            if value is not None:
                data.pop(value)
            elif index is not None:
                raise YeditException("remove_entry for a dictionary does not have an index {}".format(index))
            else:
                data.clear()

            return True

        elif key == '' and isinstance(data, list):
            ind = None
            if value is not None:
                try:
                    ind = data.index(value)
                except ValueError:
                    return False
            elif index is not None:
                ind = index
            else:
                del data[:]

            if ind is not None:
                data.pop(ind)

            return True

        if not (key and Yedit.valid_key(key, sep)) and \
           isinstance(data, (list, dict)):
            return None

        # walk every component except the last to find the parent container
        key_indexes = Yedit.parse_key(key, sep)
        for arr_ind, dict_key in key_indexes[:-1]:
            if dict_key and isinstance(data, dict):
                data = data.get(dict_key)
            elif (arr_ind and isinstance(data, list) and
                  int(arr_ind) <= len(data) - 1):
                data = data[int(arr_ind)]
            else:
                return None

        # process last index for remove
        # expected list entry
        if key_indexes[-1][0]:
            if isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1:  # noqa: E501
                del data[int(key_indexes[-1][0])]
                return True

        # expected dict entry
        elif key_indexes[-1][1]:
            if isinstance(data, dict):
                del data[key_indexes[-1][1]]
                return True

    @staticmethod
    def add_entry(data, key, item=None, sep='.'):
        ''' Add (or overwrite) *item* in a nested structure using key notation.

            Intermediate dictionaries are created as needed, e.g.
            data = {'a': {'b': {}}}, key = 'a.b.c', item = 1
            results in {'a': {'b': {'c': 1}}}.

            Returns the value written, or None for an invalid key.
        '''
        if key == '':
            pass
        elif (not (key and Yedit.valid_key(key, sep)) and
              isinstance(data, (list, dict))):
            return None

        key_indexes = Yedit.parse_key(key, sep)
        for arr_ind, dict_key in key_indexes[:-1]:
            if dict_key:
                if isinstance(data, dict) and dict_key in data and data[dict_key]:  # noqa: E501
                    data = data[dict_key]
                    continue

                elif data and not isinstance(data, dict):
                    raise YeditException("Unexpected item type found while going through key " +
                                         "path: {} (at key: {})".format(key, dict_key))

                data[dict_key] = {}
                data = data[dict_key]

            elif (arr_ind and isinstance(data, list) and
                  int(arr_ind) <= len(data) - 1):
                data = data[int(arr_ind)]
            else:
                raise YeditException("Unexpected item type found while going through key path: {}".format(key))

        if key == '':
            data = item

        # process last index for add
        # expected list entry
        elif key_indexes[-1][0] and isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1:  # noqa: E501
            data[int(key_indexes[-1][0])] = item

        # expected dict entry
        elif key_indexes[-1][1] and isinstance(data, dict):
            data[key_indexes[-1][1]] = item

        # didn't add/update to an existing list, nor add/update key to a dict
        # so we must have been provided some syntax like a.b.c[<int>] = "data" for a
        # non-existent array
        else:
            raise YeditException("Error adding to object at path: {}".format(key))

        return data

    @staticmethod
    def get_entry(data, key, sep='.'):
        ''' Get an item from a dictionary with key notation a.b.c
            d = {'a': {'b': 'c'}}}
            key = a.b
            return c
        '''
        if key == '':
            pass
        elif (not (key and Yedit.valid_key(key, sep)) and
              isinstance(data, (list, dict))):
            return None

        key_indexes = Yedit.parse_key(key, sep)
        for arr_ind, dict_key in key_indexes:
            if dict_key and isinstance(data, dict):
                data = data.get(dict_key)
            elif (arr_ind and isinstance(data, list) and
                  int(arr_ind) <= len(data) - 1):
                data = data[int(arr_ind)]
            else:
                return None

        return data

    @staticmethod
    def _write(filename, contents):
        ''' Actually write the file contents to disk.  This helps with mocking. '''
        # write to a sibling temp file then rename for an atomic replace;
        # the non-blocking exclusive flock guards concurrent yedit writers
        tmp_filename = filename + '.yedit'

        with open(tmp_filename, 'w') as yfd:
            fcntl.flock(yfd, fcntl.LOCK_EX | fcntl.LOCK_NB)
            yfd.write(contents)
            fcntl.flock(yfd, fcntl.LOCK_UN)

        os.rename(tmp_filename, filename)

    def write(self):
        ''' write to file '''
        if not self.filename:
            raise YeditException('Please specify a filename.')

        if self.backup and self.file_exists():
            shutil.copy(self.filename, '{}{}'.format(self.filename, self.backup_ext))

        # Try to set format attributes if supported
        try:
            self.yaml_dict.fa.set_block_style()
        except AttributeError:
            pass

        # Try to use RoundTripDumper if supported.
        if self.content_type == 'yaml':
            try:
                Yedit._write(self.filename, yaml.dump(self.yaml_dict, Dumper=yaml.RoundTripDumper))
            except AttributeError:
                Yedit._write(self.filename, yaml.safe_dump(self.yaml_dict, default_flow_style=False))
        elif self.content_type == 'json':
            Yedit._write(self.filename, json.dumps(self.yaml_dict, indent=4, sort_keys=True))
        else:
            raise YeditException('Unsupported content_type: {}.'.format(self.content_type) +
                                 'Please specify a content_type of yaml or json.')

        return (True, self.yaml_dict)

    def read(self):
        ''' read from file '''
        # check if it exists
        if self.filename is None or not self.file_exists():
            return None

        contents = None
        with open(self.filename) as yfd:
            contents = yfd.read()

        return contents

    def file_exists(self):
        ''' return whether file exists '''
        if os.path.exists(self.filename):
            return True

        return False

    def load(self, content_type='yaml'):
        ''' return yaml file '''
        contents = self.read()

        if not contents and not self.content:
            return None

        # in-memory content takes precedence over file contents
        if self.content:
            if isinstance(self.content, dict):
                self.yaml_dict = self.content
                return self.yaml_dict
            elif isinstance(self.content, str):
                contents = self.content

        # check if it is yaml
        try:
            if content_type == 'yaml' and contents:
                # Try to set format attributes if supported
                try:
                    self.yaml_dict.fa.set_block_style()
                except AttributeError:
                    pass

                # Try to use RoundTripLoader if supported.
                try:
                    self.yaml_dict = yaml.load(contents, yaml.RoundTripLoader)
                except AttributeError:
                    self.yaml_dict = yaml.safe_load(contents)

                # Try to set format attributes if supported
                try:
                    self.yaml_dict.fa.set_block_style()
                except AttributeError:
                    pass

            elif content_type == 'json' and contents:
                # NOTE(review): json.loads raises ValueError, which the
                # yaml.YAMLError handler below does NOT catch — confirm intended.
                self.yaml_dict = json.loads(contents)
        except yaml.YAMLError as err:
            # Error loading yaml or json
            raise YeditException('Problem with loading yaml file. {}'.format(err))

        return self.yaml_dict

    def get(self, key):
        ''' get a specified key'''
        try:
            entry = Yedit.get_entry(self.yaml_dict, key, self.separator)
        except KeyError:
            entry = None

        return entry

    def pop(self, path, key_or_item):
        ''' remove a key, value pair from a dict or an item for a list'''
        try:
            entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
        except KeyError:
            entry = None

        if entry is None:
            return (False, self.yaml_dict)

        if isinstance(entry, dict):
            # AUDIT:maybe-no-member makes sense due to fuzzy types
            # pylint: disable=maybe-no-member
            if key_or_item in entry:
                entry.pop(key_or_item)
                return (True, self.yaml_dict)
            return (False, self.yaml_dict)

        elif isinstance(entry, list):
            # AUDIT:maybe-no-member makes sense due to fuzzy types
            # pylint: disable=maybe-no-member
            ind = None
            try:
                ind = entry.index(key_or_item)
            except ValueError:
                return (False, self.yaml_dict)

            entry.pop(ind)
            return (True, self.yaml_dict)

        return (False, self.yaml_dict)

    def delete(self, path, index=None, value=None):
        ''' remove path from a dict'''
        try:
            entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
        except KeyError:
            entry = None

        if entry is None:
            return (False, self.yaml_dict)

        result = Yedit.remove_entry(self.yaml_dict, path, index, value, self.separator)
        if not result:
            return (False, self.yaml_dict)

        return (True, self.yaml_dict)

    def exists(self, path, value):
        ''' check if value exists at path'''
        try:
            entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
        except KeyError:
            entry = None

        if isinstance(entry, list):
            if value in entry:
                return True
            return False

        elif isinstance(entry, dict):
            if isinstance(value, dict):
                # for/else: rval becomes True only when every key/val matched
                # (the loop completed without break)
                rval = False
                for key, val in value.items():
                    if entry[key] != val:
                        rval = False
                        break
                else:
                    rval = True
                return rval

            return value in entry

        return entry == value

    def append(self, path, value):
        '''append value to a list'''
        try:
            entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
        except KeyError:
            entry = None

        if entry is None:
            # create an empty list at path first, then re-fetch it
            self.put(path, [])
            entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
        if not isinstance(entry, list):
            return (False, self.yaml_dict)

        # AUDIT:maybe-no-member makes sense due to loading data from
        # a serialized format.
        # pylint: disable=maybe-no-member
        entry.append(value)
        return (True, self.yaml_dict)

    # pylint: disable=too-many-arguments
    def update(self, path, value, index=None, curr_value=None):
        ''' put path, value into a dict '''
        try:
            entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
        except KeyError:
            entry = None

        if isinstance(entry, dict):
            # AUDIT:maybe-no-member makes sense due to fuzzy types
            # pylint: disable=maybe-no-member
            if not isinstance(value, dict):
                raise YeditException('Cannot replace key, value entry in dict with non-dict type. ' +
                                     'value=[{}] type=[{}]'.format(value, type(value)))

            entry.update(value)
            return (True, self.yaml_dict)

        elif isinstance(entry, list):
            # AUDIT:maybe-no-member makes sense due to fuzzy types
            # pylint: disable=maybe-no-member
            ind = None
            if curr_value:
                try:
                    ind = entry.index(curr_value)
                except ValueError:
                    return (False, self.yaml_dict)

            elif index is not None:
                ind = index

            if ind is not None and entry[ind] != value:
                entry[ind] = value
                return (True, self.yaml_dict)

            # see if it exists in the list
            try:
                ind = entry.index(value)
            except ValueError:
                # doesn't exist, append it
                entry.append(value)
                return (True, self.yaml_dict)

            # already exists, return
            if ind is not None:
                return (False, self.yaml_dict)
        return (False, self.yaml_dict)

    def put(self, path, value):
        ''' put path, value into a dict '''
        try:
            entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
        except KeyError:
            entry = None

        if entry == value:
            return (False, self.yaml_dict)

        # deepcopy didn't work
        # Try to use ruamel.yaml and fallback to pyyaml
        try:
            tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
                                                      default_flow_style=False),
                                 yaml.RoundTripLoader)
        except AttributeError:
            tmp_copy = copy.deepcopy(self.yaml_dict)

        # set the format attributes if available
        try:
            tmp_copy.fa.set_block_style()
        except AttributeError:
            pass

        result = Yedit.add_entry(tmp_copy, path, value, self.separator)
        if result is None:
            return (False, self.yaml_dict)

        # When path equals "" it is a special case.
        # "" refers to the root of the document
        # Only update the root path (entire document) when its a list or dict
        if path == '':
            if isinstance(result, list) or isinstance(result, dict):
                self.yaml_dict = result
                return (True, self.yaml_dict)

            return (False, self.yaml_dict)

        self.yaml_dict = tmp_copy

        return (True, self.yaml_dict)

    def create(self, path, value):
        ''' create a yaml file '''
        if not self.file_exists():
            # deepcopy didn't work
            # Try to use ruamel.yaml and fallback to pyyaml
            try:
                tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
                                                          default_flow_style=False),
                                     yaml.RoundTripLoader)
            except AttributeError:
                tmp_copy = copy.deepcopy(self.yaml_dict)

            # set the format attributes if available
            try:
                tmp_copy.fa.set_block_style()
            except AttributeError:
                pass

            result = Yedit.add_entry(tmp_copy, path, value, self.separator)
            if result is not None:
                self.yaml_dict = tmp_copy
                return (True, self.yaml_dict)

        return (False, self.yaml_dict)

    @staticmethod
    def get_curr_value(invalue, val_type):
        '''return the current value'''
        if invalue is None:
            return None

        curr_value = invalue
        if val_type == 'yaml':
            curr_value = yaml.safe_load(str(invalue))
        elif val_type == 'json':
            curr_value = json.loads(invalue)

        return curr_value

    @staticmethod
    def parse_value(inc_value, vtype=''):
        '''determine value type passed'''
        true_bools = ['y', 'Y', 'yes', 'Yes', 'YES', 'true', 'True', 'TRUE',
                      'on', 'On', 'ON', ]
        false_bools = ['n', 'N', 'no', 'No', 'NO', 'false', 'False', 'FALSE',
                       'off', 'Off', 'OFF']

        # It came in as a string but you didn't specify value_type as string
        # we will convert to bool if it matches any of the above cases
        if isinstance(inc_value, str) and 'bool' in vtype:
            if inc_value not in true_bools and inc_value not in false_bools:
                raise YeditException('Not a boolean type. str=[{}] vtype=[{}]'.format(inc_value, vtype))
        elif isinstance(inc_value, bool) and 'str' in vtype:
            inc_value = str(inc_value)

        # There is a special case where '' will turn into None after yaml loading it so skip
        if isinstance(inc_value, str) and inc_value == '':
            pass
        # If vtype is not str then go ahead and attempt to yaml load it.
        elif isinstance(inc_value, str) and 'str' not in vtype:
            try:
                inc_value = yaml.safe_load(inc_value)
            except Exception:
                raise YeditException('Could not determine type of incoming value. ' +
                                     'value=[{}] vtype=[{}]'.format(type(inc_value), vtype))

        return inc_value

    @staticmethod
    def process_edits(edits, yamlfile):
        '''run through a list of edits and process them one-by-one'''
        results = []
        for edit in edits:
            value = Yedit.parse_value(edit['value'], edit.get('value_type', ''))
            if edit.get('action') == 'update':
                # pylint: disable=line-too-long
                curr_value = Yedit.get_curr_value(
                    Yedit.parse_value(edit.get('curr_value')),
                    edit.get('curr_value_format'))

                rval = yamlfile.update(edit['key'],
                                       value,
                                       edit.get('index'),
                                       curr_value)

            elif edit.get('action') == 'append':
                rval = yamlfile.append(edit['key'], value)

            else:
                rval = yamlfile.put(edit['key'], value)

            if rval[0]:
                results.append({'key': edit['key'], 'edit': rval[1]})

        return {'changed': len(results) > 0, 'results': results}

    # pylint: disable=too-many-return-statements,too-many-branches
    @staticmethod
    def run_ansible(params):
        '''perform the idempotent crud operations'''
        yamlfile = Yedit(filename=params['src'],
                         backup=params['backup'],
                         content_type=params['content_type'],
                         backup_ext=params['backup_ext'],
                         separator=params['separator'])

        state = params['state']

        if params['src']:
            rval = yamlfile.load()

            if yamlfile.yaml_dict is None and state != 'present':
                return {'failed': True,
                        'msg': 'Error opening file [{}].  Verify that the '.format(params['src']) +
                               'file exists, that it is has correct permissions, and is valid yaml.'}

        if state == 'list':
            if params['content']:
                content = Yedit.parse_value(params['content'], params['content_type'])
                yamlfile.yaml_dict = content

            if params['key']:
                rval = yamlfile.get(params['key'])

            return {'changed': False, 'result': rval, 'state': state}

        elif state == 'absent':
            if params['content']:
                content = Yedit.parse_value(params['content'], params['content_type'])
                yamlfile.yaml_dict = content

            if params['update']:
                rval = yamlfile.pop(params['key'], params['value'])
            else:
                rval = yamlfile.delete(params['key'], params['index'], params['value'])

            if rval[0] and params['src']:
                yamlfile.write()

            return {'changed': rval[0], 'result': rval[1], 'state': state}

        elif state == 'present':
            # check if content is different than what is in the file
            if params['content']:
                content = Yedit.parse_value(params['content'], params['content_type'])

                # We had no edits to make and the contents are the same
                if yamlfile.yaml_dict == content and \
                   params['value'] is None:
                    return {'changed': False, 'result': yamlfile.yaml_dict, 'state': state}

                yamlfile.yaml_dict = content

            # If we were passed a key, value then
            # we enapsulate it in a list and process it
            # Key, Value passed to the module : Converted to Edits list
            #
            edits = []
            _edit = {}
            if params['value'] is not None:
                _edit['value'] = params['value']
                _edit['value_type'] = params['value_type']
                _edit['key'] = params['key']

                if params['update']:
                    _edit['action'] = 'update'
                    _edit['curr_value'] = params['curr_value']
                    _edit['curr_value_format'] = params['curr_value_format']
                    _edit['index'] = params['index']

                elif params['append']:
                    _edit['action'] = 'append'

                edits.append(_edit)

            elif params['edits'] is not None:
                edits = params['edits']

            if edits:
                results = Yedit.process_edits(edits, yamlfile)

                # if there were changes and a src provided to us we need to write
                if results['changed'] and params['src']:
                    yamlfile.write()

                return {'changed': results['changed'], 'result': results['results'], 'state': state}

            # no edits to make
            if params['src']:
                # pylint: disable=redefined-variable-type
                rval = yamlfile.write()
                return {'changed': rval[0],
                        'result': rval[1],
                        'state': state}

            # We were passed content but no src, key or value, or edits.  Return contents in memory
            return {'changed': False, 'result': yamlfile.yaml_dict, 'state': state}

        return {'failed': True, 'msg': 'Unkown state passed'}

# -*- -*- -*- End included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-

# -*- -*- -*- Begin included fragment: lib/base.py -*- -*- -*-

# pylint: disable=too-many-lines
# noqa: E301,E302,E303,T001


class OpenShiftCLIError(Exception):
    '''Exception class for openshiftcli'''
    pass


ADDITIONAL_PATH_LOOKUPS = ['/usr/local/bin', os.path.expanduser('~/bin')]


def locate_oc_binary():
    ''' Find and return oc binary file '''
    # https://github.com/openshift/openshift-ansible/issues/3410
    # oc can be in /usr/local/bin in some cases, but that may not
    # be in $PATH due to ansible/sudo
    paths = os.environ.get("PATH", os.defpath).split(os.pathsep) + ADDITIONAL_PATH_LOOKUPS

    oc_binary = 'oc'

    # Use shutil.which if it is available, otherwise fallback to a naive path search
    try:
        which_result = shutil.which(oc_binary, path=os.pathsep.join(paths))
        if which_result is not None:
            oc_binary = which_result
    except AttributeError:
        for path in paths:
            if os.path.exists(os.path.join(path, oc_binary)):
                oc_binary = os.path.join(path, oc_binary)
                break

    return oc_binary


# pylint: disable=too-few-public-methods
class OpenShiftCLI(object):
    ''' Class to wrap the command line tools '''
    def __init__(self,
                 namespace,
                 kubeconfig='/etc/origin/master/admin.kubeconfig',
                 verbose=False,
                 all_namespaces=False):
        ''' Constructor for OpenshiftCLI '''
        self.namespace = namespace
        self.verbose = verbose
        # work from a private copy of the kubeconfig so the original is never mutated
        self.kubeconfig = Utils.create_tmpfile_copy(kubeconfig)
        self.all_namespaces = all_namespaces
        self.oc_binary = locate_oc_binary()

    # Pylint allows only 5 arguments to be passed.
    # pylint: disable=too-many-arguments
    def _replace_content(self, resource, rname, content, edits=None, force=False, sep='.'):
        ''' replace the current object with the content '''
        res = self._get(resource, rname)
        if not res['results']:
            return res

        fname = Utils.create_tmpfile(rname + '-')

        yed = Yedit(fname, res['results'][0], separator=sep)
        updated = False

        if content is not None:
            changes = []
            for key, value in content.items():
                changes.append(yed.put(key, value))

            if any([change[0] for change in changes]):
                updated = True

        elif edits is not None:
            results = Yedit.process_edits(edits, yed)

            if results['changed']:
                updated = True

        if updated:
            yed.write()
            atexit.register(Utils.cleanup, [fname])

            return self._replace(fname, force)

        return {'returncode': 0, 'updated': False}

    def _replace(self, fname, force=False):
        '''replace the current object with oc replace'''
        # We are removing the 'resourceVersion' to handle
        # a race condition when modifying oc objects
        yed = Yedit(fname)
        results = yed.delete('metadata.resourceVersion')
        if results[0]:
            yed.write()

        cmd = ['replace', '-f', fname]
        if force:
            cmd.append('--force')
        return self.openshift_cmd(cmd)

    def _create_from_content(self, rname, content):
        '''create a temporary file and then call oc create on it'''
        fname = Utils.create_tmpfile(rname + '-')
        yed = Yedit(fname, content=content)
        yed.write()

        atexit.register(Utils.cleanup, [fname])

        return self._create(fname)

    def _create(self, fname):
        '''call oc create on a filename'''
        return self.openshift_cmd(['create', '-f', fname])

    def _delete(self, resource, name=None, selector=None):
        '''call oc delete on a resource'''
        cmd = ['delete', resource]
        if selector is not None:
            cmd.append('--selector={}'.format(selector))
        elif name is not None:
            cmd.append(name)
        else:
            raise OpenShiftCLIError('Either name or selector is required when calling delete.')

        return self.openshift_cmd(cmd)

    def _process(self, template_name, create=False, params=None, template_data=None):  # noqa: E501
        '''process a template

           template_name: the name of the template to process
           create: whether to send to oc create after processing
           params: the parameters for the template
           template_data: the incoming template's data; instead of a file
        '''
        cmd = ['process']
        if template_data:
            cmd.extend(['-f', '-'])
        else:
            cmd.append(template_name)
        if params:
            param_str = ["{}={}".format(key, str(value).replace("'", r'"')) for key, value in params.items()]
            cmd.append('-p')
            cmd.extend(param_str)

        results = self.openshift_cmd(cmd, output=True, input_data=template_data)

        if results['returncode'] != 0 or not create:
            return results

        fname = Utils.create_tmpfile(template_name + '-')
        yed = Yedit(fname, results['results'])
        yed.write()

        atexit.register(Utils.cleanup, [fname])

        return self.openshift_cmd(['create', '-f', fname])

    def _get(self, resource, name=None, selector=None, field_selector=None):
        '''return a resource by name '''
        cmd = ['get', resource]

        if selector is not None:
            cmd.append('--selector={}'.format(selector))

        if field_selector is not None:
            cmd.append('--field-selector={}'.format(field_selector))

        # Name cannot be used with selector or field_selector.
        if selector is None and field_selector is None and name is not None:
            cmd.append(name)

        cmd.extend(['-o', 'json'])

        rval = self.openshift_cmd(cmd, output=True)

        # Ensure results are retuned in an array
        if 'items' in rval:
            rval['results'] = rval['items']
        elif not isinstance(rval['results'], list):
            rval['results'] = [rval['results']]

        return rval

    def _schedulable(self, node=None, selector=None, schedulable=True):
        ''' perform oadm manage-node scheduable '''
        cmd = ['manage-node']
        if node:
            cmd.extend(node)
        else:
            cmd.append('--selector={}'.format(selector))

        cmd.append('--schedulable={}'.format(schedulable))

        return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')  # noqa: E501

    def _list_pods(self, node=None, selector=None, pod_selector=None):
        ''' perform oadm list pods

            node: the node in which to list pods
            selector: the label selector filter if provided
            pod_selector: the pod selector filter if provided
        '''
        cmd = ['manage-node']
        if node:
            cmd.extend(node)
        else:
            cmd.append('--selector={}'.format(selector))

        if pod_selector:
            cmd.append('--pod-selector={}'.format(pod_selector))

        cmd.extend(['--list-pods', '-o', 'json'])

        return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')

    # pylint: disable=too-many-arguments
    def _evacuate(self, node=None, selector=None, pod_selector=None, dry_run=False, grace_period=None, force=False):
        ''' perform oadm manage-node evacuate '''
        cmd = ['manage-node']
        if node:
            cmd.extend(node)
        else:
            cmd.append('--selector={}'.format(selector))

        if dry_run:
            cmd.append('--dry-run')

        if pod_selector:
            cmd.append('--pod-selector={}'.format(pod_selector))

        if grace_period:
            cmd.append('--grace-period={}'.format(int(grace_period)))

        if force:
            cmd.append('--force')

        cmd.append('--evacuate')

        return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')

    def _version(self):
        ''' return the openshift version'''
        return self.openshift_cmd(['version'], output=True, output_type='raw')

    def _import_image(self, url=None, name=None, tag=None):
        ''' perform image import '''
        cmd = ['import-image']

        image = '{0}'.format(name)
        if tag:
            image += ':{0}'.format(tag)
        cmd.append(image)

        if url:
            cmd.append('--from={0}/{1}'.format(url, image))

        cmd.append('-n{0}'.format(self.namespace))

        cmd.append('--confirm')
        return self.openshift_cmd(cmd)

    def _run(self, cmds, input_data):
        ''' Actually executes the command. This makes mocking easier. '''
        curr_env = os.environ.copy()
        curr_env.update({'KUBECONFIG': self.kubeconfig})
        proc = subprocess.Popen(cmds,
                                stdin=subprocess.PIPE,
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE,
                                env=curr_env)

        stdout, stderr = proc.communicate(input_data)

        return proc.returncode, stdout.decode('utf-8'), stderr.decode('utf-8')

    # pylint: disable=too-many-arguments,too-many-branches
    def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
        '''Base command for oc '''
        cmds = [self.oc_binary]

        if oadm:
            cmds.append('adm')

        cmds.extend(cmd)

        if self.all_namespaces:
            cmds.extend(['--all-namespaces'])
        # NOTE(review): 'emtpy' typo preserved from the original sentinel list —
        # changing it would alter which namespace strings are treated as unset.
        elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']:  # E501
            cmds.extend(['-n', self.namespace])

        if self.verbose:
            print(' '.join(cmds))

        try:
            returncode, stdout, stderr = self._run(cmds, input_data)
        except OSError as ex:
            returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)

        rval = {"returncode": returncode,
                "cmd": ' '.join(cmds)}

        if output_type == 'json':
            rval['results'] = {}
            if output and stdout:
                try:
                    rval['results'] = json.loads(stdout)
                except ValueError as verr:
                    if "No JSON object could be decoded" in verr.args:
                        rval['err'] = verr.args
        elif output_type == 'raw':
            rval['results'] = stdout if output else ''

        if self.verbose:
            print("STDOUT: {0}".format(stdout))
            print("STDERR: {0}".format(stderr))

        if 'err' in rval or returncode != 0:
            rval.update({"stderr": stderr,
                         "stdout": stdout})

        return rval


class Utils(object):  # pragma: no cover
    ''' utilities for openshiftcli modules '''

    @staticmethod
    def _write(filename, contents):
        ''' Actually write the file contents to disk. This helps with mocking. '''
        with open(filename, 'w') as sfd:
            sfd.write(str(contents))

    @staticmethod
    def create_tmp_file_from_contents(rname, data, ftype='yaml'):
        ''' create a file in tmp with name and contents'''

        tmp = Utils.create_tmpfile(prefix=rname)

        if ftype == 'yaml':
            # AUDIT:no-member makes sense here due to ruamel.YAML/PyYAML usage
            # pylint: disable=no-member
            if hasattr(yaml, 'RoundTripDumper'):
                Utils._write(tmp, yaml.dump(data, Dumper=yaml.RoundTripDumper))
            else:
                Utils._write(tmp, yaml.safe_dump(data, default_flow_style=False))

        elif ftype == 'json':
            Utils._write(tmp, json.dumps(data))
        else:
            Utils._write(tmp, data)

        # Register cleanup when module is done
        atexit.register(Utils.cleanup, [tmp])
        return tmp

    @staticmethod
    def create_tmpfile_copy(inc_file):
        '''create a temporary copy of a file'''
        tmpfile = Utils.create_tmpfile('lib_openshift-')
        Utils._write(tmpfile, open(inc_file).read())

        # Cleanup the tmpfile
        atexit.register(Utils.cleanup, [tmpfile])

        return tmpfile

    @staticmethod
    def create_tmpfile(prefix='tmp'):
        ''' Generates and returns a temporary file name '''

        with tempfile.NamedTemporaryFile(prefix=prefix, delete=False) as tmp:
            return tmp.name

    @staticmethod
    def create_tmp_files_from_contents(content, content_type=None):
        '''Turn an array of dict: filename, content into a files array'''
        if not isinstance(content, list):
            content = [content]
        files = []
        for item in content:
            path = Utils.create_tmp_file_from_contents(item['path'] + '-',
                                                       item['data'],
                                                       ftype=content_type)
            files.append({'name': os.path.basename(item['path']),
                          'path': path})
        return files

    @staticmethod
    def cleanup(files):
        '''Clean up on exit '''
        for sfile in files:
            if os.path.exists(sfile):
                if os.path.isdir(sfile):
                    shutil.rmtree(sfile)
                elif os.path.isfile(sfile):
                    os.remove(sfile)

    @staticmethod
    def exists(results, _name):
        ''' Check to see if the results include the name '''
        if not results:
            return False

        if Utils.find_result(results, _name):
            return True

        return False

    @staticmethod
    def find_result(results, _name):
        ''' Find the specified result by name'''
        rval = None
        for result in results:
            if 'metadata' in result and result['metadata']['name'] == _name:
                rval = result
                break

        return rval

    @staticmethod
    def get_resource_file(sfile, sfile_type='yaml'):
        ''' return the service file '''
        contents = None
        with open(sfile) as sfd:
            contents = sfd.read()

        if sfile_type == 'yaml':
            # AUDIT:no-member makes sense here due to ruamel.YAML/PyYAML usage
            # pylint: disable=no-member
            if hasattr(yaml, 'RoundTripLoader'):
                contents = yaml.load(contents, yaml.RoundTripLoader)
            else:
                contents = yaml.safe_load(contents)
        elif sfile_type == 'json':
            contents = json.loads(contents)

        return contents

    @staticmethod
    def filter_versions(stdout):
        ''' filter the oc version output '''

        version_dict = {}
        version_search = ['oc', 'openshift', 'kubernetes']

        for line in stdout.strip().split('\n'):
            for term in version_search:
                if not line:
                    continue
                if line.startswith(term):
                    version_dict[term] = line.split()[-1]

        # horrible hack to get openshift version in Openshift 3.2
        # By default "oc version in 3.2 does not return an "openshift" version
        if "openshift" not in version_dict:
            version_dict["openshift"] = version_dict["oc"]

        return version_dict

    @staticmethod
    def add_custom_versions(versions):
        ''' create custom versions strings '''

        versions_dict = {}

        for tech, version in versions.items():
            # clean up "-" from version
            if "-" in version:
                version = version.split("-")[0]

            # NOTE(review): only versions with a leading 'v' get the
            # _numeric/_short entries — confirm that is intended.
            if version.startswith('v'):
                version = version[1:]  # Remove the 'v' prefix
                versions_dict[tech + '_numeric'] = version.split('+')[0]
                # "3.3.0.33" is what we have, we want "3.3"
                versions_dict[tech + '_short'] = "{}.{}".format(*version.split('.'))

        return versions_dict

    @staticmethod
    def openshift_installed():
        ''' check if openshift is installed '''
        # local import: the rpm bindings are only needed (and present) on hosts
        import rpm

        transaction_set = rpm.TransactionSet()
        rpmquery = transaction_set.dbMatch("name", "atomic-openshift")

        return rpmquery.count() > 0
    # Disabling too-many-branches.  This is a yaml dictionary comparison function
    # pylint: disable=too-many-branches,too-many-return-statements,too-many-statements
    @staticmethod
    def check_def_equal(user_def, result_def, skip_keys=None, debug=False):
        ''' Given a user defined definition, compare it with the results given back by our query.  '''

        # Currently these values are autogenerated and we do not need to check them
        skip = ['metadata', 'status']
        if skip_keys:
            skip.extend(skip_keys)

        for key, value in result_def.items():
            if key in skip:
                continue

            # Both are lists
            if isinstance(value, list):
                if key not in user_def:
                    if debug:
                        print('User data does not have key [%s]' % key)
                        print('User data: %s' % user_def)
                    return False

                if not isinstance(user_def[key], list):
                    if debug:
                        print('user_def[key] is not a list key=[%s] user_def[key]=%s' % (key, user_def[key]))
                    return False

                if len(user_def[key]) != len(value):
                    if debug:
                        print("List lengths are not equal.")
                        print("key=[%s]: user_def[%s] != value[%s]" % (key, len(user_def[key]), len(value)))
                        print("user_def: %s" % user_def[key])
                        print("value: %s" % value)
                    return False

                for values in zip(user_def[key], value):
                    if isinstance(values[0], dict) and isinstance(values[1], dict):  # noqa: E501
                        if debug:
                            print('sending list - list')
                            print(type(values[0]))
                            print(type(values[1]))
                        result = Utils.check_def_equal(values[0], values[1], skip_keys=skip_keys, debug=debug)
                        if not result:
                            print('list compare returned false')
                            return False

                    # NOTE(review): compares the whole lists, not the zipped
                    # pair, when the elements are not both dicts — confirm.
                    elif value != user_def[key]:
                        if debug:
                            print('value should be identical')
                            print(user_def[key])
                            print(value)
                        return False

            # recurse on a dictionary
            elif isinstance(value, dict):
                if key not in user_def:
                    if debug:
                        print("user_def does not have key [%s]" % key)
                    return False
                if not isinstance(user_def[key], dict):
                    if debug:
                        print("dict returned false: not instance of dict")
                    return False

                # before passing ensure keys match
                api_values = set(value.keys()) - set(skip)
                user_values = set(user_def[key].keys()) - set(skip)
                if api_values != user_values:
                    if debug:
                        print("keys are not equal in dict")
                        print(user_values)
                        print(api_values)
                    return False

                result = Utils.check_def_equal(user_def[key], value, skip_keys=skip_keys, debug=debug)
                if not result:
                    if debug:
                        print("dict returned false")
                        print(result)
                    return False

            # Verify each key, value pair is the same
            else:
                if key not in user_def or value != user_def[key]:
                    if debug:
                        print("value not equal; user_def does not have key")
                        print(key)
                        print(value)
                        if key in user_def:
                            print(user_def[key])
                    return False

        if debug:
            print('returning true')
        return True


class OpenShiftCLIConfig(object):
    '''Generic Config'''
    def __init__(self, rname, namespace, kubeconfig, options):
        self.kubeconfig = kubeconfig
        self.name = rname
        self.namespace = namespace
        self._options = options

    @property
    def config_options(self):
        ''' return config options '''
        return self._options

    def to_option_list(self, ascommalist=''):
        '''return all options as a string
           if ascommalist is set to the name of a key, and
           the value of that key is a dict, format the dict
           as a list of comma delimited key=value pairs'''
        return self.stringify(ascommalist)

    def stringify(self, ascommalist=''):
        ''' return the options hash as cli params in a string
            if ascommalist is set to the name of a key, and
            the value of that key is a dict, format the dict
            as a list of comma delimited key=value pairs '''
        rval = []
        for key in sorted(self.config_options.keys()):
            data = self.config_options[key]
            if data['include'] \
               and (data['value'] is not None or isinstance(data['value'], int)):
                if key == ascommalist:
                    val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())])
                else:
                    val = data['value']
                rval.append('--{}={}'.format(key.replace('_', '-'), val))

        return rval


# -*- -*- -*- End included fragment: lib/base.py -*- -*- -*-

# -*- -*- -*- Begin included fragment: lib/serviceaccount.py -*- -*- -*-

class ServiceAccountConfig(object):
    '''Service account config class

       This class stores the options and returns a default service account
    '''

    # pylint: disable=too-many-arguments
    def __init__(self, sname, namespace, kubeconfig, secrets=None, image_pull_secrets=None):
        self.name = sname
        self.kubeconfig = kubeconfig
        self.namespace = namespace
        self.secrets = secrets or []
        self.image_pull_secrets = image_pull_secrets or []
        self.data = {}
        self.create_dict()

    def create_dict(self):
        ''' instantiate a properly structured volume '''
        self.data['apiVersion'] = 'v1'
        self.data['kind'] = 'ServiceAccount'
        self.data['metadata'] = {}
        self.data['metadata']['name'] = self.name
        self.data['metadata']['namespace'] = self.namespace

        self.data['secrets'] = []
        if self.secrets:
            for sec in self.secrets:
                self.data['secrets'].append({"name": sec})

        self.data['imagePullSecrets'] = []
        if self.image_pull_secrets:
            for sec in self.image_pull_secrets:
                self.data['imagePullSecrets'].append({"name": sec})


class ServiceAccount(Yedit):
    ''' Class to wrap the oc command line tools '''
    image_pull_secrets_path = "imagePullSecrets"
    secrets_path = "secrets"

    def __init__(self, content):
        '''ServiceAccount constructor'''
        super(ServiceAccount, self).__init__(content=content)
        self._secrets = None
        self._image_pull_secrets = None

    @property
    def image_pull_secrets(self):
        ''' property for image_pull_secrets '''
        if self._image_pull_secrets is None:
            self._image_pull_secrets = self.get(ServiceAccount.image_pull_secrets_path) or []
        return self._image_pull_secrets

    @image_pull_secrets.setter
    def image_pull_secrets(self, secrets):
        ''' property for secrets '''
        self._image_pull_secrets = secrets

    @property
    def secrets(self):
        ''' property for secrets '''
        if not self._secrets:
            self._secrets = self.get(ServiceAccount.secrets_path) or []
        return self._secrets

    @secrets.setter
    def secrets(self, secrets):
        ''' property for secrets '''
        self._secrets = secrets

    def delete_secret(self, inc_secret):
        ''' remove a secret '''
        remove_idx = None
        for idx, sec in enumerate(self.secrets):
            if sec['name'] == inc_secret:
                remove_idx = idx
                break

        # NOTE(review): `if remove_idx:` is falsy for index 0, so a match at
        # position 0 is never removed — likely a latent bug (should be
        # `remove_idx is not None`); flagged only, behavior left unchanged.
        if remove_idx:
            del self.secrets[remove_idx]
            return True

        return False

    def delete_image_pull_secret(self, inc_secret):
        ''' remove a image_pull_secret '''
        remove_idx = None
        for idx, sec in enumerate(self.image_pull_secrets):
            if sec['name'] == inc_secret:
                remove_idx = idx
                break

        # NOTE(review): same index-0 falsiness issue as delete_secret above.
        if remove_idx:
            del self.image_pull_secrets[remove_idx]
            return True

        return False

    def find_secret(self, inc_secret):
        '''find secret'''
        for secret in self.secrets:
            if secret['name'] == inc_secret:
                return secret

        return None

    def find_image_pull_secret(self, inc_secret):
        '''find secret'''
        for secret in self.image_pull_secrets:
            if secret['name'] == inc_secret:
                return secret

        return None

    def add_secret(self, inc_secret):
        '''add secret'''
        if self.secrets:
            self.secrets.append({"name": inc_secret})  # pylint: disable=no-member
        else:
            self.put(ServiceAccount.secrets_path, [{"name": inc_secret}])

    def add_image_pull_secret(self, inc_secret):
        '''add image_pull_secret'''
        if self.image_pull_secrets:
            self.image_pull_secrets.append({"name": inc_secret})  # pylint: disable=no-member
        else:
            self.put(ServiceAccount.image_pull_secrets_path, [{"name": inc_secret}])

# -*- -*- -*- End included fragment: lib/serviceaccount.py -*- -*- -*-

# -*- -*- -*- Begin included fragment: class/oc_serviceaccount.py -*- -*- -*-

# pylint: disable=too-many-instance-attributes
class OCServiceAccount(OpenShiftCLI):
    ''' Class to wrap the oc command line tools '''
    kind = 'sa'

    # pylint allows 5
    # pylint: disable=too-many-arguments
    def __init__(self, config, verbose=False):
        ''' Constructor for OCVolume '''
        super(OCServiceAccount, self).__init__(config.namespace, kubeconfig=config.kubeconfig, verbose=verbose)
        self.config = config
        self.service_account = None

    def exists(self):
        ''' return whether a volume exists '''
        if self.service_account:
            return True

        return False

    def get(self):
        '''return volume information '''
        result = self._get(self.kind, self.config.name)
        if result['returncode'] == 0:
            self.service_account = ServiceAccount(content=result['results'][0])
        # a "not found" from oc is normalized into an empty, successful result
        elif '\"%s\" not found' % self.config.name in result['stderr']:
            result['returncode'] = 0
            result['results'] = [{}]

        return result

    def delete(self):
        '''delete the object'''
        return self._delete(self.kind, self.config.name)

    def create(self):
        '''create the object'''
        return self._create_from_content(self.config.name, self.config.data)

    def update(self):
        '''update the object'''
        # need to update the tls information and the service name
        for secret in self.config.secrets:
            result = self.service_account.find_secret(secret)
            if not result:
                self.service_account.add_secret(secret)

        for secret in self.config.image_pull_secrets:
            result = self.service_account.find_image_pull_secret(secret)
            if not result:
                self.service_account.add_image_pull_secret(secret)

        return self._replace_content(self.kind, self.config.name, self.config.data)

    def needs_update(self):
        ''' verify an update is needed '''
        # since creating an service account generates secrets and imagepullsecrets
        # check_def_equal will not work
        # Instead, verify all secrets passed are in the list
        for secret in self.config.secrets:
            result = self.service_account.find_secret(secret)
            if not result:
                return True

        for secret in self.config.image_pull_secrets:
            result = self.service_account.find_image_pull_secret(secret)
            if not result:
                return True

        return False

    @staticmethod
    # pylint: disable=too-many-return-statements,too-many-branches
    # TODO: This function should be refactored into its individual parts.
def run_ansible(params, check_mode): '''run the ansible idempotent code''' rconfig = ServiceAccountConfig(params['name'], params['namespace'], params['kubeconfig'], params['secrets'], params['image_pull_secrets'], ) oc_sa = OCServiceAccount(rconfig, verbose=params['debug']) state = params['state'] api_rval = oc_sa.get() ##### # Get ##### if state == 'list': return {'changed': False, 'results': api_rval['results'], 'state': 'list'} ######## # Delete ######## if state == 'absent': if oc_sa.exists(): if check_mode: return {'changed': True, 'msg': 'Would have performed a delete.'} api_rval = oc_sa.delete() return {'changed': True, 'results': api_rval, 'state': 'absent'} return {'changed': False, 'state': 'absent'} if state == 'present': ######## # Create ######## if not oc_sa.exists(): if check_mode: return {'changed': True, 'msg': 'Would have performed a create.'} # Create it here api_rval = oc_sa.create() if api_rval['returncode'] != 0: return {'failed': True, 'msg': api_rval} # return the created object api_rval = oc_sa.get() if api_rval['returncode'] != 0: return {'failed': True, 'msg': api_rval} return {'changed': True, 'results': api_rval, 'state': 'present'} ######## # Update ######## if oc_sa.needs_update(): api_rval = oc_sa.update() if api_rval['returncode'] != 0: return {'failed': True, 'msg': api_rval} # return the created object api_rval = oc_sa.get() if api_rval['returncode'] != 0: return {'failed': True, 'msg': api_rval} return {'changed': True, 'results': api_rval, 'state': 'present'} return {'changed': False, 'results': api_rval, 'state': 'present'} return {'failed': True, 'changed': False, 'msg': 'Unknown state passed. 
%s' % state, 'state': 'unknown'} # -*- -*- -*- End included fragment: class/oc_serviceaccount.py -*- -*- -*- # -*- -*- -*- Begin included fragment: ansible/oc_serviceaccount.py -*- -*- -*- def main(): ''' ansible oc module for service accounts ''' module = AnsibleModule( argument_spec=dict( kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'), state=dict(default='present', type='str', choices=['present', 'absent', 'list']), debug=dict(default=False, type='bool'), name=dict(default=None, required=True, type='str'), namespace=dict(default=None, required=True, type='str'), secrets=dict(default=None, type='list'), image_pull_secrets=dict(default=None, type='list'), ), supports_check_mode=True, ) rval = OCServiceAccount.run_ansible(module.params, module.check_mode) if 'failed' in rval: module.fail_json(**rval) module.exit_json(**rval) if __name__ == '__main__': main() # -*- -*- -*- End included fragment: ansible/oc_serviceaccount.py -*- -*- -*-
apache-2.0
jhawkesworth/ansible
test/runner/lib/cloud/opennebula.py
14
1771
"""OpenNebula plugin for integration tests.""" from lib.cloud import ( CloudProvider, CloudEnvironment, CloudEnvironmentConfig, ) from lib.util import ( display, ConfigParser, ) class OpenNebulaCloudProvider(CloudProvider): """Checks if a configuration file has been passed or fixtures are going to be used for testing""" def filter(self, targets, exclude): """ no need to filter modules, they can either run from config file or from fixtures""" pass def setup(self): """Setup the cloud resource before delegation and register a cleanup callback.""" super(OpenNebulaCloudProvider, self).setup() if not self._use_static_config(): self._setup_dynamic() def _setup_dynamic(self): display.info('No config file provided, will run test from fixtures') config = self._read_config_template() values = dict( URL="http://localhost/RPC2", USERNAME='oneadmin', PASSWORD='onepass', FIXTURES='true', REPLAY='true', ) config = self._populate_config_template(config, values) self._write_config(config) class OpenNebulaCloudEnvironment(CloudEnvironment): """ Updates integration test environment after delegation. Will setup the config file as parameter. """ def get_environment_config(self): """ :rtype: CloudEnvironmentConfig """ parser = ConfigParser() parser.read(self.config_path) ansible_vars = dict( resource_prefix=self.resource_prefix, ) ansible_vars.update(dict(parser.items('default'))) return CloudEnvironmentConfig( ansible_vars=ansible_vars, )
gpl-3.0
Orav/kbengine
kbe/src/lib/python/Lib/tkinter/dnd.py
5
11809
"""Drag-and-drop support for Tkinter. This is very preliminary. I currently only support dnd *within* one application, between different windows (or within the same window). I an trying to make this as generic as possible -- not dependent on the use of a particular widget or icon type, etc. I also hope that this will work with Pmw. To enable an object to be dragged, you must create an event binding for it that starts the drag-and-drop process. Typically, you should bind <ButtonPress> to a callback function that you write. The function should call Tkdnd.dnd_start(source, event), where 'source' is the object to be dragged, and 'event' is the event that invoked the call (the argument to your callback function). Even though this is a class instantiation, the returned instance should not be stored -- it will be kept alive automatically for the duration of the drag-and-drop. When a drag-and-drop is already in process for the Tk interpreter, the call is *ignored*; this normally averts starting multiple simultaneous dnd processes, e.g. because different button callbacks all dnd_start(). The object is *not* necessarily a widget -- it can be any application-specific object that is meaningful to potential drag-and-drop targets. Potential drag-and-drop targets are discovered as follows. Whenever the mouse moves, and at the start and end of a drag-and-drop move, the Tk widget directly under the mouse is inspected. This is the target widget (not to be confused with the target object, yet to be determined). If there is no target widget, there is no dnd target object. If there is a target widget, and it has an attribute dnd_accept, this should be a function (or any callable object). The function is called as dnd_accept(source, event), where 'source' is the object being dragged (the object passed to dnd_start() above), and 'event' is the most recent event object (generally a <Motion> event; it can also be <ButtonPress> or <ButtonRelease>). 
If the dnd_accept() function returns something other than None, this is the new dnd target object. If dnd_accept() returns None, or if the target widget has no dnd_accept attribute, the target widget's parent is considered as the target widget, and the search for a target object is repeated from there. If necessary, the search is repeated all the way up to the root widget. If none of the target widgets can produce a target object, there is no target object (the target object is None). The target object thus produced, if any, is called the new target object. It is compared with the old target object (or None, if there was no old target widget). There are several cases ('source' is the source object, and 'event' is the most recent event object): - Both the old and new target objects are None. Nothing happens. - The old and new target objects are the same object. Its method dnd_motion(source, event) is called. - The old target object was None, and the new target object is not None. The new target object's method dnd_enter(source, event) is called. - The new target object is None, and the old target object is not None. The old target object's method dnd_leave(source, event) is called. - The old and new target objects differ and neither is None. The old target object's method dnd_leave(source, event), and then the new target object's method dnd_enter(source, event) is called. Once this is done, the new target object replaces the old one, and the Tk mainloop proceeds. The return value of the methods mentioned above is ignored; if they raise an exception, the normal exception handling mechanisms take over. The drag-and-drop processes can end in two ways: a final target object is selected, or no final target object is selected. 
When a final target object is selected, it will always have been notified of the potential drop by a call to its dnd_enter() method, as described above, and possibly one or more calls to its dnd_motion() method; its dnd_leave() method has not been called since the last call to dnd_enter(). The target is notified of the drop by a call to its method dnd_commit(source, event). If no final target object is selected, and there was an old target object, its dnd_leave(source, event) method is called to complete the dnd sequence. Finally, the source object is notified that the drag-and-drop process is over, by a call to source.dnd_end(target, event), specifying either the selected target object, or None if no target object was selected. The source object can use this to implement the commit action; this is sometimes simpler than to do it in the target's dnd_commit(). The target's dnd_commit() method could then simply be aliased to dnd_leave(). At any time during a dnd sequence, the application can cancel the sequence by calling the cancel() method on the object returned by dnd_start(). This will call dnd_leave() if a target is currently active; it will never call dnd_commit(). 
""" import tkinter # The factory function def dnd_start(source, event): h = DndHandler(source, event) if h.root: return h else: return None # The class that does the work class DndHandler: root = None def __init__(self, source, event): if event.num > 5: return root = event.widget._root() try: root.__dnd return # Don't start recursive dnd except AttributeError: root.__dnd = self self.root = root self.source = source self.target = None self.initial_button = button = event.num self.initial_widget = widget = event.widget self.release_pattern = "<B%d-ButtonRelease-%d>" % (button, button) self.save_cursor = widget['cursor'] or "" widget.bind(self.release_pattern, self.on_release) widget.bind("<Motion>", self.on_motion) widget['cursor'] = "hand2" def __del__(self): root = self.root self.root = None if root: try: del root.__dnd except AttributeError: pass def on_motion(self, event): x, y = event.x_root, event.y_root target_widget = self.initial_widget.winfo_containing(x, y) source = self.source new_target = None while target_widget: try: attr = target_widget.dnd_accept except AttributeError: pass else: new_target = attr(source, event) if new_target: break target_widget = target_widget.master old_target = self.target if old_target is new_target: if old_target: old_target.dnd_motion(source, event) else: if old_target: self.target = None old_target.dnd_leave(source, event) if new_target: new_target.dnd_enter(source, event) self.target = new_target def on_release(self, event): self.finish(event, 1) def cancel(self, event=None): self.finish(event, 0) def finish(self, event, commit=0): target = self.target source = self.source widget = self.initial_widget root = self.root try: del root.__dnd self.initial_widget.unbind(self.release_pattern) self.initial_widget.unbind("<Motion>") widget['cursor'] = self.save_cursor self.target = self.source = self.initial_widget = self.root = None if target: if commit: target.dnd_commit(source, event) else: target.dnd_leave(source, event) finally: 
source.dnd_end(target, event) # ---------------------------------------------------------------------- # The rest is here for testing and demonstration purposes only! class Icon: def __init__(self, name): self.name = name self.canvas = self.label = self.id = None def attach(self, canvas, x=10, y=10): if canvas is self.canvas: self.canvas.coords(self.id, x, y) return if self.canvas: self.detach() if not canvas: return label = tkinter.Label(canvas, text=self.name, borderwidth=2, relief="raised") id = canvas.create_window(x, y, window=label, anchor="nw") self.canvas = canvas self.label = label self.id = id label.bind("<ButtonPress>", self.press) def detach(self): canvas = self.canvas if not canvas: return id = self.id label = self.label self.canvas = self.label = self.id = None canvas.delete(id) label.destroy() def press(self, event): if dnd_start(self, event): # where the pointer is relative to the label widget: self.x_off = event.x self.y_off = event.y # where the widget is relative to the canvas: self.x_orig, self.y_orig = self.canvas.coords(self.id) def move(self, event): x, y = self.where(self.canvas, event) self.canvas.coords(self.id, x, y) def putback(self): self.canvas.coords(self.id, self.x_orig, self.y_orig) def where(self, canvas, event): # where the corner of the canvas is relative to the screen: x_org = canvas.winfo_rootx() y_org = canvas.winfo_rooty() # where the pointer is relative to the canvas widget: x = event.x_root - x_org y = event.y_root - y_org # compensate for initial pointer offset return x - self.x_off, y - self.y_off def dnd_end(self, target, event): pass class Tester: def __init__(self, root): self.top = tkinter.Toplevel(root) self.canvas = tkinter.Canvas(self.top, width=100, height=100) self.canvas.pack(fill="both", expand=1) self.canvas.dnd_accept = self.dnd_accept def dnd_accept(self, source, event): return self def dnd_enter(self, source, event): self.canvas.focus_set() # Show highlight border x, y = source.where(self.canvas, event) x1, 
y1, x2, y2 = source.canvas.bbox(source.id) dx, dy = x2-x1, y2-y1 self.dndid = self.canvas.create_rectangle(x, y, x+dx, y+dy) self.dnd_motion(source, event) def dnd_motion(self, source, event): x, y = source.where(self.canvas, event) x1, y1, x2, y2 = self.canvas.bbox(self.dndid) self.canvas.move(self.dndid, x-x1, y-y1) def dnd_leave(self, source, event): self.top.focus_set() # Hide highlight border self.canvas.delete(self.dndid) self.dndid = None def dnd_commit(self, source, event): self.dnd_leave(source, event) x, y = source.where(self.canvas, event) source.attach(self.canvas, x, y) def test(): root = tkinter.Tk() root.geometry("+1+1") tkinter.Button(command=root.quit, text="Quit").pack() t1 = Tester(root) t1.top.geometry("+1+60") t2 = Tester(root) t2.top.geometry("+120+60") t3 = Tester(root) t3.top.geometry("+240+60") i1 = Icon("ICON1") i2 = Icon("ICON2") i3 = Icon("ICON3") i1.attach(t1.canvas) i2.attach(t2.canvas) i3.attach(t3.canvas) root.mainloop() if __name__ == '__main__': test()
lgpl-3.0
JioEducation/edx-platform
lms/djangoapps/mobile_api/video_outlines/tests.py
13
33725
# -*- coding: utf-8 -*- """ Tests for video outline API """ import itertools from uuid import uuid4 from collections import namedtuple import ddt from nose.plugins.attrib import attr from edxval import api from xmodule.modulestore.tests.factories import ItemFactory from xmodule.video_module import transcripts_utils from xmodule.modulestore.django import modulestore from xmodule.partitions.partitions import Group, UserPartition from milestones.tests.utils import MilestonesTestCaseMixin from mobile_api.models import MobileApiConfig from openedx.core.djangoapps.course_groups.tests.helpers import CohortFactory from openedx.core.djangoapps.course_groups.models import CourseUserGroupPartitionGroup from openedx.core.djangoapps.course_groups.cohorts import add_user_to_cohort, remove_user_from_cohort from mobile_api.testutils import MobileAPITestCase, MobileAuthTestMixin, MobileCourseAccessTestMixin class TestVideoAPITestCase(MobileAPITestCase): """ Base test class for video related mobile APIs """ def setUp(self): super(TestVideoAPITestCase, self).setUp() self.section = ItemFactory.create( parent=self.course, category="chapter", display_name=u"test factory section omega \u03a9", ) self.sub_section = ItemFactory.create( parent=self.section, category="sequential", display_name=u"test subsection omega \u03a9", ) self.unit = ItemFactory.create( parent=self.sub_section, category="vertical", metadata={'graded': True, 'format': 'Homework'}, display_name=u"test unit omega \u03a9", ) self.other_unit = ItemFactory.create( parent=self.sub_section, category="vertical", metadata={'graded': True, 'format': 'Homework'}, display_name=u"test unit omega 2 \u03a9", ) self.nameless_unit = ItemFactory.create( parent=self.sub_section, category="vertical", metadata={'graded': True, 'format': 'Homework'}, display_name=None, ) self.edx_video_id = 'testing-123' self.video_url = 'http://val.edx.org/val/video.mp4' self.video_url_high = 'http://val.edx.org/val/video_high.mp4' self.youtube_url = 
'http://val.edx.org/val/youtube.mp4' self.html5_video_url = 'http://video.edx.org/html5/video.mp4' api.create_profile('youtube') api.create_profile('mobile_high') api.create_profile('mobile_low') # create the video in VAL api.create_video({ 'edx_video_id': self.edx_video_id, 'status': 'test', 'client_video_id': u"test video omega \u03a9", 'duration': 12, 'courses': [unicode(self.course.id)], 'encoded_videos': [ { 'profile': 'youtube', 'url': 'xyz123', 'file_size': 0, 'bitrate': 1500 }, { 'profile': 'mobile_low', 'url': self.video_url, 'file_size': 12345, 'bitrate': 250 }, { 'profile': 'mobile_high', 'url': self.video_url_high, 'file_size': 99999, 'bitrate': 250 }, ]}) # Set requested profiles MobileApiConfig(video_profiles="mobile_low,mobile_high,youtube").save() class TestVideoAPIMixin(object): """ Mixin class that provides helpers for testing video related mobile APIs """ def _create_video_with_subs(self, custom_subid=None): """ Creates and returns a video with stored subtitles. """ subid = custom_subid or uuid4().hex transcripts_utils.save_subs_to_store( { 'start': [100, 200, 240, 390, 1000], 'end': [200, 240, 380, 1000, 1500], 'text': [ 'subs #1', 'subs #2', 'subs #3', 'subs #4', 'subs #5' ] }, subid, self.course) return ItemFactory.create( parent=self.unit, category="video", edx_video_id=self.edx_video_id, display_name=u"test video omega \u03a9", sub=subid ) def _verify_paths(self, course_outline, path_list, outline_index=0): """ Takes a path_list and compares it against the course_outline Attributes: course_outline (list): A list of dictionaries that includes a 'path' and 'named_path' field which we will be comparing path_list to path_list (list): A list of the expected strings outline_index (int): Index into the course_outline list for which the path is being tested. 
""" path = course_outline[outline_index]['path'] self.assertEqual(len(path), len(path_list)) for i in range(len(path_list)): self.assertEqual(path_list[i], path[i]['name']) #named_path will be deprecated eventually named_path = course_outline[outline_index]['named_path'] self.assertEqual(len(named_path), len(path_list)) for i in range(len(path_list)): self.assertEqual(path_list[i], named_path[i]) def _setup_course_partitions(self, scheme_id='random', is_cohorted=False): """Helper method to configure the user partitions in the course.""" self.partition_id = 0 # pylint: disable=attribute-defined-outside-init self.course.user_partitions = [ UserPartition( self.partition_id, 'first_partition', 'First Partition', [Group(0, 'alpha'), Group(1, 'beta')], scheme=None, scheme_id=scheme_id ), ] self.course.cohort_config = {'cohorted': is_cohorted} self.store.update_item(self.course, self.user.id) def _setup_group_access(self, xblock, partition_id, group_ids): """Helper method to configure the partition and group mapping for the given xblock.""" xblock.group_access = {partition_id: group_ids} self.store.update_item(xblock, self.user.id) def _setup_split_module(self, sub_block_category): """Helper method to configure a split_test unit with children of type sub_block_category.""" self._setup_course_partitions() self.split_test = ItemFactory.create( # pylint: disable=attribute-defined-outside-init parent=self.unit, category="split_test", display_name=u"split test unit", user_partition_id=0, ) sub_block_a = ItemFactory.create( parent=self.split_test, category=sub_block_category, display_name=u"split test block a", ) sub_block_b = ItemFactory.create( parent=self.split_test, category=sub_block_category, display_name=u"split test block b", ) self.split_test.group_id_to_child = { str(index): url for index, url in enumerate([sub_block_a.location, sub_block_b.location]) } self.store.update_item(self.split_test, self.user.id) return sub_block_a, sub_block_b @attr('shard_2') class 
TestNonStandardCourseStructure(MobileAPITestCase, TestVideoAPIMixin): """ Tests /api/mobile/v0.5/video_outlines/courses/{course_id} with no course set """ REVERSE_INFO = {'name': 'video-summary-list', 'params': ['course_id']} def setUp(self): super(TestNonStandardCourseStructure, self).setUp() self.chapter_under_course = ItemFactory.create( parent=self.course, category="chapter", display_name=u"test factory chapter under course omega \u03a9", ) self.section_under_course = ItemFactory.create( parent=self.course, category="sequential", display_name=u"test factory section under course omega \u03a9", ) self.section_under_chapter = ItemFactory.create( parent=self.chapter_under_course, category="sequential", display_name=u"test factory section under chapter omega \u03a9", ) self.vertical_under_course = ItemFactory.create( parent=self.course, category="vertical", display_name=u"test factory vertical under course omega \u03a9", ) self.vertical_under_section = ItemFactory.create( parent=self.section_under_chapter, category="vertical", display_name=u"test factory vertical under section omega \u03a9", ) def test_structure_course_video(self): """ Tests when there is a video without a vertical directly under course """ self.login_and_enroll() ItemFactory.create( parent=self.course, category="video", display_name=u"test factory video omega \u03a9", ) course_outline = self.api_response().data self.assertEqual(len(course_outline), 1) section_url = course_outline[0]["section_url"] unit_url = course_outline[0]["unit_url"] self.assertRegexpMatches(section_url, r'courseware$') self.assertEqual(section_url, unit_url) self._verify_paths(course_outline, []) def test_structure_course_vert_video(self): """ Tests when there is a video under vertical directly under course """ self.login_and_enroll() ItemFactory.create( parent=self.vertical_under_course, category="video", display_name=u"test factory video omega \u03a9", ) course_outline = self.api_response().data 
self.assertEqual(len(course_outline), 1) section_url = course_outline[0]["section_url"] unit_url = course_outline[0]["unit_url"] self.assertRegexpMatches( section_url, r'courseware/test_factory_vertical_under_course_omega_%CE%A9/$' ) self.assertEqual(section_url, unit_url) self._verify_paths( course_outline, [ u'test factory vertical under course omega \u03a9' ] ) def test_structure_course_chap_video(self): """ Tests when there is a video directly under chapter """ self.login_and_enroll() ItemFactory.create( parent=self.chapter_under_course, category="video", display_name=u"test factory video omega \u03a9", ) course_outline = self.api_response().data self.assertEqual(len(course_outline), 1) section_url = course_outline[0]["section_url"] unit_url = course_outline[0]["unit_url"] self.assertRegexpMatches( section_url, r'courseware/test_factory_chapter_under_course_omega_%CE%A9/$' ) self.assertEqual(section_url, unit_url) self._verify_paths( course_outline, [ u'test factory chapter under course omega \u03a9', ] ) def test_structure_course_section_video(self): """ Tests when chapter is none, and video under section under course """ self.login_and_enroll() ItemFactory.create( parent=self.section_under_course, category="video", display_name=u"test factory video omega \u03a9", ) course_outline = self.api_response().data self.assertEqual(len(course_outline), 1) section_url = course_outline[0]["section_url"] unit_url = course_outline[0]["unit_url"] self.assertRegexpMatches( section_url, r'courseware/test_factory_section_under_course_omega_%CE%A9/$' ) self.assertEqual(section_url, unit_url) self._verify_paths( course_outline, [ u'test factory section under course omega \u03a9', ] ) def test_structure_course_chap_section_video(self): """ Tests when chapter and sequential exists, with a video with no vertical. 
""" self.login_and_enroll() ItemFactory.create( parent=self.section_under_chapter, category="video", display_name=u"meow factory video omega \u03a9", ) course_outline = self.api_response().data self.assertEqual(len(course_outline), 1) section_url = course_outline[0]["section_url"] unit_url = course_outline[0]["unit_url"] self.assertRegexpMatches( section_url, ( r'courseware/test_factory_chapter_under_course_omega_%CE%A9/' + 'test_factory_section_under_chapter_omega_%CE%A9/$' ) ) self.assertEqual(section_url, unit_url) self._verify_paths( course_outline, [ u'test factory chapter under course omega \u03a9', u'test factory section under chapter omega \u03a9', ] ) def test_structure_course_section_vert_video(self): """ Tests chapter->section->vertical->unit """ self.login_and_enroll() ItemFactory.create( parent=self.vertical_under_section, category="video", display_name=u"test factory video omega \u03a9", ) course_outline = self.api_response().data self.assertEqual(len(course_outline), 1) section_url = course_outline[0]["section_url"] unit_url = course_outline[0]["unit_url"] self.assertRegexpMatches( section_url, ( r'courseware/test_factory_chapter_under_course_omega_%CE%A9/' + 'test_factory_section_under_chapter_omega_%CE%A9/$' ) ) self.assertRegexpMatches( unit_url, ( r'courseware/test_factory_chapter_under_course_omega_%CE%A9/' + 'test_factory_section_under_chapter_omega_%CE%A9/1$' ) ) self._verify_paths( course_outline, [ u'test factory chapter under course omega \u03a9', u'test factory section under chapter omega \u03a9', u'test factory vertical under section omega \u03a9' ] ) @attr('shard_2') @ddt.ddt class TestVideoSummaryList(TestVideoAPITestCase, MobileAuthTestMixin, MobileCourseAccessTestMixin, TestVideoAPIMixin, MilestonesTestCaseMixin): """ Tests for /api/mobile/v0.5/video_outlines/courses/{course_id}.. 
""" REVERSE_INFO = {'name': 'video-summary-list', 'params': ['course_id']} def test_only_on_web(self): self.login_and_enroll() course_outline = self.api_response().data self.assertEqual(len(course_outline), 0) subid = uuid4().hex transcripts_utils.save_subs_to_store( { 'start': [100], 'end': [200], 'text': [ 'subs #1', ] }, subid, self.course) ItemFactory.create( parent=self.unit, category="video", display_name=u"test video", only_on_web=True, subid=subid ) course_outline = self.api_response().data self.assertEqual(len(course_outline), 1) self.assertIsNone(course_outline[0]["summary"]["video_url"]) self.assertIsNone(course_outline[0]["summary"]["video_thumbnail_url"]) self.assertEqual(course_outline[0]["summary"]["duration"], 0) self.assertEqual(course_outline[0]["summary"]["size"], 0) self.assertEqual(course_outline[0]["summary"]["name"], "test video") self.assertEqual(course_outline[0]["summary"]["transcripts"], {}) self.assertIsNone(course_outline[0]["summary"]["language"]) self.assertEqual(course_outline[0]["summary"]["category"], "video") self.assertTrue(course_outline[0]["summary"]["only_on_web"]) def test_mobile_api_config(self): """ Tests VideoSummaryList with different MobileApiConfig video_profiles """ self.login_and_enroll() edx_video_id = "testing_mobile_high" api.create_video({ 'edx_video_id': edx_video_id, 'status': 'test', 'client_video_id': u"test video omega \u03a9", 'duration': 12, 'courses': [unicode(self.course.id)], 'encoded_videos': [ { 'profile': 'youtube', 'url': self.youtube_url, 'file_size': 2222, 'bitrate': 4444 }, { 'profile': 'mobile_high', 'url': self.video_url_high, 'file_size': 111, 'bitrate': 333 }, ]}) ItemFactory.create( parent=self.other_unit, category="video", display_name=u"testing mobile high video", edx_video_id=edx_video_id, ) expected_output = { 'category': u'video', 'video_thumbnail_url': None, 'language': u'en', 'name': u'testing mobile high video', 'video_url': self.video_url_high, 'duration': 12.0, 'transcripts': { 
'en': 'http://testserver/api/mobile/v0.5/video_outlines/transcripts/{}/testing_mobile_high_video/en'.format(self.course.id) # pylint: disable=line-too-long }, 'only_on_web': False, 'encoded_videos': { u'mobile_high': { 'url': self.video_url_high, 'file_size': 111 }, u'youtube': { 'url': self.youtube_url, 'file_size': 2222 } }, 'size': 111 } # Testing when video_profiles='mobile_low,mobile_high,youtube' course_outline = self.api_response().data course_outline[0]['summary'].pop("id") self.assertEqual(course_outline[0]['summary'], expected_output) # Testing when there is no mobile_low, and that mobile_high doesn't show MobileApiConfig(video_profiles="mobile_low,youtube").save() course_outline = self.api_response().data expected_output['encoded_videos'].pop('mobile_high') expected_output['video_url'] = self.youtube_url expected_output['size'] = 2222 course_outline[0]['summary'].pop("id") self.assertEqual(course_outline[0]['summary'], expected_output) # Testing where youtube is the default video over mobile_high MobileApiConfig(video_profiles="youtube,mobile_high").save() course_outline = self.api_response().data expected_output['encoded_videos']['mobile_high'] = { 'url': self.video_url_high, 'file_size': 111 } course_outline[0]['summary'].pop("id") self.assertEqual(course_outline[0]['summary'], expected_output) def test_video_not_in_val(self): self.login_and_enroll() self._create_video_with_subs() ItemFactory.create( parent=self.other_unit, category="video", edx_video_id="some_non_existent_id_in_val", display_name=u"some non existent video in val", html5_sources=[self.html5_video_url] ) summary = self.api_response().data[1]['summary'] self.assertEqual(summary['name'], "some non existent video in val") self.assertIsNone(summary['encoded_videos']) self.assertIsNone(summary['duration']) self.assertEqual(summary['size'], 0) self.assertEqual(summary['video_url'], self.html5_video_url) def test_course_list(self): self.login_and_enroll() self._create_video_with_subs() 
ItemFactory.create( parent=self.other_unit, category="video", display_name=u"test video omega 2 \u03a9", html5_sources=[self.html5_video_url] ) ItemFactory.create( parent=self.other_unit, category="video", display_name=u"test video omega 3 \u03a9", source=self.html5_video_url ) ItemFactory.create( parent=self.unit, category="video", edx_video_id=self.edx_video_id, display_name=u"test draft video omega \u03a9", visible_to_staff_only=True, ) course_outline = self.api_response().data self.assertEqual(len(course_outline), 3) vid = course_outline[0] self.assertTrue('test_subsection_omega_%CE%A9' in vid['section_url']) self.assertTrue('test_subsection_omega_%CE%A9/1' in vid['unit_url']) self.assertTrue(u'test_video_omega_\u03a9' in vid['summary']['id']) self.assertEqual(vid['summary']['video_url'], self.video_url) self.assertEqual(vid['summary']['size'], 12345) self.assertTrue('en' in vid['summary']['transcripts']) self.assertFalse(vid['summary']['only_on_web']) self.assertEqual(course_outline[1]['summary']['video_url'], self.html5_video_url) self.assertEqual(course_outline[1]['summary']['size'], 0) self.assertFalse(course_outline[1]['summary']['only_on_web']) self.assertEqual(course_outline[1]['path'][2]['name'], self.other_unit.display_name) self.assertEqual(course_outline[1]['path'][2]['id'], unicode(self.other_unit.location)) self.assertEqual(course_outline[2]['summary']['video_url'], self.html5_video_url) self.assertEqual(course_outline[2]['summary']['size'], 0) self.assertFalse(course_outline[2]['summary']['only_on_web']) def test_with_nameless_unit(self): self.login_and_enroll() ItemFactory.create( parent=self.nameless_unit, category="video", edx_video_id=self.edx_video_id, display_name=u"test draft video omega 2 \u03a9" ) course_outline = self.api_response().data self.assertEqual(len(course_outline), 1) self.assertEqual(course_outline[0]['path'][2]['name'], self.nameless_unit.location.block_id) def test_with_video_in_sub_section(self): """ Tests a non standard 
xml format where a video is underneath a sequential We are expecting to return the same unit and section url since there is no unit vertical. """ self.login_and_enroll() ItemFactory.create( parent=self.sub_section, category="video", edx_video_id=self.edx_video_id, display_name=u"video in the sub section" ) course_outline = self.api_response().data self.assertEqual(len(course_outline), 1) self.assertEqual(len(course_outline[0]['path']), 2) section_url = course_outline[0]["section_url"] unit_url = course_outline[0]["unit_url"] self.assertIn( u'courseware/test_factory_section_omega_%CE%A9/test_subsection_omega_%CE%A9', section_url ) self.assertTrue(section_url) self.assertTrue(unit_url) self.assertEqual(section_url, unit_url) @ddt.data( *itertools.product([True, False], ["video", "problem"]) ) @ddt.unpack def test_with_split_block(self, is_user_staff, sub_block_category): """Test with split_module->sub_block_category and for both staff and non-staff users.""" self.login_and_enroll() self.user.is_staff = is_user_staff self.user.save() self._setup_split_module(sub_block_category) video_outline = self.api_response().data num_video_blocks = 1 if sub_block_category == "video" else 0 self.assertEqual(len(video_outline), num_video_blocks) for block_index in range(num_video_blocks): self._verify_paths( video_outline, [ self.section.display_name, self.sub_section.display_name, self.unit.display_name, self.split_test.display_name ], block_index ) self.assertIn(u"split test block", video_outline[block_index]["summary"]["name"]) def test_with_split_vertical(self): """Test with split_module->vertical->video structure.""" self.login_and_enroll() split_vertical_a, split_vertical_b = self._setup_split_module("vertical") ItemFactory.create( parent=split_vertical_a, category="video", display_name=u"video in vertical a", ) ItemFactory.create( parent=split_vertical_b, category="video", display_name=u"video in vertical b", ) video_outline = self.api_response().data # user should see only 
one of the videos (a or b). self.assertEqual(len(video_outline), 1) self.assertIn(u"video in vertical", video_outline[0]["summary"]["name"]) a_or_b = video_outline[0]["summary"]["name"][-1:] self._verify_paths( video_outline, [ self.section.display_name, self.sub_section.display_name, self.unit.display_name, self.split_test.display_name, u"split test block " + a_or_b ], ) def _create_cohorted_video(self, group_id): """Creates a cohorted video block, giving access to only the given group_id.""" video_block = ItemFactory.create( parent=self.unit, category="video", display_name=u"video for group " + unicode(group_id), ) self._setup_group_access(video_block, self.partition_id, [group_id]) def _create_cohorted_vertical_with_video(self, group_id): """Creates a cohorted vertical with a child video block, giving access to only the given group_id.""" vertical_block = ItemFactory.create( parent=self.sub_section, category="vertical", display_name=u"vertical for group " + unicode(group_id), ) self._setup_group_access(vertical_block, self.partition_id, [group_id]) ItemFactory.create( parent=vertical_block, category="video", display_name=u"video for group " + unicode(group_id), ) @ddt.data("_create_cohorted_video", "_create_cohorted_vertical_with_video") def test_with_cohorted_content(self, content_creator_method_name): self.login_and_enroll() self._setup_course_partitions(scheme_id='cohort', is_cohorted=True) cohorts = [] for group_id in [0, 1]: getattr(self, content_creator_method_name)(group_id) cohorts.append(CohortFactory(course_id=self.course.id, name=u"Cohort " + unicode(group_id))) link = CourseUserGroupPartitionGroup( course_user_group=cohorts[group_id], partition_id=self.partition_id, group_id=group_id, ) link.save() for cohort_index in range(len(cohorts)): # add user to this cohort add_user_to_cohort(cohorts[cohort_index], self.user.username) # should only see video for this cohort video_outline = self.api_response().data self.assertEqual(len(video_outline), 1) 
self.assertEquals( u"video for group " + unicode(cohort_index), video_outline[0]["summary"]["name"] ) # remove user from this cohort remove_user_from_cohort(cohorts[cohort_index], self.user.username) # un-cohorted user should see no videos video_outline = self.api_response().data self.assertEqual(len(video_outline), 0) # staff user sees all videos self.user.is_staff = True self.user.save() video_outline = self.api_response().data self.assertEqual(len(video_outline), 2) def test_with_hidden_blocks(self): self.login_and_enroll() hidden_subsection = ItemFactory.create( parent=self.section, category="sequential", hide_from_toc=True, ) unit_within_hidden_subsection = ItemFactory.create( parent=hidden_subsection, category="vertical", ) hidden_unit = ItemFactory.create( parent=self.sub_section, category="vertical", hide_from_toc=True, ) ItemFactory.create( parent=unit_within_hidden_subsection, category="video", edx_video_id=self.edx_video_id, ) ItemFactory.create( parent=hidden_unit, category="video", edx_video_id=self.edx_video_id, ) course_outline = self.api_response().data self.assertEqual(len(course_outline), 0) def test_language(self): self.login_and_enroll() video = ItemFactory.create( parent=self.nameless_unit, category="video", edx_video_id=self.edx_video_id, display_name=u"test draft video omega 2 \u03a9" ) language_case = namedtuple('language_case', ['transcripts', 'expected_language']) language_cases = [ # defaults to english language_case({}, "en"), # supports english language_case({"en": 1}, "en"), # supports another language language_case({"lang1": 1}, "lang1"), # returns first alphabetically-sorted language language_case({"lang1": 1, "en": 2}, "en"), language_case({"lang1": 1, "lang2": 2}, "lang1"), ] for case in language_cases: video.transcripts = case.transcripts modulestore().update_item(video, self.user.id) course_outline = self.api_response().data self.assertEqual(len(course_outline), 1) self.assertEqual(course_outline[0]['summary']['language'], 
case.expected_language) def test_transcripts(self): self.login_and_enroll() video = ItemFactory.create( parent=self.nameless_unit, category="video", edx_video_id=self.edx_video_id, display_name=u"test draft video omega 2 \u03a9" ) transcript_case = namedtuple('transcript_case', ['transcripts', 'english_subtitle', 'expected_transcripts']) transcript_cases = [ # defaults to english transcript_case({}, "", ["en"]), transcript_case({}, "en-sub", ["en"]), # supports english transcript_case({"en": 1}, "", ["en"]), transcript_case({"en": 1}, "en-sub", ["en"]), # keeps both english and other languages transcript_case({"lang1": 1, "en": 2}, "", ["lang1", "en"]), transcript_case({"lang1": 1, "en": 2}, "en-sub", ["lang1", "en"]), # adds english to list of languages only if english_subtitle is specified transcript_case({"lang1": 1, "lang2": 2}, "", ["lang1", "lang2"]), transcript_case({"lang1": 1, "lang2": 2}, "en-sub", ["lang1", "lang2", "en"]), ] for case in transcript_cases: video.transcripts = case.transcripts video.sub = case.english_subtitle modulestore().update_item(video, self.user.id) course_outline = self.api_response().data self.assertEqual(len(course_outline), 1) self.assertSetEqual( set(course_outline[0]['summary']['transcripts'].keys()), set(case.expected_transcripts) ) @attr('shard_2') class TestTranscriptsDetail(TestVideoAPITestCase, MobileAuthTestMixin, MobileCourseAccessTestMixin, TestVideoAPIMixin, MilestonesTestCaseMixin): """ Tests for /api/mobile/v0.5/video_outlines/transcripts/{course_id}.. 
""" REVERSE_INFO = {'name': 'video-transcripts-detail', 'params': ['course_id']} def setUp(self): super(TestTranscriptsDetail, self).setUp() self.video = self._create_video_with_subs() def reverse_url(self, reverse_args=None, **kwargs): reverse_args = reverse_args or {} reverse_args.update({ 'block_id': self.video.location.block_id, 'lang': kwargs.get('lang', 'en'), }) return super(TestTranscriptsDetail, self).reverse_url(reverse_args, **kwargs) def test_incorrect_language(self): self.login_and_enroll() self.api_response(expected_response_code=404, lang='pl') def test_transcript_with_unicode_file_name(self): self.video = self._create_video_with_subs(custom_subid=u'你好') self.login_and_enroll() self.api_response(expected_response_code=200, lang='en')
agpl-3.0
Ingenico-ePayments/connect-sdk-python2
ingenico/connect/sdk/domain/payment/cancel_approval_payment_response.py
2
1362
# -*- coding: utf-8 -*-
#
# This class was auto-generated from the API references found at
# https://epayments-api.developer-ingenico.com/s2sapi/v1/
#
from ingenico.connect.sdk.data_object import DataObject
from ingenico.connect.sdk.domain.payment.definitions.payment import Payment


class CancelApprovalPaymentResponse(DataObject):
    """Response wrapper for the cancel-approval payment call.

    Carries one optional nested :class:`Payment` object describing the
    payment whose approval was cancelled.
    """

    __payment = None

    @property
    def payment(self):
        """
        | Object that holds the payment related properties

        Type: :class:`ingenico.connect.sdk.domain.payment.definitions.payment.Payment`
        """
        return self.__payment

    @payment.setter
    def payment(self, value):
        self.__payment = value

    def to_dictionary(self):
        """Serialize this response (including the nested payment) to a dict."""
        dictionary = super(CancelApprovalPaymentResponse, self).to_dictionary()
        payment = self.payment
        if payment is not None:
            dictionary['payment'] = payment.to_dictionary()
        return dictionary

    def from_dictionary(self, dictionary):
        """Populate this response from *dictionary* and return ``self``."""
        super(CancelApprovalPaymentResponse, self).from_dictionary(dictionary)
        if 'payment' in dictionary:
            raw = dictionary['payment']
            if not isinstance(raw, dict):
                raise TypeError('value \'{}\' is not a dictionary'.format(raw))
            self.payment = Payment().from_dictionary(raw)
        return self
mit
jemdwood/cs234_proj
pretrain-atari.py
1
11760
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# File: train-atari.py
# Original Author (we/jemdwood@gmail.com editted): Yuxin Wu <ppwwyyxxc@gmail.com>
#
# Supervised pre-training of a tensorpack A3C-style Atari model from
# recorded gameplay (RecordsDataFlow / KurinDataFlow) instead of live
# simulator experience (the simulator plumbing is commented out below).

import numpy as np
import os
import sys
import time
import random
import uuid
import argparse
import multiprocessing
import threading
import cv2
import tensorflow as tf
import six
from six.moves import queue

from tensorpack import *
from tensorpack.utils.concurrency import *
from tensorpack.utils.serialize import *
from tensorpack.utils.stats import *
from tensorpack.tfutils import symbolic_functions as symbf
from tensorpack.tfutils.gradproc import MapGradient, SummaryGradient
from tensorpack.RL import *
from simulator import *
import common
from common import (play_model, Evaluator, eval_model_multithread,
                    play_one_episode, play_n_episodes)
from records_dataflow import RecordsDataFlow
from kurin_dataflow import KurinDataFlow

# futures.CancelledError only exists on Python 3; fall back to the broad
# Exception on Python 2 so the except clause below still compiles.
if six.PY3:
    from concurrent import futures
    CancelledError = futures.CancelledError
else:
    CancelledError = Exception

IMAGE_SIZE = (84, 84)           # network input resolution (H, W)
FRAME_HISTORY = 4               # stacked frames fed to the network
GAMMA = 0.99                    # discount factor for the return
CHANNEL = FRAME_HISTORY * 3     # RGB frames stacked along channels
IMAGE_SHAPE3 = IMAGE_SIZE + (CHANNEL,)

LOCAL_TIME_MAX = 5
EVAL_EPISODE = 50
BATCH_SIZE = 128
PREDICT_BATCH_SIZE = 15  # batch for efficient forward
SIMULATOR_PROC = 50
PREDICTOR_THREAD_PER_GPU = 3
PREDICTOR_THREAD = None
EVALUATE_PROC = min(multiprocessing.cpu_count() // 2, 20)

# Filled in at runtime: get_player() sets NUM_ACTIONS, __main__ sets ENV_NAME.
NUM_ACTIONS = None
ENV_NAME = None
BASE_PATH = '/data_4/rl'


def get_player(viz=False, train=False, dumpdir=None):
    """Build a gym-backed player for ENV_NAME.

    Side effect: sets the global NUM_ACTIONS from the environment's
    action space.  Training players are length-limited; evaluation
    players get anti-stuck behaviour instead.
    """
    pl = GymEnv(ENV_NAME, viz=viz, dumpdir=dumpdir)
    # cv2.resize takes (width, height), hence the [::-1].
    pl = MapPlayerState(pl, lambda img: cv2.resize(img, IMAGE_SIZE[::-1]))

    global NUM_ACTIONS
    NUM_ACTIONS = pl.get_action_space().num_actions()

    pl = HistoryFramePlayer(pl, FRAME_HISTORY)
    if not train:
        pl = PreventStuckPlayer(pl, 30, 1)
    else:
        pl = LimitLengthPlayer(pl, 40000)
    return pl


class MySimulatorWorker(SimulatorProcess):
    """Simulator subprocess that plays with a training-mode player."""

    def _build_player(self):
        return get_player(train=True)


class Model(ModelDesc):
    """Actor-critic CNN: shared conv trunk with policy and value heads."""

    def _get_inputs(self):
        # NUM_ACTIONS must have been set by get_player() before the
        # graph is constructed.
        assert NUM_ACTIONS is not None
        return [InputDesc(tf.uint8, (None,) + IMAGE_SHAPE3, 'state'),
                InputDesc(tf.int64, (None,), 'action'),
                InputDesc(tf.float32, (None,), 'futurereward')]

    def _get_NN_prediction(self, image):
        """Return (logits, value) for a batch of uint8 frame stacks."""
        image = tf.cast(image, tf.float32) / 255.0
        with argscope(Conv2D, nl=tf.nn.relu):
            l = Conv2D('conv0', image, out_channel=32, kernel_shape=5)
            l = MaxPooling('pool0', l, 2)
            l = Conv2D('conv1', l, out_channel=32, kernel_shape=5)
            l = MaxPooling('pool1', l, 2)
            l = Conv2D('conv2', l, out_channel=64, kernel_shape=4)
            l = MaxPooling('pool2', l, 2)
            l = Conv2D('conv3', l, out_channel=64, kernel_shape=3)

        l = FullyConnected('fc0', l, 512, nl=tf.identity)
        l = PReLU('prelu', l)
        logits = FullyConnected('fc-pi', l, out_dim=NUM_ACTIONS, nl=tf.identity)    # unnormalized policy
        value = FullyConnected('fc-v', l, 1, nl=tf.identity)
        return logits, value

    def _build_graph(self, inputs):
        state, action, futurereward = inputs
        logits, self.value = self._get_NN_prediction(state)
        self.value = tf.squeeze(self.value, [1], name='pred_value')  # (B,)
        self.policy = tf.nn.softmax(logits, name='policy')

        # 'explore_factor' sharpens the softmax used by the async
        # predictor ('policy_explore'); scheduled from get_config().
        expf = tf.get_variable('explore_factor', shape=[],
                               initializer=tf.constant_initializer(1), trainable=False)
        policy_explore = tf.nn.softmax(logits * expf, name='policy_explore')

        is_training = get_current_tower_context().is_training
        if not is_training:
            return
        log_probs = tf.log(self.policy + 1e-6)

        log_pi_a_given_s = tf.reduce_sum(
            log_probs * tf.one_hot(action, NUM_ACTIONS), 1)
        # Advantage uses a stopped-gradient value so the policy loss does
        # not backprop through the value head.
        advantage = tf.subtract(tf.stop_gradient(self.value), futurereward, name='advantage')
        policy_loss = tf.reduce_sum(log_pi_a_given_s * advantage, name='policy_loss')
        xentropy_loss = tf.reduce_sum(
            self.policy * log_probs, name='xentropy_loss')
        value_loss = tf.nn.l2_loss(self.value - futurereward, name='value_loss')

        pred_reward = tf.reduce_mean(self.value, name='predict_reward')
        advantage = symbf.rms(advantage, name='rms_advantage')
        entropy_beta = tf.get_variable('entropy_beta', shape=[],
                                       initializer=tf.constant_initializer(0.01), trainable=False)
        self.cost = tf.add_n([policy_loss, xentropy_loss * entropy_beta,
                              value_loss])
        # Normalize by batch size so the cost is comparable across batches.
        self.cost = tf.truediv(self.cost,
                               tf.cast(tf.shape(futurereward)[0], tf.float32),
                               name='cost')
        summary.add_moving_summary(policy_loss, xentropy_loss,
                                   value_loss, pred_reward, advantage, self.cost)

    def _get_optimizer(self):
        lr = symbf.get_scalar_var('learning_rate', 0.001, summary=True)
        opt = tf.train.AdamOptimizer(lr, epsilon=1e-3)

        gradprocs = [MapGradient(lambda grad: tf.clip_by_average_norm(grad, 0.1)),
                     SummaryGradient()]
        opt = optimizer.apply_grad_processors(opt, gradprocs)
        return opt


class MySimulatorMaster(SimulatorMaster, Callback):
    """Collects transitions from simulator clients and turns them into
    (state, action, discounted-return) training tuples.

    NOTE(review): unused by the pre-training path — its construction is
    commented out in get_config(); kept for the original A3C setup.
    """

    def __init__(self, pipe_c2s, pipe_s2c, model):
        super(MySimulatorMaster, self).__init__(pipe_c2s, pipe_s2c)
        self.M = model
        self.queue = queue.Queue(maxsize=BATCH_SIZE * 8 * 2)

    def _setup_graph(self):
        self.async_predictor = MultiThreadAsyncPredictor(
            self.trainer.get_predictors(['state'], ['policy_explore', 'pred_value'],
                                        PREDICTOR_THREAD),
            batch_size=PREDICT_BATCH_SIZE)

    def _before_train(self):
        self.async_predictor.start()

    def _on_state(self, state, ident):
        def cb(outputs):
            try:
                distrib, value = outputs.result()
            except CancelledError:
                logger.info("Client {} cancelled.".format(ident))
                return
            assert np.all(np.isfinite(distrib)), distrib
            # Sample an action from the (exploration-sharpened) policy.
            action = np.random.choice(len(distrib), p=distrib)
            client = self.clients[ident]
            client.memory.append(TransitionExperience(state, action, None, value=value))
            self.send_queue.put([ident, dumps(action)])
        self.async_predictor.put_task([state], cb)

    def _on_episode_over(self, ident):
        self._parse_memory(0, ident, True)

    def _on_datapoint(self, ident):
        client = self.clients[ident]
        if len(client.memory) == LOCAL_TIME_MAX + 1:
            # Bootstrap the n-step return from the newest state's value.
            R = client.memory[-1].value
            self._parse_memory(R, ident, False)

    def _parse_memory(self, init_r, ident, isOver):
        """Fold the client's memory into discounted returns (newest first)."""
        client = self.clients[ident]
        mem = client.memory
        if not isOver:
            last = mem[-1]
            mem = mem[:-1]
        mem.reverse()
        R = float(init_r)
        for idx, k in enumerate(mem):
            # Rewards are clipped to [-1, 1] before discounting.
            R = np.clip(k.reward, -1, 1) + GAMMA * R
            self.queue.put([k.state, k.action, R])
        if not isOver:
            client.memory = [last]
        else:
            client.memory = []


def get_config(env):
    """Build the TrainConfig for supervised pre-training on recorded play."""
    assert NUM_ACTIONS is not None
    dirname = os.path.join('train_log', 'pretrain-atari-{}'.format(ENV_NAME))
    logger.set_logger_dir(dirname)
    M = Model()

    # The original asynchronous simulator pipeline, disabled for
    # pre-training (data comes from recorded dataflows instead):
    #name_base = str(uuid.uuid1())[:6]
    #PIPE_DIR = os.environ.get('TENSORPACK_PIPEDIR', '.').rstrip('/')
    #namec2s = 'ipc://{}/sim-c2s-{}'.format(PIPE_DIR, name_base)
    #names2c = 'ipc://{}/sim-s2c-{}'.format(PIPE_DIR, name_base)
    #procs = [MySimulatorWorker(k, namec2s, names2c) for k in range(SIMULATOR_PROC)]
    #ensure_proc_terminate(procs)
    #start_proc_mask_signal(procs)

    #master = MySimulatorMaster(namec2s, names2c, M)
    # Breakout has its own record format; every other game goes through
    # the Kurin recordings under BASE_PATH.
    if env == 'Breakout-v0':
        df = RecordsDataFlow('all')
    else:
        df = KurinDataFlow('all', record_folder=BASE_PATH, gym_game_name=env)
    dataflow = BatchData(df, BATCH_SIZE)
    print('Pre-training dataset size: {}'.format(df.size()))
    #print('Average human performance: {}'.format(df.avg_human_score))
    return TrainConfig(
        model=M,
        dataflow=dataflow,
        callbacks=[
            ModelSaver(),
            ScheduledHyperParamSetter('learning_rate', [(20, 0.0003), (120, 0.0001)]),
            ScheduledHyperParamSetter('entropy_beta', [(80, 0.005)]),
            ScheduledHyperParamSetter('explore_factor',
                                      [(80, 2), (100, 3), (120, 4), (140, 5)]),
            HumanHyperParamSetter('learning_rate'),
            HumanHyperParamSetter('entropy_beta'),
            #master,
            #StartProcOrThread(master),
            PeriodicTrigger(Evaluator(
                EVAL_EPISODE, ['state'], ['policy'], get_player),
                every_k_epochs=1),
        ],
        session_creator=sesscreate.NewSessionCreator(
            config=get_default_sess_config(0.5)),
        steps_per_epoch=dataflow.size(),
        max_epoch=5,
    )


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--gpu', help='comma separated list of GPU(s) to use.')
    parser.add_argument('--load', help='load model')
    parser.add_argument('--env', help='env', required=True)
    parser.add_argument('--task', help='task to perform',
                        choices=['play', 'eval', 'train', 'gen_submit'], default='train')
    parser.add_argument('--output', help='output directory for submission',
                        default='output_dir')
    parser.add_argument('--episode', help='number of episode to eval',
                        default=100, type=int)
    args = parser.parse_args()

    ENV_NAME = args.env
    assert ENV_NAME
    logger.info("Environment Name: {}".format(ENV_NAME))
    # Instantiate a throwaway player purely for its side effect of
    # populating NUM_ACTIONS before any Model is built.
    p = get_player()
    del p    # set NUM_ACTIONS

    if args.gpu:
        os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
    if args.task != 'train':
        assert args.load is not None

    if args.task != 'train':
        cfg = PredictConfig(
            model=Model(),
            session_init=get_model_loader(args.load),
            input_names=['state'],
            output_names=['policy'])
        if args.task == 'play':
            play_model(cfg, get_player(viz=0.01))
        elif args.task == 'eval':
            eval_model_multithread(cfg, args.episode, get_player)
        elif args.task == 'gen_submit':
            play_n_episodes(
                get_player(train=False, dumpdir=args.output),
                OfflinePredictor(cfg), args.episode)
            # gym.upload(output, api_key='xxx')
    else:
        nr_gpu = get_nr_gpu()
        if nr_gpu > 0:
            if nr_gpu > 1:
                # Use the second half of the GPUs for inference.
                predict_tower = list(range(nr_gpu))[-nr_gpu // 2:]
            else:
                predict_tower = [0]
            PREDICTOR_THREAD = len(predict_tower) * PREDICTOR_THREAD_PER_GPU
            train_tower = list(range(nr_gpu))[:-nr_gpu // 2] or [0]
            logger.info("[BA3C] Train on gpu {} and infer on gpu {}".format(
                ','.join(map(str, train_tower)), ','.join(map(str, predict_tower))))
            trainer = AsyncMultiGPUTrainer
        else:
            logger.warn("Without GPU this model will never learn! CPU is only useful for debug.")
            nr_gpu = 0
            PREDICTOR_THREAD = 1
            predict_tower, train_tower = [0], [0]
            trainer = QueueInputTrainer
        config = get_config(args.env)
        if args.load:
            config.session_init = get_model_loader(args.load)
        config.tower = train_tower
        config.predict_tower = predict_tower
        trainer(config).train()
mit
sahiljain/catapult
third_party/gsutil/third_party/boto/tests/unit/cloudsearch/test_connection.py
114
8513
#!/usr/bin env python from tests.unit import AWSMockServiceTestCase from boto.cloudsearch.domain import Domain from boto.cloudsearch.layer1 import Layer1 class TestCloudSearchCreateDomain(AWSMockServiceTestCase): connection_class = Layer1 def default_body(self): return b""" <CreateDomainResponse xmlns="http://cloudsearch.amazonaws.com/doc/2011-02-01"> <CreateDomainResult> <DomainStatus> <SearchPartitionCount>0</SearchPartitionCount> <SearchService> <Arn>arn:aws:cs:us-east-1:1234567890:search/demo</Arn> <Endpoint>search-demo-userdomain.us-east-1.cloudsearch.amazonaws.com</Endpoint> </SearchService> <NumSearchableDocs>0</NumSearchableDocs> <Created>true</Created> <DomainId>1234567890/demo</DomainId> <Processing>false</Processing> <SearchInstanceCount>0</SearchInstanceCount> <DomainName>demo</DomainName> <RequiresIndexDocuments>false</RequiresIndexDocuments> <Deleted>false</Deleted> <DocService> <Arn>arn:aws:cs:us-east-1:1234567890:doc/demo</Arn> <Endpoint>doc-demo-userdomain.us-east-1.cloudsearch.amazonaws.com</Endpoint> </DocService> </DomainStatus> </CreateDomainResult> <ResponseMetadata> <RequestId>00000000-0000-0000-0000-000000000000</RequestId> </ResponseMetadata> </CreateDomainResponse> """ def test_create_domain(self): self.set_http_response(status_code=200) api_response = self.service_connection.create_domain('demo') self.assert_request_parameters({ 'Action': 'CreateDomain', 'DomainName': 'demo', 'Version': '2011-02-01', }) def test_cloudsearch_connect_result_endpoints(self): """Check that endpoints & ARNs are correctly returned from AWS""" self.set_http_response(status_code=200) api_response = self.service_connection.create_domain('demo') domain = Domain(self, api_response) self.assertEqual(domain.doc_service_arn, "arn:aws:cs:us-east-1:1234567890:doc/demo") self.assertEqual( domain.doc_service_endpoint, "doc-demo-userdomain.us-east-1.cloudsearch.amazonaws.com") self.assertEqual(domain.search_service_arn, "arn:aws:cs:us-east-1:1234567890:search/demo") 
self.assertEqual( domain.search_service_endpoint, "search-demo-userdomain.us-east-1.cloudsearch.amazonaws.com") def test_cloudsearch_connect_result_statuses(self): """Check that domain statuses are correctly returned from AWS""" self.set_http_response(status_code=200) api_response = self.service_connection.create_domain('demo') domain = Domain(self, api_response) self.assertEqual(domain.created, True) self.assertEqual(domain.processing, False) self.assertEqual(domain.requires_index_documents, False) self.assertEqual(domain.deleted, False) def test_cloudsearch_connect_result_details(self): """Check that the domain information is correctly returned from AWS""" self.set_http_response(status_code=200) api_response = self.service_connection.create_domain('demo') domain = Domain(self, api_response) self.assertEqual(domain.id, "1234567890/demo") self.assertEqual(domain.name, "demo") def test_cloudsearch_documentservice_creation(self): self.set_http_response(status_code=200) api_response = self.service_connection.create_domain('demo') domain = Domain(self, api_response) document = domain.get_document_service() self.assertEqual( document.endpoint, "doc-demo-userdomain.us-east-1.cloudsearch.amazonaws.com") def test_cloudsearch_searchservice_creation(self): self.set_http_response(status_code=200) api_response = self.service_connection.create_domain('demo') domain = Domain(self, api_response) search = domain.get_search_service() self.assertEqual( search.endpoint, "search-demo-userdomain.us-east-1.cloudsearch.amazonaws.com") class CloudSearchConnectionDeletionTest(AWSMockServiceTestCase): connection_class = Layer1 def default_body(self): return b""" <DeleteDomainResponse xmlns="http://cloudsearch.amazonaws.com/doc/2011-02-01"> <DeleteDomainResult> <DomainStatus> <SearchPartitionCount>0</SearchPartitionCount> <SearchService> <Arn>arn:aws:cs:us-east-1:1234567890:search/demo</Arn> <Endpoint>search-demo-userdomain.us-east-1.cloudsearch.amazonaws.com</Endpoint> </SearchService> 
<NumSearchableDocs>0</NumSearchableDocs> <Created>true</Created> <DomainId>1234567890/demo</DomainId> <Processing>false</Processing> <SearchInstanceCount>0</SearchInstanceCount> <DomainName>demo</DomainName> <RequiresIndexDocuments>false</RequiresIndexDocuments> <Deleted>false</Deleted> <DocService> <Arn>arn:aws:cs:us-east-1:1234567890:doc/demo</Arn> <Endpoint>doc-demo-userdomain.us-east-1.cloudsearch.amazonaws.com</Endpoint> </DocService> </DomainStatus> </DeleteDomainResult> <ResponseMetadata> <RequestId>00000000-0000-0000-0000-000000000000</RequestId> </ResponseMetadata> </DeleteDomainResponse> """ def test_cloudsearch_deletion(self): """ Check that the correct arguments are sent to AWS when creating a cloudsearch connection. """ self.set_http_response(status_code=200) api_response = self.service_connection.delete_domain('demo') self.assert_request_parameters({ 'Action': 'DeleteDomain', 'DomainName': 'demo', 'Version': '2011-02-01', }) class CloudSearchConnectionIndexDocumentTest(AWSMockServiceTestCase): connection_class = Layer1 def default_body(self): return b""" <IndexDocumentsResponse xmlns="http://cloudsearch.amazonaws.com/doc/2011-02-01"> <IndexDocumentsResult> <FieldNames> <member>average_score</member> <member>brand_id</member> <member>colors</member> <member>context</member> <member>context_owner</member> <member>created_at</member> <member>creator_id</member> <member>description</member> <member>file_size</member> <member>format</member> <member>has_logo</member> <member>has_messaging</member> <member>height</member> <member>image_id</member> <member>ingested_from</member> <member>is_advertising</member> <member>is_photo</member> <member>is_reviewed</member> <member>modified_at</member> <member>subject_date</member> <member>tags</member> <member>title</member> <member>width</member> </FieldNames> </IndexDocumentsResult> <ResponseMetadata> <RequestId>eb2b2390-6bbd-11e2-ab66-93f3a90dcf2a</RequestId> </ResponseMetadata> </IndexDocumentsResponse> """ def 
test_cloudsearch_index_documents(self): """ Check that the correct arguments are sent to AWS when indexing a domain. """ self.set_http_response(status_code=200) api_response = self.service_connection.index_documents('demo') self.assert_request_parameters({ 'Action': 'IndexDocuments', 'DomainName': 'demo', 'Version': '2011-02-01', }) def test_cloudsearch_index_documents_resp(self): """ Check that the AWS response is being parsed correctly when indexing a domain. """ self.set_http_response(status_code=200) api_response = self.service_connection.index_documents('demo') self.assertEqual(api_response, ['average_score', 'brand_id', 'colors', 'context', 'context_owner', 'created_at', 'creator_id', 'description', 'file_size', 'format', 'has_logo', 'has_messaging', 'height', 'image_id', 'ingested_from', 'is_advertising', 'is_photo', 'is_reviewed', 'modified_at', 'subject_date', 'tags', 'title', 'width'])
bsd-3-clause
fanshaohua-fan/flask
scripts/flask-07-upgrade.py
148
10659
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
    flask-07-upgrade
    ~~~~~~~~~~~~~~~~

    This command line script scans a whole application tree and attempts to
    output an unified diff with all the changes that are necessary to easily
    upgrade the application to 0.7 and to not yield deprecation warnings.

    This will also attempt to find `after_request` functions that don't modify
    the response and appear to be better suited for `teardown_request`.

    This application is indeed an incredible hack, but because what it attempts
    to accomplish is impossible to do statically it tries to support the most
    common patterns at least. The diff it generates should be hand reviewed and
    not applied blindly without making backups.

    :copyright: (c) Copyright 2015 by Armin Ronacher.
    :license: see LICENSE for more details.
"""
import re
import os
import inspect
import difflib
import posixpath
from optparse import OptionParser

try:
    import ast
except ImportError:
    ast = None


# How many leading bytes of a file are inspected for template markers.
TEMPLATE_LOOKAHEAD = 4096

_app_re_part = r'((?:[a-zA-Z_][a-zA-Z0-9_]*app)|app|application)'
_string_re_part = r"('([^'\\]*(?:\\.[^'\\]*)*)'" \
                  r'|"([^"\\]*(?:\\.[^"\\]*)*)")'
_from_import_re = re.compile(r'^\s*from flask import\s+')
_url_for_re = re.compile(r'\b(url_for\()(%s)' % _string_re_part)
_render_template_re = re.compile(r'\b(render_template\()(%s)' % _string_re_part)
# BUG FIX: the global inline flag `(?m)` must appear at the start of the
# pattern; trailing position is an error on Python >= 3.11.
_after_request_re = re.compile(r'(?m)((?:@\S+\.(?:app_)?))(after_request)(\b\s*$)')
_module_constructor_re = re.compile(r'([a-zA-Z0-9_][a-zA-Z0-9_]*)\s*=\s*Module'
                                    r'\(__name__\s*(?:,\s*(?:name\s*=\s*)?(%s))?'
                                    % _string_re_part)
_error_handler_re = re.compile(r'%s\.error_handlers\[\s*(\d+)\s*\]' % _app_re_part)
_mod_route_re = re.compile(r'@([a-zA-Z0-9_][a-zA-Z0-9_]*)\.route')
_blueprint_related = [
    (re.compile(r'request\.module'), 'request.blueprint'),
    (re.compile(r'register_module'), 'register_blueprint'),
    (re.compile(r'%s\.modules' % _app_re_part), '\\1.blueprints')
]


def make_diff(filename, old, new):
    """Print a unified diff between *old* and *new* contents of *filename*."""
    for line in difflib.unified_diff(old.splitlines(), new.splitlines(),
                                     posixpath.normpath(posixpath.join('a', filename)),
                                     posixpath.normpath(posixpath.join('b', filename)),
                                     lineterm=''):
        # BUG FIX: `print line` was Python-2-only syntax; `print(line)`
        # behaves identically on both 2 and 3.
        print(line)


def looks_like_teardown_function(node):
    """Return the response parameter name if *node* is an after_request
    function that returns its response argument unmodified, else None.

    NOTE(review): written against the Python 2 ast -- on Python 3,
    ``node.args.args`` holds ``ast.arg`` objects with ``.arg`` rather
    than ``.id``; confirm before running the detection under 3.x.
    """
    returns = [x for x in ast.walk(node) if isinstance(x, ast.Return)]
    if len(returns) != 1:
        return
    return_def = returns[0]
    resp_name = node.args.args[0]
    if not isinstance(return_def.value, ast.Name) or \
       return_def.value.id != resp_name.id:
        return

    for body_node in node.body:
        for child in ast.walk(body_node):
            if isinstance(child, ast.Name) and \
               child.id == resp_name.id:
                if child is not return_def.value:
                    return

    return resp_name.id


def fix_url_for(contents, module_declarations=None):
    """Rewrite module-relative ``url_for`` endpoints to blueprint syntax.

    With *module_declarations* == None every endpoint is rewritten;
    otherwise only calls that can be traced back (via a preceding
    ``@mod.route`` decorator) to one of the declared modules are.
    """
    if module_declarations is None:
        skip_module_test = True
    else:
        skip_module_test = False
        mapping = dict(module_declarations)
    annotated_lines = []

    def make_line_annotations():
        # Lazily build (end-offset, line) pairs so we can map a match
        # offset back to its line.
        if not annotated_lines:
            last_index = 0
            for line in contents.splitlines(True):
                last_index += len(line)
                annotated_lines.append((last_index, line))

    def backtrack_module_name(call_start):
        # Walk backwards from the call site looking for an @mod.route
        # decorator that tells us which module the view belongs to.
        make_line_annotations()
        for idx, (line_end, line) in enumerate(annotated_lines):
            if line_end > call_start:
                for _, line in reversed(annotated_lines[:idx]):
                    match = _mod_route_re.search(line)
                    if match is not None:
                        shortname = match.group(1)
                        return mapping.get(shortname)

    def handle_match(match):
        if not skip_module_test:
            modname = backtrack_module_name(match.start())
            if modname is None:
                return match.group(0)
        prefix = match.group(1)
        endpoint = ast.literal_eval(match.group(2))
        if endpoint.startswith('.'):
            endpoint = endpoint[1:]
        elif '.' not in endpoint:
            endpoint = '.' + endpoint
        else:
            return match.group(0)
        return prefix + repr(endpoint)
    return _url_for_re.sub(handle_match, contents)


def fix_teardown_funcs(contents):
    """Convert trivial after_request functions into teardown_request ones."""

    def is_return_line(line):
        args = line.strip().split()
        return args and args[0] == 'return'

    def fix_single(match, lines, lineno):
        if not lines[lineno + 1].startswith('def'):
            return
        block_lines = inspect.getblock(lines[lineno + 1:])
        func_code = ''.join(block_lines)
        if func_code[0].isspace():
            # NOTE(review): this yields a *list* of statements for an
            # indented function; looks_like_teardown_function then
            # receives a list rather than a FunctionDef -- verify.
            node = ast.parse('if 1:\n' + func_code).body[0].body
        else:
            node = ast.parse(func_code).body[0]
        response_param_name = looks_like_teardown_function(node)
        if response_param_name is None:
            return
        before = lines[:lineno]
        decorator = [match.group(1) +
                     match.group(2).replace('after_', 'teardown_') +
                     match.group(3)]
        body = [line.replace(response_param_name, 'exception')
                for line in block_lines if not is_return_line(line)]
        after = lines[lineno + len(block_lines) + 1:]
        return before + decorator + body + after

    content_lines = contents.splitlines(True)
    # Re-scan from the top after every rewrite until nothing matches
    # (line numbers shift after each fix).  The dead `found_one` local
    # from the original has been dropped.
    while 1:
        for idx, line in enumerate(content_lines):
            match = _after_request_re.match(line)
            if match is None:
                continue
            new_content_lines = fix_single(match, content_lines, idx)
            if new_content_lines is not None:
                content_lines = new_content_lines
                break
        else:
            break

    return ''.join(content_lines)


def get_module_autoname(filename):
    """Return the implicit module name Flask 0.6 derived from *filename*."""
    directory, filename = os.path.split(filename)
    if filename != '__init__.py':
        return os.path.splitext(filename)[0]
    return os.path.basename(directory)


def rewrite_from_imports(prefix, fromlist, lineiter):
    """Collect a (possibly multi-line) from-import and rename Module."""
    import_block = [prefix, fromlist]
    if fromlist[0] == '(' and fromlist[-1] != ')':
        for line in lineiter:
            import_block.append(line)
            if line.rstrip().endswith(')'):
                break
    elif fromlist[-1] == '\\':
        for line in lineiter:
            import_block.append(line)
            if line.rstrip().endswith('\\'):
                break

    return ''.join(import_block).replace('Module', 'Blueprint')


def rewrite_blueprint_imports(contents):
    """Rewrite ``from flask import ... Module`` imports to Blueprint."""
    new_file = []
    lineiter = iter(contents.splitlines(True))
    for line in lineiter:
        match = _from_import_re.search(line)
        if match is not None:
            new_file.extend(rewrite_from_imports(match.group(),
                                                 line[match.end():],
                                                 lineiter))
        else:
            new_file.append(line)
    return ''.join(new_file)


def rewrite_for_blueprints(contents, filename):
    """Rewrite Module() constructors to Blueprint() ones.

    Returns ``(new_contents, {variable_name: module_name})``.
    """
    modules_declared = []

    def handle_match(match):
        target = match.group(1)
        name_param = match.group(2)
        if name_param is None:
            modname = get_module_autoname(filename)
        else:
            modname = ast.literal_eval(name_param)
        modules_declared.append((target, modname))
        return '%s = %s' % (target, 'Blueprint(%r, __name__' % modname)
    new_contents = _module_constructor_re.sub(handle_match, contents)

    if modules_declared:
        new_contents = rewrite_blueprint_imports(new_contents)
        for pattern, replacement in _blueprint_related:
            new_contents = pattern.sub(replacement, new_contents)
    return new_contents, dict(modules_declared)


def upgrade_python_file(filename, contents, teardown):
    """Run all Python-source rewrites on one file and print the diff."""
    new_contents = contents
    if teardown:
        new_contents = fix_teardown_funcs(new_contents)
    new_contents, modules = rewrite_for_blueprints(new_contents, filename)
    new_contents = fix_url_for(new_contents, modules)
    new_contents = _error_handler_re.sub('\\1.error_handler_spec[None][\\2]',
                                         new_contents)
    make_diff(filename, contents, new_contents)


def upgrade_template_file(filename, contents):
    """Rewrite url_for endpoints inside a template and print the diff."""
    new_contents = fix_url_for(contents, None)
    make_diff(filename, contents, new_contents)


def walk_path(path):
    """Yield (filename, kind) pairs for files worth upgrading under *path*."""
    this_file = os.path.realpath(__file__).rstrip('c')
    for dirpath, dirnames, filenames in os.walk(path):
        dirnames[:] = [x for x in dirnames if not x.startswith('.')]
        for filename in filenames:
            filename = os.path.join(dirpath, filename)
            if os.path.realpath(filename) == this_file:
                continue
            if filename.endswith('.py'):
                yield filename, 'python'
            # skip files that are diffs.  These might be false positives
            # when run multiple times.
            elif not filename.endswith(('.diff', '.patch', '.udiff')):
                with open(filename) as f:
                    contents = f.read(TEMPLATE_LOOKAHEAD)
                # BUG FIX: the original tested `'{% for' or '{% if' or
                # '{{ url_for' in contents`, which is always truthy
                # (the bare string literal short-circuits the `or`), so
                # *every* non-.py file was treated as a template.  Test
                # each marker against the contents instead.
                if any(marker in contents
                       for marker in ('{% for', '{% if', '{{ url_for')):
                    yield filename, 'template'


def scan_path(path=None, teardown=True):
    """Upgrade every Python/template file found under *path*."""
    for filename, kind in walk_path(path):
        with open(filename) as f:
            contents = f.read()
        if kind == 'python':
            upgrade_python_file(filename, contents, teardown)
        elif kind == 'template':
            upgrade_template_file(filename, contents)


def main():
    """Entrypoint"""
    parser = OptionParser(usage='%prog [options] [paths]')
    parser.add_option('-T', '--no-teardown-detection', dest='no_teardown',
                      action='store_true', help='Do not attempt to '
                      'detect teardown function rewrites.')
    # NOTE(review): the -b option is parsed but never consulted below --
    # confirm whether bundled-template handling was ever implemented.
    parser.add_option('-b', '--bundled-templates', dest='bundled_tmpl',
                      action='store_true', help='Indicate to the system '
                      'that templates are bundled with modules. Default '
                      'is auto detect.')
    options, args = parser.parse_args()
    if not args:
        args = ['.']
    if ast is None:
        parser.error('Python 2.6 or later is required to run the upgrade script.')
    for path in args:
        scan_path(path, teardown=not options.no_teardown)


if __name__ == '__main__':
    main()
bsd-3-clause
douglas-larocca/urwid
urwid/text_layout.py
6
17276
#!/usr/bin/python # # Urwid Text Layout classes # Copyright (C) 2004-2011 Ian Ward # # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA # # Urwid web site: http://excess.org/urwid/ from urwid.util import calc_width, calc_text_pos, calc_trim_text, is_wide_char, \ move_prev_char, move_next_char from urwid.compat import bytes, PYTHON3, B class TextLayout: def supports_align_mode(self, align): """Return True if align is a supported align mode.""" return True def supports_wrap_mode(self, wrap): """Return True if wrap is a supported wrap mode.""" return True def layout(self, text, width, align, wrap ): """ Return a layout structure for text. :param text: string in current encoding or unicode string :param width: number of screen columns available :param align: align mode for text :param wrap: wrap mode for text Layout structure is a list of line layouts, one per output line. Line layouts are lists than may contain the following tuples: * (column width of text segment, start offset, end offset) * (number of space characters to insert, offset or None) * (column width of insert text, offset, "insert text") The offset in the last two tuples is used to determine the attribute used for the inserted spaces or text respectively. The attribute used will be the same as the attribute at that text offset. 
If the offset is None when inserting spaces then no attribute will be used. """ raise NotImplementedError("This function must be overridden by a real" " text layout class. (see StandardTextLayout)") class CanNotDisplayText(Exception): pass class StandardTextLayout(TextLayout): def __init__(self):#, tab_stops=(), tab_stop_every=8): pass #""" #tab_stops -- list of screen column indexes for tab stops #tab_stop_every -- repeated interval for following tab stops #""" #assert tab_stop_every is None or type(tab_stop_every)==int #if not tab_stops and tab_stop_every: # self.tab_stops = (tab_stop_every,) #self.tab_stops = tab_stops #self.tab_stop_every = tab_stop_every def supports_align_mode(self, align): """Return True if align is 'left', 'center' or 'right'.""" return align in ('left', 'center', 'right') def supports_wrap_mode(self, wrap): """Return True if wrap is 'any', 'space' or 'clip'.""" return wrap in ('any', 'space', 'clip') def layout(self, text, width, align, wrap ): """Return a layout structure for text.""" try: segs = self.calculate_text_segments( text, width, wrap ) return self.align_layout( text, width, segs, wrap, align ) except CanNotDisplayText: return [[]] def pack(self, maxcol, layout): """ Return a minimal maxcol value that would result in the same number of lines for layout. layout must be a layout structure returned by self.layout(). """ maxwidth = 0 assert layout, "huh? 
empty layout?: "+repr(layout) for l in layout: lw = line_width(l) if lw >= maxcol: return maxcol maxwidth = max(maxwidth, lw) return maxwidth def align_layout( self, text, width, segs, wrap, align ): """Convert the layout segs to an aligned layout.""" out = [] for l in segs: sc = line_width(l) if sc == width or align=='left': out.append(l) continue if align == 'right': out.append([(width-sc, None)] + l) continue assert align == 'center' out.append([((width-sc+1) // 2, None)] + l) return out def calculate_text_segments(self, text, width, wrap): """ Calculate the segments of text to display given width screen columns to display them. text - unicode text or byte string to display width - number of available screen columns wrap - wrapping mode used Returns a layout structure without alignment applied. """ nl, nl_o, sp_o = "\n", "\n", " " if PYTHON3 and isinstance(text, bytes): nl = B(nl) # can only find bytes in python3 bytestrings nl_o = ord(nl_o) # + an item of a bytestring is the ordinal value sp_o = ord(sp_o) b = [] p = 0 if wrap == 'clip': # no wrapping to calculate, so it's easy. 
while p<=len(text): n_cr = text.find(nl, p) if n_cr == -1: n_cr = len(text) sc = calc_width(text, p, n_cr) l = [(0,n_cr)] if p!=n_cr: l = [(sc, p, n_cr)] + l b.append(l) p = n_cr+1 return b while p<=len(text): # look for next eligible line break n_cr = text.find(nl, p) if n_cr == -1: n_cr = len(text) sc = calc_width(text, p, n_cr) if sc == 0: # removed character hint b.append([(0,n_cr)]) p = n_cr+1 continue if sc <= width: # this segment fits b.append([(sc,p,n_cr), # removed character hint (0,n_cr)]) p = n_cr+1 continue pos, sc = calc_text_pos( text, p, n_cr, width ) if pos == p: # pathological width=1 double-byte case raise CanNotDisplayText( "Wide character will not fit in 1-column width") if wrap == 'any': b.append([(sc,p,pos)]) p = pos continue assert wrap == 'space' if text[pos] == sp_o: # perfect space wrap b.append([(sc,p,pos), # removed character hint (0,pos)]) p = pos+1 continue if is_wide_char(text, pos): # perfect next wide b.append([(sc,p,pos)]) p = pos continue prev = pos while prev > p: prev = move_prev_char(text, p, prev) if text[prev] == sp_o: sc = calc_width(text,p,prev) l = [(0,prev)] if p!=prev: l = [(sc,p,prev)] + l b.append(l) p = prev+1 break if is_wide_char(text,prev): # wrap after wide char next = move_next_char(text, prev, pos) sc = calc_width(text,p,next) b.append([(sc,p,next)]) p = next break else: # unwrap previous line space if possible to # fit more text (we're breaking a word anyway) if b and (len(b[-1]) == 2 or ( len(b[-1])==1 and len(b[-1][0])==2 )): # look for removed space above if len(b[-1]) == 1: [(h_sc, h_off)] = b[-1] p_sc = 0 p_off = p_end = h_off else: [(p_sc, p_off, p_end), (h_sc, h_off)] = b[-1] if (p_sc < width and h_sc==0 and text[h_off] == sp_o): # combine with previous line del b[-1] p = p_off pos, sc = calc_text_pos( text, p, n_cr, width ) b.append([(sc,p,pos)]) # check for trailing " " or "\n" p = pos if p < len(text) and ( text[p] in (sp_o, nl_o)): # removed character hint b[-1].append((0,p)) p += 1 continue # force 
any char wrap b.append([(sc,p,pos)]) p = pos return b ###################################### # default layout object to use default_layout = StandardTextLayout() ###################################### class LayoutSegment: def __init__(self, seg): """Create object from line layout segment structure""" assert type(seg) == tuple, repr(seg) assert len(seg) in (2,3), repr(seg) self.sc, self.offs = seg[:2] assert type(self.sc) == int, repr(self.sc) if len(seg)==3: assert type(self.offs) == int, repr(self.offs) assert self.sc > 0, repr(seg) t = seg[2] if type(t) == bytes: self.text = t self.end = None else: assert type(t) == int, repr(t) self.text = None self.end = t else: assert len(seg) == 2, repr(seg) if self.offs is not None: assert self.sc >= 0, repr(seg) assert type(self.offs)==int self.text = self.end = None def subseg(self, text, start, end): """ Return a "sub-segment" list containing segment structures that make up a portion of this segment. A list is returned to handle cases where wide characters need to be replaced with a space character at either edge so two or three segments will be returned. """ if start < 0: start = 0 if end > self.sc: end = self.sc if start >= end: return [] # completely gone if self.text: # use text stored in segment (self.text) spos, epos, pad_left, pad_right = calc_trim_text( self.text, 0, len(self.text), start, end ) return [ (end-start, self.offs, bytes().ljust(pad_left) + self.text[spos:epos] + bytes().ljust(pad_right)) ] elif self.end: # use text passed as parameter (text) spos, epos, pad_left, pad_right = calc_trim_text( text, self.offs, self.end, start, end ) l = [] if pad_left: l.append((1,spos-1)) l.append((end-start-pad_left-pad_right, spos, epos)) if pad_right: l.append((1,epos)) return l else: # simple padding adjustment return [(end-start,self.offs)] def line_width( segs ): """ Return the screen column width of one line of a text layout structure. 
This function ignores any existing shift applied to the line, represented by an (amount, None) tuple at the start of the line. """ sc = 0 seglist = segs if segs and len(segs[0])==2 and segs[0][1]==None: seglist = segs[1:] for s in seglist: sc += s[0] return sc def shift_line( segs, amount ): """ Return a shifted line from a layout structure to the left or right. segs -- line of a layout structure amount -- screen columns to shift right (+ve) or left (-ve) """ assert type(amount)==int, repr(amount) if segs and len(segs[0])==2 and segs[0][1]==None: # existing shift amount += segs[0][0] if amount: return [(amount,None)]+segs[1:] return segs[1:] if amount: return [(amount,None)]+segs return segs def trim_line( segs, text, start, end ): """ Return a trimmed line of a text layout structure. text -- text to which this layout structure applies start -- starting screen column end -- ending screen column """ l = [] x = 0 for seg in segs: sc = seg[0] if start or sc < 0: if start >= sc: start -= sc x += sc continue s = LayoutSegment(seg) if x+sc >= end: # can all be done at once return s.subseg( text, start, end-x ) l += s.subseg( text, start, sc ) start = 0 x += sc continue if x >= end: break if x+sc > end: s = LayoutSegment(seg) l += s.subseg( text, 0, end-x ) break l.append( seg ) return l def calc_line_pos( text, line_layout, pref_col ): """ Calculate the closest linear position to pref_col given a line layout structure. Returns None if no position found. 
""" closest_sc = None closest_pos = None current_sc = 0 if pref_col == 'left': for seg in line_layout: s = LayoutSegment(seg) if s.offs is not None: return s.offs return elif pref_col == 'right': for seg in line_layout: s = LayoutSegment(seg) if s.offs is not None: closest_pos = s s = closest_pos if s is None: return if s.end is None: return s.offs return calc_text_pos( text, s.offs, s.end, s.sc-1)[0] for seg in line_layout: s = LayoutSegment(seg) if s.offs is not None: if s.end is not None: if (current_sc <= pref_col and pref_col < current_sc + s.sc): # exact match within this segment return calc_text_pos( text, s.offs, s.end, pref_col - current_sc )[0] elif current_sc <= pref_col: closest_sc = current_sc + s.sc - 1 closest_pos = s if closest_sc is None or ( abs(pref_col-current_sc) < abs(pref_col-closest_sc) ): # this screen column is closer closest_sc = current_sc closest_pos = s.offs if current_sc > closest_sc: # we're moving past break current_sc += s.sc if closest_pos is None or type(closest_pos) == int: return closest_pos # return the last positions in the segment "closest_pos" s = closest_pos return calc_text_pos( text, s.offs, s.end, s.sc-1)[0] def calc_pos( text, layout, pref_col, row ): """ Calculate the closest linear position to pref_col and row given a layout structure. """ if row < 0 or row >= len(layout): raise Exception("calculate_pos: out of layout row range") pos = calc_line_pos( text, layout[row], pref_col ) if pos is not None: return pos rows_above = range(row-1,-1,-1) rows_below = range(row+1,len(layout)) while rows_above and rows_below: if rows_above: r = rows_above.pop(0) pos = calc_line_pos(text, layout[r], pref_col) if pos is not None: return pos if rows_below: r = rows_below.pop(0) pos = calc_line_pos(text, layout[r], pref_col) if pos is not None: return pos return 0 def calc_coords( text, layout, pos, clamp=1 ): """ Calculate the coordinates closest to position pos in text with layout. 
text -- raw string or unicode string layout -- layout structure applied to text pos -- integer position into text clamp -- ignored right now """ closest = None y = 0 for line_layout in layout: x = 0 for seg in line_layout: s = LayoutSegment(seg) if s.offs is None: x += s.sc continue if s.offs == pos: return x,y if s.end is not None and s.offs<=pos and s.end>pos: x += calc_width( text, s.offs, pos ) return x,y distance = abs(s.offs - pos) if s.end is not None and s.end<pos: distance = pos - (s.end-1) if closest is None or distance < closest[0]: closest = distance, (x,y) x += s.sc y += 1 if closest: return closest[1] return 0,0
lgpl-2.1
jlaura/pysal
pysal/contrib/network/test_kfuncs.py
20
1334
"""network unittest""" import unittest import network as pynet import kfuncs class Kfuncs_Tester(unittest.TestCase): def setUp(self): self.distances = {1:[1,2,3,4],2:[1,1,2,3],3:[2,1,1,2], 4:[3,2,1,1],5:[4,3,2,1]} def test__fxrange(self): values = kfuncs._fxrange(0.0,1.0,0.2) for v1, v2 in zip(values, [0.0,0.2,0.4,0.6,0.8,1.0]): self.assertAlmostEqual(v1, v2) def test__binary_search(self): v = kfuncs._binary_search([0.0,0.2,0.4,0.6,0.8,1.0],0.9) self.assertEqual(v, 5) def test_kt_values(self): expected_values = {1: {0.5: 0, 1.5: 10, 2.5: 20}, 2: {0.5: 0, 1.5: 20, 2.5: 30}, 3: {0.5: 0, 1.5: 20, 2.5: 40}, 4: {0.5: 0, 1.5: 20, 2.5: 30}, 5: {0.5: 0, 1.5: 10, 2.5: 20}} kfunc_values = {} for k, v in self.distances.items(): kfunc_values[k] = kfuncs.kt_values((0.5,3.5,1.0),v,10) self.assertEqual(kfunc_values, expected_values) suite = unittest.TestSuite() test_classes = [Kfuncs_Tester] for i in test_classes: a = unittest.TestLoader().loadTestsFromTestCase(i) suite.addTest(a) if __name__ == '__main__': runner = unittest.TextTestRunner() runner.run(suite)
bsd-3-clause
imanolarrieta/RL
rlpy/Domains/HelicopterHover.py
4
16981
"""Helicopter hovering task.""" from .Domain import Domain import numpy as np import rlpy.Tools.transformations as trans from rlpy.Tools.GeneralTools import cartesian import matplotlib.pyplot as plt from matplotlib.patches import FancyArrowPatch, Circle, Ellipse from mpl_toolkits.mplot3d import proj3d __copyright__ = "Copyright 2013, RLPy http://acl.mit.edu/RLPy" __credits__ = ["Alborz Geramifard", "Robert H. Klein", "Christoph Dann", "William Dabney", "Jonathan P. How"] __license__ = "BSD 3-Clause" __author__ = "Christoph Dann <cdann@cdann.de>" class Arrow3D(FancyArrowPatch): """ Helper class for plotting arrows in 3d """ def __init__(self, xs, ys, zs, *args, **kwargs): FancyArrowPatch.__init__(self, (0, 0), (0, 0), *args, **kwargs) self._verts3d = xs, ys, zs def draw(self, renderer): xs3d, ys3d, zs3d = self._verts3d xs, ys, zs = proj3d.proj_transform(xs3d, ys3d, zs3d, renderer.M) self.set_positions((xs[0], ys[0]), (xs[1], ys[1])) FancyArrowPatch.draw(self, renderer) class HelicopterHoverExtended(Domain): """ Implementation of a simulator that models one of the Stanford autonomous helicopters (an XCell Tempest helicopter) in the flight regime close to hover. 
Adapted from the `RL-Community Java Implementation <http://library.rl-community.org/wiki/Helicopter_(Java)>`_ **STATE:** The state of the helicopter is described by a 20-dimensional vector with the following entries: * 0: xerr [helicopter x-coord position - desired x-coord position] -- helicopter's x-axis points forward * 1: yerr [helicopter y-coord position - desired y-coord position] -- helicopter's y-axis points to the right * 2: zerr [helicopter z-coord position - desired z-coord position] -- helicopter's z-axis points down * 3: u [forward velocity] * 4: v [sideways velocity (to the right)] * 5: w [downward velocity] * 6: p [angular rate around helicopter's x axis] * 7: q [angular rate around helicopter's y axis] * 8: r [angular rate around helicopter's z axis] * 9-12: orientation of heli in world as quaterion * 13-18: current noise due to gusts (usually not observable!) * 19: t number of timesteps in current episode **REFERENCE:** .. seealso:: Abbeel, P., Ganapathi, V. & Ng, A. Learning vehicular dynamics, with application to modeling helicopters. Advances in Neural Information Systems (2006). """ MAX_POS = 20. #: [m] maximum deviation in position in each dimension MAX_VEL = 10. #: [m/s] maximum velocity in each dimension MAX_ANG_RATE = 4 * np.pi # : maximum angular velocity MAX_ANG = 1. WIND_MAX = 5. # : maximum gust indensity MIN_QW_BEFORE_HITTING_TERMINAL_STATE = np.cos(30. / 2. * np.pi / 180.) 
wind = np.array([.0, .0, 0.]) #: wind in neutral orientation discount_factor = 0.95 #: discount factor gust_memory = 0.8 domain_fig = None episodeCap = 6000 # model specific parameters from the learned model noise_std = np.array([0.1941, 0.2975, 0.6058, 0.1508, 0.2492, 0.0734]) drag_vel_body = np.array([.18, .43, .49]) drag_ang_rate = np.array([12.78, 10.12, 8.16]) u_coeffs = np.array([33.04, -33.32, 70.54, -42.15]) tail_rotor_side_thrust = -0.54 dt = 0.01 #: length of one timestep continuous_dims = np.arange(20) statespace_limits_full = np.array([[-MAX_POS, MAX_POS]] * 3 + [[-MAX_VEL, MAX_VEL]] * 3 + [[-MAX_ANG_RATE, MAX_ANG_RATE]] * 3 + [[-MAX_ANG, MAX_ANG]] * 4 + [[-2., 2.]] * 6 + [[0, episodeCap]]) statespace_limits = statespace_limits_full # create all combinations of possible actions _action_bounds = np.array([[-2., 2.]] * 4) # maximum action: 2 _actions_dim = np.array( [[-.2, -0.05, 0.05, 0.2]] * 3 + [[0., 0.15, 0.3, 0.5]]) actions = cartesian(list(_actions_dim)) #: all possible actions actions_num = np.prod(actions.shape[0]) def __init__(self, noise_level=1., discount_factor=0.95): self.noise_level = noise_level self.discount_factor = discount_factor super(HelicopterHoverExtended, self).__init__() def s0(self): self.state = np.zeros((20)) self.state[9] = 1. return self.state.copy(), self.isTerminal(), self.possibleActions() def isTerminal(self): s = self.state if np.any(self.statespace_limits_full[:9, 0] > s[:9]) or np.any(self.statespace_limits_full[:9, 1] < s[:9]): return True if len(s) <= 12: w = np.sqrt(1. - np.sum(s[9:12] ** 2)) else: w = s[9] return np.abs(w) < self.MIN_QW_BEFORE_HITTING_TERMINAL_STATE def _get_reward(self): s = self.state if self.isTerminal(): r = -np.sum(self.statespace_limits[:9, 1] ** 2) #r -= np.sum(self.statespace_limits[10:12, 1] ** 2) r -= (1. 
- self.MIN_QW_BEFORE_HITTING_TERMINAL_STATE ** 2) return r * (self.episodeCap - s[-1]) else: return -np.sum(s[:9] ** 2) - np.sum(s[10:12] ** 2) def possibleActions(self, s=None): return np.arange(self.actions_num) def step(self, a): a = self.actions[a] # make sure the actions are not beyond their limits a = np.maximum(self._action_bounds[:, 0], np.minimum(a, self._action_bounds[:, 1])) pos, vel, ang_rate, ori_bases, q = self._state_in_world(self.state) t = self.state[-1] gust_noise = self.state[13:19] gust_noise = (self.gust_memory * gust_noise + (1. - self.gust_memory) * self.random_state.randn(6) * self.noise_level * self.noise_std) # update noise which simulates gusts for i in range(10): # Euler integration # position pos += self.dt * vel # compute acceleration on the helicopter vel_body = self._in_world_coord(vel, q) wind_body = self._in_world_coord(self.wind, q) wind_body[-1] = 0. # the java implementation # has it this way acc_body = -self.drag_vel_body * (vel_body + wind_body) acc_body[-1] += self.u_coeffs[-1] * a[-1] acc_body[1] += self.tail_rotor_side_thrust acc_body += gust_noise[:3] acc = self._in_body_coord(acc_body, q) acc[-1] += 9.81 # gravity # velocity vel += self.dt * acc # orientation tmp = self.dt * ang_rate qdt = trans.quaternion_about_axis(np.linalg.norm(tmp), tmp) q = trans.quaternion_multiply(q, qdt) #assert np.allclose(1., np.sum(q**2)) # angular accelerations ang_acc = -ang_rate * self.drag_ang_rate + \ self.u_coeffs[:3] * a[:3] ang_acc += gust_noise[3:] ang_rate += self.dt * ang_acc st = np.zeros_like(self.state) st[:3] = -self._in_body_coord(pos, q) st[3:6] = self._in_body_coord(vel, q) st[6:9] = ang_rate st[9:13] = q st[13:19] = gust_noise st[-1] = t + 1 self.state = st.copy() return ( self._get_reward(), st, self.isTerminal(), self.possibleActions() ) def _state_in_world(self, s): """ transforms state from body coordinates in world coordinates .. warning:: angular rate still in body frame! 
""" pos_body = s[:3] vel_body = s[3:6] ang_rate = s[6:9].copy() q = s[9:13].copy() pos = self._in_world_coord(-pos_body, q) vel = self._in_world_coord(vel_body, q) rot = trans.quaternion_matrix(trans.quaternion_conjugate(q))[:3, :3] return pos, vel, ang_rate, rot, q def _in_body_coord(self, p, q): """ q is the inverse quaternion of the rotation of the helicopter in world coordinates """ q_pos = np.zeros((4)) q_pos[1:] = p q_p = trans.quaternion_multiply(trans.quaternion_multiply(q, q_pos), trans.quaternion_conjugate(q)) return q_p[1:] def _in_world_coord(self, p, q): """ q is the inverse quaternion of the rotation of the helicopter in world coordinates """ return self._in_body_coord(p, trans.quaternion_conjugate(q)) def showDomain(self, a=None): s = self.state if a is not None: a = self.actions[a].copy() * 3 # amplify for visualization pos, vel, ang_rate, ori_bases, _ = self._state_in_world(s) coords = np.zeros((3, 3, 2)) + pos[None, :, None] coords[:, :, 1] += ori_bases * 4 u, v = np.mgrid[0:2 * np.pi:10j, 0:2:1.] # rotor coordinates coord = np.zeros([3] + list(u.shape)) coord[0] = .1 * np.sin(u) * v coord[1] = 0. 
coord[2] = .1 * np.cos(u) * v coord[0] -= 0.8 coord_side = np.einsum("ij,jkl->ikl", np.linalg.pinv(ori_bases), coord) coord_side += pos[:, None, None] coord = np.zeros([3] + list(u.shape)) coord[0] = .6 * np.cos(u) * v coord[1] = .6 * np.sin(u) * v coord[2] = -.4 coord_main = np.einsum("ij,jkl->ikl", np.linalg.pinv(ori_bases), coord) coord_main += pos[:, None, None] style = dict(fc="r", ec="r", lw=2., head_width=0.05, head_length=0.1) if self.domain_fig is None: self.domain_fig = plt.figure(figsize=(12, 8)) # action axes ax1 = plt.subplot2grid((1, 3), (0, 0), frameon=False) ax1.get_xaxis().set_visible(False) ax1.get_yaxis().set_visible(False) lim = 2 # self.MAX_POS ax1.set_xlim(-lim, lim) ax1.set_ylim(-lim, lim) if a is None: a = np.zeros((4)) # main rotor ax1.add_artist(Circle(np.zeros((2)), radius=0.6)) ax1.add_artist(Ellipse(np.array([0, 1.5]), height=0.3, width=0.02)) # TODO make sure the actions are plotted right # main rotor direction? arr1 = ax1.arrow(0, 0, a[0], 0, **style) arr2 = ax1.arrow(0, 0, 0, a[1], **style) # side rotor throttle? 
arr3 = ax1.arrow(0, 1.5, a[2], 0, **style) # main rotor throttle arr4 = ax1.arrow(1.5, 0, 0, a[3], **style) ax1.set_aspect("equal") self.action_arrows = (arr1, arr2, arr3, arr4) self.action_ax = ax1 #ax = self.domain_fig.gca(projection='3d') ax = plt.subplot2grid((1, 3), (0, 1), colspan=2, projection='3d') ax.view_init(elev=np.pi) # print origin x = Arrow3D([0, 2], [0, 0], [0, 0], mutation_scale=30, lw=1, arrowstyle="-|>", color="r") y = Arrow3D([0, 0], [0, 2], [0, 0], mutation_scale=30, lw=1, arrowstyle="-|>", color="b") z = Arrow3D([0, 0], [0, 0], [0, 2], mutation_scale=30, lw=1, arrowstyle="-|>", color="g") ax.add_artist(x) ax.add_artist(y) ax.add_artist(z) # print helicopter coordinate axes x = Arrow3D(*coords[0], mutation_scale=30, lw=2, arrowstyle="-|>", color="r") y = Arrow3D(*coords[1], mutation_scale=30, lw=2, arrowstyle="-|>", color="b") z = Arrow3D(*coords[2], mutation_scale=30, lw=2, arrowstyle="-|>", color="g") ax.add_artist(x) ax.add_artist(y) ax.add_artist(z) self.heli_arrows = (x, y, z) self._wframe_main = ax.plot_wireframe(coord_main[0], coord_main[1], coord_main[2], color="k") self._wframe_side = ax.plot_wireframe(coord_side[0], coord_side[1], coord_side[2], color="k") self._ax = ax ax.set_aspect("equal") lim = 5 # self.MAX_POS ax.set_xlim(-lim, lim) ax.set_ylim(-lim, lim) ax.set_zlim(-lim, lim) ax.view_init(elev=-135) plt.show() else: self.heli_arrows[0]._verts3d = tuple(coords[0]) self.heli_arrows[1]._verts3d = tuple(coords[1]) self.heli_arrows[2]._verts3d = tuple(coords[2]) ax = self._ax ax.collections.remove(self._wframe_main) ax.collections.remove(self._wframe_side) for arr in self.action_arrows: self.action_ax.artists.remove(arr) ax1 = self.action_ax # TODO make sure the actions are plotted right # main rotor direction? arr1 = ax1.arrow(0, 0, a[0], 0, **style) arr2 = ax1.arrow(0, 0, 0, a[1], **style) # side rotor throttle? 
arr3 = ax1.arrow(0, 1.5, a[2], 0, **style) # main rotor throttle arr4 = ax1.arrow(1.5, 0, 0, a[3], **style) self.action_arrows = (arr1, arr2, arr3, arr4) self._wframe_main = ax.plot_wireframe(coord_main[0], coord_main[1], coord_main[2], color="k") self._wframe_side = ax.plot_wireframe(coord_side[0], coord_side[1], coord_side[2], color="k") ax.set_aspect("equal") lim = 5 # self.MAX_POS ax.set_xlim(-lim, lim) ax.set_ylim(-lim, lim) ax.set_zlim(-lim, lim) ax.view_init(elev=-135) self.domain_fig.canvas.draw() class HelicopterHover(HelicopterHoverExtended): """ .. warning:: This domain has an internal hidden state, as it actually is a POMDP. Besides the 12-dimensional observable state, there is an internal state saved as ``self.hidden_state_`` (time and long-term noise which simulated gusts of wind). be aware of this state if you use this class to produce samples which are not in order Implementation of a simulator that models one of the Stanford autonomous helicopters (an XCell Tempest helicopter) in the flight regime close to hover. Adapted from the `RL-Community Java Implementation <http://library.rl-community.org/wiki/Helicopter_(Java)>`_ **STATE:** The state of the helicopter is described by a 12-dimensional vector with the following entries: * 0: xerr [helicopter x-coord position - desired x-coord position] -- helicopter's x-axis points forward * 1: yerr [helicopter y-coord position - desired y-coord position] -- helicopter's y-axis points to the right * 2: zerr [helicopter z-coord position - desired z-coord position] -- helicopter's z-axis points down * 3: u [forward velocity] * 4: v [sideways velocity (to the right)] * 5: w [downward velocity] * 6: p [angular rate around helicopter's x axis] * 7: q [angular rate around helicopter's y axis] * 8: r [angular rate around helicopter's z axis] * 9-11: orientation of the world in the heli system as quaterion **REFERENCE:** .. seealso:: Abbeel, P., Ganapathi, V. & Ng, A. 
Learning vehicular dynamics, with application to modeling helicopters. Advances in Neural Information Systems (2006). """ episodeCap = 6000 MAX_POS = 20. # m MAX_VEL = 10. # m/s MAX_ANG_RATE = 4 * np.pi MAX_ANG = 1. WIND_MAX = 5. continuous_dims = np.arange(12) statespace_limits = np.array([[-MAX_POS, MAX_POS]] * 3 + [[-MAX_VEL, MAX_VEL]] * 3 + [[-MAX_ANG_RATE, MAX_ANG_RATE]] * 3 + [[-MAX_ANG, MAX_ANG]] * 3) #full_state_ = np.zeros((20)) def s0(self): #self.hidden_state_ = np.zeros((8)) #self.hidden_state_[0] = 1. s_full, term, p_actions = super(HelicopterHover, self).s0() s, _ = self._split_state(s_full) return s, term, p_actions def _split_state(self, s): s_observable = np.zeros((12)) s_observable[:9] = s[:9] s_observable[9:12] = s[10:13] s_hidden = np.zeros((8)) s_hidden[0] = s[9] s_hidden[1:] = s[13:] return s_observable, s_hidden def step(self, a): #s_extended = self._augment_state(s) r, st, term, p_actions = super(HelicopterHover, self).step(a) st, _ = self._split_state(st) return (r, st, term, p_actions)
bsd-3-clause