text
stringlengths 4
1.02M
| meta
dict |
|---|---|
from __future__ import print_function
def task(*fn, **kwargs):
    """Mark a function as a doit task, optionally attaching metadata.

    Works both as a bare decorator (``@task``) and as a parameterized
    one (``@task(output=[...])``); the keyword arguments end up in the
    function's ``task_metadata`` attribute.
    """
    if fn:
        # Bare form: @task — the decorated function arrives positionally.
        target = fn[0]
        target.task_metadata = {}
        return target

    # Parameterized form: @task(key=value, ...) — return the real decorator.
    def wrap(function):
        function.task_metadata = kwargs
        return function
    return wrap
@task
def simple():
    # Minimal example task: no inputs, no outputs, just prints a message.
    print("thats all folks")
@task(output=['my_input.txt'])
def pre(to_create):
    # Create the declared output file and seed it with placeholder content.
    with open(to_create[0], 'w') as fp:
        fp.write('foo')
@task(output=['out1.txt', 'out2.txt'])
def create(to_be_created):
    # Only announces its declared outputs; does not actually write them.
    print("I should create these files: %s" % " ".join(to_be_created))
@task(input=['my_input.txt'], output=['my_output_result.txt'])
def process(in_, out_):
    # Report the input being consumed and the output being produced.
    print("processing %s" % in_[0])
    print("creating %s" % out_[0])
|
{
"content_hash": "43b988278e330dbd26e6fe6721978b98",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 70,
"avg_line_length": 21.62162162162162,
"alnum_prop": 0.6075,
"repo_name": "wangpanjun/doit",
"id": "205fb81e16489df426fa554f160a8064a546e79b",
"size": "800",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "doc/tutorial/my_tasks.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "491553"
}
],
"symlink_target": ""
}
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import talib
from pyalgotrade import strategy, plotter
from pyalgotrade.broker.backtesting import TradePercentage, Broker
from pyalgotrade.broker import Order
from pyalgotrade.barfeed import yahoofeed
from pyalgotrade.broker.slippage import NoSlippage, VolumeShareSlippage
from pyalgotrade.stratanalyzer import returns, trades
from pyalgotrade.talibext import indicator
from pyalgotrade.optimizer import server, local
import itertools
from sklearn import preprocessing, svm, cross_validation, metrics, pipeline, grid_search
from scipy.stats import sem
from DaysDataPrepare import readWSDFile, readWSDIndexFile, prepareData, optimizeSVM
def readAndReWriteCSV(baseDir, instrument, startYear, yearNum=1):
    """Read per-year WSD csv files, concatenate them, re-save them under a
    Yahoo-feed-style column layout and return (written path, DataFrame)."""
    dateparse = lambda x: pd.datetime.strptime(x, '%Y-%m-%d').date()
    df = 0
    for offset in range(yearNum):
        year = startYear + offset
        yearDF = pd.read_csv(
            baseDir + instrument + '/wsd_' + instrument + '_' + str(year) + '.csv',
            index_col=0, sep='\t', usecols=[0, 2, 3, 4, 5, 6, 14], header=None,
            skiprows=1,
            names=['Date', 'Open', 'High', 'Low', 'Close', 'Volume', 'Adj Close'],
            parse_dates=True, date_parser=dateparse)
        # First year starts the frame; later years are appended to it.
        df = yearDF if offset == 0 else df.append(yearDF)
    if yearNum == 1:
        pathName = baseDir + str(instrument) + '_' + str(startYear) + '.csv'
        resultDF = df[str(startYear)]
    else:
        endYear = startYear + yearNum - 1
        pathName = (baseDir + str(instrument) + '_' + str(startYear)
                    + '_' + str(endYear) + '.csv')
        resultDF = df[str(startYear):str(endYear)]
    resultDF.to_csv(pathName)
    return pathName, resultDF
'''
计算收益率
'''
def returnRatio(V, C=100000.0):
    """Simple (single-period) return: final value V over base capital C, minus one."""
    ratio = V / C
    return ratio - 1.0
'''
计算收益率(多期)
'''
def returnRatioArr1(VArr, C=100000.0):
    """Per-period returns, each measured against the same fixed base C.

    Rewritten as a list comprehension (the original built the list with a
    manual append loop); behavior is unchanged, including for empty input.
    """
    return [v / C - 1.0 for v in VArr]
def returnRatioArr(VArr, C=100000.0):
    """Chained per-period returns: each period's base is the previous
    period's portfolio value (the first period's base is C)."""
    ratios = []
    base = C
    for value in VArr:
        ratios.append(value / base - 1.0)
        base = value
    return ratios
'''
计算年化收益率(多期)
'''
def annualizedReturnRatio(returnRatioArr, T=250.0, D=250.0):
    """Annualize a sequence of per-period returns spanning T trading days,
    assuming D trading days per year. Empty input yields 0.0."""
    import math
    growth = 1
    for r in returnRatioArr:
        growth *= r + 1
    return math.pow(growth, D / T) - 1
'''
计算年化收益率(单期)
'''
def annualizedReturnRatioSingle(portfolio, C=100000.0, T=250.0, D=250.0):
    """Annualized return implied by a single final portfolio value,
    given base capital C, holding span T days and D trading days/year."""
    import math
    total_growth = portfolio / C
    return math.pow(total_growth, D / T) - 1
# --- Script configuration and data preparation (Python 2 syntax) ---
baseDir = '/Users/eugene/Downloads/Data/'
# baseDir = '/Users/eugene/Downloads/marketQuotationData/'
# CSI 300 / SSE 50 / CSI 500 index codes, followed by individual stocks
instruments = ['000300.SH', '000016.SH', '000905.SH', '002047.SZ', '600015.SH', '600674.SH', '000566.SZ']
instrument = instruments[6]
initCapital = 10000 #100000000.0 # one hundred million
startYear = 2015; yearNum = 2
# startYear = 2014; yearNum = 3
# startYear = 2014; yearNum = 2
winK = 15  # sliding-window length passed to prepareData
df = readWSDFile(baseDir, instrument, startYear, yearNum)
print 'Day count:', len(df)
# print df.head(5)
dfi = readWSDIndexFile(baseDir, instrument, startYear, yearNum)
X, y, actionDates = prepareData(df, dfi, win=winK)
print np.shape(X), np.shape(actionDates), np.shape(y); print y
normalizer = preprocessing.Normalizer().fit(X) # fit does nothing
X_norm = normalizer.transform(X)
# Grid-search the SVM hyper-parameters on the normalized features
gamma, C, score = optimizeSVM(X_norm, y, kFolds=10); print 'gamma=',gamma, 'C=',C, 'score=',score
clf = svm.SVC(kernel='rbf', gamma=gamma, C=C)
# clf = svm.SVC(kernel='rbf', gamma=0.125, C=0.125)
# clf = svm.SVC(kernel='rbf', gamma=512, C=32768)
# clf = svm.SVC(kernel='rbf', gamma=2048, C=32768)
# clf = svm.SVC(kernel='rbf', gamma=2048, C=32768)
# clf = svm.SVC(kernel='rbf', gamma=0.125, C=0.125)
# clf = svm.SVC(kernel='rbf', gamma=0.125, C=0.125)
# Re-save the raw data in Yahoo-feed layout so pyalgotrade can read it
pathName, df = readAndReWriteCSV(baseDir, instrument, startYear=startYear, yearNum=yearNum)
print pathName
# print df.sample(3)
feed = yahoofeed.Feed()
feed.addBarsFromCSV(instrument, pathName)
class SVMStrategy(strategy.BacktestingStrategy):
    """Walk-forward SVM long/flat strategy for pyalgotrade.

    At the end of each segment it retrains the classifier on the previous
    `win` segments and goes long on a +1 prediction / flat on -1.
    NOTE(review): reads the module-level globals `instrument`,
    `initCapital`, `df`, `clf`, `X_norm`, `y`, `actionDates`.
    """

    def __init__(self, feed, win=10):
        super(SVMStrategy, self).__init__(feed)
        self.__instrument = instrument
        self.__position = None
        self.getBroker().setCash(initCapital)
        self.getBroker().setCommission(TradePercentage(0.003))
        self.getBroker().setAllowNegativeCash(True)
        self.getBroker().getFillStrategy().setVolumeLimit(1)
        self.getBroker().getFillStrategy().setSlippageModel(VolumeShareSlippage(priceImpact=0.0))
        self.__closeDataSeries = feed[instrument].getCloseDataSeries()
        self.df = df
        self.closeArr = []    # close price recorded per bar
        self.portfolios = []  # broker equity recorded per bar
        self.buys = []        # fill dates of buy orders (for plotting)
        self.sells = []       # fill dates of sell orders (for plotting)
        self.clf = clf
        self.X_norm = X_norm
        self.y = y
        self.actionDates = actionDates
        self.predictions = []
        self.win = win        # number of past segments used for training
        # print 'week count:', len(y)
        self.segmentCount = 1
        self.dayCount = 0
        self.errorCount = 0
        self.rightCount = 0

    def getDF(self):
        return self.df

    def getBuys(self):
        return self.buys

    def getSells(self):
        return self.sells

    def getCorrectness(self):
        # Fraction of correct direction predictions (assumes at least one
        # prediction was made, otherwise this divides by zero).
        return self.rightCount*1.0/(self.errorCount+self.rightCount)

    def getPredictions(self):
        return self.predictions

    def onEnterOk(self, position):
        # execInfo = position.getEntryOrder().getExecutionInfo()
        # self.info("%s BUY %.0f shares at %.3f, commission=%.3f, PnL=%.3f" %
        #           (execInfo.getDateTime().date(), execInfo.getQuantity(), execInfo.getPrice(), execInfo.getCommission(), position.getPnL()))
        pass

    def onEnterCanceled(self, position):
        self.__position = None

    def onExitOk(self, position):
        # execInfo = position.getExitOrder().getExecutionInfo()
        # self.info("%s SELL %.0f shares at %.3f, commission=%.3f, PnL=%.3f" %
        #           (execInfo.getDateTime().date(), execInfo.getQuantity(), execInfo.getPrice(), execInfo.getCommission(), position.getPnL()))
        self.__position = None

    def onExitCanceled(self, position):
        # If the exit was canceled, re-submit it.
        self.__position.exitMarket()

    def onStart(self):
        pass

    def onFinish(self, bars):
        # Attach the per-bar series to the DataFrame for later plotting.
        self.df['closeArr'] = self.closeArr
        self.df['portfolio'] = self.portfolios
        # print 'dayCount=',self.dayCount
        # print 'errorCount=',self.errorCount, 'rightCount=',self.rightCount
        pass

    def onOrderUpdated(self, order):
        execInfo = order.getExecutionInfo()
        fillDate = None
        if execInfo!=None:
            fillDate = execInfo.getDateTime().date()
            # action 1 == BUY; remember the fill date for chart markers
            if order.getAction()==1: self.buys.append(fillDate)
            else: self.sells.append(fillDate)

    def onBars(self, bars):
        self.closeArr.append(bars[self.__instrument].getPrice())
        self.portfolios.append(self.getBroker().getEquity())
        self.dayCount += 1
        curDate = bars[self.__instrument].getDateTime().date()
        if curDate!=self.actionDates[self.segmentCount-1]: # not the last day of the current segment
            return
        else: # last day of the segment
            if self.segmentCount < self.win+1:
                # Not enough history yet: wait until `win` segments exist.
                self.segmentCount += 1
                return
            else:
                # Train on the previous `win` segments, predict this one.
                X_train = self.X_norm[self.segmentCount - self.win - 1:self.segmentCount - 1]
                y_train = self.y[self.segmentCount - self.win - 1:self.segmentCount - 1]
                X_test = self.X_norm[self.segmentCount - 1]
                y_test = self.y[self.segmentCount - 1]
                self.clf.fit(X_train, y_train)
                result = self.clf.predict([X_test])[0] # -1 predicts a fall, 1 predicts a rise
                self.predictions.append(result)
                if result!=y_test: self.errorCount += 1 # misclassified
                else: self.rightCount += 1 # classified correctly
                # If a position was not opened, check if we should enter a long position.
                if self.__position is None:
                    if result==1:
                        shares = int(self.getBroker().getCash() / bars[self.__instrument].getPrice())
                        # NOTE(review): Python 2 integer division — trades whole lots of 100
                        hands = shares/100
                        # Enter a buy market order. The order is good till canceled.
                        self.__position = self.enterLong(self.__instrument, hands*100, False)
                # Check if we have to exit the position.
                elif not self.__position.exitActive() and result==-1:
                    self.__position.exitMarket()
                self.segmentCount += 1
        pass
def parameters_generator():
    """Yield the candidate (win,) parameter tuples for the optimizer."""
    window_sizes = range(13, 23)
    return itertools.product(window_sizes)
def testWithBestParameters(win=10):
    # Run one backtest with the chosen (best) training-window size,
    # print accuracy/return statistics and plot price + equity curves.
    myStrategy = SVMStrategy(feed, win=win)
    returnsAnalyzer = returns.Returns()
    myStrategy.attachAnalyzer(returnsAnalyzer)
    tradesAnalyzer = trades.Trades()
    myStrategy.attachAnalyzer(tradesAnalyzer)
    myStrategy.run()
    df = myStrategy.getDF()
    # print df[['Close', 'closeArr', 'fastSMA', 'slowSMA']].sample(5)
    buys = myStrategy.getBuys()
    sells = myStrategy.getSells()
    print myStrategy.getPredictions(); print actionDates
    # print 'TRADE INFO: ', 'count=',tradesAnalyzer.getCount(), 'allProfits=',tradesAnalyzer.getAll(), 'allReturns=',tradesAnalyzer.getAllReturns()
    print "Accuracy: %.3f" % myStrategy.getCorrectness()
    print "总净值: %.3f" % myStrategy.getResult()
    print "总收益率: %.3f" % returnRatio(myStrategy.getResult(), C=initCapital)
    print "年化收益率: %.3f" % annualizedReturnRatioSingle(myStrategy.getResult(), C=initCapital, T=250.0*yearNum, D=250.0)
    # Upper panel: close price with buy (^) / sell (v) markers
    fig = plt.figure(figsize=(20,10))
    ax1 = fig.add_subplot(211)
    df[['closeArr']].plot(ax=ax1, lw=2.)
    ax1.plot(buys, df.closeArr.ix[buys], '^', markersize=10, color='m')
    ax1.plot(sells, df.closeArr.ix[sells], 'v', markersize=10, color='k')
    # Lower panel: normalized equity curve with the same trade markers
    ax2 = fig.add_subplot(212)
    portfolio_ratio = df['portfolio']/initCapital
    portfolio_ratio.plot(ax=ax2, lw=2.)
    ax2.plot(buys, portfolio_ratio.ix[buys], '^', markersize=10, color='m')
    ax2.plot(sells, portfolio_ratio.ix[sells], 'v', markersize=10, color='k')
    # ax3 = fig.add_subplot(313)
    # df['portfolio'].plot(ax=ax3, lw=2.)
    # ax3.plot(buys, df['portfolio'].ix[buys], '^', markersize=10, color='m')
    # ax3.plot(sells, df['portfolio'].ix[sells], 'v', markersize=10, color='k')
    fig.tight_layout()
    plt.show()
def test(isOptimize=True, win=9):
    if isOptimize: # search for the best window size with the local optimizer
        results = local.run(SVMStrategy, feed, parameters_generator())
        print 'Parameters:', results.getParameters(), 'Result:', results.getResult()
        print results.getParameters()[0]
    else: # backtest once with the supplied (best-known) window size
        testWithBestParameters(win=win)


# Entry point: backtest with the previously found best window (22).
test(isOptimize=False, win=22)
|
{
"content_hash": "c00e3eb98b84a7bd5d43b518aa9faf16",
"timestamp": "",
"source": "github",
"line_count": 298,
"max_line_length": 147,
"avg_line_length": 37.35906040268456,
"alnum_prop": 0.6288511632084793,
"repo_name": "Ernestyj/PyStudy",
"id": "979517250bd22ade8dc76dab3e76886788290d70",
"size": "11380",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "finance/DaysTest/TestingSVM.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "DIGITAL Command Language",
"bytes": "17511"
},
{
"name": "HTML",
"bytes": "192730"
},
{
"name": "Python",
"bytes": "333151"
}
],
"symlink_target": ""
}
|
from setuptools import setup, find_packages
import os
version = '0.3.2'

# Long description = README followed by the changelog from docs/.
long_description = (open("README.rst").read() + "\n" +
                    open(os.path.join("docs", "HISTORY.txt")).read())

setup(
    name='django-moderation',
    version=version,
    description="Generic Django objects moderation application",
    long_description=long_description,
    classifiers=[
        'Development Status :: 4 - Beta',
        'Environment :: Web Environment',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Framework :: Django',
    ],
    keywords='django moderation models',
    author='Dominik Szopa',
    author_email='dszopa@gmail.com',
    url='http://github.com/dominno/django-moderation',
    license='BSD',
    packages=find_packages('src'),
    package_dir={'': 'src'},
    include_package_data=True,
    install_requires=[
        'setuptools',
    ],
    zip_safe=False,
)
|
{
"content_hash": "53b6f6e9198e3de6b14013246cd4ba4f",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 72,
"avg_line_length": 31.636363636363637,
"alnum_prop": 0.5823754789272031,
"repo_name": "ebrelsford/django-moderation",
"id": "ac21d6f0f3d317ec23c9a7155dc4ae7490394b63",
"size": "1044",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "150827"
},
{
"name": "Shell",
"bytes": "425"
}
],
"symlink_target": ""
}
|
'''OpenGL extension EXT.shader_integer_mix
This module customises the behaviour of the
OpenGL.raw.GL.EXT.shader_integer_mix to provide a more
Python-friendly API
Overview (from the spec)
GLSL 1.30 (and GLSL ES 3.00) expanded the mix() built-in function to
operate on a boolean third argument that does not interpolate but
selects. This extension extends mix() to select between int, uint,
and bool components.
The official definition of this extension is available here:
http://www.opengl.org/registry/specs/EXT/shader_integer_mix.txt
'''
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.EXT.shader_integer_mix import *
from OpenGL.raw.GL.EXT.shader_integer_mix import _EXTENSION_NAME
def glInitShaderIntegerMixEXT():
    '''Return boolean indicating whether this extension is available'''
    # Local import keeps the autogenerated helper self-contained.
    from OpenGL import extensions
    return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION
|
{
"content_hash": "63d48813965e85266b8c2de0112c6803",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 71,
"avg_line_length": 34.46666666666667,
"alnum_prop": 0.7823984526112185,
"repo_name": "alexus37/AugmentedRealityChess",
"id": "c73a95cb3b41f93c436357b2ba406310170ebbd7",
"size": "1034",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "pythonAnimations/pyOpenGLChess/engineDirectory/oglc-env/lib/python2.7/site-packages/OpenGL/GL/EXT/shader_integer_mix.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "158062"
},
{
"name": "C++",
"bytes": "267993"
},
{
"name": "CMake",
"bytes": "11319"
},
{
"name": "Fortran",
"bytes": "3707"
},
{
"name": "Makefile",
"bytes": "14618"
},
{
"name": "Python",
"bytes": "12813086"
},
{
"name": "Roff",
"bytes": "3310"
},
{
"name": "Shell",
"bytes": "3855"
}
],
"symlink_target": ""
}
|
import sys
import random
from birdfish.input.midi import MidiDispatcher
from birdfish.input.osc import OSCDispatcher
from birdfish.lights import RGBLight, LightShow
from birdfish.output.lumos_network import LumosNetwork
# create a light show - manages the updating of all lights
show = LightShow()
# Create a network - in this case, universe 3
dmx3 = LumosNetwork(3)
# add the network to the show
show.networks.append(dmx3)
# create the input interfaces: one MIDI, one OSC (UDP port 8998)
dispatcher = MidiDispatcher("MidiKeys")
osc_dispatcher = OSCDispatcher(('0.0.0.0', 8998))
# create a single RGB light element driven by MIDI
single = RGBLight(
    start_channel=10,
    name="singletestb",
    attack_duration=0,
    decay_duration=0,
    release_duration=.75,
    sustain_value=1,
)
# Random starting hue at full saturation, then push HSV -> RGB channels.
single.hue = random.random()
single.saturation = 1
single.update_rgb()
single.bell_mode = True
# Second RGB element, driven by OSC instead of MIDI.
oscsingle = RGBLight(
    start_channel=91,
    name="singletestb",
    attack_duration=0,
    decay_duration=0,
    release_duration=.75,
    sustain_value=1,
)
oscsingle.hue = random.random()
oscsingle.saturation = 1
oscsingle.update_rgb()
# add the lights to the network
show.add_element(single, network=dmx3)
show.add_element(oscsingle, network=dmx3)
# set the input interface to trigger the element
# midi code 41 is the "Q" key on the qwerty keyboard for the midikeys app
dispatcher.add_observer((0, 41),single)
# OSC bindings: a toggle triggers the light, two faders map to hue/saturation
osc_dispatcher.add_trigger('/2/toggle2', oscsingle)
osc_dispatcher.add_map('/2/fader2', oscsingle, 'hue')
osc_dispatcher.add_map('/elements/fader1', oscsingle, 'saturation', in_range=(0,4))
# startup the midi communication - runs in its own thread
dispatcher.start()
osc_dispatcher.start()
# start the show in a try block so that we can catch ^C and stop the midi
# dispatcher thread
try:
    show.run_live()
except KeyboardInterrupt:
    # cleanup
    dispatcher.stop()
    osc_dispatcher.stop()
    sys.exit(0)
|
{
"content_hash": "f19c8077b0c874c172a6fb43aa6a663a",
"timestamp": "",
"source": "github",
"line_count": 75,
"max_line_length": 83,
"avg_line_length": 25.85333333333333,
"alnum_prop": 0.7106756059824652,
"repo_name": "ptone/BirdFish",
"id": "9828cbca28bd832409b002cbf7bb5c6c22ecc868",
"size": "1939",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/simpleRGB.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "176764"
},
{
"name": "Shell",
"bytes": "4515"
}
],
"symlink_target": ""
}
|
from .expectations import Expectation
from .spy import Method, Object
from .matchers import Base as MatcherBase, add
from .any import Any
def expect(actual, *args, **kwargs):
    """Entry point of the assertion DSL: wrap *actual* in an Expectation."""
    expectation = Expectation(actual, *args, **kwargs)
    return expectation
def spyOn(target, methodName, **kwargs):
    """Replace target.methodName with a spy; the original attribute
    (None if absent) is handed to Method.create for later restoration."""
    original = getattr(target, methodName, None)
    spy = Method.create(target, methodName, original, **kwargs)
    return spy
def createSpy(name=None, **methodsWithReturnValue):
    """Build a stand-alone spy object with canned method return values."""
    spy_object = Object.create(name, **methodsWithReturnValue)
    return spy_object
def stopSpying():
    # Delegate global spy teardown to Method.stop() — presumably restores
    # every method previously replaced by spyOn(); verify in spy.Method.
    Method.stop()
def any(clazz):
    # Wrap clazz in the Any matcher. NOTE: intentionally shadows the
    # builtin any() — this name is part of the package's public API.
    return Any(clazz)
# Feed every MatcherBase subclass to matchers.add — presumably registers
# the built-in matchers with the expectation machinery at import time.
add(*MatcherBase.__subclasses__())
|
{
"content_hash": "ea78ca3e4f2257741a15526c79fe62dc",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 70,
"avg_line_length": 22.142857142857142,
"alnum_prop": 0.7306451612903225,
"repo_name": "endeepak/pungi",
"id": "14ff094311f125cca23526e1049c1f83b3178870",
"size": "620",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pungi/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "39110"
}
],
"symlink_target": ""
}
|
import gzip
import os
import shutil
from functools import wraps

import click
def gzips(file_function):
    """Decorator: gzip whichever file path (or list of paths) the wrapped
    function returns, then pass the original return value through."""
    @wraps(file_function)
    def wrapper(*args, **kwargs):
        produced = file_function(*args, **kwargs)
        targets = produced if isinstance(produced, list) else [produced]
        for path in targets:
            zipup(path)
        return produced
    return wrapper
def zipup(file_path):
    """Gzip *file_path* into a sibling "<name>.gz" in the same directory.

    Streams the data with shutil.copyfileobj instead of reading the whole
    file into memory at once (the original did ``tf.write(p.read())``,
    which loads the entire file — a problem for large inputs).
    """
    click.echo("Zipping {}".format(file_path))
    path, filename = os.path.split(file_path)
    zipname = "{}.gz".format(filename)
    target = os.path.join(path, zipname)
    with open(file_path, "rb") as source:
        with gzip.open(target, "wb") as compressed:
            # Chunked copy -> constant memory regardless of file size
            shutil.copyfileobj(source, compressed)
def unzip(path, target):
    """Decompress gzip archive *path* into *target*, showing a progress bar."""
    click.echo("Unzipping {}".format(path))

    def read_chunks():
        # Stream the archive in 512 KiB pieces so huge files never have
        # to fit in memory all at once.
        with gzip.open(path, "rb") as archive:
            while True:
                piece = archive.read(size=512 * 1024)
                if not piece:
                    break
                yield piece

    with open(target, "wb") as out:
        with click.progressbar(iterable=read_chunks()) as progress:
            for piece in progress:
                out.write(piece)
def find(l, finder):
    """Return the first element of *l* for which finder(element) is truthy,
    or None when nothing matches.

    Short-circuits on the first hit via next() instead of materializing
    the full filtered list as the original did (O(n) work and memory even
    when the first element matched).
    """
    return next((element for element in l if finder(element)), None)
|
{
"content_hash": "24d5e4558bbb5ca17147595aa3b4c520",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 63,
"avg_line_length": 25.423076923076923,
"alnum_prop": 0.5514372163388804,
"repo_name": "biolink/ontobio",
"id": "0c57dea2e0ad880fce1091b0fc288c98974b1f56",
"size": "1322",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ontobio/validation/tools.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "855912"
},
{
"name": "Makefile",
"bytes": "1827"
},
{
"name": "Python",
"bytes": "1080306"
},
{
"name": "Shell",
"bytes": "502"
}
],
"symlink_target": ""
}
|
"""
mbed SDK
Copyright (c) 2016 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# Implementation of mbed configuration mechanism
from copy import deepcopy
from collections import OrderedDict
from tools.utils import json_file_to_dict, ToolException
from tools.targets import Target
import os
# Base class for all configuration exceptions
class ConfigException(Exception):
    """Base class for all mbed configuration errors."""
# This class keeps information about a single configuration parameter
class ConfigParameter:
    """A single configuration parameter: its value, origin, help text and
    the C macro name it is emitted under."""

    # name: the name of the configuration parameter
    # data: the data associated with the configuration parameter
    # unit_name: the unit (target/library/application) that defines this parameter
    # unit_kind: the kind of the unit ("target", "library" or "application")
    def __init__(self, name, data, unit_name, unit_kind):
        self.name = self.get_full_name(name, unit_name, unit_kind, allow_prefix = False)
        self.defined_by = self.get_display_name(unit_name, unit_kind)
        self.set_value(data.get("value", None), unit_name, unit_kind)
        self.help_text = data.get("help", None)
        self.required = data.get("required", False)
        self.macro_name = data.get("macro_name", "MBED_CONF_%s" % self.sanitize(self.name.upper()))
        self.config_errors = []

    # Return the full (prefixed) name of a parameter.
    # If the parameter already has a prefix, check if it is valid.
    # name: the simple (unqualified) name of the parameter
    # unit_name: the unit (target/library/application) that defines this parameter
    # unit_kind: the kind of the unit ("target", "library" or "application")
    # label: the name of the label in the 'target_config_overrides' section (optional)
    # allow_prefix: True to allow the original name to have a prefix, False otherwise
    @staticmethod
    def get_full_name(name, unit_name, unit_kind, label = None, allow_prefix = True):
        if name.find('.') == -1: # the name is not prefixed
            if unit_kind == "target":
                prefix = "target."
            elif unit_kind == "application":
                prefix = "app."
            else:
                prefix = unit_name + '.'
            return prefix + name
        # The name has a prefix, so check if it is valid
        if not allow_prefix:
            raise ConfigException("Invalid parameter name '%s' in '%s'" % (name, ConfigParameter.get_display_name(unit_name, unit_kind, label)))
        temp = name.split(".")
        # Check if the parameter syntax is correct (must be unit_name.parameter_name)
        if len(temp) != 2:
            raise ConfigException("Invalid parameter name '%s' in '%s'" % (name, ConfigParameter.get_display_name(unit_name, unit_kind, label)))
        prefix = temp[0]
        # Check if the given parameter prefix matches the expected prefix
        if (unit_kind == "library" and prefix != unit_name) or (unit_kind == "target" and prefix != "target"):
            raise ConfigException("Invalid prefix '%s' for parameter name '%s' in '%s'" % (prefix, name, ConfigParameter.get_display_name(unit_name, unit_kind, label)))
        return name

    # Return the name displayed for a unit when interrogating the origin
    # and the last-set place of a parameter.
    # unit_name: the unit (target/library/application) that defines this parameter
    # unit_kind: the kind of the unit ("target", "library" or "application")
    # label: the name of the label in the 'target_config_overrides' section (optional)
    @staticmethod
    def get_display_name(unit_name, unit_kind, label = None):
        if unit_kind == "target":
            return "target:" + unit_name
        elif unit_kind == "application":
            return "application%s" % ("[%s]" % label if label else "")
        else: # library
            return "library:%s%s" % (unit_name, "[%s]" % label if label else "")

    # "Sanitize" a name so that it is a valid C macro name:
    # simply replaces '.' and '-' with '_'.
    @staticmethod
    def sanitize(name):
        return name.replace('.', '_').replace('-', '_')

    # Set a value for this parameter and remember where it was set.
    # Booleans are converted to 1 (True) or 0 (False) for macro emission.
    # label: the name of the label in the 'target_config_overrides' section (optional)
    def set_value(self, value, unit_name, unit_kind, label = None):
        self.value = int(value) if isinstance(value, bool) else value
        self.set_by = self.get_display_name(unit_name, unit_kind, label)

    # Return the string representation of this configuration parameter
    def __str__(self):
        if self.value is not None:
            return '%s = %s (macro name: "%s")' % (self.name, self.value, self.macro_name)
        else:
            return '%s has no value' % self.name

    # Return a verbose, multi-line description of this parameter
    def get_verbose_description(self):
        desc = "Name: %s%s\n" % (self.name, " (required parameter)" if self.required else "")
        if self.help_text:
            desc += " Description: %s\n" % self.help_text
        desc += " Defined by: %s\n" % self.defined_by
        if not self.value:
            return desc + " No value set"
        desc += " Macro name: %s\n" % self.macro_name
        desc += " Value: %s (set by %s)" % (self.value, self.set_by)
        return desc
# A representation of a configuration macro. It handles both macros without a value (MACRO)
# and with a value (MACRO=VALUE)
class ConfigMacro:
    """A configuration macro: either a bare "NAME" or a "NAME=VALUE" pair."""

    def __init__(self, name, unit_name, unit_kind):
        self.name = name
        self.defined_by = ConfigParameter.get_display_name(unit_name, unit_kind)
        if name.find("=") != -1:
            # Valued form: exactly one '=' separates name and value.
            parts = name.split("=")
            if len(parts) != 2:
                raise ValueError("Invalid macro definition '%s' in '%s'" % (name, self.defined_by))
            self.macro_name, self.macro_value = parts
        else:
            # Bare form: macro without a value.
            self.macro_name = name
            self.macro_value = None
# Representation of overrides for cumulative attributes
class ConfigCumulativeOverride:
    """Tracks additions/removals (and an optional strict whitelist) for one
    cumulative target attribute."""

    # `additions`/`removals` now default to immutable empty tuples instead of
    # the original mutable `set()` defaults (shared-mutable-default pitfall);
    # both are copied into fresh sets below, so behavior is unchanged.
    def __init__(self, name, additions=(), removals=(), strict=False):
        self.name = name
        self.additions = set(additions)
        self.removals = set(removals)
        self.strict = strict

    # Record attributes that must be REMOVED from the cumulative attribute.
    # (The original comment here said "Add attr" — it was swapped with the
    # one on add_cumulative_overrides.)
    def remove_cumulative_overrides(self, overrides):
        for override in overrides:
            if override in self.additions:
                raise ConfigException("Configuration conflict. The %s %s both added and removed." % (self.name[:-1], override))
        self.removals |= set(overrides)

    # Record attributes that must be ADDED to the cumulative attribute.
    def add_cumulative_overrides(self, overrides):
        for override in overrides:
            if (override in self.removals or (self.strict and override not in self.additions)):
                raise ConfigException("Configuration conflict. The %s %s both added and removed." % (self.name[:-1], override))
        self.additions |= set(overrides)

    # Replace the additions with exactly `overrides` and lock the set (strict).
    def strict_cumulative_overrides(self, overrides):
        self.remove_cumulative_overrides(self.additions - set(overrides))
        self.add_cumulative_overrides(overrides)
        self.strict = True

    # Apply accumulated additions/removals to `target`'s attribute list.
    def update_target(self, target):
        setattr(target, self.name, list(
            (set(getattr(target, self.name, [])) | self.additions) - self.removals))
# 'Config' implements the mbed configuration mechanism
class Config:
    """Implements the mbed configuration mechanism (class continues below)."""

    # Libraries and applications have different names for their configuration files
    __mbed_app_config_name = "mbed_app.json"
    __mbed_lib_config_name = "mbed_lib.json"

    # Allowed keys in configuration dictionaries
    # (targets can have any kind of keys, so this validation is not applicable to them)
    __allowed_keys = {
        "library": set(["name", "config", "target_overrides", "macros", "__config_path"]),
        "application": set(["config", "custom_targets", "target_overrides", "macros", "__config_path"])
    }

    # Allowed features in configurations
    __allowed_features = [
        "UVISOR", "BLE", "CLIENT", "IPV4", "IPV6", "COMMON_PAL", "STORAGE"
    ]

    # The initialization arguments for Config are:
    # target: the name of the mbed target used for this configuration instance
    # top_level_dirs: a list of top level source directories (where mbed_app_config.json could be found)
    # __init__ will look for the application configuration file in top_level_dirs.
    # If found once, it'll parse it and check if it has a custom_targets function.
    # If it does, it'll update the list of targets as needed.
    # If found more than once, an exception is raised.
    # top_level_dirs can be None (in this case, mbed_app_config.json will not be searched)
def __init__(self, target, top_level_dirs = []):
app_config_location = None
for s in (top_level_dirs or []):
full_path = os.path.join(s, self.__mbed_app_config_name)
if os.path.isfile(full_path):
if app_config_location is not None:
raise ConfigException("Duplicate '%s' file in '%s' and '%s'" % (self.__mbed_app_config_name, app_config_location, full_path))
else:
app_config_location = full_path
self.app_config_data = json_file_to_dict(app_config_location) if app_config_location else {}
# Check the keys in the application configuration data
unknown_keys = set(self.app_config_data.keys()) - self.__allowed_keys["application"]
if unknown_keys:
raise ConfigException("Unknown key(s) '%s' in %s" % (",".join(unknown_keys), self.__mbed_app_config_name))
# Update the list of targets with the ones defined in the application config, if applicable
Target.add_py_targets(self.app_config_data.get("custom_targets", {}))
self.lib_config_data = {}
# Make sure that each config is processed only once
self.processed_configs = {}
self.target = target if isinstance(target, basestring) else target.name
self.target_labels = Target.get_target(self.target).get_labels()
self.cumulative_overrides = { key: ConfigCumulativeOverride(key)
for key in Target._Target__cumulative_attributes }
self._process_config_and_overrides(self.app_config_data, {}, "app", "application")
self.target_labels = Target.get_target(self.target).get_labels()
# Add one or more configuration files
def add_config_files(self, flist):
for f in flist:
if not f.endswith(self.__mbed_lib_config_name):
continue
full_path = os.path.normpath(os.path.abspath(f))
# Check that we didn't already process this file
if self.processed_configs.has_key(full_path):
continue
self.processed_configs[full_path] = True
# Read the library configuration and add a "__full_config_path" attribute to it
cfg = json_file_to_dict(f)
cfg["__config_path"] = full_path
# If there's already a configuration for a module with the same name, exit with error
if self.lib_config_data.has_key(cfg["name"]):
raise ConfigException("Library name '%s' is not unique (defined in '%s' and '%s')" % (cfg["name"], full_path, self.lib_config_data[cfg["name"]]["__config_path"]))
self.lib_config_data[cfg["name"]] = cfg
# Helper function: process a "config_parameters" section in either a target, a library or the application
# data: a dictionary with the configuration parameters
# params: storage for the discovered configuration parameters
# unit_name: the unit (target/library/application) that defines this parameter
# unit_kind: the kind of the unit ("target", "library" or "application")
    def _process_config_parameters(self, data, params, unit_name, unit_kind):
        """Add each entry of a "config" section to `params` as a
        ConfigParameter; raise ConfigException on duplicate names.

        data: dictionary with the configuration parameters
        params: storage for the discovered configuration parameters
        unit_name/unit_kind: the defining unit and its kind.
        """
        for name, v in data.items():
            full_name = ConfigParameter.get_full_name(name, unit_name, unit_kind)
            # If the parameter was already defined, raise an error
            if full_name in params:
                raise ConfigException("Parameter name '%s' defined in both '%s' and '%s'" % (name, ConfigParameter.get_display_name(unit_name, unit_kind), params[full_name].defined_by))
            # A non-dict value is the shortcut form; a dict is a full definition
            params[full_name] = ConfigParameter(name, v if isinstance(v, dict) else {"value": v}, unit_name, unit_kind)
        return params
# Helper function: process "config_parameters" and "target_config_overrides" in a given dictionary
# data: the configuration data of the library/appliation
# params: storage for the discovered configuration parameters
# unit_name: the unit (library/application) that defines this parameter
# unit_kind: the kind of the unit ("library" or "application")
    def _process_config_and_overrides(self, data, params, unit_name, unit_kind):
        """Process "config" definitions and "target_overrides" of a unit.

        data: configuration data of the library/application
        params: accumulator for discovered ConfigParameter instances
        unit_name: the unit (library/application) that defines them
        unit_kind: "library" or "application"
        Override errors are collected in self.config_errors rather than
        raised immediately (validate_config raises the first one later).
        NOTE: uses iterkeys/iteritems/itervalues, i.e. Python 2 only.
        """
        self.config_errors = []
        self._process_config_parameters(data.get("config", {}), params, unit_name, unit_kind)
        for label, overrides in data.get("target_overrides", {}).items():
            # If the label is defined by the target or it has the special value "*", process the overrides
            if (label == '*') or (label in self.target_labels):
                # Check for invalid cumulative overrides in libraries
                if (unit_kind == 'library' and
                    any(attr.startswith('target.extra_labels') for attr in overrides.iterkeys())):
                    raise ConfigException("Target override '%s' in '%s' is only allowed at the application level"
                                          % ("target.extra_labels", ConfigParameter.get_display_name(unit_name, unit_kind, label)))
                # Parse out cumulative overrides
                # (plain key replaces, "_add" appends, "_remove" deletes;
                # processed keys are removed so the loop below skips them)
                for attr, cumulatives in self.cumulative_overrides.iteritems():
                    if 'target.'+attr in overrides:
                        cumulatives.strict_cumulative_overrides(overrides['target.'+attr])
                        del overrides['target.'+attr]
                    if 'target.'+attr+'_add' in overrides:
                        cumulatives.add_cumulative_overrides(overrides['target.'+attr+'_add'])
                        del overrides['target.'+attr+'_add']
                    if 'target.'+attr+'_remove' in overrides:
                        cumulatives.remove_cumulative_overrides(overrides['target.'+attr+'_remove'])
                        del overrides['target.'+attr+'_remove']
                # Consider the others as overrides
                for name, v in overrides.items():
                    # Get the full name of the parameter
                    full_name = ConfigParameter.get_full_name(name, unit_name, unit_kind, label)
                    if full_name in params:
                        params[full_name].set_value(v, unit_name, unit_kind, label)
                    else:
                        # Overriding something never defined: recorded, not raised
                        self.config_errors.append(ConfigException("Attempt to override undefined parameter '%s' in '%s'"
                                                                  % (full_name, ConfigParameter.get_display_name(unit_name, unit_kind, label))))
        # Apply the accumulated cumulative overrides to the target object
        for cumulatives in self.cumulative_overrides.itervalues():
            cumulatives.update_target(Target.get_target(self.target))
        return params
# Read and interpret configuration data defined by targets
    def get_target_config_data(self):
        """Collect configuration parameters defined and overridden by targets.

        Walks the target inheritance chain from the most generic ancestor
        down to self.target; each target may define new parameters ("config")
        and override inherited ones ("overrides").
        Returns a dict mapping full parameter names to ConfigParameter.
        Raises ConfigException for an override of an undefined/unreachable
        parameter.
        """
        # We consider the resolution order for our target and sort it by level reversed,
        # so that we first look at the top level target (the parent), then its direct children,
        # then the children's children and so on, until we reach self.target
        # TODO: this might not work so well in some multiple inheritance scenarios
        # At each step, look at two keys of the target data:
        # - config_parameters: used to define new configuration parameters
        # - config_overrides: used to override already defined configuration parameters
        params, json_data = {}, Target.get_json_target_data()
        resolution_order = [e[0] for e in sorted(Target.get_target(self.target).resolution_order, key = lambda e: e[1], reverse = True)]
        for tname in resolution_order:
            # Read the target data directly from its description
            t = json_data[tname]
            # Process definitions first
            self._process_config_parameters(t.get("config", {}), params, tname, "target")
            # Then process overrides
            for name, v in t.get("overrides", {}).items():
                full_name = ConfigParameter.get_full_name(name, tname, "target")
                # If the parameter name is not defined or if there isn't a path from this target to the target where the
                # parameter was defined in the target inheritance tree, raise an error
                # We need to use 'defined_by[7:]' to remove the "target:" prefix from defined_by
                if (not full_name in params) or (not params[full_name].defined_by[7:] in Target.get_target(tname).resolution_order_names):
                    raise ConfigException("Attempt to override undefined parameter '%s' in '%s'" % (name, ConfigParameter.get_display_name(tname, "target")))
                # Otherwise update the value of the parameter
                params[full_name].set_value(v, tname, "target")
        return params
# Helper function: process a macro definition, checking for incompatible duplicate definitions
# mlist: list of macro names to process
# macros: dictionary with currently discovered macros
# unit_name: the unit (library/application) that defines this macro
# unit_kind: the kind of the unit ("library" or "application")
def _process_macros(self, mlist, macros, unit_name, unit_kind):
for mname in mlist:
m = ConfigMacro(mname, unit_name, unit_kind)
if (m.macro_name in macros) and (macros[m.macro_name].name != mname):
# Found an incompatible definition of the macro in another module, so raise an error
full_unit_name = ConfigParameter.get_display_name(unit_name, unit_kind)
raise ConfigException("Macro '%s' defined in both '%s' and '%s' with incompatible values" % (m.macro_name, macros[m.macro_name].defined_by, full_unit_name))
macros[m.macro_name] = m
# Read and interpret configuration data defined by libs
# It is assumed that "add_config_files" above was already called and the library configuration data
# exists in self.lib_config_data
def get_lib_config_data(self):
all_params, macros = {}, {}
for lib_name, lib_data in self.lib_config_data.items():
unknown_keys = set(lib_data.keys()) - self.__allowed_keys["library"]
if unknown_keys:
raise ConfigException("Unknown key(s) '%s' in %s" % (",".join(unknown_keys), lib_name))
all_params.update(self._process_config_and_overrides(lib_data, {}, lib_name, "library"))
self._process_macros(lib_data.get("macros", []), macros, lib_name, "library")
return all_params, macros
# Read and interpret the configuration data defined by the target
# The target can override any configuration parameter, as well as define its own configuration data
# params: the dictionary with configuration parameters found so far (in the target and in libraries)
# macros: the list of macros defined in the configuration
def get_app_config_data(self, params, macros):
app_cfg = self.app_config_data
# The application can have a "config_parameters" and a "target_config_overrides" section just like a library
self._process_config_and_overrides(app_cfg, params, "app", "application")
# The application can also defined macros
self._process_macros(app_cfg.get("macros", []), macros, "app", "application")
# Return the configuration data in two parts:
# - params: a dictionary with (name, ConfigParam) entries
# - macros: the list of macros defined with "macros" in libraries and in the application (as ConfigMacro instances)
def get_config_data(self):
all_params = self.get_target_config_data()
lib_params, macros = self.get_lib_config_data()
all_params.update(lib_params)
self.get_app_config_data(all_params, macros)
return all_params, macros
# Helper: verify if there are any required parameters without a value in 'params'
@staticmethod
def _check_required_parameters(params):
for p in params.values():
if p.required and (p.value is None):
raise ConfigException("Required parameter '%s' defined by '%s' doesn't have a value" % (p.name, p.defined_by))
# Return the macro definitions generated for a dictionary of configuration parameters
# params: a dictionary of (name, ConfigParameters instance) mappings
@staticmethod
def parameters_to_macros(params):
return ['%s=%s' % (m.macro_name, m.value) for m in params.values() if m.value is not None]
# Return the macro definitions generated for a dictionary of ConfigMacros (as returned by get_config_data)
# params: a dictionary of (name, ConfigMacro instance) mappings
@staticmethod
def config_macros_to_macros(macros):
return [m.name for m in macros.values()]
# Return the configuration data converted to a list of C macros
# config - configuration data as (ConfigParam instances, ConfigMacro instances) tuple
# (as returned by get_config_data())
@staticmethod
def config_to_macros(config):
params, macros = config[0], config[1]
Config._check_required_parameters(params)
return Config.config_macros_to_macros(macros) + Config.parameters_to_macros(params)
# Return the configuration data converted to a list of C macros
def get_config_data_macros(self):
return self.config_to_macros(self.get_config_data())
# Returns any features in the configuration data
def get_features(self):
params, _ = self.get_config_data()
self._check_required_parameters(params)
self.cumulative_overrides['features'].update_target(Target.get_target(self.target))
features = Target.get_target(self.target).features
for feature in features:
if feature not in self.__allowed_features:
raise ConfigException("Feature '%s' is not a supported features" % feature)
return features
# Validate configuration settings. This either returns True or raises an exception
def validate_config(self):
if self.config_errors:
raise self.config_errors[0]
return True
# Loads configuration data from resources. Also expands resources based on defined features settings
def load_resources(self, resources):
# Update configuration files until added features creates no changes
prev_features = set()
while True:
# Add/update the configuration with any .json files found while scanning
self.add_config_files(resources.json_files)
# Add features while we find new ones
features = set(self.get_features())
if features == prev_features:
break
for feature in features:
if feature in resources.features:
resources.add(resources.features[feature])
prev_features = features
self.validate_config()
return resources
# Return the configuration data converted to the content of a C header file,
# meant to be included to a C/C++ file. The content is returned as a string.
# If 'fname' is given, the content is also written to the file called "fname".
# WARNING: if 'fname' names an existing file, that file will be overwritten!
# config - configuration data as (ConfigParam instances, ConfigMacro instances) tuple
# (as returned by get_config_data())
@staticmethod
def config_to_header(config, fname = None):
params, macros = config[0], config[1]
Config._check_required_parameters(params)
header_data = "// Automatically generated configuration file.\n"
header_data += "// DO NOT EDIT, content will be overwritten.\n\n"
header_data += "#ifndef __MBED_CONFIG_DATA__\n"
header_data += "#define __MBED_CONFIG_DATA__\n\n"
# Compute maximum length of macro names for proper alignment
max_param_macro_name_len = max([len(m.macro_name) for m in params.values() if m.value is not None]) if params else 0
max_direct_macro_name_len = max([len(m.macro_name) for m in macros.values()]) if macros else 0
max_macro_name_len = max(max_param_macro_name_len, max_direct_macro_name_len)
# Compute maximum length of macro values for proper alignment
max_param_macro_val_len = max([len(str(m.value)) for m in params.values() if m.value is not None]) if params else 0
max_direct_macro_val_len = max([len(m.macro_value or "") for m in macros.values()]) if macros else 0
max_macro_val_len = max(max_param_macro_val_len, max_direct_macro_val_len)
# Generate config parameters first
if params:
header_data += "// Configuration parameters\n"
for m in params.values():
if m.value is not None:
header_data += "#define {0:<{1}} {2!s:<{3}} // set by {4}\n".format(m.macro_name, max_macro_name_len, m.value, max_macro_val_len, m.set_by)
# Then macros
if macros:
header_data += "// Macros\n"
for m in macros.values():
if m.macro_value:
header_data += "#define {0:<{1}} {2!s:<{3}} // defined by {4}\n".format(m.macro_name, max_macro_name_len, m.macro_value, max_macro_val_len, m.defined_by)
else:
header_data += "#define {0:<{1}} // defined by {2}\n".format(m.macro_name, max_macro_name_len + max_macro_val_len + 1, m.defined_by)
header_data += "\n#endif\n"
# If fname is given, write "header_data" to it
if fname:
with open(fname, "wt") as f:
f.write(header_data)
return header_data
# Return the configuration data converted to the content of a C header file,
# meant to be included to a C/C++ file. The content is returned as a string.
# If 'fname' is given, the content is also written to the file called "fname".
# WARNING: if 'fname' names an existing file, that file will be overwritten!
def get_config_data_header(self, fname = None):
return self.config_to_header(self.get_config_data(), fname)
|
{
"content_hash": "d536c4c903abd3e2bbd28fb7af5d4339",
"timestamp": "",
"source": "github",
"line_count": 508,
"max_line_length": 185,
"avg_line_length": 54.71653543307087,
"alnum_prop": 0.6416031083609153,
"repo_name": "Neuromancer2701/mbedROS2_STF7",
"id": "905bc20b6e4bf6ee34b52c9295feb9735ccc0614",
"size": "27796",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mbed-os/tools/config.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "5505461"
},
{
"name": "C",
"bytes": "142124708"
},
{
"name": "C++",
"bytes": "6219975"
},
{
"name": "CMake",
"bytes": "27635"
},
{
"name": "HTML",
"bytes": "1534120"
},
{
"name": "JavaScript",
"bytes": "1494"
},
{
"name": "Makefile",
"bytes": "131050"
},
{
"name": "Objective-C",
"bytes": "354995"
},
{
"name": "Python",
"bytes": "853760"
},
{
"name": "Shell",
"bytes": "24790"
},
{
"name": "XSLT",
"bytes": "11192"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
from zerver.models import get_client
from zerver.lib.actions import check_send_message
from zerver.lib.response import json_success, json_error
from zerver.decorator import REQ, has_request_variables, api_key_only_webhook_view
import ujson
# Message topic: just the Codeship project name.
CODESHIP_SUBJECT_TEMPLATE = '{project_name}'
# Message body announcing a build event.
CODESHIP_MESSAGE_TEMPLATE = '[Build]({build_url}) triggered by {committer} on {branch} branch {status}.'
# Fallback phrasing for build statuses not listed in CODESHIP_STATUS_MAPPER.
CODESHIP_DEFAULT_STATUS = 'has {status} status'
# Maps raw Codeship build statuses to the past-tense verb used in messages.
CODESHIP_STATUS_MAPPER = {
    'testing': 'started',
    'error': 'failed',
    'success': 'succeeded',
}
@api_key_only_webhook_view
@has_request_variables
def api_codeship_webhook(request, user_profile, stream=REQ(default='codeship')):
    """Webhook handler for Codeship build notifications.

    Parses the 'build' object from the JSON request body and posts a
    message to the given stream (default: 'codeship'). Returns a JSON
    error response for missing keys or malformed JSON.
    """
    try:
        payload = ujson.loads(request.body)['build']
        subject = get_subject_for_http_request(payload)
        body = get_body_for_http_request(payload)
    except KeyError as e:
        # NOTE(review): e.message exists only on Python 2; under Python 3
        # this line would raise AttributeError -- confirm the interpreter.
        return json_error("Missing key {} in JSON".format(e.message))
    except ValueError as e:
        return json_error("Malformed JSON")
    check_send_message(user_profile, get_client('ZulipCodeshipWebhook'), 'stream', [stream], subject, body)
    return json_success()
def get_subject_for_http_request(payload):
    """Build the message topic from the Codeship project name."""
    project = payload['project_name']
    return CODESHIP_SUBJECT_TEMPLATE.format(project_name=project)
def get_body_for_http_request(payload):
    """Render the message body describing the build event."""
    status_text = get_status_message(payload)
    return CODESHIP_MESSAGE_TEMPLATE.format(build_url=payload['build_url'],
                                            committer=payload['committer'],
                                            branch=payload['branch'],
                                            status=status_text)
def get_status_message(payload):
    """Map the raw build status to a past-tense phrase for the message."""
    raw_status = payload['status']
    if raw_status in CODESHIP_STATUS_MAPPER:
        return CODESHIP_STATUS_MAPPER[raw_status]
    # Unknown status: fall back to the generic "has ... status" wording
    return CODESHIP_DEFAULT_STATUS.format(status=raw_status)
|
{
"content_hash": "f1884c6eee9472d794ca9c07c7b53f11",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 107,
"avg_line_length": 33.55769230769231,
"alnum_prop": 0.712893982808023,
"repo_name": "dwrpayne/zulip",
"id": "d706ed1fb2fda85a48eeea73b95f4c62609b464e",
"size": "1783",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "zerver/views/webhooks/codeship.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "164"
},
{
"name": "CSS",
"bytes": "182566"
},
{
"name": "CoffeeScript",
"bytes": "18435"
},
{
"name": "Groovy",
"bytes": "5516"
},
{
"name": "HTML",
"bytes": "385505"
},
{
"name": "JavaScript",
"bytes": "1571916"
},
{
"name": "Nginx",
"bytes": "1228"
},
{
"name": "PHP",
"bytes": "18930"
},
{
"name": "Pascal",
"bytes": "1113"
},
{
"name": "Perl",
"bytes": "383634"
},
{
"name": "Puppet",
"bytes": "95624"
},
{
"name": "Python",
"bytes": "1877232"
},
{
"name": "Ruby",
"bytes": "255867"
},
{
"name": "Shell",
"bytes": "32357"
}
],
"symlink_target": ""
}
|
from Base import *
from Drawing import *
from Math import *
|
{
"content_hash": "de251864a8cba4bd9cdbfcbd1f0abfbc",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 21,
"avg_line_length": 20,
"alnum_prop": 0.75,
"repo_name": "kgn/pygml",
"id": "e6cf5010dd0b58ec15b50183b49e28654837f9e3",
"size": "60",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "PyGML/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "26500"
}
],
"symlink_target": ""
}
|
from tempest.services.compute.json import hypervisor_client
from tempest.tests import fake_auth_provider
from tempest.tests.services.compute import base
class TestHypervisorClient(base.BaseComputeServiceTest):
    """Unit tests for HypervisorClient.

    Every API call is exercised twice: once with a str response body and
    once with a bytes response body, via the shared
    check_service_client_function helper.
    """

    hypervisor_id = "1"
    hypervisor_name = "hyper.hostname.com"

    def setUp(self):
        super(TestHypervisorClient, self).setUp()
        auth = fake_auth_provider.FakeAuthProvider()
        self.client = hypervisor_client.HypervisorClient(
            auth, 'compute', 'regionOne')

    def test_list_hypervisor_str_body(self):
        self._test_list_hypervisor(bytes_body=False)

    def test_list_hypervisor_byte_body(self):
        self._test_list_hypervisor(bytes_body=True)

    def _test_list_hypervisor(self, bytes_body=False):
        # Canned response for listing hypervisors
        mock_body = {
            "hypervisors": [
                {"id": 1,
                 "hypervisor_hostname": "hypervisor1.hostname.com"},
                {"id": 2,
                 "hypervisor_hostname": "hypervisor2.hostname.com"},
            ]
        }
        self.check_service_client_function(
            self.client.list_hypervisors,
            'tempest.common.service_client.ServiceClient.get',
            mock_body, bytes_body)

    def test_show_hypervisor_str_body(self):
        self._test_show_hypervisor(bytes_body=False)

    def test_show_hypervisor_byte_body(self):
        self._test_show_hypervisor(bytes_body=True)

    def _test_show_hypervisor(self, bytes_body=False):
        # Canned response for showing a single hypervisor
        mock_body = {
            "hypervisor": {
                "cpu_info": "?",
                "current_workload": 0,
                "disk_available_least": 1,
                "host_ip": "10.10.10.10",
                "free_disk_gb": 1028,
                "free_ram_mb": 7680,
                "hypervisor_hostname": "fake-mini",
                "hypervisor_type": "fake",
                "hypervisor_version": 1,
                "id": 1,
                "local_gb": 1028,
                "local_gb_used": 0,
                "memory_mb": 8192,
                "memory_mb_used": 512,
                "running_vms": 0,
                "service": {
                    "host": "fake_host",
                    "id": 2,
                },
                "vcpus": 1,
                "vcpus_used": 0,
            }
        }
        self.check_service_client_function(
            self.client.show_hypervisor,
            'tempest.common.service_client.ServiceClient.get',
            mock_body, bytes_body,
            hypervisor_id=self.hypervisor_id)

    def test_list_servers_on_hypervisor_str_body(self):
        self._test_list_servers_on_hypervisor(bytes_body=False)

    def test_list_servers_on_hypervisor_byte_body(self):
        self._test_list_servers_on_hypervisor(bytes_body=True)

    def _test_list_servers_on_hypervisor(self, bytes_body=False):
        # Canned response for listing the servers hosted by a hypervisor
        mock_body = {
            "hypervisors": [
                {"id": 1,
                 "hypervisor_hostname": "hyper.hostname.com",
                 "servers": [
                     {"uuid": "e1ae8fc4-b72d-4c2f-a427-30dd420b6277",
                      "name": "instance-00000001"},
                     {"uuid": "e1ae8fc4-b72d-4c2f-a427-30dd42066666",
                      "name": "instance-00000002"},
                 ]},
            ]
        }
        self.check_service_client_function(
            self.client.list_servers_on_hypervisor,
            'tempest.common.service_client.ServiceClient.get',
            mock_body, bytes_body,
            hypervisor_name=self.hypervisor_name)

    def test_show_hypervisor_statistics_str_body(self):
        self._test_show_hypervisor_statistics(bytes_body=False)

    def test_show_hypervisor_statistics_byte_body(self):
        self._test_show_hypervisor_statistics(bytes_body=True)

    def _test_show_hypervisor_statistics(self, bytes_body=False):
        # Canned response for the aggregated hypervisor statistics
        mock_body = {
            "hypervisor_statistics": {
                "count": 1,
                "current_workload": 0,
                "disk_available_least": 0,
                "free_disk_gb": 1028,
                "free_ram_mb": 7680,
                "local_gb": 1028,
                "local_gb_used": 0,
                "memory_mb": 8192,
                "memory_mb_used": 512,
                "running_vms": 0,
                "vcpus": 1,
                "vcpus_used": 0,
            }
        }
        self.check_service_client_function(
            self.client.show_hypervisor_statistics,
            'tempest.common.service_client.ServiceClient.get',
            mock_body, bytes_body)

    def test_show_hypervisor_uptime_str_body(self):
        self._test_show_hypervisor_uptime(bytes_body=False)

    def test_show_hypervisor_uptime_byte_body(self):
        self._test_show_hypervisor_uptime(bytes_body=True)

    def _test_show_hypervisor_uptime(self, bytes_body=False):
        # Canned response for a hypervisor's uptime report
        uptime_text = (" 08:32:11 up 93 days, 18:25, 12 users, "
                       " load average: 0.20, 0.12, 0.14")
        mock_body = {
            "hypervisor": {
                "hypervisor_hostname": "fake-mini",
                "id": 1,
                "uptime": uptime_text,
            }
        }
        self.check_service_client_function(
            self.client.show_hypervisor_uptime,
            'tempest.common.service_client.ServiceClient.get',
            mock_body, bytes_body,
            hypervisor_id=self.hypervisor_id)

    def test_search_hypervisor_str_body(self):
        self._test_search_hypervisor(bytes_body=False)

    def test_search_hypervisor_byte_body(self):
        self._test_search_hypervisor(bytes_body=True)

    def _test_search_hypervisor(self, bytes_body=False):
        # Canned response for searching hypervisors by name
        mock_body = {
            "hypervisors": [
                {"id": 2,
                 "hypervisor_hostname": "hyper.hostname.com"},
            ]
        }
        self.check_service_client_function(
            self.client.search_hypervisor,
            'tempest.common.service_client.ServiceClient.get',
            mock_body, bytes_body,
            hypervisor_name=self.hypervisor_name)
|
{
"content_hash": "41cd51b639f0671070ef7648e2564a7e",
"timestamp": "",
"source": "github",
"line_count": 153,
"max_line_length": 68,
"avg_line_length": 37.3202614379085,
"alnum_prop": 0.5709281961471103,
"repo_name": "rakeshmi/tempest",
"id": "441e7e6348b4e7eaf25b08de2086ddb7791827eb",
"size": "6312",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tempest/tests/services/compute/test_hypervisor_client.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "2867452"
},
{
"name": "Shell",
"bytes": "8578"
}
],
"symlink_target": ""
}
|
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.db import models
from django.utils.translation import ugettext_lazy as _
# Fail fast at import time: the address fields added below depend on the
# 'countries' app (incuna-countries) being installed.
if 'countries' not in settings.INSTALLED_APPS:
    raise ImproperlyConfigured("The address extension requires incuna-countries (http://github.com/incuna/incuna-countries) - ensure that 'countries' is added to your INSTALLED_APPS.")
def register(cls, admin_cls):
    """Add postal-address and telephone fields to the profile model 'cls'.

    When 'admin_cls' is given, the new fields are also wired into the
    admin: search fields, a country list filter and a collapsed 'Address'
    fieldset (if the admin declares fieldsets).
    """
    address_fields = (
        ('address1', models.CharField(max_length=255, verbose_name=_('address'), null=True, blank=True)),
        ('address2', models.CharField(max_length=255, verbose_name=_('address 2'), null=True, blank=True)),
        ('city', models.CharField(max_length=255, verbose_name=_('town/city'), null=True, blank=True)),
        ('region', models.CharField(max_length=255, verbose_name=_('county/state/province'), null=True, blank=True)),
        ('postcode', models.CharField(max_length=15, verbose_name=_('postcode'), null=True, blank=True)),
        ('country', models.ForeignKey('countries.Country', null=True, blank=True)),
        ('telephone', models.CharField(max_length=32, verbose_name=_('telephone'), null=True, blank=True)),
    )
    for field_name, field in address_fields:
        cls.add_to_class(field_name, field)
    if admin_cls:
        admin_cls.search_fields += ['address1', 'address2', 'city', 'region', 'postcode', 'telephone']
        admin_cls.list_display_filter += ['country', ]
        if admin_cls.fieldsets:
            admin_cls.fieldsets.append((_('Address'), {
                'fields': ['address1', 'address2', 'city', 'region', 'postcode', 'country', 'telephone'],
                'classes': ('collapse',),
            }))
|
{
"content_hash": "c2ed4c53dce643409ae6ab8fac1223f4",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 184,
"avg_line_length": 58.37931034482759,
"alnum_prop": 0.6822209096278795,
"repo_name": "incuna/django-extensible-profiles",
"id": "b4be388fecd613c4127988f035eb5e72b568e59d",
"size": "1693",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "profiles/extensions/address.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "35206"
}
],
"symlink_target": ""
}
|
import logging
from django.template import defaultfilters as filters
from django.utils.translation import ugettext_lazy as _
from django.utils.translation import ungettext_lazy
from horizon import tables
from openstack_dashboard.api import sahara as saharaclient
LOG = logging.getLogger(__name__)
class NodeGroupTemplatesFilterAction(tables.FilterAction):
    """Server-side filtering of node group templates by name, plugin or
    plugin version."""
    filter_type = "server"
    filter_choices = (
        ('name', _("Name"), True),
        ('plugin', _("Plugin"), True),
        ('hadoop_version', _("Version"), True),
    )
class CreateNodegroupTemplate(tables.LinkAction):
    """Table action linking to the node group template creation form."""
    name = "create"
    verbose_name = _("Create Template")
    icon = "plus"
    classes = ("ajax-modal", "create-nodegrouptemplate-btn")
    url = ("horizon:project:data_processing.nodegroup_templates:"
           "create-nodegroup-template")
class ConfigureNodegroupTemplate(tables.LinkAction):
    """Table action linking to the template configuration form.

    The button itself is hidden (inline display: none style).
    """
    name = "configure"
    verbose_name = _("Configure Template")
    icon = "plus"
    classes = ("ajax-modal", "configure-nodegrouptemplate-btn")
    attrs = {"style": "display: none"}
    url = ("horizon:project:data_processing.nodegroup_templates:"
           "configure-nodegroup-template")
class CopyTemplate(tables.LinkAction):
    """Row action linking to the template copy form."""
    name = "copy"
    verbose_name = _("Copy Template")
    classes = ("ajax-modal", )
    url = "horizon:project:data_processing.nodegroup_templates:copy"
class EditTemplate(tables.LinkAction):
    """Row action linking to the template edit form."""
    name = "edit"
    verbose_name = _("Edit Template")
    classes = ("ajax-modal", )
    url = "horizon:project:data_processing.nodegroup_templates:edit"
class DeleteTemplate(tables.DeleteAction):
    """Deletes one or more node group templates via the sahara client."""

    @staticmethod
    def action_present(count):
        # Button label, pluralized by the number of selected rows
        return ungettext_lazy(u"Delete Template", u"Delete Templates", count)

    @staticmethod
    def action_past(count):
        # Past-tense confirmation message, pluralized likewise
        return ungettext_lazy(u"Deleted Template", u"Deleted Templates", count)

    def delete(self, request, template_id):
        saharaclient.nodegroup_template_delete(request, template_id)
class NodegroupTemplatesTable(tables.DataTable):
    """Table listing Sahara node group templates with their actions."""

    name = tables.Column(
        "name",
        verbose_name=_("Name"),
        link="horizon:project:data_processing.nodegroup_templates:details")
    plugin_name = tables.Column("plugin_name", verbose_name=_("Plugin"))
    hadoop_version = tables.Column("hadoop_version", verbose_name=_("Version"))
    node_processes = tables.Column(
        "node_processes",
        verbose_name=_("Node Processes"),
        wrap_list=True,
        filters=(filters.unordered_list,))

    class Meta(object):
        name = "nodegroup_templates"
        verbose_name = _("Node Group Templates")
        table_actions = (CreateNodegroupTemplate,
                         ConfigureNodegroupTemplate,
                         DeleteTemplate,
                         NodeGroupTemplatesFilterAction,)
        row_actions = (EditTemplate, CopyTemplate, DeleteTemplate)
|
{
"content_hash": "d3b90ee8a1989c2a9b940a959e1bae8e",
"timestamp": "",
"source": "github",
"line_count": 97,
"max_line_length": 75,
"avg_line_length": 32.876288659793815,
"alnum_prop": 0.6083411727814362,
"repo_name": "yjxtogo/horizon",
"id": "cfa7a189122b0b46bc2c01d2a16f4c3bb7019deb",
"size": "3735",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "openstack_dashboard/dashboards/project/data_processing/nodegroup_templates/tables.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "94677"
},
{
"name": "HTML",
"bytes": "475954"
},
{
"name": "JavaScript",
"bytes": "806884"
},
{
"name": "Makefile",
"bytes": "588"
},
{
"name": "Python",
"bytes": "4687618"
},
{
"name": "Shell",
"bytes": "18657"
}
],
"symlink_target": ""
}
|
import unittest
import config
import thread_cert
from pktverify.consts import MLE_CHILD_ID_RESPONSE, MLE_DATA_RESPONSE, MGMT_PENDING_SET_URI, MGMT_ACTIVE_SET_URI, MGMT_DATASET_CHANGED_URI, COAP_CODE_ACK, ACTIVE_OPERATION_DATASET_TLV, ACTIVE_TIMESTAMP_TLV, PENDING_TIMESTAMP_TLV, NM_CHANNEL_TLV, NM_CHANNEL_MASK_TLV, NM_EXTENDED_PAN_ID_TLV, NM_NETWORK_KEY_TLV, NM_NETWORK_MESH_LOCAL_PREFIX_TLV, NM_NETWORK_NAME_TLV, NM_PAN_ID_TLV, NM_PSKC_TLV, NM_SECURITY_POLICY_TLV, SOURCE_ADDRESS_TLV, LEADER_DATA_TLV, ACTIVE_TIMESTAMP_TLV, NETWORK_DATA_TLV, NM_BORDER_AGENT_LOCATOR_TLV, NM_COMMISSIONER_SESSION_ID_TLV, NM_DELAY_TIMER_TLV, PENDING_OPERATION_DATASET_TLV
from pktverify.packet_verifier import PacketVerifier
# Initial PAN ID of the network.
PANID_INIT = 0xface
# Node identifiers used by the test topology.
COMMISSIONER = 1
LEADER = 2
ROUTER = 3
# Timestamps and delay timers for the active/pending datasets that the
# test pushes at each step.
LEADER_ACTIVE_TIMESTAMP = 10
ROUTER_ACTIVE_TIMESTAMP = 20
ROUTER_PENDING_TIMESTAMP = 30
ROUTER_PENDING_ACTIVE_TIMESTAMP = 25
ROUTER_DELAY_TIMER = 3600000
COMMISSIONER_PENDING_TIMESTAMP = 40
COMMISSIONER_PENDING_ACTIVE_TIMESTAMP = 80
COMMISSIONER_DELAY_TIMER = 60000
# Channel and PAN ID carried in the commissioner's pending dataset.
COMMISSIONER_PENDING_CHANNEL = 20
COMMISSIONER_PENDING_PANID = 0xafce
class Cert_9_2_7_DelayTimer(thread_cert.TestCase):
SUPPORT_NCP = False
TOPOLOGY = {
COMMISSIONER: {
'name': 'COMMISSIONER',
'mode': 'rdn',
'allowlist': [LEADER]
},
LEADER: {
'name': 'LEADER',
'mode': 'rdn',
'partition_id': 0xffffffff,
'allowlist': [COMMISSIONER]
},
ROUTER: {
'name': 'ROUTER',
'mode': 'rdn',
'partition_id': 1,
},
}
    def test(self):
        """Drive the delay-timer test sequence on the simulated topology.

        Forms two partitions (leader+commissioner, and the router), merges
        them, then pushes pending datasets from both the router and the
        commissioner and checks that the commissioner's dataset (newer
        pending timestamp, shorter delay, new channel/PAN ID) is the one
        all nodes adopt.
        """
        # Start the leader and wait for it to take the leader role
        self.nodes[LEADER].start()
        self.simulator.go(config.LEADER_STARTUP_DELAY)
        self.assertEqual(self.nodes[LEADER].get_state(), 'leader')
        # Attach the commissioner node as a router
        self.nodes[COMMISSIONER].start()
        self.simulator.go(config.ROUTER_STARTUP_DELAY)
        self.assertEqual(self.nodes[COMMISSIONER].get_state(), 'router')
        # Begin commissioning and push the initial active dataset
        self.nodes[COMMISSIONER].commissioner_start()
        self.simulator.go(3)
        self.nodes[COMMISSIONER].send_mgmt_active_set(active_timestamp=LEADER_ACTIVE_TIMESTAMP,)
        self.simulator.go(5)
        # The router starts isolated (own partition id) and leads there
        self.nodes[ROUTER].start()
        self.simulator.go(config.LEADER_STARTUP_DELAY)
        self.assertEqual(self.nodes[ROUTER].get_state(), 'leader')
        # Allow the two partitions to see each other and merge
        self.nodes[LEADER].add_allowlist(self.nodes[ROUTER].get_addr64())
        self.nodes[ROUTER].add_allowlist(self.nodes[LEADER].get_addr64())
        self.simulator.go(35)
        self.assertEqual(self.nodes[COMMISSIONER].get_state(), 'router')
        self.assertEqual(self.nodes[LEADER].get_state(), 'leader')
        self.assertEqual(self.nodes[ROUTER].get_state(), 'router')
        # Router takes over commissioning and updates the active dataset
        self.nodes[ROUTER].commissioner_start()
        self.simulator.go(3)
        self.nodes[ROUTER].send_mgmt_active_set(active_timestamp=ROUTER_ACTIVE_TIMESTAMP,)
        self.simulator.go(30)
        # Router registers a pending dataset with a long delay timer
        self.nodes[ROUTER].send_mgmt_pending_set(
            pending_timestamp=ROUTER_PENDING_TIMESTAMP,
            active_timestamp=ROUTER_PENDING_ACTIVE_TIMESTAMP,
            delay_timer=ROUTER_DELAY_TIMER,
        )
        self.simulator.go(60)
        # Commissioner registers a pending dataset with a newer pending
        # timestamp, a shorter delay timer and a new channel and PAN ID
        self.nodes[COMMISSIONER].send_mgmt_pending_set(
            pending_timestamp=COMMISSIONER_PENDING_TIMESTAMP,
            active_timestamp=COMMISSIONER_PENDING_ACTIVE_TIMESTAMP,
            delay_timer=COMMISSIONER_DELAY_TIMER,
            channel=COMMISSIONER_PENDING_CHANNEL,
            panid=COMMISSIONER_PENDING_PANID,
        )
        self.simulator.go(120)
        # All nodes must now use the commissioner's channel and PAN ID
        self.assertEqual(self.nodes[LEADER].get_panid(), COMMISSIONER_PENDING_PANID)
        self.assertEqual(self.nodes[COMMISSIONER].get_panid(), COMMISSIONER_PENDING_PANID)
        self.assertEqual(self.nodes[ROUTER].get_panid(), COMMISSIONER_PENDING_PANID)
        self.assertEqual(self.nodes[LEADER].get_channel(), COMMISSIONER_PENDING_CHANNEL)
        self.assertEqual(
            self.nodes[COMMISSIONER].get_channel(),
            COMMISSIONER_PENDING_CHANNEL,
        )
        self.assertEqual(self.nodes[ROUTER].get_channel(), COMMISSIONER_PENDING_CHANNEL)
        self.collect_rloc16s()
        self.collect_rlocs()
        # Pick the router's first non-link-local address and ping it
        ipaddrs = self.nodes[ROUTER].get_addrs()
        for ipaddr in ipaddrs:
            if ipaddr[0:4] != 'fe80':
                break
        self.assertTrue(self.nodes[LEADER].ping(ipaddr))
    def verify(self, pv):
        """Verify the Leader's packet stream against the expected MLE/MeshCoP
        message sequence for this delay-timer certification test.

        :param pv: packet verifier holding the capture (``pv.pkts``) and the
            per-node variables resolved from the pcap (``pv.vars``).
        """
        pkts = pv.pkts
        pv.summary.show()
        # Resolve node addresses/identifiers captured during the run.
        LEADER = pv.vars['LEADER']
        LEADER_RLOC16 = pv.vars['LEADER_RLOC16']
        COMMISSIONER = pv.vars['COMMISSIONER']
        COMMISSIONER_RLOC = pv.vars['COMMISSIONER_RLOC']
        ROUTER = pv.vars['ROUTER']
        ROUTER_RLOC = pv.vars['ROUTER_RLOC']
        ROUTER_RLOC16 = pv.vars['ROUTER_RLOC16']
        # All checks below walk packets sent by the Leader, in order.
        _lpkts = pkts.filter_wpan_src64(LEADER)
        # Step 1: Ensure the topology is formed correctly
        _lpkts.filter_wpan_dst64(COMMISSIONER).filter_mle_cmd(MLE_CHILD_ID_RESPONSE).must_next()
        # Keep a second cursor for the CoAP (MGMT_*) exchanges, which interleave
        # with the MLE messages checked via _lpkts.
        _lpkts_coap = _lpkts.copy()
        # Step 4: Leader MUST send a unicast MLE Child ID Response to the Router
        _lpkts.filter_wpan_dst64(ROUTER).filter_mle_cmd(MLE_CHILD_ID_RESPONSE).must_next(
        ).must_verify(lambda p: {ACTIVE_OPERATION_DATASET_TLV, ACTIVE_TIMESTAMP_TLV} < set(p.mle.tlv.type) and {
            NM_CHANNEL_TLV, NM_CHANNEL_MASK_TLV, NM_EXTENDED_PAN_ID_TLV, NM_NETWORK_KEY_TLV,
            NM_NETWORK_MESH_LOCAL_PREFIX_TLV, NM_NETWORK_NAME_TLV, NM_PAN_ID_TLV, NM_PSKC_TLV, NM_SECURITY_POLICY_TLV
        } <= set(p.thread_meshcop.tlv.type) and p.mle.tlv.active_tstamp == LEADER_ACTIVE_TIMESTAMP)
        # Step 6: Leader automatically sends a MGMT_ACTIVE_SET.rsp to the Router
        _lpkts.filter_ipv6_dst(ROUTER_RLOC).filter_coap_ack(MGMT_ACTIVE_SET_URI).must_next().must_verify(
            lambda p: p.coap.code == COAP_CODE_ACK and p.thread_meshcop.tlv.state == 1)
        # Step 7: Leader multicasts a MLE Data Response with the new information
        _lpkts.filter_LLANMA().filter_mle_cmd(MLE_DATA_RESPONSE).must_next().must_verify(
            lambda p: {SOURCE_ADDRESS_TLV, LEADER_DATA_TLV, ACTIVE_TIMESTAMP_TLV, NETWORK_DATA_TLV} <= set(
                p.mle.tlv.type) and {NM_BORDER_AGENT_LOCATOR_TLV, NM_COMMISSIONER_SESSION_ID_TLV} <= set(
                    p.thread_meshcop.tlv.type) and p.thread_nwd.tlv.stable == [0] and p.mle.tlv.active_tstamp ==
            ROUTER_ACTIVE_TIMESTAMP)
        # Step 10: Leader MUST send a unicast MLE Data Response to the Router
        _lpkts.filter_wpan_dst64(ROUTER).filter_mle_cmd(MLE_DATA_RESPONSE).must_next(
        ).must_verify(lambda p: {ACTIVE_OPERATION_DATASET_TLV, ACTIVE_TIMESTAMP_TLV} < set(p.mle.tlv.type) and {
            NM_CHANNEL_TLV, NM_CHANNEL_MASK_TLV, NM_EXTENDED_PAN_ID_TLV, NM_NETWORK_KEY_TLV,
            NM_NETWORK_MESH_LOCAL_PREFIX_TLV, NM_NETWORK_NAME_TLV, NM_PAN_ID_TLV, NM_PSKC_TLV, NM_SECURITY_POLICY_TLV
        } <= set(p.thread_meshcop.tlv.type) and p.mle.tlv.active_tstamp == ROUTER_ACTIVE_TIMESTAMP)
        # Step 12: Leader sends a MGMT_PENDING_SET.rsp to the Router with Status = Accept
        _lpkts_coap.filter_ipv6_dst(ROUTER_RLOC).filter_coap_ack(MGMT_PENDING_SET_URI).must_next().must_verify(
            lambda p: p.coap.code == COAP_CODE_ACK and p.thread_meshcop.tlv.state == 1)
        # Step 13: Leader sends a multicast MLE Data Response
        _lpkts.filter_LLANMA().filter_mle_cmd(MLE_DATA_RESPONSE).must_next().must_verify(
            lambda p: {
                SOURCE_ADDRESS_TLV, LEADER_DATA_TLV, ACTIVE_TIMESTAMP_TLV, PENDING_TIMESTAMP_TLV, NETWORK_DATA_TLV
            } <= set(p.mle.tlv.type) and p.thread_nwd.tlv.stable == [0] and p.mle.tlv.active_tstamp ==
            ROUTER_ACTIVE_TIMESTAMP and p.mle.tlv.pending_tstamp == ROUTER_PENDING_TIMESTAMP)
        # Step 14: The DUT MUST send MGMT_DATASET_CHANGED.ntf to the Router
        _lpkts_coap.filter_wpan_dst16(ROUTER_RLOC16).filter_coap_request(MGMT_DATASET_CHANGED_URI).must_next()
        # Step 16: Leader MUST send a unicast MLE Data Response to the Router
        _lpkts.filter_wpan_dst64(ROUTER).filter_mle_cmd(MLE_DATA_RESPONSE).must_next().must_verify(
            lambda p: {ACTIVE_TIMESTAMP_TLV, PENDING_TIMESTAMP_TLV} < set(p.mle.tlv.type) and p.mle.tlv.active_tstamp
            == ROUTER_ACTIVE_TIMESTAMP and p.mle.tlv.pending_tstamp == ROUTER_PENDING_TIMESTAMP)
        # Step 18: The DUT MUST send MGMT_PENDING_SET.rsp to the Commissioner
        _lpkts_coap.filter_ipv6_dst(COMMISSIONER_RLOC).filter_coap_ack(MGMT_PENDING_SET_URI).must_next().must_verify(
            lambda p: p.coap.code == COAP_CODE_ACK and p.thread_meshcop.tlv.state == 1)
        # Step 19: Leader MUST send a unicast MLE Data Response to the Router
        _lpkts.filter_LLANMA().filter_mle_cmd(MLE_DATA_RESPONSE).must_next().must_verify(
            lambda p: {
                SOURCE_ADDRESS_TLV, LEADER_DATA_TLV, NETWORK_DATA_TLV, ACTIVE_TIMESTAMP_TLV, PENDING_TIMESTAMP_TLV
            } <= set(p.mle.tlv.type) and p.thread_nwd.tlv.stable == [0] and p.mle.tlv.active_tstamp ==
            ROUTER_ACTIVE_TIMESTAMP and p.mle.tlv.pending_tstamp == COMMISSIONER_PENDING_TIMESTAMP)
        # Step 20: Leader MUST send a unicast MLE Data Response to the Router
        _lpkts.filter_wpan_dst64(ROUTER).filter_mle_cmd(MLE_DATA_RESPONSE).must_next(
        ).must_verify(lambda p: {ACTIVE_TIMESTAMP_TLV, PENDING_TIMESTAMP_TLV, PENDING_OPERATION_DATASET_TLV} < set(
            p.mle.tlv.type) and {NM_CHANNEL_TLV, NM_COMMISSIONER_SESSION_ID_TLV, NM_PAN_ID_TLV, NM_DELAY_TIMER_TLV} <=
                      set(p.thread_meshcop.tlv.type) and p.mle.tlv.active_tstamp == ROUTER_ACTIVE_TIMESTAMP and p.mle.
                      tlv.pending_tstamp == COMMISSIONER_PENDING_TIMESTAMP and p.thread_meshcop.tlv.pan_id ==
                      [COMMISSIONER_PENDING_PANID] and p.thread_meshcop.tlv.channel == [COMMISSIONER_PENDING_CHANNEL])
        # Step 21: Router MUST respond with an ICMPv6 Echo Reply
        pkts.filter_wpan_src16_dst16(ROUTER_RLOC16, LEADER_RLOC16).filter_ping_reply().must_next()
# Run the certification test when executed directly.
if __name__ == '__main__':
    unittest.main()
|
{
"content_hash": "e599dc63f364e0f49dc565ab8e26fc25",
"timestamp": "",
"source": "github",
"line_count": 199,
"max_line_length": 605,
"avg_line_length": 50.914572864321606,
"alnum_prop": 0.6607777339123568,
"repo_name": "openthread/openthread",
"id": "0460f1dc2a5f788b7820af32da75e358524abd02",
"size": "11737",
"binary": false,
"copies": "5",
"ref": "refs/heads/main",
"path": "tests/scripts/thread-cert/Cert_9_2_07_DelayTimer.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "2610"
},
{
"name": "C",
"bytes": "1602099"
},
{
"name": "C++",
"bytes": "8403018"
},
{
"name": "CMake",
"bytes": "110320"
},
{
"name": "Dockerfile",
"bytes": "10426"
},
{
"name": "M4",
"bytes": "32369"
},
{
"name": "Makefile",
"bytes": "192544"
},
{
"name": "Python",
"bytes": "4630721"
},
{
"name": "Shell",
"bytes": "165349"
}
],
"symlink_target": ""
}
|
from ..azure_common import BaseTest, arm_template, cassette_name
from c7n_azure.resources.key_vault import (KeyVaultUpdateAccessPolicyAction, WhiteListFilter,
KeyVaultFirewallRulesFilter,
KeyVaultFirewallBypassFilter)
from c7n_azure.session import Session
from c7n_azure.utils import GraphHelper
from mock import patch, Mock
from azure.core.exceptions import HttpResponseError
from netaddr import IPSet
from parameterized import parameterized
import pytest
from requests import Response
from c7n.utils import local_session
class KeyVaultTest(BaseTest):
    """Tests for the azure.keyvault resource: schema validation, the
    whitelist filter, the update-access-policy action, and the
    firewall-rules / firewall-bypass filters."""
    def setUp(self):
        super(KeyVaultTest, self).setUp()
    def test_key_vault_schema_validate(self):
        """A policy with whitelist filter and update-access-policy action validates."""
        with self.sign_out_patch():
            p = self.load_policy({
                'name': 'test-key-vault',
                'resource': 'azure.keyvault',
                'filters': [
                    {'type': 'whitelist',
                     'key': 'test'}
                ],
                'actions': [
                    {'type': 'update-access-policy',
                     'operation': 'add',
                     'access-policies': []}
                ]
            }, validate=True)
            self.assertTrue(p)
    @arm_template('keyvault.json')
    @cassette_name('common')
    def test_find_by_name(self):
        """A glob value filter on the name matches exactly one vault."""
        p = self.load_policy({
            'name': 'test-azure-keyvault',
            'resource': 'azure.keyvault',
            'filters': [
                {'type': 'value',
                 'key': 'name',
                 'op': 'glob',
                 'value_type': 'normalize',
                 'value': 'cckeyvault1*'}],
        })
        resources = p.run()
        self.assertEqual(len(resources), 1)
    def test_compare_permissions(self):
        """compare_permissions: requested perms (p1) must be a case-insensitive
        subset of granted perms (p2); empty p1 passes, missing p2 keys fail."""
        p1 = {"keys": ['get'], "secrets": ['get'], "certificates": ['get']}
        p2 = {"keys": ['Get', 'List'], "secrets": ['Get', 'List'], "certificates": ['Get', 'List']}
        self.assertTrue(WhiteListFilter.compare_permissions(p1, p2))
        p1 = {"keys": ['delete']}
        p2 = {"keys": ['Get', 'List'], "secrets": ['Get', 'List'], "certificates": ['Get', 'List']}
        self.assertFalse(WhiteListFilter.compare_permissions(p1, p2))
        p1 = {"secrets": ['delete']}
        p2 = {"keys": ['Get', 'List'], "secrets": ['Get', 'List'], "certificates": ['Get', 'List']}
        self.assertFalse(WhiteListFilter.compare_permissions(p1, p2))
        p1 = {"certificates": ['delete']}
        p2 = {"keys": ['Get', 'List'], "secrets": ['Get', 'List'], "certificates": ['Get', 'List']}
        self.assertFalse(WhiteListFilter.compare_permissions(p1, p2))
        p1 = {}
        p2 = {"keys": ['Get', 'List'], "secrets": ['Get', 'List'], "certificates": ['Get', 'List']}
        self.assertTrue(WhiteListFilter.compare_permissions(p1, p2))
        p1 = {"keys": ['get'], "secrets": ['get'], "certificates": ['get']}
        p2 = {}
        self.assertFalse(WhiteListFilter.compare_permissions(p1, p2))
    # Requires Graph access
    @arm_template('keyvault.json')
    @pytest.mark.skiplive
    def test_whitelist(self):
        """Tests basic whitelist functionality"""
        p = self.load_policy({
            'name': 'test-key-vault',
            'resource': 'azure.keyvault',
            'filters': [
                {'type': 'value',
                 'key': 'name',
                 'op': 'glob',
                 'value_type': 'normalize',
                 'value': 'cckeyvault1*'},
                {'not': [
                    {'type': 'whitelist',
                     'key': 'principalName',
                     'users': ['account1@sample.com']}
                ]}
            ]
        })
        resources = p.run()
        self.assertEqual(len(resources), 1)
    @arm_template('keyvault-no-policies.json')
    def test_whitelist_zero_access_policies(self):
        """Tests that a keyvault with 0 access policies is processed properly
        and doesn't raise an exception.
        """
        p = self.load_policy({
            'name': 'test-key-vault',
            'resource': 'azure.keyvault',
            'filters': [
                {'type': 'value',
                 'key': 'name',
                 'op': 'glob',
                 'value_type': 'normalize',
                 'value': 'cckeyvault2*'},
                {'not': [
                    {'type': 'whitelist',
                     'key': 'principalName',
                     'users': ['account1@sample.com']}
                ]}
            ]
        })
        resources = p.run()
        self.assertEqual(len(resources), 0)
    @arm_template('keyvault.json')
    @patch.object(GraphHelper, 'get_principal_dictionary')
    def test_whitelist_not_authorized(self, get_principal_dictionary):
        """Tests that an HttpResponseError propagates when the Microsoft Graph
        call fails with 403. The Graph call is mocked because it is
        impractical to have identities with varying levels of graph access
        for live test runs or recordings."""
        mock_response = Mock(spec=Response)
        mock_response.status_code = 403
        mock_response.reason = 'forbidden'
        get_principal_dictionary.side_effect = HttpResponseError(response=mock_response)
        p = self.load_policy({
            'name': 'test-key-vault',
            'resource': 'azure.keyvault',
            'filters': [
                {'type': 'value',
                 'key': 'name',
                 'op': 'glob',
                 'value_type': 'normalize',
                 'value': 'cckeyvault1*'},
                {'not': [
                    {'type': 'whitelist',
                     'key': 'principalName',
                     'users': ['account1@sample.com']}
                ]}
            ]
        })
        with self.assertRaises(HttpResponseError) as e:
            p.run()
        self.assertEqual(403, e.exception.status_code)
    def test_update_access_policy_action(self):
        """The update-access-policy action calls the SDK's update_access_policy."""
        with patch(self._get_key_vault_client_string() + '.update_access_policy')\
                as access_policy_action_mock:
            p = self.load_policy({
                'name': 'test-azure-keyvault',
                'resource': 'azure.keyvault',
                'filters': [
                    {'type': 'value',
                     'key': 'name',
                     'op': 'glob',
                     'value_type': 'normalize',
                     'value': 'cckeyvault1*'}],
                'actions': [
                    {'type': 'update-access-policy',
                     'operation': 'replace',
                     'access-policies': [{
                         'tenant-id': '00000000-0000-0000-0000-000000000000',
                         'object-id': '11111111-1111-1111-1111-111111111111',
                         'permissions': {'keys': ['Get']}}]}]
            })
            p.run()
            access_policy_action_mock.assert_called()
    def test_transform_access_policies(self):
        """Policy-file keys (object-id/tenant-id) are transformed to the SDK's
        camelCase accessPolicies shape."""
        mock_access_policies = [{"object-id": "mockObjectId",
                                 "tenant-id": "mockTenantId",
                                 "permissions": {"keys": ["Get"]}}]
        transformed_access_policies = KeyVaultUpdateAccessPolicyAction._transform_access_policies(
            mock_access_policies).get("accessPolicies")[0]
        self.assertTrue("objectId" in transformed_access_policies)
        self.assertTrue("tenantId" in transformed_access_policies)
        self.assertTrue("permissions" in transformed_access_policies)
    def _get_key_vault_client_string(self):
        """Return the dotted path of the vaults client class, for patching."""
        client = local_session(Session) \
            .client('azure.mgmt.keyvault.KeyVaultManagementClient').vaults
        return client.__module__ + '.' + client.__class__.__name__
    @arm_template('keyvault.json')
    @cassette_name('common')
    def test_firewall_rules_include(self):
        """firewall-rules include: a single allowed IP matches the vault."""
        p = self.load_policy({
            'name': 'test-azure-keyvault',
            'resource': 'azure.keyvault',
            'filters': [
                {'type': 'value',
                 'key': 'name',
                 'op': 'glob',
                 'value': 'cckeyvault1*'},
                {'type': 'firewall-rules',
                 'include': ['1.0.0.0']}],
        })
        resources = p.run()
        self.assertEqual(len(resources), 1)
    @arm_template('keyvault.json')
    @cassette_name('common')
    def test_firewall_rules_not_include_all_ranges(self):
        """firewall-rules include: fails when one of the IPs is not allowed."""
        p = self.load_policy({
            'name': 'test-azure-keyvault',
            'resource': 'azure.keyvault',
            'filters': [
                {'type': 'value',
                 'key': 'name',
                 'op': 'glob',
                 'value': 'cckeyvault1*'},
                {'type': 'firewall-rules',
                 'include': ['1.0.0.0', '127.0.0.1']}],
        }, validate=True)
        resources = p.run()
        self.assertEqual(0, len(resources))
    @arm_template('keyvault.json')
    @cassette_name('common')
    def test_firewall_rules_include_cidr(self):
        """firewall-rules include: a contained CIDR range matches."""
        p = self.load_policy({
            'name': 'test-azure-keyvault',
            'resource': 'azure.keyvault',
            'filters': [
                {'type': 'value',
                 'key': 'name',
                 'op': 'glob',
                 'value': 'cckeyvault1*'},
                {'type': 'firewall-rules',
                 'include': ['128.0.0.0/1']}],
        }, validate=True)
        resources = p.run()
        self.assertEqual(1, len(resources))
    @arm_template('keyvault.json')
    @cassette_name('common')
    def test_firewall_rules_not_include_cidr(self):
        """firewall-rules include: a non-contained CIDR range does not match."""
        p = self.load_policy({
            'name': 'test-azure-keyvault',
            'resource': 'azure.keyvault',
            'filters': [
                {'type': 'value',
                 'key': 'name',
                 'op': 'glob',
                 'value': 'cckeyvault1*'},
                {'type': 'firewall-rules',
                 'include': ['127.0.0.0/8']}],
        }, validate=True)
        resources = p.run()
        self.assertEqual(0, len(resources))
    @arm_template('keyvault.json')
    @cassette_name('common')
    def test_firewall_rules_equal(self):
        """firewall-rules equal: matches when the rule set is exactly equal."""
        p = self.load_policy({
            'name': 'test-azure-keyvault',
            'resource': 'azure.keyvault',
            'filters': [
                {'type': 'value',
                 'key': 'name',
                 'op': 'glob',
                 'value': 'cckeyvault1*'},
                {'type': 'firewall-rules',
                 'equal': ['0.0.0.0-126.255.255.255', '128.0.0.0-255.255.255.255']}],
        }, validate=True)
        resources = p.run()
        self.assertEqual(1, len(resources))
    @arm_template('keyvault.json')
    @cassette_name('common')
    def test_firewall_rules_not_equal(self):
        """firewall-rules equal: fails when the rule set differs by one address."""
        p = self.load_policy({
            'name': 'test-azure-keyvault',
            'resource': 'azure.keyvault',
            'filters': [
                {'type': 'value',
                 'key': 'name',
                 'op': 'glob',
                 'value': 'cckeyvault1*'},
                {'type': 'firewall-rules',
                 'equal': ['0.0.0.0-126.255.255.255', '128.0.0.0-255.255.255.254']}],
        }, validate=True)
        resources = p.run()
        self.assertEqual(0, len(resources))
    @arm_template('keyvault.json')
    @cassette_name('common')
    def test_firewall_bypass(self):
        """firewall-bypass equal: matches vaults bypassing for AzureServices."""
        p = self.load_policy({
            'name': 'test-azure-keyvault',
            'resource': 'azure.keyvault',
            'filters': [
                {'type': 'firewall-bypass',
                 'mode': 'equal',
                 'list': ['AzureServices']}],
        })
        resources = p.run()
        self.assertEqual(1, len(resources))
class KeyVaultFirewallFilterTest(BaseTest):
    """Unit tests for KeyVaultFirewallRulesFilter._query_rules."""
    def test_query_empty_network_acl(self):
        # No networkAcls at all -> treated as open to the whole IPv4 space.
        resource = {'properties': {}}
        expected = IPSet(['0.0.0.0/0'])
        self.assertEqual(expected, self._get_filter()._query_rules(resource))
    def test_query_default_action_allow(self):
        # defaultAction Allow -> everything is permitted regardless of rules.
        resource = {'properties': {'networkAcls': {'defaultAction': 'Allow'}}}
        expected = IPSet(['0.0.0.0/0'])
        self.assertEqual(expected, self._get_filter()._query_rules(resource))
    def test_query_default_action_deny(self):
        # defaultAction Deny -> only the explicitly listed ipRules are permitted.
        resource = {'properties': {'networkAcls': {'defaultAction': 'Deny',
                                                   'ipRules': [{'value': '10.0.0.0/16'},
                                                               {'value': '8.8.8.8'}]}}}
        expected = IPSet(['8.8.8.8', '10.0.0.0/16'])
        self.assertEqual(expected, self._get_filter()._query_rules(resource))
    def _get_filter(self, mode='equal'):
        # Build a filter instance with a canned rule list; manager is mocked.
        data = {mode: ['10.0.0.0/8', '127.0.0.1']}
        return KeyVaultFirewallRulesFilter(data, Mock())
class KeyVaultFirewallBypassFilterTest(BaseTest):
    """Unit tests for KeyVaultFirewallBypassFilter._query_bypass."""
    # Each scenario: (networkAcls properties, expected bypass list).
    scenarios = [
        [{}, []],
        [{'networkAcls': {'defaultAction': 'Allow', 'bypass': ''}}, ['AzureServices']],
        [{'networkAcls': {'defaultAction': 'Deny', 'bypass': ''}}, []],
        [{'networkAcls': {'defaultAction': 'Deny', 'bypass': 'AzureServices'}},
         ['AzureServices']],
    ]
    @parameterized.expand(scenarios)
    def test_run(self, properties, expected):
        resource = {'properties': properties}
        f = KeyVaultFirewallBypassFilter({'mode': 'equal', 'list': []})
        self.assertEqual(expected, f._query_bypass(resource))
|
{
"content_hash": "ed81458a31da1c92ea6e3db4e1960b80",
"timestamp": "",
"source": "github",
"line_count": 358,
"max_line_length": 99,
"avg_line_length": 37.779329608938546,
"alnum_prop": 0.4978927911275416,
"repo_name": "thisisshi/cloud-custodian",
"id": "ff6ca89ef0583f67779ea60b2bc7f47accd83b3d",
"size": "13604",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tools/c7n_azure/tests_azure/tests_resources/test_keyvault.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2126"
},
{
"name": "Go",
"bytes": "146637"
},
{
"name": "HCL",
"bytes": "62085"
},
{
"name": "Jinja",
"bytes": "19775"
},
{
"name": "Makefile",
"bytes": "14242"
},
{
"name": "PowerShell",
"bytes": "1804"
},
{
"name": "Python",
"bytes": "6684814"
},
{
"name": "Shell",
"bytes": "15323"
},
{
"name": "Smarty",
"bytes": "359"
}
],
"symlink_target": ""
}
|
"""
Unit Tests for `pdb_delresname`.
"""
import os
import sys
import unittest
from config import data_dir
from utils import OutputCapture
class TestTool(unittest.TestCase):
    """
    Generic class for testing tools.

    Runs pdb_delresname's main() with simulated argv and checks the exit
    code, captured stdout line count, and stderr error messages.
    """
    def setUp(self):
        # Dynamically import the module
        name = 'pdbtools.pdb_delresname'
        self.module = __import__(name, fromlist=[''])
    def exec_module(self):
        """
        Execs module.

        Runs the tool's main() capturing stdout/stderr into self.stdout /
        self.stderr and the SystemExit code into self.retcode.
        """
        with OutputCapture() as output:
            try:
                self.module.main()
            except SystemExit as e:
                self.retcode = e.code
        self.stdout = output.stdout
        self.stderr = output.stderr
        return
    def test_single_sele(self):
        """$ pdb_delresname -ARG data/dummy.pdb"""
        # Simulate input
        sys.argv = ['', '-ARG', os.path.join(data_dir, 'dummy.pdb')]
        # Execute the script
        self.exec_module()
        # Validate results
        self.assertEqual(self.retcode, 0)
        self.assertEqual(len(self.stdout), 128)
        self.assertEqual(len(self.stderr), 0)
    def test_multiple_sele(self):
        """$ pdb_delresname -ARG,CA data/dummy.pdb"""
        # Simulate input
        sys.argv = ['', '-ARG,CA', os.path.join(data_dir, 'dummy.pdb')]
        # Execute the script
        self.exec_module()
        # Validate results
        self.assertEqual(self.retcode, 0)
        self.assertEqual(len(self.stdout), 127)
        self.assertEqual(len(self.stderr), 0)
    def test_empty_sele(self):
        """$ pdb_delresname - data/dummy.pdb"""
        # Simulate input
        sys.argv = ['', '-', os.path.join(data_dir, 'dummy.pdb')]
        # Execute the script
        self.exec_module()
        # Validate results
        self.assertEqual(self.retcode, 1)
        self.assertEqual(len(self.stdout), 0)
        self.assertEqual(self.stderr[0][:22],
                         "ERROR!! Residue name s")
    def test_invalid_sele(self):
        """$ pdb_delresname -ABCD data/dummy.pdb"""
        # Simulate input
        sys.argv = ['', '-ABCD', os.path.join(data_dir, 'dummy.pdb')]
        # Execute the script
        self.exec_module()
        # Validate results
        self.assertEqual(self.retcode, 1)
        self.assertEqual(len(self.stdout), 0)
        self.assertEqual(self.stderr[0][:22],
                         "ERROR!! Residue name i")
    def test_file_not_found(self):
        """$ pdb_delresname not_existing.pdb"""
        afile = os.path.join(data_dir, 'not_existing.pdb')
        sys.argv = ['', afile]
        self.exec_module()
        self.assertEqual(self.retcode, 1)  # exit code is 1 (error)
        self.assertEqual(len(self.stdout), 0)  # nothing written to stdout
        self.assertEqual(self.stderr[0][:22],
                         "ERROR!! File not found")  # proper error message
    @unittest.skipIf(os.getenv('SKIP_TTY_TESTS'), 'skip on GHA - no TTY')
    def test_file_missing(self):
        """$ pdb_delresname -ARG"""
        sys.argv = ['', '-ARG']
        self.exec_module()
        self.assertEqual(self.retcode, 1)
        self.assertEqual(len(self.stdout), 0)  # no output
        self.assertEqual(self.stderr[0],
                         "ERROR!! No data to process!")
    @unittest.skipIf(os.getenv('SKIP_TTY_TESTS'), 'skip on GHA - no TTY')
    def test_helptext(self):
        """$ pdb_delresname"""
        sys.argv = ['']
        self.exec_module()
        self.assertEqual(self.retcode, 1)  # ensure the program exited gracefully.
        self.assertEqual(len(self.stdout), 0)  # no output
        self.assertEqual(self.stderr, self.module.__doc__.split("\n")[:-1])
    def test_not_an_option(self):
        """$ pdb_delresname 20 data/dummy.pdb"""
        sys.argv = ['', '20', os.path.join(data_dir, 'dummy.pdb')]
        self.exec_module()
        self.assertEqual(self.retcode, 1)
        self.assertEqual(len(self.stdout), 0)
        self.assertEqual(self.stderr[0],
                         "ERROR! First argument is not an option: '20'")
if __name__ == '__main__':
    from config import test_dir
    # Put the repo root ahead of any installed copy on sys.path.
    mpath = os.path.abspath(os.path.join(test_dir, '..'))
    sys.path.insert(0, mpath)  # so we load dev files before any installation
    unittest.main()
|
{
"content_hash": "6a2a682b261fd053f8af29058991ce6e",
"timestamp": "",
"source": "github",
"line_count": 154,
"max_line_length": 82,
"avg_line_length": 27.928571428571427,
"alnum_prop": 0.5659149035108114,
"repo_name": "JoaoRodrigues/pdb-tools",
"id": "156ba905d28557292c04782e3a67d90b299c3c8e",
"size": "4934",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/test_pdb_delresname.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "567862"
},
{
"name": "TeX",
"bytes": "980"
}
],
"symlink_target": ""
}
|
__author__ = 'richard'
import numpy as np
from roboskeeter.math import math_toolbox
class Flight():
    """Computes the forces acting on the simulated flier at each timestep.

    The total force is the sum of three terms: a random-direction
    exploratory force, a stimulus-driven force chosen by the behavioral
    ``decision`` (surge upwind, cast crosswind, or ascend the plume
    gradient), and a damping force proportional to the current velocity.
    """
    def __init__(self, random_f_strength, stim_f_strength, damping_coeff):
        """
        Parameters
        ----------
        random_f_strength
            Magnitude of the random-direction force applied every timestep.
        stim_f_strength
            Magnitude of the stimulus-driven force.
            TODO: separate surge strength, cast strength, gradient strength.
        damping_coeff
            Coefficient of the velocity-proportional damping force.
        """
        self.random_f_strength = random_f_strength
        self.stim_f_strength = stim_f_strength
        self.damping_coeff = damping_coeff
        self.max_stim_f = 1e-5  # cap on the magnitude of the stim_f
    def random(self):
        """Generate random-direction force vector at each timestep from double-
        exponential distribution given exponent term rf.
        """
        unit_vector = math_toolbox.generate_random_unit_vector()
        force = self.random_f_strength * unit_vector
        return force
    def stimulus(self, decision, plume_signal):
        """Return the stimulus force for the given behavioral decision.

        Parameters
        ----------
        decision
            One of 'search', 'ga', 'surge', 'cast_l'/'cast_r' (any string
            containing 'cast'), or 'ignore'.
        plume_signal
            The plume gradient vector; only used for the 'ga' decision.

        Raises
        ------
        LookupError
            If the decision string is not recognized.
        """
        force = np.array([0., 0., 0.])
        # NOTE: comparisons use == rather than ``is``; identity comparison of
        # strings relies on CPython interning, fails for strings built at
        # runtime, and raises SyntaxWarning on Python >= 3.8.
        if decision == 'search':
            pass  # there is no stimulus_f in the absence of stimulus
        elif decision == 'ga':
            force += self.surge_up_gradient(plume_signal)
        elif decision == 'surge':
            force += self.surge_upwind()
        elif 'cast' in decision:
            force += self.cast(decision)
        elif decision == 'ignore':
            pass
        else:
            raise LookupError('unknown decision {}'.format(decision))
        return force
    def calc_forces(self, current_velocity, decision, plume_signal):
        """Compute (stim_f, random_f, total_f) for this timestep.

        total_f = -damping_coeff * current_velocity + random_f + stim_f
        """
        ################################################
        # Calculate driving forces at this timestep
        ################################################
        random_f = self.random()
        stim_f = self.stimulus(decision, plume_signal)
        ################################################
        # calculate total force
        ################################################
        total_f = -self.damping_coeff * current_velocity + random_f + stim_f
        ###############################
        return stim_f, random_f, total_f
    def cast(self, decision):
        """
        Parameters
        ----------
        decision
            cast_l or cast_r

        Returns
        -------
        force
            the appropriate cast force (crosswind, along the y axis)
        """
        # NOTE(review): unlike surge forces, the cast force is not capped by
        # _shrink_huge_stim_f -- confirm whether that asymmetry is intended.
        cast_f = self.stim_f_strength
        if 'l' in decision:  # need to cast left
            cast_f *= -1.
        else:
            pass
        force = np.array([0., cast_f, 0.])
        return force
    def surge_upwind(self):
        """Return an upwind (+x) surge force, capped at max_stim_f."""
        force = np.array([self.stim_f_strength, 0., 0.])
        force = self._shrink_huge_stim_f(force)
        return force
    def surge_up_gradient(self, gradient):
        """
        Parameters
        ----------
        gradient
            the current plume gradient

        Returns
        -------
        force
            the stimulus force to ascend the gradient, properly scaled
            and capped at max_stim_f
        """
        scalar = self.stim_f_strength
        force = scalar * gradient
        force = self._shrink_huge_stim_f(force)
        # catch bugs in gradient multiplication
        if np.isnan(force).any():
            raise ValueError("Nans in gradient force!! force = {} gradient = {}".format(force, gradient))
        if np.isinf(force).any():
            raise ValueError("infs in gradient force! force = {} gradient = {}".format(force, gradient))
        return force
    def _shrink_huge_stim_f(self, force):
        """Rescale force so its norm never exceeds self.max_stim_f."""
        norm = np.linalg.norm(force)
        if norm > self.max_stim_f:
            force *= self.max_stim_f / norm  # shrink force to maximum allowed value
        return force
|
{
"content_hash": "9c10c9cc9e2124321df38a9725ce0d9e",
"timestamp": "",
"source": "github",
"line_count": 118,
"max_line_length": 113,
"avg_line_length": 30.135593220338983,
"alnum_prop": 0.5292463442069741,
"repo_name": "isomerase/mozziesniff",
"id": "f98be204f03bcfe5f6497a4f18b5966718223a1a",
"size": "3556",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "roboskeeter/flight.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "272171"
}
],
"symlink_target": ""
}
|
"""
Defines data types and models required specifically
for VRF (for IPv6 Flow Specification) support.
Represents data structures for VRF not VPN/global.
(Inside VRF you have IPv6 Flow Specification prefixes
and inside VPN you have VPNV6 Flow Specification prefixes)
"""
import logging
from ryu.lib.packet.bgp import RF_IPv6_FLOWSPEC
from ryu.lib.packet.bgp import RF_VPNv6_FLOWSPEC
from ryu.lib.packet.bgp import FlowSpecIPv6NLRI
from ryu.lib.packet.bgp import FlowSpecVPNv6NLRI
from ryu.services.protocols.bgp.info_base.vpnv6fs import VPNv6FlowSpecPath
from ryu.services.protocols.bgp.info_base.vrffs import VRFFlowSpecDest
from ryu.services.protocols.bgp.info_base.vrffs import VRFFlowSpecPath
from ryu.services.protocols.bgp.info_base.vrffs import VRFFlowSpecTable
LOG = logging.getLogger('bgpspeaker.info_base.vrf6fs')
class Vrf6FlowSpecPath(VRFFlowSpecPath):
    """Represents a way of reaching an IP destination with
    an IPv6 Flow Specification inside a VRF; exported to the
    VPN table as a VPNv6 Flow Specification path.
    """
    ROUTE_FAMILY = RF_IPv6_FLOWSPEC
    # Classes used when this VRF path is exported into the VPN table.
    VPN_PATH_CLASS = VPNv6FlowSpecPath
    VPN_NLRI_CLASS = FlowSpecVPNv6NLRI
class Vrf6FlowSpecDest(VRFFlowSpecDest):
    """Destination entry in the VRF table for IPv6 Flow Specification routes."""
    ROUTE_FAMILY = RF_IPv6_FLOWSPEC
class Vrf6FlowSpecTable(VRFFlowSpecTable):
    """Virtual Routing and Forwarding information base
    for IPv6 Flow Specification.
    """
    ROUTE_FAMILY = RF_IPv6_FLOWSPEC
    VPN_ROUTE_FAMILY = RF_VPNv6_FLOWSPEC
    NLRI_CLASS = FlowSpecIPv6NLRI
    # Path/destination classes this table instantiates for its entries.
    VRF_PATH_CLASS = Vrf6FlowSpecPath
    VRF_DEST_CLASS = Vrf6FlowSpecDest
|
{
"content_hash": "5d4c296b7964bcabee4c8cedf152edfa",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 74,
"avg_line_length": 33.2,
"alnum_prop": 0.7817938420348058,
"repo_name": "lagopus/ryu-lagopus-ext",
"id": "17b8ce57f22374f9482b0d97fd82b947dbe50d44",
"size": "2107",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "ryu/services/protocols/bgp/info_base/vrf6fs.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "28540"
},
{
"name": "CSS",
"bytes": "306"
},
{
"name": "Erlang",
"bytes": "874721"
},
{
"name": "Gnuplot",
"bytes": "1094"
},
{
"name": "HTML",
"bytes": "306"
},
{
"name": "JavaScript",
"bytes": "8436"
},
{
"name": "Makefile",
"bytes": "88"
},
{
"name": "Python",
"bytes": "6137808"
},
{
"name": "Shell",
"bytes": "17573"
}
],
"symlink_target": ""
}
|
from .interpreter import start_eval
|
{
"content_hash": "3f0cfc01c59664a87d6209724548588a",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 35,
"avg_line_length": 36,
"alnum_prop": 0.8333333333333334,
"repo_name": "pdarragh/Viper",
"id": "bb276efdf4b8a8f2ae9c901c39fa90dc7f8385db",
"size": "36",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "viper/interpreter/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "195762"
}
],
"symlink_target": ""
}
|
from msrest.serialization import Model
class AppInsightsReference(Model):
    """Azure Application Insights settings used for performance counters
    reporting.

    All required parameters must be populated in order to send to Azure.

    :param component: Required. Specifies the Azure Application Insights
     component resource id.
    :type component: ~azure.mgmt.batchai.models.ResourceId
    :param instrumentation_key: Value of the Azure Application Insights
     instrumentation key.
    :type instrumentation_key: str
    :param instrumentation_key_secret_reference: Specifies a KeyVault Secret
     containing the Azure Application Insights instrumentation key. One of
     instrumentation_key or instrumentation_key_secret_reference must be
     specified.
    :type instrumentation_key_secret_reference:
     ~azure.mgmt.batchai.models.KeyVaultSecretReference
    """

    # msrest validation rules: only 'component' is mandatory.
    _validation = {
        'component': {'required': True},
    }

    # Mapping between Python attribute names and wire-format keys/types.
    _attribute_map = {
        'component': {'key': 'component', 'type': 'ResourceId'},
        'instrumentation_key': {'key': 'instrumentationKey', 'type': 'str'},
        'instrumentation_key_secret_reference': {'key': 'instrumentationKeySecretReference', 'type': 'KeyVaultSecretReference'},
    }

    def __init__(self, **kwargs):
        # Let the msrest Model base class perform its generic setup first.
        super(AppInsightsReference, self).__init__(**kwargs)
        # Known attributes are pulled from kwargs; anything absent is None.
        self.component = kwargs.get('component')
        self.instrumentation_key = kwargs.get('instrumentation_key')
        self.instrumentation_key_secret_reference = kwargs.get(
            'instrumentation_key_secret_reference')
|
{
"content_hash": "474a46cdcf433c40b538a234d861013b",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 128,
"avg_line_length": 43.38461538461539,
"alnum_prop": 0.7198581560283688,
"repo_name": "lmazuel/azure-sdk-for-python",
"id": "2ce8077641215e832e4dafaa02b4b6e56e90568e",
"size": "2166",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "azure-mgmt-batchai/azure/mgmt/batchai/models/app_insights_reference.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "42572767"
}
],
"symlink_target": ""
}
|
import logging
import os
import tempfile
from os.path import join
import pygame
from entrypoint2 import entrypoint
from genimg import generate_image
from pygame.locals import K_ESCAPE, KEYDOWN
log = logging.getLogger(__name__)
def fillscreen_pygame(fimage):
    """Show a generated test image fullscreen with pygame.

    Generates an image matching the current display resolution, saves it to
    *fimage*, then blits it fullscreen until the window is closed or ESC is
    pressed.

    :param fimage: path where the generated reference image is written.
    """
    # DBus org.kde.kwin.Screenshot disappears on Kubuntu 20.04 after starting pygame
    # fix: System Settings > Compositor > uncheck Allow apps to turn off compositing.
    # or https://www.pygame.org/docs/ref/pygame.html
    # SDL_VIDEO_X11_NET_WM_BYPASS_COMPOSITOR
    # Set to "0" to re-enable the compositor.
    os.environ["SDL_VIDEO_X11_NET_WM_BYPASS_COMPOSITOR"] = "0"
    pygame.init()
    pygame.mouse.set_visible(0)
    log.info("pygame modes:%s", pygame.display.list_modes())
    # log.info("pygame info:%s", pygame.display.Info())
    log.info("env $DISPLAY= %s", os.environ.get("DISPLAY"))
    # Generate the reference image at the current display resolution.
    infoObject = pygame.display.Info()
    im = generate_image(infoObject.current_w, infoObject.current_h)
    im.save(fimage)
    windowSurface = pygame.display.set_mode((0, 0), pygame.FULLSCREEN)
    img = pygame.image.load(fimage)
    # Event loop: keep redrawing until quit event or ESC key.
    mainLoop = True
    while mainLoop:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                mainLoop = False
            elif event.type == KEYDOWN:
                if event.key == K_ESCAPE:
                    mainLoop = False
        windowSurface.blit(img, (0, 0))
        # pygame.display.flip()
        pygame.display.update()
    pygame.quit()
@entrypoint
def main(image=""):
    """CLI entry point: display the reference image fullscreen.

    :param image: path for the reference image; when empty, a temporary
        directory is created and the image is generated there.
    """
    if not image:
        d = tempfile.mkdtemp(prefix="fillscreen")
        # atexit.register(lambda: rmtree(d))
        image = join(d, "ref.bmp")
        # im = generate_image()
        # im.save(image)
    fillscreen_pygame(image)
|
{
"content_hash": "c96bb6a4fd4f264837175a1f9f9cf89d",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 86,
"avg_line_length": 30.35593220338983,
"alnum_prop": 0.638749302065885,
"repo_name": "ponty/pyscreenshot",
"id": "b545d8afc2a761edd04b583b1c45de3d3a6405bc",
"size": "1791",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/fillscreen_pygame.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "77446"
},
{
"name": "Ruby",
"bytes": "13811"
},
{
"name": "Shell",
"bytes": "16329"
}
],
"symlink_target": ""
}
|
import ast
import falcon
import json
from oslo_config import cfg
import re
from stevedore import driver
import uuid
from kiloeyes.common import es_conn
from kiloeyes.common import namespace
from kiloeyes.common import resource_api
from kiloeyes.openstack.common import log
# Configuration options controlling how notification methods are stored in
# Elasticsearch: document type, index naming strategy/prefix, and the cap on
# query result size.
NOTIFICATION_METHOD_OPTS = [
    cfg.StrOpt('doc_type', default='notificationmethods',
               help='The doc type that notification_methods '
                    'will be saved into.'),
    cfg.StrOpt('index_strategy', default='fixed',
               help='The index strategy used to create index name.'),
    cfg.StrOpt('index_prefix', default='data_',
               help='The index prefix where metrics were saved to.'),
    cfg.IntOpt('size', default=10000,
               help=('The query result limit. Any result set more than '
                     'the limit will be discarded. To see all the matching '
                     'result, narrow your search by using a small time '
                     'window or strong matching name')),
]
cfg.CONF.register_opts(NOTIFICATION_METHOD_OPTS, group="notificationmethods")
LOG = log.getLogger(__name__)
class ParamUtil(object):
    """Static helpers for extracting and validating notification-method
    parameters from a falcon HTTP request.
    """

    @staticmethod
    def validateEmail(addr):
        """Return True when *addr* looks like a plausible email address.

        Addresses of 7 characters or fewer are rejected outright; longer
        ones must match a simple user@domain.tld pattern.
        """
        if len(addr) <= 7:
            return False
        pattern = ("^.+\\@(\\[?)[a-zA-Z0-9\\-\\.]+\\."
                   "([a-zA-Z]{2,3}|[0-9]{1,3})(\\]?)$")
        return re.match(pattern, addr) is not None

    @staticmethod
    def name(req):
        """Return the request's 'name' parameter, or a default when the
        parameter is absent or blank.
        """
        value = req.get_param('name')
        if value and value.strip():
            return value
        return "DefaultNotificationMethods"

    @staticmethod
    def type_address(req):
        """Return a validated (type, address) tuple, or None when invalid.

        Supported types and the meaning of the address field:
          EMAIL    - the email address (must pass validateEmail)
          PAGEDUTY - the PagerDuty Service API key
          WEBHOOK  - the URL of the webhook
        """
        kind = req.get_param('type')
        addr = req.get_param('address')
        if not (kind and kind.strip() and addr and addr.strip()):
            return None
        kind = kind.strip()
        addr = addr.strip()
        if kind == 'EMAIL' and ParamUtil.validateEmail(addr):
            return ("EMAIL", addr)
        if kind in ('PAGEDUTY', 'WEBHOOK'):
            return (kind, addr)
        return None
class NotificationMethodDispatcher(object):
    """Falcon resource implementing CRUD for notification methods.

    Notification methods (email / PagerDuty / webhook endpoints) are
    persisted as documents in Elasticsearch through es_conn.ESConnection.
    Each REST verb handler below builds a message dict carrying a
    "request" marker (POST/PUT/DEL) and hands it to
    handle_notification_msg, which performs the actual ES call.
    """
    def __init__(self, global_conf):
        LOG.debug('initializing V2API in NotificationMethodDispatcher!')
        super(NotificationMethodDispatcher, self).__init__()
        # ES doc type and query-size limit from the [notificationmethods]
        # configuration group.
        self.doc_type = cfg.CONF.notificationmethods.doc_type
        self.size = cfg.CONF.notificationmethods.size
        # load index strategy
        if cfg.CONF.notificationmethods.index_strategy:
            self.index_strategy = driver.DriverManager(
                namespace.STRATEGY_NS,
                cfg.CONF.notificationmethods.index_strategy,
                invoke_on_load=True,
                invoke_kwds={}).driver
            LOG.debug(dir(self.index_strategy))
        else:
            self.index_strategy = None
        self.index_prefix = cfg.CONF.notificationmethods.index_prefix
        self._es_conn = es_conn.ESConnection(
            self.doc_type, self.index_strategy, self.index_prefix)
    def post_data(self, req, res):
        """Create a new notification method from the request body."""
        LOG.debug('In NotificationMethodDispatcher::post_data.')
        msg = req.stream.read()
        # convert msg to dict
        # NOTE(review): ast.literal_eval only accepts Python-literal dicts
        # (JSON true/false/null are rejected) — json.loads would be the
        # conventional choice; confirm what clients actually send.
        dict_msg = ast.literal_eval(msg)
        # random uuid used for store the methods in database
        id = str(uuid.uuid4())
        # add an id to store in elasticsearch
        dict_msg["id"] = id
        # add an item "request" in the msg to tell
        # the receiver this is a POST request
        # The final msg is something like:
        # {"id":"c60ec47e-5038-4bf1-9f95-4046c6e9a759",
        # "request":"POST",
        # "name":"TheName",
        # "type":"TheType",
        # "Address":"TheAddress"}
        dict_msg["request"] = "POST"
        LOG.debug("post notification method: %s" % dict_msg)
        code = self.handle_notification_msg(dict_msg)
        res.status = getattr(falcon, 'HTTP_' + str(code))
    def put_data(self, req, res, id):
        """Update the notification method identified by *id*."""
        LOG.debug('In NotificationMethodDispatcher::put_data.')
        msg = req.stream.read()
        dict_msg = ast.literal_eval(msg)
        # specify the id to match in elasticsearch for update
        dict_msg["id"] = id
        # add an item "request" in the msg to tell the receiver this is a PUT
        # request
        dict_msg["request"] = "PUT"
        LOG.debug("put notification method: %s" % dict_msg)
        code = self.handle_notification_msg(dict_msg)
        res.status = getattr(falcon, 'HTTP_' + str(code))
    def del_data(self, req, res, id):
        """Delete the notification method identified by *id*."""
        LOG.debug('In NotificationMethodDispatcher::del_data.')
        dict_msg = {}
        # specify the id to match in elasticsearch for deletion
        dict_msg["id"] = id
        # add an item "request" in the msg to tell the receiver this is a DEL
        # request
        dict_msg["request"] = "DEL"
        LOG.debug("delete notification method: %s" % dict_msg)
        code = self.handle_notification_msg(dict_msg)
        res.status = getattr(falcon, 'HTTP_' + str(code))
    def _get_notification_method_response(self, res):
        # Unwrap the outer 'hits' object from a successful ES search
        # response; any non-200 (or empty) response yields None.
        if res and res.status_code == 200:
            obj = res.json()
            if obj:
                return obj.get('hits')
            return None
        else:
            return None
    def handle_notification_msg(self, dict_msg):
        """Dispatch a prepared message dict to Elasticsearch.

        Returns the ES status code for POST/PUT/DEL messages.
        NOTE(review): falls through and returns None when "request" or
        "id" is missing or the request type is unknown, which makes the
        callers set res.status from 'HTTP_None' — confirm intended.
        """
        # dict_msg's format is:
        # {"id":"c60ec47e-5038-4bf1-9f95-4046c6e9a759",
        # "request":"POST",
        # "name":"TheName",
        # "type":"TheType",
        # "Address":"TheAddress"}
        # We add the POS/PUT/DEL in the message to indicate the request
        # type
        # Get the notification id from the message,
        # this id will be used as _id for elasticsearch,
        # and also stored as id in the notification_methods document
        # type
        # convert to dict, pop request, and get id
        # after request is removed, the dict can be converted to
        # request body for elasticsearch
        request_type = dict_msg.pop("request", None)
        id = dict_msg["id"]
        if request_type is not None and id is not None:
            # post
            if request_type == 'POST':
                return self._es_conn.post_messages(json.dumps(dict_msg), id)
            # put
            if request_type == 'PUT':
                return self._es_conn.put_messages(json.dumps(dict_msg), id)
            # delete
            if request_type == 'DEL':
                return self._es_conn.del_messages(id)
    @resource_api.Restify('/v2.0/notification-methods/', method='get')
    def do_get_notification_methods(self, req, res):
        """List all notification methods as a JSON 'elements' document."""
        LOG.debug("The notification_methods GET request is received!")
        es_res = self._es_conn.get_messages({})
        res.status = getattr(falcon, 'HTTP_%s' % es_res.status_code)
        LOG.debug('Query to ElasticSearch returned: %s' % es_res.status_code)
        es_res = self._get_notification_method_response(es_res)
        LOG.debug('Query to ElasticSearch returned: %s' % es_res)
        # ES nests matches as hits.hits; es_res here is the outer 'hits'.
        res_data = es_res["hits"]
        if res_data:
            # Stream the JSON body piecewise: a self link followed by one
            # element per ES hit, each decorated with its own self link.
            def _make_body(elements):
                yield '{"links": [{"rel": "self", "href":"'
                yield req.uri + '"}],'
                yield '"elements": ['
                first = True
                for element in elements:
                    if element['_source']:
                        if not first:
                            yield ','
                        else:
                            first = False
                        links = [{"rel": "self",
                                  "href": req.uri + "/" +
                                  element['_source']['id']}]
                        element['_source']['links'] = links
                        yield json.dumps(element['_source'])
                yield ']}'
            res.body = ''.join(_make_body(res_data))
        else:
            res.body = ''
        res.content_type = 'application/json;charset=utf-8'
    @resource_api.Restify('/v2.0/notification-methods/{id}', method='get')
    def do_get_notification_method_by_id(self, req, res, id):
        """Return a single notification method document by its id."""
        LOG.debug("The notification_methods GET by id request is received!")
        es_res = self._es_conn.get_message_by_id(id)
        res.status = getattr(falcon, 'HTTP_%s' % es_res.status_code)
        LOG.debug('Query to ElasticSearch returned: %s' % es_res.status_code)
        es_res = self._get_notification_method_response(es_res)
        LOG.debug('Query to ElasticSearch returned: %s' % es_res)
        if es_res and es_res.get('hits'):
            # Take the first (and only expected) hit for this id.
            res_data = es_res['hits'][0]
            obj = res_data['_source']
            obj['id'] = id
            obj['links'] = [{"rel": "self",
                             "href": req.uri}]
            res.body = json.dumps(obj)
            res.content_type = 'application/json;charset=utf-8'
        else:
            res.body = ''
    @resource_api.Restify('/v2.0/notification-methods/', method='post')
    def do_post_notification_methods(self, req, res):
        """REST entry point: create a notification method."""
        self.post_data(req, res)
    @resource_api.Restify('/v2.0/notification-methods/{id}', method='put')
    def do_put_notification_methods(self, req, res, id):
        """REST entry point: update a notification method."""
        self.put_data(req, res, id)
    @resource_api.Restify('/v2.0/notification-methods/{id}', method='delete')
    def do_delete_notification_methods(self, req, res, id):
        """REST entry point: delete a notification method."""
        self.del_data(req, res, id)
|
{
"content_hash": "1ed0a663484da1036c96b71f5bc27094",
"timestamp": "",
"source": "github",
"line_count": 278,
"max_line_length": 77,
"avg_line_length": 36.1294964028777,
"alnum_prop": 0.5649143767423337,
"repo_name": "litong01/python-monasca",
"id": "7e58af8d9af4004365d573223a3ffa23c58599ae",
"size": "10636",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "kiloeyes/v2/elasticsearch/notificationmethods.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "413655"
},
{
"name": "Shell",
"bytes": "8277"
}
],
"symlink_target": ""
}
|
from django import forms
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
from django.utils.safestring import mark_safe
from satchmo_utils.thumbnail.utils import make_admin_thumbnail
class AdminImageWithThumbnailWidget(forms.FileInput):
    """
    A FileField Widget that shows its current image as a thumbnail if it has one.
    """
    def __init__(self, attrs=None):
        # BUG FIX: the default used to be a mutable dict (attrs={}), which
        # Python evaluates once and shares across every instance created
        # without explicit attrs. None is the conventional Django widget
        # default and Widget.__init__ turns it into a fresh {} per instance.
        super(AdminImageWithThumbnailWidget, self).__init__(attrs)

    def render(self, name, value, attrs=None):
        """Render the file input, preceded by a thumbnail and the current
        image URL when the bound *value* exposes an accessible ``url``.
        """
        output = []
        if value and hasattr(value, "url"):
            thumb = make_admin_thumbnail(value.url)
            # Fall back to the full-size image when no thumbnail could be made.
            if not thumb:
                thumb = value.url
            output.append('<img src="%s" /><br/>%s<br/> %s ' % \
                          (thumb, value.url, _('Change:')))
        output.append(
            super(AdminImageWithThumbnailWidget, self).render(name, value, attrs))
        return mark_safe(u''.join(output))
|
{
"content_hash": "d1b37b95d814d80f9cc39d0a00bce51e",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 92,
"avg_line_length": 41.73913043478261,
"alnum_prop": 0.6416666666666667,
"repo_name": "grengojbo/satchmo",
"id": "db2ade3dd439425d18e7fa6de43b49f880f04c32",
"size": "960",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "satchmo/apps/satchmo_utils/thumbnail/widgets.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "73898"
},
{
"name": "Python",
"bytes": "1752948"
}
],
"symlink_target": ""
}
|
"""
Management class for basic VM operations.
"""
import os
import uuid
from nova.api.metadata import base as instance_metadata
from nova import exception
from nova.openstack.common import cfg
from nova.openstack.common import lockutils
from nova.openstack.common import log as logging
from nova import utils
from nova.virt import configdrive
from nova.virt.hyperv import baseops
from nova.virt.hyperv import constants
from nova.virt.hyperv import vmutils
# Module-level logger for the Hyper-V VM operations code below.
LOG = logging.getLogger(__name__)
# Hyper-V driver configuration options, registered on the global CONF
# object below and consumed by VMOps.
hyperv_opts = [
    cfg.StrOpt('vswitch_name',
               default=None,
               help='Default vSwitch Name, '
                    'if none provided first external is used'),
    cfg.BoolOpt('limit_cpu_features',
                default=False,
                help='Required for live migration among '
                     'hosts with different CPU features'),
    cfg.BoolOpt('config_drive_inject_password',
                default=False,
                help='Sets the admin password in the config drive image'),
    cfg.StrOpt('qemu_img_cmd',
               default="qemu-img.exe",
               help='qemu-img is used to convert between '
                    'different image types'),
    cfg.BoolOpt('config_drive_cdrom',
                default=False,
                help='Attaches the Config Drive image as a cdrom drive '
                     'instead of a disk drive')
]
CONF = cfg.CONF
CONF.register_opts(hyperv_opts)
# use_cow_images is declared by the generic virt driver; import it so the
# option can be read from this module.
CONF.import_opt('use_cow_images', 'nova.virt.driver')
class VMOps(baseops.BaseOps):
def __init__(self, volumeops):
super(VMOps, self).__init__()
self._vmutils = vmutils.VMUtils()
self._volumeops = volumeops
def list_instances(self):
"""Return the names of all the instances known to Hyper-V. """
vms = [v.ElementName
for v in self._conn.Msvm_ComputerSystem(['ElementName'],
Caption="Virtual Machine")]
return vms
def get_info(self, instance):
"""Get information about the VM"""
LOG.debug(_("get_info called for instance"), instance=instance)
return self._get_info(instance['name'])
def _get_info(self, instance_name):
vm = self._vmutils.lookup(self._conn, instance_name)
if vm is None:
raise exception.InstanceNotFound(instance=instance_name)
vm = self._conn.Msvm_ComputerSystem(
ElementName=instance_name)[0]
vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0]
vmsettings = vm.associators(
wmi_association_class='Msvm_SettingsDefineState',
wmi_result_class='Msvm_VirtualSystemSettingData')
settings_paths = [v.path_() for v in vmsettings]
#See http://msdn.microsoft.com/en-us/library/cc160706%28VS.85%29.aspx
summary_info = vs_man_svc.GetSummaryInformation(
[constants.VM_SUMMARY_NUM_PROCS,
constants.VM_SUMMARY_ENABLED_STATE,
constants.VM_SUMMARY_MEMORY_USAGE,
constants.VM_SUMMARY_UPTIME],
settings_paths)[1]
info = summary_info[0]
LOG.debug(_("hyperv vm state: %s"), info.EnabledState)
state = constants.HYPERV_POWER_STATE[info.EnabledState]
memusage = str(info.MemoryUsage)
numprocs = str(info.NumberOfProcessors)
uptime = str(info.UpTime)
LOG.debug(_("Got Info for vm %(instance_name)s: state=%(state)d,"
" mem=%(memusage)s, num_cpu=%(numprocs)s,"
" uptime=%(uptime)s"), locals())
return {'state': state,
'max_mem': info.MemoryUsage,
'mem': info.MemoryUsage,
'num_cpu': info.NumberOfProcessors,
'cpu_time': info.UpTime}
def spawn(self, context, instance, image_meta, injected_files,
admin_password, network_info, block_device_info=None):
"""Create a new VM and start it."""
vm = self._vmutils.lookup(self._conn, instance['name'])
if vm is not None:
raise exception.InstanceExists(name=instance['name'])
ebs_root = self._volumeops.volume_in_mapping(
self._volumeops.get_default_root_device(),
block_device_info)
#If is not a boot from volume spawn
if not (ebs_root):
#Fetch the file, assume it is a VHD file.
vhdfile = self._vmutils.get_vhd_path(instance['name'])
try:
self._cache_image(fn=self._vmutils.fetch_image,
context=context,
target=vhdfile,
fname=instance['image_ref'],
image_id=instance['image_ref'],
user=instance['user_id'],
project=instance['project_id'],
cow=CONF.use_cow_images)
except Exception as exn:
LOG.exception(_('cache image failed: %s'), exn)
self.destroy(instance)
try:
self._create_vm(instance)
if not ebs_root:
self._attach_ide_drive(instance['name'], vhdfile, 0, 0,
constants.IDE_DISK)
else:
self._volumeops.attach_boot_volume(block_device_info,
instance['name'])
#A SCSI controller for volumes connection is created
self._create_scsi_controller(instance['name'])
for vif in network_info:
mac_address = vif['address'].replace(':', '')
self._create_nic(instance['name'], mac_address)
if configdrive.required_by(instance):
self._create_config_drive(instance, injected_files,
admin_password)
LOG.debug(_('Starting VM %s '), instance['name'])
self._set_vm_state(instance['name'], 'Enabled')
LOG.info(_('Started VM %s '), instance['name'])
except Exception as exn:
LOG.exception(_('spawn vm failed: %s'), exn)
self.destroy(instance)
raise exn
def _create_config_drive(self, instance, injected_files, admin_password):
if CONF.config_drive_format != 'iso9660':
vmutils.HyperVException(_('Invalid config_drive_format "%s"') %
CONF.config_drive_format)
LOG.info(_('Using config drive'), instance=instance)
extra_md = {}
if admin_password and CONF.config_drive_inject_password:
extra_md['admin_pass'] = admin_password
inst_md = instance_metadata.InstanceMetadata(instance,
content=injected_files, extra_md=extra_md)
instance_path = self._vmutils.get_instance_path(
instance['name'])
configdrive_path_iso = os.path.join(instance_path, 'configdrive.iso')
LOG.info(_('Creating config drive at %(path)s'),
{'path': configdrive_path_iso}, instance=instance)
with configdrive.config_drive_helper(instance_md=inst_md) as cdb:
try:
cdb.make_drive(configdrive_path_iso)
except exception.ProcessExecutionError, e:
LOG.error(_('Creating config drive failed with error: %s'),
e, instance=instance)
raise
if not CONF.config_drive_cdrom:
drive_type = constants.IDE_DISK
configdrive_path = os.path.join(instance_path,
'configdrive.vhd')
utils.execute(CONF.qemu_img_cmd,
'convert',
'-f',
'raw',
'-O',
'vpc',
configdrive_path_iso,
configdrive_path,
attempts=1)
os.remove(configdrive_path_iso)
else:
drive_type = constants.IDE_DVD
configdrive_path = configdrive_path_iso
self._attach_ide_drive(instance['name'], configdrive_path, 1, 0,
drive_type)
def _create_vm(self, instance):
"""Create a VM but don't start it. """
vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0]
vs_gs_data = self._conn.Msvm_VirtualSystemGlobalSettingData.new()
vs_gs_data.ElementName = instance["name"]
(job, ret_val) = vs_man_svc.DefineVirtualSystem(
[], None, vs_gs_data.GetText_(1))[1:]
if ret_val == constants.WMI_JOB_STATUS_STARTED:
success = self._vmutils.check_job_status(job)
else:
success = (ret_val == 0)
if not success:
raise vmutils.HyperVException(_('Failed to create VM %s') %
instance["name"])
LOG.debug(_('Created VM %s...'), instance["name"])
vm = self._conn.Msvm_ComputerSystem(ElementName=instance["name"])[0]
vmsettings = vm.associators(
wmi_result_class='Msvm_VirtualSystemSettingData')
vmsetting = [s for s in vmsettings
if s.SettingType == 3][0] # avoid snapshots
memsetting = vmsetting.associators(
wmi_result_class='Msvm_MemorySettingData')[0]
#No Dynamic Memory, so reservation, limit and quantity are identical.
mem = long(str(instance['memory_mb']))
memsetting.VirtualQuantity = mem
memsetting.Reservation = mem
memsetting.Limit = mem
(job, ret_val) = vs_man_svc.ModifyVirtualSystemResources(
vm.path_(), [memsetting.GetText_(1)])
LOG.debug(_('Set memory for vm %s...'), instance["name"])
procsetting = vmsetting.associators(
wmi_result_class='Msvm_ProcessorSettingData')[0]
vcpus = long(instance['vcpus'])
procsetting.VirtualQuantity = vcpus
procsetting.Reservation = vcpus
procsetting.Limit = 100000 # static assignment to 100%
if CONF.limit_cpu_features:
procsetting.LimitProcessorFeatures = True
(job, ret_val) = vs_man_svc.ModifyVirtualSystemResources(
vm.path_(), [procsetting.GetText_(1)])
LOG.debug(_('Set vcpus for vm %s...'), instance["name"])
def _create_scsi_controller(self, vm_name):
"""Create an iscsi controller ready to mount volumes """
LOG.debug(_('Creating a scsi controller for %(vm_name)s for volume '
'attaching') % locals())
vms = self._conn.MSVM_ComputerSystem(ElementName=vm_name)
vm = vms[0]
scsicontrldefault = self._conn.query(
"SELECT * FROM Msvm_ResourceAllocationSettingData \
WHERE ResourceSubType = 'Microsoft Synthetic SCSI Controller'\
AND InstanceID LIKE '%Default%'")[0]
if scsicontrldefault is None:
raise vmutils.HyperVException(_('Controller not found'))
scsicontrl = self._vmutils.clone_wmi_obj(self._conn,
'Msvm_ResourceAllocationSettingData', scsicontrldefault)
scsicontrl.VirtualSystemIdentifiers = ['{' + str(uuid.uuid4()) + '}']
scsiresource = self._vmutils.add_virt_resource(self._conn,
scsicontrl, vm)
if scsiresource is None:
raise vmutils.HyperVException(
_('Failed to add scsi controller to VM %s') %
vm_name)
def _get_ide_controller(self, vm, ctrller_addr):
#Find the IDE controller for the vm.
vmsettings = vm.associators(
wmi_result_class='Msvm_VirtualSystemSettingData')
rasds = vmsettings[0].associators(
wmi_result_class='MSVM_ResourceAllocationSettingData')
ctrller = [r for r in rasds
if r.ResourceSubType == 'Microsoft Emulated IDE Controller'
and r.Address == str(ctrller_addr)]
return ctrller
def _attach_ide_drive(self, vm_name, path, ctrller_addr, drive_addr,
drive_type=constants.IDE_DISK):
"""Create an IDE drive and attach it to the vm"""
LOG.debug(_('Creating disk for %(vm_name)s by attaching'
' disk file %(path)s') % locals())
vms = self._conn.MSVM_ComputerSystem(ElementName=vm_name)
vm = vms[0]
ctrller = self._get_ide_controller(vm, ctrller_addr)
if drive_type == constants.IDE_DISK:
resSubType = 'Microsoft Synthetic Disk Drive'
elif drive_type == constants.IDE_DVD:
resSubType = 'Microsoft Synthetic DVD Drive'
#Find the default disk drive object for the vm and clone it.
drivedflt = self._conn.query(
"SELECT * FROM Msvm_ResourceAllocationSettingData \
WHERE ResourceSubType LIKE '%(resSubType)s'\
AND InstanceID LIKE '%%Default%%'" % locals())[0]
drive = self._vmutils.clone_wmi_obj(self._conn,
'Msvm_ResourceAllocationSettingData', drivedflt)
#Set the IDE ctrller as parent.
drive.Parent = ctrller[0].path_()
drive.Address = drive_addr
#Add the cloned disk drive object to the vm.
new_resources = self._vmutils.add_virt_resource(self._conn,
drive, vm)
if new_resources is None:
raise vmutils.HyperVException(
_('Failed to add drive to VM %s') %
vm_name)
drive_path = new_resources[0]
LOG.debug(_('New %(drive_type)s drive path is %(drive_path)s') %
locals())
if drive_type == constants.IDE_DISK:
resSubType = 'Microsoft Virtual Hard Disk'
elif drive_type == constants.IDE_DVD:
resSubType = 'Microsoft Virtual CD/DVD Disk'
#Find the default VHD disk object.
drivedefault = self._conn.query(
"SELECT * FROM Msvm_ResourceAllocationSettingData \
WHERE ResourceSubType LIKE '%(resSubType)s' AND \
InstanceID LIKE '%%Default%%' " % locals())[0]
#Clone the default and point it to the image file.
res = self._vmutils.clone_wmi_obj(self._conn,
'Msvm_ResourceAllocationSettingData', drivedefault)
#Set the new drive as the parent.
res.Parent = drive_path
res.Connection = [path]
#Add the new vhd object as a virtual hard disk to the vm.
new_resources = self._vmutils.add_virt_resource(self._conn, res, vm)
if new_resources is None:
raise vmutils.HyperVException(
_('Failed to add %(drive_type)s image to VM %(vm_name)s') %
locals())
LOG.info(_('Created drive type %(drive_type)s for %(vm_name)s') %
locals())
def _create_nic(self, vm_name, mac):
"""Create a (synthetic) nic and attach it to the vm"""
LOG.debug(_('Creating nic for %s '), vm_name)
#Find the vswitch that is connected to the physical nic.
vms = self._conn.Msvm_ComputerSystem(ElementName=vm_name)
extswitch = self._find_external_network()
if extswitch is None:
raise vmutils.HyperVException(_('Cannot find vSwitch'))
vm = vms[0]
switch_svc = self._conn.Msvm_VirtualSwitchManagementService()[0]
#Find the default nic and clone it to create a new nic for the vm.
#Use Msvm_SyntheticEthernetPortSettingData for Windows or Linux with
#Linux Integration Components installed.
syntheticnics_data = self._conn.Msvm_SyntheticEthernetPortSettingData()
default_nic_data = [n for n in syntheticnics_data
if n.InstanceID.rfind('Default') > 0]
new_nic_data = self._vmutils.clone_wmi_obj(self._conn,
'Msvm_SyntheticEthernetPortSettingData',
default_nic_data[0])
#Create a port on the vswitch.
(new_port, ret_val) = switch_svc.CreateSwitchPort(
Name=str(uuid.uuid4()),
FriendlyName=vm_name,
ScopeOfResidence="",
VirtualSwitch=extswitch.path_())
if ret_val != 0:
LOG.error(_('Failed creating a port on the external vswitch'))
raise vmutils.HyperVException(_('Failed creating port for %s') %
vm_name)
ext_path = extswitch.path_()
LOG.debug(_("Created switch port %(vm_name)s on switch %(ext_path)s")
% locals())
#Connect the new nic to the new port.
new_nic_data.Connection = [new_port]
new_nic_data.ElementName = vm_name + ' nic'
new_nic_data.Address = mac
new_nic_data.StaticMacAddress = 'True'
new_nic_data.VirtualSystemIdentifiers = ['{' + str(uuid.uuid4()) + '}']
#Add the new nic to the vm.
new_resources = self._vmutils.add_virt_resource(self._conn,
new_nic_data, vm)
if new_resources is None:
raise vmutils.HyperVException(_('Failed to add nic to VM %s') %
vm_name)
LOG.info(_("Created nic for %s "), vm_name)
def _find_external_network(self):
"""Find the vswitch that is connected to the physical nic.
Assumes only one physical nic on the host
"""
#If there are no physical nics connected to networks, return.
LOG.debug(_("Attempting to bind NIC to %s ")
% CONF.vswitch_name)
if CONF.vswitch_name:
LOG.debug(_("Attempting to bind NIC to %s ")
% CONF.vswitch_name)
bound = self._conn.Msvm_VirtualSwitch(
ElementName=CONF.vswitch_name)
else:
LOG.debug(_("No vSwitch specified, attaching to default"))
self._conn.Msvm_ExternalEthernetPort(IsBound='TRUE')
if len(bound) == 0:
return None
if CONF.vswitch_name:
return self._conn.Msvm_VirtualSwitch(
ElementName=CONF.vswitch_name)[0]\
.associators(wmi_result_class='Msvm_SwitchPort')[0]\
.associators(wmi_result_class='Msvm_VirtualSwitch')[0]
else:
return self._conn.Msvm_ExternalEthernetPort(IsBound='TRUE')\
.associators(wmi_result_class='Msvm_SwitchPort')[0]\
.associators(wmi_result_class='Msvm_VirtualSwitch')[0]
def reboot(self, instance, network_info, reboot_type):
"""Reboot the specified instance."""
vm = self._vmutils.lookup(self._conn, instance['name'])
if vm is None:
raise exception.InstanceNotFound(instance_id=instance["id"])
self._set_vm_state(instance['name'], 'Reboot')
def destroy(self, instance, network_info=None, cleanup=True,
destroy_disks=True):
"""Destroy the VM. Also destroy the associated VHD disk files"""
LOG.debug(_("Got request to destroy vm %s"), instance['name'])
vm = self._vmutils.lookup(self._conn, instance['name'])
if vm is None:
return
vm = self._conn.Msvm_ComputerSystem(ElementName=instance['name'])[0]
vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0]
#Stop the VM first.
self._set_vm_state(instance['name'], 'Disabled')
vmsettings = vm.associators(
wmi_result_class='Msvm_VirtualSystemSettingData')
rasds = vmsettings[0].associators(
wmi_result_class='MSVM_ResourceAllocationSettingData')
disks = [r for r in rasds
if r.ResourceSubType == 'Microsoft Virtual Hard Disk']
disk_files = []
volumes = [r for r in rasds
if r.ResourceSubType == 'Microsoft Physical Disk Drive']
volumes_drives_list = []
#collect the volumes information before destroying the VM.
for volume in volumes:
hostResources = volume.HostResource
drive_path = hostResources[0]
#Appending the Msvm_Disk path
volumes_drives_list.append(drive_path)
#Collect disk file information before destroying the VM.
for disk in disks:
disk_files.extend([c for c in disk.Connection])
#Nuke the VM. Does not destroy disks.
(job, ret_val) = vs_man_svc.DestroyVirtualSystem(vm.path_())
if ret_val == constants.WMI_JOB_STATUS_STARTED:
success = self._vmutils.check_job_status(job)
elif ret_val == 0:
success = True
if not success:
raise vmutils.HyperVException(_('Failed to destroy vm %s') %
instance['name'])
if destroy_disks:
#Disconnect volumes
for volume_drive in volumes_drives_list:
self._volumeops.disconnect_volume(volume_drive)
#Delete associated vhd disk files.
for disk in disk_files:
vhdfile = self._conn_cimv2.query(
"Select * from CIM_DataFile where Name = '" +
disk.replace("'", "''") + "'")[0]
LOG.debug(_("Del: disk %(vhdfile)s vm %(name)s")
% {'vhdfile': vhdfile, 'name': instance['name']})
vhdfile.Delete()
def pause(self, instance):
"""Pause VM instance."""
LOG.debug(_("Pause instance"), instance=instance)
self._set_vm_state(instance["name"], 'Paused')
def unpause(self, instance):
"""Unpause paused VM instance."""
LOG.debug(_("Unpause instance"), instance=instance)
self._set_vm_state(instance["name"], 'Enabled')
def suspend(self, instance):
"""Suspend the specified instance."""
print instance
LOG.debug(_("Suspend instance"), instance=instance)
self._set_vm_state(instance["name"], 'Suspended')
def resume(self, instance):
"""Resume the suspended VM instance."""
LOG.debug(_("Resume instance"), instance=instance)
self._set_vm_state(instance["name"], 'Enabled')
def power_off(self, instance):
"""Power off the specified instance."""
LOG.debug(_("Power off instance"), instance=instance)
self._set_vm_state(instance["name"], 'Disabled')
def power_on(self, instance):
"""Power on the specified instance"""
LOG.debug(_("Power on instance"), instance=instance)
self._set_vm_state(instance["name"], 'Enabled')
def _set_vm_state(self, vm_name, req_state):
"""Set the desired state of the VM"""
vms = self._conn.Msvm_ComputerSystem(ElementName=vm_name)
if len(vms) == 0:
return False
(job, ret_val) = vms[0].RequestStateChange(
constants.REQ_POWER_STATE[req_state])
success = False
if ret_val == constants.WMI_JOB_STATUS_STARTED:
success = self._vmutils.check_job_status(job)
elif ret_val == 0:
success = True
elif ret_val == 32775:
#Invalid state for current operation. Typically means it is
#already in the state requested
success = True
if success:
LOG.info(_("Successfully changed vm state of %(vm_name)s"
" to %(req_state)s") % locals())
else:
msg = _("Failed to change vm state of %(vm_name)s"
" to %(req_state)s") % locals()
LOG.error(msg)
raise vmutils.HyperVException(msg)
def _cache_image(self, fn, target, fname, cow=False, Size=None,
*args, **kwargs):
"""Wrapper for a method that creates an image that caches the image.
This wrapper will save the image into a common store and create a
copy for use by the hypervisor.
The underlying method should specify a kwarg of target representing
where the image will be saved.
fname is used as the filename of the base image. The filename needs
to be unique to a given image.
If cow is True, it will make a CoW image instead of a copy.
"""
@lockutils.synchronized(fname, 'nova-')
def call_if_not_exists(path, fn, *args, **kwargs):
if not os.path.exists(path):
fn(target=path, *args, **kwargs)
if not os.path.exists(target):
LOG.debug(_("use_cow_image:%s"), cow)
if cow:
base = self._vmutils.get_base_vhd_path(fname)
call_if_not_exists(base, fn, *args, **kwargs)
image_service = self._conn.query(
"Select * from Msvm_ImageManagementService")[0]
(job, ret_val) = \
image_service.CreateDifferencingVirtualHardDisk(
Path=target, ParentPath=base)
LOG.debug(
"Creating difference disk: JobID=%s, Source=%s, Target=%s",
job, base, target)
if ret_val == constants.WMI_JOB_STATUS_STARTED:
success = self._vmutils.check_job_status(job)
else:
success = (ret_val == 0)
if not success:
raise vmutils.HyperVException(
_('Failed to create Difference Disk from '
'%(base)s to %(target)s') % locals())
else:
call_if_not_exists(target, fn, *args, **kwargs)
|
{
"content_hash": "a9b9d37548906adca0b9084b651fbee7",
"timestamp": "",
"source": "github",
"line_count": 587,
"max_line_length": 79,
"avg_line_length": 43.262350936967636,
"alnum_prop": 0.5713329395550305,
"repo_name": "fajoy/nova",
"id": "43c6e6af538b0373cce4ef21e7ea0dbc22c0da65",
"size": "26079",
"binary": false,
"copies": "1",
"ref": "refs/heads/grizzly-2",
"path": "nova/virt/hyperv/vmops.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "16002"
},
{
"name": "JavaScript",
"bytes": "7403"
},
{
"name": "Python",
"bytes": "7567423"
},
{
"name": "Shell",
"bytes": "15428"
}
],
"symlink_target": ""
}
|
# BUG FIX: this line was '__author__ == "ttn6ew"', a comparison against an
# undefined name (NameError at import time) rather than an assignment.
__author__ = "ttn6ew"
import csv
import psycopg2
# Postgres connection settings used by load_course_database().
PG_USER = "postgres"
# NOTE(review): hard-coded credentials checked into source — acceptable for
# a class demo, but should come from the environment in real deployments.
PG_USER_PASS = "cs3240"
# Extra host/port fragment appended to the DSN; empty means local default.
PG_HOST_INFO = ""
def load_course_database(db_name, csv_filename):
    """Bulk-load course rows from a CSV file into the ``coursedata`` table.

    Args:
        db_name: Name of the Postgres database to connect to.
        csv_filename: Path to a CSV file whose columns are, in order:
            deptID, courseNum, semester, meetingType, seatsTaken,
            seatsOffered, instructor.
    """
    conn = psycopg2.connect("dbname=" + db_name + " user=" + PG_USER +
                            " password=" + PG_USER_PASS + PG_HOST_INFO)
    print("** Connected to database.")
    try:
        cur = conn.cursor()
        # BUG FIX: 'rU' mode was deprecated and removed in Python 3.11;
        # plain text mode already handles universal newlines.
        with open(csv_filename, 'r') as data:
            reader = csv.reader(data)
            for row in reader:
                cur.execute(
                    "INSERT INTO coursedata (deptID, courseNum, semester, "
                    "meetingType, seatsTaken, seatsOffered, instructor) "
                    "VALUES (%s, %s, %s, %s, %s, %s, %s)",
                    (row[0], row[1], row[2], row[3], row[4], row[5], row[6]))
        # BUG FIX: psycopg2 runs statements inside a transaction; without an
        # explicit commit every INSERT was rolled back on exit.
        conn.commit()
        print("** Executed SQL INSERT into database.")
    finally:
        # Always release the connection, even if an insert fails.
        conn.close()
# Script entry point: ingest the bundled five-year SEAS course CSV into the
# local "coursedata" Postgres database.
if __name__ == "__main__":
    load_course_database("coursedata", "seas-courses-5years.csv")
|
{
"content_hash": "c9a3eafa255f1bb3d7d690b8c036efaf",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 117,
"avg_line_length": 40.23076923076923,
"alnum_prop": 0.48565965583174,
"repo_name": "ttn6ew/cs3240-labdemo",
"id": "c8bb2a532b9a9c848a5af4e3be1fd6149fc48fc6",
"size": "1046",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "course_reader.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1210"
}
],
"symlink_target": ""
}
|
import numpy, theano, unittest
from theano.compile.pfunc import pfunc
from theano.compile.sharedvalue import shared
from theano import tensor
from theano.tensor.nnet import sigmoid
class NNet(object):
    """A minimal one-hidden-layer feed-forward network built with Theano.

    Weights start at zero; training is plain SGD on a summed squared-error
    cost. Compiled functions:
      sgd_step(input, target) -> (output, cost), updating w1/w2 in place
      compute_output(input)   -> forward pass output
      output_from_hidden(hidden) -> output layer applied to a given hidden
                                    activation
    """
    def __init__(self,
                 input=tensor.dvector('input'),
                 target=tensor.dvector('target'),
                 n_input=1, n_hidden=1, n_output=1, lr=1e-3, **kw):
        # NOTE(review): the default 'input'/'target' symbolic variables are
        # created once at import time, so instances constructed without
        # explicit arguments share them — confirm this is intended.
        super(NNet, self).__init__(**kw)

        self.input = input
        self.target = target
        # Learning rate and both weight matrices live in shared storage so
        # sgd_step can update them in place.
        self.lr = shared(lr, 'learning_rate')
        self.w1 = shared(numpy.zeros((n_hidden, n_input)), 'w1')
        self.w2 = shared(numpy.zeros((n_output, n_hidden)), 'w2')
        # print self.lr.type

        # Symbolic forward pass: sigmoid hidden layer, linear output layer.
        self.hidden = sigmoid(tensor.dot(self.w1, self.input))
        self.output = tensor.dot(self.w2, self.hidden)
        # Summed squared error against the target vector.
        self.cost = tensor.sum((self.output - self.target)**2)

        # One SGD update per weight matrix, derived via symbolic gradients.
        self.sgd_updates = {
            self.w1: self.w1 - self.lr * tensor.grad(self.cost, self.w1),
            self.w2: self.w2 - self.lr * tensor.grad(self.cost, self.w2)}

        self.sgd_step = pfunc(
            params=[self.input, self.target],
            outputs=[self.output, self.cost],
            updates=self.sgd_updates)

        self.compute_output = pfunc([self.input], self.output)

        self.output_from_hidden = pfunc([self.hidden], self.output)
class TestNnet(unittest.TestCase):
    """Regression test: three SGD epochs on fixed random data must
    reproduce a known mean cost."""
    def test_nnet(self):
        # Fixed seed keeps the data — and therefore the expected mean
        # cost below — deterministic.
        rng = numpy.random.RandomState(1827)
        data = rng.rand(10, 4)
        nnet = NNet(n_input=3, n_hidden=10)

        for epoch in range(3):
            mean_cost = 0
            for x in data:
                # First three columns are the input, last one the target.
                input = x[0:3]
                target = x[3:]
                output, cost = nnet.sgd_step(input, target)
                mean_cost += cost
            mean_cost /= float(len(data))
            # print 'Mean cost at epoch %s: %s' % (epoch, mean_cost)

        # Regression value for this seed/architecture after 3 epochs.
        self.assertTrue(abs(mean_cost - 0.20588975452) < 1e-6)

        # Just call functions to make sure they do not crash.
        # (Deliberately reuses 'input' from the last loop iteration.)
        out = nnet.compute_output(input)
        out = nnet.output_from_hidden(numpy.ones(10))
|
{
"content_hash": "aeeda68cdc9d78c20645855fc8a59c34",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 81,
"avg_line_length": 35.35,
"alnum_prop": 0.5695426685525695,
"repo_name": "rizar/attention-lvcsr",
"id": "dbbb23371117d77f98235768307340cdecf7c847",
"size": "2121",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "libs/Theano/theano/compile/tests/test_misc.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1288"
},
{
"name": "C",
"bytes": "156742"
},
{
"name": "C++",
"bytes": "209135"
},
{
"name": "CSS",
"bytes": "3500"
},
{
"name": "Cuda",
"bytes": "231732"
},
{
"name": "Gnuplot",
"bytes": "484"
},
{
"name": "HTML",
"bytes": "33356"
},
{
"name": "Jupyter Notebook",
"bytes": "191071"
},
{
"name": "Makefile",
"bytes": "973"
},
{
"name": "Python",
"bytes": "9313243"
},
{
"name": "Shell",
"bytes": "34454"
},
{
"name": "TeX",
"bytes": "102624"
}
],
"symlink_target": ""
}
|
"""Network client for reading (and eventually writing) parameters."""
from makani.avionics.common import aio
from makani.avionics.common import pack_avionics_messages
from makani.avionics.firmware.params import codec
# Shorthand aliases for the parameter-section enum values defined in
# pack_avionics_messages; callers pass these to Client.GetSection().
SECTION_CONFIG = pack_avionics_messages.kParamSectionConfig
SECTION_CALIB = pack_avionics_messages.kParamSectionCalib
SECTION_SERIAL = pack_avionics_messages.kParamSectionSerial
SECTION_CARRIER_SERIAL = pack_avionics_messages.kParamSectionCarrierSerial
class Client(object):
    """Network client for reading (and eventually writing) parameters."""

    def __init__(self, timeout=None):
        # Subscribe to both message types: requests go out, responses come
        # back over the same AIO client.
        self.aio_client = aio.AioClient(['kMessageTypeParamRequest',
                                         'kMessageTypeParamResponse'],
                                        timeout=timeout)

    def _SendBlockRequest(self, node_id, section, offset):
        """Fill out and send a ParamRequestMessage."""
        msg = pack_avionics_messages.ParamRequestMessage()
        msg.node_id = node_id
        msg.section = section
        msg.offset = offset
        self.aio_client.Send(msg, 'kMessageTypeParamRequest',
                             'kAioNodeOperator')

    def _GetBlock(self, node_id, section, data, offset):
        """Query a node for a block of parameters from the specified section."""
        self._SendBlockRequest(node_id, section, offset)
        response = None
        # Ignore anything that is not a ParamResponseMessage (e.g. our own
        # echoed requests); Recv raises socket.timeout if one was configured.
        while response is None:
            (_, _, received) = self.aio_client.Recv()
            if isinstance(received,
                          pack_avionics_messages.ParamResponseMessage):
                response = received
        # TODO: Verify section, offset, and length.
        if response.length > 0:
            data[offset:offset + response.length] = \
                response.data[0:response.length]
        return response.length

    def GetSection(self, node_id, section):
        """Obtain parameters from the specified section in the node node_id.

        Args:
          node_id: AIO node number integer.
          section: Parameter section identifier, e.g. SECTION_CALIB.

        Returns:
          A parameter object for the particular node.

        Raises:
          socket.timeout if a timeout was specified in the constructor and the
            timeout was exceeded while querying parameters.
        """
        buf = bytearray(64 * 1024)  # TODO: Define a max param size.
        offset = 0
        # Keep requesting blocks until a short (or empty) block signals the
        # end of the section.
        while offset < len(buf):
            count = self._GetBlock(node_id, section, buf, offset)
            offset += count
            if count < 1024:
                break
        return codec.DecodeBin(buf)
|
{
"content_hash": "e557f543a1c06f4fc60b04cafd498063",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 76,
"avg_line_length": 37.36507936507937,
"alnum_prop": 0.6856414613423959,
"repo_name": "google/makani",
"id": "4d190573b05e620cd7d45ef6b66872af3ed82a72",
"size": "2943",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "avionics/firmware/params/client.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "119408"
},
{
"name": "C",
"bytes": "20174258"
},
{
"name": "C++",
"bytes": "30512322"
},
{
"name": "CSS",
"bytes": "8921"
},
{
"name": "Dockerfile",
"bytes": "1381"
},
{
"name": "Emacs Lisp",
"bytes": "1134"
},
{
"name": "HTML",
"bytes": "65745"
},
{
"name": "Java",
"bytes": "1558475"
},
{
"name": "JavaScript",
"bytes": "130727"
},
{
"name": "Jupyter Notebook",
"bytes": "1154728"
},
{
"name": "MATLAB",
"bytes": "1026162"
},
{
"name": "Makefile",
"bytes": "2798"
},
{
"name": "Objective-C",
"bytes": "62972"
},
{
"name": "Perl",
"bytes": "870724"
},
{
"name": "Python",
"bytes": "5552781"
},
{
"name": "RPC",
"bytes": "195736"
},
{
"name": "Roff",
"bytes": "2567875"
},
{
"name": "SWIG",
"bytes": "8663"
},
{
"name": "Shell",
"bytes": "297941"
},
{
"name": "Starlark",
"bytes": "462998"
},
{
"name": "Vim Script",
"bytes": "2281"
},
{
"name": "XC",
"bytes": "50398"
},
{
"name": "XS",
"bytes": "49289"
}
],
"symlink_target": ""
}
|
__author__ = 'piratos'
from django import template, templatetags
# Library instance that collects this module's template filters.
register = template.Library()
@register.filter(name='url_it')
def url_it(value):
    """Template filter: swap every space for an underscore (URL-friendly)."""
    return '_'.join(value.split(' '))
|
{
"content_hash": "a479d31b0346ec266eccec19965eae25",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 41,
"avg_line_length": 25.857142857142858,
"alnum_prop": 0.6961325966850829,
"repo_name": "piratos/ctfbulletin",
"id": "c140a83ae00879ee919b93da2d36d4c3b1c0b526",
"size": "181",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "blog/templatetags/blog_filters.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2231"
},
{
"name": "JavaScript",
"bytes": "22972"
},
{
"name": "Python",
"bytes": "33522"
}
],
"symlink_target": ""
}
|
from django.conf.urls import patterns, include, url
# Legacy-style URLconf using patterns() (old Django API, removed in later
# releases); routes the site root to the home view by dotted path.
urlpatterns = patterns('',
    # Examples:
    url(r'^$', 'catcurator.views.home', name='home'),
)
|
{
"content_hash": "97c03b241efbbeddb496f8ecb36775c5",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 53,
"avg_line_length": 25.333333333333332,
"alnum_prop": 0.6578947368421053,
"repo_name": "momamene/catcurator",
"id": "982d956c9f9656e24f184ed0cbc2e2d33a00774f",
"size": "152",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "catcurator/catcurator/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "673"
},
{
"name": "Python",
"bytes": "3077"
}
],
"symlink_target": ""
}
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import sys
###
import evaluation
import check_tests as ct
import features
###
from sklearn.ensemble import GradientBoostingClassifier, RandomForestClassifier
from sklearn.metrics import roc_curve, auc
# Load the training set, indexed by the 'id' column.
train = pd.read_csv('../data/training.csv', index_col='id')
# Columns 1-5 of the training frame are used as model features.
subset = [1,2,3,4,5]
variables = train.columns[subset]
# Train a model on the chosen variables ('rf' presumably selects a random
# forest inside features.main — confirm against features module).
trained_model = features.main('rf', variables)
# Run the agreement / correlation / weighted-AUC checks on the model.
ct.agreement(trained_model, variables)
ct.correlation(trained_model, variables)
ct.weightedAuc(trained_model, variables, train)
|
{
"content_hash": "245307d13115f0d275fc1cad58e74334",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 79,
"avg_line_length": 26.904761904761905,
"alnum_prop": 0.7823008849557522,
"repo_name": "ZahidP/tasty-physics",
"id": "bf9352c311291330805c2167155eb243fa0c7cae",
"size": "565",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "py/select_train_predict.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "9084"
}
],
"symlink_target": ""
}
|
"""
=======================
eJWST Dummy Tap Handler
=======================
European Space Astronomy Centre (ESAC)
European Space Agency (ESA)
"""
from astropy.table import Table
import numpy as np
from astroquery.utils.tap.model.job import Job
class DummyTapHandler:
    """Test double for the eJWST TAP handler.

    Each public method records its own name and keyword arguments instead of
    performing a network request; tests then call ``check_call()`` to assert
    that the code under test invoked exactly the expected method with the
    expected parameters.  ``launch_job``/``launch_job_async`` return a canned
    ``Job`` whose results can be overridden via ``set_job_results()``.
    """

    def __init__(self):
        self.__invokedMethod = None
        self.__parameters = {}
        self.__dummy_results = {"filename": ["dummy_filename"],
                                "artifactid": ["dummy_artifact"],
                                "observationid": Table({'obs': np.arange(1)})}
        self.__job = Job(async_job=False)
        self.__job.set_results(self.__dummy_results)

    def reset(self):
        """Clear the recorded call and restore the canned job results."""
        self.__parameters = {}
        self.__invokedMethod = None
        # NOTE(review): the table column here is 'a' while __init__ uses
        # 'obs' — looks unintentional, but existing tests may depend on it;
        # confirm before unifying.
        self.__dummy_results = {"filename": ["dummy_filename"],
                                "artifactid": ["dummy_artifact"],
                                "observationid": Table({'a': np.arange(1)})}
        self.__job = Job(async_job=False)
        self.__job.set_results(self.__dummy_results)

    def set_job(self, job):
        self.__job = job

    def get_job(self):
        return self.__job

    def check_call(self, method_name, parameters):
        """Assert that *method_name* was the last call, with *parameters*."""
        self.check_method(method_name)
        self.check_parameters(parameters, method_name)

    def check_method(self, method):
        """Raise ValueError unless *method* was the last method invoked."""
        if method == self.__invokedMethod:
            return
        # Fixed: the previous message embedded a stray '+' and str() calls,
        # producing e.g. "Method '+foo' not invoked...".
        raise ValueError(f"Method '{method}' not invoked. "
                         f"(Invoked method is '{self.__invokedMethod}')")

    def check_parameters(self, parameters, method_name):
        """Raise ValueError unless the recorded kwargs equal *parameters*."""
        print("FOUND")
        print(self.__parameters)
        print("EXPECTED")
        print(parameters)
        if parameters is None:
            return len(self.__parameters) == 0
        if len(parameters) != len(self.__parameters):
            raise ValueError(f"Wrong number of parameters "
                             f"for method '{method_name}'"
                             f" Found: {len(self.__parameters)}. "
                             f"Expected {len(parameters)}")
        for key in parameters:
            if key in self.__parameters:
                # check value
                if self.__parameters[key] != parameters[key]:
                    raise ValueError(f"Wrong {key} parameter value for "
                                     f" method '{method_name}'. "
                                     f"Found: {self.__parameters[key]}. "
                                     f"Expected: {parameters[key]}")
            else:
                raise ValueError(f"Parameter '{str(key)}' not found "
                                 f"for method '{method_name}'")

    # The methods below are pure recorders: each stores its name and kwargs
    # for later inspection via check_call() and returns None (or the canned
    # job, for the launch_job variants).

    def load_tables(self, only_names=False, include_shared_tables=False,
                    verbose=False):
        self.__invokedMethod = 'load_tables'
        self.__parameters['only_names'] = only_names
        self.__parameters['include_shared_tables'] = include_shared_tables
        self.__parameters['verbose'] = verbose
        return None

    def load_table(self, table, verbose=False):
        self.__invokedMethod = 'load_table'
        self.__parameters['table'] = table
        self.__parameters['verbose'] = verbose
        return None

    def launch_job(self, query, name=None, output_file=None,
                   output_format="votable", verbose=False, dump_to_file=False,
                   upload_resource=None, upload_table_name=None):
        self.__invokedMethod = 'launch_job'
        self.__parameters['query'] = query
        self.__parameters['name'] = name
        self.__parameters['output_file'] = output_file
        self.__parameters['output_format'] = output_format
        self.__parameters['verbose'] = verbose
        self.__parameters['dump_to_file'] = dump_to_file
        self.__parameters['upload_resource'] = upload_resource
        self.__parameters['upload_table_name'] = upload_table_name
        return self.__job

    def launch_job_async(self, query, name=None, output_file=None,
                         output_format="votable", verbose=False,
                         dump_to_file=False, background=False,
                         upload_resource=None, upload_table_name=None):
        self.__invokedMethod = 'launch_job_async'
        self.__parameters['query'] = query
        self.__parameters['name'] = name
        self.__parameters['output_file'] = output_file
        self.__parameters['output_format'] = output_format
        self.__parameters['verbose'] = verbose
        self.__parameters['dump_to_file'] = dump_to_file
        self.__parameters['background'] = background
        self.__parameters['upload_resource'] = upload_resource
        self.__parameters['upload_table_name'] = upload_table_name
        return self.__job

    def load_async_job(self, jobid=None, name=None, verbose=False):
        self.__invokedMethod = 'load_async_job'
        self.__parameters['jobid'] = jobid
        self.__parameters['name'] = name
        self.__parameters['verbose'] = verbose
        return None

    def search_async_jobs(self, jobfilter=None, verbose=False):
        self.__invokedMethod = 'search_async_jobs'
        self.__parameters['jobfilter'] = jobfilter
        self.__parameters['verbose'] = verbose
        return None

    def list_async_jobs(self, verbose=False):
        self.__invokedMethod = 'list_async_jobs'
        self.__parameters['verbose'] = verbose
        return None

    def query_object(self, coordinate, radius=None, width=None, height=None,
                     verbose=False):
        self.__invokedMethod = 'query_object'
        self.__parameters['coordinate'] = coordinate
        self.__parameters['radius'] = radius
        self.__parameters['width'] = width
        self.__parameters['height'] = height
        self.__parameters['verbose'] = verbose
        return None

    def query_object_async(self, coordinate, radius=None, width=None,
                           height=None, verbose=False):
        self.__invokedMethod = 'query_object_async'
        self.__parameters['coordinate'] = coordinate
        self.__parameters['radius'] = radius
        self.__parameters['width'] = width
        self.__parameters['height'] = height
        self.__parameters['verbose'] = verbose
        return None

    def query_region(self, coordinate, radius=None, width=None):
        self.__invokedMethod = 'query_region'
        self.__parameters['coordinate'] = coordinate
        self.__parameters['radius'] = radius
        self.__parameters['width'] = width
        return None

    def query_region_async(self, coordinate, radius=None, width=None):
        self.__invokedMethod = 'query_region_async'
        self.__parameters['coordinate'] = coordinate
        self.__parameters['radius'] = radius
        self.__parameters['width'] = width
        return None

    def get_images(self, coordinate):
        self.__invokedMethod = 'get_images'
        self.__parameters['coordinate'] = coordinate
        return None

    def get_images_async(self, coordinate):
        # NOTE(review): records 'get_images_sync', not 'get_images_async'.
        # This looks like a typo, but tests elsewhere may assert on the
        # recorded string — confirm before changing.
        self.__invokedMethod = 'get_images_sync'
        self.__parameters['coordinate'] = coordinate
        return None

    def cone_search(self, coordinate, radius, output_file=None,
                    output_format="votable", verbose=False,
                    dump_to_file=False):
        self.__invokedMethod = 'cone_search'
        self.__parameters['coordinate'] = coordinate
        self.__parameters['radius'] = radius
        self.__parameters['output_file'] = output_file
        self.__parameters['output_format'] = output_format
        self.__parameters['verbose'] = verbose
        self.__parameters['dump_to_file'] = dump_to_file
        return None

    def cone_search_async(self, coordinate, radius, background=False,
                          output_file=None, output_format="votable",
                          verbose=False, dump_to_file=False):
        self.__invokedMethod = 'cone_search_async'
        self.__parameters['coordinate'] = coordinate
        self.__parameters['radius'] = radius
        self.__parameters['background'] = background
        self.__parameters['output_file'] = output_file
        self.__parameters['output_format'] = output_format
        self.__parameters['verbose'] = verbose
        self.__parameters['dump_to_file'] = dump_to_file
        return None

    def remove_jobs(self, jobs_list, verbose=False):
        self.__invokedMethod = 'remove_jobs'
        self.__parameters['jobs_list'] = jobs_list
        self.__parameters['verbose'] = verbose
        return None

    def save_results(self, job, verbose=False):
        self.__invokedMethod = 'save_results'
        self.__parameters['job'] = job
        self.__parameters['verbose'] = verbose
        return None

    def login(self, user=None, password=None, credentials_file=None,
              verbose=False):
        self.__invokedMethod = 'login'
        self.__parameters['user'] = user
        self.__parameters['password'] = password
        self.__parameters['credentials_file'] = credentials_file
        self.__parameters['verbose'] = verbose
        return None

    def login_gui(self, verbose=False):
        self.__invokedMethod = 'login_gui'
        self.__parameters['verbose'] = verbose
        return None

    def logout(self, verbose=False):
        self.__invokedMethod = 'logout'
        self.__parameters['verbose'] = verbose
        return None

    def load_data(self, params_dict, output_file=None, verbose=False):
        self.__invokedMethod = 'load_data'
        self.__parameters['params_dict'] = params_dict
        self.__parameters['output_file'] = output_file
        self.__parameters['verbose'] = verbose

    def set_job_results(self, results):
        """Replace the canned results returned by the stored job."""
        self.__dummy_results = results
        self.__job.set_results(self.__dummy_results)
|
{
"content_hash": "61abc0aa36f1ab990f13c964516c3341",
"timestamp": "",
"source": "github",
"line_count": 248,
"max_line_length": 78,
"avg_line_length": 39.95564516129032,
"alnum_prop": 0.5747300433948935,
"repo_name": "imbasimba/astroquery",
"id": "d8e940cc7d6894204e246e86ba4a430c795ee365",
"size": "9973",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "astroquery/esa/jwst/tests/DummyTapHandler.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "493404"
},
{
"name": "Python",
"bytes": "2852847"
}
],
"symlink_target": ""
}
|
"""
Contains general node and attribute functions, as well as the main `PyNode` base class.
For the rest of the class hierarchy, including `DependNode <pymel.core.nodetypes.DependNode>`, `Transform <pymel.core.nodetypes.Transform>`,
and `Attribute <pymel.core.nodetypes.Attribute>`, see :mod:`pymel.core.nodetypes`.
"""
from __future__ import with_statement
import sys
import os
import re
import itertools
import inspect
import pymel.internal.pmcmds as cmds
import pymel.util as _util
import pymel.internal.factories as _factories
import pymel.internal.pwarnings as _warnings
import pymel.internal.startup as _startup
import pymel.api as _api
import pymel.versions as _versions
import pymel.core.system as system
import datatypes
from maya.cmds import about as _about
from pymel.internal import getLogger as _getLogger
_logger = _getLogger(__name__)
# TODO: factories.functionFactory should automatically handle conversion of output to PyNodes...
# ...so we shouldn't always have to do it here as well?
# Get config settings for checking if an attribute is referenced before changing the lock state
CHECK_ATTR_BEFORE_LOCK = _startup.pymel_options.get('check_attr_before_lock', False)
def _getPymelTypeFromObject(obj, name):
    """Return the pymel class that should wrap the given API object.

    Dependency nodes are looked up by maya type name (falling back to
    DagNode/DependNode) and filtered through the virtual-class registry;
    components map through apiEnumsToPyComponents; attribute objects map
    to AttributeDefaults.  Anything else raises RuntimeError.
    """
    if obj.hasFn(_api.MFn.kDependencyNode):
        depFn = _api.MFnDependencyNode(obj)
        mayaType = depFn.typeName()
        import nodetypes
        # make sure that if we have a dag node, we return at least DagNode
        # instead of DependNode - otherwise, we will end up with
        # __apiobjects__ = {'MDagPath':MDagPath(...)}, but a pymel type of
        # DependNode... and DependNode.__apihandle__() always assumes that
        # MObjectHandle is always in __apiobjects__
        defaultName = 'DagNode' if obj.hasFn(_api.MFn.kDagNode) else 'DependNode'
        clsName = nodetypes.mayaTypeNameToPymelTypeName.get(mayaType,
                                                            defaultName)
        cls = getattr(nodetypes, clsName)
        return _factories.virtualClasses.getVirtualClass(cls, obj, name, depFn)
    elif obj.hasFn(_api.MFn.kComponent):
        compTypes = _factories.apiEnumsToPyComponents.get(obj.apiType(), None)
        if compTypes is None:
            # Unknown component type: note it and fall back to the generic
            # Component wrapper.
            _logger.raiseLog(_logger.DEBUG, 'Got an instance of a component which could not be mapped to a pymel class: %s' % obj.apiTypeStr())
            compTypes = [Component]
        if len(compTypes) != 1:
            _logger.raiseLog(_logger.WARNING, 'Got an instance of a component with more than one possible PyNode type: %s' % obj.apiTypeStr())
        return compTypes[0]
    elif obj.hasFn(_api.MFn.kAttribute):
        return AttributeDefaults
    raise RuntimeError('Could not determine pymel type for object of type %s' % obj.apiTypeStr())
def _getPymelType(arg, name):
    """Resolve the pymel class and cached API objects for *arg*.

    *arg* may be an MObject, MObjectHandle, MDagPath or MPlug.  Returns a
    ``(pymelType, results)`` pair where *results* caches the API wrapper(s)
    keyed by their class name.  Raises MayaAttributeError for an invalid
    MPlug and ValueError for any other unsupported input.
    """
    results = {}

    if isinstance(arg, _api.MObject):
        results['MObjectHandle'] = _api.MObjectHandle(arg)
        return _getPymelTypeFromObject(arg, name), results

    if isinstance(arg, _api.MObjectHandle):
        results['MObjectHandle'] = arg
        return _getPymelTypeFromObject(arg.object(), name), results

    if isinstance(arg, _api.MDagPath):
        results['MDagPath'] = arg
        return _getPymelTypeFromObject(arg.node(), name), results

    if isinstance(arg, _api.MPlug):
        # Attributes are handled directly -- no node-type lookup needed.
        results['MPlug'] = arg
        if not _api.isValidMPlug(arg):
            raise MayaAttributeError("Unable to determine Pymel type: the passed MPlug is not valid")
        return Attribute, results

    raise ValueError("Unable to determine Pymel type for %r" % (arg,))
#-----------------------------------------------
# Enhanced Commands
#-----------------------------------------------
# TODO: possible bugfix for 'parent'?
# Docs state 'If there is only a single object specified then the selected objects are parented to that object. '
# ...but actual behavior is to parent the named object (and any other selected objects) to the last selected object
#-----------------------
# Object Manipulation
#-----------------------
def select(*args, **kwargs):
    """
    Modifications:
      - passing an empty list no longer causes an error.
        instead, the selection is cleared if the selection mod is replace (the default);
        otherwise, it does nothing
    """
    try:
        cmds.select(*args, **kwargs)
    # Python-2 except syntax; msg is bound to the TypeError instance that
    # cmds.select raises for an explicit empty-list argument.
    except TypeError, msg:
        if args == ([],):
            # With any additive/subtractive mode flag set, an empty list is
            # a no-op rather than a clear.
            for modeFlag in ('add', 'af', 'addFirst',
                             'd', 'deselect',
                             'tgl', 'toggle'):
                if kwargs.get(modeFlag, False):
                    return
            # The mode is replace, clear the selection
            cmds.select(cl=True)
        else:
            # Not the empty-list case: propagate the original error.
            raise TypeError, msg
#select.__doc__ = mel.help('select') + select.__doc__
# TODO: make it handle multiple objects, like original command
def move(*args, **kwargs):
    """
    Modifications:
      - allows any iterable object to be passed as first argument::

            move("pSphere1", [0,1,2])

    NOTE: this command also reorders the argument order to be more intuitive, with the object first
    """
    # Peel off a leading object so it can be re-appended last, which is the
    # order the underlying maya command expects.
    target = None
    if args and isinstance(args[0], (basestring, PyNode)):
        target, args = args[0], args[1:]
    # A single iterable argument (e.g. [x, y, z]) is expanded in place.
    if len(args) == 1 and _util.isIterable(args[0]):
        args = tuple(args[0])
    if target is not None:
        args += (target,)
    return cmds.move(*args, **kwargs)
def scale(obj, *args, **kwargs):
    """
    Modifications:
      - allows any iterable object to be passed as first argument::

            scale("pSphere1", [0,1,2])

    NOTE: this command also reorders the argument order to be more intuitive, with the object first
    """
    # Expand a single iterable argument, then append the object last as the
    # underlying maya command expects.
    if len(args) == 1 and _util.isIterable(args[0]):
        values = tuple(args[0])
    else:
        values = args
    return cmds.scale(*(values + (obj,)), **kwargs)
def rotate(obj, *args, **kwargs):
    """
    Modifications:
      - allows any iterable object to be passed as first argument::

            rotate("pSphere1", [0,1,2])

    NOTE: this command also reorders the argument order to be more intuitive, with the object first
    """
    # Expand a single iterable argument into positional components.
    values = tuple(args[0]) if (len(args) == 1 and _util.isIterable(args[0])) else args
    # The object goes last, matching the underlying maya command.
    return cmds.rotate(*(values + (obj,)), **kwargs)
#-----------------------
# Attributes
#-----------------------
def connectAttr(source, destination, **kwargs):
    """
    Maya Bug Fix:
      - even with the 'force' flag enabled, the command would raise an error if the connection already existed.
    """
    if kwargs.get('force', False) or kwargs.get('f', False):
        try:
            cmds.connectAttr(source, destination, **kwargs)
        # Python-2 except syntax; e is the RuntimeError instance.
        except RuntimeError, e:
            # The generic 'Maya command error' text is swallowed here --
            # presumably the already-connected case the docstring describes;
            # confirm against maya behavior before changing.
            if str(e) != 'Maya command error':
                # we only want to pass on a certain connection error. all others we re-raise
                raise e
    else:
        cmds.connectAttr(source, destination, **kwargs)
def disconnectAttr(source, destination=None, inputs=None, outputs=None,
                   **kwargs):
    """
    Modifications:
      - If no destination is passed, then all inputs will be disconnected if inputs
        is True, and all outputs will be disconnected if outputs is True; if
        neither are given (or both are None), both all inputs and all outputs
        will be disconnected
    """
    if destination:
        if inputs:
            raise ValueError('inputs/outputs flags may not be used in combination with a destination')
        cmds.disconnectAttr(source, destination, **kwargs)
        return

    # No destination: figure out which side(s) of the plug to break.
    if inputs is None and outputs is None:
        inputs = True
        outputs = True
    directions = []
    if inputs:
        directions.append(True)   # disconnect incoming connections
    if outputs:
        directions.append(False)  # disconnect outgoing connections

    for wantInputs in directions:
        plugPairs = cmds.listConnections(source,
                                         source=wantInputs,
                                         destination=(not wantInputs),
                                         connections=True,
                                         plugs=True)
        # stupid maya.cmds returns None instead of []...
        if plugPairs is None:
            continue
        # When querying inputs, listConnections reports pairs in
        # (dest, src) order; reverse the flat list so pairIter yields
        # (src, dest).
        if wantInputs:
            plugPairs.reverse()
        for src, dest in _util.pairIter(plugPairs):
            cmds.disconnectAttr(src, dest, **kwargs)
def getAttr(attr, default=None, **kwargs):
"""
Maya Bug Fix:
- maya pointlessly returned vector results as a tuple wrapped in a list ( ex. '[(1,2,3)]' ). This command unpacks the vector for you.
Modifications:
- casts double3 datatypes to `Vector`
- casts matrix datatypes to `Matrix`
- casts vectorArrays from a flat array of floats to an array of Vectors
- when getting a multi-attr, maya would raise an error, but pymel will return a list of values for the multi-attr
- added a default argument. if the attribute does not exist and this argument is not None, this default value will be returned
- added support for getting message attributes
"""
def listToMat(l):
return datatypes.Matrix(
[[l[0], l[1], l[2], l[3]],
[l[4], l[5], l[6], l[7]],
[l[8], l[9], l[10], l[11]],
[l[12], l[13], l[14], l[15]]])
def listToVec(l):
vecRes = []
for i in range(0, len(res), 3):
vecRes.append(datatypes.Vector(res[i:i + 3]))
return vecRes
# stringify fix
if isinstance(attr, Attribute):
attr = attr.name(placeHolderIndices=False)
else:
attr = unicode(attr)
try:
res = cmds.getAttr(attr, **kwargs)
if isinstance(res, list) and len(res):
if isinstance(res[0], tuple):
typ = cmds.getAttr(attr, type=1)
if typ == 'pointArray':
return [datatypes.Point(x) for x in res]
elif typ == 'vectorArray':
return [datatypes.Vector(x) for x in res]
res = res[0]
if typ == 'double3':
return datatypes.Vector(list(res))
# elif cmds.getAttr( attr, type=1) == 'matrix':
# return listToMat(res)
else:
try:
return {
'matrix': listToMat,
'vectorArray': listToVec
}[cmds.getAttr(attr, type=1)](res)
except KeyError:
pass
return res
# perhaps it error'd because it's a mixed compound, or a multi attribute
except RuntimeError, e:
try:
pyattr = Attribute(attr)
# mixed compound takes precedence, because by default, compound attributes are returned by getAttr, but
# mixed compounds cannot be expressed in a mel array.
if pyattr.isCompound():
return [child.get() for child in pyattr.getChildren()]
elif pyattr.isMulti():
if pyattr.type() == 'message':
return pyattr.listConnections()
return [pyattr[i].get() for i in range(pyattr.numElements())]
# re-raise error
elif pyattr.type() == 'message':
connects = pyattr.listConnections()
if connects:
return connects[0]
else:
return None
raise
except AttributeError:
if default is not None:
return default
# raise original RuntimeError
raise e
class AmbiguityWarning(Warning):
    # Warning category; the name suggests it flags ambiguous operations, but
    # its usage sites are not visible in this portion of the file.
    pass
# getting and setting
def setAttr(attr, *args, **kwargs):
    """
    Maya Bug Fix:
      - setAttr did not work with type matrix.

    Modifications:
      - No need to set type, this will automatically be determined
      - Adds support for passing a list or tuple as the second argument for datatypes such as double3.
      - When setting stringArray datatype, you no longer need to prefix the list with the number of elements - just pass a list or tuple as with other arrays
      - Added 'force' kwarg, which causes the attribute to be added if it does not exist.
        - if no type flag is passed, the attribute type is based on type of value being set (if you want a float, be sure to format it as a float, e.g. 3.0 not 3)
        - currently does not support compound attributes
        - currently supported python-to-maya mappings:

          ============ ===========
          python type  maya type
          ============ ===========
          float        double
          ------------ -----------
          int          long
          ------------ -----------
          str          string
          ------------ -----------
          bool         bool
          ------------ -----------
          Vector       double3
          ------------ -----------
          Matrix       matrix
          ------------ -----------
          [str]        stringArray
          ============ ===========

    >>> addAttr( 'persp', longName= 'testDoubleArray', dataType='doubleArray')
    >>> setAttr( 'persp.testDoubleArray', [0,1,2])
    >>> setAttr( 'defaultRenderGlobals.preMel', 'sfff')

      - Added ability to set enum attributes using the string values; this may be
        done either by setting the 'asString' kwarg to True, or simply supplying
        a string value for an enum attribute.
    """
    datatype = kwargs.get('type', kwargs.get('typ', None))

    # if there is only one argument we do our special pymel tricks
    if len(args) == 1:

        arg = args[0]

        # force flag
        force = kwargs.pop('force', kwargs.pop('f', False))

        # asString flag
        asString = kwargs.pop('asString', None)

        # vector, matrix, and arrays
        if _util.isIterable(arg):
            if datatype is None:
                # if using force flag and the attribute does not exist
                # we can infer the type from the passed value
                #attr = Attribute(attr)
                if force and not cmds.objExists(attr):  # attr.exists():
                    import pymel.util.nameparse as nameparse
                    attrName = nameparse.parse(attr)
                    assert attrName.isAttributeName(), "passed object is not an attribute"
                    try:
                        # Infer the array datatype from the first element.
                        if isinstance(arg[0], (basestring, _util.ProxyUnicode)):
                            datatype = 'stringArray'
                        elif isinstance(arg[0], (list, datatypes.Vector)):
                            datatype = 'vectorArray'
                        # NOTE(review): unreachable for list elements -- the
                        # preceding branch also tests `list`, so only a Point
                        # that is not a Vector would land here; confirm
                        # whether Point subclasses Vector before relying on
                        # this branch.
                        elif isinstance(arg[0], (list, datatypes.Point)):
                            datatype = 'pointArray'
                        elif isinstance(arg, datatypes.Vector):
                            datatype = 'double3'
                        elif isinstance(arg, datatypes.Matrix):
                            datatype = 'matrix'
                        elif isinstance(arg[0], int):
                            datatype = 'Int32Array'
                        elif isinstance(arg[0], float):
                            datatype = 'doubleArray'
                            if len(arg) == 3:
                                _logger.warn(
                                    "The supplied value will be interperted as a 'doubleArray' and not as a 'double3' (vector). "
                                    "Supply an explicit 'datatype' argument to avoid this warning.")
                        else:
                            raise ValueError, "pymel.core.setAttr: %s is not a supported type for use with the force flag" % type(arg[0])

                        #_logger.debug("adding %r as %r", attr, datatype)
                        addAttr(attrName.nodePath, ln=attrName.attribute, dt=datatype)

                    # empty array is being passed
                    # if the attribute exists, this is ok
                    except IndexError:
                        raise ValueError, "pymel.core.setAttr: when setting 'force' keyword to create a new array attribute, you must provide an array with at least one element"

                    except TypeError:
                        raise ValueError, "pymel.core.setAttr: %s is not a supported type" % type(args)

                else:
                    # Attribute already exists (or no force): ask maya for
                    # its type instead of inferring it.
                    if isinstance(arg, datatypes.Vector):
                        datatype = 'double3'
                    elif isinstance(arg, datatypes.Matrix):
                        datatype = 'matrix'
                    else:
                        datatype = getAttr(attr, type=1)
                        if not datatype:
                            datatype = addAttr(attr, q=1, dataType=1)  # [0] # this is returned as a single element list

            if datatype:
                kwargs['type'] = datatype

            # Let pymel wrapper objects convert themselves to mel-friendly
            # values if they know how.
            try:
                arg = arg.__melobject__()
            except AttributeError:
                pass

            if datatype == 'stringArray':
                # string arrays:
                #    first arg must be the length of the array being set
                # ex:
                #     setAttr('loc.strArray',["first", "second", "third"] )
                # becomes:
                #     cmds.setAttr('loc.strArray',3,"first", "second", "third",type='stringArray')
                args = tuple([len(arg)] + arg)

            elif datatype in ['vectorArray', 'pointArray']:
                if _versions.current() < _versions.v2011:
                    # vector arrays:
                    #    first arg must be the length of the array being set
                    #    empty values are placed between vectors
                    # ex:
                    #     setAttr('loc.vecArray',[1,2,3],[4,5,6],[7,8,9] )
                    # becomes:
                    #     cmds.setAttr('loc.vecArray',3,[1,2,3],"",[4,5,6],"",[7,8,9],type='vectorArray')
                    arg = list(arg)
                    size = len(arg)
                    try:
                        tmpArgs = [arg.pop(0)]
                        for filler, real in zip([""] * (size - 1), arg):
                            tmpArgs.append(filler)
                            tmpArgs.append(real)
                    except IndexError:
                        tmpArgs = []

                    args = tuple([size] + tmpArgs)
                else:
                    # vector arrays (2011+):
                    #    first arg must be the length of the array being set;
                    #    no empty-string separators needed
                    # ex:
                    #     setAttr('loc.vecArray',[1,2,3],[4,5,6],[7,8,9] )
                    # becomes:
                    #     cmds.setAttr('loc.vecArray',3,[1,2,3],[4,5,6],[7,8,9],type='vectorArray')
                    arg = list(arg)
                    size = len(arg)
                    args = tuple([size] + arg)
                # print args

            elif datatype in ['Int32Array', 'doubleArray']:
                # int32 and double arrays:
                #   actually fairly sane
                # ex:
                #     setAttr('loc.doubleArray',[1,2,3] )
                # becomes:
                #     cmds.setAttr('loc.doubleArray',[1,2,3],type='doubleArray')
                args = (tuple(arg),)
            else:
                # others: short2, short3, long2, long3, float2, etc...
                #    args must be expanded
                # ex:
                #     setAttr('loc.foo',[1,2,3] )
                # becomes:
                #     cmds.setAttr('loc.foo',1,2,3 )
                args = tuple(arg)

        # non-iterable types
        else:
            if datatype is None:
                #attr = Attribute(attr)
                if force and not cmds.objExists(attr):  # attr.exists():
                    import pymel.util.nameparse as nameparse
                    attrName = nameparse.parse(attr)
                    assert attrName.isAttributeName(), "passed object is not an attribute"
                    if isinstance(arg, basestring):
                        addAttr(attrName.nodePath, ln=attrName.attribute, dt='string')
                        kwargs['type'] = 'string'
                    # NOTE(review): isinstance(True, int) is True in Python,
                    # so bool values are caught here and created as 'long';
                    # the 'bool' branch below looks unreachable -- confirm
                    # before reordering.
                    elif isinstance(arg, int):
                        addAttr(attrName.nodePath, ln=attrName.attribute, at='long')
                    elif isinstance(arg, float):
                        addAttr(attrName.nodePath, ln=attrName.attribute, at='double')
                    elif isinstance(arg, bool):
                        addAttr(attrName.nodePath, ln=attrName.attribute, at='bool')
                    else:
                        raise TypeError, "%s.setAttr: %s is not a supported type for use with the force flag" % (__name__, type(arg))

                elif isinstance(arg, (basestring, _util.ProxyUnicode)):
                    if asString is None:
                        # Decide automatically: string values for enum
                        # attributes are treated as enum names.
                        if isinstance(attr, Attribute):
                            attrType = attr.type()
                        else:
                            attrType = cmds.getAttr(attr, type=1)
                        asString = (attrType == 'enum')

                    if asString:
                        # Translate the enum name to its integer index.
                        val = getEnums(attr).get(arg)
                        if val is None:
                            raise MayaAttributeEnumError(attr, arg)
                        arg = val
                        args = (val,)
                    else:
                        kwargs['type'] = 'string'

    # stringify fix
    attr = unicode(attr)

    try:
        # print args, kwargs
        cmds.setAttr(attr, *args, **kwargs)
    except TypeError, msg:
        # Possibly a string passed for an enum attribute without the
        # asString path having been taken: retry with the enum index.
        val = kwargs.pop('type', kwargs.pop('typ', False))
        typ = addAttr(attr, q=1, at=1)
        if val == 'string' and typ == 'enum':
            enums = addAttr(attr, q=1, en=1).split(":")
            index = enums.index(args[0])
            args = (index, )
            cmds.setAttr(attr, *args, **kwargs)
        else:
            raise TypeError, msg
    except RuntimeError, msg:
        # normally this is handled in pmcmds, but setAttr error is different for some reason
        # can't use 'startswith' because of Autodesk test strings wrapped in commas
        if 'No object matches name: ' in str(msg):
            raise _objectError(attr)
        else:
            # re-raise
            raise
def addAttr(*args, **kwargs):
    """
    Wrapper around maya.cmds.addAttr with several usability fixes.

    Modifications:
    - allow python types to be passed to set -at type
            str     string
            float   double
            int     long
            bool    bool
            Vector  double3
    - when querying dataType, the dataType is no longer returned as a list
    - when editing hasMinValue, hasMaxValue, hasSoftMinValue, or hasSoftMaxValue the passed boolean value was ignored
      and the command instead behaved as a toggle.  The behavior is now more intuitive::
        >>> addAttr('persp', ln='test', at='double', k=1)
        >>> addAttr('persp.test', query=1, hasMaxValue=True)
        False
        >>> addAttr('persp.test', edit=1, hasMaxValue=False)
        >>> addAttr('persp.test', query=1, hasMaxValue=True)
        False
        >>> addAttr('persp.test', edit=1, hasMaxValue=True)
        >>> addAttr('persp.test', query=1, hasMaxValue=True)
        True
    - allow passing a list or dict instead of a string for enumName
    - allow user to pass in type and determine whether it is a dataType or
      attributeType. Types that may be both, such as float2, float3, double2,
      double3, long2, long3, short2, and short3 are all treated as
      attributeTypes. In addition, as a convenience, since these attributeTypes
      are actually treated as compound attributes, the child attributes are
      automatically created, with X/Y/Z appended, unless usedAsColor is set, in
      which case R/G/B is added. Alternatively, the suffices can explicitly
      specified with childSuffixes:
        >>> addAttr('persp', ln='autoDouble', type='double', k=1)
        >>> addAttr('persp.autoDouble', query=1, attributeType=1)
        u'double'
        >>> addAttr('persp.autoDouble', query=1, dataType=1)
        u'TdataNumeric'
        >>> addAttr('persp', ln='autoMesh', type='mesh', k=1)
        >>> addAttr('persp.autoMesh', query=1, attributeType=1)
        u'typed'
        >>> addAttr('persp.autoMesh', query=1, dataType=1)
        u'mesh'
        >>> addAttr('persp', ln='autoDouble3Vec', type='double3', k=1)
        >>> [x.attrName() for x in PyNode('persp').listAttr() if 'autoDouble3' in x.name()]
        [u'autoDouble3Vec', u'autoDouble3VecX', u'autoDouble3VecY', u'autoDouble3VecZ']
        >>> addAttr('persp', ln='autoFloat3Col', type='float3', usedAsColor=1)
        >>> [x.attrName() for x in PyNode('persp').listAttr() if 'autoFloat3' in x.name()]
        [u'autoFloat3Col', u'autoFloat3ColR', u'autoFloat3ColG', u'autoFloat3ColB']
        >>> addAttr('persp', ln='autoLong2', type='long2', childSuffixes=['_first', '_second'])
        >>> [x.attrName() for x in PyNode('persp').listAttr() if 'autoLong2' in x.name()]
        [u'autoLong2', u'autoLong2_first', u'autoLong2_second']
    """
    # Types that must be created with the -at (attributeType) flag vs the
    # -dt (dataType) flag; used to dispatch the generic 'type' keyword below.
    # Note float2/float3/double2/double3/long2/long3/short2/short3 are listed
    # here (attributeTypes), so they win when a name is valid as both.
    attributeTypes = [ 'bool', 'long', 'short', 'byte', 'char', 'enum',
                       'float', 'double', 'doubleAngle', 'doubleLinear',
                       'compound', 'message', 'time', 'fltMatrix', 'reflectance',
                       'spectrum', 'float2', 'float3', 'double2', 'double3', 'long2',
                       'long3', 'short2', 'short3', datatypes.Vector ]
    dataTypes = [ 'string', 'stringArray', 'matrix', 'reflectanceRGB',
                  'spectrumRGB', 'doubleArray', 'Int32Array', 'vectorArray',
                  'nurbsCurve', 'nurbsSurface', 'mesh', 'lattice', 'pointArray' ]
    # generic 'type' keyword: decide whether it means -at or -dt
    type = kwargs.pop('type', kwargs.pop('typ', None ))
    childSuffixes = kwargs.pop('childSuffixes', None)
    if type is not None:
        if type in attributeTypes:
            kwargs['at'] = type
        elif type in dataTypes:
            kwargs['dt'] = type
        else:
            raise TypeError, "type not supported"
    # translate python types (float, int, bool, Vector, str) to maya
    # attributeType names; unknown values pass through unchanged
    at = kwargs.pop('attributeType', kwargs.pop('at', None))
    if at is not None:
        try:
            at = {
                float: 'double',
                int: 'long',
                bool: 'bool',
                datatypes.Vector: 'double3',
                str: 'string',
                unicode: 'string'
            }[at]
        except KeyError:
            pass
        kwargs['at'] = at
    if kwargs.get('e', kwargs.get('edit', False)):
        # find the first truthy non-edit flag; that is the flag being edited
        for editArg, value in kwargs.iteritems():
            if editArg not in ('e', 'edit') and value:
                break
        if editArg in ('hasMinValue', 'hnv', 'hasMaxValue', 'hxv', 'hasSoftMinValue', 'hsn', 'hasSoftMaxValue', 'hsx'):
            # bugfix: hasM*Value works as a toggle, regardless of whether you specify True or False
            # only issue the toggle when the current state differs from the requested one
            if bool(value) != bool(cmds.addAttr(*args, **{'query': True, editArg: True})):
                return cmds.addAttr(*args, **kwargs)
            else:
                # otherwise, don't do anything, bc the value is already correct
                return
    # translate dict or list for enumName
    enums = kwargs.pop('en', kwargs.pop('enumName', None))
    if enums is not None:
        kwargs['enumName'] = _toEnumStr(enums)
    # MObject stringify Fix
    #args = map(unicode, args)
    res = cmds.addAttr(*args, **kwargs)
    if kwargs.get('q', kwargs.get('query', False)):
        # When addAttr is queried, and has multiple other query flags - ie,
        #   addAttr('joint1.sweetpea', q=1, parent=1, dataType=1)
        # ... it seems to ignore every kwarg but the 'first'
        for queriedArg, value in kwargs.iteritems():
            if queriedArg not in ('q', 'query') and value:
                break
        if queriedArg in ('dt', 'dataType'):
            # If the attr is not a dynamic attribute, maya.cmds prints:
            #    Error: '...' is not a dynamic attribute of node '...'.
            # ...but does NOT raise an exception
            # Because it will be more consistent with maya.cmds, and because
            # attributeType already behaves like this, we will do the same -
            # allow maya.cmds to print it's error message, and return None, but
            # not raise an exception
            if res is not None:
                res = res[0]
        elif queriedArg in ('p', 'parent'):
            # return the parent as an Attribute on the queried node
            node = None
            if args:
                node = PyNode(args[0])
            else:
                node = ls(sl=1)[0]
            if isinstance(node, Attribute):
                node = node.node()
            res = node.attr(res)
    elif not kwargs.get('e', kwargs.get('edit', False)):
        # if we were creating an attribute, and used "type", check if we
        # made a compound type...
        if type is not None and at:
            # string parse the attributeType, because the type may be an
            # actual python type...
            baseType = at[:-1]
            num = at[-1]
            if (baseType in ('float', 'double', 'short', 'long')
                    and num in ('2', '3')):
                num = int(num)
                # pick child suffixes: explicit > RGB for colors > XYZ
                if childSuffixes is None:
                    if kwargs.get('usedAsColor', kwargs.get('uac')):
                        childSuffixes = 'RGB'
                    else:
                        childSuffixes = 'XYZ'
                baseLongName = kwargs.get('longName', kwargs.get('ln'))
                baseShortName = kwargs.get('shortName', kwargs.get('sn'))
                # strip flags that must not be inherited by the children
                childKwargs = dict(kwargs)
                for kwarg in (
                        'longName', 'ln',
                        'shortName', 'sn',
                        'attributeType', 'at',
                        'dataType', 'dt',
                        'multi', 'm',
                        'indexMatters', 'im',
                        'parent', 'p',
                        'numberOfChildren', 'nc',
                        'usedAsColor', 'uac',
                ):
                    childKwargs.pop(kwarg, None)
                childKwargs['attributeType'] = baseType
                childKwargs['parent'] = baseLongName
                for i in xrange(num):
                    suffix = childSuffixes[i]
                    childKwargs['longName'] = baseLongName + suffix
                    if baseShortName:
                        childKwargs['shortName'] = baseShortName + suffix
                    cmds.addAttr(*args, **childKwargs)
    #        else:
    #            # attempt to gather Attributes we just made
    #            # this is slightly problematic because compound attributes are invalid
    #            # until all of their children are created, as in these example from the docs
    #
    #            #addAttr( longName='sampson', numberOfChildren=5, attributeType='compound' )
    #            #addAttr( longName='homeboy', attributeType='matrix', parent='sampson' )
    #            #addAttr( longName='midge', attributeType='message', parent='sampson' )
    #            #addAttr( longName='damien', attributeType='double', parent='sampson' )
    #            #addAttr( longName='elizabeth', attributeType='double', parent='sampson' )
    #            #addAttr( longName='sweetpea', attributeType='double', parent='sampson' )
    #
    #
    #            if not args:
    #                args=cmds.ls(sl=1,l=1)
    #            longName = kwargs.pop( 'longName', kwargs.get('ln',None) )
    #            shortName = kwargs.pop( 'shortName', kwargs.get('sn',None) )
    #            name = longName if longName else shortName
    #            assert name, "could not determine name of attribute"
    #            res = [ Attribute(x + '.' + name) for x in args]
    return res
def hasAttr(pyObj, attr, checkShape=True):
    """convenience function for determining if an object has an attribute.
    If checkShape is enabled, the shape node of a transform will also be checked for the attribute.
    :rtype: `bool`
    """
    if not isinstance(pyObj, PyNode):
        raise TypeError("hasAttr requires a PyNode instance and a string")
    import nodetypes
    # Transform.attr accepts a checkShape kwarg; other node types do not,
    # so only forward it when we actually have a transform.
    attrKwargs = {}
    if isinstance(pyObj, nodetypes.Transform):
        attrKwargs['checkShape'] = checkShape
    try:
        pyObj.attr(attr, **attrKwargs)
    except AttributeError:
        return False
    return True
#-----------------------
# Attr Enums
#-----------------------
def _toEnumStr(enums):
    """Coerce an enum specification (string, sequence of names, or mapping)
    into the colon-separated ``enumName`` string that maya.cmds expects."""
    if isinstance(enums, dict):
        # peek at one entry to work out which direction the mapping goes
        sampleKey = next(iter(enums.keys()))
        sampleVal = next(iter(enums.values()))
        if isinstance(sampleKey, basestring) and isinstance(sampleVal, int):
            # name -> index
            enums = ['%s=%s' % (name, index) for name, index in enums.items()]
        elif isinstance(sampleKey, int) and isinstance(sampleVal, basestring):
            # index -> name
            enums = ['%s=%s' % (name, index) for index, name in enums.items()]
        else:
            raise ValueError('dict must map from strings to ints, or vice-versa')
    if isinstance(enums, basestring):
        return enums
    return ":".join(enums)
def setEnums(attr, enums):
    """
    Set the enumerators for an enum attribute.

    *enums* may be a string, a sequence of names, or a mapping; it is
    normalized via :func:`_toEnumStr`.
    """
    enumStr = _toEnumStr(enums)
    cmds.addAttr(attr, e=1, en=enumStr)
def getEnums(attr):
    """
    Get the enumerators for an enum attribute.

    :param attr: an `Attribute` instance or a ``'node.attr'`` string
    :rtype: `util.enum.EnumDict`

    >>> addAttr( "persp", ln='numbers', at='enum', enumName="zero:one:two:thousand=1000:three")
    >>> numbers = Attribute('persp.numbers').getEnums()
    >>> sorted(numbers.items())
    [(u'one', 1), (u'thousand', 1000), (u'three', 1001), (u'two', 2), (u'zero', 0)]
    >>> numbers[1]
    u'one'
    >>> numbers['thousand']
    1000
    """
    if isinstance(attr, Attribute):
        attrName = attr.attrName()
        node = attr.node().name()
    else:
        node, attrName = unicode(attr).rsplit('.', 1)
    enum_list = cmds.attributeQuery(attrName, node=node,
                                    listEnum=True)[0].split(':')
    enum_dict = {}
    index = 0
    for enum in enum_list:
        try:
            # entries may carry an explicit value, e.g. 'thousand=1000'
            name, value = enum.split(u'=')
            index = int(value)
            enum = name
        except ValueError:
            # no '=value' part (or a malformed one): keep the running index.
            # bugfix: was a bare 'except:', which silently swallowed *every*
            # exception, not just the expected parse failures.
            pass
        enum_dict[enum] = index
        index += 1
    return _util.enum.EnumDict(enum_dict)
#-----------------------
# List Functions
#-----------------------
# def listAttr(*args, **kwargs):
# """
# Modifications:
# - returns an empty list when the result is None
# """
# return _util.listForNone(cmds.listAttr(*args, **kwargs))
def listConnections(*args, **kwargs):
    """
    Wrapper around maya.cmds.listConnections returning PyNodes/Attributes.

    Modifications:
    - returns an empty list when the result is None
    - returns an empty list (with a warning) when the arg is an empty list, tuple,
        set, or frozenset, making it's behavior consistent with when None is
        passed, or no args and nothing is selected (would formerly raise a
        TypeError)
    - When 'connections' flag is True, (and 'plugs' is True) the attribute pairs are returned in a 2D-array::
        [['checker1.outColor', 'lambert1.color'], ['checker1.color1', 'fractal1.outColor']]
        Note that if 'plugs' is False (the default), for backward compatibility, the returned pairs are somewhat less intuitive attrs + nodes::
        [['checker1.outColor', 'lambert1'], ['checker1.color1', 'fractal1']]
    - added sourceFirst keyword arg. when sourceFirst is true and connections is also true,
        the paired list of plugs is returned in (source,destination) order instead of (thisnode,othernode) order.
        this puts the pairs in the order that disconnectAttr and connectAttr expect.
    - added ability to pass a list of types
    :rtype: `PyNode` list
    """
    # We need to force casting to Attribute, as opposed to just Pynode,
    # if we are returning plugs, because PyNode will prefer component
    # objects over attributes when there is amibiguity - ie,
    # PyNode('myNode.rotatePivot') will give a component
    # normalize empty containers to None so cmds treats them as "nothing"
    args = tuple(None if isinstance(x, (list, tuple, set, frozenset)) and not x
                 else x for x in args)
    plugs = kwargs.get('plugs', kwargs.get('p', False))
    if plugs:
        CastObj = Attribute
    else:
        CastObj = PyNode
    # cmds returns connection pairs as a flat list; fold into 2-tuples
    def makePairs(l):
        if l is None:
            return []
        return [(CastObj(a), CastObj(b)) for (a, b) in _util.pairIter(l)]
    # group the core functionality into a funcion, so we can call in a loop when passed a list of types
    def doIt(**kwargs):
        if kwargs.get('connections', kwargs.get('c', False)):
            if kwargs.pop('sourceFirst', False):
                source = kwargs.get('source', kwargs.get('s', True))
                dest = kwargs.get('destination', kwargs.get('d', True))
                if source:
                    if not dest:
                        # source-only query: cmds yields (thisnode, othernode);
                        # swap each pair into (source, destination) order
                        return [(s, d) for d, s in makePairs(cmds.listConnections(*args, **kwargs))]
                    else:
                        # both directions requested: query twice (source-only,
                        # then dest-only) so each half can be ordered correctly
                        res = []
                        kwargs.pop('destination', None)
                        kwargs['d'] = False
                        res = [(s, d) for d, s in makePairs(cmds.listConnections(*args, **kwargs))]
                        kwargs.pop('source', None)
                        kwargs['s'] = False
                        kwargs['d'] = True
                        return makePairs(cmds.listConnections(*args, **kwargs)) + res
                # if dest passes through to normal method
            return makePairs(cmds.listConnections(*args, **kwargs))
        else:
            return map(CastObj, _util.listForNone(cmds.listConnections(*args, **kwargs)))
    # if passed a list of types, concatenate the resutls
    # NOTE: there may be duplicate results if a leaf type and it's parent are both passed: ex. animCurve and animCurveTL
    types = kwargs.get('type', kwargs.get('t', None))
    if _util.isIterable(types):
        types = list(set(types))  # remove dupes from types list
        kwargs.pop('type', None)
        kwargs.pop('t', None)
        res = []
        for type in types:
            ikwargs = kwargs.copy()
            ikwargs['type'] = type
            res += doIt(**ikwargs)
        return res
    else:
        return doIt(**kwargs)
def listHistory(*args, **kwargs):
    """
    Wrapper around maya.cmds.listHistory returning PyNodes.

    Modifications:
    - returns an empty list when the result is None
    - raises a RuntimeError when the arg is an empty list, tuple, set, or
      frozenset, making it's behavior consistent with when None is passed, or
      no args and nothing is selected (would formerly raise a TypeError)
    - added a much needed 'type' filter
    - added an 'exactType' filter (if both 'exactType' and 'type' are present, 'type' is ignored)
    :rtype: `DependNode` list
    """
    # normalize empty containers to None so cmds treats them consistently
    args = tuple(None if isinstance(a, (list, tuple, set, frozenset)) and not a
                 else a for a in args)
    # pull out our extension flags before handing kwargs to cmds
    typeFilter = kwargs.pop('type', None)
    exactTypeFilter = kwargs.pop('exactType', None)
    nodes = [PyNode(x) for x in _util.listForNone(cmds.listHistory(*args, **kwargs))]
    if exactTypeFilter:
        nodes = [n for n in nodes if n.nodeType() == exactTypeFilter]
    elif typeFilter:
        nodes = [n for n in nodes if typeFilter in n.nodeType(inherited=True)]
    return nodes
def listFuture(*args, **kwargs):
    """
    Equivalent to ``listHistory(..., future=True)``.

    Modifications:
    - returns an empty list when the result is None
    - added a much needed 'type' filter
    - added an 'exactType' filter (if both 'exactType' and 'type' are present, 'type' is ignored)
    :rtype: `DependNode` list
    """
    # force future traversal regardless of what the caller passed
    kwargs.update(future=True)
    return listHistory(*args, **kwargs)
def listRelatives(*args, **kwargs):
    """
    Wrapper around maya.cmds.listRelatives returning PyNodes.

    Maya Bug Fix:
    - allDescendents and shapes flags did not work in combination
    - noIntermediate doesn't appear to work
    Modifications:
    - returns an empty list when the result is None
    - returns an empty list when the arg is an empty list, tuple, set, or
      frozenset, making it's behavior consistent with when None is passed, or
      no args and nothing is selected (would formerly raise a TypeError)
    - returns wrapped classes
    - fullPath is forced on to ensure that all returned node paths are unique
    :rtype: `DependNode` list
    """
    # normalize empty containers to None so cmds treats them as "nothing"
    args = tuple(None if isinstance(x, (list, tuple, set, frozenset)) and not x
                 else x for x in args)
    # force full paths so every returned node path is unique
    # (fix: this was previously duplicated inside the allDescendents+shapes
    # branch below - the repeat was dead code and has been removed)
    kwargs['fullPath'] = True
    kwargs.pop('f', None)
    # Stringify Fix
    #args = [ unicode(x) for x in args ]
    if kwargs.get('allDescendents', kwargs.get('ad', False)) and kwargs.pop('shapes', kwargs.pop('s', False)):
        # bugfix: allDescendents and shapes don't combine in maya.cmds;
        # emulate by listing all descendents, then filtering to shapes via ls
        res = cmds.listRelatives(*args, **kwargs)
        if res is None:
            results = []
        else:
            results = ls(res, shapes=1)
    else:
        results = map(PyNode, _util.listForNone(cmds.listRelatives(*args, **kwargs)))
    # Fix that noIntermediate doesn't seem to work in list relatives
    if kwargs.get('noIntermediate', kwargs.get('ni', False)):
        return [result for result in results if not result.intermediateObject.get()]
    return results
def ls(*args, **kwargs):
    """
    Modifications:
    - Returns PyNode objects, not "names" - all flags which do nothing but modify
      the string name of returned objects are ignored (ie, 'long'); note that
      the 'allPaths' flag DOES have an effect, as PyNode objects are aware of
      their dag paths (ie, two different instances of the same object will result
      in two unique PyNodes)
    - Added new keyword: 'editable' - this will return the inverse set of the readOnly flag. i.e. non-read-only nodes
    - Added new keyword: 'regex' - pass a valid regular expression string, compiled regex pattern, or list thereof.
        >>> group('top')
        nt.Transform(u'group1')
        >>> duplicate('group1')
        [nt.Transform(u'group2')]
        >>> group('group2')
        nt.Transform(u'group3')
        >>> ls(regex='group\d+\|top')  # don't forget to escape pipes `|`
        [nt.Transform(u'group1|top'), nt.Transform(u'group2|top')]
        >>> ls(regex='group\d+\|top.*')
        [nt.Transform(u'group1|top'), nt.Camera(u'group1|top|topShape'), nt.Transform(u'group2|top'), nt.Camera(u'group2|top|topShape')]
        >>> ls(regex='group\d+\|top.*', cameras=1)
        [nt.Camera(u'group2|top|topShape'), nt.Camera(u'group1|top|topShape')]
        >>> ls(regex='\|group\d+\|top.*', cameras=1)  # add a leading pipe to search for full path
        [nt.Camera(u'group1|top|topShape')]
        The regular expression will be used to search the full DAG path, starting from the right, in a similar fashion to how globs currently work.
        Technically speaking, your regular expression string is used like this::
        re.search( '(\||^)' + yourRegexStr + '$', fullNodePath )
    :rtype: `PyNode` list
    """
    regexArgs = kwargs.pop('regex', [])
    # accept a single pattern or a list of patterns
    if not isinstance(regexArgs, (tuple, list)):
        regexArgs = [regexArgs]
    if regexArgs:
        # if we're searching for a regex, we may be trying to match against full
        # path name, so use long names...
        kwargs['long'] = True
    else:
        # otherwise, should be more efficient to use short names... and there
        # was a maya bug that would sometimes make nodes have no long name:
        #   BSPR-18158 Referencing creates extra nodes with no fullpath
        kwargs['long'] = False
        kwargs.pop('l', None)
    #    # TODO: make this safe for international unicode characters
    #    validGlobChars = re.compile('[a-zA-Z0-9_|.*?\[\]]+$')
    #    newArgs = []
    #    regexArgs = []
    #    for arg in args:
    #        if isinstance(arg, (list, tuple)):
    #            # maya only goes one deep, and only checks for lists or tuples
    #            for subarg in arg:
    #                if isinstance(subarg, basestring) and not validGlobChars.match(subarg):
    #                    regexArgs.append(subarg)
    #                else:
    #                    newArgs.append(subarg)
    #        elif isinstance(arg, basestring) and not validGlobChars.match(arg):
    #            regexArgs.append(arg)
    #        else:
    #            newArgs.append(arg)
    for i, val in enumerate(regexArgs):
        # add a prefix which allows the regex to match against a dag path, mounted at the right
        if isinstance(val, basestring):
            if not val.endswith('$'):
                val = val + '$'
            val = re.compile('(\||^)' + val)
        elif not isinstance(val, re._pattern_type):
            raise TypeError('regex flag must be passed a valid regex string, a compiled regex object, or a list of these types. got %s' % type(val).__name__)
        regexArgs[i] = val
    editable = kwargs.pop('editable', False)
    res = _util.listForNone(cmds.ls(*args, **kwargs))
    if regexArgs:
        # keep only names matching at least one of the compiled patterns
        tmp = res
        res = []
        for x in tmp:
            for reg in regexArgs:
                if reg.search(x):
                    res.append(x)
                    break
    if editable:
        # 'editable' is the inverse of readOnly: list read-only nodes, then
        # return everything that is NOT in that set
        kwargs['readOnly'] = True
        kwargs.pop('ro', True)
        roNodes = _util.listForNone(cmds.ls(*args, **kwargs))
        # faster way?
        return map(PyNode, filter(lambda x: x not in roNodes, res))
    if kwargs.get('readOnly', kwargs.get('ro', False)):
        # when readOnly is provided showType is ignored
        return map(PyNode, res)
    if kwargs.get('showType', kwargs.get('st', False)):
        # results alternate (name, type); wrap names, keep type strings as-is
        tmp = res
        res = []
        for i in range(0, len(tmp), 2):
            res.append(PyNode(tmp[i]))
            res.append(tmp[i + 1])
        return res
    if kwargs.get('nodeTypes', kwargs.get('nt', False)):
        # nodeTypes returns plain type-name strings; no wrapping needed
        return res
    #    kwargs['showType'] = True
    #    tmp = _util.listForNone(cmds.ls(*args, **kwargs))
    #    res = []
    #    for i in range(0,len(tmp),2):
    #        res.append( PyNode( tmp[i], tmp[i+1] ) )
    #
    #    return res
    if kwargs.get('showNamespace', kwargs.get('sns', False)):
        # results alternate (node, namespace)
        return [system.Namespace(item) if i % 2 else PyNode(item) for i, item in enumerate(res)]
    return map(PyNode, res)
# showType = kwargs.get( 'showType', kwargs.get('st', False) )
# kwargs['showType'] = True
# kwargs.pop('st',None)
# res = []
# if kwargs.get( 'readOnly', kwargs.get('ro', False) ):
#
# ro = cmds.ls(*args, **kwargs) # showType flag will be ignored
#
# # this was unbelievably slow
#
# kwargs.pop('readOnly',None)
# kwargs.pop('ro',None)
# all = cmds.ls(*args, **kwargs)
# for pymel.core.node in ro:
# try:
# idx = all.index(pymel.core.node)
# all.pop(idx)
# typ = all.pop(idx+1)
# res.append( PyNode( pymel.core.node, typ ) )
# if showType:
# res.append( typ )
# except ValueError: pass
# return res
# else:
# tmp = _util.listForNone(cmds.ls(*args, **kwargs))
# for i in range(0,len(tmp),2):
# typ = tmp[i+1]
# res.append( PyNode( tmp[i], ) )
# if showType:
# res.append( typ )
#
# return res
def listTransforms(*args, **kwargs):
    """
    List the transform parents of the shapes matched by the given ls-style
    arguments.

    Modifications:
    - returns wrapped classes
    :rtype: `Transform` list
    """
    # skip intermediate objects when gathering shapes
    kwargs['ni'] = True
    shapes = cmds.ls(*args, **kwargs)
    if not shapes:
        return shapes
    parents = cmds.listRelatives(shapes, p=1, path=1)
    if parents is None:
        return []
    # NOTE: dupes are possible when a transform has more than one shape;
    # de-duping with set() would ruin the order, so we leave them in
    return [PyNode(parent) for parent in parents]
def listSets(*args, **kwargs):
    '''
    Modifications:
    - returns wrapped classes
    - if called without arguments and keys works as with allSets=True
    :rtype: `PyNode` list
    '''
    if not args and not kwargs:
        kwargs['allSets'] = True
    result = []
    for name in _util.listForNone(cmds.listSets(*args, **kwargs)):
        # cmds.listSets reports defaultCreaseDataSet even when it does not
        # exist (checked with cmds.objExists, at least on linux-2010)
        if name != 'defaultCreaseDataSet':
            result.append(PyNode(name))
    return result
#-----------------------
# Objects
#-----------------------
def nodeType(node, **kwargs):
    """
    Return the dg node type for an object.

    Note: this will return the dg node type for an object, like maya.cmds.nodeType,
    NOT the pymel PyNode class. For objects like components or attributes,
    nodeType will return the dg type of the node to which the PyNode is attached.
    :rtype: `unicode`
    """
    # still don't know how to do inherited via _api
    if kwargs.get('inherited', kwargs.get('i', False)):
        return cmds.nodeType(unicode(node), **kwargs)
    import nodetypes
    if isinstance(node, nodetypes.DependNode):
        # already have the api object wrapped; fall through to the api path
        pass
    elif isinstance(node, Attribute):
        node = node.plugNode()
    else:
        # don't spend the extra time converting to MObject, and don't call
        # unicode(node) - let the pmcmds wrap handle it; 'node' may actually
        # be a single item list, which cmds.nodeType accepts as a valid arg
        return cmds.nodeType(node, **kwargs)
    # bugfix: the short form of maya.cmds.nodeType's apiType flag is 'api';
    # this previously checked for '_api' (the module alias leaked into the
    # flag name), so the short flag was never honored
    if kwargs.get('apiType', kwargs.get('api', False)):
        return node.__apimobject__().apiTypeStr()
    # default
    try:
        return node.__apimfn__().typeName()
    except RuntimeError:
        # no function set available; fall through and return None
        pass
def group(*args, **kwargs):
    """
    Wrapper around maya.cmds.group.

    Modifications
    - if no objects are passed or selected, the empty flag is automatically set
    Maya Bug Fix:
    - corrected to return a unique name
    """
    if not args and not cmds.ls(sl=1):
        kwargs['empty'] = True
    newGroup = cmds.group(*args, **kwargs)
    # bugfix: 'versions' is the pymel versions module (aliased here as
    # _versions, see its use earlier in this file), not an attribute of cmds -
    # cmds.versions.current() raised AttributeError
    if _versions.current() >= _versions.v2014:
        # bug was fixed in 2014, so we can just cast to a PyNode and return...
        return PyNode(newGroup)
    else:
        # found an interesting bug. group does not return a unique path, so the following line
        # will error if the passed name is in another group somewhere:
        # Transform( cmds.group( name='foo') )
        # luckily the group command always selects the last created node, so we can just use selected()[0]
        return selected()[0]
# except RuntimeError, msg:
# print msg
# if msg == 'Not enough objects or values.':
# kwargs['empty'] = True
# return Transform( cmds.group(**kwargs) )
def parent(*args, **kwargs):
    """
    Wrapper around maya.cmds.parent.

    Modifications:
    - if parent is `None`, world=True is automatically set
    - if the given parent is the current parent, don't error (similar to mel)
    """
    if args and args[-1] is None:
        if not kwargs.get('w', kwargs.get('world', True)):
            raise ValueError('No parent given, but parent to world explicitly set to False')
        if 'world' in kwargs:
            del kwargs['world']
        kwargs['w'] = True
        args = args[:-1]
    elif 'world' in kwargs:
        # Standardize on 'w', for easier checking later
        kwargs['w'] = kwargs['world']
        del kwargs['world']
    origPyNodes = []
    origParent = None
    origParentDag = None
    removeObj = kwargs.get('removeObject', False) or kwargs.get('rm', False)
    if args:
        nodes = args
        # (fix: removed a stray no-op 'origPyNodes' expression statement here)
        if 'w' in kwargs or removeObj:
            origPyNodes = nodes
        else:
            origPyNodes = nodes[:-1]
            origParent = nodes[-1]
        origPyNodes = [x for x in origPyNodes if isinstance(x, PyNode)]
        # Make sure we have an MObjectHandle for all origPyNodes - may need
        # these later to fix issues with instancing...
        for n in origPyNodes:
            n.__apimobject__()
    else:
        nodes = cmds.ls(sl=1, type='dagNode')
    # There are some situations in which you can only pass one node - ie, with
    # shape=True, removeObject=True - and we don't want to abort in these
    # cases
    if nodes and not removeObj:
        if kwargs.get('w', False):
            parent = None
            children = nodes
        else:
            parent = PyNode(nodes[-1])
            children = nodes[:-1]
        # if you try to parent to the current parent, maya errors...
        # check for this and return if that's the case
        def getParent(obj):
            parent = cmds.listRelatives(obj, parent=1)
            if not parent:
                return None
            else:
                return parent[0]
        if all(getParent(child) == parent for child in children):
            return [PyNode(x) for x in children]
    result = cmds.parent(*args, **kwargs)
    # if using removeObject, return is None
    if result:
        result = [PyNode(x) for x in result]
    # fix the MDagPath for any ORIGINAL PyNodes, if instancing is involved
    # (ie, if DependNode.setParent is called, we want to set the MDagPath to the
    # correct instance if possible)
    for origNode in origPyNodes:
        try:
            origNode.__apimdagpath__()
        except AttributeError:
            continue
        except MayaInstanceError:
            # Was problem, try to fix the MDagPath!
            if origParentDag is None and origParent is not None:
                if not isinstance(origParent, PyNode):
                    origParent = PyNode(origParent)
                origParentDag = origParent.__apimdagpath__()
            mfnDag = _api.MFnDagNode(origNode.__apimobject__())
            dags = _api.MDagPathArray()
            mfnDag.getAllPaths(dags)
            foundDag = None
            # Look for an instance whose parent is the parent we reparented to
            for i in xrange(dags.length()):
                dag = dags[i]
                parentDag = _api.MDagPath(dag)
                parentDag.pop()
                if origParent is None:
                    if parentDag.length() == 0:
                        foundDag = dag
                        break
                else:
                    if parentDag == origParentDag:
                        foundDag = dag
                        break
            if foundDag is not None:
                # copy the one from the array, or else we'll get a crash, when
                # the array is freed and we try to use it!
                origNode.__apiobjects__['MDagPath'] = _api.MDagPath(foundDag)
        except MayaNodeError:
            # if we were using removeObject, it's possible the object is now
            # deleted... in this case (but only this case!), it's ok to ignore
            # a deleted node
            if removeObj:
                continue
            else:
                # fix: previously both branches did 'continue', which silently
                # ignored deleted nodes in all cases, contradicting the intent
                # documented above - re-raise when removeObject was not used
                raise
    return result
# Because cmds.duplicate only ever returns node names (ie, NON-UNIQUE, and
# therefore, nearly useless names - yes, the function that is MOST LIKELY to
# create non-unique node names only ever returns node names - we need to use
# a node-tracking approach to duplicate, so that we can propery cast to
# PyNodes after... need to get autodesk to add a flag to duplicate, to return
# shortest-unqiue names, or full path names!
# Utility
def _pathFromMObj(mObj, fullPath=False):
    """
    Return a unique path to an mObject
    """
    if mObj.hasFn(_api.MFn.kDagNode):
        dagFn = _api.MFnDagNode(mObj)
        return dagFn.fullPathName() if fullPath else dagFn.partialPathName()
    if mObj.hasFn(_api.MFn.kDependencyNode):
        return _api.MFnDependencyNode(mObj).name()
    raise TypeError("mObj must be either DagNode or DependencyNode - got a %s" % mObj.apiTypeStr())
# Node Callbacks --
def _nodeAddedCallback(list_):
    """Build an MDGMessage node-added callback that records each newly
    created node by appending an MObjectHandle for it to *list_*."""
    def _onNodeAdded(mObj, clientData):
        # store a handle (not the raw MObject) so validity can be checked later
        list_.append(_api.MObjectHandle(mObj))
    return _onNodeAdded
# from http://github.com/jspatrick/RigIt/blob/master/lib/NodeTracking.py
class NodeTracker(object):
    '''
    A class for tracking Maya Objects as they are created and deleted.
    Can (and probably should) be used as a context manager
    '''

    def __init__(self):
        self._addedCallbackID = None
        self._objects = []

    def startTrack(self):
        """Begin tracking; installing the node-added callback twice is a no-op."""
        if not self._addedCallbackID:
            callback = _nodeAddedCallback(self._objects)
            self._addedCallbackID = _api.MDGMessage.addNodeAddedCallback(callback)

    def endTrack(self):
        """
        Stop tracking and remove the callback
        """
        if self._addedCallbackID:
            _api.MMessage.removeCallback(self._addedCallbackID)
            self._addedCallbackID = None

    def getNodes(self, returnType='PyNode'):
        """
        Return a list of maya objects as strings.

        Parameters
        ----------
        returnType : {'PyNode', 'str', 'MObject'}
        """
        validReturnTypes = ('PyNode', 'str', 'MObject')
        if returnType not in validReturnTypes:
            raise ValueError('returnType must be one of: %s'
                             % ', '.join(repr(x) for x in validReturnTypes))
        result = []
        deadHandles = []
        for handle in self._objects:
            if not handle.isValid():
                # node was deleted since we recorded it; drop the handle below
                deadHandles.append(handle)
                continue
            mobj = handle.object()
            nodeName = _pathFromMObj(mobj)
            # pymel's undo node should be ignored
            if nodeName == '__pymelUndoNode':
                continue
            if returnType == 'MObject':
                result.append(mobj)
            else:
                result.append(nodeName)
        # prune dead handles after iterating, so the list isn't mutated mid-loop
        for handle in deadHandles:
            self._objects.remove(handle)
        if returnType == 'PyNode':
            result = [PyNode(name) for name in result]
        return result

    def isTracking(self):
        """
        Return True/False
        """
        return bool(self._addedCallbackID)

    def reset(self):
        self.endTrack()
        self._objects = []

    def __enter__(self):
        self.startTrack()
        return self

    def __exit__(self, exctype, excval, exctb):
        self.endTrack()
def duplicate(*args, **kwargs):
    """
    Modifications:
      - new option: addShape
            If addShape evaluates to True, then all arguments fed in must be shapes, and each will be duplicated and added under
            the existing parent transform, instead of duplicating the parent transform.
            The following arguments are incompatible with addShape, and will raise a ValueError if enabled along with addShape:
                renameChildren (rc), instanceLeaf (ilf), parentOnly (po), smartTransform (st)
      - returns wrapped classes
      - returnRootsOnly is forced on for dag objects. This is because the duplicate command does not use full paths when returning
        the names of duplicated objects and will fail if the name is not unique.

    :rtype: `PyNode` list
    """
    addShape = kwargs.pop('addShape', False)
    # returnRootsOnly is managed internally below; discard any caller value
    kwargs.pop('rr', None)
    fakeReturnRoots = False
    if cmds.ls(dag=1, *args):
        # TODO: provide a real fix?
        # in general, we want to turn on 'returnRootsOnly' with dag nodes -
        # however, there is a bug with returnRootsOnly and underworld nodes...
        # not sure what to do about this in general, but for now, adding a
        # special case check to see if there's only one arg, and it's an
        # underworld node, in which case we don't need returnRoots...
        def inUnderWorld(arg):
            if isinstance(arg, PyNode):
                return arg.inUnderWorld()
            else:
                return '->' in arg
        if len(args) == 1 and inUnderWorld(args[0]):
            fakeReturnRoots = True
        else:
            kwargs['returnRootsOnly'] = True
    if not addShape:
        if args:
            origArgs = args
        else:
            # duplicate with no args operates on the current selection
            origArgs = ls(sl=1)
        # track the MObjects actually created, since the string names returned
        # by cmds.duplicate may not be globally unique
        with NodeTracker() as tracker:
            nodeNames = cmds.duplicate(*args, **kwargs)
            newNodes = tracker.getNodes(returnType='MObject')
        if fakeReturnRoots:
            # emulate returnRootsOnly: keep only one result per input arg
            del nodeNames[len(origArgs):]
        # Ok, now we have a list of the string names, and a list of
        # newly-created MObjects... we need to try to correlate them, since the
        # nodeNames may not be unique
        pyNodes = []
        nameToNewNodes = None
        for i, name in enumerate(nodeNames):
            try:
                node = PyNode(name)
            except MayaObjectError:
                # damn, it wasn't globally unique...
                # first, see if it's name is unique, in the set of newNodes..
                # ... to do this, we make a dict from node-name to PyNode...
                if nameToNewNodes is None:
                    # lazily built only on the first name collision
                    mfnDep = _api.MFnDependencyNode()
                    nameToNewNodes = {}
                    for mobj in newNodes:
                        mfnDep.setObject(mobj)
                        mobjNodeName = mfnDep.name()
                        newPyNode = PyNode(mobj)
                        nameToNewNodes.setdefault(mobjNodeName, []).append(newPyNode)
                sameNames = nameToNewNodes[name]
                if len(sameNames) == 1:
                    # yay, there was only one created node with this name!
                    node = sameNames[0]
                else:
                    # darn, we have multiple options to choose from... find the
                    # first one with the same parent as the corresponding
                    # original node with the same index...
                    if i >= len(origArgs):
                        # uh oh, we have more results returned than we fed in..
                        # panic, and just take the first one with same name...
                        node = sameNames[0]
                    else:
                        origArg = origArgs[i]
                        if isinstance(origArg, PyNode):
                            origNode = origArg
                        else:
                            origNode = PyNode(origArg)
                        origParent = origNode.getParent()
                        for newNode in sameNames:
                            if newNode.getParent() == origParent:
                                node = newNode
                                break
                        else:
                            # uh oh, we couldn't find a new node with the same
                            # name and matching parent... panic, and just take
                            # the first one with same name...
                            node = sameNames[0]
            pyNodes.append(node)
        return pyNodes
    else:
        for invalidArg in ('renameChildren', 'rc', 'instanceLeaf', 'ilf',
                           'parentOnly', 'po', 'smartTransform', 'st'):
            if kwargs.get(invalidArg, False):
                raise ValueError("duplicate: argument %r may not be used with 'addShape' argument" % invalidArg)
        name = kwargs.pop('name', kwargs.pop('n', None))
        newShapes = []
        for origShape in [PyNode(x) for x in args]:
            if 'shape' not in cmds.nodeType(origShape.name(), inherited=True):
                raise TypeError('addShape arg of duplicate requires all arguments to be shapes (non-shape arg: %r)'
                                % origShape)
            # This is somewhat complex, because if we have a transform with
            # multiple shapes underneath it,
            #   a) The transform and all shapes are always duplicated
            #   b) After duplication, there is no reliable way to distinguish
            #         which shape is the duplicate of the one we WANTED to
            #         duplicate (cmds.shapeCompare does not work on all types
            #         of shapes - ie, subdivs)
            # To get around this, we:
            # 1) duplicate the transform ONLY (result: dupeTransform1)
            # 2) instance the shape we want under the new transform
            #    (result: dupeTransform1|instancedShape)
            # 3) duplicate the new transform
            #    (result: dupeTransform2, dupeTransform2|duplicatedShape)
            # 4) delete the transform with the instance (delete dupeTransform1)
            # 5) place an instance of the duplicated shape under the original
            #    transform (result: originalTransform|duplicatedShape)
            # 6) delete the extra transform (delete dupeTransform2)
            # 7) rename the final shape (if requested)
            # 1) duplicate the transform ONLY (result: dupeTransform1)
            dupeTransform1 = duplicate(origShape, parentOnly=1)[0]
            # 2) instance the shape we want under the new transform
            #    (result: dupeTransform1|instancedShape)
            cmds.parent(origShape, dupeTransform1, shape=True, addObject=True,
                        relative=True)
            # 3) duplicate the new transform
            #    (result: dupeTransform2, dupeTransform2|duplicatedShape)
            dupeTransform2 = duplicate(dupeTransform1, **kwargs)[0]
            # 4) delete the transform with the instance (delete dupeTransform1)
            delete(dupeTransform1)
            # 5) place an instance of the duplicated shape under the original
            #    transform (result: originalTransform|duplicatedShape)
            origParent = origShape.getParent()
            dupeShape = dupeTransform2.getShape()
            try:
                newShape = PyNode(cmds.parent(dupeShape, origParent, shape=True,
                                              addObject=True, relative=True)[0])
            except RuntimeError, e:
                # Maya 2014 introduced a bug (Change request #: BSPR-12597) with
                # using parent to instance a shape, where it will error when
                # trying to make some material connections...
                # Ie, try to run this:
                # import maya.cmds as cmds
                # def getShape(trans):
                #     return cmds.listRelatives(trans, children=True, shapes=True)[0]
                #
                # cmds.file(new=1, f=1)
                # shapeTransform = cmds.polyCube(name='singleShapePoly')[0]
                # origShape = getShape(shapeTransform)
                # dupeTransform1 = cmds.duplicate(origShape, parentOnly=1)[0]
                # cmds.parent(origShape, dupeTransform1, shape=True, addObject=True, relative=True)
                # dupeTransform2 = cmds.duplicate(dupeTransform1)[0]
                # cmds.delete(dupeTransform1)
                # dupeShape = getShape(dupeTransform2)
                # cmds.parent(dupeShape, shapeTransform, shape=True, addObject=True, relative=True)
                # then maya gives this:
                # Error: Connection not made: 'singleShapePolyShape2.instObjGroups[1]' -> 'initialShadingGroup.dagSetMembers[2]'.  Source is not connected.
                # Connection not made: 'singleShapePolyShape2.instObjGroups[1]' -> 'initialShadingGroup.dagSetMembers[2]'.  Destination attribute must be writable.
                # Connection not made: 'singleShapePolyShape2.instObjGroups[1]' -> 'initialShadingGroup.dagSetMembers[2]'.  Destination attribute must be writable.
                if _versions.current() >= _versions.v2014:
                    # Would like to check that the dupe is due to the above bug,
                    # but sometimes the error string is the one above, about
                    # connections, and sometimes it's the more generic "Maya
                    # command error"... and this isn't very safe for
                    # international translations anyway...
                    # ...so, we just ASSUME that the runtime error was due to
                    # the above bug... if there was an error that caused it to
                    # not duplicate, we will fail to find the new shape, and
                    # we will re-raise the error...
                    # we should still be able to figure out which the newShape
                    # is, since there should only be two instances of it, and it
                    # should be the one under the old parent...
                    shapes = origParent.getShapes()
                    for shape in shapes:
                        if shape.isInstanceOf(dupeShape):
                            newShape = shape
                            break
                    else:
                        raise
                # NOTE(review): on Maya versions < 2014 this except clause
                # appears to fall through without re-raising, leaving newShape
                # unbound (NameError below) - confirm intended behavior
            # 6) delete the extra transform (delete dupeTransform2)
            delete(dupeTransform2)
            # 7) rename the final shape (if requested)
            if name is not None:
                newShape.rename(name)
            newShapes.append(newShape)
        select(newShapes, r=1)
        return newShapes
# def instance( *args, **kwargs ):
# """
# Modifications:
# - returns wrapped classes
# """
# return map(PyNode, cmds.instance( *args, **kwargs ) )
'''
def attributeInfo( *args, **kwargs ):
"""
Modifications:
- returns an empty list when the result is None
- returns wrapped classes
"""
return map(PyNode, _util.listForNone(cmds.attributeInfo(*args, **kwargs)))
'''
def rename(obj, newname, **kwargs):
    """
    Modifications:
        - if the full path to an object is passed as the new name, the shortname of the object will automatically be used
    """
    import nodetypes
    import other
    # a PyNode DagNode already knows its short name; anything else is run
    # through the DagNodeName parser to strip any path component
    if isinstance(newname, nodetypes.DagNode):
        shortName = newname.nodeName()
    else:
        shortName = other.DagNodeName(newname).nodeName()
    return PyNode(cmds.rename(obj, shortName, **kwargs))
def createNode(*args, **kwargs):
    """Wrap ``cmds.createNode``, returning a `PyNode` (or None).

    ``cmds.createNode`` can sometimes return None, if shared=True is used with
    a name that already exists; in that case None is propagated to the caller.
    """
    newName = cmds.createNode(*args, **kwargs)
    if not newName:
        return None
    return PyNode(newName)
def sets(*args, **kwargs):
    """
    Modifications
      - resolved confusing syntax: operating set is always the first and only arg:

        >>> from pymel.core import *
        >>> f=newFile(f=1) #start clean
        >>>
        >>> shdr, sg = createSurfaceShader( 'blinn' )
        >>> shdr
        nt.Blinn(u'blinn1')
        >>> sg
        nt.ShadingEngine(u'blinn1SG')
        >>> s,h = polySphere()
        >>> s
        nt.Transform(u'pSphere1')
        >>> sets( sg, forceElement=s ) # add the sphere
        nt.ShadingEngine(u'blinn1SG')
        >>> sets( sg, q=1) # check members
        [nt.Mesh(u'pSphereShape1')]
        >>> sets( sg, remove=s )
        nt.ShadingEngine(u'blinn1SG')
        >>> sets( sg, q=1)
        []

      - returns wrapped classes
    """
    # flags whose value is a set of ELEMENTS to operate on (set-vs-set ops)
    setSetFlags = [
        'subtract', 'sub',
        'union', 'un',
        'intersection', 'int',
        'isIntersecting', 'ii',
        'isMember', 'im',
        'split', 'sp',
        'addElement', 'add',
        'include', 'in',
        'remove', 'rm',
        'forceElement', 'fe'
    ]
    # flags that take the set itself and no element list
    setFlags = [
        'copy', 'cp',
        'clear', 'cl',
        'flatten', 'fl'
    ]
    #args = (objectSet,)
    # this:
    #        sets('myShadingGroup', forceElement=1)
    # must be converted to:
    #        sets(forceElement='myShadingGroup')
    # NOTE: kwargs is mutated while iterating items(); safe in Python 2
    # because items() returns a list snapshot, and we break after the first
    # matching flag (only one such flag is expected per call)
    for flag, value in kwargs.items():
        if flag in setSetFlags:
            kwargs[flag] = args[0]
            # move arg over to kwarg
            if _util.isIterable(value):
                args = tuple(value)
            elif isinstance(value, (basestring, PyNode)):
                args = (value,)
            else:
                args = ()
            break
        elif flag in setFlags:
            kwargs[flag] = args[0]
            args = ()
            break
    #    # the case where we need to return a list of objects
    #    if kwargs.get( 'query', kwargs.get('q',False) ):
    #        size = len(kwargs)
    #        if size == 1 or (size==2 and kwargs.get( 'nodesOnly', kwargs.get('no',False) )  ) :
    #            return map( PyNode, _util.listForNone(cmds.sets( *args, **kwargs )) )
    # Just get the result, then check if it's a list, rather than trying to
    # parse the kwargs...
    result = cmds.sets(*args, **kwargs)
    if isinstance(result, (bool, int, long, float)):
        return result
    if _util.isIterable(result):
        return map(PyNode, _util.listForNone(result))
    elif result is None:
        return []
    else:
        return PyNode(result)
'''
#try:
# elements = elements[0]
#except:
# pass
#print elements
if kwargs.get('query', kwargs.get( 'q', False)):
#print "query", kwargs, len(kwargs)
if len(kwargs) == 1:
# list of elements
return set( cmds.sets( elements, **kwargs ) or [] )
# other query
return cmds.sets( elements, **kwargs )
elif kwargs.get('clear', kwargs.get( 'cl', False)):
return cmds.sets( **kwargs )
#if isinstance(elements,basestring) and cmds.ls( elements, sets=True):
# elements = cmds.sets( elements, q=True )
#print elements, kwargs
nonCreationArgs = set([
'edit', 'e',
'isIntersecting', 'ii',
'isMember', 'im',
'subtract', 'sub',
'union', 'un',
'intersection', 'int'])
if len( nonCreationArgs.intersection( kwargs.keys()) ):
#print "creation"
return cmds.sets( *elements, **kwargs )
# Creation
#args = _convertListArgs(args)
#print "creation"
return ObjectSet(cmds.sets( *elements, **kwargs ))
'''
def delete(*args, **kwargs):
    """
    Modifications:
        - the command will not fail on an empty list
    """
    # a single empty iterable argument is treated as a no-op instead of
    # letting cmds.delete raise
    if len(args) == 1:
        sole = args[0]
        if _util.isIterable(sole) and not sole:
            return
    cmds.delete(*args, **kwargs)
def getClassification(*args, **kwargs):
    """
    Modifications:
        - previously returned a list with a single colon-separated string of classifications. now returns a list of classifications

    :rtype: `unicode` list

    Modifications:
        - supports satisfies flag.
          Returns true if the given node type's classification satisfies the classification string which is passed with the flag.

    :rtype: `bool`
    """
    # the 'satisfies' form returns a bool, which must not be split
    if len(kwargs) == 1 and 'satisfies' in kwargs:
        return cmds.getClassification(*args, **kwargs)
    return cmds.getClassification(*args, **kwargs)[0].split(':')
#--------------------------
# New Commands
#--------------------------
def uniqueObjExists(name):
    '''Returns True if name uniquely describes an object in the scene.

    Fixes over the previous version:
      - no longer shadows the builtin ``all``
      - always returns a real bool, as the docstring promises (previously it
        could return None or a list, relying on truthiness)
    '''
    # cmds.ls may return None instead of an empty list
    matches = cmds.ls(name)
    return bool(matches) and len(matches) == 1
def selected(**kwargs):
    """ls -sl"""
    # forward all flags to ls, forcing the selection flag on
    selKwargs = dict(kwargs, sl=1)
    return ls(**selKwargs)
# handle to this module itself, for helpers that need to look names up on it
_thisModule = sys.modules[__name__]
def spaceLocator(*args, **kwargs):
    """
    Modifications:
        - returns a single Transform instead of a list with a single locator
    """
    import nodetypes
    res = cmds.spaceLocator(**kwargs)
    isQuery = kwargs.get('query', kwargs.get('q', False))
    isEdit = kwargs.get('edit', kwargs.get('e', False))
    # in query/edit mode just pass the raw result through
    if isQuery or isEdit:
        return res
    if isinstance(res, list):
        res = res[0]
    if isinstance(res, basestring):
        # unfortunately, spaceLocator returns non-unique names... however, it
        # doesn't support a parent option - so we can just throw a '|' in front
        # of the return result to get a unique name
        res = '|' + res
    return nodetypes.Transform(res)
def instancer(*args, **kwargs):
    """
    Maya Bug Fix:
      - name of newly created instancer was not returned
    """
    # instancer does not like PyNode objects
    strArgs = map(unicode, args)
    if kwargs.get('query', kwargs.get('q', False)):
        return cmds.instancer(*strArgs, **kwargs)
    if kwargs.get('edit', kwargs.get('e', False)):
        cmds.instancer(*strArgs, **kwargs)
        return PyNode(strArgs[0], 'instancer')
    # creation: diff the set of instancer nodes before/after to recover the
    # name the command fails to return
    before = set(cmds.ls(type='instancer'))
    cmds.instancer(*strArgs, **kwargs)
    created = set(cmds.ls(type='instancer')).difference(before)
    return PyNode(list(created)[0], 'instancer')
#--------------------------
# PyNode Exceptions
#--------------------------
class MayaObjectError(TypeError):
    # Raised when a name fails to resolve to a unique, existing Maya object.
    _objectDescription = 'Object'
    def __init__(self, node=None):
        # NOTE: unicode(None) is u'None' (truthy), so the node repr is
        # appended even when no node was given
        self.node = unicode(node)
    def __str__(self):
        # bug fix: the base message previously ended with ':' and then
        # appended ': %r', producing a doubled '::' before the node repr
        msg = "Maya %s does not exist (or is not unique)" % (self._objectDescription,)
        if self.node:
            msg += ": %r" % (self.node,)
        return msg
# specialization raised when the failing name refers to a node
class MayaNodeError(MayaObjectError):
    _objectDescription = 'Node'
# also an AttributeError so that failed attribute access can be caught
# with either exception type
class MayaAttributeError(MayaObjectError, AttributeError):
    _objectDescription = 'Attribute'
class MayaAttributeEnumError(MayaAttributeError):
    # raised when an enum value is not valid for an enum attribute
    _objectDescription = 'Attribute Enum'
    def __init__(self, node=None, enum=None):
        super(MayaAttributeEnumError, self).__init__(node)
        # the offending enum name/value, appended to the message if truthy
        self.enum = enum
    def __str__(self):
        msg = super(MayaAttributeEnumError, self).__str__()
        if self.enum:
            msg += " - %r" % (self.enum)
        return msg
# specialization raised when the failing name refers to a component
class MayaComponentError(MayaAttributeError):
    _objectDescription = 'Component'
class MayaInstanceError(MayaNodeError):
    # Raised when a dag path has become ambiguous due to instancing.
    def __str__(self):
        # bug fix: the base message previously ended with ':' and then
        # appended ': %r', producing a doubled '::' before the node repr
        msg = "Maya %s was reparented to an instance, and dag path is now ambiguous" % (self._objectDescription,)
        if self.node:
            msg += ": %r" % (self.node,)
        return msg
# specialization raised for per-particle attribute access failures
class MayaParticleAttributeError(MayaComponentError):
    _objectDescription = 'Per-Particle Attribute'
def _objectError(objectName):
    """Build the most specific lookup-failure exception for *objectName*."""
    # TODO: better name parsing
    # a '.' suggests a node.attr style name, so report an attribute failure
    if '.' in objectName:
        return MayaAttributeError(objectName)
    return MayaNodeError(objectName)
#--------------------------
# Object Wrapper Classes
#--------------------------
class PyNode(_util.ProxyUnicode):
"""
Abstract class that is base for all pymel nodes classes.
The names of nodes and attributes can be passed to this class, and the appropriate subclass will be determined.
>>> PyNode('persp')
nt.Transform(u'persp')
>>> PyNode('persp.tx')
Attribute(u'persp.translateX')
If the passed node or attribute does not exist an error will be raised.
"""
_name = None # unicode
# for DependNode : _api.MObjectHandle
# for DagNode : _api.MDagPath
# for Attribute : _api.MPlug
_node = None # Attribute Only: stores the PyNode for the plug's node
__apiobjects__ = {}
    def __new__(cls, *args, **kwargs):
        """ Catch all creation for PyNode classes, creates correct class depending on type passed.

        For nodes:
            MObject
            MObjectHandle
            MDagPath
            string/unicode

        For attributes:
            MPlug
            MDagPath, MPlug
            string/unicode

        With no positional args and a concrete DependNode subclass, a new node
        of that type is created in the scene instead.
        """
        import nodetypes
        # print cls.__name__, cls
        pymelType = None
        obj = None
        name = None
        attrNode = None
        argObj = None
        if args:
            if len(args) > 1:
                # Attribute passed as two args: ( node, attr )
                # valid types:
                #    node : MObject, MObjectHandle, MDagPath
                #    attr : MPlug  (TODO: MObject and MObjectHandle )
                # One very important reason for allowing an attribute to be specified as two args instead of as an MPlug
                # is that the node can be represented as an MDagPath which will differentiate between instances, whereas
                # an MPlug loses this distinction.
                attrNode = args[0]
                argObj = args[1]
                #-- First Argument: Node
                # ensure that the node object is a PyNode object
                if not isinstance(attrNode, nodetypes.DependNode):
                    attrNode = PyNode(attrNode)
                # #-- Second Argument: Plug or Component
                # # convert from string to _api objects.
                # if isinstance(argObj,basestring) :
                #     argObj = _api.toApiObject( argObj, dagPlugs=False )
                #
                # # components
                # elif isinstance( argObj, int ) or isinstance( argObj, slice ):
                #     argObj = attrNode._apiobject
            else:
                argObj = args[0]
                # the order of the following 3 checks is important, as it is in increasing generality
                if isinstance(argObj, Attribute):
                    attrNode = argObj._node
                    argObj = argObj.__apimplug__()
                elif isinstance(argObj, Component):
                    try:
                        argObj = argObj._node.__apimdagpath__()
                    except KeyError:
                        argObj = argObj._node.__apiobjects__['MObjectHandle']
                elif isinstance(argObj, PyNode):
                    try:
                        argObj = argObj.__apimdagpath__()
                    except (KeyError, AttributeError):
                        argObj = argObj.__apiobjects__['MObjectHandle']
                elif hasattr(argObj, '__module__') and argObj.__module__.startswith('maya.OpenMaya'):
                    # raw maya API object (MObject/MDagPath/MPlug/...) - use as-is
                    pass
                # elif isinstance(argObj,basestring) : # got rid of this check because of nameparse objects
                else:
                    # didn't match any known types. treat as a string
                    # convert to string then to _api objects.
                    try:
                        name = unicode(argObj)
                    except Exception:
                        raise MayaNodeError
                    else:
                        res = _api.toApiObject(name, dagPlugs=True)
                        # DagNode Plug
                        if isinstance(res, tuple):
                            # Plug or Component
                            # print "PLUG or COMPONENT", res
                            attrNode = PyNode(res[0])
                            argObj = res[1]
                            # There are some names which are both components and
                            # attributes: ie, scalePivot / rotatePivot
                            # toApiObject (and MSelectionList) will return the
                            # component in these ambigious cases; therefore,
                            # if we're explicitly trying to make an Attribute - ie,
                            #    Attribute('myCube.scalePivot')
                            # ...make sure to cast it to one in these cases
                            if issubclass(cls, Attribute) and \
                                    isinstance(argObj, _api.MObject) and \
                                    _api.MFnComponent().hasObj(argObj) and \
                                    '.' in name:
                                attrName = name.split('.', 1)[1]
                                if attrNode.hasAttr(attrName):
                                    return attrNode.attr(attrName)
                        # DependNode Plug
                        elif isinstance(res, _api.MPlug):
                            attrNode = PyNode(res.node())
                            argObj = res
                        # Other Object
                        elif res:
                            argObj = res
                        else:
                            # Removed ability to create components such as
                            #    PyNode('myCube.vtx')
                            # because of inconsistency - in general, for
                            #    PyNode(stringName)
                            # stringName should be a valid mel name, ie
                            #    cmds.select(stringName)
                            # should work
                            # # Check if it's a component that's normally indexed,
                            # # but has no index specified - ie, myPoly.vtx,
                            # # instead of the (mel-valid) myPoly.vtx[*]
                            # dotSplit = name.split('.')
                            # if len(dotSplit) == 2:
                            #     try:
                            #         res = PyNode(dotSplit[0])
                            #     except MayaObjectError:
                            #         pass
                            #     else:
                            #         try:
                            #             argObj = getattr(res, dotSplit[1])
                            #         except AttributeError:
                            #             pass
                            #         else:
                            #             if isinstance(argObj, cls):
                            #                 return argObj
                            # non-existent objects
                            # the object doesn't exist: raise an error
                            # note - at one point, I briefly changed things so
                            # that the code would check to see if the name
                            # existed, but had multiple matches, or didn't
                            # exist at all, and made it so MayaObjectError
                            # would give a more informative error message
                            # depending...
                            # ...but it had potential performance implications -
                            # at best, it was doing an extra cmds.objExists...
                            # ...and objExists wasn't fast enough, considering
                            # we will easily be trying to create 1000s of
                            # PyNodes, and the command gets slower as the size
                            # of the scene increases...
                            raise _objectError(name)
            #-- Components
            if validComponentIndexType(argObj):
                #pymelType, obj, name = _getPymelType( attrNode._apiobject )
                obj = {'ComponentIndex': argObj}
                # if we are creating a component class using an int or slice, then we must specify a class type:
                #    valid:    MeshEdge( myNode, 2 )
                #    invalid:  PyNode( myNode, 2 )
                assert issubclass(cls, Component), "%s is not a Component class" % cls.__name__
            #-- All Others
            else:
                pymelType, obj = _getPymelType(argObj, name)
                if attrNode is None and issubclass(pymelType, Attribute):
                    attrNode = PyNode(obj['MPlug'].node())
            # print pymelType, obj, name, attrNode
            # Virtual (non-existent) objects will be cast to their own virtual type.
            # so, until we make that, we're rejecting them
            assert obj is not None  # real objects only
            #assert obj or name
        else:
            # create node if possible
            if issubclass(cls, nodetypes.DependNode):
                newNode = None
                vClassInfo = _factories.virtualClasses.getVirtualClassInfo(cls)
                #----------------------------------
                # Pre Creation
                #----------------------------------
                postArgs = {}
                if vClassInfo and vClassInfo.preCreate:
                    kwargs = vClassInfo.preCreate(**kwargs)
                    if isinstance(kwargs, tuple):
                        assert len(kwargs) == 2, "preCreate must either 1 or 2 dictionaries of keyword arguments"
                        kwargs, postArgs = kwargs
                        assert isinstance(postArgs, dict), "preCreate second return value must be a dictionary of keyword arguments"
                    assert isinstance(kwargs, dict), "_preCreateVirtual must return a dictionary of keyword arguments"
                #----------------------------------
                # Creation
                #----------------------------------
                if vClassInfo and vClassInfo.create:
                    newNode = vClassInfo.create(**kwargs)
                    assert isinstance(newNode, basestring), "_createVirtual must return the name created node"
                elif hasattr(cls, '__melcmd__') and not cls.__melcmd_isinfo__:
                    try:
                        _logger.debug('creating node of type %s using %s' % (cls.__melnode__, cls.__melcmd__.__name__))
                        res = cls.__melcmd__(**kwargs)
                    except Exception, e:
                        _logger.debug('failed to create %s' % e)
                        pass
                    else:
                        if isinstance(res, list):
                            # we only want to return a single object
                            for x in res:
                                typ = cmds.nodeType(x)
                                if typ == cls.__melnode__:
                                    newNode = x
                                    break
                                elif typ == 'transform':
                                    shape = cmds.listRelatives(x, s=1)
                                    if shape and cmds.nodeType(shape[0]) == cls.__melnode__:
                                        newNode = shape[0]
                                        break
                            if newNode is None:
                                raise ValueError, "could not find type %s in result %s returned by %s" % (cls.__name__, res, cls.__melcmd__.__name__)
                        elif cls.__melnode__ == nodeType(res):  # isinstance(res,cls):
                            newNode = res
                        else:
                            raise ValueError, "unexpect result %s returned by %s" % (res, cls.__melcmd__.__name__)
                else:
                    _logger.debug('creating node of type %s using createNode' % cls.__melnode__)
                    try:
                        newNode = createNode(cls.__melnode__, **kwargs)
                    except RuntimeError:
                        # FIXME: should we really be passing on this?
                        pass
                #----------------------------------
                # Post Creation
                #----------------------------------
                if newNode:
                    if vClassInfo and vClassInfo.postCreate:
                        vClassInfo.postCreate(newNode, **postArgs)
                    return cls(newNode)
            raise ValueError, 'PyNode expects at least one argument: an object name, MObject, MObjectHandle, MDagPath, or MPlug'
        # print "type:", pymelType
        # print "PyNode __new__ : called with obj=%r, cls=%r, on object of type %s" % (obj, cls, pymelType)
        # if an explicit class was given (ie: pyObj=DagNode(u'pCube1')) just check if actual type is compatible
        # if none was given (ie generic pyObj=PyNode('pCube1')) then use the class corresponding to the type we found
        newcls = None
        if cls is not PyNode:
            # a PyNode class was explicitly required, if an existing object was passed to init check that the object type
            # is compatible with the required class, if no existing object was passed, create an empty PyNode of the required class
            # There is one exception type: MeshVertex( Mesh( 'pSphere1') )
            # TODO : can add object creation option in the __init__ if desired
            if not pymelType or not issubclass(pymelType, cls):
                if issubclass(cls, Component):
                    newcls = cls
                else:
                    raise TypeError, "Determined type is %s, which is not a subclass of desired type %s" % (pymelType.__name__, cls.__name__)
            else:
                newcls = pymelType
        else:
            newcls = pymelType
        if newcls:
            self = super(PyNode, cls).__new__(newcls)
            self._name = name
            if attrNode:
                self._node = attrNode
            self.__apiobjects__ = obj
            return self
        else:
            raise TypeError, "Cannot make a %s out of a %r object" % (cls.__name__, pymelType)
    def __init__(self, *args, **kwargs):
        # this prevents the _api class which is the second base, from being automatically instantiated. This __init__ should
        # be overridden on subclasses of PyNode
        # (all real initialization happens in __new__)
        pass
def __melobject__(self):
"""Special method for returning a mel-friendly representation."""
return self.name()
    def __apimfn__(self):
        """Get a ``maya.OpenMaya*.MFn*`` instance

        The MFn is cached in ``self.__apiobjects__['MFn']``; the cache is
        only trusted after re-validating the underlying MObject.
        """
        try:
            # if we have it, check that the mobject is still valid by calling
            # __apimobject__
            self.__apimobject__()
            # ...if it is valid, go ahead and return the cached MFn
            return self.__apiobjects__['MFn']
        except KeyError:
            if self.__apicls__:
                # use whatever type is appropriate
                obj = self.__apiobject__()
                if obj:
                    try:
                        mfn = self.__apicls__(obj)
                        self.__apiobjects__['MFn'] = mfn
                    except RuntimeError:
                        # when using PyNodes in strange places, like node
                        # creation callbacks, the proper MFn does not work yet,
                        # so we default to a super class and we don't save it,
                        # so that we can get the right one later
                        if isinstance(obj, _api.MDagPath):
                            mfn = _api.MFnDagNode(obj)
                            _logger.warning("Could not create desired MFn. Defaulting to MFnDagNode.")
                        elif isinstance(obj, _api.MObject):
                            mfn = _api.MFnDependencyNode(obj)
                            _logger.warning("Could not create desired MFn. Defaulting to MFnDependencyNode.")
                        else:
                            raise
                    # NOTE: implicitly returns None when __apicls__ or obj is
                    # falsy - callers appear to rely on that
                    return mfn
def __repr__(self):
"""
:rtype: `unicode`
"""
return u"%s(%r)" % (self.__class__.__name__, self.name())
def __radd__(self, other):
if isinstance(other, basestring):
return other.__add__(self.name())
else:
raise TypeError, "cannot concatenate '%s' and '%s' objects" % (other.__class__.__name__, self.__class__.__name__)
def __reduce__(self):
"""allows PyNodes to be pickled"""
return (PyNode, (self.name(),))
    def __eq__(self, other):
        """
        :rtype: `bool`

        Equality is based on comparing the underlying api objects; any
        failure to resolve either side is treated as "not equal".
        """
        if isinstance(other, PyNode):
            try:
                apiobj = other.__apiobject__()
            except TypeError:  # intermixing MDagPath with MObject
                return False
        else:
            try:
                # coerce strings/api objects to a PyNode first
                apiobj = PyNode(other).__apiobject__()
            except:
                # deliberately broad: anything that cannot be resolved is unequal
                return False
        try:
            return self.__apiobject__() == apiobj
        except:
            # deliberately broad: a dead/invalid object compares unequal
            return False
def __ne__(self, other):
"""
:rtype: `bool`
"""
# != does not work for MDagPath (maybe others) iff MDagPaths are equal (returns True)
return not self == other
def __nonzero__(self):
"""
:rtype: `bool`
"""
return self.exists()
def __lt__(self, other):
if isinstance(other, (basestring, PyNode)):
return self.name().__lt__(unicode(other))
else:
return NotImplemented
def __gt__(self, other):
if isinstance(other, (basestring, PyNode)):
return self.name().__gt__(unicode(other))
else:
return NotImplemented
def __le__(self, other):
if isinstance(other, (basestring, PyNode)):
return self.name().__le__(unicode(other))
else:
return NotImplemented
def __ge__(self, other):
if isinstance(other, (basestring, PyNode)):
return self.name().__ge__(unicode(other))
else:
return NotImplemented
#-----------------------------------------
# Name Info and Manipulation
#-----------------------------------------
def stripNamespace(self, *args, **kwargs):
"""
Returns the object's name with its namespace removed. The calling instance is unaffected.
The optional levels keyword specifies how many levels of cascading namespaces to strip, starting with the topmost (leftmost).
The default is 0 which will remove all namespaces.
:rtype: `other.NameParser`
"""
import other
return other.NameParser(self).stripNamespace(*args, **kwargs)
def swapNamespace(self, prefix):
"""Returns the object's name with its current namespace replaced with the provided one.
The calling instance is unaffected.
:rtype: `other.NameParser`
"""
import other
return other.NameParser(self).swapNamespace(prefix)
def namespaceList(self):
"""Useful for cascading references. Returns all of the namespaces of the calling object as a list
:rtype: `unicode` list
"""
return self.lstrip('|').rstrip('|').split('|')[-1].split(':')[:-1]
def addPrefix(self, prefix):
"""Returns the object's name with a prefix added to the beginning of the name
:rtype: `other.NameParser`
"""
import other
return other.NameParser(self).addPrefix(prefix)
# def attr(self, attr):
# """access to attribute of a node. returns an instance of the Attribute class for the
# given attribute."""
# return Attribute( '%s.%s' % (self, attr) )
def exists(self, **kwargs):
"objExists"
try:
if self.__apiobject__():
return True
except MayaObjectError:
pass
return False
objExists = exists
nodeType = cmds.nodeType
def select(self, **kwargs):
forbiddenKeys = ['all', 'allDependencyNodes', 'adn', 'allDagObjects' 'ado', 'clear', 'cl']
for key in forbiddenKeys:
if key in kwargs:
raise TypeError, "'%s' is an inappropriate keyword argument for object-oriented implementation of this command" % key
# stringify
return cmds.select(self.name(), **kwargs)
    def deselect(self):
        # remove this object from the active selection list
        self.select(deselect=1)
    def listSets(self, *args, **kwargs):
        '''
        Returns the list of sets (e.g. object sets, shading groups) that this
        object belongs to; equivalent to ``listSets -o $this``.

        :rtype: 'PyNode' list
        '''
        return listSets(o=self, *args, **kwargs)
listConnections = listConnections
connections = listConnections
listHistory = listHistory
history = listHistory
listFuture = listFuture
future = listFuture
# This was supposed to be removed in the 1.0 update, but somehow got left out...
deprecated_str_methods = ['__getitem__']
strDeprecateDecorator = _warnings.deprecated('Convert to string first using str() or PyNode.name()', 'PyNode')
def _deprecatePyNode():
    # replace each deprecated unicode method on PyNode with a wrapper that
    # still works but emits a deprecation warning
    def makeDeprecatedMethod(method):
        def f(self, *args):
            proxyMethod = getattr(_util.ProxyUnicode, method)
            return proxyMethod(self, *args)
        f.__doc__ = "deprecated\n"
        f.__name__ = method
        g = strDeprecateDecorator(f)
        setattr(PyNode, method, g)
    for method in deprecated_str_methods:
        makeDeprecatedMethod(method)
_deprecatePyNode()
# register the base class in the name -> PyNode-class lookup table
_factories.pyNodeNamesToPyNodes['PyNode'] = PyNode
# def _MObjectIn(x):
# if isinstance(x,PyNode): return x.__apimobject__()
# return PyNode(x).__apimobject__()
# def _MDagPathIn(x):
# if isinstance(x,DagNode): return x.__apimdagpath__()
# return PyNode(x).__apimdagpath__()
# def _MPlugIn(x):
# if isinstance(x,Attribute): return x.__apimplug__()
# return PyNode(x).__apimplug__()
# def _MPlugOut(self,x):
# try: return Attribute(self.node(), x)
# except: pass
# return Attribute(x)
#_factories.ApiTypeRegister.register('MObject', PyNode, inCast=_MObjectIn )
#_factories.ApiTypeRegister.register('MDagPath', DagNode, inCast=_MDagPathIn )
#_factories.ApiTypeRegister.register('MPlug', Attribute, inCast=_MPlugIn, outCast=_MPlugOut )
def _getParent(getter, obj, generations):
'''If generations is None, then a list of all the parents is returned.
'''
if generations == 0:
return obj
x = obj
allParents = [obj]
if generations is None:
i = -1
else:
i = generations
# If generations is positive, we will stop as soon as we get to the parent
# we need; otherwise, we will get all the parents
while i != 0:
try:
x = getter(x)
except Exception:
break
if x is None:
break
allParents.append(x)
i -= 1
if generations is None:
return allParents[1:]
if generations >= 1:
if generations < len(allParents):
return allParents[generations]
else:
return None
elif generations < 0:
if -generations > len(allParents):
return None
else:
return allParents[generations]
class Attribute(PyNode):
"""Attribute class
see pymel docs for details on usage
"""
#
"""
Attributes
==========
The Attribute class is your one-stop shop for all attribute related functions. Those of us who have spent time using MEL
have become familiar with all the many commands for operating on attributes. This class gathers them all into one
place. If you forget or are unsure of the right method name, just ask for help by typing `help(Attribute)`.
For the most part, the names of the class equivalents to the maya.cmds functions follow a fairly simple pattern:
`setAttr` becomes `Attribute.set`, `getAttr` becomes `Attribute.get`, `connectAttr` becomes `Attribute.connect` and so on.
Here's a simple example showing how the Attribute class is used in context.
>>> from pymel.core import *
>>> cam = PyNode('persp')
>>> if cam.visibility.isKeyable() and not cam.visibility.isLocked():
... cam.visibility.set( True )
... cam.visibility.lock()
...
>>> print cam.v.type() # shortnames also work
bool
Accessing Attributes
--------------------
You can access an attribute class in three ways. The first two require that you already have a `PyNode` object.
Shorthand
~~~~~~~~~
The shorthand method is the most visually appealing and readable -- you simply access the maya attribute as a normal python attribute --
but it has one major drawback: **if the attribute that you wish to acess has the same name as one of the attributes or methods of the
python class then it will fail**.
>>> cam # continue from where we left off above
Transform(u'persp')
>>> cam.visibility # long name access
Attribute(u'persp.visibility')
>>> cam.v # short name access
Attribute(u'persp.visibility')
Keep in mind, that regardless of whether you use the long or short name of the attribute, you are accessing the same underlying API object.
If you need the attribute formatted as a string in a particular way, use `Attribute.name`, `Attribute.longName`, `Attribute.shortName`,
`Attribute.plugAttr`, or `Attribute.lastPlugAttr`.
attr Method
~~~~~~~~~~~
The attr method is the safest way to access an attribute, and can even be used to access attributes that conflict with
python methods, which would fail using shorthand syntax. This method is passed a string which
is the name of the attribute to be accessed.
>>> cam.attr('visibility')
Attribute(u'persp.visibility')
Unlike the shorthand syntax, this method is capable of being passed attributes which are passed in as variables:
>>> for axis in ['scaleX', 'scaleY', 'scaleZ']:
... cam.attr( axis ).lock()
Direct Instantiation
~~~~~~~~~~~~~~~~~~~~
The last way of getting an attribute is by directly instantiating the class. You can pass the attribute name as a string, or if you have one handy,
pass in an api MPlug object. If you don't know whether the string name represents a node or an attribute, you can always instantiate via the `PyNode`
    class, which will determine the appropriate class automatically.
explicitly request an Attribute:
>>> Attribute( 'persp.visibility' )
Attribute(u'persp.visibility')
let PyNode figure it out for you:
>>> PyNode( 'persp.translate' )
Attribute(u'persp.translate')
Setting Attributes Values
-------------------------
To set the value of an attribute, you use the `Attribute.set` method.
>>> cam.translateX.set(0)
to set an attribute that expects a double3, you can use any iterable with 3 elements:
>>> cam.translate.set([4,5,6])
>>> cam.translate.set(datatypes.Vector([4,5,6]))
Getting Attribute Values
------------------------
To get the value of an attribute, you use the `Attribute.get` method. Keep in mind that, where applicable, the values returned will
be cast to pymel classes. This example shows that rotation (along with translation and scale) will be returned as a `Vector`.
>>> t = cam.translate.get()
>>> print t
[4.0, 5.0, 6.0]
>>> # translation is returned as a vector class
>>> print type(t)
<class 'pymel.core.datatypes.Vector'>
`set` is flexible in the types that it will accept, but `get` will always return the same type
for a given attribute. This can be a potential source of confusion:
>>> value = [4,5,6]
>>> cam.translate.set(value)
>>> result = cam.translate.get()
>>> value == result
False
>>> # why is this? because result is a Vector and value is a list
>>> # use `Vector.isEquivalent` or cast the list to a `list`
>>> result == datatypes.Vector(value)
True
>>> result.isEquivalent(value)
True
Connecting Attributes
---------------------
As you might expect, connecting and disconnecting attributes is pretty straightforward.
>>> cam.rotateX.connect( cam.rotateY )
>>> cam.rotateX.disconnect( cam.rotateY )
there are also handy operators for connection (`Attribute.__rshift__`) and disconnection (`Attribute.__floordiv__`)
>>> c = polyCube(name='testCube')[0]
>>> cam.tx >> c.tx # connect
>>> cam.tx.outputs()
[nt.Transform(u'testCube')]
>>> cam.tx // c.tx # disconnect
>>> cam.tx.outputs()
[]
"""
__metaclass__ = _factories.MetaMayaTypeWrapper
__apicls__ = _api.MPlug
attrItemReg = re.compile('\[(\d+)\]$')
# def __init__(self, *args, **kwargs ):
# self.apicls.__init__(self, self._apiobject )
    def __apiobject__(self):
        "Return the default API object (MPlug) for this attribute, if it is valid"
        # the MPlug is the canonical API handle for a (node, attribute) pair
        return self.__apimplug__()
def __apimobject__(self):
"Return the MObject for this attribute, if it is valid"
try:
handle = self.__apiobjects__['MObjectHandle']
except:
handle = _api.MObjectHandle(self.__apimplug__().attribute())
self.__apiobjects__['MObjectHandle'] = handle
if _api.isValidMObjectHandle(handle):
return handle.object()
raise MayaAttributeError
    def __apimplug__(self):
        "Return the MPlug for this attribute, if it is valid"
        # check validity
        # self.__apimobject__()
        # the MPlug is stored in __apiobjects__ at construction time
        return self.__apiobjects__['MPlug']
    def __apimdagpath__(self):
        "Return the MDagPath for the node of this attribute, if it is valid"
        try:
            return self.node().__apimdagpath__()
        except AttributeError:
            # non-DAG (plain dependency) nodes have no dag path; fall through
            # and implicitly return None
            pass
    def __apimattr__(self):
        "Return the MFnAttribute for this attribute, if it is valid"
        try:
            # lazily construct and cache the MFnAttribute function set
            if 'MFnAttribute' not in self.__apiobjects__:
                self.__apiobjects__['MFnAttribute'] = _api.MFnAttribute(self.__apimobject__())
            return self.__apiobjects__['MFnAttribute']
        except Exception:
            # any failure (including an invalid MObject) means the attribute
            # is no longer usable
            raise MayaAttributeError
# def __init__(self, attrName):
# assert isinstance( _api.__apiobject__(), _api.MPlug )
# if '.' not in attrName:
# raise TypeError, "%s: Attributes must include the node and the attribute. e.g. 'nodeName.attributeName' " % self
# self._name = attrName
# # TODO : MObject support
# self.__dict__['_multiattrIndex'] = 0
#
__getitem__ = _factories.wrapApiMethod(_api.MPlug, 'elementByLogicalIndex', '__getitem__')
#elementByPhysicalIndex = _factories.wrapApiMethod( _api.MPlug, 'elementByPhysicalIndex' )
def removeMultiInstance(self, index=None, break_=False):
if index is None:
if not self.isElement():
raise ValueError("if calling removeMultiInstance without an"
" index, attribute must be an array element")
cmds.removeMultiInstance(self, b=break_)
else:
if not self.isArray():
raise ValueError("if calling removeMultiInstance with an"
" index, attribute must be an array")
if isinstance(index, slice):
# plug indices are sparse, so we don't bother using
# slice.indices(len), since all that does is potentially truncate
# the indices we get back
indices = xrange(index.start, index.stop, index.step)
elif isinstance(index, int):
indices = [index]
else:
indices = index
for i in indices:
cmds.removeMultiInstance(self[i], b=break_)
__delitem__ = removeMultiInstance
def attr(self, attr):
"""
:rtype: `Attribute`
"""
node = self.node()
try:
plug = self.__apimplug__()
# if this plug is an array we can't properly get the child plug
if plug.isArray():
return node.attr(attr)
else:
attrObj = node.__apimfn__().attribute(attr)
return Attribute(node, plug.child(attrObj))
except RuntimeError:
# raise our own MayaAttributeError, which subclasses AttributeError and MayaObjectError
raise MayaAttributeError('%s.%s' % (self, attr))
def __getattr__(self, attr):
try:
return self.attr(attr)
except MayaAttributeError:
raise AttributeError, "%r has no attribute or method named '%s'" % (self, attr)
# Added the __call__ so to generate a more appropriate exception when a class method is not found
def __call__(self, *args, **kwargs):
raise TypeError("The object <%s> does not support the '%s' method" % (repr(self.node()), self.plugAttr()))
# Need an iterator which is NOT self, so that we can have independent
# iterators - ie, so if we do:
# zip(self, self)
# we get
# ( (self[0], self[0]), (self[1], self[1]), (self[2], self[2]) ... )
# and not
# ( (self[0], self[1]), (self[2], self[3]), (self[4], self[5]) ... )
def __iter__(self):
"""
iterator for multi-attributes
>>> from pymel.core import *
>>> f=newFile(f=1) #start clean
>>>
>>> at = PyNode( 'defaultLightSet.dagSetMembers' )
>>> nt.SpotLight()
nt.SpotLight(u'spotLightShape1')
>>> nt.SpotLight()
nt.SpotLight(u'spotLightShape2')
>>> nt.SpotLight()
nt.SpotLight(u'spotLightShape3')
>>> for x in at: print x
...
defaultLightSet.dagSetMembers[0]
defaultLightSet.dagSetMembers[1]
defaultLightSet.dagSetMembers[2]
"""
if self.isMulti():
for i in self._getArrayIndices()[1]:
yield self[i]
# return self[0]
else:
raise TypeError, "%s is not a multi-attribute and cannot be iterated over" % self
    def __str__(self):
        """
        :rtype: `str`
        """
        # coerce through str() since name() returns unicode
        return str(self.name())
    def __unicode__(self):
        """
        :rtype: `unicode`
        """
        return self.name()
def __eq__(self, other):
"""
:rtype: `bool`
"""
if not isinstance(other, Attribute):
try:
other = PyNode(other)
if not hasattr(other, '__apimplug__'):
return False
except (ValueError, TypeError): # could not cast to PyNode
return False
# Unfortunately, it seems that comparing two MPlugs for equality is
# essentially the same as just comparing their attribute objects. That
# means, that for instace, the plugs for objects like these will compare
# equal:
# node.attr[1] == node.attr[50]
# node.attr[5].subAttr == node.attr[7].subAttr
# Thefore, in order for the attributes to truly be equal:
# 1) the attributes must be equal
# 2) the indices must be equal
# 3) the indices of any parents must be equal
thisPlug = self.__apimplug__()
otherPlug = other.__apimplug__()
if thisPlug != otherPlug:
return False
try:
thisIndex = thisPlug.logicalIndex()
except RuntimeError:
thisIndex = None
try:
otherIndex = otherPlug.logicalIndex()
except RuntimeError:
otherIndex = None
if thisIndex != otherIndex:
return False
return self.parent() == other.parent()
    def __hash__(self):
        """
        :rtype: `int`
        """
        # hash on (node, node-less attribute name) so equal attributes
        # produce equal hashes
        return (self.plugNode(), self.name(includeNode=False)).__hash__()
    def __ne__(self, other):
        """
        :rtype: `bool`
        """
        # defined in terms of __eq__ so the two can never disagree
        return not self.__eq__(other)
    def name(self, includeNode=True, longName=True, fullAttrPath=False,
             fullDagPath=False, placeHolderIndices=True):
        """ Returns the name of the attribute (plug)
        >>> tx = SCENE.persp.t.tx
        >>> tx.name()
        u'persp.translateX'
        >>> tx.name(includeNode=False)
        u'translateX'
        >>> tx.name(longName=False)
        u'persp.tx'
        >>> tx.name(fullAttrPath=True, includeNode=False)
        u'translate.translateX'
        >>> vis = SCENE.perspShape.visibility
        >>> vis.name()
        u'perspShape.visibility'
        >>> vis.name(fullDagPath=True)
        u'|persp|perspShape.visibility'
        >>> og = SCENE.persp.instObjGroups.objectGroups
        >>> og.name()
        u'persp.instObjGroups[-1].objectGroups'
        >>> og.name(placeHolderIndices=False)
        u'persp.instObjGroups.objectGroups'
        :rtype: `unicode`
        """
        obj = self.__apimplug__()
        if obj:
            name = ''
            node = self.plugNode()
            if includeNode:
                import nodetypes
                # DAG nodes can render a full |path|to|node prefix
                if isinstance(node, nodetypes.DagNode):
                    name = node.name(long=fullDagPath)
                else:
                    name = node.name()
                name += '.'
            # positional flags documented inline; order matters to the API
            name += obj.partialName(False,  # includeNodeName
                                    True,  # includeNonMandatoryIndices
                                    True,  # includeInstancedIndices
                                    False,  # useAlias
                                    fullAttrPath,  # useFullAttributePath
                                    longName  # useLongNames
                                    )
            if not placeHolderIndices:
                # strip placeholder indices such as 'instObjGroups[-1]'
                name = name.replace('[-1]', '')
            return name
        # NOTE(review): assumes self._name is set when the plug is invalid -
        # confirm against PyNode construction paths
        raise MayaObjectError(self._name)
# def attributeName(self):
# pass
#
# def attributeNames(self):
# pass
    def plugNode(self):
        """plugNode
        Return the node (as a PyNode) that this plug belongs to.
        :rtype: `DependNode`
        """
        # we shouldn't have to use this
        # if self._node is None:
        #    self._node = PyNode(self.__apimplug__().node())
        return self._node
    # alias: attr.node() reads more naturally than attr.plugNode()
    node = plugNode
    def plugAttr(self, longName=False, fullPath=False):
        """
        >>> from pymel.core import *
        >>> at = SCENE.persp.t.tx
        >>> at.plugAttr(longName=False, fullPath=False)
        u'tx'
        >>> at.plugAttr(longName=False, fullPath=True)
        u't.tx'
        >>> at.plugAttr(longName=True, fullPath=True)
        u'translate.translateX'
        :rtype: `unicode`
        """
        # attribute portion only - the node name is always omitted
        return self.name(includeNode=False,
                         longName=longName,
                         fullAttrPath=fullPath)
    def lastPlugAttr(self, longName=False):
        """
        >>> from pymel.core import *
        >>> at = SCENE.persp.t.tx
        >>> at.lastPlugAttr(longName=False)
        u'tx'
        >>> at.lastPlugAttr(longName=True)
        u'translateX'
        :rtype: `unicode`
        """
        # leaf attribute only - parent attributes are never included
        return self.name(includeNode=False,
                         longName=longName,
                         fullAttrPath=False)
    def longName(self, fullPath=False):
        """
        >>> from pymel.core import *
        >>> at = SCENE.persp.t.tx
        >>> at.longName(fullPath=False)
        u'translateX'
        >>> at.longName(fullPath=True)
        u'translate.translateX'
        :rtype: `unicode`
        """
        # long-name variant of plugAttr
        return self.name(includeNode=False,
                         longName=True,
                         fullAttrPath=fullPath)
    def shortName(self, fullPath=False):
        """
        >>> from pymel.core import *
        >>> at = SCENE.persp.t.tx
        >>> at.shortName(fullPath=False)
        u'tx'
        >>> at.shortName(fullPath=True)
        u't.tx'
        :rtype: `unicode`
        """
        # short-name variant of plugAttr
        return self.name(includeNode=False,
                         longName=False,
                         fullAttrPath=fullPath)
    def nodeName(self):
        """The node part of this plug as a string
        :rtype: `unicode`
        """
        return self.plugNode().name()
def attrName(self, longName=False, includeNode=False):
"""Just the name of the attribute for this plug
This will have no indices, no parent attributes, etc...
This is suitable for use with cmds.attributeQuery
>>> at = SCENE.persp.instObjGroups.objectGroups
>>> at.name()
u'persp.instObjGroups[-1].objectGroups'
>>> at.attrName()
u'og'
>>> at.attrName(longName=True)
u'objectGroups'
"""
# Need to implement this with MFnAttribute - anything
# with MPlug will have the [-1]...
attr = self.__apimattr__()
if longName:
name = attr.name()
else:
name = attr.shortName()
if includeNode:
name = self.nodeName() + '.' + name
return name
    def namespace(self, *args, **kwargs):
        """Return the namespace of this plug's node (delegates to the node's
        ``namespace`` method, forwarding all arguments)."""
        return self.node().namespace(*args, **kwargs)
def array(self):
"""
Returns the array (multi) attribute of the current element:
>>> n = Attribute(u'initialShadingGroup.groupNodes[0]')
>>> n.isElement()
True
>>> n.array()
Attribute(u'initialShadingGroup.groupNodes')
This method will raise an error for attributes which are not elements of
an array:
>>> m = Attribute(u'initialShadingGroup.groupNodes')
>>> m.isElement()
False
>>> m.array()
Traceback (most recent call last):
...
TypeError: initialShadingGroup.groupNodes is not an array (multi) attribute
:rtype: `Attribute`
"""
try:
return Attribute(self._node, self.__apimplug__().array())
#att = Attribute(Attribute.attrItemReg.split( self )[0])
# if att.isMulti() :
# return att
# else :
# raise TypeError, "%s is not a multi attribute" % self
except:
raise TypeError, "%s is not an array (multi) attribute" % self
# TODO : do not list all children elements by default, allow to do
# skinCluster1.weightList.elements() for first level elements weightList[x]
# or skinCluster1.weightList.weights.elements() for all weightList[x].weights[y]
def elements(self):
"""
``listAttr -multi``
Return a list of strings representing all the attributes in the array.
If you don't need actual strings, it is recommended that you simply iterate through the elements in the array.
See `Attribute.__iter__`.
Modifications:
- returns an empty list when the result is None
"""
if self.isElement():
arrayAttr = self.array()
else:
arrayAttr = self
return _util.listForNone(cmds.listAttr(arrayAttr, multi=True))
# def item(self):
# try:
# return int(Attribute.attrItemReg.search(self).group(1))
# except: return None
def getArrayIndices(self):
"""
Get all set or connected array indices. Raises an error if this is not an array Attribute
:rtype: `int` list
"""
try:
return self._getArrayIndices()[1]
except RuntimeError:
raise TypeError, "%s is not an array (multi) attribute" % self
    def numElements(self):
        """
        The number of elements in an array attribute. Raises an error if this is not an array Attribute
        Be aware that ``getAttr(..., size=1)`` does not always produce the expected value. It is recommend
        that you use `Attribute.numElements` instead. This is a maya bug, *not* a pymel bug.
        >>> from pymel.core import *
        >>> f=newFile(f=1) #start clean
        >>>
        >>> dls = SCENE.defaultLightSet
        >>> dls.dagSetMembers.numElements()
        0
        >>> nt.SpotLight() # create a light, which adds to the lightSet
        nt.SpotLight(u'spotLightShape1')
        >>> dls.dagSetMembers.numElements()
        1
        >>> nt.SpotLight() # create another light, which adds to the lightSet
        nt.SpotLight(u'spotLightShape2')
        >>> dls.dagSetMembers.numElements()
        2
        :rtype: `int`
        """
        try:
            # _getArrayIndices returns (count, indices); we want the count
            return self._getArrayIndices()[0]
        except RuntimeError:
            raise TypeError, "%s is not an array (multi) attribute" % self
item = _factories.wrapApiMethod(_api.MPlug, 'logicalIndex', 'item')
index = _factories.wrapApiMethod(_api.MPlug, 'logicalIndex', 'index')
# enums
getEnums = getEnums
setEnums = setEnums
# getting and setting
set = setAttr
get = getAttr
setKey = _factories.functionFactory(cmds.setKeyframe, rename='setKey')
#----------------------
# xxx{ Connections
#----------------------
def isConnectedTo(self, other, ignoreUnitConversion=False, checkLocalArray=False, checkOtherArray=False):
"""
Determine if the attribute is connected to the passed attribute.
If checkLocalArray is True and the current attribute is a multi/array, the current attribute's elements will also be tested.
If checkOtherArray is True and the passed attribute is a multi/array, the passed attribute's elements will also be tested.
If checkLocalArray and checkOtherArray are used together then all element combinations will be tested.
"""
if cmds.isConnected(self, other, ignoreUnitConversion=ignoreUnitConversion):
return True
if checkLocalArray and self.isMulti():
for elem in self:
if elem.isConnectedTo(other, ignoreUnitConversion=ignoreUnitConversion, checkLocalArray=False, checkOtherArray=checkOtherArray):
return True
if checkOtherArray:
other = Attribute(other)
if other.isMulti():
for elem in other:
if self.isConnectedTo(elem, ignoreUnitConversion=ignoreUnitConversion, checkLocalArray=False, checkOtherArray=False):
return True
return False
# does not work because this method cannot return a value, it is akin to +=
# def __irshift__(self, other):
# """operator for 'isConnected'
# sphere.tx >>= box.tx
# """
# return cmds.isConnected(self, other)
    # expose the module-level connectAttr as a method
    connect = connectAttr
    def __rshift__(self, other):
        """
        operator for 'connectAttr'
        >>> from pymel.core import *
        >>> SCENE.persp.tx >> SCENE.top.tx # connect
        >>> SCENE.persp.tx // SCENE.top.tx # disconnect
        """
        # force=True so an existing incoming connection is replaced
        return connectAttr(self, other, force=True)
    # expose the module-level disconnectAttr as a method
    disconnect = disconnectAttr
    def __floordiv__(self, other):
        """
        operator for 'disconnectAttr'
        >>> from pymel.core import *
        >>> SCENE.persp.tx >> SCENE.top.tx # connect
        >>> SCENE.persp.tx // SCENE.top.tx # disconnect
        """
        # no return
        cmds.disconnectAttr(self, other)
def inputs(self, **kwargs):
"""
``listConnections -source 1 -destination 0``
see `Attribute.connections` for the full ist of flags.
:rtype: `PyNode` list
"""
kwargs['source'] = True
kwargs.pop('s', None)
kwargs['destination'] = False
kwargs.pop('d', None)
return listConnections(self, **kwargs)
def outputs(self, **kwargs):
"""
``listConnections -source 0 -destination 1``
see `Attribute.connections` for the full ist of flags.
:rtype: `PyNode` list
"""
kwargs['source'] = False
kwargs.pop('s', None)
kwargs['destination'] = True
kwargs.pop('d', None)
return listConnections(self, **kwargs)
    def insertInput(self, node, nodeOutAttr, nodeInAttr):
        """connect the passed node.outAttr to this attribute and reconnect
        any pre-existing connection into node.inAttr. if there is no
        pre-existing connection, this method works just like connectAttr.
        for example, for two nodes with the connection::
            a.out-->b.in
        running this command::
            b.in.insertInput( 'c', 'out', 'in' )
        causes the new connection order (assuming 'c' is a node with 'in' and 'out' attributes)::
            a.out-->c.in
            c.out-->b.in
        """
        # reroute the current input (if any) into the inserted node first
        inputs = self.inputs(plugs=1)
        if inputs:
            inputs[0].connect(node + '.' + nodeInAttr)
        # then connect the inserted node's output into this attribute,
        # replacing the old connection
        cmds.connectAttr(node + '.' + nodeOutAttr, self.name(), force=1)
    @_factories.addMelDocs('setKeyframe')
    def setKey(self, **kwargs):
        # this attribute itself is the keying target, so strip any
        # attribute/at flags the caller may have passed
        kwargs.pop('attribute', None)
        kwargs.pop('at', None)
        return cmds.setKeyframe(self, **kwargs)
#}
#----------------------
# xxx{ Info and Modification
#----------------------
def getAlias(self, **kwargs):
"""
Returns the alias for this attribute, or None.
The alias of the attribute is set through
Attribute.setAlias, or the aliasAttr command.
"""
alias = self.node().__apimfn__().plugsAlias(self.__apimplug__())
if alias:
return alias
else:
return None
    def setAlias(self, alias):
        """
        Sets the alias for this attribute (similar to aliasAttr).
        """
        cmds.aliasAttr(alias, self.name())
# def add( self, **kwargs):
# kwargs['longName'] = self.plugAttr()
# kwargs.pop('ln', None )
# return addAttr( self.node(), **kwargs )
    def delete(self):
        """deleteAttr"""
        return cmds.deleteAttr(self)
    def remove(self, **kwargs):
        'removeMultiInstance'
        #kwargs['break'] = True
        # thin wrapper; see removeMultiInstance above for richer index handling
        return cmds.removeMultiInstance(self, **kwargs)
# Edge, Vertex, CV Methods
# def getTranslation( self, **kwargs ):
# """xform -translation"""
# kwargs['translation'] = True
# kwargs['query'] = True
# return datatypes.Vector( cmds.xform( self, **kwargs ) )
#----------------------
# Info Methods
#----------------------
    def isDirty(self, **kwargs):
        """
        isDirty
        :rtype: `bool`
        """
        return cmds.isDirty(self, **kwargs)
    def setDirty(self, **kwargs):
        """dgdirty - mark this plug dirty so it gets re-evaluated"""
        cmds.dgdirty(self, **kwargs)
    def evaluate(self, **kwargs):
        """dgeval - force evaluation of this plug"""
        cmds.dgeval(self, **kwargs)
    def affects(self, **kwargs):
        """`affects` - attributes on this node affected by this attribute"""
        rawResult = cmds.affects(self.plugAttr(), self.node())
        if not rawResult:
            return []
        return [Attribute('%s.%s' % (self.node(), x)) for x in rawResult]
    def affected(self, **kwargs):
        """`affects -by` - attributes on this node that affect this attribute"""
        rawResult = cmds.affects(self.plugAttr(), self.node(), by=True)
        if not rawResult:
            return []
        return [Attribute('%s.%s' % (self.node(), x)) for x in rawResult]
    class _TempRealIndexedAttr(object):
        '''When used with the 'with statement', will return a 'sibling' of the
        whose indices all exist - creating indices if needed.
        If any indices are created, they will be destroyed in exit.
        '''
        def __init__(self, attr):
            self.origAttr = attr
            # indexed attrs whose indice we have created, and will need to
            # delete when done
            self.toDelete = None
        def _getRealIndexedElem(self, plug, i):
            # Ensure the element plug at chain position i has a real index,
            # substituting or creating one if necessary.
            parent = self.chain[i - 1]
            indices = parent.getArrayIndices()
            if plug.index() in indices:
                # index already exists - nothing to do
                return plug
            if indices:
                # print "plug didn't exist, but parent had existing indices..."
                # NOTE(review): this return value is ignored by both call
                # sites and self.chain[i] is not updated in this branch -
                # confirm whether the substituted element plug was meant to
                # be stored back into the chain
                return parent[indices[0]]
            # Because it was the Great One's number...
            newPlug = parent[99]
            # print "plug didn't exist, parent had no existing indices..."
            try:
                # this should create a 'real' instance at that index
                newPlug.get()
            except Exception:
                pass
            self.chain[i] = newPlug
            # Only need to delete the 'topmost' plug
            if self.toDelete is None:
                self.toDelete = newPlug
        def __enter__(self):
            # build the full parent chain, topmost parent first, ending with
            # the original attribute itself
            self.chain = self.origAttr.getAllParents(arrays=True)
            self.chain.reverse()
            self.chain.append(self.origAttr)
            # traverse, starting from upper-most parent, as we may need to
            # replace children with 'real' ones as we go down
            for i in xrange(len(self.chain)):
                # print 'processing:', i
                elem = self.chain[i]
                if self.toDelete:
                    # print 'need new plug due to upstream change'
                    # We've already had to make a new attribute upstream,
                    # which means we need to grab a 'new' object for every
                    # element downstream.
                    if elem.isChild():
                        newPlug = self.chain[i - 1].attr(elem.attrName())
                        self.chain[i] = newPlug
                    elif elem.isElement():
                        self._getRealIndexedElem(elem, i)
                elif elem.isElement():
                    self._getRealIndexedElem(elem, i)
            return self.chain[-1]
        def __exit__(self, type, value, traceback):
            # removing the topmost created plug removes everything below it
            if self.toDelete is not None:
                cmds.removeMultiInstance(self.toDelete.name())
# getAttr info methods
    def type(self):
        """
        getAttr -type
        :rtype: `unicode`
        """
        # Note - currently, this returns 'TdataCompound' even for multi,
        # NON-compound attributes, if you feed it the array plug (ie, not
        # an indexed element plug)
        # Not sure this is really desirable, but changing would be backward
        # incompatible... revisit this later?
        # query through a 'real' indexed sibling, since getAttr can fail on
        # element plugs whose index does not actually exist yet
        with self._TempRealIndexedAttr(self) as realAttr:
            res = cmds.getAttr(realAttr.name(), type=True)
            if res:
                return res
            # Sometimes getAttr seems to fail with dynamic attributes...
            if realAttr.isDynamic():
                # fall back to addAttr queries: attributeType first...
                at = cmds.addAttr(realAttr.name(), q=1, attributeType=1)
                if isinstance(at, (list, tuple)):
                    at = at[0]
                if at != 'typed':
                    return at
                # ...then, for 'typed' attributes, the dataType
                dt = cmds.addAttr(realAttr.name(), q=1, dataType=1)
                if isinstance(dt, (list, tuple)):
                    dt = dt[0]
                return dt
    def setLocked(self, locked, checkReference=CHECK_ATTR_BEFORE_LOCK):
        '''
        Sets the locked state for this plug's value. A plug's locked state determines whether or not the plug's value can be changed.
        :Parameters:
            locked : `bool`
                True if this plug's value is to be locked
            checkReference : `bool`
                Set True to raise errors on referenced attributes.
                By default pymel and the maya api do not check if the node is referenced before
                setting the locked state. This is unsafe because changes to the locked state on
                referenced nodes are not saved with the scene.
        '''
        if checkReference and self.node().isReferenced():
            raise AttributeError("The attribute '%s' is from a referenced file, and cannot be %s."
                                 % (self, ('unlocked', 'locked')[locked]))
        else:
            # _setLocked is the wrapped MPlug setter
            self._setLocked(locked)
    def lock(self, checkReference=CHECK_ATTR_BEFORE_LOCK):
        "setAttr -locked 1"
        return self.setLocked(True, checkReference=checkReference)
    def unlock(self, checkReference=CHECK_ATTR_BEFORE_LOCK):
        "setAttr -locked 0"
        return self.setLocked(False, checkReference=checkReference)
    def isMuted(self):
        """
        mute -q
        :rtype: `bool`
        """
        return cmds.mute(self.name(), q=1)
    def mute(self, **kwargs):
        """
        mute
        Mutes the attribute.
        """
        cmds.mute(self.name(), **kwargs)
    def unmute(self, **kwargs):
        """
        mute -disable -force
        Unmutes the attribute
        """
        # defaults mirror the documented 'mute -disable -force' behavior,
        # but remain overridable by the caller
        kwargs.setdefault('disable', True)
        kwargs.setdefault('force', True)
        cmds.mute(self.name(), **kwargs)
    def isSettable(self):
        """getAttr -settable
        :rtype: `bool`
        """
        # use MPlug.isFreeToChange, as it doesn't have the issues that getAttr
        # does with multi-compound attributes with no indices existing
        # return cmds.getAttr(self.name(placeHolderIndices=False), settable=True)
        return self.__apimplug__().isFreeToChange() == _api.MPlug.kFreeToChange
# attributeQuery info methods
    def isHidden(self):
        """
        attributeQuery -hidden
        :rtype: `bool`
        """
        return cmds.attributeQuery(self.attrName(), node=self.node(), hidden=True)
    def isConnectable(self):
        """
        attributeQuery -connectable
        :rtype: `bool`
        """
        return cmds.attributeQuery(self.attrName(), node=self.node(), connectable=True)
    def isUsedAsColor(self):
        """
        attributeQuery -usedAsColor
        """
        return cmds.attributeQuery(self.attrName(), node=self.node(), uac=True)
    def indexMatters(self):
        """Whether connection order matters for this (multi) attribute
        (MFnAttribute.indexMatters)."""
        return self.__apimattr__().indexMatters()
    # True when this is a multi (array) attribute
    isMulti = _factories.wrapApiMethod(_api.MPlug, 'isArray', 'isMulti')
def exists(self):
"""
Whether the attribute actually exists.
In spirit, similar to 'attributeQuery -exists'...
...however, also handles multi (array) attribute elements, such as plusMinusAverage.input1D[2]
:rtype: `bool`
"""
if not self.node().exists():
return False
if self.isElement():
arrayExists = self.array().exists()
if not arrayExists:
return False
# If the array exists, now check the array indices...
indices = self.array().getArrayIndices()
return bool(indices and self.index() in indices)
elif self.isChild():
# attributeQuery doesn't handle multi-compound attributes well...
# so need to traverse all the way up the parent chain
return self.parent().exists()
else:
try:
return bool(cmds.attributeQuery(self.lastPlugAttr(), node=self.node(), exists=True))
except TypeError:
return False
#}
#--------------------------
# xxx{ Ranges
#--------------------------
    def getSoftMin(self):
        """attributeQuery -softMin
        Returns None if softMin does not exist.
        :rtype: `float`
        """
        # query existence first: attributeQuery errors if no softMin is set
        if cmds.attributeQuery(self.attrName(), node=self.node(), softMinExists=True):
            return cmds.attributeQuery(self.attrName(), node=self.node(), softMin=True)[0]
    def getSoftMax(self):
        """attributeQuery -softMax
        Returns None if softMax does not exist.
        :rtype: `float`
        """
        if cmds.attributeQuery(self.attrName(), node=self.node(), softMaxExists=True):
            return cmds.attributeQuery(self.attrName(), node=self.node(), softMax=True)[0]
    def getMin(self):
        """attributeQuery -min
        Returns None if min does not exist.
        :rtype: `float`
        """
        if cmds.attributeQuery(self.attrName(), node=self.node(), minExists=True):
            return cmds.attributeQuery(self.attrName(), node=self.node(), min=True)[0]
    def getMax(self):
        """attributeQuery -max
        Returns None if max does not exist.
        :rtype: `float`
        """
        if cmds.attributeQuery(self.attrName(), node=self.node(), maxExists=True):
            return cmds.attributeQuery(self.attrName(), node=self.node(), max=True)[0]
def getSoftRange(self):
"""attributeQuery -softRange
returns a two-element list containing softMin and softMax. if the attribute does not have
a softMin or softMax the corresponding element in the list will be set to None.
:rtype: [`float`, `float`]
"""
softRange = []
softRange.append(self.getSoftMin())
softRange.append(self.getSoftMax())
return softRange
def getRange(self):
"""attributeQuery -range
returns a two-element list containing min and max. if the attribute does not have
a softMin or softMax the corresponding element will be set to None.
:rtype: `float`
"""
range = []
range.append(self.getMin())
range.append(self.getMax())
return range
    def setMin(self, newMin):
        # 'default' leaves the other limit untouched (see _setRange)
        self.setRange(newMin, 'default')
    def setMax(self, newMax):
        self.setRange('default', newMax)
    def setSoftMin(self, newMin):
        self.setSoftRange(newMin, 'default')
    def setSoftMax(self, newMax):
        self.setSoftRange('default', newMax)
    def setRange(self, *args):
        """provide a min and max value as a two-element tuple or list, or as two arguments to the
        method. To remove a limit, provide a None value. for example:
        >>> from pymel.core import *
        >>> s = polyCube()[0]
        >>> s.addAttr( 'new' )
        >>> s.new.setRange( -2, None ) #sets just the min to -2 and removes the max limit
        >>> s.new.setMax( 3 ) # sets just the max value and leaves the min at its previous default
        >>> s.new.getRange()
        [-2.0, 3.0]
        """
        # hard limits (min/max); see setSoftRange for UI slider limits
        self._setRange('hard', *args)
    def setSoftRange(self, *args):
        # soft limits (softMin/softMax) - same argument forms as setRange
        self._setRange('soft', *args)
def _setRange(self, limitType, *args):
if len(args) == 2:
newMin = args[0]
newMax = args[1]
if len(args) == 1:
try:
newMin = args[0][0]
newMax = args[0][1]
except:
raise TypeError, "Please provide a min and max value as a two-element tuple or list, or as two arguments to the method. To ignore a limit, provide a None value."
# # first find out what connections are going into and out of the object
# ins = self.inputs(p=1)
# outs = self.outputs(p=1)
#
# # get the current value of the attr
# val = self.get()
#
# # break the connections if they exist
# self.disconnect()
# MIN
# if 'default' is passed, we retain the current value
if newMin == 'default':
pass
elif newMin is None:
if limitType == 'hard':
addAttr(self, edit=1, hasMinValue=False)
else:
addAttr(self, edit=1, hasSoftMinValue=False)
else:
if limitType == 'hard':
addAttr(self, edit=1, minValue=newMin)
else:
addAttr(self, edit=1, softMinValue=newMin)
# MAX
# if 'default' is passed, we retain the current value
if newMax == 'default':
pass
elif newMax is None:
if limitType == 'hard':
addAttr(self, edit=1, hasMaxValue=False)
else:
addAttr(self, edit=1, hasSoftMaxValue=False)
else:
if limitType == 'hard':
addAttr(self, edit=1, maxValue=newMax)
else:
addAttr(self, edit=1, softMaxValue=newMax)
# # set the value to be what it used to be
# self.set(val)
#
# # remake the connections
# for conn in ins:
# conn >> self
#
# for conn in outs:
# self >> outs
# def getChildren(self):
# """attributeQuery -listChildren"""
# return map(
# lambda x: Attribute( self.node() + '.' + x ),
# _util.listForNone( cmds.attributeQuery(self.lastPlugAttr(), node=self.node(), listChildren=True) )
# )
#}
#--------------------------
# xxx{ Relatives
#--------------------------
def getChildren(self):
"""attributeQuery -listChildren
:rtype: `Attribute` list
"""
res = []
for i in range(self.numChildren()):
res.append(Attribute(self.node(), self.__apimfn__().child(i)))
return res
children = getChildren
    def iterDescendants(self, levels=None, leavesOnly=False):
        '''Yields all attributes "below" this attribute, recursively,
        traversing down both through multi/array elements, and through
        compound attribute children.

        Parameters
        ----------
        levels : int or None
            the number of levels deep to descend; each descent from an array
            to an array element, and from a compound to it's child, counts as
            one level (so, if you have a compound-multi attr parentAttr, to get
            to parentAttr[0].child would require levels to be at least 2); None
            means no limit
        leavesOnly : bool
            if True, then results will only be returned if they do not have any
            children to recurse into (either because it's not an arry or
            compound, or because we've hit the levels limit)
        '''
        # work out the depth budget remaining for recursive calls
        if levels is None:
            nextLevels = None
        elif levels <= 0:
            return
        else:
            nextLevels = levels - 1

        def hasArrayChildren(attr):
            # true only for array plugs that currently have elements
            return attr.isArray() and attr.evaluateNumElements()

        def isLeaf(attr):
            # a "leaf" either has nothing to recurse into, or we have
            # exhausted the allowed depth
            return ((nextLevels is not None and nextLevels <= 0) or
                    (not attr.isCompound() and not hasArrayChildren(attr)))

        # arrays yield elements; compounds yield child attrs; anything else
        # has no descendants
        if self.isArray():
            children = iter(self)
        elif self.isCompound():
            children = self.getChildren()
        else:
            children = []

        for child in children:
            leaf = isLeaf(child)
            if not leavesOnly or leaf:
                yield child
            if not leaf:
                for grandChild in child.iterDescendants(levels=nextLevels,
                                                        leavesOnly=leavesOnly):
                    yield grandChild
def getSiblings(self):
"""
attributeQuery -listSiblings
:rtype: `Attribute` list
"""
try:
return self.getParent().getChildren()
except:
pass
siblings = getSiblings
    @_warnings.deprecated('use Attribute.getParent instead', 'Attribute')
    def firstParent(self):
        "deprecated: use getParent instead"
        # NOTE(review): the bare except deliberately swallows failures (ie, no
        # parent plug) and returns None; kept as-is since this is deprecated
        try:
            return Attribute(self.node(), self.__apimfn__().parent())
        except:
            pass
@staticmethod
def _getAttrParent(plug):
if plug.isChild():
return plug.parent()
else:
return None
@staticmethod
def _getAttrOrMultiParent(plug):
if plug.isChild():
return plug.parent()
elif plug.isElement():
return plug.array()
else:
return None
def getParent(self, generations=1, arrays=False):
"""
Modifications:
- added optional generations keyword arg, which gives the number of
levels up that you wish to go for the parent
Negative values will traverse from the top.
A value of 0 will return the same node.
The default value is 1.
If generations is None, it will be interpreted as 'return all
parents', and a list will be returned.
Since the original command returned None if there is no parent,
to sync with this behavior, None will be returned if generations
is out of bounds (no IndexError will be thrown).
- added optional arrays keyword arg, which if True, will also
traverse from an array element to an array plug
:rtype: `Attribute`
"""
if arrays:
getter = self._getAttrOrMultiParent
else:
getter = self._getAttrParent
res = _getParent(getter, self.__apimfn__(), generations)
if generations is None:
if res is None:
return []
return [Attribute(self.node(), x) for x in res]
elif res is not None:
return Attribute(self.node(), res)
    def getAllParents(self, arrays=False):
        """
        Return a list of all parents above this.

        Starts from the parent immediately above, going up.

        :param arrays: if True, also traverse from an array element up to its
            owning array plug (see `getParent`)
        :rtype: `Attribute` list
        """
        return self.getParent(generations=None, arrays=arrays)

    parent = getParent
def _MObjectIn(x):
    # coerce non-PyNode values (names, api objects) to PyNode first, then
    # extract the underlying MObject
    if isinstance(x, PyNode):
        return x.__apimobject__()
    return PyNode(x).__apimobject__()
def _MDagPathIn(x):
    # coerce non-PyNode values to PyNode first, then extract the MDagPath
    if isinstance(x, PyNode):
        return x.__apimdagpath__()
    return PyNode(x).__apimdagpath__()
def _MPlugIn(x):
    # coerce non-PyNode values to PyNode first, then extract the MPlug
    if isinstance(x, PyNode):
        return x.__apimplug__()
    return PyNode(x).__apimplug__()
def _MPlugOut(self, x):
    # wrap an MPlug returned by an api method as a PyNode anchored to the
    # node of the plug the method was called on
    return PyNode(self.node(), x)
    # try: return PyNode(self.node(), x)
    # except: pass
    # return PyNode(x)
# teach the api wrap layer how to convert PyNodes to/from the raw api types
# that appear in wrapped method signatures
_factories.ApiTypeRegister.register('MObject', PyNode, inCast=_MObjectIn)
_factories.ApiTypeRegister.register('MDagPath', PyNode, inCast=_MDagPathIn)
_factories.ApiTypeRegister.register('MPlug', PyNode, inCast=_MPlugIn, outCast=_MPlugOut)
# TODO:
# -----
# Seperate out _makeComponentHandle and _setComponentHandle - ie, order should be:
# 1. _makeComponentHandle
# 2. _makeMFnComponent
# 3. _setComponentHandle
# Implement makeComponentFromIndex - have it return an MObject handle
# Implement multiple component labels! (ie, surface iso can be 'u' or 'v')
# Add 'setCompleteData' when we can find how many components (instead of just 'setComplete')
# Handle multiple _ComponentLabel__'s that refer to different flavors of same component type -
# ie, NurbsSurface.u/.v/.uv, transform.rotatePivot/scalePivot
# NurbsSurfaceRange
# Make it work with multiple component types in single component(?)
def _formatSlice(sliceObj):
startIndex, stopIndex, step = sliceObj.start, sliceObj.stop, sliceObj.step
if startIndex == stopIndex:
sliceStr = '%s' % startIndex
elif step is not None and step != 1:
sliceStr = '%s:%s:%s' % (startIndex, stopIndex, step)
else:
sliceStr = '%s:%s' % (startIndex, stopIndex)
return sliceStr
# proxy wrapper for the builtin slice type (slice itself cannot be
# subclassed); HashableSlice builds on this to add hashing
ProxySlice = _util.proxyClass(slice, 'ProxySlice', module=__name__, dataAttrName='_slice', makeDefaultInit=True)

# prevent auto-completion generator from getting confused
ProxySlice.__module__ = __name__
# Really, don't need to have another class inheriting from
# the proxy class, but do this so I can define a method using
# normal class syntax...
class HashableSlice(ProxySlice):
    """A slice stand-in that is hashable, so it can live in sets / dict keys
    (used for component indices)."""

    def __init__(self, *args, **kwargs):
        # a single slice/HashableSlice argument is wrapped directly; any other
        # argument combination is forwarded to the slice() constructor
        if len(args) == 1 and not kwargs and isinstance(args[0], (slice, HashableSlice)):
            if isinstance(args[0], HashableSlice):
                self._slice = args[0]._slice
            else:
                self._slice = args[0]
        else:
            self._slice = slice(*args, **kwargs)

    def __hash__(self):
        # hash is cached; safe because the underlying slice is never mutated
        if not hasattr(self, '_hash'):
            self._hash = (self.start, self.stop, self.step).__hash__()
        return self._hash

    def _toNormalSlice(self):
        # unwrap to a plain builtin slice
        return slice(self.start, self.stop, self.step)

    def __cmp__(self, other):
        # NOTE(review): __cmp__ is python-2 only; ignored under python 3
        if isinstance(other, HashableSlice):
            other = other._toNormalSlice()
        elif not isinstance(other, slice):
            return -1
        return slice.__cmp__(self._toNormalSlice(), other)

    @property
    def start(self):
        return self._slice.start

    @property
    def stop(self):
        return self._slice.stop

    @property
    def step(self):
        return self._slice.step
class Component(PyNode):

    """
    Abstract base class for pymel components.
    """
    __metaclass__ = _factories.MetaMayaComponentWrapper
    # MFn function-set class used to build/inspect components of this type;
    # derived classes override with a more specific function set
    _mfncompclass = _api.MFnComponent
    _apienum__ = _api.MFn.kComponent
    # the mel component name ('vtx', 'cv', ...); may be an iterable of names
    # for multi-flavor components, or None on this abstract base
    _ComponentLabel__ = None

    # Maya 2008 and earlier have no kUint64SingleIndexedComponent /
    # MFnUint64SingleIndexedComponent...
    _componentEnums = [_api.MFn.kComponent,
                       _api.MFn.kSingleIndexedComponent,
                       _api.MFn.kDoubleIndexedComponent,
                       _api.MFn.kTripleIndexedComponent]

    if hasattr(_api.MFn, 'kUint64SingleIndexedComponent'):
        _hasUint64 = True
        _componentEnums.append(_api.MFn.kUint64SingleIndexedComponent)
    else:
        _hasUint64 = False

    @classmethod
    def _componentMObjEmpty(cls, mobj):
        """
        Returns true if the given component mobj is empty (has no elements).
        """
        # Note that a component marked as complete will return elementCount == 0,
        # even if it is not truly empty.
        #
        # Even MFnComponent.isEmpty will sometimes "lie" if component is complete.
        #
        # Try this:
        #
        # import maya.OpenMaya as api
        # import maya.cmds as cmds
        #
        # melSphere = cmds.sphere()[0]
        # selList = _api.MSelectionList()
        # selList.add(melSphere + '.cv[*][*]')
        # compObj = _api.MObject()
        # dagPath = _api.MDagPath()
        # selList.getDagPath(0, dagPath, compObj)
        # mfnComp = _api.MFnDoubleIndexedComponent(compObj)
        # print "is empty:", mfnComp.isEmpty()
        # print "is complete:", mfnComp.isComplete()
        # print "elementCount:", mfnComp.elementCount()
        # print
        # mfnComp.setComplete(True)
        # print "is empty:", mfnComp.isEmpty()
        # print "is complete:", mfnComp.isComplete()
        # print "elementCount:", mfnComp.elementCount()
        # print
        # mfnComp.setComplete(False)
        # print "is empty:", mfnComp.isEmpty()
        # print "is complete:", mfnComp.isComplete()
        # print "elementCount:", mfnComp.elementCount()
        # print

        # temporarily clear the 'complete' flag so isEmpty reports the truth
        # (see demonstration above), then restore it
        mfnComp = _api.MFnComponent(mobj)
        completeStatus = mfnComp.isComplete()
        if completeStatus:
            mfnComp.setComplete(False)
        isEmpty = mfnComp.isEmpty()
        if completeStatus:
            mfnComp.setComplete(True)
        return isEmpty

    @classmethod
    def _compOrEmptyList(cls, node, components):
        # return [] for a falsy non-api 'components' value, otherwise build a
        # component instance; api objects are never treated as falsy here
        if (not isinstance(components, (_api.MObject, _api.MFnComponent))
                and not components):
            return []
        return cls(node, components)

    def __init__(self, *args, **kwargs):
        # the Component class can be instantiated several ways:
        # Component(dagPath, component):
        #    args get stored on self._node and
        #    self.__apiobjects__['MObjectHandle'] respectively
        # Component(dagPath):
        #    in this case, stored on self.__apiobjects__['MDagPath']
        #    (self._node will be None)

        # First, ensure that we have a self._node...
        if not self._node:
            dag = self.__apiobjects__['MDagPath']
            self._node = PyNode(dag)
        assert(self._node)

        # Need to do indices checking even for non-dimensional
        # components, because the ComponentIndex might be used to
        # specify the 'flavor' of the component - ie, 'scalePivot' or
        # 'rotatePivot' for Pivot components
        self._indices = self.__apiobjects__.get('ComponentIndex', None)

        if self._indices:
            if _util.isIterable(self._ComponentLabel__):
                oldCompLabel = set(self._ComponentLabel__)
            else:
                oldCompLabel = set((self._ComponentLabel__,))
            if isinstance(self._indices, dict):
                if len(self._indices) > 1:
                    assert set(self._indices.iterkeys()).issubset(oldCompLabel)
                    self._ComponentLabel__ = self._indices.keys()
                else:
                    # dict only has length 1..
                    self._ComponentLabel__ = self._indices.keys()[0]
                    self._indices = self._indices.values()[0]
            if isinstance(self._indices, ComponentIndex) and self._indices.label:
                assert self._indices.label in oldCompLabel
                self._ComponentLabel__ = self._indices.label
        elif 'MObjectHandle' not in self.__apiobjects__:
            # We're making a component by ComponentClass(shapeNode)...
            # set a default label if one is specified
            if self._defaultLabel():
                self._ComponentLabel__ = self._defaultLabel()

    def __apimdagpath__(self):
        "Return the MDagPath for the node of this component, if it is valid"
        try:
            # print "NODE", self.node()
            return self.node().__apimdagpath__()
        except AttributeError:
            pass

    def __apimobject__(self):
        "get the MObject for this component if it is valid"
        handle = self.__apihandle__()
        if _api.isValidMObjectHandle(handle):
            return handle.object()
        # Can't use self.name(), as that references this!
        raise MayaObjectError(self._completeNameString())

    def __apiobject__(self):
        return self.__apimobject__()

    def __apihandle__(self):
        # lazily build (and cache) the MObjectHandle for this component
        if 'MObjectHandle' not in self.__apiobjects__:
            handle = self._makeComponentHandle()
            if not handle or not _api.isValidMObjectHandle(handle):
                raise MayaObjectError(self._completeNameString())
            self.__apiobjects__['MObjectHandle'] = handle
        return self.__apiobjects__['MObjectHandle']

    def __apicomponent__(self):
        # lazily build (and cache) the MFnComponent-derived function set
        mfnComp = self.__apiobjects__.get('MFnComponent', None)
        if mfnComp is None:
            mfnComp = self._mfncompclass(self.__apimobject__())
            self.__apiobjects__['MFnComponent'] = mfnComp
        return mfnComp

    def __apimfn__(self):
        return self.__apicomponent__()

    def __eq__(self, other):
        # equality is delegated to the api component comparison
        if not hasattr(other, '__apicomponent__'):
            return False
        return self.__apicomponent__().isEqual(other.__apicomponent__().object())

    def __nonzero__(self):
        """
        :rtype: `bool`
        """
        return bool(len(self))

    def __str__(self):
        return str(self.name())

    def __unicode__(self):
        return self.name()

    def _completeNameString(self):
        # 'node.label' string naming the whole component set
        return u'%s.%s' % (self.node(), self.plugAttr())

    def _makeComponentHandle(self):
        component = None
        # try making from MFnComponent.create, if _mfncompclass has it defined
        if ('create' in dir(self._mfncompclass) and
                self._apienum__ not in self._componentEnums + [None]):
            try:
                component = self._mfncompclass().create(self._apienum__)
            # Note - there's a bug with kSurfaceFaceComponent - can't use create
            except RuntimeError:
                pass
            else:
                if not _api.isValidMObject(component):
                    component = None

        # that didn't work - try checking if we have a valid plugAttr
        if not component and self.plugAttr():
            try:
                component = _api.toApiObject(self._completeNameString())[1]
            except Exception:
                pass
            else:
                if not _api.isValidMObject(component):
                    component = None

        # component objects we create always start out 'complete'
        mfnComp = self._mfncompclass(component)
        mfnComp.setComplete(True)

        return _api.MObjectHandle(component)

    def __melobject__(self):
        # ask maya for the shortest mel-selection strings for this component
        selList = _api.MSelectionList()
        selList.add(self.__apimdagpath__(), self.__apimobject__(), False)
        strings = []
        selList.getSelectionStrings(0, strings)
        # re-anchor the returned strings on this component's node name
        nodeName = self.node().name() + '.'
        strings = [nodeName + x.split('.', 1)[-1] for x in strings]
        if not strings:
            return self._completeNameString()
        elif len(strings) == 1:
            return strings[0]
        else:
            return strings

    def _defaultLabel(self):
        """
        Intended for classes such as NurbsSurfaceRange which have multiple possible
        component labels (ie, u, v, uv), and we want to specify a 'default' one
        so that we can do NurbsSurfaceRange(myNurbsSurface).

        This should be None if either the component only has one label, or picking
        a default doesn't make sense (ie, in the case of Pivot, we have no
        idea whether the user would want the scale or rotate pivot, so
        doing Pivot(myObject) makes no sense...
        """
        return None

    def name(self):
        # single selection string, or the repr of the list if there are many
        melObj = self.__melobject__()
        if isinstance(melObj, basestring):
            return melObj
        return repr(melObj)

    def node(self):
        """Return the node (shape/transform) this component belongs to."""
        return self._node

    def namespace(self, *args, **kwargs):
        return self.node().namespace(*args, **kwargs)

    # just for backward compatibility with old Component class (though the
    # only place this WAS used was with particles...)
    plugNode = node

    def plugAttr(self):
        """Return the mel component label (ie, 'vtx', 'cv') for this component."""
        return self._ComponentLabel__

    def isComplete(self, *args, **kwargs):
        return self._isCompleteMfnComp(self.__apicomponent__())

    def _isCompleteMfnComp(self, mfncomp):
        # overridden in DiscreteComponent to also compare element counts
        return mfncomp.isComplete()

    @staticmethod
    def numComponentsFromStrings(*componentStrings):
        """
        Does basic string processing to count the number of components
        given a number of strings, which are assumed to be the valid mel names
        of components.
        """
        numComps = 0
        for compString in componentStrings:
            # a bracketed range multiplies the count; bare indices count as 1
            indices = re.findall(r'\[[^\]]*\]', compString)
            newComps = 1
            if indices:
                for index in indices:
                    if ':' in index:
                        indexSplit = index.split(':')
                        # + 1 is b/c mel indices are inclusive
                        newComps *= int(indexSplit[1]) - int(indexSplit[0]) + 1
            numComps += newComps
        return numComps
class DimensionedComponent(Component):

    """
    Components for which having a __getitem__ of some sort makes sense

    ie, myComponent[X] would be reasonable.

    Derived classes should implement:
        _dimLength
    """
    # All components except for the pivot component and the unknown ones are
    # indexable in some manner
    dimensions = 0

    def __init__(self, *args, **kwargs):
        # the Component class can be instantiated several ways:
        # Component(dagPath, component):
        #    args get stored on self._node and
        #    self.__apiobjects__['MObjectHandle'] respectively
        # Component(dagPath):
        #    in this case, stored on self.__apiobjects__['MDagPath']
        #    (self._node will be None)
        super(DimensionedComponent, self).__init__(*args, **kwargs)

        isComplete = True

        # If we're fed an MObjectHandle already, we don't allow
        # __getitem__ indexing... unless it's complete
        handle = self.__apiobjects__.get('MObjectHandle', None)
        if handle is not None:
            mfncomp = self._mfncompclass(handle.object())
            if not self._isCompleteMfnComp(mfncomp):
                isComplete = False

        if isinstance(self._indices, dict) and len(self._indices) > 1:
            isComplete = False

        # If the component is complete, we allow further indexing of it using
        # __getitem__
        # Whether or not __getitem__ indexing is allowed, and what dimension
        # we are currently indexing, is stored in _partialIndex
        # If _partialIndex is None, __getitem__ indexing is NOT allowed
        # Otherwise, _partialIndex should be a ComponentIndex object,
        # and it's length indicates how many dimensions have already been
        # specified.
        if isComplete:
            # Do this test before doing 'if self._indices',
            # because an empty ComponentIndex will be 'False',
            # but could still have useful info (like 'label')!
            if isinstance(self._indices, ComponentIndex):
                if len(self._indices) < self.dimensions:
                    self._partialIndex = self._indices
                else:
                    self._partialIndex = None
            elif self._indices:
                self._partialIndex = None
            else:
                self._partialIndex = ComponentIndex(label=self._ComponentLabel__)
        else:
            self._partialIndex = None

    def _completeNameString(self):
        # Note - most multi-dimensional components allow selection of all
        # components with only a single index - ie,
        #    myNurbsSurface.cv[*]
        # will work, even though nurbs cvs are double-indexed
        # However, some multi-indexed components WON'T work like this, ie
        #    myNurbsSurface.sf[*]
        # FAILS, and you MUST do:
        #    myNurbsSurface.sf[*][*]
        # ...However, some multi-indexed components (well, only LatticePoint
        # that I know of) will give incorrect results with
        #    ffd1LatticeShape.pt[*][*][*]
        # ...and so you must do
        #    ffd1LatticeShape.pt[*]
        return (super(DimensionedComponent, self)._completeNameString() +
                ('[*]' * self.dimensions))

    def _makeComponentHandle(self):
        indices = self._standardizeIndices(self._indices)
        handle = self._makeIndexedComponentHandle(indices)
        return handle

    def _makeIndexedComponentHandle(self, indices):
        """
        Returns an MObjectHandle that points to a maya component object with
        the given indices.
        """
        selList = _api.MSelectionList()

        def addComp(compName):
            try:
                selList.add(compName)
            except RuntimeError:
                raise MayaComponentError(compName)

        if len(indices) == 1 and self._isCompleteIndex(indices[0]):
            # fast path: select the whole component set with a '[*]...' string
            addComp(self._completeNameString())
        else:
            for index in indices:
                compName = Component._completeNameString(self)
                for dimNum, dimIndex in enumerate(index):
                    if isinstance(dimIndex, (slice, HashableSlice)):
                        # by the time we're gotten here, standardizedIndices
                        # should have either flattened out slice-indices
                        # (DiscreteComponents) or disallowed slices with
                        # step values (ContinuousComponents)
                        if dimIndex.start is None and dimIndex.stop is None:
                            dimIndex = '*'
                        else:
                            # fill in the missing end of a half-open slice from
                            # the valid range for this dimension
                            if dimIndex.start is None:
                                if isinstance(self, DiscreteComponent):
                                    start = 0
                                else:
                                    partialIndex = ComponentIndex(('*',) * dimNum,
                                                                  index.label)
                                    start = self._dimRange(partialIndex)[0]
                            else:
                                start = dimIndex.start
                            if dimIndex.stop is None:
                                partialIndex = ComponentIndex(('*',) * dimNum,
                                                              index.label)
                                stop = self._dimRange(partialIndex)[1]
                            else:
                                stop = dimIndex.stop
                            dimIndex = "%s:%s" % (start, stop)
                    compName += '[%s]' % dimIndex
                addComp(compName)
        compMobj = _api.MObject()
        dagPath = _api.MDagPath()
        selList.getDagPath(0, dagPath, compMobj)
        return _api.MObjectHandle(compMobj)

    VALID_SINGLE_INDEX_TYPES = []  # re-define in derived!

    # For situations in which we want a component object to represent ALL the
    # possible components of that type - ie, all the vertices - it is a LOT
    # faster to special case that situation, rather than the default behavior,
    # which will flatten out the components into a list, etc.
    # However, the shortcut for "complete" components will not work for all
    # component types (ie, subdiv components), so this function controls whether
    # it will be used.
    _ALLOW_COMPLETE_SHORTCUT = True

    # in addition, for some types, it may USUALLY be allowable to use [*]
    # syntax, but in some specific instances, it will cause problems... ie,
    # for empty meshes, doing
    #   pCubeShape1.vtx[*]
    # will error...
    def _allowCompleteShortcut(self):
        # check for the empty mesh problem by grabbing the node's mfn - if it's
        # a dag node, we have problems
        return (self._ALLOW_COMPLETE_SHORTCUT
                and not issubclass(_api.MFnDagNode,
                                   type(self.node().__apimfn__())))

    def _standardizeIndices(self, indexObjs, allowIterable=True, label=None,
                            allowComplete=True):
        """
        Convert indexObjs to an iterable of ComponentIndex objects.

        indexObjs may be a member of VALID_SINGLE_INDEX_TYPES, a
        ComponentIndex object, or an iterable of such items (if allowIterable),
        or 'None'
        """
        # For speed, we want to allow through "entire component" indices,
        # without flattening... but only if "allowComplete" is True
        if not self._allowCompleteShortcut():
            allowComplete = False

        if indexObjs is None:
            indexObjs = ComponentIndex(label=label)

        indices = set()
        # Convert single objects to a list
        if isinstance(indexObjs, self.VALID_SINGLE_INDEX_TYPES):
            if self.dimensions == 1:
                if (isinstance(indexObjs, (slice, HashableSlice)) and not
                        (allowComplete and self._isCompleteIndex(indexObjs))):
                    return self._standardizeIndices(self._sliceToIndices(indexObjs), label=label)
                else:
                    indices.add(ComponentIndex((indexObjs,), label=label))
            else:
                raise IndexError("Single Index given for a multi-dimensional component")
        elif isinstance(indexObjs, ComponentIndex):
            if label and indexObjs.label and label != indexObjs.label:
                raise IndexError('ComponentIndex object had a different label than desired (wanted %s, found %s)'
                                 % (label, indexObjs.label))
            if allowComplete and self._isCompleteIndex(indexObjs):
                indices.add(self._completeIndex(label=label))
            else:
                indices.update(self._flattenIndex(indexObjs))
        elif isinstance(indexObjs, dict):
            # Dicts are used to specify component labels for a group of indices at once...
            for dictLabel, dictIndices in indexObjs.iteritems():
                if label and label != dictLabel:
                    raise IndexError('ComponentIndex object had a different label than desired (wanted %s, found %s)'
                                     % (label, dictLabel))
                indices.update(self._standardizeIndices(dictIndices, label=dictLabel))
        elif allowIterable and _util.isIterable(indexObjs):
            for index in indexObjs:
                indices.update(self._standardizeIndices(index,
                                                        allowIterable=False,
                                                        label=label))
                if (allowComplete and len(indices) == 1
                        and self._isCompleteIndex(list(indices)[0])):
                    break
                allowComplete = False
        else:
            raise IndexError("Invalid indices for component: %r" % (indexObjs,))
        return tuple(indices)

    def _completeIndex(self, label=None):
        # an index of all-open-slices selects every element in every dimension
        return ComponentIndex((HashableSlice(None),) * self.dimensions, label=label)

    def _isCompleteIndex(self, indexObj):
        '''Return true if the indexObj represents the entire set of indices possible for this component'''
        if isinstance(indexObj, ComponentIndex):
            return (len(indexObj) == 0
                    or indexObj == self._completeIndex(label=indexObj.label))
        elif self.dimensions == 1:
            return indexObj == slice(None)
        return False

    def _sliceToIndices(self, sliceObj):
        raise NotImplementedError

    def _flattenIndex(self, index, allowIterable=True):
        """
        Given a ComponentIndex object, which may be either a partial index (ie,
        len(index) < self.dimensions), or whose individual-dimension indices
        might be slices or iterables, return an flat list of ComponentIndex
        objects.
        """
        # Some components - such as face-vertices - need to know the previous
        # indices to be able to fully expand the remaining indices... ie,
        # faceVertex[1][2][:] may result in a different expansion than for
        # faceVertex[3][8][:]...
        # for this reason, we need to supply the previous indices to
        # _sliceToIndices, and expand on a per-partial-index basis
        while len(index) < self.dimensions:
            index = ComponentIndex(index + (HashableSlice(None),))

        indices = [ComponentIndex(label=index.label)]
        for dimIndex in index:
            if isinstance(dimIndex, (slice, HashableSlice)):
                newIndices = []
                for oldPartial in indices:
                    newIndices.extend(self._sliceToIndices(dimIndex,
                                                           partialIndex=oldPartial))
                indices = newIndices
            elif _util.isIterable(dimIndex):
                if allowIterable:
                    newIndices = []
                    for oldPartial in indices:
                        for indice in dimIndex:
                            newIndices.append(oldPartial + (indice,))
                    indices = newIndices
                else:
                    raise IndexError(index)
            elif isinstance(dimIndex, (float, int, long)) and dimIndex < 0:
                # negative indices count back from the end of the dimension
                indices = [x + (self._translateNegativeIndice(dimIndex, x),)
                           for x in indices]
            else:
                indices = [x + (dimIndex,) for x in indices]
        return indices

    def _translateNegativeIndice(self, negIndex, partialIndex):
        raise NotImplementedError

    def __getitem__(self, item):
        if self.currentDimension() is None:
            raise IndexError("Indexing only allowed on an incompletely "
                             "specified component (ie, 'cube.vtx')")
        self._validateGetItemIndice(item)
        return self.__class__(self._node,
                              ComponentIndex(self._partialIndex + (item,)))

    def _validateGetItemIndice(self, item, allowIterables=True):
        """
        Will raise an appropriate IndexError if the given item
        is not suitable as a __getitem__ indice.
        """
        if allowIterables and _util.isIterable(item):
            for x in item:
                self._validateGetItemIndice(x, allowIterables=False)
            return
        if not isinstance(item, self.VALID_SINGLE_INDEX_TYPES):
            raise IndexError("Invalid indice type for %s: %r" %
                             (self.__class__.__name__,
                              item.__class__.__name__))
        if isinstance(item, (slice, HashableSlice)):
            if item.step and item.step < 0:
                raise IndexError("Components do not support slices with negative steps")
            # 'None' compares as less than all numbers, so need
            # to check for it explicitly
            if item.start is None and item.stop is None:
                # If it's an open range, [:], and slices are allowed,
                # it's valid
                return
            elif item.start is None:
                minIndex = maxIndex = item.stop
            elif item.stop is None:
                minIndex = maxIndex = item.start
            else:
                maxIndex = max(item.start, item.stop)
                minIndex = min(item.start, item.stop)
            if (not isinstance(maxIndex, self.VALID_SINGLE_INDEX_TYPES) or
                    not isinstance(minIndex, self.VALID_SINGLE_INDEX_TYPES)):
                raise IndexError("Invalid slice start or stop value")
        else:
            maxIndex = minIndex = item
        allowedRange = self._dimRange(self._partialIndex)
        if minIndex < allowedRange[0] or maxIndex > allowedRange[1]:
            raise IndexError("Indice %s out of range %s" % (item, allowedRange))

    def _dimRange(self, partialIndex):
        """
        Returns (minIndex, maxIndex) for the next dimension index after
        the given partialIndex.

        The range is inclusive.
        """
        # was 'raise NotImplemented' - raising the NotImplemented singleton is
        # itself a TypeError; raise the proper exception class instead
        raise NotImplementedError

    def _dimLength(self, partialIndex):
        """
        Given a partialIndex, returns the maximum value for the first
        unspecified dimension.
        """
        # Implement in derived classes - no general way to determine the length
        # of components!
        raise NotImplementedError

    def currentDimension(self):
        """
        Returns the dimension index that an index operation - ie, self[...] /
        self.__getitem__(...) - will operate on.

        If the component is completely specified (ie, all dimensions are
        already indexed), then None is returned.
        """
        # cached on first access; _partialIndex never changes after __init__
        if not hasattr(self, '_currentDimension'):
            indices = self._partialIndex
            if (indices is not None and
                    len(indices) < self.dimensions):
                self._currentDimension = len(indices)
            else:
                self._currentDimension = None
        return self._currentDimension
class ComponentIndex(tuple):

    """
    Class used to specify a multi-dimensional component index.

    If the length of a ComponentIndex object < the number of dimensions,
    then the remaining dimensions are taken to be 'complete' (ie, have not yet
    had indices specified).
    """

    def __new__(cls, *args, **kwargs):
        """
        :Parameters:
        label : `string`
            Component label for this index.

            Useful for components whose 'mel name' may vary - ie, an isoparm
            may be specified as u, v, or uv.
        """
        label = kwargs.pop('label', None)
        inst = tuple.__new__(cls, *args, **kwargs)
        # with no explicit label, inherit the label of a source ComponentIndex
        if not label and args:
            source = args[0]
            if isinstance(source, ComponentIndex) and source.label:
                label = source.label
        inst.label = label
        return inst

    def __add__(self, other):
        # concatenation keeps whichever label is set; two conflicting labels
        # cannot be merged
        label = self.label
        if isinstance(other, ComponentIndex) and other.label:
            if not label:
                label = other.label
            elif label != other.label:
                raise ValueError('cannot add two ComponentIndex objects with different labels')
        return ComponentIndex(itertools.chain(self, other), label=label)

    def __repr__(self):
        return "%s(%s, label=%r)" % (type(self).__name__,
                                     tuple.__repr__(self),
                                     self.label)
def validComponentIndexType(argObj, allowDicts=True, componentIndexTypes=None):
    """
    True if argObj is of a suitable type for specifying a component's index.
    False otherwise.

    Dicts allow for components whose 'mel name' may vary - ie, a single
    isoparm component may have, u, v, or uv elements; or, a single pivot
    component may have scalePivot and rotatePivot elements.  The key of the
    dict would indicate the 'mel component name', and the value the actual
    indices.

    Thus:
       {'u':3, 'v':(4,5), 'uv':ComponentIndex((1,4)) }
    would represent single component that contained:
       .u[3]
       .v[4]
       .v[5]
       .uv[1][4]
    """
    if not componentIndexTypes:
        componentIndexTypes = (int, long, float, slice, HashableSlice, ComponentIndex)

    if allowDicts and isinstance(argObj, dict):
        # every value of the dict must itself be a valid (non-dict) index
        return all(validComponentIndexType(value, allowDicts=False)
                   for value in argObj.itervalues())
    if isinstance(argObj, componentIndexTypes):
        return True
    if isinstance(argObj, (list, tuple)) and len(argObj):
        # a non-empty sequence is valid if every element is an index type
        return all(isinstance(indice, componentIndexTypes)
                   for indice in argObj)
    return False
class DiscreteComponent(DimensionedComponent):
"""
Components whose dimensions are discretely indexed.
Ie, there are a finite number of possible components, referenced by integer
indices.
Example: polyCube.vtx[38], f.cv[3][2]
Derived classes should implement:
_dimLength
"""
VALID_SINGLE_INDEX_TYPES = (int, long, slice, HashableSlice)
    def __init__(self, *args, **kwargs):
        # clear cached iteration state before the base class inspects indices
        self.reset()
        super(DiscreteComponent, self).__init__(*args, **kwargs)
def _isCompleteMfnComp(self, mfncomp):
# for components created through MSelectionList - ie, pm.PyNode('pCube1.vtx[0]')
# - we may get back an MFnComponent object that actually has all the
# indices, but is not marked as complete
# check both if it is marked "isComplete", and if it has a number of
# components equal to the number that this object has
if mfncomp.isComplete():
return True
else:
try:
totalSize = self.totalSize()
except NotImplementedError:
return False
return mfncomp.elementCount() == totalSize
    def _sliceToIndices(self, sliceObj, partialIndex=None):
        """
        Converts a slice object to an iterable of the indices it represents.

        If a partialIndex is supplied, then sliceObj is taken to be a slice
        at the next dimension not specified by partialIndex - ie,

           myFaceVertex._sliceToIndices(slice(1,-1), partialIndex=ComponentIndex((3,)))

        might be used to get a component such as

           faceVertices[3][1:-1]
        """
        if partialIndex is None:
            partialIndex = ComponentIndex()

        # store these in local variables, to avoid constantly making
        # new slice objects, since slice objects are immutable
        start = sliceObj.start
        stop = sliceObj.stop
        step = sliceObj.step

        if start is None:
            start = 0

        if step is None:
            step = 1

        # convert 'maya slices' to 'python slices'...
        # ie, in maya, someObj.vtx[2:3] would mean:
        #    (vertices[2], vertices[3])
        # in python, it would mean:
        #    (vertices[2],)
        if stop is not None and stop >= 0:
            stop += 1

        # open-ended or negative bounds need the dimension length to resolve
        # to concrete, non-negative values
        if stop is None or start < 0 or stop < 0 or step < 0:
            start, stop, step = slice(start, stop, step).indices(self._dimLength(partialIndex))

        # Made this return a normal list for easier debugging...
        # ... can always make it back to a generator if need it for speed
        for rawIndex in xrange(start, stop, step):
            yield ComponentIndex(partialIndex + (rawIndex,))
#        return [ComponentIndex(partialIndex + (rawIndex,))
#                for rawIndex in xrange(start, stop, step)]
    def _makeIndexedComponentHandle(self, indices):
        # We could always create our component using the selection list
        # method; but since this has to do string processing, it is slower...
        # so use MFnComponent.addElements method if possible.
        handle = Component._makeComponentHandle(self)
        # only the fast path if we got an empty component to fill, and we are
        # not representing the 'complete' component set
        if (self._componentMObjEmpty(handle.object())
                and not (len(indices) == 1 and self._isCompleteIndex(indices[0]))):
            mayaArrays = []
            # transpose: one api int-array per dimension
            for dimIndices in zip(*indices):
                mayaArrays.append(self._pyArrayToMayaArray(dimIndices))
            mfnComp = self._mfncompclass(handle.object())
            mfnComp.setComplete(False)
            if mayaArrays:
                mfnComp.addElements(*mayaArrays)
            return handle
        else:
            # fall back to building mel selection strings
            return super(DiscreteComponent, self)._makeIndexedComponentHandle(indices)
    @classmethod
    def _pyArrayToMayaArray(cls, pythonArray):
        # convert a python sequence of ints to an api MIntArray
        mayaArray = _api.MIntArray()
        _api.MScriptUtil.createIntArrayFromList(list(pythonArray), mayaArray)
        return mayaArray
    def _dimRange(self, partialIndex):
        # inclusive (min, max) range; negative indices are allowed, python-style
        dimLen = self._dimLength(partialIndex)
        return (-dimLen, dimLen - 1)
    def _translateNegativeIndice(self, negIndex, partialIndex):
        # python-style wrap-around: -1 maps to the last element of the dimension
        assert negIndex < 0
        return self._dimLength(partialIndex) + negIndex
    def __iter__(self):
        # We proceed in two ways, depending on whether we're a
        # completely-specified component (ie, no longer indexable),
        # or partially-specified (ie, still indexable).
        # Each yielded item is a fully-indexed component of the same class.
        for compIndex in self._compIndexObjIter():
            yield self.__class__(self._node, compIndex)
def _compIndexObjIter(self):
"""
An iterator over all the indices contained by this component,
as ComponentIndex objects (which are a subclass of tuple).
"""
if self.currentDimension() is None:
# we're completely specified, do flat iteration
return self._flatIter()
else:
# we're incompletely specified, iterate across the dimensions!
return self._dimensionIter()
    # Essentially identical to _compIndexObjIter, except that this alias is
    # intended for use by the end-user; so if it's more 'intuitive' to return
    # some other object, it will be overriden in derived classes to do so -
    # ie, for Component1D, indicesIter will yield plain ints
    indicesIter = _compIndexObjIter

    def indices(self):
        """
        A list of all the indices contained by this component.
        """
        return list(self.indicesIter())
    def _dimensionIter(self):
        # Iterate by expanding the (possibly sliced) partial index, without
        # consulting the api component object.
        # If we're incompletely specified, then if, for instance, we're
        # iterating through all the vertices of a poly with 500,000 verts,
        # then it's a huge waste of time / space to create a list of
        # 500,000 indices in memory, then iterate through it, when we could
        # just as easily generate the indices as we go with an xrange
        # Since an MFnComponent is essentially a flat list of such indices
        # - only it's stored in maya's private memory space - we AVOID
        # calling __apicomponent__ in this case!

        # self._partialIndex may have slices...
        for index in self._flattenIndex(self._partialIndex):
            yield index
    def _flatIter(self):
        # If we're completely specified, we assume that we NEED
        # to have some sort of list of indices just in order to know
        # what this component object holds (ie, we might have
        # [1][4], [3][80], [3][100], [4][10], etc)
        # ...so we assume that we're not losing any speed / memory
        # by iterating through a 'list of indices' stored in memory
        # in our case, this list of indices is the MFnComponent object
        # itself, and is stored in maya's memory, but the idea is the same...

        # This code duplicates much of currentItem - keeping both
        # for speed, as _flatIter may potentially have to plow through a lot of
        # components, so we don't want to make an extra function call...
        dimensionIndicePtrs = []
        mfncomp = self.__apicomponent__()
        # one int pointer per dimension for getElement to write into
        for _ in xrange(self.dimensions):
            dimensionIndicePtrs.append(_api.SafeApiPtr('int'))

        for flatIndex in xrange(len(self)):
            mfncomp.getElement(flatIndex, *[x() for x in dimensionIndicePtrs])
            yield ComponentIndex([x.get() for x in dimensionIndicePtrs])
    def __len__(self):
        # Number of discrete elements held, as reported by the api component.
        return self.__apicomponent__().elementCount()
    def count(self):
        """Return the number of components contained - same as ``len(self)``."""
        return len(self)
# default implementation assumes that each dimension has a consistent
# number of components - so total number of components is
# sizeDim1 * sizeDim2 * ... * sizeDimN
# if this is not the case (ie, for faceVertex, or subds), need to override
# this - either with a "correct" method, or an implementation that raises
# NotImplementedError
def totalSize(self):
'''The maximum possible number of components
ie, for a polygon cube, the totalSize for verts would be 8, for edges
would be 12, and for faces would be 6
'''
if not self.dimensions:
return 0
totalSize = 1
partialIndex = ComponentIndex()
for _ in xrange(self.dimensions):
totalSize *= self._dimLength(partialIndex)
partialIndex += (0,)
return totalSize
def setIndex(self, index):
if not 0 <= index < len(self):
raise IndexError
self._currentFlatIndex = index
return self
    def getIndex(self):
        '''Returns the current 'flat list' index for this group of components -
        ie, if this component holds the vertices:
            [5, 7, 12, 13, 14, 25]
        then if the 'flat list' index is 2, then we are pointing to vertex 12.
        '''
        return self._currentFlatIndex
    def currentItem(self):
        # Return a new component object holding only the element at the
        # current flat index.
        # This code duplicates much of _flatIter - keeping both
        # for speed, as _flatIter may potentially have to plow through a lot of
        # components, so we don't want to make an extra function call...
        dimensionIndicePtrs = []
        mfncomp = self.__apicomponent__()
        for _ in xrange(self.dimensions):
            dimensionIndicePtrs.append(_api.SafeApiPtr('int'))

        mfncomp.getElement(self._currentFlatIndex, *[x() for x in dimensionIndicePtrs])
        curIndex = ComponentIndex([x.get() for x in dimensionIndicePtrs])
        return self.__class__(self._node, curIndex)
    def currentItemIndex(self):
        '''Returns the component indices for the current item in this component
        group

        If the component type has more then one dimension, the return result
        will be a ComponentIndex object which is a sub-class of tuple; otherwise,
        it will be a single int.

        These values correspond to the indices that you would use when selecting
        components in mel - ie, vtx[5], cv[3][2]
        '''
        # Again, duplicates some code in currentItem/_flatIter for speed
        dimensionIndicePtrs = []
        mfncomp = self.__apicomponent__()
        for _ in xrange(self.dimensions):
            dimensionIndicePtrs.append(_api.SafeApiPtr('int'))

        mfncomp.getElement(self._currentFlatIndex, *[x() for x in dimensionIndicePtrs])
        if self.dimensions == 1:
            # single-dimension components return a bare int
            return dimensionIndicePtrs[0].get()
        else:
            return ComponentIndex([x.get() for x in dimensionIndicePtrs])
def next(self):
if self._stopIteration:
raise StopIteration
elif not self:
self._stopIteration = True
raise StopIteration
else:
toReturn = self.currentItem()
try:
self.setIndex(self.getIndex() + 1)
except IndexError:
self._stopIteration = True
return toReturn
def reset(self):
self._stopIteration = False
self._currentFlatIndex = 0
class ContinuousComponent(DimensionedComponent):

    """
    Components whose dimensions are continuous.

    Ie, there are an infinite number of possible components, referenced by
    floating point parameters.
    Example: nurbsCurve.u[7.48], nurbsSurface.uv[3.85][2.1]

    Derived classes should implement:
        _dimRange
    """
    VALID_SINGLE_INDEX_TYPES = (int, long, float, slice, HashableSlice)

    def _standardizeIndices(self, indexObjs, **kwargs):
        # iterables of indices make no sense for a continuous range -
        # disallow them
        return super(ContinuousComponent, self)._standardizeIndices(indexObjs,
                                                                    allowIterable=False,
                                                                    **kwargs)

    def _sliceToIndices(self, sliceObj, partialIndex=None):
        # Note that as opposed to a DiscreteComponent, where we
        # always want to flatten a slice into it's discrete elements,
        # with a ContinuousComponent a slice is a perfectly valid
        # indices... the only caveat is we need to convert it to a
        # HashableSlice, as we will be sticking it into a set...
        if sliceObj.step != None:
            raise MayaComponentError("%ss may not use slice-indices with a 'step' - bad slice: %s" %
                                     (self.__class__.__name__, sliceObj))
        if partialIndex is None:
            partialIndex = ComponentIndex()
        if sliceObj.start == sliceObj.stop == None:
            # fully-open slice - stands for the entire continuous range
            return (partialIndex + (HashableSlice(None), ), )
        else:
            return (partialIndex +
                    (HashableSlice(sliceObj.start, sliceObj.stop),), )

    def __iter__(self):
        # a continuous range of parameters cannot be enumerated
        raise TypeError("%r object is not iterable" % self.__class__.__name__)

    def _dimLength(self, partialIndex):
        # Note that in the default implementation, used
        # by DiscreteComponent, _dimRange depends on _dimLength.
        # In ContinuousComponent, the opposite is True - _dimLength
        # depends on _dimRange
        range = self._dimRange(partialIndex)
        return range[1] - range[0]

    def _dimRange(self, partialIndex):
        # Note that in the default implementation, used
        # by DiscreteComponent, _dimRange depends on _dimLength.
        # In ContinuousComponent, the opposite is True - _dimLength
        # depends on _dimRange
        raise NotImplementedError

    def _translateNegativeIndice(self, negIndex, partialIndex):
        # negative parameter values are legitimate here - pass through
        return negIndex
class Component1DFloat(ContinuousComponent):

    """Base class for single-dimension, float-indexed (continuous) components."""

    dimensions = 1

    def index(self):
        # the single contained index
        return self.indices()[0]
class Component2DFloat(ContinuousComponent):

    """Base class for two-dimension, float-indexed (continuous) components."""

    dimensions = 2
class Component1D(DiscreteComponent):

    """Base class for discrete components addressed by a single int index (ie, vtx[5])."""

    _mfncompclass = _api.MFnSingleIndexedComponent
    _apienum__ = _api.MFn.kSingleIndexedComponent
    dimensions = 1

    def index(self):
        # the single contained index, as an int
        return self.indices()[0]

    @staticmethod
    def _sequenceToComponentSlice(array):
        """given an array, convert to a maya-formatted slice"""
        # note the stop - 1: maya slice notation is end-inclusive
        # (presumably _util.sequenceToSlices yields python end-exclusive
        # slices - TODO confirm)
        return [HashableSlice(x.start, x.stop - 1, x.step) for x in _util.sequenceToSlices(array)]

    def name(self):
        # this function produces a name that uses extended slice notation, such as vtx[10:40:2]
        melobj = self.__melobject__()
        if isinstance(melobj, basestring):
            # a single mel string already names this component
            return melobj
        else:
            # multiple mel strings - compress the indices into slice notation
            compSlice = self._sequenceToComponentSlice(self.indicesIter())
            sliceStr = ','.join([_formatSlice(x) for x in compSlice])
            return self._completeNameString().replace('*', sliceStr)

    def _flatIter(self):
        # for some reason, the command to get an element is 'element' for
        # 1D components, and 'getElement' for 2D/3D... so parent class's
        # _flatIter won't work!
        # Just as well, we get a more efficient iterator for 1D comps...
        mfncomp = self.__apicomponent__()
        for flatIndex in xrange(len(self)):
            yield ComponentIndex((mfncomp.element(flatIndex),))

    def currentItem(self):
        # a new component wrapping only the element at the current flat index
        mfncomp = self.__apicomponent__()
        return self.__class__(self._node, mfncomp.element(self._currentFlatIndex))

    def currentItemIndex(self):
        '''Returns the component indices for the current item in this component
        group

        If the component type has more then one dimension, the return result
        will be a ComponentIndex object which is a sub-class of tuple; otherwise,
        it will be a single int.

        These values correspond to the indices that you would use when selecting
        components in mel - ie, vtx[5], cv[3][2]
        '''
        # Again, duplicates some code in currentItem/_flatIter for speed
        mfncomp = self.__apicomponent__()
        return mfncomp.element(self._currentFlatIndex)

    def indicesIter(self):
        """
        An iterator over all the indices contained by this component,
        as integers.
        """
        for compIndex in self._compIndexObjIter():
            yield compIndex[0]
class Component2D(DiscreteComponent):

    """Base class for discrete components addressed by two int indices (ie, cv[3][2])."""

    _mfncompclass = _api.MFnDoubleIndexedComponent
    _apienum__ = _api.MFn.kDoubleIndexedComponent
    dimensions = 2
class Component3D(DiscreteComponent):

    """Base class for discrete components addressed by three int indices (ie, pt[1][2][3])."""

    _mfncompclass = _api.MFnTripleIndexedComponent
    _apienum__ = _api.MFn.kTripleIndexedComponent
    dimensions = 3
# Mixin class for components which use MIt* objects for some functionality
class MItComponent(Component):

    """
    Abstract base class for pymel components that can be accessed via iterators.

    (ie, `MeshEdge`, `MeshVertex`, and `MeshFace` can be wrapped around
    MItMeshEdge, etc)

    If deriving from this class, you should set __apicls__ to an appropriate
    MIt* type - ie, for MeshEdge, you would set __apicls__ = _api.MItMeshEdge
    """

    def __init__(self, *args, **kwargs):
        super(MItComponent, self).__init__(*args, **kwargs)

    def __apimit__(self, alwaysUnindexed=False):
        # Construct (and return) a fresh MIt* iterator for this component.
        # Note - the iterator should NOT be stored, as if it gets out of date,
        # it can cause crashes - see, for instance, MItMeshEdge.geomChanged
        # Since we don't know how the user might end up using the components
        # we feed out, and it seems like asking for trouble to have them
        # keep track of when things such as geomChanged need to be called,
        # we simply never retain the MIt for long..
        if self._currentFlatIndex == 0 or alwaysUnindexed:
            return self.__apicls__(self.__apimdagpath__(), self.__apimobject__())
        else:
            # position the iterator at the current item
            return self.__apicls__(self.__apimdagpath__(), self.currentItem().__apimobject__())

    def __apimfn__(self):
        # for MIt-based components, the 'MFn' IS the MIt iterator
        return self.__apimit__()
class MItComponent1D(MItComponent, Component1D):

    """Single-indexed components accessed via an MIt* iterator."""

    pass
class Component1D64(DiscreteComponent):

    """Base class for discrete components indexed by a single 64-bit int.

    Used for subdivision-surface components. The python api's support for
    MUint64 is incomplete, so when MFnUint64SingleIndexedComponent (or
    MUint64 itself) is unavailable, this class falls back on string
    processing via mel names.
    """

    _ALLOW_COMPLETE_SHORTCUT = False

    if Component._hasUint64:
        _mfncompclass = _api.MFnUint64SingleIndexedComponent
        _apienum__ = _api.MFn.kUint64SingleIndexedComponent
    else:
        _mfncompclass = _api.MFnComponent
        _apienum__ = _api.MFn.kComponent

    def totalSize(self):
        # indices are sparse/non-sequential - no meaningful total count
        raise NotImplementedError

    if Component._hasUint64 and hasattr(_api, 'MUint64'):
        # Note that currently the python api has zero support for MUint64's
        # This code is just here because I'm an optimist...
        @classmethod
        def _pyArrayToMayaArray(cls, pythonArray):
            mayaArray = _api.MUint64Array(len(pythonArray))
            for i, value in enumerate(pythonArray):
                mayaArray.set(value, i)
            return mayaArray
    else:
        # Component indices aren't sequential, and without MUint64, the only
        # way to check if a given indice is valid is by trying to insert it
        # into an MSelectionList... since this is both potentially fairly
        # slow, for now just going to 'open up the gates' as far as
        # validation is concerned...
        _max32 = 2 ** 32

        def _dimLength(self, partialIndex):
            return self._max32

        # The ContinuousComponent version works fine for us - just
        # make sure we grab the original function object, not the method
        # object, since we don't inherit from ContinuousComponent
        _sliceToIndices = ContinuousComponent._sliceToIndices.im_func

        # We're basically having to fall back on strings here, so revert 'back'
        # to the string implementation of various methods...
        _makeIndexedComponentHandle = DimensionedComponent._makeIndexedComponentHandle

        def __len__(self):
            # cached after the first (string-based) count
            if hasattr(self, '_storedLen'):
                return self._storedLen
            else:
                # subd MIt*'s have no .count(), and there is no appropriate
                # MFn, so count it using strings...
                melStrings = self.__melobject__()
                if _util.isIterable(melStrings):
                    count = Component.numComponentsFromStrings(*melStrings)
                else:
                    count = Component.numComponentsFromStrings(melStrings)
                self._storedLen = count
                return count

        # The standard _flatIter relies on being able to use element/getElement
        # Since we can't use these, due to lack of MUint64, fall back on
        # string processing...
        # matches the two trailing bracketed indices of a mel name -
        # each being either an int, an int range (a:b), or '*'
        _indicesRe = re.compile(r'\[((?:\d+(?::\d+)?)|\*)\]' * 2 + '$')

        def _flatIter(self):
            if not hasattr(self, '_fullIndices'):
                melobj = self.__melobject__()
                if isinstance(melobj, basestring):
                    melobj = [melobj]
                indices = [self._indicesRe.search(x).groups() for x in melobj]
                for i, indicePair in enumerate(indices):
                    processedPair = []
                    for dimIndice in indicePair:
                        if dimIndice == '*':
                            processedPair.append(HashableSlice(None))
                        elif ':' in dimIndice:
                            start, stop = dimIndice.split(':')
                            processedPair.append(HashableSlice(int(start),
                                                               int(stop)))
                        else:
                            processedPair.append(int(dimIndice))
                    indices[i] = ComponentIndex(processedPair)
                self._fullIndices = indices
            for fullIndex in self._fullIndices:
                for index in self._flattenIndex(fullIndex):
                    yield index

    # kUint64SingleIndexedComponent components have a bit of a dual-personality
    # - though internally represented as a single-indexed long-int, in almost
    # all of the "interface", they are displayed as double-indexed-ints:
    # ie, if you select a subd vertex, it might be displayed as
    #    mySubd.smp[256][4388]
    # Since the end user will mostly "see" the component as double-indexed,
    # the default pymel indexing will be double-indexed, so we set dimensions
    # to 2, and then hand correct cases where self.dimensions affects how
    # we're interacting with the kUint64SingleIndexedComponent
    dimensions = 2
#-----------------------------------------
# Specific Components...
#-----------------------------------------
class MeshVertex(MItComponent1D):

    """``vtx`` (vertex) components of a poly mesh."""

    __apicls__ = _api.MItMeshVertex
    _ComponentLabel__ = "vtx"
    _apienum__ = _api.MFn.kMeshVertComponent

    def _dimLength(self, partialIndex):
        # single dimension: the mesh's vertex count
        return self.node().numVertices()

    def setColor(self, color):
        """Set the vertex color for every vertex in this component group."""
        for vertId in self.indices():
            self.node().setVertexColor(color, vertId)

    def connectedEdges(self):
        """
        :rtype: `MeshEdge` list
        """
        edgeIds = _api.MIntArray()
        self.__apimfn__().getConnectedEdges(edgeIds)
        idList = [edgeIds[i] for i in range(edgeIds.length())]
        return MeshEdge._compOrEmptyList(self, self._sequenceToComponentSlice(idList))

    def connectedFaces(self):
        """
        :rtype: `MeshFace` list
        """
        faceIds = _api.MIntArray()
        self.__apimfn__().getConnectedFaces(faceIds)
        idList = [faceIds[i] for i in range(faceIds.length())]
        return MeshFace._compOrEmptyList(self, self._sequenceToComponentSlice(idList))

    def connectedVertices(self):
        """
        :rtype: `MeshVertex` list
        """
        vertIds = _api.MIntArray()
        self.__apimfn__().getConnectedVertices(vertIds)
        idList = [vertIds[i] for i in range(vertIds.length())]
        return MeshVertex._compOrEmptyList(self, self._sequenceToComponentSlice(idList))

    def isConnectedTo(self, component):
        """
        pass a component of type `MeshVertex`, `MeshEdge`, `MeshFace`, with a single element

        :rtype: bool
        """
        if isinstance(component, MeshFace):
            return self.isConnectedToFace(component.currentItemIndex())
        if isinstance(component, MeshEdge):
            return self.isConnectedToEdge(component.currentItemIndex())
        if isinstance(component, MeshVertex):
            neighbors = _api.MIntArray()
            self.__apimfn__().getConnectedVertices(neighbors)
            return component.currentItemIndex() in [neighbors[i] for i in range(neighbors.length())]
        raise TypeError('type %s is not supported' % type(component))

    def getColor(self, *args, **kwargs):
        """Return this vertex's color as a `datatypes.Color`.

        Manually wrapped so every api overload - including the no-argument
        form - is available.
        """
        apiColor = _api.MColor()
        self.__apimfn__().getColor(apiColor, *args, **kwargs)
        return datatypes.Color(apiColor)
class MeshEdge(MItComponent1D):

    """``e`` (edge) components of a poly mesh."""

    __apicls__ = _api.MItMeshEdge
    _ComponentLabel__ = "e"
    _apienum__ = _api.MFn.kMeshEdgeComponent

    def _dimLength(self, partialIndex):
        # single dimension: the mesh's edge count
        return self.node().numEdges()

    def connectedEdges(self):
        """
        :rtype: `MeshEdge` list
        """
        edgeIds = _api.MIntArray()
        self.__apimfn__().getConnectedEdges(edgeIds)
        idList = [edgeIds[i] for i in range(edgeIds.length())]
        return MeshEdge._compOrEmptyList(self, self._sequenceToComponentSlice(idList))

    def connectedFaces(self):
        """
        :rtype: `MeshFace` list
        """
        faceIds = _api.MIntArray()
        self.__apimfn__().getConnectedFaces(faceIds)
        idList = [faceIds[i] for i in range(faceIds.length())]
        return MeshFace._compOrEmptyList(self, self._sequenceToComponentSlice(idList))

    def connectedVertices(self):
        """
        :rtype: `MeshVertex` list
        """
        # an edge always has exactly two end vertices
        mfn = self.__apimfn__()
        return (MeshVertex(self, mfn.index(0)), MeshVertex(self, mfn.index(1)))

    def isConnectedTo(self, component):
        """
        :rtype: bool
        """
        if isinstance(component, MeshFace):
            return self.isConnectedToFace(component.currentItemIndex())
        if isinstance(component, MeshEdge):
            return self.isConnectedToEdge(component.currentItemIndex())
        if isinstance(component, MeshVertex):
            mfn = self.__apimfn__()
            return component.currentItemIndex() in (mfn.index(0), mfn.index(1))
        raise TypeError('type %s is not supported' % type(component))
class MeshFace(MItComponent1D):

    """``f`` (face) components of a poly mesh."""

    __apicls__ = _api.MItMeshPolygon
    _ComponentLabel__ = "f"
    _apienum__ = _api.MFn.kMeshPolygonComponent

    def _dimLength(self, partialIndex):
        # single dimension: the mesh's face count
        return self.node().numFaces()

    def connectedEdges(self):
        """
        :rtype: `MeshEdge` list
        """
        edgeIds = _api.MIntArray()
        self.__apimfn__().getConnectedEdges(edgeIds)
        idList = [edgeIds[i] for i in range(edgeIds.length())]
        return MeshEdge._compOrEmptyList(self, self._sequenceToComponentSlice(idList))

    def connectedFaces(self):
        """
        :rtype: `MeshFace` list
        """
        faceIds = _api.MIntArray()
        self.__apimfn__().getConnectedFaces(faceIds)
        idList = [faceIds[i] for i in range(faceIds.length())]
        return MeshFace._compOrEmptyList(self, self._sequenceToComponentSlice(idList))

    def connectedVertices(self):
        """
        :rtype: `MeshVertex` list
        """
        vertIds = _api.MIntArray()
        self.__apimfn__().getConnectedVertices(vertIds)
        idList = [vertIds[i] for i in range(vertIds.length())]
        return MeshVertex._compOrEmptyList(self, self._sequenceToComponentSlice(idList))

    def isConnectedTo(self, component):
        """
        :rtype: bool
        """
        if isinstance(component, MeshFace):
            return self.isConnectedToFace(component.currentItemIndex())
        if isinstance(component, MeshEdge):
            return self.isConnectedToEdge(component.currentItemIndex())
        if isinstance(component, MeshVertex):
            return self.isConnectedToVertex(component.currentItemIndex())
        raise TypeError('type %s is not supported' % type(component))
# alias: numVertices -> polygonVertexCount (added to MeshFace elsewhere,
# presumably from the wrapped MItMeshPolygon api class - TODO confirm)
MeshFace.numVertices = MeshFace.polygonVertexCount
class MeshUV(Component1D):

    """``map`` (UV) components of a poly mesh."""

    _ComponentLabel__ = "map"
    _apienum__ = _api.MFn.kMeshMapComponent

    def _dimLength(self, partialIndex):
        # single dimension: the mesh's UV count
        return self._node.numUVs()
class MeshVertexFace(Component2D):

    """``vtxFace`` components of a poly mesh.

    Double-indexed: the first index is a vertex id, the second the id of a
    face connected to that vertex.
    """

    _ComponentLabel__ = "vtxFace"
    _apienum__ = _api.MFn.kMeshVtxFaceComponent

    # getting all the mel strings for MeshVertexFace is SLLOOOWW - so check if
    # it's complete, and if so, just return the .vtxFace[*] form
    def __melobject__(self):
        if self.isComplete():
            return self._completeNameString()
        else:
            return super(MeshVertexFace, self).__melobject__()

    def _dimLength(self, partialIndex):
        # first dimension ranges over all vertices; second over the faces
        # connected to the given vertex
        if len(partialIndex) == 0:
            return self._node.numVertices()
        elif len(partialIndex) == 1:
            return self._node.vtx[partialIndex[0]].numConnectedFaces()

    def totalSize(self):
        return self.node().numFaceVertices()

    def _sliceToIndices(self, sliceObj, partialIndex=None):
        if not partialIndex:
            # If we're just grabbing a slice of the first index,
            # the verts, we can proceed as normal...
            for x in super(MeshVertexFace, self)._sliceToIndices(sliceObj, partialIndex):
                yield x

        # If we're iterating over the FACES attached to a given vertex,
        # which may be a random set - say, (3,6,187) - not clear how to
        # interpret an index 'range'
        else:
            if (sliceObj.start not in (0, None) or
                    sliceObj.stop is not None or
                    sliceObj.step is not None):
                raise ValueError('%s objects may not be indexed with slices, execpt for [:]' %
                                 self.__class__.__name__)

            # get a MitMeshVertex ...
            mIt = _api.MItMeshVertex(self._node.__apimdagpath__())

            # Even though we're not using the result stored in the int,
            # STILL need to store a ref to the MScriptUtil - otherwise,
            # there's a chance it gets garbage collected before the
            # api function call is made, and it writes the value into
            # the pointer...
            intPtr = _api.SafeApiPtr('int')
            mIt.setIndex(partialIndex[0], intPtr())
            intArray = _api.MIntArray()
            mIt.getConnectedFaces(intArray)
            for i in xrange(intArray.length()):
                yield partialIndex + (intArray[i],)

    def _validateGetItemIndice(self, item, allowIterables=True):
        """
        Will raise an appropriate IndexError if the given item
        is not suitable as a __getitem__ indice.
        """
        if len(self._partialIndex) == 0:
            return super(MeshVertexFace, self)._validateGetItemIndice(item)
        if allowIterables and _util.isIterable(item):
            # BUGFIX: validate each member of the iterable - the old code
            # recursed on `item` itself, so any iterable indice was always
            # rejected as an invalid single-indice type.
            for member in item:
                self._validateGetItemIndice(member, allowIterables=False)
            return
        if isinstance(item, (slice, HashableSlice)):
            # BUGFIX: inspect the attributes of `item` - the old code
            # compared the descriptors on the builtin `slice` TYPE
            # (slice.start == slice.stop == ...), which can never be equal,
            # so the open-ended [:] slice was always (wrongly) rejected.
            if item.start is None and item.stop is None and item.step is None:
                return
            raise IndexError("only completely open-ended slices are allowable"
                             " for the second indice of %s objects" %
                             self.__class__.__name__)
        if not isinstance(item, self.VALID_SINGLE_INDEX_TYPES):
            raise IndexError("Invalid indice type for %s: %r" %
                             (self.__class__.__name__,
                              item.__class__.__name__))
        # a single value is only valid if it names a face actually
        # connected to the already-specified vertex
        for fullIndice in self._sliceToIndices(slice(None),
                                               partialIndex=self._partialIndex):
            if item == fullIndice[1]:
                return
        raise IndexError("vertex-face %s-%s does not exist" %
                         (self._partialIndex[0], item))
# Subd Components
class SubdVertex(Component1D64):

    """``smp`` (vertex) components of a subdivision surface."""

    _ComponentLabel__ = "smp"
    _apienum__ = _api.MFn.kSubdivCVComponent
class SubdEdge(Component1D64):

    """``sme`` (edge) components of a subdivision surface."""

    _ComponentLabel__ = "sme"
    _apienum__ = _api.MFn.kSubdivEdgeComponent

    # There is a currently a bug with subd edges, where if you do:

    # import maya.cmds as cmds
    # cmds.file(new=1, f=1)
    # polyCube = cmds.polyCube()[0]
    # subd = cmds.polyToSubdiv(polyCube)[0]
    # cmds.select(subd + '.sme[*][*]')

    # ...maya crashes. as a hack to to help avoid crashing, define the complete
    # component as just containing the first edge...
    # GET RID OF THIS ONCE THE CRASH BUG IS FIXED!!!
    def _completeNameString(self):
        return Component._completeNameString(self) + '[0][0]'
class SubdFace(Component1D64):

    """``smf`` (face) components of a subdivision surface."""

    _ComponentLabel__ = "smf"
    _apienum__ = _api.MFn.kSubdivFaceComponent
class SubdUV(Component1D):

    """``smm`` (UV) components of a subdivision surface.

    Subd UVs can't be selected with the usual ``[*]`` wildcard (ie,
    ``cmds.select('subdivCube1Shape.smm[*]')`` fails), so the
    complete-component shortcut is disabled and explicit index ranges
    (ie, ``.smm[0:206]``) are used instead.
    """

    _ALLOW_COMPLETE_SHORTCUT = False
    _ComponentLabel__ = "smm"
    _apienum__ = _api.MFn.kSubdivMapComponent

    # Counting uvSet elements is NOT a reliable way to find the max smm
    # index - a subd shape can have zero uvSet elements yet still have
    # valid .smm's. Instead the max index is probed by string processing
    # (see _dimLength), using an upper bound for the probe range.
    #
    # Empirical testing of that upper bound gave inconsistent results per
    # platform (on Windows x64 2**64 - 1 worked; on Linux/OSX x64 only
    # 2**31 - 1 and, oddly, 2**32 succeeded while neighboring values
    # failed), so the one value that worked everywhere - 2**31 - 1 - is
    # used... hopefully nobody needs more uv's than that.
    _MAX_INDEX = 2 ** 31 - 1
    _tempSel = _api.MSelectionList()
    _maxIndexRe = re.compile(r'\[0:([0-9]+)\]$')

    def _dimLength(self, partialIndex):
        # Fall back on good ol' string processing...
        # unfortunately, .smm[*] is not allowed -
        # so we have to provide a 'maximum' value...
        self._tempSel.clear()
        self._tempSel.add(Component._completeNameString(self) +
                          '[0:%d]' % self._MAX_INDEX)
        selStrings = []
        self._tempSel.getSelectionStrings(0, selStrings)
        try:
            # remember the + 1 for the 0'th index
            return int(self._maxIndexRe.search(selStrings[0]).group(1)) + 1
        except AttributeError:
            raise RuntimeError("Couldn't determine max index for %s" %
                               Component._completeNameString(self))

    def totalSize(self):
        raise NotImplementedError

    def _completeNameString(self):
        # SubdUV's don't work with .smm[*] - so need to use an
        # explicit range instead - ie, .smm[0:206]
        return (super(DimensionedComponent, self)._completeNameString() +
                ('[:%d]' % self._dimLength(None)))
# Nurbs Curve Components
class NurbsCurveParameter(Component1DFloat):

    """``u`` (parameter) components of a nurbs curve - continuous/float-indexed."""

    _ComponentLabel__ = "u"
    _apienum__ = _api.MFn.kCurveParamComponent

    def _dimRange(self, partialIndex):
        # the valid parameter range is the curve's knot domain
        return self._node.getKnotDomain()
class NurbsCurveCV(MItComponent1D):

    """``cv`` (control vertex) components of a nurbs curve."""

    __apicls__ = _api.MItCurveCV
    _ComponentLabel__ = "cv"
    _apienum__ = _api.MFn.kCurveCVComponent

    def _dimLength(self, partialIndex):
        return self.node().numCVs()
class NurbsCurveEP(Component1D):

    """``ep`` (edit point) components of a nurbs curve."""

    _ComponentLabel__ = "ep"
    _apienum__ = _api.MFn.kCurveEPComponent

    def _dimLength(self, partialIndex):
        return self.node().numEPs()
class NurbsCurveKnot(Component1D):

    """``knot`` components of a nurbs curve."""

    _ComponentLabel__ = "knot"
    _apienum__ = _api.MFn.kCurveKnotComponent

    def _dimLength(self, partialIndex):
        return self.node().numKnots()
# NurbsSurface Components
class NurbsSurfaceIsoparm(Component2DFloat):

    """Isoparm (u/v parameter) components of a nurbs surface."""

    _ComponentLabel__ = ("u", "v", "uv")
    _apienum__ = _api.MFn.kIsoparmComponent

    def __init__(self, *args, **kwargs):
        super(NurbsSurfaceIsoparm, self).__init__(*args, **kwargs)
        # Fix the bug where running:
        #
        # import maya.cmds as cmds
        # cmds.sphere()
        # cmds.select('nurbsSphere1.uv[*][*]')
        # print cmds.ls(sl=1)
        # cmds.select('nurbsSphere1.u[*][*]')
        # print cmds.ls(sl=1)
        #
        # Gives two different results:
        # [u'nurbsSphere1.u[0:4][0:1]']
        # [u'nurbsSphere1.u[0:4][0:8]']
        # to fix this, change 'uv' comps to 'u' comps
        if hasattr(self, '_partialIndex'):
            self._partialIndex = self._convertUVtoU(self._partialIndex)
        if 'ComponentIndex' in self.__apiobjects__:
            self.__apiobjects__['ComponentIndex'] = self._convertUVtoU(self.__apiobjects__['ComponentIndex'])
        if hasattr(self, '_indices'):
            self._indices = self._convertUVtoU(self._indices)
        self._ComponentLabel__ = self._convertUVtoU(self._ComponentLabel__)

    @classmethod
    def _convertUVtoU(cls, index):
        # Normalize any 'uv'-labeled index data to the equivalent 'u' form.
        # Handles dicts (label -> indices), ComponentIndex objects, plain
        # lists/tuples of indices, and bare label strings.
        if isinstance(index, dict):
            if 'uv' in index:
                # convert over index['uv']
                oldUvIndex = cls._convertUVtoU(index['uv'])
                if 'u' in index:
                    # First, make sure index['u'] is a list
                    if (isinstance(index['u'], ComponentIndex) or
                            not isinstance(index['u'], (list, tuple))):
                        index['u'] = [index['u']]
                    elif isinstance(index['u'], tuple):
                        index['u'] = list(index['u'])

                    # then add on 'uv' contents
                    if (isinstance(oldUvIndex, ComponentIndex) or
                            not isinstance(oldUvIndex, (list, tuple))):
                        index['u'].append(oldUvIndex)
                    else:
                        index['u'].extend(oldUvIndex)
                else:
                    index['u'] = oldUvIndex
                del index['uv']
        elif isinstance(index, ComponentIndex):
            # do this check INSIDE here, because, since a ComponentIndex is a tuple,
            # we don't want to change a ComponentIndex object with a 'v' index
            # into a list in the next elif clause!
            if index.label == 'uv':
                index.label = 'u'
        elif isinstance(index, (list, tuple)) and not isinstance(index, ComponentIndex):
            index = [cls._convertUVtoU(x) for x in index]
        elif isinstance(index, basestring):
            if index == 'uv':
                index = 'u'
        return index

    def _defaultLabel(self):
        return 'u'

    def _dimRange(self, partialIndex):
        # Which parameter (u or v) a dimension refers to depends both on
        # how many indices are already specified and on the index's label.
        minU, maxU, minV, maxV = self._node.getKnotDomain()
        if len(partialIndex) == 0:
            # first dimension: whatever the label says (default 'u')
            if partialIndex.label == 'v':
                param = 'v'
            else:
                param = 'u'
        else:
            # second dimension: the 'other' parameter
            if partialIndex.label == 'v':
                param = 'u'
            else:
                param = 'v'
        if param == 'u':
            return minU, maxU
        else:
            return minV, maxV
class NurbsSurfaceRange(NurbsSurfaceIsoparm):

    """Surface-range components - ranges of isoparms on a nurbs surface."""

    _ComponentLabel__ = ("u", "v", "uv")
    _apienum__ = _api.MFn.kSurfaceRangeComponent

    def __getitem__(self, item):
        if self.currentDimension() is None:
            raise IndexError("Indexing only allowed on an incompletely "
                             "specified component")
        self._validateGetItemIndice(item)
        # You only get a NurbsSurfaceRange if BOTH indices are slices - if
        # either is a single value, you get an isoparm
        if (not isinstance(item, (slice, HashableSlice)) or
                (self.currentDimension() == 1 and
                 not isinstance(self._partialIndex[0], (slice, HashableSlice)))):
            return NurbsSurfaceIsoparm(self._node, self._partialIndex + (item,))
        else:
            return super(NurbsSurfaceRange, self).__getitem__(item)
class NurbsSurfaceCV(Component2D):

    """``cv`` components of a nurbs surface - double-indexed (cv[u][v])."""

    _ComponentLabel__ = "cv"
    _apienum__ = _api.MFn.kSurfaceCVComponent

    def _dimLength(self, partialIndex):
        # first dimension counts in U, second in V
        depth = len(partialIndex)
        if depth == 0:
            return self.node().numCVsInU()
        if depth == 1:
            return self.node().numCVsInV()
        raise ValueError('partialIndex %r too long for %s._dimLength' %
                         (partialIndex, self.__class__.__name__))
class NurbsSurfaceEP(Component2D):

    """``ep`` (edit point) components of a nurbs surface - double-indexed."""

    _ComponentLabel__ = "ep"
    _apienum__ = _api.MFn.kSurfaceEPComponent

    def _dimLength(self, partialIndex):
        # first dimension counts in U, second in V
        depth = len(partialIndex)
        if depth == 0:
            return self.node().numEPsInU()
        if depth == 1:
            return self.node().numEPsInV()
        raise ValueError('partialIndex %r too long for %s._dimLength' %
                         (partialIndex, self.__class__.__name__))
class NurbsSurfaceKnot(Component2D):
    """Knot component of a nurbs surface (``knot`` label)."""
    _ComponentLabel__ = "knot"
    _apienum__ = _api.MFn.kSurfaceKnotComponent

    def _dimLength(self, partialIndex):
        depth = len(partialIndex)
        if depth == 0:
            # First dimension: knots in the U direction.
            return self.node().numKnotsInU()
        elif depth == 1:
            # Second dimension: knots in the V direction.
            return self.node().numKnotsInV()
        raise ValueError('partialIndex %r too long for %s._dimLength' %
                         (partialIndex, self.__class__.__name__))
class NurbsSurfaceFace(Component2D):
    """Face (patch) component of a nurbs surface (``sf`` label)."""
    _ComponentLabel__ = "sf"
    _apienum__ = _api.MFn.kSurfaceFaceComponent

    def _dimLength(self, partialIndex):
        depth = len(partialIndex)
        # NOTE(review): unlike the sibling components this raises IndexError
        # rather than ValueError; preserved since callers may rely on it.
        if depth > 1:
            raise IndexError("partialIndex %r for %s must have length <= 1" %
                             (partialIndex, self.__class__.__name__))
        return self.node().numSpansInV() if depth else self.node().numSpansInU()
# Lattice Components
class LatticePoint(Component3D):
    """Point component of a lattice (``pt`` label), indexed in 3 dimensions."""
    _ComponentLabel__ = "pt"
    _apienum__ = _api.MFn.kLatticeComponent

    def _dimLength(self, partialIndex):
        depth = len(partialIndex)
        if depth > 2:
            raise ValueError('partialIndex %r too long for %s._dimLength' %
                             (partialIndex, self.__class__.__name__))
        # getDivisions() yields one division count per lattice axis.
        return self.node().getDivisions()[depth]

    def _completeNameString(self):
        # ...However, some multi-indexed components (well, only LatticePoint
        # that I know of) will give incorrect results with
        #    ffd1LatticeShape.pt[*][*][*]
        # ...and so you must do
        #    ffd1LatticeShape.pt[*]
        return Component._completeNameString(self) + '[*]'
# Pivot Components
class Pivot(Component):
    """Pivot component of a transform (rotatePivot / scalePivot)."""
    _ComponentLabel__ = ("rotatePivot", "scalePivot")
    _apienum__ = _api.MFn.kPivotComponent
# Particle Components
class ParticleComponent(Component1D):
    """Per-particle component of a particle shape (``pt`` label)."""
    _ComponentLabel__ = "pt"
    _apienum__ = _api.MFn.kDynParticleSetComponent
    def attr(self, attr):
        # Query a per-particle attribute for the current particle index via
        # the `particle` command; a RuntimeError from the command is mapped
        # to MayaParticleAttributeError.
        try:
            return cmds.particle(self._node, q=1, attribute=attr, order=super(ParticleComponent, self).currentItemIndex())
        except RuntimeError:
            raise MayaParticleAttributeError('%s.%s' % (self, attr))
    def __getattr__(self, attr):
        # MayaParticleAttributeError is a subclass of AttributeError, so if
        # it is raised, that should signal it was not found
        return self.attr(attr)
    def _dimLength(self, partialIndex):
        # Length of the single dimension: the particle count of the shape.
        return self.node().pointCount()
# class ComponentArray(object):
# def __init__(self, name):
# self._name = name
# self._iterIndex = 0
# self._node = self.node()
#
# def __str__(self):
# return self._name
#
# def __repr__(self):
# return "ComponentArray(u'%s')" % self
#
# #def __len__(self):
# # return 0
#
# def __iter__(self):
# """iterator for multi-attributes
##
# >>> for attr in SCENE.persp.attrInfo(multi=1)[0]:
# ... print attr
##
# """
# return self
#
# def next(self):
# """iterator for multi-attributes
##
# >>> for attr in SCENE.persp.attrInfo(multi=1)[0]:
# ... print attr
##
# """
# if self._iterIndex >= len(self):
# raise StopIteration
# else:
# new = self[ self._iterIndex ]
# self._iterIndex += 1
# return new
#
# def __getitem__(self, item):
#
# def formatSlice(item):
# step = item.step
# if step is not None:
# return '%s:%s:%s' % ( item.start, item.stop, step)
# else:
# return '%s:%s' % ( item.start, item.stop )
#
#
# if isinstance( item, tuple ):
# return [ Component(u'%s[%s]' % (self, formatSlice(x)) ) for x in item ]
##
# elif isinstance( item, slice ):
# return Component(u'%s[%s]' % (self, formatSlice(item) ) )
##
# else:
# return Component(u'%s[%s]' % (self, item) )
#
# if isinstance( item, tuple ):
# return [ self.returnClass( self._node, formatSlice(x) ) for x in item ]
#
# elif isinstance( item, (slice, HashableSlice) ):
# return self.returnClass( self._node, formatSlice(item) )
#
# else:
# return self.returnClass( self._node, item )
#
#
# def plugNode(self):
# 'plugNode'
# return PyNode( str(self).split('.')[0])
#
# def plugAttr(self):
# """plugAttr"""
# return '.'.join(str(self).split('.')[1:])
#
# node = plugNode
#
# class _Component(object):
# """
# Abstract base class for component types like vertices, edges, and faces.
#
# This class is deprecated.
# """
# def __init__(self, node, item):
# self._item = item
# self._node = node
#
# def __repr__(self):
# return "%s('%s')" % (self.__class__.__name__, self)
#
# def node(self):
# 'plugNode'
# return self._node
#
# def item(self):
# return self._item
#
# def move( self, *args, **kwargs ):
# return move( self, *args, **kwargs )
# def scale( self, *args, **kwargs ):
# return scale( self, *args, **kwargs )
# def rotate( self, *args, **kwargs ):
# return rotate( self, *args, **kwargs )
class AttributeDefaults(PyNode):
    """PyNode wrapper around an attribute's static definition, backed by
    an ``MFnAttribute`` function set."""
    __metaclass__ = _factories.MetaMayaTypeWrapper
    __apicls__ = _api.MFnAttribute

    def __apiobject__(self):
        "Return the default API object for this attribute, if it is valid"
        return self.__apimobject__()

    def __apimobject__(self):
        "Return the MObject for this attribute, if it is valid"
        try:
            handle = self.__apiobjects__['MObjectHandle']
        # FIX: was a bare `except:`, which would also swallow unrelated
        # errors (KeyboardInterrupt included); the only expected failure
        # here is the cache-miss KeyError from the dict lookup.
        except KeyError:
            # Cache the handle derived from the plug on first access.
            handle = self.__apimplug__().attribute()
            self.__apiobjects__['MObjectHandle'] = handle
        if _api.isValidMObjectHandle(handle):
            return handle.object()
        raise MayaAttributeError

    def __apimplug__(self):
        "Return the MPlug for this attribute, if it is valid"
        # check validity
        # self.__apimobject__()
        return self.__apiobjects__['MPlug']

    def __apimdagpath__(self):
        "Return the MDagPath for the node of this attribute, if it is valid"
        try:
            return self.node().__apimdagpath__()
        except AttributeError:
            # Node has no dag path; return None implicitly.
            pass

    def name(self):
        # Name of the attribute as reported by the API function set.
        return self.__apimfn__().name()
#-----------------------------------------------
# Global Settings
#-----------------------------------------------
#-----------------------------------------------
# Scene Class
#-----------------------------------------------
class Scene(object):
    """
    The Scene class provides an attribute-based method for retrieving `PyNode` instances of
    nodes in the current scene.

        >>> SCENE = Scene()
        >>> SCENE.persp
        nt.Transform(u'persp')
        >>> SCENE.persp.t
        Attribute(u'persp.translate')

    An instance of this class is provided for you with the name `SCENE`.
    """
    __metaclass__ = _util.Singleton

    def __getattr__(self, obj):
        # Dunder lookups must never be resolved as scene nodes; serve them
        # from the instance dict or fail with a normal AttributeError.
        isDunder = obj.startswith('__') and obj.endswith('__')
        if isDunder:
            try:
                return self.__dict__[obj]
            except KeyError:
                raise AttributeError("type object %r has no attribute %r" %
                                     (self.__class__.__name__, obj))
        return PyNode(obj)
# Module-level singleton scene accessor.
SCENE = Scene()
# Generate the module-level command wrapper functions for PyNode.
_factories.createFunctions(__name__, PyNode)
|
{
"content_hash": "e0238e481266b2e49b33fecfd38d2bbd",
"timestamp": "",
"source": "github",
"line_count": 6249,
"max_line_length": 177,
"avg_line_length": 37.46551448231717,
"alnum_prop": 0.5622367825321841,
"repo_name": "AtonLerin/pymel",
"id": "9fe319dfb426fbbe6c9029c3f2d0b3bea0f09b32",
"size": "234122",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pymel/core/general.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "316"
},
{
"name": "CSS",
"bytes": "16762"
},
{
"name": "Python",
"bytes": "2610474"
},
{
"name": "Shell",
"bytes": "7033"
}
],
"symlink_target": ""
}
|
from .templates import (html_template, css_template)
|
{
"content_hash": "94400ba9f0565b49c0fafc36e2c8cf5f",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 52,
"avg_line_length": 53,
"alnum_prop": 0.7924528301886793,
"repo_name": "lucashtnguyen/wqreports",
"id": "448e382c80ea728eab07b1141e36059bce4e60f6",
"size": "53",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "wqreports/utils/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "38"
},
{
"name": "CSS",
"bytes": "3705"
},
{
"name": "HTML",
"bytes": "225"
},
{
"name": "Python",
"bytes": "33295"
}
],
"symlink_target": ""
}
|
import serial
import time
import platform
import csv
import threading
import zephyr.protocol
import zephyr.message
def callback(x):
    # Debug handler passed to the payload parser: dump each parsed
    # message to stdout (Python 2 print statement).
    print x
def reading_thread(protocol):
    """Pump bytes from *protocol* for 120 seconds, then return."""
    deadline = time.time() + 120
    while time.time() < deadline:
        protocol.read_and_handle_byte()
def create_data_files(input_definitions):
    """Record a BioHarness byte stream from each serial port.

    *input_definitions* is an iterable of (serial_port, enable_channels)
    pairs; one reader thread per port writes into
    ../test_data/120-second-bt-stream-<i>.
    """
    threads = []
    try:
        for serial_i, (serial_port, enable_channels) in enumerate(input_definitions):
            payload_parser = zephyr.message.MessagePayloadParser([callback])
            ser = serial.Serial(serial_port)
            protocol = zephyr.protocol.BioHarnessProtocol(ser, payload_parser.handle_message, "../test_data/120-second-bt-stream-%d" % serial_i)
            if enable_channels:
                protocol.enable_periodic_packets()
            thread = threading.Thread(target=reading_thread, args=(protocol,))
            threads.append(thread)
            thread.start()
    finally:
        # Always join the readers that did start, even if opening a later
        # port raised.
        for thread in threads:
            thread.join()
def main():
    # Capture from two serial ports; only the second device has periodic
    # packet streaming enabled.
    create_data_files([(29, False), (30, True)])
if __name__ == "__main__":
    main()
|
{
"content_hash": "f861557538f5280ac89e51116a67861a",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 144,
"avg_line_length": 24.3,
"alnum_prop": 0.5802469135802469,
"repo_name": "jpaalasm/zephyr-bt",
"id": "42d932884e2323baab1e285c80bd18e516039194",
"size": "1215",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "testing/create_test_data_file_from_bt.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "46916"
}
],
"symlink_target": ""
}
|
import unittest
from lib.relay import Relay
from .dev4testing import Dev
class Test_relay(unittest.TestCase):
    """Tests for lib.relay.Relay driven through the Dev fake device.

    The wire format asserted here is a 0x55 header byte, a one-byte state,
    and a ';' terminator.
    """

    _ON = bytes((85,)) + b'\x01;'
    _OFF = bytes((85,)) + b'\x00;'

    def setUp(self):
        self.dev = Dev()
        self.relay = Relay(self.dev)

    def test_on(self):
        self.relay.on()
        self.assertEqual(self.dev.pop(), self._ON)

    def test_off(self):
        self.relay.off()
        self.assertEqual(self.dev.pop(), self._OFF)

    def test_state(self):
        # A repeated call in the same state must not re-send the command.
        self.relay.off()
        self.relay.off()
        self.assertEqual(self.dev.pop(), self._OFF)
        self.assertIsNone(self.dev.pop())
        # off to on
        self.relay.on()
        self.relay.on()
        self.assertEqual(self.dev.pop(), self._ON)
        self.assertIsNone(self.dev.pop())
        # on to off
        self.relay.off()
        self.relay.off()
        self.assertEqual(self.dev.pop(), self._OFF)
        self.assertIsNone(self.dev.pop())
|
{
"content_hash": "131c0962d3be82f8eb79b3fdaadf69aa",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 65,
"avg_line_length": 27.6,
"alnum_prop": 0.5610766045548654,
"repo_name": "icve/liv-Ard",
"id": "01d0aaf54c7c5de3b045ce59399f93f0dd1c3ca7",
"size": "966",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hostscripts/test/test_relay.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "6870"
},
{
"name": "HTML",
"bytes": "315"
},
{
"name": "JavaScript",
"bytes": "758"
},
{
"name": "Python",
"bytes": "44359"
},
{
"name": "Shell",
"bytes": "124"
}
],
"symlink_target": ""
}
|
from Board import Board
from copy import copy
from copy import deepcopy
from dfs import dfs_wrapper
from BestFirstSolitaire import BestFirstSolitaire
class DFSAgent:
    """Solves a solitaire game via bounded depth-first search."""
    def __init__(self):
        self.sol = BestFirstSolitaire()
    def getPath(self, source = None, isPrint = False):
        # Same as getMoves() but returns only the board states, no moves.
        return self.getMoves(source, isPrint, isWithMove = False)
    def getMoves(self, source = None, isPrint = False, isWithMove = True):
        # Search from *source* (a fresh game when None). Returns the winning
        # path as a list of (state, move) pairs (or bare states when
        # isWithMove is False), or None when no win was found in the budget.
        parent, target = self.__dfs__(self.sol, 2000, source, isPrint)
        if not parent:
            return None
        node, move = target, None
        path = []
        # Walk parent links back from the winning state to the root.
        while parent[node]:
            node, move = parent[node]
            path.append( (node, move) if isWithMove else node )
        path.reverse()
        return path
    def __dfs__(self, solitaire, limit, source = None, isPrint = True):
        # Depth-first search bounded to *limit* node expansions; returns
        # (parent-map, winning-state) or (None, None) on failure.
        if source is None:
            source = solitaire.newGame()
        solitaire.clearBoard(source)
        front = [source]
        parent = {source: None}
        cnt = 0
        while front and cnt < limit:
            node = front.pop()
            if isPrint:
                print cnt
                print node
            for move in solitaire.nextMove(node):
                # Copy the board before applying the move so sibling moves
                # are expanded from an unmodified state.
                replica = Board(None, [col[:] for col in node.tableau],
                                deepcopy(node.foundation), copy(node.stock))
                child = solitaire.getChild(replica, move)
                solitaire.clearBoard(child)
                if child not in parent:
                    front.append(child)
                    parent[child] = (node, move)
                    if solitaire.isWin(child):
                        return parent, child
            cnt += 1
        return None, None
if __name__ == "__main__":
    # Demo: solve one game, printing search progress and the winning
    # sequence of states (or "Fail").
    da = DFSAgent()
    path = da.getPath(isPrint = True)
    if path is None:
        print "Fail"
    else:
        for state in path:
            print state
|
{
"content_hash": "ee6efac873157bac3873f0a7ef735f47",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 74,
"avg_line_length": 33,
"alnum_prop": 0.5470219435736677,
"repo_name": "davidxk/SolitaireBot",
"id": "b7d1131fbe80cea78fe9b5d4fd05f1b30a0d686c",
"size": "1914",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "DFSAgent.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "50120"
}
],
"symlink_target": ""
}
|
import argparse
import os
import re
import shlex
import sys
from subprocess import run, SubprocessError, DEVNULL, PIPE
from tempfile import NamedTemporaryFile
# Long-form description shown below the usage line in --help output.
DESC = """
A `csmith` fuzzing driver for `bindgen`.
Generates random C source files with `csmith` and then passes them to `bindgen`
(via `predicate.py`). If `bindgen` can't emit bindings, `rustc` can't compile
those bindings, or the compiled bindings' layout tests fail, then the driver has
found a bug, and will report the problematic test case to you.
"""
parser = argparse.ArgumentParser(
    formatter_class=argparse.RawDescriptionHelpFormatter,
    description=DESC.strip())
parser.add_argument(
    "--keep-going",
    action="store_true",
    help="Do not stop after finding a test case that exhibits a bug in `bindgen`. Instead, keep going.")
# Default csmith flags: no checksum code, no main(), and very small
# function bodies.
CSMITH_ARGS="\
--no-checksum \
--nomain \
--max-block-size 1 \
--max-block-depth 1"
parser.add_argument(
    "--csmith-args",
    type=str,
    default=CSMITH_ARGS,
    help="Pass this argument string to `csmith`. By default, very small functions are generated.")
# Default bindgen flags: derive every supported trait so more generated
# code paths are exercised.
BINDGEN_ARGS = "--with-derive-partialeq \
--with-derive-eq \
--with-derive-partialord \
--with-derive-ord \
--with-derive-hash \
--with-derive-default"
parser.add_argument(
    "--bindgen-args",
    type=str,
    default=BINDGEN_ARGS,
    help="Pass this argument string to `bindgen`. By default, all traits are derived.")
parser.add_argument(
    "--no-creduce",
    action="store_false",
    dest="creduce",
    help="Do not run `creduce` on any buggy test case(s) discovered.")
################################################################################
def cat(path, title=None):
    """Print a banner (using *title*, or *path* when no title is given)
    followed by the contents of the file at *path*."""
    banner = title or path
    print("-------------------- {} --------------------".format(banner))
    print()
    print()
    run(["cat", path])
def decode(f):
    """Decode *f* (bytes) as UTF-8, silently dropping undecodable bytes."""
    return f.decode("utf-8", "ignore")
def run_logged(cmd):
    """Run *cmd* with stdin closed and output captured, decode the output
    as UTF-8, and - when the command exits non-zero - echo the captured
    output with a '+' prefix per line.  Returns the CompletedProcess."""
    result = run(cmd, stdin=DEVNULL, stdout=PIPE, stderr=PIPE)
    result.stdout = decode(result.stdout)
    result.stderr = decode(result.stderr)
    if result.returncode == 0:
        return result
    print("\n")
    print("Error: {} exited with code {}".format(cmd, result.returncode))
    print("\n")
    for line in result.stdout.splitlines():
        sys.stdout.write("+" + line + "\n")
    for line in result.stderr.splitlines():
        sys.stderr.write("+" + line + "\n")
    return result
def main():
    """Fuzz loop: generate a random header with csmith, hand it to
    predicate.py, and report (and optionally creduce) any failure."""
    os.environ["RUST_BACKTRACE"] = "full"
    args = parser.parse_args()
    # Ensure the clang-args separator is present, then append an include
    # path pointing at this script's directory (presumably so headers
    # shipped next to the script, e.g. csmith.h, resolve - TODO confirm).
    bindgen_args = args.bindgen_args
    if bindgen_args.find(" -- ") == -1:
        bindgen_args = bindgen_args + " -- "
    bindgen_args = bindgen_args + " -I{}".format(os.path.abspath(os.path.dirname(sys.argv[0])))
    args.bindgen_args = bindgen_args
    print()
    print()
    print("Fuzzing `bindgen` with C-Smith...")
    print()
    print()
    iterations = 0
    while True:
        print("\rIteration: {}".format(iterations), end="", flush=True)
        iterations += 1
        # NOTE(review): `input` shadows the builtin. delete=False keeps the
        # temp file on disk after close() so csmith can write into it.
        input = NamedTemporaryFile(delete=False, prefix="input-", suffix=".h")
        input.close()
        result = run_logged(["csmith", "-o", input.name] + shlex.split(args.csmith_args))
        if result.returncode != 0:
            exit(1)
        predicate_command = [
            "./predicate.py",
            "--bindgen-args",
            args.bindgen_args,
            input.name
        ]
        result = run_logged(predicate_command)
        if result.returncode != 0:
            # Predicate failed: show the test case, optionally reduce it,
            # and print an issue template.
            print()
            print()
            cat(input.name, title="Failing test case: {}".format(input.name))
            print()
            print()
            if args.creduce:
                creduce(args, input.name, result)
            print_issue_template(args, input.name, predicate_command, result)
            if args.keep_going:
                continue
            exit(1)
        # Only clean up test cases that did not expose a bug.
        os.remove(input.name)
# Captures an `error[...]` diagnostic line from rustc stderr.
RUSTC_ERROR_REGEX = re.compile(r".*(error\[.*].*)")
# Captures a "test bindgen_test_layout_<name> ... FAILED" line from the
# layout-test runner's stdout.
LAYOUT_TEST_FAILURE = re.compile(r".*(test bindgen_test_layout_.* \.\.\. FAILED)")
def creduce(args, failing_test_case, result):
    """Shrink *failing_test_case* with creduce, picking an interestingness
    test based on how the predicate run failed."""
    print()
    print()
    print("Reducing failing test case with `creduce`...")
    # Case 1: rustc rejected the bindings - keep reducing while the same
    # rustc error message appears.
    match = re.search(RUSTC_ERROR_REGEX, result.stderr)
    if match:
        error_msg = match.group(1)
        print("...searching for \"{}\".".format(error_msg))
        return creduce_with_predicate_flags(
            args,
            failing_test_case,
            "--bindgen-args '{}' --expect-compile-fail --rustc-grep '{}'".format(
                args.bindgen_args,
                re.escape(error_msg)
            )
        )
    # Case 2: bindings compiled but a layout test failed - keep reducing
    # while the same struct's layout test fails.
    match = re.search(LAYOUT_TEST_FAILURE, result.stdout)
    if match:
        layout_failure = match.group(1)
        # Slice the struct name out of
        # "test bindgen_test_layout_<name> ... FAILED".
        struct_name = layout_failure[len("test bindgen_test_layout_"):layout_failure.rindex(" ... FAILED")]
        print("...searching for \"{}\".".format(layout_failure))
        return creduce_with_predicate_flags(
            args,
            failing_test_case,
            "--bindgen-args '{}' --expect-layout-tests-fail --bindings-grep '{}' --layout-tests-grep '{}'".format(
                args.bindgen_args,
                re.escape(struct_name),
                re.escape(layout_failure)
            )
        )
    print("...nevermind, don't know how to `creduce` this bug. Skipping.")
def creduce_with_predicate_flags(args, failing_test_case, predicate_flags):
    """Write an interestingness script that invokes predicate.py with
    *predicate_flags*, then run creduce against *failing_test_case*."""
    # NOTE(review): this string begins with a newline and indented lines,
    # so "#!/usr/bin/env bash" is not on line 1 of the written file; this
    # presumably relies on creduce executing the script via a shell
    # fallback - confirm.
    predicate = """
    #!/usr/bin/env bash
    set -eu
    {} {} {}
    """.format(
        os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), "predicate.py")),
        predicate_flags,
        os.path.basename(failing_test_case)
    )
    print("...and reducing with this script:")
    print()
    print()
    print(predicate)
    print()
    print()
    predicate_path = failing_test_case + ".predicate.sh"
    with open(predicate_path, "w") as p:
        p.write(predicate)
    # creduce requires the interestingness test to be executable.
    os.chmod(predicate_path, 0o755)
    creduce_command = ["creduce", "--n", str(os.cpu_count()), predicate_path, failing_test_case]
    print("Running:", creduce_command)
    result = run(creduce_command)
    if result.returncode == 0:
        print()
        print()
        print("`creduce` reduced the failing test case to:")
        print()
        print()
        cat(failing_test_case)
        print()
        print()
    else:
        print()
        print()
        print("`creduce` failed!")
        if not args.keep_going:
            sys.exit(1)
def print_issue_template(args, failing_test_case, predicate_command, result):
    """Print a ready-to-file GitHub issue body for the failing test case,
    embedding the header, the bindgen invocation, and the actual output."""
    test_case_contents = None
    with open(failing_test_case, "r") as f:
        test_case_contents = f.read()
    print("""
! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! !
! File this issue at https://github.com/rust-lang/rust-bindgen/issues/new !
! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! !

--------------- 8< --------------- 8< --------------- 8< ---------------

This bug was found with `csmith` and `driver.py`.

### Input Header

```c
{}
```

### `bindgen` Invocation

```
$ {}
```

### Actual Results

<details>

```
{}
```

</details>

### Expected Results

`bindgen` emits bindings OK, then `rustc` compiles those bindings OK, then the
compiled bindings' layout tests pass OK.

--------------- 8< --------------- 8< --------------- 8< ---------------

<3 <3 <3 Thank you! <3 <3 <3
""".format(
        test_case_contents,
        " ".join(map(lambda s: "'{}'".format(s), predicate_command)),
        result.stdout + result.stderr
    ))
if __name__ == "__main__":
    try:
        # Run relative to this script's directory so "./predicate.py"
        # resolves regardless of the caller's cwd.
        os.chdir(os.path.abspath(os.path.dirname(sys.argv[0])))
        main()
    except KeyboardInterrupt:
        exit()
|
{
"content_hash": "149b7a9df475dc6d38a0b75c828e49bd",
"timestamp": "",
"source": "github",
"line_count": 282,
"max_line_length": 114,
"avg_line_length": 27.875886524822697,
"alnum_prop": 0.5599796463554255,
"repo_name": "emilio/rust-bindgen",
"id": "1d3af540fd8adba6d2e02cd0a8511563f5cc0917",
"size": "7885",
"binary": false,
"copies": "2",
"ref": "refs/heads/sm-hacks-rebase-squashed",
"path": "csmith-fuzzing/driver.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "1532"
},
{
"name": "C",
"bytes": "88512"
},
{
"name": "C++",
"bytes": "4727930"
},
{
"name": "Objective-C",
"bytes": "4713"
},
{
"name": "Python",
"bytes": "17325"
},
{
"name": "Rust",
"bytes": "2266531"
},
{
"name": "Shell",
"bytes": "8077"
}
],
"symlink_target": ""
}
|
from .methods import new, new_component, new_pb
|
{
"content_hash": "188f8baed76ef17800dd35555e4c69e2",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 47,
"avg_line_length": 48,
"alnum_prop": 0.7708333333333334,
"repo_name": "Python-Tools/pmfp",
"id": "6f316dba6043af69a4506bd0e496cc4591907723",
"size": "48",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pmfp/new/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "808"
},
{
"name": "CSS",
"bytes": "181"
},
{
"name": "HTML",
"bytes": "10845"
},
{
"name": "Makefile",
"bytes": "601"
},
{
"name": "Python",
"bytes": "197396"
}
],
"symlink_target": ""
}
|
from django.db import models
import datetime
class Record(models.Model):
    """A single LTC price observation.

    ``timestamp`` is a Unix epoch in seconds; ``count`` is how many
    fetches returned this same record.
    """
    price = models.DecimalField('the price of ltc', max_digits=6, decimal_places=2)
    timestamp = models.IntegerField('date of record', db_index=True)
    count = models.IntegerField('fetch times', default=0)

    class Meta:
        get_latest_by = 'timestamp'

    def __str__(self):
        """Render as 'YY年MM月DD日 HH:MM:SS <price> [xN]' in UTC+8."""
        tz = datetime.timezone(datetime.timedelta(hours=8), 'Asia/Shanghai')
        # FIX: convert the epoch directly into the target zone.  The old
        # code took naive *local* time from fromtimestamp() and relabelled
        # it as UTC, which is only correct when the server runs in UTC.
        date = datetime.datetime.fromtimestamp(self.timestamp, tz=tz)
        desc = date.strftime('%y年%m月%d日 %H:%M:%S') + ' ' + str(self.price)
        if self.count > 1:
            desc = desc + ' x' + str(self.count)
        return desc

    @classmethod
    def create(cls, price, timestamp):
        """Create and persist a Record with count initialised to 1."""
        record = cls(price=price, timestamp=timestamp, count=1)
        record.save()
        return record

    def dump_json(self):
        """Return the record's fields as a plain dict.

        NOTE(review): ``price`` is a Decimal, which ``json.dumps`` cannot
        serialize without a custom encoder - confirm callers handle this.
        """
        return {
            'price': self.price,
            'timestamp': self.timestamp,
            'count': self.count,
        }
|
{
"content_hash": "c28f2f91923d483198fc2bd24413775d",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 88,
"avg_line_length": 34.45454545454545,
"alnum_prop": 0.6051011433597185,
"repo_name": "yiplee/ltc-huobi",
"id": "0e00a2b0e0d8ae717fcb5e2ba3515da708a40052",
"size": "1143",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ltc/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "46518"
},
{
"name": "JavaScript",
"bytes": "97398"
},
{
"name": "Python",
"bytes": "10588"
},
{
"name": "Shell",
"bytes": "46"
}
],
"symlink_target": ""
}
|
import amo
def match_rules(rules, app, action):
    """
    This will match rules found in Group.

    *rules* is a comma-separated list of 'app:action' pairs.  '*' is a
    wildcard on either side, and an *action* argument of '%' matches any
    rule whose app part matches.
    """
    for rule in rules.split(','):
        target_app, target_action = rule.split(':')
        app_ok = target_app in ('*', app)
        action_ok = action == '%' or target_action in ('*', action)
        if app_ok and action_ok:
            return True
    return False
def action_allowed(request, app, action):
    """
    Determines if the request user has permission to do a certain action

    'Admin:%' is true if the user has any of:
    ('Admin:*', 'Admin:%s'%whatever, '*:*',) as rules.
    """
    # Requests without a `groups` attribute have no permissions.
    for group in getattr(request, 'groups', ()):
        if match_rules(group.rules, app, action):
            return True
    return False
def action_allowed_user(user, app, action):
    """Similar to action_allowed, but takes user instead of request."""
    return any(match_rules(group.rules, app, action)
               for group in user.groups.all())
def check_ownership(request, obj, require_owner=False, require_author=False,
                    ignore_disabled=False, admin=True):
    """
    A convenience function. Check if request.user has permissions
    for the object.

    Delegates to obj.check_ownership() when the object provides that
    hook; objects without it are never considered owned.
    """
    checker = getattr(obj, 'check_ownership', None)
    if checker is None:
        return False
    return checker(request, require_owner=require_owner,
                   require_author=require_author,
                   ignore_disabled=ignore_disabled,
                   admin=admin)
def check_addon_ownership(request, addon, viewer=False, dev=False,
                          support=False, admin=True, ignore_disabled=False):
    """
    Check request.user's permissions for the addon.

    If user is an admin they can do anything.
    If the app is disabled only admins have permission.
    If they're an app owner they can do anything.

    dev=True checks that the user has an owner or developer role.
    viewer=True checks that the user has an owner, developer, or viewer role.
    support=True checks that the user has a support role.

    NOTE: dev/viewer/support are mutually exclusive - `dev` takes
    precedence, then `viewer`, then `support`.
    """
    if not request.user.is_authenticated():
        return False
    # Deleted apps can't be edited at all.
    if addon.is_deleted:
        return False
    # Users with 'Apps:Edit' can do anything.
    if admin and action_allowed(request, 'Apps', 'Edit'):
        return True
    # Only admins can edit banned addons.
    if addon.status == amo.STATUS_DISABLED and not ignore_disabled:
        return False
    # Addon owners can do everything else.
    roles = (amo.AUTHOR_ROLE_OWNER,)
    if dev:
        roles += (amo.AUTHOR_ROLE_DEV,)
    # Viewer privs are implied for devs.
    elif viewer:
        roles += (amo.AUTHOR_ROLE_DEV, amo.AUTHOR_ROLE_VIEWER,
                  amo.AUTHOR_ROLE_SUPPORT)
    # Support can do support.
    elif support:
        roles += (amo.AUTHOR_ROLE_SUPPORT,)
    return addon.authors.filter(pk=request.user.pk,
                                addonuser__role__in=roles).exists()
def check_reviewer(request, region=None):
    """True when the request user may review apps, optionally for a
    special-cased *region* (e.g. China)."""
    if region is None:
        return action_allowed(request, 'Apps', 'Review')
    # Special regions get a region-specific review permission.
    from mkt.regions.utils import parse_region
    region_slug = parse_region(region).slug.upper()
    return action_allowed(request, 'Apps', 'ReviewRegion%s' % region_slug)
|
{
"content_hash": "d05e678330f6f33026925df7cc2ce452",
"timestamp": "",
"source": "github",
"line_count": 97,
"max_line_length": 78,
"avg_line_length": 35.28865979381443,
"alnum_prop": 0.6099912357581069,
"repo_name": "ngokevin/zamboni",
"id": "2002736e7a642bd84d80b8dc7f8f6f9fe60782e1",
"size": "3423",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "mkt/access/acl.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "356777"
},
{
"name": "JavaScript",
"bytes": "536388"
},
{
"name": "Python",
"bytes": "3883015"
},
{
"name": "Shell",
"bytes": "13597"
}
],
"symlink_target": ""
}
|
from setuptools import setup

# Packaging metadata: `ransac` ships as a single module with numpy as its
# only runtime dependency.
setup(name="ransac",
      version='0.1',
      description='Robust method for fitting a model to observed data.',
      author='Fredrik Appelros, Carl Ekerot',
      author_email='fredrik.appelros@gmail.com, kalle@implode.se',
      url='https://github.com/FredrikAppelros/ransac',
      py_modules=['ransac'],
      install_requires=['numpy'],
      )
|
{
"content_hash": "410d44d697597606d1b36a7e14ba9718",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 72,
"avg_line_length": 31.583333333333332,
"alnum_prop": 0.6701846965699209,
"repo_name": "FredrikAppelros/ransac",
"id": "731bdcd79f7ab28a543a1997629be6fc9a6670ef",
"size": "379",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "4428"
}
],
"symlink_target": ""
}
|
from flask import render_template, redirect, request, url_for, flash
from flask.ext.login import login_user, logout_user, login_required, \
current_user
from . import auth
from .. import db
from ..models import User
from ..email import send_email
from .forms import LoginForm, RegistrationForm, ChangePasswordForm,\
PasswordResetRequestForm, PasswordResetForm, ChangeEmailForm
@auth.before_app_request
def before_request():
    """Refresh the user's last-seen time and funnel unconfirmed accounts
    to the confirmation page.

    The redirect is skipped for auth endpoints and static files so the
    user can still log out, confirm, and load assets.
    """
    if current_user.is_authenticated:
        current_user.ping()
        # FIX: request.endpoint is None for unmatched routes (404s), so
        # the old `request.endpoint[:5]` raised TypeError; guard on it.
        if not current_user.confirmed \
                and request.endpoint \
                and request.endpoint[:5] != 'auth.' \
                and request.endpoint != 'static':
            return redirect(url_for('auth.unconfirmed'))
@auth.route('/unconfirmed')
def unconfirmed():
    """Page shown to logged-in users whose account is not yet confirmed."""
    show_page = not (current_user.is_anonymous or current_user.confirmed)
    if show_page:
        return render_template('auth/unconfirmed.html')
    return redirect(url_for('main.index'))
@auth.route('/login', methods=['GET', 'POST'])
def login():
    """Render the login form; authenticate and redirect on valid POST."""
    form = LoginForm()
    if form.validate_on_submit():
        user = User.query.filter_by(email=form.email.data).first()
        if user is not None and user.verify_password(form.password.data):
            login_user(user, form.remember_me.data)
            # SECURITY NOTE(review): `next` comes from the query string
            # unvalidated - this is an open-redirect vector; consider
            # restricting it to relative URLs.
            return redirect(request.args.get('next') or url_for('main.index'))
        flash('Invalid username or password.')
    return render_template('auth/login.html', form=form)
@auth.route('/logout')
@login_required
def logout():
    """End the current session and return to the home page."""
    logout_user()
    flash('You have been logged out.')
    return redirect(url_for('main.index'))
@auth.route('/register', methods=['GET', 'POST'])
def register():
    """Create a new account and send its confirmation email."""
    form = RegistrationForm()
    if form.validate_on_submit():
        user = User(email=form.email.data,
                    username=form.username.data,
                    password=form.password.data)
        db.session.add(user)
        # Commit before generating the token - presumably the token
        # encodes the user's database id (TODO confirm).
        db.session.commit()
        token = user.generate_confirmation_token()
        send_email(user.email, 'Confirm Your Account',
                   'auth/email/confirm', user=user, token=token)
        flash('A confirmation email has been sent to you by email.')
        return redirect(url_for('auth.login'))
    return render_template('auth/register.html', form=form)
@auth.route('/confirm/<token>')
@login_required
def confirm(token):
    """Validate an email-confirmation *token* for the logged-in user."""
    if current_user.confirmed:
        # Already confirmed - nothing to do.
        return redirect(url_for('main.index'))
    message = ('You have confirmed your account. Thanks!'
               if current_user.confirm(token)
               else 'The confirmation link is invalid or has expired.')
    flash(message)
    return redirect(url_for('main.index'))
@auth.route('/confirm')
@login_required
def resend_confirmation():
    """Send a fresh confirmation token to the logged-in user."""
    token = current_user.generate_confirmation_token()
    send_email(current_user.email, 'Confirm Your Account',
               'auth/email/confirm', user=current_user, token=token)
    flash('A new confirmation email has been sent to you by email.')
    return redirect(url_for('main.index'))
@auth.route('/change-password', methods=['GET', 'POST'])
@login_required
def change_password():
    """Let a logged-in user change their password after re-verifying the
    old one."""
    form = ChangePasswordForm()
    if form.validate_on_submit():
        if current_user.verify_password(form.old_password.data):
            current_user.password = form.password.data
            # NOTE(review): unlike register(), this only add()s without an
            # explicit commit - confirm a teardown hook commits the session.
            db.session.add(current_user)
            flash('Your password has been updated.')
            return redirect(url_for('main.index'))
        else:
            flash('Invalid password.')
    return render_template("auth/change_password.html", form=form)
@auth.route('/reset', methods=['GET', 'POST'])
def password_reset_request():
    """Email a password-reset link to the given address, if registered.

    The success message is flashed even when the address is unknown, so
    the form does not reveal which emails have accounts.
    """
    # FIX: use `is_anonymous` as a property, consistent with its use
    # elsewhere in this module (see unconfirmed()); calling it raises
    # TypeError on Flask-Login versions where it is a property.
    if not current_user.is_anonymous:
        return redirect(url_for('main.index'))
    form = PasswordResetRequestForm()
    if form.validate_on_submit():
        user = User.query.filter_by(email=form.email.data).first()
        if user:
            token = user.generate_reset_token()
            send_email(user.email, 'Reset Your Password',
                       'auth/email/reset_password',
                       user=user, token=token,
                       next=request.args.get('next'))
        flash('An email with instructions to reset your password has been '
              'sent to you.')
        return redirect(url_for('auth.login'))
    return render_template('auth/reset_password.html', form=form)
@auth.route('/reset/<token>', methods=['GET', 'POST'])
def password_reset(token):
    """Set a new password for the account matching the form's email,
    given a valid reset *token*."""
    # FIX: `is_anonymous` is used as a property elsewhere in this module;
    # calling it would raise TypeError on modern Flask-Login.
    if not current_user.is_anonymous:
        return redirect(url_for('main.index'))
    form = PasswordResetForm()
    if form.validate_on_submit():
        user = User.query.filter_by(email=form.email.data).first()
        if user is None:
            return redirect(url_for('main.index'))
        if user.reset_password(token, form.password.data):
            flash('Your password has been updated.')
            return redirect(url_for('auth.login'))
        else:
            # Invalid/expired token: bail silently to the index page.
            return redirect(url_for('main.index'))
    return render_template('auth/reset_password.html', form=form)
@auth.route('/change-email', methods=['GET', 'POST'])
@login_required
def change_email_request():
    """Start an email change: verify the password, then send a
    confirmation link to the *new* address."""
    form = ChangeEmailForm()
    if form.validate_on_submit():
        if current_user.verify_password(form.password.data):
            new_email = form.email.data
            token = current_user.generate_email_change_token(new_email)
            # Confirmation goes to the new address; the change is applied
            # only in change_email() once the token is presented.
            send_email(new_email, 'Confirm your email address',
                       'auth/email/change_email',
                       user=current_user, token=token)
            flash('An email with instructions to confirm your new email '
                  'address has been sent to you.')
            return redirect(url_for('main.index'))
        else:
            flash('Invalid email or password.')
    return render_template("auth/change_email.html", form=form)
@auth.route('/change-email/<token>')
@login_required
def change_email(token):
    """Apply a pending email change if *token* is valid."""
    message = ('Your email address has been updated.'
               if current_user.change_email(token)
               else 'Invalid request.')
    flash(message)
    return redirect(url_for('main.index'))
|
{
"content_hash": "d26ae5c10a59f914e6b3fa6c6ea1a1bc",
"timestamp": "",
"source": "github",
"line_count": 155,
"max_line_length": 78,
"avg_line_length": 39,
"alnum_prop": 0.63904052936311,
"repo_name": "MozzieChou/FlaskDemo",
"id": "b338bf599f182fe9f76b2f496025ea01eb5828d8",
"size": "6071",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/auth/views.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "886"
},
{
"name": "C",
"bytes": "447552"
},
{
"name": "C++",
"bytes": "2005"
},
{
"name": "CSS",
"bytes": "1891"
},
{
"name": "HTML",
"bytes": "19211"
},
{
"name": "PowerShell",
"bytes": "8104"
},
{
"name": "Python",
"bytes": "57637"
}
],
"symlink_target": ""
}
|
import unittest
from telemetry.story import expectations
from telemetry.testing import fakes
class MockState(object):
  """Minimal stand-in for a shared state object, exposing only .platform."""

  def __init__(self):
    self.platform = fakes.FakePlatform()
class MockStory(object):
  """Stand-in for a telemetry Story that provides only a read-only name."""

  def __init__(self, name):
    self._name = name

  @property
  def name(self):
    return self._name
class MockStorySet(object):
  """Stand-in for a telemetry StorySet wrapping a fixed list of stories."""

  def __init__(self, stories):
    self._stories = stories

  @property
  def stories(self):
    return self._stories
class MockBrowserFinderOptions(object):
  """Stand-in for BrowserFinderOptions carrying only browser_type."""

  def __init__(self):
    self._browser_type = None

  @property
  def browser_type(self):
    return self._browser_type

  @browser_type.setter
  def browser_type(self, t):
    # Python 2 only: basestring covers both str and unicode.
    assert isinstance(t, basestring)
    self._browser_type = t
class TestConditionTest(unittest.TestCase):
  """Exercises the canned TestCondition objects in telemetry.story.expectations.

  Each condition's ShouldDisable() is probed twice: once with a FakePlatform
  configured to match the condition and once configured not to match.
  """

  def setUp(self):
    self._platform = fakes.FakePlatform()
    self._finder_options = MockBrowserFinderOptions()

  def testAllAlwaysReturnsTrue(self):
    self.assertTrue(
        expectations.ALL.ShouldDisable(self._platform, self._finder_options))

  def testAllWinReturnsTrueOnWindows(self):
    self._platform.SetOSName('win')
    self.assertTrue(
        expectations.ALL_WIN.ShouldDisable(self._platform,
                                           self._finder_options))

  def testAllWinReturnsFalseOnOthers(self):
    self._platform.SetOSName('not_windows')
    self.assertFalse(
        expectations.ALL_WIN.ShouldDisable(self._platform,
                                           self._finder_options))

  def testAllLinuxReturnsTrueOnLinux(self):
    self._platform.SetOSName('linux')
    self.assertTrue(expectations.ALL_LINUX.ShouldDisable(self._platform,
                                                         self._finder_options))

  def testAllLinuxReturnsFalseOnOthers(self):
    self._platform.SetOSName('not_linux')
    self.assertFalse(expectations.ALL_LINUX.ShouldDisable(self._platform,
                                                          self._finder_options))

  def testAllMacReturnsTrueOnMac(self):
    self._platform.SetOSName('mac')
    self.assertTrue(expectations.ALL_MAC.ShouldDisable(self._platform,
                                                       self._finder_options))

  def testAllMacReturnsFalseOnOthers(self):
    self._platform.SetOSName('not_mac')
    self.assertFalse(expectations.ALL_MAC.ShouldDisable(self._platform,
                                                        self._finder_options))

  def testAllAndroidReturnsTrueOnAndroid(self):
    self._platform.SetOSName('android')
    self.assertTrue(
        expectations.ALL_ANDROID.ShouldDisable(self._platform,
                                               self._finder_options))

  def testAllAndroidReturnsFalseOnOthers(self):
    self._platform.SetOSName('not_android')
    self.assertFalse(
        expectations.ALL_ANDROID.ShouldDisable(self._platform,
                                               self._finder_options))

  # Desktop/mobile conditions are driven purely by the OS name.
  def testAllDesktopReturnsFalseOnNonDesktop(self):
    false_platforms = ['android']
    for plat in false_platforms:
      self._platform.SetOSName(plat)
      self.assertFalse(
          expectations.ALL_DESKTOP.ShouldDisable(self._platform,
                                                 self._finder_options))

  def testAllDesktopReturnsTrueOnDesktop(self):
    true_platforms = ['win', 'mac', 'linux']
    for plat in true_platforms:
      self._platform.SetOSName(plat)
      self.assertTrue(
          expectations.ALL_DESKTOP.ShouldDisable(self._platform,
                                                 self._finder_options))

  def testAllMobileReturnsFalseOnNonMobile(self):
    false_platforms = ['win', 'mac', 'linux']
    for plat in false_platforms:
      self._platform.SetOSName(plat)
      self.assertFalse(
          expectations.ALL_MOBILE.ShouldDisable(self._platform,
                                                self._finder_options))

  def testAllMobileReturnsTrueOnMobile(self):
    true_platforms = ['android']
    for plat in true_platforms:
      self._platform.SetOSName(plat)
      self.assertTrue(
          expectations.ALL_MOBILE.ShouldDisable(self._platform,
                                                self._finder_options))

  # Device-specific conditions: first on a non-Android OS (always False)...
  def testAndroidNexus5ReturnsFalseOnNotAndroid(self):
    self._platform.SetOSName('not_android')
    self.assertFalse(
        expectations.ANDROID_NEXUS5.ShouldDisable(self._platform,
                                                  self._finder_options))

  def testAndroidNexus5XReturnsFalseOnNotAndroid(self):
    self._platform.SetOSName('not_android')
    self.assertFalse(
        expectations.ANDROID_NEXUS5X.ShouldDisable(self._platform,
                                                   self._finder_options))

  def testAndroidNexus6ReturnsFalseOnNotAndroid(self):
    self._platform.SetOSName('not_android')
    self.assertFalse(
        expectations.ANDROID_NEXUS6.ShouldDisable(self._platform,
                                                  self._finder_options))

  # NOTE(review): the Nexus6P tests below check ANDROID_NEXUS6, not a
  # dedicated ANDROID_NEXUS6P condition — confirm this is intentional
  # ('Nexus 6' is a prefix of 'Nexus 6P').
  def testAndroidNexus6PReturnsFalseOnNotAndroid(self):
    self._platform.SetOSName('not_android')
    self.assertFalse(
        expectations.ANDROID_NEXUS6.ShouldDisable(self._platform,
                                                  self._finder_options))

  def testAndroidNexus7ReturnsFalseOnNotAndroid(self):
    self._platform.SetOSName('not_android')
    self.assertFalse(
        expectations.ANDROID_NEXUS7.ShouldDisable(self._platform,
                                                  self._finder_options))

  def testAndroidCherryMobileReturnsFalseOnNotAndroid(self):
    self._platform.SetOSName('not_android')
    self.assertFalse(
        expectations.ANDROID_ONE.ShouldDisable(self._platform,
                                               self._finder_options))

  def testAndroidSvelteReturnsFalseOnNotAndroid(self):
    self._platform.SetOSName('not_android')
    self.assertFalse(
        expectations.ANDROID_SVELTE.ShouldDisable(self._platform,
                                                  self._finder_options))

  # ...then on Android with no/other device type (still False)...
  def testAndroidNexus5ReturnsFalseOnAndroidNotNexus5(self):
    self._platform.SetOSName('android')
    self.assertFalse(
        expectations.ANDROID_NEXUS5.ShouldDisable(self._platform,
                                                  self._finder_options))

  def testAndroidNexus5XReturnsFalseOnAndroidNotNexus5X(self):
    self._platform.SetOSName('android')
    self.assertFalse(
        expectations.ANDROID_NEXUS5X.ShouldDisable(self._platform,
                                                   self._finder_options))

  def testAndroidNexus6ReturnsFalseOnAndroidNotNexus6(self):
    self._platform.SetOSName('android')
    self.assertFalse(
        expectations.ANDROID_NEXUS6.ShouldDisable(self._platform,
                                                  self._finder_options))

  def testAndroidNexus6PReturnsFalseOnAndroidNotNexus6P(self):
    self._platform.SetOSName('android')
    self.assertFalse(
        expectations.ANDROID_NEXUS6.ShouldDisable(self._platform,
                                                  self._finder_options))

  def testAndroidNexus7ReturnsFalseOnAndroidNotNexus7(self):
    self._platform.SetOSName('android')
    self.assertFalse(
        expectations.ANDROID_NEXUS7.ShouldDisable(self._platform,
                                                  self._finder_options))

  def testAndroidCherryMobileReturnsFalseOnAndroidNotCherryMobile(self):
    self._platform.SetOSName('android')
    self.assertFalse(
        expectations.ANDROID_ONE.ShouldDisable(self._platform,
                                               self._finder_options))

  def testAndroidSvelteReturnsFalseOnAndroidNotSvelte(self):
    self._platform.SetOSName('android')
    self.assertFalse(
        expectations.ANDROID_SVELTE.ShouldDisable(self._platform,
                                                  self._finder_options))

  # ...and finally on Android with the matching device type (True).
  def testAndroidNexus5ReturnsTrueOnAndroidNexus5(self):
    self._platform.SetOSName('android')
    self._platform.SetDeviceTypeName('Nexus 5')
    self.assertTrue(
        expectations.ANDROID_NEXUS5.ShouldDisable(self._platform,
                                                  self._finder_options))

  def testAndroidNexus5XReturnsTrueOnAndroidNexus5X(self):
    self._platform.SetOSName('android')
    self._platform.SetDeviceTypeName('Nexus 5X')
    self.assertTrue(
        expectations.ANDROID_NEXUS5X.ShouldDisable(self._platform,
                                                   self._finder_options))

  def testAndroidNexus6ReturnsTrueOnAndroidNexus6(self):
    self._platform.SetOSName('android')
    self._platform.SetDeviceTypeName('Nexus 6')
    self.assertTrue(
        expectations.ANDROID_NEXUS6.ShouldDisable(self._platform,
                                                  self._finder_options))

  def testAndroidNexus6PReturnsTrueOnAndroidNexus6P(self):
    self._platform.SetOSName('android')
    self._platform.SetDeviceTypeName('Nexus 6P')
    self.assertTrue(
        expectations.ANDROID_NEXUS6.ShouldDisable(self._platform,
                                                  self._finder_options))

  def testAndroidNexus7ReturnsTrueOnAndroidNexus7(self):
    self._platform.SetOSName('android')
    self._platform.SetDeviceTypeName('Nexus 7')
    self.assertTrue(
        expectations.ANDROID_NEXUS7.ShouldDisable(self._platform,
                                                  self._finder_options))

  def testAndroidCherryMobileReturnsTrueOnAndroidCherryMobile(self):
    self._platform.SetOSName('android')
    # W6210 is the Cherry Mobile / Android One device type string.
    self._platform.SetDeviceTypeName('W6210')
    self.assertTrue(
        expectations.ANDROID_ONE.ShouldDisable(self._platform,
                                               self._finder_options))

  def testAndroidSvelteReturnsTrueOnAndroidSvelte(self):
    self._platform.SetOSName('android')
    self._platform.SetIsSvelte(True)
    self.assertTrue(
        expectations.ANDROID_SVELTE.ShouldDisable(self._platform,
                                                  self._finder_options))

  # Webview additionally requires the android-webview browser type.
  def testAndroidWebviewReturnsTrueOnAndroidWebview(self):
    self._platform.SetOSName('android')
    self._platform.SetIsAosp(True)
    self._finder_options.browser_type = 'android-webview'
    self.assertTrue(
        expectations.ANDROID_WEBVIEW.ShouldDisable(self._platform,
                                                   self._finder_options))

  def testAndroidWebviewReturnsFalseOnAndroidNotWebview(self):
    self._platform.SetOSName('android')
    self._platform.SetIsAosp(False)
    self.assertFalse(
        expectations.ANDROID_WEBVIEW.ShouldDisable(self._platform,
                                                   self._finder_options))

  def testAndroidWebviewReturnsFalseOnNotAndroid(self):
    self._platform.SetOSName('not_android')
    self.assertFalse(
        expectations.ANDROID_WEBVIEW.ShouldDisable(self._platform,
                                                   self._finder_options))

  # Mac version conditions key off the OS version detail string.
  def testMac1011ReturnsTrueOnMac1011(self):
    self._platform.SetOSName('mac')
    self._platform.SetOsVersionDetailString('10.11')
    self.assertTrue(
        expectations.MAC_10_11.ShouldDisable(self._platform,
                                             self._finder_options))

  def testMac1011ReturnsFalseOnNotMac1011(self):
    self._platform.SetOSName('mac')
    self._platform.SetOsVersionDetailString('10.12')
    self.assertFalse(
        expectations.MAC_10_11.ShouldDisable(self._platform,
                                             self._finder_options))

  def testMac1012ReturnsTrueOnMac1012(self):
    self._platform.SetOSName('mac')
    self._platform.SetOsVersionDetailString('10.12')
    self.assertTrue(
        expectations.MAC_10_12.ShouldDisable(self._platform,
                                             self._finder_options))

  def testMac1012ReturnsFalseOnNotMac1012(self):
    self._platform.SetOSName('mac')
    self._platform.SetOsVersionDetailString('10.11')
    self.assertFalse(
        expectations.MAC_10_12.ShouldDisable(self._platform,
                                             self._finder_options))
class StoryExpectationsTest(unittest.TestCase):
  """Tests for StoryExpectations disable/permanently-disable bookkeeping."""

  def setUp(self):
    self.platform = fakes.FakePlatform()
    self.finder_options = MockBrowserFinderOptions()

  def testCantDisableAfterInit(self):
    # Expectations are frozen after construction; late mutation must assert.
    e = expectations.StoryExpectations()
    with self.assertRaises(AssertionError):
      e.PermanentlyDisableBenchmark(['test'], 'test')
    with self.assertRaises(AssertionError):
      e.DisableStory('story', ['platform'], 'reason')

  def testPermanentlyDisableBenchmark(self):
    class FooExpectations(expectations.StoryExpectations):
      def SetExpectations(self):
        self.PermanentlyDisableBenchmark(
            [expectations.ALL_WIN], 'crbug.com/123')

    e = FooExpectations()
    self.platform.SetOSName('win')
    reason = e.IsBenchmarkDisabled(self.platform, self.finder_options)
    self.assertEqual(reason, 'crbug.com/123')

    self.platform.SetOSName('android')
    reason = e.IsBenchmarkDisabled(self.platform, self.finder_options)
    self.assertIsNone(reason)

  def testDisableStoryMultipleConditions(self):
    class FooExpectations(expectations.StoryExpectations):
      def SetExpectations(self):
        self.DisableStory(
            'multi', [expectations.ALL_WIN], 'crbug.com/123')
        self.DisableStory(
            'multi', [expectations.ALL_MAC], 'crbug.com/456')

    e = FooExpectations()
    self.platform.SetOSName('mac')
    reason = e.IsStoryDisabled(
        MockStory('multi'), self.platform, self.finder_options)
    self.assertEqual(reason, 'crbug.com/456')

  def testDisableStoryOneCondition(self):
    class FooExpectations(expectations.StoryExpectations):
      def SetExpectations(self):
        self.DisableStory(
            'disable', [expectations.ALL_WIN], 'crbug.com/123')

    e = FooExpectations()
    self.platform.SetOSName('win')
    reason = e.IsStoryDisabled(
        MockStory('disable'), self.platform, self.finder_options)
    self.assertEqual(reason, 'crbug.com/123')
    self.platform.SetOSName('mac')
    # BUG FIX: this used MockStory('disabled') — a story with no expectation
    # at all — so the platform-mismatch path of 'disable' was never tested.
    reason = e.IsStoryDisabled(
        MockStory('disable'), self.platform, self.finder_options)
    self.assertIsNone(reason)

  def testDisableStoryWithLongName(self):
    # Story names longer than the limit must be rejected at construction...
    class FooExpectations(expectations.StoryExpectations):
      def SetExpectations(self):
        self.DisableStory(
            '123456789012345678901234567890123456789012345678901234567890123456'
            '789012345',
            [expectations.ALL], 'Too Long')

    with self.assertRaises(AssertionError):
      FooExpectations()

  def testDisableStoryWithLongNameStartsWithHttp(self):
    # ...unless they look like URLs, which are exempt from the length check.
    class FooExpectations(expectations.StoryExpectations):
      def SetExpectations(self):
        self.DisableStory(
            'http12345678901234567890123456789012345678901234567890123456789012'
            '3456789012345',
            [expectations.ALL], 'Too Long')

    FooExpectations()

  def testGetBrokenExpectationsNotMatching(self):
    class FooExpectations(expectations.StoryExpectations):
      def SetExpectations(self):
        self.DisableStory('bad_name', [expectations.ALL], 'crbug.com/123')

    e = FooExpectations()
    s = MockStorySet([MockStory('good_name')])
    # An expectation naming a story absent from the set is reported broken.
    self.assertEqual(e.GetBrokenExpectations(s), ['bad_name'])

  def testGetBrokenExpectationsMatching(self):
    class FooExpectations(expectations.StoryExpectations):
      def SetExpectations(self):
        self.DisableStory('good_name', [expectations.ALL], 'crbug.com/123')

    e = FooExpectations()
    s = MockStorySet([MockStory('good_name')])
    self.assertEqual(e.GetBrokenExpectations(s), [])
|
{
"content_hash": "af60fda69f6e7f34d16de12025783856",
"timestamp": "",
"source": "github",
"line_count": 408,
"max_line_length": 80,
"avg_line_length": 38.43872549019608,
"alnum_prop": 0.6444557801441051,
"repo_name": "catapult-project/catapult-csm",
"id": "41a05f9ded0efe85bc4406898d04620e9b149034",
"size": "15846",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "telemetry/telemetry/story/expectations_unittest.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "4902"
},
{
"name": "C++",
"bytes": "43728"
},
{
"name": "CSS",
"bytes": "24873"
},
{
"name": "Go",
"bytes": "80325"
},
{
"name": "HTML",
"bytes": "11817766"
},
{
"name": "JavaScript",
"bytes": "518002"
},
{
"name": "Makefile",
"bytes": "1588"
},
{
"name": "Python",
"bytes": "6207634"
},
{
"name": "Shell",
"bytes": "2558"
}
],
"symlink_target": ""
}
|
"""
==========================================================
Adjustment for chance in clustering performance evaluation
==========================================================
The following plots demonstrate the impact of the number of clusters and
number of samples on various clustering performance evaluation metrics.
Non-adjusted measures such as the V-Measure show a dependency between
the number of clusters and the number of samples: the mean V-Measure
of random labeling increases significantly as the number of clusters is
closer to the total number of samples used to compute the measure.
Adjusted-for-chance measures such as ARI display some random variations
centered around a mean score of 0.0 for any number of samples and
clusters.
Only adjusted measures can hence safely be used as a consensus index
to evaluate the average stability of clustering algorithms for a given
value of k on various overlapping sub-samples of the dataset.
"""
print(__doc__)
# Author: Olivier Grisel <olivier.grisel@ensta.org>
# License: Simplified BSD
import numpy as np
import pylab as pl
from time import time
from sklearn import metrics
def uniform_labelings_scores(score_func, n_samples, n_clusters_range,
                             fixed_n_classes=None, n_runs=5, seed=42):
    """Compute score for 2 random uniform cluster labelings.

    Both random labelings have the same number of clusters for each
    possible value in ``n_clusters_range``.

    When ``fixed_n_classes`` is not None the first labeling is considered
    a ground truth class assignment with a fixed number of classes.

    Parameters
    ----------
    score_func : callable
        ``score_func(labels_a, labels_b) -> float`` clustering metric.
    n_samples : int
        Number of samples in each random labeling.
    n_clusters_range : iterable of int
        Cluster counts to evaluate.
    fixed_n_classes : int or None
        If given, ``labels_a`` is drawn once with this many classes and
        reused for every run; otherwise both labelings are redrawn.
    n_runs : int
        Number of random repetitions per cluster count.
    seed : int
        Seed for the local random number generator.

    Returns
    -------
    scores : ndarray of shape (len(n_clusters_range), n_runs)
    """
    rng = np.random.RandomState(seed)
    # randint samples from the half-open range [low, high), so high=k yields
    # labels 0..k-1.  This replaces the deprecated RandomState.random_integers
    # (inclusive range), which internally called randint(low, high + 1) —
    # the sampled values for a given seed are identical.
    scores = np.zeros((len(n_clusters_range), n_runs))

    if fixed_n_classes is not None:
        labels_a = rng.randint(low=0, high=fixed_n_classes, size=n_samples)

    for i, k in enumerate(n_clusters_range):
        for j in range(n_runs):
            if fixed_n_classes is None:
                labels_a = rng.randint(low=0, high=k, size=n_samples)
            labels_b = rng.randint(low=0, high=k, size=n_samples)
            scores[i, j] = score_func(labels_a, labels_b)
    return scores
# The metrics compared: two chance-adjusted (ARI, AMI) and two raw ones.
score_funcs = [
    metrics.adjusted_rand_score,
    metrics.v_measure_score,
    metrics.adjusted_mutual_info_score,
    metrics.mutual_info_score,
]

# 2 independent random clusterings with equal cluster number

n_samples = 100
# BUG FIX: np.int is a removed alias of the builtin int (NumPy >= 1.24);
# use int directly.
n_clusters_range = np.linspace(2, n_samples, 10).astype(int)

pl.figure(1)

plots = []
names = []
for score_func in score_funcs:
    print("Computing %s for %d values of n_clusters and n_samples=%d"
          % (score_func.__name__, len(n_clusters_range), n_samples))

    t0 = time()
    scores = uniform_labelings_scores(score_func, n_samples, n_clusters_range)
    print("done in %0.3fs" % (time() - t0))
    # Plot the median score with the per-row standard deviation as error bars.
    plots.append(pl.errorbar(
        n_clusters_range, np.median(scores, axis=1), scores.std(axis=1))[0])
    names.append(score_func.__name__)

pl.title("Clustering measures for 2 random uniform labelings\n"
         "with equal number of clusters")
pl.xlabel('Number of clusters (Number of samples is fixed to %d)' % n_samples)
pl.ylabel('Score value')
pl.legend(plots, names)
pl.ylim(ymin=-0.05, ymax=1.05)
# Random labeling with varying n_clusters against ground class labels
# with fixed number of clusters

n_samples = 1000
# BUG FIX: np.int is a removed alias of the builtin int (NumPy >= 1.24);
# use int directly.
n_clusters_range = np.linspace(2, 100, 10).astype(int)
n_classes = 10

pl.figure(2)

plots = []
names = []
for score_func in score_funcs:
    print("Computing %s for %d values of n_clusters and n_samples=%d"
          % (score_func.__name__, len(n_clusters_range), n_samples))

    t0 = time()
    # labels_a is held fixed (the "ground truth" with n_classes classes).
    scores = uniform_labelings_scores(score_func, n_samples, n_clusters_range,
                                      fixed_n_classes=n_classes)
    print("done in %0.3fs" % (time() - t0))
    plots.append(pl.errorbar(
        n_clusters_range, scores.mean(axis=1), scores.std(axis=1))[0])
    names.append(score_func.__name__)

pl.title("Clustering measures for random uniform labeling\n"
         "against reference assignement with %d classes" % n_classes)
pl.xlabel('Number of clusters (Number of samples is fixed to %d)' % n_samples)
pl.ylabel('Score value')
pl.ylim(ymin=-0.05, ymax=1.05)
pl.legend(plots, names)
pl.show()
|
{
"content_hash": "33e469fea03b70c13844a79e29ee7437",
"timestamp": "",
"source": "github",
"line_count": 123,
"max_line_length": 78,
"avg_line_length": 35.16260162601626,
"alnum_prop": 0.67121387283237,
"repo_name": "maxlikely/scikit-learn",
"id": "ef78b9f6fb06e0daf18ca8c55435fce1523b369e",
"size": "4325",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "examples/cluster/plot_adjusted_for_chance_measures.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
import os
import sys
def cur_file_dir():
    """Return the directory containing the running script.

    ``sys.path[0]`` is the script's directory when run as a plain script,
    but the compiled executable's file path under py2exe — handle both.
    Returns None if ``sys.path[0]`` is neither a directory nor a file.
    """
    location = sys.path[0]
    if os.path.isdir(location):
        return location
    if os.path.isfile(location):
        return os.path.dirname(location)
# Template file used to generate the version-code source file.
_code_template = os.path.join(cur_file_dir(), "config-code-template")
JAVA_VERSION_CODE_FORMAT = open(_code_template, "r").read()

# SVN repositories to depend on; an update requires bumping the SDK version.
DEPENDS_SVN = {}

# Linekong's in-house svn configuration (translation of the string below).
"""
蓝港自主的svn配置
"""
LK_SDK_SVN = "eUsdk/branches/china-linekong/0.1.2/Native_SDK"
LK_SDK_DIR = None
eSdk_DEPENDS_SVN = [
                    LK_SDK_SVN,
                    "eUsdk/branches/common/LK_AntiRobot",
                    "eUsdk/branches/china-linekong/0.1.2/eBilling_v2",
                    "eUsdk/branches/china-linekong/0.1.2/LK_SDK_RES"
                   ]

DEPENDS_SVN ["svn://192.168.41.231:7654/repos_esuite"] = [
        eSdk_DEPENDS_SVN,   # svn to monitor, plus the svns it depends on
        LK_SDK_DIR,         # directory location of the monitored svn
        "com.linekong.sdk.util"  # package name for the version file
    ]
# ========================= 读取配置文件 =========================
import xml.etree.ElementTree as ET
from xml.dom import minidom
def parse_project(root):
    """Parse <project> elements under *root* (Python 2 / minidom).

    Each project dict carries: url, dir (optional checkout dir), package
    (the Versions.java package name, required), and depends (the project's
    own url plus every <depends-svn>/<url> entry).
    """
    projects = []
    for element in root.getElementsByTagName("project"):
        element_url = element.getAttribute("url")

        element_dir = None
        dir_tags = element.getElementsByTagName("dir")
        if len(dir_tags) > 0:
            try:
                element_dir = dir_tags[0].childNodes[0].nodeValue
            except Exception, e:
                # <dir> present but empty — treat as unset.
                pass

        # Get the package name for Versions.java; abort if missing.
        element_version_package = None
        tags = element.getElementsByTagName("name")
        if len(tags) > 0:
            try:
                element_version_package = tags[0].childNodes[0].nodeValue
            except Exception, e:
                pass
        else:
            print "没有配置Versions.java的包名!"
            exit()

        # The project itself is always the first dependency entry.
        depend_svns = [element_url]
        tags = element.getElementsByTagName("depends-svn")
        for tag in tags:
            url_tags = tag.getElementsByTagName("url")
            for url_tag in url_tags:
                try:
                    depend_svns.append(url_tag.childNodes[0].nodeValue)
                except:
                    # Malformed/empty <url> entries are skipped silently.
                    pass

        print element_url
        print element_dir
        print element_version_package
        print depend_svns

        project = {}
        project["url"] = element_url
        project["dir"] = element_dir
        project["package"] = element_version_package
        project["depends"] = depend_svns
        projects.append(project)
    return projects
import svn_common
# Load the dependency map from the XML config next to this script.
svn_config_file = os.path.join(svn_common.cur_file_dir(), "config-svn-depends.xml")
xmldoc = minidom.parse(svn_config_file)
firstNode = xmldoc.documentElement
pNode = firstNode
pNode.getElementsByTagName("svn-root")
# One DEPENDS_SVN entry per <svn-root>, keyed by its url attribute.
for element in pNode.getElementsByTagName("svn-root"):
    element_url = element.getAttribute("url")
    DEPENDS_SVN[element_url] = parse_project(element)

print DEPENDS_SVN
|
{
"content_hash": "7f66ea913ba677550fef122a66979a4d",
"timestamp": "",
"source": "github",
"line_count": 106,
"max_line_length": 83,
"avg_line_length": 28.028301886792452,
"alnum_prop": 0.6095590710198586,
"repo_name": "jiangerji/svn-tools",
"id": "f577db97fd52ec019dab9b56c43b861c2468fd2c",
"size": "3280",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/config.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "9043"
}
],
"symlink_target": ""
}
|
import threading
import time
import serial
from PyQt4 import QtCore
from tradutor import Tradutor
class ReaderThread(QtCore.QThread):
    """Background thread that streams a translated text to a Braille
    display over a serial port, updating the GUI as it goes."""

    def __init__(self, texto, config):
        """Translate *texto* and open the serial port; on failure, report
        through the config's status bar signal."""
        QtCore.QThread.__init__(self)
        self.config = config
        self.texto = texto
        self.lista = texto.split()
        self.tr = Tradutor(texto)
        self.porta = '/dev/ttyUSB1'
        try:
            self.com = serial.Serial(self.porta, 19200, timeout=1)
        except serial.SerialException:
            self.config.statusBar.emit(QtCore.SIGNAL('update(QString)'), 'Não foi possível abrir a porta ' + self.porta)

    def EnviarLetra(self, letra):
        """Send one Braille cell: a timing byte (high bit set plus the cell
        duration) followed by the cell pattern parsed as binary."""
        tempo = (1 << 7) + self.config.tempo_celula
        self.com.write(bytes([tempo, int(letra, 2)]))

    def MontarBraille(self, palavras):
        """Build self.braille: the full text as Braille characters, words
        separated by the Braille blank (U+2800)."""
        self.braille = ''
        for palavra in palavras:
            for letra in palavra['letras']:
                self.braille += letra[1]
            self.braille += '⠀'

    def AtualizarTexto(self, indice):
        """Re-render the source text as rich HTML with the word at *indice*
        highlighted in bold blue."""
        novo = """<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd">
<html><head><meta name="qrichtext" content="1" /><style type="text/css">
p, li { white-space: pre-wrap; }
</style></head><body><p>"""
        novo += ' '.join( self.lista[:indice] )
        novo += '<span style=" font-weight:600; color:#0000c0;"> '
        novo += self.lista[indice] + '</span> '
        novo += ' '.join( self.lista[indice + 1:] ) + "</p></body></html>"
        self.config.AtualizarDestaqueTexto(novo)

    def AtualizarBraille(self, indice):
        """Render the Braille string with the cell at *indice* highlighted.
        Long strings are windowed to roughly 18 cells each side."""
        antes = self.braille[:indice]
        depois = self.braille[indice + 1:]
        if len(self.braille) > 38:
            ia = indice - 18
            if ia < 0:
                ia = 0
            antes = self.braille[ia:indice]
            # NOTE(review): `id` shadows the builtin; also when indice < 18
            # the window is widened on the right to keep ~37 cells visible.
            id = indice + 18
            if indice < 18:
                id += abs(indice - 18)
            depois = self.braille[indice + 1:id]
        novo = '<html><head/><body>'  # e.g. <span style="font-size:16pt;">⠁ ⠉⠑⠓⠑</span></p></body></html>
        novo += antes
        novo += '<span style="font-size:16pt; color:#0000c0;">'
        novo += self.braille[indice] + '</span>'
        novo += depois
        self.config.AtualizarTextoBraille(novo)

    def stop(self):
        # Asks run() to finish after the current cell.
        self.ativo = False

    def run(self):
        """Main loop: send each letter's cell(s), updating GUI highlights
        and LEDs, pausing per cell/word/sentence as configured."""
        if not hasattr(self, 'com'):
            print('Leitura abortada: não foi possível abrir a porta ' + self.porta)
            return
        self.ativo = True
        self.MontarBraille(self.tr.data['palavras'])
        indice_total = 0
        for ip, palavra in enumerate( self.tr.data['palavras'] ):
            self.AtualizarTexto(ip)
            lp = len(palavra['letras'])
            for i, letra in enumerate(palavra['letras']):
                print(letra)
                # A list in letra[2] means a multi-cell letter (e.g. prefix
                # signs); send each cell in sequence.
                if type(letra[2]) is list:
                    for l in letra[2]:
                        self.EnviarLetra(l)
                        self.AtualizarBraille(indice_total)
                        indice_total += 1
                        if i + 1 < lp:
                            proxima = palavra['letras'][i+1]
                        else:
                            proxima = [None, None, None, '']
                        time.sleep(self.config.tempo_celula / 50)
                else:
                    self.EnviarLetra(letra[2])
                    self.AtualizarBraille(indice_total)
                    indice_total += 1
                    if i + 1 < lp:
                        proxima = palavra['letras'][i+1]
                    else:
                        proxima = [None, None, None, '']
                self.config.AtualizarLeds(letra, proxima)
                time.sleep(self.config.tempo_celula / 50)
                if not self.ativo:
                    # stop() requested: close the port and bail out.
                    self.com.close()
                    return
            # Account for the word-separator blank cell.
            self.AtualizarBraille(indice_total)
            indice_total += 1
            # 'última' marks the last word of a sentence: blank the LEDs and
            # pause for the sentence interval, else the word interval.
            if palavra['última']:
                letra = [None, None, None, '']
                self.config.AtualizarLeds(letra, letra)
                time.sleep(self.config.tempo_sentencas / 50)
            else:
                time.sleep(self.config.tempo_palavras / 50 )
        self.com.close()
|
{
"content_hash": "9fc1b48cdefaed6732f1c3586adea743",
"timestamp": "",
"source": "github",
"line_count": 109,
"max_line_length": 111,
"avg_line_length": 31.862385321100916,
"alnum_prop": 0.6331701698819464,
"repo_name": "jimmyskull/Pirarucu",
"id": "da44381c974018b114c94fdfbc83160421b6b8b6",
"size": "3516",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "reader_thread.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "15753"
}
],
"symlink_target": ""
}
|
"""This module is deprecated. Please use `airflow.providers.apache.cassandra.hooks.cassandra`."""
import warnings
# pylint: disable=unused-import
from airflow.providers.apache.cassandra.hooks.cassandra import CassandraHook # noqa
warnings.warn(
"This module is deprecated. Please use `airflow.providers.apache.cassandra.hooks.cassandra`.",
DeprecationWarning, stacklevel=2
)
|
{
"content_hash": "dafe950b8830ec287481a6e7e338e8df",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 98,
"avg_line_length": 35.18181818181818,
"alnum_prop": 0.7855297157622739,
"repo_name": "Fokko/incubator-airflow",
"id": "fb43077fb59442286ca855f25f12a7c2a403b677",
"size": "1198",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "airflow/contrib/hooks/cassandra_hook.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "13715"
},
{
"name": "Dockerfile",
"bytes": "14170"
},
{
"name": "HTML",
"bytes": "145596"
},
{
"name": "JavaScript",
"bytes": "25233"
},
{
"name": "Mako",
"bytes": "1339"
},
{
"name": "Python",
"bytes": "8787104"
},
{
"name": "Shell",
"bytes": "187296"
},
{
"name": "TSQL",
"bytes": "879"
}
],
"symlink_target": ""
}
|
import random
import operator
import sys
from utils import *
# Judges stalling by the elite fitness mean (elite_fit_mean)
# `fitness_func` is treated as an "unfitness" measure while minimizing
def minimize_until_stall(fitness_func, stall_precision, max_stalled_in_row, options):
    """Run the GA until the elite fitness mean stalls (Python 2).

    Minimizes fitness_func: stops after the elite mean improves by less
    than stall_precision for max_stalled_in_row consecutive generations,
    or (if options['Max_Generations'] is set) after that many generations.
    Returns the result dict produced by _minimize_until_stall's inner step.
    """
    initial_population = _initial_population(options)
    print_the_info = _get_print_the_info(options)
    next_generation_func = next_generation(options)
    max_generations = options['Max_Generations']
    if max_generations:
        stop_func = lambda count: count == max_generations
    else:
        # No generation cap: run until stalled.
        stop_func = lambda count: False

    inner = _minimize_until_stall(fitness_func, stop_func, stall_precision, max_stalled_in_row,
                                  options, print_the_info)
    _stop_flag = False
    _population = initial_population
    _count = 0
    # Python 2 only: sys.maxint guarantees the first comparison improves.
    _last_elite_fit_mean = sys.maxint
    _stalled = 0
    while not _stop_flag:
        res = inner(_population, _count, _last_elite_fit_mean, _stalled)
        if res['success'] == None:
            # Not finished: breed the next generation and carry state forward.
            _population = next_generation_func(_count, res['result'])
            _count = _count + 1
            _last_elite_fit_mean = res['elite_fit_mean']
            _stalled = res['stalled']
        else:
            _stop_flag = True
    return res
def _minimize_until_stall(fitness_func, stop_func, stall_precision,
                          max_stalled_in_row, options, print_the_info):
    """Build the per-generation step function for minimize_until_stall.

    The returned inner(...) evaluates one generation and reports:
    success True (stalled long enough), False (generation cap reached),
    or None (keep going), plus bookkeeping fields.  Python 2 (tuple
    parameters in lambdas).
    """
    def inner(generation, count, last_elite_fit_mean, stalled_count):
        fit_of_gen = [ (ex, fitness_func(ex)) for ex in generation ]
        # NOTE(review): fit_of_gen is not sorted here; the head N_Elite
        # entries are only the true elite because next_generation() emits
        # the sorted elite first — confirm for the initial population.
        elite = fit_of_gen[:options['N_Elite']]
        elite_fit_mean = mean(map(lambda (_,f): f, elite))
        print_the_info(fit_of_gen, count, options, elite_fit_mean)
        if last_elite_fit_mean - elite_fit_mean <= stall_precision:
            stalled = stalled_count + 1
        else:
            stalled = 0
        # NOTE(review): 'best' uses max() although we are minimizing —
        # looks like it should be min(); verify intent.
        if stalled == max_stalled_in_row:
            return { 'success': True,
                     'result' : elite,
                     'count'  : count,
                     'best'   : max(elite, key = lambda (_,f): f),
                     'elite_fit_mean': elite_fit_mean
                   }
        elif stop_func(count):
            return { 'success': False,
                     'result' : fit_of_gen,
                     'count'  : count,
                     'best'   : max(elite, key = lambda (_,f): f),
                     'elite_fit_mean': elite_fit_mean
                   }
        else:
            return { 'success': None,
                     'result' : fit_of_gen,
                     'stalled': stalled,
                     'elite_fit_mean': elite_fit_mean
                   }
    return inner
def print_intil_stall_result(res):
    """Pretty-print a minimize_until_stall result dict (Python 2 prints)."""
    print 'Success' if res['success'] else 'Maximum Generations Reached'
    print 'Elite fitness mean: ' + str(res['elite_fit_mean'])
    print 'Generations constructed: ' + str(res['count'])
    best, best_fitness = res['best']
    print "Best: " + str(best)
    print "  with fitness " + str(best_fitness)
def minimize_to_target(fitness_func, target_fitness, options, n_retry=0):
initial_population = _initial_population(options)
print_the_info = _get_print_the_info(options)
next_generation_func = next_generation(options)
max_generations = options['Max_Generations']
target_fit_func = lambda f: f <= target_fitness
stop_func = lambda count: count == max_generations
_stop_flag = False
_population = initial_population
_count = 0
while not _stop_flag:
res = _minimize_to_target_inner(_population, _count, fitness_func, stop_func, target_fit_func)
if res['success'] == None:
_res = res['result']
_population = next_generation_func(_count, _res)
_count = _count + 1
print_the_info(_res, _count, options)
elif res['success'] == False:
_stop_flag = True
if n_retry > 0:
print "\nRetrying... " + str(n_retry-1) + " tries left.\n"
res = minimize(fitness_func, target_fitness, max_iter, options, n_retry-1)
else:
_stop_flag = True
return res
def _minimize_to_target_inner(generation, count, fitness_func, stop_func, target_fitness_func):
    """Evaluate one generation for minimize_to_target (Python 2).

    Returns success True with the individuals meeting the target, False
    when the stop condition fired, or None (with all fitness pairs) when
    evolution should continue.
    """
    fit_of_gen = [ (ex, fitness_func(ex)) for ex in generation ]
    # Individuals already at or below the target fitness.
    fit = filter(lambda (ex, f): target_fitness_func(f), fit_of_gen)
    if len(fit) > 0:
        return { 'success': True,
                 'result' : fit,
                 'count'  : count
               }
    elif stop_func(count):
        return { 'success': False,
                 'result' : fit_of_gen,
                 'count'  : count
               }
    else:
        return { 'success': None,
                 'result' : fit_of_gen
               }
def _get_print_the_info(options):
    """Select the per-iteration reporter: the real printer when
    Print_Info_Each is truthy, otherwise a no-op."""
    if options['Print_Info_Each']:
        return _print_the_info
    return _nothing
def _print_the_info(gen_with_fit, count, options, elite_fit_mean = None):
    """Print progress every Print_Info_Each iterations (Python 2).

    Reports overall and elite fitness means plus the head individual;
    elite_fit_mean may be passed in to avoid recomputation.
    """
    if count % options['Print_Info_Each'] == 0:
        best = head(gen_with_fit)
        elite = gen_with_fit[:options['N_Elite']]
        fit_mean = mean(map(lambda (_,f): f, gen_with_fit))
        # Falsy 0.0 would also trigger recomputation here; harmless since
        # the recomputed value is identical.
        elite_fit_mean = elite_fit_mean or mean(map(lambda (_,f): f, elite))
        print 'Iteration ' + str(count)
        print '\t fitness mean: ' + str(fit_mean)
        print '\t elite fitness mean: ' + str(elite_fit_mean)
        print "\t best: " + str(best)
def _nothing(x): return
def _initial_population(options):
return [ [random.gauss(0, 1) for _ in range(options['Genes'])]
for _ in range(options['Population'])
]
def default_options(population, genes_count, **options):
    """Assemble the option dict that drives the GA.

    Defaults target `population` individuals of `genes_count` genes each;
    any keyword in **options overrides the corresponding default.
    """
    defaults = dict(
        Population=population,
        Genes=genes_count,
        Max_Generations=None,
        N_Elite=population / 100,
        Crossover_Fraction=0.5,
        Crossover=xover_simple_between_best,
        Crossover_Mutate_Chance=0.5,
        Crossover_Mutate_Preserve=True,
        Mutate=mutate_simple_random,
        Gene_Mutation_Chance=0.2,
        Print_Info_Each=10,
        Mutate_Stdev=1,
        # Mutate_Stdev should shrink to (Mutate_Stdev * Mutate_Shrink_Stdev)
        # by Max_Generations; 0 <= shrink <= 1.  1 keeps Mutate_Stdev constant.
        Mutate_Shrink_Stdev=1,
    )
    defaults.update(options)
    return defaults
# inspired by Matlab's globalOptimization: elite, xover, mutate
# All the elite go to the next generation;
# The `crossover_fraction` of the best parents are chosen for crossover;
# The rest is mutated
def next_generation(options):
    """Build the breeding function (Python 2; Matlab-GA style).

    All elite pass through unchanged; Crossover_Fraction of the best
    parents are crossed over (optionally also mutated); the remainder is
    mutated.  Returns func(generation, parents_with_fit) -> new population.
    NOTE(review): the first argument of func is the generation *count*
    (callers pass _count), not a population — confirm and consider renaming.
    """
    population_size = options['Population']
    elite_count = options['N_Elite']
    crossover_fraction = options['Crossover_Fraction']
    crossover_func = options['Crossover']
    mutate_func = options['Mutate']
    cm_ch = options['Crossover_Mutate_Chance']
    cm_preserve = options['Crossover_Mutate_Preserve']

    xover_count = int(crossover_fraction * population_size) - elite_count
    xover_last_index = elite_count+xover_count
    # case not cm_preserve: elite kept twice (as-is and mutated), hence 2*.
    default_mutate_count = population_size - xover_count - 2*elite_count

    def func(generation, parents_with_fit):
        # Sort ascending by fitness (minimizing): best come first.
        parents_with_fit.sort(key = lambda (_, f): f)
        parents = map(lambda (p,_): p, parents_with_fit)
        elite = parents[0:elite_count]
        xover = parents[elite_count:xover_last_index]
        xover_children = crossover_func(options, xover)
        mutated_elite = mutate_func(generation, options, elite)
        if cm_preserve:
            # Keep crossover children AND add mutated copies of a random subset.
            xover_2_mutate = filter(lambda _: random.random() < cm_ch, xover_children)
            mutated_xover = mutate_func(generation, options, xover_2_mutate)
            # case cm_preserve: shrink the plain-mutation pool accordingly.
            mutate_count = default_mutate_count - len(mutated_xover)
            resulting_xover = xover_children + mutated_xover
        else:
            # Mutate crossover children in place (with probability cm_ch each).
            mutate_count = default_mutate_count
            resulting_xover = [ mutate_func(generation, options, [ch])[0] if random.random() < cm_ch else ch
                                for ch in xover_children
                              ]
        rest = parents[xover_last_index:xover_last_index+mutate_count]
        mutated_rest = mutate_func(generation, options, rest)
        mutated = mutated_elite + mutated_rest
        return elite + resulting_xover + mutated
    return func
def xover_simple_between_best(options, parents):
    # Single-point crossover over consecutive parent pairs.  `even`, `last`
    # and `but_last` are helpers defined elsewhere in this module.
    genes_count = options['Genes']
    if even(len(parents)):
        one_more = None
        n = len(parents) - 1
    else:
        one_more = last(parents)
        n = len(parents)
    # NOTE(review): if `even` tests ordinary evenness, then for an ODD parent
    # count the pairing loop below runs range(0, len(parents), 2) and its
    # last iteration reads parents[len(parents)] -> IndexError.  The two
    # branches look swapped (or `even` actually tests oddness) -- verify
    # against the definition of `even`.
    def xover_func(p1, p2):
        # split both parents at the gene midpoint and swap tails
        n = genes_count / 2  # Python 2 integer division
        a1 = p1[:n]
        b1 = p1[n:]
        a2 = p2[:n]
        b2 = p2[n:]
        ch1 = a1 + b2
        ch2 = a2 + b1
        return [ch1, ch2]
    children_ = [ xover_func(parents[i], parents[i+1]) for i in range(0, n, 2) ]
    children = reduce(operator.add, children_)  # flatten the list of pairs
    if one_more:
        # leftover parent from an odd-sized group: cross it with a random
        # earlier parent
        children = children + xover_func(one_more, random.choice(but_last(parents)))
    return children
def mutate_simple_random(count, options, parents):
    # Gaussian point mutation: each gene mutates with probability
    # Gene_Mutation_Chance; the mutation stdev optionally shrinks linearly
    # with the generation number `count`.
    mutate_chance = options['Gene_Mutation_Chance']
    mutate_stdev = options['Mutate_Stdev']
    mutate_shrink = options['Mutate_Shrink_Stdev']
    max_generations = options['Max_Generations']
    if mutate_shrink == 1:
        # shrink factor of 1 means the stdev stays constant
        shrink = 0
    elif max_generations:
        # linear decay of the stdev with the generation count.
        # NOTE(review): at count == max_generations this yields an effective
        # stdev of stdev * (1 - shrink_factor), while the comment in
        # default_options promises stdev * shrink_factor -- verify which
        # behaviour is intended.
        shrink = count * mutate_stdev * mutate_shrink / max_generations
    else:
        print "WARNING: Mutate_Shrink_Stdev cannot function without Max_Generations set."
        shrink = 0
    # add N(0, stdev - shrink) noise to a single gene
    mutate_chromosome = lambda chrom: chrom + random.gauss(0, mutate_stdev - shrink)
    mutated = [ [ mutate_chromosome(c) if random.random() < mutate_chance else c for c in p ]
                for p in parents
              ]
    return mutated
|
{
"content_hash": "8a621f82619b9310edf924d5355596bf",
"timestamp": "",
"source": "github",
"line_count": 314,
"max_line_length": 108,
"avg_line_length": 33.382165605095544,
"alnum_prop": 0.5496088532722763,
"repo_name": "fehu/min-dat--lin-regression",
"id": "7a82a62f77d5a174dff74ee8f8e095b7e0773429",
"size": "10483",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/ga.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "18518"
}
],
"symlink_target": ""
}
|
import os
import re
import cx_Oracle
class OracleHomeError(EnvironmentError):
    """Raised when the ORACLE_HOME environment variable is not usable."""
class FileNotFoundError(StandardError):
    """Raised when tnsnames.ora cannot be found under ORACLE_HOME."""
    # NOTE(review): StandardError is Python 2 only, and this class shadows
    # the Python 3 builtin of the same name -- confirm before any py3 port.
    pass
class SIDNotFound(StandardError):
    """Raised when the requested service identifier is not in tnsnames.ora."""
    # NOTE(review): StandardError is Python 2 only.
    pass
class TnsOra(object):
    """Parser for Oracle's tnsnames.ora file.

    On construction it locates $ORACLE_HOME/network/admin/tnsnames.ora and
    builds a mapping of service name -> connect-descriptor text.
    """

    # location of tnsnames.ora relative to ORACLE_HOME
    TNSNAMES_PATH = '/network/admin/tnsnames.ora'

    def __init__(self):
        self.services = {}
        self.initialize()

    def initialize(self):
        """
        Find tnsnames.ora and populate self.services.

        Raises:
            OracleHomeError: ORACLE_HOME is unset or empty.
            FileNotFoundError: tnsnames.ora does not exist.
        """
        # os.environ.get avoids a KeyError when the variable is missing, so
        # an unset ORACLE_HOME raises OracleHomeError as intended.
        if not os.environ.get('ORACLE_HOME'):
            raise OracleHomeError()
        tnsnames = os.environ['ORACLE_HOME'] + TnsOra.TNSNAMES_PATH
        if not os.path.exists(tnsnames):
            raise FileNotFoundError()
        # Flag that a service description has started
        service_begin = False
        # accumulated description lines for the current service
        service = list()
        service_name = ''
        # `with` guarantees the file is closed even if parsing fails
        with open(tnsnames, 'r') as f:
            for line in f.readlines():
                # Skip comments and blank lines
                if re.match(r'^#', line) or re.match('\n', line):
                    continue
                line = line.replace('\n', '')
                # a line of the form "NAME =" starts a new service description
                result = re.match(r'\s*(\w+)\s*=', line)
                if result:
                    service_begin = True
                    # flush the previous service description, if any
                    if len(service) > 0:
                        self.services[service_name] = ''.join(service)
                        service = []
                    # save service name
                    service_name = result.group(1)
                # subsequent lines belong to the current description
                elif service_begin:
                    service.append(line)
        # flush the last service description
        if len(service) > 0:
            self.services[service_name] = ''.join(service)

    def get(self, sid):
        """
        Return the connect descriptor for *sid*.

        Parameter:
            sid - oracle service identifier

        Raises:
            SIDNotFound: no such service in tnsnames.ora.
        """
        # `in` replaces dict.has_key(), which was removed in Python 3
        if sid not in self.services:
            raise SIDNotFound()
        return self.services[sid]
class OraConnection(cx_Oracle.Connection):
    """Connection subclass that resolves the DSN through tnsnames.ora."""

    @staticmethod
    def connect(sid, login, password):
        """Open a connection to *sid*, authenticating as login/password."""
        descriptor = TnsOra().get(sid)
        return OraConnection("{0}/{1}@{2}".format(login, password, descriptor))
class DBA(object):
    """
    Class implements database access.

    `connection` is any object exposing cursor() -> cursor with
    execute()/fetchall()/close() (e.g. a cx_Oracle connection).
    """

    def __init__(self, connection):
        self.connection = connection

    def get_packages(self):
        """
        Retrieve all user's plsql packages (rows of USER_OBJECTS).
        """
        cursor = self.connection.cursor()
        # try/finally guarantees the cursor is released even if the query fails
        try:
            raw = cursor.execute("""
                select *
                from user_objects
                where object_type = 'PACKAGE'
            """)
            packages = raw.fetchall()
        finally:
            cursor.close()
        return packages

    def get_spec_source(self, package_name):
        """
        Retrieve a plsql package specification as a single string.
        """
        cursor = self.connection.cursor()
        try:
            # Bind-variable names are passed WITHOUT the leading colon;
            # a {':name': ...} key never binds and Oracle raises ORA-01008.
            # ORDER BY line is required: Oracle does not guarantee row order,
            # and source lines must be concatenated in order.
            raw = cursor.execute("""
                select text
                from user_source
                where name = :name
                and type = 'PACKAGE'
                order by line
            """, {'name': package_name})
            raw_source = raw.fetchall()
        finally:
            cursor.close()
        # each row is a 1-tuple holding one source line
        return ''.join(row[0] for row in raw_source)
|
{
"content_hash": "80a28c341b45eaf2863a50426655e5dc",
"timestamp": "",
"source": "github",
"line_count": 135,
"max_line_length": 86,
"avg_line_length": 26.05925925925926,
"alnum_prop": 0.5466173962478681,
"repo_name": "mshogin/django-plsql",
"id": "c8ef9a9ca185e49fae8f9b666b562036e1dc5c40",
"size": "3518",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "plsql/dbgate.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "28174"
}
],
"symlink_target": ""
}
|
import requests, urllib, urllib.request, re
import os, sys, time
from imgurpython import ImgurClient
counter = 0
def getPosts(subreddit, postLimit):
    """Fetch the newest posts of *subreddit* via reddit's public JSON endpoint.

    Returns the list of post wrappers, or None on an HTTP error.  Sleeps
    three seconds after each successful request to stay polite.
    """
    endpoint = 'http://www.reddit.com/r/{0}/.json?limit={1}'.format(subreddit, postLimit)
    response = requests.get(endpoint, headers={'User-Agent': 'Reddit Image Scraper 1.1'})
    if response.status_code != requests.codes.ok:
        print('Sorry, but there was an error retrieving the subreddit\'s data!')
        return None
    payload = response.json()
    print('Sleeping for 3 seconds...\n')
    time.sleep(3)
    return payload['data']['children']
def saveImages(posts, scoreLimit, save_dir='scraped_wallpapers'):
    """Dispatch each sufficiently-upvoted post to the image or album saver."""
    for entry in posts:
        data = entry['data']
        if data['score'] <= scoreLimit:
            continue
        link = data['url']
        if 'http://i.imgur.com' in link:
            saveImage(link, data['title'], save_dir)
        elif 'http://imgur.com/a/' in link:
            saveAlbum(link, save_dir)
def saveImage(url, title, save_dir):
    """Download one image unless a file with the same name already exists.

    Increments the module-level `counter` for every new download.
    """
    global counter
    target_dir = makeSaveDir(save_dir)
    safe_title = re.sub(r'\W+', '', title)
    ext_start = url.rfind('.')
    extension = url[ext_start: ext_start + 4]
    filename = (target_dir + safe_title.replace('/', ':') + extension).encode('utf-8')
    if os.path.exists(filename):
        return
    print('Saving ' + filename.decode('utf-8') + '!\n')
    counter += 1
    urllib.request.urlretrieve(url, filename)
def saveAlbum(url, save_dir):
    # Download every image of an imgur album into its own subdirectory.
    #Sign up for an imgur api here (Oauth with no callback) and paste your keys here
    client_id = 'REDACTED'
    client_secret = 'REDACTED'
    icounter = 0  # per-album image index used to build filenames
    client = ImgurClient(client_id, client_secret)
    # strip a trailing "/#0" page anchor from the album URL
    if '/#0' in url:
        url = url[:-3]
    album_id = url.split('/')[-1]
    # NOTE(review): this assumes album ids are exactly 5 characters whenever
    # an anchor remains; album_id.split('#')[0] would be more robust --
    # confirm against current imgur id lengths (5 or 7 chars).
    if '#' in album_id:
        album_id = album_id[:5]
    print(url)
    print(album_id)
    album_title = client.get_album(album_id).title
    if album_title is None:
        album_title = 'Untitled ' + album_id
    print(album_title)
    # sanitize the title for use as a directory name
    album_title = album_title.replace(' ', '')
    album_title = re.sub(r'\W+', '', album_title)
    save_dir = save_dir + '/' + album_title
    if not os.path.exists(save_dir):
        os.makedirs(save_dir)
    for image in client.get_album_images(album_id):
        icounter += 1
        title = album_title + '_' + str(icounter)
        saveImage(image.link, title, save_dir)
def makeSaveDir(dir):
    """Ensure *dir* exists and return it with a trailing slash.

    exist_ok=True makes creation race-free: a competing creator between an
    exists() check and makedirs() can no longer raise.
    (The parameter name shadows the builtin `dir`; kept for interface
    compatibility with existing callers.)
    """
    os.makedirs(dir, exist_ok=True)
    return dir + '/'
def downloadImagesFromReddit(subreddits, postLimit=100, scoreLimit=10):
    """Scrape every listed subreddit, then report the running total."""
    for name in subreddits:
        saveImages(getPosts(name, postLimit), scoreLimit, name.lower())
    print(str(counter) + ' images have been scraped!')
def main():
    """Entry point: scrape subreddits named on the command line, or a default."""
    args = sys.argv[1:]
    downloadImagesFromReddit(args if args else ['wallpapers'])
if __name__ == '__main__':
    # Run the scraper only when executed as a script, not on import.
    main()
|
{
"content_hash": "e8da5e6372ad4fcb653b3f5d544cbfc8",
"timestamp": "",
"source": "github",
"line_count": 102,
"max_line_length": 102,
"avg_line_length": 26.754901960784313,
"alnum_prop": 0.6625137412971784,
"repo_name": "stephencoetzee/personal-scripts",
"id": "eb7d5a2f44e1181e16943c2d21f966bf962f98e6",
"size": "2747",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Python/gettit.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "2747"
},
{
"name": "Shell",
"bytes": "4181"
}
],
"symlink_target": ""
}
|
from django.db import models
from django.contrib.auth.models import User
from django_mysql.models import JSONField, Model, ListCharField
# Create your models here.
class UserProfile(models.Model):
    """Per-user profile data linked one-to-one to Django's auth User."""
    user = models.OneToOneField(User, on_delete=models.CASCADE, primary_key=True,)
    # Birth year.  Integer-field defaults must be ints, not strings
    # (was default='1900').
    dateOfBirth = models.PositiveSmallIntegerField(default=1900)
    country = models.CharField(max_length=150)
    state = models.CharField(max_length=150)
    zipcode = models.CharField(max_length=10)
    # NOTE(review): field keeps the historical misspelling "adress";
    # renaming it would require a schema migration and break existing queries.
    adress = models.CharField(max_length=200, default='')
    games = ListCharField(
        base_field=models.CharField(max_length=100),
        max_length=3000,
        # NOTE(review): django_mysql ListCharField with a '' default --
        # confirm an empty-list default isn't intended here.
        default='',
    )
    # Integer default as an int (was the string '0').
    count = models.IntegerField(default=0)

    def __str__(self):
        return self.user.username
|
{
"content_hash": "4d84fe7d1bc12defe2befb539795addb",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 82,
"avg_line_length": 37.76190476190476,
"alnum_prop": 0.6973518284993695,
"repo_name": "ClovisDj/Playconnect4",
"id": "345e29e52b8882f20f93d1b1ffb23f4b2bbb4eac",
"size": "793",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "userprofile/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "7156"
},
{
"name": "HTML",
"bytes": "21647"
},
{
"name": "JavaScript",
"bytes": "18593"
},
{
"name": "Python",
"bytes": "37383"
}
],
"symlink_target": ""
}
|
import os
import logging
import clients
import textwrap
from persist import JsonObject
from telegram.ext import Updater, MessageHandler, CommandHandler, Filters
from webpreview import web_preview
class TelegramBot(object):
    """Telegram bot that forwards links posted in a chat to a subreddit.

    Chat-id -> subreddit mappings are persisted in chats.json via JsonObject.
    """

    def __init__(self, token, reddit):
        self.chats = JsonObject('chats.json')
        self.updater = Updater(token)
        self.reddit = reddit
        # status updates (member joined/left) and messages containing URLs
        self.updater.dispatcher.add_handler(MessageHandler(Filters.status_update, self._status))
        self.updater.dispatcher.add_handler(MessageHandler(Filters.entity('url'), self._url))
        # one CommandHandler per slash command, bound to the matching _method
        for c in 'start stop help post'.split():
            self.updater.dispatcher.add_handler(CommandHandler(c, getattr(self, '_' + c)))

    def _start(self, bot, update):
        """Reply with the welcome message."""
        update.message.reply_text(textwrap.dedent('''
            Hello. I will send the links on this chat to Reddit.
            Send /help for more information.
        ''').strip())

    def _help(self, bot, update):
        """Reply with the command reference."""
        text = textwrap.dedent('''
            This bot sends all links message in this chat to a subreddit you choose.
            Commands:
            /start : Show the welcome message.
            /help : This message.
            /post <sub> : Set the subreddit which to post links to.
            /post : Show which subreddit it is posting to.
            /stop : Stop sending links to reddit.
        ''').strip()
        update.message.reply_text(text)

    def _post(self, bot, update):
        """Set the target subreddit, or report the current one when no
        argument is given."""
        # lazy-load the persisted chat map on first use
        if not self.chats.data:
            self.chats.load()
        m = update.message
        params = m.text.split()
        if len(params) < 2:
            # no argument: report current configuration and usage
            subreddit = self.chats[update.message.chat.id]
            if subreddit:
                text = 'This bot is sending links to /r/' + subreddit
            else:
                text = 'This bot was not configured yet.'
            text += textwrap.dedent('''
                Please inform the subreddit alongside the command to send the links
                posted on this chat to {} subreddit.
                e.g: /post pics
            '''.format('another' if subreddit else 'a'))
            m.reply_text(text)
            return
        subreddit = params[1]
        if not self.reddit.exists(subreddit):
            m.reply_text('The subreddit "{}" doesn\'t exist!'.format(subreddit))
            return
        self.chats[m.chat.id] = subreddit
        self.chats.save()
        m.reply_text('All links from "{}" will be posted to {}'.format(
            m.chat.title or m.chat.username, subreddit))

    def _url(self, bot, update):
        """Submit a posted link to the configured subreddit (best effort)."""
        url = update.message.text
        try:
            title, description, image = web_preview(url)
            subreddit = self.chats[update.message.chat.id]
            self.reddit.submit(subreddit, title, url)
        except Exception as e:
            # Deliberately best-effort: preview/lookup/submit failures are
            # swallowed so the bot never spams the chat with errors.
            # update.message.reply_text(str(e))
            pass

    def _stop(self, bot, update):
        """Forget this chat's subreddit mapping."""
        m = update.message
        m.reply_text('This bot will no longer post links to reddit.')
        del self.chats[m.chat.id]
        self.chats.save()

    def _status(self, bot, update):
        """React to this bot being added to or removed from a chat."""
        m = update.message
        if hasattr(m, 'new_chat_member') and m.new_chat_member.id == bot.id:
            self._start(bot, update)
        # Bug fix: the original read m.new_chat_member.id here (despite
        # testing for left_chat_member) and called the nonexistent
        # self._finish; _stop is the cleanup handler this class defines.
        if hasattr(m, 'left_chat_member') and m.left_chat_member.id == bot.id:
            self._stop(bot, update)

    def run(self):
        """Start polling Telegram and block until interrupted."""
        self.updater.start_polling()
        self.updater.idle()
|
{
"content_hash": "7ee7d2d8276fc12c164763d96756058b",
"timestamp": "",
"source": "github",
"line_count": 98,
"max_line_length": 96,
"avg_line_length": 35.45918367346939,
"alnum_prop": 0.582158273381295,
"repo_name": "diegor2/redditbot",
"id": "8324a7f20972cf0e9fe10c6dfd470afa2dfc39c7",
"size": "3475",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/clients/telegram.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "205"
},
{
"name": "Python",
"bytes": "6573"
}
],
"symlink_target": ""
}
|
'''
Beacon to monitor statistics from ethernet adapters
.. versionadded:: 2015.5.0
'''
# Import Python libs
from __future__ import absolute_import
import logging
# Import third party libs
# pylint: disable=import-error
try:
import salt.utils.psutil_compat as psutil
HAS_PSUTIL = True
except ImportError:
HAS_PSUTIL = False
# pylint: enable=import-error
log = logging.getLogger(__name__)
__virtualname__ = 'network_info'
# Names of the psutil per-NIC counter fields this beacon can threshold on.
__attrs = ['bytes_sent', 'bytes_recv', 'packets_sent',
           'packets_recv', 'errin', 'errout',
           'dropin', 'dropout']
def _to_list(obj):
    '''
    Convert snetinfo object to a plain dict of its counter fields.

    (The name is historical; the return value is a dict, not a list.)
    '''
    # getattr works for any attribute access, including objects without a
    # per-instance __dict__ (e.g. namedtuples on modern Python), where the
    # previous obj.__dict__[attr] lookup breaks.
    return dict((attr, getattr(obj, attr)) for attr in __attrs)
def __virtual__():
    '''Expose the beacon only when psutil could be imported.'''
    if HAS_PSUTIL:
        return __virtualname__
    return (False, 'cannot load network_info beacon: psutil not available')
def validate(config):
    '''
    Validate the beacon configuration.

    Expects a dict mapping interface names to dicts whose keys come from
    the recognised item list; returns (ok, message).
    '''
    valid_items = [
        'type', 'bytes_sent', 'bytes_recv', 'packets_sent',
        'packets_recv', 'errin', 'errout', 'dropin',
        'dropout'
    ]
    if not isinstance(config, dict):
        return False, ('Configuration for load beacon must be a dictionary.')
    for name in config:
        item_config = config[name]
        if not isinstance(item_config, dict):
            return False, ('Configuration for load beacon must '
                           'be a dictionary of dictionaries.')
        # at least one recognised key must be present per interface
        if not any(key in valid_items for key in item_config):
            return False, ('Invalid configuration item in '
                           'Beacon configuration.')
    return True, 'Valid beacon configuration'
def beacon(config):
    '''
    Emit the network statistics of this host.

    For every configured interface, each configured counter is compared to
    its threshold; one event is emitted per interface that matches.  With
    ``type: equal`` (or no ``type`` at all) a counter matches when it equals
    the configured value; with ``type: greater`` when it exceeds it.

    .. code-block:: yaml

        beacons:
          network_info:
            eth0:
              - type: equal
              - bytes_sent: 100000
              - bytes_recv: 100000
              - packets_sent: 100000
              - packets_recv: 100000
              - errin: 100
              - errout: 100
              - dropin: 100
              - dropout: 100

    .. code-block:: yaml

        beacons:
          network_info:
            eth0:
              - type: greater
              - bytes_sent: 100000
              - bytes_recv: 100000
              - packets_sent: 100000
              - packets_recv: 100000
              - errin: 100
              - errout: 100
              - dropin: 100
              - dropout: 100
    '''
    events = []
    stats = psutil.net_io_counters(pernic=True)
    for interface in config:
        if interface not in stats:
            continue
        if_stats = stats[interface]
        mode = config[interface].get('type')
        triggered = False
        for attr in __attrs:
            if attr not in config[interface]:
                continue
            threshold = int(config[interface][attr])
            current = if_stats.__dict__[attr]
            if mode == 'greater':
                if current > threshold:
                    triggered = True
            else:
                # 'equal', a missing type, and any unrecognised type all
                # compare for equality (matches the original branch order)
                if current == threshold:
                    triggered = True
        if triggered:
            events.append({'interface': interface,
                           'network_info': _to_list(if_stats)})
    return events
|
{
"content_hash": "4ad5cca8c17c3fd21cc466f50b7ed101",
"timestamp": "",
"source": "github",
"line_count": 141,
"max_line_length": 96,
"avg_line_length": 28.02836879432624,
"alnum_prop": 0.5192307692307693,
"repo_name": "stephane-martin/salt-debian-packaging",
"id": "fb497cc344385116e1d74ad949d4198a461ebe7b",
"size": "3976",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "salt-2016.3.2/salt/beacons/network_info.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "13798"
},
{
"name": "C",
"bytes": "986"
},
{
"name": "Groff",
"bytes": "13634346"
},
{
"name": "HTML",
"bytes": "39558"
},
{
"name": "Makefile",
"bytes": "20902"
},
{
"name": "NSIS",
"bytes": "22316"
},
{
"name": "PowerShell",
"bytes": "38719"
},
{
"name": "Python",
"bytes": "40857506"
},
{
"name": "SaltStack",
"bytes": "58278"
},
{
"name": "Scheme",
"bytes": "1790"
},
{
"name": "Shell",
"bytes": "829927"
},
{
"name": "Tcl",
"bytes": "6532"
},
{
"name": "TeX",
"bytes": "11632"
}
],
"symlink_target": ""
}
|
import unittest
import random
from time import sleep
import os
from bingmaps import *
class BingMapsTestError(Exception):
    """Bing Maps test exception"""
    def __init__(self, reason):
        # `unicode` is Python 2 only; this module predates py3.
        self.reason = unicode(reason)
    def __str__(self):
        return self.reason
# TODO: enter your key for testing
api_key = ''
class DirectionsTests(unittest.TestCase):
    """Integration tests for BingMapsAPI.routes (driving and walking).

    NOTE(review): these talk to the live Bing Maps service; they require a
    valid `api_key` (empty by default in this module) and network access,
    so the assertions depend on the remote API's current response format.
    """
    def setUp(self):
        # fresh API client per test
        self.api = BingMapsAPI(api_key=api_key)
    def testBasicNav(self):
        # Default (driving) route between two San Francisco points.
        # start - 717 Market St
        # end - Ferry Plaza, San Francisco, CA
        # we shrunk the precision to match return values for easier comparison
        start_lat = "37.786861"
        start_lon = "-122.403689"
        end_lat = "37.795556"
        end_lon = "-122.392124"
        start = start_lat+","+start_lon
        end = end_lat+","+end_lon
        ret = self.api.routes(waypoints=[start, end])
        # verify start and end points are reflected in response
        self.assertNotEqual(ret, {})
        estimated_total = ret['resourceSets'][0]['estimatedTotal']
        self.assertEqual(estimated_total, 1)
        routeLegs = ret['resourceSets'][0]['resources'][0]['routeLegs']
        self.assertEqual(len(routeLegs), 1)
        itinerary_items = routeLegs[0]['itineraryItems']
        self.assertNotEqual(itinerary_items, [])
        # skip the last step, as it doesn't have a transport Mode
        # NOTE(review): the loop actually checks every item, including the
        # last -- confirm whether the final step carries a 'mode' field.
        for i in itinerary_items:
            self.assertEqual(i['details'][0]['mode'], 'Driving')
    def testPedestrianNav(self):
        # Walking route between the same two points (full precision here).
        start_lat = "37.7868609332517"
        start_lon = "-122.403689949149"
        end_lat = "37.795556930015"
        end_lon = "-122.392124051039"
        start = start_lat+","+start_lon
        end = end_lat+","+end_lon
        ret = self.api.routes(waypoints=[start,end], travelMode='Walking')
        self.assertNotEqual(ret, {})
        legs = ret['resourceSets'][0]['resources'][0]['routeLegs']
        self.assertNotEqual(legs, [])
        legs = legs[0]
        itinerary_items = legs['itineraryItems']
        self.assertNotEqual(itinerary_items, [])
        # skip the last step, as it doesn't have a transport Mode
        for i in itinerary_items:
            self.assertEqual(i['details'][0]['mode'], 'Walking')
if __name__ == '__main__':
    # Run the test suite only when executed directly.
    unittest.main()
|
{
"content_hash": "94eeb404fe7533f81ad8f529c2b9e039",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 78,
"avg_line_length": 29.82716049382716,
"alnum_prop": 0.5802980132450332,
"repo_name": "ckelly/pybingmaps",
"id": "40ac4ec777b7bc387be14a996d46bdf5f0da5291",
"size": "2416",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "12832"
}
],
"symlink_target": ""
}
|
"""The model definition for scores."""
from django.db import models
from django.contrib.auth.models import User
from django.contrib.contenttypes import generic
from django.contrib.contenttypes.models import ContentType
from apps.managers.cache_mgr import cache_mgr
class ScoreSetting(models.Model):
    """score settings models."""
    # max_length must be an int per the Django field reference
    # (was the string "30").
    name = models.CharField(default="Score Settings",
                            max_length=30, editable=False,
                            help_text="The settings label.")
    setup_points = models.IntegerField(
        default=5,
        help_text="The point amount for setting up the profile."
    )
    active_threshold_points = models.IntegerField(
        default=50,
        help_text="The point amount for considering an active participant. It is also the "
                  "threshold point amount for awarding referral bonus.",
    )
    signup_bonus_points = models.IntegerField(
        default=2,
        help_text="The point amount for signing up a commitment or event/excursion."
    )
    quest_bonus_points = models.IntegerField(
        default=0,
        help_text="The point amount for completing a quest."
    )
    noshow_penalty_points = models.IntegerField(
        default=2,
        help_text="The point amount for no show penalty."
    )
    feedback_bonus_points = models.IntegerField(
        default=0,
        help_text="The point amount for providing action feedback."
    )
    # tooltip shown in the admin UI (class attribute, not a model field)
    admin_tool_tip = "The points awarded for completing various " + \
                     "actions and how many points are needed for a player to be 'active'."

    class Meta:
        """meta"""
        verbose_name = "point rubric"

    def __unicode__(self):
        # Python 2 style string representation (this codebase targets py2)
        return "point rubric"

    def save(self, *args, **kwargs):
        """Custom save method: persist, then invalidate the cached settings."""
        super(ScoreSetting, self).save(*args, **kwargs)
        cache_mgr.delete("score_setting")
class ReferralSetting(models.Model):
    """Defines the model of the dynamic referral settings."""
    normal_referral_points = models.IntegerField(
        default=10,
        help_text="The point amount for normal referral bonus.",
    )
    # help_text typos fixed: "supper" -> "super"
    super_referral_points = models.IntegerField(
        default=20,
        help_text="The point amount for super referral bonus, when the referral is from a team "
                  "of participation rate from 20% to 40%",
    )
    # help_text typos fixed: "the referrals is" -> "the referral is",
    # "les than" -> "less than"
    mega_referral_points = models.IntegerField(
        default=30,
        help_text="The point amount for mega referral bonus, when the referral is from a team of"
                  " participation rate less than 20%",
    )
    start_dynamic_bonus = models.BooleanField(
        default=False,
        help_text="Start rewarding the dynamic referral bonus. set it to true if you want to "
                  "reward referral bonus depends on referral's team participation."
    )

    def save(self, *args, **kwargs):
        """Custom save method: persist, then invalidate the cached settings."""
        super(ReferralSetting, self).save(*args, **kwargs)
        cache_mgr.delete("referral_setting")
class ScoreboardEntry(models.Model):
    """Defines the model that tracks user scores."""
    profile = models.ForeignKey("player_mgr.Profile", editable=False)
    # max_length must be an int per the Django field reference
    # (was the string "30").
    round_name = models.CharField(
        max_length=30, editable=False,
        help_text="The name of the round")
    points = models.IntegerField(
        default=0,
        help_text="Points for this round")
    last_awarded_submission = models.DateTimeField(
        null=True, blank=True, editable=False,
        help_text="Last award time")

    class Meta:
        """meta"""
        unique_together = (("profile", "round_name",),)
        ordering = ("round_name",)
class PointsTransaction(models.Model):
    """Entries that track points awarded to users."""
    user = models.ForeignKey(User)
    points = models.IntegerField(
        help_text="The points for the transaction. negative number indicates a subtraction"
    )
    transaction_date = models.DateTimeField(
        help_text="The date of the transaction"
    )
    # help_text typo fixed: "transcation" -> "transaction"
    message = models.CharField(
        max_length=255,
        help_text="The message of the transaction.")
    # optional generic link to the object (action, badge, ...) that earned
    # the points
    object_id = models.PositiveIntegerField(null=True)
    content_type = models.ForeignKey(ContentType, null=True)
    related_object = generic.GenericForeignKey("content_type", "object_id")

    class Meta:
        """meta"""
        unique_together = (("user", "transaction_date", "message",),)
        ordering = ("-transaction_date",)
|
{
"content_hash": "597abb7a4497bf7995c1cd60aa2e9894",
"timestamp": "",
"source": "github",
"line_count": 126,
"max_line_length": 98,
"avg_line_length": 35.57142857142857,
"alnum_prop": 0.643239625167336,
"repo_name": "KendyllD/boukenda-project",
"id": "1791aa439719b1ecd122de904d6415d9e4ce1bef",
"size": "4482",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "makahiki/apps/managers/score_mgr/models.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import models
from django.utils.translation import gettext_lazy as _
class DummyModel(models.Model):
    """
    Base for test models that sets app_label, so they play nicely.

    Abstract: contributes no table of its own; subclasses inherit the
    "tests" app_label.
    """
    class Meta:
        app_label = "tests"
        abstract = True
class BasicModel(DummyModel):
    """Minimal concrete test model with a single translated CharField."""
    text = models.CharField(
        max_length=100,
        verbose_name=_("Text comes here"),
        help_text=_("Text description."),
    )
|
{
"content_hash": "32871b4c0437d72e33ca33b95e39e0ef",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 66,
"avg_line_length": 22.136363636363637,
"alnum_prop": 0.6406570841889117,
"repo_name": "eamigo86/graphene-django-extras",
"id": "e46617e2b46c840424d56f55aeb1869d6ccb9db4",
"size": "487",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "148342"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
import MDAnalysis as mda
import numpy as np
from silicanet import SilicaNetwork
if __name__ == '__main__':
    # Measure average Si-O bond length and O-Si-O / Si-O-Si angles in a
    # silica structure loaded from a PDB file.
    u = mda.Universe('SiO2_protein_0.17.pdb')
    ntw = SilicaNetwork(u.select_atoms('resname SIL'))
    ntw.set_connections(1.7)  # generous bond distance between Si and O
    probe_count = 20000  # number of nodes sampled for the statistics
    bond_Si_O = 0.0
    angle_O_Si_O = 0.0; n_angle_O_Si_O = 0
    angle_Si_O_Si = 0.0; n_angle_Si_O_Si = 0
    for node in ntw[:probe_count]:
        # nodes with fewer than two neighbours define no angle; skip them
        if len(node.neighbors) < 2:
            continue
        xyz = node.atom.position
        end1 = node.neighbors[0].atom.position -xyz
        end2 = node.neighbors[1].atom.position -xyz
        # Si-O bond distance
        bond_Si_O += np.linalg.norm(end1)
        # Angle between the two neighbour bonds at this node
        cosine_angle = np.dot(end1, end2) / (np.linalg.norm(end1) * np.linalg.norm(end2))
        angle = np.arccos(cosine_angle)
        if node.type == 'Si':
            angle_O_Si_O += angle; n_angle_O_Si_O += 1
        else:
            angle_Si_O_Si += angle; n_angle_Si_O_Si += 1
    # NOTE(review): the bond average divides by probe_count even though
    # nodes skipped by the `continue` above contributed no distance; if many
    # nodes have < 2 neighbours this underestimates the mean Si-O length --
    # confirm whether a dedicated bond counter (like the angle counters)
    # was intended.
    print('bond_Si_O = ', bond_Si_O/probe_count)
    print('angle_O_Si_O = ', np.degrees(angle_O_Si_O/n_angle_O_Si_O))
    print('angle_Si_O_Si = ', np.degrees(angle_Si_O_Si/n_angle_Si_O_Si))
|
{
"content_hash": "9c52aa635e74e9a003e8c270b8526f18",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 112,
"avg_line_length": 42.806451612903224,
"alnum_prop": 0.6036171816126601,
"repo_name": "jmborr/confinedBSA",
"id": "b07400015e75d1444ce014155c64f25714d34763",
"size": "1349",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "simulation/silica/cristobalite/confineBSA/poretop/evaluate_bonding.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "55900"
},
{
"name": "Python",
"bytes": "39915"
},
{
"name": "Shell",
"bytes": "20379"
},
{
"name": "Smarty",
"bytes": "4682"
}
],
"symlink_target": ""
}
|
"""Python wrappers around TensorFlow ops.
This file is MACHINE GENERATED! Do not edit.
Original C++ source file: fused_conv2d_bias_activation_op.cc
"""
import collections as _collections
import six as _six
from tensorflow.python import pywrap_tensorflow as _pywrap_tensorflow
from tensorflow.python.eager import context as _context
from tensorflow.python.eager import core as _core
from tensorflow.python.eager import execute as _execute
from tensorflow.python.framework import dtypes as _dtypes
from tensorflow.python.framework import errors as _errors
from tensorflow.python.framework import tensor_shape as _tensor_shape
from tensorflow.core.framework import op_def_pb2 as _op_def_pb2
# Needed to trigger the call to _set_call_cpp_shape_fn.
from tensorflow.python.framework import common_shapes as _common_shapes
from tensorflow.python.framework import op_def_registry as _op_def_registry
from tensorflow.python.framework import ops as _ops
from tensorflow.python.framework import op_def_library as _op_def_library
from tensorflow.python.util.tf_export import tf_export
@tf_export('fused_conv2d_bias_activation')
def fused_conv2d_bias_activation(conv_input, filter, bias, side_input, conv_input_scale, side_input_scale, strides, padding, data_format="NHWC", filter_format="HWIO", activation_mode="Relu", dilations=[1, 1, 1, 1], name=None):
r""" Computes a fused kernel which implements: 2-D convolution, adds side input,
with separate scaling on convolution and side inputs, then adds bias and
applies the RELU activation function to the result. Supports both float and
qint8 data formats. In the case of qint8, the output is clipped to [0..127].
conv_input: A tensor with format as specified by `data_format` (see below).
filter: A tensor with format depending on `data_format` as follows:
"NHWC", "NCHW":
`float [ filter_height, filter_width, in_channels, out_channels ]`
"NCHW_VECT_C":
`qint8 [ out_channels, in_channels, filter_height, filter_width ]`
bias: 1-D float tensor with size matching the `out_channels` dimension of
`filter`.
Note: this tensor is still float, even if other inputs are qint8.
side_input: A tensor with format as specified by `data_format` (see below).
This tensor will be ignored and can be [] if side_input_scale == 0.
Otherwise, the size of each dimension must match the `output` tensor.
conv_input_scale: scalar float value to be multiplied by `conv_input`.
(conceptually.. in reality it is applied after convolution).
side_input_scale: scalar float value to be multiplied by `side_input`.
output: A tensor with format as specified by `data_format` (see below).
The dimension sizes are determined automatically based on other inputs
and attributes.
T: The element data type of `conv_input`, `side_input` and `output` tensors.
Note: must match with the `data_format`.
Tbias: The element data type of `bias`.
strides: 1-D tensor of length 4. The stride of the sliding window for each
dimension of `input`. The dimension order is determined by the value of
`data_format`, see below for details.
Note: the stride for batch and channel dimensions must be 1.
padding: The type of padding algorithm to use.
data_format: A string specifying the data format of `conv_input`,
`side_input` and `output` tensors with the following options:
"NHWC": `float [ batch, height, width, channels ]`
"NCHW": `float [ batch, channels, height, width ]`
"NCHW_VECT_C":
`qint8 [ batch, channels / 4, height, width, channels % 4 ]`
Note: for "NCHW_VECT_C", `channels` must be a multiple of 4.
filter_format: A string specifying the data format of `filter`,
"HWIO": `float [ kernel_height, kernel_width, input_channels,
output_channels ]`
"OIHW_VECT_I":
`qint8 [ output_channels, input_channels / 4,
kernel_height, kernel_width, input_channels % 4 ]`
activation_mode: The activation applied to the output.
Currently must be "Relu".
dilations: 1-D tensor of length 4. The dilation factor for each dimension
of `input`. If set to k > 1, there will be k-1 skipped cells between
each filter element on that dimension. The dimension order is determined
by the value of `data_format`, see above for details. Dilations in the
batch and depth dimensions must be 1.
Args:
conv_input: A `Tensor`. Must be one of the following types: `float32`, `half`, `qint8`.
filter: A `Tensor`. Must have the same type as `conv_input`.
bias: A `Tensor`. Must be one of the following types: `float32`, `half`.
side_input: A `Tensor`. Must have the same type as `conv_input`.
conv_input_scale: A `Tensor` of type `float32`.
side_input_scale: A `Tensor` of type `float32`.
strides: A list of `ints`.
padding: A `string` from: `"SAME", "VALID"`.
data_format: An optional `string` from: `"NHWC", "NCHW", "NCHW_VECT_C"`. Defaults to `"NHWC"`.
filter_format: An optional `string` from: `"HWIO", "OIHW", "OIHW_VECT_I"`. Defaults to `"HWIO"`.
activation_mode: An optional `string` from: `"Relu"`. Defaults to `"Relu"`.
dilations: An optional list of `ints`. Defaults to `[1, 1, 1, 1]`.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `conv_input`.
"""
_ctx = _context._context
if _ctx is None or not _ctx._eager_context.is_eager:
if not isinstance(strides, (list, tuple)):
raise TypeError(
"Expected list for 'strides' argument to "
"'fused_conv2d_bias_activation' Op, not %r." % strides)
strides = [_execute.make_int(_i, "strides") for _i in strides]
padding = _execute.make_str(padding, "padding")
if data_format is None:
data_format = "NHWC"
data_format = _execute.make_str(data_format, "data_format")
if filter_format is None:
filter_format = "HWIO"
filter_format = _execute.make_str(filter_format, "filter_format")
if activation_mode is None:
activation_mode = "Relu"
activation_mode = _execute.make_str(activation_mode, "activation_mode")
if dilations is None:
dilations = [1, 1, 1, 1]
if not isinstance(dilations, (list, tuple)):
raise TypeError(
"Expected list for 'dilations' argument to "
"'fused_conv2d_bias_activation' Op, not %r." % dilations)
dilations = [_execute.make_int(_i, "dilations") for _i in dilations]
_, _, _op = _op_def_lib._apply_op_helper(
"FusedConv2DBiasActivation", conv_input=conv_input, filter=filter,
bias=bias, side_input=side_input, conv_input_scale=conv_input_scale,
side_input_scale=side_input_scale, strides=strides, padding=padding,
data_format=data_format, filter_format=filter_format,
activation_mode=activation_mode, dilations=dilations, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("T", _op.get_attr("T"), "Tbias", _op.get_attr("Tbias"),
"strides", _op.get_attr("strides"), "padding",
_op.get_attr("padding"), "data_format",
_op.get_attr("data_format"), "filter_format",
_op.get_attr("filter_format"), "activation_mode",
_op.get_attr("activation_mode"), "dilations",
_op.get_attr("dilations"))
_execute.record_gradient(
"FusedConv2DBiasActivation", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._eager_context.device_name,
"FusedConv2DBiasActivation", name, _ctx._post_execution_callbacks,
conv_input, filter, bias, side_input, conv_input_scale,
side_input_scale, "strides", strides, "padding", padding,
"data_format", data_format, "filter_format", filter_format,
"activation_mode", activation_mode, "dilations", dilations)
return _result
except _core._FallbackException:
return fused_conv2d_bias_activation_eager_fallback(
conv_input, filter, bias, side_input, conv_input_scale,
side_input_scale, strides=strides, padding=padding,
data_format=data_format, filter_format=filter_format,
activation_mode=activation_mode, dilations=dilations, name=name,
ctx=_ctx)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
def fused_conv2d_bias_activation_eager_fallback(conv_input, filter, bias, side_input, conv_input_scale, side_input_scale, strides, padding, data_format="NHWC", filter_format="HWIO", activation_mode="Relu", dilations=[1, 1, 1, 1], name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function fused_conv2d_bias_activation

  Used when TFE_Py_FastPathExecute raises _FallbackException; performs the
  same attribute canonicalization as the graph-mode path and then executes
  the op directly in the given eager context.
  """
  _ctx = ctx if ctx else _context.context()
  # Attribute canonicalization mirrors fused_conv2d_bias_activation above.
  if not isinstance(strides, (list, tuple)):
    raise TypeError(
        "Expected list for 'strides' argument to "
        "'fused_conv2d_bias_activation' Op, not %r." % strides)
  strides = [_execute.make_int(_i, "strides") for _i in strides]
  padding = _execute.make_str(padding, "padding")
  if data_format is None:
    data_format = "NHWC"
  data_format = _execute.make_str(data_format, "data_format")
  if filter_format is None:
    filter_format = "HWIO"
  filter_format = _execute.make_str(filter_format, "filter_format")
  if activation_mode is None:
    activation_mode = "Relu"
  activation_mode = _execute.make_str(activation_mode, "activation_mode")
  if dilations is None:
    dilations = [1, 1, 1, 1]
  if not isinstance(dilations, (list, tuple)):
    raise TypeError(
        "Expected list for 'dilations' argument to "
        "'fused_conv2d_bias_activation' Op, not %r." % dilations)
  dilations = [_execute.make_int(_i, "dilations") for _i in dilations]
  # conv_input/filter/side_input share one dtype (attr "T"); bias has its own
  # (attr "Tbias").  The scale inputs are always float32 tensors.
  _attr_T, _inputs_T = _execute.args_to_matching_eager([conv_input, filter, side_input], _ctx)
  (conv_input, filter, side_input) = _inputs_T
  _attr_Tbias, (bias,) = _execute.args_to_matching_eager([bias], _ctx)
  conv_input_scale = _ops.convert_to_tensor(conv_input_scale, _dtypes.float32)
  side_input_scale = _ops.convert_to_tensor(side_input_scale, _dtypes.float32)
  _inputs_flat = [conv_input, filter, bias, side_input, conv_input_scale, side_input_scale]
  _attrs = ("T", _attr_T, "Tbias", _attr_Tbias, "strides", strides, "padding",
            padding, "data_format", data_format, "filter_format", filter_format,
            "activation_mode", activation_mode, "dilations", dilations)
  _result = _execute.execute(b"FusedConv2DBiasActivation", 1,
                             inputs=_inputs_flat, attrs=_attrs, ctx=_ctx,
                             name=name)
  _execute.record_gradient(
      "FusedConv2DBiasActivation", _inputs_flat, _attrs, _result, name)
  # Exactly one output tensor.
  _result, = _result
  return _result
# NOTE(review): registering None appears to be the generated-code idiom for
# deferring shape inference to the C++ op definition -- confirm against the
# op registration if this matters.
_ops.RegisterShape("FusedConv2DBiasActivation")(None)
def _InitOpDefLibrary(op_list_proto_bytes):
  """Deserialize an OpList proto, register it, and return an OpDefLibrary.

  Args:
    op_list_proto_bytes: serialized `OpList` protocol buffer bytes.

  Returns:
    An `OpDefLibrary` populated with the ops from the proto.
  """
  parsed_ops = _op_def_pb2.OpList()
  parsed_ops.ParseFromString(op_list_proto_bytes)
  _op_def_registry.register_op_list(parsed_ops)
  library = _op_def_library.OpDefLibrary()
  library.add_op_list(parsed_ops)
  return library
# op {
# name: "FusedConv2DBiasActivation"
# input_arg {
# name: "conv_input"
# type_attr: "T"
# }
# input_arg {
# name: "filter"
# type_attr: "T"
# }
# input_arg {
# name: "bias"
# type_attr: "Tbias"
# }
# input_arg {
# name: "side_input"
# type_attr: "T"
# }
# input_arg {
# name: "conv_input_scale"
# type: DT_FLOAT
# }
# input_arg {
# name: "side_input_scale"
# type: DT_FLOAT
# }
# output_arg {
# name: "output"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_FLOAT
# type: DT_HALF
# type: DT_QINT8
# }
# }
# }
# attr {
# name: "Tbias"
# type: "type"
# allowed_values {
# list {
# type: DT_FLOAT
# type: DT_HALF
# }
# }
# }
# attr {
# name: "strides"
# type: "list(int)"
# }
# attr {
# name: "padding"
# type: "string"
# allowed_values {
# list {
# s: "SAME"
# s: "VALID"
# }
# }
# }
# attr {
# name: "data_format"
# type: "string"
# default_value {
# s: "NHWC"
# }
# allowed_values {
# list {
# s: "NHWC"
# s: "NCHW"
# s: "NCHW_VECT_C"
# }
# }
# }
# attr {
# name: "filter_format"
# type: "string"
# default_value {
# s: "HWIO"
# }
# allowed_values {
# list {
# s: "HWIO"
# s: "OIHW"
# s: "OIHW_VECT_I"
# }
# }
# }
# attr {
# name: "activation_mode"
# type: "string"
# default_value {
# s: "Relu"
# }
# allowed_values {
# list {
# s: "Relu"
# }
# }
# }
# attr {
# name: "dilations"
# type: "list(int)"
# default_value {
# list {
# i: 1
# i: 1
# i: 1
# i: 1
# }
# }
# }
# }
_op_def_lib = _InitOpDefLibrary(b"\n\300\003\n\031FusedConv2DBiasActivation\022\017\n\nconv_input\"\001T\022\013\n\006filter\"\001T\022\r\n\004bias\"\005Tbias\022\017\n\nside_input\"\001T\022\024\n\020conv_input_scale\030\001\022\024\n\020side_input_scale\030\001\032\013\n\006output\"\001T\"\022\n\001T\022\004type:\007\n\0052\003\001\023\013\"\025\n\005Tbias\022\004type:\006\n\0042\002\001\023\"\024\n\007strides\022\tlist(int)\"\"\n\007padding\022\006string:\017\n\r\022\004SAME\022\005VALID\":\n\013data_format\022\006string\032\006\022\004NHWC:\033\n\031\022\004NHWC\022\004NCHW\022\013NCHW_VECT_C\"<\n\rfilter_format\022\006string\032\006\022\004HWIO:\033\n\031\022\004HWIO\022\004OIHW\022\013OIHW_VECT_I\"+\n\017activation_mode\022\006string\032\006\022\004Relu:\010\n\006\022\004Relu\" \n\tdilations\022\tlist(int)\032\010\n\006\032\004\001\001\001\001")
|
{
"content_hash": "b3f4d278df971dac8608d4b4011e56ef",
"timestamp": "",
"source": "github",
"line_count": 340,
"max_line_length": 863,
"avg_line_length": 42.2,
"alnum_prop": 0.6340256481739616,
"repo_name": "ryfeus/lambda-packs",
"id": "c3b19de22e9dbebbccf7469373c05f7ecd175686",
"size": "14348",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Keras_tensorflow_nightly/source2.7/tensorflow/contrib/fused_conv/ops/gen_fused_conv2d_bias_activation_op.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "9768343"
},
{
"name": "C++",
"bytes": "76566960"
},
{
"name": "CMake",
"bytes": "191097"
},
{
"name": "CSS",
"bytes": "153538"
},
{
"name": "Cuda",
"bytes": "61768"
},
{
"name": "Cython",
"bytes": "3110222"
},
{
"name": "Fortran",
"bytes": "110284"
},
{
"name": "HTML",
"bytes": "248658"
},
{
"name": "JavaScript",
"bytes": "62920"
},
{
"name": "MATLAB",
"bytes": "17384"
},
{
"name": "Makefile",
"bytes": "152150"
},
{
"name": "Python",
"bytes": "549307737"
},
{
"name": "Roff",
"bytes": "26398"
},
{
"name": "SWIG",
"bytes": "142"
},
{
"name": "Shell",
"bytes": "7790"
},
{
"name": "Smarty",
"bytes": "4090"
},
{
"name": "TeX",
"bytes": "152062"
},
{
"name": "XSLT",
"bytes": "305540"
}
],
"symlink_target": ""
}
|
"""This file provides the opening handshake processor for the WebSocket
protocol (RFC 6455).
Specification:
http://tools.ietf.org/html/rfc6455
"""
# Note: request.connection.write is used in this module, even though mod_python
# document says that it should be used only in connection handlers.
# Unfortunately, we have no other options. For example, request.write is not
# suitable because it doesn't allow direct raw bytes writing.
import base64
import logging
import os
import re
from mod_pywebsocket import common
from mod_pywebsocket.extensions import get_extension_processor
from mod_pywebsocket.handshake._base import check_request_line
from mod_pywebsocket.handshake._base import format_header
from mod_pywebsocket.handshake._base import get_mandatory_header
from mod_pywebsocket.handshake._base import HandshakeException
from mod_pywebsocket.handshake._base import parse_token_list
from mod_pywebsocket.handshake._base import validate_mandatory_header
from mod_pywebsocket.handshake._base import validate_subprotocol
from mod_pywebsocket.handshake._base import VersionException
from mod_pywebsocket.stream import Stream
from mod_pywebsocket.stream import StreamOptions
from mod_pywebsocket import util
# Used to validate the value in the Sec-WebSocket-Key header strictly. RFC 4648
# disallows non-zero padding, so the character right before == must be any of
# A, Q, g and w.
_SEC_WEBSOCKET_KEY_REGEX = re.compile('^[+/0-9A-Za-z]{21}[AQgw]==$')

# Defining aliases for values used frequently.
_VERSION_HYBI08 = common.VERSION_HYBI08
_VERSION_HYBI08_STRING = str(_VERSION_HYBI08)
_VERSION_LATEST = common.VERSION_HYBI_LATEST
_VERSION_LATEST_STRING = str(_VERSION_LATEST)

# Protocol versions this handshake processor accepts (see
# Handshaker._check_version); advertised to clients on version mismatch.
_SUPPORTED_VERSIONS = [
    _VERSION_LATEST,
    _VERSION_HYBI08,
]
def compute_accept(key):
    """Derive the Sec-WebSocket-Accept value from a Sec-WebSocket-Key value.

    Returns a tuple of (base64-encoded accept value, raw SHA-1 digest).
    """
    digest = util.sha1_hash(key + common.WEBSOCKET_ACCEPT_UUID).digest()
    return (base64.b64encode(digest), digest)
class Handshaker(object):
    """Opening handshake processor for the WebSocket protocol (RFC 6455)."""

    def __init__(self, request, dispatcher):
        """Construct an instance.

        Args:
            request: mod_python request.
            dispatcher: Dispatcher (dispatch.Dispatcher).

        Handshaker will add attributes such as ws_resource during handshake.
        """
        self._logger = util.get_class_logger(self)

        self._request = request
        self._dispatcher = dispatcher

    def _validate_connection_header(self):
        # The Connection header is a comma-separated token list; the request
        # is acceptable only if one of the tokens is "Upgrade"
        # (case-insensitive comparison).
        connection = get_mandatory_header(
            self._request, common.CONNECTION_HEADER)

        try:
            connection_tokens = parse_token_list(connection)
        except HandshakeException, e:
            raise HandshakeException(
                'Failed to parse %s: %s' % (common.CONNECTION_HEADER, e))

        connection_is_valid = False
        for token in connection_tokens:
            if token.lower() == common.UPGRADE_CONNECTION_TYPE.lower():
                connection_is_valid = True
                break
        if not connection_is_valid:
            raise HandshakeException(
                '%s header doesn\'t contain "%s"' %
                (common.CONNECTION_HEADER, common.UPGRADE_CONNECTION_TYPE))

    def do_handshake(self):
        """Perform the full opening handshake.

        Validates the request line and headers, negotiates the subprotocol
        and extensions, sets up the frame stream, and sends the 101 response.
        Raises HandshakeException (with an HTTP status) on failure.
        """
        self._request.ws_close_code = None
        self._request.ws_close_reason = None

        # Parsing.
        check_request_line(self._request)

        validate_mandatory_header(
            self._request,
            common.UPGRADE_HEADER,
            common.WEBSOCKET_UPGRADE_TYPE)

        self._validate_connection_header()

        self._request.ws_resource = self._request.uri

        # Host is required by the spec but its value is not used here.
        unused_host = get_mandatory_header(self._request, common.HOST_HEADER)

        self._request.ws_version = self._check_version()

        # This handshake must be based on latest hybi. We are responsible to
        # fallback to HTTP on handshake failure as latest hybi handshake
        # specifies.
        try:
            self._get_origin()
            self._set_protocol()
            self._parse_extensions()

            # Key validation, response generation.
            key = self._get_key()
            (accept, accept_binary) = compute_accept(key)
            self._logger.debug(
                '%s: %r (%s)',
                common.SEC_WEBSOCKET_ACCEPT_HEADER,
                accept,
                util.hexify(accept_binary))

            self._logger.debug('Protocol version is RFC 6455')

            # Setup extension processors.
            processors = []
            if self._request.ws_requested_extensions is not None:
                for extension_request in self._request.ws_requested_extensions:
                    processor = get_extension_processor(extension_request)
                    # Unknown extension requests are just ignored.
                    if processor is not None:
                        processors.append(processor)
            self._request.ws_extension_processors = processors

            # Extra handshake handler may modify/remove processors.
            self._dispatcher.do_extra_handshake(self._request)

            stream_options = StreamOptions()

            self._request.ws_extensions = None
            for processor in self._request.ws_extension_processors:
                if processor is None:
                    # Some processors may be removed by extra handshake
                    # handler.
                    continue

                extension_response = processor.get_extension_response()
                if extension_response is None:
                    # Rejected.
                    continue

                if self._request.ws_extensions is None:
                    self._request.ws_extensions = []
                self._request.ws_extensions.append(extension_response)

                # Each accepted extension may tweak framing options (e.g.
                # masking, compression) before the stream is created.
                processor.setup_stream_options(stream_options)

            if self._request.ws_extensions is not None:
                self._logger.debug(
                    'Extensions accepted: %r',
                    map(common.ExtensionParameter.name,
                        self._request.ws_extensions))

            self._request.ws_stream = Stream(self._request, stream_options)

            if self._request.ws_requested_protocols is not None:
                # The extra handshake handler must have picked one of the
                # client-requested subprotocols.
                if self._request.ws_protocol is None:
                    raise HandshakeException(
                        'do_extra_handshake must choose one subprotocol from '
                        'ws_requested_protocols and set it to ws_protocol')
                validate_subprotocol(self._request.ws_protocol, hixie=False)

                self._logger.debug(
                    'Subprotocol accepted: %r',
                    self._request.ws_protocol)
            else:
                if self._request.ws_protocol is not None:
                    raise HandshakeException(
                        'ws_protocol must be None when the client didn\'t '
                        'request any subprotocol')

            self._send_handshake(accept)
        except HandshakeException, e:
            if not e.status:
                # Fallback to 400 bad request by default.
                e.status = common.HTTP_STATUS_BAD_REQUEST
            raise e

    def _get_origin(self):
        # HyBi 08 used the Sec-WebSocket-Origin header; later versions use
        # Origin.  The origin is recorded but absence is not an error.
        if self._request.ws_version is _VERSION_HYBI08:
            origin_header = common.SEC_WEBSOCKET_ORIGIN_HEADER
        else:
            origin_header = common.ORIGIN_HEADER
        origin = self._request.headers_in.get(origin_header)
        if origin is None:
            self._logger.debug('Client request does not have origin header')
        self._request.ws_origin = origin

    def _check_version(self):
        # Returns the negotiated protocol version.  Raises VersionException
        # for unsupported versions so the caller can advertise the supported
        # ones; multiple comma-separated versions are rejected outright.
        version = get_mandatory_header(self._request,
                                       common.SEC_WEBSOCKET_VERSION_HEADER)
        if version == _VERSION_HYBI08_STRING:
            return _VERSION_HYBI08
        if version == _VERSION_LATEST_STRING:
            return _VERSION_LATEST

        if version.find(',') >= 0:
            raise HandshakeException(
                'Multiple versions (%r) are not allowed for header %s' %
                (version, common.SEC_WEBSOCKET_VERSION_HEADER),
                status=common.HTTP_STATUS_BAD_REQUEST)
        raise VersionException(
            'Unsupported version %r for header %s' %
            (version, common.SEC_WEBSOCKET_VERSION_HEADER),
            supported_versions=', '.join(map(str, _SUPPORTED_VERSIONS)))

    def _set_protocol(self):
        # Record the client's requested subprotocols (or None); the actual
        # choice is made later by the extra handshake handler.
        self._request.ws_protocol = None

        protocol_header = self._request.headers_in.get(
            common.SEC_WEBSOCKET_PROTOCOL_HEADER)

        if not protocol_header:
            self._request.ws_requested_protocols = None
            return

        self._request.ws_requested_protocols = parse_token_list(
            protocol_header)
        self._logger.debug('Subprotocols requested: %r',
                           self._request.ws_requested_protocols)

    def _parse_extensions(self):
        extensions_header = self._request.headers_in.get(
            common.SEC_WEBSOCKET_EXTENSIONS_HEADER)
        if not extensions_header:
            self._request.ws_requested_extensions = None
            return

        # HyBi 08 did not allow quoted-string extension parameter values.
        if self._request.ws_version is common.VERSION_HYBI08:
            allow_quoted_string = False
        else:
            allow_quoted_string = True
        try:
            self._request.ws_requested_extensions = common.parse_extensions(
                extensions_header, allow_quoted_string=allow_quoted_string)
        except common.ExtensionParsingException, e:
            raise HandshakeException(
                'Failed to parse Sec-WebSocket-Extensions header: %r' % e)

        self._logger.debug(
            'Extensions requested: %r',
            map(common.ExtensionParameter.name,
                self._request.ws_requested_extensions))

    def _validate_key(self, key):
        # Returns the decoded 16-byte key; raises HandshakeException when the
        # header value is not a strictly valid base64-encoded 16-byte key.
        if key.find(',') >= 0:
            raise HandshakeException('Request has multiple %s header lines or '
                                     'contains illegal character \',\': %r' %
                                     (common.SEC_WEBSOCKET_KEY_HEADER, key))

        # Validate
        key_is_valid = False
        try:
            # Validate key by quick regex match before parsing by base64
            # module. Because base64 module skips invalid characters, we have
            # to do this in advance to make this server strictly reject illegal
            # keys.
            if _SEC_WEBSOCKET_KEY_REGEX.match(key):
                decoded_key = base64.b64decode(key)
                if len(decoded_key) == 16:
                    key_is_valid = True
        except TypeError, e:
            pass

        if not key_is_valid:
            raise HandshakeException(
                'Illegal value for header %s: %r' %
                (common.SEC_WEBSOCKET_KEY_HEADER, key))

        # decoded_key is always bound here: key_is_valid can only become True
        # after a successful decode above.
        return decoded_key

    def _get_key(self):
        key = get_mandatory_header(
            self._request, common.SEC_WEBSOCKET_KEY_HEADER)

        decoded_key = self._validate_key(key)

        self._logger.debug(
            '%s: %r (%s)',
            common.SEC_WEBSOCKET_KEY_HEADER,
            key,
            util.hexify(decoded_key))

        # Note: the original (still base64-encoded) key is returned; the
        # decoded form is only used for validation/logging.
        return key

    def _send_handshake(self, accept):
        # Build and write the "101 Switching Protocols" response, including
        # the negotiated subprotocol and extensions when present.
        response = []

        response.append('HTTP/1.1 101 Switching Protocols\r\n')

        response.append(format_header(
            common.UPGRADE_HEADER, common.WEBSOCKET_UPGRADE_TYPE))
        response.append(format_header(
            common.CONNECTION_HEADER, common.UPGRADE_CONNECTION_TYPE))
        response.append(format_header(
            common.SEC_WEBSOCKET_ACCEPT_HEADER, accept))
        if self._request.ws_protocol is not None:
            response.append(format_header(
                common.SEC_WEBSOCKET_PROTOCOL_HEADER,
                self._request.ws_protocol))
        if (self._request.ws_extensions is not None and
            len(self._request.ws_extensions) != 0):
            response.append(format_header(
                common.SEC_WEBSOCKET_EXTENSIONS_HEADER,
                common.format_extensions(self._request.ws_extensions)))
        response.append('\r\n')

        raw_response = ''.join(response)
        self._request.connection.write(raw_response)
        self._logger.debug('Sent server\'s opening handshake: %r',
                           raw_response)
# vi:sts=4 sw=4 et
|
{
"content_hash": "b7a961d9469206fdf061517dc0da5efe",
"timestamp": "",
"source": "github",
"line_count": 342,
"max_line_length": 79,
"avg_line_length": 36.71345029239766,
"alnum_prop": 0.602899012424339,
"repo_name": "olinux/twice",
"id": "2883acbf856df718547e7fc967b56c264f05e0b6",
"size": "14086",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "MouseControlPython/mod_pywebsocket/handshake/hybi.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "47360"
},
{
"name": "Java",
"bytes": "649198"
},
{
"name": "Python",
"bytes": "251450"
},
{
"name": "Shell",
"bytes": "1938"
}
],
"symlink_target": ""
}
|
if __name__ == '__main__':  # {{{1
    import argparse

    # All three entry points live in the local `scripts` package; import them
    # in one statement.  (The original `import sys` was unused and has been
    # removed.)
    from scripts import InfoConkyConf, StartConky, SystemConkyConf

    parser = argparse.ArgumentParser(
        prog='conky setting script',
        description='Automatic conky setting creation')
    # `store_true` is the idiomatic, behaviorally identical spelling of
    # action='store_const', const=True, default=False.
    parser.add_argument(
        '--create',
        action='store_true',
        help='Create settings')
    parser.add_argument(
        '--run',
        action='store_true',
        help='Run conky')
    args = parser.parse_args()

    if args.create:
        # Generate and persist both conky configuration files.
        InfoConkyConf().saveConf()
        SystemConkyConf().saveConf()
    elif args.run:
        conf_names = ['info.conf', 'system.conf']
        StartConky(conf_names).execute()
    else:
        # No flag given: show usage instead of doing nothing silently.
        parser.print_help()
|
{
"content_hash": "542da79a46276adeb8621e857bb6462e",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 55,
"avg_line_length": 27.823529411764707,
"alnum_prop": 0.5898520084566596,
"repo_name": "deresmos/conky-setting",
"id": "c9cfd48f3204ca4bef44d8290bed6c55c8fbb39d",
"size": "946",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "main.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "14611"
}
],
"symlink_target": ""
}
|
import errno
import filecmp
import os
import pty
import re
import socket
import subprocess
import sys
import tempfile
import threading
import time
from io import open
# Extra idf_monitor arguments for the two supported target architectures.
XTENSA_ARGS = '--toolchain-prefix xtensa-esp32-elf-'
RISCV_ARGS = '--decode-panic backtrace --target esp32c3 --toolchain-prefix riscv32-esp-elf-'

test_list = (
    # Add new tests here. All files should be placed in IN_DIR. Columns are
    # Input file          Filter string                                   File with expected output    Timeout    ELF file              Extra args
    ('in1.txt', '', 'in1f1.txt', 60, 'dummy_xtensa.elf', XTENSA_ARGS),
    ('in1.txt', '*:V', 'in1f1.txt', 60, 'dummy_xtensa.elf', XTENSA_ARGS),
    ('in1.txt', 'hello_world', 'in1f2.txt', 60, 'dummy_xtensa.elf', XTENSA_ARGS),
    ('in1.txt', '*:N', 'in1f3.txt', 60, 'dummy_xtensa.elf', XTENSA_ARGS),
    ('in2.txt', 'boot mdf_device_handle:I mesh:E vfs:I', 'in2f1.txt', 420, 'dummy_xtensa.elf', XTENSA_ARGS),
    ('in2.txt', 'vfs', 'in2f2.txt', 420, 'dummy_xtensa.elf', XTENSA_ARGS),
    ('core1.txt', '', 'core1_out.txt', 60, 'dummy_xtensa.elf', XTENSA_ARGS),
    ('riscv_panic1.txt', '', 'riscv_panic1_out.txt', 60, 'dummy_riscv.elf', RISCV_ARGS),
)

IN_DIR = 'tests/'  # tests are in this directory
OUT_DIR = 'outputs/'  # test results are written to this directory (kept only for debugging purposes)
ERR_OUT = 'monitor_error_output.'  # filename prefix for the stderr capture files created in OUT_DIR
IDF_MONITOR_WAPPER = 'idf_monitor_wrapper.py'  # [sic] name kept as-is; referenced below
SERIAL_ALIVE_FILE = '/tmp/serial_alive'  # the existence of this file signalize that idf_monitor is ready to receive

# connection related to communicating with idf_monitor through sockets
HOST = 'localhost'
# blocking socket operations are used with timeout:
SOCKET_TIMEOUT = 30
# the test is restarted after failure (idf_monitor has to be killed):
RETRIES_PER_TEST = 2

# Matches the version banner printed by espcoredump, e.g. "espcoredump.py v2.1-dev";
# masked out before diffing expected vs actual output (see
# remove_coredump_version_string).
COREDUMP_VERSION_REGEX = r'espcoredump\.py v\d+\.[\d\w-]+(\.[\d\w-]+)?'
def remove_coredump_version_string(file_path):
    """Return a masked copy of *file_path* with espcoredump version banners removed.

    The expected/actual output files may contain a version banner such as
    ``espcoredump.py v2.1-dev`` whose version part varies between IDF
    checkouts, so it must be stripped before the two files are diffed.

    Returns:
        The path of a temporary file holding the masked text, or ``None`` when
        no banner was found (the caller then diffs the original files).  The
        temporary file is created with ``delete=False`` and is kept as a test
        artifact.
    """
    with open(file_path, 'r') as f:
        init_text = f.read()
    # BUG FIX: re.sub()'s 4th positional parameter is `count`, not `flags`.
    # The original call passed re.MULTILINE (== 8) positionally, silently
    # limiting the replacement to the first 8 matches instead of setting the
    # flag.  Pass it by keyword.  (MULTILINE only affects ^/$ anchors, which
    # the pattern does not use, but the author's intent is preserved.)
    modified_text = re.sub(COREDUMP_VERSION_REGEX, '', init_text,
                           flags=re.MULTILINE)
    if init_text == modified_text:
        # Nothing was stripped; signal "no masking needed".
        return None
    with tempfile.NamedTemporaryFile(delete=False) as temp_file:
        temp_file.write(modified_text.encode())
    return temp_file.name
def monitor_timeout(process):
    """Watchdog callback: kill *process* if it is still running.

    Tolerates the race where the process exits between poll() and kill().
    """
    if process.poll() is not None:
        # Already exited on its own; nothing to do.
        return
    try:
        process.kill()
    except OSError as exc:
        if exc.errno != errno.ESRCH:
            raise
        # ESRCH: the process disappeared between poll() and kill(); ignore.
    else:
        print('\tidf_monitor_wrapper was killed because it did not finish in time.')
class TestRunner(object):
    """Context manager owning the listening socket idf_monitor connects to.

    On entry it binds an ephemeral localhost port (exposed as ``self.port``)
    and starts listening; on exit the socket is shut down and closed.
    """

    def __enter__(self):
        listener = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        listener.bind((HOST, 0))  # port 0: let the OS pick a free port
        self.serversocket = listener
        self.port = listener.getsockname()[1]
        listener.listen(5)
        return self

    def __exit__(self, exc_type, exc_value, tb):
        try:
            self.serversocket.shutdown(socket.SHUT_RDWR)
            self.serversocket.close()
            print('Socket was closed successfully')
        except (OSError, socket.error):
            # Best-effort cleanup; the socket may already be dead.
            pass

    def accept_connection(self):
        """ returns a socket for sending the input for idf_monitor which must be closed before calling this again. """
        conn, _address = self.serversocket.accept()
        # exception will be thrown here if the idf_monitor didn't connect in time
        return conn
def test_iteration(runner, test):
    """Run one entry of test_list against idf_monitor and diff its output.

    Args:
        runner: a TestRunner whose server socket idf_monitor will connect to.
        test: tuple of (input file, filter string, expected-output file,
            timeout seconds, ELF file, extra idf_monitor args).

    Raises RuntimeError when idf_monitor is killed by a signal or when the
    produced output differs from the expected file.  The caller handles
    retries.
    """
    try:
        # Make sure that the file doesn't exist. It will be recreated by idf_monitor_wrapper.py
        os.remove(SERIAL_ALIVE_FILE)
    except OSError:
        pass
    print('\nRunning test on {} with filter "{}" and expecting {}'.format(test[0], test[1], test[2]))
    try:
        with open(OUT_DIR + test[2], 'w', encoding='utf-8') as o_f, \
                tempfile.NamedTemporaryFile(dir=OUT_DIR, prefix=ERR_OUT, mode='w', delete=False) as e_f:
            monitor_cmd = [sys.executable, IDF_MONITOR_WAPPER,
                           '--port', 'socket://{}:{}?logging=debug'.format(HOST, runner.port),
                           '--print_filter', test[1],
                           '--serial_alive_file', SERIAL_ALIVE_FILE,
                           '--elf-file', test[4]]
            monitor_cmd += test[5].split()
            # idf_monitor needs a tty on stdin; hand it the slave end of a pty.
            (master_fd, slave_fd) = pty.openpty()
            print('\t', ' '.join(monitor_cmd), sep='')
            print('\tstdout="{}" stderr="{}" stdin="{}"'.format(o_f.name, e_f.name, os.ttyname(slave_fd)))
            print('\tMonitor timeout: {} seconds'.format(test[3]))
            start = time.time()
            # the server socket is alive so idf_monitor can start now
            proc = subprocess.Popen(monitor_cmd, stdin=slave_fd, stdout=o_f, stderr=e_f, close_fds=True, bufsize=0)
            # - idf_monitor's stdin needs to be connected to some pseudo-tty in docker image even when it is not
            #   used at all
            # - setting bufsize is needed because the default value is different on Python 2 and 3
            # - the default close_fds is also different on Python 2 and 3
            monitor_watchdog = threading.Timer(test[3], monitor_timeout, [proc])
            monitor_watchdog.start()
            client = runner.accept_connection()
            # The connection is ready but idf_monitor cannot yet receive data (the serial reader thread is not running).
            # This seems to happen on Ubuntu 16.04 LTS and is not related to the version of Python or pyserial.
            # Updating to Ubuntu 18.04 LTS also helps but here, a workaround is used: A wrapper is used for IDF monitor
            # which checks the serial reader thread and creates a file when it is running.
            while not os.path.isfile(SERIAL_ALIVE_FILE) and proc.poll() is None:
                print('\tSerial reader is not ready. Do a sleep...')
                time.sleep(1)
            # Only now can we send the inputs:
            with open(IN_DIR + test[0], 'rb') as f:
                print('\tSending {} to the socket'.format(f.name))
                for chunk in iter(lambda: f.read(1024), b''):
                    client.sendall(chunk)
            # Ctrl-] followed by newline is idf_monitor's exit sequence.
            idf_exit_sequence = b'\x1d\n'
            print('\tSending <exit> to the socket')
            client.sendall(idf_exit_sequence)
            close_end_time = start + 0.75 * test[3]  # time when the process is close to be killed
            while True:
                ret = proc.poll()
                if ret is not None:
                    break
                if time.time() > close_end_time:
                    # The process isn't finished yet so we are starting to send additional exit sequences because maybe
                    # the other end didn't received it.
                    print('\tSending additional <exit> to the socket')
                    client.sendall(idf_exit_sequence)
                time.sleep(1)
            end = time.time()
            print('\tidf_monitor exited after {:.2f} seconds'.format(end - start))
            if ret < 0:
                # A negative return code means termination by signal (POSIX).
                raise RuntimeError('idf_monitor was terminated by signal {}'.format(-ret))
            # idf_monitor needs to end before the socket is closed in order to exit without an exception.
    finally:
        # NOTE(review): if pty.openpty(), Popen or accept_connection() raises,
        # some of these names are unbound and this cleanup itself raises
        # NameError -- consider pre-binding them to None before the try.
        if monitor_watchdog:
            monitor_watchdog.cancel()
        os.close(slave_fd)
        os.close(master_fd)
        if client:
            client.close()
            print('\tThe client was closed successfully')
    # Compare expected vs actual output, first masking the espcoredump
    # version banner so checkout-specific versions don't fail the diff.
    f1 = IN_DIR + test[2]
    f2 = OUT_DIR + test[2]
    temp_f1, temp_f2 = remove_coredump_version_string(f1), remove_coredump_version_string(f2)
    print('\tdiff {} {}'.format(f1, f2))
    if temp_f1 and temp_f2:
        f1, f2 = temp_f1, temp_f2
    if filecmp.cmp(f1, f2, shallow=False):
        print('\tTest has passed')
    else:
        raise RuntimeError('The contents of the files are different. Please examine the artifacts.')
def main():
    """Run every test in ``test_list``, retrying each up to RETRIES_PER_TEST times."""
    start_time = time.time()
    if not os.path.exists(OUT_DIR):
        os.mkdir(OUT_DIR)
    socket.setdefaulttimeout(SOCKET_TIMEOUT)
    for test in test_list:
        for attempt in range(RETRIES_PER_TEST):
            # A fresh TestRunner (and therefore a fresh port and server
            # socket) is created for every attempt. This matters for the CI
            # run, where experiments showed that retrying on the same port
            # (and server socket) is not sufficient to make the test pass.
            with TestRunner() as runner:
                try:
                    test_iteration(runner, test)
                except Exception as e:
                    if attempt == RETRIES_PER_TEST - 1:
                        # Out of retries -- propagate the failure.
                        raise
                    print('Test has failed with exception:', e)
                    print('Another attempt will be made.')
                else:
                    # Success: no more retries needed for this test.
                    break
    print('Execution took {:.2f} seconds\n'.format(time.time() - start_time))
# Script entry point: run the full idf_monitor test suite.
if __name__ == '__main__':
    main()
|
{
"content_hash": "9d5a17b1e6bab45460dafacffca35b1a",
"timestamp": "",
"source": "github",
"line_count": 207,
"max_line_length": 147,
"avg_line_length": 46.92270531400966,
"alnum_prop": 0.5707814269535674,
"repo_name": "espressif/esp-idf",
"id": "ed4523a09834d7e1fa905c8ba7dea6b352157ae2",
"size": "9848",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tools/test_idf_monitor/run_test_idf_monitor.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "388440"
},
{
"name": "Batchfile",
"bytes": "5451"
},
{
"name": "C",
"bytes": "69102322"
},
{
"name": "C++",
"bytes": "992772"
},
{
"name": "CMake",
"bytes": "539972"
},
{
"name": "Dockerfile",
"bytes": "3290"
},
{
"name": "Makefile",
"bytes": "23747"
},
{
"name": "Nim",
"bytes": "1005"
},
{
"name": "PowerShell",
"bytes": "4537"
},
{
"name": "Python",
"bytes": "2158180"
},
{
"name": "Roff",
"bytes": "101"
},
{
"name": "Shell",
"bytes": "126143"
}
],
"symlink_target": ""
}
|
"""
Read and write SAS XPORT/XPT-format files.
"""
# Standard Library
import functools
import json
import logging
import logging.config
import sys
# Community Packages
import click
import yaml
# Xport Modules
import xport
import xport.v56
import xport.v89
# Public API of this module.
__all__ = [
    'cli',
]

# Prefer PyYAML's C-accelerated safe loader when PyYAML was built against
# libyaml; fall back to the pure-Python SafeLoader otherwise.
try:
    yaml.load = functools.partial(yaml.load, Loader=yaml.CSafeLoader)
except AttributeError:
    yaml.load = functools.partial(yaml.load, Loader=yaml.SafeLoader)

# Load logging configuration from ./logging.yml when present; otherwise use
# a minimal config so the dictConfig() call below still succeeds.
try:
    with open('logging.yml') as file:
        LOG_CONFIG = yaml.load(file)
except FileNotFoundError:
    LOG_CONFIG = {'version': 1}
logging.config.dictConfig(LOG_CONFIG)
LOG = logging.getLogger(__name__)

# Logging level names ordered by severity, skipping NOTSET (numeric value 0).
log_levels = [name for x, name in sorted(logging._levelToName.items()) if x]
@click.command(
    context_settings={'help_option_names': ['-h', '--help']},
)
@click.argument('input', type=click.File('rb'))
@click.argument(
    'output',
    type=click.File('wt'),
    default=sys.stdout,
)
@click.option(
    '--dataset', metavar='NAME', help='Select a dataset by name. Defaults to the first dataset.'
)
@click.option(
    '--loglevel',
    metavar='LEVEL',
    type=click.Choice(log_levels, case_sensitive=False),
    help=f'Set logging level. {{{", ".join(log_levels[:-1])}}}',
)
@click.version_option(version=xport.__version__)
def cli(input, output, dataset, loglevel):
    """
    Convert SAS Transport (XPORT) files to comma-separated values (CSV).
    """
    if loglevel:
        # BUG FIX: when logging.yml is absent, LOG_CONFIG falls back to
        # {'version': 1}, which has no 'loggers' section; indexing it
        # directly raised KeyError whenever --loglevel was passed. Use a
        # tolerant lookup instead. (The loop key was also unused.)
        for config in LOG_CONFIG.get('loggers', {}).values():
            config['level'] = loglevel.upper()
        logging.config.dictConfig(LOG_CONFIG)
    LOG.debug('Xport version %s', xport.__version__)
    LOG.debug('CLI arg --loglevel = %r', loglevel)
    LOG.debug('Using logging config %s', json.dumps(LOG_CONFIG, indent=2))
    bytestring = input.read()
    # Dispatch on the file signature: v8/v9 transport files carry a distinct
    # header pattern; everything else is parsed as v5/v6.
    if xport.v89.Library.pattern.match(bytestring):
        library = xport.v89.loads(bytestring)
    else:
        library = xport.v56.loads(bytestring)
    if dataset is not None:
        ds = library[dataset]
    elif library:
        # No --dataset given: default to the first member dataset.
        ds = next(iter(library.values()))
    else:
        raise ValueError("Library has no member datasets")
    LOG.info(f'Selected dataset {ds.name!r}')
    ds.to_csv(output, index=False)
|
{
"content_hash": "f7234a5a253f25c7c6b6dc345203d654",
"timestamp": "",
"source": "github",
"line_count": 85,
"max_line_length": 97,
"avg_line_length": 26.235294117647058,
"alnum_prop": 0.6623318385650224,
"repo_name": "selik/xport",
"id": "5a97d769e3e545b5e7698d7ce955812031389f78",
"size": "2230",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/xport/cli.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "1483"
},
{
"name": "Python",
"bytes": "127113"
},
{
"name": "Shell",
"bytes": "3532"
}
],
"symlink_target": ""
}
|
import time
import fixtures
from oslo_config import cfg
from tempest.common.utils.linux import remote_client
from tempest import config
from tempest.lib import exceptions as lib_exc
from tempest.tests import base
from tempest.tests import fake_config
# Canned server records consumed by FakeServersClient below.
SERVER = {
    'id': 'server_uuid',
    'name': 'fake_server',
    'status': 'ACTIVE'
}
# A server in ERROR state: console-output requests for it raise BadRequest.
BROKEN_SERVER = {
    'id': 'broken_server_uuid',
    'name': 'broken_server',
    'status': 'ERROR'
}
class FakeServersClient(object):
    """Minimal stand-in for the compute servers client used by RemoteClient."""

    CONSOLE_OUTPUT = "Console output for %s"

    def get_console_output(self, server_id):
        """Return fake console output, or raise BadRequest for ERROR servers.

        A server_id that matches neither fixture is treated the same as a
        server in ERROR state.
        """
        matches = [s['status'] for s in (SERVER, BROKEN_SERVER)
                   if s['id'] == server_id]
        status = matches[-1] if matches else 'ERROR'
        if status == 'ERROR':
            raise lib_exc.BadRequest('Server in ERROR state')
        return {'output': self.CONSOLE_OUTPUT % server_id}
class TestRemoteClient(base.TestCase):
    """Unit tests for RemoteClient command helpers.

    The SSH transport is replaced by a mock, so each test only verifies the
    exact shell command string RemoteClient builds and how it parses the
    (canned) output.
    """

    def setUp(self):
        super(TestRemoteClient, self).setUp()
        self.useFixture(fake_config.ConfigFixture())
        self.patchobject(config, 'TempestConfigPrivate',
                         fake_config.FakePrivate)
        cfg.CONF.set_default('ip_version_for_ssh', 4, group='validation')
        cfg.CONF.set_default('network_for_ssh', 'public', group='validation')
        cfg.CONF.set_default('connect_timeout', 1, group='validation')
        # Mock out the SSH client so exec_command calls can be asserted
        # without any network access.
        self.conn = remote_client.RemoteClient('127.0.0.1', 'user', 'pass')
        self.ssh_mock = self.useFixture(fixtures.MockPatchObject(self.conn,
                                                                 'ssh_client'))

    def test_write_to_console_regular_str(self):
        self.conn.write_to_console('test')
        self._assert_exec_called_with(
            'sudo sh -c "echo \\"test\\" >/dev/console"')

    def _test_write_to_console_helper(self, message, expected_call):
        # Helper: write *message* and assert the escaped shell command.
        self.conn.write_to_console(message)
        self._assert_exec_called_with(expected_call)

    def test_write_to_console_special_chars(self):
        # Backticks and dollar signs must be escaped so the remote shell
        # does not expand them.
        self._test_write_to_console_helper(
            r'\`',
            'sudo sh -c "echo \\"\\\\\\`\\" >/dev/console"')
        self.conn.write_to_console('$')
        self._assert_exec_called_with(
            'sudo sh -c "echo \\"\\\\$\\" >/dev/console"')

    # NOTE(maurosr): The tests below end up closer to an output format
    # assurance than a test since it's basically using command_exec to format
    # the information using gnu/linux tools.

    def _assert_exec_called_with(self, cmd):
        # RemoteClient prefixes every command with this shell preamble.
        cmd = "set -eu -o pipefail; PATH=$PATH:/sbin:/usr/sbin; " + cmd
        self.ssh_mock.mock.exec_command.assert_called_with(cmd)

    def test_get_disks(self):
        # get_disks() must keep the header and 'disk' rows, dropping 'rom'.
        output_lsblk = """\
NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINT
sda 8:0 0 128035676160 0 disk
sdb 8:16 0 1000204886016 0 disk
sr0 11:0 1 1073741312 0 rom"""
        result = """\
NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINT
sda 8:0 0 128035676160 0 disk
sdb 8:16 0 1000204886016 0 disk"""
        self.ssh_mock.mock.exec_command.return_value = output_lsblk
        self.assertEqual(self.conn.get_disks(), result)
        self._assert_exec_called_with('lsblk -lb --nodeps')

    def test_list_disks(self):
        # list_disks() returns just the device names of 'disk' rows.
        output_lsblk = """\
NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINT
sda 8:0 0 128035676160 0 disk
sdb 8:16 0 1000204886016 0 disk
sr0 11:0 1 1073741312 0 rom"""
        disk_list = ['sda', 'sdb']
        self.ssh_mock.mock.exec_command.return_value = output_lsblk
        self.assertEqual(self.conn.list_disks(), disk_list)

    def test_get_boot_time(self):
        # Boot time = now - uptime; time.time() is pinned so the arithmetic
        # is deterministic.
        booted_at = 10000
        uptime_sec = 5000.02
        self.ssh_mock.mock.exec_command.return_value = uptime_sec
        self.useFixture(fixtures.MockPatchObject(
            time, 'time', return_value=booted_at + uptime_sec))
        self.assertEqual(self.conn.get_boot_time(),
                         time.localtime(booted_at))
        self._assert_exec_called_with('cut -f1 -d. /proc/uptime')

    def test_ping_host(self):
        ping_response = """PING localhost (127.0.0.1) 70(98) bytes of data.
78 bytes from localhost (127.0.0.1): icmp_req=1 ttl=64 time=0.048 ms
78 bytes from localhost (127.0.0.1): icmp_req=2 ttl=64 time=0.048 ms

--- localhost ping statistics ---
2 packets transmitted, 2 received, 0% packet loss, time 0ms
rtt min/avg/max/mdev = 0.048/0.048/0.048/0.000 ms"""
        self.ssh_mock.mock.exec_command.return_value = ping_response
        # count/size kwargs must translate into -c/-s flags (-w matches -c).
        self.assertEqual(self.conn.ping_host('127.0.0.1', count=2, size=70),
                         ping_response)
        self._assert_exec_called_with('ping -c2 -w2 -s70 127.0.0.1')

    def test_get_mac_address(self):
        macs = """0a:0b:0c:0d:0e:0f
a0:b0:c0:d0:e0:f0"""
        self.ssh_mock.mock.exec_command.return_value = macs
        self.assertEqual(self.conn.get_mac_address(), macs)
        self._assert_exec_called_with(
            "ip addr | awk '/ether/ {print $2}'")
class TestRemoteClientWithServer(base.TestCase):
    """Tests for the SSH-timeout debug path when a server record is attached.

    The SSH connection attempt is patched to always raise SSHTimeout, so
    each test asserts the diagnostic log output, including the server
    console log fetched via the (fake) servers client.
    """

    # Subclasses override this to exercise a broken server (see below).
    server = SERVER

    def setUp(self):
        super(TestRemoteClientWithServer, self).setUp()
        self.useFixture(fake_config.ConfigFixture())
        self.patchobject(config, 'TempestConfigPrivate',
                         fake_config.FakePrivate)
        cfg.CONF.set_default('ip_version_for_ssh', 4, group='validation')
        cfg.CONF.set_default('network_for_ssh', 'public',
                             group='validation')
        cfg.CONF.set_default('connect_timeout', 1, group='validation')
        # Enable console-output collection so the debug path tries to fetch
        # the server's console log on SSH failure.
        cfg.CONF.set_default('console_output', True,
                             group='compute-feature-enabled')
        self.conn = remote_client.RemoteClient(
            '127.0.0.1', 'user', 'pass',
            server=self.server, servers_client=FakeServersClient())
        # Force every SSH connection attempt to time out.
        self.useFixture(fixtures.MockPatch(
            'tempest.lib.common.ssh.Client._get_ssh_connection',
            side_effect=lib_exc.SSHTimeout(host='127.0.0.1',
                                           user='user',
                                           password='pass')))
        # Capture the remote_client logger so log output can be asserted.
        self.log = self.useFixture(fixtures.FakeLogger(
            name='tempest.lib.common.utils.linux.remote_client',
            level='DEBUG'))

    def test_validate_debug_ssh_console(self):
        self.assertRaises(lib_exc.SSHTimeout,
                          self.conn.validate_authentication)
        msg = 'Caller: %s. Timeout trying to ssh to server %s' % (
            'TestRemoteClientWithServer:test_validate_debug_ssh_console',
            self.server)
        self.assertIn(msg, self.log.output)
        self.assertIn('Console output for', self.log.output)

    def test_exec_command_debug_ssh_console(self):
        self.assertRaises(lib_exc.SSHTimeout,
                          self.conn.exec_command, 'fake command')
        # The attempted command itself must appear in the debug log.
        self.assertIn('fake command', self.log.output)
        msg = 'Caller: %s. Timeout trying to ssh to server %s' % (
            'TestRemoteClientWithServer:test_exec_command_debug_ssh_console',
            self.server)
        self.assertIn(msg, self.log.output)
        self.assertIn('Console output for', self.log.output)
class TestRemoteClientWithBrokenServer(TestRemoteClientWithServer):
    """Same timeout scenarios, but the console-log fetch itself fails.

    Uses BROKEN_SERVER (status ERROR), for which FakeServersClient raises
    BadRequest, so the log must record the failure to fetch the console log
    instead of the console output.
    """

    server = BROKEN_SERVER

    def test_validate_debug_ssh_console(self):
        self.assertRaises(lib_exc.SSHTimeout,
                          self.conn.validate_authentication)
        msg = 'Caller: %s. Timeout trying to ssh to server %s' % (
            'TestRemoteClientWithBrokenServer:test_validate_debug_ssh_console',
            self.server)
        self.assertIn(msg, self.log.output)
        # Console fetch failed -> the failure itself is logged.
        msg = 'Could not get console_log for server %s' % self.server['id']
        self.assertIn(msg, self.log.output)

    def test_exec_command_debug_ssh_console(self):
        self.assertRaises(lib_exc.SSHTimeout,
                          self.conn.exec_command, 'fake command')
        self.assertIn('fake command', self.log.output)
        caller = ":".join(['TestRemoteClientWithBrokenServer',
                           'test_exec_command_debug_ssh_console'])
        msg = 'Caller: %s. Timeout trying to ssh to server %s' % (
            caller, self.server)
        self.assertIn(msg, self.log.output)
        msg = 'Could not get console_log for server %s' % self.server['id']
        self.assertIn(msg, self.log.output)
|
{
"content_hash": "a446b071d77ccc67480777858a7a5050",
"timestamp": "",
"source": "github",
"line_count": 210,
"max_line_length": 79,
"avg_line_length": 40.40952380952381,
"alnum_prop": 0.6006363422107,
"repo_name": "openstack/tempest",
"id": "937f93a6fa86183be659f7d98c9bba9e9c419c8c",
"size": "9088",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tempest/tests/common/utils/linux/test_remote_client.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "5364077"
},
{
"name": "Shell",
"bytes": "8684"
}
],
"symlink_target": ""
}
|
"""InvenioStats views."""
from elasticsearch.exceptions import NotFoundError
from flask import Blueprint, abort, jsonify, request
from invenio_rest.views import ContentNegotiatedMethodView
from .errors import InvalidRequestInputError, UnknownQueryError
from .proxies import current_stats
from .utils import current_user
# Flask blueprint exposing the statistics REST API under /stats.
blueprint = Blueprint(
    'invenio_stats',
    __name__,
    url_prefix='/stats',
)
class StatsQueryResource(ContentNegotiatedMethodView):
    """REST API resource providing access to statistics."""

    view_name = 'stat_query'

    def __init__(self, **kwargs):
        """Constructor: serialize every response as JSON."""
        super(StatsQueryResource, self).__init__(
            serializers={
                'application/json':
                    lambda data, *args, **kwargs: jsonify(data),
            },
            default_method_media_type={
                'GET': 'application/json',
            },
            default_media_type='application/json',
            **kwargs)

    def post(self, **kwargs):
        """Get statistics.

        The JSON body maps result names to ``{"stat": STAT_TYPE,
        "params": STAT_PARAMS}`` descriptors. Each descriptor is resolved
        against the registered queries, permission-checked, executed, and
        the combined results are returned as one JSON object.

        :raises InvalidRequestInputError: on a malformed body or bad params.
        :raises UnknownQueryError: when a requested stat is not registered.
        """
        data = request.get_json(force=False)
        if data is None:
            data = {}
        result = {}
        for query_name, config in data.items():
            # Only {"stat": ...} or {"stat": ..., "params": ...} is valid.
            if config is None or not isinstance(config, dict) \
                    or (set(config.keys()) != {'stat', 'params'} and
                        set(config.keys()) != {'stat'}):
                raise InvalidRequestInputError(
                    'Invalid Input. It should be of the form '
                    '{ STATISTIC_NAME: { "stat": STAT_TYPE, '
                    '"params": STAT_PARAMS }}'
                )
            stat = config['stat']
            params = config.get('params', {})
            try:
                query_cfg = current_stats.queries[stat]
            except KeyError:
                raise UnknownQueryError(stat)
            permission = current_stats.permission_factory(stat, params)
            if permission is not None and not permission.can():
                message = ('You do not have a permission to query the '
                           'statistic "{}" with those '
                           'parameters'.format(stat))
                # 403 for authenticated users, 401 for anonymous ones.
                if current_user.is_authenticated:
                    abort(403, message)
                abort(401, message)
            # BUG FIX: construct the query outside the try block. If the
            # constructor itself raised NotFoundError, the old code
            # referenced the unbound name ``query`` in the handler below,
            # turning the error into a NameError.
            query = query_cfg.cls(name=query_name, **query_cfg.params)
            try:
                result[query_name] = query.run(**params)
            except ValueError as e:
                raise InvalidRequestInputError(e.args[0])
            except NotFoundError:
                # No index or value for the metric yet -- report zeros.
                result[query_name] = dict.fromkeys(
                    query.metric_fields.keys(), 0)
        return self.make_response(result)
# Module-level view function and its route registration: POST /stats handles
# statistics queries.
stats_view = StatsQueryResource.as_view(
    StatsQueryResource.view_name,
)
blueprint.add_url_rule(
    '',
    view_func=stats_view,
)
|
{
"content_hash": "ffe4c5e8c8f10b7ec3d759255fd4bf62",
"timestamp": "",
"source": "github",
"line_count": 85,
"max_line_length": 79,
"avg_line_length": 34.64705882352941,
"alnum_prop": 0.5500848896434635,
"repo_name": "inveniosoftware/invenio-stats",
"id": "be12bc5c8757d858491d4bdf78ceee7874157185",
"size": "3180",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "invenio_stats/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "161930"
},
{
"name": "Shell",
"bytes": "474"
}
],
"symlink_target": ""
}
|
import glob
import os
import sys

# NOTE(review): ah_bootstrap appears to be imported only for its side effect
# of making the bundled astropy_helpers importable -- the name is unused.
import ah_bootstrap
from setuptools import setup

# A dirty hack to get around some early import/configurations ambiguities
if sys.version_info[0] >= 3:
    import builtins
else:
    import __builtin__ as builtins
# Flag read by the package so it can tell it is being imported during setup.
builtins._ASTROPY_SETUP_ = True

from astropy_helpers.setup_helpers import (register_commands, adjust_compiler,
                                           get_debug_option, get_package_info)
from astropy_helpers.git_helpers import get_git_devstr
from astropy_helpers.version_helpers import generate_version_py

# Get some values from the setup.cfg
from distutils import config
conf = config.ConfigParser()
conf.read(['setup.cfg'])
metadata = dict(conf.items('metadata'))

PACKAGENAME = metadata.get('package_name', 'packagename')
DESCRIPTION = metadata.get('description', 'Astropy affiliated package')
AUTHOR = metadata.get('author', '')
AUTHOR_EMAIL = metadata.get('author_email', '')
LICENSE = metadata.get('license', 'unknown')
URL = metadata.get('url', 'http://astropy.org')

# Get the long description from the package's docstring
__import__(PACKAGENAME)
package = sys.modules[PACKAGENAME]
LONG_DESCRIPTION = package.__doc__

# Store the package name in a built-in variable so it's easy
# to get from other parts of the setup infrastructure
builtins._ASTROPY_PACKAGE_NAME_ = PACKAGENAME

# VERSION should be PEP386 compatible (http://www.python.org/dev/peps/pep-0386)
VERSION = '0.1.19.dev'

# Indicates if this version is a release version
RELEASE = 'dev' not in VERSION

if not RELEASE:
    # Append the git revision string so dev builds are distinguishable.
    VERSION += get_git_devstr(False)

# Populate the dict of setup command overrides; this should be done before
# invoking any other functionality from distutils since it can potentially
# modify distutils' behavior.
cmdclassd = register_commands(PACKAGENAME, VERSION, RELEASE)

# Adjust the compiler in case the default on this platform is to use a
# broken one.
adjust_compiler(PACKAGENAME)

# Freeze build information in version.py
generate_version_py(PACKAGENAME, VERSION, RELEASE,
                    get_debug_option(PACKAGENAME))

# Treat everything in scripts except README.rst as a script to be installed
scripts = [fname for fname in glob.glob(os.path.join('scripts', '*'))
           if os.path.basename(fname) != 'README.rst']

# Get configuration information from all of the various subpackages.
# See the docstring for setup_helpers.update_package_files for more
# details.
package_info = get_package_info()

# Add the project-global data
package_info['package_data'].setdefault(PACKAGENAME, [])
package_info['package_data'][PACKAGENAME].append('data/*')

# Define entry points for command-line scripts
entry_points = {}
entry_points['console_scripts'] = [
    'astropy-package-template-example = packagename.example_mod:main',
]

# Include all .c files, recursively, including those generated by
# Cython, since we can not do this in MANIFEST.in with a "dynamic"
# directory name.
c_files = []
for root, dirs, files in os.walk(PACKAGENAME):
    for filename in files:
        if filename.endswith('.c'):
            c_files.append(
                os.path.join(
                    os.path.relpath(root, PACKAGENAME), filename))
package_info['package_data'][PACKAGENAME].extend(c_files)

setup(name=PACKAGENAME,
      version=VERSION,
      description=DESCRIPTION,
      scripts=scripts,
      requires=['astropy', 'matplotlib', 'numpy'],
      install_requires=['astropy', 'numpy', 'matplotlib>=1.4'],
      provides=[PACKAGENAME],
      author=AUTHOR,
      author_email=AUTHOR_EMAIL,
      license=LICENSE,
      url=URL,
      long_description=LONG_DESCRIPTION,
      cmdclass=cmdclassd,
      zip_safe=False,
      use_2to3=False,
      entry_points=entry_points,
      **package_info
      )
|
{
"content_hash": "8409219be006f47a13d6e4e259c35a56",
"timestamp": "",
"source": "github",
"line_count": 114,
"max_line_length": 79,
"avg_line_length": 32.91228070175438,
"alnum_prop": 0.7132196162046909,
"repo_name": "mikelum/pyspeckit",
"id": "51c16f3d140a1b5fff439483970dd76a34306519",
"size": "3839",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ApacheConf",
"bytes": "53"
},
{
"name": "Python",
"bytes": "1198082"
},
{
"name": "Shell",
"bytes": "313"
}
],
"symlink_target": ""
}
|
'''OpenGL extension NV.half_float
Automatically generated by the get_gl_extensions script, do not edit!
'''
from OpenGL import platform, constants, constant, arrays
from OpenGL import extensions
from OpenGL.GL import glget
import ctypes
EXTENSION_NAME = 'GL_NV_half_float'
_DEPRECATED = False
GL_HALF_FLOAT_NV = constant.Constant( 'GL_HALF_FLOAT_NV', 0x140B )
glVertex2hNV = platform.createExtensionFunction(
'glVertex2hNV',dll=platform.GL,
extension=EXTENSION_NAME,
resultType=None,
argTypes=(constants.GLhalfNV,constants.GLhalfNV,),
doc='glVertex2hNV(GLhalfNV(x), GLhalfNV(y)) -> None',
argNames=('x','y',),
deprecated=_DEPRECATED,
)
glVertex2hvNV = platform.createExtensionFunction(
'glVertex2hvNV',dll=platform.GL,
extension=EXTENSION_NAME,
resultType=None,
argTypes=(arrays.GLushortArray,),
doc='glVertex2hvNV(GLushortArray(v)) -> None',
argNames=('v',),
deprecated=_DEPRECATED,
)
glVertex3hNV = platform.createExtensionFunction(
'glVertex3hNV',dll=platform.GL,
extension=EXTENSION_NAME,
resultType=None,
argTypes=(constants.GLhalfNV,constants.GLhalfNV,constants.GLhalfNV,),
doc='glVertex3hNV(GLhalfNV(x), GLhalfNV(y), GLhalfNV(z)) -> None',
argNames=('x','y','z',),
deprecated=_DEPRECATED,
)
glVertex3hvNV = platform.createExtensionFunction(
'glVertex3hvNV',dll=platform.GL,
extension=EXTENSION_NAME,
resultType=None,
argTypes=(arrays.GLushortArray,),
doc='glVertex3hvNV(GLushortArray(v)) -> None',
argNames=('v',),
deprecated=_DEPRECATED,
)
glVertex4hNV = platform.createExtensionFunction(
'glVertex4hNV',dll=platform.GL,
extension=EXTENSION_NAME,
resultType=None,
argTypes=(constants.GLhalfNV,constants.GLhalfNV,constants.GLhalfNV,constants.GLhalfNV,),
doc='glVertex4hNV(GLhalfNV(x), GLhalfNV(y), GLhalfNV(z), GLhalfNV(w)) -> None',
argNames=('x','y','z','w',),
deprecated=_DEPRECATED,
)
glVertex4hvNV = platform.createExtensionFunction(
'glVertex4hvNV',dll=platform.GL,
extension=EXTENSION_NAME,
resultType=None,
argTypes=(arrays.GLushortArray,),
doc='glVertex4hvNV(GLushortArray(v)) -> None',
argNames=('v',),
deprecated=_DEPRECATED,
)
glNormal3hNV = platform.createExtensionFunction(
'glNormal3hNV',dll=platform.GL,
extension=EXTENSION_NAME,
resultType=None,
argTypes=(constants.GLhalfNV,constants.GLhalfNV,constants.GLhalfNV,),
doc='glNormal3hNV(GLhalfNV(nx), GLhalfNV(ny), GLhalfNV(nz)) -> None',
argNames=('nx','ny','nz',),
deprecated=_DEPRECATED,
)
glNormal3hvNV = platform.createExtensionFunction(
'glNormal3hvNV',dll=platform.GL,
extension=EXTENSION_NAME,
resultType=None,
argTypes=(arrays.GLushortArray,),
doc='glNormal3hvNV(GLushortArray(v)) -> None',
argNames=('v',),
deprecated=_DEPRECATED,
)
glColor3hNV = platform.createExtensionFunction(
'glColor3hNV',dll=platform.GL,
extension=EXTENSION_NAME,
resultType=None,
argTypes=(constants.GLhalfNV,constants.GLhalfNV,constants.GLhalfNV,),
doc='glColor3hNV(GLhalfNV(red), GLhalfNV(green), GLhalfNV(blue)) -> None',
argNames=('red','green','blue',),
deprecated=_DEPRECATED,
)
glColor3hvNV = platform.createExtensionFunction(
'glColor3hvNV',dll=platform.GL,
extension=EXTENSION_NAME,
resultType=None,
argTypes=(arrays.GLushortArray,),
doc='glColor3hvNV(GLushortArray(v)) -> None',
argNames=('v',),
deprecated=_DEPRECATED,
)
glColor4hNV = platform.createExtensionFunction(
'glColor4hNV',dll=platform.GL,
extension=EXTENSION_NAME,
resultType=None,
argTypes=(constants.GLhalfNV,constants.GLhalfNV,constants.GLhalfNV,constants.GLhalfNV,),
doc='glColor4hNV(GLhalfNV(red), GLhalfNV(green), GLhalfNV(blue), GLhalfNV(alpha)) -> None',
argNames=('red','green','blue','alpha',),
deprecated=_DEPRECATED,
)
glColor4hvNV = platform.createExtensionFunction(
'glColor4hvNV',dll=platform.GL,
extension=EXTENSION_NAME,
resultType=None,
argTypes=(arrays.GLushortArray,),
doc='glColor4hvNV(GLushortArray(v)) -> None',
argNames=('v',),
deprecated=_DEPRECATED,
)
glTexCoord1hNV = platform.createExtensionFunction(
'glTexCoord1hNV',dll=platform.GL,
extension=EXTENSION_NAME,
resultType=None,
argTypes=(constants.GLhalfNV,),
doc='glTexCoord1hNV(GLhalfNV(s)) -> None',
argNames=('s',),
deprecated=_DEPRECATED,
)
glTexCoord1hvNV = platform.createExtensionFunction(
'glTexCoord1hvNV',dll=platform.GL,
extension=EXTENSION_NAME,
resultType=None,
argTypes=(arrays.GLushortArray,),
doc='glTexCoord1hvNV(GLushortArray(v)) -> None',
argNames=('v',),
deprecated=_DEPRECATED,
)
glTexCoord2hNV = platform.createExtensionFunction(
'glTexCoord2hNV',dll=platform.GL,
extension=EXTENSION_NAME,
resultType=None,
argTypes=(constants.GLhalfNV,constants.GLhalfNV,),
doc='glTexCoord2hNV(GLhalfNV(s), GLhalfNV(t)) -> None',
argNames=('s','t',),
deprecated=_DEPRECATED,
)
glTexCoord2hvNV = platform.createExtensionFunction(
'glTexCoord2hvNV',dll=platform.GL,
extension=EXTENSION_NAME,
resultType=None,
argTypes=(arrays.GLushortArray,),
doc='glTexCoord2hvNV(GLushortArray(v)) -> None',
argNames=('v',),
deprecated=_DEPRECATED,
)
glTexCoord3hNV = platform.createExtensionFunction(
'glTexCoord3hNV',dll=platform.GL,
extension=EXTENSION_NAME,
resultType=None,
argTypes=(constants.GLhalfNV,constants.GLhalfNV,constants.GLhalfNV,),
doc='glTexCoord3hNV(GLhalfNV(s), GLhalfNV(t), GLhalfNV(r)) -> None',
argNames=('s','t','r',),
deprecated=_DEPRECATED,
)
glTexCoord3hvNV = platform.createExtensionFunction(
'glTexCoord3hvNV',dll=platform.GL,
extension=EXTENSION_NAME,
resultType=None,
argTypes=(arrays.GLushortArray,),
doc='glTexCoord3hvNV(GLushortArray(v)) -> None',
argNames=('v',),
deprecated=_DEPRECATED,
)
glTexCoord4hNV = platform.createExtensionFunction(
'glTexCoord4hNV',dll=platform.GL,
extension=EXTENSION_NAME,
resultType=None,
argTypes=(constants.GLhalfNV,constants.GLhalfNV,constants.GLhalfNV,constants.GLhalfNV,),
doc='glTexCoord4hNV(GLhalfNV(s), GLhalfNV(t), GLhalfNV(r), GLhalfNV(q)) -> None',
argNames=('s','t','r','q',),
deprecated=_DEPRECATED,
)
glTexCoord4hvNV = platform.createExtensionFunction(
'glTexCoord4hvNV',dll=platform.GL,
extension=EXTENSION_NAME,
resultType=None,
argTypes=(arrays.GLushortArray,),
doc='glTexCoord4hvNV(GLushortArray(v)) -> None',
argNames=('v',),
deprecated=_DEPRECATED,
)
glMultiTexCoord1hNV = platform.createExtensionFunction(
'glMultiTexCoord1hNV',dll=platform.GL,
extension=EXTENSION_NAME,
resultType=None,
argTypes=(constants.GLenum,constants.GLhalfNV,),
doc='glMultiTexCoord1hNV(GLenum(target), GLhalfNV(s)) -> None',
argNames=('target','s',),
deprecated=_DEPRECATED,
)
glMultiTexCoord1hvNV = platform.createExtensionFunction(
'glMultiTexCoord1hvNV',dll=platform.GL,
extension=EXTENSION_NAME,
resultType=None,
argTypes=(constants.GLenum,arrays.GLushortArray,),
doc='glMultiTexCoord1hvNV(GLenum(target), GLushortArray(v)) -> None',
argNames=('target','v',),
deprecated=_DEPRECATED,
)
glMultiTexCoord2hNV = platform.createExtensionFunction(
'glMultiTexCoord2hNV',dll=platform.GL,
extension=EXTENSION_NAME,
resultType=None,
argTypes=(constants.GLenum,constants.GLhalfNV,constants.GLhalfNV,),
doc='glMultiTexCoord2hNV(GLenum(target), GLhalfNV(s), GLhalfNV(t)) -> None',
argNames=('target','s','t',),
deprecated=_DEPRECATED,
)
glMultiTexCoord2hvNV = platform.createExtensionFunction(
'glMultiTexCoord2hvNV',dll=platform.GL,
extension=EXTENSION_NAME,
resultType=None,
argTypes=(constants.GLenum,arrays.GLushortArray,),
doc='glMultiTexCoord2hvNV(GLenum(target), GLushortArray(v)) -> None',
argNames=('target','v',),
deprecated=_DEPRECATED,
)
glMultiTexCoord3hNV = platform.createExtensionFunction(
'glMultiTexCoord3hNV',dll=platform.GL,
extension=EXTENSION_NAME,
resultType=None,
argTypes=(constants.GLenum,constants.GLhalfNV,constants.GLhalfNV,constants.GLhalfNV,),
doc='glMultiTexCoord3hNV(GLenum(target), GLhalfNV(s), GLhalfNV(t), GLhalfNV(r)) -> None',
argNames=('target','s','t','r',),
deprecated=_DEPRECATED,
)
glMultiTexCoord3hvNV = platform.createExtensionFunction(
'glMultiTexCoord3hvNV',dll=platform.GL,
extension=EXTENSION_NAME,
resultType=None,
argTypes=(constants.GLenum,arrays.GLushortArray,),
doc='glMultiTexCoord3hvNV(GLenum(target), GLushortArray(v)) -> None',
argNames=('target','v',),
deprecated=_DEPRECATED,
)
glMultiTexCoord4hNV = platform.createExtensionFunction(
'glMultiTexCoord4hNV',dll=platform.GL,
extension=EXTENSION_NAME,
resultType=None,
argTypes=(constants.GLenum,constants.GLhalfNV,constants.GLhalfNV,constants.GLhalfNV,constants.GLhalfNV,),
doc='glMultiTexCoord4hNV(GLenum(target), GLhalfNV(s), GLhalfNV(t), GLhalfNV(r), GLhalfNV(q)) -> None',
argNames=('target','s','t','r','q',),
deprecated=_DEPRECATED,
)
glMultiTexCoord4hvNV = platform.createExtensionFunction(
'glMultiTexCoord4hvNV',dll=platform.GL,
extension=EXTENSION_NAME,
resultType=None,
argTypes=(constants.GLenum,arrays.GLushortArray,),
doc='glMultiTexCoord4hvNV(GLenum(target), GLushortArray(v)) -> None',
argNames=('target','v',),
deprecated=_DEPRECATED,
)
glFogCoordhNV = platform.createExtensionFunction(
'glFogCoordhNV',dll=platform.GL,
extension=EXTENSION_NAME,
resultType=None,
argTypes=(constants.GLhalfNV,),
doc='glFogCoordhNV(GLhalfNV(fog)) -> None',
argNames=('fog',),
deprecated=_DEPRECATED,
)
glFogCoordhvNV = platform.createExtensionFunction(
'glFogCoordhvNV',dll=platform.GL,
extension=EXTENSION_NAME,
resultType=None,
argTypes=(arrays.GLushortArray,),
doc='glFogCoordhvNV(GLushortArray(fog)) -> None',
argNames=('fog',),
deprecated=_DEPRECATED,
)
glSecondaryColor3hNV = platform.createExtensionFunction(
'glSecondaryColor3hNV',dll=platform.GL,
extension=EXTENSION_NAME,
resultType=None,
argTypes=(constants.GLhalfNV,constants.GLhalfNV,constants.GLhalfNV,),
doc='glSecondaryColor3hNV(GLhalfNV(red), GLhalfNV(green), GLhalfNV(blue)) -> None',
argNames=('red','green','blue',),
deprecated=_DEPRECATED,
)
glSecondaryColor3hvNV = platform.createExtensionFunction(
'glSecondaryColor3hvNV',dll=platform.GL,
extension=EXTENSION_NAME,
resultType=None,
argTypes=(arrays.GLushortArray,),
doc='glSecondaryColor3hvNV(GLushortArray(v)) -> None',
argNames=('v',),
deprecated=_DEPRECATED,
)
glVertexWeighthNV = platform.createExtensionFunction(
'glVertexWeighthNV',dll=platform.GL,
extension=EXTENSION_NAME,
resultType=None,
argTypes=(constants.GLhalfNV,),
doc='glVertexWeighthNV(GLhalfNV(weight)) -> None',
argNames=('weight',),
deprecated=_DEPRECATED,
)
glVertexWeighthvNV = platform.createExtensionFunction(
'glVertexWeighthvNV',dll=platform.GL,
extension=EXTENSION_NAME,
resultType=None,
argTypes=(arrays.GLushortArray,),
doc='glVertexWeighthvNV(GLushortArray(weight)) -> None',
argNames=('weight',),
deprecated=_DEPRECATED,
)
glVertexAttrib1hNV = platform.createExtensionFunction(
'glVertexAttrib1hNV',dll=platform.GL,
extension=EXTENSION_NAME,
resultType=None,
argTypes=(constants.GLuint,constants.GLhalfNV,),
doc='glVertexAttrib1hNV(GLuint(index), GLhalfNV(x)) -> None',
argNames=('index','x',),
deprecated=_DEPRECATED,
)
glVertexAttrib1hvNV = platform.createExtensionFunction(
'glVertexAttrib1hvNV',dll=platform.GL,
extension=EXTENSION_NAME,
resultType=None,
argTypes=(constants.GLuint,arrays.GLushortArray,),
doc='glVertexAttrib1hvNV(GLuint(index), GLushortArray(v)) -> None',
argNames=('index','v',),
deprecated=_DEPRECATED,
)
glVertexAttrib2hNV = platform.createExtensionFunction(
'glVertexAttrib2hNV',dll=platform.GL,
extension=EXTENSION_NAME,
resultType=None,
argTypes=(constants.GLuint,constants.GLhalfNV,constants.GLhalfNV,),
doc='glVertexAttrib2hNV(GLuint(index), GLhalfNV(x), GLhalfNV(y)) -> None',
argNames=('index','x','y',),
deprecated=_DEPRECATED,
)
glVertexAttrib2hvNV = platform.createExtensionFunction(
'glVertexAttrib2hvNV',dll=platform.GL,
extension=EXTENSION_NAME,
resultType=None,
argTypes=(constants.GLuint,arrays.GLushortArray,),
doc='glVertexAttrib2hvNV(GLuint(index), GLushortArray(v)) -> None',
argNames=('index','v',),
deprecated=_DEPRECATED,
)
glVertexAttrib3hNV = platform.createExtensionFunction(
'glVertexAttrib3hNV',dll=platform.GL,
extension=EXTENSION_NAME,
resultType=None,
argTypes=(constants.GLuint,constants.GLhalfNV,constants.GLhalfNV,constants.GLhalfNV,),
doc='glVertexAttrib3hNV(GLuint(index), GLhalfNV(x), GLhalfNV(y), GLhalfNV(z)) -> None',
argNames=('index','x','y','z',),
deprecated=_DEPRECATED,
)
glVertexAttrib3hvNV = platform.createExtensionFunction(
'glVertexAttrib3hvNV',dll=platform.GL,
extension=EXTENSION_NAME,
resultType=None,
argTypes=(constants.GLuint,arrays.GLushortArray,),
doc='glVertexAttrib3hvNV(GLuint(index), GLushortArray(v)) -> None',
argNames=('index','v',),
deprecated=_DEPRECATED,
)
glVertexAttrib4hNV = platform.createExtensionFunction(
'glVertexAttrib4hNV',dll=platform.GL,
extension=EXTENSION_NAME,
resultType=None,
argTypes=(constants.GLuint,constants.GLhalfNV,constants.GLhalfNV,constants.GLhalfNV,constants.GLhalfNV,),
doc='glVertexAttrib4hNV(GLuint(index), GLhalfNV(x), GLhalfNV(y), GLhalfNV(z), GLhalfNV(w)) -> None',
argNames=('index','x','y','z','w',),
deprecated=_DEPRECATED,
)
glVertexAttrib4hvNV = platform.createExtensionFunction(
'glVertexAttrib4hvNV',dll=platform.GL,
extension=EXTENSION_NAME,
resultType=None,
argTypes=(constants.GLuint,arrays.GLushortArray,),
doc='glVertexAttrib4hvNV(GLuint(index), GLushortArray(v)) -> None',
argNames=('index','v',),
deprecated=_DEPRECATED,
)
glVertexAttribs1hvNV = platform.createExtensionFunction(
'glVertexAttribs1hvNV',dll=platform.GL,
extension=EXTENSION_NAME,
resultType=None,
argTypes=(constants.GLuint,constants.GLsizei,arrays.GLushortArray,),
doc='glVertexAttribs1hvNV(GLuint(index), GLsizei(n), GLushortArray(v)) -> None',
argNames=('index','n','v',),
deprecated=_DEPRECATED,
)
glVertexAttribs2hvNV = platform.createExtensionFunction(
'glVertexAttribs2hvNV',dll=platform.GL,
extension=EXTENSION_NAME,
resultType=None,
argTypes=(constants.GLuint,constants.GLsizei,arrays.GLushortArray,),
doc='glVertexAttribs2hvNV(GLuint(index), GLsizei(n), GLushortArray(v)) -> None',
argNames=('index','n','v',),
deprecated=_DEPRECATED,
)
glVertexAttribs3hvNV = platform.createExtensionFunction(
'glVertexAttribs3hvNV',dll=platform.GL,
extension=EXTENSION_NAME,
resultType=None,
argTypes=(constants.GLuint,constants.GLsizei,arrays.GLushortArray,),
doc='glVertexAttribs3hvNV(GLuint(index), GLsizei(n), GLushortArray(v)) -> None',
argNames=('index','n','v',),
deprecated=_DEPRECATED,
)
glVertexAttribs4hvNV = platform.createExtensionFunction(
'glVertexAttribs4hvNV',dll=platform.GL,
extension=EXTENSION_NAME,
resultType=None,
argTypes=(constants.GLuint,constants.GLsizei,arrays.GLushortArray,),
doc='glVertexAttribs4hvNV(GLuint(index), GLsizei(n), GLushortArray(v)) -> None',
argNames=('index','n','v',),
deprecated=_DEPRECATED,
)
def glInitHalfFloatNV():
    '''Return boolean indicating whether the EXTENSION_NAME extension is
    available in the current GL context.'''
    return extensions.hasGLExtension( EXTENSION_NAME )
|
{
"content_hash": "ff0b9b1044668cde9e298027c0df5abf",
"timestamp": "",
"source": "github",
"line_count": 475,
"max_line_length": 105,
"avg_line_length": 30.934736842105263,
"alnum_prop": 0.7786851776235199,
"repo_name": "Universal-Model-Converter/UMC3.0a",
"id": "0c30778259babbfcff248accd0d43d3e7441594d",
"size": "14694",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "data/Python/x86/Lib/site-packages/OpenGL/raw/GL/NV/half_float.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "226"
},
{
"name": "C",
"bytes": "1082640"
},
{
"name": "C#",
"bytes": "8440"
},
{
"name": "C++",
"bytes": "3621086"
},
{
"name": "CSS",
"bytes": "6226"
},
{
"name": "F#",
"bytes": "2310"
},
{
"name": "FORTRAN",
"bytes": "7795"
},
{
"name": "Forth",
"bytes": "506"
},
{
"name": "GLSL",
"bytes": "1040"
},
{
"name": "Groff",
"bytes": "5943"
},
{
"name": "HTML",
"bytes": "1196266"
},
{
"name": "Java",
"bytes": "5793"
},
{
"name": "Makefile",
"bytes": "1109"
},
{
"name": "Mask",
"bytes": "969"
},
{
"name": "Matlab",
"bytes": "4346"
},
{
"name": "Python",
"bytes": "33351557"
},
{
"name": "R",
"bytes": "1370"
},
{
"name": "Shell",
"bytes": "6931"
},
{
"name": "Tcl",
"bytes": "2084458"
},
{
"name": "Visual Basic",
"bytes": "481"
}
],
"symlink_target": ""
}
|
# Staging-environment overrides.
SITE_URL = 'http://stage.takeyourmeds.co.uk'
# presumably disables outbound Twilio messaging on staging — confirm how
# callers gate on this flag
TWILIO_ENABLED = False
|
{
"content_hash": "ec1768ba052342d18150a350987ff967",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 44,
"avg_line_length": 34,
"alnum_prop": 0.7352941176470589,
"repo_name": "takeyourmeds/takeyourmeds-web",
"id": "46e270147aa03e8d5092d8e2b19e5fe1e085c585",
"size": "68",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "takeyourmeds/settings/roles/stage.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "266001"
},
{
"name": "HTML",
"bytes": "80882"
},
{
"name": "JavaScript",
"bytes": "248719"
},
{
"name": "Nginx",
"bytes": "1013"
},
{
"name": "Python",
"bytes": "107863"
},
{
"name": "Shell",
"bytes": "918"
}
],
"symlink_target": ""
}
|
"""List firewalls."""
# :license: MIT, see LICENSE for more details.
import SoftLayer
from SoftLayer.CLI import environment
from SoftLayer.CLI import formatting
from SoftLayer import utils
import click
@click.command()
@environment.pass_env
def cli(env):
    """List firewalls.

    Emits one row per dedicated (VLAN-level) firewall, followed by one row
    per standard (component-level) firewall found on shared VLANs.
    """
    firewall_mgr = SoftLayer.FirewallManager(env.client)
    table = formatting.Table(['firewall id',
                              'type',
                              'features',
                              'server/vlan id'])
    all_vlans = firewall_mgr.get_firewalls()

    # Dedicated firewalls: one firewall protecting an entire VLAN.
    for fwvlan in (v for v in all_vlans if v['dedicatedFirewallFlag']):
        features = ['HA'] if fwvlan['highAvailabilityFirewallFlag'] else []
        feature_cell = (formatting.listing(features, separator=',')
                        if features else formatting.blank())
        table.add_row([
            'vlan:%s' % fwvlan['networkVlanFirewall']['id'],
            'VLAN - dedicated',
            feature_cell,
            fwvlan['id'],
        ])

    # Standard firewalls: attached per virtual-server or bare-metal component.
    for fwvlan in (v for v in all_vlans if not v['dedicatedFirewallFlag']):
        for component in fwvlan['firewallGuestNetworkComponents']:
            if not has_firewall_component(component):
                continue
            table.add_row([
                'vs:%s' % component['id'],
                'Virtual Server - standard',
                '-',
                component['guestNetworkComponent']['guest']['id'],
            ])
        for component in fwvlan['firewallNetworkComponents']:
            if not has_firewall_component(component):
                continue
            table.add_row([
                'server:%s' % component['id'],
                'Server - standard',
                '-',
                utils.lookup(component,
                             'networkComponent',
                             'downlinkComponent',
                             'hardwareId'),
            ])

    env.fout(table)
def has_firewall_component(server):
    """Helper to determine whether or not a server has a firewall.

    :param dict server: A dictionary representing a server
    :returns: True if the server has a firewall (i.e. its firewall record
        status is anything other than 'no_edit').
    """
    return server['status'] != 'no_edit'
|
{
"content_hash": "de15b2037627ab1e9f7df9231a33fd18",
"timestamp": "",
"source": "github",
"line_count": 85,
"max_line_length": 75,
"avg_line_length": 30.188235294117646,
"alnum_prop": 0.5288386593920499,
"repo_name": "iftekeriba/softlayer-python",
"id": "ff8f7c8e6a6abb36c58d6874ed3ffd510decf8ea",
"size": "2566",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "SoftLayer/CLI/firewall/list.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "DIGITAL Command Language",
"bytes": "854"
},
{
"name": "Python",
"bytes": "744378"
}
],
"symlink_target": ""
}
|
from hvac.api.system_backend.system_backend_mixin import SystemBackendMixin
from hvac.exceptions import ParamValidationError
class Key(SystemBackendMixin):
    """System-backend methods for Vault key management: root-token
    generation, encryption-key status/rotation, and rekey/verify flows.
    """
    def read_root_generation_progress(self):
        """Read the configuration and process of the current root generation attempt.
        Supported methods:
            GET: /sys/generate-root/attempt. Produces: 200 application/json
        :return: The JSON response of the request.
        :rtype: dict
        """
        api_path = "/v1/sys/generate-root/attempt"
        return self._adapter.get(
            url=api_path,
        )
    def start_root_token_generation(self, otp=None, pgp_key=None):
        """Initialize a new root generation attempt.
        Only a single root generation attempt can take place at a time. One (and only one) of otp or pgp_key are
        required.
        Supported methods:
            PUT: /sys/generate-root/attempt. Produces: 200 application/json
        :param otp: Specifies a base64-encoded 16-byte value. The raw bytes of the token will be XOR'd with this value
            before being returned to the final unseal key provider.
        :type otp: str | unicode
        :param pgp_key: Specifies a base64-encoded PGP public key. The raw bytes of the token will be encrypted with
            this value before being returned to the final unseal key provider.
        :type pgp_key: str | unicode
        :return: The JSON response of the request.
        :rtype: dict
        """
        params = {}
        # otp and pgp_key are mutually exclusive delivery mechanisms.
        if otp is not None and pgp_key is not None:
            raise ParamValidationError(
                "one (and only one) of otp or pgp_key arguments are required"
            )
        if otp is not None:
            params["otp"] = otp
        if pgp_key is not None:
            params["pgp_key"] = pgp_key
        api_path = "/v1/sys/generate-root/attempt"
        return self._adapter.put(url=api_path, json=params)
    def generate_root(self, key, nonce):
        """Enter a single master key share to progress the root generation attempt.
        If the threshold number of master key shares is reached, Vault will complete the root generation and issue the
        new token. Otherwise, this API must be called multiple times until that threshold is met. The attempt nonce must
        be provided with each call.
        Supported methods:
            PUT: /sys/generate-root/update. Produces: 200 application/json
        :param key: Specifies a single master key share.
        :type key: str | unicode
        :param nonce: The nonce of the attempt.
        :type nonce: str | unicode
        :return: The JSON response of the request.
        :rtype: dict
        """
        params = {
            "key": key,
            "nonce": nonce,
        }
        api_path = "/v1/sys/generate-root/update"
        return self._adapter.put(
            url=api_path,
            json=params,
        )
    def cancel_root_generation(self):
        """Cancel any in-progress root generation attempt.
        This clears any progress made. This must be called to change the OTP or PGP key being used.
        Supported methods:
            DELETE: /sys/generate-root/attempt. Produces: 204 (empty body)
        :return: The response of the request.
        :rtype: request.Response
        """
        api_path = "/v1/sys/generate-root/attempt"
        return self._adapter.delete(
            url=api_path,
        )
    def get_encryption_key_status(self):
        """Read information about the current encryption key used by Vault.
        Supported methods:
            GET: /sys/key-status. Produces: 200 application/json
        :return: JSON response with information regarding the current encryption key used by Vault.
        :rtype: dict
        """
        api_path = "/v1/sys/key-status"
        return self._adapter.get(
            url=api_path,
        )
    def rotate_encryption_key(self):
        """Trigger a rotation of the backend encryption key.
        This is the key that is used to encrypt data written to the storage backend, and is not provided to operators.
        This operation is done online. Future values are encrypted with the new key, while old values are decrypted with
        previous encryption keys.
        This path requires sudo capability in addition to update.
        Supported methods:
            PUT: /sys/rotate. Produces: 204 (empty body)
        :return: The response of the request.
        :rtype: requests.Response
        """
        api_path = "/v1/sys/rotate"
        return self._adapter.put(
            url=api_path,
        )
    def read_rekey_progress(self, recovery_key=False):
        """Read the configuration and progress of the current rekey attempt.
        Supported methods:
            GET: /sys/rekey-recovery-key/init. Produces: 200 application/json
            GET: /sys/rekey/init. Produces: 200 application/json
        :param recovery_key: If true, send requests to "rekey-recovery-key" instead of "rekey" api path.
        :type recovery_key: bool
        :return: The JSON response of the request.
        :rtype: dict
        """
        api_path = "/v1/sys/rekey/init"
        if recovery_key:
            api_path = "/v1/sys/rekey-recovery-key/init"
        return self._adapter.get(
            url=api_path,
        )
    def start_rekey(
        self,
        secret_shares=5,
        secret_threshold=3,
        pgp_keys=None,
        backup=False,
        require_verification=False,
        recovery_key=False,
    ):
        """Initializes a new rekey attempt.
        Only a single recovery key rekey attempt can take place at a time, and changing the parameters of a rekey
        requires canceling and starting a new rekey, which will also provide a new nonce.
        Supported methods:
            PUT: /sys/rekey/init. Produces: 204 (empty body)
            PUT: /sys/rekey-recovery-key/init. Produces: 204 (empty body)
        :param secret_shares: Specifies the number of shares to split the master key into.
        :type secret_shares: int
        :param secret_threshold: Specifies the number of shares required to reconstruct the master key. This must be
            less than or equal to secret_shares.
        :type secret_threshold: int
        :param pgp_keys: Specifies an array of PGP public keys used to encrypt the output unseal keys. Ordering is
            preserved. The keys must be base64-encoded from their original binary representation. The size of this array
            must be the same as secret_shares.
        :type pgp_keys: list
        :param backup: Specifies if using PGP-encrypted keys, whether Vault should also store a plaintext backup of the
            PGP-encrypted keys at core/unseal-keys-backup in the physical storage backend. These can then be retrieved
            and removed via the sys/rekey/backup endpoint.
        :type backup: bool
        :param require_verification: This turns on verification functionality. When verification is turned on, after
            successful authorization with the current unseal keys, the new unseal keys are returned but the master key
            is not actually rotated. The new keys must be provided to authorize the actual rotation of the master key.
            This ensures that the new keys have been successfully saved and protects against a risk of the keys being
            lost after rotation but before they can be persisted. This can be used with or without pgp_keys, and when used
            with it, it allows ensuring that the returned keys can be successfully decrypted before committing to the
            new shares, which the backup functionality does not provide.
        :param recovery_key: If true, send requests to "rekey-recovery-key" instead of "rekey" api path.
        :type recovery_key: bool
        :type require_verification: bool
        :return: The JSON dict of the response.
        :rtype: dict | request.Response
        """
        params = {
            "secret_shares": secret_shares,
            "secret_threshold": secret_threshold,
            "require_verification": require_verification,
        }
        if pgp_keys:
            # One PGP key is required per generated share.
            if len(pgp_keys) != secret_shares:
                raise ParamValidationError(
                    "length of pgp_keys argument must equal secret shares value"
                )
            params["pgp_keys"] = pgp_keys
            params["backup"] = backup
        api_path = "/v1/sys/rekey/init"
        if recovery_key:
            api_path = "/v1/sys/rekey-recovery-key/init"
        return self._adapter.put(
            url=api_path,
            json=params,
        )
    def cancel_rekey(self, recovery_key=False):
        """Cancel any in-progress rekey.
        This clears the rekey settings as well as any progress made. This must be called to change the parameters of the
        rekey.
        Note: Verification is still a part of a rekey. If rekeying is canceled during the verification flow, the current
        unseal keys remain valid.
        Supported methods:
            DELETE: /sys/rekey/init. Produces: 204 (empty body)
            DELETE: /sys/rekey-recovery-key/init. Produces: 204 (empty body)
        :param recovery_key: If true, send requests to "rekey-recovery-key" instead of "rekey" api path.
        :type recovery_key: bool
        :return: The response of the request.
        :rtype: requests.Response
        """
        api_path = "/v1/sys/rekey/init"
        if recovery_key:
            api_path = "/v1/sys/rekey-recovery-key/init"
        return self._adapter.delete(
            url=api_path,
        )
    def rekey(self, key, nonce=None, recovery_key=False):
        """Enter a single recovery key share to progress the rekey of the Vault.
        If the threshold number of recovery key shares is reached, Vault will complete the rekey. Otherwise, this API
        must be called multiple times until that threshold is met. The rekey nonce operation must be provided with each
        call.
        Supported methods:
            PUT: /sys/rekey/update. Produces: 200 application/json
            PUT: /sys/rekey-recovery-key/update. Produces: 200 application/json
        :param key: Specifies a single recovery share key.
        :type key: str | unicode
        :param nonce: Specifies the nonce of the rekey operation.
        :type nonce: str | unicode
        :param recovery_key: If true, send requests to "rekey-recovery-key" instead of "rekey" api path.
        :type recovery_key: bool
        :return: The JSON response of the request.
        :rtype: dict
        """
        params = {
            "key": key,
        }
        if nonce is not None:
            params["nonce"] = nonce
        api_path = "/v1/sys/rekey/update"
        if recovery_key:
            api_path = "/v1/sys/rekey-recovery-key/update"
        return self._adapter.put(
            url=api_path,
            json=params,
        )
    def rekey_multi(self, keys, nonce=None, recovery_key=False):
        """Enter multiple recovery key shares to progress the rekey of the Vault.
        If the threshold number of recovery key shares is reached, Vault will complete the rekey.
        :param keys: Specifies multiple recovery share keys.
        :type keys: list
        :param nonce: Specifies the nonce of the rekey operation.
        :type nonce: str | unicode
        :param recovery_key: If true, send requests to "rekey-recovery-key" instead of "rekey" api path.
        :type recovery_key: bool
        :return: The last response of the rekey request.
        :rtype: dict
        """
        result = None
        for key in keys:
            result = self.rekey(
                key=key,
                nonce=nonce,
                recovery_key=recovery_key,
            )
            # Stop submitting shares once Vault reports the rekey finished.
            if result.get("complete"):
                break
        return result
    def read_backup_keys(self, recovery_key=False):
        """Retrieve the backup copy of PGP-encrypted unseal keys.
        The returned value is the nonce of the rekey operation and a map of PGP key fingerprint to hex-encoded
        PGP-encrypted key.
        Supported methods:
            GET: /sys/rekey/backup. Produces: 200 application/json
            GET: /sys/rekey-recovery-key/backup. Produces: 200 application/json
        :param recovery_key: If true, send requests to "rekey-recovery-key" instead of "rekey" api path.
        :type recovery_key: bool
        :return: The JSON response of the request.
        :rtype: dict
        """
        api_path = "/v1/sys/rekey/backup"
        # NOTE(review): this builds /sys/rekey/recovery-key-backup, while every
        # sibling method uses the /sys/rekey-recovery-key/... prefix — confirm
        # which path the Vault API actually expects.
        if recovery_key:
            api_path = "/v1/sys/rekey/recovery-key-backup"
        return self._adapter.get(
            url=api_path,
        )
    def cancel_rekey_verify(self):
        """Cancel any in-progress rekey verification.
        This clears any progress made and resets the nonce. Unlike cancel_rekey, this only resets
        the current verification operation, not the entire rekey attempt.
        The return value is the same as GET along with the new nonce.
        Supported methods:
            DELETE: /sys/rekey/verify. Produces: 204 (empty body)
        :return: The response of the request.
        :rtype: requests.Response
        """
        api_path = "/v1/sys/rekey/verify"
        return self._adapter.delete(
            url=api_path,
        )
    def rekey_verify(self, key, nonce):
        """Enter a single new recovery key share to progress the rekey verification of the Vault.
        If the threshold number of new recovery key shares is reached, Vault will complete the
        rekey. Otherwise, this API must be called multiple times until that threshold is met.
        The rekey verification nonce must be provided with each call.
        Supported methods:
            PUT: /sys/rekey/verify. Produces: 200 application/json
        :param key: Specifies a single new recovery share key.
        :type key: str | unicode
        :param nonce: Specifies the nonce of the rekey verify operation.
        :type nonce: str | unicode
        :return: The JSON response of the request.
        :rtype: dict
        """
        params = {
            "key": key,
            "nonce": nonce,
        }
        api_path = "/v1/sys/rekey/verify"
        return self._adapter.put(
            url=api_path,
            json=params,
        )
    def rekey_verify_multi(self, keys, nonce):
        """Enter multiple new recovery key shares to progress the rekey verification of the Vault.
        If the threshold number of new recovery key shares is reached, Vault will complete the
        rekey. Otherwise, this API must be called multiple times until that threshold is met.
        The rekey verification nonce must be provided with each call.
        Supported methods:
            PUT: /sys/rekey/verify. Produces: 200 application/json
        :param keys: Specifies multiple recovery share keys.
        :type keys: list
        :param nonce: Specifies the nonce of the rekey verify operation.
        :type nonce: str | unicode
        :return: The JSON response of the request.
        :rtype: dict
        """
        result = None
        for key in keys:
            result = self.rekey_verify(
                key=key,
                nonce=nonce,
            )
            # Stop once Vault reports the verification is complete.
            if result.get("complete"):
                break
        return result
    def read_rekey_verify_progress(self):
        """Read the configuration and progress of the current rekey verify attempt.
        Supported methods:
            GET: /sys/rekey/verify. Produces: 200 application/json
        :return: The JSON response of the request.
        :rtype: dict
        """
        api_path = "/v1/sys/rekey/verify"
        return self._adapter.get(
            url=api_path,
        )
|
{
"content_hash": "9c684c023c98d05941eff93dc86c7be4",
"timestamp": "",
"source": "github",
"line_count": 407,
"max_line_length": 120,
"avg_line_length": 39.03931203931204,
"alnum_prop": 0.6206180376361005,
"repo_name": "ianunruh/hvac",
"id": "16b86e785391b0582ed1162ddbb3b14ccd371f63",
"size": "15889",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "hvac/api/system_backend/key.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HCL",
"bytes": "194"
},
{
"name": "Makefile",
"bytes": "236"
},
{
"name": "Python",
"bytes": "224553"
},
{
"name": "Shell",
"bytes": "1347"
}
],
"symlink_target": ""
}
|
from behave import given, then, when
from lastuser_core.models import User
@given('we have a new user')
def given_new_user(context):
    """Stash registration details for a prospective user on the context."""
    context.test_user = dict(
        fullname='Alyssa P Hacker',
        email='alyssa@hacker.com',
        password='alyssa',
        confirm_password='alyssa',
    )
@when('a new user submits the registration form with the proper details')
def when_form_submit(context):
    """Fill in and submit the registration form using the stored details."""
    browser = context.browser
    browser.visit('/register')
    # The CSRF token field must be rendered before the form can be submitted.
    assert browser.find_element_by_name('csrf_token').is_enabled()
    for field_name, field_value in context.test_user.items():
        browser.find_element_by_name(field_name).send_keys(field_value)
    browser.find_element_by_id('form-register').submit()
@then('the new user will be registered')
def then_user_registered(context):
    # NOTE(review): the given step populates context.test_user with fullname/
    # email/password/confirm_password only — there is no 'username' key, so
    # this lookup raises KeyError. Confirm the intended lookup field.
    user = User.get(username=context.test_user['username'])
    assert user is not None
    # Registration should leave exactly one pending email-claim record.
    assert len(user.emailclaims) == 1
|
{
"content_hash": "3b010d2cbc1586afa83906710f253d27",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 74,
"avg_line_length": 29.46875,
"alnum_prop": 0.6786850477200425,
"repo_name": "hasgeek/lastuser",
"id": "74bcc842de009742a365f50d7a7363bb1fe9e140",
"size": "967",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "features/steps/registration.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "2937"
},
{
"name": "Gherkin",
"bytes": "841"
},
{
"name": "HTML",
"bytes": "49341"
},
{
"name": "JavaScript",
"bytes": "145"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "545882"
},
{
"name": "Ruby",
"bytes": "404"
},
{
"name": "Shell",
"bytes": "1251"
}
],
"symlink_target": ""
}
|
from .common import CustomAttribute, UserInfo
from .document import Document
from .document_service import (
CreateDocumentRequest,
DeleteDocumentRequest,
GetDocumentRequest,
ListDocumentsRequest,
ListDocumentsResponse,
UpdateDocumentRequest,
)
from .import_config import (
BigQuerySource,
GcsSource,
ImportDocumentsMetadata,
ImportDocumentsRequest,
ImportDocumentsResponse,
ImportErrorConfig,
ImportUserEventsMetadata,
ImportUserEventsRequest,
ImportUserEventsResponse,
)
from .recommendation_service import RecommendRequest, RecommendResponse
from .user_event import (
CompletionInfo,
DocumentInfo,
MediaInfo,
PageInfo,
PanelInfo,
SearchInfo,
TransactionInfo,
UserEvent,
)
from .user_event_service import CollectUserEventRequest, WriteUserEventRequest
# Public API of this types package: every name re-exported by the import
# statements above must also appear here.
__all__ = (
    "CustomAttribute",
    "UserInfo",
    "Document",
    "CreateDocumentRequest",
    "DeleteDocumentRequest",
    "GetDocumentRequest",
    "ListDocumentsRequest",
    "ListDocumentsResponse",
    "UpdateDocumentRequest",
    "BigQuerySource",
    "GcsSource",
    "ImportDocumentsMetadata",
    "ImportDocumentsRequest",
    "ImportDocumentsResponse",
    "ImportErrorConfig",
    "ImportUserEventsMetadata",
    "ImportUserEventsRequest",
    "ImportUserEventsResponse",
    "RecommendRequest",
    "RecommendResponse",
    "CompletionInfo",
    "DocumentInfo",
    "MediaInfo",
    "PageInfo",
    "PanelInfo",
    "SearchInfo",
    "TransactionInfo",
    "UserEvent",
    "CollectUserEventRequest",
    "WriteUserEventRequest",
)
|
{
"content_hash": "3e2c4b74b728673c408b1819ebaa42ba",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 78,
"avg_line_length": 24.181818181818183,
"alnum_prop": 0.7205513784461153,
"repo_name": "googleapis/google-cloud-python",
"id": "f605d812a34eee43281413e289dec9eec1a38190",
"size": "2196",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "packages/google-cloud-discoveryengine/google/cloud/discoveryengine_v1beta/types/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2895"
},
{
"name": "Python",
"bytes": "5620713"
},
{
"name": "Shell",
"bytes": "51704"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
import unittest
import yaml
import os
from PyAnalysisTools.base.YAMLHandle import YAMLLoader as YL
from PyAnalysisTools.base.YAMLHandle import YAMLDumper as YD
from PyAnalysisTools.base import _logger
class TestYAMLLoader(unittest.TestCase):
    """Unit tests for YAMLLoader.read_yaml."""

    def setUp(self):
        _logger.setLevel(50)
        self.data = {"a": 1, "b": 2}
        self.loader = YL()
        # Write a well-formed YAML fixture for the happy-path test.
        self.test_file = open("yaml_loader.yml", "w+")
        with self.test_file:
            yaml.dump(self.data, self.test_file)

    def test_parsing(self):
        self.assertEqual(YL.read_yaml("yaml_loader.yml"), self.data)

    def test_io_exception(self):
        self.assertRaises(IOError, YL.read_yaml, "non_existing_file")

    def test_invalid_input_file(self):
        with open("invalid_yaml_file.yml", "w+") as invalid_file:
            invalid_file.write("foo:--:\nsome invalid input\n")
        self.assertRaises(Exception, YL.read_yaml, "invalid_yaml_file.yml")

    def test_accept_None(self):
        self.assertIsNone(YL.read_yaml(None, True))
class TestYAMLDumper(unittest.TestCase):
    """Unit tests for YAMLDumper.dump_yaml."""
    def setUp(self):
        # Silence library logging (level 50 == CRITICAL) during tests.
        _logger.setLevel(50)
        self.data = {"a": 1, "b": 2}
        self.dumper = YD()
        self.test_file_name = "yaml_dumper.yml"
    def test_dump_dict(self):
        # Dumping a plain dict should create the target file on disk.
        YD.dump_yaml(self.data, self.test_file_name)
        self.assertTrue(os.path.exists(self.test_file_name))
    def test_dump_failure(self):
        # Writing to a non-writable location should raise.
        self.assertRaises(Exception, YD.dump_yaml, "foo:--:\nsome invalid input", "/usr/bin/test.yml")
|
{
"content_hash": "27a7727dc2b8e929bbad601be0542333",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 102,
"avg_line_length": 33.104166666666664,
"alnum_prop": 0.6482064191315292,
"repo_name": "morgenst/PyAnalysisTools",
"id": "5076b8dbd0ac8e3004d988ee7d7763dc2fc37bcb",
"size": "1589",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/unit/TestYAMLHandle.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "2887"
},
{
"name": "Dockerfile",
"bytes": "319"
},
{
"name": "Python",
"bytes": "1156688"
},
{
"name": "Shell",
"bytes": "2314"
}
],
"symlink_target": ""
}
|
from PyObjCTools.TestSupport import *
from Foundation import *
class TestNSNotificationQueue (TestCase):
    """Check the NSNotificationQueue posting-style and coalescing constants."""

    def testConstants(self):
        expected_values = [
            (NSPostWhenIdle, 1),
            (NSPostASAP, 2),
            (NSPostNow, 3),
            (NSNotificationNoCoalescing, 0),
            (NSNotificationCoalescingOnName, 1),
            (NSNotificationCoalescingOnSender, 2),
        ]
        for constant, value in expected_values:
            self.assertEqual(constant, value)
if __name__ == "__main__":
main()
|
{
"content_hash": "c9690290304baa2fb60e82424f392639",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 61,
"avg_line_length": 26.61111111111111,
"alnum_prop": 0.7077244258872651,
"repo_name": "albertz/music-player",
"id": "c431c6d148047c3e3760c1f8592fa0f0829769f0",
"size": "479",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "mac/pyobjc-framework-Cocoa/PyObjCTest/test_nsnotificationqueue.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Assembly",
"bytes": "47481"
},
{
"name": "C",
"bytes": "435926"
},
{
"name": "C++",
"bytes": "149133"
},
{
"name": "CSS",
"bytes": "16435"
},
{
"name": "HTML",
"bytes": "914432"
},
{
"name": "JavaScript",
"bytes": "52869"
},
{
"name": "M",
"bytes": "10808"
},
{
"name": "Makefile",
"bytes": "13304"
},
{
"name": "Mathematica",
"bytes": "61418"
},
{
"name": "Objective-C",
"bytes": "2082720"
},
{
"name": "Objective-C++",
"bytes": "62427"
},
{
"name": "PostScript",
"bytes": "2783"
},
{
"name": "Prolog",
"bytes": "217"
},
{
"name": "Python",
"bytes": "7789845"
},
{
"name": "QMake",
"bytes": "9667"
},
{
"name": "Roff",
"bytes": "8329"
},
{
"name": "Shell",
"bytes": "3521"
}
],
"symlink_target": ""
}
|
import math
from ds.vortex.core import baseNode
from ds.vortex.core import plug as plugs
class CosNode(baseNode.BaseNode):
    """Vortex graph node that outputs the cosine of its input value."""
    def __init__(self, name):
        """
        :param name: str, the name of the node
        """
        baseNode.BaseNode.__init__(self, name)
    def initialize(self):
        """Create the input/output plugs and register the dependency."""
        baseNode.BaseNode.initialize(self)
        self.outputPlug_ = plugs.OutputPlug("output", self)
        self.valuePlug_ = plugs.InputPlug("value", self, value=0)
        self.addPlug(self.outputPlug_, clean=True)
        self.addPlug(self.valuePlug_, clean=True)
        # Mark the output as dependent on the input so edits propagate.
        self.plugAffects(self.valuePlug_, self.outputPlug_)
    def compute(self, requestPlug):
        """Compute cos(value) when the output plug is requested.

        :param requestPlug: the plug whose value is being requested; any plug
            other than the output plug yields None
        :return: float, math.cos of the input plug value (radians), or None
        """
        baseNode.BaseNode.compute(self, requestPlug=requestPlug)
        if requestPlug != self.outputPlug_:
            return None
        # math.cos interprets the input in radians.
        result = math.cos(self.valuePlug_.value)
        requestPlug.value = result
        requestPlug.dirty = False
        return result
def getNode():
    """General factory hook that returns this node type, used to create the
    node via a UI etc.

    :return: the CosNode class itself (not an instance)
    """
    return CosNode
|
{
"content_hash": "5c81db7fe8a2f4d94639784b3d58c4d6",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 85,
"avg_line_length": 28.94736842105263,
"alnum_prop": 0.6454545454545455,
"repo_name": "dsparrow27/vortex",
"id": "0c9693ab3caee67dde9a7ecd520274f94434d7ce",
"size": "1100",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/ds/vortex/nodes/math/trigonometry/cos.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "139103"
}
],
"symlink_target": ""
}
|
from netmiko.ssh_connection import SSHConnection
import paramiko
import time
import socket
import re
import io
from netmiko.netmiko_globals import MAX_BUFFER
from netmiko.ssh_exception import NetMikoTimeoutException, NetMikoAuthenticationException
class FortinetSSH(SSHConnection):
    """Netmiko connection class for Fortinet devices over SSH."""

    def session_preparation(self):
        '''
        Prepare the session after the connection has been established

        Disable paging
        Change base prompt
        '''
        self.disable_paging()
        self.set_base_prompt(pri_prompt_terminator='$')

    def disable_paging(self, delay_factor=.5):
        '''
        Disable paging is only available with specific roles so it may fail

        Sets self.vdoms (virtual domains enabled on the device) and
        self.allow_disable_global (whether this session may change the
        global console output setting).

        :param delay_factor: unused; kept for interface compatibility
        '''
        check_command = "get system status\n"
        output = self.send_command(check_command)
        self.allow_disable_global = True
        self.vdoms = False

        # According with http://www.gossamer-threads.com/lists/rancid/users/6729
        # Bug fix: str.find() returns -1 (truthy) when the substring is
        # absent, so the original `if output.find(...)` tests were inverted
        # in practice; use membership tests instead.
        if "Virtual domain configuration: enable" in output:
            self.vdoms = True
            vdom_additional_command = "config global\n"
            output = self.send_command(vdom_additional_command)
            if "Command fail" in output:
                # This role cannot enter global config; reconnect with a
                # large terminal so unpaged output still fits.
                self.allow_disable_global = False
                self.remote_conn.close()
                self.establish_connection(width=100, height=1000)

        if self.allow_disable_global:
            disable_paging_commands = ["config system console\n", "set output standard\n", "end\n"]
            outputlist = [self.send_command(command) for command in disable_paging_commands]
            # Some code should be inserted for testing the output of the commands

    def cleanup(self):
        '''
        Re-enable paging globally

        Paging is a global (per-device) setting on Fortinet, so restore it
        before disconnecting if this session was allowed to change it.
        '''
        if self.allow_disable_global:
            enable_paging_commands = ["config system console\n", "set output more\n", "end\n"]
            if self.vdoms:
                enable_paging_commands.insert(0, "config global\n")
            outputlist = [self.send_command(command) for command in enable_paging_commands]
            # Some code should be inserted for testing the output of the commands

    def establish_connection(self, sleep_time=3, verbose=True, timeout=8, use_keys=False, width=None, height=None):
        '''
        Establish SSH connection to the network device

        Timeout will generate a NetMikoTimeoutException
        Authentication failure will generate a NetMikoAuthenticationException

        :param sleep_time: seconds to wait before stripping the initial prompt
        :param verbose: print connection progress messages
        :param timeout: TCP connect timeout in seconds
        :param use_keys: boolean that allows ssh-keys to be used for authentication
        :param width: optional terminal width for the interactive session
        :param height: optional terminal height for the interactive session
        :return: initial device output (unicode) read after connecting
        '''
        # Create instance of SSHClient object
        self.remote_conn_pre = paramiko.SSHClient()

        # Automatically add untrusted hosts (make sure appropriate for your environment)
        self.remote_conn_pre.set_missing_host_key_policy(paramiko.AutoAddPolicy())

        # initiate SSH connection
        try:
            self.remote_conn_pre.connect(hostname=self.ip, port=self.port,
                                         username=self.username, password=self.password,
                                         look_for_keys=use_keys, allow_agent=False,
                                         timeout=timeout)
        except socket.error:
            msg = "Connection to device timed-out: {device_type} {ip}:{port}".format(
                device_type=self.device_type, ip=self.ip, port=self.port)
            raise NetMikoTimeoutException(msg)
        except paramiko.ssh_exception.AuthenticationException as auth_err:
            msg = "Authentication failure: unable to connect {device_type} {ip}:{port}".format(
                device_type=self.device_type, ip=self.ip, port=self.port)
            msg += '\n' + str(auth_err)
            raise NetMikoAuthenticationException(msg)

        if verbose:
            print("SSH connection established to {0}:{1}".format(self.ip, self.port))

        # Since Fortinet paging setting is global we need a way to disable paging
        # Use invoke_shell to establish an 'interactive session'
        if width and height:
            self.remote_conn = self.remote_conn_pre.invoke_shell(term='vt100', width=width, height=height)
        else:
            self.remote_conn = self.remote_conn_pre.invoke_shell()

        if verbose:
            print("Interactive SSH session established")

        # Strip the initial router prompt
        time.sleep(sleep_time)
        return self.remote_conn.recv(MAX_BUFFER).decode('utf-8')

    def config_mode(self, config_command=''):
        '''
        No config mode for Fortinet devices
        '''
        return u''

    def exit_config_mode(self, exit_config=''):
        '''
        No config mode for Fortinet devices
        '''
        return u''
|
{
"content_hash": "2101d19684af6237956ab9fbfd1c31e2",
"timestamp": "",
"source": "github",
"line_count": 121,
"max_line_length": 115,
"avg_line_length": 39.52892561983471,
"alnum_prop": 0.6203219736567008,
"repo_name": "brutus333/netmiko",
"id": "af6b1d88a51df999c6383edc65bf6e3078ca3644",
"size": "4783",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "netmiko/fortinet/fortinet_ssh.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "115174"
},
{
"name": "Shell",
"bytes": "1201"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: alters Frame.index to a plain
    # SmallIntegerField (previous field definition lives in migration 0006).

    dependencies = [
        ('photo', '0006_auto_20160324_2225'),
    ]
    operations = [
        migrations.AlterField(
            model_name='frame',
            name='index',
            field=models.SmallIntegerField(),
        ),
    ]
|
{
"content_hash": "3f49bfac8cca12f48cb8567d7290d8ec",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 45,
"avg_line_length": 20.5,
"alnum_prop": 0.5853658536585366,
"repo_name": "rjhelms/photo",
"id": "34bdbd01c5abe931afb95e26ebe49179234e9250",
"size": "460",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/photo/migrations/0007_auto_20160324_2303.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "59401"
},
{
"name": "Shell",
"bytes": "649"
}
],
"symlink_target": ""
}
|
import numpy as np
import re
import cnf
def parse(dimacs_data: str):
    """Parse a DIMACS CNF document into a ``cnf.CnfFormula``.

    Scans past comment ('c') lines to the problem ('p') preamble, reads the
    variable and clause counts from it, and hands every line after the
    preamble to ``__parse_clauses``.

    Returns None (like the original implicit fall-through) when no 'p'
    preamble line is present.
    """
    lines = dimacs_data.split('\n')
    line_offset = 0
    for line in lines:
        # Skip comments AND blank lines.  The original indexed line[0]
        # unconditionally, which raised IndexError on an empty line (common
        # as a trailing newline in DIMACS files).
        if not line or line.startswith('c'):
            line_offset += 1
            continue
        if line.startswith('p'):
            # Normalise runs of spaces so split() yields clean tokens:
            # "p cnf <variables> <clauses>"
            preamble = re.sub(' +', ' ', line.strip(' ')).split(' ')
            variable_count = int(preamble[2])
            clause_count = int(preamble[3])
            # Everything after the preamble line holds the clause data.
            lines_with_clauses = lines[line_offset + 1:]
            logical_matrix = __parse_clauses(lines_with_clauses, clause_count)
            return cnf.CnfFormula(logical_matrix, variable_count, clause_count)
        # Any other line before 'p' is unexpected; keep scanning, exactly
        # as the original loop did (the offset only advances for skipped
        # lines, so the clause slice still starts right after 'p').
def __parse_clauses(lines_with_clauses: list, clause_count: int):
    """Build a (clause_count, 3) int16 matrix of literals from DIMACS lines.

    Each row is one 3-literal clause; positive/negative integers encode a
    variable and its negation, 0-padding marks unused slots.
    """
    # We are representing a 3CNF formula as a matrix where each vector is
    # a clause.
    logical_formula_matrix = np.zeros((clause_count, 3), np.int16)
    joined_lines = ''.join(lines_with_clauses)
    # '0%0' marks end-of-data in some DIMACS dialects; drop it before
    # splitting on the per-clause terminators.
    joined_lines = joined_lines.replace('0%0', '')
    # Clauses are terminated by a whitespace-preceded 0.  (The original
    # pattern was '[\t\n ]0|[\t\n ]0' -- both alternatives identical, so
    # the second branch was dead code.)
    clauses = re.split('[\t\n ]0', joined_lines)
    clause_index = 0
    for clause in clauses:
        clause = clause.strip(' ')
        # Skip empty fragments (e.g. trailing terminator) that previously
        # made int('') raise ValueError.
        if not clause:
            continue
        literals = [tok for tok in re.split('[\t\n ]', clause) if tok]
        for term_index, literal in enumerate(literals):
            logical_formula_matrix[clause_index, term_index] = int(literal)
        clause_index += 1
    return logical_formula_matrix
|
{
"content_hash": "fae8ed3a37fdc4610175dd16308b38b6",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 85,
"avg_line_length": 32.44444444444444,
"alnum_prop": 0.6061643835616438,
"repo_name": "camtotheron/genetic-3SAT",
"id": "abbe17e80d86565da299b072a98c263c6a3ea9f4",
"size": "1460",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dimacs_parser.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "13090"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated Django migration: makes Produto.composicao and
    # Produto.imagem_tag optional (blank/null) foreign keys, set to NULL
    # when the referenced Composicao / ImagemTag row is deleted.

    dependencies = [
        ('produto', '0012_produtoitem'),
    ]
    operations = [
        migrations.AlterField(
            model_name='produto',
            name='composicao',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='produto.Composicao', verbose_name='Composição'),
        ),
        migrations.AlterField(
            model_name='produto',
            name='imagem_tag',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='base.ImagemTag', verbose_name='Imagem no TAG'),
        ),
    ]
|
{
"content_hash": "657db040d183182c6c67accff3414c3d",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 157,
"avg_line_length": 32.5,
"alnum_prop": 0.6358974358974359,
"repo_name": "anselmobd/fo2",
"id": "29b892f20080397913a0f112f75a15c3f09826e3",
"size": "855",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/produto/migrations/0013_produto__campos_null.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "160899"
},
{
"name": "HTML",
"bytes": "855985"
},
{
"name": "JavaScript",
"bytes": "203109"
},
{
"name": "PLSQL",
"bytes": "2762"
},
{
"name": "Python",
"bytes": "3228268"
},
{
"name": "Shell",
"bytes": "2161"
}
],
"symlink_target": ""
}
|
# QUANTCONNECT.COM - Democratizing Finance, Empowering Individuals.
# Lean Algorithmic Trading Engine v2.0. Copyright 2014 QuantConnect Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from clr import AddReference
AddReference("System")
AddReference("QuantConnect.Algorithm")
AddReference("QuantConnect.Indicators")
AddReference("QuantConnect.Common")
from System import *
from QuantConnect import *
from QuantConnect.Algorithm import *
from QuantConnect.Indicators import *
from datetime import datetime
### <summary>
### Simple indicator demonstration algorithm of MACD
### </summary>
### <meta name="tag" content="indicators" />
### <meta name="tag" content="indicator classes" />
### <meta name="tag" content="plotting indicators" />
class MACDTrendAlgorithm(QCAlgorithm):
    """Daily MACD(12,26,9) trend-following strategy on SPY.

    Goes fully long when the MACD line rises above its signal line by a
    small tolerance, and liquidates when it falls below by the same
    tolerance; acts at most once per trading day.
    """

    def Initialize(self):
        '''Initialise the data and resolution required, as well as the cash and start-end dates for your algorithm. All algorithms must initialized.'''
        # The original used leading-zero literals (01), which are octal in
        # Python 2 (value 1) and a SyntaxError in Python 3; plain 1 is
        # identical in value and parses everywhere.
        self.SetStartDate(2004, 1, 1)    # Set Start Date
        self.SetEndDate(2015, 1, 1)      # Set End Date
        self.SetCash(100000)             # Set Strategy Cash
        # Find more symbols here: http://quantconnect.com/data
        self.AddEquity("SPY", Resolution.Daily)
        # define our daily macd(12,26) with a 9 day signal
        self.__macd = self.MACD("SPY", 12, 26, 9, MovingAverageType.Exponential, Resolution.Daily)
        # Timestamp of the last day we acted, so OnData runs once per day.
        self.__previous = datetime.min
        self.PlotIndicator("MACD", True, self.__macd, self.__macd.Signal)
        self.PlotIndicator("SPY", self.__macd.Fast, self.__macd.Slow)

    def OnData(self, data):
        '''OnData event is the primary entry point for your algorithm. Each new data point will be pumped in here.'''
        # wait for our macd to fully initialize
        if not self.__macd.IsReady: return
        # only once per day
        if self.__previous.date() == self.Time.date(): return
        # define a small tolerance on our checks to avoid bouncing
        tolerance = 0.0025
        holdings = self.Portfolio["SPY"].Quantity
        # MACD-to-signal spread, normalised by the fast EMA.
        signalDeltaPercent = (self.__macd.Current.Value - self.__macd.Signal.Current.Value)/self.__macd.Fast.Current.Value
        # if our macd is greater than our signal, then let's go long
        if holdings <= 0 and signalDeltaPercent > tolerance: # 0.01%
            # longterm says buy as well
            self.SetHoldings("SPY", 1.0)
        # if our macd is less than our signal, then let's close out
        elif holdings >= 0 and signalDeltaPercent < -tolerance:
            self.Liquidate("SPY")
        self.__previous = self.Time
|
{
"content_hash": "4f57b43627f01ed79f083f46e1984de6",
"timestamp": "",
"source": "github",
"line_count": 75,
"max_line_length": 151,
"avg_line_length": 41.50666666666667,
"alnum_prop": 0.6925795053003534,
"repo_name": "andrewhart098/Lean",
"id": "9315285fb54eef879a683623f0520c06a6c6cbd2",
"size": "3115",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Algorithm.Python/MACDTrendAlgorithm.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "2540"
},
{
"name": "C#",
"bytes": "14079839"
},
{
"name": "F#",
"bytes": "1723"
},
{
"name": "Java",
"bytes": "852"
},
{
"name": "Jupyter Notebook",
"bytes": "13963"
},
{
"name": "Python",
"bytes": "274297"
},
{
"name": "Shell",
"bytes": "2307"
},
{
"name": "Visual Basic",
"bytes": "2448"
}
],
"symlink_target": ""
}
|
import os
import re
import argparse
from typing import Optional
from . import blackarrow as ba
def main():
    """Entry point: parse CLI args, run the search, wait for the printer."""
    args = get_args()
    # start_search returns (processes, final_queue); the queue is not used
    # here (the original bound it to an unused name).
    processes, _ = ba.start_search(args)
    print_process = processes[-1]
    try:
        print_process.join()  # Wait main thread until printer is done
    except (KeyboardInterrupt, EOFError):  # kill all on ctrl+c/d
        # Plain loop instead of a throwaway list comprehension used only
        # for its side effects.
        for proc in processes:
            proc.terminate()
def get_args(manual_args: Optional[list] = None) -> argparse.Namespace:
    """Build the argument parser and return the parsed namespace.

    :param manual_args: optional list of argument strings, as would appear
        in ``sys.argv[1:]``; when None, the real command line is parsed.
        (The original annotated this as ``str``, but ``parse_args`` takes a
        sequence of strings.)
    :return: parsed namespace; the positional search term is also exposed
        under the friendlier attribute ``regex``.
    """
    parser = argparse.ArgumentParser()
    regex_group = parser.add_mutually_exclusive_group(required=True)
    regex_group.add_argument(
        "regex_positional",
        metavar="R",
        type=str,
        default=None,
        nargs="?",
        help="Search term (regular expression)",
    )
    parser.add_argument(
        "-d",
        "--directories",
        type=str,
        default=["."],
        nargs="+",
        help="Director(y|ies) to run against",
    )
    parser.add_argument(
        "-i",
        "--ignore",
        type=str,
        default=[],
        nargs="+",
        help="Things to ignore (regular expressions)",
    )
    parser.add_argument(
        "-f",
        "--filename",
        type=str,
        default=[],
        nargs="+",
        help="Filename search term(s)",
    )
    parser.add_argument(
        "-w",
        "--workers",
        type=int,
        default=None,
        help=("Number of workers to use (default numcores, with fallback 6 unless set)"),
    )
    parser.add_argument(
        "-p",
        "--pipe",
        action="store_true",
        default=False,
        help=('Run in "pipe" mode with brief output'),
    )
    parser.add_argument(
        "-e", "--edit", action="store_true", default=False, help=("Edit the files?")
    )
    parser.add_argument(
        "-l",
        "--lower",
        action="store_true",
        default=False,
        help=("Check strict lower case?"),
    )
    parser.add_argument(
        "-r",
        "--replace",
        type=str,
        default=None,
        help="Replace text found in place with supplied",
    )
    parser.add_argument(
        "-D",
        "--depth",
        type=int,
        default=None,
        required=False,
        help="Directory depth to search in",
    )
    if manual_args is not None:
        args = parser.parse_args(args=manual_args)
    else:
        args = parser.parse_args()
    # Alias the positional under the name the rest of the code uses.
    args.regex = args.regex_positional
    return args
|
{
"content_hash": "fc69ed2e07d8374c3329869badb4d65a",
"timestamp": "",
"source": "github",
"line_count": 99,
"max_line_length": 89,
"avg_line_length": 24.848484848484848,
"alnum_prop": 0.541869918699187,
"repo_name": "willzfarmer/black-arrow",
"id": "d0798ee4c0b06ed0165023af74de8e38906188d0",
"size": "2460",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "blackarrow/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "16188"
},
{
"name": "Shell",
"bytes": "57"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated initial migration for the books app: creates the
    # BookPage table.

    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='BookPage',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(default='', max_length=100)),
                # Uploaded images land under MEDIA_ROOT/images/.
                ('image', models.ImageField(upload_to='images/')),
                ('content', models.CharField(default='', max_length=1024)),
            ],
        ),
    ]
|
{
"content_hash": "604675242466d0ec4102ac1c32e82a26",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 114,
"avg_line_length": 27.652173913043477,
"alnum_prop": 0.5644654088050315,
"repo_name": "Pepedou/Famas",
"id": "db77443ee9e825a184c6466d9eb46d12cbf5a7f4",
"size": "709",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "books/migrations/0001_initial.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "15818"
}
],
"symlink_target": ""
}
|
from direct.directnotify import DirectNotifyGlobal
import AttribDesc
from direct.showbase.PythonUtil import mostDerivedLast
class EntityTypeDesc:
    """Describes an entity type: its attribute descriptors and flags.

    Subclasses declare an ``attribs`` list; ``privCompileAttribDescs``
    merges descriptors up the inheritance chain (most-derived last) into a
    per-class ``_attribDescs`` list, which instances expose by name.
    """
    notify = DirectNotifyGlobal.directNotify.newCategory('EntityTypeDesc')
    # Output datatype of this entity type; subclasses may override.
    output = None

    def __init__(self):
        # Lazily compile the merged attribute descriptors for our class.
        self.__class__.privCompileAttribDescs(self.__class__)
        self.attribNames = []
        self.attribDescDict = {}
        attribDescs = self.__class__._attribDescs
        for desc in attribDescs:
            attribName = desc.getName()
            self.attribNames.append(attribName)
            self.attribDescDict[attribName] = desc

    def isConcrete(self):
        # Concrete unless the class itself declares an 'abstract' marker.
        # (Rewritten from dict.has_key(), which exists only in Python 2;
        # the 'in' operator is equivalent and works on both 2 and 3.)
        return 'abstract' not in self.__class__.__dict__

    def isPermanent(self):
        return 'permanent' in self.__class__.__dict__

    def getOutputType(self):
        return self.output

    def getAttribNames(self):
        return self.attribNames

    def getAttribDescDict(self):
        return self.attribDescDict

    def getAttribsOfType(self, type):
        # NOTE: parameter 'type' shadows the builtin; the name is kept for
        # caller compatibility.
        names = []
        for attribName, desc in self.attribDescDict.items():
            if desc.getDatatype() == type:
                names.append(attribName)
        return names

    @staticmethod
    def privCompileAttribDescs(entTypeClass):
        # Already compiled for this exact class (not just inherited)?
        if '_attribDescs' in entTypeClass.__dict__:
            return
        c = entTypeClass
        EntityTypeDesc.notify.debug('compiling attrib descriptors for %s' % c.__name__)
        # Make sure every base class is compiled before merging.
        for base in c.__bases__:
            EntityTypeDesc.privCompileAttribDescs(base)
        # Attribs that this class explicitly blocks from its bases.
        blockAttribs = c.__dict__.get('blockAttribs', [])
        baseADs = []
        bases = list(c.__bases__)
        mostDerivedLast(bases)
        for base in bases:
            for desc in base._attribDescs:
                if desc.getName() in blockAttribs:
                    continue
                # Warn (once) about the same attrib arriving via several
                # bases; the first occurrence wins.
                for d in baseADs:
                    if desc.getName() == d.getName():
                        EntityTypeDesc.notify.warning('%s inherits attrib %s from multiple bases' % (c.__name__, desc.getName()))
                        break
                else:
                    baseADs.append(desc)
        attribDescs = []
        if 'attribs' in c.__dict__:
            for attrib in c.attribs:
                desc = AttribDesc.AttribDesc(*attrib)
                if desc.getName() == 'type' and entTypeClass.__name__ != 'Entity':
                    EntityTypeDesc.notify.error("(%s): '%s' is a reserved attribute name" % (entTypeClass.__name__, desc.getName()))
                # A locally-declared attrib overrides the inherited one.
                for ad in baseADs:
                    if ad.getName() == desc.getName():
                        baseADs.remove(ad)
                        break
                attribDescs.append(desc)
        c._attribDescs = baseADs + attribDescs

    def __str__(self):
        return str(self.__class__)

    def __repr__(self):
        return str(self.__class__.__dict__.get('type', None)) + str(self.output) + str(self.attribDescDict)
|
{
"content_hash": "b70c0658fb101f0f249e1485bfd71e61",
"timestamp": "",
"source": "github",
"line_count": 85,
"max_line_length": 132,
"avg_line_length": 34.98823529411764,
"alnum_prop": 0.5689307330195024,
"repo_name": "ToonTownInfiniteRepo/ToontownInfinite",
"id": "7642dc0e839f5f6adf0bec033af8194b56d04cb5",
"size": "2974",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "otp/level/EntityTypeDesc.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1703277"
},
{
"name": "C#",
"bytes": "9892"
},
{
"name": "C++",
"bytes": "5468044"
},
{
"name": "Emacs Lisp",
"bytes": "210083"
},
{
"name": "F#",
"bytes": "4611"
},
{
"name": "JavaScript",
"bytes": "7003"
},
{
"name": "Objective-C",
"bytes": "23212"
},
{
"name": "Puppet",
"bytes": "5245"
},
{
"name": "Python",
"bytes": "34010215"
},
{
"name": "Shell",
"bytes": "11192"
},
{
"name": "Tcl",
"bytes": "1981257"
}
],
"symlink_target": ""
}
|
import mock
import six
from heat.common import exception
from heat.common.i18n import _
from heat.common import template_format
from heat.engine import resource
from heat.engine import rsrc_defn
from heat.engine import scheduler
from heat.tests import common
from heat.tests import utils
import testtools
from heat_docker.resources import docker_container
from heat_docker.tests import fake_docker_client as docker
docker_container.docker = docker
template = '''
{
"AWSTemplateFormatVersion": "2010-09-09",
"Description": "Test template",
"Parameters": {},
"Resources": {
"Blog": {
"Type": "DockerInc::Docker::Container",
"Properties": {
"image": "samalba/wordpress",
"env": [
"FOO=bar"
]
}
}
}
}
'''
class DockerContainerTest(common.HeatTestCase):
    """Unit tests for the DockerInc::Docker::Container Heat resource.

    Every test runs against the fake docker client installed at module
    import time, so no Docker daemon is required.
    """
    def setUp(self):
        super(DockerContainerTest, self).setUp()
        # Register the contrib resource types with the Heat engine.
        for res_name, res_class in docker_container.resource_mapping().items():
            resource._register_class(res_name, res_class)
        self.addCleanup(self.m.VerifyAll)
    def create_container(self, resource_name):
        # Helper: build the stack from `template` and drive the named
        # resource through CREATE, asserting it completes.
        t = template_format.parse(template)
        self.stack = utils.parse_stack(t)
        resource = docker_container.DockerContainer(
            resource_name,
            self.stack.t.resource_definitions(self.stack)[resource_name],
            self.stack)
        self.m.StubOutWithMock(resource, 'get_client')
        resource.get_client().MultipleTimes().AndReturn(
            docker.Client())
        self.assertIsNone(resource.validate())
        self.m.ReplayAll()
        scheduler.TaskRunner(resource.create)()
        self.assertEqual((resource.CREATE, resource.COMPLETE),
                         resource.state)
        return resource
    def get_container_state(self, resource):
        # Helper: return the fake container's 'State' dict.
        client = resource.get_client()
        return client.inspect_container(resource.resource_id)['State']
    def test_resource_create(self):
        container = self.create_container('Blog')
        self.assertTrue(container.resource_id)
        running = self.get_container_state(container)['Running']
        self.assertIs(True, running)
        client = container.get_client()
        self.assertEqual(['samalba/wordpress'], client.pulled_images)
        self.assertIsNone(client.container_create[0]['name'])
    def test_create_with_name(self):
        t = template_format.parse(template)
        self.stack = utils.parse_stack(t)
        definition = self.stack.t.resource_definitions(self.stack)['Blog']
        props = t['Resources']['Blog']['Properties'].copy()
        props['name'] = 'super-blog'
        resource = docker_container.DockerContainer(
            'Blog', definition.freeze(properties=props), self.stack)
        self.m.StubOutWithMock(resource, 'get_client')
        resource.get_client().MultipleTimes().AndReturn(
            docker.Client())
        self.assertIsNone(resource.validate())
        self.m.ReplayAll()
        scheduler.TaskRunner(resource.create)()
        self.assertEqual((resource.CREATE, resource.COMPLETE),
                         resource.state)
        client = resource.get_client()
        self.assertEqual(['samalba/wordpress'], client.pulled_images)
        self.assertEqual('super-blog', client.container_create[0]['name'])
    @mock.patch.object(docker_container.DockerContainer, 'get_client')
    def test_create_failed(self, test_client):
        # A non-zero ExitCode on inspect should surface as ResourceInError
        # carrying the container logs.
        mock_client = mock.Mock()
        mock_client.inspect_container.return_value = {
            "State": {
                "ExitCode": -1
            }
        }
        mock_client.logs.return_value = "Container startup failed"
        test_client.return_value = mock_client
        mock_stack = mock.Mock()
        mock_stack.has_cache_data.return_value = False
        mock_stack.db_resource_get.return_value = None
        res_def = mock.Mock(spec=rsrc_defn.ResourceDefinition)
        docker_res = docker_container.DockerContainer("test", res_def,
                                                      mock_stack)
        exc = self.assertRaises(exception.ResourceInError,
                                docker_res.check_create_complete,
                                'foo')
        self.assertIn("Container startup failed", six.text_type(exc))
    def test_start_with_bindings_and_links(self):
        t = template_format.parse(template)
        self.stack = utils.parse_stack(t)
        definition = self.stack.t.resource_definitions(self.stack)['Blog']
        props = t['Resources']['Blog']['Properties'].copy()
        props['port_bindings'] = {'80/tcp': [{'HostPort': '80'}]}
        props['links'] = {'db': 'mysql'}
        resource = docker_container.DockerContainer(
            'Blog', definition.freeze(properties=props), self.stack)
        self.m.StubOutWithMock(resource, 'get_client')
        resource.get_client().MultipleTimes().AndReturn(
            docker.Client())
        self.assertIsNone(resource.validate())
        self.m.ReplayAll()
        scheduler.TaskRunner(resource.create)()
        self.assertEqual((resource.CREATE, resource.COMPLETE),
                         resource.state)
        client = resource.get_client()
        self.assertEqual(['samalba/wordpress'], client.pulled_images)
        self.assertEqual({'db': 'mysql'}, client.container_start[0]['links'])
        self.assertEqual(
            {'80/tcp': [{'HostPort': '80'}]},
            client.container_start[0]['port_bindings'])
    def test_resource_attributes(self):
        container = self.create_container('Blog')
        # Test network info attributes
        self.assertEqual('172.17.42.1', container.FnGetAtt('network_gateway'))
        self.assertEqual('172.17.0.3', container.FnGetAtt('network_ip'))
        self.assertEqual('1080', container.FnGetAtt('network_tcp_ports'))
        self.assertEqual('', container.FnGetAtt('network_udp_ports'))
        # Test logs attributes
        self.assertEqual('---logs_begin---', container.FnGetAtt('logs_head'))
        self.assertEqual('---logs_end---', container.FnGetAtt('logs_tail'))
        # Test a non existing attribute
        self.assertRaises(exception.InvalidTemplateAttribute,
                          container.FnGetAtt, 'invalid_attribute')
    @testtools.skipIf(docker is None, 'docker-py not available')
    def test_resource_delete(self):
        container = self.create_container('Blog')
        scheduler.TaskRunner(container.delete)()
        self.assertEqual((container.DELETE, container.COMPLETE),
                         container.state)
        # After delete, inspecting the container must 404.
        exists = True
        try:
            self.get_container_state(container)['Running']
        except docker.errors.APIError as error:
            if error.response.status_code == 404:
                exists = False
            else:
                raise
        self.assertIs(False, exists)
        self.m.VerifyAll()
    @testtools.skipIf(docker is None, 'docker-py not available')
    def test_resource_delete_exception(self):
        # A 404 from kill/status during delete must not fail the delete.
        response = mock.MagicMock()
        response.status_code = 404
        response.content = 'some content'
        container = self.create_container('Blog')
        self.m.StubOutWithMock(container.get_client(), 'kill')
        container.get_client().kill(container.resource_id).AndRaise(
            docker.errors.APIError('Not found', response))
        self.m.StubOutWithMock(container, '_get_container_status')
        container._get_container_status(container.resource_id).AndRaise(
            docker.errors.APIError('Not found', response))
        self.m.ReplayAll()
        scheduler.TaskRunner(container.delete)()
        self.m.VerifyAll()
    def test_resource_suspend_resume(self):
        container = self.create_container('Blog')
        # Test suspend
        scheduler.TaskRunner(container.suspend)()
        self.assertEqual((container.SUSPEND, container.COMPLETE),
                         container.state)
        running = self.get_container_state(container)['Running']
        self.assertIs(False, running)
        # Test resume
        scheduler.TaskRunner(container.resume)()
        self.assertEqual((container.RESUME, container.COMPLETE),
                         container.state)
        running = self.get_container_state(container)['Running']
        self.assertIs(True, running)
    def test_start_with_restart_policy_no(self):
        t = template_format.parse(template)
        self.stack = utils.parse_stack(t)
        definition = self.stack.t.resource_definitions(self.stack)['Blog']
        props = t['Resources']['Blog']['Properties'].copy()
        props['restart_policy'] = {'Name': 'no', 'MaximumRetryCount': 0}
        resource = docker_container.DockerContainer(
            'Blog', definition.freeze(properties=props), self.stack)
        get_client_mock = self.patchobject(resource, 'get_client')
        get_client_mock.return_value = docker.Client()
        self.assertIsNone(resource.validate())
        scheduler.TaskRunner(resource.create)()
        self.assertEqual((resource.CREATE, resource.COMPLETE),
                         resource.state)
        client = resource.get_client()
        self.assertEqual(['samalba/wordpress'], client.pulled_images)
        self.assertEqual({'Name': 'no', 'MaximumRetryCount': 0},
                         client.container_start[0]['restart_policy'])
    def test_start_with_restart_policy_on_failure(self):
        t = template_format.parse(template)
        self.stack = utils.parse_stack(t)
        definition = self.stack.t.resource_definitions(self.stack)['Blog']
        props = t['Resources']['Blog']['Properties'].copy()
        props['restart_policy'] = {'Name': 'on-failure',
                                   'MaximumRetryCount': 10}
        resource = docker_container.DockerContainer(
            'Blog', definition.freeze(properties=props), self.stack)
        get_client_mock = self.patchobject(resource, 'get_client')
        get_client_mock.return_value = docker.Client()
        self.assertIsNone(resource.validate())
        scheduler.TaskRunner(resource.create)()
        self.assertEqual((resource.CREATE, resource.COMPLETE),
                         resource.state)
        client = resource.get_client()
        self.assertEqual(['samalba/wordpress'], client.pulled_images)
        self.assertEqual({'Name': 'on-failure', 'MaximumRetryCount': 10},
                         client.container_start[0]['restart_policy'])
    def test_start_with_restart_policy_always(self):
        t = template_format.parse(template)
        self.stack = utils.parse_stack(t)
        definition = self.stack.t.resource_definitions(self.stack)['Blog']
        props = t['Resources']['Blog']['Properties'].copy()
        props['restart_policy'] = {'Name': 'always', 'MaximumRetryCount': 0}
        resource = docker_container.DockerContainer(
            'Blog', definition.freeze(properties=props), self.stack)
        get_client_mock = self.patchobject(resource, 'get_client')
        get_client_mock.return_value = docker.Client()
        self.assertIsNone(resource.validate())
        scheduler.TaskRunner(resource.create)()
        self.assertEqual((resource.CREATE, resource.COMPLETE),
                         resource.state)
        client = resource.get_client()
        self.assertEqual(['samalba/wordpress'], client.pulled_images)
        self.assertEqual({'Name': 'always', 'MaximumRetryCount': 0},
                         client.container_start[0]['restart_policy'])
    def test_start_with_caps(self):
        t = template_format.parse(template)
        self.stack = utils.parse_stack(t)
        definition = self.stack.t.resource_definitions(self.stack)['Blog']
        props = t['Resources']['Blog']['Properties'].copy()
        props['cap_add'] = ['NET_ADMIN']
        props['cap_drop'] = ['MKNOD']
        resource = docker_container.DockerContainer(
            'Blog', definition.freeze(properties=props), self.stack)
        get_client_mock = self.patchobject(resource, 'get_client')
        get_client_mock.return_value = docker.Client()
        self.assertIsNone(resource.validate())
        scheduler.TaskRunner(resource.create)()
        self.assertEqual((resource.CREATE, resource.COMPLETE),
                         resource.state)
        client = resource.get_client()
        self.assertEqual(['samalba/wordpress'], client.pulled_images)
        self.assertEqual(['NET_ADMIN'], client.container_start[0]['cap_add'])
        self.assertEqual(['MKNOD'], client.container_start[0]['cap_drop'])
    def test_start_with_read_only(self):
        t = template_format.parse(template)
        self.stack = utils.parse_stack(t)
        definition = self.stack.t.resource_definitions(self.stack)['Blog']
        props = t['Resources']['Blog']['Properties'].copy()
        props['read_only'] = True
        resource = docker_container.DockerContainer(
            'Blog', definition.freeze(properties=props), self.stack)
        get_client_mock = self.patchobject(resource, 'get_client')
        get_client_mock.return_value = docker.Client()
        # read_only requires docker API >= 1.17.
        get_client_mock.return_value.set_api_version('1.17')
        self.assertIsNone(resource.validate())
        scheduler.TaskRunner(resource.create)()
        self.assertEqual((resource.CREATE, resource.COMPLETE),
                         resource.state)
        client = resource.get_client()
        self.assertEqual(['samalba/wordpress'], client.pulled_images)
        self.assertIs(True, client.container_start[0]['read_only'])
    def arg_for_low_api_version(self, arg, value, low_version):
        # Helper: assert that validate() rejects `arg` when the fake client
        # reports an API version below the property's minimum.
        t = template_format.parse(template)
        self.stack = utils.parse_stack(t)
        definition = self.stack.t.resource_definitions(self.stack)['Blog']
        props = t['Resources']['Blog']['Properties'].copy()
        props[arg] = value
        my_resource = docker_container.DockerContainer(
            'Blog', definition.freeze(properties=props), self.stack)
        get_client_mock = self.patchobject(my_resource, 'get_client')
        get_client_mock.return_value = docker.Client()
        get_client_mock.return_value.set_api_version(low_version)
        msg = self.assertRaises(docker_container.InvalidArgForVersion,
                                my_resource.validate)
        min_version = docker_container.MIN_API_VERSION_MAP[arg]
        args = dict(arg=arg, min_version=min_version)
        expected = _('"%(arg)s" is not supported for API version '
                     '< "%(min_version)s"') % args
        self.assertEqual(expected, six.text_type(msg))
    def test_start_with_read_only_for_low_api_version(self):
        self.arg_for_low_api_version('read_only', True, '1.16')
    def test_compare_version(self):
        self.assertEqual(docker_container.compare_version('1.17', '1.17'), 0)
        self.assertEqual(docker_container.compare_version('1.17', '1.16'), -1)
        self.assertEqual(docker_container.compare_version('1.17', '1.18'), 1)
    def test_create_with_cpu_shares(self):
        t = template_format.parse(template)
        self.stack = utils.parse_stack(t)
        definition = self.stack.t.resource_definitions(self.stack)['Blog']
        props = t['Resources']['Blog']['Properties'].copy()
        props['cpu_shares'] = 512
        my_resource = docker_container.DockerContainer(
            'Blog', definition.freeze(properties=props), self.stack)
        get_client_mock = self.patchobject(my_resource, 'get_client')
        get_client_mock.return_value = docker.Client()
        self.assertIsNone(my_resource.validate())
        scheduler.TaskRunner(my_resource.create)()
        self.assertEqual((my_resource.CREATE, my_resource.COMPLETE),
                         my_resource.state)
        client = my_resource.get_client()
        self.assertEqual(['samalba/wordpress'], client.pulled_images)
        self.assertEqual(512, client.container_create[0]['cpu_shares'])
    def test_create_with_cpu_shares_for_low_api_version(self):
        self.arg_for_low_api_version('cpu_shares', 512, '1.7')
    def test_start_with_mapping_devices(self):
        t = template_format.parse(template)
        self.stack = utils.parse_stack(t)
        definition = self.stack.t.resource_definitions(self.stack)['Blog']
        props = t['Resources']['Blog']['Properties'].copy()
        props['devices'] = (
            [{'path_on_host': '/dev/sda',
              'path_in_container': '/dev/xvdc',
              'permissions': 'r'},
             {'path_on_host': '/dev/mapper/a_bc-d',
              'path_in_container': '/dev/xvdd',
              'permissions': 'rw'}])
        my_resource = docker_container.DockerContainer(
            'Blog', definition.freeze(properties=props), self.stack)
        get_client_mock = self.patchobject(my_resource, 'get_client')
        get_client_mock.return_value = docker.Client()
        self.assertIsNone(my_resource.validate())
        scheduler.TaskRunner(my_resource.create)()
        self.assertEqual((my_resource.CREATE, my_resource.COMPLETE),
                         my_resource.state)
        client = my_resource.get_client()
        self.assertEqual(['samalba/wordpress'], client.pulled_images)
        self.assertEqual(['/dev/sda:/dev/xvdc:r',
                          '/dev/mapper/a_bc-d:/dev/xvdd:rw'],
                         client.container_start[0]['devices'])
    def test_start_with_mapping_devices_also_with_privileged(self):
        # When privileged is set, explicit device mappings are not passed.
        t = template_format.parse(template)
        self.stack = utils.parse_stack(t)
        definition = self.stack.t.resource_definitions(self.stack)['Blog']
        props = t['Resources']['Blog']['Properties'].copy()
        props['devices'] = (
            [{'path_on_host': '/dev/sdb',
              'path_in_container': '/dev/xvdc',
              'permissions': 'r'}])
        props['privileged'] = True
        my_resource = docker_container.DockerContainer(
            'Blog', definition.freeze(properties=props), self.stack)
        get_client_mock = self.patchobject(my_resource, 'get_client')
        get_client_mock.return_value = docker.Client()
        self.assertIsNone(my_resource.validate())
        scheduler.TaskRunner(my_resource.create)()
        self.assertEqual((my_resource.CREATE, my_resource.COMPLETE),
                         my_resource.state)
        client = my_resource.get_client()
        self.assertEqual(['samalba/wordpress'], client.pulled_images)
        self.assertNotIn('devices', client.container_start[0])
    def test_start_with_mapping_devices_for_low_api_version(self):
        value = ([{'path_on_host': '/dev/sda',
                   'path_in_container': '/dev/xvdc',
                   'permissions': 'rwm'}])
        self.arg_for_low_api_version('devices', value, '1.13')
    def test_start_with_mapping_devices_not_set_path_in_container(self):
        # Omitting path_in_container should default it to path_on_host.
        t = template_format.parse(template)
        self.stack = utils.parse_stack(t)
        definition = self.stack.t.resource_definitions(self.stack)['Blog']
        props = t['Resources']['Blog']['Properties'].copy()
        props['devices'] = [{'path_on_host': '/dev/sda',
                             'permissions': 'rwm'}]
        my_resource = docker_container.DockerContainer(
            'Blog', definition.freeze(properties=props), self.stack)
        get_client_mock = self.patchobject(my_resource, 'get_client')
        get_client_mock.return_value = docker.Client()
        self.assertIsNone(my_resource.validate())
        scheduler.TaskRunner(my_resource.create)()
        self.assertEqual((my_resource.CREATE, my_resource.COMPLETE),
                         my_resource.state)
        client = my_resource.get_client()
        self.assertEqual(['samalba/wordpress'], client.pulled_images)
        self.assertEqual(['/dev/sda:/dev/sda:rwm'],
                         client.container_start[0]['devices'])
    def test_create_with_cpu_set(self):
        t = template_format.parse(template)
        self.stack = utils.parse_stack(t)
        definition = self.stack.t.resource_definitions(self.stack)['Blog']
        props = t['Resources']['Blog']['Properties'].copy()
        props['cpu_set'] = '0-8,16-24,28'
        my_resource = docker_container.DockerContainer(
            'Blog', definition.freeze(properties=props), self.stack)
        get_client_mock = self.patchobject(my_resource, 'get_client')
        get_client_mock.return_value = docker.Client()
        self.assertIsNone(my_resource.validate())
        scheduler.TaskRunner(my_resource.create)()
        self.assertEqual((my_resource.CREATE, my_resource.COMPLETE),
                         my_resource.state)
        client = my_resource.get_client()
        self.assertEqual(['samalba/wordpress'], client.pulled_images)
        self.assertEqual('0-8,16-24,28',
                         client.container_create[0]['cpuset'])
    def test_create_with_cpu_set_for_low_api_version(self):
        self.arg_for_low_api_version('cpu_set', '0-8,^2', '1.11')
|
{
"content_hash": "c797ca4f09ce416345fb5fd27a6f4347",
"timestamp": "",
"source": "github",
"line_count": 451,
"max_line_length": 79,
"avg_line_length": 46.57427937915743,
"alnum_prop": 0.618852654129969,
"repo_name": "steveb/heat",
"id": "136549a380d095ef2f21adc2e830650533ee2d9a",
"size": "21639",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "contrib/heat_docker/heat_docker/tests/test_docker_container.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1226938"
},
{
"name": "Shell",
"bytes": "17870"
}
],
"symlink_target": ""
}
|
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated merge migration: reconciles two divergent migration
    # branches of the 'app' application. Performs no schema changes.

    dependencies = [
        ('app', '0059_weeklytimeslot'),
        ('app', '0151_set_tenth_level_price'),
    ]
    operations = []
|
{
"content_hash": "6a03e66e1cc61242d1c51fb115326c14",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 46,
"avg_line_length": 23,
"alnum_prop": 0.6231884057971014,
"repo_name": "malaonline/Server",
"id": "1c47ffa73bcddaed5833af22acf093de2932f137",
"size": "207",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "server/app/migrations/0152_merge.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "236251"
},
{
"name": "HTML",
"bytes": "532032"
},
{
"name": "JavaScript",
"bytes": "580515"
},
{
"name": "Python",
"bytes": "987542"
},
{
"name": "Shell",
"bytes": "1881"
}
],
"symlink_target": ""
}
|
"""KDDCUP 99 dataset.
A classic dataset for anomaly detection.
The dataset page is available from UCI Machine Learning Repository
https://archive.ics.uci.edu/ml/machine-learning-databases/kddcup99-mld/kddcup.data.gz
"""
import errno
from gzip import GzipFile
import logging
import os
from os.path import dirname, exists, join
import numpy as np
import joblib
from ._base import _fetch_remote
from ._base import _convert_data_dataframe
from . import get_data_home
from ._base import RemoteFileMetadata
from ..utils import Bunch
from ..utils import check_random_state
from ..utils import shuffle as shuffle_method
from ..utils.validation import _deprecate_positional_args
# The original data can be found at:
# https://archive.ics.uci.edu/ml/machine-learning-databases/kddcup99-mld/kddcup.data.gz
# Full dataset mirror on figshare; the checksum is verified by _fetch_remote
# (presumably SHA-256 -- confirm against _fetch_remote's implementation).
ARCHIVE = RemoteFileMetadata(
    filename='kddcup99_data',
    url='https://ndownloader.figshare.com/files/5976045',
    checksum=('3b6c942aa0356c0ca35b7b595a26c89d'
              '343652c9db428893e7494f837b274292'))

# The original data can be found at:
# https://archive.ics.uci.edu/ml/machine-learning-databases/kddcup99-mld/kddcup.data_10_percent.gz
# Mirror of the conventional "10 percent" subset of the same data.
ARCHIVE_10_PERCENT = RemoteFileMetadata(
    filename='kddcup99_10_data',
    url='https://ndownloader.figshare.com/files/5976042',
    checksum=('8045aca0d84e70e622d1148d7df78249'
              '6f6333bf6eb979a1b0837c42a9fd9561'))

# Module-level logger for download/extraction progress messages.
logger = logging.getLogger(__name__)
@_deprecate_positional_args
def fetch_kddcup99(*, subset=None, data_home=None, shuffle=False,
                   random_state=None,
                   percent10=True, download_if_missing=True, return_X_y=False,
                   as_frame=False):
    """Load the kddcup99 dataset (classification).

    Download it if necessary.

    =================   ====================================
    Classes                                              23
    Samples total                                   4898431
    Dimensionality                                       41
    Features            discrete (int) or continuous (float)
    =================   ====================================

    Read more in the :ref:`User Guide <kddcup99_dataset>`.

    .. versionadded:: 0.18

    Parameters
    ----------
    subset : {'SA', 'SF', 'http', 'smtp'}, default=None
        To return the corresponding classical subsets of kddcup 99.
        If None, return the entire kddcup 99 dataset.

    data_home : str, default=None
        Specify another download and cache folder for the datasets. By default
        all scikit-learn data is stored in '~/scikit_learn_data' subfolders.

        .. versionadded:: 0.19

    shuffle : bool, default=False
        Whether to shuffle dataset.

    random_state : int, RandomState instance or None, default=None
        Determines random number generation for dataset shuffling and for
        selection of abnormal samples if `subset='SA'`. Pass an int for
        reproducible output across multiple function calls.
        See :term:`Glossary <random_state>`.

    percent10 : bool, default=True
        Whether to load only 10 percent of the data.

    download_if_missing : bool, default=True
        If False, raise a IOError if the data is not locally available
        instead of trying to download the data from the source site.

    return_X_y : bool, default=False
        If True, returns ``(data, target)`` instead of a Bunch object. See
        below for more information about the `data` and `target` object.

        .. versionadded:: 0.20

    as_frame : bool, default=False
        If `True`, returns a pandas Dataframe for the ``data`` and ``target``
        objects in the `Bunch` returned object; `Bunch` return object will also
        have a ``frame`` member.

        .. versionadded:: 0.24

    Returns
    -------
    data : :class:`~sklearn.utils.Bunch`
        Dictionary-like object, with the following attributes.

        data : {ndarray, dataframe} of shape (494021, 41)
            The data matrix to learn. If `as_frame=True`, `data` will be a
            pandas DataFrame.
        target : {ndarray, series} of shape (494021,)
            The regression target for each sample. If `as_frame=True`, `target`
            will be a pandas Series.
        frame : dataframe of shape (494021, 42)
            Only present when `as_frame=True`. Contains `data` and `target`.
        DESCR : str
            The full description of the dataset.
        feature_names : list
            The names of the dataset columns
        target_names: list
            The names of the target columns

    (data, target) : tuple if ``return_X_y`` is True

        .. versionadded:: 0.20
    """
    data_home = get_data_home(data_home=data_home)
    # Always load the raw (full or 10%) dataset first; the classical subsets
    # below are derived from it in memory.
    kddcup99 = _fetch_brute_kddcup99(
        data_home=data_home,
        percent10=percent10,
        download_if_missing=download_if_missing
    )
    data = kddcup99.data
    target = kddcup99.target
    feature_names = kddcup99.feature_names
    target_names = kddcup99.target_names
    if subset == 'SA':
        # 'SA': keep all normal-traffic rows plus a fixed-size random draw
        # (3377 rows, with replacement) of the attack rows, so that
        # anomalies are rare relative to normal traffic.
        s = target == b'normal.'
        t = np.logical_not(s)
        normal_samples = data[s, :]
        normal_targets = target[s]
        abnormal_samples = data[t, :]
        abnormal_targets = target[t]
        n_samples_abnormal = abnormal_samples.shape[0]
        # selected abnormal samples:
        random_state = check_random_state(random_state)
        r = random_state.randint(0, n_samples_abnormal, 3377)
        abnormal_samples = abnormal_samples[r]
        abnormal_targets = abnormal_targets[r]
        data = np.r_[normal_samples, abnormal_samples]
        target = np.r_[normal_targets, abnormal_targets]
    if subset == 'SF' or subset == 'http' or subset == 'smtp':
        # 'SF' (and its 'http'/'smtp' refinements): keep only rows whose
        # logged_in attribute (column 11 per the dtype list in
        # _fetch_brute_kddcup99) is 1, then drop that column.
        # select all samples with positive logged_in attribute:
        s = data[:, 11] == 1
        data = np.c_[data[s, :11], data[s, 12:]]
        feature_names = feature_names[:11] + feature_names[12:]
        target = target[s]
        # Log-transform the continuous columns duration (0), src_bytes (4)
        # and dst_bytes (5); the +0.1 offset avoids log(0).
        data[:, 0] = np.log((data[:, 0] + 0.1).astype(float, copy=False))
        data[:, 4] = np.log((data[:, 4] + 0.1).astype(float, copy=False))
        data[:, 5] = np.log((data[:, 5] + 0.1).astype(float, copy=False))
        if subset == 'http':
            # Restrict to http-service rows; keep the 3 continuous features.
            s = data[:, 2] == b'http'
            data = data[s]
            target = target[s]
            data = np.c_[data[:, 0], data[:, 4], data[:, 5]]
            feature_names = [feature_names[0], feature_names[4],
                             feature_names[5]]
        if subset == 'smtp':
            # Restrict to smtp-service rows; keep the 3 continuous features.
            s = data[:, 2] == b'smtp'
            data = data[s]
            target = target[s]
            data = np.c_[data[:, 0], data[:, 4], data[:, 5]]
            feature_names = [feature_names[0], feature_names[4],
                             feature_names[5]]
        if subset == 'SF':
            # Keep service (categorical, col 2) plus the 3 continuous features.
            data = np.c_[data[:, 0], data[:, 2], data[:, 4], data[:, 5]]
            feature_names = [feature_names[0], feature_names[2],
                             feature_names[4], feature_names[5]]
    if shuffle:
        data, target = shuffle_method(data, target, random_state=random_state)
    module_path = dirname(__file__)
    with open(join(module_path, 'descr', 'kddcup99.rst')) as rst_file:
        fdescr = rst_file.read()
    if return_X_y:
        return data, target
    frame = None
    if as_frame:
        frame, data, target = _convert_data_dataframe(
            "fetch_kddcup99", data, target, feature_names, target_names
        )
    return Bunch(
        data=data,
        target=target,
        frame=frame,
        target_names=target_names,
        feature_names=feature_names,
        DESCR=fdescr,
    )
def _fetch_brute_kddcup99(data_home=None,
                          download_if_missing=True, percent10=True):
    """Load the kddcup99 dataset, downloading it if necessary.

    Parameters
    ----------
    data_home : str, default=None
        Specify another download and cache folder for the datasets. By default
        all scikit-learn data is stored in '~/scikit_learn_data' subfolders.

    download_if_missing : bool, default=True
        If False, raise a IOError if the data is not locally available
        instead of trying to download the data from the source site.

    percent10 : bool, default=True
        Whether to load only 10 percent of the data.

    Returns
    -------
    dataset : :class:`~sklearn.utils.Bunch`
        Dictionary-like object, with the following attributes.

        data : ndarray of shape (494021, 41)
            Each row corresponds to the 41 features in the dataset.
        target : ndarray of shape (494021,)
            Each value corresponds to one of the 21 attack types or to the
            label 'normal.'.
        feature_names : list
            The names of the dataset columns
        target_names: list
            The names of the target columns
        DESCR : str
            Description of the kddcup99 dataset.
    """
    data_home = get_data_home(data_home=data_home)
    dir_suffix = "-py3"
    if percent10:
        kddcup_dir = join(data_home, "kddcup99_10" + dir_suffix)
        archive = ARCHIVE_10_PERCENT
    else:
        kddcup_dir = join(data_home, "kddcup99" + dir_suffix)
        archive = ARCHIVE
    samples_path = join(kddcup_dir, "samples")
    targets_path = join(kddcup_dir, "targets")
    # The samples file doubles as the cache marker for both arrays.
    available = exists(samples_path)
    # dtype of each of the 42 CSV columns: 41 features + trailing label.
    dt = [('duration', int),
          ('protocol_type', 'S4'),
          ('service', 'S11'),
          ('flag', 'S6'),
          ('src_bytes', int),
          ('dst_bytes', int),
          ('land', int),
          ('wrong_fragment', int),
          ('urgent', int),
          ('hot', int),
          ('num_failed_logins', int),
          ('logged_in', int),
          ('num_compromised', int),
          ('root_shell', int),
          ('su_attempted', int),
          ('num_root', int),
          ('num_file_creations', int),
          ('num_shells', int),
          ('num_access_files', int),
          ('num_outbound_cmds', int),
          ('is_host_login', int),
          ('is_guest_login', int),
          ('count', int),
          ('srv_count', int),
          ('serror_rate', float),
          ('srv_serror_rate', float),
          ('rerror_rate', float),
          ('srv_rerror_rate', float),
          ('same_srv_rate', float),
          ('diff_srv_rate', float),
          ('srv_diff_host_rate', float),
          ('dst_host_count', int),
          ('dst_host_srv_count', int),
          ('dst_host_same_srv_rate', float),
          ('dst_host_diff_srv_rate', float),
          ('dst_host_same_src_port_rate', float),
          ('dst_host_srv_diff_host_rate', float),
          ('dst_host_serror_rate', float),
          ('dst_host_srv_serror_rate', float),
          ('dst_host_rerror_rate', float),
          ('dst_host_srv_rerror_rate', float),
          ('labels', 'S16')]
    column_names = [c[0] for c in dt]
    target_names = column_names[-1]
    feature_names = column_names[:-1]
    if download_if_missing and not available:
        _mkdirp(kddcup_dir)
        logger.info("Downloading %s" % archive.url)
        _fetch_remote(archive, dirname=kddcup_dir)
        DT = np.dtype(dt)
        logger.debug("extracting archive")
        archive_path = join(kddcup_dir, archive.filename)
        file_ = GzipFile(filename=archive_path, mode='r')
        Xy = []
        for line in file_.readlines():
            line = line.decode()
            Xy.append(line.replace('\n', '').split(','))
        file_.close()
        logger.debug('extraction done')
        os.remove(archive_path)
        # Column-wise conversion: the array stays dtype=object overall but
        # each column holds values coerced to its declared field type.
        Xy = np.asarray(Xy, dtype=object)
        for j in range(42):
            Xy[:, j] = Xy[:, j].astype(DT[j])
        X = Xy[:, :-1]
        y = Xy[:, -1]
        # XXX bug when compress!=0:
        # (error: 'Incorrect data length while decompressing[...] the file
        # could be corrupted.')
        joblib.dump(X, samples_path, compress=0)
        joblib.dump(y, targets_path, compress=0)
    elif not available:
        if not download_if_missing:
            raise IOError("Data not found and `download_if_missing` is False")
    # If X and y were not just parsed above, load the cached copies; the
    # NameError probe distinguishes the fresh-download and cached cases.
    try:
        X, y
    except NameError:
        X = joblib.load(samples_path)
        y = joblib.load(targets_path)
    return Bunch(
        data=X,
        target=y,
        feature_names=feature_names,
        target_names=[target_names],
    )
def _mkdirp(d):
    """Ensure directory *d* exists (like ``mkdir -p`` on Unix).

    No guarantee that the directory is writable.

    Parameters
    ----------
    d : str
        Path of the directory to create; intermediate directories are
        created as needed.  A no-op if the directory already exists.

    Raises
    ------
    OSError
        If the path cannot be created for a reason other than already
        existing as a directory (e.g. permission denied, or the path
        exists but is a regular file).
    """
    # exist_ok=True makes the "already exists as a directory" case a no-op,
    # replacing the former manual errno.EEXIST check.  Unlike that check it
    # does NOT silently accept a pre-existing regular file at `d`, which
    # previously only failed later with a confusing error.
    os.makedirs(d, exist_ok=True)
|
{
"content_hash": "c53105a45596dfa9d0ea8d6c53a8d356",
"timestamp": "",
"source": "github",
"line_count": 372,
"max_line_length": 98,
"avg_line_length": 33.94086021505376,
"alnum_prop": 0.5773800095041977,
"repo_name": "ndingwall/scikit-learn",
"id": "e5c8bb2f298ded7e6d779825ec0d32449cdf5b76",
"size": "12626",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "sklearn/datasets/_kddcup99.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "3366"
},
{
"name": "C",
"bytes": "416843"
},
{
"name": "C++",
"bytes": "140261"
},
{
"name": "Makefile",
"bytes": "1630"
},
{
"name": "PowerShell",
"bytes": "17042"
},
{
"name": "Python",
"bytes": "6794973"
},
{
"name": "Shell",
"bytes": "13442"
}
],
"symlink_target": ""
}
|
"""Handle version information related to Visual Stuio."""
import errno
import os
import re
import subprocess
import sys
import gyp
import glob
def JoinPath(*args):
  """Join the given path components and return the normalized result."""
  joined = os.path.join(*args)
  return os.path.normpath(joined)
class VisualStudioVersion(object):
  """Information regarding a version of Visual Studio."""

  def __init__(self, short_name, description,
               solution_version, project_version, flat_sln, uses_vcxproj,
               path, sdk_based, default_toolset=None, compatible_sdks=None):
    # short_name is e.g. '2013' or '2013e' (the 'e' suffix marks an Express
    # edition); several methods below compare against it lexicographically.
    self.short_name = short_name
    self.description = description
    self.solution_version = solution_version
    self.project_version = project_version
    self.flat_sln = flat_sln
    self.uses_vcxproj = uses_vcxproj
    self.path = path
    self.sdk_based = sdk_based
    self.default_toolset = default_toolset
    compatible_sdks = compatible_sdks or []
    # Sort SDK names newest-first by their numeric version ('v10.0' -> 10.0).
    compatible_sdks.sort(key=lambda v: float(v.replace('v', '')), reverse=True)
    self.compatible_sdks = compatible_sdks

  def ShortName(self):
    return self.short_name

  def Description(self):
    """Get the full description of the version."""
    return self.description

  def SolutionVersion(self):
    """Get the version number of the sln files."""
    return self.solution_version

  def ProjectVersion(self):
    """Get the version number of the vcproj or vcxproj files."""
    return self.project_version

  def FlatSolution(self):
    return self.flat_sln

  def UsesVcxproj(self):
    """Returns true if this version uses a vcxproj file."""
    return self.uses_vcxproj

  def ProjectExtension(self):
    """Returns the file extension for the project."""
    return self.uses_vcxproj and '.vcxproj' or '.vcproj'

  def Path(self):
    """Returns the path to Visual Studio installation."""
    return self.path

  def ToolPath(self, tool):
    """Returns the path to a given compiler tool. """
    return os.path.normpath(os.path.join(self.path, "VC/bin", tool))

  def DefaultToolset(self):
    """Returns the msbuild toolset version that will be used in the absence
    of a user override."""
    return self.default_toolset

  def _SetupScriptInternal(self, target_arch):
    """Returns a command (with arguments) to be used to set up the
    environment."""
    assert target_arch in ('x86', 'x64'), "target_arch not supported"
    # If WindowsSDKDir is set and SetEnv.Cmd exists then we are using the
    # depot_tools build tools and should run SetEnv.Cmd to set up the
    # environment. The check for WindowsSDKDir alone is not sufficient because
    # this is set by running vcvarsall.bat.
    sdk_dir = os.environ.get('WindowsSDKDir', '')
    setup_path = JoinPath(sdk_dir, 'Bin', 'SetEnv.Cmd')
    if self.sdk_based and sdk_dir and os.path.exists(setup_path):
      return [setup_path, '/' + target_arch]
    # PROCESSOR_ARCHITEW6432 covers 32-bit Python on a 64-bit host.
    is_host_arch_x64 = (
        os.environ.get('PROCESSOR_ARCHITECTURE') == 'AMD64' or
        os.environ.get('PROCESSOR_ARCHITEW6432') == 'AMD64'
    )
    # For VS2017 (and newer) it's fairly easy
    # NOTE(review): this is a lexicographic string comparison; it works for
    # the year-based names used here ('2005'..'2019', optionally 'e'-suffixed)
    # but would misbehave for any other naming scheme -- confirm if new names
    # are ever added.
    if self.short_name >= '2017':
      script_path = JoinPath(self.path,
                             'VC', 'Auxiliary', 'Build', 'vcvarsall.bat')
      # Always use a native executable, cross-compiling if necessary.
      host_arch = 'amd64' if is_host_arch_x64 else 'x86'
      msvc_target_arch = 'amd64' if target_arch == 'x64' else 'x86'
      arg = host_arch
      if host_arch != msvc_target_arch:
        arg += '_' + msvc_target_arch
      return [script_path, arg]
    # We try to find the best version of the env setup batch.
    vcvarsall = JoinPath(self.path, 'VC', 'vcvarsall.bat')
    if target_arch == 'x86':
      if self.short_name >= '2013' and self.short_name[-1] != 'e' and \
          is_host_arch_x64:
        # VS2013 and later, non-Express have a x64-x86 cross that we want
        # to prefer.
        return [vcvarsall, 'amd64_x86']
      else:
        # Otherwise, the standard x86 compiler. We don't use VC/vcvarsall.bat
        # for x86 because vcvarsall calls vcvars32, which it can only find if
        # VS??COMNTOOLS is set, which isn't guaranteed.
        return [JoinPath(self.path, 'Common7', 'Tools', 'vsvars32.bat')]
    elif target_arch == 'x64':
      arg = 'x86_amd64'
      # Use the 64-on-64 compiler if we're not using an express edition and
      # we're running on a 64bit OS.
      if self.short_name[-1] != 'e' and is_host_arch_x64:
        arg = 'amd64'
      return [vcvarsall, arg]

  def SetupScript(self, target_arch):
    # Wraps _SetupScriptInternal and verifies the setup batch file actually
    # exists before handing the command back to the caller.
    script_data = self._SetupScriptInternal(target_arch)
    script_path = script_data[0]
    if not os.path.exists(script_path):
      raise Exception('%s is missing - make sure VC++ tools are installed.' %
                      script_path)
    return script_data
def _RegistryQueryBase(sysdir, key, value):
  """Use reg.exe to read a particular key.

  While ideally we might use the win32 module, we would like gyp to be
  python neutral, so for instance cygwin python lacks this module.

  Arguments:
    sysdir: The system subdirectory to attempt to launch reg.exe from.
    key: The registry key to read from.
    value: The particular value to read.
  Return:
    stdout from reg.exe as text (str), or None for failure.
  """
  # Skip if not on Windows or Python Win32 setup issue
  if sys.platform not in ('win32', 'cygwin'):
    return None
  # Setup params to pass to and attempt to launch reg.exe
  cmd = [os.path.join(os.environ.get('WINDIR', ''), sysdir, 'reg.exe'),
         'query', key]
  if value:
    cmd.extend(['/v', value])
  p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
  # Obtain the stdout from reg.exe, reading to the end so p.returncode is valid
  # Note that the error text may be in [1] in some cases
  text = p.communicate()[0]
  # Under Python 3, communicate() returns bytes; decode so that callers such
  # as _RegistryGetValue (which runs a str regex over this output) don't hit
  # a str/bytes TypeError.  On Python 2 the output is already str.
  if not isinstance(text, str):
    text = text.decode('utf-8', 'ignore')
  # Check return code from reg.exe; officially 0==success and 1==error
  if p.returncode:
    return None
  return text
def _RegistryQuery(key, value=None):
  r"""Use reg.exe to read a particular key through _RegistryQueryBase.

  First tries to launch from %WinDir%\Sysnative to avoid WoW64 redirection. If
  that fails, it falls back to System32. Sysnative is available on Vista and
  up and available on Windows Server 2003 and XP through KB patch 942589. Note
  that Sysnative will always fail if using 64-bit python due to it being a
  virtual directory and System32 will work correctly in the first place.
  KB 942589 - http://support.microsoft.com/kb/942589/en-us.

  Arguments:
    key: The registry key.
    value: The particular registry value to read (optional).
  Return:
    stdout from reg.exe, or None for failure.
  """
  try:
    return _RegistryQueryBase('Sysnative', key, value)
  except OSError as e:
    # Only a missing Sysnative directory triggers the System32 fallback;
    # any other OS error is propagated unchanged.
    if e.errno != errno.ENOENT:
      raise
  return _RegistryQueryBase('System32', key, value)
def _RegistryGetValueUsingWinReg(key, value):
  """Use the _winreg module to obtain the value of a registry key.

  Args:
    key: The registry key.
    value: The particular registry value to read.
  Return:
    contents of the registry key's value, or None on failure. Throws
    ImportError if _winreg is unavailable.
  """
  try:
    # Python 2 name of the module.
    import _winreg as winreg
  except ImportError:
    # Python 3 renamed it to winreg.  On non-Windows platforms this import
    # also fails, and the resulting ImportError propagates to the caller by
    # design (see docstring).
    import winreg
  try:
    root, subkey = key.split('\\', 1)
    assert root == 'HKLM'  # Only need HKLM for now.
    with winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, subkey) as hkey:
      return winreg.QueryValueEx(hkey, value)[0]
  except WindowsError:
    # WindowsError only exists on Windows (on Python 3 it is an alias of
    # OSError); this handler is only reachable after winreg imported
    # successfully, i.e. on Windows.
    return None
def _RegistryGetValue(key, value):
  """Use _winreg or reg.exe to obtain the value of a registry key.

  Using _winreg is preferable because it solves an issue on some corporate
  environments where access to reg.exe is locked down. However, we still need
  to fallback to reg.exe for the case where the _winreg module is not available
  (for example in cygwin python).

  Args:
    key: The registry key.
    value: The particular registry value to read.
  Return:
    contents of the registry key's value, or None on failure.
  """
  try:
    return _RegistryGetValueUsingWinReg(key, value)
  except ImportError:
    # winreg module unavailable (e.g. cygwin python): fall back to reg.exe.
    pass
  output = _RegistryQuery(key, value)
  if not output:
    return None
  # Pull the value out of reg.exe's "<name>  REG_<type>  <value>" output line.
  matched = re.search(r'REG_\w+\s+([^\r]+)\r\n', output)
  return matched.group(1) if matched else None
def _CreateVersion(name, path, sdk_based=False):
  """Sets up MSVS project generation.

  Setup is based off the GYP_MSVS_VERSION environment variable or whatever is
  autodetected if GYP_MSVS_VERSION is not explicitly specified. If a version is
  passed in that doesn't match a value in versions python will throw a error.
  """
  if path:
    path = os.path.normpath(path)
  # One entry per supported GYP_MSVS_VERSION value.  An 'e' suffix denotes an
  # Express edition (flat solution layout).
  versions = {
      '2019': VisualStudioVersion('2019',
                                  'Visual Studio 2019',
                                  solution_version='12.00',
                                  project_version='15.0',
                                  flat_sln=False,
                                  uses_vcxproj=True,
                                  path=path,
                                  sdk_based=sdk_based,
                                  default_toolset='v142',
                                  compatible_sdks=['v8.1', 'v10.0']),
      '2017': VisualStudioVersion('2017',
                                  'Visual Studio 2017',
                                  solution_version='12.00',
                                  project_version='15.0',
                                  flat_sln=False,
                                  uses_vcxproj=True,
                                  path=path,
                                  sdk_based=sdk_based,
                                  default_toolset='v141',
                                  compatible_sdks=['v8.1', 'v10.0']),
      '2015': VisualStudioVersion('2015',
                                  'Visual Studio 2015',
                                  solution_version='12.00',
                                  project_version='14.0',
                                  flat_sln=False,
                                  uses_vcxproj=True,
                                  path=path,
                                  sdk_based=sdk_based,
                                  default_toolset='v140'),
      '2013': VisualStudioVersion('2013',
                                  'Visual Studio 2013',
                                  solution_version='13.00',
                                  project_version='12.0',
                                  flat_sln=False,
                                  uses_vcxproj=True,
                                  path=path,
                                  sdk_based=sdk_based,
                                  default_toolset='v120'),
      '2013e': VisualStudioVersion('2013e',
                                   'Visual Studio 2013',
                                   solution_version='13.00',
                                   project_version='12.0',
                                   flat_sln=True,
                                   uses_vcxproj=True,
                                   path=path,
                                   sdk_based=sdk_based,
                                   default_toolset='v120'),
      '2012': VisualStudioVersion('2012',
                                  'Visual Studio 2012',
                                  solution_version='12.00',
                                  project_version='4.0',
                                  flat_sln=False,
                                  uses_vcxproj=True,
                                  path=path,
                                  sdk_based=sdk_based,
                                  default_toolset='v110'),
      '2012e': VisualStudioVersion('2012e',
                                   'Visual Studio 2012',
                                   solution_version='12.00',
                                   project_version='4.0',
                                   flat_sln=True,
                                   uses_vcxproj=True,
                                   path=path,
                                   sdk_based=sdk_based,
                                   default_toolset='v110'),
      '2010': VisualStudioVersion('2010',
                                  'Visual Studio 2010',
                                  solution_version='11.00',
                                  project_version='4.0',
                                  flat_sln=False,
                                  uses_vcxproj=True,
                                  path=path,
                                  sdk_based=sdk_based),
      '2010e': VisualStudioVersion('2010e',
                                   'Visual C++ Express 2010',
                                   solution_version='11.00',
                                   project_version='4.0',
                                   flat_sln=True,
                                   uses_vcxproj=True,
                                   path=path,
                                   sdk_based=sdk_based),
      '2008': VisualStudioVersion('2008',
                                  'Visual Studio 2008',
                                  solution_version='10.00',
                                  project_version='9.00',
                                  flat_sln=False,
                                  uses_vcxproj=False,
                                  path=path,
                                  sdk_based=sdk_based),
      '2008e': VisualStudioVersion('2008e',
                                   'Visual Studio 2008',
                                   solution_version='10.00',
                                   project_version='9.00',
                                   flat_sln=True,
                                   uses_vcxproj=False,
                                   path=path,
                                   sdk_based=sdk_based),
      '2005': VisualStudioVersion('2005',
                                  'Visual Studio 2005',
                                  solution_version='9.00',
                                  project_version='8.00',
                                  flat_sln=False,
                                  uses_vcxproj=False,
                                  path=path,
                                  sdk_based=sdk_based),
      '2005e': VisualStudioVersion('2005e',
                                   'Visual Studio 2005',
                                   solution_version='9.00',
                                   project_version='8.00',
                                   flat_sln=True,
                                   uses_vcxproj=False,
                                   path=path,
                                   sdk_based=sdk_based),
  }
  # KeyError (not a friendlier message) for unknown names, per the docstring.
  return versions[str(name)]
def _ConvertToCygpath(path):
  """Convert to cygwin path if we are using cygwin.

  Arguments:
    path: A native path string.
  Return:
    The cygwin form of `path` when running under cygwin (via the `cygpath`
    tool), otherwise `path` unchanged.  Always returns str.
  """
  if sys.platform == 'cygwin':
    p = subprocess.Popen(['cygpath', path], stdout=subprocess.PIPE)
    path = p.communicate()[0].strip()
    # Under Python 3, communicate() yields bytes; callers pass the result to
    # os.path.join with str components, so decode to keep types consistent.
    if not isinstance(path, str):
      path = path.decode('utf-8')
  return path
def _DetectVisualStudioVersions(versions_to_check, force_express):
  """Collect the list of installed visual studio versions.

  Arguments:
    versions_to_check: Iterable of registry version strings (e.g. '12.0')
        to probe, in descending order of preference.
    force_express: If true, only report Express editions even when a full
        edition is installed.
  Returns:
    A list of visual studio versions installed in descending order of
    usage preference.

  Base this on the registry and a quick check if devenv.exe exists.
  Possibilities are:
    2005(e) - Visual Studio 2005 (8)
    2008(e) - Visual Studio 2008 (9)
    2010(e) - Visual Studio 2010 (10)
    2012(e) - Visual Studio 2012 (11)
    2013(e) - Visual Studio 2013 (12)
    2015    - Visual Studio 2015 (14)
    2017    - Visual Studio 2017 (15)
    2019    - Visual Studio 2019 (16)
  Where (e) is e for express editions of MSVS and blank otherwise.
  """
  version_to_year = {
      '8.0': '2005',
      '9.0': '2008',
      '10.0': '2010',
      '11.0': '2012',
      '12.0': '2013',
      '14.0': '2015',
      '15.0': '2017',
      # Fix: '16.0' was missing even though SelectVisualStudioVersion maps
      # '2019' to ('16.0',); probing a 16.0 registry entry then raised
      # KeyError below instead of reporting the installation.
      '16.0': '2019',
  }
  versions = []
  for version in versions_to_check:
    # Old method of searching for which VS version is installed
    # We don't use the 2010-encouraged-way because we also want to get the
    # path to the binaries, which it doesn't offer.
    keys = [r'HKLM\Software\Microsoft\VisualStudio\%s' % version,
            r'HKLM\Software\Wow6432Node\Microsoft\VisualStudio\%s' % version,
            r'HKLM\Software\Microsoft\VCExpress\%s' % version,
            r'HKLM\Software\Wow6432Node\Microsoft\VCExpress\%s' % version]
    for index in range(len(keys)):
      path = _RegistryGetValue(keys[index], 'InstallDir')
      if not path:
        continue
      path = _ConvertToCygpath(path)
      # Check for full.
      full_path = os.path.join(path, 'devenv.exe')
      express_path = os.path.join(path, '*express.exe')
      if not force_express and os.path.exists(full_path):
        # Add this one.
        versions.append(_CreateVersion(version_to_year[version],
            os.path.join(path, '..', '..')))
      # Check for express.
      elif glob.glob(express_path):
        # Add this one.
        versions.append(_CreateVersion(version_to_year[version] + 'e',
            os.path.join(path, '..', '..')))
    # The old method above does not work when only SDK is installed.
    keys = [r'HKLM\Software\Microsoft\VisualStudio\SxS\VC7',
            r'HKLM\Software\Wow6432Node\Microsoft\VisualStudio\SxS\VC7',
            r'HKLM\Software\Microsoft\VisualStudio\SxS\VS7',
            r'HKLM\Software\Wow6432Node\Microsoft\VisualStudio\SxS\VS7']
    for index in range(len(keys)):
      path = _RegistryGetValue(keys[index], version)
      if not path:
        continue
      path = _ConvertToCygpath(path)
      if version in ('15.0', '16.0'):
        # VS2017/VS2019 record the install root directly; no SDK-based
        # Express fallback applies.
        if os.path.exists(path):
          versions.append(_CreateVersion(version_to_year[version], path))
      elif version != '14.0':  # There is no Express edition for 2015.
        versions.append(_CreateVersion(version_to_year[version] + 'e',
            os.path.join(path, '..'), sdk_based=True))
  return versions
def SelectVisualStudioVersion(version='auto', allow_fallback=True):
  """Select which version of Visual Studio projects to generate.

  Arguments:
    version: Hook to allow caller to force a particular version (vs auto).
    allow_fallback: When false, raise instead of defaulting if nothing is
        detected.
  Returns:
    An object representing a visual studio project format version.
  """
  # In auto mode, check environment variable for override.
  if version == 'auto':
    version = os.environ.get('GYP_MSVS_VERSION', 'auto')
  version_map = {
    # NOTE(review): 'auto' never probes '16.0', so VS2019 is not
    # auto-detected even though an explicit '2019' entry exists below --
    # confirm whether this is intentional.  The ordering expresses
    # preference; '11.0' (VS2012) deliberately(?) comes last.
    'auto': ('15.0', '14.0', '12.0', '10.0', '9.0', '8.0', '11.0'),
    '2005': ('8.0',),
    '2005e': ('8.0',),
    '2008': ('9.0',),
    '2008e': ('9.0',),
    '2010': ('10.0',),
    '2010e': ('10.0',),
    '2012': ('11.0',),
    '2012e': ('11.0',),
    '2013': ('12.0',),
    '2013e': ('12.0',),
    '2015': ('14.0',),
    '2017': ('15.0',),
    '2019': ('16.0',),
  }
  override_path = os.environ.get('GYP_MSVS_OVERRIDE_PATH')
  if override_path:
    msvs_version = os.environ.get('GYP_MSVS_VERSION')
    if not msvs_version:
      raise ValueError('GYP_MSVS_OVERRIDE_PATH requires GYP_MSVS_VERSION to be '
                       'set to a particular version (e.g. 2010e).')
    # An explicit override path is trusted as-is (sdk_based), bypassing all
    # registry detection.
    return _CreateVersion(msvs_version, override_path, sdk_based=True)
  version = str(version)
  # Express editions are requested via an 'e' suffix (e.g. '2013e').
  versions = _DetectVisualStudioVersions(version_map[version], 'e' in version)
  if not versions:
    if not allow_fallback:
      raise ValueError('Could not locate Visual Studio installation.')
    if version == 'auto':
      # Default to 2005 if we couldn't find anything
      return _CreateVersion('2005', None)
    else:
      return _CreateVersion(version, None)
  return versions[0]
|
{
"content_hash": "4ccc71c7999c3c8a928ff49a68edda6f",
"timestamp": "",
"source": "github",
"line_count": 501,
"max_line_length": 80,
"avg_line_length": 39.12375249500998,
"alnum_prop": 0.549206673128922,
"repo_name": "turbulenz/gyp",
"id": "49f8b135072ec5541ec3e7f2c272c7cd4185a11c",
"size": "19758",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pylib/gyp/MSVSVersion.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Assembly",
"bytes": "1133"
},
{
"name": "Batchfile",
"bytes": "1115"
},
{
"name": "C",
"bytes": "39351"
},
{
"name": "C++",
"bytes": "43106"
},
{
"name": "Emacs Lisp",
"bytes": "14357"
},
{
"name": "Objective-C",
"bytes": "14391"
},
{
"name": "Objective-C++",
"bytes": "1873"
},
{
"name": "Python",
"bytes": "2237491"
},
{
"name": "Shell",
"bytes": "18495"
},
{
"name": "Swift",
"bytes": "116"
}
],
"symlink_target": ""
}
|
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class servicegroupmember_stats(base_resource) :
ur""" Statistics for service group entity resource.
"""
def __init__(self) :
self._servicegroupname = ""
self._ip = ""
self._servername = ""
self._port = 0
self._clearstats = ""
self._avgsvrttfb = 0
self._primaryipaddress = ""
self._primaryport = 0
self._servicetype = ""
self._state = ""
self._totalrequests = 0
self._requestsrate = 0
self._totalresponses = 0
self._responsesrate = 0
self._totalrequestbytes = 0
self._requestbytesrate = 0
self._totalresponsebytes = 0
self._responsebytesrate = 0
self._curclntconnections = 0
self._surgecount = 0
self._cursrvrconnections = 0
self._svrestablishedconn = 0
self._curreusepool = 0
self._maxclients = 0
@property
def servicegroupname(self) :
ur"""Displays statistics for the specified service group.Name of the service group. Must begin with an ASCII alphanumeric or underscore (_) character, and must contain only ASCII alphanumeric, underscore, hash (#), period (.), space, colon (:), at sign (@), equal sign (=), and hyphen (-) characters.
CLI Users: If the name includes one or more spaces, enclose the name in double or single quotation marks (for example, "my servicegroup" or 'my servicegroup').<br/>Minimum length = 1.
"""
try :
return self._servicegroupname
except Exception as e:
raise e
@servicegroupname.setter
def servicegroupname(self, servicegroupname) :
ur"""Displays statistics for the specified service group.Name of the service group. Must begin with an ASCII alphanumeric or underscore (_) character, and must contain only ASCII alphanumeric, underscore, hash (#), period (.), space, colon (:), at sign (@), equal sign (=), and hyphen (-) characters.
CLI Users: If the name includes one or more spaces, enclose the name in double or single quotation marks (for example, "my servicegroup" or 'my servicegroup').
"""
try :
self._servicegroupname = servicegroupname
except Exception as e:
raise e
@property
def ip(self) :
ur"""IP address of the service group. Mutually exclusive with the server name parameter.
"""
try :
return self._ip
except Exception as e:
raise e
@ip.setter
def ip(self, ip) :
ur"""IP address of the service group. Mutually exclusive with the server name parameter.
"""
try :
self._ip = ip
except Exception as e:
raise e
@property
def servername(self) :
ur"""Name of the server. Mutually exclusive with the IP address parameter.<br/>Minimum length = 1.
"""
try :
return self._servername
except Exception as e:
raise e
@servername.setter
def servername(self, servername) :
ur"""Name of the server. Mutually exclusive with the IP address parameter.
"""
try :
self._servername = servername
except Exception as e:
raise e
@property
def port(self) :
ur"""Port number of the service group member.<br/>Range 1 - 65535.
"""
try :
return self._port
except Exception as e:
raise e
@port.setter
def port(self, port) :
ur"""Port number of the service group member.
"""
try :
self._port = port
except Exception as e:
raise e
@property
def clearstats(self) :
ur"""Clear the statsistics / counters.<br/>Possible values = basic, full.
"""
try :
return self._clearstats
except Exception as e:
raise e
@clearstats.setter
def clearstats(self, clearstats) :
ur"""Clear the statsistics / counters
"""
try :
self._clearstats = clearstats
except Exception as e:
raise e
@property
def svrestablishedconn(self) :
ur"""Number of server connections in ESTABLISHED state.
"""
try :
return self._svrestablishedconn
except Exception as e:
raise e
@property
def curclntconnections(self) :
ur"""Number of current client connections.
"""
try :
return self._curclntconnections
except Exception as e:
raise e
@property
def servicetype(self) :
ur"""The service type of this service.Possible values are ADNS, DNS, MYSQL, RTSP, SSL_DIAMETER, ADNS_TCP, DNS_TCP, NNTP, SIP_UDP, SSL_TCP, ANY, FTP, RADIUS, SNMP, TCP, DHCPRA, HTTP, RDP, SSL, TFTP, DIAMETER, MSSQL, RPCSVR, SSL_BRIDGE, UDP.
"""
try :
return self._servicetype
except Exception as e:
raise e
@property
def totalrequests(self) :
ur"""Total number of requests received on this service or virtual server. (This applies to HTTP/SSL services and servers.).
"""
try :
return self._totalrequests
except Exception as e:
raise e
@property
def surgecount(self) :
ur"""Number of requests in the surge queue.
"""
try :
return self._surgecount
except Exception as e:
raise e
@property
def responsebytesrate(self) :
ur"""Rate (/s) counter for totalresponsebytes.
"""
try :
return self._responsebytesrate
except Exception as e:
raise e
@property
def totalresponses(self) :
ur"""Number of responses received on this service or virtual server. (This applies to HTTP/SSL services and servers.).
"""
try :
return self._totalresponses
except Exception as e:
raise e
@property
def requestbytesrate(self) :
ur"""Rate (/s) counter for totalrequestbytes.
"""
try :
return self._requestbytesrate
except Exception as e:
raise e
@property
def cursrvrconnections(self) :
ur"""Number of current connections to the actual servers behind the virtual server.
"""
try :
return self._cursrvrconnections
except Exception as e:
raise e
@property
def primaryipaddress(self) :
ur"""The IP address on which the service is running.
"""
try :
return self._primaryipaddress
except Exception as e:
raise e
@property
def responsesrate(self) :
ur"""Rate (/s) counter for totalresponses.
"""
try :
return self._responsesrate
except Exception as e:
raise e
@property
def maxclients(self) :
ur"""Maximum open connections allowed on this service.
"""
try :
return self._maxclients
except Exception as e:
raise e
@property
def avgsvrttfb(self) :
ur"""Average TTFB between the NetScaler appliance and the server.TTFB is the time interval between sending the request packet to a service and receiving the first response from the service.
"""
try :
return self._avgsvrttfb
except Exception as e:
raise e
@property
def totalrequestbytes(self) :
ur"""Total number of request bytes received on this service or virtual server.
"""
try :
return self._totalrequestbytes
except Exception as e:
raise e
@property
def curreusepool(self) :
ur"""Number of requests in the idle queue/reuse pool.
"""
try :
return self._curreusepool
except Exception as e:
raise e
@property
def state(self) :
ur"""Current state of the server. Possible values are UP, DOWN, UNKNOWN, OFS(Out of Service), TROFS(Transition Out of Service), TROFS_DOWN(Down When going Out of Service).
"""
try :
return self._state
except Exception as e:
raise e
@property
def totalresponsebytes(self) :
ur"""Number of response bytes received by this service or virtual server.
"""
try :
return self._totalresponsebytes
except Exception as e:
raise e
@property
def primaryport(self) :
ur"""The port on which the service is running.
"""
try :
return self._primaryport
except Exception as e:
raise e
@property
def requestsrate(self) :
ur"""Rate (/s) counter for totalrequests.
"""
try :
return self._requestsrate
except Exception as e:
raise e
	def _get_nitro_response(self, service, response) :
		""" Convert a nitro JSON response into resource objects.
		Returns the servicegroupmember_stats array for GET requests; raises
		nitro_exception for any non-zero error code whose severity is ERROR,
		or that carries no severity field at all.
		"""
		try :
			result = service.payload_formatter.string_to_resource(servicegroupmember_response, response, self.__class__.__name__.replace('_stats',''))
			if(result.errorcode != 0) :
				# Error code 444 means the session expired: clear it so the
				# next call re-authenticates.
				if (result.errorcode == 444) :
					service.clear_session(self)
				if result.severity :
					# Only severity ERROR is fatal; other severities fall through.
					if (result.severity == "ERROR") :
						raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
				else :
					raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
			return result.servicegroupmember
		except Exception as e :
			raise e
def _get_object_name(self) :
ur""" Returns the value of object identifier argument
"""
try :
if self.servicegroupname is not None :
return str(self.servicegroupname)
return None
except Exception as e :
raise e
	@classmethod
	def get(cls, service, name="", option_="") :
		""" Fetch the statistics of all servicegroupmember_stats resources
		that are configured on the NetScaler.
		"""
		try :
			obj = servicegroupmember_stats()
			# NOTE(review): the option_ parameter is immediately shadowed by
			# a fresh options() object, so any caller-supplied value is
			# ignored -- confirm against the SDK generator before changing.
			option_ = options()
			option_.args = nitro_util.object_to_string_withoutquotes(name)
			response = obj.stat_resource(service, option_)
			return response
		except Exception as e:
			raise e
	class Clearstats:
		# Valid values for the "clearstats" query argument.
		basic = "basic"
		full = "full"
class servicegroupmember_response(base_response) :
	""" Response envelope that nitro deserializes servicegroupmember stat
	results into. """
	def __init__(self, length=1) :
		# The original assigned an empty list to self.servicegroupmember and
		# then immediately overwrote it; the dead assignment is removed.
		self.errorcode = 0
		self.message = ""
		self.severity = ""
		self.sessionid = ""
		self.servicegroupmember = [servicegroupmember_stats() for _ in range(length)]
|
{
"content_hash": "a16265b2ba985d6daa2323dc1ef932f6",
"timestamp": "",
"source": "github",
"line_count": 354,
"max_line_length": 303,
"avg_line_length": 27.04237288135593,
"alnum_prop": 0.7011386190326961,
"repo_name": "benfinke/ns_python",
"id": "b46c28a261c3f45b1cd46990e271553d4f963b00",
"size": "10187",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "build/lib/nssrc/com/citrix/netscaler/nitro/resource/stat/basic/servicegroupmember_stats.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "21836782"
},
{
"name": "Shell",
"bytes": "513"
}
],
"symlink_target": ""
}
|
'''
Float Layout
============
:class:`FloatLayout` honors the :attr:`~kivy.uix.widget.Widget.pos_hint`
and the :attr:`~kivy.uix.widget.Widget.size_hint` properties of its children.
.. only:: html
.. image:: images/floatlayout.gif
:align: right
.. only:: latex
.. image:: images/floatlayout.png
:align: right
For example, a FloatLayout with a size of (300, 300) is created::
layout = FloatLayout(size=(300, 300))
By default, all widgets have their size_hint=(1, 1), so this button will adopt
the same size as the layout::
button = Button(text='Hello world')
layout.add_widget(button)
To create a button 50% of the width and 25% of the height of the layout and
positioned at (20, 20), you can do::
button = Button(
text='Hello world',
size_hint=(.5, .25),
pos=(20, 20))
If you want to create a button that will always be the size of layout minus
20% on each side::
button = Button(text='Hello world', size_hint=(.6, .6),
pos_hint={'x':.2, 'y':.2})
.. note::
This layout can be used for an application. Most of the time, you will
use the size of Window.
.. warning::
If you are not using pos_hint, you must handle the positioning of the
children: if the float layout is moving, you must handle moving the
children too.
'''
__all__ = ('FloatLayout', )
from kivy.uix.layout import Layout
class FloatLayout(Layout):
    '''Float layout class. See module documentation for more information.
    '''

    def __init__(self, **kwargs):
        super(FloatLayout, self).__init__(**kwargs)
        # Any change to these properties invalidates the current layout.
        trigger = self._trigger_layout
        for prop in ('children', 'pos', 'pos_hint', 'size_hint', 'size'):
            self.fbind(prop, trigger)

    def do_layout(self, *largs, **kwargs):
        # Read the layout geometry once rather than per child.
        width, height = kwargs.get('size', self.size)
        left, bottom = kwargs.get('pos', self.pos)

        def clamp(value, lower, upper):
            # Mirrors the original if/elif priority: the lower bound wins
            # over the upper bound when both would apply.
            if lower is not None and value < lower:
                return lower
            if upper is not None and value > upper:
                return upper
            return value

        for child in self.children:
            # --- size: apply size_hint, bounded by size_hint_min/max ---
            shw, shh = child.size_hint
            shw_min, shh_min = child.size_hint_min
            shw_max, shh_max = child.size_hint_max
            if shw is not None and shh is not None:
                child.size = (clamp(shw * width, shw_min, shw_max),
                              clamp(shh * height, shh_min, shh_max))
            elif shw is not None:
                child.width = clamp(shw * width, shw_min, shw_max)
            elif shh is not None:
                child.height = clamp(shh * height, shh_min, shh_max)
            # --- position: interpret each pos_hint key relative to us ---
            for key, value in child.pos_hint.items():
                if key == 'x':
                    child.x = left + value * width
                elif key == 'right':
                    child.right = left + value * width
                elif key == 'pos':
                    child.pos = left + value[0] * width, bottom + value[1] * height
                elif key == 'y':
                    child.y = bottom + value * height
                elif key == 'top':
                    child.top = bottom + value * height
                elif key == 'center':
                    child.center = left + value[0] * width, bottom + value[1] * height
                elif key == 'center_x':
                    child.center_x = left + value * width
                elif key == 'center_y':
                    child.center_y = bottom + value * height

    def add_widget(self, widget, index=0, canvas=None):
        # Track per-widget position changes (size changes are covered by the
        # bindings set up in __init__).
        widget.bind(
            pos=self._trigger_layout,
            pos_hint=self._trigger_layout)
        return super(FloatLayout, self).add_widget(widget, index, canvas)

    def remove_widget(self, widget):
        widget.unbind(
            pos=self._trigger_layout,
            pos_hint=self._trigger_layout)
        return super(FloatLayout, self).remove_widget(widget)
|
{
"content_hash": "67eb3936e5a6c859a70b470672df94f3",
"timestamp": "",
"source": "github",
"line_count": 148,
"max_line_length": 79,
"avg_line_length": 31.554054054054053,
"alnum_prop": 0.5066381156316917,
"repo_name": "inclement/kivy",
"id": "86145e01663fe995288b9ec7de8ed132a1e867f2",
"size": "4670",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "kivy/uix/floatlayout.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "321680"
},
{
"name": "C++",
"bytes": "3551"
},
{
"name": "Emacs Lisp",
"bytes": "9671"
},
{
"name": "GLSL",
"bytes": "289"
},
{
"name": "Makefile",
"bytes": "4084"
},
{
"name": "Objective-C",
"bytes": "14779"
},
{
"name": "Python",
"bytes": "3838203"
},
{
"name": "Vim script",
"bytes": "1123"
}
],
"symlink_target": ""
}
|
import json
from cryptography.fernet import InvalidToken
from django.test.utils import override_settings
from django.conf import settings
import pytest
from awx.main import models
from awx.conf.models import Setting
from awx.main.management.commands import regenerate_secret_key
from awx.main.utils.encryption import encrypt_field, decrypt_field, encrypt_value
PREFIX = '$encrypted$UTF8$AESCBC$'
@pytest.mark.django_db
class TestKeyRegeneration:
    """End-to-end tests that ``regenerate_secret_key`` re-encrypts every kind
    of stored secret, and that only the new SECRET_KEY can decrypt the result.
    """
    def test_encrypted_ssh_password(self, credential):
        """Credential inputs must be re-sealed under the regenerated key."""
        # test basic decryption
        assert credential.inputs['password'].startswith(PREFIX)
        assert credential.get_input('password') == 'secret'
        # re-key the credential
        new_key = regenerate_secret_key.Command().handle()
        new_cred = models.Credential.objects.get(pk=credential.pk)
        assert credential.inputs['password'] != new_cred.inputs['password']
        # verify that the old SECRET_KEY doesn't work
        with pytest.raises(InvalidToken):
            new_cred.get_input('password')
        # verify that the new SECRET_KEY *does* work
        with override_settings(SECRET_KEY=new_key):
            assert new_cred.get_input('password') == 'secret'
    def test_encrypted_setting_values(self):
        """Encrypted Setting rows must be re-sealed under the new key."""
        # test basic decryption
        settings.REDHAT_PASSWORD = 'sensitive'
        s = Setting.objects.filter(key='REDHAT_PASSWORD').first()
        assert s.value.startswith(PREFIX)
        assert settings.REDHAT_PASSWORD == 'sensitive'
        # re-key the setting value
        new_key = regenerate_secret_key.Command().handle()
        new_setting = Setting.objects.filter(key='REDHAT_PASSWORD').first()
        assert s.value != new_setting.value
        # wipe out the local cache so the value is pulled from the DB again
        settings.cache.delete('REDHAT_PASSWORD')
        # verify that the old SECRET_KEY doesn't work
        with pytest.raises(InvalidToken):
            settings.REDHAT_PASSWORD
        # verify that the new SECRET_KEY *does* work
        with override_settings(SECRET_KEY=new_key):
            assert settings.REDHAT_PASSWORD == 'sensitive'
    def test_encrypted_notification_secrets(self, notification_template_with_encrypt):
        """Notification configuration secrets must survive a re-key."""
        # test basic decryption
        nt = notification_template_with_encrypt
        nc = nt.notification_configuration
        assert nc['token'].startswith(PREFIX)
        Slack = nt.CLASS_FOR_NOTIFICATION_TYPE[nt.notification_type]
        class TestBackend(Slack):
            # Fake backend: asserts the token decrypted correctly; sends nothing.
            def __init__(self, *args, **kw):
                assert kw['token'] == 'token'
            def send_messages(self, messages):
                pass
        nt.CLASS_FOR_NOTIFICATION_TYPE['test'] = TestBackend
        nt.notification_type = 'test'
        nt.send('Subject', 'Body')
        # re-key the notification config
        new_key = regenerate_secret_key.Command().handle()
        new_nt = models.NotificationTemplate.objects.get(pk=nt.pk)
        assert nt.notification_configuration['token'] != new_nt.notification_configuration['token']
        # verify that the old SECRET_KEY doesn't work
        with pytest.raises(InvalidToken):
            new_nt.CLASS_FOR_NOTIFICATION_TYPE['test'] = TestBackend
            new_nt.notification_type = 'test'
            new_nt.send('Subject', 'Body')
        # verify that the new SECRET_KEY *does* work
        with override_settings(SECRET_KEY=new_key):
            new_nt.send('Subject', 'Body')
    def test_job_start_args(self, job_factory):
        """Encrypted job start_args must be re-sealed under the new key."""
        # test basic decryption
        job = job_factory()
        job.start_args = json.dumps({'foo': 'bar'})
        job.start_args = encrypt_field(job, field_name='start_args')
        job.save()
        assert job.start_args.startswith(PREFIX)
        # re-key the start_args
        new_key = regenerate_secret_key.Command().handle()
        new_job = models.Job.objects.get(pk=job.pk)
        assert new_job.start_args != job.start_args
        # verify that the old SECRET_KEY doesn't work
        with pytest.raises(InvalidToken):
            decrypt_field(new_job, field_name='start_args')
        # verify that the new SECRET_KEY *does* work
        with override_settings(SECRET_KEY=new_key):
            assert json.loads(
                decrypt_field(new_job, field_name='start_args')
            ) == {'foo': 'bar'}
    @pytest.mark.parametrize('cls', ('JobTemplate', 'WorkflowJobTemplate'))
    def test_survey_spec(self, inventory, project, survey_spec_factory, cls):
        """Encrypted survey defaults and extra_vars must survive a re-key."""
        params = {}
        if cls == 'JobTemplate':
            params['inventory'] = inventory
            params['project'] = project
        # test basic decryption
        jt = getattr(models, cls).objects.create(
            name='Example Template',
            survey_spec=survey_spec_factory([{
                'variable': 'secret_key',
                'default': encrypt_value('donttell', pk=None),
                'type': 'password'
            }]),
            survey_enabled=True,
            **params
        )
        job = jt.create_unified_job()
        assert jt.survey_spec['spec'][0]['default'].startswith(PREFIX)
        assert job.survey_passwords == {'secret_key': '$encrypted$'}
        assert json.loads(job.decrypted_extra_vars())['secret_key'] == 'donttell'
        # re-key the extra_vars
        new_key = regenerate_secret_key.Command().handle()
        new_job = models.UnifiedJob.objects.get(pk=job.pk)
        assert new_job.extra_vars != job.extra_vars
        # verify that the old SECRET_KEY doesn't work
        with pytest.raises(InvalidToken):
            new_job.decrypted_extra_vars()
        # verify that the new SECRET_KEY *does* work
        with override_settings(SECRET_KEY=new_key):
            assert json.loads(
                new_job.decrypted_extra_vars()
            )['secret_key'] == 'donttell'
    def test_oauth2_application_client_secret(self, oauth_application):
        """OAuth2 client secrets must decrypt only under the new key."""
        # test basic decryption
        secret = oauth_application.client_secret
        assert len(secret) == 128
        # re-key the client_secret
        new_key = regenerate_secret_key.Command().handle()
        # verify that the old SECRET_KEY doesn't work
        with pytest.raises(InvalidToken):
            models.OAuth2Application.objects.get(
                pk=oauth_application.pk
            ).client_secret
        # verify that the new SECRET_KEY *does* work
        with override_settings(SECRET_KEY=new_key):
            assert models.OAuth2Application.objects.get(
                pk=oauth_application.pk
            ).client_secret == secret
|
{
"content_hash": "9197e14930d0582b1e7612758e3462c3",
"timestamp": "",
"source": "github",
"line_count": 174,
"max_line_length": 99,
"avg_line_length": 37.9080459770115,
"alnum_prop": 0.6247725894481504,
"repo_name": "GoogleCloudPlatform/sap-deployment-automation",
"id": "d27b4329cdf84f2bc5a6471960490f9538522226",
"size": "6596",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "third_party/github.com/ansible/awx/awx/main/tests/functional/commands/test_secret_key_regeneration.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
}
|
"""Example for aiohttp.web basic server
"""
import asyncio
import textwrap
from aiohttp.web import Application, Response, StreamResponse
@asyncio.coroutine
def intro(request):
    """Index handler: stream short usage instructions for the other routes.

    Fix: the handler is a generator coroutine (uses ``yield from``) but was
    missing the ``@asyncio.coroutine`` decorator that the sibling ``hello``
    handler carries; added for consistency and correctness on old asyncio.
    """
    txt = textwrap.dedent("""\
        Type {url}/hello/John {url}/simple or {url}/change_body
        in browser url bar
    """).format(url='127.0.0.1:8080')
    binary = txt.encode('utf8')
    resp = StreamResponse()
    resp.content_length = len(binary)
    yield from resp.prepare(request)
    resp.write(binary)
    return resp
def simple(request):
    """Return a fixed plain-body response."""
    answer = Response(body=b'Simple answer')
    return answer
def change_body(request):
    """Build an empty response first, then assign its body afterwards."""
    response = Response()
    response.body = b"Body changed"
    return response
@asyncio.coroutine
def hello(request):
    """Stream a personalized greeting; the name comes from the URL match."""
    response = StreamResponse()
    who = request.match_info.get('name', 'Anonymous')
    payload = ('Hello, ' + who).encode('utf8')
    response.content_length = len(payload)
    yield from response.prepare(request)
    response.write(payload)
    yield from response.write_eof()
    return response
@asyncio.coroutine
def init(loop):
    """Create the application, register the routes and start listening."""
    app = Application(loop=loop)
    routes = [
        ('/', intro),
        ('/simple', simple),
        ('/change_body', change_body),
        ('/hello/{name}', hello),
        ('/hello', hello),
    ]
    for path, view in routes:
        app.router.add_route('GET', path, view)
    handler = app.make_handler()
    srv = yield from loop.create_server(handler, '127.0.0.1', 8080)
    print("Server started at http://127.0.0.1:8080")
    return srv, handler
# Script entry point: start the server and run until interrupted.
loop = asyncio.get_event_loop()
srv, handler = loop.run_until_complete(init(loop))
try:
    loop.run_forever()
except KeyboardInterrupt:
    # Graceful shutdown on Ctrl-C: let in-flight connections finish.
    loop.run_until_complete(handler.finish_connections())
|
{
"content_hash": "2e910c4a7d2e4aec05619250911b3d6f",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 67,
"avg_line_length": 26.296875,
"alnum_prop": 0.6660724896019014,
"repo_name": "decentfox/aiohttp",
"id": "78876e4a8538ed8f4015dbf213548fb42d623fa3",
"size": "1706",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "examples/web_srv.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "838"
},
{
"name": "CSS",
"bytes": "112"
},
{
"name": "HTML",
"bytes": "1060"
},
{
"name": "Makefile",
"bytes": "2272"
},
{
"name": "PLpgSQL",
"bytes": "765"
},
{
"name": "Python",
"bytes": "995177"
},
{
"name": "Shell",
"bytes": "550"
}
],
"symlink_target": ""
}
|
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class arpparam(base_resource) :
	""" Configuration for Global arp parameters resource. """
	def __init__(self) :
		# Backing fields for the timeout and spoofvalidation properties.
		self._timeout = 0
		self._spoofvalidation = ""
	@property
	def timeout(self) :
		ur"""Time-out value (aging time) for the dynamically learned ARP entries, in seconds. The new value applies only to ARP entries that are dynamically learned after the new value is set. Previously existing ARP entries expire after the previously configured aging time.<br/>Default value: 1200<br/>Minimum length = 5<br/>Maximum length = 1200.
		"""
		try :
			return self._timeout
		except Exception as e:
			raise e
	@timeout.setter
	def timeout(self, timeout) :
		ur"""Time-out value (aging time) for the dynamically learned ARP entries, in seconds. The new value applies only to ARP entries that are dynamically learned after the new value is set. Previously existing ARP entries expire after the previously configured aging time.<br/>Default value: 1200<br/>Minimum length = 5<br/>Maximum length = 1200
		"""
		try :
			self._timeout = timeout
		except Exception as e:
			raise e
	@property
	def spoofvalidation(self) :
		ur"""enable/disable arp spoofing validation.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED.
		"""
		try :
			return self._spoofvalidation
		except Exception as e:
			raise e
	@spoofvalidation.setter
	def spoofvalidation(self, spoofvalidation) :
		ur"""enable/disable arp spoofing validation.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED
		"""
		try :
			self._spoofvalidation = spoofvalidation
		except Exception as e:
			raise e
	def _get_nitro_response(self, service, response) :
		ur""" converts nitro response into object and returns the object array in case of get request.
		"""
		try :
			result = service.payload_formatter.string_to_resource(arpparam_response, response, self.__class__.__name__)
			if(result.errorcode != 0) :
				# Error code 444 means the session expired: clear it so the
				# next call re-authenticates.
				if (result.errorcode == 444) :
					service.clear_session(self)
				if result.severity :
					# Only severity ERROR is fatal; other severities fall through.
					if (result.severity == "ERROR") :
						raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
				else :
					raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
			return result.arpparam
		except Exception as e :
			raise e
	def _get_object_name(self) :
		ur""" Returns the value of object identifier argument
		"""
		try :
			# arpparam is a global singleton resource, so it has no name.
			return 0
		except Exception as e :
			raise e
	@classmethod
	def update(cls, client, resource) :
		ur""" Use this API to update arpparam.
		"""
		try :
			# Lists are not supported for this singleton resource; a list
			# argument silently falls through and returns None.
			if type(resource) is not list :
				updateresource = arpparam()
				updateresource.timeout = resource.timeout
				updateresource.spoofvalidation = resource.spoofvalidation
				return updateresource.update_resource(client)
		except Exception as e :
			raise e
	@classmethod
	def unset(cls, client, resource, args) :
		ur""" Use this API to unset the properties of arpparam resource.
		Properties that need to be unset are specified in args array.
		"""
		try :
			# As with update(), list arguments are not supported here.
			if type(resource) is not list :
				unsetresource = arpparam()
				return unsetresource.unset_resource(client, args)
		except Exception as e :
			raise e
	@classmethod
	def get(cls, client, name="", option_="") :
		ur""" Use this API to fetch all the arpparam resources that are configured on netscaler.
		"""
		try :
			# NOTE(review): a non-empty name falls through and returns None;
			# the singleton resource cannot be fetched by name.
			if not name :
				obj = arpparam()
				response = obj.get_resources(client, option_)
				return response
		except Exception as e :
			raise e
	class Spoofvalidation:
		# Valid values for the spoofvalidation property.
		ENABLED = "ENABLED"
		DISABLED = "DISABLED"
class arpparam_response(base_response) :
	""" Response envelope that nitro deserializes arpparam GET results into. """
	def __init__(self, length=1) :
		# The duplicate initial `self.arpparam = []` assignment was dead code
		# (immediately overwritten below) and has been removed.
		self.errorcode = 0
		self.message = ""
		self.severity = ""
		self.sessionid = ""
		self.arpparam = [arpparam() for _ in range(length)]
|
{
"content_hash": "099ebb71109eeac82fe14955ebba6a04",
"timestamp": "",
"source": "github",
"line_count": 127,
"max_line_length": 345,
"avg_line_length": 33.00787401574803,
"alnum_prop": 0.7180343511450382,
"repo_name": "atopuzov/nitro-python",
"id": "ec273c90d86a591412ea9a94dcfd70d0bf313ef8",
"size": "4806",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "nssrc/com/citrix/netscaler/nitro/resource/config/network/arpparam.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "10881939"
},
{
"name": "Shell",
"bytes": "513"
}
],
"symlink_target": ""
}
|
"""Operations for BEL graphs."""
from typing import Iterable
import networkx as nx
from tqdm.autonotebook import tqdm
from .utils import update_metadata
from ..dsl import BaseEntity
__all__ = [
"subgraph",
"left_full_join",
"left_outer_join",
"union",
"left_node_intersection_join",
"node_intersection",
]
def subgraph(graph, nodes: Iterable[BaseEntity]):
    """Induce a sub-graph over the given nodes.
    :rtype: BELGraph
    """
    induced = graph.subgraph(nodes)
    # Mirror BELGraph.copy(): child() keeps the metadata, then graph-level
    # data, node data, and edge data are copied over explicitly.
    result = graph.child()
    result.graph.update(induced.graph)
    for node, data in induced.nodes(data=True):
        result.add_node(node, **data)
    edge_copies = [
        (u, v, key, datadict.copy())
        for u, v, key, datadict in induced.edges(keys=True, data=True)
    ]
    result.add_edges_from(edge_copies)
    return result
def left_full_join(g, h) -> None:
    """Add all nodes and edges from ``h`` to ``g``, in-place for ``g``.
    :param pybel.BELGraph g: A BEL graph
    :param pybel.BELGraph h: A BEL graph
    Example usage:
    >>> import pybel
    >>> g = pybel.from_bel_script('...')
    >>> h = pybel.from_bel_script('...')
    >>> left_full_join(g, h)
    """
    # Nodes of h that g does not yet have.
    missing_nodes = [(node, data) for node, data in h.nodes(data=True) if node not in g]
    g.add_nodes_from(missing_nodes)
    # Edges of h whose (u, v, key) triple is absent from g.
    missing_edges = []
    for u, v, key, data in h.edges(keys=True, data=True):
        if u not in g or v not in g[u] or key not in g[u][v]:
            missing_edges.append((u, v, key, data))
    g.add_edges_from(missing_edges)
    update_metadata(h, g)
    g.warnings.extend(h.warnings)
def left_outer_join(g, h) -> None:
    """Only add components from the ``h`` that are touching ``g``.
    Algorithm:
    1. Identify all weakly connected components in ``h``
    2. Add those that have an intersection with the ``g``
    :param BELGraph g: A BEL graph
    :param BELGraph h: A BEL graph
    Example usage:
    >>> import pybel
    >>> g = pybel.from_bel_script('...')
    >>> h = pybel.from_bel_script('...')
    >>> left_outer_join(g, h)
    """
    # Snapshot g's nodes once; components added below must not re-anchor.
    anchored = set(g)
    for component in nx.weakly_connected_components(h):
        if not anchored.isdisjoint(component):
            left_full_join(g, subgraph(h, component))
def _left_outer_join_graphs(target, graphs):
    """Outer-join each graph in *graphs* onto *target*, in order.
    Note: the order of graphs will have significant results!
    :param BELGraph target: A BEL graph
    :param iter[BELGraph] graphs: An iterator of BEL graphs
    :rtype: BELGraph
    """
    for other in graphs:
        left_outer_join(target, other)
    return target
def union(graphs, use_tqdm: bool = False):
    """Take the union over a collection of graphs into a new graph.
    Assumes iterator is longer than 2, but not infinite.
    :param iter[BELGraph] graphs: An iterator over BEL graphs. Can't be infinite.
    :param use_tqdm: Should a progress bar be displayed?
    :return: A merged graph
    :rtype: BELGraph
    Example usage:
    >>> import pybel
    >>> g = pybel.from_bel_script('...')
    >>> h = pybel.from_bel_script('...')
    >>> k = pybel.from_bel_script('...')
    >>> merged = union([g, h, k])
    """
    it = iter(graphs)
    if use_tqdm:
        it = tqdm(it, desc="taking union")
    try:
        target = next(it)
    except StopIteration as e:
        raise ValueError("no graphs given") from e
    try:
        graph = next(it)
    except StopIteration:
        # NOTE(review): with exactly one input graph the graph itself is
        # returned, NOT a copy -- mutating the result mutates the input.
        # Confirm whether this asymmetry with the multi-graph path is
        # intentional before relying on it.
        return target
    else:
        # From two graphs on, the first is copied so the inputs stay intact.
        target = target.copy()
        left_full_join(target, graph)
    for graph in it:
        left_full_join(target, graph)
    return target
def left_node_intersection_join(g, h):
    """Take the intersection over two graphs.
    This intersection of two graphs is defined by the union of the
    sub-graphs induced over the intersection of their nodes
    :param BELGraph g: A BEL graph
    :param BELGraph h: A BEL graph
    :rtype: BELGraph
    Example usage:
    >>> import pybel
    >>> g = pybel.from_bel_script('...')
    >>> h = pybel.from_bel_script('...')
    >>> merged = left_node_intersection_join(g, h)
    """
    common = set(g) & set(h)
    g_part = subgraph(g, common)
    h_part = subgraph(h, common)
    # Merge h's induced part into g's induced part, in place.
    left_full_join(g_part, h_part)
    return g_part
def node_intersection(graphs):
    """Take the node intersection over a collection of graphs into a new graph.
    This intersection is defined the same way as by :func:`left_node_intersection_join`
    :param iter[BELGraph] graphs: An iterable of graphs. Since it's iterated over twice, it gets converted to a
     tuple first, so this isn't a safe operation for infinite lists.
    :rtype: BELGraph
    Example usage:
    >>> import pybel
    >>> g = pybel.from_bel_script('...')
    >>> h = pybel.from_bel_script('...')
    >>> k = pybel.from_bel_script('...')
    >>> merged = node_intersection([g, h, k])
    """
    graphs = tuple(graphs)
    if not graphs:
        raise ValueError("no graphs given")
    if len(graphs) == 1:
        return graphs[0]
    # Intersect the node sets of all graphs.
    common = set(graphs[0].nodes())
    for other in graphs[1:]:
        common.intersection_update(other)
    return union(subgraph(other, common) for other in graphs)
|
{
"content_hash": "ea12fb5f2f350adb678f166f47517da7",
"timestamp": "",
"source": "github",
"line_count": 204,
"max_line_length": 111,
"avg_line_length": 25.274509803921568,
"alnum_prop": 0.6144297905352987,
"repo_name": "pybel/pybel",
"id": "cecafedc550282be810666f54582d464a69745aa",
"size": "5181",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/pybel/struct/operations.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "880"
},
{
"name": "JavaScript",
"bytes": "9473"
},
{
"name": "Jupyter Notebook",
"bytes": "52170"
},
{
"name": "Python",
"bytes": "1475429"
}
],
"symlink_target": ""
}
|
"""Implement the hil-admin command."""
from hil import config, model, deferred, server, migrations, rest
from hil.commands import db
from hil.commands.migrate_ipmi_info import MigrateIpmiInfo
from hil.commands.util import ensure_not_root
from hil.flaskapp import app
from time import sleep
from flask_script import Manager, Command, Option
import sys
import logging
from click import IntRange
manager = Manager(app)
class ServeNetworks(Command):
    """Start the HIL networking server.

    Repeatedly drains the deferred-networking journal, sleeping between
    passes. The sleep interval comes from the ``[network-daemon]``
    ``sleep_time`` option (seconds, 0 < t < 3600) and defaults to 2.
    """

    # pylint: disable=arguments-differ
    def run(self):
        logger = logging.getLogger(__name__)
        server.init()
        server.register_drivers()
        server.validate_state()
        migrations.check_db_schema()

        # Check if config contains usable sleep_time
        if (config.cfg.has_section('network-daemon') and
                config.cfg.has_option('network-daemon', 'sleep_time')):
            try:
                sleep_time = config.cfg.getfloat(
                    'network-daemon', 'sleep_time')
            except ValueError:
                sys.exit("Error: sleep_time set to non-float value")

            if sleep_time <= 0 or sleep_time >= 3600:
                sys.exit("Error: sleep_time not within bounds "
                         "0 < sleep_time < 3600")
            if sleep_time > 60:
                # Fix: logger.warn() is a deprecated alias of warning().
                logger.warning('sleep_time greater than 1 minute.')
        else:
            sleep_time = 2

        while True:
            # Empty the journal until it's empty; then delay so we don't tight
            # loop.
            while deferred.apply_networking():
                pass
            sleep(sleep_time)
class RunDevelopmentServer(Command):
    """Run a development api server. Don't use this in production.
    Specify the port with -p or --port otherwise defaults to 5000"""

    option_list = (
        Option('--port', '-p', dest='port',
               type=IntRange(0, 2**16-1), default=5000),
    )

    # pylint: disable=arguments-differ
    def run(self, port):
        # Debug mode is off unless [devel] debug is explicitly set.
        debug = False
        if config.cfg.has_option('devel', 'debug'):
            debug = config.cfg.getboolean('devel', 'debug')
        # We need to import api here so that the functions within it get
        # registered (via `rest_call`), though we don't use it directly:
        # pylint: disable=unused-variable
        from hil import api
        server.init()
        migrations.check_db_schema()
        server.stop_orphan_consoles()
        rest.serve(port, debug=debug)
class CreateAdminUser(Command):
    """Create an admin user. Only valid for the database auth backend.
    This must be run on the HIL API server, with access to hil.cfg and the
    database. It will create a user named <username> with password
    <password>, who will have administrator privileges.
    This command should only be used for bootstrapping the system; once you
    have an initial admin, you can (and should) create additional users via
    the API.
    """

    # these are actually positional arguments
    option_list = (Option('username'), Option('password'))

    # pylint: disable=arguments-differ
    def run(self, username, password):
        if not config.cfg.has_option('extensions', 'hil.ext.auth.database'):
            sys.exit("'create-admin-user' is only valid with the database auth"
                     " backend.")
        from hil.ext.auth.database import User
        admin = User(label=username, password=password, is_admin=True)
        model.db.session.add(admin)
        model.db.session.commit()
# Register the sub-commands exposed by `hil-admin <command>`.
manager.add_command('db', db.command)
manager.add_command('migrate-ipmi-info', MigrateIpmiInfo())
manager.add_command('serve-networks', ServeNetworks())
manager.add_command('run-dev-server', RunDevelopmentServer())
manager.add_command('create-admin-user', CreateAdminUser())
def main():
    """Entrypoint for the hil-admin command."""
    # Refuse to run as root, load hil.cfg, connect the database, then
    # dispatch to whichever sub-command was requested on the command line.
    ensure_not_root()
    config.setup()
    model.init_db()
    manager.run()
|
{
"content_hash": "4d94658463506148b13c5b5ee6164fb5",
"timestamp": "",
"source": "github",
"line_count": 115,
"max_line_length": 79,
"avg_line_length": 34.71304347826087,
"alnum_prop": 0.6290080160320641,
"repo_name": "CCI-MOC/haas",
"id": "75073ee8ce0d52c7499b513bad5d08423c555b9d",
"size": "3992",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "hil/commands/admin.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "252862"
}
],
"symlink_target": ""
}
|
import errno
import os
from unittest import mock
import ddt
from oslo_concurrency import processutils as putils
from oslo_utils import imageutils
from oslo_utils import units
from cinder import context
from cinder import exception
from cinder.image import image_utils
from cinder.tests.unit import fake_constants as fake
from cinder.tests.unit import fake_snapshot
from cinder.tests.unit import fake_volume
from cinder.tests.unit import test
from cinder.volume import configuration as conf
from cinder.volume.drivers.dell_emc.powerstore import nfs
from cinder.volume import volume_utils
NFS_CONFIG = {'max_over_subscription_ratio': 1.0,
'reserved_percentage': 0,
'nfs_sparsed_volumes': True,
'nfs_qcow2_volumes': False,
'nas_secure_file_permissions': 'false',
'nas_secure_file_operations': 'false'}
QEMU_IMG_INFO_OUT1 = """image: %(volid)s
file format: raw
virtual size: %(size_gb)sG (%(size_b)s bytes)
disk size: 173K
"""
QEMU_IMG_INFO_OUT2 = """image: %(volid)s
file format: qcow2
virtual size: %(size_gb)sG (%(size_b)s bytes)
disk size: 173K
"""
QEMU_IMG_INFO_OUT3 = """image: volume-%(volid)s.%(snapid)s
file format: qcow2
virtual size: %(size_gb)sG (%(size_b)s bytes)
disk size: 196K
cluster_size: 65536
backing file: volume-%(volid)s
backing file format: qcow2
Format specific information:
compat: 1.1
lazy refcounts: false
refcount bits: 16
corrupt: false
"""
@ddt.ddt
class PowerStoreNFSDriverInitializeTestCase(test.TestCase):
TEST_NFS_HOST = 'nfs-host1'
    def setUp(self):
        """Build a fresh configuration and driver instance for each test."""
        super(PowerStoreNFSDriverInitializeTestCase, self).setUp()
        self.context = mock.Mock()
        self.create_configuration()
        # Mock out the compute API so no Nova connection is attempted.
        self.override_config('compute_api_class', 'unittest.mock.Mock')
        self.drv = nfs.PowerStoreNFSDriverInitialization(
            configuration=self.configuration)
def create_configuration(self):
config = conf.Configuration(None)
config.append_config_values(nfs.nfs_opts)
self.configuration = config
def test_check_multiattach_support(self):
drv = self.drv
self.configuration.nfs_qcow2_volumes = False
drv._check_multiattach_support()
self.assertEqual(not self.configuration.nfs_qcow2_volumes,
drv.multiattach_support)
def test_check_multiattach_support_disable(self):
drv = self.drv
drv.configuration.nfs_qcow2_volumes = True
drv._check_multiattach_support()
self.assertEqual(not self.configuration.nfs_qcow2_volumes,
drv.multiattach_support)
def test_check_snapshot_support(self):
drv = self.drv
drv.configuration.nfs_snapshot_support = True
drv.configuration.nas_secure_file_operations = 'false'
drv._check_snapshot_support()
self.assertTrue(drv.configuration.nfs_snapshot_support)
def test_check_snapshot_support_disable(self):
drv = self.drv
drv.configuration.nfs_snapshot_support = False
drv.configuration.nas_secure_file_operations = 'false'
self.assertRaises(exception.VolumeDriverException,
drv._check_snapshot_support)
def test_check_snapshot_support_nas_true(self):
drv = self.drv
drv.configuration.nfs_snapshot_support = True
drv.configuration.nas_secure_file_operations = 'true'
self.assertRaises(exception.VolumeDriverException,
drv._check_snapshot_support)
@mock.patch("cinder.volume.drivers.nfs.NfsDriver.do_setup")
def test_do_setup(self, mock_super_do_setup):
drv = self.drv
drv.configuration.nas_host = self.TEST_NFS_HOST
mock_check_multiattach_support = self.mock_object(
drv, '_check_multiattach_support'
)
drv.do_setup(self.context)
self.assertTrue(mock_check_multiattach_support.called)
def test_check_package_is_installed(self):
drv = self.drv
package = 'dellfcopy'
mock_execute = self.mock_object(drv, '_execute')
drv._check_package_is_installed(package)
mock_execute.assert_called_once_with(package,
check_exit_code=False,
run_as_root=False)
def test_check_package_is_not_installed(self):
drv = self.drv
package = 'dellfcopy'
drv._execute = mock.Mock(
side_effect=OSError(
errno.ENOENT, 'No such file or directory'
)
)
self.assertRaises(exception.VolumeDriverException,
drv._check_package_is_installed, package)
drv._execute.assert_called_once_with(package,
check_exit_code=False,
run_as_root=False)
def test_check_for_setup_error(self):
drv = self.drv
mock_check_package_is_installed = self.mock_object(
drv, '_check_package_is_installed')
drv.check_for_setup_error()
mock_check_package_is_installed.assert_called_once_with('dellfcopy')
def test_check_for_setup_error_not_passed(self):
drv = self.drv
drv._execute = mock.Mock(
side_effect=OSError(
errno.ENOENT, 'No such file or directory'
)
)
self.assertRaises(exception.VolumeDriverException,
drv.check_for_setup_error)
drv._execute.assert_called_once_with('dellfcopy',
check_exit_code=False,
run_as_root=False)
def test_update_volume_stats_has_multiattach(self):
drv = self.drv
self.mock_object(nfs.NfsDriver, '_update_volume_stats')
drv.multiattach_support = True
drv._stats = {}
drv._update_volume_stats()
self.assertIn('multiattach', drv._stats)
self.assertTrue(drv._stats['multiattach'])
@ddt.ddt
class PowerStoreNFSDriverTestCase(test.TestCase):
    """Tests for PowerStoreNFSDriver volume lifecycle operations."""
    TEST_NFS_HOST = 'nfs-host1'
    TEST_NFS_SHARE_PATH = '/export'
    TEST_NFS_EXPORT = '%s:%s' % (TEST_NFS_HOST, TEST_NFS_SHARE_PATH)
    TEST_SIZE_IN_GB = 1
    TEST_MNT_POINT = '/mnt/nfs'
    TEST_MNT_POINT_BASE_EXTRA_SLASH = '/opt/stack/data/cinder//mnt'
    TEST_MNT_POINT_BASE = '/mnt/test'
    TEST_LOCAL_PATH = '/mnt/nfs/volume-123'
    TEST_FILE_NAME = 'test.txt'
    VOLUME_UUID = 'abcdefab-cdef-abcd-efab-cdefabcdefab'
    def setUp(self):
        """Build a driver around a mocked Configuration and stubbed _execute."""
        super(PowerStoreNFSDriverTestCase, self).setUp()
        self.configuration = mock.Mock(conf.Configuration)
        self.configuration.append_config_values(mock.ANY)
        self.configuration.nfs_sparsed_volumes = True
        self.configuration.nas_secure_file_permissions = 'false'
        self.configuration.nas_secure_file_operations = 'false'
        self.configuration.nfs_mount_point_base = self.TEST_MNT_POINT_BASE
        self.configuration.nfs_snapshot_support = True
        self.configuration.max_over_subscription_ratio = 1.0
        self.configuration.reserved_percentage = 5
        self.configuration.nfs_mount_options = None
        self.configuration.nfs_qcow2_volumes = True
        self.configuration.nas_host = '0.0.0.0'
        self.configuration.nas_share_path = None
        self.mock_object(volume_utils, 'get_max_over_subscription_ratio',
                         return_value=1)
        self.context = context.get_admin_context()
        self._driver = nfs.PowerStoreNFSDriver(
            configuration=self.configuration)
        self._driver.shares = {}
        self.mock_object(self._driver, '_execute')
    def test_do_fast_clone_file(self):
        # Fast clone shells out to dellfcopy with source/destination paths.
        drv = self._driver
        volume_path = 'fake/path'
        new_volume_path = 'fake/new_path'
        drv._do_fast_clone_file(volume_path, new_volume_path)
        drv._execute.assert_called_once_with(
            'dellfcopy', '-o', 'fastclone', '-s', volume_path, '-d',
            new_volume_path, '-v', '1', run_as_root=True
        )
    def test_do_fast_clone_file_raise_error(self):
        # dellfcopy failures propagate as ProcessExecutionError.
        drv = self._driver
        volume_path = 'fake/path'
        new_volume_path = 'fake/new_path'
        drv._execute = mock.Mock(
            side_effect=putils.ProcessExecutionError()
        )
        self.assertRaises(putils.ProcessExecutionError,
                          drv._do_fast_clone_file, volume_path,
                          new_volume_path)
        drv._execute.assert_called_once_with(
            'dellfcopy', '-o', 'fastclone', '-s', volume_path, '-d',
            new_volume_path, '-v', '1', run_as_root=True
        )
    def _simple_volume(self, **kwargs):
        """Return a fake volume object; kwargs override the defaults below."""
        updates = {'id': self.VOLUME_UUID,
                   'provider_location': self.TEST_NFS_EXPORT,
                   'display_name': f'volume-{self.VOLUME_UUID}',
                   'name': f'volume-{self.VOLUME_UUID}',
                   'size': 10,
                   'status': 'available'}
        updates.update(kwargs)
        if 'display_name' not in updates:
            updates['display_name'] = 'volume-%s' % updates['id']
        return fake_volume.fake_volume_obj(self.context, **updates)
    def test_delete_volume_without_info(self):
        # With no snapshot info file, delete removes only the volume file.
        drv = self._driver
        volume = fake_volume.fake_volume_obj(
            self.context,
            display_name='volume',
            provider_location=self.TEST_NFS_EXPORT
        )
        vol_path = '/path/to/vol'
        mock_ensure_share_mounted = self.mock_object(
            drv, '_ensure_share_mounted')
        mock_local_path_volume_info = self.mock_object(
            drv, '_local_path_volume_info'
        )
        mock_local_path_volume_info.return_value = self.TEST_LOCAL_PATH
        mock_read_info_file = self.mock_object(drv, '_read_info_file')
        mock_read_info_file.return_value = {}
        mock_local_path_volume = self.mock_object(drv, '_local_path_volume')
        mock_local_path_volume.return_value = vol_path
        drv.delete_volume(volume)
        mock_ensure_share_mounted.assert_called_once_with(
            self.TEST_NFS_EXPORT)
        mock_local_path_volume.assert_called_once_with(volume)
        mock_read_info_file.assert_called_once_with(
            self.TEST_LOCAL_PATH, empty_if_missing=True)
        mock_local_path_volume.assert_called_once_with(volume)
        drv._execute.assert_called_once_with(
            'rm', '-f', vol_path, run_as_root=True)
    def test_delete_volume_with_info(self):
        # With an active snapshot chain recorded, delete issues three
        # _execute calls (volume file, info file, active file).
        drv = self._driver
        volume = fake_volume.fake_volume_obj(
            self.context,
            display_name='volume',
            provider_location=self.TEST_NFS_EXPORT
        )
        vol_path = '/path/to/vol'
        with mock.patch.object(drv, '_ensure_share_mounted'):
            mock_local_path_volume_info = self.mock_object(
                drv, '_local_path_volume_info'
            )
            mock_local_path_volume_info.return_value = self.TEST_LOCAL_PATH
            mock_read_info_file = self.mock_object(drv, '_read_info_file')
            mock_read_info_file.return_value = {'active': '/path/to/active'}
            mock_local_path_volume = self.mock_object(
                drv, '_local_path_volume')
            mock_local_path_volume.return_value = vol_path
            drv.delete_volume(volume)
            self.assertEqual(drv._execute.call_count, 3)
    def test_delete_volume_without_provider_location(self):
        # Volumes with no provider location are a no-op for delete.
        drv = self._driver
        volume = fake_volume.fake_volume_obj(
            self.context,
            display_name='volume',
            provider_location=''
        )
        drv.delete_volume(volume)
        self.assertFalse(bool(drv._execute.call_count))
    @ddt.data([None, QEMU_IMG_INFO_OUT1],
              ['raw', QEMU_IMG_INFO_OUT1],
              ['qcow2', QEMU_IMG_INFO_OUT2])
    @ddt.unpack
    @mock.patch('cinder.objects.volume.Volume.get_by_id')
    def test_extend_volume(self, file_format, qemu_img_info, mock_get):
        # extend_volume resizes the backing image for raw and qcow2 formats.
        drv = self._driver
        volume = fake_volume.fake_volume_obj(
            self.context,
            id='80ee16b6-75d2-4d54-9539-ffc1b4b0fb10',
            size=1,
            provider_location='nfs_share')
        if file_format:
            volume.admin_metadata = {'format': file_format}
        mock_get.return_value = volume
        path = 'path'
        new_size = volume['size'] + 1
        mock_img_utils = self.mock_object(drv, '_qemu_img_info')
        img_out = qemu_img_info % {'volid': volume.id,
                                   'size_gb': volume.size,
                                   'size_b': volume.size * units.Gi}
        mock_img_utils.return_value = imageutils.QemuImgInfo(
            img_out)
        with mock.patch.object(image_utils, 'resize_image') as resize:
            with mock.patch.object(drv, 'local_path', return_value=path):
                with mock.patch.object(drv, '_is_share_eligible',
                                       return_value=True):
                    drv.extend_volume(volume, new_size)
                    resize.assert_called_once_with(path, new_size)
    def test_create_volume_from_snapshot(self):
        # Creating from snapshot finds a share then copies the snapshot data.
        drv = self._driver
        src_volume = self._simple_volume(size=10)
        src_volume.id = fake.VOLUME_ID
        fake_snap = fake_snapshot.fake_snapshot_obj(self.context)
        fake_snap.volume = src_volume
        fake_snap.size = 10
        fake_snap.status = 'available'
        new_volume = self._simple_volume(size=src_volume.size)
        drv._find_share = mock.Mock(return_value=self.TEST_NFS_EXPORT)
        drv._copy_volume_from_snapshot = mock.Mock()
        drv._create_volume_from_snapshot(new_volume, fake_snap)
        drv._find_share.assert_called_once_with(new_volume)
        drv._copy_volume_from_snapshot.assert_called_once_with(
            fake_snap, new_volume, new_volume.size
        )
    @mock.patch('cinder.objects.volume.Volume.get_by_id')
    def test_create_cloned_volume(self, mock_get):
        # Cloning a volume goes through the dellfcopy fast-clone path.
        drv = self._driver
        volume = self._simple_volume()
        mock_get.return_value = volume
        vol_dir = os.path.join(self.TEST_MNT_POINT_BASE,
                               drv._get_hash_str(volume.provider_location))
        vol_path = os.path.join(vol_dir, volume.name)
        new_volume = self._simple_volume()
        new_vol_dir = os.path.join(self.TEST_MNT_POINT_BASE,
                                   drv._get_hash_str(
                                       volume.provider_location))
        new_vol_path = os.path.join(new_vol_dir, volume.name)
        drv._create_cloned_volume(new_volume, volume, self.context)
        command = ['dellfcopy', '-o', 'fastclone', '-s', vol_path,
                   '-d', new_vol_path, '-v', '1']
        calls = [mock.call(*command, run_as_root=True)]
        drv._execute.assert_has_calls(calls)
    @ddt.data([QEMU_IMG_INFO_OUT3])
    @ddt.unpack
    @mock.patch('cinder.objects.volume.Volume.save')
    def test_copy_volume_from_snapshot(self, qemu_img_info, mock_save):
        # Copying from a snapshot fast-clones the source volume file into
        # the destination volume's path.
        drv = self._driver
        src_volume = self._simple_volume(size=10)
        src_volume.id = fake.VOLUME_ID
        fake_snap = fake_snapshot.fake_snapshot_obj(self.context)
        snap_file = src_volume.name + '.' + fake_snap.id
        fake_snap.volume = src_volume
        fake_snap.size = 10
        fake_source_vol_path = os.path.join(
            drv._local_volume_dir(fake_snap.volume),
            src_volume.name
        )
        new_volume = self._simple_volume(size=10)
        new_vol_dir = os.path.join(self.TEST_MNT_POINT_BASE,
                                   drv._get_hash_str(
                                       src_volume.provider_location))
        new_vol_path = os.path.join(new_vol_dir, new_volume.name)
        mock_read_info_file = self.mock_object(drv, '_read_info_file')
        mock_read_info_file.return_value = {'active': snap_file,
                                            fake_snap.id: snap_file}
        mock_img_utils = self.mock_object(drv, '_qemu_img_info')
        img_out = qemu_img_info % {'volid': src_volume.id,
                                   'snapid': fake_snap.id,
                                   'size_gb': src_volume.size,
                                   'size_b': src_volume.size * units.Gi}
        mock_img_utils.return_value = imageutils.QemuImgInfo(img_out)
        drv._copy_volume_from_snapshot(fake_snap, new_volume, new_volume.size)
        command = ['dellfcopy', '-o', 'fastclone', '-s', fake_source_vol_path,
                   '-d', new_vol_path, '-v', '1']
        calls = [mock.call(*command, run_as_root=True)]
        drv._execute.assert_has_calls(calls)
|
{
"content_hash": "0ea7550e0eb396ba40bb52e2f2269a3a",
"timestamp": "",
"source": "github",
"line_count": 448,
"max_line_length": 78,
"avg_line_length": 37.832589285714285,
"alnum_prop": 0.5882942946486518,
"repo_name": "openstack/cinder",
"id": "79a80b246b8735995689a9c9fcb9551b9153a6b0",
"size": "17571",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "cinder/tests/unit/volume/drivers/dell_emc/powerstore/test_nfs.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jinja",
"bytes": "259"
},
{
"name": "Mako",
"bytes": "976"
},
{
"name": "Python",
"bytes": "25078349"
},
{
"name": "Shell",
"bytes": "6456"
},
{
"name": "Smarty",
"bytes": "67595"
}
],
"symlink_target": ""
}
|
from swgpy.object import *
def create(kernel):
	"""Build the Creature object for the Stoos Olko NPC template."""
	creature = Creature()
	creature.template = "object/mobile/shared_dressed_stoos_olko.iff"
	creature.attribute_template_id = 9
	creature.stfName("npc_name","sullustan_base_male")
	#### BEGIN MODIFICATIONS ####
	#### END MODIFICATIONS ####
	return creature
|
{
"content_hash": "b38f2a5b903011b273b24222204b231a",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 64,
"avg_line_length": 23.23076923076923,
"alnum_prop": 0.695364238410596,
"repo_name": "anhstudios/swganh",
"id": "631b4b10dcf38842b0fa2e09ccda8f544504ec64",
"size": "447",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "data/scripts/templates/object/mobile/shared_dressed_stoos_olko.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11887"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2357839"
},
{
"name": "CMake",
"bytes": "41264"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7503510"
},
{
"name": "SQLPL",
"bytes": "42770"
}
],
"symlink_target": ""
}
|
from subprocess import Popen, PIPE
import json
import os
import re
import sys
# Load optional site configuration; fall back to defaults when it is absent.
config = dict()
if os.path.isfile('/usr/lib/simple-vmcontrol/config.json'):
    config = json.loads(open('/usr/lib/simple-vmcontrol/config.json', 'r').read())
datadisklocation = '/srv/vm/'
if 'datadisklocation' in config:
    datadisklocation = config['datadisklocation']
# Validate CLI arguments. The VM name must be purely alphanumeric and the
# disk path must follow the "<location><vm>.data<N>.img" naming scheme, which
# also stops the script from deleting arbitrary files. Raw strings and
# re.escape() keep the patterns literal (the original unescaped '.' matched
# any character, weakening the path check).
vmname = sys.argv[1]
if re.search(r'[^\w]', vmname):
    raise Exception('Name can only be alphanumeric chars')
datadiskfilename = sys.argv[2]
if not re.search(re.escape(datadisklocation + vmname) + r'\.data\d+\.img',
                 datadiskfilename):
    raise Exception('Invalid data disk file name specified:' + datadiskfilename)
# Find the device name for this data disk image file name
p = Popen(['/usr/bin/virsh', 'domblklist', vmname], stdin=PIPE, stdout=PIPE, stderr=PIPE)
r2 = p.communicate()[0]
m = re.search(r'^(\w+) +' + re.escape(datadiskfilename), r2, re.MULTILINE)
if m is None:
    # Fail with a clear message instead of an AttributeError on m.group(1).
    raise Exception('Data disk not attached to VM: ' + datadiskfilename)
devname = m.group(1)
# Detach the disk image from the persistent domain config, then delete it.
p = Popen(['/usr/bin/virsh', 'detach-disk', vmname, devname, '--config'],
          stdin=PIPE, stdout=PIPE, stderr=PIPE)
r = '\n'.join(p.communicate())
os.remove(datadiskfilename)
r += '\nNote: Disk will first disappear fully on VM restart\n'
print(r)
|
{
"content_hash": "1ad3e33defb6d6ae8b68047d248fa00b",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 89,
"avg_line_length": 30.53846153846154,
"alnum_prop": 0.7052896725440806,
"repo_name": "allanrbo/simple-vmcontrol",
"id": "7da9979cbf74baea9325832de7f78a391e8d1016",
"size": "1214",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "vmcontrol/deletedatadisk.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "23570"
}
],
"symlink_target": ""
}
|
"""Distributed placement strategies."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import contextlib
from absl import logging
from adanet import tf_compat
from adanet.distributed.devices import _OpNameHashStrategy
import numpy as np
import six
@six.add_metaclass(abc.ABCMeta)
class PlacementStrategy(object):  # pytype: disable=ignored-metaclass
  """Abstract placement strategy for distributed training.

  A placement strategy decides, for every worker in the cluster, which part
  of the graph (ensemble and/or individual subnetworks) that worker builds.
  """

  @property
  def config(self):
    """The :class:`tf.estimator.RunConfig` that describes the cluster."""
    return self._config

  @config.setter
  def config(self, value):
    """Configures the placement strategy with the given cluster description.

    Args:
      value: A :class:`tf.estimator.RunConfig` instance that defines the
        cluster.
    """
    self._config = value

  @abc.abstractmethod
  def should_build_ensemble(self, num_subnetworks):
    """Returns whether the current worker should build the ensemble.

    Args:
      num_subnetworks: Integer number of subnetworks to train in the current
        iteration.
    """

  @abc.abstractmethod
  def should_build_subnetwork(self, num_subnetworks, subnetwork_index):
    """Returns whether the current worker should build the given subnetwork.

    Args:
      num_subnetworks: Integer number of subnetworks to train in the current
        iteration.
      subnetwork_index: Integer index of the subnetwork within the current
        iteration's list of subnetworks.
    """

  @abc.abstractmethod
  def should_train_subnetworks(self, num_subnetworks):
    """Returns whether the current worker should train subnetworks.

    Args:
      num_subnetworks: Integer number of subnetworks to train in the current
        iteration.
    """

  @abc.abstractmethod
  @contextlib.contextmanager
  def subnetwork_devices(self, num_subnetworks, subnetwork_index):
    """A context manager that assigns a subnetwork's ops to devices."""
class ReplicationStrategy(PlacementStrategy):
  # pyformat: disable
  """A simple strategy that replicates the same graph on every worker.

  Because each worker builds the ensemble and all subnetworks, this strategy
  scales poorly as subnetworks and workers are added: with :math:`m` workers,
  :math:`n` parameter servers, and :math:`k` subnetworks it gives
  :math:`O(m)` training speedup, :math:`O(m*n*k)` variable fetches from
  parameter servers, and :math:`O(k)` memory per worker, plus :math:`O(m)`
  stale gradients per subnetwork under asynchronous SGD.

  Returns:
    A :class:`ReplicationStrategy` instance for the current cluster.
  """
  # pyformat: enable

  def should_build_ensemble(self, num_subnetworks):
    # Every worker replicates the ensemble.
    return True

  def should_build_subnetwork(self, num_subnetworks, subnetwork_index):
    # Every worker replicates every subnetwork.
    return True

  def should_train_subnetworks(self, num_subnetworks):
    # Every worker trains all subnetworks.
    return True

  @contextlib.contextmanager
  def subnetwork_devices(self, num_subnetworks, subnetwork_index):
    """Uses the default device placement."""
    yield
class RoundRobinStrategy(PlacementStrategy):
  # pyformat: disable
  """A strategy that round-robin assigns subgraphs to specific workers.

  Specifically, it selects dedicated workers to only train ensemble variables,
  and round-robin assigns subnetworks to dedicated subnetwork-training workers.

  Unlike :class:`ReplicationStrategy`, this strategy scales better with the
  number of subnetworks, workers, and parameter servers. For :math:`m` workers,
  :math:`n` parameter servers, and :math:`k` subnetworks, this strategy will
  scale with :math:`O(m/k)` training speedup, :math:`O(m*n/k)` variable fetches
  from parameter servers, and :math:`O(1)` memory required per worker.
  Additionally, there will only be :math:`O(m/k)` stale gradients per subnetwork
  when training with asynchronous SGD, which reduces training instability versus
  :class:`ReplicationStrategy`.

  When there are more workers than subnetworks, this strategy assigns
  subnetworks to workers modulo the number of subnetworks.

  Conversely, when there are more subnetworks than workers, this round robin
  assigns subnetworks modulo the number of workers. So certain workers may end
  up training more than one subnetwork.

  This strategy gracefully handles scenarios when the number of subnetworks
  does not perfectly divide the number of workers and vice-versa. It also
  supports different numbers of subnetworks at different iterations, and
  reloading training with a resized cluster.

  Args:
    drop_remainder: Bool whether to drop remaining subnetworks that haven't been
      assigned to a worker in the remainder after perfect division of workers by
      the current iteration's num_subnetworks + 1. When :code:`True`, each subnetwork
      worker will only train a single subnetwork, and subnetworks that have not
      been assigned to a worker are dropped. NOTE: This can result
      in subnetworks not being assigned to any worker when
      num_workers < num_subnetworks + 1. When :code:`False`, remaining subnetworks
      during the round-robin assignment will be placed on workers that already
      have a subnetwork.

  Returns:
    A :class:`RoundRobinStrategy` instance for the current cluster.
  """
  # pyformat: enable
  # TODO: Allow user to disable ensemble workers. For example, when there
  # are no ensemble variables to train, such as in a uniform average ensemble,
  # there is no need for a non-chief to create the full ensemble during
  # training, except for the chief to initialize the ensemble's non-trainable
  # variables.
  # TODO: Optional code organization suggestion:
  # Explicitly define what a "task" is, to make the below code clearer. One way
  # of doing this:
  #
  # def _worker_tasks(self, num_subnetworks):
  #   """Returns the set of tasks that this worker can work on.
  #
  #   Each task is represented by an integer between 0 and num_subnetworks
  #   (inclusive). 0 corresponds to the task of training the ensemble(s), 1
  #   corresponds to the task of training subnetwork 0, 2 corresponds to the
  #   task of training subnetwork 1, and so on.
  #
  #   Examples:
  #     - 1 worker, 3 subnetworks. This would return {0, 1, 2, 3} for the only
  #       worker, since the only worker would have to train the ensemble(s) and
  #       all 3 subnetworks.
  #     - 2 workers, 3 subnetworks. This would return {0} for worker 0, and
  #       {1, 2, 3} for worker 1. This means that the first worker trains the
  #       ensemble(s), while the second worker trains all three subnetworks.
  #     - 4 workers, 3 subnetworks. This would return {0} for worker 0, {1} for
  #       worker 1, {2} for worker 2, and {3} for worker 3. This means that
  #       worker 0 trains the ensemble(s) while the rest of the workers train
  #       one subnetwork each.
  #     - 5 workers, 3 subnetworks. This would return {0} for worker 0, {1} for
  #       worker 1, {2} for worker 2, {3} for worker 3, and {1} for worker 4.
  #       This is like the previous case, except that worker 4 also helps to
  #       train subnetwork 0.
  #   """
  #
  # That way, should_build_ensemble can just be:
  #
  #   return 0 in self._worker_tasks(...)
  #
  # then should_build_subnetwork can just be:
  #
  #   if (subnetwork_index in self._worker_tasks(...) or 0 in
  #       subnetwork_index in self._worker_tasks(...)):
  #     return True
  #   return False
  #
  # and should_train_subnetwork can just be:
  #
  #   return subnetwork_index in self._worker_tasks(...)
  def __init__(self, drop_remainder=False, dedicate_parameter_servers=True):
    """Initializes the strategy; see the class docstring for arguments."""
    self._drop_remainder = drop_remainder
    self._dedicate_parameter_servers = dedicate_parameter_servers
  @property
  def _num_workers(self):
    # Total number of worker replicas in the cluster.
    return self.config.num_worker_replicas
  @property
  def _worker_index(self):
    # Global index of this worker; falls back to 0 (chief) when unset.
    return self.config.global_id_in_cluster or 0
  def _worker_task(self, num_subnetworks):
    """Returns the worker index modulo the number of subnetworks."""
    if self._drop_remainder and self._num_workers > 1 and (num_subnetworks >
                                                           self._num_workers):
      # NOTE(review): the "last %s subnetworks" count below evaluates to
      # num_subnetworks - num_workers - 1; confirm this is the intended
      # number of dropped subnetworks given the +1 ensemble task.
      logging.log_first_n(
          logging.WARNING,
          "With drop_remainer=True, %s workers and %s subnetworks, the last %s "
          "subnetworks will be dropped and will not be trained", 1,
          self._num_workers, num_subnetworks,
          num_subnetworks - self._num_workers - 1)
    # The first worker will always build the ensemble so we add 1.
    return self._worker_index % (num_subnetworks + 1)
  def should_build_ensemble(self, num_subnetworks):
    if num_subnetworks == 1:
      return True
    worker_task = self._worker_task(num_subnetworks)
    # The ensemble builder is always the first worker task.
    return worker_task == 0
  def should_build_subnetwork(self, num_subnetworks, subnetwork_index):
    if num_subnetworks == 1:
      return True
    worker_task = self._worker_task(num_subnetworks)
    if worker_task == 0:
      # The zeroth index worker is an ensemble worker.
      return True
    subnetwork_worker_index = worker_task - 1
    if self._drop_remainder:
      # One subnetwork per worker task; no sharing of leftover subnetworks.
      return subnetwork_worker_index == subnetwork_index
    workers_per_subnetwork = self._num_workers // (num_subnetworks + 1)
    if self._num_workers % (num_subnetworks + 1) == 0:
      # Workers divide evenly among the ensemble + subnetwork tasks.
      num_subnetwork_workers = num_subnetworks
    elif self._worker_index >= workers_per_subnetwork * (num_subnetworks + 1):
      # This worker is in the remainder group after the even division; only
      # the remainder's non-ensemble slots are available to it.
      num_subnetwork_workers = self._num_workers % (num_subnetworks + 1) - 1
    else:
      num_subnetwork_workers = num_subnetworks
    return subnetwork_worker_index == subnetwork_index % num_subnetwork_workers
  def should_train_subnetworks(self, num_subnetworks):
    if num_subnetworks == 1 or self._num_workers == 1:
      return True
    # Ensemble workers do not train subnetworks; everyone else does.
    return not self.should_build_ensemble(num_subnetworks)
  @contextlib.contextmanager
  def subnetwork_devices(self, num_subnetworks, subnetwork_index):
    if not self._dedicate_parameter_servers:
      # Use default devices.
      yield
      return
    # Each subnetwork gets its own dedicated parameter servers
    num_ps_replicas = self.config.num_ps_replicas
    ps_numbers = np.array(range(num_ps_replicas))
    subnetwork_group = subnetwork_index
    if num_ps_replicas > 0 and num_subnetworks > num_ps_replicas:
      # More subnetworks than parameter servers: share servers modulo-style.
      subnetwork_group = subnetwork_index % num_ps_replicas
    ps_group = np.array_split(ps_numbers, num_subnetworks)[subnetwork_group]
    # Assign ops to parameter servers based on hashed op names.
    ps_strategy = _OpNameHashStrategy(len(ps_group))
    def device_fn(op):
      """Assigns variables to a subnetwork's dedicated parameter servers."""
      # Import here to avoid strict BUILD deps check.
      from tensorflow.core.framework import node_def_pb2  # pylint: disable=g-direct-tensorflow-import,g-import-not-at-top
      node_def = op if isinstance(op, node_def_pb2.NodeDef) else op.node_def
      from tensorflow.python.training import device_setter  # pylint: disable=g-direct-tensorflow-import,g-import-not-at-top
      if num_ps_replicas > 0 and node_def.op in device_setter.STANDARD_PS_OPS:
        # ps_group lists the task ids in the group. Adding the first task id in
        # the group to the task number determined by the PS strategy gives the
        # correct parameter server assignment.
        return "/job:ps/task:{}".format(ps_group[0] + ps_strategy(op))
      return op.device
    with tf_compat.v1.device(device_fn):
      yield
|
{
"content_hash": "18f52cb3750a9fbd58d7a9a354e192f7",
"timestamp": "",
"source": "github",
"line_count": 307,
"max_line_length": 124,
"avg_line_length": 38.925081433224754,
"alnum_prop": 0.7061924686192469,
"repo_name": "tensorflow/adanet",
"id": "88b6726c2d989860d22b46c6052282c4c31afc58",
"size": "12552",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "adanet/distributed/placement.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "1914501"
},
{
"name": "Python",
"bytes": "1047162"
},
{
"name": "Shell",
"bytes": "2927"
},
{
"name": "Starlark",
"bytes": "28690"
}
],
"symlink_target": ""
}
|
from setuptools import setup, find_packages
setup(name='jstestnet',
version='0.1',
description="""JS TestNet is a Django_ web service that coordinates the
execution of JavaScript tests across web browsers.""",
long_description="",
author='Kumar McMillan',
author_email='kumar.mcmillan@gmail.com',
license="Apache License",
packages=find_packages(exclude=['ez_setup']),
install_requires=[], # see requirements.txt
tests_require=[], # see tox.ini
# url='',
)
|
{
"content_hash": "28458d404410bc880c8e057b0a99f1da",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 77,
"avg_line_length": 36.333333333333336,
"alnum_prop": 0.6256880733944954,
"repo_name": "kumar303/jstestnet",
"id": "30efce283dc53c25276cb477bd686120e67af833",
"size": "546",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "2073"
},
{
"name": "JavaScript",
"bytes": "83985"
},
{
"name": "Python",
"bytes": "138334"
}
],
"symlink_target": ""
}
|
from typing import Any, Callable, Dict, IO, Iterable, Optional, TypeVar, Union, cast, overload
import urllib.parse
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
from ..._serialization import Serializer
from .._vendor import _convert_request, _format_url_section
T = TypeVar("T")
# Type of the optional `cls` callback that post-processes a pipeline response
# in the generated operations.
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
# Generated request builders serialize values verbatim; client-side
# validation is disabled for them.
_SERIALIZER.client_side_validation = False
def build_create_or_update_request(
    resource_group_name: str, snapshot_name: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
    """Builds the HTTP PUT request that creates or updates a snapshot."""
    headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    params = case_insensitive_dict(kwargs.pop("params", {}) or {})
    api_version = kwargs.pop("api_version", params.pop("api-version", "2021-08-01"))  # type: str
    content_type = kwargs.pop("content_type", headers.pop("Content-Type", None))  # type: Optional[str]
    accept = headers.pop("Accept", "application/json")
    # Expand the ARM resource path with the serialized path arguments.
    url_template = kwargs.pop(
        "template_url",
        "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/snapshots/{snapshotName}",
    )  # pylint: disable=line-too-long
    url = _format_url_section(
        url_template,
        subscriptionId=_SERIALIZER.url("subscription_id", subscription_id, "str"),
        resourceGroupName=_SERIALIZER.url("resource_group_name", resource_group_name, "str"),
        snapshotName=_SERIALIZER.url("snapshot_name", snapshot_name, "str"),
    )
    # Query parameters and headers.
    params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
    if content_type is not None:
        headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
    headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
    return HttpRequest(method="PUT", url=url, params=params, headers=headers, **kwargs)
def build_update_request(
    resource_group_name: str, snapshot_name: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
    """Builds the HTTP PATCH request that updates a snapshot."""
    headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    params = case_insensitive_dict(kwargs.pop("params", {}) or {})
    api_version = kwargs.pop("api_version", params.pop("api-version", "2021-08-01"))  # type: str
    content_type = kwargs.pop("content_type", headers.pop("Content-Type", None))  # type: Optional[str]
    accept = headers.pop("Accept", "application/json")
    # Expand the ARM resource path with the serialized path arguments.
    url_template = kwargs.pop(
        "template_url",
        "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/snapshots/{snapshotName}",
    )  # pylint: disable=line-too-long
    url = _format_url_section(
        url_template,
        subscriptionId=_SERIALIZER.url("subscription_id", subscription_id, "str"),
        resourceGroupName=_SERIALIZER.url("resource_group_name", resource_group_name, "str"),
        snapshotName=_SERIALIZER.url("snapshot_name", snapshot_name, "str"),
    )
    # Query parameters and headers.
    params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
    if content_type is not None:
        headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
    headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
    return HttpRequest(method="PATCH", url=url, params=params, headers=headers, **kwargs)
def build_get_request(resource_group_name: str, snapshot_name: str, subscription_id: str, **kwargs: Any) -> HttpRequest:
    """Builds the HTTP GET request that retrieves a snapshot."""
    headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    params = case_insensitive_dict(kwargs.pop("params", {}) or {})
    api_version = kwargs.pop("api_version", params.pop("api-version", "2021-08-01"))  # type: str
    accept = headers.pop("Accept", "application/json")
    # Expand the ARM resource path with the serialized path arguments.
    url_template = kwargs.pop(
        "template_url",
        "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/snapshots/{snapshotName}",
    )  # pylint: disable=line-too-long
    url = _format_url_section(
        url_template,
        subscriptionId=_SERIALIZER.url("subscription_id", subscription_id, "str"),
        resourceGroupName=_SERIALIZER.url("resource_group_name", resource_group_name, "str"),
        snapshotName=_SERIALIZER.url("snapshot_name", snapshot_name, "str"),
    )
    # Query parameters and headers.
    params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
    headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
    return HttpRequest(method="GET", url=url, params=params, headers=headers, **kwargs)
def build_delete_request(
    resource_group_name: str, snapshot_name: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
    """Build the HTTP DELETE request for removing a snapshot.

    Note: this request carries no ``Accept``/``Content-Type`` headers; only the
    ``api-version`` query parameter is set.
    """
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
    # api_version may arrive as an explicit kwarg or as a pre-set query parameter.
    api_version = kwargs.pop("api_version", _params.pop("api-version", "2021-08-01"))  # type: str
    _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")

    template = kwargs.pop(
        "template_url",
        "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/snapshots/{snapshotName}",
    )  # pylint: disable=line-too-long
    url = _format_url_section(
        template,
        subscriptionId=_SERIALIZER.url("subscription_id", subscription_id, "str"),
        resourceGroupName=_SERIALIZER.url("resource_group_name", resource_group_name, "str"),
        snapshotName=_SERIALIZER.url("snapshot_name", snapshot_name, "str"),
    )
    return HttpRequest(method="DELETE", url=url, params=_params, **kwargs)
def build_list_by_resource_group_request(resource_group_name: str, subscription_id: str, **kwargs: Any) -> HttpRequest:
    """Build the HTTP GET request listing snapshots within a resource group."""
    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    # api_version may arrive as an explicit kwarg or as a pre-set query parameter.
    api_version = kwargs.pop("api_version", _params.pop("api-version", "2021-08-01"))  # type: str
    accept = _headers.pop("Accept", "application/json")

    template = kwargs.pop(
        "template_url",
        "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/snapshots",
    )  # pylint: disable=line-too-long
    url = _format_url_section(
        template,
        subscriptionId=_SERIALIZER.url("subscription_id", subscription_id, "str"),
        resourceGroupName=_SERIALIZER.url("resource_group_name", resource_group_name, "str"),
    )

    _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
    return HttpRequest(method="GET", url=url, params=_params, headers=_headers, **kwargs)
def build_list_request(subscription_id: str, **kwargs: Any) -> HttpRequest:
    """Build the HTTP GET request listing all snapshots in the subscription."""
    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    # api_version may arrive as an explicit kwarg or as a pre-set query parameter.
    api_version = kwargs.pop("api_version", _params.pop("api-version", "2021-08-01"))  # type: str
    accept = _headers.pop("Accept", "application/json")

    template = kwargs.pop("template_url", "/subscriptions/{subscriptionId}/providers/Microsoft.Compute/snapshots")
    url = _format_url_section(
        template,
        subscriptionId=_SERIALIZER.url("subscription_id", subscription_id, "str"),
    )

    _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
    return HttpRequest(method="GET", url=url, params=_params, headers=_headers, **kwargs)
def build_grant_access_request(
    resource_group_name: str, snapshot_name: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
    """Build the HTTP POST request for the beginGetAccess (grant SAS access) action."""
    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    # api_version may arrive as an explicit kwarg or as a pre-set query parameter;
    # Content-Type likewise as a kwarg or a pre-set header.
    api_version = kwargs.pop("api_version", _params.pop("api-version", "2021-08-01"))  # type: str
    content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None))  # type: Optional[str]
    accept = _headers.pop("Accept", "application/json")

    template = kwargs.pop(
        "template_url",
        "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/snapshots/{snapshotName}/beginGetAccess",
    )  # pylint: disable=line-too-long
    url = _format_url_section(
        template,
        subscriptionId=_SERIALIZER.url("subscription_id", subscription_id, "str"),
        resourceGroupName=_SERIALIZER.url("resource_group_name", resource_group_name, "str"),
        snapshotName=_SERIALIZER.url("snapshot_name", snapshot_name, "str"),
    )

    _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
    # Content-Type is only emitted when a body content type was supplied.
    if content_type is not None:
        _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
    return HttpRequest(method="POST", url=url, params=_params, headers=_headers, **kwargs)
def build_revoke_access_request(
    resource_group_name: str, snapshot_name: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
    """Build the HTTP POST request for the endGetAccess (revoke SAS access) action."""
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
    # api_version may arrive as an explicit kwarg or as a pre-set query parameter.
    api_version = kwargs.pop("api_version", _params.pop("api-version", "2021-08-01"))  # type: str
    _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")

    template = kwargs.pop(
        "template_url",
        "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/snapshots/{snapshotName}/endGetAccess",
    )  # pylint: disable=line-too-long
    url = _format_url_section(
        template,
        subscriptionId=_SERIALIZER.url("subscription_id", subscription_id, "str"),
        resourceGroupName=_SERIALIZER.url("resource_group_name", resource_group_name, "str"),
        snapshotName=_SERIALIZER.url("snapshot_name", snapshot_name, "str"),
    )
    return HttpRequest(method="POST", url=url, params=_params, **kwargs)
class SnapshotsOperations:
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.mgmt.compute.v2021_08_01.ComputeManagementClient`'s
:attr:`snapshots` attribute.
"""
models = _models
def __init__(self, *args, **kwargs):
input_args = list(args)
self._client = input_args.pop(0) if input_args else kwargs.pop("client")
self._config = input_args.pop(0) if input_args else kwargs.pop("config")
self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
    def _create_or_update_initial(
        self, resource_group_name: str, snapshot_name: str, snapshot: Union[_models.Snapshot, IO], **kwargs: Any
    ) -> _models.Snapshot:
        """Send the initial PUT of the create-or-update long-running operation.

        Called by :meth:`begin_create_or_update`; the returned 200/202 response
        seeds the poller that drives the rest of the operation.
        """
        # Map status codes to the azure-core exceptions raised for them; callers
        # may extend/override via the ``error_map`` kwarg.
        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})
        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
        # api_version / content_type may arrive as kwargs or as pre-set
        # query-param / header values.
        api_version = kwargs.pop("api_version", _params.pop("api-version", "2021-08-01"))  # type: str
        content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None))  # type: Optional[str]
        cls = kwargs.pop("cls", None)  # type: ClsType[_models.Snapshot]
        content_type = content_type or "application/json"
        _json = None
        _content = None
        # Raw stream/bytes bodies pass through untouched; models are serialized.
        if isinstance(snapshot, (IO, bytes)):
            _content = snapshot
        else:
            _json = self._serialize.body(snapshot, "Snapshot")
        request = build_create_or_update_request(
            resource_group_name=resource_group_name,
            snapshot_name=snapshot_name,
            subscription_id=self._config.subscription_id,
            api_version=api_version,
            content_type=content_type,
            json=_json,
            content=_content,
            template_url=self._create_or_update_initial.metadata["url"],
            headers=_headers,
            params=_params,
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)  # type: ignore
        pipeline_response = self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
            request, stream=False, **kwargs
        )
        response = pipeline_response.http_response
        # 200 = completed synchronously, 202 = accepted (LRO in progress).
        if response.status_code not in [200, 202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        if response.status_code == 200:
            deserialized = self._deserialize("Snapshot", pipeline_response)
        if response.status_code == 202:
            deserialized = self._deserialize("Snapshot", pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    _create_or_update_initial.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/snapshots/{snapshotName}"}  # type: ignore
    @overload
    def begin_create_or_update(
        self,
        resource_group_name: str,
        snapshot_name: str,
        snapshot: _models.Snapshot,
        *,
        content_type: str = "application/json",
        **kwargs: Any
    ) -> LROPoller[_models.Snapshot]:
        """Creates or updates a snapshot.

        Overload accepting the request body as a deserialized
        :class:`~azure.mgmt.compute.v2021_08_01.models.Snapshot` model.

        :param resource_group_name: The name of the resource group. Required.
        :type resource_group_name: str
        :param snapshot_name: The name of the snapshot that is being created. The name can't be changed
         after the snapshot is created. Supported characters for the name are a-z, A-Z, 0-9, _ and -.
         The max name length is 80 characters. Required.
        :type snapshot_name: str
        :param snapshot: Snapshot object supplied in the body of the Put disk operation. Required.
        :type snapshot: ~azure.mgmt.compute.v2021_08_01.models.Snapshot
        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
         Default value is "application/json".
        :paramtype content_type: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
         operation to not poll, or pass in your own initialized polling object for a personal polling
         strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of LROPoller that returns either Snapshot or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.compute.v2021_08_01.models.Snapshot]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
    @overload
    def begin_create_or_update(
        self,
        resource_group_name: str,
        snapshot_name: str,
        snapshot: IO,
        *,
        content_type: str = "application/json",
        **kwargs: Any
    ) -> LROPoller[_models.Snapshot]:
        """Creates or updates a snapshot.

        Overload accepting the request body as a raw IO stream, sent as-is.

        :param resource_group_name: The name of the resource group. Required.
        :type resource_group_name: str
        :param snapshot_name: The name of the snapshot that is being created. The name can't be changed
         after the snapshot is created. Supported characters for the name are a-z, A-Z, 0-9, _ and -.
         The max name length is 80 characters. Required.
        :type snapshot_name: str
        :param snapshot: Snapshot object supplied in the body of the Put disk operation. Required.
        :type snapshot: IO
        :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
         Default value is "application/json".
        :paramtype content_type: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
         operation to not poll, or pass in your own initialized polling object for a personal polling
         strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of LROPoller that returns either Snapshot or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.compute.v2021_08_01.models.Snapshot]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
    @distributed_trace
    def begin_create_or_update(
        self, resource_group_name: str, snapshot_name: str, snapshot: Union[_models.Snapshot, IO], **kwargs: Any
    ) -> LROPoller[_models.Snapshot]:
        """Creates or updates a snapshot.

        :param resource_group_name: The name of the resource group. Required.
        :type resource_group_name: str
        :param snapshot_name: The name of the snapshot that is being created. The name can't be changed
         after the snapshot is created. Supported characters for the name are a-z, A-Z, 0-9, _ and -.
         The max name length is 80 characters. Required.
        :type snapshot_name: str
        :param snapshot: Snapshot object supplied in the body of the Put disk operation. Is either a
         model type or a IO type. Required.
        :type snapshot: ~azure.mgmt.compute.v2021_08_01.models.Snapshot or IO
        :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
         Default value is None.
        :paramtype content_type: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
         operation to not poll, or pass in your own initialized polling object for a personal polling
         strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of LROPoller that returns either Snapshot or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.compute.v2021_08_01.models.Snapshot]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
        api_version = kwargs.pop("api_version", _params.pop("api-version", "2021-08-01"))  # type: str
        content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None))  # type: Optional[str]
        cls = kwargs.pop("cls", None)  # type: ClsType[_models.Snapshot]
        polling = kwargs.pop("polling", True)  # type: Union[bool, PollingMethod]
        lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
        cont_token = kwargs.pop("continuation_token", None)  # type: Optional[str]
        # Only issue the initial PUT when not resuming from a saved poller state.
        if cont_token is None:
            raw_result = self._create_or_update_initial(  # type: ignore
                resource_group_name=resource_group_name,
                snapshot_name=snapshot_name,
                snapshot=snapshot,
                api_version=api_version,
                content_type=content_type,
                cls=lambda x, y, z: x,  # keep the raw pipeline response for the poller
                headers=_headers,
                params=_params,
                **kwargs
            )
        kwargs.pop("error_map", None)
        def get_long_running_output(pipeline_response):
            # Deserialize the terminal LRO response into a Snapshot model.
            deserialized = self._deserialize("Snapshot", pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized
        # polling=True -> standard ARM polling; False -> no polling; otherwise a
        # caller-supplied PollingMethod instance.
        if polling is True:
            polling_method = cast(PollingMethod, ARMPolling(lro_delay, **kwargs))  # type: PollingMethod
        elif polling is False:
            polling_method = cast(PollingMethod, NoPolling())
        else:
            polling_method = polling
        if cont_token:
            # Rehydrate a poller from previously saved state.
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output,
            )
        return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_create_or_update.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/snapshots/{snapshotName}"}  # type: ignore
    def _update_initial(
        self, resource_group_name: str, snapshot_name: str, snapshot: Union[_models.SnapshotUpdate, IO], **kwargs: Any
    ) -> _models.Snapshot:
        """Send the initial PATCH of the update long-running operation.

        Called by :meth:`begin_update`; the returned 200/202 response seeds the
        poller that drives the rest of the operation.
        """
        # Map status codes to the azure-core exceptions raised for them; callers
        # may extend/override via the ``error_map`` kwarg.
        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})
        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
        # api_version / content_type may arrive as kwargs or as pre-set
        # query-param / header values.
        api_version = kwargs.pop("api_version", _params.pop("api-version", "2021-08-01"))  # type: str
        content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None))  # type: Optional[str]
        cls = kwargs.pop("cls", None)  # type: ClsType[_models.Snapshot]
        content_type = content_type or "application/json"
        _json = None
        _content = None
        # Raw stream/bytes bodies pass through untouched; models are serialized.
        if isinstance(snapshot, (IO, bytes)):
            _content = snapshot
        else:
            _json = self._serialize.body(snapshot, "SnapshotUpdate")
        request = build_update_request(
            resource_group_name=resource_group_name,
            snapshot_name=snapshot_name,
            subscription_id=self._config.subscription_id,
            api_version=api_version,
            content_type=content_type,
            json=_json,
            content=_content,
            template_url=self._update_initial.metadata["url"],
            headers=_headers,
            params=_params,
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)  # type: ignore
        pipeline_response = self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
            request, stream=False, **kwargs
        )
        response = pipeline_response.http_response
        # 200 = completed synchronously, 202 = accepted (LRO in progress).
        if response.status_code not in [200, 202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        if response.status_code == 200:
            deserialized = self._deserialize("Snapshot", pipeline_response)
        if response.status_code == 202:
            deserialized = self._deserialize("Snapshot", pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    _update_initial.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/snapshots/{snapshotName}"}  # type: ignore
    @overload
    def begin_update(
        self,
        resource_group_name: str,
        snapshot_name: str,
        snapshot: _models.SnapshotUpdate,
        *,
        content_type: str = "application/json",
        **kwargs: Any
    ) -> LROPoller[_models.Snapshot]:
        """Updates (patches) a snapshot.

        Overload accepting the request body as a deserialized
        :class:`~azure.mgmt.compute.v2021_08_01.models.SnapshotUpdate` model.

        :param resource_group_name: The name of the resource group. Required.
        :type resource_group_name: str
        :param snapshot_name: The name of the snapshot that is being created. The name can't be changed
         after the snapshot is created. Supported characters for the name are a-z, A-Z, 0-9, _ and -.
         The max name length is 80 characters. Required.
        :type snapshot_name: str
        :param snapshot: Snapshot object supplied in the body of the Patch snapshot operation.
         Required.
        :type snapshot: ~azure.mgmt.compute.v2021_08_01.models.SnapshotUpdate
        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
         Default value is "application/json".
        :paramtype content_type: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
         operation to not poll, or pass in your own initialized polling object for a personal polling
         strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of LROPoller that returns either Snapshot or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.compute.v2021_08_01.models.Snapshot]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
    @overload
    def begin_update(
        self,
        resource_group_name: str,
        snapshot_name: str,
        snapshot: IO,
        *,
        content_type: str = "application/json",
        **kwargs: Any
    ) -> LROPoller[_models.Snapshot]:
        """Updates (patches) a snapshot.

        Overload accepting the request body as a raw IO stream, sent as-is.

        :param resource_group_name: The name of the resource group. Required.
        :type resource_group_name: str
        :param snapshot_name: The name of the snapshot that is being created. The name can't be changed
         after the snapshot is created. Supported characters for the name are a-z, A-Z, 0-9, _ and -.
         The max name length is 80 characters. Required.
        :type snapshot_name: str
        :param snapshot: Snapshot object supplied in the body of the Patch snapshot operation.
         Required.
        :type snapshot: IO
        :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
         Default value is "application/json".
        :paramtype content_type: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
         operation to not poll, or pass in your own initialized polling object for a personal polling
         strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of LROPoller that returns either Snapshot or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.compute.v2021_08_01.models.Snapshot]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
    @distributed_trace
    def begin_update(
        self, resource_group_name: str, snapshot_name: str, snapshot: Union[_models.SnapshotUpdate, IO], **kwargs: Any
    ) -> LROPoller[_models.Snapshot]:
        """Updates (patches) a snapshot.

        :param resource_group_name: The name of the resource group. Required.
        :type resource_group_name: str
        :param snapshot_name: The name of the snapshot that is being created. The name can't be changed
         after the snapshot is created. Supported characters for the name are a-z, A-Z, 0-9, _ and -.
         The max name length is 80 characters. Required.
        :type snapshot_name: str
        :param snapshot: Snapshot object supplied in the body of the Patch snapshot operation. Is
         either a model type or a IO type. Required.
        :type snapshot: ~azure.mgmt.compute.v2021_08_01.models.SnapshotUpdate or IO
        :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
         Default value is None.
        :paramtype content_type: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
         operation to not poll, or pass in your own initialized polling object for a personal polling
         strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of LROPoller that returns either Snapshot or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.compute.v2021_08_01.models.Snapshot]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
        api_version = kwargs.pop("api_version", _params.pop("api-version", "2021-08-01"))  # type: str
        content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None))  # type: Optional[str]
        cls = kwargs.pop("cls", None)  # type: ClsType[_models.Snapshot]
        polling = kwargs.pop("polling", True)  # type: Union[bool, PollingMethod]
        lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
        cont_token = kwargs.pop("continuation_token", None)  # type: Optional[str]
        # Only issue the initial PATCH when not resuming from a saved poller state.
        if cont_token is None:
            raw_result = self._update_initial(  # type: ignore
                resource_group_name=resource_group_name,
                snapshot_name=snapshot_name,
                snapshot=snapshot,
                api_version=api_version,
                content_type=content_type,
                cls=lambda x, y, z: x,  # keep the raw pipeline response for the poller
                headers=_headers,
                params=_params,
                **kwargs
            )
        kwargs.pop("error_map", None)
        def get_long_running_output(pipeline_response):
            # Deserialize the terminal LRO response into a Snapshot model.
            deserialized = self._deserialize("Snapshot", pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized
        # polling=True -> standard ARM polling; False -> no polling; otherwise a
        # caller-supplied PollingMethod instance.
        if polling is True:
            polling_method = cast(PollingMethod, ARMPolling(lro_delay, **kwargs))  # type: PollingMethod
        elif polling is False:
            polling_method = cast(PollingMethod, NoPolling())
        else:
            polling_method = polling
        if cont_token:
            # Rehydrate a poller from previously saved state.
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output,
            )
        return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_update.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/snapshots/{snapshotName}"}  # type: ignore
@distributed_trace
def get(self, resource_group_name: str, snapshot_name: str, **kwargs: Any) -> _models.Snapshot:
"""Gets information about a snapshot.
:param resource_group_name: The name of the resource group. Required.
:type resource_group_name: str
:param snapshot_name: The name of the snapshot that is being created. The name can't be changed
after the snapshot is created. Supported characters for the name are a-z, A-Z, 0-9, _ and -.
The max name length is 80 characters. Required.
:type snapshot_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Snapshot or the result of cls(response)
:rtype: ~azure.mgmt.compute.v2021_08_01.models.Snapshot
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2021-08-01")) # type: str
cls = kwargs.pop("cls", None) # type: ClsType[_models.Snapshot]
request = build_get_request(
resource_group_name=resource_group_name,
snapshot_name=snapshot_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.get.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize("Snapshot", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/snapshots/{snapshotName}"} # type: ignore
def _delete_initial( # pylint: disable=inconsistent-return-statements
self, resource_group_name: str, snapshot_name: str, **kwargs: Any
) -> None:
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2021-08-01")) # type: str
cls = kwargs.pop("cls", None) # type: ClsType[None]
request = build_delete_request(
resource_group_name=resource_group_name,
snapshot_name=snapshot_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self._delete_initial.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/snapshots/{snapshotName}"} # type: ignore
    @distributed_trace
    def begin_delete(self, resource_group_name: str, snapshot_name: str, **kwargs: Any) -> LROPoller[None]:
        """Deletes a snapshot.

        :param resource_group_name: The name of the resource group. Required.
        :type resource_group_name: str
        :param snapshot_name: The name of the snapshot that is being created. The name can't be changed
         after the snapshot is created. Supported characters for the name are a-z, A-Z, 0-9, _ and -.
         The max name length is 80 characters. Required.
        :type snapshot_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
         operation to not poll, or pass in your own initialized polling object for a personal polling
         strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of LROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[None]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
        api_version = kwargs.pop("api_version", _params.pop("api-version", "2021-08-01"))  # type: str
        cls = kwargs.pop("cls", None)  # type: ClsType[None]
        polling = kwargs.pop("polling", True)  # type: Union[bool, PollingMethod]
        lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
        cont_token = kwargs.pop("continuation_token", None)  # type: Optional[str]
        # Only issue the initial DELETE when not resuming from a saved poller state.
        if cont_token is None:
            raw_result = self._delete_initial(  # type: ignore
                resource_group_name=resource_group_name,
                snapshot_name=snapshot_name,
                api_version=api_version,
                cls=lambda x, y, z: x,  # keep the raw pipeline response for the poller
                headers=_headers,
                params=_params,
                **kwargs
            )
        kwargs.pop("error_map", None)
        def get_long_running_output(pipeline_response):  # pylint: disable=inconsistent-return-statements
            # Delete has no body; only invoke the custom response hook if given.
            if cls:
                return cls(pipeline_response, None, {})
        # polling=True -> standard ARM polling; False -> no polling; otherwise a
        # caller-supplied PollingMethod instance.
        if polling is True:
            polling_method = cast(PollingMethod, ARMPolling(lro_delay, **kwargs))  # type: PollingMethod
        elif polling is False:
            polling_method = cast(PollingMethod, NoPolling())
        else:
            polling_method = polling
        if cont_token:
            # Rehydrate a poller from previously saved state.
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output,
            )
        return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_delete.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/snapshots/{snapshotName}"}  # type: ignore
    @distributed_trace
    def list_by_resource_group(self, resource_group_name: str, **kwargs: Any) -> Iterable["_models.Snapshot"]:
        """Lists snapshots under a resource group.

        :param resource_group_name: The name of the resource group. Required.
        :type resource_group_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either Snapshot or the result of cls(response)
        :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.compute.v2021_08_01.models.Snapshot]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
        api_version = kwargs.pop("api_version", _params.pop("api-version", "2021-08-01"))  # type: str
        cls = kwargs.pop("cls", None)  # type: ClsType[_models.SnapshotList]
        # Status-code -> exception mapping; callers may extend via ``error_map``.
        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})
        def prepare_request(next_link=None):
            # First page: build from the URL template; subsequent pages: follow
            # the server-provided next_link, re-applying the client api-version.
            if not next_link:
                request = build_list_by_resource_group_request(
                    resource_group_name=resource_group_name,
                    subscription_id=self._config.subscription_id,
                    api_version=api_version,
                    template_url=self.list_by_resource_group.metadata["url"],
                    headers=_headers,
                    params=_params,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)  # type: ignore
            else:
                # make call to next link with the client's api-version
                _parsed_next_link = urllib.parse.urlparse(next_link)
                _next_request_params = case_insensitive_dict(
                    {
                        key: [urllib.parse.quote(v) for v in value]
                        for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
                    }
                )
                _next_request_params["api-version"] = self._config.api_version
                request = HttpRequest(
                    "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)  # type: ignore
                request.method = "GET"
            return request
        def extract_data(pipeline_response):
            # Deserialize one page and hand (next_link, items) back to ItemPaged.
            deserialized = self._deserialize("SnapshotList", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, iter(list_of_elem)
        def get_next(next_link=None):
            # Fetch a single page, raising on any non-200 response.
            request = prepare_request(next_link)
            pipeline_response = self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
                request, stream=False, **kwargs
            )
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response
        return ItemPaged(get_next, extract_data)
    list_by_resource_group.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/snapshots"}  # type: ignore
    @distributed_trace
    def list(self, **kwargs: Any) -> Iterable["_models.Snapshot"]:
        """Lists snapshots under a subscription.
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either Snapshot or the result of cls(response)
        :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.compute.v2021_08_01.models.Snapshot]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        # Per-call header/query overrides; popped so they are not also forwarded via **kwargs.
        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
        # Caller may override the service api-version; defaults to this operation group's version.
        api_version = kwargs.pop("api_version", _params.pop("api-version", "2021-08-01")) # type: str
        cls = kwargs.pop("cls", None) # type: ClsType[_models.SnapshotList]
        # Translate common HTTP failures into typed azure-core exceptions;
        # callers may extend the mapping via an "error_map" kwarg.
        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})
        def prepare_request(next_link=None):
            # First page: build the canonical request from this operation's URL template.
            if not next_link:
                request = build_list_request(
                    subscription_id=self._config.subscription_id,
                    api_version=api_version,
                    template_url=self.list.metadata["url"],
                    headers=_headers,
                    params=_params,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url) # type: ignore
            else:
                # make call to next link with the client's api-version
                # Subsequent pages: reuse the service-supplied nextLink, re-quoting its
                # query string and forcing the client's api-version onto the request.
                _parsed_next_link = urllib.parse.urlparse(next_link)
                _next_request_params = case_insensitive_dict(
                    {
                        key: [urllib.parse.quote(v) for v in value]
                        for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
                    }
                )
                _next_request_params["api-version"] = self._config.api_version
                request = HttpRequest(
                    "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url) # type: ignore
                request.method = "GET"
            return request
        def extract_data(pipeline_response):
            # Deserialize one page and return (continuation token, iterator of elements).
            deserialized = self._deserialize("SnapshotList", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                # Caller-supplied hook transforms each page's element list.
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, iter(list_of_elem)
        def get_next(next_link=None):
            # Fetch a single page; any status other than 200 raises.
            request = prepare_request(next_link)
            pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
                request, stream=False, **kwargs
            )
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response
        # ItemPaged drives get_next/extract_data as the caller iterates the result.
        return ItemPaged(get_next, extract_data)
    list.metadata = {"url": "/subscriptions/{subscriptionId}/providers/Microsoft.Compute/snapshots"} # type: ignore
    def _grant_access_initial(
        self,
        resource_group_name: str,
        snapshot_name: str,
        grant_access_data: Union[_models.GrantAccessData, IO],
        **kwargs: Any
    ) -> Optional[_models.AccessUri]:
        """Send the initial beginGetAccess request for the grant-access LRO.

        Returns a deserialized AccessUri when the service answers 200 immediately,
        or None on 202 (operation accepted; completion is observed by the poller
        in :meth:`begin_grant_access`).
        """
        # Map well-known HTTP failures onto typed azure-core exceptions.
        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})
        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
        api_version = kwargs.pop("api_version", _params.pop("api-version", "2021-08-01")) # type: str
        content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None)) # type: Optional[str]
        cls = kwargs.pop("cls", None) # type: ClsType[Optional[_models.AccessUri]]
        content_type = content_type or "application/json"
        _json = None
        _content = None
        # Raw streams/bytes are sent as-is; model objects are serialized to JSON.
        if isinstance(grant_access_data, (IO, bytes)):
            _content = grant_access_data
        else:
            _json = self._serialize.body(grant_access_data, "GrantAccessData")
        request = build_grant_access_request(
            resource_group_name=resource_group_name,
            snapshot_name=snapshot_name,
            subscription_id=self._config.subscription_id,
            api_version=api_version,
            content_type=content_type,
            json=_json,
            content=_content,
            template_url=self._grant_access_initial.metadata["url"],
            headers=_headers,
            params=_params,
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url) # type: ignore
        pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
            request, stream=False, **kwargs
        )
        response = pipeline_response.http_response
        # 200 = completed synchronously with a body; 202 = accepted, poll later.
        if response.status_code not in [200, 202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize("AccessUri", pipeline_response)
        if cls:
            # Caller-supplied hook sees the raw pipeline response too.
            return cls(pipeline_response, deserialized, {})
        return deserialized
    _grant_access_initial.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/snapshots/{snapshotName}/beginGetAccess"} # type: ignore
    @overload
    def begin_grant_access(
        self,
        resource_group_name: str,
        snapshot_name: str,
        grant_access_data: _models.GrantAccessData,
        *,
        content_type: str = "application/json",
        **kwargs: Any
    ) -> LROPoller[_models.AccessUri]:
        """Grants access to a snapshot.
        :param resource_group_name: The name of the resource group. Required.
        :type resource_group_name: str
        :param snapshot_name: The name of the snapshot that is being created. The name can't be changed
         after the snapshot is created. Supported characters for the name are a-z, A-Z, 0-9, _ and -.
         The max name length is 80 characters. Required.
        :type snapshot_name: str
        :param grant_access_data: Access data object supplied in the body of the get snapshot access
         operation. Required.
        :type grant_access_data: ~azure.mgmt.compute.v2021_08_01.models.GrantAccessData
        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
         Default value is "application/json".
        :paramtype content_type: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
         operation to not poll, or pass in your own initialized polling object for a personal polling
         strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of LROPoller that returns either AccessUri or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.compute.v2021_08_01.models.AccessUri]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        # Typing-only @overload stub (model-typed body); no runtime behavior here.
    @overload
    def begin_grant_access(
        self,
        resource_group_name: str,
        snapshot_name: str,
        grant_access_data: IO,
        *,
        content_type: str = "application/json",
        **kwargs: Any
    ) -> LROPoller[_models.AccessUri]:
        """Grants access to a snapshot.
        :param resource_group_name: The name of the resource group. Required.
        :type resource_group_name: str
        :param snapshot_name: The name of the snapshot that is being created. The name can't be changed
         after the snapshot is created. Supported characters for the name are a-z, A-Z, 0-9, _ and -.
         The max name length is 80 characters. Required.
        :type snapshot_name: str
        :param grant_access_data: Access data object supplied in the body of the get snapshot access
         operation. Required.
        :type grant_access_data: IO
        :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
         Default value is "application/json".
        :paramtype content_type: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
         operation to not poll, or pass in your own initialized polling object for a personal polling
         strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of LROPoller that returns either AccessUri or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.compute.v2021_08_01.models.AccessUri]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        # Typing-only @overload stub (stream-typed body); no runtime behavior here.
    @distributed_trace
    def begin_grant_access(
        self,
        resource_group_name: str,
        snapshot_name: str,
        grant_access_data: Union[_models.GrantAccessData, IO],
        **kwargs: Any
    ) -> LROPoller[_models.AccessUri]:
        """Grants access to a snapshot.
        :param resource_group_name: The name of the resource group. Required.
        :type resource_group_name: str
        :param snapshot_name: The name of the snapshot that is being created. The name can't be changed
         after the snapshot is created. Supported characters for the name are a-z, A-Z, 0-9, _ and -.
         The max name length is 80 characters. Required.
        :type snapshot_name: str
        :param grant_access_data: Access data object supplied in the body of the get snapshot access
         operation. Is either a model type or a IO type. Required.
        :type grant_access_data: ~azure.mgmt.compute.v2021_08_01.models.GrantAccessData or IO
        :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
         Default value is None.
        :paramtype content_type: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
         operation to not poll, or pass in your own initialized polling object for a personal polling
         strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of LROPoller that returns either AccessUri or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.compute.v2021_08_01.models.AccessUri]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
        api_version = kwargs.pop("api_version", _params.pop("api-version", "2021-08-01")) # type: str
        content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None)) # type: Optional[str]
        cls = kwargs.pop("cls", None) # type: ClsType[_models.AccessUri]
        polling = kwargs.pop("polling", True) # type: Union[bool, PollingMethod]
        lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
        cont_token = kwargs.pop("continuation_token", None) # type: Optional[str]
        # With no continuation token, fire the initial request; cls=lambda keeps the
        # raw pipeline response so the poller can inspect headers/status itself.
        if cont_token is None:
            raw_result = self._grant_access_initial( # type: ignore
                resource_group_name=resource_group_name,
                snapshot_name=snapshot_name,
                grant_access_data=grant_access_data,
                api_version=api_version,
                content_type=content_type,
                cls=lambda x, y, z: x,
                headers=_headers,
                params=_params,
                **kwargs
            )
        # error_map was only relevant to the initial call; drop it before polling.
        kwargs.pop("error_map", None)
        def get_long_running_output(pipeline_response):
            # Deserialize the terminal response of the LRO into an AccessUri.
            deserialized = self._deserialize("AccessUri", pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized
        # polling=True -> default ARM poller (final state read from Location header);
        # polling=False -> no polling; otherwise a caller-provided PollingMethod.
        if polling is True:
            polling_method = cast(
                PollingMethod, ARMPolling(lro_delay, lro_options={"final-state-via": "location"}, **kwargs)
            ) # type: PollingMethod
        elif polling is False:
            polling_method = cast(PollingMethod, NoPolling())
        else:
            polling_method = polling
        if cont_token:
            # Resume a previously saved poller instead of starting a new operation.
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output,
            )
        return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_grant_access.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/snapshots/{snapshotName}/beginGetAccess"} # type: ignore
    def _revoke_access_initial( # pylint: disable=inconsistent-return-statements
        self, resource_group_name: str, snapshot_name: str, **kwargs: Any
    ) -> None:
        """Send the initial endGetAccess request for the revoke-access LRO.

        The service replies 200 (done) or 202 (accepted; completion is observed
        by the poller in :meth:`begin_revoke_access`). There is no response body,
        so nothing is deserialized.
        """
        # Map well-known HTTP failures onto typed azure-core exceptions.
        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})
        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
        api_version = kwargs.pop("api_version", _params.pop("api-version", "2021-08-01")) # type: str
        cls = kwargs.pop("cls", None) # type: ClsType[None]
        request = build_revoke_access_request(
            resource_group_name=resource_group_name,
            snapshot_name=snapshot_name,
            subscription_id=self._config.subscription_id,
            api_version=api_version,
            template_url=self._revoke_access_initial.metadata["url"],
            headers=_headers,
            params=_params,
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url) # type: ignore
        pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
            request, stream=False, **kwargs
        )
        response = pipeline_response.http_response
        if response.status_code not in [200, 202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        if cls:
            # Caller-supplied hook receives the raw pipeline response; payload is None.
            return cls(pipeline_response, None, {})
    _revoke_access_initial.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/snapshots/{snapshotName}/endGetAccess"} # type: ignore
    @distributed_trace
    def begin_revoke_access(self, resource_group_name: str, snapshot_name: str, **kwargs: Any) -> LROPoller[None]:
        """Revokes access to a snapshot.
        :param resource_group_name: The name of the resource group. Required.
        :type resource_group_name: str
        :param snapshot_name: The name of the snapshot that is being created. The name can't be changed
         after the snapshot is created. Supported characters for the name are a-z, A-Z, 0-9, _ and -.
         The max name length is 80 characters. Required.
        :type snapshot_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
         operation to not poll, or pass in your own initialized polling object for a personal polling
         strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of LROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[None]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
        api_version = kwargs.pop("api_version", _params.pop("api-version", "2021-08-01")) # type: str
        cls = kwargs.pop("cls", None) # type: ClsType[None]
        polling = kwargs.pop("polling", True) # type: Union[bool, PollingMethod]
        lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
        cont_token = kwargs.pop("continuation_token", None) # type: Optional[str]
        # With no continuation token, fire the initial request; cls=lambda keeps the
        # raw pipeline response so the poller can inspect headers/status itself.
        if cont_token is None:
            raw_result = self._revoke_access_initial( # type: ignore
                resource_group_name=resource_group_name,
                snapshot_name=snapshot_name,
                api_version=api_version,
                cls=lambda x, y, z: x,
                headers=_headers,
                params=_params,
                **kwargs
            )
        # error_map was only relevant to the initial call; drop it before polling.
        kwargs.pop("error_map", None)
        def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements
            # The operation yields no payload; only invoke the optional caller hook.
            if cls:
                return cls(pipeline_response, None, {})
        # polling=True -> default ARM poller (final state read from Location header);
        # polling=False -> no polling; otherwise a caller-provided PollingMethod.
        if polling is True:
            polling_method = cast(
                PollingMethod, ARMPolling(lro_delay, lro_options={"final-state-via": "location"}, **kwargs)
            ) # type: PollingMethod
        elif polling is False:
            polling_method = cast(PollingMethod, NoPolling())
        else:
            polling_method = polling
        if cont_token:
            # Resume a previously saved poller instead of starting a new operation.
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output,
            )
        return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_revoke_access.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/snapshots/{snapshotName}/endGetAccess"} # type: ignore
|
{
"content_hash": "68a21feaa242768c1fcf024647b8a809",
"timestamp": "",
"source": "github",
"line_count": 1368,
"max_line_length": 198,
"avg_line_length": 48.15423976608187,
"alnum_prop": 0.641381404174573,
"repo_name": "Azure/azure-sdk-for-python",
"id": "66ec55fbec264746397bc3c7f605b140a6b51890",
"size": "66375",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2021_08_01/operations/_snapshots_operations.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
}
|
from partial.validate.validator import Validator
class ParseBool(Validator):
    """
    Parse a value into a bool.

    Only the exact strings "false", "False" and "0" are treated as False;
    every other value -- including the empty string -- is treated as True.
    (The previous docstring wrongly referred to parser.parseInt.)
    """
    def transform(self, value):
        # Membership test against the falsy spellings; anything else is truthy.
        return value not in ("false", "False", "0")
|
{
"content_hash": "18236a9c27076654e059b374d3276994",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 51,
"avg_line_length": 22.6,
"alnum_prop": 0.6460176991150443,
"repo_name": "RentennaDev/partial",
"id": "735f8354750519a34a3dd194a6d4e8eb35e5fcf2",
"size": "226",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "partial/validate/parseBool.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "129"
},
{
"name": "Python",
"bytes": "25963"
}
],
"symlink_target": ""
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.