import os
import shutil
import addSubproject
import option
import utility
import grapeGit as git
import grapeConfig
import grapeMenu
import checkout
# update your custom sparse checkout view
class UpdateView(option.Option):
"""
grape uv - Updates your active submodules and ensures you are on a consistent branch throughout your project.
Usage: grape-uv [-f ] [--checkSubprojects] [-b] [--skipSubmodules] [--allSubmodules]
[--skipNestedSubprojects] [--allNestedSubprojects] [--sync=<bool>]
[--add=<addedSubmoduleOrSubproject>...] [--rm=<removedSubmoduleOrSubproject>...]
Options:
-f Force removal of subprojects currently in your view that are taken out of the view as a
                            result of this call to uv.
--checkSubprojects Checks for branch model consistency across your submodules and subprojects, but does
not go through the 'which submodules do you want' script.
-b Automatically creates subproject branches that should be there according to your branching
model.
    --allSubmodules         Automatically add all submodules to your workspace.
    --allNestedSubprojects  Automatically add all nested subprojects to your workspace.
    --skipSubmodules        Skip querying for and updating submodules.
    --skipNestedSubprojects Skip querying for and updating nested subprojects.
--sync=<bool> Take extra steps to ensure the branch you're on is up to date with origin,
either by pushing or pulling the remote tracking branch.
                            This will also check out the public branch in a detached (headless) state prior to offering to create
a new branch (in repositories where the current branch does not exist).
[default: .grapeconfig.post-checkout.syncWithOrigin]
    --add=<project>         Submodule or subproject to add to the workspace. Can be specified multiple times.
    --rm=<project>          Submodule or subproject to remove from the workspace. Can be specified multiple times.
"""
def __init__(self):
super(UpdateView, self).__init__()
self._key = "uv"
self._section = "Workspace"
self._pushBranch = False
self._skipPush = False
def description(self):
return "Update the view of your current working tree"
@staticmethod
def defineActiveSubmodules(projectType="submodule"):
"""
Queries the user for the submodules (projectType == "submodule") or nested subprojects
(projectType == "nested subproject") they would like to activate.
"""
if projectType == "submodule":
allSubprojects = git.getAllSubmodules()
activeSubprojects = git.getActiveSubmodules()
if projectType == "nested subproject":
config = grapeConfig.grapeConfig()
allSubprojectNames = config.getAllNestedSubprojects()
allSubprojects = []
for project in allSubprojectNames:
allSubprojects.append(config.get("nested-%s" % project, "prefix"))
activeSubprojects = grapeConfig.GrapeConfigParser.getAllActiveNestedSubprojectPrefixes()
toplevelDirs = {}
toplevelActiveDirs = {}
toplevelSubs = []
for sub in allSubprojects:
# we are taking advantage of the fact that branchPrefixes are the same as directory prefixes for local
# top-level dirs.
prefix = git.branchPrefix(sub)
if sub != prefix:
toplevelDirs[prefix] = []
toplevelActiveDirs[prefix] = []
for sub in allSubprojects:
prefix = git.branchPrefix(sub)
if sub != prefix:
toplevelDirs[prefix].append(sub)
else:
toplevelSubs.append(sub)
for sub in activeSubprojects:
prefix = git.branchPrefix(sub)
if sub != prefix:
toplevelActiveDirs[prefix].append(sub)
included = {}
for directory, subprojects in toplevelDirs.items():
activeDir = toplevelActiveDirs[directory]
if len(activeDir) == 0:
defaultValue = "none"
elif set(activeDir) == set(subprojects):
defaultValue = "all"
else:
defaultValue = "some"
opt = utility.userInput("Would you like all, some, or none of the %ss in %s?" % (projectType,directory),
default=defaultValue)
if opt.lower()[0] == "a":
for subproject in subprojects:
included[subproject] = True
if opt.lower()[0] == "n":
for subproject in subprojects:
included[subproject] = False
if opt.lower()[0] == "s":
for subproject in subprojects:
included[subproject] = utility.userInput("Would you like %s %s? [y/n]" % (projectType, subproject),
'y' if (subproject in activeSubprojects) else 'n')
for subproject in toplevelSubs:
included[subproject] = utility.userInput("Would you like %s %s? [y/n]" % (projectType, subproject),
'y' if (subproject in activeSubprojects) else 'n')
return included
@staticmethod
def defineActiveNestedSubprojects():
"""
Queries the user for the nested subprojects they would like to activate.
"""
return UpdateView.defineActiveSubmodules(projectType="nested subproject")
def execute(self, args):
sync = args["--sync"].lower().strip()
sync = sync == "true" or sync == "yes"
args["--sync"] = sync
config = grapeConfig.grapeConfig()
origwd = os.getcwd()
wsDir = utility.workspaceDir()
os.chdir(wsDir)
base = git.baseDir()
if base == "":
return False
hasSubmodules = len(git.getAllSubmodules()) > 0 and not args["--skipSubmodules"]
includedSubmodules = {}
includedNestedSubprojectPrefixes = {}
allSubmodules = git.getAllSubmodules()
allNestedSubprojects = config.getAllNestedSubprojects()
addedSubmodules = []
addedNestedSubprojects = []
addedProjects = args["--add"]
notFound = []
for proj in addedProjects:
if proj in allSubmodules:
addedSubmodules.append(proj)
elif proj in allNestedSubprojects:
addedNestedSubprojects.append(proj)
else:
notFound.append(proj)
rmSubmodules = []
rmNestedSubprojects = []
rmProjects = args["--rm"]
for proj in rmProjects:
if proj in allSubmodules:
rmSubmodules.append(proj)
elif proj in allNestedSubprojects:
rmNestedSubprojects.append(proj)
else:
notFound.append(proj)
if notFound:
utility.printMsg("\"%s\" not found in submodules %s \nor\n nested subprojects %s" % (",".join(notFound),",".join(allSubmodules),",".join(allNestedSubprojects)))
return False
if not args["--checkSubprojects"]:
# get submodules to update
if hasSubmodules:
if args["--allSubmodules"]:
includedSubmodules = {sub:True for sub in allSubmodules}
elif args["--add"] or args["--rm"]:
includedSubmodules = {sub:True for sub in git.getActiveSubmodules()}
includedSubmodules.update({sub:True for sub in addedSubmodules})
includedSubmodules.update({sub:False for sub in rmSubmodules})
else:
includedSubmodules = self.defineActiveSubmodules()
# get subprojects to update
if not args["--skipNestedSubprojects"]:
nestedPrefixLookup = lambda x : config.get("nested-%s" % x, "prefix")
if args["--allNestedSubprojects"]:
includedNestedSubprojectPrefixes = {nestedPrefixLookup(sub):True for sub in allNestedSubprojects}
elif args["--add"] or args["--rm"]:
includedNestedSubprojectPrefixes = {sub:True for sub in grapeConfig.GrapeConfigParser.getAllActiveNestedSubprojectPrefixes()}
includedNestedSubprojectPrefixes.update({nestedPrefixLookup(sub):True for sub in addedNestedSubprojects})
includedNestedSubprojectPrefixes.update({nestedPrefixLookup(sub):False for sub in rmNestedSubprojects})
else:
includedNestedSubprojectPrefixes = self.defineActiveNestedSubprojects()
if hasSubmodules:
initStr = ""
deinitStr = ""
rmCachedStr = ""
resetStr = ""
for submodule, nowActive in includedSubmodules.items():
if nowActive:
initStr += ' %s' % submodule
else:
deinitStr += ' %s' % submodule
rmCachedStr += ' %s' % submodule
resetStr += ' %s' % submodule
if args["-f"] and deinitStr:
deinitStr = "-f"+deinitStr
utility.printMsg("Configuring submodules...")
utility.printMsg("Initializing submodules...")
git.submodule("init %s" % initStr.strip())
if deinitStr:
utility.printMsg("Deiniting submodules that were not requested... (%s)" % deinitStr)
done = False
while not done:
try:
git.submodule("deinit %s" % deinitStr.strip())
done = True
except git.GrapeGitError as e:
if "the following file has local modifications" in e.gitOutput:
print e.gitOutput
utility.printMsg("A submodule that you wanted to remove has local modifications. "
"Use grape uv -f to force removal.")
return False
elif "use 'rm -rf' if you really want to remove it including all of its history" in e.gitOutput:
if not args["-f"]:
raise e
# it is safe to move the .git of the submodule to the .git/modules area of the workspace...
module = None
for l in e.gitOutput.split('\n'):
if "Submodule work tree" in l and "contains a .git directory" in l:
module = l.split("'")[1]
break
if module:
src = os.path.join(module, ".git")
dest = os.path.join(wsDir, ".git", "modules", module)
utility.printMsg("Moving %s to %s"%(src, dest))
shutil.move(src, dest )
else:
raise e
else:
raise e
git.rm("--cached %s" % rmCachedStr)
git.reset(" %s" % resetStr)
if initStr:
utility.printMsg("Updating active submodules...(%s)" % initStr)
git.submodule("update")
# handle nested subprojects
if not args["--skipNestedSubprojects"]:
reverseLookupByPrefix = {nestedPrefixLookup(sub) : sub for sub in allNestedSubprojects}
userConfig = grapeConfig.grapeUserConfig()
updatedActiveList = []
for subproject, nowActive in includedNestedSubprojectPrefixes.items():
subprojectName = reverseLookupByPrefix[subproject]
section = "nested-%s" % reverseLookupByPrefix[subproject]
userConfig.ensureSection(section)
previouslyActive = userConfig.getboolean(section, "active")
previouslyActive = previouslyActive and os.path.exists(os.path.join(base, subproject, ".git"))
userConfig.set(section, "active", "True" if previouslyActive else "False")
if nowActive and previouslyActive:
updatedActiveList.append(subprojectName)
if nowActive and not previouslyActive:
utility.printMsg("Activating Nested Subproject %s" % subproject)
if not addSubproject.AddSubproject.activateNestedSubproject(subprojectName, userConfig):
utility.printMsg("Can't activate %s. Exiting..." % subprojectName)
return False
updatedActiveList.append(subprojectName)
if not nowActive and not previouslyActive:
pass
if not nowActive and previouslyActive:
#remove the subproject
subprojectdir = os.path.join(base, utility.makePathPortable(subproject))
proceed = args["-f"] or \
utility.userInput("About to delete all contents in %s. Any uncommitted changes, committed changes "
"that have not been pushed, or ignored files will be lost. Proceed?" %
subproject, 'n')
if proceed:
shutil.rmtree(subprojectdir)
userConfig.setActiveNestedSubprojects(updatedActiveList)
grapeConfig.writeConfig(userConfig, os.path.join(utility.workspaceDir(), ".git", ".grapeuserconfig"))
checkoutArgs = "-b" if args["-b"] else ""
safeSwitchWorkspaceToBranch( git.currentBranch(), checkoutArgs, sync)
os.chdir(origwd)
return True
@staticmethod
def getDesiredSubmoduleBranch(config):
publicBranches = config.getPublicBranchList()
currentBranch = git.currentBranch()
if currentBranch in publicBranches:
desiredSubmoduleBranch = config.getMapping("workspace", "submodulepublicmappings")[currentBranch]
else:
desiredSubmoduleBranch = currentBranch
return desiredSubmoduleBranch
def setDefaultConfig(self, config):
config.ensureSection("workspace")
config.set("workspace", "submodulepublicmappings", "?:master")
def ensureLocalUpToDateWithRemote(repo = '', branch = 'master'):
utility.printMsg( "Ensuring local branch %s in %s is up to date with origin" % (branch, repo))
with utility.cd(repo):
# attempt to fetch the requested branch
try:
git.fetch("origin", "%s:%s" % (branch, branch))
except:
# the branch may not exist, but this is ok
pass
if git.currentBranch() == branch:
return
if not git.hasBranch(branch):
# switch to corresponding public branch if the branch does not exist
public = grapeConfig.workspaceConfig().getPublicBranchFor(branch)
# figure out if this is a submodule
relpath = os.path.relpath(repo, utility.workspaceDir())
relpath = relpath.replace('\\',"/")
with utility.cd(utility.workspaceDir()):
# if this is a submodule, get the appropriate public mapping
if relpath in git.getAllSubmoduleURLMap().keys():
public = grapeConfig.workspaceConfig().getMapping("workspace", "submodulepublicmappings")[public]
utility.printMsg("Branch %s does not exist in %s, switching to %s and detaching" % (branch, repo, public))
git.checkout(public)
git.pull("origin %s" % (public))
git.checkout("--detach HEAD")
def cleanupPush(repo='', branch='', args='none'):
with utility.cd(repo):
utility.printMsg("Attempting push of local %s in %s" % (branch, repo))
git.push("origin %s" % branch)
def handleCleanupPushMRE(mre):
for e, repo, branch in zip(mre.exceptions(), mre.repos(), mre.branches()):
try:
raise e
except git.GrapeGitError as e2:
utility.printMsg("Local and remote versions of %s may have diverged in %s" % (branch, repo))
utility.printMsg("%s" % e2.gitOutput)
utility.printMsg("Use grape pull to merge the remote version into the local version.")
def handleEnsureLocalUpToDateMRE(mre):
_pushBranch = False
_skipPush = False
cleanupPushArgs = []
for e1, repo, branch in zip(mre.exceptions(), mre.repos(), mre.branches()):
try:
raise e1
except git.GrapeGitError as e:
if ("[rejected]" in e.gitOutput and "(non-fast-forward)" in e.gitOutput) or "Couldn't find remote ref" in e.gitOutput:
if "Couldn't find remote ref" in e.gitOutput:
if not _pushBranch:
utility.printMsg("No remote reference to %s in %s's origin. You may want to push this branch." % (branch, repo))
else:
utility.printMsg("Fetch of %s rejected as non-fast-forward in repo %s" % (branch, repo))
pushBranch = _pushBranch
if _skipPush:
pushBranch = False
elif not pushBranch:
pushBranch = utility.userInput("Would you like to push your local branch? \n"
"(select 'a' to say yes for (a)ll subprojects, 's' to (s)kip push for all subprojects)"
"\n(y,n,a,s)", 'y')
if str(pushBranch).lower()[0] == 'a':
_pushBranch = True
pushBranch = True
if str(pushBranch).lower()[0] == 's':
_skipPush = True
pushBranch = False
if pushBranch:
cleanupPushArgs.append((repo, branch, None))
else:
utility.printMsg("Skipping push of local %s in %s" % (branch, repo))
elif e.commError:
utility.printMsg("Could not update %s from origin due to a connectivity issue. Checking out most recent\n"
"local version. " % branch)
else:
raise(e)
# do another MRC launch to do any follow up pushes that were requested.
utility.MultiRepoCommandLauncher(cleanupPush, listOfRepoBranchArgTuples=cleanupPushArgs).launchFromWorkspaceDir(handleMRE=handleCleanupPushMRE)
return
def safeSwitchWorkspaceToBranch(branch, checkoutArgs, sync):
# Ensure local branches that you are about to check out are up to date with the remote
if sync:
launcher = utility.MultiRepoCommandLauncher(ensureLocalUpToDateWithRemote, branch = branch, globalArgs=[checkoutArgs])
launcher.launchFromWorkspaceDir(handleMRE=handleEnsureLocalUpToDateMRE)
# Do a checkout
# Pass False instead of sync since if sync is True ensureLocalUpToDateWithRemote will have already performed the fetch
launcher = utility.MultiRepoCommandLauncher(checkout.handledCheckout, branch = branch, globalArgs = [checkoutArgs, False])
launcher.launchFromWorkspaceDir(handleMRE=checkout.handleCheckoutMRE)
return
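# ---------------------------------------------------------------------------
# Illustrative sketch (not part of grape): how the names given via --add/--rm
# are partitioned into submodules, nested subprojects, and unknown entries,
# mirroring the logic in UpdateView.execute() above. All names are hypothetical.
def _partitionRequestedProjects(requested, allSubmodules, allNestedSubprojects):
    submodules, nestedSubprojects, notFound = [], [], []
    for proj in requested:
        if proj in allSubmodules:
            submodules.append(proj)
        elif proj in allNestedSubprojects:
            nestedSubprojects.append(proj)
        else:
            notFound.append(proj)
    return submodules, nestedSubprojects, notFound
# Example: _partitionRequestedProjects(["libA", "toolX"], ["libA"], ["toolB"])
# returns (["libA"], [], ["toolX"]); execute() prints a message and aborts when
# the notFound list is non-empty.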
|
{
"content_hash": "e1295e1479ef3c61b8e98fed394a1cdb",
"timestamp": "",
"source": "github",
"line_count": 416,
"max_line_length": 172,
"avg_line_length": 48.75961538461539,
"alnum_prop": 0.5505817393019128,
"repo_name": "robinson96/GRAPE",
"id": "1a8b79a1646af2489018f8d276e2951d30a40f21",
"size": "20286",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "vine/updateView.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "5564"
},
{
"name": "Python",
"bytes": "1374602"
},
{
"name": "Shell",
"bytes": "24573"
}
],
"symlink_target": ""
}
|
"""
This package is dedicated to AXI Load/Store Units (LSU) a others like.
Other LSU implementations:
* https://github.com/riscv-boom/riscv-boom - has RISC-V LSU in chisel3, read kill, MSHRs
* https://github.com/rsd-devel/rsd - has N issue RISC-V LSU
* https://github.com/bluespec/Toooba - has RISC-V LSU in BlueSpecVerilog
* https://github.com/openhwgroup/cv32e40p - has RISC-V LSU for in order pipeline
"""
|
{
"content_hash": "8a7467554a9ba9b958a48848db9d0166",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 88,
"avg_line_length": 41,
"alnum_prop": 0.7390243902439024,
"repo_name": "Nic30/hwtLib",
"id": "119f325211db9b4a3a85180561aa7eb5609cfd61",
"size": "410",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hwtLib/amba/axi_comp/lsu/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "41560"
},
{
"name": "Python",
"bytes": "2523349"
},
{
"name": "VHDL",
"bytes": "117346"
},
{
"name": "Verilog",
"bytes": "36444"
}
],
"symlink_target": ""
}
|
'''
Some simple tests to check that the framework is properly wrapped.
'''
import objc
import unittest
import QuickLookUI
class TestQuickLookUI (unittest.TestCase):
def testClasses(self):
pass
# self.assert_( hasattr(QuickLookUI, 'CLASSNAME') )
# self.assert_( isinstance(QuickLookUI.CLASSNAME, objc.objc_class) )
# Tollfree CF-type:
# self.assert_( hasattr(QuickLookUI, 'CLASSNAMERef') )
# self.assert_( QuickLookUI.CLASSNAMERef is QuickLookUI.CLASSNAME )
# Not-tollfree CF-type:
# self.assert_( hasattr(QuickLookUI, 'CLASSNAMERef') )
# self.assert_( issubclass(QuickLookUI.CLASSNAMERef, objc.lookUpClass('NSCFType')) )
# self.assert_( QuickLookUI.CLASSNAMERef is not objc.lookUpClass('NSCFType') )
def testValues(self):
# Use this to test for a number of enum and #define values
pass
# Integer values:
# self.assert_( hasattr(QuickLookUI, 'CONSTANT') )
# self.assert_( isinstance(QuickLookUI.CONSTANT, (int, long)) )
# self.assertEquals(QuickLookUI.CONSTANT, 7)
# String values:
# self.assert_( hasattr(QuickLookUI, 'CONSTANT') )
# self.assert_( isinstance(QuickLookUI.CONSTANT, (str, unicode)) )
# self.assertEquals(QuickLookUI.CONSTANT, 'value')
def testVariables(self):
# Use this to test for global variables, (NSString*'s and the like)
pass
# self.assert_( hasattr(QuickLookUI, 'CONSTANT') )
# self.assert_( isinstance(QuickLookUI.CONSTANT, unicode) )
def testFunctions(self):
# Use this to test for functions
pass
# self.assert_( hasattr(QuickLookUI, 'FUNCTION') )
def testOpaque(self):
# Use this to test for opaque pointers
pass
# self.assert_( hasattr(QuickLookUI, 'OPAQUE') )
def testProtocols(self):
# Use this to test if informal protocols are present
pass
# self.assert_( hasattr(QuickLookUI, 'protocols') )
# self.assert_( hasattr(QuickLookUI.protocols, 'PROTOCOL') )
# self.assert_( isinstance(QuickLookUI.protocols.PROTOCOL, objc.informal_protocol) )
def test_structs(self):
# Use this to test struct wrappers
pass
# self.assert_( hasattr(QuickLookUI, 'STRUCT') )
# o = QuickLookUI.STRUCT()
# self.assert_( hasattr(o, 'FIELD_NAME') )
if __name__ == "__main__":
unittest.main()
|
{
"content_hash": "7c608bb64651cc82328e40f67282042c",
"timestamp": "",
"source": "github",
"line_count": 75,
"max_line_length": 92,
"avg_line_length": 32.89333333333333,
"alnum_prop": 0.6331576813944062,
"repo_name": "albertz/music-player",
"id": "09c557b9e14581ed336004686ba0e16785df1bdc",
"size": "2467",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mac/pyobjc-framework-Quartz/Lib/Quartz/QuickLookUI/test/test_quicklookui.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Assembly",
"bytes": "47481"
},
{
"name": "C",
"bytes": "435926"
},
{
"name": "C++",
"bytes": "149133"
},
{
"name": "CSS",
"bytes": "16435"
},
{
"name": "HTML",
"bytes": "914432"
},
{
"name": "JavaScript",
"bytes": "52869"
},
{
"name": "M",
"bytes": "10808"
},
{
"name": "Makefile",
"bytes": "13304"
},
{
"name": "Mathematica",
"bytes": "61418"
},
{
"name": "Objective-C",
"bytes": "2082720"
},
{
"name": "Objective-C++",
"bytes": "62427"
},
{
"name": "PostScript",
"bytes": "2783"
},
{
"name": "Prolog",
"bytes": "217"
},
{
"name": "Python",
"bytes": "7789845"
},
{
"name": "QMake",
"bytes": "9667"
},
{
"name": "Roff",
"bytes": "8329"
},
{
"name": "Shell",
"bytes": "3521"
}
],
"symlink_target": ""
}
|
import os
import numpy as np
from tqdm import tqdm
import jsonlines
DOC_STRIDE = 2048
MAX_LENGTH = 4096
SEED = 42
PROCESS_TRAIN = os.environ.pop("PROCESS_TRAIN", "false")
CATEGORY_MAPPING = {"null": 0, "short": 1, "long": 2, "yes": 3, "no": 4}
def _get_single_answer(example):
def choose_first(answer, is_long_answer=False):
assert isinstance(answer, list)
if len(answer) == 1:
answer = answer[0]
return {k: [answer[k]] for k in answer} if is_long_answer else answer
for a in answer:
if is_long_answer:
a = {k: [a[k]] for k in a}
if len(a["start_token"]) > 0:
break
return a
answer = {"id": example["id"]}
annotation = example["annotations"]
yes_no_answer = annotation["yes_no_answer"]
if 0 in yes_no_answer or 1 in yes_no_answer:
answer["category"] = ["yes"] if 1 in yes_no_answer else ["no"]
answer["start_token"] = answer["end_token"] = []
answer["start_byte"] = answer["end_byte"] = []
answer["text"] = ["<cls>"]
else:
answer["category"] = ["short"]
out = choose_first(annotation["short_answers"])
if len(out["start_token"]) == 0:
# answer will be long if short is not available
answer["category"] = ["long"]
out = choose_first(annotation["long_answer"], is_long_answer=True)
out["text"] = []
answer.update(out)
# disregard some samples
if len(answer["start_token"]) > 1 or answer["start_token"] == answer["end_token"]:
answer["remove_it"] = True
else:
answer["remove_it"] = False
cols = ["start_token", "end_token", "start_byte", "end_byte", "text"]
if not all([isinstance(answer[k], list) for k in cols]):
raise ValueError("Issue in ID", example["id"])
return answer
def get_context_and_ans(example, assertion=False):
"""Gives new context after removing <html> & new answer tokens as per new context"""
answer = _get_single_answer(example)
# bytes are of no use
del answer["start_byte"]
del answer["end_byte"]
# handle yes_no answers explicitly
if answer["category"][0] in ["yes", "no"]: # category is list with one element
doc = example["document"]["tokens"]
context = []
for i in range(len(doc["token"])):
if not doc["is_html"][i]:
context.append(doc["token"][i])
return {
"context": " ".join(context),
"answer": {
"start_token": -100, # ignore index in cross-entropy
"end_token": -100, # ignore index in cross-entropy
"category": answer["category"],
"span": answer["category"], # extra
},
}
# later, help in removing all no answers
if answer["start_token"] == [-1]:
return {
"context": "None",
"answer": {
"start_token": -1,
"end_token": -1,
"category": "null",
"span": "None", # extra
},
}
# handling normal samples
cols = ["start_token", "end_token"]
answer.update({k: answer[k][0] if len(answer[k]) > 0 else answer[k] for k in cols}) # e.g. [10] == 10
doc = example["document"]["tokens"]
start_token = answer["start_token"]
end_token = answer["end_token"]
context = []
for i in range(len(doc["token"])):
if not doc["is_html"][i]:
context.append(doc["token"][i])
else:
if answer["start_token"] > i:
start_token -= 1
if answer["end_token"] > i:
end_token -= 1
new = " ".join(context[start_token:end_token])
# checking above code
if assertion:
"""checking if above code is working as expected for all the samples"""
is_html = doc["is_html"][answer["start_token"] : answer["end_token"]]
old = doc["token"][answer["start_token"] : answer["end_token"]]
old = " ".join([old[i] for i in range(len(old)) if not is_html[i]])
if new != old:
print("ID:", example["id"])
print("New:", new, end="\n")
print("Old:", old, end="\n\n")
return {
"context": " ".join(context),
"answer": {
"start_token": start_token,
"end_token": end_token - 1, # this makes it inclusive
"category": answer["category"], # either long or short
"span": new, # extra
},
}
def get_strided_contexts_and_ans(example, tokenizer, doc_stride=2048, max_length=4096, assertion=True):
# overlap will be of doc_stride - q_len
out = get_context_and_ans(example, assertion=assertion)
answer = out["answer"]
# later, removing these samples
if answer["start_token"] == -1:
return {
"example_id": example["id"],
"input_ids": [[-1]],
"labels": {
"start_token": [-1],
"end_token": [-1],
"category": ["null"],
},
}
input_ids = tokenizer(example["question"]["text"], out["context"]).input_ids
q_len = input_ids.index(tokenizer.sep_token_id) + 1
# return yes/no
if answer["category"][0] in ["yes", "no"]: # category is list with one element
inputs = []
category = []
q_indices = input_ids[:q_len]
doc_start_indices = range(q_len, len(input_ids), max_length - doc_stride)
for i in doc_start_indices:
end_index = i + max_length - q_len
slice = input_ids[i:end_index]
inputs.append(q_indices + slice)
category.append(answer["category"][0])
if slice[-1] == tokenizer.sep_token_id:
break
return {
"example_id": example["id"],
"input_ids": inputs,
"labels": {
"start_token": [-100] * len(category),
"end_token": [-100] * len(category),
"category": category,
},
}
splitted_context = out["context"].split()
complete_end_token = splitted_context[answer["end_token"]]
answer["start_token"] = len(
tokenizer(
" ".join(splitted_context[: answer["start_token"]]),
add_special_tokens=False,
).input_ids
)
answer["end_token"] = len(
tokenizer(" ".join(splitted_context[: answer["end_token"]]), add_special_tokens=False).input_ids
)
answer["start_token"] += q_len
answer["end_token"] += q_len
# fixing end token
num_sub_tokens = len(tokenizer(complete_end_token, add_special_tokens=False).input_ids)
if num_sub_tokens > 1:
answer["end_token"] += num_sub_tokens - 1
old = input_ids[answer["start_token"] : answer["end_token"] + 1] # right & left are inclusive
start_token = answer["start_token"]
end_token = answer["end_token"]
if assertion:
"""This won't match exactly because of extra gaps => visaully inspect everything"""
new = tokenizer.decode(old)
if answer["span"] != new:
print("ISSUE IN TOKENIZATION")
print("OLD:", answer["span"])
print("NEW:", new, end="\n\n")
if len(input_ids) <= max_length:
return {
"example_id": example["id"],
"input_ids": [input_ids],
"labels": {
"start_token": [answer["start_token"]],
"end_token": [answer["end_token"]],
"category": answer["category"],
},
}
q_indices = input_ids[:q_len]
doc_start_indices = range(q_len, len(input_ids), max_length - doc_stride)
inputs = []
answers_start_token = []
answers_end_token = []
answers_category = [] # null, yes, no, long, short
for i in doc_start_indices:
end_index = i + max_length - q_len
slice = input_ids[i:end_index]
inputs.append(q_indices + slice)
assert len(inputs[-1]) <= max_length, "Issue in truncating length"
if start_token >= i and end_token <= end_index - 1:
start_token = start_token - i + q_len
end_token = end_token - i + q_len
answers_category.append(answer["category"][0]) # ["short"] -> "short"
else:
start_token = -100
end_token = -100
answers_category.append("null")
new = inputs[-1][start_token : end_token + 1]
answers_start_token.append(start_token)
answers_end_token.append(end_token)
if assertion:
"""checking if above code is working as expected for all the samples"""
if new != old and new != [tokenizer.cls_token_id]:
print("ISSUE in strided for ID:", example["id"])
print("New:", tokenizer.decode(new))
print("Old:", tokenizer.decode(old), end="\n\n")
if slice[-1] == tokenizer.sep_token_id:
break
return {
"example_id": example["id"],
"input_ids": inputs,
"labels": {
"start_token": answers_start_token,
"end_token": answers_end_token,
"category": answers_category,
},
}
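# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original script): how the windows built
# from doc_start_indices above overlap. Each window is the q_len question tokens
# followed by up to (max_length - q_len) context tokens, so consecutive windows
# share doc_stride - q_len context tokens. Toy numbers are used below.
def _stride_window_demo(total_len=20, q_len=3, max_length=10, doc_stride=7):
    doc_start_indices = range(q_len, total_len, max_length - doc_stride)
    # (start, end) slices of the context portion of each window
    return [(i, min(i + max_length - q_len, total_len)) for i in doc_start_indices]
# _stride_window_demo() -> [(3, 10), (6, 13), (9, 16), (12, 19), (15, 20), (18, 20)]
# Adjacent windows overlap by doc_stride - q_len = 4 context positions.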
def prepare_inputs(example, tokenizer, doc_stride=2048, max_length=4096, assertion=False):
example = get_strided_contexts_and_ans(
example,
tokenizer,
doc_stride=doc_stride,
max_length=max_length,
assertion=assertion,
)
return example
def save_to_disk(hf_data, file_name):
with jsonlines.open(file_name, "a") as writer:
for example in tqdm(hf_data, total=len(hf_data), desc="Saving samples ... "):
labels = example["labels"]
for ids, start, end, cat in zip(
example["input_ids"],
labels["start_token"],
labels["end_token"],
labels["category"],
):
if start == -1 and end == -1:
                    continue  # skip samples that have no answer
if cat == "null" and np.random.rand() < 0.6:
                    continue  # randomly drop ~60% of the null-category samples
writer.write(
{
"input_ids": ids,
"start_token": start,
"end_token": end,
"category": CATEGORY_MAPPING[cat],
}
)
if __name__ == "__main__":
"""Running area"""
from datasets import load_dataset
from transformers import BigBirdTokenizer
data = load_dataset("natural_questions")
tokenizer = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")
data = data["train" if PROCESS_TRAIN == "true" else "validation"]
fn_kwargs = dict(
tokenizer=tokenizer,
doc_stride=DOC_STRIDE,
max_length=MAX_LENGTH,
assertion=False,
)
data = data.map(prepare_inputs, fn_kwargs=fn_kwargs)
data = data.remove_columns(["annotations", "document", "id", "question"])
print(data)
np.random.seed(SEED)
cache_file_name = "nq-training.jsonl" if PROCESS_TRAIN == "true" else "nq-validation.jsonl"
save_to_disk(data, file_name=cache_file_name)
|
{
"content_hash": "55467e92b9eb7441ac2a5ff0f8ec40c0",
"timestamp": "",
"source": "github",
"line_count": 330,
"max_line_length": 106,
"avg_line_length": 34.28484848484848,
"alnum_prop": 0.5319073713982676,
"repo_name": "huggingface/transformers",
"id": "8d2f69031e2ab4c98e780eda85983466b17b23c3",
"size": "11314",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "examples/research_projects/jax-projects/big_bird/prepare_natural_questions.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "6021"
},
{
"name": "C++",
"bytes": "12959"
},
{
"name": "Cuda",
"bytes": "175419"
},
{
"name": "Dockerfile",
"bytes": "18218"
},
{
"name": "Jsonnet",
"bytes": "937"
},
{
"name": "Makefile",
"bytes": "3430"
},
{
"name": "Python",
"bytes": "35742012"
},
{
"name": "Shell",
"bytes": "30374"
}
],
"symlink_target": ""
}
|
__author__ = 'Sean Lip'
import test_utils
class ReaderTest(test_utils.AppEngineTestBase):
def testReaderPage(self):
"""Test the reader exploration page."""
# TODO(sll): Write tests here.
pass
|
{
"content_hash": "74c1dd62f52bfc92cc9b83c3da119348",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 47,
"avg_line_length": 20.363636363636363,
"alnum_prop": 0.6383928571428571,
"repo_name": "sunu/oppia-test-2",
"id": "e5e47ff41f5f77c482cb7d83b902b60f4633e50b",
"size": "822",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "controllers/reader_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "5996391"
},
{
"name": "Python",
"bytes": "204026"
},
{
"name": "Shell",
"bytes": "16814"
}
],
"symlink_target": ""
}
|
try:
# Python 2.x
from UserDict import UserDict
except ImportError:
# Python 3.x
from collections import UserDict
import pickle
from axelrod import Player
class DeterministicCache(UserDict):
"""A class to cache the results of deterministic matches
For fixed length matches with no noise between pairs of deterministic
players, the results will always be the same. We can hold those results
in this class so as to avoid repeatedly generating them in tournaments
of multiple repetitions.
By also storing those cached results in a file, we can re-use the cache
between multiple tournaments if necessary.
    The cache is a dictionary mapping a pair of Player classes plus the number
    of turns to a list of resulting interactions. e.g. for a 3 turn Match
    between Cooperator and Alternator, the dictionary entry would be:
       (axelrod.Cooperator, axelrod.Alternator, 3): [('C', 'C'), ('C', 'D'), ('C', 'C')]
Most of the functionality is provided by the UserDict class (which uses an
instance of dict as the 'data' attribute to hold the dictionary entries).
This class overrides the __init__ and __setitem__ methods in order to limit
and validate the keys and values to be as described above. It also adds
methods to save/load the cache to/from a file.
"""
def __init__(self, file_name=None):
"""
Parameters
----------
file_name : string
Path to a previously saved cache file
"""
UserDict.__init__(self)
self.mutable = True
if file_name is not None:
self.load(file_name)
def __setitem__(self, key, value):
"""Overrides the UserDict.__setitem__ method in order to validate
the key/value and also to set the turns attribute"""
if not self.mutable:
raise ValueError('Cannot update cache unless mutable is True.')
if not self._is_valid_key(key):
raise ValueError(
'Key must be a tuple of 2 deterministic axelrod Player classes and an integer')
if not self._is_valid_value(value):
raise ValueError(
'Value must be a list with length equal to turns attribute')
UserDict.__setitem__(self, key, value)
def _is_valid_key(self, key):
"""Validate a proposed dictionary key
Parameters
----------
key : object
Returns
-------
boolean
"""
# The key should be a tuple
if not isinstance(key, tuple):
return False
# The tuple should be a triplet
if len(key) != 3:
return False
        # The triplet should be a pair of axelrod.Player subclasses and an
        # integer
try:
if not (
issubclass(key[0], Player) and
issubclass(key[1], Player) and
isinstance(key[2], int)
):
return False
except TypeError:
return False
# Each Player class should be deterministic
if key[0].classifier['stochastic'] or key[1].classifier['stochastic']:
return False
return True
def _is_valid_value(self, value):
"""Validate a proposed dictionary value
Parameters
----------
value : object
Returns
-------
boolean
"""
# The value should be a list
if not isinstance(value, list):
return False
return True
def save(self, file_name):
"""Serialise the cache dictionary to a file
Parameters
----------
file_name : string
File path to which the cache should be saved
"""
with open(file_name, 'wb') as io:
pickle.dump(self.data, io)
return True
def load(self, file_name):
"""Load a previously saved cache into the dictionary
Parameters
----------
file_name : string
Path to a previously saved cache file
"""
with open(file_name, 'rb') as io:
data = pickle.load(io)
if isinstance(data, dict):
self.data = data
else:
raise ValueError(
'Cache file exists but is not the correct format. Try deleting and re-building the cache file.')
return True
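# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the library). It assumes the standard
# axelrod players are importable; the cache file name is hypothetical.
if __name__ == '__main__':
    import axelrod

    cache = DeterministicCache()
    # two deterministic players plus the match length make up a valid key
    key = (axelrod.Cooperator, axelrod.Alternator, 3)
    cache[key] = [('C', 'C'), ('C', 'D'), ('C', 'C')]
    cache.save('example_cache.pkl')
    # reload it later (e.g. in another tournament run)
    restored = DeterministicCache(file_name='example_cache.pkl')
    assert restored[key] == cache[key]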
|
{
"content_hash": "cee1c025348fc7ae2dbe71b8d303e7d2",
"timestamp": "",
"source": "github",
"line_count": 148,
"max_line_length": 112,
"avg_line_length": 29.62162162162162,
"alnum_prop": 0.583257299270073,
"repo_name": "ranjinidas/Axelrod",
"id": "20c3729962027f29db7cf074e8342adfed76f164",
"size": "4384",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "axelrod/deterministic_cache.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "568469"
},
{
"name": "Shell",
"bytes": "82"
}
],
"symlink_target": ""
}
|
from google.cloud import dialogflowcx_v3
def sample_validate_flow():
# Create a client
client = dialogflowcx_v3.FlowsClient()
# Initialize request argument(s)
request = dialogflowcx_v3.ValidateFlowRequest(
name="name_value",
)
# Make the request
response = client.validate_flow(request=request)
# Handle the response
print(response)
# [END dialogflow_v3_generated_Flows_ValidateFlow_sync]
|
{
"content_hash": "7c5d3e351c2e0b96cc44429e8f475bcc",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 55,
"avg_line_length": 23.105263157894736,
"alnum_prop": 0.7015945330296127,
"repo_name": "googleapis/python-dialogflow-cx",
"id": "09835241e698e550e0e2c98c9699485ad506b0e6",
"size": "1822",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "samples/generated_samples/dialogflow_v3_generated_flows_validate_flow_sync.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "10904903"
},
{
"name": "Shell",
"bytes": "30681"
}
],
"symlink_target": ""
}
|
class Error(Exception):
"""Base Cu2Qu exception class for all other errors."""
class ApproxNotFoundError(Error):
def __init__(self, curve):
message = "no approximation found: %s" % curve
super().__init__(message)
self.curve = curve
class UnequalZipLengthsError(Error):
pass
class IncompatibleGlyphsError(Error):
def __init__(self, glyphs):
assert len(glyphs) > 1
self.glyphs = glyphs
names = set(repr(g.name) for g in glyphs)
if len(names) > 1:
self.combined_name = "{%s}" % ", ".join(sorted(names))
else:
self.combined_name = names.pop()
def __repr__(self):
return "<%s %s>" % (type(self).__name__, self.combined_name)
class IncompatibleSegmentNumberError(IncompatibleGlyphsError):
def __str__(self):
return "Glyphs named %s have different number of segments" % (
self.combined_name
)
class IncompatibleSegmentTypesError(IncompatibleGlyphsError):
def __init__(self, glyphs, segments):
IncompatibleGlyphsError.__init__(self, glyphs)
self.segments = segments
def __str__(self):
lines = []
ndigits = len(str(max(self.segments)))
for i, tags in sorted(self.segments.items()):
lines.append(
"%s: (%s)" % (str(i).rjust(ndigits), ", ".join(repr(t) for t in tags))
)
return "Glyphs named %s have incompatible segment types:\n %s" % (
self.combined_name,
"\n ".join(lines),
)
class IncompatibleFontsError(Error):
def __init__(self, glyph_errors):
self.glyph_errors = glyph_errors
def __str__(self):
return "fonts contains incompatible glyphs: %s" % (
", ".join(repr(g) for g in sorted(self.glyph_errors.keys()))
)
|
{
"content_hash": "c46875c6250fbdeec64f5cd71ff7cb41",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 86,
"avg_line_length": 29.725806451612904,
"alnum_prop": 0.5794899620184482,
"repo_name": "googlefonts/fonttools",
"id": "74c4c2271aa4a4c8155c01cf9d196161588e76ba",
"size": "2440",
"binary": false,
"copies": "3",
"ref": "refs/heads/main",
"path": "Lib/fontTools/cu2qu/errors.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "3522"
},
{
"name": "Makefile",
"bytes": "352"
},
{
"name": "Python",
"bytes": "5349590"
}
],
"symlink_target": ""
}
|
"""
Filter support
"""
from nova import loadables
class BaseFilter(object):
"""Base class for all filter classes."""
def _filter_one(self, obj, filter_properties):
"""Return True if it passes the filter, False otherwise.
Override this in a subclass.
"""
return True
def filter_all(self, filter_obj_list, filter_properties):
"""Yield objects that pass the filter.
        Can be overridden in a subclass, if you need to base filtering
decisions on all objects. Otherwise, one can just override
_filter_one() to filter a single object.
"""
for obj in filter_obj_list:
if self._filter_one(obj, filter_properties):
yield obj
class BaseFilterHandler(loadables.BaseLoader):
"""Base class to handle loading filter classes.
This class should be subclassed where one needs to use filters.
"""
def get_filtered_objects(self, filter_classes, objs,
filter_properties):
for filter_cls in filter_classes:
objs = filter_cls().filter_all(objs, filter_properties)
return list(objs)
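# ---------------------------------------------------------------------------
# Illustrative sketch (not part of nova): a concrete filter built on the base
# classes above. The attribute and property names used here are hypothetical.
class EnoughRamFilter(BaseFilter):
    """Passes hosts that advertise at least the requested amount of free RAM."""

    def _filter_one(self, obj, filter_properties):
        requested_mb = filter_properties.get('ram_mb', 0)
        return getattr(obj, 'free_ram_mb', 0) >= requested_mb
# A BaseFilterHandler subclass would then call
#     handler.get_filtered_objects([EnoughRamFilter], hosts, {'ram_mb': 2048})
# to keep only the hosts with enough free RAM.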
|
{
"content_hash": "15bf847027925cae8fb64e3d9fc58223",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 69,
"avg_line_length": 30.05263157894737,
"alnum_prop": 0.6401050788091068,
"repo_name": "yrobla/nova",
"id": "59028a542facdd04767c3bd0a415f85bbf83d387",
"size": "1787",
"binary": false,
"copies": "8",
"ref": "refs/heads/debian/unstable",
"path": "nova/filters.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "16002"
},
{
"name": "JavaScript",
"bytes": "7403"
},
{
"name": "Python",
"bytes": "9162801"
},
{
"name": "Shell",
"bytes": "17067"
}
],
"symlink_target": ""
}
|
"""
Example:
@route(r'/', name='index')
class IndexHandler(tornado.web.RequestHandler):
pass
class Application(tornado.web.Application):
def __init__(self):
handlers = [
# ...
] + Route.routes()
"""
import logging
from tornado.web import url
class Route(object):
_routes = {}
def __init__(self, pattern, kwargs={}, name=None, host='.*$'):
self.pattern = pattern
self.kwargs = kwargs
self.name = name
self.host = host
def __call__(self, handler_class):
logging.debug('Discover URLSpec with pattern `%s`, handler_class is `%s`' % (self.pattern, handler_class))
spec = url(self.pattern, handler_class, self.kwargs, name=self.name)
self._routes.setdefault(self.host, []).append(spec)
return handler_class
@classmethod
def routes(cls, application=None):
if application:
for host, handlers in cls._routes.items():
application.add_handlers(host, handlers)
else:
return reduce(lambda x, y: x + y, cls._routes.values()) if cls._routes else []
@classmethod
def url_for(cls, name, *args):
named_handlers = dict([(spec.name, spec) for spec in cls.routes() if spec.name])
if name in named_handlers:
return named_handlers[name].reverse(*args)
raise KeyError("%s not found in named urls" % name)
route = Route
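# ---------------------------------------------------------------------------
# Illustrative sketch (not part of workin): registering a handler through the
# decorator above and reversing its URL by name. The handler class, pattern
# and route name are hypothetical.
if __name__ == '__main__':
    import tornado.web

    @route(r'/users/(\d+)', name='user-detail')
    class UserDemoHandler(tornado.web.RequestHandler):
        pass

    print(Route.url_for('user-detail', 42))  # -> /users/42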
|
{
"content_hash": "d28d875bcf2e664b54c27336157dfe53",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 114,
"avg_line_length": 29.24,
"alnum_prop": 0.585499316005472,
"repo_name": "knownsec/workin",
"id": "c0008f91d4320bc1944b6dbc1ca4a71952ca185f",
"size": "1502",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "workin/routes.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "47912"
},
{
"name": "JavaScript",
"bytes": "281811"
},
{
"name": "Python",
"bytes": "102019"
},
{
"name": "Shell",
"bytes": "62"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals, division, absolute_import
import os
from flexget import plugin
from flexget.entry import Entry
from flexget.event import event
from flexget.utils import json
try:
from flexget.plugins.api_tvdb import lookup_series
except ImportError:
raise plugin.DependencyError(issued_by='uoccin', missing='api_tvdb',
message='uoccin requires the `api_tvdb` plugin')
def load_uoccin_data(path):
udata = {}
ufile = os.path.join(path, 'uoccin.json')
if os.path.exists(ufile):
try:
with open(ufile, 'r') as f:
udata = json.load(f)
except Exception as err:
raise plugin.PluginError('error reading %s: %s' % (ufile, err))
udata.setdefault('movies', {})
udata.setdefault('series', {})
return udata
class UoccinEmit(object):
schema = {
'type': 'object',
'properties': {
'path': {'type': 'string', 'format': 'path'},
'type': {'type': 'string', 'enum': ['movies', 'series', 'episodes']},
'tags': {'type': 'array', 'items': {'type': 'string'}, 'minItems': 1},
'check_tags': {'type': 'string', 'enum': ['any', 'all', 'none'], 'default': 'any'},
'ep_flags': {'type': 'string', 'enum': ['watched', 'collected'], 'default': 'watched'},
},
'required': ['path', 'type'],
'additionalProperties': False
}
def on_task_input(self, task, config):
"""Creates an entry for each item in your uoccin watchlist.
Example::
uoccin_emit:
path: /path/to/gdrive/uoccin
type: series
tags: [ 'favorite', 'hires' ]
check_tags: all
Options path and type are required while the others are for filtering:
- 'any' will include all the items marked with one or more tags in the list
- 'all' will only include the items marked with all the listed tags
- 'none' will only include the items not marked with any of the listed tags.
The entries created will have a valid imdb/tvdb url and id.
"""
imdb_lookup = plugin.get_plugin_by_name('imdb_lookup').instance
udata = load_uoccin_data(config['path'])
section = udata['movies'] if config['type'] == 'movies' else udata['series']
entries = []
for eid, itm in section.items():
if not itm['watchlist']:
continue
if 'tags' in config:
n = len(set(config['tags']) & set(itm.get('tags', [])))
if config['check_tags'] == 'any' and n <= 0:
continue
if config['check_tags'] == 'all' and n != len(config['tags']):
continue
if config['check_tags'] == 'none' and n > 0:
continue
if config['type'] == 'movies':
entry = Entry()
entry['url'] = 'http://www.imdb.com/title/' + eid
entry['imdb_id'] = eid
if itm['name'] != 'N/A':
entry['title'] = itm['name']
else:
try:
imdb_lookup.lookup(entry)
except plugin.PluginError as e:
self.log.trace('entry %s imdb failed (%s)' % (entry['imdb_id'], e.value))
continue
entry['title'] = entry.get('imdb_name')
if 'tags' in itm:
entry['uoccin_tags'] = itm['tags']
if entry.isvalid():
entries.append(entry)
else:
self.log.debug('Invalid entry created? %s' % entry)
else:
sname = itm['name']
try:
sname = lookup_series(tvdb_id=eid).seriesname
except LookupError:
self.log.warning('Unable to lookup series %s from tvdb, using raw name.' % eid)
surl = 'http://thetvdb.com/?tab=series&id=' + eid
if config['type'] == 'series':
entry = Entry()
entry['url'] = surl
entry['title'] = sname
entry['tvdb_id'] = eid
if 'tags' in itm:
entry['uoccin_tags'] = itm['tags']
if entry.isvalid():
entries.append(entry)
else:
self.log.debug('Invalid entry created? %s' % entry)
elif config['ep_flags'] == 'collected':
slist = itm.get('collected', {})
for sno in slist.keys():
for eno in slist[sno]:
entry = Entry()
entry['url'] = surl
entry['title'] = '%s S%02dE%02d' % (sname, int(sno), int(eno))
entry['tvdb_id'] = eid
if entry.isvalid():
entries.append(entry)
else:
self.log.debug('Invalid entry created? %s' % entry)
else:
slist = itm.get('watched', {})
for sno in slist.keys():
for eno in slist[sno]:
entry = Entry()
entry['url'] = surl
                            entry['title'] = '%s S%02dE%02d' % (sname, int(sno), int(eno))
entry['tvdb_id'] = eid
if entry.isvalid():
entries.append(entry)
else:
self.log.debug('Invalid entry created? %s' % entry)
entries.sort(key=lambda x: x['title'])
return entries
@event('plugin.register')
def register_plugin():
plugin.register(UoccinEmit, 'uoccin_emit', api_ver=2)
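# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the plugin): the check_tags semantics applied
# in on_task_input() above, shown on toy data. n is the number of configured
# tags that also appear on the watchlist item.
def _tag_match_demo(config_tags, item_tags, check_tags):
    n = len(set(config_tags) & set(item_tags))
    if check_tags == 'any':
        return n > 0
    if check_tags == 'all':
        return n == len(config_tags)
    return n == 0  # 'none'
# _tag_match_demo(['favorite', 'hires'], ['favorite'], 'any')  -> True
# _tag_match_demo(['favorite', 'hires'], ['favorite'], 'all')  -> False
# _tag_match_demo(['favorite', 'hires'], ['favorite'], 'none') -> False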
|
{
"content_hash": "5630c1ec97aa5e858a319146296d6c27",
"timestamp": "",
"source": "github",
"line_count": 145,
"max_line_length": 99,
"avg_line_length": 41.89655172413793,
"alnum_prop": 0.46139917695473254,
"repo_name": "antivirtel/Flexget",
"id": "82248978c251031b73f1e61041f48c54a4c967a5",
"size": "6075",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "flexget/plugins/input/uoccin_emit.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "56725"
},
{
"name": "HTML",
"bytes": "35670"
},
{
"name": "JavaScript",
"bytes": "455222"
},
{
"name": "Python",
"bytes": "2178143"
}
],
"symlink_target": ""
}
|
"""Routines for IPv4 and IPv6 addresses, subnets and ranges."""
import sys as _sys
import re as _re
from netaddr.core import AddrFormatError, AddrConversionError, num_bits, \
DictDotLookup, NOHOST, N, INET_PTON, P, ZEROFILL, Z
from netaddr.strategy import ipv4 as _ipv4, ipv6 as _ipv6
from netaddr.compat import _sys_maxint, _iter_range, _is_str, _int_type, \
_str_type
#-----------------------------------------------------------------------------
# Pre-compiled regexen used by cidr_merge() function.
RE_CIDR_ADJACENT = _re.compile(r'^([01]+)0 \1[1]$')
RE_CIDR_WITHIN = _re.compile(r'^([01]+) \1[10]+$')
RE_VALID_CIDR_BITS = _re.compile('^[01]+$')
#-----------------------------------------------------------------------------
class BaseIP(object):
"""
An abstract base class for common operations shared between various IP
related subclasses.
"""
__slots__ = ('_value', '_module')
def __init__(self):
"""Constructor."""
self._value = None
self._module = None
def _set_value(self, value):
if not isinstance(value, _int_type):
raise TypeError('int argument expected, not %s' % type(value))
if not 0 <= value <= self._module.max_int:
raise AddrFormatError('value out of bounds for an %s address!' \
% self._module.family_name)
self._value = value
value = property(lambda self: self._value, _set_value,
doc='a positive integer representing the value of IP address/subnet.')
def key(self):
"""
:return: a key tuple that uniquely identifies this IP address.
"""
return NotImplemented
def sort_key(self):
"""
:return: A key tuple used to compare and sort this `IPAddress`
correctly.
"""
return NotImplemented
def __hash__(self):
"""
        :return: A hash value uniquely identifying this IP object.
"""
return hash(self.key())
def __eq__(self, other):
"""
:param other: an `IPAddress` or `IPNetwork` object.
:return: ``True`` if this `IPAddress` or `IPNetwork` object is
equivalent to ``other``, ``False`` otherwise.
"""
try:
return self.key() == other.key()
except (AttributeError, TypeError):
return NotImplemented
def __ne__(self, other):
"""
:param other: an `IPAddress` or `IPNetwork` object.
:return: ``True`` if this `IPAddress` or `IPNetwork` object is
not equivalent to ``other``, ``False`` otherwise.
"""
try:
return self.key() != other.key()
except (AttributeError, TypeError):
return NotImplemented
def __lt__(self, other):
"""
:param other: an `IPAddress` or `IPNetwork` object.
:return: ``True`` if this `IPAddress` or `IPNetwork` object is
less than ``other``, ``False`` otherwise.
"""
try:
return self.sort_key() < other.sort_key()
except (AttributeError, TypeError):
return NotImplemented
def __le__(self, other):
"""
:param other: an `IPAddress` or `IPNetwork` object.
:return: ``True`` if this `IPAddress` or `IPNetwork` object is
less than or equal to ``other``, ``False`` otherwise.
"""
try:
return self.sort_key() <= other.sort_key()
except (AttributeError, TypeError):
return NotImplemented
def __gt__(self, other):
"""
:param other: an `IPAddress` or `IPNetwork` object.
:return: ``True`` if this `IPAddress` or `IPNetwork` object is
greater than ``other``, ``False`` otherwise.
"""
try:
return self.sort_key() > other.sort_key()
except (AttributeError, TypeError):
return NotImplemented
def __ge__(self, other):
"""
:param other: an `IPAddress` or `IPNetwork` object.
:return: ``True`` if this `IPAddress` or `IPNetwork` object is
greater than or equal to ``other``, ``False`` otherwise.
"""
try:
return self.sort_key() >= other.sort_key()
except (AttributeError, TypeError):
return NotImplemented
def is_unicast(self):
""":return: ``True`` if this IP is unicast, ``False`` otherwise"""
return not self.is_multicast()
def is_multicast(self):
""":return: ``True`` if this IP is multicast, ``False`` otherwise"""
if self._module == _ipv4:
return self in IPV4_MULTICAST
elif self._module == _ipv6:
return self in IPV6_MULTICAST
def is_loopback(self):
"""
:return: ``True`` if this IP is loopback address (not for network
transmission), ``False`` otherwise.
References: RFC 3330 and 4291.
"""
if self._module.version == 4:
return self in IPV4_LOOPBACK
elif self._module.version == 6:
return self == IPV6_LOOPBACK
def is_private(self):
"""
:return: ``True`` if this IP is for internal/private use only
(i.e. non-public), ``False`` otherwise. Reference: RFCs 1918,
3330, 4193, 3879 and 2365.
"""
if self._module.version == 4:
for cidr in IPV4_PRIVATE:
if self in cidr:
return True
elif self._module.version == 6:
for cidr in IPV6_PRIVATE:
if self in cidr:
return True
if self.is_link_local():
return True
return False
def is_link_local(self):
"""
:return: ``True`` if this IP is link-local address ``False`` otherwise.
Reference: RFCs 3927 and 4291.
"""
if self._module.version == 4:
return self in IPV4_LINK_LOCAL
elif self._module.version == 6:
return self in IPV6_LINK_LOCAL
def is_reserved(self):
"""
:return: ``True`` if this IP is in IANA reserved range, ``False``
otherwise. Reference: RFCs 3330 and 3171.
"""
if self._module.version == 4:
for cidr in IPV4_RESERVED:
if self in cidr:
return True
elif self._module.version == 6:
for cidr in IPV6_RESERVED:
if self in cidr:
return True
return False
def is_ipv4_mapped(self):
"""
        :return: ``True`` if this IP is an IPv4-mapped IPv6 address, ``False``
otherwise.
"""
return self._module.version == 6 and (self._value >> 32) == 0xffff
def is_ipv4_compat(self):
"""
        :return: ``True`` if this IP is an IPv4-compatible IPv6 address, ``False``
otherwise.
"""
return self._module.version == 6 and (self._value >> 32) == 0
@property
def info(self):
"""
A record dict containing IANA registration details for this IP address
if available, None otherwise.
"""
# Lazy loading of IANA data structures.
from netaddr.ip.iana import query
return DictDotLookup(query(self))
@property
def version(self):
"""the IP protocol version represented by this IP object."""
return self._module.version
#-----------------------------------------------------------------------------
class IPAddress(BaseIP):
"""
An individual IPv4 or IPv6 address without a net mask or subnet prefix.
To support these and other network based operations, see `IPNetwork`.
"""
__slots__ = ()
def __init__(self, addr, version=None, flags=0):
"""
Constructor.
:param addr: an IPv4 or IPv6 address which may be represented in an
accepted string format, as an unsigned integer or as another
IPAddress object (copy construction).
:param version: (optional) optimizes version detection if specified
and distinguishes between IPv4 and IPv6 for addresses with an
equivalent integer value.
:param flags: (optional) decides which rules are applied to the
interpretation of the addr value. Supported constants are
INET_PTON and ZEROFILL. See the netaddr.core docs for further
details.
"""
super(IPAddress, self).__init__()
if isinstance(addr, BaseIP):
# Copy constructor.
if version is not None and version != addr._module.version:
raise ValueError('cannot switch IP versions using '
'copy constructor!')
self._value = addr._value
self._module = addr._module
else:
# Explicit IP address version.
if version is not None:
if version == 4:
self._module = _ipv4
elif version == 6:
self._module = _ipv6
else:
raise ValueError('%r is an invalid IP version!' % version)
has_upper = hasattr(addr, 'upper')
if has_upper and '/' in addr:
raise ValueError('%s() does not support netmasks or subnet' \
' prefixes! See documentation for details.'
% self.__class__.__name__)
if self._module is None:
# IP version is implicit, detect it from addr.
if isinstance(addr, _int_type):
try:
if 0 <= int(addr) <= _ipv4.max_int:
self._value = int(addr)
self._module = _ipv4
elif _ipv4.max_int < int(addr) <= _ipv6.max_int:
self._value = int(addr)
self._module = _ipv6
except ValueError:
pass
else:
for module in _ipv4, _ipv6:
try:
self._value = module.str_to_int(addr, flags)
except:
continue
else:
self._module = module
break
if self._module is None:
raise AddrFormatError('failed to detect a valid IP ' \
'address from %r' % addr)
else:
# IP version is explicit.
if has_upper:
try:
self._value = self._module.str_to_int(addr, flags)
except AddrFormatError:
raise AddrFormatError('base address %r is not IPv%d'
% (addr, self._module.version))
else:
if 0 <= int(addr) <= self._module.max_int:
self._value = int(addr)
else:
raise AddrFormatError('bad address format: %r' % addr)
def __getstate__(self):
""":returns: Pickled state of an `IPAddress` object."""
return self._value, self._module.version
def __setstate__(self, state):
"""
:param state: data used to unpickle a pickled `IPAddress` object.
"""
value, version = state
self._value = value
if version == 4:
self._module = _ipv4
elif version == 6:
self._module = _ipv6
else:
raise ValueError('unpickling failed for object state: %s' \
% str(state))
def netmask_bits(self):
"""
        :return: If this IP is a valid netmask, the number of non-zero
            bits is returned, otherwise it returns the width in bits for
            the IP address version.
"""
if not self.is_netmask():
return self._module.width
i_val = self._value
numbits = 0
while i_val > 0:
if i_val & 1 == 1:
break
numbits += 1
i_val >>= 1
mask_length = self._module.width - numbits
if not 0 <= mask_length <= self._module.width:
raise ValueError('Unexpected mask length %d for address type!' \
% mask_length)
return mask_length
def is_hostmask(self):
"""
        :return: ``True`` if this IP address is a host mask, ``False`` otherwise.
"""
int_val = self._value + 1
return (int_val & (int_val - 1) == 0)
def is_netmask(self):
"""
        :return: ``True`` if this IP address is a network mask, ``False`` otherwise.
"""
int_val = (self._value ^ self._module.max_int) + 1
return (int_val & (int_val - 1) == 0)
def __iadd__(self, num):
"""
Increases the numerical value of this IPAddress by num.
An IndexError is raised if result exceeds maximum IP address value or
is less than zero.
:param num: size of IP address increment.
"""
new_value = self._value + num
if 0 <= new_value <= self._module.max_int:
self._value = new_value
return self
raise IndexError('result outside valid IP address boundary!')
def __isub__(self, num):
"""
Decreases the numerical value of this IPAddress by num.
An IndexError is raised if result is less than zero or exceeds maximum
IP address value.
:param num: size of IP address decrement.
"""
new_value = self._value - num
if 0 <= new_value <= self._module.max_int:
self._value = new_value
return self
raise IndexError('result outside valid IP address boundary!')
def __add__(self, num):
"""
Add the numerical value of this IP address to num and provide the
result as a new IPAddress object.
:param num: size of IP address increase.
:return: a new IPAddress object with its numerical value increased by num.
"""
new_value = self._value + num
if 0 <= new_value <= self._module.max_int:
return self.__class__(new_value, self._module.version)
raise IndexError('result outside valid IP address boundary!')
__radd__ = __add__
def __sub__(self, num):
"""
Subtract the numerical value of this IP address from num providing
the result as a new IPAddress object.
:param num: size of IP address decrease.
:return: a new IPAddress object with its numerical value decreased by num.
"""
new_value = self._value - num
if 0 <= new_value <= self._module.max_int:
return self.__class__(new_value, self._module.version)
raise IndexError('result outside valid IP address boundary!')
def __rsub__(self, num):
"""
Subtract num (lvalue) from the numerical value of this IP address
(rvalue) providing the result as a new IPAddress object.
:param num: size of IP address decrease.
:return: a new IPAddress object with its numerical value decreased by num.
"""
new_value = num - self._value
if 0 <= new_value <= self._module.max_int:
return self.__class__(new_value, self._module.version)
raise IndexError('result outside valid IP address boundary!')
def key(self):
"""
:return: a key tuple that uniquely identifies this IP address.
"""
# NB - we return the value here twice because this IP Address may
# be sorted with a list of networks and it should still end up
# in the expected order.
return self._module.version, self._value
def sort_key(self):
""":return: A key tuple used to compare and sort this `IPAddress` correctly."""
return self._module.version, self._value, self._module.width
def __int__(self):
""":return: the value of this IP address as an unsigned integer"""
return self._value
def __long__(self):
""":return: the value of this IP address as an unsigned integer"""
return self._value
def __oct__(self):
""":return: an octal string representation of this IP address."""
# Python 2.x
if self._value == 0:
return '0'
return '0%o' % self._value
def __hex__(self):
""":return: a hexadecimal string representation of this IP address."""
# Python 2.x
return '0x%x' % self._value
def __index__(self):
"""
:return: return the integer value of this IP address when called by \
hex(), oct() or bin().
"""
# Python 3.x
return self._value
def bits(self, word_sep=None):
"""
:param word_sep: (optional) the separator to insert between words.
Default: None - use default separator for address type.
:return: the value of this IP address as a binary digit string."""
return self._module.int_to_bits(self._value, word_sep)
@property
def packed(self):
"""The value of this IP address as a packed binary string."""
return self._module.int_to_packed(self._value)
@property
def words(self):
"""
A list of unsigned integer words (octets for IPv4, hextets for IPv6)
found in this IP address.
"""
return self._module.int_to_words(self._value)
@property
def bin(self):
"""
The value of this IP address in standard Python binary
representational form (0bxxx). A back port of the format provided by
the builtin bin() function found in Python 2.6.x and higher.
"""
return self._module.int_to_bin(self._value)
@property
def reverse_dns(self):
"""The reverse DNS lookup record for this IP address"""
return self._module.int_to_arpa(self._value)
def ipv4(self):
"""
Raises an `AddrConversionError` if IPv6 address cannot be converted
to IPv4.
:return: A numerically equivalent version 4 `IPAddress` object.
"""
ip = None
klass = self.__class__
if self._module.version == 4:
ip = klass(self._value, 4)
elif self._module.version == 6:
if 0 <= self._value <= _ipv4.max_int:
ip = klass(self._value, 4)
elif _ipv4.max_int <= self._value <= 0xffffffffffff:
ip = klass(self._value - 0xffff00000000, 4)
else:
raise AddrConversionError('IPv6 address %s unsuitable for ' \
'conversion to IPv4!' % self)
return ip
def ipv6(self, ipv4_compatible=False):
"""
.. note:: The IPv4-mapped IPv6 address format is now considered \
deprecated. See RFC 4291 or later for details.
:param ipv4_compatible: If ``True``, returns an IPv4-compatible address
(::x.x.x.x); otherwise returns an IPv4-mapped (::ffff:x.x.x.x)
address. Default: False (IPv4-mapped).
:return: A numerically equivalent version 6 `IPAddress` object.
"""
ip = None
klass = self.__class__
if self._module.version == 6:
if ipv4_compatible and \
(0xffff00000000 <= self._value <= 0xffffffffffff):
ip = klass(self._value - 0xffff00000000, 6)
else:
ip = klass(self._value, 6)
elif self._module.version == 4:
# IPv4-Compatible IPv6 address
ip = klass(self._value, 6)
if not ipv4_compatible:
# IPv4-Mapped IPv6 address
ip = klass(0xffff00000000 + self._value, 6)
return ip
def format(self, dialect=None):
"""
Only relevant for IPv6 addresses. Has no effect for IPv4.
:param dialect: An ipv6_* dialect class.
:return: an alternate string representation for this IP address.
"""
if dialect is not None:
if not hasattr(dialect, 'word_fmt'):
raise TypeError(
'custom dialects should subclass ipv6_verbose!')
return self._module.int_to_str(self._value, dialect=dialect)
def __or__(self, other):
"""
:param other: An `IPAddress` object (or other int-like object).
:return: bitwise OR (x | y) between the integer value of this IP
address and ``other``.
"""
return self.__class__(self._value | int(other), self._module.version)
def __and__(self, other):
"""
:param other: An `IPAddress` object (or other int-like object).
:return: bitwise AND (x & y) between the integer value of this IP
address and ``other``.
"""
return self.__class__(self._value & int(other), self._module.version)
def __xor__(self, other):
"""
:param other: An `IPAddress` object (or other int-like object).
:return: bitwise exclusive OR (x ^ y) between the integer value of
this IP address and ``other``.
"""
return self.__class__(self._value ^ int(other), self._module.version)
def __lshift__(self, numbits):
"""
:param numbits: size of bitwise shift.
:return: an `IPAddress` object based on this one with its integer
value left shifted by ``numbits``.
"""
return self.__class__(self._value << numbits, self._module.version)
def __rshift__(self, numbits):
"""
:param numbits: size of bitwise shift.
:return: an `IPAddress` object based on this one with its integer
value right shifted by ``numbits``.
"""
return self.__class__(self._value >> numbits, self._module.version)
def __nonzero__(self):
""":return: ``True`` if the numerical value of this IP address is not \
zero, ``False`` otherwise."""
# Python 2.x.
return bool(self._value)
__bool__ = __nonzero__ # Python 3.x.
def __str__(self):
""":return: IP address in presentational format"""
return self._module.int_to_str(self._value)
def __repr__(self):
""":return: Python statement to create an equivalent object"""
return "%s('%s')" % (self.__class__.__name__, self)
#-----------------------------------------------------------------------------
class IPListMixin(object):
"""
A mixin class providing shared list-like functionality to classes
representing groups of IP addresses.
"""
def __iter__(self):
"""
:return: An iterator providing access to all `IPAddress` objects
within range represented by this ranged IP object.
"""
start_ip = IPAddress(self.first, self._module.version)
end_ip = IPAddress(self.last, self._module.version)
return iter_iprange(start_ip, end_ip)
@property
def size(self):
"""
The total number of IP addresses within this ranged IP object.
"""
return int(self.last - self.first + 1)
def __len__(self):
"""
:return: the number of IP addresses in this ranged IP object. Raises
an `IndexError` if size > system max int (a Python 2.x
limitation). Use the .size property for subnets of any size.
"""
size = self.size
if size > _sys_maxint:
raise IndexError(("range contains more than %d (index size max) "
"IP addresses! Use the .size property instead." % _sys_maxint))
return size
def __getitem__(self, index):
"""
:return: The IP address(es) in this `IPNetwork` object referenced by
index or slice. As slicing can produce large sequences of objects
an iterator is returned instead of the more usual `list`.
"""
item = None
if hasattr(index, 'indices'):
if self._module.version == 6:
raise TypeError('IPv6 slices are not supported!')
(start, stop, step) = index.indices(self.size)
if (start + step < 0) or (step > stop):
# step value exceeds start and stop boundaries.
item = iter([IPAddress(self.first, self._module.version)])
else:
start_ip = IPAddress(self.first + start, self._module.version)
end_ip = IPAddress(self.first + stop - step, self._module.version)
item = iter_iprange(start_ip, end_ip, step)
else:
try:
index = int(index)
if (- self.size) <= index < 0:
# negative index.
item = IPAddress(self.last + index + 1, self._module.version)
elif 0 <= index <= (self.size - 1):
# Positive index or zero index.
item = IPAddress(self.first + index, self._module.version)
else:
raise IndexError('index out of range for address range size!')
except ValueError:
raise TypeError('unsupported index type %r!' % index)
return item
def __contains__(self, other):
"""
:param other: an `IPAddress` or ranged IP object.
:return: ``True`` if other falls within the boundary of this one,
``False`` otherwise.
"""
if isinstance(other, BaseIP):
if self._module.version != other._module.version:
return False
if isinstance(other, IPAddress):
return other._value >= self.first and other._value <= self.last
# Assume that we (and the other) provide .first and .last.
return other.first >= self.first and other.last <= self.last
# Whatever it is, try to interpret it as IPAddress.
return IPAddress(other) in self
def __nonzero__(self):
"""
Ranged IP objects always represent a sequence of at least one IP
address and are therefore always True in the boolean context.
"""
# Python 2.x.
return True
__bool__ = __nonzero__ # Python 3.x.
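# Illustrative sketch of the list-like behaviour IPListMixin provides
# (added for exposition; not part of the original module). IPNetwork,
# defined further below, mixes this class in:
#
#     >>> net = IPNetwork('192.0.2.0/29')
#     >>> len(net)
#     8
#     >>> net[0], net[-1]
#     (IPAddress('192.0.2.0'), IPAddress('192.0.2.7'))
#     >>> list(net[0:4:2])
#     [IPAddress('192.0.2.0'), IPAddress('192.0.2.2')]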
#-----------------------------------------------------------------------------
def parse_ip_network(module, addr, implicit_prefix=False, flags=0):
if isinstance(addr, tuple):
# CIDR integer tuple
try:
val1, val2 = addr
except ValueError:
raise AddrFormatError('invalid %s tuple!' % module.family_name)
if 0 <= val1 <= module.max_int:
value = val1
if 0 <= val2 <= module.width:
prefixlen = val2
else:
raise AddrFormatError('invalid prefix for %s tuple!' \
% module.family_name)
else:
raise AddrFormatError('invalid address value for %s tuple!' \
% module.family_name)
elif isinstance(addr, _str_type):
# CIDR-like string subnet
if implicit_prefix:
#TODO: deprecate this option in netaddr 0.8.x
addr = cidr_abbrev_to_verbose(addr)
try:
if '/' in addr:
val1, val2 = addr.split('/', 1)
else:
val1 = addr
val2 = None
except ValueError:
raise AddrFormatError('invalid IPNetwork address %s!' % addr)
try:
ip = IPAddress(val1, module.version, flags=INET_PTON)
except AddrFormatError:
if module.version == 4:
# Try a partial IPv4 network address...
expanded_addr = _ipv4.expand_partial_address(val1)
ip = IPAddress(expanded_addr, module.version, flags=INET_PTON)
else:
raise AddrFormatError('invalid IPNetwork address %s!' % addr)
value = ip._value
try:
# Integer CIDR prefix.
prefixlen = int(val2)
except TypeError:
if val2 is None:
# No prefix was specified.
prefixlen = module.width
except ValueError:
# Not an integer prefix, try a netmask/hostmask prefix.
mask = IPAddress(val2, module.version, flags=INET_PTON)
if mask.is_netmask():
prefixlen = module.netmask_to_prefix[mask._value]
elif mask.is_hostmask():
prefixlen = module.hostmask_to_prefix[mask._value]
else:
raise AddrFormatError('addr %r is not a valid IPNetwork!' \
% addr)
if not 0 <= prefixlen <= module.width:
raise AddrFormatError('invalid prefix for %s address!' \
% module.family_name)
else:
raise TypeError('unexpected type %s for addr arg' % type(addr))
if flags & NOHOST:
# Remove host bits.
netmask = module.prefix_to_netmask[prefixlen]
value = value & netmask
return value, prefixlen
#-----------------------------------------------------------------------------
class IPNetwork(BaseIP, IPListMixin):
"""
An IPv4 or IPv6 network or subnet.
A combination of an IP address and a network mask.
Accepts CIDR and several related variants :
a) Standard CIDR::
x.x.x.x/y -> 192.0.2.0/24
x::/y -> fe80::/10
b) Hybrid CIDR format (netmask address instead of prefix), where 'y' \
address represents a valid netmask::
x.x.x.x/y.y.y.y -> 192.0.2.0/255.255.255.0
x::/y:: -> fe80::/ffc0::
c) ACL hybrid CIDR format (hostmask address instead of prefix like \
Cisco's ACL bitmasks), where the 'y' address represents a valid hostmask::
x.x.x.x/y.y.y.y -> 192.0.2.0/0.0.0.255
x::/y:: -> fe80::/3f:ffff:ffff:ffff:ffff:ffff:ffff:ffff
d) Abbreviated CIDR format (as of netaddr 0.7.x this requires the \
optional constructor argument ``implicit_prefix=True``)::
x -> 192
x/y -> 10/8
x.x/y -> 192.168/16
x.x.x/y -> 192.168.0/24
which are equivalent to::
x.0.0.0/y -> 192.0.0.0/24
x.0.0.0/y -> 10.0.0.0/8
x.x.0.0/y -> 192.168.0.0/16
x.x.x.0/y -> 192.168.0.0/24
"""
__slots__ = ('_prefixlen',)
def __init__(self, addr, implicit_prefix=False, version=None, flags=0):
"""
Constructor.
:param addr: an IPv4 or IPv6 address with optional CIDR prefix,
netmask or hostmask. May be an IP address in presentation
(string) format, a tuple containing an integer address and a
network prefix, or another IPAddress/IPNetwork object (copy
construction).
:param implicit_prefix: (optional) if True, the constructor uses
classful IPv4 rules to select a default prefix when one is not
provided. If False it uses the length of the IP address version.
(default: False)
:param version: (optional) optimizes version detection if specified
and distinguishes between IPv4 and IPv6 for addresses with an
equivalent integer value.
:param flags: (optional) decides which rules are applied to the
interpretation of the addr value. Currently only supports the
NOHOST option. See the netaddr.core docs for further details.
"""
super(IPNetwork, self).__init__()
value, prefixlen, module = None, None, None
if hasattr(addr, '_prefixlen'):
# IPNetwork object copy constructor
value = addr._value
module = addr._module
prefixlen = addr._prefixlen
elif hasattr(addr, '_value'):
# IPAddress object copy constructor
value = addr._value
module = addr._module
prefixlen = module.width
elif version == 4:
value, prefixlen = parse_ip_network(_ipv4, addr,
implicit_prefix=implicit_prefix, flags=flags)
module = _ipv4
elif version == 6:
value, prefixlen = parse_ip_network(_ipv6, addr,
implicit_prefix=implicit_prefix, flags=flags)
module = _ipv6
else:
if version is not None:
raise ValueError('%r is an invalid IP version!' % version)
try:
module = _ipv4
value, prefixlen = parse_ip_network(module, addr,
implicit_prefix, flags)
except AddrFormatError:
try:
module = _ipv6
value, prefixlen = parse_ip_network(module, addr,
implicit_prefix, flags)
except AddrFormatError:
pass
if value is None:
raise AddrFormatError('invalid IPNetwork %s' % addr)
self._value = value
self._prefixlen = prefixlen
self._module = module
def __getstate__(self):
""":return: Pickled state of an `IPNetwork` object."""
return self._value, self._prefixlen, self._module.version
def __setstate__(self, state):
"""
:param state: data used to unpickle a pickled `IPNetwork` object.
"""
value, prefixlen, version = state
self._value = value
if version == 4:
self._module = _ipv4
elif version == 6:
self._module = _ipv6
else:
raise ValueError('unpickling failed for object state %s' \
% str(state))
if 0 <= prefixlen <= self._module.width:
self._prefixlen = prefixlen
else:
raise ValueError('unpickling failed for object state %s' \
% str(state))
def _set_prefixlen(self, value):
if not isinstance(value, _int_type):
raise TypeError('int argument expected, not %s' % type(value))
if not 0 <= value <= self._module.width:
raise AddrFormatError('invalid prefix for an %s address!' \
% self._module.family_name)
self._prefixlen = value
prefixlen = property(lambda self: self._prefixlen, _set_prefixlen,
doc='size of the bitmask used to separate the network from the host bits')
@property
def ip(self):
"""
The IP address of this `IPNetwork` object. This may or may not be
the same as the network IP address which varies according to the value
of the CIDR subnet prefix.
"""
return IPAddress(self._value, self._module.version)
@property
def network(self):
"""The network address of this `IPNetwork` object."""
return IPAddress(self._value & self._netmask_int, self._module.version)
@property
def broadcast(self):
"""The broadcast address of this `IPNetwork` object"""
return IPAddress(self._value | self._hostmask_int, self._module.version)
@property
def first(self):
"""
The integer value of the first IP address found within this `IPNetwork`
object.
"""
return self._value & (self._module.max_int ^ self._hostmask_int)
@property
def last(self):
"""
The integer value of the last IP address found within this `IPNetwork`
object.
"""
hostmask = (1 << (self._module.width - self._prefixlen)) - 1
return self._value | hostmask
@property
def netmask(self):
"""The subnet mask of this `IPNetwork` object."""
netmask = self._module.max_int ^ self._hostmask_int
return IPAddress(netmask, self._module.version)
@property
def _netmask_int(self):
"""Same as self.netmask, but in integer format"""
return self._module.max_int ^ self._hostmask_int
@property
def hostmask(self):
"""The host mask of this `IPNetwork` object."""
hostmask = (1 << (self._module.width - self._prefixlen)) - 1
return IPAddress(hostmask, self._module.version)
@property
def _hostmask_int(self):
"""Same as self.hostmask, but in integer format"""
return (1 << (self._module.width - self._prefixlen)) - 1
@property
def cidr(self):
"""
The true CIDR address for this `IPNetwork` object which omits any
host bits to the right of the CIDR subnet prefix.
"""
return IPNetwork(
(self._value & self._netmask_int, self._prefixlen),
version=self._module.version)
def __iadd__(self, num):
"""
Increases the value of this `IPNetwork` object by the current size
multiplied by ``num``.
An `IndexError` is raised if result exceeds maximum IP address value
or is less than zero.
:param num: (optional) number of `IPNetwork` blocks to increment \
this IPNetwork's value by.
"""
new_value = int(self.network) + (self.size * num)
if (new_value + (self.size - 1)) > self._module.max_int:
raise IndexError('increment exceeds address boundary!')
if new_value < 0:
raise IndexError('increment is less than zero!')
self._value = new_value
return self
def __isub__(self, num):
"""
Decreases the value of this `IPNetwork` object by the current size
multiplied by ``num``.
An `IndexError` is raised if result is less than zero or exceeds
maximum IP address value.
:param num: (optional) number of `IPNetwork` blocks to decrement \
this IPNetwork's value by.
"""
new_value = int(self.network) - (self.size * num)
if new_value < 0:
raise IndexError('decrement is less than zero!')
if (new_value + (self.size - 1)) > self._module.max_int:
raise IndexError('decrement exceeds address boundary!')
self._value = new_value
return self
def __contains__(self, other):
"""
:param other: an `IPAddress` or ranged IP object.
:return: ``True`` if other falls within the boundary of this one,
``False`` otherwise.
"""
if isinstance(other, BaseIP):
if self._module.version != other._module.version:
return False
# self_net will contain only the network bits.
shiftwidth = self._module.width - self._prefixlen
self_net = self._value >> shiftwidth
if isinstance(other, IPRange):
# IPRange has no _value.
# (self_net+1)<<shiftwidth is not our last address, but the one
# after the last one.
return ((self_net << shiftwidth) <= other._start._value and
(((self_net + 1) << shiftwidth) > other._end._value))
other_net = other._value >> shiftwidth
if isinstance(other, IPAddress):
return other_net == self_net
if isinstance(other, IPNetwork):
return self_net == other_net and self._prefixlen <= other._prefixlen
# Whatever it is, try to interpret it as IPAddress.
return IPAddress(other) in self
def key(self):
"""
:return: A key tuple used to uniquely identify this `IPNetwork`.
"""
return self._module.version, self.first, self.last
def sort_key(self):
"""
:return: A key tuple used to compare and sort this `IPNetwork` correctly.
"""
net_size_bits = self._prefixlen - 1
first = self._value & (self._module.max_int ^ self._hostmask_int)
host_bits = self._value - first
return self._module.version, first, net_size_bits, host_bits
def ipv4(self):
"""
:return: A numerically equivalent version 4 `IPNetwork` object. \
Raises an `AddrConversionError` if IPv6 address cannot be \
converted to IPv4.
"""
ip = None
klass = self.__class__
if self._module.version == 4:
ip = klass('%s/%d' % (self.ip, self.prefixlen))
elif self._module.version == 6:
if 0 <= self._value <= _ipv4.max_int:
addr = _ipv4.int_to_str(self._value)
ip = klass('%s/%d' % (addr, self.prefixlen - 96))
elif _ipv4.max_int <= self._value <= 0xffffffffffff:
addr = _ipv4.int_to_str(self._value - 0xffff00000000)
ip = klass('%s/%d' % (addr, self.prefixlen - 96))
else:
raise AddrConversionError('IPv6 address %s unsuitable for ' \
'conversion to IPv4!' % self)
return ip
def ipv6(self, ipv4_compatible=False):
"""
.. note:: the IPv4-mapped IPv6 address format is now considered \
deprecated. See RFC 4291 or later for details.
:param ipv4_compatible: If ``True``, returns an IPv4-compatible address
(::x.x.x.x); otherwise returns an IPv4-mapped (::ffff:x.x.x.x)
address. Default: False (IPv4-mapped).
:return: A numerically equivalent version 6 `IPNetwork` object.
"""
ip = None
klass = self.__class__
if self._module.version == 6:
if ipv4_compatible and \
(0xffff00000000 <= self._value <= 0xffffffffffff):
ip = klass((self._value - 0xffff00000000, self._prefixlen),
version=6)
else:
ip = klass((self._value, self._prefixlen), version=6)
elif self._module.version == 4:
if ipv4_compatible:
# IPv4-Compatible IPv6 address
ip = klass((self._value, self._prefixlen + 96), version=6)
else:
# IPv4-Mapped IPv6 address
ip = klass((0xffff00000000 + self._value,
self._prefixlen + 96), version=6)
return ip
def previous(self, step=1):
"""
:param step: the number of IP subnets between this `IPNetwork` object
and the expected subnet. Default: 1 (the previous IP subnet).
:return: The adjacent subnet preceding this `IPNetwork` object.
"""
ip_copy = self.__class__('%s/%d' % (self.network, self.prefixlen),
self._module.version)
ip_copy -= step
return ip_copy
def next(self, step=1):
"""
:param step: the number of IP subnets between this `IPNetwork` object
and the expected subnet. Default: 1 (the next IP subnet).
:return: The adjacent subnet succeeding this `IPNetwork` object.
"""
ip_copy = self.__class__('%s/%d' % (self.network, self.prefixlen),
self._module.version)
ip_copy += step
return ip_copy
def supernet(self, prefixlen=0):
"""
Provides a list of supernets for this `IPNetwork` object between the
size of the current prefix and (if specified) an endpoint prefix.
:param prefixlen: (optional) a CIDR prefix for the maximum supernet.
Default: 0 - returns all possible supernets.
:return: a list of supernet `IPNetwork` objects.
"""
if not 0 <= prefixlen <= self._module.width:
raise ValueError('CIDR prefix /%d invalid for IPv%d!' \
% (prefixlen, self._module.version))
supernets = []
# Use a copy of self as we'll be editing it.
supernet = self.cidr
supernet._prefixlen = prefixlen
while supernet._prefixlen != self._prefixlen:
supernets.append(supernet.cidr)
supernet._prefixlen += 1
return supernets
def subnet(self, prefixlen, count=None, fmt=None):
"""
A generator that divides up this IPNetwork's subnet into smaller
subnets based on a specified CIDR prefix.
:param prefixlen: a CIDR prefix indicating size of subnets to be
returned.
:param count: (optional) number of consecutive IP subnets to be
returned.
:return: an iterator containing IPNetwork subnet objects.
"""
if not 0 <= self.prefixlen <= self._module.width:
raise ValueError('CIDR prefix /%d invalid for IPv%d!' \
% (prefixlen, self._module.version))
if not self.prefixlen <= prefixlen:
# Don't return anything.
raise StopIteration
# Calculate number of subnets to be returned.
width = self._module.width
max_subnets = 2 ** (width - self.prefixlen) // 2 ** (width - prefixlen)
if count is None:
count = max_subnets
if not 1 <= count <= max_subnets:
raise ValueError('count outside of current IP subnet boundary!')
base_subnet = self._module.int_to_str(self.first)
i = 0
while(i < count):
subnet = self.__class__('%s/%d' % (base_subnet, prefixlen),
self._module.version)
subnet.value += (subnet.size * i)
subnet.prefixlen = prefixlen
i += 1
yield subnet
def iter_hosts(self):
"""
A generator that provides all the IP addresses that can be assigned
to hosts within the range of this IP object's subnet.
- for IPv4, the network and broadcast addresses are always excluded. \
Any subnet that contains fewer than 4 IP addresses yields an empty iterator.
- for IPv6, only the unspecified address '::' is excluded from any \
yielded IP addresses.
:return: an IPAddress iterator
"""
it_hosts = iter([])
if self._module.version == 4:
# IPv4 logic.
if self.size >= 4:
it_hosts = iter_iprange(
IPAddress(self.first+1, self._module.version),
IPAddress(self.last-1, self._module.version))
else:
# IPv6 logic.
if self.first == 0:
if self.size != 1:
# Don't return '::'.
it_hosts = iter_iprange(
IPAddress(self.first + 1, self._module.version),
IPAddress(self.last, self._module.version))
else:
it_hosts = iter(self)
return it_hosts
def __str__(self):
""":return: this IPNetwork in CIDR format"""
addr = self._module.int_to_str(self._value)
return "%s/%s" % (addr, self.prefixlen)
def __repr__(self):
""":return: Python statement to create an equivalent object"""
return "%s('%s')" % (self.__class__.__name__, self)
#-----------------------------------------------------------------------------
class IPRange(BaseIP, IPListMixin):
"""
An arbitrary IPv4 or IPv6 address range.
Formed from a lower and upper bound IP address. The upper bound IP cannot
be numerically smaller than the lower bound and the IP version of both
must match.
"""
__slots__ = ('_start', '_end')
def __init__(self, start, end, flags=0):
"""
Constructor.
:param start: an IPv4 or IPv6 address that forms the lower
boundary of this IP range.
:param end: an IPv4 or IPv6 address that forms the upper
boundary of this IP range.
:param flags: (optional) decides which rules are applied to the
interpretation of the start and end values. Supported constants
are INET_PTON and ZEROFILL. See the netaddr.core docs for further
details.
"""
self._start = IPAddress(start, flags=flags)
self._module = self._start._module
self._end = IPAddress(end, self._module.version, flags=flags)
if int(self._start) > int(self._end):
raise AddrFormatError('lower bound IP greater than upper bound!')
def __getstate__(self):
""":return: Pickled state of an `IPRange` object."""
return self._start.value, self._end.value, self._module.version
def __setstate__(self, state):
"""
:param state: data used to unpickle a pickled `IPRange` object.
"""
start, end, version = state
self._start = IPAddress(start, version)
self._module = self._start._module
self._end = IPAddress(end, version)
def __contains__(self, other):
if isinstance(other, BaseIP):
if self._module.version != other._module.version:
return False
if isinstance(other, IPAddress):
return (self._start._value <= other._value and
self._end._value >= other._value)
if isinstance(other, IPRange):
return (self._start._value <= other._start._value and
self._end._value >= other._end._value)
if isinstance(other, IPNetwork):
shiftwidth = other._module.width - other._prefixlen
other_start = (other._value >> shiftwidth) << shiftwidth
# Start of the next network after other
other_next_start = other_start + (1 << shiftwidth)
# other's last address is other_next_start - 1, so containment
# requires self._end to reach at least that far.
return (self._start._value <= other_start and
self._end._value >= other_next_start - 1)
# Whatever it is, try to interpret it as IPAddress.
return IPAddress(other) in self
@property
def first(self):
"""The integer value of first IP address in this `IPRange` object."""
return int(self._start)
@property
def last(self):
"""The integer value of last IP address in this `IPRange` object."""
return int(self._end)
def key(self):
"""
:return: A key tuple used to uniquely identify this `IPRange`.
"""
return self._module.version, self.first, self.last
def sort_key(self):
"""
:return: A key tuple used to compare and sort this `IPRange` correctly.
"""
skey = self._module.width - num_bits(self.size)
return self._module.version, self._start._value, skey
def cidrs(self):
"""
The list of CIDR addresses found within the lower and upper bound
addresses of this `IPRange`.
"""
return iprange_to_cidrs(self._start, self._end)
def __str__(self):
""":return: this `IPRange` in a common representational format."""
return "%s-%s" % (self._start, self._end)
def __repr__(self):
""":return: Python statement to create an equivalent object"""
return "%s('%s', '%s')" % (self.__class__.__name__,
self._start, self._end)
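# Illustrative usage sketch for IPRange (added for exposition; not part of
# the original module):
#
#     >>> r = IPRange('192.0.2.5', '192.0.2.10')
#     >>> r.size
#     6
#     >>> r.cidrs()
#     [IPNetwork('192.0.2.5/32'), IPNetwork('192.0.2.6/31'), IPNetwork('192.0.2.8/31'), IPNetwork('192.0.2.10/32')]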
#-----------------------------------------------------------------------------
def iter_unique_ips(*args):
"""
:param args: A list of IP addresses and subnets passed in as arguments.
:return: A generator that flattens out IP subnets, yielding unique
individual IP addresses (no duplicates).
"""
for cidr in cidr_merge(args):
for ip in cidr:
yield ip
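# Illustrative sketch (added for exposition; not part of the original
# module): overlapping inputs are flattened into unique addresses.
#
#     >>> list(iter_unique_ips('192.0.2.0/31', IPAddress('192.0.2.1')))
#     [IPAddress('192.0.2.0'), IPAddress('192.0.2.1')]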
#-----------------------------------------------------------------------------
def cidr_abbrev_to_verbose(abbrev_cidr):
"""
A function that converts abbreviated IPv4 CIDRs to their more verbose
equivalent.
:param abbrev_cidr: an abbreviated CIDR.
Uses the old-style classful IP address rules to decide on a default
subnet prefix if one is not explicitly provided.
Only supports IPv4 addresses.
Examples ::
10 - 10.0.0.0/8
10/16 - 10.0.0.0/16
128 - 128.0.0.0/16
128/8 - 128.0.0.0/8
192.168 - 192.168.0.0/16
:return: A verbose CIDR from an abbreviated CIDR or old-style classful \
network address, or the original value if it was not recognised as a \
supported abbreviation.
"""
# Internal function that returns a prefix value based on the old IPv4
# classful network scheme that has been superseded (almost) by CIDR.
def classful_prefix(octet):
octet = int(octet)
if not 0 <= octet <= 255:
raise IndexError('Invalid octet: %r!' % octet)
if 0 <= octet <= 127: # Legacy class 'A' classification.
return 8
elif 128 <= octet <= 191: # Legacy class 'B' classification.
return 16
elif 192 <= octet <= 223: # Legacy class 'C' classification.
return 24
elif 224 <= octet <= 239: # Multicast address range.
return 4
return 32 # Default.
start = ''
tokens = []
prefix = None
if _is_str(abbrev_cidr):
if ':' in abbrev_cidr:
return abbrev_cidr
try:
# Single octet partial integer or string address.
i = int(abbrev_cidr)
tokens = [str(i), '0', '0', '0']
return "%s%s/%s" % (start, '.'.join(tokens), classful_prefix(i))
except ValueError:
# Multi octet partial string address with optional prefix.
part_addr = abbrev_cidr
tokens = []
if part_addr == '':
# Not a recognisable format.
return abbrev_cidr
if '/' in part_addr:
(part_addr, prefix) = part_addr.split('/', 1)
# Check prefix for validity.
if prefix is not None:
try:
if not 0 <= int(prefix) <= 32:
raise ValueError('prefixlen in address %r out of range' \
' for IPv4!' % abbrev_cidr)
except ValueError:
return abbrev_cidr
if '.' in part_addr:
tokens = part_addr.split('.')
else:
tokens = [part_addr]
if 1 <= len(tokens) <= 4:
for i in range(4 - len(tokens)):
tokens.append('0')
else:
# Not a recognisable format.
return abbrev_cidr
if prefix is None:
try:
prefix = classful_prefix(tokens[0])
except ValueError:
return abbrev_cidr
return "%s%s/%s" % (start, '.'.join(tokens), prefix)
except TypeError:
pass
except IndexError:
pass
# Not a recognisable format.
return abbrev_cidr
#-----------------------------------------------------------------------------
def cidr_merge(ip_addrs):
"""
A function that accepts an iterable sequence of IP addresses and subnets
merging them into the smallest possible list of CIDRs. It merges adjacent
subnets where possible, those contained within others and also removes
any duplicates.
:param ip_addrs: an iterable sequence of IP addresses and subnets.
:return: a summarized list of `IPNetwork` objects.
"""
if not hasattr(ip_addrs, '__iter__'):
raise ValueError('A sequence or iterator is expected!')
# Start off using set as we'll remove any duplicates at the start.
ipv4_bit_cidrs = set()
ipv6_bit_cidrs = set()
# Convert IP addresses and subnets into their CIDR bit strings.
ipv4_match_all_found = False
ipv6_match_all_found = False
for ip in ip_addrs:
cidr = IPNetwork(ip)
bits = cidr.network.bits(word_sep='')[0:cidr.prefixlen]
if cidr.version == 4:
if bits == '':
# This is the /0 network, that includes all IPv4 addresses.
ipv4_match_all_found = True
ipv4_bit_cidrs = set(['']) # Clear all other IPv4 values.
if not ipv4_match_all_found:
ipv4_bit_cidrs.add(bits)
else:
if bits == '':
# This is the /0 network, that includes all IPv6 addresses.
ipv6_match_all_found = True
ipv6_bit_cidrs = set(['']) # Clear all other IPv6 values.
if not ipv6_match_all_found:
ipv6_bit_cidrs.add(bits)
# Merge binary CIDR addresses where possible.
def _reduce_bit_cidrs(cidrs):
new_cidrs = []
cidrs.sort()
# Multiple passes are required to obtain precise results.
while 1:
finished = True
while (cidrs):
if not new_cidrs:
new_cidrs.append(cidrs.pop(0))
if not cidrs:
break
# lhs and rhs are same size and adjacent.
(new_cidr, subs) = RE_CIDR_ADJACENT.subn(
r'\1', '%s %s' % (new_cidrs[-1], cidrs[0]))
if subs:
# merge lhs with rhs.
new_cidrs[-1] = new_cidr
cidrs.pop(0)
finished = False
else:
# lhs contains rhs.
(new_cidr, subs) = RE_CIDR_WITHIN.subn(
r'\1', '%s %s' % (new_cidrs[-1], cidrs[0]))
if subs:
# keep lhs, discard rhs.
new_cidrs[-1] = new_cidr
cidrs.pop(0)
finished = False
else:
# no matches - accept rhs.
new_cidrs.append(cidrs.pop(0))
if finished:
break
else:
# still seeing matches, reset.
cidrs = new_cidrs
new_cidrs = []
if new_cidrs == ['0', '1']:
# Special case where summary CIDR result is '0.0.0.0/0' or
# '::/0' i.e. the whole IPv4 or IPv6 address space.
new_cidrs = ['']
return new_cidrs
new_cidrs = []
def _bits_to_cidr(bits, module):
if bits == '':
if module.version == 4:
return IPNetwork('0.0.0.0/0', 4)
else:
return IPNetwork('::/0', 6)
if RE_VALID_CIDR_BITS.match(bits) is None:
raise ValueError('%r is an invalid bit string!' % bits)
num_bits = len(bits)
if bits == '':
return IPAddress(module.int_to_str(0), module.version)
else:
bits = bits + '0' * (module.width - num_bits)
return IPNetwork((module.bits_to_int(bits), num_bits),
version=module.version)
# Reduce and format lists of reduced CIDRs.
for bits in _reduce_bit_cidrs(list(ipv4_bit_cidrs)):
new_cidrs.append(_bits_to_cidr(bits, _ipv4))
for bits in _reduce_bit_cidrs(list(ipv6_bit_cidrs)):
new_cidrs.append(_bits_to_cidr(bits, _ipv6))
return new_cidrs
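# Illustrative sketch (added for exposition; not part of the original
# module): adjacent and contained subnets collapse into the smallest
# equivalent list.
#
#     >>> cidr_merge(['192.0.2.0/25', '192.0.2.128/25', '192.0.2.64/26'])
#     [IPNetwork('192.0.2.0/24')]
#     >>> cidr_merge(['10.0.0.0/8', '10.1.2.3'])
#     [IPNetwork('10.0.0.0/8')]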
#-----------------------------------------------------------------------------
def cidr_exclude(target, exclude):
"""
Removes an exclude IP address or subnet from target IP subnet.
:param target: the target IP address or subnet to be divided up.
:param exclude: the IP address or subnet to be removed from target.
:return: list of `IPNetwork` objects remaining after exclusion.
"""
target = IPNetwork(target)
exclude = IPNetwork(exclude)
if exclude.last < target.first:
# Exclude subnet's upper bound address less than target
# subnet's lower bound.
return [target.cidr]
elif target.last < exclude.first:
# Exclude subnet's lower bound address greater than target
# subnet's upper bound.
return [target.cidr]
cidrs = []
new_prefixlen = target.prefixlen + 1
# Some @properties that are expensive to get and don't change below.
target_module_width = target._module.width
if new_prefixlen <= target_module_width:
target_first = target.first
version = exclude.version
i_lower = target_first
i_upper = target_first + (2 ** (target_module_width - new_prefixlen))
lower = IPNetwork((i_lower, new_prefixlen), version=version)
upper = IPNetwork((i_upper, new_prefixlen), version=version)
while exclude.prefixlen >= new_prefixlen:
if exclude in lower:
matched = i_lower
unmatched = i_upper
elif exclude in upper:
matched = i_upper
unmatched = i_lower
else:
# Exclude subnet not within target subnet.
cidrs.append(target.cidr)
break
ip = IPNetwork((unmatched, new_prefixlen), version=version)
cidrs.append(ip)
new_prefixlen += 1
if new_prefixlen > target_module_width:
break
i_lower = matched
i_upper = matched + (2 ** (target_module_width - new_prefixlen))
lower = IPNetwork((i_lower, new_prefixlen), version=version)
upper = IPNetwork((i_upper, new_prefixlen), version=version)
cidrs.sort()
return cidrs
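# Illustrative sketch (added for exposition; not part of the original
# module): excluding a /26 from a /24 leaves the complementary subnets.
#
#     >>> cidr_exclude('192.0.2.0/24', '192.0.2.64/26')
#     [IPNetwork('192.0.2.0/26'), IPNetwork('192.0.2.128/25')]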
#-----------------------------------------------------------------------------
def spanning_cidr(ip_addrs):
"""
Function that accepts a sequence of IP addresses and subnets returning
a single `IPNetwork` subnet that is large enough to span the lower and
upper bound IP addresses with a possible overlap on either end.
:param ip_addrs: sequence of IP addresses and subnets.
:return: a single spanning `IPNetwork` subnet.
"""
sorted_ips = sorted(
[IPNetwork(ip) for ip in ip_addrs])
if not len(sorted_ips) > 1:
raise ValueError('IP sequence must contain at least 2 elements!')
lowest_ip = sorted_ips[0]
highest_ip = sorted_ips[-1]
if lowest_ip.version != highest_ip.version:
raise TypeError('IP sequence cannot contain both IPv4 and IPv6!')
ip = highest_ip.cidr
while ip.prefixlen > 0:
if highest_ip in ip and lowest_ip not in ip:
ip.prefixlen -= 1
else:
break
return ip.cidr
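# Illustrative sketch (added for exposition; not part of the original
# module): the smallest single CIDR covering all inputs is returned.
#
#     >>> spanning_cidr(['192.0.2.27', '192.0.2.28/30'])
#     IPNetwork('192.0.2.24/29')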
#-----------------------------------------------------------------------------
def iter_iprange(start, end, step=1):
"""
A generator that produces IPAddress objects between an arbitrary start
and stop IP address with intervals of step between them. Sequences
produced are inclusive of boundary IPs.
:param start: start IP address.
:param end: end IP address.
:param step: (optional) size of step between IP addresses. Default: 1
:return: an iterator of one or more `IPAddress` objects.
"""
start = IPAddress(start)
end = IPAddress(end)
if start.version != end.version:
raise TypeError('start and stop IP versions do not match!')
version = start.version
step = int(step)
if step == 0:
raise ValueError('step argument cannot be zero')
# We don't need objects from here, just integers.
start = int(start)
stop = int(end)
negative_step = False
if step < 0:
negative_step = True
index = start - step
while True:
index += step
if negative_step:
if not index >= stop:
break
else:
if not index <= stop:
break
yield IPAddress(index, version)
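# Illustrative sketch (added for exposition; not part of the original
# module): boundaries are inclusive and step controls the stride.
#
#     >>> list(iter_iprange('192.0.2.1', '192.0.2.9', step=4))
#     [IPAddress('192.0.2.1'), IPAddress('192.0.2.5'), IPAddress('192.0.2.9')]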
#-----------------------------------------------------------------------------
def iprange_to_cidrs(start, end):
"""
A function that accepts an arbitrary start and end IP address or subnet
and returns a list of CIDR subnets that fit exactly between the boundaries
of the two with no overlap.
:param start: the start IP address or subnet.
:param end: the end IP address or subnet.
:return: a list of one or more IP addresses and subnets.
"""
cidr_list = []
start = IPNetwork(start)
end = IPNetwork(end)
iprange = [start.first, end.last]
# Get spanning CIDR covering both addresses.
cidr_span = spanning_cidr([start, end])
if cidr_span.first == iprange[0] and cidr_span.last == iprange[-1]:
# Spanning CIDR matches start and end exactly.
cidr_list = [cidr_span]
elif cidr_span.last == iprange[-1]:
# Spanning CIDR matches end exactly.
ip = IPAddress(start)
first_int_val = int(ip)
ip -= 1
cidr_remainder = cidr_exclude(cidr_span, ip)
first_found = False
for cidr in cidr_remainder:
if cidr.first == first_int_val:
first_found = True
if first_found:
cidr_list.append(cidr)
elif cidr_span.first == iprange[0]:
# Spanning CIDR matches start exactly.
ip = IPAddress(end)
last_int_val = int(ip)
ip += 1
cidr_remainder = cidr_exclude(cidr_span, ip)
last_found = False
for cidr in cidr_remainder:
cidr_list.append(cidr)
if cidr.last == last_int_val:
break
elif cidr_span.first <= iprange[0] and cidr_span.last >= iprange[-1]:
# Spanning CIDR overlaps start and end.
ip = IPAddress(start)
first_int_val = int(ip)
ip -= 1
cidr_remainder = cidr_exclude(cidr_span, ip)
# Fix start.
first_found = False
for cidr in cidr_remainder:
if cidr.first == first_int_val:
first_found = True
if first_found:
cidr_list.append(cidr)
# Fix end.
ip = IPAddress(end)
last_int_val = int(ip)
ip += 1
cidr_remainder = cidr_exclude(cidr_list.pop(), ip)
last_found = False
for cidr in cidr_remainder:
cidr_list.append(cidr)
if cidr.last == last_int_val:
break
return cidr_list
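# Illustrative sketch (added for exposition; not part of the original
# module): an arbitrary range is covered exactly by a minimal CIDR list.
#
#     >>> iprange_to_cidrs('192.0.2.1', '192.0.2.15')
#     [IPNetwork('192.0.2.1/32'), IPNetwork('192.0.2.2/31'), IPNetwork('192.0.2.4/30'), IPNetwork('192.0.2.8/29')]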
#-----------------------------------------------------------------------------
def smallest_matching_cidr(ip, cidrs):
"""
Matches an IP address or subnet against a given sequence of IP addresses
and subnets.
:param ip: a single IP address or subnet.
:param cidrs: a sequence of IP addresses and/or subnets.
:return: the smallest (most specific) matching IPAddress or IPNetwork
object from the provided sequence, None if there was no match.
"""
match = None
if not hasattr(cidrs, '__iter__'):
raise TypeError('IP address/subnet sequence expected, not %r!'
% cidrs)
ip = IPAddress(ip)
for cidr in sorted([IPNetwork(cidr) for cidr in cidrs]):
if ip in cidr:
match = cidr
else:
if match is not None:
break
return match
#-----------------------------------------------------------------------------
def largest_matching_cidr(ip, cidrs):
"""
Matches an IP address or subnet against a given sequence of IP addresses
and subnets.
:param ip: a single IP address or subnet.
:param cidrs: a sequence of IP addresses and/or subnets.
:return: the largest (least specific) matching IPAddress or IPNetwork
object from the provided sequence, None if there was no match.
"""
match = None
if not hasattr(cidrs, '__iter__'):
raise TypeError('IP address/subnet sequence expected, not %r!'
% cidrs)
ip = IPAddress(ip)
for cidr in sorted([IPNetwork(cidr) for cidr in cidrs]):
if ip in cidr:
match = cidr
break
return match
#-----------------------------------------------------------------------------
def all_matching_cidrs(ip, cidrs):
"""
Matches an IP address or subnet against a given sequence of IP addresses
and subnets.
:param ip: a single IP address.
:param cidrs: a sequence of IP addresses and/or subnets.
:return: all matching IPAddress and/or IPNetwork objects from the provided
sequence, an empty list if there was no match.
"""
matches = []
if not hasattr(cidrs, '__iter__'):
raise TypeError('IP address/subnet sequence expected, not %r!'
% cidrs)
ip = IPAddress(ip)
for cidr in sorted([IPNetwork(cidr) for cidr in cidrs]):
if ip in cidr:
matches.append(cidr)
else:
if matches:
break
return matches
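# Illustrative sketch (added for exposition; not part of the original
# module) covering the three matching helpers above:
#
#     >>> nets = ['192.0.2.0/24', '192.0.2.0/28', '10.0.0.0/8']
#     >>> smallest_matching_cidr('192.0.2.5', nets)
#     IPNetwork('192.0.2.0/28')
#     >>> largest_matching_cidr('192.0.2.5', nets)
#     IPNetwork('192.0.2.0/24')
#     >>> all_matching_cidrs('192.0.2.5', nets)
#     [IPNetwork('192.0.2.0/24'), IPNetwork('192.0.2.0/28')]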
#-----------------------------------------------------------------------------
# Cached IPv4 address range lookups.
#-----------------------------------------------------------------------------
IPV4_LOOPBACK = IPNetwork('127.0.0.0/8')
IPV4_PRIVATE = (
IPNetwork('10.0.0.0/8'), # Private-Use Networks
IPNetwork('172.16.0.0/12'), # Private-Use Networks
IPNetwork('192.0.2.0/24'), # Test-Net
IPNetwork('192.168.0.0/16'), # Private-Use Networks
IPRange('239.0.0.0', '239.255.255.255'), # Administrative Multicast
)
IPV4_LINK_LOCAL = IPNetwork('169.254.0.0/16')
IPV4_MULTICAST = IPNetwork('224.0.0.0/4')
IPV4_6TO4 = IPNetwork('192.88.99.0/24') # 6to4 Relay Anycast
IPV4_RESERVED = (
IPNetwork('128.0.0.0/16'), # Reserved but subject to allocation
IPNetwork('191.255.0.0/16'), # Reserved but subject to allocation
IPNetwork('192.0.0.0/24'), # Reserved but subject to allocation
IPNetwork('223.255.255.0/24'), # Reserved but subject to allocation
IPNetwork('240.0.0.0/4'), # Reserved for Future Use
# Reserved multicast
IPRange('234.0.0.0', '238.255.255.255'),
IPRange('225.0.0.0', '231.255.255.255'),
)
#-----------------------------------------------------------------------------
# Cached IPv6 address range lookups.
#-----------------------------------------------------------------------------
IPV6_LOOPBACK = IPAddress('::1')
IPV6_PRIVATE = (
IPNetwork('fc00::/7'), # Unique Local Addresses (ULA)
IPNetwork('fec0::/10'), # Site Local Addresses (deprecated - RFC 3879)
)
IPV6_LINK_LOCAL = IPNetwork('fe80::/10')
IPV6_MULTICAST = IPNetwork('ff00::/8')
IPV6_RESERVED = (
IPNetwork('ff00::/12'), IPNetwork('::/8'),
IPNetwork('0100::/8'), IPNetwork('0200::/7'),
IPNetwork('0400::/6'), IPNetwork('0800::/5'),
IPNetwork('1000::/4'), IPNetwork('4000::/3'),
IPNetwork('6000::/3'), IPNetwork('8000::/3'),
IPNetwork('A000::/3'), IPNetwork('C000::/3'),
IPNetwork('E000::/4'), IPNetwork('F000::/5'),
IPNetwork('F800::/6'), IPNetwork('FE00::/9'),
)
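# Illustrative sketch (added for exposition; not part of the original
# module): the cached ranges above support membership tests directly, e.g.
# a simple private-address check. Note that IPV4_PRIVATE mixes IPNetwork
# and IPRange entries, so per-entry containment is used here rather than
# the CIDR matching helpers.
#
#     >>> any(IPAddress('10.1.2.3') in net for net in IPV4_PRIVATE)
#     True
#     >>> any(IPAddress('8.8.8.8') in net for net in IPV4_PRIVATE)
#     False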
import time
import saharaclient.api.base as sab
from sahara.openstack.common import timeutils
from sahara.tests.integration.tests import base
from sahara.tests.integration.tests import edp
from sahara.utils import edp as utils_edp
class TransientClusterTest(edp.EDPTest):
@base.skip_test(
'SKIP_TRANSIENT_CLUSTER_TEST',
message='Test for transient cluster was skipped.')
def transient_cluster_testing(self, plugin_config, floating_ip_pool,
internal_neutron_net):
cluster_template_id = self.create_cluster_template(
name='test-transient-cluster-template-vanilla',
plugin_config=self.vanilla_config,
description=('test cluster template for transient cluster '
'of Vanilla plugin'),
cluster_configs={},
node_groups=[
dict(
name='master-node',
flavor_id=self.flavor_id,
node_processes=['namenode', 'oozie', 'jobtracker'],
floating_ip_pool=floating_ip_pool,
count=1),
dict(
name='worker-node',
flavor_id=self.flavor_id,
node_processes=['datanode', 'tasktracker'],
floating_ip_pool=floating_ip_pool,
count=1)
],
net_id=internal_neutron_net
)
try:
# create a transient cluster
try:
cluster_name = (self.common_config.CLUSTER_NAME + '-transient-'
+ plugin_config.PLUGIN_NAME)
self.create_cluster(
name=cluster_name,
plugin_config=plugin_config,
cluster_template_id=cluster_template_id,
description='test transient cluster',
cluster_configs={},
is_transient=True
)
except Exception:
self.delete_objects(cluster_id=self.cluster_id)
raise
# check EDP
path = 'sahara/tests/integration/tests/resources/'
pig_job_data = open(path + 'edp-job.pig').read()
pig_lib_data = open(path + 'edp-lib.jar').read()
self.edp_testing(job_type=utils_edp.JOB_TYPE_PIG,
job_data_list=[{'pig': pig_job_data}],
lib_data_list=[{'jar': pig_lib_data}])
# set timeout in seconds
timeout = self.common_config.TRANSIENT_CLUSTER_TIMEOUT * 60
s_time = timeutils.utcnow()
raise_failure = True
# wait for cluster deleting
while timeutils.delta_seconds(
s_time, timeutils.utcnow()) < timeout:
try:
self.sahara.clusters.get(self.cluster_id)
except sab.APIException as api_ex:
if 'not found' in api_ex.message:
raise_failure = False
break
time.sleep(2)
if raise_failure:
self.delete_objects(cluster_id=self.cluster_id)
self.fail('Transient cluster has not been deleted within %s '
'minutes.'
% self.common_config.TRANSIENT_CLUSTER_TIMEOUT)
finally:
self.delete_objects(cluster_template_id=cluster_template_id)
from amazon_api_manger import AmazonAPIManger
from product import Product
class ProductSearcher(object):
MAX_RESULTS = 10
def __init__(self, config):
self.api = AmazonAPIManger(config).get_api()
def search(self, product_group, keyword, browse_node=None):
"""
:param product_group: search_index(string)
:param keyword: free keyword (string)
:return: list of Products instance
"""
product_items = self.api.item_search(product_group, Keywords=keyword, BrowseNode=browse_node)
products = []
index = -1
for item in product_items:
index += 1
if index == self.MAX_RESULTS:
break
# Build ResponseGroup as a single string so that no whitespace from
# line continuations ends up inside the parameter value.
browse_nodes = self.api.item_lookup(ItemId=item.ASIN,
ResponseGroup='OfferListings,BrowseNodes,OfferSummary,Offers,Images')
product = Product(item, browse_nodes)
if (product.get_img_url('SmallImage') == 'null') or \
(product.get_img_url('MediumImage') == 'null' and product.get_img_url('LargeImage') == 'null'):
index -= 1
continue
if product.get_rating() == 'null' or product.get_review() == 'null' or product.get_price() == 'null':
index -= 1
continue
if float(product.get_rating()) < 4.0:
index -= 1
continue
products.append(product)
return products
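# Illustrative usage sketch (added for exposition; not part of the original
# module). The config contents shown are an assumption - whatever
# AmazonAPIManger actually expects must be supplied by the caller.
#
#     config = {'access_key': '...', 'secret_key': '...', 'assoc_tag': '...'}
#     searcher = ProductSearcher(config)
#     for product in searcher.search('Electronics', 'usb charger'):
#         print(product.get_price(), product.get_rating())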
import os
from swift import __version__
import subprocess
import sys
import warnings
# TODO(Graham Hayes): Remove the following block of code when os-api-ref is
# using openstackdocstheme
import os_api_ref
if getattr(os_api_ref, 'THEME', 'oslosphinx') == 'openstackdocstheme':
# We are on the new version with openstackdocstheme support
extensions = [
'os_api_ref',
]
import openstackdocstheme # noqa
html_theme = 'openstackdocs'
html_theme_path = [openstackdocstheme.get_html_theme_path()]
html_theme_options = {
"sidebar_mode": "toc",
}
else:
# We are on the old version without openstackdocstheme support
extensions = [
'os_api_ref',
'oslosphinx',
]
# End temporary block
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../../'))
sys.path.insert(0, os.path.abspath('../'))
sys.path.insert(0, os.path.abspath('./'))
# -- General configuration ----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#
# source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Object Storage API Reference'
copyright = u'2010-present, OpenStack Foundation'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = __version__.rsplit('.', 1)[0]
# The full version, including alpha/beta/rc tags.
release = __version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# The reST default role (used for this markup: `text`) to use
# for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = False
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for man page output ----------------------------------------------
# Grouping the document tree for man pages.
# List of tuples 'sourcefile', 'target', u'title', u'Authors name', 'manual'
# -- Options for HTML output --------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
# html_theme_path = ["."]
# html_theme = '_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
git_cmd = ["git", "log", "--pretty=format:'%ad, commit %h'", "--date=local",
"-n1"]
try:
html_last_updated_fmt = subprocess.Popen(
git_cmd, stdout=subprocess.PIPE).communicate()[0]
except OSError:
warnings.warn('Cannot get last updated time from git repository. '
'Not setting "html_last_updated_fmt".')
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_use_modindex = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'swiftdoc'
# -- Options for LaTeX output -------------------------------------------------
# The paper size ('letter' or 'a4').
# latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
# latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
('index', 'swift.tex', u'OpenStack Object Storage API Documentation',
u'OpenStack Foundation', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# Additional stuff for the LaTeX preamble.
# latex_preamble = ''
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_use_modindex = True
"""
@package mi.dataset.parser.optaa_dj_cspp
@file marine-integrations/mi/dataset/parser/optaa_dj_cspp.py
@author Joe Padula
@brief Parser for the optaa_dj_cspp dataset driver. This parser extends CsppParser
located in cspp_base.py.
Release notes:
Initial Release
"""
import numpy
import re
from mi.core.common import BaseEnum
from mi.core.exceptions import RecoverableSampleException
from mi.core.instrument.dataset_data_particle import DataParticle
from mi.core.log import get_logger
from mi.dataset.parser.common_regexes import \
END_OF_LINE_REGEX, \
FLOAT_REGEX, \
INT_REGEX
from mi.dataset.parser.cspp_base import \
CsppParser, \
HEADER_PART_MATCHER, \
Y_OR_N_REGEX, \
CsppMetadataDataParticle, \
MetadataRawDataKey, \
encode_y_or_n
log = get_logger()
__author__ = 'Joe Padula'
__license__ = 'Apache 2.0'
TAB_REGEX = r'\t'
# This is the beginning part of the REGEX, the rest of it varies
BEGIN_REGEX = '(' + FLOAT_REGEX + ')' + TAB_REGEX # Profiler Timestamp
BEGIN_REGEX += '(' + FLOAT_REGEX + ')' + TAB_REGEX # Depth
BEGIN_REGEX += '(' + Y_OR_N_REGEX + ')' + TAB_REGEX # Suspect Timestamp
BEGIN_REGEX += '(' + INT_REGEX + ')' + TAB_REGEX # serial number
BEGIN_REGEX += '(' + FLOAT_REGEX + ')' + TAB_REGEX # powered on seconds
BEGIN_REGEX += '(' + INT_REGEX + ')' + TAB_REGEX # num wavelengths
BEGIN_MATCHER = re.compile(BEGIN_REGEX)
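# --- Added illustration (hedged, not part of the original driver) ----------
# A data record begins with six tab-separated scalar fields, i.e. a line of
# the hypothetical form
#     "<timestamp>\t<depth>\t<y_or_n>\t<serial>\t<on_seconds>\t<num_wavelengths>\t..."
# BEGIN_MATCHER.match() on such a line exposes group(1) as the profiler
# timestamp through group(6) as the number of wavelengths; the wavelength
# count arrays that follow are handled by the regex built dynamically in
# OptaaDjCsppParser._build_data_regex() below.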
class DataMatchesGroupNumber(BaseEnum):
"""
An enum for group match indices for a data record chunk.
These indices are into match.group(INDEX).
Used to access the match groups in the particle raw data
"""
PROFILER_TIMESTAMP = 1
DEPTH = 2
SUSPECT_TIMESTAMP = 3
SERIAL_NUMBER = 4
ON_SECONDS = 5
NUM_WAVELENGTHS = 6
C_REF_DARK = 7
C_REF_COUNTS = 8
C_SIG_DARK = 9
C_SIG_COUNTS = 10
A_REF_DARK = 11
A_REF_COUNTS = 12
A_SIG_DARK = 13
A_SIG_COUNTS = 14
EXTERNAL_TEMP_COUNTS = 15
INTERNAL_TEMP_COUNTS = 16
PRESSURE_COUNTS = 17
class DataParticleType(BaseEnum):
"""
The data particle types that this parser can generate
"""
METADATA_RECOVERED = 'optaa_dj_cspp_metadata_recovered'
INSTRUMENT_RECOVERED = 'optaa_dj_cspp_instrument_recovered'
METADATA_TELEMETERED = 'optaa_dj_cspp_metadata'
INSTRUMENT_TELEMETERED = 'optaa_dj_cspp_instrument'
class OptaaDjCsppParserDataParticleKey(BaseEnum):
"""
The data particle keys associated with the metadata particle parameters
"""
# non-common metadata particle key
SERIAL_NUMBER = 'serial_number'
# The data particle keys associated with the data instrument particle parameters
PROFILER_TIMESTAMP = 'profiler_timestamp'
PRESSURE_DEPTH = 'pressure_depth'
SUSPECT_TIMESTAMP = 'suspect_timestamp'
ON_SECONDS = 'on_seconds'
NUM_WAVELENGTHS = 'num_wavelengths'
C_REFERENCE_DARK_COUNTS = 'c_reference_dark_counts'
C_REFERENCE_COUNTS = 'c_reference_counts'
C_SIGNAL_DARK_COUNTS = 'c_signal_dark_counts'
C_SIGNAL_COUNTS = 'c_signal_counts'
A_REFERENCE_DARK_COUNTS = 'a_reference_dark_counts'
A_REFERENCE_COUNTS = 'a_reference_counts'
A_SIGNAL_DARK_COUNTS = 'a_signal_dark_counts'
A_SIGNAL_COUNTS = 'a_signal_counts'
EXTERNAL_TEMP_RAW = 'external_temp_raw'
INTERNAL_TEMP_RAW = 'internal_temp_raw'
PRESSURE_COUNTS = 'pressure_counts'
# Two groups of instrument particle encoding rules, used to simplify encoding with a loop.
# This is the beginning part of the encoding, before the lists.
INSTRUMENT_PARTICLE_ENCODING_RULES_BEGIN = [
# Since 1/1/70 with millisecond resolution
(OptaaDjCsppParserDataParticleKey.PROFILER_TIMESTAMP, DataMatchesGroupNumber.PROFILER_TIMESTAMP, numpy.float),
# "Depth" from Record Structure section
(OptaaDjCsppParserDataParticleKey.PRESSURE_DEPTH, DataMatchesGroupNumber.DEPTH, float),
# Flag indicating a potential inaccuracy in the timestamp
(OptaaDjCsppParserDataParticleKey.SUSPECT_TIMESTAMP, DataMatchesGroupNumber.SUSPECT_TIMESTAMP, encode_y_or_n),
# Powered On Seconds
(OptaaDjCsppParserDataParticleKey.ON_SECONDS, DataMatchesGroupNumber.ON_SECONDS, float),
# Number of output wavelengths.
(OptaaDjCsppParserDataParticleKey.NUM_WAVELENGTHS, DataMatchesGroupNumber.NUM_WAVELENGTHS, int)
]
# This is the end part of the encoding, after the lists.
INSTRUMENT_PARTICLE_ENCODING_RULES_END = [
# Temperature external to the instrument measured in counts.
(OptaaDjCsppParserDataParticleKey.EXTERNAL_TEMP_RAW, DataMatchesGroupNumber.EXTERNAL_TEMP_COUNTS, int),
# Temperature internal to the instrument measured in counts.
(OptaaDjCsppParserDataParticleKey.INTERNAL_TEMP_RAW, DataMatchesGroupNumber.INTERNAL_TEMP_COUNTS, int),
# Raw A/D counts from the pressure sensor
(OptaaDjCsppParserDataParticleKey.PRESSURE_COUNTS, DataMatchesGroupNumber.PRESSURE_COUNTS, int)
]
class OptaaDjCsppMetadataDataParticle(CsppMetadataDataParticle):
"""
Base Class for building a metadata particle
"""
def _build_parsed_values(self):
"""
Take something in the data format and turn it into
an array of dictionaries defining the data in the particle
with the appropriate tag.
        @returns a list of encoded metadata particle values (key/value pairs)
@throws RecoverableSampleException If there is a problem with sample creation
"""
metadata_particle = []
# Call base class to append the base metadata parsed values to the particle to return
metadata_particle += self._build_metadata_parsed_values()
data_match = self.raw_data[MetadataRawDataKey.DATA_MATCH]
# Process the non common metadata particle parameter
# Instrument serial number (from first record)
metadata_particle.append(self._encode_value(OptaaDjCsppParserDataParticleKey.SERIAL_NUMBER,
data_match.group(DataMatchesGroupNumber.SERIAL_NUMBER),
str))
# Set the internal timestamp
internal_timestamp_unix = numpy.float(data_match.group(
DataMatchesGroupNumber.PROFILER_TIMESTAMP))
self.set_internal_timestamp(unix_time=internal_timestamp_unix)
return metadata_particle
class OptaaDjCsppMetadataRecoveredDataParticle(OptaaDjCsppMetadataDataParticle):
"""
Class for building a recovered metadata particle
"""
_data_particle_type = DataParticleType.METADATA_RECOVERED
class OptaaDjCsppMetadataTelemeteredDataParticle(OptaaDjCsppMetadataDataParticle):
"""
Class for building a telemetered metadata particle
"""
_data_particle_type = DataParticleType.METADATA_TELEMETERED
class OptaaDjCsppInstrumentDataParticle(DataParticle):
"""
    Base class for building an instrument data particle
"""
def _build_parsed_values(self):
"""
Take something in the data format and turn it into
an array of dictionaries defining the data in the particle
with the appropriate tag.
@throws RecoverableSampleException If there is a problem with sample creation
"""
results = []
# Process each of the non-list type instrument particle parameters that occur first
for name, group, function in INSTRUMENT_PARTICLE_ENCODING_RULES_BEGIN:
results.append(self._encode_value(name, self.raw_data.group(group), function))
        # The following is a mix of int values, each followed by a list.
# C-channel reference dark counts, used for diagnostic purposes.
results.append(self._encode_value(OptaaDjCsppParserDataParticleKey.C_REFERENCE_DARK_COUNTS,
self.raw_data.group(DataMatchesGroupNumber.C_REF_DARK),
int))
# Array of raw c-channel reference counts
results.append(self._encode_value(OptaaDjCsppParserDataParticleKey.C_REFERENCE_COUNTS,
self._build_list_for_encoding(DataMatchesGroupNumber.C_REF_COUNTS),
list))
# C-signal reference dark counts, used for diagnostic purposes.
results.append(self._encode_value(OptaaDjCsppParserDataParticleKey.C_SIGNAL_DARK_COUNTS,
self.raw_data.group(DataMatchesGroupNumber.C_SIG_DARK),
int))
# Array of raw c-channel signal counts
results.append(self._encode_value(OptaaDjCsppParserDataParticleKey.C_SIGNAL_COUNTS,
self._build_list_for_encoding(DataMatchesGroupNumber.C_SIG_COUNTS),
list))
# A-channel reference dark counts, used for diagnostic purposes.
results.append(self._encode_value(OptaaDjCsppParserDataParticleKey.A_REFERENCE_DARK_COUNTS,
self.raw_data.group(DataMatchesGroupNumber.A_REF_DARK),
int))
# Array of raw a-channel reference counts
results.append(self._encode_value(OptaaDjCsppParserDataParticleKey.A_REFERENCE_COUNTS,
self._build_list_for_encoding(DataMatchesGroupNumber.A_REF_COUNTS),
list))
# A-signal reference dark counts, used for diagnostic purposes.
results.append(self._encode_value(OptaaDjCsppParserDataParticleKey.A_SIGNAL_DARK_COUNTS,
self.raw_data.group(DataMatchesGroupNumber.A_SIG_DARK),
int))
# Array of raw a-channel signal counts
results.append(self._encode_value(OptaaDjCsppParserDataParticleKey.A_SIGNAL_COUNTS,
self._build_list_for_encoding(DataMatchesGroupNumber.A_SIG_COUNTS),
list))
# Process each of the non-list instrument particle parameters that occur last
for name, group, function in INSTRUMENT_PARTICLE_ENCODING_RULES_END:
results.append(self._encode_value(name, self.raw_data.group(group), function))
# Set the internal timestamp
internal_timestamp_unix = numpy.float(self.raw_data.group(
DataMatchesGroupNumber.PROFILER_TIMESTAMP))
self.set_internal_timestamp(unix_time=internal_timestamp_unix)
return results
def _build_list_for_encoding(self, group_num):
"""
Helper method for building the list that is needed for encoding
@param group_num the group number of the match
@return the list of counts
"""
# Load the tab separated string
tab_str = self.raw_data.group(group_num)
# Strip off the ending tab
tab_str_stripped = tab_str.strip('\t')
counts_list = tab_str_stripped.split('\t')
# return a list of integers
return map(int, counts_list)
class OptaaDjCsppInstrumentRecoveredDataParticle(OptaaDjCsppInstrumentDataParticle):
"""
Class for building a recovered instrument data particle
"""
_data_particle_type = DataParticleType.INSTRUMENT_RECOVERED
class OptaaDjCsppInstrumentTelemeteredDataParticle(OptaaDjCsppInstrumentDataParticle):
"""
Class for building a telemetered instrument data particle
"""
_data_particle_type = DataParticleType.INSTRUMENT_TELEMETERED
class OptaaDjCsppParser(CsppParser):
def __init__(self,
config,
stream_handle,
exception_callback):
"""
This method is a constructor that will instantiate an OptaaDjCsppParser object.
@param config The configuration for this OptaaDjCsppParser parser
@param stream_handle The handle to the data stream containing the optaa_dj_cspp data
@param exception_callback The function to call to report exceptions
"""
# Call the superclass constructor
super(OptaaDjCsppParser, self).__init__(config,
stream_handle,
exception_callback,
BEGIN_REGEX)
@staticmethod
def _build_data_regex(regex, count):
"""
Helper method for building up regex
@param regex the beginning part of the regex that has already been determined
@param count the number of items in the array
"""
data_regex = regex
array = r'((?:\d+\t){%s})' % count
data_regex += '(' + INT_REGEX + ')' + TAB_REGEX # C Ref Dark
data_regex += array # C Ref Counts
data_regex += '(' + INT_REGEX + ')' + TAB_REGEX # C Sig Dark
data_regex += array # C Sig Counts
data_regex += '(' + INT_REGEX + ')' + TAB_REGEX # A Ref Dark
data_regex += array # A Ref Counts
data_regex += '(' + INT_REGEX + ')' + TAB_REGEX # A Sig Dark
data_regex += array # A Sig Counts
data_regex += '(' + INT_REGEX + ')' + TAB_REGEX # External Temp Counts
data_regex += '(' + INT_REGEX + ')' + TAB_REGEX # Internal Temp Counts
data_regex += '(' + INT_REGEX + ')' # Pressure Counts
data_regex += r'\t*' + END_OF_LINE_REGEX
return data_regex
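    # Added note (hedged): for a record whose header reports, say, 85
    # wavelengths, the array sub-pattern becomes r'((?:\d+\t){85})', so each
    # of the four counts fields must contain exactly that many tab-terminated
    # integers for the full-record regex to match.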
def parse_file(self):
"""
Parse through the file, pulling single lines and comparing to the established patterns,
generating particles for data lines
"""
for line in self._stream_handle:
match = BEGIN_MATCHER.match(line)
if match is not None:
count = match.group(DataMatchesGroupNumber.NUM_WAVELENGTHS)
data_regex = self._build_data_regex(BEGIN_REGEX, count)
fields = re.match(data_regex, line)
if fields is not None:
self._process_data_match(fields, self._record_buffer)
else: # did not match the regex
log.warn("line did not match regex %s", line)
self._exception_callback(RecoverableSampleException("Found an invalid line: %s" % line))
else:
# Check for head part match
header_part_match = HEADER_PART_MATCHER.match(line)
if header_part_match is not None:
self._process_header_part_match(header_part_match)
else:
self._process_line_not_containing_data_record_or_header_part(line)
|
{
"content_hash": "a79318dbf21957cb4e9596ead441799c",
"timestamp": "",
"source": "github",
"line_count": 373,
"max_line_length": 114,
"avg_line_length": 39.77479892761394,
"alnum_prop": 0.6357508762469668,
"repo_name": "vipullakhani/mi-instrument",
"id": "7427186dbad6a5af81c248cd138b9edf3ef07335",
"size": "14836",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "mi/dataset/parser/optaa_dj_cspp.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "HTML",
"bytes": "4746"
},
{
"name": "Python",
"bytes": "9968191"
}
],
"symlink_target": ""
}
|
from django.contrib.admin.utils import quote
from django.core import checks
from django.db.models import ForeignKey
from django.urls import reverse
from django.utils.module_loading import import_string
from wagtail.admin.checks import check_panels_in_model
from wagtail.admin.forms.models import register_form_field_override
from wagtail.admin.viewsets import viewsets
from wagtail.models import ReferenceIndex
from .widgets import AdminSnippetChooser
SNIPPET_MODELS = []
# register_snippet will often be called before models are fully loaded, which may cause
# issues with constructing viewsets (https://github.com/wagtail/wagtail/issues/9586).
# We therefore initially set a DEFER_REGISTRATION flag to indicate that registration
# should not be processed immediately, but added to the DEFERRED_REGISTRATIONS list to be
# handled later. This is initiated from WagtailSnippetsAppConfig.ready(), at which point
# we can be sure that models are fully loaded.
DEFER_REGISTRATION = True
DEFERRED_REGISTRATIONS = []
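# Added sketch (hedged, not part of Wagtail): the intended call order is
#   1. at import time, register_snippet(MyModel) only appends the pair
#      (MyModel, viewset) to DEFERRED_REGISTRATIONS;
#   2. WagtailSnippetsAppConfig.ready() then calls register_deferred_snippets(),
#      which clears DEFER_REGISTRATION and replays each deferred pair through
#      _register_snippet_immediately().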
def get_snippet_models():
return SNIPPET_MODELS
class SnippetAdminURLFinder:
# subclasses should define a 'model' attribute
def __init__(self, user=None):
if user:
from wagtail.snippets.permissions import get_permission_name
self.user_can_edit = user.has_perm(
get_permission_name("change", self.model)
)
else:
# skip permission checks
self.user_can_edit = True
def get_edit_url(self, instance):
app_label = self.model._meta.app_label
model_name = self.model._meta.model_name
if self.user_can_edit:
return reverse(
f"wagtailsnippets_{app_label}_{model_name}:edit",
args=[quote(instance.pk)],
)
def register_snippet(model, viewset=None):
if DEFER_REGISTRATION:
# Models may not have been fully loaded yet, so defer registration until they are -
# add it to the list of registrations to be processed by register_deferred_snippets
DEFERRED_REGISTRATIONS.append((model, viewset))
else:
_register_snippet_immediately(model, viewset)
return model
def _register_snippet_immediately(model, viewset=None):
# Register the viewset and formfield for this snippet model,
# skipping the check for whether models are loaded
if model in SNIPPET_MODELS:
# Do not create duplicate registrations of the same model
return
from wagtail.snippets.views.chooser import SnippetChooserViewSet
from wagtail.snippets.views.snippets import SnippetViewSet
model.get_usage = lambda obj: ReferenceIndex.get_references_to(
obj
).group_by_source_object()
model.usage_url = get_snippet_usage_url
model.get_admin_base_path = get_admin_base_path
model.get_admin_url_namespace = get_admin_url_namespace
if viewset is None:
viewset = SnippetViewSet
elif isinstance(viewset, str):
viewset = import_string(viewset)
admin_viewset = viewset(
model.get_admin_url_namespace(),
model=model,
url_prefix=model.get_admin_base_path(),
)
chooser_viewset = SnippetChooserViewSet(
f"wagtailsnippetchoosers_{model._meta.app_label}_{model._meta.model_name}",
model=model,
url_prefix=f"snippets/choose/{model._meta.app_label}/{model._meta.model_name}",
)
viewsets.register(admin_viewset)
viewsets.register(chooser_viewset)
SNIPPET_MODELS.append(model)
SNIPPET_MODELS.sort(key=lambda x: x._meta.verbose_name)
@checks.register("panels")
def modeladmin_model_check(app_configs, **kwargs):
errors = check_panels_in_model(model, "snippets")
return errors
# Set up admin model forms to use AdminSnippetChooser for any ForeignKey to this model
register_form_field_override(
ForeignKey, to=model, override={"widget": AdminSnippetChooser(model=model)}
)
def get_snippet_usage_url(self):
return reverse(
f"wagtailsnippets_{self._meta.app_label}_{self._meta.model_name}:usage",
args=[quote(self.pk)],
)
@classmethod
def get_admin_base_path(cls):
return f"snippets/{cls._meta.app_label}/{cls._meta.model_name}"
@classmethod
def get_admin_url_namespace(cls):
return f"wagtailsnippets_{cls._meta.app_label}_{cls._meta.model_name}"
def register_deferred_snippets():
"""
Called from WagtailSnippetsAppConfig.ready(), at which point we can be sure all models
have been loaded and register_snippet can safely construct viewsets.
"""
global DEFER_REGISTRATION
DEFER_REGISTRATION = False
for model, viewset in DEFERRED_REGISTRATIONS:
_register_snippet_immediately(model, viewset)
|
{
"content_hash": "af90dae935144ce18172a46ecd335813",
"timestamp": "",
"source": "github",
"line_count": 142,
"max_line_length": 91,
"avg_line_length": 33.528169014084504,
"alnum_prop": 0.7015332913253518,
"repo_name": "rsalmaso/wagtail",
"id": "2c990026d9e1073e11266e706a8caa96c6acfaf3",
"size": "4761",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "wagtail/snippets/models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "2522"
},
{
"name": "Dockerfile",
"bytes": "2041"
},
{
"name": "HTML",
"bytes": "593672"
},
{
"name": "JavaScript",
"bytes": "624463"
},
{
"name": "Makefile",
"bytes": "1413"
},
{
"name": "Python",
"bytes": "6598232"
},
{
"name": "SCSS",
"bytes": "221911"
},
{
"name": "Shell",
"bytes": "6845"
},
{
"name": "TypeScript",
"bytes": "296087"
}
],
"symlink_target": ""
}
|
'''
Given n non-negative integers representing the histogram's bar heights where the width of each bar is 1, find the area of the largest rectangle in the histogram.
In the original illustration, the histogram has bar width 1 and height = [2,1,5,6,2,3].
The largest rectangle (the shaded area in that illustration) has area = 10 units.
For example,
Given height = [2,1,5,6,2,3],
return 10.
'''
class Solution(object):
def largestRectangleArea(self, height):
"""
:type height: List[int]
:rtype: int
"""
height.insert(0, 0)
height.append(0)
n = len(height)
stack = []
i, res = 0, 0
stack.append(0)
for i in range(1, n):
idx = stack[-1]
while height[i] < height[idx]:
stack.pop()
res = max(res, height[idx] * (i - stack[-1] - 1))
idx = stack[-1]
stack.append(i)
return res
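# Added usage sketch (hedged, not part of the original submission): exercises
# the stack-based solution on the worked example from the problem statement.
if __name__ == '__main__':
    assert Solution().largestRectangleArea([2, 1, 5, 6, 2, 3]) == 10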
|
{
"content_hash": "3058d102e15163be8aaace418c4a7999",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 156,
"avg_line_length": 24.487179487179485,
"alnum_prop": 0.5455497382198953,
"repo_name": "weixsong/algorithm",
"id": "d1e6094f77e88f4468e2720112abd5c15706d274",
"size": "981",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "leetcode/84h.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "2201"
},
{
"name": "C++",
"bytes": "11061"
},
{
"name": "Makefile",
"bytes": "152"
},
{
"name": "Python",
"bytes": "379600"
}
],
"symlink_target": ""
}
|
import unittest
from circleci.error import CircleCIException, BadKeyError, BadVerbError, InvalidFilterError
class TestCircleCIError(unittest.TestCase):
def setUp(self):
self.base = CircleCIException('fake')
self.key = BadKeyError('fake')
self.verb = BadVerbError('fake')
self.filter = InvalidFilterError('fake', 'status')
self.afilter = InvalidFilterError('fake', 'artifacts')
def test_error_implements_str(self):
self.assertTrue(self.base.__str__ is not object.__str__)
string = self.base.__str__()
self.assertIn('invalid', string)
def test_verb_message(self):
self.assertIn('DELETE', self.verb.message)
def test_key_message(self):
self.assertIn('deploy-key', self.key.message)
def test_filter_message(self):
self.assertIn('running', self.filter.message)
self.assertIn('completed', self.afilter.message)
|
{
"content_hash": "d1d8c381cd2f7615d9b14044f498308e",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 91,
"avg_line_length": 33.17857142857143,
"alnum_prop": 0.667384284176534,
"repo_name": "levlaz/circleci.py",
"id": "5b0f90be160bb99b70864905a7da1059571b0537",
"size": "951",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/circle/test_error.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "550"
},
{
"name": "Python",
"bytes": "39153"
}
],
"symlink_target": ""
}
|
import copy
import stevedore
from glance.common import location_strategy
from glance.common.location_strategy import location_order
from glance.common.location_strategy import store_type
from glance.tests.unit import base
class TestLocationStrategy(base.IsolatedUnitTest):
"""Test routines in glance.common.location_strategy"""
def _set_original_strategies(self, original_strategies):
for name in location_strategy._available_strategies.keys():
if name not in original_strategies:
del location_strategy._available_strategies[name]
def setUp(self):
super(TestLocationStrategy, self).setUp()
original_strategies = ['location_order', 'store_type']
self.addCleanup(self._set_original_strategies, original_strategies)
def test_load_strategy_modules(self):
modules = location_strategy._load_strategies()
# By default we have two built-in strategy modules.
self.assertEqual(2, len(modules))
self.assertEqual(set(['location_order', 'store_type']),
set(modules.keys()))
self.assertEqual(location_strategy._available_strategies, modules)
def test_load_strategy_module_with_deduplicating(self):
modules = ['module1', 'module2']
def _fake_stevedore_extension_manager(*args, **kwargs):
ret = lambda: None
ret.names = lambda: modules
return ret
def _fake_stevedore_driver_manager(*args, **kwargs):
ret = lambda: None
ret.driver = lambda: None
ret.driver.__name__ = kwargs['name']
# Module 1 and 2 has a same strategy name
ret.driver.get_strategy_name = lambda: 'module_name'
ret.driver.init = lambda: None
return ret
self.stub = self.stubs.Set(stevedore.extension, "ExtensionManager",
_fake_stevedore_extension_manager)
self.stub = self.stubs.Set(stevedore.driver, "DriverManager",
_fake_stevedore_driver_manager)
loaded_modules = location_strategy._load_strategies()
self.assertEqual(1, len(loaded_modules))
self.assertIn('module_name', loaded_modules)
# Skipped module #2, duplicated one.
self.assertEqual('module1', loaded_modules['module_name'].__name__)
def test_load_strategy_module_with_init_exception(self):
modules = ['module_init_exception', 'module_good']
def _fake_stevedore_extension_manager(*args, **kwargs):
ret = lambda: None
ret.names = lambda: modules
return ret
def _fake_stevedore_driver_manager(*args, **kwargs):
if kwargs['name'] == 'module_init_exception':
raise Exception('strategy module failed to initialize.')
else:
ret = lambda: None
ret.driver = lambda: None
ret.driver.__name__ = kwargs['name']
ret.driver.get_strategy_name = lambda: kwargs['name']
ret.driver.init = lambda: None
return ret
self.stub = self.stubs.Set(stevedore.extension, "ExtensionManager",
_fake_stevedore_extension_manager)
self.stub = self.stubs.Set(stevedore.driver, "DriverManager",
_fake_stevedore_driver_manager)
loaded_modules = location_strategy._load_strategies()
self.assertEqual(1, len(loaded_modules))
self.assertIn('module_good', loaded_modules)
# Skipped module #1, initialize failed one.
self.assertEqual('module_good', loaded_modules['module_good'].__name__)
def test_verify_valid_location_strategy(self):
for strategy_name in ['location_order', 'store_type']:
self.config(location_strategy=strategy_name)
location_strategy.verify_location_strategy()
def test_get_ordered_locations_with_none_or_empty_locations(self):
self.assertEqual([], location_strategy.get_ordered_locations(None))
self.assertEqual([], location_strategy.get_ordered_locations([]))
def test_get_ordered_locations(self):
self.config(location_strategy='location_order')
original_locs = [{'url': 'loc1'}, {'url': 'loc2'}]
ordered_locs = location_strategy.get_ordered_locations(original_locs)
# Original location list should remain unchanged
self.assertNotEqual(id(original_locs), id(ordered_locs))
self.assertEqual(original_locs, ordered_locs)
def test_choose_best_location_with_none_or_empty_locations(self):
self.assertIsNone(location_strategy.choose_best_location(None))
self.assertIsNone(location_strategy.choose_best_location([]))
def test_choose_best_location(self):
self.config(location_strategy='location_order')
original_locs = [{'url': 'loc1'}, {'url': 'loc2'}]
best_loc = location_strategy.choose_best_location(original_locs)
# Deep copy protect original location.
self.assertNotEqual(id(original_locs), id(best_loc))
self.assertEqual(original_locs[0], best_loc)
class TestLocationOrderStrategyModule(base.IsolatedUnitTest):
"""Test routines in glance.common.location_strategy.location_order"""
def test_get_ordered_locations(self):
original_locs = [{'url': 'loc1'}, {'url': 'loc2'}]
ordered_locs = location_order.get_ordered_locations(original_locs)
# The result will ordered by original natural order.
self.assertEqual(original_locs, ordered_locs)
class TestStoreTypeStrategyModule(base.IsolatedUnitTest):
"""Test routines in glance.common.location_strategy.store_type"""
def test_get_ordered_locations(self):
self.config(store_type_preference=[' rbd', 'sheepdog ', ' file',
'swift ', ' http ', 'vmware'],
group='store_type_location_strategy')
locs = [{'url': 'file://image0', 'metadata': {'idx': 3}},
{'url': 'rbd://image1', 'metadata': {'idx': 0}},
{'url': 'file://image3', 'metadata': {'idx': 4}},
{'url': 'swift://image4', 'metadata': {'idx': 6}},
{'url': 'cinder://image5', 'metadata': {'idx': 9}},
{'url': 'file://image6', 'metadata': {'idx': 5}},
{'url': 'rbd://image7', 'metadata': {'idx': 1}},
{'url': 'vsphere://image9', 'metadata': {'idx': 8}},
{'url': 'sheepdog://image8', 'metadata': {'idx': 2}}]
ordered_locs = store_type.get_ordered_locations(copy.deepcopy(locs))
locs.sort(key=lambda loc: loc['metadata']['idx'])
# The result will ordered by preferred store type order.
self.assertEqual(locs, ordered_locs)
def test_get_ordered_locations_with_invalid_store_name(self):
self.config(store_type_preference=[' rbd', 'sheepdog ', 'invalid',
'swift ', ' http '],
group='store_type_location_strategy')
locs = [{'url': 'file://image0', 'metadata': {'idx': 4}},
{'url': 'rbd://image1', 'metadata': {'idx': 0}},
{'url': 'file://image3', 'metadata': {'idx': 5}},
{'url': 'swift://image4', 'metadata': {'idx': 3}},
{'url': 'cinder://image5', 'metadata': {'idx': 6}},
{'url': 'file://image6', 'metadata': {'idx': 7}},
{'url': 'rbd://image7', 'metadata': {'idx': 1}},
{'url': 'sheepdog://image8', 'metadata': {'idx': 2}}]
ordered_locs = store_type.get_ordered_locations(copy.deepcopy(locs))
locs.sort(key=lambda loc: loc['metadata']['idx'])
# The result will ordered by preferred store type order.
self.assertEqual(locs, ordered_locs)
|
{
"content_hash": "ea0d013210727770473940c34d47cb1f",
"timestamp": "",
"source": "github",
"line_count": 170,
"max_line_length": 79,
"avg_line_length": 46.411764705882355,
"alnum_prop": 0.5982256020278834,
"repo_name": "rajalokan/glance",
"id": "bef1a9f9237748414575292fb09616689159d674",
"size": "8515",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "glance/tests/unit/common/test_location_strategy.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "673"
},
{
"name": "Mako",
"bytes": "431"
},
{
"name": "Python",
"bytes": "3840692"
}
],
"symlink_target": ""
}
|
from django.conf.urls import url, include
from .views import empty_view
urlpatterns = [
url(r'^$', empty_view, name="named-url3"),
url(r'^extra/(?P<extra>\w+)/$', empty_view, name="named-url4"),
url(r'^(?P<one>\d+)|(?P<two>\d+)/$', empty_view),
url(r'^included/', include('urlpatterns_reverse.included_named_urls2')),
]
|
{
"content_hash": "e299da7ea8193d0051da4c9a09bfade5",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 76,
"avg_line_length": 30.818181818181817,
"alnum_prop": 0.6224188790560472,
"repo_name": "liavkoren/djangoDev",
"id": "d8a6a1648145a30afe9e41361c0a713016011828",
"size": "339",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/urlpatterns_reverse/included_named_urls.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "52957"
},
{
"name": "JavaScript",
"bytes": "102668"
},
{
"name": "Python",
"bytes": "9336943"
},
{
"name": "Shell",
"bytes": "12137"
}
],
"symlink_target": ""
}
|
"""TELNET client class.
Based on RFC 854: TELNET Protocol Specification, by J. Postel and
J. Reynolds
Example:
>>> from telnetlib import Telnet
>>> tn = Telnet('www.python.org', 79) # connect to finger port
>>> tn.write('guido\r\n')
>>> print tn.read_all()
Login Name TTY Idle When Where
guido Guido van Rossum pts/2 <Dec 2 11:10> snag.cnri.reston..
>>>
Note that read_some() won't read until eof -- it just reads some data
-- but it guarantees to read at least one byte unless EOF is hit.
It is possible to pass a Telnet object to select.select() in order to
wait until more data is available. Note that in this case,
read_eager() may return '' even if there was data on the socket,
because the protocol negotiation may have eaten the data. This is why
EOFError is needed in some cases to distinguish between "no data" and
"connection closed" (since the socket also appears ready for reading
when it is closed).
To do:
- option negotiation
- timeout should be intrinsic to the connection object instead of an
option on one of the read calls only
"""
# Imported modules
import sys
import socket
import os
if os.name == 'java':
from select import cpython_compatible_select as select
else:
from select import select
del os
__all__ = ["Telnet"]
# Tunable parameters
DEBUGLEVEL = 0
# Telnet protocol defaults
TELNET_PORT = 23
# Telnet protocol characters (don't change)
IAC = chr(255) # "Interpret As Command"
DONT = chr(254)
DO = chr(253)
WONT = chr(252)
WILL = chr(251)
theNULL = chr(0)
SE = chr(240) # Subnegotiation End
NOP = chr(241) # No Operation
DM = chr(242) # Data Mark
BRK = chr(243) # Break
IP = chr(244) # Interrupt process
AO = chr(245) # Abort output
AYT = chr(246) # Are You There
EC = chr(247) # Erase Character
EL = chr(248) # Erase Line
GA = chr(249) # Go Ahead
SB = chr(250) # Subnegotiation Begin
# Telnet protocol options code (don't change)
# These ones all come from arpa/telnet.h
BINARY = chr(0) # 8-bit data path
ECHO = chr(1) # echo
RCP = chr(2) # prepare to reconnect
SGA = chr(3) # suppress go ahead
NAMS = chr(4) # approximate message size
STATUS = chr(5) # give status
TM = chr(6) # timing mark
RCTE = chr(7) # remote controlled transmission and echo
NAOL = chr(8) # negotiate about output line width
NAOP = chr(9) # negotiate about output page size
NAOCRD = chr(10) # negotiate about CR disposition
NAOHTS = chr(11) # negotiate about horizontal tabstops
NAOHTD = chr(12) # negotiate about horizontal tab disposition
NAOFFD = chr(13) # negotiate about formfeed disposition
NAOVTS = chr(14) # negotiate about vertical tab stops
NAOVTD = chr(15) # negotiate about vertical tab disposition
NAOLFD = chr(16) # negotiate about output LF disposition
XASCII = chr(17) # extended ascii character set
LOGOUT = chr(18) # force logout
BM = chr(19) # byte macro
DET = chr(20) # data entry terminal
SUPDUP = chr(21) # supdup protocol
SUPDUPOUTPUT = chr(22) # supdup output
SNDLOC = chr(23) # send location
TTYPE = chr(24) # terminal type
EOR = chr(25) # end of record
TUID = chr(26) # TACACS user identification
OUTMRK = chr(27) # output marking
TTYLOC = chr(28) # terminal location number
VT3270REGIME = chr(29) # 3270 regime
X3PAD = chr(30) # X.3 PAD
NAWS = chr(31) # window size
TSPEED = chr(32) # terminal speed
LFLOW = chr(33) # remote flow control
LINEMODE = chr(34) # Linemode option
XDISPLOC = chr(35) # X Display Location
OLD_ENVIRON = chr(36) # Old - Environment variables
AUTHENTICATION = chr(37) # Authenticate
ENCRYPT = chr(38) # Encryption option
NEW_ENVIRON = chr(39) # New - Environment variables
# the following ones come from
# http://www.iana.org/assignments/telnet-options
# Unfortunately, that document does not assign identifiers
# to all of them, so we are making them up
TN3270E = chr(40) # TN3270E
XAUTH = chr(41) # XAUTH
CHARSET = chr(42) # CHARSET
RSP = chr(43) # Telnet Remote Serial Port
COM_PORT_OPTION = chr(44) # Com Port Control Option
SUPPRESS_LOCAL_ECHO = chr(45) # Telnet Suppress Local Echo
TLS = chr(46) # Telnet Start TLS
KERMIT = chr(47) # KERMIT
SEND_URL = chr(48) # SEND-URL
FORWARD_X = chr(49) # FORWARD_X
PRAGMA_LOGON = chr(138) # TELOPT PRAGMA LOGON
SSPI_LOGON = chr(139) # TELOPT SSPI LOGON
PRAGMA_HEARTBEAT = chr(140) # TELOPT PRAGMA HEARTBEAT
EXOPL = chr(255) # Extended-Options-List
NOOPT = chr(0)
class Telnet:
"""Telnet interface class.
An instance of this class represents a connection to a telnet
server. The instance is initially not connected; the open()
method must be used to establish a connection. Alternatively, the
host name and optional port number can be passed to the
constructor, too.
Don't try to reopen an already connected instance.
This class has many read_*() methods. Note that some of them
raise EOFError when the end of the connection is read, because
they can return an empty string for other reasons. See the
individual doc strings.
read_until(expected, [timeout])
Read until the expected string has been seen, or a timeout is
hit (default is no timeout); may block.
read_all()
Read all data until EOF; may block.
read_some()
Read at least one byte or EOF; may block.
read_very_eager()
Read all data available already queued or on the socket,
without blocking.
read_eager()
Read either data already queued or some data available on the
socket, without blocking.
read_lazy()
Read all data in the raw queue (processing it first), without
doing any socket I/O.
read_very_lazy()
Reads all data in the cooked queue, without doing any socket
I/O.
read_sb_data()
Reads available data between SB ... SE sequence. Don't block.
set_option_negotiation_callback(callback)
Each time a telnet option is read on the input flow, this callback
(if set) is called with the following parameters :
callback(telnet socket, command, option)
option will be chr(0) when there is no option.
No other action is done afterwards by telnetlib.
"""
def __init__(self, host=None, port=0):
"""Constructor.
When called without arguments, create an unconnected instance.
With a hostname argument, it connects the instance; a port
number is optional.
"""
self.debuglevel = DEBUGLEVEL
self.host = host
self.port = port
self.sock = None
self.rawq = ''
self.irawq = 0
self.cookedq = ''
self.eof = 0
self.iacseq = '' # Buffer for IAC sequence.
self.sb = 0 # flag for SB and SE sequence.
self.sbdataq = ''
self.option_callback = None
if host is not None:
self.open(host, port)
def open(self, host, port=0):
"""Connect to a host.
The optional second argument is the port number, which
defaults to the standard telnet port (23).
Don't try to reopen an already connected instance.
"""
self.eof = 0
if not port:
port = TELNET_PORT
self.host = host
self.port = port
msg = "getaddrinfo returns an empty list"
for res in socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM):
af, socktype, proto, canonname, sa = res
try:
self.sock = socket.socket(af, socktype, proto)
self.sock.connect(sa)
except socket.error, msg:
if self.sock:
self.sock.close()
self.sock = None
continue
break
if not self.sock:
raise socket.error, msg
def __del__(self):
"""Destructor -- close the connection."""
self.close()
def msg(self, msg, *args):
"""Print a debug message, when the debug level is > 0.
If extra arguments are present, they are substituted in the
message using the standard string formatting operator.
"""
if self.debuglevel > 0:
print 'Telnet(%s,%d):' % (self.host, self.port),
if args:
print msg % args
else:
print msg
def set_debuglevel(self, debuglevel):
"""Set the debug level.
The higher it is, the more debug output you get (on sys.stdout).
"""
self.debuglevel = debuglevel
def close(self):
"""Close the connection."""
if self.sock:
self.sock.close()
self.sock = 0
self.eof = 1
self.iacseq = ''
self.sb = 0
def get_socket(self):
"""Return the socket object used internally."""
return self.sock
def fileno(self):
"""Return the fileno() of the socket object used internally."""
return self.sock.fileno()
def write(self, buffer):
"""Write a string to the socket, doubling any IAC characters.
Can block if the connection is blocked. May raise
socket.error if the connection is closed.
"""
if IAC in buffer:
buffer = buffer.replace(IAC, IAC+IAC)
self.msg("send %s", `buffer`)
self.sock.sendall(buffer)
def read_until(self, match, timeout=None):
"""Read until a given string is encountered or until timeout.
When no match is found, return whatever is available instead,
possibly the empty string. Raise EOFError if the connection
is closed and no cooked data is available.
"""
n = len(match)
self.process_rawq()
i = self.cookedq.find(match)
if i >= 0:
i = i+n
buf = self.cookedq[:i]
self.cookedq = self.cookedq[i:]
return buf
s_reply = ([self], [], [])
s_args = s_reply
if timeout is not None:
s_args = s_args + (timeout,)
while not self.eof and select(*s_args) == s_reply:
i = max(0, len(self.cookedq)-n)
self.fill_rawq()
self.process_rawq()
i = self.cookedq.find(match, i)
if i >= 0:
i = i+n
buf = self.cookedq[:i]
self.cookedq = self.cookedq[i:]
return buf
return self.read_very_lazy()
def read_all(self):
"""Read all data until EOF; block until connection closed."""
self.process_rawq()
while not self.eof:
self.fill_rawq()
self.process_rawq()
buf = self.cookedq
self.cookedq = ''
return buf
def read_some(self):
"""Read at least one byte of cooked data unless EOF is hit.
Return '' if EOF is hit. Block if no data is immediately
available.
"""
self.process_rawq()
while not self.cookedq and not self.eof:
self.fill_rawq()
self.process_rawq()
buf = self.cookedq
self.cookedq = ''
return buf
def read_very_eager(self):
"""Read everything that's possible without blocking in I/O (eager).
Raise EOFError if connection closed and no cooked data
available. Return '' if no cooked data available otherwise.
Don't block unless in the midst of an IAC sequence.
"""
self.process_rawq()
while not self.eof and self.sock_avail():
self.fill_rawq()
self.process_rawq()
return self.read_very_lazy()
def read_eager(self):
"""Read readily available data.
Raise EOFError if connection closed and no cooked data
available. Return '' if no cooked data available otherwise.
Don't block unless in the midst of an IAC sequence.
"""
self.process_rawq()
while not self.cookedq and not self.eof and self.sock_avail():
self.fill_rawq()
self.process_rawq()
return self.read_very_lazy()
def read_lazy(self):
"""Process and return data that's already in the queues (lazy).
Raise EOFError if connection closed and no data available.
Return '' if no cooked data available otherwise. Don't block
unless in the midst of an IAC sequence.
"""
self.process_rawq()
return self.read_very_lazy()
def read_very_lazy(self):
"""Return any data available in the cooked queue (very lazy).
Raise EOFError if connection closed and no data available.
Return '' if no cooked data available otherwise. Don't block.
"""
buf = self.cookedq
self.cookedq = ''
if not buf and self.eof and not self.rawq:
raise EOFError, 'telnet connection closed'
return buf
def read_sb_data(self):
"""Return any data available in the SB ... SE queue.
Return '' if no SB ... SE available. Should only be called
after seeing a SB or SE command. When a new SB command is
found, old unread SB data will be discarded. Don't block.
"""
buf = self.sbdataq
self.sbdataq = ''
return buf
def set_option_negotiation_callback(self, callback):
"""Provide a callback function called after each receipt of a telnet option."""
self.option_callback = callback
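    # Added usage sketch (hedged): a callback that refuses every option, which
    # mirrors what process_rawq() does by default when no callback is set:
    #     def refuse(sock, cmd, opt):
    #         if cmd in (DO, DONT):
    #             sock.sendall(IAC + WONT + opt)
    #         elif cmd in (WILL, WONT):
    #             sock.sendall(IAC + DONT + opt)
    #     tn.set_option_negotiation_callback(refuse)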
def process_rawq(self):
"""Transfer from raw queue to cooked queue.
Set self.eof when connection is closed. Don't block unless in
the midst of an IAC sequence.
"""
buf = ['', '']
try:
while self.rawq:
c = self.rawq_getchar()
if not self.iacseq:
if c == theNULL:
continue
if c == "\021":
continue
if c != IAC:
buf[self.sb] = buf[self.sb] + c
continue
else:
self.iacseq += c
elif len(self.iacseq) == 1:
                    # IAC: IAC CMD [OPTION only for WILL/WONT/DO/DONT]
if c in (DO, DONT, WILL, WONT):
self.iacseq += c
continue
self.iacseq = ''
if c == IAC:
buf[self.sb] = buf[self.sb] + c
else:
if c == SB: # SB ... SE start.
self.sb = 1
self.sbdataq = ''
elif c == SE:
self.sb = 0
self.sbdataq = self.sbdataq + buf[1]
buf[1] = ''
if self.option_callback:
# Callback is supposed to look into
# the sbdataq
self.option_callback(self.sock, c, NOOPT)
else:
# We can't offer automatic processing of
# suboptions. Alas, we should not get any
# unless we did a WILL/DO before.
self.msg('IAC %d not recognized' % ord(c))
elif len(self.iacseq) == 2:
cmd = self.iacseq[1]
self.iacseq = ''
opt = c
if cmd in (DO, DONT):
self.msg('IAC %s %d',
cmd == DO and 'DO' or 'DONT', ord(opt))
if self.option_callback:
self.option_callback(self.sock, cmd, opt)
else:
self.sock.sendall(IAC + WONT + opt)
elif cmd in (WILL, WONT):
self.msg('IAC %s %d',
cmd == WILL and 'WILL' or 'WONT', ord(opt))
if self.option_callback:
self.option_callback(self.sock, cmd, opt)
else:
self.sock.sendall(IAC + DONT + opt)
except EOFError: # raised by self.rawq_getchar()
self.iacseq = '' # Reset on EOF
self.sb = 0
pass
self.cookedq = self.cookedq + buf[0]
self.sbdataq = self.sbdataq + buf[1]
def rawq_getchar(self):
"""Get next char from raw queue.
Block if no data is immediately available. Raise EOFError
when connection is closed.
"""
if not self.rawq:
self.fill_rawq()
if self.eof:
raise EOFError
c = self.rawq[self.irawq]
self.irawq = self.irawq + 1
if self.irawq >= len(self.rawq):
self.rawq = ''
self.irawq = 0
return c
def fill_rawq(self):
"""Fill raw queue from exactly one recv() system call.
Block if no data is immediately available. Set self.eof when
connection is closed.
"""
if self.irawq >= len(self.rawq):
self.rawq = ''
self.irawq = 0
# The buffer size should be fairly small so as to avoid quadratic
# behavior in process_rawq() above
buf = self.sock.recv(50)
self.msg("recv %s", `buf`)
self.eof = (not buf)
self.rawq = self.rawq + buf
def sock_avail(self):
"""Test whether data is available on the socket."""
return select([self], [], [], 0) == ([self], [], [])
def interact(self):
"""Interaction function, emulates a very dumb telnet client."""
if sys.platform == "win32":
self.mt_interact()
return
while 1:
rfd, wfd, xfd = select([self, sys.stdin], [], [])
if self in rfd:
try:
text = self.read_eager()
except EOFError:
print '*** Connection closed by remote host ***'
break
if text:
sys.stdout.write(text)
sys.stdout.flush()
if sys.stdin in rfd:
line = sys.stdin.readline()
if not line:
break
self.write(line)
def mt_interact(self):
"""Multithreaded version of interact()."""
import thread
thread.start_new_thread(self.listener, ())
while 1:
line = sys.stdin.readline()
if not line:
break
self.write(line)
def listener(self):
"""Helper for mt_interact() -- this executes in the other thread."""
while 1:
try:
data = self.read_eager()
except EOFError:
print '*** Connection closed by remote host ***'
return
if data:
sys.stdout.write(data)
else:
sys.stdout.flush()
def expect(self, list, timeout=None):
"""Read until one from a list of a regular expressions matches.
The first argument is a list of regular expressions, either
compiled (re.RegexObject instances) or uncompiled (strings).
The optional second argument is a timeout, in seconds; default
is no timeout.
Return a tuple of three items: the index in the list of the
first regular expression that matches; the match object
returned; and the text read up till and including the match.
If EOF is read and no text was read, raise EOFError.
Otherwise, when nothing matches, return (-1, None, text) where
text is the text received so far (may be the empty string if a
timeout happened).
If a regular expression ends with a greedy match (e.g. '.*')
or if more than one expression can match the same input, the
        results are non-deterministic, and may depend on the I/O timing.
"""
re = None
list = list[:]
indices = range(len(list))
for i in indices:
if not hasattr(list[i], "search"):
if not re: import re
list[i] = re.compile(list[i])
while 1:
self.process_rawq()
for i in indices:
m = list[i].search(self.cookedq)
if m:
e = m.end()
text = self.cookedq[:e]
self.cookedq = self.cookedq[e:]
return (i, m, text)
if self.eof:
break
if timeout is not None:
r, w, x = select([self.fileno()], [], [], timeout)
if not r:
break
self.fill_rawq()
text = self.read_very_lazy()
if not text and self.eof:
raise EOFError
return (-1, None, text)
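# Added usage sketch (hedged, not part of the stdlib module): expect() pairs
# naturally with write(), e.g.
#     tn = Telnet('localhost')
#     tn.write('hello\r\n')
#     idx, match, text = tn.expect(['login: ', 'Password: '], timeout=10)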
def test():
"""Test program for telnetlib.
Usage: python telnetlib.py [-d] ... [host [port]]
Default host is localhost; default port is 23.
"""
debuglevel = 0
while sys.argv[1:] and sys.argv[1] == '-d':
debuglevel = debuglevel+1
del sys.argv[1]
host = 'localhost'
if sys.argv[1:]:
host = sys.argv[1]
port = 0
if sys.argv[2:]:
portstr = sys.argv[2]
try:
port = int(portstr)
except ValueError:
port = socket.getservbyname(portstr, 'tcp')
tn = Telnet()
tn.set_debuglevel(debuglevel)
tn.open(host, port)
tn.interact()
tn.close()
if __name__ == '__main__':
test()
|
{
"content_hash": "db1506acee290b8e6210065029cf155d",
"timestamp": "",
"source": "github",
"line_count": 662,
"max_line_length": 87,
"avg_line_length": 32.720543806646525,
"alnum_prop": 0.5582383084806796,
"repo_name": "neopoly/rubyfox-server",
"id": "812bceabb5bad87af31a47ad577f8ec492a4f696",
"size": "21661",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "lib/rubyfox/server/data/lib/Lib/telnetlib.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "AMPL",
"bytes": "36798"
},
{
"name": "CSS",
"bytes": "3443"
},
{
"name": "HTML",
"bytes": "43018"
},
{
"name": "Java",
"bytes": "41664"
},
{
"name": "JavaScript",
"bytes": "168340"
},
{
"name": "Python",
"bytes": "4026158"
},
{
"name": "Ruby",
"bytes": "5433"
},
{
"name": "Shell",
"bytes": "13948"
}
],
"symlink_target": ""
}
|
from contextlib import contextmanager
import jinja2
import unittest
from nereid.sessions import Session
from nereid.contrib.locale import Babel
from werkzeug.contrib.sessions import FilesystemSessionStore
from nereid import Nereid, current_app
from flask.globals import _request_ctx_stack
class NereidTestApp(Nereid):
"""
A Nereid app which works by removing transaction handling around the wsgi
app
"""
def __init__(self, **config):
super(NereidTestApp, self).__init__(**config)
self.config['WTF_CSRF_ENABLED'] = False
@property
def root_transaction(self):
"""
There is no need of a separate root transaction as everything could
be loaded in the transaction context provided in the test case
"""
@contextmanager
def do_nothing():
yield
return do_nothing()
def load_backend(self):
"""
Just reuse the pool and DB already loaded by the tryton test loader
"""
from trytond.tests.test_tryton import DB, POOL
self._database = DB
self._pool = POOL
def dispatch_request(self):
"""
Skip the transaction handling and call the _dispatch_request
"""
req = _request_ctx_stack.top.request
if req.routing_exception is not None:
self.raise_routing_exception(req)
rule = req.url_rule
# if we provide automatic options for this URL and the
# request came with the OPTIONS method, reply automatically
if getattr(rule, 'provide_automatic_options', False) \
and req.method == 'OPTIONS':
return self.make_default_options_response()
Website = current_app.pool.get('nereid.website')
website = Website.get_from_host(req.host)
locale = website.get_current_locale(req)
_request_ctx_stack.top.website = website.id
_request_ctx_stack.top.locale = locale.id
# pop locale if specified in the view_args
req.view_args.pop('locale', None)
active_id = req.view_args.pop('active_id', None)
return self._dispatch_request(req, locale.language.code, active_id)
def get_app(**options):
app = NereidTestApp()
if 'SECRET_KEY' not in options:
options['SECRET_KEY'] = 'secret-key'
app.config.update(options)
from trytond.tests.test_tryton import DB_NAME
app.config['DATABASE_NAME'] = DB_NAME
app.config['DEBUG'] = True
app.session_interface.session_store = \
FilesystemSessionStore('/tmp', session_class=Session)
# loaders is usually lazy loaded
# Pre-fetch it so that the instance attribute _loaders will exist
app.jinja_loader.loaders
# Initialise the app now
app.initialise()
# Load babel as its a required extension anyway
Babel(app)
return app
class NereidTestCase(unittest.TestCase):
@property
def _templates(self):
if hasattr(self, 'templates'):
return self.templates
return {}
def get_app(self, **options):
app = get_app(**options)
app.jinja_loader._loaders.insert(0, jinja2.DictLoader(self._templates))
return app
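# Added usage sketch (hedged, not part of nereid): a concrete test case would
# typically supply in-memory templates and build the app per test, e.g.
#     class HomeTestCase(NereidTestCase):
#         templates = {'home.jinja': 'hello world'}
#         def test_home(self):
#             app = self.get_app()
#             with app.test_client() as c:
#                 ...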
|
{
"content_hash": "9354ab1ee55ffdf5ef3172886dd6f2f2",
"timestamp": "",
"source": "github",
"line_count": 106,
"max_line_length": 79,
"avg_line_length": 30.09433962264151,
"alnum_prop": 0.6470219435736677,
"repo_name": "prakashpp/nereid",
"id": "d227c76c41a7bc38530a550fcf0b350c7fc169a9",
"size": "3342",
"binary": false,
"copies": "3",
"ref": "refs/heads/develop",
"path": "nereid/testing.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "1246"
},
{
"name": "Makefile",
"bytes": "644"
},
{
"name": "Python",
"bytes": "389690"
}
],
"symlink_target": ""
}
|
'''
Production Configurations
- Use djangosecure
- Use Amazon's S3 for storing static files and uploaded media
- Use sendgrid to send emails
- Use MEMCACHIER on Heroku
'''
from __future__ import absolute_import, unicode_literals
from boto.s3.connection import OrdinaryCallingFormat
from django.utils import six
from .common import * # noqa
# SECRET CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
# Raises ImproperlyConfigured exception if DJANGO_SECRET_KEY not in os.environ
SECRET_KEY = env("DJANGO_SECRET_KEY")
# This ensures that Django will be able to detect a secure connection
# properly on Heroku.
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# django-secure
# ------------------------------------------------------------------------------
INSTALLED_APPS += ("djangosecure", )
MIDDLEWARE_CLASSES = (
# Make sure djangosecure.middleware.SecurityMiddleware is listed first
'djangosecure.middleware.SecurityMiddleware',
) + MIDDLEWARE_CLASSES
# set this to 60 seconds and then to 518400 when you can prove it works
SECURE_HSTS_SECONDS = 60
SECURE_HSTS_INCLUDE_SUBDOMAINS = env.bool("DJANGO_SECURE_HSTS_INCLUDE_SUBDOMAINS", default=True)
SECURE_FRAME_DENY = env.bool("DJANGO_SECURE_FRAME_DENY", default=True)
SECURE_CONTENT_TYPE_NOSNIFF = env.bool("DJANGO_SECURE_CONTENT_TYPE_NOSNIFF", default=True)
SECURE_BROWSER_XSS_FILTER = True
SESSION_COOKIE_SECURE = False
SESSION_COOKIE_HTTPONLY = True
SECURE_SSL_REDIRECT = env.bool("DJANGO_SECURE_SSL_REDIRECT", default=True)
# SITE CONFIGURATION
# ------------------------------------------------------------------------------
# Hosts/domain names that are valid for this site
# See https://docs.djangoproject.com/en/1.6/ref/settings/#allowed-hosts
ALLOWED_HOSTS = ["*"]
# END SITE CONFIGURATION
INSTALLED_APPS += ("gunicorn", )
# STORAGE CONFIGURATION
# ------------------------------------------------------------------------------
# Uploaded Media Files
# ------------------------
# See: http://django-storages.readthedocs.org/en/latest/index.html
INSTALLED_APPS += (
'storages',
)
DEFAULT_FILE_STORAGE = 'storages.backends.s3boto.S3BotoStorage'
AWS_ACCESS_KEY_ID = env('DJANGO_AWS_ACCESS_KEY_ID')
AWS_SECRET_ACCESS_KEY = env('DJANGO_AWS_SECRET_ACCESS_KEY')
AWS_STORAGE_BUCKET_NAME = env('DJANGO_AWS_STORAGE_BUCKET_NAME')
AWS_AUTO_CREATE_BUCKET = True
AWS_QUERYSTRING_AUTH = False
AWS_S3_CALLING_FORMAT = OrdinaryCallingFormat()
# AWS cache settings, don't change unless you know what you're doing:
AWS_EXPIRY = 60 * 60 * 24 * 7
# TODO See: https://github.com/jschneier/django-storages/issues/47
# Revert the following and use str after the above-mentioned bug is fixed in
# either django-storage-redux or boto
AWS_HEADERS = {
'Cache-Control': six.b('max-age=%d, s-maxage=%d, must-revalidate' % (
AWS_EXPIRY, AWS_EXPIRY))
}
# URL that handles the media served from MEDIA_ROOT, used for managing stored files.
MEDIA_URL = 'https://s3.amazonaws.com/%s/' % AWS_STORAGE_BUCKET_NAME
# Static Assets
# ------------------------
STATICFILES_STORAGE = 'whitenoise.django.GzipManifestStaticFilesStorage'
# EMAIL
# ------------------------------------------------------------------------------
DEFAULT_FROM_EMAIL = env('DJANGO_DEFAULT_FROM_EMAIL',
default='contmon <noreply@www.brandverity.com>')
EMAIL_HOST = env("DJANGO_EMAIL_HOST", default='smtp.sendgrid.com')
EMAIL_HOST_PASSWORD = env("SENDGRID_PASSWORD")
EMAIL_HOST_USER = env('SENDGRID_USERNAME')
EMAIL_PORT = env.int("EMAIL_PORT", default=587)
EMAIL_SUBJECT_PREFIX = env("EMAIL_SUBJECT_PREFIX", default='[contmon] ')
EMAIL_USE_TLS = True
SERVER_EMAIL = EMAIL_HOST_USER
# TEMPLATE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/templates/api/#django.template.loaders.cached.Loader
TEMPLATES[0]['OPTIONS']['loaders'] = [
('django.template.loaders.cached.Loader', [
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
]),
]
# DATABASE CONFIGURATION
# ------------------------------------------------------------------------------
# Raises ImproperlyConfigured exception if DATABASE_URL not in os.environ
DATABASES['default'] = env.db("DATABASE_URL")
# CACHING
# ------------------------------------------------------------------------------
try:
    # Only do this here because, thanks to django-pylibmc-sasl and pylibmc,
    # memcacheify is painful to install on Windows.
# See: https://github.com/rdegges/django-heroku-memcacheify
from memcacheify import memcacheify
CACHES = memcacheify()
except ImportError:
CACHES = {
'default': env.cache_url("DJANGO_CACHE_URL", default="memcache://127.0.0.1:11211"),
}
# Your production stuff: Below this line define 3rd party library settings
|
{
"content_hash": "893408be718251aee6c65e83b91e1c94",
"timestamp": "",
"source": "github",
"line_count": 131,
"max_line_length": 101,
"avg_line_length": 37.99236641221374,
"alnum_prop": 0.637532650190878,
"repo_name": "adandan01/contmon",
"id": "f37f52d078ae908589fd3e512a40cbacee31dacf",
"size": "5001",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "config/settings/production.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "2782"
},
{
"name": "HTML",
"bytes": "35817"
},
{
"name": "JavaScript",
"bytes": "36063"
},
{
"name": "Python",
"bytes": "74624"
},
{
"name": "Shell",
"bytes": "3896"
}
],
"symlink_target": ""
}
|
"""
Utility script used to set metadata on a collection after an upload.
Expects metadata in json format as in the adjoining metadata.json file. The format for an
individual subject looks like the following, and dates should be formatted as YYYY-MM-DD.
"023": {
"DOB": "2013-01-04",
"scans": [
["02months", "2013-02-27", 0.716],
["05months", "2013-06-06", 1.15],
["08months", "2013-08-28", 1.64],
["11months", "2013-11-20", 1.85],
["14months", "2014-03-12", 2.24]],
"subject": "023",
"sex": "F"}
This also expects to some degree that the folder structure is consistent with what is here:
https://data.kitware.com/#collection/54b582c38d777f4362aa9cb3/folder/54b582d88d777f4362aa9cb5
which is
subject_id (e.g. 001)
scantime (e.g. 12months)
DTI
AUTOQC
ORIG
sMRI
ORIG
Reg2Atlas
AUTO_MASK
Data upload can be done with the Girder Python client CLI.
"""
import re
import girder_client
import requests.packages.urllib3
requests.packages.urllib3.disable_warnings()
username = ''
password = ''
parent_id = '553e6db18d777f082b5918eb'
port = 443
scheme = 'https'
host = 'data.kitware.com'
def load_metadata(metadata_file):
import json
with open(metadata_file) as json_file:
json_data = json.load(json_file)
return json_data
metadata_file = 'metadata.json'
metadata = load_metadata(metadata_file)
g = girder_client.GirderClient(host, port, scheme=scheme)
g.authenticate(interactive=True)
subject_regex = re.compile(r'^(\d\d\d)$')
subject_scan_age_regex = re.compile(r'^((\d*)(months|weeks))$')
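# Added illustration (hedged): subject folders are expected to be named like
# '023' (matched by subject_regex) and scan folders like '02months' or
# '12weeks' (matched by subject_scan_age_regex).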
def walkGirderTree(ancestorFolderId, parentType='folder', parentFolderName=None):
offset = 0
while True:
folders = g.get('folder', parameters={
'limit': 50,
'offset': offset,
'parentType': parentType,
'parentId': ancestorFolderId
})
thisFolder = g.getFolder(ancestorFolderId)
name = thisFolder['name']
metaFromJson = {}
metadataToUpdate = {}
if parentFolderName is not None:
if parentFolderName == 'scan_data':
newMeta = metadata[name]
metaFromJson = {
u'dob': newMeta['DOB'],
u'folder_type': 'subject',
u'sex': newMeta['sex'],
u'subject_id': newMeta['subject']
}
if 'meta' in thisFolder:
meta = thisFolder['meta']
for key in metaFromJson:
if key not in meta or meta[key] != metaFromJson[key]:
metadataToUpdate[key] = metaFromJson[key]
else:
metadataToUpdate = metaFromJson
else:
# change this based on which subject you are trying to upload
subjectMatches = subject_regex.search('024')
ageMatches = subject_scan_age_regex.search(name)
if subjectMatches and ageMatches:
newMeta = metadata[parentFolderName]
scanMeta = [scan for scan in newMeta['scans'] if scan[0] == name][0]
metaFromJson = {
'dob': newMeta['DOB'],
'sex': newMeta['sex'],
'subject_id': parentFolderName,
'scan_age': scanMeta[0],
'scan_date': scanMeta[1],
'scan_weight_kg': scanMeta[2],
'folder_type': 'scan'
}
metadataToUpdate = {}
if 'meta' in thisFolder:
meta = thisFolder['meta']
for key in metaFromJson:
if key not in meta or meta[key] != metaFromJson[key]:
metadataToUpdate[key] = metaFromJson[key]
else:
metadataToUpdate = metaFromJson
else:
# need to remove any meta here
if 'meta' in thisFolder:
meta = thisFolder['meta']
for key in meta.keys():
metadataToUpdate[key] = None
if metadataToUpdate:
print('would be adding meta to ', thisFolder['name'], thisFolder['_id'])
print(metadataToUpdate)
g.addMetadataToFolder(thisFolder['_id'], metadataToUpdate)
else:
# can't do anything without a parentFolderName
pass
# recurse on children folders
for folder in folders:
walkGirderTree(folder['_id'], 'folder', name)
offset += len(folders)
if len(folders) < 50:
break
walkGirderTree(parent_id, 'folder')
|
{
"content_hash": "7287dfb4a785edc368e5932bc0fe02b4",
"timestamp": "",
"source": "github",
"line_count": 146,
"max_line_length": 93,
"avg_line_length": 33.74657534246575,
"alnum_prop": 0.5317637507611123,
"repo_name": "girder/monkeybrains",
"id": "efcf0b8125a46087d974a5ff2958796073840f6a",
"size": "4950",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "utils/set_single_subj_metadata.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "2587"
},
{
"name": "HTML",
"bytes": "661"
},
{
"name": "JavaScript",
"bytes": "27567"
},
{
"name": "Python",
"bytes": "18013"
}
],
"symlink_target": ""
}
|
from _common import *
from _common import _Generator, _Reshaper
_truths = ['nprongs']
truths = {_truths[x]:x for x in xrange(len(_truths))}
def make_coll(fpath, label=-1):
coll = obj.GenCollection(label=label)
coll.add_categories(['singletons'], fpath)
return coll
class Generator(_Generator):
def __init__(self,
collections, partition='train', batch=256,
label=False,
**kwargs):
super(Generator, self).__init__(collections,
partition,
batch,
['singletons','ptweight_scaled','truth'],
**kwargs)
self.label = label
def __call__(self):
variables = config.gen_default_variables
mus = config.gen_default_mus
sigmas = config.gen_default_sigmas
var_idx = [config.gen_singletons[x] for x in variables]
if (mus is not None) and (sigmas is not None):
mus = np.array(mus)
sigmas = np.array(sigmas)
while True:
inputs = []
outputs = []
weights = []
for c in self.collections:
data = {k:v.data for k,v in next(self.generators[c]).iteritems()}
i = [data['singletons'][:,var_idx]]
if (mus is not None) and (sigmas is not None):
# need to apply some normalization to the inputs:
i[0] -= mus
i[0] /= sigmas
w = [data[c.weight], data[c.weight]]
subbatch_size = w[0].shape[0]
o = [i[0],
np.zeros((subbatch_size, len(self.collections)))]
if self.label:
nprongs = data['truth'][:,truths['nprongs']].astype(np.int)
o.append(nprongs)
w.append(data[c.weight])
inputs.append(i)
outputs.append(o)
weights.append(w)
merged_inputs = []
for j in xrange(1):
merged_inputs.append(np.concatenate([v[j] for v in inputs], axis=0))
merged_outputs = []
merged_weights = []
NOUTPUTS = 2 + int(self.label)
for j in xrange(NOUTPUTS):
merged_outputs.append(np.concatenate([v[j] for v in outputs], axis=0))
merged_weights.append(np.concatenate([v[j] for v in weights], axis=0))
yield merged_inputs, merged_outputs, merged_weights
def generate(*args, **kwargs):
g = Generator(*args, **kwargs)()
while True:
yield next(g)
|
{
"content_hash": "7e0cabd4f29b96cc0563d032951a5f5f",
"timestamp": "",
"source": "github",
"line_count": 76,
"max_line_length": 86,
"avg_line_length": 36.171052631578945,
"alnum_prop": 0.4823572208075664,
"repo_name": "sidnarayanan/BAdNet",
"id": "aad05f94507c1da3c1a850319cfbcc4e5c6c3b1d",
"size": "2749",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/subtlenet/generators/gen_auto.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "326584"
},
{
"name": "Shell",
"bytes": "900"
}
],
"symlink_target": ""
}
|
"""Built-in instance properties."""
import re
from nova import context
from nova import db
from nova import exception
from nova import flags
from nova.openstack.common import log as logging
from nova import utils
FLAGS = flags.FLAGS
LOG = logging.getLogger(__name__)
INVALID_NAME_REGEX = re.compile("[^\w\.\- ]")
def create(name, memory, vcpus, root_gb, ephemeral_gb, flavorid=None,
swap=None, rxtx_factor=None, is_public=True):
"""Creates instance types."""
if flavorid is None:
flavorid = utils.gen_uuid()
if swap is None:
swap = 0
if rxtx_factor is None:
rxtx_factor = 1
kwargs = {
'memory_mb': memory,
'vcpus': vcpus,
'root_gb': root_gb,
'ephemeral_gb': ephemeral_gb,
'swap': swap,
'rxtx_factor': rxtx_factor,
}
# ensure name does not contain any special characters
invalid_name = INVALID_NAME_REGEX.search(name)
if invalid_name:
msg = _("names can only contain [a-zA-Z0-9_.- ]")
raise exception.InvalidInput(reason=msg)
# ensure some attributes are integers and greater than or equal to 0
for option in kwargs:
try:
kwargs[option] = int(kwargs[option])
assert kwargs[option] >= 0
except (ValueError, AssertionError):
msg = _("create arguments must be positive integers")
raise exception.InvalidInput(reason=msg)
# some value are required to be nonzero, not just positive
for option in ['memory_mb', 'vcpus']:
try:
assert kwargs[option] > 0
except AssertionError:
msg = _("create arguments must be positive integers")
raise exception.InvalidInput(reason=msg)
kwargs['name'] = name
# NOTE(vish): Internally, flavorid is stored as a string but it comes
# in through json as an integer, so we convert it here.
kwargs['flavorid'] = unicode(flavorid)
# ensure is_public attribute is boolean
kwargs['is_public'] = utils.bool_from_str(is_public)
try:
return db.instance_type_create(context.get_admin_context(), kwargs)
except exception.DBError, e:
LOG.exception(_('DB error: %s') % e)
raise exception.InstanceTypeCreateFailed()
def destroy(name):
"""Marks instance types as deleted."""
try:
assert name is not None
db.instance_type_destroy(context.get_admin_context(), name)
except (AssertionError, exception.NotFound):
LOG.exception(_('Instance type %s not found for deletion') % name)
raise exception.InstanceTypeNotFoundByName(instance_type_name=name)
def get_all_types(ctxt=None, inactive=False, filters=None):
"""Get all non-deleted instance_types.
Pass true as argument if you want deleted instance types returned also.
"""
if ctxt is None:
ctxt = context.get_admin_context()
inst_types = db.instance_type_get_all(
ctxt, inactive=inactive, filters=filters)
inst_type_dict = {}
for inst_type in inst_types:
inst_type_dict[inst_type['name']] = inst_type
return inst_type_dict
get_all_flavors = get_all_types
def get_default_instance_type():
"""Get the default instance type."""
name = FLAGS.default_instance_type
return get_instance_type_by_name(name)
def get_instance_type(instance_type_id, ctxt=None):
"""Retrieves single instance type by id."""
if instance_type_id is None:
return get_default_instance_type()
if ctxt is None:
ctxt = context.get_admin_context()
return db.instance_type_get(ctxt, instance_type_id)
def get_instance_type_by_name(name, ctxt=None):
"""Retrieves single instance type by name."""
if name is None:
return get_default_instance_type()
if ctxt is None:
ctxt = context.get_admin_context()
return db.instance_type_get_by_name(ctxt, name)
# TODO(termie): flavor-specific code should probably be in the API that uses
# flavors.
def get_instance_type_by_flavor_id(flavorid, ctxt=None, read_deleted="yes"):
"""Retrieve instance type by flavorid.
:raises: FlavorNotFound
"""
if ctxt is None:
ctxt = context.get_admin_context(read_deleted=read_deleted)
return db.instance_type_get_by_flavor_id(ctxt, flavorid)
def get_instance_type_access_by_flavor_id(flavorid, ctxt=None):
"""Retrieve instance type access list by flavor id"""
if ctxt is None:
ctxt = context.get_admin_context()
return db.instance_type_access_get_by_flavor_id(ctxt, flavorid)
def add_instance_type_access(flavorid, projectid, ctxt=None):
"""Add instance type access for project"""
if ctxt is None:
ctxt = context.get_admin_context()
return db.instance_type_access_add(ctxt, flavorid, projectid)
def remove_instance_type_access(flavorid, projectid, ctxt=None):
"""Remove instance type access for project"""
if ctxt is None:
ctxt = context.get_admin_context()
return db.instance_type_access_remove(ctxt, flavorid, projectid)
|
{
"content_hash": "24c41494876bba1c87d87628cf3a634b",
"timestamp": "",
"source": "github",
"line_count": 167,
"max_line_length": 76,
"avg_line_length": 30.37125748502994,
"alnum_prop": 0.6561514195583596,
"repo_name": "usc-isi/nova",
"id": "eb8ac706d0c11ec199a21f60454866d70165dc79",
"size": "5919",
"binary": false,
"copies": "6",
"ref": "refs/heads/hpc-trunk",
"path": "nova/compute/instance_types.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "16002"
},
{
"name": "JavaScript",
"bytes": "7403"
},
{
"name": "Python",
"bytes": "7282590"
},
{
"name": "Shell",
"bytes": "42905"
}
],
"symlink_target": ""
}
|
import autocomplete_light
from models import City
autocomplete_light.register(City, search_fields=("city",))
|
{
"content_hash": "58993760acd7308876e6820403b64734",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 58,
"avg_line_length": 18.666666666666668,
"alnum_prop": 0.7857142857142857,
"repo_name": "Visgean/django-autocomplete-light",
"id": "cf65b5850d5dbeb425edee82d1aa76e190f8b213",
"size": "112",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "test_grappelli/inline_autocomplete/autocomplete_light_registry.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "14611"
},
{
"name": "JavaScript",
"bytes": "2002"
},
{
"name": "Python",
"bytes": "248153"
},
{
"name": "Shell",
"bytes": "1777"
}
],
"symlink_target": ""
}
|
from celebrerclient.common import utils
def Client(version, *args, **kwargs):
module = utils.import_versioned_module(version, 'client')
client_class = getattr(module, 'Client')
return client_class(*args, **kwargs)
|
{
"content_hash": "95435cb49b12fa8ac9490207e8c3a938",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 61,
"avg_line_length": 32.57142857142857,
"alnum_prop": 0.7149122807017544,
"repo_name": "Mirantis/python-celebrerclient",
"id": "23034f65310275ef3d1d75e12b0b364b4dc29880",
"size": "802",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "celebrerclient/client.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "43301"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import, unicode_literals
import collections
import contextlib
import logging
import operator
from mopidy import compat, exceptions, models
from mopidy.compat import urllib
from mopidy.internal import deprecation, validation
logger = logging.getLogger(__name__)
@contextlib.contextmanager
def _backend_error_handling(backend, reraise=None):
try:
yield
except exceptions.ValidationError as e:
logger.error('%s backend returned bad data: %s',
backend.actor_ref.actor_class.__name__, e)
except Exception as e:
if reraise and isinstance(e, reraise):
raise
logger.exception('%s backend caused an exception.',
backend.actor_ref.actor_class.__name__)
class LibraryController(object):
pykka_traversable = True
def __init__(self, backends, core):
self.backends = backends
self.core = core
def _get_backend(self, uri):
uri_scheme = urllib.parse.urlparse(uri).scheme
return self.backends.with_library.get(uri_scheme, None)
def _get_backends_to_uris(self, uris):
if uris:
backends_to_uris = collections.defaultdict(list)
for uri in uris:
backend = self._get_backend(uri)
if backend is not None:
backends_to_uris[backend].append(uri)
else:
backends_to_uris = dict([
(b, None) for b in self.backends.with_library.values()])
return backends_to_uris
def browse(self, uri):
"""
Browse directories and tracks at the given ``uri``.
``uri`` is a string which represents some directory belonging to a
        backend. To get the initial root directories for backends pass
:class:`None` as the URI.
Returns a list of :class:`mopidy.models.Ref` objects for the
directories and tracks at the given ``uri``.
The :class:`~mopidy.models.Ref` objects representing tracks keep the
track's original URI. A matching pair of objects can look like this::
Track(uri='dummy:/foo.mp3', name='foo', artists=..., album=...)
Ref.track(uri='dummy:/foo.mp3', name='foo')
The :class:`~mopidy.models.Ref` objects representing directories have
backend specific URIs. These are opaque values, so no one but the
backend that created them should try and derive any meaning from them.
The only valid exception to this is checking the scheme, as it is used
to route browse requests to the correct backend.
For example, the dummy library's ``/bar`` directory could be returned
like this::
Ref.directory(uri='dummy:directory:/bar', name='bar')
:param string uri: URI to browse
:rtype: list of :class:`mopidy.models.Ref`
.. versionadded:: 0.18
"""
if uri is None:
return self._roots()
elif not uri.strip():
return []
validation.check_uri(uri)
return self._browse(uri)
def _roots(self):
directories = set()
backends = self.backends.with_library_browse.values()
futures = {b: b.library.root_directory for b in backends}
for backend, future in futures.items():
with _backend_error_handling(backend):
root = future.get()
validation.check_instance(root, models.Ref)
directories.add(root)
return sorted(directories, key=operator.attrgetter('name'))
def _browse(self, uri):
scheme = urllib.parse.urlparse(uri).scheme
backend = self.backends.with_library_browse.get(scheme)
if not backend:
return []
with _backend_error_handling(backend):
result = backend.library.browse(uri).get()
validation.check_instances(result, models.Ref)
return result
return []
def get_distinct(self, field, query=None):
"""
List distinct values for a given field from the library.
This has mainly been added to support the list commands the MPD
protocol supports in a more sane fashion. Other frontends are not
recommended to use this method.
:param string field: One of ``track``, ``artist``, ``albumartist``,
``album``, ``composer``, ``performer``, ``date`` or ``genre``.
:param dict query: Query to use for limiting results, see
:meth:`search` for details about the query format.
:rtype: set of values corresponding to the requested field type.
.. versionadded:: 1.0
"""
validation.check_choice(field, validation.DISTINCT_FIELDS)
query is None or validation.check_query(query) # TODO: normalize?
result = set()
futures = {b: b.library.get_distinct(field, query)
for b in self.backends.with_library.values()}
for backend, future in futures.items():
with _backend_error_handling(backend):
values = future.get()
if values is not None:
validation.check_instances(values, compat.text_type)
result.update(values)
return result
def get_images(self, uris):
"""Lookup the images for the given URIs
Backends can use this to return image URIs for any URI they know about
be it tracks, albums, playlists. The lookup result is a dictionary
mapping the provided URIs to lists of images.
Unknown URIs or URIs the corresponding backend couldn't find anything
for will simply return an empty list for that URI.
:param uris: list of URIs to find images for
:type uris: list of string
:rtype: {uri: tuple of :class:`mopidy.models.Image`}
.. versionadded:: 1.0
"""
validation.check_uris(uris)
futures = {
backend: backend.library.get_images(backend_uris)
for (backend, backend_uris)
in self._get_backends_to_uris(uris).items() if backend_uris}
results = {uri: tuple() for uri in uris}
for backend, future in futures.items():
with _backend_error_handling(backend):
if future.get() is None:
continue
validation.check_instance(future.get(), collections.Mapping)
for uri, images in future.get().items():
if uri not in uris:
raise exceptions.ValidationError(
'Got unknown image URI: %s' % uri)
validation.check_instances(images, models.Image)
results[uri] += tuple(images)
return results
def find_exact(self, query=None, uris=None, **kwargs):
"""Search the library for tracks where ``field`` is ``values``.
.. deprecated:: 1.0
Use :meth:`search` with ``exact`` set.
"""
deprecation.warn('core.library.find_exact')
return self.search(query=query, uris=uris, exact=True, **kwargs)
def lookup(self, uri=None, uris=None):
"""
Lookup the given URIs.
If the URI expands to multiple tracks, the returned list will contain
them all.
:param uri: track URI
:type uri: string or :class:`None`
:param uris: track URIs
:type uris: list of string or :class:`None`
:rtype: list of :class:`mopidy.models.Track` if uri was set or
{uri: list of :class:`mopidy.models.Track`} if uris was set.
.. versionadded:: 1.0
The ``uris`` argument.
.. deprecated:: 1.0
The ``uri`` argument. Use ``uris`` instead.
"""
if sum(o is not None for o in [uri, uris]) != 1:
raise ValueError('Exactly one of "uri" or "uris" must be set')
uris is None or validation.check_uris(uris)
uri is None or validation.check_uri(uri)
if uri:
deprecation.warn('core.library.lookup:uri_arg')
if uri is not None:
uris = [uri]
futures = {}
results = {u: [] for u in uris}
# TODO: lookup(uris) to backend APIs
for backend, backend_uris in self._get_backends_to_uris(uris).items():
for u in backend_uris:
futures[(backend, u)] = backend.library.lookup(u)
for (backend, u), future in futures.items():
with _backend_error_handling(backend):
result = future.get()
if result is not None:
validation.check_instances(result, models.Track)
results[u] = result
if uri:
return results[uri]
return results
def refresh(self, uri=None):
"""
Refresh library. Limit to URI and below if an URI is given.
:param uri: directory or track URI
:type uri: string
"""
uri is None or validation.check_uri(uri)
futures = {}
backends = {}
uri_scheme = urllib.parse.urlparse(uri).scheme if uri else None
for backend_scheme, backend in self.backends.with_library.items():
backends.setdefault(backend, set()).add(backend_scheme)
for backend, backend_schemes in backends.items():
if uri_scheme is None or uri_scheme in backend_schemes:
futures[backend] = backend.library.refresh(uri)
for backend, future in futures.items():
with _backend_error_handling(backend):
future.get()
def search(self, query=None, uris=None, exact=False, **kwargs):
"""
Search the library for tracks where ``field`` contains ``values``.
``field`` can be one of ``uri``, ``track_name``, ``album``, ``artist``,
``albumartist``, ``composer``, ``performer``, ``track_no``, ``genre``,
``date``, ``comment`` or ``any``.
If ``uris`` is given, the search is limited to results from within the
URI roots. For example passing ``uris=['file:']`` will limit the search
to the local backend.
Examples::
# Returns results matching 'a' in any backend
search({'any': ['a']})
# Returns results matching artist 'xyz' in any backend
search({'artist': ['xyz']})
# Returns results matching 'a' and 'b' and artist 'xyz' in any
# backend
search({'any': ['a', 'b'], 'artist': ['xyz']})
# Returns results matching 'a' if within the given URI roots
# "file:///media/music" and "spotify:"
search({'any': ['a']}, uris=['file:///media/music', 'spotify:'])
# Returns results matching artist 'xyz' and 'abc' in any backend
search({'artist': ['xyz', 'abc']})
:param query: one or more queries to search for
:type query: dict
:param uris: zero or more URI roots to limit the search to
:type uris: list of string or :class:`None`
:param exact: if the search should use exact matching
:type exact: :class:`bool`
:rtype: list of :class:`mopidy.models.SearchResult`
.. versionadded:: 1.0
The ``exact`` keyword argument, which replaces :meth:`find_exact`.
.. deprecated:: 1.0
Previously, if the query was empty, and the backend could support
it, all available tracks were returned. This has not changed, but
it is strongly discouraged. No new code should rely on this
behavior.
.. deprecated:: 1.1
Providing the search query via ``kwargs`` is no longer supported.
"""
query = _normalize_query(query or kwargs)
uris is None or validation.check_uris(uris)
query is None or validation.check_query(query)
validation.check_boolean(exact)
if kwargs:
deprecation.warn('core.library.search:kwargs_query')
if not query:
deprecation.warn('core.library.search:empty_query')
futures = {}
for backend, backend_uris in self._get_backends_to_uris(uris).items():
futures[backend] = backend.library.search(
query=query, uris=backend_uris, exact=exact)
# Some of our tests check for LookupError to catch bad queries. This is
# silly and should be replaced with query validation before passing it
# to the backends.
reraise = (TypeError, LookupError)
results = []
for backend, future in futures.items():
try:
with _backend_error_handling(backend, reraise=reraise):
result = future.get()
if result is not None:
validation.check_instance(result, models.SearchResult)
results.append(result)
except TypeError:
backend_name = backend.actor_ref.actor_class.__name__
logger.warning(
'%s does not implement library.search() with "exact" '
'support. Please upgrade it.', backend_name)
return results
def _normalize_query(query):
broken_client = False
# TODO: this breaks if query is not a dictionary like object...
for (field, values) in query.items():
if isinstance(values, compat.string_types):
broken_client = True
query[field] = [values]
if broken_client:
logger.warning(
'A client or frontend made a broken library search. Values in '
'queries must be lists of strings, not a string. Please check what'
' sent this query and file a bug. Query: %s', query)
if not query:
logger.warning(
'A client or frontend made a library search with an empty query. '
'This is strongly discouraged. Please check what sent this query '
'and file a bug.')
return query
|
{
"content_hash": "8d192547cb04fa7ae8c1f0a917598230",
"timestamp": "",
"source": "github",
"line_count": 375,
"max_line_length": 79,
"avg_line_length": 37.432,
"alnum_prop": 0.5872337394030064,
"repo_name": "quartz55/mopidy",
"id": "30064e5af9be7c9c7146576828d8316051ed6f15",
"size": "14037",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "mopidy/core/library.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "610"
},
{
"name": "Groff",
"bytes": "573"
},
{
"name": "HTML",
"bytes": "805"
},
{
"name": "JavaScript",
"bytes": "82060"
},
{
"name": "Python",
"bytes": "1153759"
},
{
"name": "Shell",
"bytes": "556"
}
],
"symlink_target": ""
}
|
"""
WSGI config for test_project project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'test_project.settings')
application = get_wsgi_application()
|
{
"content_hash": "33c6638941e7f3e50e336076da6caef6",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 78,
"avg_line_length": 25.0625,
"alnum_prop": 0.770573566084788,
"repo_name": "anx-hnezbeda/anexia-monitoring-django",
"id": "4fd53dc0201d9c919a397aa3ada1f6f13d3ca9aa",
"size": "401",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_project/test_project/wsgi.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "8092"
}
],
"symlink_target": ""
}
|
import inspect
import os
import traceback
import logging
import socket
import sys
from datetime import datetime
import subprocess
try:
import json
except ImportError:
import simplejson as json
class LogstashFormatterBase(logging.Formatter):
def __init__(self, server_type=None, module=None, tags=None, fqdn=False, **kwargs):
self.tags = tags if tags is not None else []
self.kwargs = kwargs
if fqdn:
self.host = socket.getfqdn()
else:
self.host = socket.gethostname()
self.server_type = server_type
self.module = module
def get_extra_fields(self, record):
# The list contains all the attributes listed in
# http://docs.python.org/library/logging.html#logrecord-attributes
skip_list = (
'args', 'asctime', 'created', 'exc_info', 'exc_text', 'filename',
'funcName', 'id', 'levelname', 'levelno', 'lineno', 'module',
'msecs', 'msecs', 'message', 'msg', 'name', 'pathname', 'process',
'processName', 'relativeCreated', 'thread', 'threadName', 'extra')
if sys.version_info < (3, 0):
easy_types = (basestring, bool, dict, float, int, long, list, type(None))
else:
easy_types = (str, bool, dict, float, int, list, type(None))
fields = {}
for key, value in record.__dict__.items():
if key not in skip_list:
if isinstance(value, easy_types):
fields[key] = value
else:
fields[key] = repr(value)
return fields
def get_debug_fields(self, record):
fields = {
'exc_info': self.format_exception(record.exc_info),
'lineno': record.lineno,
'process': record.process,
'threadName': record.threadName,
}
# funcName was added in 2.5
        if getattr(record, 'funcName', None):
fields['funcName'] = record.funcName
# processName was added in 2.6
        if getattr(record, 'processName', None):
fields['processName'] = record.processName
return fields
@classmethod
def format_source(cls, message_type, host, path):
return "%s://%s/%s" % (message_type, host, path)
@classmethod
def format_timestamp(cls, time):
tstamp = datetime.utcfromtimestamp(time)
return tstamp.strftime("%Y-%m-%dT%H:%M:%S") + ".%03d" % (tstamp.microsecond / 1000) + "Z"
@classmethod
def format_exception(cls, exc_info):
return ''.join(traceback.format_exception(*exc_info)) if exc_info else ''
@classmethod
def serialize(cls, message):
if sys.version_info < (3, 0):
return json.dumps(message)
else:
return bytes(json.dumps(message), 'utf-8')
class LogstashFormatterVersion0(LogstashFormatterBase):
version = 0
def format(self, record):
# Create message dict
message = {
'@timestamp': self.format_timestamp(record.created),
'@message': record.getMessage(),
'@source': self.format_source(self.message_type, self.host,
record.pathname),
'@source_host': self.host,
'@source_path': record.pathname,
'@tags': self.tags,
'@type': self.message_type,
'@fields': {
'levelname': record.levelname,
'logger': record.name,
},
}
# Add extra fields
message['@fields'].update(self.get_extra_fields(record))
# If exception, add debug info
if record.exc_info:
message['@fields'].update(self.get_debug_fields(record))
return self.serialize(message)
class LogstashFormatterVersion1(LogstashFormatterBase):
def format(self, record):
# Create message dict
message = {
'@timestamp': self.format_timestamp(record.created),
'@version': 1,
'message': record.getMessage(),
'host': self.host,
'path': record.pathname,
'tags': self.tags,
'type': self.message_type,
# Extra Fields
'levelname': record.levelname,
'logger': record.name,
}
# Add extra fields
message.update(self.get_extra_fields(record))
# If exception, add debug info
if record.exc_info:
message.update(self.get_debug_fields(record))
return self.serialize(message)
class MiniLogstashFormatter(LogstashFormatterBase):
def format_base(self, record):
# Create message dict
message = {
'@timestamp': self.format_timestamp(record.created),
}
if self.module:
message['module'] = self.module
if self.server_type:
message['server_type'] = self.server_type
# Add configured fields
message.update(self.kwargs)
# Add extra fields
message.update(self.get_extra_fields(record))
# If exception, add debug info
if record.exc_info:
message.update(self.get_debug_fields(record))
# Update fields after all others, in case the user accidentally used one of them as an extra field
message.update({'message': record.getMessage(),
'host': self.host,
'type': '%s_%s' % (self.module, record.getMessage()),
# Extra Fields
'levelname': record.levelname})
return message
def format(self, record):
return self.serialize(self.format_base(record))
class AWSLogstashFormatter(MiniLogstashFormatter):
def __init__(self, cwd=None, aws_access_key_id=None, aws_secret_access_key=None, **kwargs):
# import here so only users of the class are required to install the packages
import boto
import boto.ec2
import boto.exception
import boto.utils
MiniLogstashFormatter.__init__(self, **kwargs)
self.ec2_tags = {}
try:
metadata = boto.utils.get_instance_metadata(timeout=1)
instance_id = metadata['instance-id']
region = metadata['placement']['availability-zone'][:-1] # us-east-1c -> us-east-1
ec2_con = boto.ec2.connect_to_region(region, aws_access_key_id=aws_access_key_id,
aws_secret_access_key=aws_secret_access_key)
inst = ec2_con.get_only_instances([instance_id])[0]
tags = dict(env_tag=inst.tags["Environment"], server_type_tag=inst.tags["Name"])
self.ec2_tags.update(tags)
except (boto.exception.StandardError, IndexError, KeyError):
raise
self.commit_hash = subprocess.check_output(['git', 'log', '-n1', '--format=%h'], cwd=cwd).strip()
# get the calling module's repository root directory name
repo_path = subprocess.check_output(['git', 'rev-parse', '--show-toplevel'], cwd=cwd).strip()
self.repo_path = os.path.split(repo_path)[1]
def format(self, record):
msg = self.format_base(record)
msg.update(self.ec2_tags)
msg['commit'] = self.commit_hash
msg['repository'] = self.repo_path
return self.serialize(msg)
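# Minimal usage sketch for the formatters above; in practice the formatter is
# attached to a logstash handler, but the record and field values below are
# illustrative and use only the standard logging module:
#   import logging
#   fmt = MiniLogstashFormatter(module='example', server_type='web')
#   record = logging.LogRecord('example', logging.WARNING, __file__, 0,
#                              'something happened', None, None)
#   print(fmt.format(record))  # serialized JSON ready to ship to logstash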
|
{
"content_hash": "a46850d239475268f47f5ec5e9001145",
"timestamp": "",
"source": "github",
"line_count": 217,
"max_line_length": 106,
"avg_line_length": 34.02304147465438,
"alnum_prop": 0.5755113097656779,
"repo_name": "stopmachine/python-logstash",
"id": "e61d35b4d031d3184b4183975e6bffe990198f92",
"size": "7383",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "logstash/formatter.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "24919"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from collections import namedtuple
from janome.tokenizer import Tokenizer
_TOKENIZER = Tokenizer()
def tokenize(text):
'''
    Decompose a Unicode string into morphemes.
'''
return [token.surface for token in _TOKENIZER.tokenize(text)]
class Trove(namedtuple('Trove',
['lineno', 'position', 'query', 'line'])):
pass
class StreamDetector(object):
'''
    Tokenize the text one line at a time and check whether it matches the query.
'''
def __init__(self, query, inverse=False):
self.tokens = tokenize(query)
self.inverse = inverse
self._query = query
self._lineno = 1
def _match_morpheme(self, tokens):
for index, expect_token in enumerate(self.tokens):
target_token = tokens[index]
if target_token != expect_token:
return False
return True
def _find_morpheme(self, tokens):
for index in range(len(tokens) - len(self.tokens) + 1):
is_match = self._match_morpheme(tokens[index:])
if not is_match:
continue
            # The tokens match here, so return the position
return index
else:
            # Nothing matched (the loop never broke), so treat it as a false positive and return -1
return -1
def _find(self, line):
        # First do a rough check for a match at the plain-string level
position = line.find(self._query)
if position == -1:
return -1
        # If a likely candidate is found, examine it in detail with the morphological analyzer
tokens = tokenize(line)
index = self._find_morpheme(tokens)
if index == -1:
return -1
return position
def feed(self, line):
position = self._find(line)
if self.inverse:
'''
            In inverse mode we need to return the lines that did NOT match.
            There is no meaningful match position, so leave it as None.
'''
position = -1 if position != -1 else None
if position == -1:
return None
return Trove(self._lineno, position, self._query, line)
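# Minimal usage sketch (the query and the input line are illustrative):
#   detector = StreamDetector('東京都')
#   trove = detector.feed('東京都はいい天気です')
#   if trove is not None:
#       print(trove.lineno, trove.position, trove.line)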
|
{
"content_hash": "035ea54edafb98e3adb470e990d14747",
"timestamp": "",
"source": "github",
"line_count": 79,
"max_line_length": 65,
"avg_line_length": 24.746835443037973,
"alnum_prop": 0.5652173913043478,
"repo_name": "momijiame/jpgrep",
"id": "be7b04b0c745fbafd8b64d698722a7b8318d8f23",
"size": "2392",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "jpgrep/morpheme.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "16961"
}
],
"symlink_target": ""
}
|
import sys
if sys.version_info[0] == 2:
from ConfigParser import RawConfigParser
if sys.version_info[0] >= 3:
from configparser import RawConfigParser
import json
import requests
# read configuration file
config_file_name = "usermanagement.config"
config = RawConfigParser()
config.read(config_file_name)
# server parameters
host = config.get("server", "host")
endpoint = config.get("server", "endpoint")
# enterprise parameters
domain = config.get("enterprise", "domain")
org_id = config.get("enterprise", "org_id")
api_key = config.get("enterprise", "api_key")
access_token = config.get("enterprise", "access_token")
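# The script assumes usermanagement.config has roughly the following layout
# (section and option names come from the config.get() calls above; all values
# are placeholders):
#   [server]
#   host = usermanagement.example.com
#   endpoint = <endpoint path>
#   [enterprise]
#   domain = example.com
#   org_id = <organization id>
#   api_key = <api key>
#   access_token = <access token>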
# method parameters
url = "https://" + host + endpoint + "/action/" + org_id
headers = {
"Content-type" : "application/json",
"Accept" : "application/json",
"x-api-key" : api_key,
"Authorization" : "Bearer " + access_token
}
json_data = \
[
{
"user" : "john.doe@" + domain,
"do" : [
{
"removeFromOrg" : {}
}
]
}
]
# prepare body
body = json.dumps(json_data)
print("Sending following request body to User Management Action API: " + body)
# send http request
res = requests.post(url, headers=headers, data=body)
# print response
print(res.status_code)
print(res.headers)
print(res.text)
# parse response body
if res.status_code == 200:
res_json_data = json.loads(res.text)
result = res_json_data["result"]
if result == "success":
print("Success");
exit(res.status_code)
|
{
"content_hash": "c9be0e068fa6842833ad85df4f94eb49",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 78,
"avg_line_length": 22.753846153846155,
"alnum_prop": 0.665314401622718,
"repo_name": "adobe-apiplatform/umapi-documentation",
"id": "544ac5565458a54e20f2d0cf803ef96cc396a727",
"size": "2086",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "samples/RemoveFromOrg.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
}
|
from __future__ import print_function, division
import json
from glob import glob
import fitsio
import numpy as np
import matplotlib as mpl
mpl.use('Agg')
from matplotlib import pyplot as plt
from matplotlib import gridspec
from matplotlib.ticker import NullLocator
from .extern import ADR, Hyper_PSF3D_PL
from .main import REFWAVE, SPAXEL_SIZE
__all__ = ["plot_timeseries", "plot_epoch", "plot_sn", "plot_adr"]
BAND_LIMITS = {'U': (3400., 3900.),
'B': (4102., 5100.),
'V': (6289., 7607.)}
COLORMAP = 'bone'
def plot_timeseries(cubes, results, band=None, fname=None, title='Timeseries'):
"""Return a figure showing data and model.
Parameters
----------
cubes : list of DataCube
results : dict
Dictionary of dictionaries. Dictionaries represent result
after each step in the fit. Each dictionary contains keys
'galaxy' (3-d array), 'snctr' (tuple), 'epochs' (structured array).
band : str
Band over which to flatten 3-d cubes
fname : str
Output file name
"""
nt = len(cubes)
cube_shape = cubes[0].data.shape
wave = cubes[0].wave
# Set up figure and axes grid.
ncol = nt + 3 # one column for each data epoch, 2 for model,
# 1 for blank space.
nrow = 2 + 2 * len(results) # one row for the data, two for each
# step in the fit (model and residual),
# one more for colorbar at the bottom
left, right = 0.4, 0.2 # border in inches
bottom, top = 0.5, 0.3 # border in inches
wspace, hspace = 0.2, 0.2 # spacing in inches
heights = nrow * [0.9]
widths = ncol * [0.9]
heights[-1] = 0.1 # shrink colorbar height
widths[2] = 0.1 # width of blank space between model and epochs
# set up figure and subplot grid.
figwidth = left + right + sum(widths) + (ncol - 1) * wspace
figheight = bottom + top + sum(heights) + (nrow - 1) * hspace
fig = plt.figure(figsize=(figwidth, figheight))
gs = gridspec.GridSpec(nrow, ncol, width_ratios=widths,
height_ratios=heights)
gs.update(left=(left / figwidth),
right=(1.0 - right / figwidth),
bottom=(bottom / figheight),
top=(1.0 - top / figheight),
wspace=(wspace / figwidth * (ncol - 1)),
hspace=(hspace / figheight * (nrow - 1)))
# upper and lower wavelength limits (default is a 1000 Angstrom wide
# band in the middle of the cube)
if band is None:
wmid = (wave[0] + wave[-1]) / 2.0
wmin, wmax = wmid - 500.0, wmid + 500.0
else:
wmin, wmax = BAND_LIMITS[band]
wavemask = (wave > wmin) & (wave < wmax)
# Plot data for each epoch, keeping track of vmin/vmax for each.
dataims = []
datavmin = np.zeros(nt)
datavmax = np.zeros(nt)
for i_t, cube in enumerate(cubes):
dataim = np.sum(cube.data[wavemask, :, :], axis=0)
datavmax[i_t] = 1.1*np.max(dataim)
datavmin[i_t] = -0.2*np.max(dataim)
ax = plt.subplot(gs[0, i_t + 3])
ax.imshow(dataim, vmin=datavmin[i_t], vmax=datavmax[i_t],
cmap=COLORMAP, interpolation='nearest', origin='lower')
ax.xaxis.set_major_locator(NullLocator())
ax.yaxis.set_major_locator(NullLocator())
ax.set_title("epoch {:d}".format(i_t), fontsize=12)
dataims.append(dataim)
if i_t == 0:
ax.set_ylabel('data', fontsize=12)
# evaluate all scenes and residuals first, so we can set vmin/vmax
# uniformly in the residuals (based on last row)
scenes = []
residuals = []
masks = []
for result in results.values():
epochs = result['epochs']
scenerow = []
residualrow = []
maskrow = []
for i_t in range(nt):
galeval = epochs['galeval'][i_t]
sneval = epochs['sneval'][i_t]
sky = epochs['sky'][i_t, :, None, None]
scene = sky + galeval + sneval
sceneim = np.sum(scene[wavemask, :, :], axis=0)
residim = dataims[i_t] - sceneim
weightim = np.sum(cubes[i_t].weight[wavemask, :, :], axis=0)
mask = weightim > 0.
scenerow.append(sceneim)
residualrow.append(residim)
maskrow.append(mask)
scenes.append(scenerow)
residuals.append(residualrow)
masks.append(maskrow)
# Set residual vmin/vmax based on *last* row.
residvmin = np.zeros(nt)
residvmax = np.zeros(nt)
for i_t in range(nt):
vals = residuals[-1][i_t][masks[-1][i_t]]
std = np.std(vals)
residvmin[i_t] = -3. * std
residvmax[i_t] = 3. * std
for j, (key, result) in enumerate(results.items()):
galaxy = result['galaxy']
epochs = result['epochs']
# galaxy model
image = np.sum(galaxy[wavemask, :, :], axis=0)
ax = plt.subplot(gs[2*j+1:2*j+3, 0:2])
ax.imshow(image, cmap=COLORMAP, interpolation='nearest',
origin='lower')
ax.xaxis.set_major_locator(NullLocator())
ax.yaxis.set_major_locator(NullLocator())
ax.set_ylabel(key, fontsize=18)
if j == len(results)-1:
ax.set_xlabel("galaxy model", fontsize=12)
# scene and residual for each epoch
for i_t in range(nt):
# scene
ax1 = plt.subplot(gs[2*j+1, i_t+3])
ax1.imshow(scenes[j][i_t], vmin=datavmin[i_t], vmax=datavmax[i_t],
cmap=COLORMAP, interpolation='nearest', origin='lower')
ax1.xaxis.set_major_locator(NullLocator())
ax1.yaxis.set_major_locator(NullLocator())
# residual
ax2 = plt.subplot(gs[2*j+2, i_t+3])
rp = ax2.imshow(residuals[j][i_t], vmin=residvmin[i_t],
vmax=residvmax[i_t], cmap=COLORMAP,
interpolation='nearest', origin='lower')
ax2.xaxis.set_major_locator(NullLocator())
ax2.yaxis.set_major_locator(NullLocator())
# label first column
if i_t == 0:
ax1.set_ylabel('scene model', fontsize=11)
ax2.set_ylabel('residual', fontsize=11)
# colorbar (last row only)
if j == len(results) - 1:
ticklocs = [residvmin[i_t], 0., residvmax[i_t]]
scenemax = scenes[j][i_t].max()
ticklabels = ['%d%%' % round(100 * residvmin[i_t] / scenemax),
'0%',
'%d%%' % round(100 * residvmax[i_t] / scenemax)]
tickalign =['left', 'center', 'right']
cb = fig.colorbar(rp, cax=plt.subplot(gs[2*j+3, i_t+3]),
ticks=ticklocs, orientation='horizontal')
cb.ax.set_xticklabels(ticklabels, fontsize=10)
for k, label in enumerate(cb.ax.get_xticklabels()):
label.set_horizontalalignment(tickalign[k])
# figure text
fig.text(left / figwidth, (figheight - top) / figheight, title,
horizontalalignment='left', verticalalignment='top', fontsize=20)
fig.text(left / figwidth, (figheight - top - 0.3) / figheight,
"${:d} \\AA - {:d} \\AA$".format(int(wmin), int(wmax)),
horizontalalignment='left', verticalalignment='top', fontsize=16)
residtextx = 0.5 * (left + sum(widths[0:3]) + 3*wspace + figwidth - right)
residtexty = 0.1
fig.text(residtextx / figwidth, residtexty / figheight,
"residual / max(scene)", horizontalalignment='center',
verticalalignment='baseline')
if fname is None:
return fig
plt.savefig(fname)
plt.close()
def plot_epoch(cube, epoch, fname=None):
"""Return a figure with diagnostic plots for one epoch
Parameters
----------
cube : DataCube
epoch : structured ndarray
One row from the table of result['epochs'].
"""
data = cube.data
weight = cube.weight
wave = cube.wave
numslices = 5
# plot parameters in physical units (in)
left = 0.5
right = 0.2
bottom = 0.6
top = 0.3
wspace = 0.2
hspace = 0.2
height = 1.3 # stamp height
widths = [1.3, 1.3, 1.3, 0.1, 0.8, 4.2] # column widths: [data,
# model, resid, colorbar,
# (blank), spectrum]
numcols = len(widths)
numrows = numslices + 1
# set up figure and subplot grid.
figwidth = left + right + sum(widths) + (numcols - 1) * wspace
figheight = bottom + top + numrows * height + (numrows - 1) * hspace
fig = plt.figure(figsize=(figwidth, figheight))
gs = gridspec.GridSpec(numrows, numcols, width_ratios=widths)
gs.update(left=(left / figwidth),
right=(1.0 - right / figwidth),
bottom=(bottom / figheight),
top=(1.0 - top / figheight),
wspace=(wspace / figwidth * (numcols - 1)),
hspace=(hspace / figheight * (numrows - 1)))
# First row of stamps
data_plot = plt.subplot(gs[0, 0])
model_plot = plt.subplot(gs[0, 1])
resid_plot = plt.subplot(gs[0, 2])
wmin, wmax = wave[0], wave[-1]
wavemask = (wave > wmin) & (wave < wmax)
dataim = np.average(data[wavemask, :, :], axis=0)
galeval = epoch['galeval']
sneval = epoch['sneval']
sky = epoch['sky'][:, None, None]
scene = sky + galeval + sneval
sceneim = np.average(scene[wavemask, :, :], axis=0)
residim = dataim - sceneim
datavmax = 1.1*np.max(dataim)
datavmin = -0.2*np.max(dataim)
weightim = np.sum(weight[wavemask, :, :], axis=0)
mask = weightim > 0.
vals = residim[mask]
std = np.std(vals)
residvmin = -3. * std
residvmax = 3. * std
data_plot.imshow(dataim, cmap=COLORMAP, vmin=datavmin, vmax=datavmax,
interpolation='nearest', origin='lower')
model_plot.imshow(sceneim, cmap=COLORMAP, vmin=datavmin, vmax=datavmax,
interpolation='nearest', origin='lower')
rp = resid_plot.imshow(residim, cmap=COLORMAP,
vmin=residvmin, vmax=residvmax,
interpolation='nearest', origin='lower')
cb = fig.colorbar(rp, cax=plt.subplot(gs[0, 3]),
ticks=[residvmin, 0, residvmax])
cb.ax.set_yticklabels(['%.0f%%' % (100*residvmin/np.max(dataim)), '0%',
'%.0f%%' % (100*residvmax/np.max(dataim))],
fontsize='small')
data_plot.xaxis.set_major_locator(NullLocator())
data_plot.yaxis.set_major_locator(NullLocator())
model_plot.xaxis.set_major_locator(NullLocator())
model_plot.yaxis.set_major_locator(NullLocator())
resid_plot.xaxis.set_major_locator(NullLocator())
resid_plot.yaxis.set_major_locator(NullLocator())
data_plot.set_ylabel('all\nwavelengths')
data_plot.set_title('Data')
model_plot.set_title('Model')
resid_plot.set_title('Residual')
    metaslices = np.linspace(0, len(wave), numslices + 1).astype(int)
for i in range(numslices):
sliceindices = np.arange(metaslices[i], metaslices[i+1], dtype=int)
dataslice = np.average(data[sliceindices, :, :], axis=0)
sceneslice = np.average(scene[sliceindices, :, :], axis=0)
residslice = dataslice - sceneslice
vmin, vmax = -.2*np.max(dataslice), 1.1*np.max(dataslice)
data_plot = plt.subplot(gs[i+1, 0])
model_plot = plt.subplot(gs[i+1, 1])
resid_plot = plt.subplot(gs[i+1, 2])
residmax = 3. * np.std(residslice[mask])
residmin = -residmax
data_plot.imshow(dataslice, cmap=COLORMAP, vmin=vmin, vmax=vmax,
interpolation='nearest', origin='lower')
model_plot.imshow(sceneslice, cmap=COLORMAP, vmin=vmin, vmax=vmax,
interpolation='nearest', origin='lower')
rp = resid_plot.imshow(residslice, cmap=COLORMAP, vmin=residmin,
vmax=residmax, interpolation='nearest',
origin='lower')
data_plot.xaxis.set_major_locator(NullLocator())
data_plot.yaxis.set_major_locator(NullLocator())
model_plot.xaxis.set_major_locator(NullLocator())
model_plot.yaxis.set_major_locator(NullLocator())
resid_plot.xaxis.set_major_locator(NullLocator())
resid_plot.yaxis.set_major_locator(NullLocator())
cb = fig.colorbar(rp, cax=plt.subplot(gs[i+1, 3]),
ticks=[residmin, 0, residmax])
cb.ax.set_yticklabels(['%.0f%%' % (100*residmin/np.max(dataim)),
'0%',
'%.0f%%' % (100*residmax/np.max(dataim))],
fontsize='small')
data_plot.set_ylabel('%d -\n %d $\AA$' % (wave[metaslices[i]],
wave[metaslices[i+1]-1]))
spec = plt.subplot(gs[:, 5])
spec.plot(wave, epoch['sn'], label='SN spectrum')
spec.plot(wave, epoch['sky'], label='Sky spectrum')
gal_ave = galeval.sum(axis=(1,2))
spec.plot(wave, gal_ave, label = 'Galaxy spectrum (sum)')
spec.set_xlim(wave[0], wave[-1])
spec.legend(fontsize=9, frameon=False)
spec.set_xlabel("wavelength ($\\AA$)")
if fname is None:
return fig
plt.savefig(fname)
plt.close()
def plot_sn(filenames, sn_spectra, wave, idrfilenames, outfname):
"""Return a figure with the SN
Parameters
----------
fname : str
Output file name
"""
sn_max = sn_spectra.max()
day_exp_nums = [fname.split('_')[1:4] for fname in filenames]
phase_strings = [fname.split('_')[-2] for fname in idrfilenames]
print(phase_strings)
phases = [((-1 if phase_string[0] == 'M' else 1) *
float(phase_string[1:])/1000.)
for phase_string in phase_strings]
phase_sort = np.array(phases).argsort()
fig = plt.figure(figsize=(7,8))
for p, phase_arg in enumerate(phase_sort):
file = idrfilenames[phase_arg]
phase = phases[phase_arg]
with fitsio.FITS(file, 'r') as f:
header = f[0].read_header()
data = f[0].read()
variance = f[1].read()
n = header["NAXIS1"]
        #crpix = header["CRPIX1"]-1.0 # FITS is 1-indexed, numpy is 0-indexed
crval = header["CRVAL1"]
cdelt = header["CDELT1"]
sn_wave = crval + cdelt * (np.arange(n)) # - crpix)
file_day_exp = header["FILENAME"].split('_')[1:4]
i_t_match = np.flatnonzero(np.array([day_exp == file_day_exp for
day_exp in day_exp_nums]))
plt.plot(sn_wave, data/sn_max + p/2., color='k')
for i_t in i_t_match:
plt.plot(wave, sn_spectra[i_t]/sn_max + p/2., color='r')
plt.text(sn_wave[-20], p/2., 'Phase = '+str(phase))
plt.savefig(outfname)
plt.close()
def plot_adr(cubes, wave, fname=None):
"""Plot adr x and y vs. wavelength, and x vs y
Parameters
----------
    cubes : list of DataCube
        Used for header values only.
    wave : 1-d array
    fname : str, optional
        Output file name; if omitted, the figure is returned.
"""
nt = len(cubes)
fig = plt.figure()
yplot = plt.subplot2grid((2, 2), (0, 0))
xplot = plt.subplot2grid((2, 2), (0, 1))
xyplot = plt.subplot2grid((2, 2), (1, 0), colspan=2)
cm = plt.get_cmap("jet")
for i in range(nt):
# following lines same as in main.cubefit()
delta, theta = Hyper_PSF3D_PL.predict_adr_params(cubes[i].header)
adr = ADR(cubes[i].header['PRESSURE'], cubes[i].header['TEMP'],
lref=REFWAVE, delta=delta, theta=theta)
adr_refract = adr.refract(0, 0, wave, unit=SPAXEL_SIZE)
xctr, yctr = adr_refract
yplot.plot(wave, yctr, color=cm(i/nt))
xplot.plot(wave, xctr, color=cm(i/nt))
xyplot.plot(xctr, yctr, color=cm(i/nt))
yplot.set_ylabel('dY (spaxels)')
xplot.set_ylabel('dX (spaxels)')
yplot.set_xlabel('wavelength ($\\AA$)')
xplot.set_xlabel('wavelength ($\\AA$)')
xyplot.set_xlabel('dX')
xyplot.set_ylabel('dY')
plt.tight_layout()
if fname is None:
return fig
plt.savefig(fname)
plt.close()
|
{
"content_hash": "36fb1b7363b5d2ff0442fa46da84f701",
"timestamp": "",
"source": "github",
"line_count": 451,
"max_line_length": 79,
"avg_line_length": 36.59423503325942,
"alnum_prop": 0.5569558894813379,
"repo_name": "kbarbary/cubefit",
"id": "b3505b61d1d20becffbc3d75d7eac4db9b18df93",
"size": "16504",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cubefit/plotting.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "2346"
},
{
"name": "Python",
"bytes": "120851"
}
],
"symlink_target": ""
}
|
"""VMRC console drivers."""
import base64
import json
from nova import exception
from nova import flags
from nova import log as logging
from nova.virt.vmwareapi import vim_util
FLAGS = flags.FLAGS
flags.DEFINE_integer('console_vmrc_port',
443,
"port for VMware VMRC connections")
flags.DEFINE_integer('console_vmrc_error_retries',
10,
"number of retries for retrieving VMRC information")
class VMRCConsole(object):
"""VMRC console driver with ESX credentials."""
def __init__(self):
super(VMRCConsole, self).__init__()
@property
def console_type(self):
return 'vmrc+credentials'
def get_port(self, context):
"""Get available port for consoles."""
return FLAGS.console_vmrc_port
def setup_console(self, context, console):
"""Sets up console."""
pass
def teardown_console(self, context, console):
"""Tears down console."""
pass
def init_host(self):
"""Perform console initialization."""
pass
def fix_pool_password(self, password):
"""Encode password."""
# TODO(sateesh): Encrypt pool password
return password
def generate_password(self, vim_session, pool, instance_name):
"""Returns VMRC Connection credentials.
Return string is of the form '<VM PATH>:<ESX Username>@<ESX Password>'.
"""
username, password = pool['username'], pool['password']
vms = vim_session._call_method(vim_util, 'get_objects',
'VirtualMachine', ['name', 'config.files.vmPathName'])
vm_ds_path_name = None
vm_ref = None
for vm in vms:
vm_name = None
ds_path_name = None
for prop in vm.propSet:
if prop.name == 'name':
vm_name = prop.val
elif prop.name == 'config.files.vmPathName':
ds_path_name = prop.val
if vm_name == instance_name:
vm_ref = vm.obj
vm_ds_path_name = ds_path_name
break
if vm_ref is None:
raise exception.InstanceNotFound(instance_id=instance_name)
json_data = json.dumps({'vm_id': vm_ds_path_name,
'username': username,
'password': password})
return base64.b64encode(json_data)
def is_otp(self):
"""Is one time password or not."""
return False
class VMRCSessionConsole(VMRCConsole):
"""VMRC console driver with VMRC One Time Sessions."""
def __init__(self):
super(VMRCSessionConsole, self).__init__()
@property
def console_type(self):
return 'vmrc+session'
def generate_password(self, vim_session, pool, instance_name):
"""Returns a VMRC Session.
Return string is of the form '<VM MOID>:<VMRC Ticket>'.
"""
vms = vim_session._call_method(vim_util, 'get_objects',
'VirtualMachine', ['name'])
        vm_ref = None
for vm in vms:
if vm.propSet[0].val == instance_name:
vm_ref = vm.obj
if vm_ref is None:
raise exception.InstanceNotFound(instance_id=instance_name)
virtual_machine_ticket = \
vim_session._call_method(
vim_session._get_vim(),
'AcquireCloneTicket',
vim_session._get_vim().get_service_content().sessionManager)
json_data = json.dumps({'vm_id': str(vm_ref.value),
'username': virtual_machine_ticket,
'password': virtual_machine_ticket})
return base64.b64encode(json_data)
def is_otp(self):
"""Is one time password or not."""
return True
|
{
"content_hash": "594dd3a10051eeb2861537fe95aea4e7",
"timestamp": "",
"source": "github",
"line_count": 123,
"max_line_length": 79,
"avg_line_length": 31.121951219512194,
"alnum_prop": 0.5634796238244514,
"repo_name": "termie/nova-migration-demo",
"id": "cc8b0cdf57916b0f6e912f96227964c73c23dafe",
"size": "4522",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "nova/console/vmrc.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "47238"
},
{
"name": "Python",
"bytes": "2431410"
},
{
"name": "Shell",
"bytes": "31459"
}
],
"symlink_target": ""
}
|
import matplotlib.pyplot as plt
import numpy as np
from scipy import stats
npoints = 20 # number of integer support points of the distribution minus 1
npointsh = npoints / 2
npointsf = float(npoints)
nbound = 4 # bounds for the truncated normal
normbound = (1 + 1 / npointsf) * nbound # actual bounds of truncated normal
grid = np.arange(-npointsh, npointsh + 2, 1) # integer grid
gridlimitsnorm = (grid - 0.5) / npointsh * nbound # bin limits for the truncnorm
gridlimits = grid - 0.5
grid = grid[:-1]
probs = np.diff(stats.truncnorm.cdf(gridlimitsnorm, -normbound, normbound))
gridint = grid
normdiscrete = stats.rv_discrete(
values=(gridint, np.round(probs, decimals=7)),
name='normdiscrete')
n_sample = 500
np.random.seed(87655678) # fix the seed for replicability
rvs = normdiscrete.rvs(size=n_sample)
rvsnd = rvs
f, l = np.histogram(rvs, bins=gridlimits)
sfreq = np.vstack([gridint, f, probs * n_sample]).T
fs = sfreq[:, 1] / float(n_sample)
ft = sfreq[:, 2] / float(n_sample)
nd_std = np.sqrt(normdiscrete.stats(moments='v'))
ind = gridint # the x locations for the groups
width = 0.35 # the width of the bars
plt.subplot(111)
rects1 = plt.bar(ind, ft, width, color='b')
rects2 = plt.bar(ind + width, fs, width, color='r')
normline = plt.plot(ind + width / 2.0, stats.norm.pdf(ind, scale=nd_std),
color='b')
plt.ylabel('Frequency')
plt.title('Frequency and Probability of normdiscrete')
plt.xticks(ind + width, ind)
plt.legend((rects1[0], rects2[0]), ('true', 'sample'))
plt.show()
|
{
"content_hash": "d1f555435c5da0fb44e0c62ad5a07b8b",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 81,
"avg_line_length": 34.79545454545455,
"alnum_prop": 0.6949706074461136,
"repo_name": "DailyActie/Surrogate-Model",
"id": "b442409fccbe4d0e6a57f833addd6c1408ca6dd8",
"size": "1531",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "01-codes/scipy-master/doc/source/tutorial/examples/normdiscr_plot1.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Awk",
"bytes": "345"
},
{
"name": "Batchfile",
"bytes": "18746"
},
{
"name": "C",
"bytes": "13004913"
},
{
"name": "C++",
"bytes": "14692003"
},
{
"name": "CMake",
"bytes": "72831"
},
{
"name": "CSS",
"bytes": "303488"
},
{
"name": "Fortran",
"bytes": "7339415"
},
{
"name": "HTML",
"bytes": "854774"
},
{
"name": "Java",
"bytes": "38854"
},
{
"name": "JavaScript",
"bytes": "2432846"
},
{
"name": "Jupyter Notebook",
"bytes": "829689"
},
{
"name": "M4",
"bytes": "1379"
},
{
"name": "Makefile",
"bytes": "48708"
},
{
"name": "Matlab",
"bytes": "4346"
},
{
"name": "Objective-C",
"bytes": "567"
},
{
"name": "PHP",
"bytes": "93585"
},
{
"name": "Pascal",
"bytes": "1449"
},
{
"name": "Perl",
"bytes": "1152272"
},
{
"name": "PowerShell",
"bytes": "17042"
},
{
"name": "Python",
"bytes": "34668203"
},
{
"name": "Roff",
"bytes": "5925"
},
{
"name": "Ruby",
"bytes": "92498"
},
{
"name": "Shell",
"bytes": "94698"
},
{
"name": "TeX",
"bytes": "156540"
},
{
"name": "TypeScript",
"bytes": "41691"
}
],
"symlink_target": ""
}
|
import argparse
from log.dagda_logger import DagdaLogger
class CheckCLIParser:
# -- Public methods
# CheckDockerCLIParser Constructor
def __init__(self):
super(CheckCLIParser, self).__init__()
self.parser = DagdaCheckParser(prog='dagda.py check', usage=check_parser_text)
self.parser.add_argument('-i', '--docker_image', type=str)
self.parser.add_argument('-c', '--container_id', type=str)
self.args, self.unknown = self.parser.parse_known_args()
# Verify command line arguments
status = self.verify_args(self.args)
if status != 0:
exit(status)
# -- Getters
# Gets docker image name
def get_docker_image_name(self):
return self.args.docker_image
# Gets docker container id
def get_container_id(self):
return self.args.container_id
# -- Static methods
# Verify command line arguments
@staticmethod
def verify_args(args):
if not args.container_id and not args.docker_image:
DagdaLogger.get_logger().error('Missing arguments.')
return 1
elif args.container_id and args.docker_image:
DagdaLogger.get_logger().error('Arguments --docker_image/--container_id: Both arguments '
'can not be together.')
return 2
# Else
return 0
# Custom parser
class DagdaCheckParser(argparse.ArgumentParser):
# Overrides the error method
def error(self, message):
self.print_usage()
exit(2)
# Overrides the format help method
def format_help(self):
return check_parser_text
# Custom text
check_parser_text = '''usage: dagda.py check [-h] [-i DOCKER_IMAGE] [-c CONTAINER_ID]
Your personal docker security analyzer.
Optional Arguments:
-h, --help show this help message and exit
-i DOCKER_IMAGE, --docker_image DOCKER_IMAGE
the input docker image name
-c CONTAINER_ID, --container_id CONTAINER_ID
the input docker container id
'''
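# Example invocations of this sub-command (the image name and container id are
# illustrative placeholders):
#   python dagda.py check -i <docker_image_name>
#   python dagda.py check -c <container_id>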
|
{
"content_hash": "8408c50c1416657a06b7327f19e9bf28",
"timestamp": "",
"source": "github",
"line_count": 73,
"max_line_length": 101,
"avg_line_length": 28.63013698630137,
"alnum_prop": 0.6196172248803827,
"repo_name": "eliasgranderubio/dagda",
"id": "64ee5f3cbae1511c08de88779ef2e0f1703e9dc5",
"size": "2845",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dagda/cli/command/check_cli_parser.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "233"
},
{
"name": "Makefile",
"bytes": "71"
},
{
"name": "Python",
"bytes": "400625"
},
{
"name": "Shell",
"bytes": "1874"
}
],
"symlink_target": ""
}
|
import copy
from oslo_config import cfg
from oslo_config import fixture as config_fixture
from neutron_lib.api.definitions import portbindings
from neutron_lib.plugins.ml2 import api
from neutron.tests import base
NETWORK_ID = "fake_network"
PORT_ID = "fake_port"
class FakeNetworkContext(api.NetworkContext):
def __init__(self, segments):
self._network_segments = segments
@property
def current(self):
return {'id': NETWORK_ID}
@property
def original(self):
return None
@property
def network_segments(self):
return self._network_segments
class FakePortContext(api.PortContext):
def __init__(self, agent_type, agents, segments,
vnic_type=portbindings.VNIC_NORMAL,
original=None, profile=None):
self._agent_type = agent_type
self._agents = agents
self._network_context = FakeNetworkContext(segments)
self._bound_vnic_type = vnic_type
self._bound_profile = profile
self._bound_segment_id = None
self._bound_vif_type = None
self._bound_vif_details = None
self._original = original
self._binding_levels = []
@property
def current(self):
current_data = {'id': PORT_ID,
portbindings.VNIC_TYPE: self._bound_vnic_type,
portbindings.PROFILE: self._bound_profile}
ret_value = current_data
if self._original:
ret_value = copy.deepcopy(self.original)
ret_value.update(current_data)
return ret_value
@property
def original(self):
return self._original
@property
def status(self):
return 'DOWN'
@property
def original_status(self):
return None
@property
def network(self):
return self._network_context
def _prepare_to_bind(self, segments_to_bind):
self._segments_to_bind = segments_to_bind
self._new_bound_segment = None
self._next_segments_to_bind = None
def _push_binding_level(self, binding_level):
self._binding_levels.append(binding_level)
def _pop_binding_level(self):
return self._binding_levels.pop()
@property
def binding_levels(self):
if self._binding_levels:
return [{
api.BOUND_DRIVER: level.driver,
api.BOUND_SEGMENT: self._expand_segment(level.segment_id)
} for level in self._binding_levels]
@property
def original_binding_levels(self):
return None
@property
def top_bound_segment(self):
if self._binding_levels:
return self._expand_segment(self._binding_levels[0].segment_id)
@property
def original_top_bound_segment(self):
return None
@property
def bottom_bound_segment(self):
if self._binding_levels:
return self._expand_segment(self._binding_levels[-1].segment_id)
@property
def original_bottom_bound_segment(self):
return None
def _expand_segment(self, segment_id):
for segment in self._network_context.network_segments:
            if segment[api.ID] == segment_id:
return segment
@property
def host(self):
return ''
@property
def original_host(self):
return None
@property
def vif_type(self):
return portbindings.UNBOUND
@property
def original_vif_type(self):
return portbindings.UNBOUND
@property
def vif_details(self):
return None
@property
def original_vif_details(self):
return None
@property
def segments_to_bind(self):
return self._network_context.network_segments
def host_agents(self, agent_type):
if agent_type == self._agent_type:
return self._agents
else:
return []
def set_binding(self, segment_id, vif_type, vif_details):
self._bound_segment_id = segment_id
self._bound_vif_type = vif_type
self._bound_vif_details = vif_details
def continue_binding(self, segment_id, next_segments_to_bind):
pass
def allocate_dynamic_segment(self, segment):
pass
def release_dynamic_segment(self, segment_id):
pass
class MechDriverConfFixture(config_fixture.Config):
def __init__(self, conf=cfg.CONF, prohibit_list_cfg=None,
registration_func=None):
"""ConfigFixture for vnic_type_prohibit_list
:param conf: The driver configuration object
:param prohibit_list_cfg: A dictionary in the form
{'group': {'opt': 'value'}}, i.e.:
{'OVS_DRIVER': {'vnic_type_prohibit_list':
['foo']}}
        :param registration_func: The method which does the config group's
                                  registration.
"""
super(MechDriverConfFixture, self).__init__(conf)
self.prohibit_list_cfg = prohibit_list_cfg
self.registration_func = registration_func
def setUp(self):
super(MechDriverConfFixture, self).setUp()
self.registration_func(self.conf)
for group, option in self.prohibit_list_cfg.items():
self.config(group=group, **option)
class AgentMechanismBaseTestCase(base.BaseTestCase):
# The following must be overridden for the specific mechanism
# driver being tested:
VIF_TYPE = None
VIF_DETAILS = None
AGENT_TYPE = None
AGENTS = None
AGENTS_DEAD = None
AGENTS_BAD = None
VNIC_TYPE = portbindings.VNIC_NORMAL
def _check_unbound(self, context):
self.assertIsNone(context._bound_segment_id)
self.assertIsNone(context._bound_vif_type)
self.assertIsNone(context._bound_vif_details)
def _check_bound(self, context, segment):
self.assertEqual(context._bound_segment_id, segment[api.ID])
self.assertEqual(context._bound_vif_type, self.VIF_TYPE)
vif_details = context._bound_vif_details
self.assertIsNotNone(vif_details)
        # NOTE(r-mibu): The following five lines are just for backward
        # compatibility. In this class, HAS_PORT_FILTER has been replaced
        # by VIF_DETAILS, which can be set to the expected vif_details to
        # check, but not all uses of HAS_PORT_FILTER in subclasses have
        # been replaced yet.
if self.VIF_DETAILS is None:
expected = getattr(self, 'CAP_PORT_FILTER', None)
port_filter = vif_details[portbindings.CAP_PORT_FILTER]
self.assertEqual(expected, port_filter)
return
self.assertEqual(self.VIF_DETAILS, vif_details)
class AgentMechanismGenericTestCase(AgentMechanismBaseTestCase):
UNKNOWN_TYPE_SEGMENTS = [{api.ID: 'unknown_segment_id',
api.NETWORK_TYPE: 'no_such_type',
api.NETWORK_ID: 'fake_network_id'}]
def test_unknown_type(self):
self.context = FakePortContext(self.AGENT_TYPE, self.AGENTS,
self.UNKNOWN_TYPE_SEGMENTS,
vnic_type=self.VNIC_TYPE)
context = self.context
self.driver.bind_port(context)
self._check_unbound(context)
def test_driver_not_responsible_for_ports_allocation(self):
agents = [
{'configurations': {'rp_bandwidths': {'eth0': {}}},
'host': 'host',
'agent_type': self.AGENT_TYPE,
},
]
profile = {}
segments = []
port_ctx = FakePortContext(
self.AGENT_TYPE,
agents,
segments,
vnic_type=portbindings.VNIC_DIRECT,
profile=profile)
self.assertFalse(
self.driver.responsible_for_ports_allocation(port_ctx))
class AgentMechanismLocalTestCase(AgentMechanismBaseTestCase):
LOCAL_SEGMENTS = [{api.ID: 'unknown_segment_id',
api.NETWORK_TYPE: 'no_such_type',
api.NETWORK_ID: 'fake_network_id'},
{api.ID: 'local_segment_id',
api.NETWORK_TYPE: 'local',
api.NETWORK_ID: 'fake_network_id'}]
def test_type_local(self):
context = FakePortContext(self.AGENT_TYPE,
self.AGENTS,
self.LOCAL_SEGMENTS,
vnic_type=self.VNIC_TYPE)
self.driver.bind_port(context)
self._check_bound(context, self.LOCAL_SEGMENTS[1])
def test_type_local_dead(self):
context = FakePortContext(self.AGENT_TYPE,
self.AGENTS_DEAD,
self.LOCAL_SEGMENTS,
vnic_type=self.VNIC_TYPE)
self.driver.bind_port(context)
self._check_unbound(context)
class AgentMechanismFlatTestCase(AgentMechanismBaseTestCase):
FLAT_SEGMENTS = [{api.ID: 'unknown_segment_id',
api.NETWORK_TYPE: 'no_such_type',
api.NETWORK_ID: 'fake_network_id'},
{api.ID: 'flat_segment_id',
api.NETWORK_TYPE: 'flat',
api.PHYSICAL_NETWORK: 'fake_physical_network',
api.NETWORK_ID: 'fake_network_id'}]
def test_type_flat(self):
context = FakePortContext(self.AGENT_TYPE,
self.AGENTS,
self.FLAT_SEGMENTS,
vnic_type=self.VNIC_TYPE)
self.driver.bind_port(context)
self._check_bound(context, self.FLAT_SEGMENTS[1])
def test_type_flat_bad(self):
context = FakePortContext(self.AGENT_TYPE,
self.AGENTS_BAD,
self.FLAT_SEGMENTS,
vnic_type=self.VNIC_TYPE)
self.driver.bind_port(context)
self._check_unbound(context)
class AgentMechanismVlanTestCase(AgentMechanismBaseTestCase):
VLAN_SEGMENTS = [{api.ID: 'unknown_segment_id',
api.NETWORK_TYPE: 'no_such_type',
api.NETWORK_ID: 'fake_network_id'},
{api.ID: 'vlan_segment_id',
api.NETWORK_TYPE: 'vlan',
api.PHYSICAL_NETWORK: 'fake_physical_network',
api.SEGMENTATION_ID: 1234,
api.NETWORK_ID: 'fake_network_id'}]
def test_type_vlan(self):
context = FakePortContext(self.AGENT_TYPE,
self.AGENTS,
self.VLAN_SEGMENTS,
vnic_type=self.VNIC_TYPE)
self.driver.bind_port(context)
self._check_bound(context, self.VLAN_SEGMENTS[1])
def test_type_vlan_bad(self):
context = FakePortContext(self.AGENT_TYPE,
self.AGENTS_BAD,
self.VLAN_SEGMENTS,
vnic_type=self.VNIC_TYPE)
self.driver.bind_port(context)
self._check_unbound(context)
class AgentMechanismGreTestCase(AgentMechanismBaseTestCase):
GRE_SEGMENTS = [{api.ID: 'unknown_segment_id',
api.NETWORK_TYPE: 'no_such_type',
api.NETWORK_ID: 'fake_network_id'},
{api.ID: 'gre_segment_id',
api.NETWORK_TYPE: 'gre',
api.SEGMENTATION_ID: 1234,
api.NETWORK_ID: 'fake_network_id'}]
def test_type_gre(self):
context = FakePortContext(self.AGENT_TYPE,
self.AGENTS,
self.GRE_SEGMENTS)
self.driver.bind_port(context)
self._check_bound(context, self.GRE_SEGMENTS[1])
def test_type_gre_bad(self):
context = FakePortContext(self.AGENT_TYPE,
self.AGENTS_BAD,
self.GRE_SEGMENTS)
self.driver.bind_port(context)
self._check_unbound(context)
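# --- Editor's illustrative sketch (not part of the original module) ---
# A concrete mechanism-driver test case typically subclasses one of the
# classes above and fills in the required class attributes. The driver
# module and constants below are hypothetical placeholders.
#
#   class FakeDriverLocalTestCase(AgentMechanismLocalTestCase):
#       VIF_TYPE = portbindings.VIF_TYPE_OVS
#       VIF_DETAILS = {portbindings.CAP_PORT_FILTER: True}
#       AGENT_TYPE = 'Fake agent'
#       AGENTS = [{'alive': True, 'configurations': {}, 'host': 'host'}]
#       AGENTS_DEAD = [{'alive': False, 'configurations': {}, 'host': 'host'}]
#       AGENTS_BAD = [{'alive': True, 'configurations': None, 'host': 'host'}]
#
#       def setUp(self):
#           super(FakeDriverLocalTestCase, self).setUp()
#           self.driver = fake_mech_driver.FakeMechanismDriver()  # hypothetical
#           self.driver.initialize()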
|
{
"content_hash": "1c61079335eb3aba48d76ea52d060485",
"timestamp": "",
"source": "github",
"line_count": 359,
"max_line_length": 76,
"avg_line_length": 34.04456824512535,
"alnum_prop": 0.5659466535755195,
"repo_name": "openstack/neutron",
"id": "c2b2989f14ebeb16b4aed04dfcd7f98746830465",
"size": "12862",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "neutron/tests/unit/plugins/ml2/_test_mech_agent.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jinja",
"bytes": "2773"
},
{
"name": "Mako",
"bytes": "1047"
},
{
"name": "Python",
"bytes": "15932611"
},
{
"name": "Ruby",
"bytes": "1257"
},
{
"name": "Shell",
"bytes": "83270"
}
],
"symlink_target": ""
}
|
""" Django settings for bugbug project.
"""
import os
PROJECT_PATH = os.path.abspath(os.path.dirname(__file__))
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', 'your_email@example.com'),
)
MANAGERS = ADMINS
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = []
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://example.com/media/", "http://media.example.com/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/var/www/example.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://example.com/static/", "http://static.example.com/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
os.path.join(PROJECT_PATH, "static"),
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'sd1gwi1jjs23qhu+z*rhjm4^gwxibdd52bmy00(rbs9lc-nso+'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'bugbug.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'bugbug.wsgi.application'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
os.path.join(PROJECT_PATH, 'templates')
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Uncomment the next line to enable the admin:
'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
'django.contrib.admindocs',
'teams',
'homepage',
'meets',
'courses',
'predictions',
'runners',
'results',
'functional_tests',
'libs',
'south',
'haystack',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
HAYSTACK_CONNECTIONS = {
'default': {
'ENGINE': 'haystack.backends.elasticsearch_backend.ElasticsearchSearchEngine',
'URL': 'http://127.0.0.1:9200/',
'INDEX_NAME': 'haystack',
}
}
# Parse database configuration from $DATABASE_URL
import dj_database_url
DATABASES = {
"default": dj_database_url.config(default="postgres://turbobug:butte@localhost:5432/bugbug_dev")
}
# Honor the 'X-Forwarded-Proto' header for request.is_secure()
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# Allow all host headers
ALLOWED_HOSTS = ['*']
# Static asset configuration
import os
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
STATIC_ROOT = 'staticfiles'
STATIC_URL = '/static/'
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'static'),
)
|
{
"content_hash": "c9056e726b59dea5f4160c2eb8ec339c",
"timestamp": "",
"source": "github",
"line_count": 190,
"max_line_length": 100,
"avg_line_length": 31.042105263157893,
"alnum_prop": 0.6968463886063072,
"repo_name": "ColCarroll/bugbug",
"id": "61bd010e9c6a0ff21621788466bc1867b65a04dc",
"size": "5898",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bugbug/settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "214027"
},
{
"name": "JavaScript",
"bytes": "106778"
},
{
"name": "Python",
"bytes": "52381"
}
],
"symlink_target": ""
}
|
from __future__ import (nested_scopes, generators, division, absolute_import, with_statement,
print_function, unicode_literals)
from pants.option.ranked_value import RankedValue
class OptionValueContainer(object):
"""A container for option values.
Implements the following functionality:
1) Attribute forwarding.
An attribute can be registered as forwarding to another attribute, and attempts
     to read the source attribute will return the value of the target attribute.
This is necessary so we can qualify registered options by the scope that registered them,
to allow re-registration in inner scopes. This is best explained by example:
Say that in global scope we register an option with two names: [-f, --foo], which writes its
value to the attribute foo. Then in the compile scope we re-register --foo but leave -f alone.
The re-registered --foo will also write to attribute foo. So now -f, which in the compile
scope is unrelated to --foo, can still stomp on its value.
With attribute forwarding we can have the global scope option write to _DEFAULT_foo__, and
the re-registered option to _COMPILE_foo__, and then have the 'f' and 'foo' attributes
forward, appropriately.
Note that only reads are forwarded. The target of the forward must be written to directly.
If the source attribute is set directly, this overrides any forwarding.
2) Value ranking.
Attribute values can be ranked, so that a given attribute's value can only be changed if
the new value has at least as high a rank as the old value. This allows an option value in
an outer scope to override that option's value in an inner scope, when the outer scope's
value comes from a higher ranked source (e.g., the outer value comes from an env var and
the inner one from config).
See ranked_value.py for more details.
Note that this container is suitable for passing as the namespace argument to argparse's
parse_args() method.
"""
def __init__(self):
self._forwardings = {} # src attribute name -> target attribute name.
def add_forwardings(self, forwardings):
"""Add attribute forwardings.
Will overwrite existing forwardings with the same source attributes.
:param forwardings: A map of source attribute name -> attribute to read source's value from.
"""
self._forwardings.update(forwardings)
def update(self, attrs):
"""Set attr values on this object from the data in the attrs dict."""
for k, v in attrs.items():
setattr(self, k, v)
def __setattr__(self, key, value):
if key == '_forwardings':
return super(OptionValueContainer, self).__setattr__(key, value)
if hasattr(self, key):
existing_value = getattr(self, key)
if isinstance(existing_value, RankedValue):
existing_rank = existing_value.rank
else:
# Values without rank are assumed to be flag values set by argparse.
existing_rank = RankedValue.FLAG
else:
existing_rank = RankedValue.NONE
if isinstance(value, RankedValue):
new_rank = value.rank
else:
# Values without rank are assumed to be flag values set by argparse.
new_rank = RankedValue.FLAG
if new_rank >= existing_rank:
# We set values from outer scopes before values from inner scopes, so
      # in case of equal rank we overwrite. That way the inner scope value wins.
super(OptionValueContainer, self).__setattr__(key, value)
def __getattr__(self, key):
# Note: Called only if regular attribute lookup fails, so accesses
# to non-forwarded attributes will be handled the normal way.
if key == '_forwardings':
# In case we get called in copy/deepcopy, which don't invoke the ctor.
raise AttributeError
if key not in self._forwardings:
raise AttributeError('No such forwarded attribute: %s' % key)
val = getattr(self, self._forwardings[key])
if isinstance(val, RankedValue):
return val.value
else:
return val
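# --- Editor's illustrative sketch (not part of the original module) ---
# A minimal demonstration of attribute forwarding; the attribute names are
# arbitrary examples.
if __name__ == '__main__':
  values = OptionValueContainer()
  values.add_forwardings({'foo': '_DEFAULT_foo__', 'f': '_DEFAULT_foo__'})
  values._DEFAULT_foo__ = 3            # Write to the forwarding target.
  print(values.foo, values.f)          # Both reads forward: 3 3
  values.add_forwardings({'foo': '_COMPILE_foo__'})  # Inner-scope re-registration.
  values._COMPILE_foo__ = 5
  print(values.foo, values.f)          # foo now reads the inner value: 5 3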
|
{
"content_hash": "09d8bfea4a5b02f442d5094656556211",
"timestamp": "",
"source": "github",
"line_count": 100,
"max_line_length": 99,
"avg_line_length": 40.7,
"alnum_prop": 0.7002457002457002,
"repo_name": "Ervii/garage-time",
"id": "117e0ccd181fe055ac6c67230ed8830d16f7624f",
"size": "4217",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "garage/src/python/pants/option/option_value_container.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "9347"
},
{
"name": "GAP",
"bytes": "4684"
},
{
"name": "HTML",
"bytes": "64603"
},
{
"name": "Java",
"bytes": "43275"
},
{
"name": "JavaScript",
"bytes": "9523"
},
{
"name": "Protocol Buffer",
"bytes": "4664"
},
{
"name": "Python",
"bytes": "2200035"
},
{
"name": "Scala",
"bytes": "6693"
},
{
"name": "Shell",
"bytes": "29352"
},
{
"name": "Thrift",
"bytes": "1946"
}
],
"symlink_target": ""
}
|
"""
filename: committees_response.py
description: Responses for committees module.
created by: Omar De La Hoz (oed7416@rit.edu)
created on: 10/12/17
"""
class Response():
ComDoesntExist = {'error': "Committee doesn't exist."}
    AddSuccess = {'success': 'Committee successfully created.'}
AddError = {"error": "Committee couldn't be created, check data."}
AddExists = {'error': "Committee already exists."}
    EditSuccess = {"success": "Committee successfully edited."}
EditError = {"error": "Committee couldn't be edited, check data."}
UsrDoesntExist = {'error': "User doesn't exist or is not admin."}
|
{
"content_hash": "bc6ab80a3affb37b0be191132149c921",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 67,
"avg_line_length": 37.75,
"alnum_prop": 0.7135761589403974,
"repo_name": "ritstudentgovernment/chargeflask",
"id": "3fcbd6b9029faa6a32462f87dc32c5cbad65dea2",
"size": "604",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/committees/committees_response.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1632"
},
{
"name": "Dockerfile",
"bytes": "381"
},
{
"name": "HTML",
"bytes": "21682"
},
{
"name": "Python",
"bytes": "221454"
},
{
"name": "Shell",
"bytes": "132"
}
],
"symlink_target": ""
}
|
import argparse
from os import system
### args parsing
parser = argparse.ArgumentParser(description='runs TF/IDF on a directory of text docs')
parser.add_argument("-i","--input", help="the input in HDFS", required=True)
parser.add_argument("-o", '--output', help="the output in HDFS", required=True )
parser.add_argument("-mdf", '--min_document_frequency', default=1 )
args = parser.parse_args()
docs_dir = args.input  # "hdfs://master:9000/user/root/sequence_files1"
d_out = "hdfs://master:9000/" + args.output
min_df = int(args.min_document_frequency)
# import spark-related stuff
from pyspark import SparkContext
from pyspark.mllib.feature import HashingTF
from pyspark.mllib.feature import IDF
sc = SparkContext(appName="TF-IDF")
# Load documents (one per line).
documents = sc.textFile(docs_dir).map(lambda title_text: title_text[1].split(" "))
hashingTF = HashingTF()
tf = hashingTF.transform(documents)
# IDF
idf = IDF().fit(tf)
tfidf = idf.transform(tf)
#save
tfidf.saveAsTextFile(d_out)
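# --- Editor's illustrative sketch (not part of the original script) ---
# The same HashingTF/IDF pipeline on an in-memory RDD, handy as a quick local
# sanity check without HDFS; the sample documents are made up, and this is
# also where the otherwise-unused min_df could be applied.
#
#   docs = sc.parallelize([["hello", "world"], ["hello", "spark"]])
#   tf_local = HashingTF().transform(docs)
#   tf_local.cache()
#   tfidf_local = IDF(minDocFreq=min_df).fit(tf_local).transform(tf_local)
#   print(tfidf_local.take(2))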
|
{
"content_hash": "6eb73b25331409e298134da416c959ec",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 87,
"avg_line_length": 28.742857142857144,
"alnum_prop": 0.7335984095427436,
"repo_name": "project-asap/IReS-Platform",
"id": "3a32a874896017869dbd26eaebf114525fe543ad",
"size": "1006",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "asap-platform/asap-server/asapLibrary/operators/TF_IDF_mllib/spark_tfidf.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1321"
},
{
"name": "C",
"bytes": "3151"
},
{
"name": "C++",
"bytes": "16049"
},
{
"name": "CSS",
"bytes": "7653"
},
{
"name": "Gnuplot",
"bytes": "1954"
},
{
"name": "HTML",
"bytes": "60058"
},
{
"name": "Java",
"bytes": "810860"
},
{
"name": "JavaScript",
"bytes": "400733"
},
{
"name": "Jupyter Notebook",
"bytes": "35509"
},
{
"name": "Lua",
"bytes": "152841"
},
{
"name": "Makefile",
"bytes": "1125"
},
{
"name": "PLpgSQL",
"bytes": "62556"
},
{
"name": "Python",
"bytes": "252650"
},
{
"name": "Roff",
"bytes": "4013"
},
{
"name": "Ruby",
"bytes": "1635"
},
{
"name": "Scala",
"bytes": "10976"
},
{
"name": "Shell",
"bytes": "232701"
}
],
"symlink_target": ""
}
|
from pandac.PandaModules import Point3, VBase3
from toontown.suit.DistributedSellbotBoss import DistributedSellbotBoss
from toontown.toonbase import ToontownGlobals
from toontown.toonbase import TTLocalizer
from toontown.chat import ChatGlobals
from toontown.coghq import CogDisguiseGlobals
from toontown.suit import SuitDNA
from toontown.battle import SuitBattleGlobals
from direct.interval.IntervalGlobal import *
class DistributedBrutalSellbotBoss(DistributedSellbotBoss):
notify = directNotify.newCategory('DistributedBrutalSellbotBoss')
ANIM_PLAYRATE = 3
def announceGenerate(self):
DistributedSellbotBoss.announceGenerate(self)
self.setName(TTLocalizer.BrutalSellbotBossName)
base.localAvatar.setCanUseUnites(False)
def disable(self):
DistributedSellbotBoss.disable(self)
base.localAvatar.setCanUseUnites(True)
def makeIntroductionMovie(self, delayDeletes):
track = Parallel()
camera.reparentTo(render)
camera.setPosHpr(0, 25, 30, 0, 0, 0)
localAvatar.setCameraFov(ToontownGlobals.CogHQCameraFov)
dooberTrack = Parallel()
if self.doobers:
self._DistributedSellbotBoss__doobersToPromotionPosition(self.doobers[:4], self.battleANode)
self._DistributedSellbotBoss__doobersToPromotionPosition(self.doobers[4:], self.battleBNode)
turnPosA = ToontownGlobals.SellbotBossDooberTurnPosA
turnPosB = ToontownGlobals.SellbotBossDooberTurnPosB
self._DistributedSellbotBoss__walkDoober(self.doobers[0], 0, turnPosA, dooberTrack, delayDeletes)
self._DistributedSellbotBoss__walkDoober(self.doobers[1], 4, turnPosA, dooberTrack, delayDeletes)
self._DistributedSellbotBoss__walkDoober(self.doobers[2], 8, turnPosA, dooberTrack, delayDeletes)
self._DistributedSellbotBoss__walkDoober(self.doobers[3], 12, turnPosA, dooberTrack, delayDeletes)
self._DistributedSellbotBoss__walkDoober(self.doobers[7], 2, turnPosB, dooberTrack, delayDeletes)
self._DistributedSellbotBoss__walkDoober(self.doobers[6], 6, turnPosB, dooberTrack, delayDeletes)
self._DistributedSellbotBoss__walkDoober(self.doobers[5], 10, turnPosB, dooberTrack, delayDeletes)
self._DistributedSellbotBoss__walkDoober(self.doobers[4], 14, turnPosB, dooberTrack, delayDeletes)
toonTrack = Parallel()
self._DistributedSellbotBoss__toonsToPromotionPosition(self.toonsA, self.battleANode)
self._DistributedSellbotBoss__toonsToPromotionPosition(self.toonsB, self.battleBNode)
delay = 0
for toonId in self.toonsA:
self._DistributedSellbotBoss__walkToonToPromotion(toonId, delay, self.toonsEnterA, toonTrack, delayDeletes)
delay += 1
for toonId in self.toonsB:
self._DistributedSellbotBoss__walkToonToPromotion(toonId, delay, self.toonsEnterB, toonTrack, delayDeletes)
delay += 1
toonTrack.append(Sequence(Wait(delay), self.closeDoors))
self.rampA.request('extended')
self.rampB.request('extended')
self.rampC.request('retracted')
self.clearChat()
self.cagedToon.clearChat()
promoteDoobers = TTLocalizer.BrutalBossCogPromoteDoobers
doobersAway = TTLocalizer.BrutalBossCogDoobersAway
welcomeToons = TTLocalizer.BrutalBossCogWelcomeToons
promoteToons = TTLocalizer.BrutalBossCogPromoteToons
discoverToons = TTLocalizer.BrutalBossCogDiscoverToons
attackToons = TTLocalizer.BrutalBossCogAttackToons
interruptBoss = TTLocalizer.BrutalCagedToonInterruptBoss
rescueQuery = TTLocalizer.BrutalCagedToonRescueQuery
bossAnimTrack = Sequence(
ActorInterval(self, 'Ff_speech', startTime=2, duration=10, loop=1),
ActorInterval(self, 'ltTurn2Wave', duration=2),
ActorInterval(self, 'wave', duration=4, loop=1),
ActorInterval(self, 'ltTurn2Wave', startTime=2, endTime=0),
ActorInterval(self, 'Ff_speech', duration=7, loop=1))
track.append(bossAnimTrack)
dialogTrack = Track(
(0, Parallel(
camera.posHprInterval(8, Point3(-22, -100, 35), Point3(-10, -13, 0), blendType='easeInOut'),
IndirectInterval(toonTrack, 0, 18))),
(5.6, Func(self.setChatAbsolute, promoteDoobers, ChatGlobals.CFSpeech)),
(9, IndirectInterval(dooberTrack, 0, 9)),
(10, Sequence(
Func(self.clearChat),
Func(camera.setPosHpr, -23.1, 15.7, 17.2, -160, -2.4, 0))),
(12, Func(self.setChatAbsolute, doobersAway, ChatGlobals.CFSpeech)),
(16, Parallel(
Func(self.clearChat),
Func(camera.setPosHpr, -25, -99, 10, -14, 10, 0),
IndirectInterval(dooberTrack, 14),
IndirectInterval(toonTrack, 30))),
(18, Func(self.setChatAbsolute, welcomeToons, ChatGlobals.CFSpeech)),
(22, Func(self.setChatAbsolute, promoteToons, ChatGlobals.CFSpeech)),
(22.2, Sequence(
Func(self.cagedToon.nametag3d.setScale, 2),
Func(self.cagedToon.setChatAbsolute, interruptBoss, ChatGlobals.CFSpeech),
ActorInterval(self.cagedToon, 'wave'),
Func(self.cagedToon.loop, 'neutral'))),
(25, Sequence(
Func(self.clearChat),
Func(self.cagedToon.clearChat),
Func(camera.setPosHpr, -12, -15, 27, -151, -15, 0),
ActorInterval(self, 'Ff_lookRt'))),
(27, Sequence(
Func(self.cagedToon.setChatAbsolute, rescueQuery, ChatGlobals.CFSpeech),
Func(camera.setPosHpr, -12, 48, 94, -26, 20, 0),
ActorInterval(self.cagedToon, 'wave'),
Func(self.cagedToon.loop, 'neutral'))),
(31, Sequence(
Func(camera.setPosHpr, -20, -35, 10, -88, 25, 0),
Func(self.setChatAbsolute, discoverToons, ChatGlobals.CFSpeech),
Func(self.cagedToon.nametag3d.setScale, 1),
Func(self.cagedToon.clearChat),
ActorInterval(self, 'turn2Fb'))),
(34, Sequence(
Func(self.clearChat),
self.loseCogSuits(self.toonsA, self.battleANode, (0, 18, 5, -180, 0, 0)),
self.loseCogSuits(self.toonsB, self.battleBNode, (0, 18, 5, -180, 0, 0)))),
(37, Sequence(
self.toonNormalEyes(self.involvedToons),
Func(camera.setPosHpr, -23.4, -145.6, 44.0, -10.0, -12.5, 0),
Func(self.loop, 'Fb_neutral'),
Func(self.rampA.request, 'retract'),
Func(self.rampB.request, 'retract'),
Parallel(self.backupToonsToBattlePosition(self.toonsA, self.battleANode),
self.backupToonsToBattlePosition(self.toonsB, self.battleBNode),
Sequence(
Wait(2),
Func(self.setChatAbsolute, attackToons, ChatGlobals.CFSpeech))))))
track.append(dialogTrack)
return Sequence(Func(self.stickToonsToFloor), track, Func(self.unstickToons), name=self.uniqueName('Introduction'))
def enterPrepareBattleThree(self):
self.cleanupIntervals()
self.controlToons()
self.clearChat()
self.cagedToon.clearChat()
self.reparentTo(render)
self.rampA.request('retract')
self.rampB.request('retract')
self.rampC.request('extend')
self.setCageIndex(4)
camera.reparentTo(render)
camera.setPosHpr(self.cage, 0, -17, 3.3, 0, 0, 0)
localAvatar.setCameraFov(ToontownGlobals.CogHQCameraFov)
self.hide()
self.acceptOnce('doneChatPage', self._DistributedSellbotBoss__onToBattleThree)
self.cagedToon.setLocalPageChat(TTLocalizer.BrutalCagedToonPrepareBattleThree, 1)
base.playMusic(self.betweenBattleMusic, looping=1, volume=0.9)
def _DistributedSellbotBoss__talkAboutPromotion(self, speech):
if self.prevCogSuitLevel < ToontownGlobals.MaxCogSuitLevel:
deptIndex = CogDisguiseGlobals.dept2deptIndex(self.style.dept)
cogLevels = base.localAvatar.getCogLevels()
newCogSuitLevel = cogLevels[deptIndex]
cogTypes = base.localAvatar.getCogTypes()
maxCogSuitLevel = (SuitDNA.levelsPerSuit-1) + cogTypes[deptIndex]
if self.prevCogSuitLevel != maxCogSuitLevel:
speech += TTLocalizer.BrutalCagedToonLevelPromotion
if newCogSuitLevel == maxCogSuitLevel:
if newCogSuitLevel != ToontownGlobals.MaxCogSuitLevel:
suitIndex = (SuitDNA.suitsPerDept*deptIndex) + cogTypes[deptIndex]
cogTypeStr = SuitDNA.suitHeadTypes[suitIndex]
cogName = SuitBattleGlobals.SuitAttributes[cogTypeStr]['name']
speech += TTLocalizer.CagedToonSuitPromotion % cogName
else:
speech += TTLocalizer.CagedToonMaxed % (ToontownGlobals.MaxCogSuitLevel + 1)
return speech
def _DistributedSellbotBoss__makeCageOpenMovie(self):
speech = TTLocalizer.BrutalCagedToonThankYou
speech = self._DistributedSellbotBoss__talkAboutPromotion(speech)
name = self.uniqueName('CageOpen')
seq = Sequence(
Func(self.cage.setPos, self.cagePos[4]),
Func(self.cageDoor.setHpr, VBase3(0, 0, 0)),
Func(self.cagedToon.setPos, Point3(0, -2, 0)),
Parallel(
self.cage.posInterval(0.5, self.cagePos[5], blendType='easeOut'),
SoundInterval(self.cageLowerSfx, duration=0.5)),
Parallel(
self.cageDoor.hprInterval(0.5, VBase3(0, 90, 0), blendType='easeOut'),
Sequence(SoundInterval(self.cageDoorSfx), duration=0)),
Wait(0.2),
Func(self.cagedToon.loop, 'walk'),
self.cagedToon.posInterval(0.8, Point3(0, -6, 0)),
Func(self.cagedToon.setChatAbsolute, TTLocalizer.CagedToonYippee, ChatGlobals.CFSpeech),
ActorInterval(self.cagedToon, 'jump'),
Func(self.cagedToon.loop, 'neutral'),
Func(self.cagedToon.headsUp, localAvatar),
Func(self.cagedToon.setLocalPageChat, speech, 0),
Func(camera.reparentTo, localAvatar),
Func(camera.setPos, 0, -9, 9),
Func(camera.lookAt, self.cagedToon, Point3(0, 0, 2)), name=name)
return seq
|
{
"content_hash": "86228ded00234181b10a2ab4874b01b5",
"timestamp": "",
"source": "github",
"line_count": 207,
"max_line_length": 123,
"avg_line_length": 51.35748792270532,
"alnum_prop": 0.6450945348509077,
"repo_name": "Spiderlover/Toontown",
"id": "0de7aa7b5b3f4c53b6affd54807519a651b6f3dd",
"size": "10631",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "toontown/suit/DistributedBrutalSellbotBoss.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "7774"
},
{
"name": "Python",
"bytes": "17241353"
},
{
"name": "Shell",
"bytes": "7699"
}
],
"symlink_target": ""
}
|
from ansible.module_utils.basic import *
class UFWForwards(object):
def __init__(self, data, ipv6):
self.ufw_chain = "ufw6" if ipv6 else 'ufw'
self.nat_rules = []
self.filter_rules = []
self.reroute_data = []
self.port_forward_data = []
args = [
'incomming_dev',
'incomming_network',
'outgoing_dev',
'outgoing_network',
'masquerading',
'conntrack_state'
]
if 'reroute' in data:
self.reroute_data = data['reroute']
else:
self.reroute_data = []
if 'forwards' in data:
self.port_forward_data = data['forwards']
else:
self.port_forward_data = []
for arg in args:
if arg in data:
setattr(self, arg, data[arg])
else:
setattr(self, arg, None)
def generate(self):
if self.masquerading:
for item in self.reroute_data:
self._reroute_generate(item)
for item in self.port_forward_data:
self._port_forward(item)
if self.masquerading:
self._masquerade_generate()
self._forward_generate()
def _port_forward(self, item):
for k in ['destination_port', 'incomming_port', 'protocol']:
if k not in item:
item[k] = [None]
if not isinstance(item[k], list):
item[k] = [str(item[k])]
if item['incomming_port'][0] is None and len(item['destination_port']) > 1:
item['incomming_port'] = [None for i in range(len(item['destination_port']))]
for protocol in item['protocol']:
ports = zip(item['incomming_port'], item['destination_port'])
for in_port, dport in ports:
self._port_forward_generate(item, protocol, dport)
if self.masquerading:
self._port_forward_dnat_generate(item, protocol, in_port, dport)
def _reroute_generate(self, item):
rule = ["-A", "POSTROUTING"]
if self.incomming_dev:
rule += ['-o', self.incomming_dev]
if 'source_ip' in item:
rule += ['-s', item['source_ip']]
rule += ['-j', 'SNAT']
rule += ["--to-source", item['routed_ip']]
if rule not in self.nat_rules:
self.nat_rules.append(rule)
def _port_forward_dnat_generate(self, item, protocol, in_port, dport):
rule = ["-A", "PREROUTING"]
if self.incomming_dev:
rule += ['-i', self.incomming_dev]
if 'incomming_ip' in item:
rule += ['-d', item['incomming_ip']]
rule += ['-p', protocol]
rule += ['-m', protocol]
rule += ['--dport', str(in_port)]
rule += ['-j', 'DNAT']
if self.ufw_chain == "ufw6":
rule += ["--to-destination", "[{}]:{}".format(item['destination_ip'], str(dport))]
else:
rule += ["--to-destination", "{}:{}".format(item['destination_ip'], str(dport))]
if rule not in self.nat_rules:
self.nat_rules.append(rule)
def _port_forward_generate(self, item, protocol, dport):
rule = ["-A", self.ufw_chain + "-before-forward"]
if self.incomming_dev:
rule += ['-i', self.incomming_dev]
if self.outgoing_dev:
rule += ['-o', self.outgoing_dev]
if 'destination_ip' in item:
rule += ['-d', item['destination_ip']]
elif 'destination_network' in item:
rule += ['-d', item['destination_network']]
if protocol and dport:
rule += ['-p', protocol]
rule += ['-m', protocol]
rule += ['--dport', str(dport)]
rule += ['-j', 'ACCEPT']
if rule not in self.filter_rules:
self.filter_rules.append(rule)
def _masquerade_generate(self):
rule = ["-A", "POSTROUTING"]
if self.incomming_dev:
rule += ['-o', self.incomming_dev]
if self.incomming_network:
rule += ['-d', self.incomming_network]
if self.outgoing_network:
rule += ['-s', self.outgoing_network]
rule += ['-j', 'MASQUERADE']
if rule not in self.nat_rules:
self.nat_rules.append(rule)
def _forward_generate(self):
for i in [('i','s','o','d'),('o','d','i','s')]:
rule = ["-A", self.ufw_chain + "-before-forward"]
if self.incomming_dev:
rule += ['-' + i[0], self.incomming_dev]
if self.outgoing_dev:
rule += ['-' + i[2], self.outgoing_dev]
if self.incomming_network:
rule += ['-' + i[1], self.incomming_network]
if self.outgoing_network:
rule += ['-' + i[3], self.outgoing_network]
if i[0] == 'i' and self.conntrack_state:
rule += ['-m conntrack', '--ctstate', self.conntrack_state]
rule += ['-j', 'ACCEPT']
if rule not in self.filter_rules:
self.filter_rules.append(rule)
rule = ["-A", self.ufw_chain + "-before-forward"]
rule += ['-i', self.outgoing_dev]
rule += ['-o', self.outgoing_dev]
rule += ['-j', 'ACCEPT']
if rule not in self.filter_rules:
self.filter_rules.append(rule)
def main():
fields = {
"data" : {"required": True, "type": "list"},
"ipv6" : {"default": False, "type": "bool"},
}
module = AnsibleModule(argument_spec=fields)
response = {
'nat_rules' : [],
'filter_rules' : []
}
for item in module.params['data']:
ufw_forwards = UFWForwards(item, module.params['ipv6'])
ufw_forwards.nat_rules = response['nat_rules']
ufw_forwards.filter_rules = response['filter_rules']
ufw_forwards.generate()
module.exit_json(changed=False, meta=response)
if __name__ == '__main__':
main()
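# --- Editor's illustrative sketch (not part of the original module) ---
# Shape of one 'data' item as consumed by UFWForwards above; all devices,
# addresses and ports are made-up examples (key spellings match the module).
#
#   example_item = {
#       'incomming_dev': 'eth0',
#       'outgoing_dev': 'eth1',
#       'outgoing_network': '10.0.0.0/24',
#       'masquerading': True,
#       'conntrack_state': 'RELATED,ESTABLISHED',
#       'forwards': [{
#           'incomming_ip': '203.0.113.10',
#           'destination_ip': '10.0.0.5',
#           'protocol': ['tcp'],
#           'incomming_port': [8080],
#           'destination_port': [80],
#       }],
#       'reroute': [{'source_ip': '10.0.0.5', 'routed_ip': '203.0.113.10'}],
#   }
#   forwards = UFWForwards(example_item, ipv6=False)
#   forwards.generate()   # populates forwards.nat_rules and forwards.filter_rules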
|
{
"content_hash": "ef5915bdddbf4ede33c6b3ee3304fd01",
"timestamp": "",
"source": "github",
"line_count": 186,
"max_line_length": 94,
"avg_line_length": 32.736559139784944,
"alnum_prop": 0.5005748070290688,
"repo_name": "EggieCode/ansible-role-ufw",
"id": "31adb990bbed879603a2d1017288aa2e1b8a712f",
"size": "6108",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "library/ufw_forward.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "8269"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import os
import sas
import sas_testcase
from sas_test_harness import SasTestHarnessServer, generateCbsdRecords, generatePpaRecords
from util import winnforum_testcase, configurable_testcase, writeConfig, \
loadConfig, makePpaAndPalRecordsConsistent, getFqdnLocalhost, getUnusedPort, \
getCertFilename, json_load
from testcases.WINNF_FT_S_MCP_testcase import McpXprCommonTestcase
class EscProtectionTestcase(McpXprCommonTestcase):
def setUp(self):
self._sas, self._sas_admin = sas.GetTestingSas()
self._sas_admin.Reset()
def tearDown(self):
self.ShutdownServers()
def generate_EPR_1_default_config(self, filename):
""" Generates the WinnForum configuration for EPR.1. """
# Load ESC record
esc_record_1 = json_load(
os.path.join('testcases', 'testdata', 'esc_sensor_record_0.json'))
# Load devices info
device_1 = json_load(
os.path.join('testcases', 'testdata', 'device_a.json'))
# Moving device_1(Cat A) to a location within 40 KMs of ESC sensor
device_1['installationParam'][
'latitude'] = esc_record_1['installationParam']['latitude'] + 0.20
device_1['installationParam'][
'longitude'] = esc_record_1['installationParam']['longitude']
device_2 = json_load(
os.path.join('testcases', 'testdata', 'device_b.json'))
# Moving device_2(Cat B) to a location within 80 KMs of ESC sensor
device_2['installationParam'][
'latitude'] = esc_record_1['installationParam']['latitude'] + 0.70
device_2['installationParam'][
'longitude'] = esc_record_1['installationParam']['longitude']
device_3 = json_load(
os.path.join('testcases', 'testdata', 'device_c.json'))
# Moving device_3(Cat A) to a location outside 40 KMs of ESC sensor
device_3['installationParam'][
'latitude'] = esc_record_1['installationParam']['latitude'] + 0.50
device_3['installationParam'][
'longitude'] = esc_record_1['installationParam']['longitude']
device_4 = json_load(
os.path.join('testcases', 'testdata', 'device_d.json'))
# Moving device_4(Cat B) to a location outside 80 KMs of ESC sensor
device_4['installationParam'][
'latitude'] = esc_record_1['installationParam']['latitude'] + 1.0
device_4['installationParam'][
'longitude'] = esc_record_1['installationParam']['longitude']
# Load Grant requests
# Loading grant_request_1 for device_1 with In-band frequency range of 3620 to 3630 MHz
grant_request_1 = json_load(
os.path.join('testcases', 'testdata', 'grant_0.json'))
# Loading grant_request_2 for device_2 with In-band frequency range of 3630 to 3640 MHz
grant_request_2 = json_load(
os.path.join('testcases', 'testdata', 'grant_1.json'))
# Loading grant_request_3 for device_3 with Out-of-band frequency range of 3650 to 3660 MHz
grant_request_3 = json_load(
os.path.join('testcases', 'testdata', 'grant_2.json'))
# Loading grant_request_4 for device_4 with In-band frequency range of 3620 to 3630 MHz
grant_request_4 = json_load(
os.path.join('testcases', 'testdata', 'grant_0.json'))
# device_b and device_d are Category B
# Load Conditional Data
self.assertEqual(device_2['cbsdCategory'], 'B')
conditionals_device_2 = {
'cbsdCategory': device_2['cbsdCategory'],
'fccId': device_2['fccId'],
'cbsdSerialNumber': device_2['cbsdSerialNumber'],
'airInterface': device_2['airInterface'],
'installationParam': device_2['installationParam'],
'measCapability': device_2['measCapability']
}
self.assertEqual(device_4['cbsdCategory'], 'B')
conditionals_device_4 = {
'cbsdCategory': device_4['cbsdCategory'],
'fccId': device_4['fccId'],
'cbsdSerialNumber': device_4['cbsdSerialNumber'],
'airInterface': device_4['airInterface'],
'installationParam': device_4['installationParam'],
'measCapability': device_4['measCapability']
}
# Remove conditionals from registration
del device_2['cbsdCategory']
del device_2['airInterface']
del device_2['installationParam']
del device_2['measCapability']
del device_4['cbsdCategory']
del device_4['airInterface']
del device_4['installationParam']
del device_4['measCapability']
# Registration and grant records
cbsd_records_domain_proxy_0 = {
'registrationRequests': [device_1, device_2],
'grantRequests': [grant_request_1, grant_request_2],
'conditionalRegistrationData': [conditionals_device_2]
}
cbsd_records_domain_proxy_1 = {
'registrationRequests': [device_3],
'grantRequests': [grant_request_3],
'conditionalRegistrationData': []
}
# Protected entity record
protected_entities = {
'escRecords': [esc_record_1]
}
iteration_config = {
'cbsdRequestsWithDomainProxies': [cbsd_records_domain_proxy_0,
cbsd_records_domain_proxy_1],
'cbsdRecords': [{
'registrationRequest': device_4,
'grantRequest': grant_request_4,
'conditionalRegistrationData': conditionals_device_4,
'clientCert': getCertFilename('device_d.cert'),
'clientKey': getCertFilename('device_d.key')
}],
'protectedEntities': protected_entities,
'dpaActivationList': [],
'dpaDeactivationList': [],
'sasTestHarnessData': []
}
# Create the actual config.
config = {
'initialCbsdRequestsWithDomainProxies': self.getEmptyCbsdRequestsWithDomainProxies(2),
'initialCbsdRecords': [],
'iterationData': [iteration_config],
'sasTestHarnessConfigs': [],
'domainProxyConfigs': [{
'cert': getCertFilename('domain_proxy.cert'),
'key': getCertFilename('domain_proxy.key')
}, {
'cert': getCertFilename('domain_proxy_1.cert'),
'key': getCertFilename('domain_proxy_1.key')
}]
}
writeConfig(filename, config)
@configurable_testcase(generate_EPR_1_default_config)
def test_WINNF_FT_S_EPR_1(self, config_filename):
"""Single SAS ESC Sensor Protection
"""
config = loadConfig(config_filename)
# Invoke MCP test steps 1 through 22.
self.executeMcpTestSteps(config, 'xPR1')
def generate_EPR_2_default_config(self, filename):
""" Generates the WinnForum configuration for EPR.2. """
# Load ESC record
esc_record_1 = json_load(
os.path.join('testcases', 'testdata', 'esc_sensor_record_0.json'))
# Load devices info
device_1 = json_load(
os.path.join('testcases', 'testdata', 'device_a.json'))
# Moving device_1(Cat A) to a location within 40 KMs of ESC sensor
device_1['installationParam'][
'latitude'] = esc_record_1['installationParam']['latitude'] + 0.20
device_1['installationParam'][
'longitude'] = esc_record_1['installationParam']['longitude']
device_2 = json_load(
os.path.join('testcases', 'testdata', 'device_b.json'))
# Moving device_2(Cat B) to a location within 80 KMs of ESC sensor
device_2['installationParam'][
'latitude'] = esc_record_1['installationParam']['latitude'] + 0.70
device_2['installationParam'][
'longitude'] = esc_record_1['installationParam']['longitude']
device_3 = json_load(
os.path.join('testcases', 'testdata', 'device_c.json'))
# Moving device_3(Cat A) to a location outside 40 KMs of ESC sensor
device_3['installationParam'][
'latitude'] = esc_record_1['installationParam']['latitude'] + 0.50
device_3['installationParam'][
'longitude'] = esc_record_1['installationParam']['longitude']
device_4 = json_load(
os.path.join('testcases', 'testdata', 'device_d.json'))
# Moving device_4(Cat B) to a location outside 80 KMs of ESC sensor
device_4['installationParam'][
'latitude'] = esc_record_1['installationParam']['latitude'] + 1.0
device_4['installationParam'][
'longitude'] = esc_record_1['installationParam']['longitude']
# Load Grant requests with In-band frequency range for all devices
grant_request_1 = json_load(
os.path.join('testcases', 'testdata', 'grant_0.json'))
grant_request_1['operationParam']['operationFrequencyRange']['lowFrequency'] = 3550000000
grant_request_1['operationParam']['operationFrequencyRange']['highFrequency'] = 3560000000
grant_request_2 = json_load(
os.path.join('testcases', 'testdata', 'grant_1.json'))
grant_request_2['operationParam']['operationFrequencyRange']['lowFrequency'] = 3570000000
grant_request_2['operationParam']['operationFrequencyRange']['highFrequency'] = 3580000000
grant_request_3 = json_load(
os.path.join('testcases', 'testdata', 'grant_2.json'))
grant_request_3['operationParam']['operationFrequencyRange']['lowFrequency'] = 3590000000
grant_request_3['operationParam']['operationFrequencyRange']['highFrequency'] = 3600000000
grant_request_4 = json_load(
os.path.join('testcases', 'testdata', 'grant_0.json'))
grant_request_4['operationParam']['operationFrequencyRange']['lowFrequency'] = 3610000000
grant_request_4['operationParam']['operationFrequencyRange']['highFrequency'] = 3620000000
# device_b and device_d are Category B
# Load Conditional Data
self.assertEqual(device_2['cbsdCategory'], 'B')
conditionals_device_2 = {
'cbsdCategory': device_2['cbsdCategory'],
'fccId': device_2['fccId'],
'cbsdSerialNumber': device_2['cbsdSerialNumber'],
'airInterface': device_2['airInterface'],
'installationParam': device_2['installationParam'],
'measCapability': device_2['measCapability']
}
self.assertEqual(device_4['cbsdCategory'], 'B')
conditionals_device_4 = {
'cbsdCategory': device_4['cbsdCategory'],
'fccId': device_4['fccId'],
'cbsdSerialNumber': device_4['cbsdSerialNumber'],
'airInterface': device_4['airInterface'],
'installationParam': device_4['installationParam'],
'measCapability': device_4['measCapability']
}
# Remove conditionals from registration
del device_2['cbsdCategory']
del device_2['airInterface']
del device_2['installationParam']
del device_2['measCapability']
del device_4['cbsdCategory']
del device_4['airInterface']
del device_4['installationParam']
del device_4['measCapability']
# Registration and grant records
cbsd_records_domain_proxy_0 = {
'registrationRequests': [device_1, device_2],
'grantRequests': [grant_request_1, grant_request_2],
'conditionalRegistrationData': [conditionals_device_2]
}
cbsd_records_domain_proxy_1 = {
'registrationRequests': [device_3],
'grantRequests': [grant_request_3],
'conditionalRegistrationData': []
}
# Protected entity record
protected_entities = {
'escRecords': [esc_record_1]
}
# SAS Test Harnesses configurations,
# Following configurations are for two SAS test harnesses
sas_test_harness_device_1 = json_load(
os.path.join('testcases', 'testdata', 'device_a.json'))
sas_test_harness_device_1['fccId'] = "test_fcc_id_e"
sas_test_harness_device_1['userId'] = "test_user_id_e"
sas_test_harness_device_2 = json_load(
os.path.join('testcases', 'testdata', 'device_b.json'))
sas_test_harness_device_2['fccId'] = "test_fcc_id_f"
sas_test_harness_device_2['userId'] = "test_user_id_f"
sas_test_harness_device_3 = json_load(
os.path.join('testcases', 'testdata', 'device_c.json'))
sas_test_harness_device_3['fccId'] = "test_fcc_id_g"
sas_test_harness_device_3['userId'] = "test_user_id_g"
# Generate Cbsd FAD Records for SAS Test Harness 0
cbsd_fad_records_sas_test_harness_0 = generateCbsdRecords(
[sas_test_harness_device_1],
[[grant_request_1]]
)
# Generate Cbsd FAD Records for SAS Test Harness 1
cbsd_fad_records_sas_test_harness_1 = generateCbsdRecords(
[sas_test_harness_device_2, sas_test_harness_device_3],
[[grant_request_2], [grant_request_3]]
)
# Generate SAS Test Harnesses dump records
dump_records_sas_test_harness_0 = {
'cbsdRecords': cbsd_fad_records_sas_test_harness_0
}
dump_records_sas_test_harness_1 = {
'cbsdRecords': cbsd_fad_records_sas_test_harness_1
}
# SAS Test Harnesses configuration
sas_test_harness_0_config = {
'sasTestHarnessName': 'SAS-TH-1',
'hostName': getFqdnLocalhost(),
'port': getUnusedPort(),
'serverCert': getCertFilename('sas.cert'),
'serverKey': getCertFilename('sas.key'),
'caCert': getCertFilename('ca.cert')
}
sas_test_harness_1_config = {
'sasTestHarnessName': 'SAS-TH-2',
'hostName': getFqdnLocalhost(),
'port': getUnusedPort(),
'serverCert': getCertFilename('sas_1.cert'),
'serverKey': getCertFilename('sas_1.key'),
'caCert': getCertFilename('ca.cert')
}
iteration_config = {
'cbsdRequestsWithDomainProxies': [cbsd_records_domain_proxy_0,
cbsd_records_domain_proxy_1],
'cbsdRecords': [{
'registrationRequest': device_4,
'grantRequest': grant_request_4,
'conditionalRegistrationData': conditionals_device_4,
'clientCert': getCertFilename('device_d.cert'),
'clientKey': getCertFilename('device_d.key')
}],
'protectedEntities': protected_entities,
'dpaActivationList': [],
'dpaDeactivationList': [],
'sasTestHarnessData': [dump_records_sas_test_harness_0,
dump_records_sas_test_harness_1]
}
# Create the actual config.
config = {
'initialCbsdRequestsWithDomainProxies': self.getEmptyCbsdRequestsWithDomainProxies(2),
'initialCbsdRecords': [],
'iterationData': [iteration_config],
'sasTestHarnessConfigs': [
sas_test_harness_0_config, sas_test_harness_1_config
],
'domainProxyConfigs': [{
'cert': getCertFilename('domain_proxy.cert'),
'key': getCertFilename('domain_proxy.key')
}, {
'cert': getCertFilename('domain_proxy_1.cert'),
'key': getCertFilename('domain_proxy_1.key')
}]
}
writeConfig(filename, config)
@configurable_testcase(generate_EPR_2_default_config)
def test_WINNF_FT_S_EPR_2(self, config_filename):
"""Multiple SAS ESC Sensor Protection
"""
config = loadConfig(config_filename)
# Invoke MCP test steps 1 through 22.
self.executeMcpTestSteps(config, 'xPR2')
|
{
"content_hash": "c1f8f1c230624212f8ac088bcf58c976",
"timestamp": "",
"source": "github",
"line_count": 377,
"max_line_length": 97,
"avg_line_length": 40.0132625994695,
"alnum_prop": 0.6413655949618826,
"repo_name": "Wireless-Innovation-Forum/Spectrum-Access-System",
"id": "230422ed769abeb1a46f6f185e668ffbc846ada5",
"size": "15720",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/harness/testcases/WINNF_FT_S_EPR_testcase.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "10978"
},
{
"name": "C++",
"bytes": "130297"
},
{
"name": "Makefile",
"bytes": "869"
},
{
"name": "PowerShell",
"bytes": "11931"
},
{
"name": "Python",
"bytes": "1836478"
},
{
"name": "Shell",
"bytes": "46984"
}
],
"symlink_target": ""
}
|
import account_journal
import account_partner_ledger
|
{
"content_hash": "bdc3680c951da71ad7413dc65cc61eb6",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 29,
"avg_line_length": 26.5,
"alnum_prop": 0.8679245283018868,
"repo_name": "vileopratama/vitech",
"id": "e424cac8b5da3aaa346b95c3e9f9818a94286688",
"size": "153",
"binary": false,
"copies": "25",
"ref": "refs/heads/master",
"path": "src/addons/account_extra_reports/report/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "9611"
},
{
"name": "CSS",
"bytes": "2125999"
},
{
"name": "HTML",
"bytes": "252393"
},
{
"name": "Java",
"bytes": "1840167"
},
{
"name": "JavaScript",
"bytes": "6176224"
},
{
"name": "Makefile",
"bytes": "19072"
},
{
"name": "Mako",
"bytes": "7659"
},
{
"name": "NSIS",
"bytes": "16782"
},
{
"name": "Python",
"bytes": "9438805"
},
{
"name": "Ruby",
"bytes": "220"
},
{
"name": "Shell",
"bytes": "22312"
},
{
"name": "Vim script",
"bytes": "406"
},
{
"name": "XSLT",
"bytes": "11489"
}
],
"symlink_target": ""
}
|
"""Test Home Assistant package util methods."""
import os
import pkg_resources
import unittest
from subprocess import PIPE
from distutils.sysconfig import get_python_lib
from unittest.mock import call, patch, Mock
import homeassistant.util.package as package
RESOURCE_DIR = os.path.abspath(
os.path.join(os.path.dirname(__file__), '..', 'resources'))
TEST_EXIST_REQ = 'pip>=7.0.0'
TEST_NEW_REQ = 'pyhelloworld3==1.0.0'
TEST_ZIP_REQ = 'file://{}#{}' \
.format(os.path.join(RESOURCE_DIR, 'pyhelloworld3.zip'), TEST_NEW_REQ)
@patch('homeassistant.util.package.Popen')
@patch('homeassistant.util.package.check_package_exists')
class TestPackageUtilInstallPackage(unittest.TestCase):
"""Test for homeassistant.util.package module."""
def setUp(self):
"""Setup the tests."""
self.mock_process = Mock()
self.mock_process.communicate.return_value = (b'message', b'error')
self.mock_process.returncode = 0
def test_install_existing_package(self, mock_exists, mock_popen):
"""Test an install attempt on an existing package."""
mock_popen.return_value = self.mock_process
mock_exists.return_value = True
self.assertTrue(package.install_package(TEST_EXIST_REQ))
self.assertEqual(mock_exists.call_count, 1)
self.assertEqual(mock_exists.call_args, call(TEST_EXIST_REQ, None))
self.assertEqual(self.mock_process.communicate.call_count, 0)
@patch('homeassistant.util.package.sys')
def test_install(self, mock_sys, mock_exists, mock_popen):
"""Test an install attempt on a package that doesn't exist."""
mock_exists.return_value = False
mock_popen.return_value = self.mock_process
self.assertTrue(package.install_package(TEST_NEW_REQ, False))
self.assertEqual(mock_exists.call_count, 1)
self.assertEqual(self.mock_process.communicate.call_count, 1)
self.assertEqual(mock_popen.call_count, 1)
self.assertEqual(
mock_popen.call_args,
call([
mock_sys.executable, '-m', 'pip', 'install', '--quiet',
TEST_NEW_REQ
], stdin=PIPE, stdout=PIPE, stderr=PIPE)
)
@patch('homeassistant.util.package.sys')
def test_install_upgrade(self, mock_sys, mock_exists, mock_popen):
"""Test an upgrade attempt on a package."""
mock_exists.return_value = False
mock_popen.return_value = self.mock_process
self.assertTrue(package.install_package(TEST_NEW_REQ))
self.assertEqual(mock_exists.call_count, 1)
self.assertEqual(self.mock_process.communicate.call_count, 1)
self.assertEqual(mock_popen.call_count, 1)
self.assertEqual(
mock_popen.call_args,
call([
mock_sys.executable, '-m', 'pip', 'install', '--quiet',
TEST_NEW_REQ, '--upgrade'
], stdin=PIPE, stdout=PIPE, stderr=PIPE)
)
@patch('homeassistant.util.package.sys')
def test_install_target(self, mock_sys, mock_exists, mock_popen):
"""Test an install with a target."""
target = 'target_folder'
mock_exists.return_value = False
mock_popen.return_value = self.mock_process
self.assertTrue(
package.install_package(TEST_NEW_REQ, False, target=target)
)
self.assertEqual(mock_exists.call_count, 1)
self.assertEqual(self.mock_process.communicate.call_count, 1)
self.assertEqual(mock_popen.call_count, 1)
self.assertEqual(
mock_popen.call_args,
call([
mock_sys.executable, '-m', 'pip', 'install', '--quiet',
TEST_NEW_REQ, '--target', os.path.abspath(target)
], stdin=PIPE, stdout=PIPE, stderr=PIPE)
)
@patch('homeassistant.util.package._LOGGER')
@patch('homeassistant.util.package.sys')
def test_install_error(self, mock_sys, mock_logger, mock_exists,
mock_popen):
"""Test an install with a target."""
mock_exists.return_value = False
mock_popen.return_value = self.mock_process
self.mock_process.returncode = 1
self.assertFalse(package.install_package(TEST_NEW_REQ))
self.assertEqual(mock_logger.error.call_count, 1)
class TestPackageUtilCheckPackageExists(unittest.TestCase):
"""Test for homeassistant.util.package module."""
def test_check_package_global(self):
"""Test for a globally-installed package."""
installed_package = list(pkg_resources.working_set)[0].project_name
self.assertTrue(package.check_package_exists(installed_package, None))
def test_check_package_local(self):
"""Test for a locally-installed package."""
lib_dir = get_python_lib()
installed_package = list(pkg_resources.working_set)[0].project_name
self.assertTrue(
package.check_package_exists(installed_package, lib_dir)
)
def test_check_package_zip(self):
"""Test for an installed zip package."""
self.assertFalse(package.check_package_exists(TEST_ZIP_REQ, None))
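# --- Editor's illustrative sketch (not part of the original test module) ---
# The behaviour exercised above, seen from the caller's side; the requirement
# string and target directory are made-up examples.
#
#   import homeassistant.util.package as package
#   if not package.check_package_exists('pyhelloworld3==1.0.0', None):
#       package.install_package('pyhelloworld3==1.0.0', False, target='deps')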
|
{
"content_hash": "034ef5ae531b0c73bd63dc7c31e6f3b6",
"timestamp": "",
"source": "github",
"line_count": 141,
"max_line_length": 78,
"avg_line_length": 36.58156028368794,
"alnum_prop": 0.6421093447072509,
"repo_name": "JshWright/home-assistant",
"id": "e0682d79f57be2a30ad3e1bfb5ffbc2e75d5eb9d",
"size": "5158",
"binary": false,
"copies": "6",
"ref": "refs/heads/dev",
"path": "tests/util/test_package.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "1808411"
},
{
"name": "Python",
"bytes": "6070409"
},
{
"name": "Ruby",
"bytes": "517"
},
{
"name": "Shell",
"bytes": "15525"
}
],
"symlink_target": ""
}
|
from django.http import HttpRequest, HttpResponse
from zerver.decorator import webhook_view
from zerver.lib.exceptions import UnsupportedWebhookEventType
from zerver.lib.request import REQ, has_request_variables
from zerver.lib.response import json_success
from zerver.lib.validator import WildValue, check_string, to_wild_value
from zerver.lib.webhooks.common import check_send_webhook_message, get_setup_webhook_message
from zerver.models import UserProfile
RADARR_TOPIC_TEMPLATE = "{movie_title}".strip()
RADARR_TOPIC_TEMPLATE_TEST = "Radarr - Test".strip()
RADARR_TOPIC_TEMPLATE_HEALTH_CHECK = "Health {level}".strip()
RADARR_MESSAGE_TEMPLATE_HEALTH_CHECK = "{message}.".strip()
RADARR_MESSAGE_TEMPLATE_MOVIE_RENAMED = "The movie {movie_title} has been renamed.".strip()
RADARR_MESSAGE_TEMPLATE_MOVIE_IMPORTED = "The movie {movie_title} has been imported.".strip()
RADARR_MESSAGE_TEMPLATE_MOVIE_IMPORTED_UPGRADE = (
"The movie {movie_title} has been upgraded from {old_quality} to {new_quality}.".strip()
)
RADARR_MESSAGE_TEMPLATE_MOVIE_GRABBED = "The movie {movie_title} has been grabbed.".strip()
ALL_EVENT_TYPES = ["Rename", "Test", "Download", "Health", "Grab"]
@webhook_view("Radarr", all_event_types=ALL_EVENT_TYPES)
@has_request_variables
def api_radarr_webhook(
request: HttpRequest,
user_profile: UserProfile,
payload: WildValue = REQ(argument_type="body", converter=to_wild_value),
) -> HttpResponse:
body = get_body_for_http_request(payload)
subject = get_subject_for_http_request(payload)
check_send_webhook_message(
request, user_profile, subject, body, payload["eventType"].tame(check_string)
)
return json_success(request)
def get_subject_for_http_request(payload: WildValue) -> str:
event_type = payload["eventType"].tame(check_string)
if event_type != "Test" and event_type != "Health":
topic = RADARR_TOPIC_TEMPLATE.format(
movie_title=payload["movie"]["title"].tame(check_string)
)
elif event_type == "Test":
topic = RADARR_TOPIC_TEMPLATE_TEST
elif event_type == "Health":
topic = RADARR_TOPIC_TEMPLATE_HEALTH_CHECK.format(level=payload["level"].tame(check_string))
return topic
def get_body_for_health_check_event(payload: WildValue) -> str:
return RADARR_MESSAGE_TEMPLATE_HEALTH_CHECK.format(
message=payload["message"].tame(check_string)
)
def get_body_for_movie_renamed_event(payload: WildValue) -> str:
return RADARR_MESSAGE_TEMPLATE_MOVIE_RENAMED.format(
movie_title=payload["movie"]["title"].tame(check_string)
)
def get_body_for_movie_imported_upgrade_event(payload: WildValue) -> str:
data = {
"movie_title": payload["movie"]["title"].tame(check_string),
"new_quality": payload["movieFile"]["quality"].tame(check_string),
"old_quality": payload["deletedFiles"][0]["quality"].tame(check_string),
}
return RADARR_MESSAGE_TEMPLATE_MOVIE_IMPORTED_UPGRADE.format(**data)
def get_body_for_movie_imported_event(payload: WildValue) -> str:
return RADARR_MESSAGE_TEMPLATE_MOVIE_IMPORTED.format(
movie_title=payload["movie"]["title"].tame(check_string)
)
def get_body_for_movie_grabbed_event(payload: WildValue) -> str:
return RADARR_MESSAGE_TEMPLATE_MOVIE_GRABBED.format(
movie_title=payload["movie"]["title"].tame(check_string)
)
def get_body_for_http_request(payload: WildValue) -> str:
event_type = payload["eventType"].tame(check_string)
if event_type == "Test":
return get_setup_webhook_message("Radarr")
elif event_type == "Health":
return get_body_for_health_check_event(payload)
elif event_type == "Rename":
return get_body_for_movie_renamed_event(payload)
elif event_type == "Download" and "isUpgrade" in payload:
if payload["isUpgrade"]:
return get_body_for_movie_imported_upgrade_event(payload)
else:
return get_body_for_movie_imported_event(payload)
elif event_type == "Grab":
return get_body_for_movie_grabbed_event(payload)
else:
raise UnsupportedWebhookEventType(event_type)
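# Illustrative mapping (editor's note): payloads are parsed into a WildValue, but at
# the JSON level two hypothetical events would be rendered by the handlers above as:
#
#   {"eventType": "Grab", "movie": {"title": "Inception"}}
#       topic -> "Inception"
#       body  -> "The movie Inception has been grabbed."
#
#   {"eventType": "Health", "level": "warning", "message": "Indexers unavailable"}
#       topic -> "Health warning"
#       body  -> "Indexers unavailable."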
|
{
"content_hash": "f766c5a9ffa9ac40b51deb7d11250c67",
"timestamp": "",
"source": "github",
"line_count": 106,
"max_line_length": 100,
"avg_line_length": 39.16981132075472,
"alnum_prop": 0.7025529865125241,
"repo_name": "andersk/zulip",
"id": "c9ec8d8fcc9319a9fb028298263f606240377159",
"size": "4152",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "zerver/webhooks/radarr/view.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "490256"
},
{
"name": "Dockerfile",
"bytes": "4025"
},
{
"name": "Emacs Lisp",
"bytes": "157"
},
{
"name": "HTML",
"bytes": "749848"
},
{
"name": "Handlebars",
"bytes": "377098"
},
{
"name": "JavaScript",
"bytes": "4006373"
},
{
"name": "Perl",
"bytes": "10163"
},
{
"name": "Puppet",
"bytes": "112128"
},
{
"name": "Python",
"bytes": "10168530"
},
{
"name": "Ruby",
"bytes": "3459"
},
{
"name": "Shell",
"bytes": "146797"
},
{
"name": "TypeScript",
"bytes": "284837"
}
],
"symlink_target": ""
}
|
"""Test that files marked as 'streamable' when 'streaming_allowed' can be named pipes."""
import os
from pathlib import Path
from typing import cast
import pytest
from ruamel.yaml.comments import CommentedMap
from schema_salad.sourceline import cmap
from cwltool.command_line_tool import CommandLineTool
from cwltool.context import LoadingContext, RuntimeContext
from cwltool.errors import WorkflowException
from cwltool.job import JobBase
from cwltool.update import INTERNAL_VERSION, ORIGINAL_CWLVERSION
from cwltool.utils import CWLObjectType
from .util import get_data
toolpath_object = cast(
CommentedMap,
cmap(
{
"cwlVersion": INTERNAL_VERSION,
"class": "CommandLineTool",
"inputs": [
{
"type": "File",
"id": "inp",
"streamable": True,
}
],
"outputs": [],
"requirements": [],
}
),
)
loading_context = LoadingContext(
{
"metadata": {
"cwlVersion": INTERNAL_VERSION,
ORIGINAL_CWLVERSION: INTERNAL_VERSION,
}
}
)
def test_regular_file() -> None:
"""Test that regular files do not raise any exception when they are checked in job._setup."""
clt = CommandLineTool(
toolpath_object,
loading_context,
)
runtime_context = RuntimeContext()
joborder: CWLObjectType = {
"inp": {
"class": "File",
"location": get_data("tests/wf/whale.txt"),
}
}
job = next(clt.job(joborder, None, runtime_context))
assert isinstance(job, JobBase)
job._setup(runtime_context)
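# Each tuple below is (streamable, streaming_allowed, raise_exception): a named-pipe
# input is only accepted when the input is marked streamable AND the runtime allows
# streaming; every other combination is expected to raise WorkflowException.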
streaming = [
(True, True, False),
(True, False, True),
(False, True, True),
(False, False, True),
]
@pytest.mark.parametrize("streamable,streaming_allowed,raise_exception", streaming)
def test_input_can_be_named_pipe(
tmp_path: Path, streamable: bool, streaming_allowed: bool, raise_exception: bool
) -> None:
"""Test that input can be a named pipe."""
clt = CommandLineTool(
toolpath_object,
loading_context,
)
runtime_context = RuntimeContext()
runtime_context.streaming_allowed = streaming_allowed
path = tmp_path / "tmp"
os.mkfifo(path)
joborder: CWLObjectType = {
"inp": {
"class": "File",
"location": str(path),
"streamable": streamable,
}
}
job = next(clt.job(joborder, None, runtime_context))
assert isinstance(job, JobBase)
if raise_exception:
with pytest.raises(WorkflowException):
job._setup(runtime_context)
else:
job._setup(runtime_context)
|
{
"content_hash": "f888f718d04dfea17636f3b3d307da29",
"timestamp": "",
"source": "github",
"line_count": 108,
"max_line_length": 97,
"avg_line_length": 25.23148148148148,
"alnum_prop": 0.6055045871559633,
"repo_name": "common-workflow-language/cwltool",
"id": "3c5526592dc6da96bc3521f92d3cf06804d65b41",
"size": "2725",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tests/test_streaming.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Common Workflow Language",
"bytes": "242053"
},
{
"name": "Dockerfile",
"bytes": "1128"
},
{
"name": "JavaScript",
"bytes": "1240"
},
{
"name": "Makefile",
"bytes": "7943"
},
{
"name": "Python",
"bytes": "1255040"
},
{
"name": "Shell",
"bytes": "10752"
},
{
"name": "Tcl",
"bytes": "523"
}
],
"symlink_target": ""
}
|
import os
COV = None
if os.environ.get('FLASK_COVERAGE'):
import coverage
COV = coverage.coverage(branch=True, include='app/*')
COV.start()
if os.path.exists('.env'):
print("从 .env 导入环境变量")
for line in open('.env'):
var = line.strip().split('=')
if len(var) == 2:
os.environ[var[0]] = var[1]
from app import create_app, db
from app.models import User, Follow, Role, Permission, Post, Comment
from flask.ext.script import Manager, Shell
from flask.ext.migrate import Migrate, MigrateCommand
app = create_app(os.getenv('FLASK_CONFIG') or 'default')
manager = Manager(app)
migrate = Migrate(app, db)
def make_shell_context():
return dict(app=app, db=db, User=User, Follow=Follow, Role=Role,
Permission=Permission, Post=Post, Comment=Comment)
manager.add_command("shell", Shell(make_context=make_shell_context))
manager.add_command('db', MigrateCommand)
@manager.command
def test(coverage=False):
"""Run the unit tests."""
if coverage and not os.environ.get('FLASK_COVERAGE'):
import sys
os.environ['FLASK_COVERAGE'] = '1'
os.execvp(sys.executable, [sys.executable] + sys.argv)
import unittest
tests = unittest.TestLoader().discover('tests')
unittest.TextTestRunner(verbosity=2).run(tests)
if COV:
COV.stop()
COV.save()
print('Coverage Summary:')
COV.report()
basedir = os.path.abspath(os.path.dirname(__file__))
covdir = os.path.join(basedir, 'tmp/coverage')
COV.html_report(directory=covdir)
print('HTML version: file://%s/index.html' % covdir)
COV.erase()
@manager.command
def profile(length=25, profile_dir=None):
"""Start the application under the code profiler."""
from werkzeug.contrib.profiler import ProfilerMiddleware
app.wsgi_app = ProfilerMiddleware(app.wsgi_app, restrictions=[length],
profile_dir=profile_dir)
app.run()
@manager.command
def deploy():
"""Run deployment tasks."""
from flask.ext.migrate import upgrade
from app.models import Role, User
# migrate database to latest revision
upgrade()
# create user roles
Role.insert_roles()
# create self-follows for all users
User.add_self_follows()
if __name__ == '__main__':
manager.run()
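# Usage sketch (editor's note; relies on Flask-Script turning keyword arguments into
# CLI flags, and the commands below are illustrative):
#
#   python manage.py shell              # interactive shell with app, db and models preloaded
#   python manage.py db upgrade         # run database migrations via Flask-Migrate
#   python manage.py test --coverage    # run tests and write an HTML report to tmp/coverage
#   python manage.py deploy             # upgrade DB, insert roles, add self-follows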
|
{
"content_hash": "a4aa408a6ec4d2f2275fcd00acfcf220",
"timestamp": "",
"source": "github",
"line_count": 80,
"max_line_length": 74,
"avg_line_length": 29.35,
"alnum_prop": 0.6482112436115843,
"repo_name": "hellckt/micblog",
"id": "10c01863c387da510980886629ddb45c6748b14b",
"size": "2406",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "manage.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1891"
},
{
"name": "Python",
"bytes": "87906"
}
],
"symlink_target": ""
}
|
from itertools import product
import numpy as np
## Import unit testing libraries
try:
import unittest2 as unittest
except ImportError:
import unittest
def prod_to_array(*iterables):
return np.array(list(product(*iterables)))
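# Minimal usage sketch (editor's note): prod_to_array builds the Cartesian product of
# its iterables as a 2-D numpy array, e.g.
#   prod_to_array([1, 2], [3, 4]) -> array([[1, 3], [1, 4], [2, 3], [2, 4]])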
|
{
"content_hash": "6d411e9f571b89cbc597f19d7d696661",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 46,
"avg_line_length": 18.53846153846154,
"alnum_prop": 0.7510373443983402,
"repo_name": "darothen/pyrcel",
"id": "86a68a1c72c7ce37f39412e714d63c095e2fd708",
"size": "241",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "pyrcel/test/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "151643"
}
],
"symlink_target": ""
}
|
from stevedore import extension
from heat.engine import clients
from heat.engine import environment
from heat.engine import plugin_manager
def _register_resources(env, type_pairs):
for res_name, res_class in type_pairs:
env.register_class(res_name, res_class)
def _register_constraints(env, type_pairs):
for constraint_name, constraint in type_pairs:
env.register_constraint(constraint_name, constraint)
def _register_stack_lifecycle_plugins(env, type_pairs):
for stack_lifecycle_name, stack_lifecycle_class in type_pairs:
env.register_stack_lifecycle_plugin(stack_lifecycle_name,
stack_lifecycle_class)
def _get_mapping(namespace):
mgr = extension.ExtensionManager(
namespace=namespace,
invoke_on_load=False,
verify_requirements=False)
return [[name, mgr[name].plugin] for name in mgr.names()]
_environment = None
def global_env():
if _environment is None:
initialise()
return _environment
def initialise():
global _environment
if _environment is not None:
return
clients.initialise()
global_env = environment.Environment({}, user_env=False)
_load_global_environment(global_env)
_environment = global_env
def _load_global_environment(env):
_load_global_resources(env)
environment.read_global_environment(env)
def _load_global_resources(env):
_register_constraints(env, _get_mapping('heat.constraints'))
_register_stack_lifecycle_plugins(
env,
_get_mapping('heat.stack_lifecycle_plugins'))
manager = plugin_manager.PluginManager(__name__)
# Sometimes resources should not be available for registration in Heat due
# to unsatisfied dependencies. We look first for the function
# 'available_resource_mapping', which should return the filtered resources.
# If it is not found, we look for the legacy 'resource_mapping'.
resource_mapping = plugin_manager.PluginMapping(['available_resource',
'resource'])
constraint_mapping = plugin_manager.PluginMapping('constraint')
_register_resources(env, resource_mapping.load_all(manager))
_register_constraints(env, constraint_mapping.load_all(manager))
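# Illustrative sketch (editor's note): a resource plugin module picked up by
# PluginManager typically exposes one of the mapping functions named above, e.g.
#
#   class MyResource(resource.Resource):
#       ...
#
#   def resource_mapping():
#       # maps a Heat resource type name to the implementing class
#       return {'My::Custom::Resource': MyResource}
#
# 'available_resource_mapping' follows the same contract but may filter out entries
# whose dependencies are not importable on this node.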
|
{
"content_hash": "67a3bf97ad08e3f155aa648558fb5f1f",
"timestamp": "",
"source": "github",
"line_count": 75,
"max_line_length": 79,
"avg_line_length": 30.613333333333333,
"alnum_prop": 0.688588850174216,
"repo_name": "redhat-openstack/heat",
"id": "a5d7fadbbe7d516848e9bb887dd47988a21abfcc",
"size": "2871",
"binary": false,
"copies": "1",
"ref": "refs/heads/f22-patches",
"path": "heat/engine/resources/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "4827027"
},
{
"name": "Shell",
"bytes": "26720"
}
],
"symlink_target": ""
}
|
"""Tests for doc generator traversal."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
from tensorflow.python.platform import googletest
from tensorflow.tools.docs import generate_lib
from tensorflow.tools.docs import parser
def test_function():
"""Docstring for test_function."""
pass
class TestClass(object):
"""Docstring for TestClass itself."""
class ChildClass(object):
"""Docstring for a child class."""
class GrandChildClass(object):
"""Docstring for a child of a child class."""
pass
class DummyVisitor(object):
def __init__(self, index, duplicate_of):
self.index = index
self.duplicate_of = duplicate_of
class GenerateTest(googletest.TestCase):
def get_test_objects(self):
# These are all mutable objects, so rebuild them for each test.
# Don't cache the objects.
module = sys.modules[__name__]
index = {
'tf': sys, # Can be any module, this test doesn't care about content.
'tf.TestModule': module,
'tf.test_function': test_function,
'tf.TestModule.test_function': test_function,
'tf.TestModule.TestClass': TestClass,
'tf.TestModule.TestClass.ChildClass': TestClass.ChildClass,
'tf.TestModule.TestClass.ChildClass.GrandChildClass':
TestClass.ChildClass.GrandChildClass,
}
tree = {
'tf': ['TestModule', 'test_function'],
'tf.TestModule': ['test_function', 'TestClass'],
'tf.TestModule.TestClass': ['ChildClass'],
'tf.TestModule.TestClass.ChildClass': ['GrandChildClass'],
'tf.TestModule.TestClass.ChildClass.GrandChildClass': []
}
duplicate_of = {'tf.test_function': 'tf.TestModule.test_function'}
duplicates = {
'tf.TestModule.test_function': [
'tf.test_function', 'tf.TestModule.test_function'
]
}
base_dir = os.path.dirname(__file__)
visitor = DummyVisitor(index, duplicate_of)
reference_resolver = parser.ReferenceResolver.from_visitor(
visitor=visitor, doc_index={}, py_module_names=['tf'])
parser_config = parser.ParserConfig(
reference_resolver=reference_resolver,
duplicates=duplicates,
duplicate_of=duplicate_of,
tree=tree,
index=index,
reverse_index={},
guide_index={},
base_dir=base_dir)
return reference_resolver, parser_config
def test_write(self):
_, parser_config = self.get_test_objects()
output_dir = googletest.GetTempDir()
generate_lib.write_docs(output_dir, parser_config, yaml_toc=True)
# Check redirects
redirects_file = os.path.join(output_dir, '_redirects.yaml')
self.assertTrue(os.path.exists(redirects_file))
with open(redirects_file) as f:
redirects = f.read()
self.assertEqual(redirects.split(), [
'redirects:', '-', 'from:', '/api_docs/python/tf/test_function', 'to:',
'/api_docs/python/tf/TestModule/test_function'
])
# Make sure that the right files are written to disk.
self.assertTrue(os.path.exists(os.path.join(output_dir, 'index.md')))
self.assertTrue(os.path.exists(os.path.join(output_dir, 'tf.md')))
self.assertTrue(os.path.exists(os.path.join(output_dir, '_toc.yaml')))
self.assertTrue(
os.path.exists(os.path.join(output_dir, 'tf/TestModule.md')))
self.assertFalse(
os.path.exists(os.path.join(output_dir, 'tf/test_function.md')))
self.assertTrue(
os.path.exists(
os.path.join(output_dir, 'tf/TestModule/TestClass.md')))
self.assertTrue(
os.path.exists(
os.path.join(output_dir,
'tf/TestModule/TestClass/ChildClass.md')))
self.assertTrue(
os.path.exists(
os.path.join(
output_dir,
'tf/TestModule/TestClass/ChildClass/GrandChildClass.md')))
# Make sure that duplicates are not written
self.assertTrue(
os.path.exists(
os.path.join(output_dir, 'tf/TestModule/test_function.md')))
def test_update_id_tags_inplace(self):
test_dir = googletest.GetTempDir()
test_sub_dir = os.path.join(test_dir, 'a/b')
os.makedirs(test_sub_dir)
test_path1 = os.path.join(test_dir, 'file1.md')
test_path2 = os.path.join(test_sub_dir, 'file2.md')
test_path3 = os.path.join(test_sub_dir, 'file3.notmd')
with open(test_path1, 'w') as f:
f.write('## abc&123')
with open(test_path2, 'w') as f:
f.write('# A Level 1 Heading\n')
f.write('## A Level 2 Heading')
with open(test_path3, 'w') as f:
f.write("## don\'t change this")
generate_lib.update_id_tags_inplace(test_dir)
with open(test_path1) as f:
content = f.read()
self.assertEqual(content, '<h2 id="abc_123">abc&123</h2>')
with open(test_path2) as f:
content = f.read()
self.assertEqual(
content, '# A Level 1 Heading\n'
'<h2 id="A_Level_2_Heading">A Level 2 Heading</h2>')
with open(test_path3) as f:
content = f.read()
self.assertEqual(content, "## don\'t change this")
  def test_replace_refs(self):
test_dir = googletest.GetTempDir()
test_in_dir = os.path.join(test_dir, 'in')
test_in_dir_a = os.path.join(test_dir, 'in/a')
test_in_dir_b = os.path.join(test_dir, 'in/b')
os.makedirs(test_in_dir)
os.makedirs(test_in_dir_a)
os.makedirs(test_in_dir_b)
test_out_dir = os.path.join(test_dir, 'out')
os.makedirs(test_out_dir)
test_path1 = os.path.join(test_in_dir_a, 'file1.md')
test_path2 = os.path.join(test_in_dir_b, 'file2.md')
test_path3 = os.path.join(test_in_dir_b, 'file3.notmd')
test_path4 = os.path.join(test_in_dir_b, 'OWNERS')
with open(test_path1, 'w') as f:
f.write('Use `tf.test_function` to test things.')
with open(test_path2, 'w') as f:
f.write('Use @{tf.TestModule.TestClass.ChildClass} to test things.\n'
"`tf.whatever` doesn't exist")
with open(test_path3, 'w') as f:
file3_content = (
'Not a .md file. Should be copied unchanged:'
'@{tf.TestModule.TestClass.ChildClass}, `tf.test_function`')
f.write(file3_content)
with open(test_path4, 'w') as f:
f.write('')
reference_resolver, _ = self.get_test_objects()
generate_lib.replace_refs(test_in_dir, test_out_dir, reference_resolver,
'*.md')
with open(os.path.join(test_out_dir, 'a/file1.md')) as f:
content = f.read()
self.assertEqual(
content,
'Use <a href="../api_docs/python/tf/TestModule/test_function.md">'
'<code>tf.test_function</code></a> to test things.')
with open(os.path.join(test_out_dir, 'b/file2.md')) as f:
content = f.read()
self.assertEqual(
content,
'Use '
'<a href="../api_docs/python/tf/TestModule/TestClass/ChildClass.md">'
'<code>tf.TestModule.TestClass.ChildClass</code></a> '
'to test things.\n'
'`tf.whatever` doesn\'t exist')
with open(os.path.join(test_out_dir, 'b/file3.notmd')) as f:
content = f.read()
self.assertEqual(content, file3_content)
with self.assertRaises(IOError):
# This should fail. The OWNERS file should not be copied
with open(os.path.join(test_out_dir, 'b/OWNERS')) as f:
content = f.read()
if __name__ == '__main__':
googletest.main()
|
{
"content_hash": "7f15ad274075b31e08f58b2fbb348417",
"timestamp": "",
"source": "github",
"line_count": 236,
"max_line_length": 79,
"avg_line_length": 31.733050847457626,
"alnum_prop": 0.6226465482707971,
"repo_name": "chemelnucfin/tensorflow",
"id": "863504913e55b9758fc28b51b72d957a2d8a4ece",
"size": "8178",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "tensorflow/tools/docs/generate_lib_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "4913"
},
{
"name": "Batchfile",
"bytes": "16146"
},
{
"name": "C",
"bytes": "825231"
},
{
"name": "C#",
"bytes": "8562"
},
{
"name": "C++",
"bytes": "75313939"
},
{
"name": "CMake",
"bytes": "207856"
},
{
"name": "Dockerfile",
"bytes": "80130"
},
{
"name": "Go",
"bytes": "1670422"
},
{
"name": "HTML",
"bytes": "4680032"
},
{
"name": "Java",
"bytes": "881711"
},
{
"name": "Jupyter Notebook",
"bytes": "1113647"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "MLIR",
"bytes": "853297"
},
{
"name": "Makefile",
"bytes": "109340"
},
{
"name": "Objective-C",
"bytes": "105235"
},
{
"name": "Objective-C++",
"bytes": "258793"
},
{
"name": "PHP",
"bytes": "38007"
},
{
"name": "Pascal",
"bytes": "3741"
},
{
"name": "Pawn",
"bytes": "14380"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "Python",
"bytes": "50825074"
},
{
"name": "RobotFramework",
"bytes": "891"
},
{
"name": "Ruby",
"bytes": "4706"
},
{
"name": "Shell",
"bytes": "532610"
},
{
"name": "Smarty",
"bytes": "31460"
},
{
"name": "Swift",
"bytes": "62814"
}
],
"symlink_target": ""
}
|
from string import ascii_lowercase, ascii_uppercase, digits
natural = digits[1:] + '0'
# next line might error out if destination file does not exist
with open('../lists/abc.wl', 'r+') as file:
for a in range(3, len(ascii_lowercase) + 1):
for b in range(len(ascii_lowercase)):
if len(ascii_lowercase[b: b + a]) == a:
file.write(ascii_lowercase[b: b + a] + '\n')
for a in range(3, len(ascii_uppercase) + 1):
for b in range(len(ascii_uppercase)):
if len(ascii_uppercase[b: b + a]) == a:
file.write(ascii_uppercase[b: b + a] + '\n')
for a in range(3, len(digits) + 1):
for b in range(len(digits)):
if len(digits[b: b + a]) == a:
file.write(digits[b: b + a] + '\n')
for a in range(3, len(natural) + 1):
for b in range(len(natural)):
if len(natural[b: b + a]) == a:
file.write(natural[b: b + a] + '\n')
reversed_lowercase = ascii_lowercase[:: -1]
reversed_uppercase = ascii_uppercase[:: -1]
reversed_digits = digits[:: -1]
reversed_natural = natural[:: -1]
for a in range(3, len(reversed_lowercase) + 1):
for b in range(len(reversed_lowercase)):
if len(reversed_lowercase[b: b + a]) == a:
file.write(reversed_lowercase[b: b + a] + '\n')
for a in range(3, len(reversed_uppercase) + 1):
for b in range(len(reversed_uppercase)):
if len(reversed_uppercase[b: b + a]) == a:
file.write(reversed_uppercase[b: b + a] + '\n')
for a in range(3, len(reversed_digits) + 1):
for b in range(len(reversed_digits)):
if len(reversed_digits[b: b + a]) == a:
file.write(reversed_digits[b: b + a] + '\n')
for a in range(3, len(reversed_natural) + 1):
for b in range(len(reversed_natural)):
if len(reversed_natural[b: b + a]) == a:
file.write(reversed_natural[b: b + a] + '\n')
file.close()
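# Output sketch (editor's note): for each alphabet the loops emit every contiguous
# run of length >= 3, forwards and then reversed, one run per line, e.g. for lowercase:
#   abc, bcd, ..., xyz, abcd, ..., abcdefghijklmnopqrstuvwxyz, zyx, yxw, ...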
|
{
"content_hash": "ee57631c82ffc79f9a46fb46acae0345",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 63,
"avg_line_length": 44.55555555555556,
"alnum_prop": 0.543142144638404,
"repo_name": "Videonauth/passgen",
"id": "8bc355cb52f86eecfde2cddcc818f2bcd7b5bbc7",
"size": "2479",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tool/abc_list_generator.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Mathematica",
"bytes": "7186931"
},
{
"name": "Python",
"bytes": "20106"
}
],
"symlink_target": ""
}
|
import base64
import time
import logging
from selenium import webdriver
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from selenium.common.exceptions import NoSuchElementException
from PIL import Image
from io import StringIO, BytesIO
from synchronize_util import synchronized, CONSOLE_LOCK
# This module is for code verification
# Only one verification code is handled at a time across all users (see CONSOLE_LOCK below)
get_image_data = '''
function getBase64Image(img) {
// Create an empty canvas element
var canvas = document.createElement("canvas");
canvas.width = img.width;
canvas.height = img.height;
// Copy the image contents to the canvas
var ctx = canvas.getContext("2d");
ctx.drawImage(img, 0, 0);
// Get the data-URL formatted image
// Firefox supports PNG and JPEG. You could check img.src to
// guess the original format, but be aware the using "image/jpg"
// will re-encode the image.
var dataURL = canvas.toDataURL("image/png");
return dataURL.replace(/^data:image\/(png|jpg);base64,/, "");
// return dataURL;
}
code_img = document.querySelector('img[node-type="yzm_img"]');
// code_img = document.querySelector('img');
data_URL = getBase64Image(code_img);
return data_URL;
'''
def test():
driver = webdriver.PhantomJS()
driver.get('http://s.weibo.com/ajax/pincode/pin?type=sass&ts=1405404856')
verify_user(driver)
return
def get_img(base64_str):
'''
convert the base64 string to png image --> PIL.Image
'''
base64_bytes = base64.b64decode(base64_str)
image_bytes_io = BytesIO(base64_bytes)
image = Image.open(image_bytes_io)
return image
def get_code(img):
'''
    given an image, return its code; only one image can be served at a time --> the code string
'''
img.show()
    verification_code = input('Please input the verification code: ')
return verification_code
def verify_user_for_search(driver):
'''
    when the driver hits a verification code page, display the code image and submit the user-entered code until the feed loads
    driver: the current driver that has run into the verification code
'''
while True:
feed = driver.find_elements_by_class_name('feed_list')
if len(feed) == 0:
# there is no feed in this page, meaning you need to input the code
code_png = get_img(driver.execute_script(get_image_data))
verification_code = get_code(code_png)# this action needs to be primitive
code_input = driver.find_element_by_xpath('//input[@node-type="yzm_input"]')
code_input.click()
code_input.send_keys(verification_code.strip())
submit_button = driver.find_element_by_xpath('//a[@node-type="yzm_submit"]')
submit_button.click()
time.sleep(5)
            driver.get_screenshot_as_file('./screenshot/after_verify.png')
else:
break
logging.info('verification completed!')
return
def verify_user_for_login(driver):
'''
    Because login is retried in a loop, this verification code only needs to be handled once here, unlike the search verification case
'''
if not driver.find_element_by_xpath('//img[@node-type="verifycode_image"]'):
logging.info('There is no verfication code here, continue')
return
else:
try:
# get png, the image instance of PIL
png_element = driver.find_element_by_xpath('//img[@node-type="verifycode_image"]')
location = png_element.location
size = png_element.size
logging.info('vrcode: location--{}, size--{}'.format(location, size))
im = get_img(driver.get_screenshot_as_base64())
left = location['x']
top = location['y']
right = location['x'] + size['width']
bottom = location['y'] + size['height']
im = im.crop((left, top, right, bottom)) # defines crop points
verification_code = get_code(im)
code_input = driver.find_element_by_xpath('//input[@name="verifycode"]')
code_input.click()
code_input.send_keys(verification_code.strip())
except Exception as e:
driver.get_screenshot_as_file('./screenshot/login_failed.png')
            logging.info('error, file saved to ./screenshot/login_failed.png')
return
@synchronized(CONSOLE_LOCK) # this method is primitive
def verify_user(driver, v_type):
'''
v_type: string, 'search', 'login'
'''
if v_type == 'search':
verify_user_for_search(driver)
elif v_type == 'login':
verify_user_for_login(driver)
else:
logging.info('Unknown verification type')
return
if __name__ == '__main__':
test()
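# Usage sketch (editor's note): a crawler thread that hits a verification page calls
# the synchronized entry point with the scenario type, e.g.
#
#   verify_user(driver, 'search')   # while paging search results
#   verify_user(driver, 'login')    # during the login flow
#
# CONSOLE_LOCK ensures only one thread prompts on the console at a time.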
|
{
"content_hash": "44d6cc028bf957eea64900bc42f4cf75",
"timestamp": "",
"source": "github",
"line_count": 146,
"max_line_length": 107,
"avg_line_length": 32.034246575342465,
"alnum_prop": 0.6369467607440668,
"repo_name": "KeithYue/weibo-keywords-crawler",
"id": "321a849775e824655942ecde437bbf281050052a",
"size": "4756",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "code_verification.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "20488"
}
],
"symlink_target": ""
}
|
"""
Volume manager manages creating, attaching, detaching, and persistent storage.
Persistent storage volumes keep their state independent of instances. You can
attach to an instance, terminate the instance, spawn a new instance (even
one from a different image) and re-attach the volume with the same data
intact.
**Related Flags**
:volume_topic: What :mod:`rpc` topic to listen to (default: `cinder-volume`).
:volume_manager: The module name of a class derived from
:class:`manager.Manager` (default:
:class:`cinder.volume.manager.Manager`).
:volume_driver: Used by :class:`Manager`. Defaults to
:class:`cinder.volume.drivers.lvm.LVMISCSIDriver`.
:volume_group: Name of the group that will contain exported volumes (default:
`cinder-volumes`)
:num_shell_tries: Number of times to attempt to run commands (default: 3)
"""
from cinder import exception
from cinder.image import glance
from cinder.openstack.common import excutils
from cinder.openstack.common.gettextutils import _
from cinder.openstack.common import log as logging
from cinder import quota
from cinder import utils
from cinder.volume import manager as core_manager
from cinder.volume import volume_types
from cinder.volume import utils as vol_utils
LOG = logging.getLogger(__name__)
QUOTAS = quota.QUOTAS
class HuaweiVolumeManager(core_manager.VolumeManager):
"""The volume manager of huawei."""
RPC_API_VERSION = '1.11'
def __init__(self, volume_driver=None, service_name=None, *args, **kwargs):
"""Load the driver from the one specified in args, or from flags."""
# update_service_capabilities needs service_name to be volume
super(HuaweiVolumeManager, self).__init__(volume_driver=volume_driver,
service_name=service_name,
*args, **kwargs)
def _is_share_volume(self, volume_type_id):
if volume_type_id:
value = volume_types.get_volume_type_extra_specs(volume_type_id,
key='is_shared')
if value and value.lower() == 'true':
return True
return False
def _check_volume_dependence(self, context, volume_id, volume_type_id,
project_id):
if context.project_id != project_id:
project_id = project_id
else:
project_id = context.project_id
if self._is_share_volume(volume_type_id):
all_vols = self.db.volume_get_all_by_project(context, project_id,
None, None,
'source_volid', None)
for vol in all_vols:
if vol.source_volid == volume_id:
self.db.volume_update(context, volume_id,
{'status': 'available'})
msg = _("Volume still has dependent volume")
raise exception.InvalidVolume(reason=msg)
def delete_volume(self, context, volume_id, unmanage_only=False):
"""Deletes and unexports volume."""
context = context.elevated()
volume_ref = self.db.volume_get(context, volume_id)
if context.project_id != volume_ref['project_id']:
project_id = volume_ref['project_id']
else:
project_id = context.project_id
LOG.info(_("volume %s: deleting"), volume_ref['id'])
if volume_ref['attach_status'] == "attached":
# Volume is still attached, need to detach first
raise exception.VolumeAttached(volume_id=volume_id)
if (vol_utils.extract_host(volume_ref['host']) != self.host):
raise exception.InvalidVolume(
reason=_("volume is not local to this node"))
volume_type_id = volume_ref.get('volume_type_id', None)
self._check_volume_dependence(context, volume_id, volume_type_id,
project_id)
self._notify_about_volume_usage(context, volume_ref, "delete.start")
try:
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the volume status updated.
utils.require_driver_initialized(self.driver)
LOG.debug(_("volume %s: removing export"), volume_ref['id'])
self.driver.remove_export(context, volume_ref)
LOG.debug(_("volume %s: deleting"), volume_ref['id'])
self.driver.delete_volume(volume_ref)
except exception.VolumeIsBusy:
LOG.error(_("Cannot delete volume %s: volume is busy"),
volume_ref['id'])
self.driver.ensure_export(context, volume_ref)
self.db.volume_update(context, volume_ref['id'],
{'status': 'available'})
return True
except Exception:
with excutils.save_and_reraise_exception():
self.db.volume_update(context,
volume_ref['id'],
{'status': 'error_deleting'})
# If deleting the source volume in a migration, we want to skip quotas
# and other database updates.
if volume_ref['migration_status']:
return True
# Get reservations
try:
reserve_opts = {'volumes': -1, 'gigabytes': -volume_ref['size']}
QUOTAS.add_volume_type_opts(context,
reserve_opts,
volume_ref.get('volume_type_id'))
reservations = QUOTAS.reserve(context,
project_id=project_id,
**reserve_opts)
except Exception:
reservations = None
LOG.exception(_("Failed to update usages deleting volume"))
# Delete glance metadata if it exists
try:
self.db.volume_glance_metadata_delete_by_volume(context, volume_id)
LOG.debug(_("volume %s: glance metadata deleted"),
volume_ref['id'])
except exception.GlanceMetadataNotFound:
LOG.debug(_("no glance metadata found for volume %s"),
volume_ref['id'])
self.db.volume_destroy(context, volume_id)
LOG.info(_("volume %s: deleted successfully"), volume_ref['id'])
self._notify_about_volume_usage(context, volume_ref, "delete.end")
# Commit the reservations
if reservations:
QUOTAS.commit(context, reservations, project_id=project_id)
self.publish_service_capabilities(context)
return True
def create_snapshot(self, context, volume_id, snapshot_id):
"""Creates and exports the snapshot."""
caller_context = context
context = context.elevated()
snapshot_ref = self.db.snapshot_get(context, snapshot_id)
LOG.info(_("snapshot %s: creating"), snapshot_ref['id'])
vol_ref = self.db.volume_get(context, volume_id)
volume_type_id = vol_ref.get('volume_type_id', None)
if self._is_share_volume(volume_type_id):
msg = _("Volume is share volume")
self.db.snapshot_update(context,
snapshot_ref['id'],
{'status': 'error'})
raise exception.InvalidVolume(reason=msg)
self._notify_about_snapshot_usage(
context, snapshot_ref, "create.start")
try:
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the snapshot status updated.
utils.require_driver_initialized(self.driver)
LOG.debug(_("snapshot %(snap_id)s: creating"),
{'snap_id': snapshot_ref['id']})
# Pass context so that drivers that want to use it, can,
# but it is not a requirement for all drivers.
snapshot_ref['context'] = caller_context
model_update = self.driver.create_snapshot(snapshot_ref)
if model_update:
self.db.snapshot_update(context, snapshot_ref['id'],
model_update)
except Exception:
with excutils.save_and_reraise_exception():
self.db.snapshot_update(context,
snapshot_ref['id'],
{'status': 'error'})
self.db.snapshot_update(context,
snapshot_ref['id'], {'status': 'available',
'progress': '100%'})
if vol_ref.bootable:
try:
self.db.volume_glance_metadata_copy_to_snapshot(
context, snapshot_ref['id'], volume_id)
except exception.CinderException as ex:
LOG.exception(_("Failed updating %(snapshot_id)s"
" metadata using the provided volumes"
" %(volume_id)s metadata") %
{'volume_id': volume_id,
'snapshot_id': snapshot_id})
raise exception.MetadataCopyFailure(reason=ex)
LOG.info(_("snapshot %s: created successfully"), snapshot_ref['id'])
self._notify_about_snapshot_usage(context, snapshot_ref, "create.end")
return snapshot_id
def copy_volume_to_image(self, context, volume_id, image_meta):
"""Uploads the specified volume to Glance.
image_meta is a dictionary containing the following keys:
'id', 'container_format', 'disk_format'
"""
payload = {'volume_id': volume_id, 'image_id': image_meta['id']}
try:
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the volume status updated.
utils.require_driver_initialized(self.driver)
volume = self.db.volume_get(context, volume_id)
volume_type_id = volume.get('volume_type_id', None)
if self._is_share_volume(volume_type_id):
self.db.volume_update(context,
volume_id,
{'status': 'error'})
msg = _("Volume is share volume")
raise exception.InvalidVolume(reason=msg)
self.driver.ensure_export(context.elevated(), volume)
image_service, image_id = \
glance.get_remote_image_service(context, image_meta['id'])
self.driver.copy_volume_to_image(context, volume, image_service,
image_meta)
LOG.debug(_("Uploaded volume %(volume_id)s to "
"image (%(image_id)s) successfully"),
{'volume_id': volume_id, 'image_id': image_id})
except Exception as error:
with excutils.save_and_reraise_exception():
payload['message'] = unicode(error)
finally:
if not volume['volume_attachment']:
self.db.volume_update(context, volume_id,
{'status': 'available'})
else:
self.db.volume_update(context, volume_id,
{'status': 'in-use'})
def initialize_connection(self, context, volume_id, connector):
"""Prepare volume for connection from host represented by connector.
This method calls the driver initialize_connection and returns
it to the caller. The connector parameter is a dictionary with
information about the host that will connect to the volume in the
following format::
{
'ip': ip,
'initiator': initiator,
}
ip: the ip address of the connecting machine
initiator: the iscsi initiator name of the connecting machine.
This can be None if the connecting machine does not support iscsi
connections.
driver is responsible for doing any necessary security setup and
returning a connection_info dictionary in the following format::
{
'driver_volume_type': driver_volume_type,
'data': data,
}
driver_volume_type: a string to identify the type of volume. This
can be used by the calling code to determine the
strategy for connecting to the volume. This could
be 'iscsi', 'rbd', 'sheepdog', etc.
data: this is the data that the calling code will use to connect
to the volume. Keep in mind that this will be serialized to
json in various places, so it should not contain any non-json
data types.
"""
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the volume status updated.
utils.require_driver_initialized(self.driver)
volume = self.db.volume_get(context, volume_id)
self.driver.validate_connector(connector)
instance_uuid = connector.get('instance_uuid', None)
self._check_attach_same_instance(context, volume, instance_uuid)
conn_info = self.driver.initialize_connection(volume, connector)
# Add qos_specs to connection info
typeid = volume['volume_type_id']
specs = None
if typeid:
res = volume_types.get_volume_type_qos_specs(typeid)
qos = res['qos_specs']
# only pass qos_specs that is designated to be consumed by
# front-end, or both front-end and back-end.
if qos and qos.get('consumer') in ['front-end', 'both']:
specs = qos.get('specs')
qos_spec = dict(qos_specs=specs)
conn_info['data'].update(qos_spec)
# Add access_mode to connection info
volume_metadata = self.db.volume_admin_metadata_get(context.elevated(),
volume_id)
if conn_info['data'].get('access_mode') is None:
access_mode = volume_metadata.get('attached_mode')
if access_mode is None:
# NOTE(zhiyan): client didn't call 'os-attach' before
access_mode = ('ro'
if volume_metadata.get('readonly') == 'True'
else 'rw')
conn_info['data']['access_mode'] = access_mode
return conn_info
def _check_attach_same_instance(self, context, volume, instance_uuid):
volume_type_id = volume['volume_type_id']
if not self._is_share_volume(volume_type_id):
return
vol_attachments = self.db.volume_get_all_by_instance_uuid(context,
instance_uuid)
base_src_vol_id = self._get_source_volume_id_by_volume(context, volume)
for vol_attachment in vol_attachments:
vol = vol_attachment.volume
volume_type_id = vol['volume_type_id']
if not self._is_share_volume(volume_type_id):
continue
src_vol_id = self._get_source_volume_id_by_volume(context, vol)
if base_src_vol_id == src_vol_id:
LOG.warn(_("attach same instance with volume %s"),
vol['id'])
msg = _("share volumes has the same source volume "
"can not attach to the same instance.")
raise exception.InvalidVolume(reason=msg)
def _get_source_volume_id_by_volume(self, context, volume):
source_volume_id = volume['source_volid']
if source_volume_id:
return self._get_source_volume_id_by_volume_id(context,
source_volume_id)
return volume['id']
def _get_source_volume_id_by_volume_id(self, context, volume_id):
volume = self.db.volume_get(context, volume_id)
return self._get_source_volume_id_by_volume(context, volume)
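# Illustrative sketch (editor's note): the connector/connection_info dictionaries
# described in initialize_connection() typically look like the following for an
# iSCSI-backed volume; the field values here are hypothetical.
#
#   connector = {'ip': '10.0.0.5',
#                'initiator': 'iqn.1994-05.com.redhat:client01',
#                'host': 'compute-1'}
#   conn_info = {'driver_volume_type': 'iscsi',
#                'data': {'target_iqn': 'iqn.2006-08.com.huawei:target01',
#                         'target_portal': '10.0.0.10:3260',
#                         'target_lun': 1,
#                         'access_mode': 'rw',
#                         'qos_specs': None}}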
|
{
"content_hash": "8b6279a12fcdf39d089b9545e9342e7e",
"timestamp": "",
"source": "github",
"line_count": 373,
"max_line_length": 80,
"avg_line_length": 44.31903485254692,
"alnum_prop": 0.555259814893231,
"repo_name": "nash-x/hws",
"id": "34231a8df0ee53f1251d04484d986a4c2912102e",
"size": "17307",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "cinder/volume/huawei_manager.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "1043"
},
{
"name": "PLpgSQL",
"bytes": "12782"
},
{
"name": "Python",
"bytes": "20443623"
},
{
"name": "Shell",
"bytes": "4643"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('program', '0005_programusers_is_reg_complete'),
]
operations = [
migrations.AddField(
model_name='programusers',
name='is_pre_reg_complete',
field=models.BooleanField(default=False),
),
]
|
{
"content_hash": "fd4ebf3fd5c90683c431bfcd580e1e17",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 57,
"avg_line_length": 22.666666666666668,
"alnum_prop": 0.6127450980392157,
"repo_name": "airportmarc/the416life",
"id": "a7527916d39950ee872e7888aee6cdec272949d6",
"size": "481",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/apps/program/migrations/0006_programusers_is_pre_reg_complete.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "18"
},
{
"name": "CSS",
"bytes": "430385"
},
{
"name": "HTML",
"bytes": "174632"
},
{
"name": "JavaScript",
"bytes": "224762"
},
{
"name": "Python",
"bytes": "477212"
},
{
"name": "Shell",
"bytes": "4240"
},
{
"name": "Vue",
"bytes": "80363"
}
],
"symlink_target": ""
}
|
"""
HTTP agent logic
~~~~~~~~~~~~~~~~
:author: Sam Gammon <sg@samgammon.com>
:copyright: (c) Sam Gammon, 2014
:license: This software makes use of the MIT Open Source License.
A copy of this license is included as ``LICENSE.md`` in
the root of the project.
"""
# canteen core & util
from canteen.base import logic
from canteen.util import struct
from canteen.util import decorators
class Vendor(struct.BidirectionalEnum):
""" Enumerated common vendors that can be found in an HTTP client or browser's
``User-Agent`` string. """
GOOGLE = 0x0 # Chrome
MOZILLA = 0x1 # Firefox
MICROSOFT = 0x2 # IE
OPERA = 0x3 # Opera
APPLE = 0x4 # Apple
OPEN = 0x5 # Open source
OTHER = 0x6 # Anything else
class AgentInfo(object):
""" Base class structure that removes object slots in favor of an extendable,
      fully-static object structure. Used for individual objects that retain or
specify details about an HTTP client's ``User-Agent`` string. """
__slots__ = tuple()
def dump(self):
""" Dump the local data carried by this ``AgentInfo`` object or subclass
object.
:returns: Dictionary (``dict``) of held ``key => value`` pairs. """
return dict(((k, getattr(self, k, None)) for k in self.__slots__))
def __repr__(self):
""" Generate a pleasant string representation for this unit of
``AgentInfo``.
:returns: Human-readable string representation of this object. """
return "%s(%s)" % (
self.__class__.__name__.replace('Agent', ''),
', '.join(
('='.join((
i,
str(getattr(self, i) if (
hasattr(self, i)) else None))) for i in (
self.__slots__) if not i.startswith('__'))))
class AgentVersion(AgentInfo):
""" Holds parsed version information for a software HTTP client or browser,
found while scanning the ``User-Agent`` header. """
__slots__ = (
'major', # major browser version (the `3` in 3.0)
'minor', # minor browser version (the `1` in 3.1)
'micro') # micro browser version (the `5` in 3.1.5)
def __init__(self, major, minor=None, micro=None):
""" Initialize this version info container.
:param major: Major version.
:param minor: Minor version (optional, defaults to ``None``).
:param micro: Micro version (optional, defaults to ``None``). """
self.major, self.minor, self.micro = major, minor, micro
class AgentOS(AgentInfo):
""" Holds parsed operating system information for a software HTTP client or
browser, found while scanning the ``User-Agent`` string. """
__slots__ = (
'name', # `Mac OS X`, `Windows XP`, etc
'vendor', # vendor of the OS, from above
'version') # detected version of the OS
def __init__(self, name, vendor, version):
""" Initialize this OS info container.
:param name: Name of the operating system running on the host described
by the subject ``User-Agent`` string.
:param vendor: Software vendor that produced the operating system
running on the host described by the ``User-Agent`` string.
:param version: Version information for the operating system running on
the host described by the ``User-Agent`` string. """
self.name, self.vendor, self.version = name, vendor, version
@classmethod
def scan(cls, request, user_agent, detected):
""" Scan a target ``user_agent`` string, encapsulated by an HTTP
``request``, for information about an HTTP client or browser's active
operating system.
:param request: HTTP request that carries with it the ``User-Agent``
header in question.
:param user_agent: Specifically, the ``User-Agent`` that the framework
wishes us to scan.
:param detected: Container of other information detected so-far in the
``User-Agent`` detection process.
:returns: Spawned ``AgentOS`` info container describing any operating
system information in the ``User-Agent`` in question."""
return cls(*{
'bsd': ('BSD', Vendor.OPEN, AgentVersion(0)),
'linux': ('Linux', Vendor.OPEN, AgentVersion(0)),
'macos': ('Mac OS X', Vendor.APPLE, AgentVersion(0)),
'windows': ('Windows', Vendor.MICROSOFT, AgentVersion(0)),
'ipad': ('iOS', Vendor.APPLE, AgentVersion(0)),
'iphone': ('iOS', Vendor.APPLE, AgentVersion(0))
}.get(user_agent.platform.lower().strip(), (
'unknown', Vendor.OTHER, AgentVersion(0))))
class AgentCapabilities(AgentInfo):
""" Holds parsed or detected information about a software HTTP client or
browser's extra/interesting capabilities. """
__slots__ = (
'spdy', # support for SPDY
'quic', # support for QUIC
'webp', # support for WebP
'webm', # support for WebM
'http2' # support for HTTP2
)
def __init__(self, **kwargs):
""" Initialize this capabilities container.
:param **kwargs: Accepts keywords for supported flags, to set them as
active (``True``) or inactive (``False``). Currently, the supported
capabilities flags are all ``bool`` and consist of:
- ``spdy`` - is the client browser communicating over SPDY?
- ``quic`` - is the client browser communicating over QUIC?
- ``http2`` - is the client browser communicating over HTTP2?
- ``webp`` - does the client indicate support for WebP?
- ``webm`` - does the client indicate support for WebM? """
for datapoint in self.__slots__:
setattr(self, datapoint, kwargs[datapoint] if datapoint in (
kwargs) else None)
@classmethod
def scan(cls, request, user_agent, detected):
""" Scan a target ``user_agent`` string, encapsulated by an HTTP
``request``, for information about an HTTP client or browser's
indicated or implied capabilities.
:param request: HTTP request that contains the original ``User-Agent``
header to be scanned.
:param user_agent: Specifically, the ``User-Agent`` that the framework
wishes us to scan, should it be different from the original.
:param detected: Container of other information detected so-far in the
``User-Agent`` scanning process.
:returns: Spawned ``AgentCapabilities`` object describing any detected
capabilities implied or indicated by the subject ``User-Agent``
string. """
detected = {} # detected capabilities
accept_string = request.headers['Accept'] if 'Accept' in (
request.headers) else ''
for datapoint, conditional in ((
('quic', user_agent.browser == 'chrome'),
('spdy', user_agent.browser in ('chrome', 'firefox', 'opera')),
('webm', user_agent.browser in ('chrome', 'firefox', 'opera')),
('webp', user_agent.browser == 'chrome' or 'webp' in accept_string))):
detected[datapoint] = conditional
return cls(**detected)
class AgentFingerprint(AgentInfo):
""" Holds a full picture of detected information about a software HTTP client
or browser, scanned or inferred from various request headers such as
``User-Agent`` and ``Accept``.
Encapsulates local information about:
- asset quality preferences indicated by browser
- general flags for whether a client is *modern* or *ancient*
- supported languages, character sets, mimetypes and encodings
- a client's OS (contained in an ``AgentOS`` instance)
- a client's inferred or indicated capabilities (in an
``AgentCapabilities`` instance) """
__slots__ = (
# == Basic Data == #
'accept', # full Accept request header
'string', # full User-Agent request header
'vendor', # detected Vendor of this browser (enumerated in AgentVendor)
'version', # detected version of this browser
'quality', # global relative quality preference
# == Accept Header == #
'charsets', # accepted charsets
'encodings', # accepted encodings
'languages', # accepted languages
'mimetypes', # accepted mimetypes
# == General Flags == #
'modern', # is this browser generally considered `modern`?
'ancient', # or is this browser considered `ancient`?
# == Specific Browsers == #
'chrome', # is this Chrome or Chromium?
'seamonkey', # is this Seamonkey?
'msie', # is this Internet Exporer?
'safari', # is this Safari?
'firefox', # is this Firefox?
'opera', # is this Opera?
'googlebot', # is this google's crawler?
'aol', # is this AOL's crawler?
'ask', # is this Ask's crawler?
'yahoo', # is this Yahoo's crawler?
# == Environment == #
'mobile', # can we detect that this is a mobile device?
'tablet', # can we detect that this is a tablet?
'desktop', # or do we fallback to desktop?
'crawler', # is this a known crawler?
# == Engines == #
'gecko', # old mozilla/netscape engine
'blink', # is this the `blink` fork of webkit (Chrome)?
'webkit', # is this webkit (Chrome/Safari)?
'presto', # is this an early Opera engine, pre-Blink?
'trident', # always active during MSIE requests
'spidermonkey', # is this spidermonkey (mozilla)?
# == Internals == #
'__os__', # holds an `AgentOS` object
'__supports__') # holds an `AgentCapabilities` object
def __init__(self, **kwargs):
""" Initialize a new ``AgentFingerprint`` object.
:param **kwargs: Arbitrary container of parameters to write into the
new ``AgentFingerprint`` object. Valid options are specified in the
object's ``__slots__`` attribute. """
self.__os__, self.__supports__ = None, None
for datapoint in self.__slots__:
setattr(self, datapoint, kwargs[datapoint] if datapoint in (
kwargs) else None)
@property
def os(self):
""" Property accessor for detected operating system information.
:returns: ``AgentOS`` instance describing operating system information
for a given ``AgentFingerprint`` subject. """
return self.__os__
@property
def supports(self):
""" Property accessor for inferred or indicated client capabilities.
:returns: ``AgentCapabilities`` instance describing detected/supported
capabilities and features for a given ``AgentFingerprint``
subject. """
return self.__supports__
capabilities = supports
@classmethod
def scan(cls, request, ua):
""" Scan a target HTTP ``request`` and ``User-Agent`` string for various
pieces of information, such as an OS, browser/vendor, etc. Also scan
other request-based headers that can provide hints about supported
browser features and options.
:param request: Original HTTP request providing the ``User-Agent`` to
be scanned.
:param ua: Specific ``User-Agent`` string requested for parsing by the
framework.
:returns: Spawned ``AgentFingerprint`` instance describing any and all
information available to be parsed from the ``User-Agent`` and
``Accept``-series request headers. """
detected = {
'accept': request.headers.get('accept'),
'string': request.headers.get('user-agent'),
'charsets': request.accept_charsets,
'encodings': request.accept_encodings,
'languages': request.accept_languages,
'mimetypes': request.accept_mimetypes
} # holds detected truths/guesses
if ua is None: return cls(**{})
# detect version first
version = detected['version'] = ua.version.split('.')
version_spec = []
# take each version section as major, minor, micro
for grouping in version:
if len(version_spec) >= 3:
break
try:
version_spec.append(int(grouping))
except ValueError:
break
# if we detected *anything* as an int, add it as our version
version = detected['version'] = AgentVersion(*tuple(
version_spec)) if version_spec else AgentVersion(0)
platform = ua.platform.lower().strip()
# all others
for datapoint, condition in ((
## Browser
('chrome', ua.browser == 'chrome'),
('firefox', ua.browser == 'firefox'),
('seamonkey', ua.browser == 'seamonkey'),
('safari', ua.browser == 'safari'),
('opera', ua.browser == 'opera'),
('msie', ua.browser == 'msie'),
('googlebot', ua.browser == 'google'),
('yahoo', ua.browser == 'yahoo'),
('aol', ua.browser == 'aol'),
('ask', ua.browser == 'ask'),
## Engines
('trident', ua.browser == 'msie'),
('blink', ua.browser in ('chrome', 'opera')),
('presto', ua.browser == 'opera' and version.major < 15),
('webkit', ua.browser in ('safari', 'chrome', 'opera')),
('spidermonkey', ua.browser in ('firefox', 'seamonkey')),
('gecko', 'Gecko' in ua.string and ('WebKit' not in ua.string and (
'Chrome' not in ua.string))),
## Environments
('tablet', 'Tabl' in ua.string or 'iPad' in ua.string),
('crawler', ua.browser in ('google', 'yahoo', 'aol', 'ask')),
('mobile', 'Mobi' in ua.string or 'IEMobile' in ua.string or (
platform in ('ios', 'iphone', 'ipad'))))):
detected[datapoint] = condition
# detect vendor
detected['vendor'] = Vendor.OTHER
for k, v in {
Vendor.GOOGLE: detected.get('chrome') or detected.get('googlebot'),
Vendor.MOZILLA: detected.get('firefox') or detected.get('seamonkey'),
Vendor.MICROSOFT: detected.get('msie'),
Vendor.OPERA: detected.get('opera'),
Vendor.APPLE: detected.get('safari'),
Vendor.OPEN: detected.get('seamonkey')}.iteritems():
if v: detected['vendor'] = k
# desktop mode
detected['desktop'] = not any((
detected.get('mobile'), detected.get('tablet')))
# OS detection
detected['__os__'] = AgentOS.scan(request, ua, detected)
# capabilities detection
detected['__supports__'] = AgentCapabilities.scan(request, ua, detected)
# judge modern/ancient
detected['modern'] = (detected['chrome'] or detected['safari'] or (
detected['firefox'] or detected['opera'])) and (
detected['__os__'].name in ('Mac OS X', 'Windows', 'Linux'))
# calculate quality preferences
detected['quality'] = {}
if 'mimetypes' in detected:
for mime, quality in detected['mimetypes']:
detected['quality'][mime] = quality
detected['ancient'] = not detected['modern']
return cls(**detected)
@decorators.bind('http.agent')
class UserAgent(logic.Logic):
""" Provides structured access to HTTP request headers. Interrogates values
such as ``User-Agent`` and ``Accept`` to infer or detect things such as a
client's OS, browser, and feature capabilities. """
@staticmethod
def scan(request):
""" Scan an HTTP ``request`` for information about the other end of the
connection. Detect as much information as possible from headers such as
``User-Agent`` and ``Accept``.
:param request: HTTP request to be scanned.
:returns: :py:class:`AgentFingerprint` instance containing any detected
information found in the ``User-Agent`` or ``Accept``-series request
headers. """
return AgentFingerprint.scan(request, request.user_agent)
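# Minimal usage sketch (editor's note; `request` is assumed to be a framework request
# object exposing parsed `user_agent` and `accept_*` attributes):
#
#   fingerprint = UserAgent.scan(request)
#   if fingerprint.mobile and not fingerprint.supports.webp:
#       ...serve a fallback image format...
#   log.info('%s %s from %s', fingerprint.os, fingerprint.version, fingerprint.vendor)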
|
{
"content_hash": "713dc70aa15338f8e88c1beb2c0b1225",
"timestamp": "",
"source": "github",
"line_count": 447,
"max_line_length": 80,
"avg_line_length": 34.588366890380314,
"alnum_prop": 0.6262208136601772,
"repo_name": "momentum/canteen",
"id": "389c6619177dac0abcad2865f4ce37c8afbf76c9",
"size": "15486",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "canteen/logic/http/agent.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "55"
},
{
"name": "Makefile",
"bytes": "4701"
},
{
"name": "Python",
"bytes": "804158"
}
],
"symlink_target": ""
}
|
from django.db import models
# Create your models here.
class Pages(models.Model):
name = models.CharField(max_length=32)
page = models.TextField()
|
{
"content_hash": "8c2c9768f7e5b7d93072c9b19c88e6b3",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 42,
"avg_line_length": 22.571428571428573,
"alnum_prop": 0.7215189873417721,
"repo_name": "CursosWeb/X-Serv-15.5-Django-CMS",
"id": "938ff31af4669c10d3388fb86ba14e72181e41b5",
"size": "158",
"binary": false,
"copies": "10",
"ref": "refs/heads/master",
"path": "cms/models.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "5727"
}
],
"symlink_target": ""
}
|
from flask import Flask, render_template
import pymongo
from game import Game
import thread
app = Flask(__name__)
mongo = pymongo.MongoClient()
db = mongo['Conway']
@app.route("/")
def hello():
return render_template("helloworld.html")
@app.route("/test")
def test():
return render_template("helloworld.html")
if __name__ == "__main__":
print("Launching web application.")
# thread.start_new_thread(app.run,())
print("Launching simulation.")
#g = Game(db)
app.run()
|
{
"content_hash": "05f009af61a905902dc7efff49eed2a0",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 45,
"avg_line_length": 20,
"alnum_prop": 0.658,
"repo_name": "nguyenml/PracticePython",
"id": "6fbed58751e530cca9dd6c93f30817df94ef1a1c",
"size": "500",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/app.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "3304"
}
],
"symlink_target": ""
}
|
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('submissions', '0004_submission_slug'),
('submissions', '0005_auto_20191102_2010'),
]
operations = [
]
|
{
"content_hash": "62e66eaee07271b323f647033f30ef3c",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 51,
"avg_line_length": 19.083333333333332,
"alnum_prop": 0.6244541484716157,
"repo_name": "patrick91/pycon",
"id": "d39b6ff6d7293d0e283612816683930aad70810b",
"size": "278",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "backend/submissions/migrations/0006_merge_20191113_0542.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1456"
},
{
"name": "Python",
"bytes": "13911"
}
],
"symlink_target": ""
}
|
"""
The MIT License (MIT)
Copyright (c) 2017 LeanIX GmbH
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
NOTE: This class is auto generated by the swagger code generator program. Do not edit the class manually.
"""
class FactSheetHasPredecessor:
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self):
self.swaggerTypes = {
'ID': 'str',
'factSheetID': 'str',
'factSheetRefID': 'str',
'description': 'str',
'dependencyTypeID': 'str'
}
self.ID = None # str
self.factSheetID = None # str
self.factSheetRefID = None # str
self.description = None # str
self.dependencyTypeID = None # str
|
{
"content_hash": "159865256218270ac2e692b6663a7965",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 105,
"avg_line_length": 39.68888888888889,
"alnum_prop": 0.7105263157894737,
"repo_name": "leanix/leanix-sdk-python",
"id": "7bb8b9367b977085381cb9e997a06c8fc453974c",
"size": "1808",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/leanix/models/FactSheetHasPredecessor.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1555496"
},
{
"name": "Scala",
"bytes": "1911"
}
],
"symlink_target": ""
}
|
"""
@author: Aaron Ponti
"""
from Processor import Processor
def process(transaction):
"""Dropbox entry point.
@param transaction, the transaction object
"""
#
# Run registration
#
prefix = "S3E"
version = 2
logDir = "../core-plugins/flow/3/dss/drop-boxes/BIORADS3eDropbox/logs"
processor = Processor(transaction, prefix, version, logDir)
processor.run()
|
{
"content_hash": "15e8b19fdc783f251846e3e12410c136",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 74,
"avg_line_length": 18.40909090909091,
"alnum_prop": 0.6518518518518519,
"repo_name": "aarpon/obit_flow_core_technology",
"id": "3e0041785f67fca577746113ec516659f29e9495",
"size": "430",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "core-plugins/flow/3/dss/drop-boxes/BIORADS3eDropbox/BIORADS3eDropbox.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "9054"
},
{
"name": "HTML",
"bytes": "17041"
},
{
"name": "JavaScript",
"bytes": "340088"
},
{
"name": "Python",
"bytes": "929959"
}
],
"symlink_target": ""
}
|
class Formatter:
def format(self, match):
formatstr = "[{0}] {1}\n{2}:{3}\n{4}\n"
return formatstr.format(match.rule.id,
match.message,
match.filename,
match.linenumber,
match.line)
class QuietFormatter:
def format(self, match):
formatstr = "[{0}] {1}:{2}"
return formatstr.format(match.rule.id, match.filename,
match.linenumber)
class ParseableFormatter:
def format(self, match):
formatstr = "{0}:{1}: [{2}] {3}"
return formatstr.format(match.filename,
match.linenumber,
match.rule.id,
match.message,
)
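# A minimal usage sketch added by the editor (not part of ansible-lint):
# it builds a stand-in match object exposing the attributes the formatters
# above read (rule.id, message, filename, linenumber, line). The _FakeRule
# and _FakeMatch names are hypothetical.
if __name__ == "__main__":
    from collections import namedtuple
    _FakeRule = namedtuple("_FakeRule", ["id"])
    _FakeMatch = namedtuple(
        "_FakeMatch", ["rule", "message", "filename", "linenumber", "line"])
    match = _FakeMatch(_FakeRule("ANSIBLE0002"), "Trailing whitespace",
                       "playbook.yml", 12, "  - name: demo ")
    print(Formatter().format(match))             # multi-line report
    print(QuietFormatter().format(match))        # [ANSIBLE0002] playbook.yml:12
    print(ParseableFormatter().format(match))    # playbook.yml:12: [ANSIBLE0002] Trailing whitespace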
|
{
"content_hash": "50563ec033b021cc56dbb2d835c8f0d3",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 62,
"avg_line_length": 30.678571428571427,
"alnum_prop": 0.42491268917345754,
"repo_name": "schlueter/ansible-lint",
"id": "6c612de56f9ff7bc570b0a77d731e45a33a7b195",
"size": "859",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "lib/ansiblelint/formatters/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "46773"
}
],
"symlink_target": ""
}
|
"""The 'binary' module contains all classes related to the Binary
protocol.
"""
# Changed from 'import serial' for PyCMDS
import pycmds.project.com_handler as serial
import struct
import logging
import sys
# Changed path to accurately import
from pycmds.library.zaber.exceptions import TimeoutError, UnexpectedReplyError
# See https://docs.python.org/2/howto/logging.html#configuring-logging-
# for-a-library for info on why we have these two lines here.
logger = logging.getLogger(__name__)
logger.addHandler(logging.NullHandler())
class BinaryCommand(object):
"""Models a single command in Zaber's Binary protocol.
Attributes:
device_number: An integer representing the number (*a.k.a.*
address) of the device to which to send the command. A
device number of 0 indicates the command should be executed
by all devices. 0-255.
command_number: An integer representing the command to be sent
to the device. Command numbers are listed in Zaber's
`Binary Protocol Manual`_. 0-255.
data: The data value to be transmitted with the command.
message_id: The `message ID`_ of the command. 0-255, or None if
not present.
.. _Binary Protocol Manual: http://www.zaber.com/wiki/Manuals/Binary
_Protocol_Manual#Quick_Command_Reference
.. _message ID: http://www.zaber.com/wiki/Manuals/Binary_Protocol_Ma
nual#Set_Message_Id_Mode_-_Cmd_102
"""
def __init__(self, device_number, command_number, data=0, message_id=None):
"""
Args:
device_number: An integer specifying the number of the
target device to which to send this command. 0-255.
command_number: An integer specifying the command to be
sent. 0-255.
data: An optional integer containing the data value to be
sent with the command. When omitted, *data* will be set
to 0.
message_id: An optional integer specifying a message ID to
give to the message. 0-255, or None if no message ID is
to be used.
Raises:
ValueError: An invalid value was passed.
"""
if device_number < 0 or command_number < 0:
raise ValueError("Device and command number must be between 0 " "and 255.")
self.device_number = device_number
self.command_number = command_number
self.data = data
if message_id is not None and (message_id < 0 or message_id > 255):
raise ValueError("Message ID must be between 0 and 255.")
self.message_id = message_id
def encode(self):
"""Encodes a 6-byte byte string to be transmitted to a device.
Returns:
A byte string of length 6, formatted according to Zaber's
`Binary Protocol Manual`_.
"""
packed = struct.pack("<2Bl", self.device_number, self.command_number, self.data)
if self.message_id is not None:
packed = packed[:5] + struct.pack("B", self.message_id)
return packed
def __str__(self):
return "[{:d}, {:d}, {:d}]".format(self.device_number, self.command_number, self.data)
class BinaryDevice(object):
"""A class to represent a Zaber device in the Binary protocol.
Attributes:
port: A BinarySerial object which represents the port to which
this device is connected.
number: The integer number of this device. 1-255.
"""
def __init__(self, port, number):
"""
Args:
port: A BinarySerial object to use as a parent port.
number: An integer between 1 and 255 which is the number of
this device.
Raises:
ValueError: The device number was invalid.
"""
if number > 255 or number < 1:
raise ValueError("Device number must be 1-255.")
self.number = number
self.port = port
def send(self, *args):
"""Sends a command to this device, then waits for a response.
Args:
*args: Either a single BinaryCommand, or 1-3 integers
specifying, in order, the command number, data value,
and message ID of the command to be sent.
Notes:
The ability to pass integers to this function is provided
as a convenience to the programmer. Calling
``device.send(2)`` is equivalent to calling
``device.send(BinaryCommand(device.number, 2))``.
Note that in the Binary protocol, devices will only reply
once they have completed a command. Since this function
waits for a reply from the device, this function may block
for a long time while it waits for a response. For the same
reason, it is important to set the timeout of this device's
parent port to a value sufficiently high that any command
sent will be completed within the timeout.
Regardless of the device address specified to this function,
the device number of the transmitted command will be
overwritten with the number of this device.
If the command has a message ID set, this function will return
a reply with a message ID. It does not check whether the message
IDs match.
Raises:
            UnexpectedReplyError: The reply read was not sent by this
device.
Returns: A BinaryReply containing the reply received.
"""
if len(args) == 1 and isinstance(args[0], BinaryCommand):
command = args[0]
elif len(args) < 4:
command = BinaryCommand(self.number, *args)
command.device_number = self.number
# self.port._ser.external_lock_control = True
self.port._ser.lock()
self.port.write(command)
reply = self.port.read(command.message_id is not None)
self.port._ser.unlock()
# self.port._ser.external_lock_control = False
if reply.device_number != self.number:
raise UnexpectedReplyError(
"Received an unexpected reply from "
"device number {0:d}".format(reply.device_number),
reply,
)
return reply
def home(self):
"""Sends the "home" command (1), then waits for the device to
reply.
Returns: A BinaryReply containing the reply received.
"""
return self.send(1)
def move_abs(self, position):
"""Sends the "move absolute" command (20), then waits for the
device to reply.
Args:
position: The position in microsteps to which to move.
Returns: A BinaryReply containing the reply received.
"""
return self.send(20, position)
def move_rel(self, distance):
"""Sends the "move relative" command (21), then waits for the
device to reply.
Args:
distance: The distance in microsteps to which to move.
Returns: A BinaryReply containing the reply received.
"""
return self.send(21, distance)
def move_vel(self, speed):
"""Sends the "move at constant speed" command (22), then waits
for the device to reply.
Args:
speed: An integer representing the speed at which to move.
Notes:
Unlike the other "move" commands, the device replies
immediately to this command. This means that when this
function returns, it is likely that the device is still
moving.
Returns: A BinaryReply containing the reply received.
"""
return self.send(22, speed)
def stop(self):
"""Sends the "stop" command (23), then waits for the device to
reply.
Returns: A BinaryReply containing the reply received.
"""
return self.send(23)
def get_status(self):
"""Sends the "Return Status" command (54), and returns the
result.
Returns:
An integer representing a `status code`_, according to
Zaber's Binary Protocol Manual.
.. _status code: http://www.zaber.com/wiki/Manuals/Binary_Protoc
ol_Manual#Return_Status_-_Cmd_54
"""
return self.send(54).data
### Methods not included in Zaber provided Code
def get_position(self):
"""Sends the "Return Current Position" command (60), and returns the
result.
Returns:
            An integer representing the current absolute position in microsteps
.. see: http://www.zaber.com/wiki/Manuals/Binary_Protoc
ol_Manual#Return_Current_Position_-_Cmd_60
"""
return self.send(60).data
class BinaryReply(object):
"""Models a single reply in Zaber's Binary protocol.
Attributes:
device_number: The number of the device from which this reply
was sent.
command_number: The number of the command which triggered this
reply.
data: The data value associated with the reply.
message_id: The message ID number, if present, otherwise None.
"""
def __init__(self, reply, message_id=False):
"""
Args:
reply: A byte string of length 6 containing a binary reply
encoded according to Zaber's Binary Protocol Manual.
message_id: True if a message ID should be extracted from
the reply, False if not.
Notes:
Because a Binary reply's message ID truncates the last byte
of the data value of the reply, it is impossible to tell
whether a reply contains a message ID or not. Therefore, the
user must specify whether or not a message ID should be
assumed to be present.
Raises:
TypeError: An invalid type was passed as *reply*. This may
indicate that a unicode string was passed instead of a
binary (ascii) string.
"""
if isinstance(reply, bytes):
self.device_number, self.command_number, self.data = struct.unpack("<2Bl", reply)
if message_id:
# Use bitmasks to extract the message ID.
self.message_id = (self.data & 0xFF000000) >> 24
self.data = self.data & 0x00FFFFFF
# Sign extend 24 to 32 bits in the message ID case.
# If the data is more than 24 bits it will still be wrong,
# but now negative smaller values will be right.
if 0 != (self.data & 0x00800000):
self.data = (int)((self.data | 0xFF000000) - (1 << 32))
else:
self.message_id = None
elif isinstance(reply, list):
# Assume a 4th element is a message ID.
if len(reply) > 3:
message_id = True
self.device_number = reply[0]
self.command_number = reply[1]
self.data = reply[2]
self.message_id = reply[3] if message_id else None
else:
raise TypeError(
"BinaryReply must be passed a byte string " "('bytes' type) or a list."
)
def encode(self):
"""Returns the reply as a binary string, in the form in which it
would appear if it had been read from the serial port.
Returns:
A byte string of length 6 formatted according to the Binary
Protocol Manual.
"""
return struct.pack("<2Bl", self.device_number, self.command_number, self.data)
def __str__(self):
return "[{:d}, {:d}, {:d}]".format(self.device_number, self.command_number, self.data)
class BinarySerial(object):
"""A class for interacting with Zaber devices using the Binary protocol.
This class defines a few simple methods for writing to and reading
from a device connected over the serial port.
"""
def __init__(self, port, baud=9600, timeout=5, inter_char_timeout=0.1):
"""Creates a new instance of the BinarySerial class.
Args:
port: A string containing the name of the serial port to
which to connect.
baud: An integer representing the baud rate at which to
communicate over the serial port.
timeout: A number representing the number of seconds to wait
for a reply. Fractional numbers are accepted and can be
used to specify times shorter than a second.
inter_char_timeout : A number representing the number of seconds
to wait between bytes in a reply. If your computer is bad at
reading incoming serial data in a timely fashion, try
increasing this value.
Notes:
This class will open the port immediately upon
instantiation. This follows the pattern set by PySerial,
which this class uses internally to perform serial
communication.
Raises:
TypeError: The port argument passed was not a string.
"""
if not isinstance(port, str):
raise TypeError("port must be a string.")
try:
self._ser = serial.serial_for_url(port, do_not_open=True)
self._ser.baudrate = baud
self._ser.timeout = timeout
self._ser.interCharTimeout = inter_char_timeout
self._ser.open()
except AttributeError:
# serial_for_url not supported; use fallback
self._ser = serial.Serial(
port, baud, timeout=timeout * 1000, interCharTimeout=inter_char_timeout
)
self._ser.external_lock_control = True
def write(self, *args):
r"""Writes a command to the port.
This function accepts either a BinaryCommand object, a set
of integer arguments, a list of integers, or a string.
If passed integer arguments or a list of integers, those
integers must be in the same order as would be passed to the
BinaryCommand constructor (ie. device number, then command
number, then data, and then an optional message ID).
Args:
*args: A BinaryCommand to be sent, or between 2 and 4
                integer arguments, or a list containing between 2 and
4 integers, or a string representing a
properly-formatted Binary command.
Notes:
Passing integers or a list of integers is equivalent to
passing a BinaryCommand with those integers as constructor
arguments.
For example, all of the following are equivalent::
>>> write(BinaryCommand(1, 55, 1000))
>>> write(1, 55, 1000)
>>> write([1, 55, 1000])
>>> write(struct.pack("<2Bl", 1, 55, 1000))
>>> write('\x01\x37\xe8\x03\x00\x00')
Raises:
TypeError: The arguments passed to write() did not conform
to the specification of ``*args`` above.
ValueError: A string of length other than 6 was passed.
"""
if len(args) == 1:
message = args[0]
if isinstance(message, list):
message = BinaryCommand(*message)
elif 1 < len(args) < 5:
message = BinaryCommand(*args)
else:
raise TypeError(
"write() takes at least 1 and no more than 4 "
"arguments ({0:d} given)".format(len(args))
)
if isinstance(message, str):
logger.debug("> %s", message)
if len(message) != 6:
raise ValueError("write of a string expects length 6.")
# pyserial doesn't handle hex strings.
if sys.version_info > (3, 0):
data = bytes(message, "UTF-8")
else:
data = bytes(message)
elif isinstance(message, BinaryCommand):
data = message.encode()
logger.debug("> %s", message)
else:
raise TypeError(
"write must be passed several integers, or a " "string, list, or BinaryCommand."
)
self._ser.write(data)
def read(self, message_id=False):
"""Reads six bytes from the port and returns a BinaryReply.
Args:
message_id: True if the response is expected to have a
message ID. Defaults to False.
Returns:
A BinaryCommand containing all of the information read from
the serial port.
Raises:
zaber.serial.TimeoutError: No data was read before the
specified timeout elapsed.
"""
reply = self._ser.read(6)
if len(reply) != 6:
logger.debug("< Receive timeout!")
raise TimeoutError("read timed out.")
parsed_reply = BinaryReply(reply, message_id)
logger.debug("< %s", parsed_reply)
return parsed_reply
def flush(self):
"""Flushes the buffers of the underlying serial port."""
self._ser.flush()
def open(self):
"""Opens the serial port."""
self._ser.open()
def close(self):
"""Closes the serial port."""
self._ser.close()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self._ser.close()
@property
def timeout(self):
"""The number of seconds to wait for input while reading.
The ``timeout`` property accepts floating point numbers for
fractional wait times.
"""
return self._ser.timeout
@timeout.setter
def timeout(self, t):
self._ser.timeout = t
@property
def baudrate(self):
"""The baud rate at which to read and write.
The default baud rate for the Binary protocol is 9600. T-Series
devices are only capable of communication at 9600 baud.
A-Series devices can communicate at 115200, 57600, 38400,
19200, and 9600 baud.
Note that this changes the baud rate of the computer on which
this code is running. It does not change the baud rate of
connected devices.
"""
return self._ser.baudrate
@baudrate.setter
def baudrate(self, b):
if b not in (115200, 57600, 38400, 19200, 9600):
raise ValueError(
"Invalid baud rate: {:d}. Valid baud rates are "
"115200, 57600, 38400, 19200, and 9600.".format(b)
)
self._ser.baudrate = b
### Additional Methods not in Zaber library
def setMode(self, mode, device=0):
if isinstance(mode, str):
if mode == "computer":
mode = 0x482E
elif mode == "manual":
mode = 0x0827
else:
raise ValueError(
"Unkown mode string: {:s}. Valid strings are "
"'computer', 'manual'. Or input an integer".format(mode)
)
self.write(device, 40, mode)
|
{
"content_hash": "08ea8ee0a6ecb723bc9e2a6b96fbe65d",
"timestamp": "",
"source": "github",
"line_count": 527,
"max_line_length": 96,
"avg_line_length": 36.56736242884251,
"alnum_prop": 0.5879819417778008,
"repo_name": "wright-group/PyCMDS",
"id": "f5a96082da2f846dc3130cef91d157be76da095d",
"size": "19271",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pycmds/library/zaber/binary.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "212"
},
{
"name": "C",
"bytes": "15060"
},
{
"name": "C++",
"bytes": "43260"
},
{
"name": "Python",
"bytes": "516847"
}
],
"symlink_target": ""
}
|
"""
Print all signature of a python module in alphabet order.
Usage:
./print_signature "paddle.fluid" > signature.txt
"""
from __future__ import print_function
import importlib
import inspect
import collections
import sys
import pydoc
import hashlib
member_dict = collections.OrderedDict()
experimental_namespace = {"paddle.fluid.LoDTensorset"}
def md5(doc):
hash = hashlib.md5()
hash.update(str(doc).encode('utf-8'))
return hash.hexdigest()
def queue_dict(member, cur_name):
try:
doc = ('document', md5(member.__doc__))
if inspect.isclass(member):
args = member.__module__ + "." + member.__name__
else:
args = inspect.getargspec(member)
all = (args, doc)
member_dict[cur_name] = all
except TypeError: # special for PyBind method
if cur_name in check_modules_list:
return
member_dict[cur_name] = " ".join([
line.strip() for line in pydoc.render_doc(member).split('\n')
if "->" in line
])
def visit_member(parent_name, member):
if parent_name + member.__name__ in experimental_namespace:
return
cur_name = ".".join([parent_name, member.__name__])
if inspect.isclass(member):
queue_dict(member, cur_name)
for name, value in inspect.getmembers(member):
if hasattr(value, '__name__') and (not name.startswith("_") or
name == "__init__"):
visit_member(cur_name, value)
elif callable(member):
queue_dict(member, cur_name)
elif inspect.isgetsetdescriptor(member):
return
else:
raise RuntimeError("Unsupported generate signature of member, type {0}".
format(str(type(member))))
def visit_all_module(mod):
if (mod.__name__ in experimental_namespace):
return
for member_name in (
name
for name in (mod.__all__ if hasattr(mod, "__all__") else dir(mod))
if not name.startswith("_")):
instance = getattr(mod, member_name, None)
if instance is None:
continue
if inspect.ismodule(instance):
visit_all_module(instance)
else:
visit_member(mod.__name__, instance)
check_modules_list = ["paddle.reader.ComposeNotAligned.__init__"]
modules = sys.argv[1].split(",")
for m in modules:
visit_all_module(importlib.import_module(m))
for name in member_dict:
print(name, member_dict[name])
|
{
"content_hash": "ea6cdb1ad1229bef20c501f57c48d4bb",
"timestamp": "",
"source": "github",
"line_count": 86,
"max_line_length": 80,
"avg_line_length": 29.441860465116278,
"alnum_prop": 0.5916271721958926,
"repo_name": "tensor-tang/Paddle",
"id": "486c88dd074e1859e39a664337ab0601c07a5cc5",
"size": "3142",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "tools/print_signatures.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "32490"
},
{
"name": "C++",
"bytes": "10161819"
},
{
"name": "CMake",
"bytes": "290828"
},
{
"name": "Cuda",
"bytes": "1183095"
},
{
"name": "Dockerfile",
"bytes": "10002"
},
{
"name": "Python",
"bytes": "7082088"
},
{
"name": "Ruby",
"bytes": "353"
},
{
"name": "Shell",
"bytes": "200906"
}
],
"symlink_target": ""
}
|
import _plotly_utils.basevalidators
class NameValidator(_plotly_utils.basevalidators.StringValidator):
def __init__(
self, plotly_name="name", parent_name="layout.yaxis.rangebreak", **kwargs
):
super(NameValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "none"),
**kwargs,
)
|
{
"content_hash": "69700b243ec34161f60bf5083b9b565a",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 81,
"avg_line_length": 32.15384615384615,
"alnum_prop": 0.6028708133971292,
"repo_name": "plotly/plotly.py",
"id": "347ac646ad36b610bedc79f581fc1d676bde0cc5",
"size": "418",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/layout/yaxis/rangebreak/_name.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "545"
},
{
"name": "JavaScript",
"bytes": "2074"
},
{
"name": "PostScript",
"bytes": "565328"
},
{
"name": "Python",
"bytes": "31506317"
},
{
"name": "TypeScript",
"bytes": "71337"
}
],
"symlink_target": ""
}
|
"""
A collection of utility functions.
Often these are not the most efficient implementations, especially when
dealing with large files, but for most files morphforge has to deal with,
they work fine and make code more readable.
"""
import os.path as fs
import hashlib
import re
import random
import os
import fnmatch
from morphforge.core.mgrs import LocMgr
def find_files_recursively(directory, pattern):
""" Recursive 'glob' for files.
This function walks over a directory looking for filenames matching a
certain pattern"""
for (root, _dirs, files) in os.walk(directory):
for basename in files:
if fnmatch.fnmatch(basename, pattern):
filename = os.path.join(root, basename)
yield filename
class StrUtils(object):
""" A collection of string utility functions"""
@classmethod
def strip_comments_and_blank_lines(cls, text, cmt_sym='#'):
""" Removes comments and blank lines from block of text
"""
new = []
for line in text.split('\n'):
idx = line.find(cmt_sym)
if idx != -1:
line = line[:idx]
if line.strip():
new.append(line.strip())
return '\n'.join(new)
@classmethod
def get_hash_md5(cls, s):
""" Returns the md5 digest hash of a string"""
m = hashlib.new('md5')
m.update(s)
return m.hexdigest()
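# Editor's worked example (not in the original file):
# StrUtils.strip_comments_and_blank_lines("a = 1  # set a\n\nb = 2")
# drops the trailing comment and the blank line, returning "a = 1\nb = 2".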
class FileIO(object):
""" A collection of file utility functions"""
@classmethod
def append_to_file(cls, txt, filename, file_expected=True):
""" Appends text to an existing file.
By default the file is expected to already exist, otherwise an IOError
        exception will be thrown. This can be overridden with the file_expected
parameter. Returns `filename`"""
if file_expected and not fs.exists(filename):
raise IOError("Can't append to non-existant file: %s"
% filename)
with open(filename, 'a') as f:
f.write(txt)
return filename
@classmethod
def write_to_file(
cls,
txt,
filename=None,
filedirectory=None,
suffix=None,
):
""" Writes text to a file
This function will overwrite an existing file. If no filename is given,
a filename will be invented, using LocMgr.get_temporary_filename().
The name of the file written to will be returned.
"""
if not filename:
filename = LocMgr.get_temporary_filename(suffix=suffix,
filedirectory=filedirectory)
#print filename
with open(filename, 'w') as f:
f.write(txt)
return filename
@classmethod
def read_from_file(cls, filename):
""" Reads text from a file"""
with open(filename) as f:
return f.read()
# txt = f.read()
# return txt
@classmethod
def get_hash_md5(cls, filename):
""" Returns the md5 checksum of a file.
This function should not be used for large files, since it loads
the entire file into memory.
"""
return StrUtils.get_hash_md5(FileIO.read_from_file(filename))
class SeqUtils(object):
""" A collection of utility functions for working with sequences"""
@classmethod
def flatten(cls, seq, flatten_types=(tuple, list)):
""" 'Flattens' a sequence recursively.
The objects types to flatten are specified by the flatten_types
parameter, which must by a tuple of classes. By default it flattens
lists and tuples.
"""
res = []
for item in seq:
if isinstance(item, flatten_types):
new_items = SeqUtils.flatten(item, flatten_types=flatten_types)
res.extend(new_items)
else:
res.append(item)
return res
@classmethod
def expect_single(cls, l):
""" Expects a sequence containing a single object and returns it.
If 0 or more than 1 objects are found, it raises an error.
"""
if len(l) != 1:
if len(l) == 0:
print 'ExpectSingle has none:', l
else:
print 'ExpectSingle has multiple:', l
raise ValueError('')
return l[0]
@classmethod
def filter_expect_single(cls, seq, filter_func):
""" Filters a sequence according to the predicate filter_func, then
expects a single item to remain, which it returns. If 0 or more than
1 objects are found, it raises an error.
"""
filtered_seq = [obj for obj in seq if filter_func(obj)]
if len(filtered_seq) == 0:
print seq
raise ValueError('Unable to find any occurances')
if len(filtered_seq) > 1:
raise ValueError('Found too many occurances')
return filtered_seq[0]
@classmethod
def filter_with_prob(cls, lst, p):
""" Returns a copy of the sequence, in which each item in the original
has a fixed probability of being in the new sequence.
"""
return [obj for obj in lst if random.random() < p]
@classmethod
def max_with_unique_check(cls, collection, key):
""" Return the maximum from a sequence, based on a key, but verify
that there is a unique maximum.
This is designed to be used when the
key generates integers."""
assert len(collection)
if len(collection) == 1:
return collection[0]
sorted_seq = sorted(collection, key=key)
assert key(sorted_seq[-1]) != key(sorted_seq[-2])
return sorted_seq[-1]
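# Editor's worked examples (not in the original file):
# SeqUtils.flatten([1, [2, [3, 4]], (5,)]) returns [1, 2, 3, 4, 5];
# SeqUtils.expect_single([42]) returns 42, while SeqUtils.expect_single([])
# raises ValueError.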
def is_iterable(f):
""" Returns True if an object can be iterated over by using iter(obj)
"""
try:
iter(f)
return True
except TypeError:
return False
def merge_dictionaries(dictionaries):
""" Merge a sequence of dictionaries safely.
    This function merges dictionaries together, but ensures that the same
    key does not map to different values in different dictionaries. That is,
    merge_dictionaries([{'alpha': True}, {'alpha': True}]) is OK, but
    merge_dictionaries([{'alpha': True}, {'alpha': False}]) will raise an
    exception.
"""
res = {}
for dct in dictionaries:
for (k, v) in dct.iteritems():
if k in res:
assert res[k] == v
res[k] = v
return res
def check_cstyle_varname(name):
""" Check a string conforms to a C-style variable name.
"""
if not isinstance(name, basestring):
print name, name.__class__
raise ValueError('Invalid Name - Not String!')
valid_regex = '^[a-zA-Z][_a-zA-Z0-9]*$'
match_obj = re.match(valid_regex, name)
if not match_obj:
raise ValueError('Invalid Name: _%s_' % name)
return name
# Deprecated:
# =========================
def is_float(f):
"""Deprecated"""
# We are getting rid of the only function calling this './morphforge/src/morphforge/core/quantities/fromcore.py'
try:
float(f)
return True
except ValueError:
return False
def is_int(f):
"""Deprecated"""
# We are getting rid of the only function calling this './morphforge/src/morphforge/core/quantities/fromcore.py'
try:
int(f)
return True
except ValueError:
return False
import time
import traceback
import sys
# Lets not buffer any output:
class flushfile(file):
def __init__(self, f):
self.f = f
def write(self, x):
self.f.write(x)
self.f.flush()
# sys.stdout = flushfile(sys.stdout)
# sys.stderr = flushfile(sys.stderr)
class benchmark(object):
def __init__(self, name):
self.name = name
def __enter__(self):
self.start = time.time()
def __exit__(self, ty, val, tb):
end = time.time()
print '%s : %0.3f seconds' % (self.name, end - self.start)
return False
class TracePrints(object):
def __init__(self):
self.stdout = sys.stdout
def write(self, s):
self.stdout.write('Writing %r\n' % s)
traceback.print_stack(file=self.stdout)
# sys.stdout = TracePrints()
sweep_size = None
class ParameterSweepValues(object):
def __init__(self, values):
self.values = values
def _short_iter(self):
if sweep_size == 1:
yield self.values[0]
elif sweep_size == 2:
yield self.values[0]
yield self.values[-1]
else:
assert False
def __iter__(self):
if sweep_size is None:
return iter(self.values)
else:
return iter(self._short_iter())
def __getitem__(self, index):
        assert sweep_size is None
return self.values[index]
def __len__(self):
if sweep_size is None:
return len(self.values)
else:
return sweep_size
|
{
"content_hash": "fc0ae41ad6388b5445f7c52f8f2ebd07",
"timestamp": "",
"source": "github",
"line_count": 351,
"max_line_length": 116,
"avg_line_length": 25.655270655270655,
"alnum_prop": 0.5878956135480289,
"repo_name": "mikehulluk/morphforge",
"id": "323be245892e5527ee5fe941574a89b6e1e39a37",
"size": "10544",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/morphforge/core/misc.py",
"mode": "33261",
"license": "bsd-2-clause",
"language": [
{
"name": "AMPL",
"bytes": "4818"
},
{
"name": "C",
"bytes": "1499"
},
{
"name": "Makefile",
"bytes": "4436"
},
{
"name": "Python",
"bytes": "1557833"
},
{
"name": "Shell",
"bytes": "14"
},
{
"name": "XSLT",
"bytes": "94266"
}
],
"symlink_target": ""
}
|
import abc
import six
from mongoengine import NotUniqueError
from st2common import log as logging
from st2common.exceptions.db import StackStormDBObjectConflictError
from st2common.models.system.common import ResourceReference
from st2common.transport.reactor import TriggerDispatcher
__all__ = [
'Access',
'ContentPackResource',
'StatusBasedResource'
]
LOG = logging.getLogger(__name__)
@six.add_metaclass(abc.ABCMeta)
class Access(object):
impl = None
publisher = None
dispatcher = None
# ModelAPI class for this resource
api_model_cls = None
# A list of operations for which we should dispatch a trigger
dispatch_trigger_for_operations = []
# Maps model operation name (e.g. create, update, delete) to the trigger reference which is
# used when dispatching a trigger
operation_to_trigger_ref_map = {}
@classmethod
@abc.abstractmethod
def _get_impl(cls):
pass
@classmethod
@abc.abstractmethod
def _get_publisher(cls):
return None
@classmethod
def _get_dispatcher(cls):
"""
Return a dispatcher class which is used for dispatching triggers.
"""
if not cls.dispatcher:
cls.dispatcher = TriggerDispatcher(LOG)
return cls.dispatcher
@classmethod
@abc.abstractmethod
def _get_by_object(cls, object):
return None
@classmethod
def get_by_name(cls, value):
return cls._get_impl().get_by_name(value)
@classmethod
def get_by_id(cls, value):
return cls._get_impl().get_by_id(value)
@classmethod
def get_by_ref(cls, value):
return cls._get_impl().get_by_ref(value)
@classmethod
def get(cls, *args, **kwargs):
return cls._get_impl().get(*args, **kwargs)
@classmethod
def get_all(cls, *args, **kwargs):
return cls._get_impl().get_all(*args, **kwargs)
@classmethod
def count(cls, *args, **kwargs):
return cls._get_impl().count(*args, **kwargs)
@classmethod
def query(cls, *args, **kwargs):
return cls._get_impl().query(*args, **kwargs)
@classmethod
def distinct(cls, *args, **kwargs):
return cls._get_impl().distinct(*args, **kwargs)
@classmethod
def aggregate(cls, *args, **kwargs):
return cls._get_impl().aggregate(*args, **kwargs)
@classmethod
def add_or_update(cls, model_object, publish=True, dispatch_trigger=True):
pre_persist_id = model_object.id
try:
model_object = cls._get_impl().add_or_update(model_object)
except NotUniqueError as e:
LOG.exception('Conflict while trying to save in DB.')
# On a conflict determine the conflicting object and return its id in
# the raised exception.
conflict_object = cls._get_by_object(model_object)
conflict_id = str(conflict_object.id) if conflict_object else None
raise StackStormDBObjectConflictError(str(e), conflict_id)
is_update = str(pre_persist_id) == str(model_object.id)
# Publish internal event on the message bus
if publish:
try:
if is_update:
cls.publish_update(model_object)
else:
cls.publish_create(model_object)
except:
LOG.exception('Publish failed.')
# Dispatch trigger
if dispatch_trigger:
try:
if is_update:
cls.dispatch_update_trigger(model_object)
else:
cls.dispatch_create_trigger(model_object)
except:
LOG.exception('Trigger dispatch failed.')
return model_object
@classmethod
def delete(cls, model_object, publish=True, dispatch_trigger=True):
persisted_object = cls._get_impl().delete(model_object)
# Publish internal event on the message bus
if publish:
try:
cls.publish_delete(model_object)
except Exception:
LOG.exception('Publish failed.')
# Dispatch trigger
if dispatch_trigger:
try:
cls.dispatch_delete_trigger(model_object)
except Exception:
LOG.exception('Trigger dispatch failed.')
return persisted_object
####################################################
# Internal event bus message publish related methods
####################################################
@classmethod
def publish_create(cls, model_object):
publisher = cls._get_publisher()
if publisher:
publisher.publish_create(model_object)
@classmethod
def publish_update(cls, model_object):
publisher = cls._get_publisher()
if publisher:
publisher.publish_update(model_object)
@classmethod
def publish_delete(cls, model_object):
publisher = cls._get_publisher()
if publisher:
publisher.publish_delete(model_object)
############################################
# Internal trigger dispatch related methods
###########################################
@classmethod
def dispatch_create_trigger(cls, model_object):
"""
Dispatch a resource-specific trigger which indicates a new resource has been created.
"""
return cls._dispatch_operation_trigger(operation='create', model_object=model_object)
@classmethod
def dispatch_update_trigger(cls, model_object):
"""
Dispatch a resource-specific trigger which indicates an existing resource has been updated.
"""
return cls._dispatch_operation_trigger(operation='update', model_object=model_object)
@classmethod
def dispatch_delete_trigger(cls, model_object):
"""
Dispatch a resource-specific trigger which indicates an existing resource has been
deleted.
"""
return cls._dispatch_operation_trigger(operation='delete', model_object=model_object)
@classmethod
def _get_trigger_ref_for_operation(cls, operation):
trigger_ref = cls.operation_to_trigger_ref_map.get(operation, None)
if not trigger_ref:
raise ValueError('Trigger ref not specified for operation: %s' % (operation))
return trigger_ref
@classmethod
def _dispatch_operation_trigger(cls, operation, model_object):
if operation not in cls.dispatch_trigger_for_operations:
return
trigger = cls._get_trigger_ref_for_operation(operation=operation)
object_payload = cls.api_model_cls.from_model(model_object, mask_secrets=True).__json__()
payload = {
'object': object_payload
}
return cls._dispatch_trigger(operation=operation, trigger=trigger, payload=payload)
@classmethod
def _dispatch_trigger(cls, operation, trigger, payload):
if operation not in cls.dispatch_trigger_for_operations:
return
dispatcher = cls._get_dispatcher()
return dispatcher.dispatch(trigger=trigger, payload=payload)
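# A hedged illustration added by the editor (not part of st2): a concrete
# Access subclass opts into trigger dispatch by listing operations and
# mapping them to trigger references, e.g.
#     dispatch_trigger_for_operations = ['create', 'update', 'delete']
#     operation_to_trigger_ref_map = {'create': 'core.some_resource_created', ...}
# (the trigger reference names here are hypothetical). add_or_update() and
# delete() then dispatch via dispatch_*_trigger(), and
# _dispatch_operation_trigger() serializes the model through
# api_model_cls.from_model(..., mask_secrets=True) into a payload of the
# form {'object': <serialized model>}.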
class ContentPackResource(Access):
@classmethod
def get_by_ref(cls, ref):
if not ref:
return None
ref_obj = ResourceReference.from_string_reference(ref=ref)
result = cls.query(name=ref_obj.name,
pack=ref_obj.pack).first()
return result
@classmethod
def _get_by_object(cls, object):
        # For an object that belongs to a content pack, the (pack, name) pair is unique.
name = getattr(object, 'name', '')
pack = getattr(object, 'pack', '')
return cls.get_by_ref(ResourceReference.to_string_reference(pack=pack, name=name))
class StatusBasedResource(Access):
"""Persistence layer for models that needs to publish status to the message queue."""
@classmethod
def publish_status(cls, model_object):
"""Publish the object status to the message queue.
Publish the instance of the model as payload with the status
as routing key to the message queue via the StatePublisher.
:param model_object: An instance of the model.
:type model_object: ``object``
"""
publisher = cls._get_publisher()
if publisher:
publisher.publish_state(model_object, getattr(model_object, 'status', None))
|
{
"content_hash": "d90a03d6c9c625a0e2db258066e5d783",
"timestamp": "",
"source": "github",
"line_count": 270,
"max_line_length": 99,
"avg_line_length": 31.2,
"alnum_prop": 0.6127730294396961,
"repo_name": "grengojbo/st2",
"id": "a9cb12a27aeac845afebe299fbbb35f476f0d49b",
"size": "9204",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "st2common/st2common/persistence/base.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "198"
},
{
"name": "Makefile",
"bytes": "21186"
},
{
"name": "PowerShell",
"bytes": "299"
},
{
"name": "Python",
"bytes": "2091976"
},
{
"name": "Shell",
"bytes": "7518"
},
{
"name": "Slash",
"bytes": "677"
}
],
"symlink_target": ""
}
|
from mc.utils import context_value_setter
from .base_task_handler import BaseTaskHandler
class WireTaskHandler(BaseTaskHandler):
"""Performs wirings.
Dispatches to :meth:`mc.utils.context_value_setter.set_context_value`
task_params:
wirings: a list of value_specs,
per :meth:`mc.utils.context_value_setter.set_context_value`
"""
def validate_task_params(self):
assert self.task['task_params']['wirings']
def initial_tick(self):
for wiring in self.task['task_params'].get('wirings', []):
self.process_wiring(wiring=wiring)
self.task['status'] = 'COMPLETED'
def process_wiring(self, wiring=None):
context_value_setter.set_context_value(value_spec=wiring,
context=self.task_ctx)
TaskHandler = WireTaskHandler
|
{
"content_hash": "8f60435e973b53e7a92db701ca82d1ea",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 73,
"avg_line_length": 30.357142857142858,
"alnum_prop": 0.6458823529411765,
"repo_name": "aspuru-guzik-group/mission_control",
"id": "7822c65a70ade58cadc1e570270b4f583d111a09",
"size": "850",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mc/task_handlers/wire_task_handler.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "397242"
}
],
"symlink_target": ""
}
|
from datetime import date
from django.db import models
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.core.management import call_command
from django.dispatch import receiver
from django.shortcuts import render, redirect
from django.http import HttpResponse
from django.views.decorators.vary import vary_on_headers
from wagtail.wagtailcore.models import Page, Orderable
from wagtail.wagtailcore.fields import RichTextField
from wagtail.wagtailadmin.edit_handlers import FieldPanel, MultiFieldPanel, \
InlinePanel, PageChooserPanel
from wagtail.wagtailimages.edit_handlers import ImageChooserPanel
from wagtail.wagtailimages.models import Image
from wagtail.wagtaildocs.edit_handlers import DocumentChooserPanel
from wagtail.wagtailsnippets.models import register_snippet
from wagtail.wagtailforms.models import AbstractEmailForm, AbstractFormField
# from wagtail.wagtailsearch import indexed
from modelcluster.fields import ParentalKey
from modelcluster.tags import ClusterTaggableManager
from taggit.models import Tag, TaggedItemBase
from south.signals import post_migrate
from flowpatrol.settings import MANDRILL_API_KEY
import mandrill
COMMON_PANELS = (
FieldPanel('slug'),
FieldPanel('seo_title'),
FieldPanel('show_in_menus'),
FieldPanel('search_description'),
)
# A couple of abstract classes that contain commonly used fields
class LinkFields(models.Model):
link_external = models.URLField("External link", blank=True)
link_page = models.ForeignKey(
'wagtailcore.Page',
null=True,
blank=True,
related_name='+'
)
link_document = models.ForeignKey(
'wagtaildocs.Document',
null=True,
blank=True,
related_name='+'
)
@property
def link(self):
if self.link_page:
return self.link_page.url
elif self.link_document:
return self.link_document.url
else:
return self.link_external
panels = [
FieldPanel('link_external'),
PageChooserPanel('link_page'),
DocumentChooserPanel('link_document'),
]
class Meta:
abstract = True
# Carousel items
class CarouselItem(LinkFields):
image = models.ForeignKey(
'wagtailimages.Image',
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name='+'
)
embed_url = models.URLField("Embed URL", blank=True)
image_title = models.CharField(max_length=255, blank=True)
image_caption = models.CharField(max_length=255, blank=True)
panels = [
ImageChooserPanel('image'),
FieldPanel('embed_url'),
FieldPanel('image_title'),
FieldPanel('image_caption'),
MultiFieldPanel(LinkFields.panels, "Link"),
]
class Meta:
abstract = True
# Related links
class RelatedLink(LinkFields):
title = models.CharField(max_length=255, help_text="Link title")
panels = [
FieldPanel('title'),
MultiFieldPanel(LinkFields.panels, "Link"),
]
class Meta:
abstract = True
# Home Page
class HomePageCarouselItem(Orderable, CarouselItem):
page = ParentalKey('flowpatrolcore.HomePage', related_name='carousel_items')
class HomePageRelatedLink(Orderable, RelatedLink):
page = ParentalKey('flowpatrolcore.HomePage', related_name='related_links')
class HomePage(Page):
body = RichTextField(blank=True)
headline = models.CharField(max_length=255, blank=True)
features = RichTextField(blank=True)
indexed_fields = ('body', )
search_name = "Homepage"
class Meta:
verbose_name = "Homepage"
def serve(self, request):
if request.method == 'POST':
name = request.POST.get('name')
email = request.POST.get('email')
phone = request.POST.get('phone')
message = request.POST.get('message')
            message_text = 'Thanks '+name+'. You\'ll soon get an email from us.'
print request.POST
try:
mandrill_client = mandrill.Mandrill(MANDRILL_API_KEY)
message = {
'from_email': 'chrxr@outlook.com',
'from_name': 'Chris Rogers',
'text': message_text,
'subject': 'Thanks for your enquiry',
'to': [{'email': email,
'name': 'Recipient Name',
'type': 'to'}],
}
result = mandrill_client.messages.send(message=message, async=False,)
except mandrill.Error, e:
# Mandrill errors are thrown as exceptions
print 'A mandrill error occurred: %s - %s' % (e.__class__, e)
# A mandrill error occurred: <class 'mandrill.UnknownSubaccountError'> - No subaccount exists with the id 'customer-123'
raise
return redirect(self.url)
return super(HomePage, self).serve(request)
HomePage.content_panels = [
FieldPanel('title', classname="full title"),
FieldPanel('headline'),
FieldPanel('features'),
FieldPanel('body', classname="full"),
InlinePanel(HomePage, 'carousel_items', label="Carousel items"),
InlinePanel(HomePage, 'related_links', label="Related links"),
]
HomePage.promote_panels = [
MultiFieldPanel(COMMON_PANELS, "Common page configuration"),
]
# Standard page
class StandardPageCarouselItem(Orderable, CarouselItem):
page = ParentalKey('flowpatrolcore.StandardPage', related_name='carousel_items')
class StandardPageRelatedLink(Orderable, RelatedLink):
page = ParentalKey('flowpatrolcore.StandardPage', related_name='related_links')
class StandardPage(Page):
intro = RichTextField(blank=True)
body = RichTextField(blank=True)
feed_image = models.ForeignKey(
'wagtailimages.Image',
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name='+'
)
indexed_fields = ('intro', 'body', )
search_name = None
StandardPage.content_panels = [
FieldPanel('title', classname="full title"),
FieldPanel('intro', classname="full"),
InlinePanel(StandardPage, 'carousel_items', label="Carousel items"),
FieldPanel('body', classname="full"),
InlinePanel(StandardPage, 'related_links', label="Related links"),
]
StandardPage.promote_panels = [
MultiFieldPanel(COMMON_PANELS, "Common page configuration"),
ImageChooserPanel('feed_image'),
]
|
{
"content_hash": "c758a6d8aa2a1575b058baac42e8d77b",
"timestamp": "",
"source": "github",
"line_count": 221,
"max_line_length": 140,
"avg_line_length": 29.624434389140273,
"alnum_prop": 0.6567893691767221,
"repo_name": "chrxr/flowpatrol_old",
"id": "42051725b8383ba75d597d3d26632d266f358b2c",
"size": "6547",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "flowpatrolcore/models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "142232"
},
{
"name": "JavaScript",
"bytes": "3945"
},
{
"name": "Python",
"bytes": "41570"
},
{
"name": "Shell",
"bytes": "14534"
}
],
"symlink_target": ""
}
|
import base64
import copy
import requests
import time
from xml.etree import ElementTree as ET
from oslo_log import log
from oslo_serialization import jsonutils
import six
from manila import exception
from manila.i18n import _
from manila.share.drivers.huawei import constants
from manila import utils
LOG = log.getLogger(__name__)
class RestHelper(object):
"""Helper class for Huawei OceanStor V3 storage system."""
def __init__(self, configuration):
self.configuration = configuration
self.url = None
self.session = None
requests.packages.urllib3.disable_warnings(
requests.packages.urllib3.exceptions.InsecureRequestWarning)
requests.packages.urllib3.disable_warnings(
requests.packages.urllib3.exceptions.InsecurePlatformWarning)
def init_http_head(self):
self.url = None
self.session = requests.Session()
self.session.headers.update({
"Connection": "keep-alive",
"Content-Type": "application/json"})
self.session.verify = False
def do_call(self, url, data, method, calltimeout=constants.SOCKET_TIMEOUT):
"""Send requests to server.
Send HTTPS call, get response in JSON.
Convert response into Python Object and return it.
"""
if self.url:
url = self.url + url
LOG.debug('Request URL: %(url)s\n'
'Call Method: %(method)s\n'
'Request Data: %(data)s\n',
{'url': url,
'method': method,
'data': data})
kwargs = {'timeout': calltimeout}
if data:
kwargs['data'] = data
if method in ('POST', 'PUT', 'GET', 'DELETE'):
func = getattr(self.session, method.lower())
else:
msg = _("Request method %s is invalid.") % method
LOG.error(msg)
raise exception.ShareBackendException(msg=msg)
try:
res = func(url, **kwargs)
except Exception as err:
LOG.error('\nBad response from server: %(url)s.'
' Error: %(err)s', {'url': url, 'err': err})
return {"error": {"code": constants.ERROR_CONNECT_TO_SERVER,
"description": "Connect server error"}}
try:
res.raise_for_status()
except requests.HTTPError as exc:
return {"error": {"code": exc.response.status_code,
"description": six.text_type(exc)}}
result = res.json()
LOG.debug('Response Data: %s', result)
return result
def login(self):
"""Login huawei array."""
login_info = self._get_login_info()
urlstr = login_info['RestURL']
url_list = urlstr.split(";")
deviceid = None
for item_url in url_list:
url = item_url.strip('').strip('\n') + "xx/sessions"
data = jsonutils.dumps({"username": login_info['UserName'],
"password": login_info['UserPassword'],
"scope": "0"})
self.init_http_head()
result = self.do_call(url, data, 'POST',
calltimeout=constants.LOGIN_SOCKET_TIMEOUT)
if((result['error']['code'] != 0)
or ("data" not in result)
or (result['data']['deviceid'] is None)):
LOG.error("Login to %s failed, try another.", item_url)
continue
LOG.debug('Login success: %(url)s\n', {'url': item_url})
deviceid = result['data']['deviceid']
self.url = item_url + deviceid
self.session.headers['iBaseToken'] = result['data']['iBaseToken']
break
if deviceid is None:
err_msg = _("All url login fail.")
LOG.error(err_msg)
raise exception.InvalidShare(reason=err_msg)
return deviceid
@utils.synchronized('huawei_manila')
def call(self, url, data, method):
"""Send requests to server.
If fail, try another RestURL.
"""
deviceid = None
old_url = self.url
result = self.do_call(url, data, method)
error_code = result['error']['code']
if(error_code == constants.ERROR_CONNECT_TO_SERVER
or error_code == constants.ERROR_UNAUTHORIZED_TO_SERVER):
LOG.error("Can't open the recent url, re-login.")
deviceid = self.login()
if deviceid is not None:
LOG.debug('Replace URL: \n'
'Old URL: %(old_url)s\n'
'New URL: %(new_url)s\n',
{'old_url': old_url,
'new_url': self.url})
result = self.do_call(url, data, method)
return result
def _create_filesystem(self, fs_param):
"""Create file system."""
url = "/filesystem"
data = jsonutils.dumps(fs_param)
result = self.call(url, data, 'POST')
msg = 'Create filesystem error.'
self._assert_rest_result(result, msg)
self._assert_data_in_result(result, msg)
return result['data']['ID']
def _assert_rest_result(self, result, err_str):
if result['error']['code'] != 0:
err_msg = (_('%(err)s\nresult: %(res)s.') % {'err': err_str,
'res': result})
LOG.error(err_msg)
raise exception.InvalidShare(reason=err_msg)
def _assert_data_in_result(self, result, msg):
if "data" not in result:
err_msg = (_('%s "data" was not in result.') % msg)
LOG.error(err_msg)
raise exception.InvalidShare(reason=err_msg)
def _get_login_info(self):
"""Get login IP, username and password from config file."""
logininfo = {}
filename = self.configuration.manila_huawei_conf_file
tree = ET.parse(filename)
root = tree.getroot()
RestURL = root.findtext('Storage/RestURL')
logininfo['RestURL'] = RestURL.strip()
# Prefix !$$$ means encoded already.
prefix_name = '!$$$'
need_encode = False
for key in ['UserName', 'UserPassword']:
node = root.find('Storage/%s' % key)
if node.text.startswith(prefix_name):
logininfo[key] = base64.b64decode(
six.b(node.text[4:])).decode()
else:
logininfo[key] = node.text
node.text = prefix_name + base64.b64encode(
six.b(node.text)).decode()
need_encode = True
if need_encode:
self._change_file_mode(filename)
try:
tree.write(filename, 'UTF-8')
except Exception as err:
err_msg = (_('File write error %s.') % err)
LOG.error(err_msg)
raise exception.InvalidShare(reason=err_msg)
return logininfo
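    # Editor's worked example (not in the original file) of the '!$$$'
    # convention above: a plain-text <UserPassword>secret</UserPassword>
    # node is rewritten on first read as '!$$$c2VjcmV0', and decoding takes
    # base64.b64decode(six.b('!$$$c2VjcmV0'[4:])).decode() == 'secret'.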
def _change_file_mode(self, filepath):
try:
utils.execute('chmod', '666', filepath, run_as_root=True)
except Exception as err:
LOG.error('Bad response from change file: %s.', err)
raise
def create_share(self, share_name, fs_id, share_proto):
"""Create a share."""
share_url_type = self._get_share_url_type(share_proto)
share_path = self._get_share_path(share_name)
filepath = {}
if share_proto == 'NFS':
filepath = {
"DESCRIPTION": "",
"FSID": fs_id,
"SHAREPATH": share_path,
}
elif share_proto == 'CIFS':
filepath = {
"SHAREPATH": share_path,
"DESCRIPTION": "",
"ABEENABLE": "false",
"ENABLENOTIFY": "true",
"ENABLEOPLOCK": "true",
"NAME": share_name.replace("-", "_"),
"FSID": fs_id,
"TENANCYID": "0",
}
else:
raise exception.InvalidShare(
reason=(_('Invalid NAS protocol supplied: %s.')
% share_proto))
url = "/" + share_url_type
data = jsonutils.dumps(filepath)
result = self.call(url, data, "POST")
msg = 'Create share error.'
self._assert_rest_result(result, msg)
self._assert_data_in_result(result, msg)
return result['data']['ID']
def _delete_share_by_id(self, share_id, share_url_type):
"""Delete share by share id."""
url = "/" + share_url_type + "/" + share_id
result = self.call(url, None, "DELETE")
self._assert_rest_result(result, 'Delete share error.')
def _delete_fs(self, fs_id):
"""Delete file system."""
# Get available file system
url = "/filesystem/" + fs_id
result = self.call(url, None, "DELETE")
self._assert_rest_result(result, 'Delete file system error.')
def _get_cifs_service_status(self):
url = "/CIFSSERVICE"
result = self.call(url, None, "GET")
msg = 'Get CIFS service status error.'
self._assert_rest_result(result, msg)
self._assert_data_in_result(result, msg)
return result['data']['RUNNINGSTATUS']
def _get_nfs_service_status(self):
url = "/NFSSERVICE"
result = self.call(url, None, "GET")
msg = 'Get NFS service status error.'
self._assert_rest_result(result, msg)
self._assert_data_in_result(result, msg)
service = {}
service['RUNNINGSTATUS'] = result['data']['RUNNINGSTATUS']
service['SUPPORTV3'] = result['data']['SUPPORTV3']
service['SUPPORTV4'] = result['data']['SUPPORTV4']
return service
def _start_nfs_service_status(self):
url = "/NFSSERVICE"
nfsserviceinfo = {
"NFSV4DOMAIN": "localdomain",
"RUNNINGSTATUS": "2",
"SUPPORTV3": 'true',
"SUPPORTV4": 'true',
"TYPE": "16452",
}
data = jsonutils.dumps(nfsserviceinfo)
result = self.call(url, data, "PUT")
self._assert_rest_result(result, 'Start NFS service error.')
def _start_cifs_service_status(self):
url = "/CIFSSERVICE"
cifsserviceinfo = {
"ENABLENOTIFY": "true",
"ENABLEOPLOCK": "true",
"ENABLEOPLOCKLEASE": "false",
"GUESTENABLE": "false",
"OPLOCKTIMEOUT": "35",
"RUNNINGSTATUS": "2",
"SECURITYMODEL": "3",
"SIGNINGENABLE": "false",
"SIGNINGREQUIRED": "false",
"TYPE": "16453",
}
data = jsonutils.dumps(cifsserviceinfo)
result = self.call(url, data, "PUT")
self._assert_rest_result(result, 'Start CIFS service error.')
def _find_pool_info(self, pool_name, result):
if pool_name is None:
return
poolinfo = {}
pool_name = pool_name.strip()
for item in result.get('data', []):
if pool_name == item['NAME'] and '2' == item['USAGETYPE']:
poolinfo['name'] = pool_name
poolinfo['ID'] = item['ID']
poolinfo['CAPACITY'] = item['USERFREECAPACITY']
poolinfo['TOTALCAPACITY'] = item['USERTOTALCAPACITY']
poolinfo['CONSUMEDCAPACITY'] = item['USERCONSUMEDCAPACITY']
poolinfo['TIER0CAPACITY'] = item['TIER0CAPACITY']
poolinfo['TIER1CAPACITY'] = item['TIER1CAPACITY']
poolinfo['TIER2CAPACITY'] = item['TIER2CAPACITY']
break
return poolinfo
def _find_all_pool_info(self):
url = "/storagepool"
result = self.call(url, None, "GET")
msg = "Query resource pool error."
self._assert_rest_result(result, msg)
self._assert_data_in_result(result, msg)
return result
def _read_xml(self):
"""Open xml file and parse the content."""
filename = self.configuration.manila_huawei_conf_file
try:
tree = ET.parse(filename)
root = tree.getroot()
except Exception as err:
message = (_('Read Huawei config file(%(filename)s)'
' for Manila error: %(err)s')
% {'filename': filename,
'err': err})
LOG.error(message)
raise exception.InvalidInput(reason=message)
return root
def _remove_access_from_share(self, access_id, share_proto):
access_type = self._get_share_client_type(share_proto)
url = "/" + access_type + "/" + access_id
result = self.call(url, None, "DELETE")
        self._assert_rest_result(result, 'Delete access from share error!')
def _get_access_count(self, share_id, share_client_type):
url_subfix = ("/" + share_client_type + "/count?"
+ "filter=PARENTID::" + share_id)
url = url_subfix
result = self.call(url, None, "GET")
msg = "Get access count by share error!"
self._assert_rest_result(result, msg)
self._assert_data_in_result(result, msg)
return int(result['data']['COUNT'])
def _get_all_access_from_share(self, share_id, share_proto):
"""Return a list of all the access IDs of the share"""
share_client_type = self._get_share_client_type(share_proto)
count = self._get_access_count(share_id, share_client_type)
access_ids = []
range_begin = 0
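        # The array returns at most 100 entries per query, so walk the access
        # list in windows of 100 ([0-100], [100-200], ...) until 'count'
        # entries have been covered.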
while count > 0:
access_range = self._get_access_from_share_range(share_id,
range_begin,
share_client_type)
for item in access_range:
access_ids.append(item['ID'])
range_begin += 100
count -= 100
return access_ids
def _get_access_from_share(self, share_id, access_to, share_proto):
"""Segments to find access for a period of 100."""
share_client_type = self._get_share_client_type(share_proto)
count = self._get_access_count(share_id, share_client_type)
access_id = None
range_begin = 0
while count > 0:
if access_id:
break
access_range = self._get_access_from_share_range(share_id,
range_begin,
share_client_type)
for item in access_range:
if item['NAME'] in (access_to, '@' + access_to):
access_id = item['ID']
range_begin += 100
count -= 100
return access_id
def _get_access_from_share_range(self, share_id,
range_begin,
share_client_type):
range_end = range_begin + 100
url = ("/" + share_client_type + "?filter=PARENTID::"
+ share_id + "&range=[" + six.text_type(range_begin)
+ "-" + six.text_type(range_end) + "]")
result = self.call(url, None, "GET")
self._assert_rest_result(result, 'Get access id by share error!')
return result.get('data', [])
def _get_level_by_access_id(self, access_id, share_proto):
share_client_type = self._get_share_client_type(share_proto)
url = "/" + share_client_type + "/" + access_id
result = self.call(url, None, "GET")
self._assert_rest_result(result, 'Get access information error!')
access_info = result.get('data', [])
access_level = access_info.get('ACCESSVAL')
if not access_level:
access_level = access_info.get('PERMISSION')
return access_level
def _change_access_rest(self, access_id,
share_proto, access_level):
"""Change access level of the share."""
if share_proto == 'NFS':
self._change_nfs_access_rest(access_id, access_level)
elif share_proto == 'CIFS':
self._change_cifs_access_rest(access_id, access_level)
else:
raise exception.InvalidInput(
reason=(_('Invalid NAS protocol supplied: %s.')
% share_proto))
def _change_nfs_access_rest(self, access_id, access_level):
url = "/NFS_SHARE_AUTH_CLIENT/" + access_id
access = {
"ACCESSVAL": access_level,
"SYNC": "0",
"ALLSQUASH": "1",
"ROOTSQUASH": "0",
}
data = jsonutils.dumps(access)
result = self.call(url, data, "PUT")
msg = 'Change access error.'
self._assert_rest_result(result, msg)
def _change_cifs_access_rest(self, access_id, access_level):
url = "/CIFS_SHARE_AUTH_CLIENT/" + access_id
access = {
"PERMISSION": access_level,
}
data = jsonutils.dumps(access)
result = self.call(url, data, "PUT")
msg = 'Change access error.'
self._assert_rest_result(result, msg)
def _allow_access_rest(self, share_id, access_to,
share_proto, access_level):
"""Allow access to the share."""
if share_proto == 'NFS':
self._allow_nfs_access_rest(share_id, access_to, access_level)
elif share_proto == 'CIFS':
self._allow_cifs_access_rest(share_id, access_to, access_level)
else:
raise exception.InvalidInput(
reason=(_('Invalid NAS protocol supplied: %s.')
% share_proto))
def _allow_nfs_access_rest(self, share_id, access_to, access_level):
url = "/NFS_SHARE_AUTH_CLIENT"
access = {
"TYPE": "16409",
"NAME": access_to,
"PARENTID": share_id,
"ACCESSVAL": access_level,
"SYNC": "0",
"ALLSQUASH": "1",
"ROOTSQUASH": "0",
}
data = jsonutils.dumps(access)
result = self.call(url, data, "POST")
msg = 'Allow access error.'
self._assert_rest_result(result, msg)
def _allow_cifs_access_rest(self, share_id, access_to, access_level):
url = "/CIFS_SHARE_AUTH_CLIENT"
domain_type = {
'local': '2',
'ad': '0'
}
error_msg = 'Allow access error.'
access_info = ('Access info (access_to: %(access_to)s, '
'access_level: %(access_level)s, share_id: %(id)s)'
% {'access_to': access_to,
'access_level': access_level,
'id': share_id})
def send_rest(access_to, domain_type):
access = {
"NAME": access_to,
"PARENTID": share_id,
"PERMISSION": access_level,
"DOMAINTYPE": domain_type,
}
data = jsonutils.dumps(access)
result = self.call(url, data, "POST")
error_code = result['error']['code']
if error_code == 0:
return True
elif error_code != constants.ERROR_USER_OR_GROUP_NOT_EXIST:
self._assert_rest_result(result, error_msg)
return False
if '\\' not in access_to:
# First, try to add user access.
LOG.debug('Try to add user access. %s.', access_info)
if send_rest(access_to, domain_type['local']):
return
            # Second, if adding user access failed,
            # try to add group access.
            LOG.debug('Failed to add user access, '
                      'trying to add group access. %s.', access_info)
# Group name starts with @.
if send_rest('@' + access_to, domain_type['local']):
return
else:
LOG.debug('Try to add domain user access. %s.', access_info)
if send_rest(access_to, domain_type['ad']):
return
            # If adding domain user access failed,
            # try to add domain group access.
            LOG.debug('Failed to add domain user access, '
                      'trying to add domain group access. %s.', access_info)
# Group name starts with @.
if send_rest('@' + access_to, domain_type['ad']):
return
raise exception.InvalidShare(reason=error_msg)
def _get_share_client_type(self, share_proto):
share_client_type = None
if share_proto == 'NFS':
share_client_type = "NFS_SHARE_AUTH_CLIENT"
elif share_proto == 'CIFS':
share_client_type = "CIFS_SHARE_AUTH_CLIENT"
else:
raise exception.InvalidInput(
reason=(_('Invalid NAS protocol supplied: %s.')
% share_proto))
return share_client_type
def _check_snapshot_id_exist(self, snapshot_info):
"""Check the snapshot id exists."""
if snapshot_info['error']['code'] == constants.MSG_SNAPSHOT_NOT_FOUND:
return False
elif snapshot_info['error']['code'] == 0:
return True
else:
err_str = "Check the snapshot id exists error!"
err_msg = (_('%(err)s\nresult: %(res)s.') % {'err': err_str,
'res': snapshot_info})
raise exception.InvalidShareSnapshot(reason=err_msg)
def _get_snapshot_by_id(self, snap_id):
"""Get snapshot by id"""
url = "/FSSNAPSHOT/" + snap_id
result = self.call(url, None, "GET")
return result
def _delete_snapshot(self, snap_id):
"""Deletes snapshot."""
url = "/FSSNAPSHOT/%s" % snap_id
data = jsonutils.dumps({"TYPE": "48", "ID": snap_id})
result = self.call(url, data, "DELETE")
self._assert_rest_result(result, 'Delete snapshot error.')
def _create_snapshot(self, sharefsid, snapshot_name):
"""Create a snapshot."""
filepath = {
"PARENTTYPE": "40",
"TYPE": "48",
"PARENTID": sharefsid,
"NAME": snapshot_name.replace("-", "_"),
"DESCRIPTION": "",
}
url = "/FSSNAPSHOT"
data = jsonutils.dumps(filepath)
result = self.call(url, data, "POST")
msg = 'Create a snapshot error.'
self._assert_rest_result(result, msg)
self._assert_data_in_result(result, msg)
return result['data']['ID']
def _get_share_by_name(self, share_name, share_url_type):
"""Segments to find share for a period of 100."""
count = self._get_share_count(share_url_type)
share = {}
range_begin = 0
while True:
if count < 0 or share:
break
share = self._get_share_by_name_range(share_name,
range_begin,
share_url_type)
range_begin += 100
count -= 100
return share
def _get_share_count(self, share_url_type):
"""Get share count."""
url = "/" + share_url_type + "/count"
result = self.call(url, None, "GET")
self._assert_rest_result(result, 'Get share count error!')
return int(result['data']['COUNT'])
def _get_share_by_name_range(self, share_name,
range_begin, share_url_type):
"""Get share by share name."""
range_end = range_begin + 100
url = ("/" + share_url_type + "?range=["
+ six.text_type(range_begin) + "-"
+ six.text_type(range_end) + "]")
result = self.call(url, None, "GET")
self._assert_rest_result(result, 'Get share by name error!')
share_path = self._get_share_path(share_name)
share = {}
for item in result.get('data', []):
if share_path == item['SHAREPATH']:
share['ID'] = item['ID']
share['FSID'] = item['FSID']
break
return share
def _get_share_url_type(self, share_proto):
share_url_type = None
if share_proto == 'NFS':
share_url_type = "NFSHARE"
elif share_proto == 'CIFS':
share_url_type = "CIFSHARE"
else:
raise exception.InvalidInput(
reason=(_('Invalid NAS protocol supplied: %s.')
% share_proto))
return share_url_type
def get_fsid_by_name(self, share_name):
share_name = share_name.replace("-", "_")
url = "/FILESYSTEM?filter=NAME::%s&range=[0-8191]" % share_name
result = self.call(url, None, "GET")
self._assert_rest_result(result, 'Get filesystem by name error!')
for item in result.get('data', []):
if share_name == item['NAME']:
return item['ID']
def _get_fs_info_by_id(self, fsid):
url = "/filesystem/%s" % fsid
result = self.call(url, None, "GET")
msg = "Get filesystem info by id error!"
self._assert_rest_result(result, msg)
self._assert_data_in_result(result, msg)
fs = {}
fs['HEALTHSTATUS'] = result['data']['HEALTHSTATUS']
fs['RUNNINGSTATUS'] = result['data']['RUNNINGSTATUS']
fs['CAPACITY'] = result['data']['CAPACITY']
fs['ALLOCTYPE'] = result['data']['ALLOCTYPE']
fs['POOLNAME'] = result['data']['PARENTNAME']
fs['COMPRESSION'] = result['data']['ENABLECOMPRESSION']
fs['DEDUP'] = result['data']['ENABLEDEDUP']
fs['SMARTPARTITIONID'] = result['data']['CACHEPARTITIONID']
fs['SMARTCACHEID'] = result['data']['SMARTCACHEPARTITIONID']
return fs
def _get_share_path(self, share_name):
share_path = "/" + share_name.replace("-", "_") + "/"
return share_path
def get_share_name_by_id(self, share_id):
share_name = "share_" + share_id
return share_name
def _get_share_name_by_export_location(self, export_location, share_proto):
export_location_split = None
share_name = None
share_ip = None
if export_location:
if share_proto == 'NFS':
export_location_split = export_location.split(':/')
if len(export_location_split) == 2:
share_name = export_location_split[1]
share_ip = export_location_split[0]
elif share_proto == 'CIFS':
export_location_split = export_location.split('\\')
if (len(export_location_split) == 4 and
export_location_split[0] == "" and
export_location_split[1] == ""):
share_ip = export_location_split[2]
share_name = export_location_split[3]
if share_name is None:
raise exception.InvalidInput(
reason=(_('No share with export location %s could be found.')
% export_location))
root = self._read_xml()
target_ip = root.findtext('Storage/LogicalPortIP')
if target_ip:
if share_ip != target_ip.strip():
raise exception.InvalidInput(
reason=(_('The share IP %s is not configured.')
% share_ip))
else:
raise exception.InvalidInput(
reason=(_('The config parameter LogicalPortIP is not set.')))
return share_name
def _get_snapshot_id(self, fs_id, snap_name):
snapshot_id = (fs_id + "@" + "share_snapshot_"
+ snap_name.replace("-", "_"))
return snapshot_id
def _change_share_size(self, fsid, new_size):
url = "/filesystem/%s" % fsid
capacityinfo = {
"CAPACITY": new_size,
}
data = jsonutils.dumps(capacityinfo)
result = self.call(url, data, "PUT")
msg = "Change a share size error!"
self._assert_rest_result(result, msg)
self._assert_data_in_result(result, msg)
def _change_fs_name(self, fsid, name):
url = "/filesystem/%s" % fsid
fs_param = {
"NAME": name.replace("-", "_"),
}
data = jsonutils.dumps(fs_param)
result = self.call(url, data, "PUT")
msg = _("Change filesystem name error.")
self._assert_rest_result(result, msg)
def _change_extra_specs(self, fsid, extra_specs):
url = "/filesystem/%s" % fsid
fs_param = {
"ENABLEDEDUP": extra_specs['dedupe'],
"ENABLECOMPRESSION": extra_specs['compression']
}
data = jsonutils.dumps(fs_param)
result = self.call(url, data, "PUT")
msg = _("Change extra_specs error.")
self._assert_rest_result(result, msg)
def _get_partition_id_by_name(self, name):
url = "/cachepartition"
result = self.call(url, None, "GET")
self._assert_rest_result(result, _('Get partition by name error.'))
if "data" in result:
for item in result['data']:
if name == item['NAME']:
return item['ID']
return None
def get_partition_info_by_id(self, partitionid):
url = '/cachepartition/' + partitionid
result = self.call(url, None, "GET")
self._assert_rest_result(result,
_('Get partition by partition id error.'))
return result['data']
def _add_fs_to_partition(self, fs_id, partition_id):
url = "/filesystem/associate/cachepartition"
data = jsonutils.dumps({"ID": partition_id,
"ASSOCIATEOBJTYPE": 40,
"ASSOCIATEOBJID": fs_id,
"TYPE": 268})
result = self.call(url, data, "POST")
self._assert_rest_result(result,
_('Add filesystem to partition error.'))
def _remove_fs_from_partition(self, fs_id, partition_id):
url = "/smartPartition/removeFs"
data = jsonutils.dumps({"ID": partition_id,
"ASSOCIATEOBJTYPE": 40,
"ASSOCIATEOBJID": fs_id,
"TYPE": 268})
result = self.call(url, data, "PUT")
self._assert_rest_result(result,
_('Remove filesystem from partition error.'))
def _rename_share_snapshot(self, snapshot_id, new_name):
url = "/FSSNAPSHOT/" + snapshot_id
data = jsonutils.dumps({"NAME": new_name})
result = self.call(url, data, "PUT")
msg = _('Rename share snapshot on array error.')
self._assert_rest_result(result, msg)
self._assert_data_in_result(result, msg)
def _get_cache_id_by_name(self, name):
url = "/SMARTCACHEPARTITION"
result = self.call(url, None, "GET")
self._assert_rest_result(result, _('Get cache by name error.'))
if "data" in result:
for item in result['data']:
if name == item['NAME']:
return item['ID']
return None
def get_cache_info_by_id(self, cacheid):
url = "/SMARTCACHEPARTITION/" + cacheid
data = jsonutils.dumps({"TYPE": "273",
"ID": cacheid})
result = self.call(url, data, "GET")
self._assert_rest_result(
result, _('Get smartcache by cache id error.'))
return result['data']
def _add_fs_to_cache(self, fs_id, cache_id):
url = "/SMARTCACHEPARTITION/CREATE_ASSOCIATE"
data = jsonutils.dumps({"ID": cache_id,
"ASSOCIATEOBJTYPE": 40,
"ASSOCIATEOBJID": fs_id,
"TYPE": 273})
result = self.call(url, data, "PUT")
self._assert_rest_result(result, _('Add filesystem to cache error.'))
def get_qos(self):
url = "/ioclass"
result = self.call(url, None, "GET")
self._assert_rest_result(result, _('Get QoS information error.'))
return result
def find_available_qos(self, qos):
""""Find available QoS on the array."""
qos_id = None
fs_list = []
temp_qos = copy.deepcopy(qos)
result = self.get_qos()
if 'data' in result:
if 'LATENCY' not in temp_qos:
temp_qos['LATENCY'] = '0'
for item in result['data']:
for key in constants.OPTS_QOS_VALUE:
if temp_qos.get(key.upper()) != item.get(key.upper()):
break
else:
fs_num = len(item['FSLIST'].split(","))
                    # This 'else' branch runs only when every requested QoS
                    # key matched the item. Reuse this QoS only if it is
                    # active, was created by this driver (name prefix), is
                    # not bound to any LUN, and holds fewer than
                    # MAX_FS_NUM_IN_QOS (64) filesystems; otherwise no more
                    # filesystems can be added to it.
if (item['RUNNINGSTATUS'] == constants.STATUS_QOS_ACTIVE
and fs_num < constants.MAX_FS_NUM_IN_QOS
and item['NAME'].startswith(
constants.QOS_NAME_PREFIX)
and item['LUNLIST'] == '[""]'):
qos_id = item['ID']
fs_list = item['FSLIST']
break
return (qos_id, fs_list)
def add_share_to_qos(self, qos_id, fs_id, fs_list):
"""Add filesystem to QoS."""
url = "/ioclass/" + qos_id
new_fs_list = []
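        # FSLIST comes back from the array as a string such as '["1","2"]';
        # strip the surrounding brackets, split on commas, drop the quotes
        # around each id, skip empties and any stale copy of fs_id, then
        # append fs_id once.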
fs_list_string = fs_list[1:-1]
for fs_string in fs_list_string.split(","):
tmp_fs_id = fs_string[1:-1]
if '' != tmp_fs_id and tmp_fs_id != fs_id:
new_fs_list.append(tmp_fs_id)
new_fs_list.append(fs_id)
data = jsonutils.dumps({"FSLIST": new_fs_list,
"TYPE": 230,
"ID": qos_id})
result = self.call(url, data, "PUT")
        msg = _('Associate filesystem to QoS error.')
self._assert_rest_result(result, msg)
def create_qos_policy(self, qos, fs_id):
# Get local time.
localtime = time.strftime('%Y%m%d%H%M%S', time.localtime(time.time()))
# Package QoS name.
qos_name = constants.QOS_NAME_PREFIX + fs_id + '_' + localtime
mergedata = {
"TYPE": "230",
"NAME": qos_name,
"FSLIST": ["%s" % fs_id],
"CLASSTYPE": "1",
"SCHEDULEPOLICY": "2",
"SCHEDULESTARTTIME": "1410969600",
"STARTTIME": "08:00",
"DURATION": "86400",
"CYCLESET": "[1,2,3,4,5,6,0]",
}
mergedata.update(qos)
data = jsonutils.dumps(mergedata)
url = "/ioclass"
result = self.call(url, data, 'POST')
self._assert_rest_result(result, _('Create QoS policy error.'))
return result['data']['ID']
def activate_deactivate_qos(self, qos_id, enablestatus):
"""Activate or deactivate QoS.
enablestatus: true (activate)
enablestatus: false (deactivate)
"""
url = "/ioclass/active/" + qos_id
data = jsonutils.dumps({
"TYPE": 230,
"ID": qos_id,
"ENABLESTATUS": enablestatus})
result = self.call(url, data, "PUT")
self._assert_rest_result(
result, _('Activate or deactivate QoS error.'))
def change_fs_priority_high(self, fs_id):
"""Change fs priority to high."""
url = "/filesystem/" + fs_id
data = jsonutils.dumps({"IOPRIORITY": "3"})
result = self.call(url, data, "PUT")
self._assert_rest_result(
result, _('Change filesystem priority error.'))
def delete_qos_policy(self, qos_id):
"""Delete a QoS policy."""
url = "/ioclass/" + qos_id
data = jsonutils.dumps({"TYPE": "230",
"ID": qos_id})
result = self.call(url, data, 'DELETE')
self._assert_rest_result(result, _('Delete QoS policy error.'))
def get_qosid_by_fsid(self, fs_id):
"""Get QoS id by fs id."""
url = "/filesystem/" + fs_id
result = self.call(url, None, "GET")
self._assert_rest_result(
result, _('Get QoS id by filesystem id error.'))
return result['data'].get('IOCLASSID')
def get_fs_list_in_qos(self, qos_id):
"""Get the filesystem list in QoS."""
qos_info = self.get_qos_info(qos_id)
fs_list = []
fs_string = qos_info['FSLIST'][1:-1]
for fs in fs_string.split(","):
fs_id = fs[1:-1]
fs_list.append(fs_id)
return fs_list
def get_qos_info(self, qos_id):
"""Get QoS information."""
url = "/ioclass/" + qos_id
result = self.call(url, None, "GET")
self._assert_rest_result(result, _('Get QoS information error.'))
return result['data']
def remove_fs_from_qos(self, fs_id, fs_list, qos_id):
"""Remove filesystem from QoS."""
fs_list = [i for i in fs_list if i != fs_id]
url = "/ioclass/" + qos_id
data = jsonutils.dumps({"FSLIST": fs_list,
"TYPE": 230,
"ID": qos_id})
result = self.call(url, data, "PUT")
msg = _('Remove filesystem from QoS error.')
self._assert_rest_result(result, msg)
def _remove_fs_from_cache(self, fs_id, cache_id):
url = "/SMARTCACHEPARTITION/REMOVE_ASSOCIATE"
data = jsonutils.dumps({"ID": cache_id,
"ASSOCIATEOBJTYPE": 40,
"ASSOCIATEOBJID": fs_id,
"TYPE": 273})
result = self.call(url, data, "PUT")
self._assert_rest_result(result,
_('Remove filesystem from cache error.'))
def get_all_eth_port(self):
url = "/ETH_PORT"
result = self.call(url, None, 'GET')
self._assert_rest_result(result, _('Get all eth port error.'))
all_eth = {}
if "data" in result:
all_eth = result['data']
return all_eth
def get_eth_port_by_id(self, port_id):
url = "/ETH_PORT/" + port_id
result = self.call(url, None, 'GET')
self._assert_rest_result(result, _('Get eth port by id error.'))
if "data" in result:
return result['data']
return None
def get_all_bond_port(self):
url = "/BOND_PORT"
result = self.call(url, None, 'GET')
self._assert_rest_result(result, _('Get all bond port error.'))
all_bond = {}
if "data" in result:
all_bond = result['data']
return all_bond
def get_port_id(self, port_name, port_type):
if port_type == constants.PORT_TYPE_ETH:
all_eth = self.get_all_eth_port()
for item in all_eth:
if port_name == item['LOCATION']:
return item['ID']
elif port_type == constants.PORT_TYPE_BOND:
all_bond = self.get_all_bond_port()
for item in all_bond:
if port_name == item['NAME']:
return item['ID']
return None
def get_all_vlan(self):
url = "/vlan"
result = self.call(url, None, 'GET')
self._assert_rest_result(result, _('Get all vlan error.'))
all_vlan = {}
if "data" in result:
all_vlan = result['data']
return all_vlan
def get_vlan(self, port_id, vlan_tag):
url = "/vlan"
result = self.call(url, None, 'GET')
self._assert_rest_result(result, _('Get vlan error.'))
vlan_tag = six.text_type(vlan_tag)
if "data" in result:
for item in result['data']:
if port_id == item['PORTID'] and vlan_tag == item['TAG']:
return True, item['ID']
return False, None
def create_vlan(self, port_id, port_type, vlan_tag):
url = "/vlan"
data = jsonutils.dumps({"PORTID": port_id,
"PORTTYPE": port_type,
"TAG": six.text_type(vlan_tag),
"TYPE": "280"})
result = self.call(url, data, "POST")
self._assert_rest_result(result, _('Create vlan error.'))
return result['data']['ID']
def check_vlan_exists_by_id(self, vlan_id):
all_vlan = self.get_all_vlan()
return any(vlan['ID'] == vlan_id for vlan in all_vlan)
def delete_vlan(self, vlan_id):
url = "/vlan/" + vlan_id
result = self.call(url, None, 'DELETE')
if result['error']['code'] == constants.ERROR_LOGICAL_PORT_EXIST:
LOG.warning('Cannot delete vlan because there is '
'a logical port on vlan.')
return
self._assert_rest_result(result, _('Delete vlan error.'))
def get_logical_port(self, home_port_id, ip, subnet):
url = "/LIF"
result = self.call(url, None, 'GET')
self._assert_rest_result(result, _('Get logical port error.'))
if "data" not in result:
return False, None
for item in result['data']:
if (home_port_id == item['HOMEPORTID']
and ip == item['IPV4ADDR']
and subnet == item['IPV4MASK']):
if item['OPERATIONALSTATUS'] != 'true':
self._activate_logical_port(item['ID'])
return True, item['ID']
return False, None
def _activate_logical_port(self, logical_port_id):
url = "/LIF/" + logical_port_id
data = jsonutils.dumps({"OPERATIONALSTATUS": "true"})
result = self.call(url, data, 'PUT')
self._assert_rest_result(result, _('Activate logical port error.'))
def create_logical_port(self, home_port_id, home_port_type, ip, subnet):
url = "/LIF"
info = {
"ADDRESSFAMILY": 0,
"CANFAILOVER": "true",
"HOMEPORTID": home_port_id,
"HOMEPORTTYPE": home_port_type,
"IPV4ADDR": ip,
"IPV4GATEWAY": "",
"IPV4MASK": subnet,
"NAME": ip,
"OPERATIONALSTATUS": "true",
"ROLE": 2,
"SUPPORTPROTOCOL": 3,
"TYPE": "279",
}
data = jsonutils.dumps(info)
result = self.call(url, data, 'POST')
self._assert_rest_result(result, _('Create logical port error.'))
return result['data']['ID']
def check_logical_port_exists_by_id(self, logical_port_id):
all_logical_port = self.get_all_logical_port()
return any(port['ID'] == logical_port_id for port in all_logical_port)
def get_all_logical_port(self):
url = "/LIF"
result = self.call(url, None, 'GET')
self._assert_rest_result(result, _('Get all logical port error.'))
all_logical_port = {}
if "data" in result:
all_logical_port = result['data']
return all_logical_port
def delete_logical_port(self, logical_port_id):
url = "/LIF/" + logical_port_id
result = self.call(url, None, 'DELETE')
self._assert_rest_result(result, _('Delete logical port error.'))
def set_DNS_ip_address(self, dns_ip_list):
if len(dns_ip_list) > 3:
            message = _('At most three IPs can be set for DNS.')
LOG.error(message)
raise exception.InvalidInput(reason=message)
url = "/DNS_Server"
dns_info = {
"ADDRESS": jsonutils.dumps(dns_ip_list),
"TYPE": "260",
}
data = jsonutils.dumps(dns_info)
result = self.call(url, data, 'PUT')
self._assert_rest_result(result, _('Set DNS ip address error.'))
if "data" in result:
return result['data']
return None
def get_DNS_ip_address(self):
url = "/DNS_Server"
result = self.call(url, None, 'GET')
self._assert_rest_result(result, _('Get DNS ip address error.'))
ip_address = {}
if "data" in result:
ip_address = jsonutils.loads(result['data']['ADDRESS'])
return ip_address
def add_AD_config(self, user, password, domain, system_name):
url = "/AD_CONFIG"
info = {
"ADMINNAME": user,
"ADMINPWD": password,
"DOMAINSTATUS": 1,
"FULLDOMAINNAME": domain,
"OU": "",
"SYSTEMNAME": system_name,
"TYPE": "16414",
}
data = jsonutils.dumps(info)
result = self.call(url, data, 'PUT')
self._assert_rest_result(result, _('Add AD config error.'))
def delete_AD_config(self, user, password):
url = "/AD_CONFIG"
info = {
"ADMINNAME": user,
"ADMINPWD": password,
"DOMAINSTATUS": 0,
"TYPE": "16414",
}
data = jsonutils.dumps(info)
result = self.call(url, data, 'PUT')
self._assert_rest_result(result, _('Delete AD config error.'))
def get_AD_config(self):
url = "/AD_CONFIG"
result = self.call(url, None, 'GET')
self._assert_rest_result(result, _('Get AD config error.'))
if "data" in result:
return result['data']
return None
def get_AD_domain_name(self):
result = self.get_AD_config()
if result and result['DOMAINSTATUS'] == '1':
return True, result['FULLDOMAINNAME']
return False, None
def add_LDAP_config(self, server, domain):
url = "/LDAP_CONFIG"
info = {
"BASEDN": domain,
"LDAPSERVER": server,
"PORTNUM": 389,
"TRANSFERTYPE": "1",
"TYPE": "16413",
"USERNAME": "",
}
data = jsonutils.dumps(info)
result = self.call(url, data, 'PUT')
self._assert_rest_result(result, _('Add LDAP config error.'))
def delete_LDAP_config(self):
url = "/LDAP_CONFIG"
result = self.call(url, None, 'DELETE')
self._assert_rest_result(result, _('Delete LDAP config error.'))
def get_LDAP_config(self):
url = "/LDAP_CONFIG"
result = self.call(url, None, 'GET')
self._assert_rest_result(result, _('Get LDAP config error.'))
if "data" in result:
return result['data']
return None
def get_LDAP_domain_server(self):
result = self.get_LDAP_config()
if result and result['LDAPSERVER']:
return True, result['LDAPSERVER']
return False, None
def _get_array_info(self):
url = "/system/"
result = self.call(url, None, "GET")
msg = _('Get array info error.')
self._assert_rest_result(result, msg)
self._assert_data_in_result(result, msg)
return result.get('data')
def find_array_version(self):
info = self._get_array_info()
return info.get('PRODUCTVERSION')
def get_array_wwn(self):
info = self._get_array_info()
return info.get('wwn')
def _get_all_remote_devices(self):
url = "/remote_device"
result = self.call(url, None, "GET")
self._assert_rest_result(result, _('Get all remote devices error.'))
return result.get('data', [])
def get_remote_device_by_wwn(self, wwn):
devices = self._get_all_remote_devices()
for device in devices:
if device.get('WWN') == wwn:
return device
return {}
def create_replication_pair(self, pair_params):
url = "/REPLICATIONPAIR"
data = jsonutils.dumps(pair_params)
result = self.call(url, data, "POST")
msg = _('Failed to create replication pair for '
'(LOCALRESID: %(lres)s, REMOTEDEVICEID: %(rdev)s, '
'REMOTERESID: %(rres)s).') % {
'lres': pair_params['LOCALRESID'],
'rdev': pair_params['REMOTEDEVICEID'],
'rres': pair_params['REMOTERESID']}
self._assert_rest_result(result, msg)
self._assert_data_in_result(result, msg)
return result['data']
def split_replication_pair(self, pair_id):
url = '/REPLICATIONPAIR/split'
data = jsonutils.dumps({"ID": pair_id, "TYPE": "263"})
result = self.call(url, data, "PUT")
msg = _('Failed to split replication pair %s.') % pair_id
self._assert_rest_result(result, msg)
def switch_replication_pair(self, pair_id):
url = '/REPLICATIONPAIR/switch'
data = jsonutils.dumps({"ID": pair_id, "TYPE": "263"})
result = self.call(url, data, "PUT")
msg = _('Failed to switch replication pair %s.') % pair_id
self._assert_rest_result(result, msg)
def delete_replication_pair(self, pair_id):
url = "/REPLICATIONPAIR/" + pair_id
data = None
result = self.call(url, data, "DELETE")
if (result['error']['code'] ==
constants.ERROR_REPLICATION_PAIR_NOT_EXIST):
LOG.warning('Replication pair %s was not found.',
pair_id)
return
msg = _('Failed to delete replication pair %s.') % pair_id
self._assert_rest_result(result, msg)
def sync_replication_pair(self, pair_id):
url = "/REPLICATIONPAIR/sync"
data = jsonutils.dumps({"ID": pair_id, "TYPE": "263"})
result = self.call(url, data, "PUT")
msg = _('Failed to sync replication pair %s.') % pair_id
self._assert_rest_result(result, msg)
def cancel_pair_secondary_write_lock(self, pair_id):
url = "/REPLICATIONPAIR/CANCEL_SECODARY_WRITE_LOCK"
data = jsonutils.dumps({"ID": pair_id, "TYPE": "263"})
result = self.call(url, data, "PUT")
msg = _('Failed to cancel replication pair %s '
'secondary write lock.') % pair_id
self._assert_rest_result(result, msg)
def set_pair_secondary_write_lock(self, pair_id):
url = "/REPLICATIONPAIR/SET_SECODARY_WRITE_LOCK"
data = jsonutils.dumps({"ID": pair_id, "TYPE": "263"})
result = self.call(url, data, "PUT")
msg = _('Failed to set replication pair %s '
'secondary write lock.') % pair_id
self._assert_rest_result(result, msg)
def get_replication_pair_by_id(self, pair_id):
url = "/REPLICATIONPAIR/" + pair_id
result = self.call(url, None, "GET")
msg = _('Failed to get replication pair %s.') % pair_id
self._assert_rest_result(result, msg)
self._assert_data_in_result(result, msg)
return result.get('data')
def rollback_snapshot(self, snap_id):
url = "/FSSNAPSHOT/ROLLBACK_FSSNAPSHOT"
data = jsonutils.dumps({"ID": snap_id})
result = self.call(url, data, "PUT")
msg = _('Failed to rollback snapshot %s.') % snap_id
self._assert_rest_result(result, msg)
|
{
"content_hash": "2276b582b363926cf84bfcb01b210561",
"timestamp": "",
"source": "github",
"line_count": 1442,
"max_line_length": 79,
"avg_line_length": 35.64216366158114,
"alnum_prop": 0.5209938516616079,
"repo_name": "bswartz/manila",
"id": "f37d8dcedfbf4dac5d4d861e6131f4c74ced4aa6",
"size": "52045",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "manila/share/drivers/huawei/v3/helper.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "953"
},
{
"name": "Python",
"bytes": "9952105"
},
{
"name": "Shell",
"bytes": "106606"
}
],
"symlink_target": ""
}
|
"""
=================================================================
Permutation t-test on source data with spatio-temporal clustering
=================================================================
Tests if the evoked response is significantly different between
conditions across subjects (simulated here using one subject's data).
The multiple comparisons problem is addressed with a cluster-level
permutation test across space and time.
"""
# Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Eric Larson <larson.eric.d@gmail.com>
# License: BSD (3-clause)
import os.path as op
import numpy as np
from numpy.random import randn
from scipy import stats as stats
import mne
from mne import (io, spatial_tris_connectivity, compute_morph_matrix,
grade_to_tris)
from mne.epochs import equalize_epoch_counts
from mne.stats import (spatio_temporal_cluster_1samp_test,
summarize_clusters_stc)
from mne.minimum_norm import apply_inverse, read_inverse_operator
from mne.datasets import sample
from mne.viz import mne_analyze_colormap
print(__doc__)
###############################################################################
# Set parameters
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
subjects_dir = data_path + '/subjects'
tmin = -0.2
tmax = 0.3 # Use a lower tmax to reduce multiple comparisons
# Setup for reading the raw data
raw = io.Raw(raw_fname)
events = mne.read_events(event_fname)
###############################################################################
# Read epochs for all channels, removing a bad one
raw.info['bads'] += ['MEG 2443']
picks = mne.pick_types(raw.info, meg=True, eog=True, exclude='bads')
event_id = 1 # L auditory
reject = dict(grad=1000e-13, mag=4000e-15, eog=150e-6)
epochs1 = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=(None, 0), reject=reject, preload=True)
event_id = 3 # L visual
epochs2 = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=(None, 0), reject=reject, preload=True)
# Equalize trial counts to eliminate bias (which would otherwise be
# introduced by the abs() performed below)
equalize_epoch_counts([epochs1, epochs2])
###############################################################################
# Transform to source space
fname_inv = data_path + '/MEG/sample/sample_audvis-meg-oct-6-meg-inv.fif'
snr = 3.0
lambda2 = 1.0 / snr ** 2
method = "dSPM" # use dSPM method (could also be MNE or sLORETA)
inverse_operator = read_inverse_operator(fname_inv)
sample_vertices = [s['vertno'] for s in inverse_operator['src']]
# Let's average and compute inverse, resampling to speed things up
evoked1 = epochs1.average()
evoked1.resample(50)
condition1 = apply_inverse(evoked1, inverse_operator, lambda2, method)
evoked2 = epochs2.average()
evoked2.resample(50)
condition2 = apply_inverse(evoked2, inverse_operator, lambda2, method)
# Let's only deal with t > 0, cropping to reduce multiple comparisons
condition1.crop(0, None)
condition2.crop(0, None)
tmin = condition1.tmin
tstep = condition1.tstep
###############################################################################
# Transform to common cortical space
# Normally you would read in estimates across several subjects and morph
# them to the same cortical space (e.g. fsaverage). For example purposes,
# we will simulate this by just having each "subject" have the same
# response (just noisy in source space) here. Note that for 7 subjects
# with a two-sided statistical test, the minimum significance under a
# permutation test is only p = 1/(2 ** 6) = 0.015, which is large.
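# (One way to see the 2 ** 6: a sign-flip test on n subjects has 2 ** n
# relabelings, and a two-sided statistic is unchanged when all signs flip,
# leaving 2 ** (n - 1) = 64 distinct values for n = 7, hence the 1 / 64
# floor.)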
n_vertices_sample, n_times = condition1.data.shape
n_subjects = 7
print('Simulating data for %d subjects.' % n_subjects)
# Let's make sure our results replicate, so set the seed.
np.random.seed(0)
X = randn(n_vertices_sample, n_times, n_subjects, 2) * 10
X[:, :, :, 0] += condition1.data[:, :, np.newaxis]
X[:, :, :, 1] += condition2.data[:, :, np.newaxis]
# It's a good idea to spatially smooth the data, and for visualization
# purposes, let's morph these to fsaverage, which is a grade 5 source space
# with vertices 0:10242 for each hemisphere. Usually you'd have to morph
# each subject's data separately (and you might want to use morph_data
# instead), but here since all estimates are on 'sample' we can use one
# morph matrix for all the heavy lifting.
fsave_vertices = [np.arange(10242), np.arange(10242)]
morph_mat = compute_morph_matrix('sample', 'fsaverage', sample_vertices,
fsave_vertices, 20, subjects_dir)
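# morph_mat maps 'sample' vertices to fsaverage vertices, so its shape is
# (n_vertices_fsave, n_vertices_sample) and dotting it with X morphs all
# subjects and conditions in one go.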
n_vertices_fsave = morph_mat.shape[0]
# We have to change the shape for the dot() to work properly
X = X.reshape(n_vertices_sample, n_times * n_subjects * 2)
print('Morphing data.')
X = morph_mat.dot(X) # morph_mat is a sparse matrix
X = X.reshape(n_vertices_fsave, n_times, n_subjects, 2)
# Finally, we want to compare the overall activity levels in each condition,
# the diff is taken along the last axis (condition). The negative sign makes
# it so condition1 > condition2 shows up as "red blobs" (instead of blue).
X = np.abs(X) # only magnitude
X = X[:, :, :, 0] - X[:, :, :, 1] # make paired contrast
###############################################################################
# Compute statistic
# To use an algorithm optimized for spatio-temporal clustering, we
# just pass the spatial connectivity matrix (instead of spatio-temporal)
print('Computing connectivity.')
connectivity = spatial_tris_connectivity(grade_to_tris(5))
# Note that X needs to be a multi-dimensional array of shape
# samples (subjects) x time x space, so we permute dimensions
X = np.transpose(X, [2, 1, 0])
# Now let's actually do the clustering. This can take a long time...
# Here we set the threshold quite high to reduce computation.
p_threshold = 0.001
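# ppf(p_threshold / 2) gives the negative lower-tail quantile of Student's t
# with n_subjects - 1 degrees of freedom; negating it yields the positive
# two-tailed critical value used as the cluster-forming threshold.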
t_threshold = -stats.distributions.t.ppf(p_threshold / 2., n_subjects - 1)
print('Clustering.')
T_obs, clusters, cluster_p_values, H0 = clu = \
spatio_temporal_cluster_1samp_test(X, connectivity=connectivity, n_jobs=2,
threshold=t_threshold)
# Now select the clusters that are sig. at p < 0.05 (note that this value
# is multiple-comparisons corrected).
good_cluster_inds = np.where(cluster_p_values < 0.05)[0]
###############################################################################
# Visualize the clusters
print('Visualizing clusters.')
# Now let's build a convenient representation of each cluster, where each
# cluster becomes a "time point" in the SourceEstimate
stc_all_cluster_vis = summarize_clusters_stc(clu, tstep=tstep,
vertices=fsave_vertices,
subject='fsaverage')
# Let's actually plot the first "time point" in the SourceEstimate, which
# shows all the clusters, weighted by duration
colormap = mne_analyze_colormap(limits=[0, 10, 50])
subjects_dir = op.join(data_path, 'subjects')
# blue blobs are for condition A < condition B, red for A > B
brain = stc_all_cluster_vis.plot('fsaverage', 'inflated', 'both', colormap,
subjects_dir=subjects_dir,
time_label='Duration significant (ms)')
brain.set_data_time_index(0)
# The colormap requires brain data to be scaled -fmax -> fmax
brain.scale_data_colormap(fmin=-50, fmid=0, fmax=50, transparent=False)
brain.show_view('lateral')
brain.save_image('clusters.png')
|
{
"content_hash": "12b181bd0c2bd83b2a9d2499777f6241",
"timestamp": "",
"source": "github",
"line_count": 179,
"max_line_length": 79,
"avg_line_length": 43.29608938547486,
"alnum_prop": 0.6430967741935484,
"repo_name": "agramfort/mne-python",
"id": "91a10c8bc9826cdc30106fa736fc9e8b54bada50",
"size": "7750",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/stats/plot_cluster_stats_spatio_temporal.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "PowerShell",
"bytes": "2986"
},
{
"name": "Python",
"bytes": "3751581"
},
{
"name": "Shell",
"bytes": "4011"
}
],
"symlink_target": ""
}
|
"""Auth component configuration hooks.
Applications that use the 'auth' component can override settings defined here by
adding the following lines to appengine_config.py:
components_auth_UI_APP_NAME = 'My service name'
Code flow when this is used:
* GAE app starts and loads a module with main WSGI app.
* This module imports 'components.auth'.
* components.auth imports components.auth.config (thus executing code here).
* lib_config.register below imports appengine_config.py.
* Later when code path hits auth-related code, ensure_configured is called.
* ensure_configured calls handler.configure and auth.ui.configure.
* Fin.
"""
import threading
from google.appengine.api import lib_config
# Used in ensure_configured.
_config_lock = threading.Lock()
_config_called = False
# Read the configuration. It will be applied later in 'ensure_configured'.
_config = lib_config.register(
'components_auth',
{
# Title of the service to show in UI.
'UI_APP_NAME': 'Auth',
# True if application is calling 'configure_ui' manually.
'UI_CUSTOM_CONFIG': False,
})
def ensure_configured():
"""Applies component configuration.
Called lazily when auth component is used for a first time.
"""
global _config_called
# Import lazily to avoid module reference cycle.
from components import utils
from . import handler
from .ui import ui
with _config_lock:
if not _config_called:
authenticators = []
      # OAuth mocks on the dev server always return useless values, so don't
      # use them.
if not utils.is_local_dev_server():
authenticators.append(handler.oauth_authentication)
authenticators.extend([
handler.cookie_authentication,
handler.service_to_service_authentication,
])
handler.configure(authenticators)
# Customize auth UI to show where it's running.
if not _config.UI_CUSTOM_CONFIG:
ui.configure_ui(_config.UI_APP_NAME)
# Mark as successfully completed.
_config_called = True
|
{
"content_hash": "0a196b5e358e9389ef64b9449ec3937e",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 78,
"avg_line_length": 31.153846153846153,
"alnum_prop": 0.7076543209876544,
"repo_name": "pombreda/swarming",
"id": "3cd9b7222131320bda8b7ed69581336029b963d9",
"size": "2192",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "appengine/components/components/auth/config.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "3014"
},
{
"name": "HTML",
"bytes": "249103"
},
{
"name": "JavaScript",
"bytes": "925519"
},
{
"name": "Protocol Buffer",
"bytes": "8868"
},
{
"name": "Python",
"bytes": "1495594"
},
{
"name": "Shell",
"bytes": "1267"
}
],
"symlink_target": ""
}
|
""" Master object to combine the layers into a net """
# Imports
import json
import pathlib
import numpy as np
from . import layers
# Classes
class Model(object):
""" Model object
To build an 8-3-8 autoencoder::
auto = Model('8-3-8', input_size=8)
auto.add_layer(layers.FullyConnected(3, func='sigmoid'))
auto.add_layer(layers.FullyConnected(8, func='sigmoid'))
:param name:
The name of the model
:param input_size:
The number of input neurons into the model
:param rng:
If not None, the numpy.random.RandomState object to use for all
random numbers
"""
def __init__(self, name=None, input_size=None, rng=None):
self.name = name
self.input_size = input_size
self.layers = []
if rng is None:
rng = np.random
self.rng = rng
self.opt = None
def __eq__(self, other):
if len(self.layers) != len(other.layers):
return False
for sl, ol in zip(self.layers, other.layers):
if type(sl) != type(ol):
return False
if sl != ol:
return False
return True
def set_optimizer(self, opt):
""" Set the optimizer
:param opt:
The optimizer object to use in gradient descent
"""
self.opt = opt
def init_weights(self):
""" Initialize the layer's weights """
prev_size = self.input_size
for layer in self.layers:
layer.init_weights(prev_size, rng=self.rng)
prev_size = layer.size
def predict(self, x):
""" Predict the output from the input
:param x:
The input_size x batch_size array to predict on
:returns:
An output_size x batch_size prediction from the final layer
"""
if x.ndim == 1:
x = x[:, np.newaxis]
for i, layer in enumerate(self.layers):
x = layer.predict(x)
return x
def calc_error(self, y, yhat):
""" Calculate the MSE
:param y:
The output_size x batch_size True labels
:param yhat:
The output_size x batch_size predicted labels
:returns:
The mean squared error (1 x batch_size)
"""
return np.sum((yhat - y)**2, axis=0)[np.newaxis, :]
def gradient_descent(self, x, y):
""" Implement gradient descent
:param x:
The input_size x batch_size array to train on
:param y:
The output_size x batch_size True labels
:returns:
The mean squared error for this batch (1 x batch_size)
"""
if x.ndim == 1:
x = x[:, np.newaxis]
if y.ndim == 1:
y = y[:, np.newaxis]
assert x.shape[1] == y.shape[1]
yhat = self.predict(x)
assert yhat.shape == y.shape
# Backprop the error
deltas = [self.layers[-1].calc_error(y)]
for layer in reversed(self.layers[1:]):
deltas.append(layer.calc_delta(deltas[-1]))
assert len(deltas) == len(self.layers)
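        # 'deltas' was collected from the output layer backwards; reverse it
        # below so it lines up with self.layers (forward order) before the
        # gradients are computed.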
# Convert the deltas to gradients
weights = self.get_weight_list()
grads = self.get_grad_list(list(reversed(deltas)))
# Use the optimizer to update the weights
new_weights = self.opt.update(weights, grads)
# Update the weights
self.set_weight_list(new_weights)
# Return the MSE for each prediction
return self.calc_error(y, yhat)
def add_layer(self, layer):
""" Add a layer to the model
:param layer:
The layer object to add to the model
"""
if len(self.layers) == 0:
layer.prev_size = self.input_size
else:
layer.prev_size = self.layers[-1].size
self.layers.append(layer)
def save_model(self, modelfile):
""" Dump the model specification
:param modelfile:
The JSON file to write the specification to
"""
modelfile = pathlib.Path(modelfile)
# Convert the model to a JSON compatible dictionary
data = {'name': self.name,
'input_size': self.input_size,
'layers': [l.to_dict() for l in self.layers]}
with modelfile.open('wt') as fp:
json.dump(data, fp,
sort_keys=True,
indent=4,
separators=(',', ': '))
def save_weights(self, weightfile):
""" Dump the model weights
:param weightfile:
The numpy npz file to write to
"""
weightfile = pathlib.Path(weightfile)
layer_data = {}
for i, layer in enumerate(self.layers):
prefix = '{}-{:02d}-'.format(type(layer).__name__, i)
layer_data[prefix + 'weight'] = layer.weight
layer_data[prefix + 'bias'] = layer.bias
np.savez(str(weightfile), **layer_data)
def load_weights(self, weightfile):
""" Load weights from a file
:param weightfile:
The numpy npz file to read from
"""
layer_data = np.load(str(weightfile))
layer_keys = set(layer_data.keys())
for i, layer in enumerate(self.layers):
prefix = '{}-{:02d}-'.format(type(layer).__name__, i)
weight = layer_data[prefix + 'weight']
bias = layer_data[prefix + 'bias']
layer_keys.remove(prefix + 'weight')
layer_keys.remove(prefix + 'bias')
layer.set_weights(weight, bias)
if len(layer_keys) > 0:
raise ValueError('Got extra layer data: {}'.format(layer_keys))
def get_weight_list(self):
""" Get all the weights as a list
:returns:
A list of weight and bias arrays IN FORWARD PASS ORDER
"""
weights = []
for layer in self.layers:
weights.append(layer.weight)
weights.append(layer.bias)
return weights
def get_grad_list(self, deltas):
""" Get all the gradients as a list
:param deltas:
The deltas, IN FORWARD PASS ORDER
:returns:
The list of gradients
"""
assert len(deltas) == len(self.layers)
grads = []
for layer, delta in zip(self.layers, deltas):
d_weight, d_bias = layer.calc_grad(delta)
grads.append(d_weight)
grads.append(d_bias)
return grads
def set_weight_list(self, weights):
""" Set all the weights from a list
:param weights:
The list of weight and bias matricies as from get_weight_list
"""
assert len(weights) == len(self.layers) * 2
for i, w in enumerate(weights):
li = i // 2
layer = self.layers[li]
if i % 2 == 0:
layer.weight = w
else:
layer.bias = w
@classmethod
def load_model(cls, modelfile):
""" Load a model from the specficiation
:param modelfile:
The model spec, as written by save model
:returns:
A Model instance loaded from the file
"""
modelfile = pathlib.Path(modelfile)
with modelfile.open('rt') as fp:
data = json.load(fp)
# Build up the model layer by layer, just like normal
model = cls(name=data.get('name'),
input_size=data.get('input_size'))
for layer in data.get('layers', []):
model.add_layer(layers.from_dict(layer))
return model
def load_model(modelfile):
""" Load a model from a specification file
Top level helper constructor
:param modelfile:
The model spec, as written by save model
:returns:
A Model instance loaded from the file
"""
return Model.load_model(modelfile)
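# Minimal usage sketch (illustrative only): the optimizer is any object with
# an ``update(weights, grads) -> new_weights`` method, as consumed by
# ``Model.gradient_descent`` above, and ``layers.FullyConnected`` is the layer
# type shown in the Model docstring.
#
#     model = Model('8-3-8', input_size=8)
#     model.add_layer(layers.FullyConnected(3, func='sigmoid'))
#     model.add_layer(layers.FullyConnected(8, func='sigmoid'))
#     model.init_weights()
#     model.set_optimizer(opt)
#     for _ in range(n_epochs):
#         mse = model.gradient_descent(x_batch, y_batch)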
|
{
"content_hash": "0e3efdd621312697cf0a0002a87cf760",
"timestamp": "",
"source": "github",
"line_count": 279,
"max_line_length": 75,
"avg_line_length": 28.476702508960575,
"alnum_prop": 0.5443675267463813,
"repo_name": "david-joy/bmi203-final",
"id": "48fb98ddbf087d6f4ae231caf4e5d310c72fce40",
"size": "7945",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "final_project/model.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "67921"
}
],
"symlink_target": ""
}
|
"""
Place files in Dusty containers
Assets are files to be put in containers, but which don't live in a repository.
Assets are declared in Dusty specs of apps and libraries, and their values are
managed with the CLI.
Usage:
assets list [<app_or_lib>]
assets read <asset_key>
assets set <asset_key> <local_path>
assets unset <asset_key>
Commands:
list List all assets that are defined in specs for active apps and libs
read Print the current value of an asset
set Associate an asset with the contents of a local file
unset Delete the currently registered value of an asset
Examples:
To set the value of the asset GITHUB_KEY to the contents of ~/.ssh/id_rsa:
dusty assets set GITHUB_KEY ~/.ssh/id_rsa
"""
import os
import sys
from docopt import docopt
from ..payload import Payload
from ..commands import assets
from ..log import log_to_client
def main(argv):
args = docopt(__doc__, argv)
if args['list']:
if args['<app_or_lib>']:
assets.list_by_app_or_lib(args['<app_or_lib>'])
else:
assets.list_all()
elif args['read']:
assets.read_asset(args['<asset_key>'])
elif args['set']:
if not os.access(args['<local_path>'], os.R_OK):
log_to_client('Local path {} does not exist, or you don\'t have permission to access it'.format(args['<local_path>']))
sys.exit(1)
assets.set_asset(args['<asset_key>'], os.path.abspath(args['<local_path>']))
elif args['unset']:
assets.unset_asset(args['<asset_key>'])
|
{
"content_hash": "eac03476ce48a1aec9806c8f020ddb23",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 130,
"avg_line_length": 32.44897959183673,
"alnum_prop": 0.6415094339622641,
"repo_name": "gamechanger/dusty",
"id": "258f515cbebcacec4c8b8c01ecfb2f17088d131b",
"size": "1590",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dusty/cli/assets.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "845"
},
{
"name": "JavaScript",
"bytes": "1675"
},
{
"name": "Python",
"bytes": "493669"
},
{
"name": "Ruby",
"bytes": "769"
},
{
"name": "Shell",
"bytes": "3875"
}
],
"symlink_target": ""
}
|
import pyexcel
from rest_framework import viewsets, views, parsers, response, status
from bazango.contrib.organization.filters import OrganizationFilterSet
from .models import Organization, Category, OrganizationProfileProposedChange
from .serializers import (OrganizationSerializer, CategorySerializer, OrganizationProfileProposedChangeSerializer)
class OrganizationViewSet(viewsets.ModelViewSet):
queryset = Organization.objects.all()
serializer_class = OrganizationSerializer
search_fields = ('name', 'address', 'krs')
filter_class = OrganizationFilterSet
ordering_fields = ('name',)
class CategoryViewSet(viewsets.ModelViewSet):
queryset = Category.objects.all()
serializer_class = CategorySerializer
pagination_class = None
class OrganizationProfileProposedChangeViewSet(viewsets.ModelViewSet):
queryset = OrganizationProfileProposedChange.objects.all()
serializer_class = OrganizationProfileProposedChangeSerializer
class FileUploadView(views.APIView):
parser_classes = (parsers.FileUploadParser,)
def put(self, request, filename, format=None):
file_obj = request.data['file']
file_obj.save_to_database(Organization)
return response.Response(status=201)
|
{
"content_hash": "d458c54d46ddaeef4bd4dc2defce2806",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 114,
"avg_line_length": 36.529411764705884,
"alnum_prop": 0.784219001610306,
"repo_name": "rafal-jaworski/bazaNGObackend",
"id": "dcaae7c5be4738c1a976a721b83175c6ad142510",
"size": "1242",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/bazango/contrib/organization/views.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "73891"
},
{
"name": "JavaScript",
"bytes": "114523"
},
{
"name": "Python",
"bytes": "36785"
},
{
"name": "Shell",
"bytes": "200"
}
],
"symlink_target": ""
}
|
"""
File: abstract_operator.py
Author: Come Bertrand
Email: bertrand.cosme@gmail.com
Github: https://github.com/ComeBertrand
Description: Abstract operator.
"""
from enum import Enum
class OperatorType(Enum):
NEIGHBORHOOD = 'Neighborhood'
class AbstractOperator(object):
"""Abstract class describing an operator.
    An operator is responsible for generating new solutions from one or
    several base solutions.
Args:
move_range (MoveRange): Define the step range for the operator function.
Default is None in the case no MoveRange is required for the
operator.
Attributes:
op_type (OperatorType): type of the operator.
move_range (MoveRange): Define the step range for the operator function.
"""
op_type = None
def __init__(self, move_range=None):
self.move_range = move_range
def __call__(self, *solutions, step=None):
"""Generate new solutions from the given base solutions.
Args:
solutions (list[Solution]): Base solution that will be used to
generate the new solutions.
step (float): Normalized step given by the metaheuristic. Strictly
between 0.0 and 1.0. Default is None in case no step is required
by the operator.
Yield:
Solution: created from the given solutions.
Modifs: the modifications made on the solution to create the new
solution.
"""
raise NotImplementedError('Abstract Class')
def _convert_step(self, step):
"""Convert the normalized step in a value defined by the move range.
Args:
step (float): Normalized step given by the metaheuristic. Strictly
between 0.0 and 1.0.
Returns:
int or float: step as defined by the MoveRange or None if the
            operator has no move range defined.
"""
if self.move_range:
return self.move_range.convert(step)
return None
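# A minimal illustrative sketch of a concrete operator built on the contract
# documented above. It assumes solutions behave like 1-D numpy arrays and
# that the configured move_range converts the normalized step into a
# perturbation width; neither assumption comes from this module itself.
class _ExampleGaussianNeighborhood(AbstractOperator):
    """Yield slightly perturbed copies of each base solution."""
    op_type = OperatorType.NEIGHBORHOOD
    def __call__(self, *solutions, step=None):
        import numpy as np  # local import: numpy is not required by this module
        width = self._convert_step(step)
        if width is None:
            width = 1.0
        for solution in solutions:
            noise = np.random.normal(0.0, width, size=len(solution))
            # Yield the new solution together with the modifications made.
            yield solution + noise, {'noise': noise}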
|
{
"content_hash": "5020ee1f784d7e7d98237bd2c87f6218",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 80,
"avg_line_length": 29.695652173913043,
"alnum_prop": 0.6300634455832114,
"repo_name": "ComeBertrand/metabench",
"id": "0a16fdd61f1c1790499aca3338bbb901190849f3",
"size": "2049",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "metabench/operators/abstract_operator.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "167538"
}
],
"symlink_target": ""
}
|
from PyQt4.QtGui import QColor, QSizePolicy, QPalette, QPen, QFont
from PyQt4.QtCore import Qt, QRectF
import pyqtgraph as pg
import numpy as np
from Orange.data import Table, Domain
from Orange.data.variable import ContinuousVariable, StringVariable
from Orange.regression.linear import (RidgeRegressionLearner, PolynomialLearner,
LinearRegressionLearner, LinearModel)
from Orange.regression import Learner
from Orange.preprocess.preprocess import Preprocess
from Orange.widgets import settings, gui
from Orange.widgets.utils import itemmodels
from Orange.widgets.utils.owlearnerwidget import OWBaseLearner
from Orange.widgets.utils.sql import check_sql_input
from Orange.canvas import report
class OWUnivariateRegression(OWBaseLearner):
name = "Univariate Polynomial Regression"
description = "Univariate regression with polynomial expansion."
icon = "icons/UnivariateRegression.svg"
inputs = [("Learner", Learner, "set_learner")]
outputs = [("Coefficients", Table)]
LEARNER = PolynomialLearner
learner_name = settings.Setting("Univariate Regression")
polynomialexpansion = settings.Setting(1)
x_var_index = settings.ContextSetting(0)
y_var_index = settings.ContextSetting(1)
want_main_area = True
def add_main_layout(self):
self.data = None
self.preprocessors = None
self.learner = None
self.scatterplot_item = None
self.plot_item = None
self.x_label = 'x'
self.y_label = 'y'
box = gui.vBox(self.controlArea, "Variables")
self.x_var_model = itemmodels.VariableListModel()
self.comboBoxAttributesX = gui.comboBox(
box, self, value='x_var_index', label="Input: ",
orientation=Qt.Horizontal, callback=self.apply, contentsLength=12)
self.comboBoxAttributesX.setSizePolicy(
QSizePolicy.MinimumExpanding, QSizePolicy.Fixed)
self.comboBoxAttributesX.setModel(self.x_var_model)
gui.doubleSpin(
gui.indentedBox(box),
self, "polynomialexpansion", 0, 10,
label="Polynomial expansion:", callback=self.apply)
gui.separator(box, height=8)
self.y_var_model = itemmodels.VariableListModel()
self.comboBoxAttributesY = gui.comboBox(
box, self, value="y_var_index", label="Target: ",
orientation=Qt.Horizontal, callback=self.apply, contentsLength=12)
self.comboBoxAttributesY.setSizePolicy(
QSizePolicy.MinimumExpanding, QSizePolicy.Fixed)
self.comboBoxAttributesY.setModel(self.y_var_model)
gui.rubber(self.controlArea)
# main area GUI
self.plotview = pg.PlotWidget(background="w")
self.plot = self.plotview.getPlotItem()
axis_color = self.palette().color(QPalette.Text)
axis_pen = QPen(axis_color)
tickfont = QFont(self.font())
tickfont.setPixelSize(max(int(tickfont.pixelSize() * 2 // 3), 11))
axis = self.plot.getAxis("bottom")
axis.setLabel(self.x_label)
axis.setPen(axis_pen)
axis.setTickFont(tickfont)
axis = self.plot.getAxis("left")
axis.setLabel(self.y_label)
axis.setPen(axis_pen)
axis.setTickFont(tickfont)
self.plot.setRange(xRange=(0.0, 1.0), yRange=(0.0, 1.0),
disableAutoRange=True)
self.mainArea.layout().addWidget(self.plotview)
def send_report(self):
if self.data is None:
return
caption = report.render_items_vert((
("Polynomial Expansion: ", self.polynomialexpansion),
))
self.report_plot(self.plot)
if caption:
self.report_caption(caption)
def clear(self):
self.data = None
self.clear_plot()
def clear_plot(self):
if self.plot_item is not None:
self.plot_item.setParentItem(None)
self.plotview.removeItem(self.plot_item)
self.plot_item = None
if self.scatterplot_item is not None:
self.scatterplot_item.setParentItem(None)
self.plotview.removeItem(self.scatterplot_item)
self.scatterplot_item = None
self.plotview.clear()
@check_sql_input
def set_data(self, data):
self.clear()
self.data = data
if data is not None:
cvars = [var for var in data.domain.variables if var.is_continuous]
class_cvars = [var for var in data.domain.class_vars if var.is_continuous]
self.x_var_model[:] = cvars
self.y_var_model[:] = cvars
nvars = len(cvars)
nclass = len(class_cvars)
self.x_var_index = min(max(0, self.x_var_index), nvars - 1)
if nclass > 0:
self.y_var_index = min(max(0, nvars-nclass), nvars - 1)
else:
self.y_var_index = min(max(0, nvars-1), nvars - 1)
def set_learner(self, learner):
self.learner = learner
def handleNewSignals(self):
self.apply()
def plot_scatter_points(self, x_data, y_data):
if self.scatterplot_item:
self.plotview.removeItem(self.scatterplot_item)
self.n_points = len(x_data)
self.scatterplot_item = pg.ScatterPlotItem(
x=x_data, y=y_data, data=np.arange(self.n_points),
symbol="o", size=10, pen=pg.mkPen(0.2), brush=pg.mkBrush(0.7),
antialias=True)
self.scatterplot_item.opts["useCache"] = False
self.plotview.addItem(self.scatterplot_item)
self.plotview.replot()
def set_range(self, x_data, y_data):
min_x, max_x = np.nanmin(x_data), np.nanmax(x_data)
min_y, max_y = np.nanmin(y_data), np.nanmax(y_data)
self.plotview.setRange(
QRectF(min_x, min_y, max_x - min_x, max_y - min_y),
padding=0.025)
self.plotview.replot()
def plot_regression_line(self, x_data, y_data):
if self.plot_item:
self.plotview.removeItem(self.plot_item)
self.plot_item = pg.PlotCurveItem(
x=x_data, y=y_data,
pen=pg.mkPen(QColor(255, 0, 0), width=3),
antialias=True
)
self.plotview.addItem(self.plot_item)
self.plotview.replot()
def apply(self):
learner = self.learner
predictor = None
if self.data is not None:
degree = int(self.polynomialexpansion)
learner = self.LEARNER(preprocessors=self.preprocessors,
degree=degree,
learner=LinearRegressionLearner() if self.learner is None
else learner)
attributes = self.x_var_model[self.x_var_index]
class_var = self.y_var_model[self.y_var_index]
data_table = Table(Domain([attributes], class_vars=[class_var]), self.data)
learner.name = self.learner_name
predictor = learner(data_table)
preprocessed_data = data_table
if self.preprocessors is not None:
for preprocessor in self.preprocessors:
preprocessed_data = preprocessor(preprocessed_data)
x = preprocessed_data.X.ravel()
y = preprocessed_data.Y.ravel()
linspace = np.linspace(min(x), max(x), 1000).reshape(-1,1)
values = predictor(linspace, predictor.Value)
self.plot_scatter_points(x, y)
self.plot_regression_line(linspace.ravel(), values.ravel())
x_label = self.x_var_model[self.x_var_index]
axis = self.plot.getAxis("bottom")
axis.setLabel(x_label)
y_label = self.y_var_model[self.y_var_index]
axis = self.plot.getAxis("left")
axis.setLabel(y_label)
self.set_range(x, y)
self.send("Learner", learner)
self.send("Predictor", predictor)
        # Send model coefficients
model = None
if predictor is not None:
model = predictor.model
if hasattr(model, "model"):
model = model.model
elif hasattr(model, "skl_model"):
model = model.skl_model
if model is not None and hasattr(model, "coef_"):
domain = Domain([ContinuousVariable("coef", number_of_decimals=7)],
metas=[StringVariable("name")])
coefs = [model.intercept_ + model.coef_[0]] + list(model.coef_[1:])
names = ["1", x_label] + \
["{}^{}".format(x_label, i) for i in range(2, degree + 1)]
coef_table = Table(domain, list(zip(coefs, names)))
self.send("Coefficients", coef_table)
else:
self.send("Coefficients", None)
if __name__ == "__main__":
import sys
from PyQt4.QtGui import QApplication
a = QApplication(sys.argv)
ow = OWUnivariateRegression()
learner = RidgeRegressionLearner(alpha=1.0)
polylearner = PolynomialLearner(learner, degree=2)
d = Table('iris')
ow.set_data(d)
ow.set_learner(learner)
ow.show()
a.exec_()
ow.saveSettings()
|
{
"content_hash": "9c958bfb8e93ac0120f0b74cb71c2960",
"timestamp": "",
"source": "github",
"line_count": 270,
"max_line_length": 92,
"avg_line_length": 34.18518518518518,
"alnum_prop": 0.6010834236186349,
"repo_name": "qPCR4vir/orange3",
"id": "99d330217db7b76b4bd4b5b59288986e4360d719",
"size": "9230",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Orange/widgets/regression/owunivariateregression.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C",
"bytes": "20412"
},
{
"name": "C++",
"bytes": "1992"
},
{
"name": "GLSL",
"bytes": "75"
},
{
"name": "HTML",
"bytes": "3503"
},
{
"name": "JavaScript",
"bytes": "12007"
},
{
"name": "Jupyter Notebook",
"bytes": "6662"
},
{
"name": "NSIS",
"bytes": "20281"
},
{
"name": "Python",
"bytes": "4205054"
},
{
"name": "Shell",
"bytes": "48335"
}
],
"symlink_target": ""
}
|
from starcheck.parser import *
|
{
"content_hash": "29ce0dc5310715a7b176f3e9cd8e515f",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 30,
"avg_line_length": 31,
"alnum_prop": 0.8064516129032258,
"repo_name": "sot/mica",
"id": "c5b773b97442d64961773de3c2c8085a3c047fa8",
"size": "209",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mica/starcheck/starcheck_parser.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "34453"
},
{
"name": "Jupyter Notebook",
"bytes": "13882"
},
{
"name": "Python",
"bytes": "489250"
}
],
"symlink_target": ""
}
|
from swgpy.object import *
def create(kernel):
result = Tangible()
result.template = "object/tangible/lair/base/shared_lair_base_den_earthen_light.iff"
result.attribute_template_id = -1
result.stfName("lair_n","generic_den")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
|
{
"content_hash": "57e9e84075a254d478cf30d3b7d2a705",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 85,
"avg_line_length": 24.153846153846153,
"alnum_prop": 0.6942675159235668,
"repo_name": "anhstudios/swganh",
"id": "2a94f3c1850aad1385516d74925e22c2bcc12a11",
"size": "459",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "data/scripts/templates/object/tangible/lair/base/shared_lair_base_den_earthen_light.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11887"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2357839"
},
{
"name": "CMake",
"bytes": "41264"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7503510"
},
{
"name": "SQLPL",
"bytes": "42770"
}
],
"symlink_target": ""
}
|
import re
from datetime import datetime, timedelta
from icalendar import Event, Alarm
class Course:
def __init__(self, name, occur_time_str, teacher, course_id, credit, location, office_time_str, office):
self.__occur_time_str = occur_time_str
self.name = name
self.location = location
self.description = "Teacher: " + teacher + \
"\nCourse ID: " + course_id + \
"\nCredit: " + credit + \
"\nOffice Time: " + office_time_str + \
"\nOffice: " + office
print("Course %s initialized: occur time: %s, location: %s, description: %s"
% (name, occur_time_str, location, self.description))
def __get_occur_weeks(self):
weeks_match = re.search(pattern=r'\((.+?)周(.*?)\)', string=self.__occur_time_str)
if weeks_match:
weeks_str = weeks_match.group(1)
weeks = re.findall(pattern=r'[0-9]+', string=weeks_str)
if '-' in weeks_str:
start_week = int(weeks[0])
end_week = int(weeks[1])
return list(range(start_week, end_week + 1))
elif ',' in weeks_str or '第' in weeks_str:
return [int(i) for i in weeks]
else:
return list(range(1, 11))
def __get_occur_time_list(self, weekday_table, course_time_table):
occur_time_list = []
for split_time_string in self.__occur_time_str.split():
weekday_chinese_str = split_time_string[0]
try:
weekday = weekday_table[weekday_chinese_str]
except KeyError:
continue
occur_indexes = re.findall(pattern=r'[0-9]+', string=split_time_string)
start_index = int(occur_indexes[0]) - 1
end_index = int(occur_indexes[1]) - 1
occur_time_list_in_a_day = [datetime.combine(date=weekday, time=course_time_table[i])
for i in range(start_index, end_index + 1)]
occur_time_list.append(occur_time_list_in_a_day)
return occur_time_list
def get_events(self, weekday_table, course_time_table):
        # Create an alarm that will notify the user 25 minutes before the occurrence time.
alarm = Alarm()
alarm.add(name='action', value='DISPLAY')
alarm.add(name='trigger', value=timedelta(minutes=-25))
alarm.add(name='description', value='Event reminder')
occur_weeks = self.__get_occur_weeks()
occur_time_list = self.__get_occur_time_list(weekday_table=weekday_table, course_time_table=course_time_table)
print("Occur weeks %s generated for %s" % (str(occur_weeks), self.__occur_time_str))
print("Occur time list %s generated for %s" %
(str([str([occur_time.strftime('%Y-%m-%d %H:%M') for occur_time in i]) for i in occur_time_list]),
self.__occur_time_str))
events = []
for occur_time_list_in_a_day in occur_time_list:
for occur_time in occur_time_list_in_a_day:
event = Event()
event.add(name='summary', value=self.name)
event.add(name='dtstart', value=occur_time + timedelta(weeks=occur_weeks[0] - 1))
event.add(name='duration', value=timedelta(minutes=45))
event.add(name='dtend', value=occur_time + timedelta(weeks=occur_weeks[0] - 1, minutes=45))
event.add(name='location', value=self.location)
event.add(name='description', value=self.description)
if len(occur_weeks) > 1:
interval = occur_weeks[1] - occur_weeks[0]
repeat_rule = {"freq": "weekly", "count": len(occur_weeks), "interval": interval}
event.add(name='rrule', value=repeat_rule)
if occur_time == occur_time_list_in_a_day[0]:
event.add_component(alarm)
events.append(event)
print("%d events generated for Course %s\n" % (len(events), self.name))
return events
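# Illustrative usage sketch (not part of the original module). The shapes of
# weekday_table (Chinese weekday character -> datetime.date of that weekday in
# week 1) and course_time_table (period index -> datetime.time) are inferred
# from __get_occur_time_list above; the course data below is made up.
if __name__ == '__main__':
    from datetime import date, time
    example_weekdays = {'一': date(2017, 9, 11), '二': date(2017, 9, 12)}
    example_periods = {0: time(8, 0), 1: time(8, 55), 2: time(10, 0)}
    course = Course(name='Example Course', occur_time_str='一1-2 (1-10周)',
                    teacher='T. Teacher', course_id='0000', credit='3',
                    location='Room 101', office_time_str='N/A', office='N/A')
    for event in course.get_events(example_weekdays, example_periods):
        print(event['summary'], event['dtstart'].dt)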
|
{
"content_hash": "ede3240c35c4dd234417e02571411681",
"timestamp": "",
"source": "github",
"line_count": 99,
"max_line_length": 118,
"avg_line_length": 41.77777777777778,
"alnum_prop": 0.5529497098646035,
"repo_name": "JeromeTan1997/SHUScheduleGenerator",
"id": "69cbe9060d9cead2c36239385d4448cf4590d28f",
"size": "4163",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "course.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "11671"
}
],
"symlink_target": ""
}
|
import pprint
import tempfile
from driver_pete_python_sandbox.download import S3
from driver_pete_python_sandbox.filter_gps import compute_velocities, \
remove_duplicate_points, \
extract_delta_time, extract_delta_dist, remove_stationary_points
from driver_pete_python_sandbox.filter_gps_processor import apply_filter, \
DuplicateTimeFilter, VelocityOutliersFilter, filter_gps_data
from driver_pete_python_sandbox.gmaps import trajectory_point_to_str
from driver_pete_python_sandbox.trajectory_reader import read_compressed_trajectory
import numpy as np
from driver_pete_python_sandbox.utilities import distance, ms_to_mph
def _get_test_data():
folder = tempfile.mkdtemp()
s3 = S3('driverpete-storage')
filename = s3.download("_testing/testing_raw_0", folder)
data = read_compressed_trajectory(filename)
# add few time duplicates
data[54] = data[53]
data[100] = data[99]
# add few distance duplicates
data[23][1:] = data[22][1:]
data[40][1:] = data[39][1:]
data[60][1:] = data[59][1:]
return data
def test_remove_duplicate_readings():
data = _get_test_data()
velocities = compute_velocities(data)
number_of_duplicates = np.sum(np.isnan(velocities))
fixed_data = remove_duplicate_points(data)
fixed_velocities = compute_velocities(fixed_data)
assert(np.sum(np.isnan(fixed_velocities)) == 0)
# check that we deleted duplicates only
print(data.shape[0])
print(fixed_data.shape[0])
print(number_of_duplicates)
assert(fixed_data.shape[0] == data.shape[0] - number_of_duplicates)
print(fixed_data.shape[0], data.shape[0])
fixed_data_processor = apply_filter(data, DuplicateTimeFilter())
assert((fixed_data == fixed_data_processor).all())
def test_remove_outliers():
np.set_printoptions(suppress=True)
data = remove_stationary_points(remove_duplicate_points(_get_test_data()))
velocity_threshold = 85.
fixed_data = apply_filter(data, VelocityOutliersFilter(velocity_threshold))
# check that data has outliers in velocity and distance
velocities = compute_velocities(data)
outliers = np.where(velocities*ms_to_mph > 85)[0]
assert(len(outliers)>0)
assert(np.amax(extract_delta_dist(data)) > 157900)
# no large velocities left
velocities = compute_velocities(fixed_data)
assert(np.amax(velocities)*ms_to_mph < velocity_threshold)
assert(np.amax(extract_delta_dist(fixed_data)) < 330)
# we expect this number of point to be removed
print(data.shape[0] - fixed_data.shape[0])
assert(data.shape[0] - fixed_data.shape[0] == 5)
def test_remove_stationary_noise():
'''
    The data has a large amount of noise - switching between SD and LA every 10 seconds.
    It starts from SD, then noise, and later it returns to SD. We test that LA is ignored.
'''
data = remove_duplicate_points(_get_test_data())[561:576]
fixed_data = apply_filter(data, VelocityOutliersFilter())
print(len(fixed_data))
assert(len(fixed_data) == 11)
stationary_point = [0, 33.004964, -117.060207]
distances = np.array([distance(stationary_point, d)
for d in fixed_data])
assert((distances < 246.6).all())
def test_remove_stationary_noise_return_to_stable():
'''
    The data has a large amount of noise - switching between SD and LA every 10 seconds.
    It starts from the noisy point, and later it returns to SD.
    Here we test that even if the data starts with a noisy value, we still converge
    to the stable point.
'''
data = remove_duplicate_points(_get_test_data())[563:576]
fixed_data = apply_filter(data, VelocityOutliersFilter(85))
stationary_point = [0, 33.004964, -117.060207]
distances = np.array([distance(stationary_point, d)
for d in fixed_data])
print(fixed_data)
assert(len(fixed_data) == 7)
# filter converged after few steps
assert((distances[:4] > 157000).all())
assert((distances[4:] < 246.6).all())
def test_filter_gps():
original_data = _get_test_data()
assert(len(original_data) == 793)
data = filter_gps_data(original_data)
print(len(data))
assert(len(data) == 780)
print(len(original_data)-len(data))
assert(len(original_data)-len(data) == 13)
def test_velocity_filter_decay():
'''
    Sometimes points between outliers are removed by other filters, so VelocityOutliersFilter
    starts to compare outliers with earlier and earlier points, which makes the velocity go
    down until they are no longer considered outliers.
'''
timestamps = np.array([
735856.225625, 735856.22609954, 735856.2265625,
735856.22701389, 735856.25675926, 735856.2572338,
735856.25731481, 735856.41100694])
coords = np.array([
[3.30049220e+01, -1.17060744e+02],
[3.29771600e+01, -1.17078251e+02],
[3.29771580e+01, -1.17078254e+02],
[3.30049220e+01, -1.17060744e+02],
[3.30049220e+01, -1.17060744e+02],
[3.29771590e+01, -1.17078258e+02],
[3.30049230e+01, -1.17060575e+02],
[3.30049410e+01, -1.17060618e+02]])
data = np.hstack([timestamps[:, None], coords])
filtered_coords = filter_gps_data(data)[:, 1:]
print(filtered_coords)
np.testing.assert_array_almost_equal(
filtered_coords,
[[3.30049220e+01, -1.17060744e+02],
[3.30049220e+01, -1.17060744e+02],
[3.30049220e+01, -1.17060744e+02],
[3.30049230e+01, -1.17060575e+02],
[3.30049410e+01, -1.17060618e+02]]
)
if __name__ == '__main__':
test_remove_duplicate_readings()
test_remove_outliers()
test_remove_stationary_noise()
test_remove_stationary_noise_return_to_stable()
test_filter_gps()
test_velocity_filter_decay()
|
{
"content_hash": "4b51420e46c34121a93d84ae657a5caf",
"timestamp": "",
"source": "github",
"line_count": 164,
"max_line_length": 93,
"avg_line_length": 35.646341463414636,
"alnum_prop": 0.664385904892234,
"repo_name": "driver-pete/driver-pete-python-sandbox",
"id": "d9c9fee16fb117e7d242846430ebba100f9d79ad",
"size": "6291",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "driver_pete_python_sandbox/test_filter_gps.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "52524"
},
{
"name": "Shell",
"bytes": "1623"
}
],
"symlink_target": ""
}
|
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import logging
import textwrap
import pytest
from pants.backend.python.tasks.checkstyle.checker import PythonCheckStyleTask
from pants.backend.python.tasks.checkstyle.file_excluder import FileExcluder
from pants_test.backend.python.tasks.python_task_test_base import PythonTaskTestBase
logger = logging.getLogger(__name__)
class TestExcluder(PythonTaskTestBase):
  @classmethod
  def task_type(cls):
"""Required method"""
return PythonCheckStyleTask
def setUp(self, *args, **kwargs):
super(TestExcluder, self).setUp(*args, **kwargs)
excludes_text = textwrap.dedent('''
# ignore C++
.*\.cpp::.*
# ignore python
.*\.py::Flake8''')
self.excluder = FileExcluder(
self._create_scalastyle_excludes_file([excludes_text]),
logger)
def _create_scalastyle_excludes_file(self, exclude_patterns=None):
return self.create_file(
relpath='scalastyle_excludes.txt',
contents='\n'.join(exclude_patterns) if exclude_patterns else '')
def test_excludes_cpp_any(self):
assert not self.excluder.should_include('test/file.cpp', '.*')
def test_excludes_cpp_flake8(self):
assert not self.excluder.should_include('test/file.cpp', 'Flake8')
def test_excludes_python_flake8(self):
assert not self.excluder.should_include('test/file.py', 'Flake8')
def test_excludes_python_trailingws(self):
assert self.excluder.should_include('test/file.py', 'TrailingWhiteSpace')
|
{
"content_hash": "20b0530af855d65d1e0032f2a7aed13c",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 93,
"avg_line_length": 33.0625,
"alnum_prop": 0.7107750472589792,
"repo_name": "areitz/pants",
"id": "706f962ec7f6d5ba18f9c44d7849a0f4e462c8ca",
"size": "1734",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/python/pants_test/backend/python/tasks/checkstyle/test_file_excluder.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "767"
},
{
"name": "CSS",
"bytes": "11139"
},
{
"name": "GAP",
"bytes": "2459"
},
{
"name": "Go",
"bytes": "1437"
},
{
"name": "HTML",
"bytes": "68162"
},
{
"name": "Java",
"bytes": "291340"
},
{
"name": "JavaScript",
"bytes": "10157"
},
{
"name": "Protocol Buffer",
"bytes": "3783"
},
{
"name": "Python",
"bytes": "3548183"
},
{
"name": "Scala",
"bytes": "76015"
},
{
"name": "Shell",
"bytes": "48115"
},
{
"name": "Thrift",
"bytes": "2583"
}
],
"symlink_target": ""
}
|
__author__ = "Kiran Vemuri"
__email__ = "kkvemuri@uh.edu"
__status__ = "Development"
__maintainer__ = "Kiran Vemuri"
import logging
class Log:
"""
Class to facilitate logging
"""
def __init__(self, logfile):
"""
        :param logfile(str): file path to the log file to which logs are written
"""
self.logfile = logfile
def log_handler(self):
"""
Method to create a log handler for the specified log file
        :return: log handler
"""
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
handler = logging.FileHandler(self.logfile)
handler.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)
return logger
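# Illustrative usage sketch (not part of the original module); the log file
# path below is a made-up example value.
if __name__ == '__main__':
    example_logger = Log('example.log').log_handler()
    example_logger.info('Log handler configured; messages go to example.log')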
|
{
"content_hash": "1a06782660da08760b9d77b0cc9fc029",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 93,
"avg_line_length": 25.34285714285714,
"alnum_prop": 0.5952649379932357,
"repo_name": "DreamForgeContrive/tinyblox",
"id": "5c132cc32742b849736dc7b3ded084b7160dd1c9",
"size": "887",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tinyblox/logx.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "98041"
}
],
"symlink_target": ""
}
|
"""Trace a subunit stream in reasonable detail and high accuracy."""
import argparse
import datetime
import functools
import os
import re
import sys
import subunit
import testtools
# NOTE(mtreinish) on python3 anydbm was renamed dbm and the python2 dbm module
# was renamed to dbm.ndbm, this block takes that into account
try:
import anydbm as dbm
except ImportError:
import dbm
DAY_SECONDS = 60 * 60 * 24
FAILS = []
RESULTS = {}
def total_seconds(timedelta):
# NOTE(mtreinish): This method is built-in to the timedelta class in
# python >= 2.7; it is here to enable its use on older versions
return ((timedelta.days * DAY_SECONDS + timedelta.seconds) * 10 ** 6 +
timedelta.microseconds) / 10 ** 6
def cleanup_test_name(name, strip_tags=True, strip_scenarios=False):
"""Clean up the test name for display.
By default we strip out the tags in the test because they don't help us
    in identifying the test that is run to its result.
    Make it possible to strip out the testscenarios information (not to
    be confused with tempest scenarios); however, that's often needed to
    identify generated negative tests.
"""
if strip_tags:
tags_start = name.find('[')
tags_end = name.find(']')
if tags_start > 0 and tags_end > tags_start:
newname = name[:tags_start]
newname += name[tags_end + 1:]
name = newname
if strip_scenarios:
tags_start = name.find('(')
tags_end = name.find(')')
if tags_start > 0 and tags_end > tags_start:
newname = name[:tags_start]
newname += name[tags_end + 1:]
name = newname
return name
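# Illustrative examples for cleanup_test_name (added comments, not part of the
# original module); the test ids below are made-up values:
#   cleanup_test_name("tempest.api.FooTest.test_bar[id-1234,smoke]")
#       -> "tempest.api.FooTest.test_bar"
#   cleanup_test_name("suite.Test.test_neg(scenario-1)", strip_scenarios=True)
#       -> "suite.Test.test_neg"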
def get_duration(timestamps):
start, end = timestamps
if not start or not end:
duration = ''
else:
delta = end - start
duration = '%d.%06ds' % (
delta.days * DAY_SECONDS + delta.seconds, delta.microseconds)
return duration
def find_worker(test):
"""Get the worker number.
If there are no workers because we aren't in a concurrent environment,
assume the worker number is 0.
"""
for tag in test['tags']:
if tag.startswith('worker-'):
return int(tag[7:])
return 0
# Print out stdout/stderr if it exists, always
def print_attachments(stream, test, all_channels=False):
"""Print out subunit attachments.
Print out subunit attachments that contain content. This
runs in 2 modes, one for successes where we print out just stdout
and stderr, and an override that dumps all the attachments.
"""
channels = ('stdout', 'stderr')
for name, detail in test['details'].items():
# NOTE(sdague): the subunit names are a little crazy, and actually
# are in the form pythonlogging:'' (with the colon and quotes)
name = name.split(':')[0]
if detail.content_type.type == 'test':
detail.content_type.type = 'text'
if (all_channels or name in channels) and detail.as_text():
title = "Captured %s:" % name
stream.write("\n%s\n%s\n" % (title, ('~' * len(title))))
# indent attachment lines 4 spaces to make them visually
# offset
for line in detail.as_text().split('\n'):
line = line.encode('utf8')
stream.write(" %s\n" % line)
def find_test_run_time_diff(test_id, run_time):
times_db_path = os.path.join(os.path.join(os.getcwd(), '.testrepository'),
'times.dbm')
if os.path.isfile(times_db_path):
try:
test_times = dbm.open(times_db_path)
except Exception:
return False
try:
avg_runtime = float(test_times.get(str(test_id), False))
except Exception:
try:
avg_runtime = float(test_times[str(test_id)])
except Exception:
avg_runtime = False
if avg_runtime and avg_runtime > 0:
run_time = float(run_time.rstrip('s'))
perc_diff = ((run_time - avg_runtime) / avg_runtime) * 100
return perc_diff
return False
def show_outcome(stream, test, print_failures=False, failonly=False,
enable_diff=False, threshold='0', abbreviate=False):
global RESULTS
status = test['status']
# TODO(sdague): ask lifeless why on this?
if status == 'exists':
return
worker = find_worker(test)
name = cleanup_test_name(test['id'])
duration = get_duration(test['timestamps'])
if worker not in RESULTS:
RESULTS[worker] = []
RESULTS[worker].append(test)
# don't count the end of the return code as a fail
if name == 'process-returncode':
return
if status == 'fail':
FAILS.append(test)
if abbreviate:
stream.write('F')
else:
stream.write('{%s} %s [%s] ... FAILED\n' % (
worker, name, duration))
if not print_failures:
print_attachments(stream, test, all_channels=True)
elif not failonly:
if status == 'success':
if abbreviate:
stream.write('.')
else:
out_string = '{%s} %s [%s' % (worker, name, duration)
perc_diff = find_test_run_time_diff(test['id'], duration)
if enable_diff:
if perc_diff and abs(perc_diff) >= abs(float(threshold)):
if perc_diff > 0:
out_string = out_string + ' +%.2f%%' % perc_diff
else:
out_string = out_string + ' %.2f%%' % perc_diff
stream.write(out_string + '] ... ok\n')
print_attachments(stream, test)
elif status == 'skip':
if abbreviate:
stream.write('S')
else:
reason = test['details'].get('reason', '')
if reason:
reason = ': ' + reason.as_text()
stream.write('{%s} %s ... SKIPPED%s\n' % (
worker, name, reason))
else:
if abbreviate:
stream.write('%s' % test['status'][0])
else:
stream.write('{%s} %s [%s] ... %s\n' % (
worker, name, duration, test['status']))
if not print_failures:
print_attachments(stream, test, all_channels=True)
stream.flush()
def print_fails(stream):
"""Print summary failure report.
Currently unused, however there remains debate on inline vs. at end
reporting, so leave the utility function for later use.
"""
if not FAILS:
return
stream.write("\n==============================\n")
stream.write("Failed %s tests - output below:" % len(FAILS))
stream.write("\n==============================\n")
for f in FAILS:
stream.write("\n%s\n" % f['id'])
stream.write("%s\n" % ('-' * len(f['id'])))
print_attachments(stream, f, all_channels=True)
stream.write('\n')
def count_tests(key, value):
count = 0
for k, v in RESULTS.items():
for item in v:
if key in item:
if re.search(value, item[key]):
count += 1
return count
def run_time():
runtime = 0.0
for k, v in RESULTS.items():
for test in v:
test_dur = get_duration(test['timestamps']).strip('s')
# NOTE(toabctl): get_duration() can return an empty string
# which leads to a ValueError when casting to float
if test_dur:
runtime += float(test_dur)
return runtime
def worker_stats(worker):
tests = RESULTS[worker]
num_tests = len(tests)
stop_time = tests[-1]['timestamps'][1]
start_time = tests[0]['timestamps'][0]
if not start_time or not stop_time:
delta = 'N/A'
else:
delta = stop_time - start_time
return num_tests, str(delta)
def print_summary(stream, elapsed_time):
stream.write("\n======\nTotals\n======\n")
stream.write("Ran: %s tests in %.4f sec.\n" % (
count_tests('status', '.*'), total_seconds(elapsed_time)))
stream.write(" - Passed: %s\n" % count_tests('status', '^success$'))
stream.write(" - Skipped: %s\n" % count_tests('status', '^skip$'))
stream.write(" - Expected Fail: %s\n" % count_tests('status', '^xfail$'))
stream.write(" - Unexpected Success: %s\n" % count_tests('status',
'^uxsuccess$'))
stream.write(" - Failed: %s\n" % count_tests('status', '^fail$'))
stream.write("Sum of execute time for each test: %.4f sec.\n" % run_time())
# we could have no results, especially as we filter out the process-codes
if RESULTS:
stream.write("\n==============\nWorker Balance\n==============\n")
for w in range(max(RESULTS.keys()) + 1):
if w not in RESULTS:
stream.write(
" - WARNING: missing Worker %s! "
"Race in testr accounting.\n" % w)
else:
num, time = worker_stats(w)
out_str = " - Worker %s (%s tests) => %s" % (w, num, time)
if time.isdigit():
out_str += 's'
out_str += '\n'
stream.write(out_str)
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--no-failure-debug', '-n', action='store_true',
dest='print_failures', help='Disable printing failure '
'debug information in realtime')
parser.add_argument('--fails', '-f', action='store_true',
dest='post_fails', help='Print failure debug '
                        'information after the stream is processed')
parser.add_argument('--failonly', action='store_true',
dest='failonly', help="Don't print success items",
default=(
os.environ.get('TRACE_FAILONLY', False)
is not False))
parser.add_argument('--abbreviate', '-a', action='store_true',
                        dest='abbreviate', help='Print one character status '
'for each test')
parser.add_argument('--perc-diff', '-d', action='store_true',
dest='enable_diff',
help="Print percent change in run time on each test ")
parser.add_argument('--diff-threshold', '-t', dest='threshold',
help="Threshold to use for displaying percent change "
"from the avg run time. If one is not specified "
"the percent change will always be displayed")
parser.add_argument('--no-summary', action='store_true',
help="Don't print the summary of the test run after "
" completes")
return parser.parse_args()
def main():
args = parse_args()
stream = subunit.ByteStreamToStreamResult(
sys.stdin, non_subunit_name='stdout')
outcomes = testtools.StreamToDict(
functools.partial(show_outcome, sys.stdout,
print_failures=args.print_failures,
failonly=args.failonly,
enable_diff=args.enable_diff,
abbreviate=args.abbreviate))
summary = testtools.StreamSummary()
result = testtools.CopyStreamResult([outcomes, summary])
result = testtools.StreamResultRouter(result)
cat = subunit.test_results.CatFiles(sys.stdout)
result.add_rule(cat, 'test_id', test_id=None)
start_time = datetime.datetime.utcnow()
result.startTestRun()
try:
stream.run(result)
finally:
result.stopTestRun()
stop_time = datetime.datetime.utcnow()
elapsed_time = stop_time - start_time
if count_tests('status', '.*') == 0:
print("The test run didn't actually run any tests")
exit(1)
if args.post_fails:
print_fails(sys.stdout)
if not args.no_summary:
print_summary(sys.stdout, elapsed_time)
exit(0 if summary.wasSuccessful() else 1)
if __name__ == '__main__':
main()
|
{
"content_hash": "931fde72bf0a98efdd5e46042f2e3648",
"timestamp": "",
"source": "github",
"line_count": 343,
"max_line_length": 79,
"avg_line_length": 36.08454810495627,
"alnum_prop": 0.5481942312353559,
"repo_name": "dims/os-testr",
"id": "315d850142e0863acc61ab36c12a29d2129bdb4a",
"size": "13066",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "os_testr/subunit_trace.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "66792"
}
],
"symlink_target": ""
}
|
import ast
import os
from collections import namedtuple
from eg import substitute
# Support Python 2 and 3.
try:
import ConfigParser
except ImportError:
from configparser import ConfigParser
# The directory containing example files, relative to the eg executable. The
# directory structure is assumed to be:
# eg.py*
# eg_util.py
# examples/
# |- cp.md, etc
DEFAULT_EXAMPLES_DIR = os.path.join(os.path.dirname(__file__), 'examples')
DEFAULT_EGRC_PATH = os.path.join('~', '.egrc')
DEFAULT_USE_COLOR = True
# We're using less -R to support color on Unix machines, which by default don't
# let their output from less be colorized. Other options:
# -M: show line number information in the bottom of screen (current/pages X%)
# -F: automatically quit less if the entire example fits on the first screen
# -X: do not use init/deinit strings; in other words: do not clear the screen
# -K: exit less in response to Ctrl-C
DEFAULT_PAGER_CMD = 'less -RMFXK'
DEFAULT_SQUEEZE = False
# We need this just because the ConfigParser library requires it.
DEFAULT_SECTION = 'eg-config'
# Properties in the rc file.
EG_EXAMPLES_DIR = 'examples-dir'
CUSTOM_EXAMPLES_DIR = 'custom-dir'
USE_COLOR = 'color'
PAGER_CMD = 'pager-cmd'
SQUEEZE = 'squeeze'
# A basic struct containing configuration values.
# examples_dir: path to the directory of examples that ship with eg
# custom_dir: path to the directory where custom examples are found
# use_color: True if we should colorize output, else False
# color_config: the config object specifying which colors to use
# pager_cmd: the command to use to page output
# squeeze: True if we should remove blank lines, else false
# subs: a list of Substitution objects to apply to the output
Config = namedtuple(
'Config',
[
'examples_dir',
'custom_dir',
'use_color',
'color_config',
'pager_cmd',
'squeeze',
'subs',
]
)
# A struct with color values
ColorConfig = namedtuple(
'ColorConfig',
[
'pound',
'heading',
'code',
'backticks',
'prompt',
'pound_reset',
'heading_reset',
'code_reset',
'backticks_reset',
'prompt_reset'
]
)
# Default colors. These are intentionally simple to try and accommodate more
# terminals. This is mostly envisioned as a unix tool, so we're not going to
# worry about windows output.
_BRIGHT = '\x1b[1m'
_BLACK = '\x1b[30m'
_RED = '\x1b[31m'
_CYAN = '\x1b[36m'
_GREEN = '\x1b[32m'
_BLUE = '\x1b[34m'
_RESET_ALL = '\x1b[0m'
DEFAULT_COLOR_POUND = _BLACK + _BRIGHT
DEFAULT_COLOR_HEADING = _RED + _BRIGHT
DEFAULT_COLOR_PROMPT = _CYAN + _BRIGHT
DEFAULT_COLOR_CODE = _GREEN + _BRIGHT
DEFAULT_COLOR_BACKTICKS = _BLUE + _BRIGHT
DEFAULT_COLOR_POUND_RESET = _RESET_ALL
DEFAULT_COLOR_HEADING_RESET = _RESET_ALL
DEFAULT_COLOR_PROMPT_RESET = _RESET_ALL
DEFAULT_COLOR_CODE_RESET = _RESET_ALL
DEFAULT_COLOR_BACKTICKS_RESET = _RESET_ALL
CONFIG_NAMES = ColorConfig(
pound='pound',
heading='heading',
code='code',
backticks='backticks',
prompt='prompt',
pound_reset='pound_reset',
heading_reset='heading_reset',
code_reset='code_reset',
backticks_reset='backticks_reset',
prompt_reset='prompt_reset'
)
# The name of the section in the config file containing colors.
COLOR_SECTION = 'color'
# The name of the section in the config file containing substitutions.
SUBSTITUTION_SECTION = 'substitutions'
def get_resolved_config_items(
egrc_path,
examples_dir,
custom_dir,
use_color,
pager_cmd,
squeeze,
debug=True
):
"""
Create a Config namedtuple. Passed in values will override defaults.
"""
# Expand the paths so we can use them with impunity later.
egrc_path = get_expanded_path(egrc_path)
examples_dir = get_expanded_path(examples_dir)
custom_dir = get_expanded_path(custom_dir)
# Print helpful failures.
if egrc_path and debug:
_inform_if_path_does_not_exist(egrc_path)
if examples_dir and debug:
_inform_if_path_does_not_exist(examples_dir)
if custom_dir and debug:
_inform_if_path_does_not_exist(custom_dir)
# The general rule is: caller-defined, egrc-defined, defaults. We'll try and
# get all three then use get_priority to choose the right one.
resolved_egrc_path = get_priority(egrc_path, DEFAULT_EGRC_PATH, None)
resolved_egrc_path = get_expanded_path(resolved_egrc_path)
# Start as if nothing was defined in the egrc.
empty_color_config = get_empty_color_config()
egrc_config = Config(
examples_dir=None,
custom_dir=None,
color_config=empty_color_config,
use_color=None,
pager_cmd=None,
squeeze=None,
subs=None
)
if os.path.isfile(resolved_egrc_path):
egrc_config = get_config_tuple_from_egrc(resolved_egrc_path)
resolved_examples_dir = get_priority(
examples_dir,
egrc_config.examples_dir,
DEFAULT_EXAMPLES_DIR
)
resolved_custom_dir = get_priority(
custom_dir,
egrc_config.custom_dir,
None
)
resolved_use_color = get_priority(
use_color,
egrc_config.use_color,
DEFAULT_USE_COLOR
)
resolved_pager_cmd = get_priority(
pager_cmd,
egrc_config.pager_cmd,
DEFAULT_PAGER_CMD
)
color_config = None
if resolved_use_color:
default_color_config = get_default_color_config()
color_config = merge_color_configs(
egrc_config.color_config,
default_color_config
)
resolved_squeeze = get_priority(
squeeze,
egrc_config.squeeze,
DEFAULT_SQUEEZE
)
# Pass in None, as subs can't be specified at the command line.
resolved_subs = get_priority(
None,
egrc_config.subs,
get_default_subs()
)
result = Config(
examples_dir=resolved_examples_dir,
custom_dir=resolved_custom_dir,
color_config=color_config,
use_color=resolved_use_color,
pager_cmd=resolved_pager_cmd,
squeeze=resolved_squeeze,
subs=resolved_subs
)
return result
def get_config_tuple_from_egrc(egrc_path):
"""
Create a Config named tuple from the values specified in the .egrc. Expands
any paths as necessary.
    egrc_path must exist and point to a file.
If not present in the .egrc, properties of the Config are returned as None.
"""
with open(egrc_path, 'r') as egrc:
try:
config = ConfigParser.RawConfigParser()
except AttributeError:
config = ConfigParser()
config.readfp(egrc)
# default to None
examples_dir = None
custom_dir = None
use_color = None
pager_cmd = None
squeeze = None
subs = None
if config.has_option(DEFAULT_SECTION, EG_EXAMPLES_DIR):
examples_dir = config.get(DEFAULT_SECTION, EG_EXAMPLES_DIR)
examples_dir = get_expanded_path(examples_dir)
if config.has_option(DEFAULT_SECTION, CUSTOM_EXAMPLES_DIR):
custom_dir = config.get(DEFAULT_SECTION, CUSTOM_EXAMPLES_DIR)
custom_dir = get_expanded_path(custom_dir)
if config.has_option(DEFAULT_SECTION, USE_COLOR):
use_color_raw = config.get(DEFAULT_SECTION, USE_COLOR)
use_color = _parse_bool_from_raw_egrc_value(use_color_raw)
if config.has_option(DEFAULT_SECTION, PAGER_CMD):
pager_cmd_raw = config.get(DEFAULT_SECTION, PAGER_CMD)
pager_cmd = ast.literal_eval(pager_cmd_raw)
color_config = get_custom_color_config_from_egrc(config)
if config.has_option(DEFAULT_SECTION, SQUEEZE):
squeeze_raw = config.get(DEFAULT_SECTION, SQUEEZE)
squeeze = _parse_bool_from_raw_egrc_value(squeeze_raw)
if config.has_section(SUBSTITUTION_SECTION):
subs = get_substitutions_from_config(config)
return Config(
examples_dir=examples_dir,
custom_dir=custom_dir,
color_config=color_config,
use_color=use_color,
pager_cmd=pager_cmd,
squeeze=squeeze,
subs=subs
)
def get_expanded_path(path):
"""Expand ~ and variables in a path. If path is not truthy, return None."""
if path:
result = path
result = os.path.expanduser(result)
result = os.path.expandvars(result)
return result
else:
return None
def get_priority(first, second, third):
"""
    Return the arguments based on priority. If first is not None, it will be
    returned. Otherwise, if second is not None, second will be returned.
    Failing both, third is returned (which may itself be None).
"""
if first is not None:
return first
elif second is not None:
return second
else:
return third
def _inform_if_path_does_not_exist(path):
"""
If the path does not exist, print a message saying so. This is intended to
be helpful to users if they specify a custom path that eg cannot find.
"""
expanded_path = get_expanded_path(path)
if not os.path.exists(expanded_path):
print('Could not find custom path at: {}'.format(expanded_path))
def get_custom_color_config_from_egrc(config):
"""
Get the ColorConfig from the egrc config object. Any colors not defined
will be None.
"""
pound = _get_color_from_config(config, CONFIG_NAMES.pound)
heading = _get_color_from_config(config, CONFIG_NAMES.heading)
code = _get_color_from_config(config, CONFIG_NAMES.code)
backticks = _get_color_from_config(config, CONFIG_NAMES.backticks)
prompt = _get_color_from_config(config, CONFIG_NAMES.prompt)
pound_reset = _get_color_from_config(config, CONFIG_NAMES.pound_reset)
heading_reset = _get_color_from_config(
config,
CONFIG_NAMES.heading_reset
)
code_reset = _get_color_from_config(config, CONFIG_NAMES.code_reset)
backticks_reset = _get_color_from_config(
config,
CONFIG_NAMES.backticks_reset
)
prompt_reset = _get_color_from_config(config, CONFIG_NAMES.prompt_reset)
result = ColorConfig(
pound=pound,
heading=heading,
code=code,
backticks=backticks,
prompt=prompt,
pound_reset=pound_reset,
heading_reset=heading_reset,
code_reset=code_reset,
backticks_reset=backticks_reset,
prompt_reset=prompt_reset
)
return result
def _get_color_from_config(config, option):
"""
    Helper method to get an option from the COLOR_SECTION of the config.
Returns None if the value is not present. If the value is present, it tries
to parse the value as a raw string literal, allowing escape sequences in the
egrc.
"""
if not config.has_option(COLOR_SECTION, option):
return None
else:
return ast.literal_eval(config.get(COLOR_SECTION, option))
def parse_substitution_from_list(list_rep):
"""
Parse a substitution from the list representation in the config file.
"""
# We are expecting [pattern, replacement [, is_multiline]]
if type(list_rep) is not list:
raise SyntaxError('Substitution must be a list')
if len(list_rep) < 2:
        raise SyntaxError('Substitution must be a list of at least size 2')
pattern = list_rep[0]
replacement = list_rep[1]
# By default, substitutions are not multiline.
is_multiline = False
if (len(list_rep) > 2):
is_multiline = list_rep[2]
if type(is_multiline) is not bool:
raise SyntaxError('is_multiline must be a boolean')
result = substitute.Substitution(pattern, replacement, is_multiline)
return result
def get_substitutions_from_config(config):
"""
Return a list of Substitution objects from the config, sorted alphabetically
by pattern name. Returns an empty list if no Substitutions are specified. If
there are problems parsing the values, a help message will be printed and an
error will be thrown.
"""
result = []
pattern_names = config.options(SUBSTITUTION_SECTION)
pattern_names.sort()
for name in pattern_names:
pattern_val = config.get(SUBSTITUTION_SECTION, name)
list_rep = ast.literal_eval(pattern_val)
substitution = parse_substitution_from_list(list_rep)
result.append(substitution)
return result
def get_default_color_config():
"""Get a color config object with all the defaults."""
result = ColorConfig(
pound=DEFAULT_COLOR_POUND,
heading=DEFAULT_COLOR_HEADING,
code=DEFAULT_COLOR_CODE,
backticks=DEFAULT_COLOR_BACKTICKS,
prompt=DEFAULT_COLOR_PROMPT,
pound_reset=DEFAULT_COLOR_POUND_RESET,
heading_reset=DEFAULT_COLOR_HEADING_RESET,
code_reset=DEFAULT_COLOR_CODE_RESET,
backticks_reset=DEFAULT_COLOR_BACKTICKS_RESET,
prompt_reset=DEFAULT_COLOR_PROMPT_RESET
)
return result
def get_empty_color_config():
"""Return a color_config with all values set to None."""
empty_color_config = ColorConfig(
pound=None,
heading=None,
code=None,
backticks=None,
prompt=None,
pound_reset=None,
heading_reset=None,
code_reset=None,
backticks_reset=None,
prompt_reset=None
)
return empty_color_config
def merge_color_configs(first, second):
"""
Merge the color configs.
Values in the first will overwrite non-None values in the second.
"""
    # We have to get the desired values first and simultaneously, as namedtuple
# is immutable.
pound = get_priority(first.pound, second.pound, None)
heading = get_priority(first.heading, second.heading, None)
code = get_priority(first.code, second.code, None)
backticks = get_priority(first.backticks, second.backticks, None)
prompt = get_priority(first.prompt, second.prompt, None)
pound_reset = get_priority(
first.pound_reset,
second.pound_reset,
None
)
heading_reset = get_priority(
first.heading_reset,
second.heading_reset,
None
)
code_reset = get_priority(
first.code_reset,
second.code_reset,
None
)
backticks_reset = get_priority(
first.backticks_reset,
second.backticks_reset,
None
)
prompt_reset = get_priority(
first.prompt_reset,
second.prompt_reset,
None
)
result = ColorConfig(
pound=pound,
heading=heading,
code=code,
backticks=backticks,
prompt=prompt,
pound_reset=pound_reset,
heading_reset=heading_reset,
code_reset=code_reset,
backticks_reset=backticks_reset,
prompt_reset=prompt_reset
)
return result
def _parse_bool_from_raw_egrc_value(raw_value):
"""
Parse the value from an egrc into a boolean.
"""
truthy_values = ['True', 'true']
return raw_value in truthy_values
def get_default_subs():
"""
Get the list of default substitutions. We're not storing this as a module
level object like the other DEFAULT values, as lists are mutable, and we
could get into trouble by modifying that list and not having it remain empty
as might be expected.
"""
return []
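# Illustrative sketch (not part of the original module) of the
# caller > egrc > default resolution implemented above.
if __name__ == '__main__':
    # A caller-supplied value always wins; otherwise the egrc value is used,
    # and finally the default.
    assert get_priority('caller', 'egrc', 'default') == 'caller'
    assert get_priority(None, 'egrc', 'default') == 'egrc'
    assert get_priority(None, None, 'default') == 'default'
    # Colors defined in the egrc overwrite only the matching defaults.
    merged = merge_color_configs(
        get_empty_color_config()._replace(pound='\x1b[35m'),
        get_default_color_config(),
    )
    assert merged.pound == '\x1b[35m'
    assert merged.code == DEFAULT_COLOR_CODE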
|
{
"content_hash": "1cfdadb2683daa259c38a92ad88d0630",
"timestamp": "",
"source": "github",
"line_count": 519,
"max_line_length": 80,
"avg_line_length": 29.776493256262043,
"alnum_prop": 0.6480522841982658,
"repo_name": "milouse/eg",
"id": "a143cc600dcdb5ae081df222482a2815bf09c92c",
"size": "15454",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "eg/config.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "121130"
}
],
"symlink_target": ""
}
|
from ansible.module_utils.basic import AnsibleModule
def main():
return AnsibleModule(
argument_spec=dict(
data=dict(default=None),
path=dict(default=None, type=str),
file=dict(default=None, type=str),
)
)
|
{
"content_hash": "9d0faa73e8c2a96504c308e32040d4e0",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 52,
"avg_line_length": 24.363636363636363,
"alnum_prop": 0.5932835820895522,
"repo_name": "openstack-infra/project-config",
"id": "48f8c915545ad8b8cefe761173c408e1cd7576f4",
"size": "324",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tests/fake-ansible/library/zuul_return.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "97773"
},
{
"name": "Shell",
"bytes": "123895"
}
],
"symlink_target": ""
}
|
"""Generated test for checking pynos based actions
"""
import xml.etree.ElementTree as ET
from st2tests.base import BaseActionTestCase
from bgp_redistribute import bgp_redistribute
__all__ = [
'TestBgpRedistribute'
]
class MockCallback(object): # pylint:disable=too-few-public-methods
"""Class to hold mock callback and result
"""
returned_data = None
def callback(self, call, **kwargs): # pylint:disable=unused-argument
"""Mock callback method
"""
xml_result = ET.tostring(call)
self.returned_data = xml_result
class TestBgpRedistribute(BaseActionTestCase):
"""Test holder class
"""
action_cls = bgp_redistribute
def test_action(self):
"""Generated test to check action
"""
action = self.get_action_instance()
mock_callback = MockCallback()
kwargs = {
'username': '',
'rbridge_id': '224',
'get': False,
'ip': '',
'source': 'connected',
'vrf': 'test',
'password': '',
'port': '22',
'afi': 'ipv4',
'test': True,
'callback': mock_callback.callback
}
action.run(**kwargs)
expected_xml = (
'<config><rbridge-id xmlns="urn:brocade.com:mgmt:brocade-rbridge">'
'<rbridge-id>224</rbridge-id><router><bgp xmlns="urn:brocade.com:m'
'gmt:brocade-bgp"><vrf-name>test</vrf-name><router-bgp-cmds-holder'
'><address-family><ipv4><ipv4-unicast><af-ipv4-uc-and-vrf-cmds-cal'
'l-point-holder><redistribute><connected><redistribute-connected /'
'></connected></redistribute></af-ipv4-uc-and-vrf-cmds-call-point-'
'holder></ipv4-unicast></ipv4></address-family></router-bgp-cmds-h'
'older></bgp></router></rbridge-id></config>'
)
        self.assertEqual(expected_xml, mock_callback.returned_data)
|
{
"content_hash": "7d69fb69b1eae8646c878c591510e094",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 79,
"avg_line_length": 32.08196721311475,
"alnum_prop": 0.5804803270311701,
"repo_name": "tonybaloney/st2contrib",
"id": "53b5674d8077133cd0224ef0973ca4a10a2fdaa5",
"size": "1957",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "packs/vdx/tests/test_action_bgp_redistribute.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Groovy",
"bytes": "8532"
},
{
"name": "Makefile",
"bytes": "5392"
},
{
"name": "Python",
"bytes": "1285946"
},
{
"name": "Ruby",
"bytes": "3081"
},
{
"name": "Shell",
"bytes": "7547"
}
],
"symlink_target": ""
}
|
import unittest
import uuid
from fairseq import metrics
class TestMetrics(unittest.TestCase):
def test_nesting(self):
with metrics.aggregate() as a:
metrics.log_scalar('loss', 1)
with metrics.aggregate() as b:
metrics.log_scalar('loss', 2)
self.assertEqual(a.get_smoothed_values()['loss'], 1.5)
self.assertEqual(b.get_smoothed_values()['loss'], 2)
def test_new_root(self):
with metrics.aggregate() as a:
metrics.log_scalar('loss', 1)
with metrics.aggregate(new_root=True) as b:
metrics.log_scalar('loss', 2)
self.assertEqual(a.get_smoothed_values()['loss'], 1)
self.assertEqual(b.get_smoothed_values()['loss'], 2)
def test_nested_new_root(self):
with metrics.aggregate() as layer1:
metrics.log_scalar('loss', 1)
with metrics.aggregate(new_root=True) as layer2:
metrics.log_scalar('loss', 2)
with metrics.aggregate() as layer3:
metrics.log_scalar('loss', 3)
with metrics.aggregate(new_root=True) as layer4:
metrics.log_scalar('loss', 4)
metrics.log_scalar('loss', 1.5)
self.assertEqual(layer4.get_smoothed_values()['loss'], 4)
self.assertEqual(layer3.get_smoothed_values()['loss'], 3)
self.assertEqual(layer2.get_smoothed_values()['loss'], 2.5)
self.assertEqual(layer1.get_smoothed_values()['loss'], 1.25)
def test_named(self):
name = str(uuid.uuid4())
metrics.reset_meters(name)
with metrics.aggregate(name):
metrics.log_scalar('loss', 1)
metrics.log_scalar('loss', 3)
with metrics.aggregate(name):
metrics.log_scalar('loss', 2)
self.assertEqual(metrics.get_smoothed_values(name)['loss'], 1.5)
def test_nested_duplicate_names(self):
name = str(uuid.uuid4())
metrics.reset_meters(name)
with metrics.aggregate(name):
metrics.log_scalar('loss', 1)
with metrics.aggregate() as other:
with metrics.aggregate(name):
metrics.log_scalar('loss', 2)
metrics.log_scalar('loss', 6)
self.assertEqual(metrics.get_smoothed_values(name)['loss'], 3)
self.assertEqual(other.get_smoothed_values()['loss'], 2)
if __name__ == '__main__':
unittest.main()
|
{
"content_hash": "d2d062b9141c86bb8c94c8564a9737a7",
"timestamp": "",
"source": "github",
"line_count": 73,
"max_line_length": 72,
"avg_line_length": 33.6986301369863,
"alnum_prop": 0.5804878048780487,
"repo_name": "hfp/libxsmm",
"id": "060291808e37d9b69597522519fdf25d4b13d3e3",
"size": "2638",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "samples/deeplearning/sparse_training/fairseq/tests/test_metrics.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "3115"
},
{
"name": "C",
"bytes": "8335143"
},
{
"name": "C++",
"bytes": "84416"
},
{
"name": "CSS",
"bytes": "242"
},
{
"name": "Fortran",
"bytes": "102021"
},
{
"name": "HTML",
"bytes": "390"
},
{
"name": "JavaScript",
"bytes": "1062"
},
{
"name": "Makefile",
"bytes": "158870"
},
{
"name": "Python",
"bytes": "36612"
},
{
"name": "Shell",
"bytes": "84205"
},
{
"name": "Starlark",
"bytes": "882"
}
],
"symlink_target": ""
}
|
import hashlib
import hmac
import base64
import random
import time
import urllib
import string
AUTH_ID = "clientid"
SECRET = "supersecret"
def prepare_authenticated_uri(uri):
"""Prepare an authenticated URI.
    The uri param includes all query parameters except authid, time, nonce, and sign"""
uristring = ""
timestamp = time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime())
nonce = generate_nonce()
params = "authid={0}&time={1}&nonce={2}".format(AUTH_ID, timestamp, nonce)
if uri.find("?") == -1:
uristring = "{0}?{1}".format(uri, params)
else:
uristring = "{0}&{1}".format(uri, params)
hashed_string = generate_hash(uristring)
auth_uri = "{0}&sign={1}".format(uristring, hashed_string)
print auth_uri
return auth_uri
def generate_hash(data):
"""Return a SHA1 hash of the data using the secret"""
digest = hmac.new(SECRET, data, hashlib.sha1).digest()
hash64 = base64.b64encode(digest)
return urllib.quote_plus(hash64) # escape chars for URI
def generate_nonce():
"""Generate a random number 30 digits long"""
randomnum = random.randrange(10**30)
rjust_str = '{:0<30}'.format(str(randomnum))
return rjust_str
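# Illustrative usage sketch (not part of the original module); the endpoint
# below is a made-up example value.
if __name__ == '__main__':
    # Prints and returns something like:
    #   http://localhost:8181/ws/jobs?authid=clientid&time=...&nonce=...&sign=...
    prepare_authenticated_uri("http://localhost:8181/ws/jobs")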
|
{
"content_hash": "7770dc201c8ef79391e0bc6a475a25a6",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 78,
"avg_line_length": 28.093023255813954,
"alnum_prop": 0.6539735099337748,
"repo_name": "daisy/pipeline-issues",
"id": "32dbe6befbdb17fb31b42779766e88cd099ab376",
"size": "1208",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "framework/webservice/samples/clients/python/authentication.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
}
|
SUGGESTS_MAIN_PATH = 'suggest'
SUGGEST_CREATE = 'suggest_create'
SUGGEST_SHOW = 'suggest_show'
SUGGEST_UPDATE = 'suggest_update'
SUGGEST_INDEX = 'suggest_index'
SUGGEST_DELETE = 'suggest_delete'
SUGGEST_CLOSE = 'suggest_close'
SUGGEST_COMMENT = 'suggest_comment'
SUGGEST_COMMENT_LIST = 'suggest_comment_list'
SUGGEST_COMMENT_SHOW = 'suggest_comment_show'
SUGGEST_COMMENT_UPDATE = 'suggest_comment_update'
SUGGEST_COMMENT_DELETE = 'suggest_comment_delete'
SUGGEST_VIEWS = 'suggest_views'
NAME_MAX_LENGTH = 100
DESCRIPTION_MAX_LENGTH = 1000
COMMENT_MAX_LENGTH = DESCRIPTION_MAX_LENGTH
SUGGESTS_PER_PAGE = 10
DATASET_NAME_MAX_LENGTH = 100
SUGGEST_COLUMNS_MAX_LENGTH = 1000
|
{
"content_hash": "52c82e283c8979ea109f561a7e66d406",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 49,
"avg_line_length": 33.55,
"alnum_prop": 0.7719821162444114,
"repo_name": "WilJoey/ckanext-tnext",
"id": "522c12371fe66596d943acc2dc2e164208aac534",
"size": "671",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ckanext/tnext/constants.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "6995"
},
{
"name": "HTML",
"bytes": "134710"
},
{
"name": "JavaScript",
"bytes": "9650"
},
{
"name": "Python",
"bytes": "63710"
}
],
"symlink_target": ""
}
|
import config
from sprite import StaticSprite
# ----------- Scene -----------
# A scene comprised of a set of layers.
class Scene( ):
def __init__( self ):
self.layers = []
self.actors = []
def addLayer( self, layer ):
self.layers.append( layer )
def addActor( self, actor ):
self.actors.append( actor )
# ----------- Scene Layer -----------
# A layer image with depth position in the scene.
class SceneLayer( StaticSprite ):
def __init__( self, src, depth ):
StaticSprite.__init__( self, config.folders['scenes'] + src )
self.depth = depth
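# Illustrative usage sketch (not part of the original module); the image file
# names and depths below are made-up example values.
#
#   scene = Scene()
#   scene.addLayer(SceneLayer('background.png', depth=0))
#   scene.addLayer(SceneLayer('foreground.png', depth=10))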
|
{
"content_hash": "ce87aef9014da5e256b8078c54b10d76",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 63,
"avg_line_length": 20.925925925925927,
"alnum_prop": 0.6141592920353982,
"repo_name": "lsjroberts/7d7g",
"id": "a3b4642ba6bde3f0a1ddad300db20bae3af69c19",
"size": "682",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "framework/scene.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "53894"
}
],
"symlink_target": ""
}
|
import csv
import logging
import subprocess
import win32ts
def GetActiveSessionID():
"""Get the active session ID."""
local_server = win32ts.WTS_CURRENT_SERVER_HANDLE
for session in win32ts.WTSEnumerateSessions(local_server):
if session['State'] == win32ts.WTSActive:
return session['SessionId']
logging.warning('Unexpected: no active session.')
return None
def GetPIDsWithName(image_name, session=None):
"""Gets all process PIDs, with the given image name.
Args:
image_name: Case-insensitive process image name.
session: Session filter. Only search processes within given session.
None means no filter.
Returns:
A list of process ID.
"""
cmd = ['tasklist', '/FO:csv', '/NH', '/FI', 'IMAGENAME eq %s' % image_name]
if session is not None:
cmd.extend(['/FI', 'SESSION eq %s' % session])
proc = subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = proc.communicate()
if proc.returncode != 0:
logging.error('Unable to list processes, %s', stderr)
return []
stdout = stdout.decode('ascii').splitlines()
return [int(row[1]) for row in csv.reader(stdout, delimiter=',')]
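# Illustrative usage sketch (not part of the original module); requires a
# Windows host with pywin32 installed. The image name below is an example.
if __name__ == '__main__':
    active_session = GetActiveSessionID()
    print('Active session:', active_session)
    print('explorer.exe PIDs:',
          GetPIDsWithName('explorer.exe', session=active_session))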
|
{
"content_hash": "3ed71c6a00c1da52a09cad22a5ea8022",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 79,
"avg_line_length": 30.441860465116278,
"alnum_prop": 0.6256684491978609,
"repo_name": "scheib/chromium",
"id": "e3366634f1d0682a5c0d2ba7eb192a846798a8f7",
"size": "1472",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "chrome/updater/test/service/win/proc_util.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
import multiprocessing
import sys, os
if __name__ == "__main__":
    # Create the worker pool inside the main guard so that importing this
    # module does not start worker processes.
    pool = multiprocessing.Pool(processes=7)
    cmds = open(sys.argv[1])
    results = []
    for cmd in cmds:
        # Run each shell command from the input file in a worker process.
        results.append(pool.apply_async(os.system, (cmd.strip(),)))
    pool.close()
    pool.join()
|
{
"content_hash": "6c82339a5360b7157d378035af69f891",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 67,
"avg_line_length": 21.846153846153847,
"alnum_prop": 0.5985915492957746,
"repo_name": "zhmz90/Daily",
"id": "e79e74bc2bac60ac013de20b44c23b12c814c9e0",
"size": "307",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "bin/parallel_multiprocess.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "1723"
},
{
"name": "C",
"bytes": "1558699"
},
{
"name": "C++",
"bytes": "39093"
},
{
"name": "CMake",
"bytes": "9888"
},
{
"name": "Java",
"bytes": "413"
},
{
"name": "Julia",
"bytes": "27890"
},
{
"name": "Jupyter Notebook",
"bytes": "813562"
},
{
"name": "LLVM",
"bytes": "1211"
},
{
"name": "Lua",
"bytes": "45884"
},
{
"name": "M4",
"bytes": "7552"
},
{
"name": "Makefile",
"bytes": "30262"
},
{
"name": "Perl",
"bytes": "15640"
},
{
"name": "Protocol Buffer",
"bytes": "705"
},
{
"name": "Python",
"bytes": "45107"
},
{
"name": "R",
"bytes": "13427"
},
{
"name": "Rebol",
"bytes": "2109"
},
{
"name": "Roff",
"bytes": "19873"
},
{
"name": "Scala",
"bytes": "3079"
},
{
"name": "Shell",
"bytes": "3808"
},
{
"name": "TeX",
"bytes": "1146"
}
],
"symlink_target": ""
}
|
import datetime
from django.core import signing
from django.test import SimpleTestCase
from django.test.utils import freeze_time
from django.utils.crypto import InvalidAlgorithm
class TestSigner(SimpleTestCase):
def test_signature(self):
"signature() method should generate a signature"
signer = signing.Signer('predictable-secret')
signer2 = signing.Signer('predictable-secret2')
for s in (
b'hello',
b'3098247:529:087:',
'\u2019'.encode(),
):
self.assertEqual(
signer.signature(s),
signing.base64_hmac(
signer.salt + 'signer',
s,
'predictable-secret',
algorithm=signer.algorithm,
)
)
self.assertNotEqual(signer.signature(s), signer2.signature(s))
def test_signature_with_salt(self):
"signature(value, salt=...) should work"
signer = signing.Signer('predictable-secret', salt='extra-salt')
self.assertEqual(
signer.signature('hello'),
signing.base64_hmac(
'extra-salt' + 'signer',
'hello',
'predictable-secret',
algorithm=signer.algorithm,
)
)
self.assertNotEqual(
signing.Signer('predictable-secret', salt='one').signature('hello'),
signing.Signer('predictable-secret', salt='two').signature('hello'))
def test_custom_algorithm(self):
signer = signing.Signer('predictable-secret', algorithm='sha512')
self.assertEqual(
signer.signature('hello'),
'Usf3uVQOZ9m6uPfVonKR-EBXjPe7bjMbp3_Fq8MfsptgkkM1ojidN0BxYaT5HAEN1'
'VzO9_jVu7R-VkqknHYNvw',
)
def test_invalid_algorithm(self):
signer = signing.Signer('predictable-secret', algorithm='whatever')
msg = "'whatever' is not an algorithm accepted by the hashlib module."
with self.assertRaisesMessage(InvalidAlgorithm, msg):
signer.sign('hello')
def test_legacy_signature(self):
# RemovedInDjango40Warning: pre-Django 3.1 signatures won't be
# supported.
signer = signing.Signer()
sha1_sig = 'foo:l-EMM5FtewpcHMbKFeQodt3X9z8'
self.assertNotEqual(signer.sign('foo'), sha1_sig)
self.assertEqual(signer.unsign(sha1_sig), 'foo')
def test_sign_unsign(self):
"sign/unsign should be reversible"
signer = signing.Signer('predictable-secret')
examples = [
'q;wjmbk;wkmb',
'3098247529087',
'3098247:529:087:',
'jkw osanteuh ,rcuh nthu aou oauh ,ud du',
'\u2019',
]
for example in examples:
signed = signer.sign(example)
self.assertIsInstance(signed, str)
self.assertNotEqual(example, signed)
self.assertEqual(example, signer.unsign(signed))
def test_sign_unsign_non_string(self):
signer = signing.Signer('predictable-secret')
values = [
123,
1.23,
True,
datetime.date.today(),
]
for value in values:
with self.subTest(value):
signed = signer.sign(value)
self.assertIsInstance(signed, str)
self.assertNotEqual(signed, value)
self.assertEqual(signer.unsign(signed), str(value))
def test_unsign_detects_tampering(self):
"unsign should raise an exception if the value has been tampered with"
signer = signing.Signer('predictable-secret')
value = 'Another string'
signed_value = signer.sign(value)
transforms = (
lambda s: s.upper(),
lambda s: s + 'a',
lambda s: 'a' + s[1:],
lambda s: s.replace(':', ''),
)
self.assertEqual(value, signer.unsign(signed_value))
for transform in transforms:
with self.assertRaises(signing.BadSignature):
signer.unsign(transform(signed_value))
def test_dumps_loads(self):
"dumps and loads be reversible for any JSON serializable object"
objects = [
['a', 'list'],
'a string \u2019',
{'a': 'dictionary'},
]
for o in objects:
self.assertNotEqual(o, signing.dumps(o))
self.assertEqual(o, signing.loads(signing.dumps(o)))
self.assertNotEqual(o, signing.dumps(o, compress=True))
self.assertEqual(o, signing.loads(signing.dumps(o, compress=True)))
def test_decode_detects_tampering(self):
"loads should raise exception for tampered objects"
transforms = (
lambda s: s.upper(),
lambda s: s + 'a',
lambda s: 'a' + s[1:],
lambda s: s.replace(':', ''),
)
value = {
'foo': 'bar',
'baz': 1,
}
encoded = signing.dumps(value)
self.assertEqual(value, signing.loads(encoded))
for transform in transforms:
with self.assertRaises(signing.BadSignature):
signing.loads(transform(encoded))
def test_works_with_non_ascii_keys(self):
binary_key = b'\xe7'  # Set some binary (non-ASCII) key.
s = signing.Signer(binary_key)
self.assertEqual(
'foo:EE4qGC5MEKyQG5msxYA0sBohAxLC0BJf8uRhemh0BGU',
s.sign('foo'),
)
def test_valid_sep(self):
separators = ['/', '*sep*', ',']
for sep in separators:
signer = signing.Signer('predictable-secret', sep=sep)
self.assertEqual(
'foo%sjZQoX_FtSO70jX9HLRGg2A_2s4kdDBxz1QoO_OpEQb0' % sep,
signer.sign('foo'),
)
def test_invalid_sep(self):
"""should warn on invalid separator"""
msg = 'Unsafe Signer separator: %r (cannot be empty or consist of only A-z0-9-_=)'
separators = ['', '-', 'abc']
for sep in separators:
with self.assertRaisesMessage(ValueError, msg % sep):
signing.Signer(sep=sep)
class TestTimestampSigner(SimpleTestCase):
def test_timestamp_signer(self):
value = 'hello'
with freeze_time(123456789):
signer = signing.TimestampSigner('predictable-key')
ts = signer.sign(value)
self.assertNotEqual(ts, signing.Signer('predictable-key').sign(value))
self.assertEqual(signer.unsign(ts), value)
with freeze_time(123456800):
self.assertEqual(signer.unsign(ts, max_age=12), value)
# max_age parameter can also accept a datetime.timedelta object
self.assertEqual(signer.unsign(ts, max_age=datetime.timedelta(seconds=11)), value)
with self.assertRaises(signing.SignatureExpired):
signer.unsign(ts, max_age=10)
|
{
"content_hash": "8a2784720e259623152a5807dacc6560",
"timestamp": "",
"source": "github",
"line_count": 189,
"max_line_length": 94,
"avg_line_length": 36.86772486772487,
"alnum_prop": 0.5730482204362801,
"repo_name": "claudep/django",
"id": "6b7268179d73a15e885e1ff538ed9a00ac7cd27e",
"size": "6968",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "tests/signing/tests.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "78915"
},
{
"name": "HTML",
"bytes": "227663"
},
{
"name": "JavaScript",
"bytes": "137605"
},
{
"name": "Makefile",
"bytes": "125"
},
{
"name": "Python",
"bytes": "13662118"
},
{
"name": "Shell",
"bytes": "809"
},
{
"name": "Smarty",
"bytes": "142"
}
],
"symlink_target": ""
}
|
import urllib2
from BeautifulSoup import BeautifulSoup
def scrape(query):
# Check for multiple word input
if not isinstance(query, basestring):
try:
query = "+".join(query)
except Exception as e:
print(e)
return None
page = urllib2.urlopen("http://www.bandcamp.com/search?q={0}".format(
query)).read()
# Search bandcamp for first artist that appears from the search
soup = BeautifulSoup(page)
try:
song = soup.find('li', 'searchresult').a['href']
except AttributeError:
return None
# Search the first artist's page for the first song
try:
song_page = urllib2.urlopen(song).read()
song_soup = BeautifulSoup(song_page)
except Exception as e:
print(e)
return None
# Print out the url to that song
song_player = song_soup.find('meta', property="og:video")
if song_player:
song_player = song_player['content']
song_title = song_soup.find('meta', property="og:title")['content']
# If the first item is an artist, access the first album
else:
album = song + song_soup.find('div', 'ipCellSet').find('a')['href']
album_page = urllib2.urlopen(album).read()
album_soup = BeautifulSoup(album_page)
song_player = album_soup.find('meta', property="og:video")['content']
song_title = album_soup.find('meta', property="og:title")['content']
return (song_player, song_title)
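# Usage sketch (Python 2 only; needs urllib2, BeautifulSoup 3 and network
# access -- the query string is just an illustrative example):
#   result = scrape('some artist')
#   if result:
#       player_url, title = result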
|
{
"content_hash": "c67bab5226806380d483dca10decf311",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 77,
"avg_line_length": 31.80851063829787,
"alnum_prop": 0.6193979933110368,
"repo_name": "ajman1101/BandcampRoulette",
"id": "64ebb99974e1130502b720daa44394b86b470f49",
"size": "1495",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "roulette/scrape.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2561"
},
{
"name": "Python",
"bytes": "6673"
}
],
"symlink_target": ""
}
|
"""compromise game.
this example comes from the games introduced in paper
A Polynomial-time Nash Equilibrium Algorithm for Repeated Stochastic game.Games
by Enrique Munoz de Cote and Michael L. Littman
"""
import numpy as np
from psycholab import game
from psycholab import visualizer
def create_game():
"""Create the compromise game."""
art = ['###########',
'# #b# #a# #',
'# A B #',
'###########'
]
item_a = game.Item(color=(0, 254, 254))
item_b = game.Item(color=(254, 254, 0))
items = {'a': item_a, 'b': item_b}
player_a = game.Player(color=(0, 100, 254))
player_b = game.Player(color=(254, 100, 0))
players = {'A': player_a, 'B': player_b}
env = game.Game(art, items, players, tabular=True)
env.display()
env.add_reward('A_moves', {'A': -1})
env.add_reward('B_moves', {'B': -1})
env.add_reward('A_collects_a', {'A': 100})
env.add_reward('B_collects_b', {'B': 100})
env.add_terminaison('A_collects_a')
env.add_terminaison('B_collects_b')
# for frame-by-frame visualization:
env = visualizer.Visualizer(env, fps=2, by_episode=False)
# for fast visualization:
# env = visualizer.Visualizer(env, fps=1000, by_episode=True)
return env
def run_game(env, max_step):
"""Runs `max_step` iterations of the game `env` and print players returns."""
obs = env.reset()
# discrete_state converts observations into states
# 'obs' contains all agent x, y positions.
# 'state' is an integer representing the combination of
# all agents x, y positions.
state = env.discrete_state(obs)
transitions = []
returns = 0
episode = 0
for _ in range(max_step):
# Pick a random action for all agents:
actions = np.random.choice(range(env.num_actions), env.num_players)
# Environment step:
obs, rewards, done, info = env.step(actions)
new_state = env.discrete_state(obs)
transitions.append((state, new_state, rewards, actions, done, info))
state = new_state
# Sum rewards:
returns += rewards
if done:
# The last episode is finished:
episode += 1
print('episode', episode, 'returns', returns)
# Reset env for new episode
obs = env.reset()
# state = env.discrete_state(obs)
returns = 0
# Close visualizer:
env.finish()
if __name__ == '__main__':
game_env = create_game()
run_game(game_env, max_step=200000)
|
{
"content_hash": "73117ed1dbe90ff8c2230e34bbeda283",
"timestamp": "",
"source": "github",
"line_count": 89,
"max_line_length": 79,
"avg_line_length": 26.910112359550563,
"alnum_prop": 0.6313152400835073,
"repo_name": "google-research/google-research",
"id": "f95285fad32db55011cb8ebeea83f06a1b9be2fd",
"size": "3003",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "psycholab/examples/compromise.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "9817"
},
{
"name": "C++",
"bytes": "4166670"
},
{
"name": "CMake",
"bytes": "6412"
},
{
"name": "CSS",
"bytes": "27092"
},
{
"name": "Cuda",
"bytes": "1431"
},
{
"name": "Dockerfile",
"bytes": "7145"
},
{
"name": "Gnuplot",
"bytes": "11125"
},
{
"name": "HTML",
"bytes": "77599"
},
{
"name": "ImageJ Macro",
"bytes": "50488"
},
{
"name": "Java",
"bytes": "487585"
},
{
"name": "JavaScript",
"bytes": "896512"
},
{
"name": "Julia",
"bytes": "67986"
},
{
"name": "Jupyter Notebook",
"bytes": "71290299"
},
{
"name": "Lua",
"bytes": "29905"
},
{
"name": "MATLAB",
"bytes": "103813"
},
{
"name": "Makefile",
"bytes": "5636"
},
{
"name": "NASL",
"bytes": "63883"
},
{
"name": "Perl",
"bytes": "8590"
},
{
"name": "Python",
"bytes": "53790200"
},
{
"name": "R",
"bytes": "101058"
},
{
"name": "Roff",
"bytes": "1208"
},
{
"name": "Rust",
"bytes": "2389"
},
{
"name": "Shell",
"bytes": "730444"
},
{
"name": "Smarty",
"bytes": "5966"
},
{
"name": "Starlark",
"bytes": "245038"
}
],
"symlink_target": ""
}
|
def extractIsekaiTranslation(item):
"""
# Isekai Soul-Cyborg Translations
"""
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or 'preview' in item['title'].lower():
return None
if 'Manga' in item['tags']:
return None
if 'Isekai Maou to Shoukan Shoujo Dorei Majutsu' in item['tags'] and (chp or vol) and not 'manga' in item['title'].lower():
if chp == 11 and frag == 10:
return None
return buildReleaseMessageWithType(item, 'Isekai Maou to Shoukan Shoujo no Dorei Majutsu', vol, chp, frag=frag, postfix=postfix)
return False
|
{
"content_hash": "661a59cd915011332b5623d12edba634",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 130,
"avg_line_length": 37,
"alnum_prop": 0.7094594594594594,
"repo_name": "fake-name/ReadableWebProxy",
"id": "5b2bddbb58bda4260f0b741c12e5db77351cb722",
"size": "592",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "WebMirror/management/rss_parser_funcs/feed_parse_extractIsekaiTranslation.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "105811"
},
{
"name": "Dockerfile",
"bytes": "1178"
},
{
"name": "HTML",
"bytes": "119737"
},
{
"name": "JavaScript",
"bytes": "3006524"
},
{
"name": "Jupyter Notebook",
"bytes": "148075"
},
{
"name": "Mako",
"bytes": "1454"
},
{
"name": "Python",
"bytes": "5264346"
},
{
"name": "Shell",
"bytes": "1059"
}
],
"symlink_target": ""
}
|