repo stringlengths 7 55 | path stringlengths 4 127 | func_name stringlengths 1 88 | original_string stringlengths 75 19.8k | language stringclasses 1 value | code stringlengths 75 19.8k | code_tokens listlengths 20 707 | docstring stringlengths 3 17.3k | docstring_tokens listlengths 3 222 | sha stringlengths 40 40 | url stringlengths 87 242 | partition stringclasses 1 value | idx int64 0 252k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
bwhite/hadoopy | hadoopy/thirdparty/pyinstaller/PyInstaller/loader/carchive.py | CTOC.frombinary | def frombinary(self, s):
"""Decode the binary string into an in memory list.
S is a binary string."""
entrylen = struct.calcsize(self.ENTRYSTRUCT)
p = 0
while p<len(s):
(slen, dpos, dlen, ulen, flag, typcd) = struct.unpack(self.ENTRYSTRUCT,
s[p:p+entrylen])
nmlen = slen - entrylen
p = p + entrylen
(nm,) = struct.unpack(`nmlen`+'s', s[p:p+nmlen])
p = p + nmlen
# version 4
# self.data.append((dpos, dlen, ulen, flag, typcd, nm[:-1]))
# version 5
# nm may have up to 15 bytes of padding
pos = nm.find('\0')
if pos < 0:
self.data.append((dpos, dlen, ulen, flag, typcd, nm))
else:
self.data.append((dpos, dlen, ulen, flag, typcd, nm[:pos])) | python | def frombinary(self, s):
"""Decode the binary string into an in memory list.
S is a binary string."""
entrylen = struct.calcsize(self.ENTRYSTRUCT)
p = 0
while p<len(s):
(slen, dpos, dlen, ulen, flag, typcd) = struct.unpack(self.ENTRYSTRUCT,
s[p:p+entrylen])
nmlen = slen - entrylen
p = p + entrylen
(nm,) = struct.unpack(`nmlen`+'s', s[p:p+nmlen])
p = p + nmlen
# version 4
# self.data.append((dpos, dlen, ulen, flag, typcd, nm[:-1]))
# version 5
# nm may have up to 15 bytes of padding
pos = nm.find('\0')
if pos < 0:
self.data.append((dpos, dlen, ulen, flag, typcd, nm))
else:
self.data.append((dpos, dlen, ulen, flag, typcd, nm[:pos])) | [
"def",
"frombinary",
"(",
"self",
",",
"s",
")",
":",
"entrylen",
"=",
"struct",
".",
"calcsize",
"(",
"self",
".",
"ENTRYSTRUCT",
")",
"p",
"=",
"0",
"while",
"p",
"<",
"len",
"(",
"s",
")",
":",
"(",
"slen",
",",
"dpos",
",",
"dlen",
",",
"ul... | Decode the binary string into an in memory list.
S is a binary string. | [
"Decode",
"the",
"binary",
"string",
"into",
"an",
"in",
"memory",
"list",
"."
] | ff39b4e6d4e6efaf1f571cf0f2c0e0d7ab28c2d6 | https://github.com/bwhite/hadoopy/blob/ff39b4e6d4e6efaf1f571cf0f2c0e0d7ab28c2d6/hadoopy/thirdparty/pyinstaller/PyInstaller/loader/carchive.py#L35-L56 | train | 32,200 |
bwhite/hadoopy | hadoopy/thirdparty/pyinstaller/PyInstaller/loader/carchive.py | CTOC.tobinary | def tobinary(self):
"""Return self as a binary string."""
entrylen = struct.calcsize(self.ENTRYSTRUCT)
rslt = []
for (dpos, dlen, ulen, flag, typcd, nm) in self.data:
nmlen = len(nm) + 1 # add 1 for a '\0'
# version 4
# rslt.append(struct.pack(self.ENTRYSTRUCT+`nmlen`+'s',
# nmlen+entrylen, dpos, dlen, ulen, flag, typcd, nm+'\0'))
# version 5
# align to 16 byte boundary so xplatform C can read
toclen = nmlen + entrylen
if toclen % 16 == 0:
pad = '\0'
else:
padlen = 16 - (toclen % 16)
pad = '\0'*padlen
nmlen = nmlen + padlen
rslt.append(struct.pack(self.ENTRYSTRUCT+`nmlen`+'s',
nmlen+entrylen, dpos, dlen, ulen, flag, typcd, nm+pad))
# end version 5
return ''.join(rslt) | python | def tobinary(self):
"""Return self as a binary string."""
entrylen = struct.calcsize(self.ENTRYSTRUCT)
rslt = []
for (dpos, dlen, ulen, flag, typcd, nm) in self.data:
nmlen = len(nm) + 1 # add 1 for a '\0'
# version 4
# rslt.append(struct.pack(self.ENTRYSTRUCT+`nmlen`+'s',
# nmlen+entrylen, dpos, dlen, ulen, flag, typcd, nm+'\0'))
# version 5
# align to 16 byte boundary so xplatform C can read
toclen = nmlen + entrylen
if toclen % 16 == 0:
pad = '\0'
else:
padlen = 16 - (toclen % 16)
pad = '\0'*padlen
nmlen = nmlen + padlen
rslt.append(struct.pack(self.ENTRYSTRUCT+`nmlen`+'s',
nmlen+entrylen, dpos, dlen, ulen, flag, typcd, nm+pad))
# end version 5
return ''.join(rslt) | [
"def",
"tobinary",
"(",
"self",
")",
":",
"entrylen",
"=",
"struct",
".",
"calcsize",
"(",
"self",
".",
"ENTRYSTRUCT",
")",
"rslt",
"=",
"[",
"]",
"for",
"(",
"dpos",
",",
"dlen",
",",
"ulen",
",",
"flag",
",",
"typcd",
",",
"nm",
")",
"in",
"sel... | Return self as a binary string. | [
"Return",
"self",
"as",
"a",
"binary",
"string",
"."
] | ff39b4e6d4e6efaf1f571cf0f2c0e0d7ab28c2d6 | https://github.com/bwhite/hadoopy/blob/ff39b4e6d4e6efaf1f571cf0f2c0e0d7ab28c2d6/hadoopy/thirdparty/pyinstaller/PyInstaller/loader/carchive.py#L60-L82 | train | 32,201 |
bwhite/hadoopy | hadoopy/thirdparty/pyinstaller/PyInstaller/loader/carchive.py | CTOC.add | def add(self, dpos, dlen, ulen, flag, typcd, nm):
"""Add an entry to the table of contents.
DPOS is data position.
DLEN is data length.
ULEN is the uncompressed data len.
FLAG says if the data is compressed.
TYPCD is the "type" of the entry (used by the C code)
NM is the entry's name."""
self.data.append((dpos, dlen, ulen, flag, typcd, nm)) | python | def add(self, dpos, dlen, ulen, flag, typcd, nm):
"""Add an entry to the table of contents.
DPOS is data position.
DLEN is data length.
ULEN is the uncompressed data len.
FLAG says if the data is compressed.
TYPCD is the "type" of the entry (used by the C code)
NM is the entry's name."""
self.data.append((dpos, dlen, ulen, flag, typcd, nm)) | [
"def",
"add",
"(",
"self",
",",
"dpos",
",",
"dlen",
",",
"ulen",
",",
"flag",
",",
"typcd",
",",
"nm",
")",
":",
"self",
".",
"data",
".",
"append",
"(",
"(",
"dpos",
",",
"dlen",
",",
"ulen",
",",
"flag",
",",
"typcd",
",",
"nm",
")",
")"
] | Add an entry to the table of contents.
DPOS is data position.
DLEN is data length.
ULEN is the uncompressed data len.
FLAG says if the data is compressed.
TYPCD is the "type" of the entry (used by the C code)
NM is the entry's name. | [
"Add",
"an",
"entry",
"to",
"the",
"table",
"of",
"contents",
"."
] | ff39b4e6d4e6efaf1f571cf0f2c0e0d7ab28c2d6 | https://github.com/bwhite/hadoopy/blob/ff39b4e6d4e6efaf1f571cf0f2c0e0d7ab28c2d6/hadoopy/thirdparty/pyinstaller/PyInstaller/loader/carchive.py#L84-L93 | train | 32,202 |
bwhite/hadoopy | hadoopy/thirdparty/pyinstaller/PyInstaller/loader/carchive.py | CTOC.find | def find(self, name):
"""Return the index of the toc entry with name NAME.
Return -1 for failure."""
for i, nm in enumerate(self.data):
if nm[-1] == name:
return i
return -1 | python | def find(self, name):
"""Return the index of the toc entry with name NAME.
Return -1 for failure."""
for i, nm in enumerate(self.data):
if nm[-1] == name:
return i
return -1 | [
"def",
"find",
"(",
"self",
",",
"name",
")",
":",
"for",
"i",
",",
"nm",
"in",
"enumerate",
"(",
"self",
".",
"data",
")",
":",
"if",
"nm",
"[",
"-",
"1",
"]",
"==",
"name",
":",
"return",
"i",
"return",
"-",
"1"
] | Return the index of the toc entry with name NAME.
Return -1 for failure. | [
"Return",
"the",
"index",
"of",
"the",
"toc",
"entry",
"with",
"name",
"NAME",
"."
] | ff39b4e6d4e6efaf1f571cf0f2c0e0d7ab28c2d6 | https://github.com/bwhite/hadoopy/blob/ff39b4e6d4e6efaf1f571cf0f2c0e0d7ab28c2d6/hadoopy/thirdparty/pyinstaller/PyInstaller/loader/carchive.py#L102-L109 | train | 32,203 |
bwhite/hadoopy | hadoopy/thirdparty/pyinstaller/PyInstaller/loader/carchive.py | CArchive.checkmagic | def checkmagic(self):
"""Verify that self is a valid CArchive.
Magic signature is at end of the archive."""
#magic is at EOF; if we're embedded, we need to figure where that is
if self.len:
self.lib.seek(self.start+self.len, 0)
else:
self.lib.seek(0, 2)
filelen = self.lib.tell()
if self.len:
self.lib.seek(self.start+self.len-self.TRLLEN, 0)
else:
self.lib.seek(-self.TRLLEN, 2)
(magic, totallen, tocpos, toclen, pyvers) = struct.unpack(self.TRLSTRUCT,
self.lib.read(self.TRLLEN))
if magic != self.MAGIC:
raise RuntimeError("%s is not a valid %s archive file"
% (self.path, self.__class__.__name__))
self.pkgstart = filelen - totallen
if self.len:
if totallen != self.len or self.pkgstart != self.start:
raise RuntimeError, "Problem with embedded archive in %s" % self.path
self.tocpos, self.toclen = tocpos, toclen | python | def checkmagic(self):
"""Verify that self is a valid CArchive.
Magic signature is at end of the archive."""
#magic is at EOF; if we're embedded, we need to figure where that is
if self.len:
self.lib.seek(self.start+self.len, 0)
else:
self.lib.seek(0, 2)
filelen = self.lib.tell()
if self.len:
self.lib.seek(self.start+self.len-self.TRLLEN, 0)
else:
self.lib.seek(-self.TRLLEN, 2)
(magic, totallen, tocpos, toclen, pyvers) = struct.unpack(self.TRLSTRUCT,
self.lib.read(self.TRLLEN))
if magic != self.MAGIC:
raise RuntimeError("%s is not a valid %s archive file"
% (self.path, self.__class__.__name__))
self.pkgstart = filelen - totallen
if self.len:
if totallen != self.len or self.pkgstart != self.start:
raise RuntimeError, "Problem with embedded archive in %s" % self.path
self.tocpos, self.toclen = tocpos, toclen | [
"def",
"checkmagic",
"(",
"self",
")",
":",
"#magic is at EOF; if we're embedded, we need to figure where that is",
"if",
"self",
".",
"len",
":",
"self",
".",
"lib",
".",
"seek",
"(",
"self",
".",
"start",
"+",
"self",
".",
"len",
",",
"0",
")",
"else",
":"... | Verify that self is a valid CArchive.
Magic signature is at end of the archive. | [
"Verify",
"that",
"self",
"is",
"a",
"valid",
"CArchive",
"."
] | ff39b4e6d4e6efaf1f571cf0f2c0e0d7ab28c2d6 | https://github.com/bwhite/hadoopy/blob/ff39b4e6d4e6efaf1f571cf0f2c0e0d7ab28c2d6/hadoopy/thirdparty/pyinstaller/PyInstaller/loader/carchive.py#L130-L153 | train | 32,204 |
bwhite/hadoopy | hadoopy/thirdparty/pyinstaller/PyInstaller/loader/carchive.py | CArchive.loadtoc | def loadtoc(self):
"""Load the table of contents into memory."""
self.toc = self.TOCTMPLT()
self.lib.seek(self.pkgstart+self.tocpos)
tocstr = self.lib.read(self.toclen)
self.toc.frombinary(tocstr) | python | def loadtoc(self):
"""Load the table of contents into memory."""
self.toc = self.TOCTMPLT()
self.lib.seek(self.pkgstart+self.tocpos)
tocstr = self.lib.read(self.toclen)
self.toc.frombinary(tocstr) | [
"def",
"loadtoc",
"(",
"self",
")",
":",
"self",
".",
"toc",
"=",
"self",
".",
"TOCTMPLT",
"(",
")",
"self",
".",
"lib",
".",
"seek",
"(",
"self",
".",
"pkgstart",
"+",
"self",
".",
"tocpos",
")",
"tocstr",
"=",
"self",
".",
"lib",
".",
"read",
... | Load the table of contents into memory. | [
"Load",
"the",
"table",
"of",
"contents",
"into",
"memory",
"."
] | ff39b4e6d4e6efaf1f571cf0f2c0e0d7ab28c2d6 | https://github.com/bwhite/hadoopy/blob/ff39b4e6d4e6efaf1f571cf0f2c0e0d7ab28c2d6/hadoopy/thirdparty/pyinstaller/PyInstaller/loader/carchive.py#L155-L160 | train | 32,205 |
bwhite/hadoopy | hadoopy/thirdparty/pyinstaller/PyInstaller/loader/carchive.py | CArchive.extract | def extract(self, name):
"""Get the contents of an entry.
NAME is an entry name.
Return the tuple (ispkg, contents).
For non-Python resoures, ispkg is meaningless (and 0).
Used by the import mechanism."""
if type(name) == type(''):
ndx = self.toc.find(name)
if ndx == -1:
return None
else:
ndx = name
(dpos, dlen, ulen, flag, typcd, nm) = self.toc.get(ndx)
self.lib.seek(self.pkgstart+dpos)
rslt = self.lib.read(dlen)
if flag == 2:
global AES
import AES
key = rslt[:32]
# Note: keep this in sync with bootloader's code
rslt = AES.new(key, AES.MODE_CFB, "\0"*AES.block_size).decrypt(rslt[32:])
if flag == 1 or flag == 2:
rslt = zlib.decompress(rslt)
if typcd == 'M':
return (1, rslt)
return (0, rslt) | python | def extract(self, name):
"""Get the contents of an entry.
NAME is an entry name.
Return the tuple (ispkg, contents).
For non-Python resoures, ispkg is meaningless (and 0).
Used by the import mechanism."""
if type(name) == type(''):
ndx = self.toc.find(name)
if ndx == -1:
return None
else:
ndx = name
(dpos, dlen, ulen, flag, typcd, nm) = self.toc.get(ndx)
self.lib.seek(self.pkgstart+dpos)
rslt = self.lib.read(dlen)
if flag == 2:
global AES
import AES
key = rslt[:32]
# Note: keep this in sync with bootloader's code
rslt = AES.new(key, AES.MODE_CFB, "\0"*AES.block_size).decrypt(rslt[32:])
if flag == 1 or flag == 2:
rslt = zlib.decompress(rslt)
if typcd == 'M':
return (1, rslt)
return (0, rslt) | [
"def",
"extract",
"(",
"self",
",",
"name",
")",
":",
"if",
"type",
"(",
"name",
")",
"==",
"type",
"(",
"''",
")",
":",
"ndx",
"=",
"self",
".",
"toc",
".",
"find",
"(",
"name",
")",
"if",
"ndx",
"==",
"-",
"1",
":",
"return",
"None",
"else"... | Get the contents of an entry.
NAME is an entry name.
Return the tuple (ispkg, contents).
For non-Python resoures, ispkg is meaningless (and 0).
Used by the import mechanism. | [
"Get",
"the",
"contents",
"of",
"an",
"entry",
"."
] | ff39b4e6d4e6efaf1f571cf0f2c0e0d7ab28c2d6 | https://github.com/bwhite/hadoopy/blob/ff39b4e6d4e6efaf1f571cf0f2c0e0d7ab28c2d6/hadoopy/thirdparty/pyinstaller/PyInstaller/loader/carchive.py#L162-L188 | train | 32,206 |
bwhite/hadoopy | hadoopy/thirdparty/pyinstaller/PyInstaller/loader/carchive.py | CArchive.contents | def contents(self):
"""Return the names of the entries"""
rslt = []
for (dpos, dlen, ulen, flag, typcd, nm) in self.toc:
rslt.append(nm)
return rslt | python | def contents(self):
"""Return the names of the entries"""
rslt = []
for (dpos, dlen, ulen, flag, typcd, nm) in self.toc:
rslt.append(nm)
return rslt | [
"def",
"contents",
"(",
"self",
")",
":",
"rslt",
"=",
"[",
"]",
"for",
"(",
"dpos",
",",
"dlen",
",",
"ulen",
",",
"flag",
",",
"typcd",
",",
"nm",
")",
"in",
"self",
".",
"toc",
":",
"rslt",
".",
"append",
"(",
"nm",
")",
"return",
"rslt"
] | Return the names of the entries | [
"Return",
"the",
"names",
"of",
"the",
"entries"
] | ff39b4e6d4e6efaf1f571cf0f2c0e0d7ab28c2d6 | https://github.com/bwhite/hadoopy/blob/ff39b4e6d4e6efaf1f571cf0f2c0e0d7ab28c2d6/hadoopy/thirdparty/pyinstaller/PyInstaller/loader/carchive.py#L190-L195 | train | 32,207 |
bwhite/hadoopy | hadoopy/thirdparty/pyinstaller/PyInstaller/loader/carchive.py | CArchive.add | def add(self, entry):
"""Add an ENTRY to the CArchive.
ENTRY must have:
entry[0] is name (under which it will be saved).
entry[1] is fullpathname of the file.
entry[2] is a flag for it's storage format (0==uncompressed,
1==compressed)
entry[3] is the entry's type code.
Version 5:
If the type code is 'o':
entry[0] is the runtime option
eg: v (meaning verbose imports)
u (menaing unbuffered)
W arg (warning option arg)
s (meaning do site.py processing."""
(nm, pathnm, flag, typcd) = entry[:4]
# version 5 - allow type 'o' = runtime option
try:
if typcd in ('o', 'd'):
s = ''
flag = 0
elif typcd == 's':
# If it's a source code file, add \0 terminator as it will be
# executed as-is by the bootloader.
s = open(pathnm, 'rU').read()
s = s + '\n\0'
else:
s = open(pathnm, 'rb').read()
except IOError:
print "Cannot find ('%s', '%s', %s, '%s')" % (nm, pathnm, flag, typcd)
raise
ulen = len(s)
assert flag in range(3)
if flag == 1 or flag == 2:
s = zlib.compress(s, self.LEVEL)
if flag == 2:
global AES
import AES, Crypt
key = Crypt.gen_random_key(32)
# Note: keep this in sync with bootloader's code
s = key + AES.new(key, AES.MODE_CFB, "\0"*AES.block_size).encrypt(s)
dlen = len(s)
where = self.lib.tell()
if typcd == 'm':
if pathnm.find('.__init__.py') > -1:
typcd = 'M'
self.toc.add(where, dlen, ulen, flag, typcd, nm)
self.lib.write(s) | python | def add(self, entry):
"""Add an ENTRY to the CArchive.
ENTRY must have:
entry[0] is name (under which it will be saved).
entry[1] is fullpathname of the file.
entry[2] is a flag for it's storage format (0==uncompressed,
1==compressed)
entry[3] is the entry's type code.
Version 5:
If the type code is 'o':
entry[0] is the runtime option
eg: v (meaning verbose imports)
u (menaing unbuffered)
W arg (warning option arg)
s (meaning do site.py processing."""
(nm, pathnm, flag, typcd) = entry[:4]
# version 5 - allow type 'o' = runtime option
try:
if typcd in ('o', 'd'):
s = ''
flag = 0
elif typcd == 's':
# If it's a source code file, add \0 terminator as it will be
# executed as-is by the bootloader.
s = open(pathnm, 'rU').read()
s = s + '\n\0'
else:
s = open(pathnm, 'rb').read()
except IOError:
print "Cannot find ('%s', '%s', %s, '%s')" % (nm, pathnm, flag, typcd)
raise
ulen = len(s)
assert flag in range(3)
if flag == 1 or flag == 2:
s = zlib.compress(s, self.LEVEL)
if flag == 2:
global AES
import AES, Crypt
key = Crypt.gen_random_key(32)
# Note: keep this in sync with bootloader's code
s = key + AES.new(key, AES.MODE_CFB, "\0"*AES.block_size).encrypt(s)
dlen = len(s)
where = self.lib.tell()
if typcd == 'm':
if pathnm.find('.__init__.py') > -1:
typcd = 'M'
self.toc.add(where, dlen, ulen, flag, typcd, nm)
self.lib.write(s) | [
"def",
"add",
"(",
"self",
",",
"entry",
")",
":",
"(",
"nm",
",",
"pathnm",
",",
"flag",
",",
"typcd",
")",
"=",
"entry",
"[",
":",
"4",
"]",
"# version 5 - allow type 'o' = runtime option",
"try",
":",
"if",
"typcd",
"in",
"(",
"'o'",
",",
"'d'",
"... | Add an ENTRY to the CArchive.
ENTRY must have:
entry[0] is name (under which it will be saved).
entry[1] is fullpathname of the file.
entry[2] is a flag for it's storage format (0==uncompressed,
1==compressed)
entry[3] is the entry's type code.
Version 5:
If the type code is 'o':
entry[0] is the runtime option
eg: v (meaning verbose imports)
u (menaing unbuffered)
W arg (warning option arg)
s (meaning do site.py processing. | [
"Add",
"an",
"ENTRY",
"to",
"the",
"CArchive",
"."
] | ff39b4e6d4e6efaf1f571cf0f2c0e0d7ab28c2d6 | https://github.com/bwhite/hadoopy/blob/ff39b4e6d4e6efaf1f571cf0f2c0e0d7ab28c2d6/hadoopy/thirdparty/pyinstaller/PyInstaller/loader/carchive.py#L197-L245 | train | 32,208 |
bwhite/hadoopy | hadoopy/thirdparty/pyinstaller/PyInstaller/loader/carchive.py | CArchive.save_toc | def save_toc(self, tocpos):
"""Save the table of contents to disk."""
self.tocpos = tocpos
tocstr = self.toc.tobinary()
self.toclen = len(tocstr)
self.lib.write(tocstr) | python | def save_toc(self, tocpos):
"""Save the table of contents to disk."""
self.tocpos = tocpos
tocstr = self.toc.tobinary()
self.toclen = len(tocstr)
self.lib.write(tocstr) | [
"def",
"save_toc",
"(",
"self",
",",
"tocpos",
")",
":",
"self",
".",
"tocpos",
"=",
"tocpos",
"tocstr",
"=",
"self",
".",
"toc",
".",
"tobinary",
"(",
")",
"self",
".",
"toclen",
"=",
"len",
"(",
"tocstr",
")",
"self",
".",
"lib",
".",
"write",
... | Save the table of contents to disk. | [
"Save",
"the",
"table",
"of",
"contents",
"to",
"disk",
"."
] | ff39b4e6d4e6efaf1f571cf0f2c0e0d7ab28c2d6 | https://github.com/bwhite/hadoopy/blob/ff39b4e6d4e6efaf1f571cf0f2c0e0d7ab28c2d6/hadoopy/thirdparty/pyinstaller/PyInstaller/loader/carchive.py#L247-L252 | train | 32,209 |
bwhite/hadoopy | hadoopy/thirdparty/pyinstaller/PyInstaller/loader/carchive.py | CArchive.save_trailer | def save_trailer(self, tocpos):
"""Save the trailer to disk.
CArchives can be opened from the end - the trailer points
back to the start. """
totallen = tocpos + self.toclen + self.TRLLEN
pyvers = sys.version_info[0]*10 + sys.version_info[1]
trl = struct.pack(self.TRLSTRUCT, self.MAGIC, totallen,
tocpos, self.toclen, pyvers)
self.lib.write(trl) | python | def save_trailer(self, tocpos):
"""Save the trailer to disk.
CArchives can be opened from the end - the trailer points
back to the start. """
totallen = tocpos + self.toclen + self.TRLLEN
pyvers = sys.version_info[0]*10 + sys.version_info[1]
trl = struct.pack(self.TRLSTRUCT, self.MAGIC, totallen,
tocpos, self.toclen, pyvers)
self.lib.write(trl) | [
"def",
"save_trailer",
"(",
"self",
",",
"tocpos",
")",
":",
"totallen",
"=",
"tocpos",
"+",
"self",
".",
"toclen",
"+",
"self",
".",
"TRLLEN",
"pyvers",
"=",
"sys",
".",
"version_info",
"[",
"0",
"]",
"*",
"10",
"+",
"sys",
".",
"version_info",
"[",... | Save the trailer to disk.
CArchives can be opened from the end - the trailer points
back to the start. | [
"Save",
"the",
"trailer",
"to",
"disk",
"."
] | ff39b4e6d4e6efaf1f571cf0f2c0e0d7ab28c2d6 | https://github.com/bwhite/hadoopy/blob/ff39b4e6d4e6efaf1f571cf0f2c0e0d7ab28c2d6/hadoopy/thirdparty/pyinstaller/PyInstaller/loader/carchive.py#L254-L263 | train | 32,210 |
bwhite/hadoopy | hadoopy/thirdparty/pyinstaller/PyInstaller/loader/carchive.py | CArchive.openEmbedded | def openEmbedded(self, name):
"""Open a CArchive of name NAME embedded within this CArchive."""
ndx = self.toc.find(name)
if ndx == -1:
raise KeyError, "Member '%s' not found in %s" % (name, self.path)
(dpos, dlen, ulen, flag, typcd, nm) = self.toc.get(ndx)
if flag:
raise ValueError, "Cannot open compressed archive %s in place"
return CArchive(self.path, self.pkgstart+dpos, dlen) | python | def openEmbedded(self, name):
"""Open a CArchive of name NAME embedded within this CArchive."""
ndx = self.toc.find(name)
if ndx == -1:
raise KeyError, "Member '%s' not found in %s" % (name, self.path)
(dpos, dlen, ulen, flag, typcd, nm) = self.toc.get(ndx)
if flag:
raise ValueError, "Cannot open compressed archive %s in place"
return CArchive(self.path, self.pkgstart+dpos, dlen) | [
"def",
"openEmbedded",
"(",
"self",
",",
"name",
")",
":",
"ndx",
"=",
"self",
".",
"toc",
".",
"find",
"(",
"name",
")",
"if",
"ndx",
"==",
"-",
"1",
":",
"raise",
"KeyError",
",",
"\"Member '%s' not found in %s\"",
"%",
"(",
"name",
",",
"self",
".... | Open a CArchive of name NAME embedded within this CArchive. | [
"Open",
"a",
"CArchive",
"of",
"name",
"NAME",
"embedded",
"within",
"this",
"CArchive",
"."
] | ff39b4e6d4e6efaf1f571cf0f2c0e0d7ab28c2d6 | https://github.com/bwhite/hadoopy/blob/ff39b4e6d4e6efaf1f571cf0f2c0e0d7ab28c2d6/hadoopy/thirdparty/pyinstaller/PyInstaller/loader/carchive.py#L265-L273 | train | 32,211 |
bwhite/hadoopy | hadoopy/_runner.py | _find_hstreaming | def _find_hstreaming():
"""Finds the whole path to the hadoop streaming jar.
If the environmental var HADOOP_HOME is specified, then start the search
from there.
Returns:
Full path to the hadoop streaming jar if found, else return an empty
string.
"""
global WARNED_HADOOP_HOME, HADOOP_STREAMING_PATH_CACHE
if HADOOP_STREAMING_PATH_CACHE:
return HADOOP_STREAMING_PATH_CACHE
try:
search_root = os.environ['HADOOP_HOME']
except KeyError:
search_root = '/'
cmd = 'find %s -name hadoop*streaming*.jar' % (search_root)
p = subprocess.Popen(cmd.split(), stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
HADOOP_STREAMING_PATH_CACHE = p.communicate()[0].split('\n')[0]
if search_root == '/' and not WARNED_HADOOP_HOME:
WARNED_HADOOP_HOME = True
hadoop_home = HADOOP_STREAMING_PATH_CACHE[:HADOOP_STREAMING_PATH_CACHE.rfind('/contrib/')]
logging.warn('Set the HADOOP_HOME environmental variable to your hadoop path to improve performance. Put the following [export HADOOP_HOME="%s"] in ~/.bashrc' % hadoop_home)
return HADOOP_STREAMING_PATH_CACHE | python | def _find_hstreaming():
"""Finds the whole path to the hadoop streaming jar.
If the environmental var HADOOP_HOME is specified, then start the search
from there.
Returns:
Full path to the hadoop streaming jar if found, else return an empty
string.
"""
global WARNED_HADOOP_HOME, HADOOP_STREAMING_PATH_CACHE
if HADOOP_STREAMING_PATH_CACHE:
return HADOOP_STREAMING_PATH_CACHE
try:
search_root = os.environ['HADOOP_HOME']
except KeyError:
search_root = '/'
cmd = 'find %s -name hadoop*streaming*.jar' % (search_root)
p = subprocess.Popen(cmd.split(), stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
HADOOP_STREAMING_PATH_CACHE = p.communicate()[0].split('\n')[0]
if search_root == '/' and not WARNED_HADOOP_HOME:
WARNED_HADOOP_HOME = True
hadoop_home = HADOOP_STREAMING_PATH_CACHE[:HADOOP_STREAMING_PATH_CACHE.rfind('/contrib/')]
logging.warn('Set the HADOOP_HOME environmental variable to your hadoop path to improve performance. Put the following [export HADOOP_HOME="%s"] in ~/.bashrc' % hadoop_home)
return HADOOP_STREAMING_PATH_CACHE | [
"def",
"_find_hstreaming",
"(",
")",
":",
"global",
"WARNED_HADOOP_HOME",
",",
"HADOOP_STREAMING_PATH_CACHE",
"if",
"HADOOP_STREAMING_PATH_CACHE",
":",
"return",
"HADOOP_STREAMING_PATH_CACHE",
"try",
":",
"search_root",
"=",
"os",
".",
"environ",
"[",
"'HADOOP_HOME'",
"... | Finds the whole path to the hadoop streaming jar.
If the environmental var HADOOP_HOME is specified, then start the search
from there.
Returns:
Full path to the hadoop streaming jar if found, else return an empty
string. | [
"Finds",
"the",
"whole",
"path",
"to",
"the",
"hadoop",
"streaming",
"jar",
"."
] | ff39b4e6d4e6efaf1f571cf0f2c0e0d7ab28c2d6 | https://github.com/bwhite/hadoopy/blob/ff39b4e6d4e6efaf1f571cf0f2c0e0d7ab28c2d6/hadoopy/_runner.py#L38-L63 | train | 32,212 |
bwhite/hadoopy | hadoopy/_runner.py | _listeq_to_dict | def _listeq_to_dict(jobconfs):
"""Convert iterators of 'key=val' into a dictionary with later values taking priority."""
if not isinstance(jobconfs, dict):
jobconfs = dict(x.split('=', 1) for x in jobconfs)
return dict((str(k), str(v)) for k, v in jobconfs.items()) | python | def _listeq_to_dict(jobconfs):
"""Convert iterators of 'key=val' into a dictionary with later values taking priority."""
if not isinstance(jobconfs, dict):
jobconfs = dict(x.split('=', 1) for x in jobconfs)
return dict((str(k), str(v)) for k, v in jobconfs.items()) | [
"def",
"_listeq_to_dict",
"(",
"jobconfs",
")",
":",
"if",
"not",
"isinstance",
"(",
"jobconfs",
",",
"dict",
")",
":",
"jobconfs",
"=",
"dict",
"(",
"x",
".",
"split",
"(",
"'='",
",",
"1",
")",
"for",
"x",
"in",
"jobconfs",
")",
"return",
"dict",
... | Convert iterators of 'key=val' into a dictionary with later values taking priority. | [
"Convert",
"iterators",
"of",
"key",
"=",
"val",
"into",
"a",
"dictionary",
"with",
"later",
"values",
"taking",
"priority",
"."
] | ff39b4e6d4e6efaf1f571cf0f2c0e0d7ab28c2d6 | https://github.com/bwhite/hadoopy/blob/ff39b4e6d4e6efaf1f571cf0f2c0e0d7ab28c2d6/hadoopy/_runner.py#L66-L70 | train | 32,213 |
bwhite/hadoopy | hadoopy/_runner.py | launch_frozen | def launch_frozen(in_name, out_name, script_path, frozen_tar_path=None,
temp_path='_hadoopy_temp', cache=True, check_script=False,
**kw):
"""Freezes a script and then launches it.
This function will freeze your python program, and place it on HDFS
in 'temp_path'. It will not remove it afterwards as they are typically
small, you can easily reuse/debug them, and to avoid any risks involved
with removing the file.
:param in_name: Input path (string or list)
:param out_name: Output path
:param script_path: Path to the script (e.g., script.py)
:param frozen_tar_path: If not None, use this path to a previously frozen archive. You can get such a path from the return value of this function, it is particularly helpful in iterative programs.
:param cache: If True (default) then use previously frozen scripts. Cache is stored in memory (not persistent).
:param temp_path: HDFS path that we can use to store temporary files (default to _hadoopy_temp)
:param partitioner: If True, the partitioner is the value.
:param wait: If True, wait till the process is completed (default True) this is useful if you want to run multiple jobs concurrently by using the 'process' entry in the output.
:param files: Extra files (other than the script) (iterator). NOTE: Hadoop copies the files into working directory
:param jobconfs: Extra jobconf parameters (iterator)
:param cmdenvs: Extra cmdenv parameters (iterator)
:param hstreaming: The full hadoop streaming path to call.
:param name: Set the job name to this (default None, job name is the script name)
:param use_typedbytes: If True (default), use typedbytes IO.
:param use_seqoutput: True (default), output sequence file. If False, output is text.
:param use_autoinput: If True (default), sets the input format to auto.
:param config: If a string, set the hadoop config path
:param pipe: If true (default) then call user code through a pipe to isolate it and stop bugs when printing to stdout. See project docs.
:param python_cmd: The python command to use. The default is "python". Can be used to override the system default python, e.g. python_cmd = "python2.6"
:param num_mappers: The number of mappers to use, i.e. the argument given to 'numMapTasks'. If None, then do not specify this argument to hadoop streaming.
:param num_reducers: The number of reducers to use, i.e. the argument given to 'numReduceTasks'. If None, then do not specify this argument to hadoop streaming.
:param check_script: If True, then copy script and .py(c) files to a temporary directory and verify that it can be executed. This catches the majority of errors related to not included locally imported files. The default is False when using launch_frozen as the freeze process packages local files.
:rtype: Dictionary with some of the following entries (depending on options)
:returns: freeze_cmds: Freeze command(s) ran
:returns: frozen_tar_path: HDFS path to frozen file
:returns: hadoop_cmds: Hadoopy command(s) ran
:returns: process: subprocess.Popen object
:returns: output: Iterator of (key, value) pairs
:raises: subprocess.CalledProcessError: Hadoop error.
:raises: OSError: Hadoop streaming not found.
:raises: TypeError: Input types are not correct.
:raises: ValueError: Script not found
"""
if (('files' in kw and isinstance(kw['files'], (str, unicode))) or
('jobconfs' in kw and isinstance(kw['jobconfs'], (str, unicode))) or
('cmdenvs' in kw and isinstance(kw['cmdenvs'], (str, unicode)))):
raise TypeError('files, jobconfs, and cmdenvs must be iterators of strings and not strings!')
if 'jobconfs' in kw:
kw['jobconfs'] = _listeq_to_dict(kw['jobconfs'])
if 'cmdenvs' in kw:
kw['cmdenvs'] = _listeq_to_dict(kw['cmdenvs'])
cmds = []
if not frozen_tar_path:
freeze_out = hadoopy.freeze_script(script_path, temp_path=temp_path, cache=cache)
frozen_tar_path = freeze_out['frozen_tar_path']
cmds += freeze_out['cmds']
jobconfs = kw.get('jobconfs', {})
jobconfs['mapred.cache.archives'] = '%s#_frozen' % frozen_tar_path
jobconfs['mapreduce.job.cache.archives'] = '%s#_frozen' % frozen_tar_path
kw['copy_script'] = False
kw['add_python'] = False
kw['jobconfs'] = jobconfs
out = launch(in_name, out_name, script_path,
script_dir='_frozen', remove_ext=True, check_script=check_script,
make_executable=False, **kw)
out['freeze_cmds'] = cmds
out['frozen_tar_path'] = frozen_tar_path
return out | python | def launch_frozen(in_name, out_name, script_path, frozen_tar_path=None,
temp_path='_hadoopy_temp', cache=True, check_script=False,
**kw):
"""Freezes a script and then launches it.
This function will freeze your python program, and place it on HDFS
in 'temp_path'. It will not remove it afterwards as they are typically
small, you can easily reuse/debug them, and to avoid any risks involved
with removing the file.
:param in_name: Input path (string or list)
:param out_name: Output path
:param script_path: Path to the script (e.g., script.py)
:param frozen_tar_path: If not None, use this path to a previously frozen archive. You can get such a path from the return value of this function, it is particularly helpful in iterative programs.
:param cache: If True (default) then use previously frozen scripts. Cache is stored in memory (not persistent).
:param temp_path: HDFS path that we can use to store temporary files (default to _hadoopy_temp)
:param partitioner: If True, the partitioner is the value.
:param wait: If True, wait till the process is completed (default True) this is useful if you want to run multiple jobs concurrently by using the 'process' entry in the output.
:param files: Extra files (other than the script) (iterator). NOTE: Hadoop copies the files into working directory
:param jobconfs: Extra jobconf parameters (iterator)
:param cmdenvs: Extra cmdenv parameters (iterator)
:param hstreaming: The full hadoop streaming path to call.
:param name: Set the job name to this (default None, job name is the script name)
:param use_typedbytes: If True (default), use typedbytes IO.
:param use_seqoutput: True (default), output sequence file. If False, output is text.
:param use_autoinput: If True (default), sets the input format to auto.
:param config: If a string, set the hadoop config path
:param pipe: If true (default) then call user code through a pipe to isolate it and stop bugs when printing to stdout. See project docs.
:param python_cmd: The python command to use. The default is "python". Can be used to override the system default python, e.g. python_cmd = "python2.6"
:param num_mappers: The number of mappers to use, i.e. the argument given to 'numMapTasks'. If None, then do not specify this argument to hadoop streaming.
:param num_reducers: The number of reducers to use, i.e. the argument given to 'numReduceTasks'. If None, then do not specify this argument to hadoop streaming.
:param check_script: If True, then copy script and .py(c) files to a temporary directory and verify that it can be executed. This catches the majority of errors related to not included locally imported files. The default is False when using launch_frozen as the freeze process packages local files.
:rtype: Dictionary with some of the following entries (depending on options)
:returns: freeze_cmds: Freeze command(s) ran
:returns: frozen_tar_path: HDFS path to frozen file
:returns: hadoop_cmds: Hadoopy command(s) ran
:returns: process: subprocess.Popen object
:returns: output: Iterator of (key, value) pairs
:raises: subprocess.CalledProcessError: Hadoop error.
:raises: OSError: Hadoop streaming not found.
:raises: TypeError: Input types are not correct.
:raises: ValueError: Script not found
"""
if (('files' in kw and isinstance(kw['files'], (str, unicode))) or
('jobconfs' in kw and isinstance(kw['jobconfs'], (str, unicode))) or
('cmdenvs' in kw and isinstance(kw['cmdenvs'], (str, unicode)))):
raise TypeError('files, jobconfs, and cmdenvs must be iterators of strings and not strings!')
if 'jobconfs' in kw:
kw['jobconfs'] = _listeq_to_dict(kw['jobconfs'])
if 'cmdenvs' in kw:
kw['cmdenvs'] = _listeq_to_dict(kw['cmdenvs'])
cmds = []
if not frozen_tar_path:
freeze_out = hadoopy.freeze_script(script_path, temp_path=temp_path, cache=cache)
frozen_tar_path = freeze_out['frozen_tar_path']
cmds += freeze_out['cmds']
jobconfs = kw.get('jobconfs', {})
jobconfs['mapred.cache.archives'] = '%s#_frozen' % frozen_tar_path
jobconfs['mapreduce.job.cache.archives'] = '%s#_frozen' % frozen_tar_path
kw['copy_script'] = False
kw['add_python'] = False
kw['jobconfs'] = jobconfs
out = launch(in_name, out_name, script_path,
script_dir='_frozen', remove_ext=True, check_script=check_script,
make_executable=False, **kw)
out['freeze_cmds'] = cmds
out['frozen_tar_path'] = frozen_tar_path
return out | [
"def",
"launch_frozen",
"(",
"in_name",
",",
"out_name",
",",
"script_path",
",",
"frozen_tar_path",
"=",
"None",
",",
"temp_path",
"=",
"'_hadoopy_temp'",
",",
"cache",
"=",
"True",
",",
"check_script",
"=",
"False",
",",
"*",
"*",
"kw",
")",
":",
"if",
... | Freezes a script and then launches it.
This function will freeze your python program, and place it on HDFS
in 'temp_path'. It will not remove it afterwards as they are typically
small, you can easily reuse/debug them, and to avoid any risks involved
with removing the file.
:param in_name: Input path (string or list)
:param out_name: Output path
:param script_path: Path to the script (e.g., script.py)
:param frozen_tar_path: If not None, use this path to a previously frozen archive. You can get such a path from the return value of this function, it is particularly helpful in iterative programs.
:param cache: If True (default) then use previously frozen scripts. Cache is stored in memory (not persistent).
:param temp_path: HDFS path that we can use to store temporary files (default to _hadoopy_temp)
:param partitioner: If True, the partitioner is the value.
:param wait: If True, wait till the process is completed (default True) this is useful if you want to run multiple jobs concurrently by using the 'process' entry in the output.
:param files: Extra files (other than the script) (iterator). NOTE: Hadoop copies the files into working directory
:param jobconfs: Extra jobconf parameters (iterator)
:param cmdenvs: Extra cmdenv parameters (iterator)
:param hstreaming: The full hadoop streaming path to call.
:param name: Set the job name to this (default None, job name is the script name)
:param use_typedbytes: If True (default), use typedbytes IO.
:param use_seqoutput: True (default), output sequence file. If False, output is text.
:param use_autoinput: If True (default), sets the input format to auto.
:param config: If a string, set the hadoop config path
:param pipe: If true (default) then call user code through a pipe to isolate it and stop bugs when printing to stdout. See project docs.
:param python_cmd: The python command to use. The default is "python". Can be used to override the system default python, e.g. python_cmd = "python2.6"
:param num_mappers: The number of mappers to use, i.e. the argument given to 'numMapTasks'. If None, then do not specify this argument to hadoop streaming.
:param num_reducers: The number of reducers to use, i.e. the argument given to 'numReduceTasks'. If None, then do not specify this argument to hadoop streaming.
:param check_script: If True, then copy script and .py(c) files to a temporary directory and verify that it can be executed. This catches the majority of errors related to not included locally imported files. The default is False when using launch_frozen as the freeze process packages local files.
:rtype: Dictionary with some of the following entries (depending on options)
:returns: freeze_cmds: Freeze command(s) ran
:returns: frozen_tar_path: HDFS path to frozen file
:returns: hadoop_cmds: Hadoopy command(s) ran
:returns: process: subprocess.Popen object
:returns: output: Iterator of (key, value) pairs
:raises: subprocess.CalledProcessError: Hadoop error.
:raises: OSError: Hadoop streaming not found.
:raises: TypeError: Input types are not correct.
:raises: ValueError: Script not found | [
"Freezes",
"a",
"script",
"and",
"then",
"launches",
"it",
"."
] | ff39b4e6d4e6efaf1f571cf0f2c0e0d7ab28c2d6 | https://github.com/bwhite/hadoopy/blob/ff39b4e6d4e6efaf1f571cf0f2c0e0d7ab28c2d6/hadoopy/_runner.py#L341-L410 | train | 32,214 |
bwhite/hadoopy | hadoopy/thirdparty/pyinstaller/PyInstaller/depend/dylib.py | include_library | def include_library(libname):
"""Check if a dynamic library should be included with application or not."""
# For configuration phase we need to have exclude / include lists None
# so these checking is skipped and library gets included.
if exclude_list:
if exclude_list.search(libname) and not include_list.search(libname):
# Library is excluded and is not overriden by include list.
# It should be then excluded.
return False
else:
# Include library
return True
else:
# By default include library.
return True | python | def include_library(libname):
"""Check if a dynamic library should be included with application or not."""
# For configuration phase we need to have exclude / include lists None
# so these checking is skipped and library gets included.
if exclude_list:
if exclude_list.search(libname) and not include_list.search(libname):
# Library is excluded and is not overriden by include list.
# It should be then excluded.
return False
else:
# Include library
return True
else:
# By default include library.
return True | [
"def",
"include_library",
"(",
"libname",
")",
":",
"# For configuration phase we need to have exclude / include lists None",
"# so these checking is skipped and library gets included.",
"if",
"exclude_list",
":",
"if",
"exclude_list",
".",
"search",
"(",
"libname",
")",
"and",
... | Check if a dynamic library should be included with application or not. | [
"Check",
"if",
"a",
"dynamic",
"library",
"should",
"be",
"included",
"with",
"application",
"or",
"not",
"."
] | ff39b4e6d4e6efaf1f571cf0f2c0e0d7ab28c2d6 | https://github.com/bwhite/hadoopy/blob/ff39b4e6d4e6efaf1f571cf0f2c0e0d7ab28c2d6/hadoopy/thirdparty/pyinstaller/PyInstaller/depend/dylib.py#L158-L172 | train | 32,215 |
bwhite/hadoopy | hadoopy/thirdparty/pyinstaller/PyInstaller/depend/dylib.py | mac_set_relative_dylib_deps | def mac_set_relative_dylib_deps(libname):
"""
On Mac OS X set relative paths to dynamic library dependencies of `libname`.
Relative paths allow to avoid using environment variable DYLD_LIBRARY_PATH.
There are known some issues with DYLD_LIBRARY_PATH. Relative paths is
more flexible mechanism.
Current location of dependend libraries is derived from the location
of the executable (paths start with '@executable_path').
@executable_path or @loader_path fail in some situations
(@loader_path - qt4 plugins, @executable_path -
Python built-in hashlib module).
"""
from PyInstaller.lib.macholib import util
from PyInstaller.lib.macholib.MachO import MachO
# Ignore bootloader otherwise PyInstaller fails with exception like
# 'ValueError: total_size > low_offset (288 > 0)'
if os.path.basename(libname) in _BOOTLOADER_FNAMES:
return
def match_func(pth):
"""For system libraries is still used absolute path. It is unchanged."""
# Match non system dynamic libraries.
if not util.in_system_path(pth):
# Use relative path to dependend dynamic libraries bases on
# location of the executable.
return os.path.join('@executable_path', os.path.basename(pth))
# Rewrite mach headers with @executable_path.
dll = MachO(libname)
dll.rewriteLoadCommands(match_func)
# Write changes into file.
# Write code is based on macholib example.
try:
f = open(dll.filename, 'rb+')
for header in dll.headers:
f.seek(0)
dll.write(f)
f.seek(0, 2)
f.flush()
f.close()
except Exception:
pass | python | def mac_set_relative_dylib_deps(libname):
"""
On Mac OS X set relative paths to dynamic library dependencies of `libname`.
Relative paths allow to avoid using environment variable DYLD_LIBRARY_PATH.
There are known some issues with DYLD_LIBRARY_PATH. Relative paths is
more flexible mechanism.
Current location of dependend libraries is derived from the location
of the executable (paths start with '@executable_path').
@executable_path or @loader_path fail in some situations
(@loader_path - qt4 plugins, @executable_path -
Python built-in hashlib module).
"""
from PyInstaller.lib.macholib import util
from PyInstaller.lib.macholib.MachO import MachO
# Ignore bootloader otherwise PyInstaller fails with exception like
# 'ValueError: total_size > low_offset (288 > 0)'
if os.path.basename(libname) in _BOOTLOADER_FNAMES:
return
def match_func(pth):
"""For system libraries is still used absolute path. It is unchanged."""
# Match non system dynamic libraries.
if not util.in_system_path(pth):
# Use relative path to dependend dynamic libraries bases on
# location of the executable.
return os.path.join('@executable_path', os.path.basename(pth))
# Rewrite mach headers with @executable_path.
dll = MachO(libname)
dll.rewriteLoadCommands(match_func)
# Write changes into file.
# Write code is based on macholib example.
try:
f = open(dll.filename, 'rb+')
for header in dll.headers:
f.seek(0)
dll.write(f)
f.seek(0, 2)
f.flush()
f.close()
except Exception:
pass | [
"def",
"mac_set_relative_dylib_deps",
"(",
"libname",
")",
":",
"from",
"PyInstaller",
".",
"lib",
".",
"macholib",
"import",
"util",
"from",
"PyInstaller",
".",
"lib",
".",
"macholib",
".",
"MachO",
"import",
"MachO",
"# Ignore bootloader otherwise PyInstaller fails ... | On Mac OS X set relative paths to dynamic library dependencies of `libname`.
Relative paths allow to avoid using environment variable DYLD_LIBRARY_PATH.
There are known some issues with DYLD_LIBRARY_PATH. Relative paths is
more flexible mechanism.
Current location of dependend libraries is derived from the location
of the executable (paths start with '@executable_path').
@executable_path or @loader_path fail in some situations
(@loader_path - qt4 plugins, @executable_path -
Python built-in hashlib module). | [
"On",
"Mac",
"OS",
"X",
"set",
"relative",
"paths",
"to",
"dynamic",
"library",
"dependencies",
"of",
"libname",
"."
] | ff39b4e6d4e6efaf1f571cf0f2c0e0d7ab28c2d6 | https://github.com/bwhite/hadoopy/blob/ff39b4e6d4e6efaf1f571cf0f2c0e0d7ab28c2d6/hadoopy/thirdparty/pyinstaller/PyInstaller/depend/dylib.py#L175-L222 | train | 32,216 |
roycehaynes/scrapy-rabbitmq | scrapy_rabbitmq/connection.py | from_settings | def from_settings(settings):
""" Factory method that returns an instance of channel
:param str connection_type: This field can be `blocking`
`asyncore`, `libev`, `select`, `tornado`, or `twisted`
See pika documentation for more details:
TODO: put pika url regarding connection type
Parameters is a dictionary that can
include the following values:
:param str host: Hostname or IP Address to connect to
:param int port: TCP port to connect to
:param str virtual_host: RabbitMQ virtual host to use
:param pika.credentials.Credentials credentials: auth credentials
:param int channel_max: Maximum number of channels to allow
:param int frame_max: The maximum byte size for an AMQP frame
:param int heartbeat_interval: How often to send heartbeats
:param bool ssl: Enable SSL
:param dict ssl_options: Arguments passed to ssl.wrap_socket as
:param int connection_attempts: Maximum number of retry attempts
:param int|float retry_delay: Time to wait in seconds, before the next
:param int|float socket_timeout: Use for high latency networks
:param str locale: Set the locale value
:param bool backpressure_detection: Toggle backpressure detection
:return: Channel object
"""
connection_type = settings.get('RABBITMQ_CONNECTION_TYPE', RABBITMQ_CONNECTION_TYPE)
queue_name = settings.get('RABBITMQ_QUEUE_NAME', RABBITMQ_QUEUE_NAME)
connection_parameters = settings.get('RABBITMQ_CONNECTION_PARAMETERS', RABBITMQ_CONNECTION_PARAMETERS)
connection = {
'blocking': pika.BlockingConnection,
'libev': pika.LibevConnection,
'select': pika.SelectConnection,
'tornado': pika.TornadoConnection,
'twisted': pika.TwistedConnection
}[connection_type](pika.ConnectionParameters(**connection_parameters))
channel = connection.channel()
channel.queue_declare(queue=queue_name, durable=True)
return channel | python | def from_settings(settings):
""" Factory method that returns an instance of channel
:param str connection_type: This field can be `blocking`
`asyncore`, `libev`, `select`, `tornado`, or `twisted`
See pika documentation for more details:
TODO: put pika url regarding connection type
Parameters is a dictionary that can
include the following values:
:param str host: Hostname or IP Address to connect to
:param int port: TCP port to connect to
:param str virtual_host: RabbitMQ virtual host to use
:param pika.credentials.Credentials credentials: auth credentials
:param int channel_max: Maximum number of channels to allow
:param int frame_max: The maximum byte size for an AMQP frame
:param int heartbeat_interval: How often to send heartbeats
:param bool ssl: Enable SSL
:param dict ssl_options: Arguments passed to ssl.wrap_socket as
:param int connection_attempts: Maximum number of retry attempts
:param int|float retry_delay: Time to wait in seconds, before the next
:param int|float socket_timeout: Use for high latency networks
:param str locale: Set the locale value
:param bool backpressure_detection: Toggle backpressure detection
:return: Channel object
"""
connection_type = settings.get('RABBITMQ_CONNECTION_TYPE', RABBITMQ_CONNECTION_TYPE)
queue_name = settings.get('RABBITMQ_QUEUE_NAME', RABBITMQ_QUEUE_NAME)
connection_parameters = settings.get('RABBITMQ_CONNECTION_PARAMETERS', RABBITMQ_CONNECTION_PARAMETERS)
connection = {
'blocking': pika.BlockingConnection,
'libev': pika.LibevConnection,
'select': pika.SelectConnection,
'tornado': pika.TornadoConnection,
'twisted': pika.TwistedConnection
}[connection_type](pika.ConnectionParameters(**connection_parameters))
channel = connection.channel()
channel.queue_declare(queue=queue_name, durable=True)
return channel | [
"def",
"from_settings",
"(",
"settings",
")",
":",
"connection_type",
"=",
"settings",
".",
"get",
"(",
"'RABBITMQ_CONNECTION_TYPE'",
",",
"RABBITMQ_CONNECTION_TYPE",
")",
"queue_name",
"=",
"settings",
".",
"get",
"(",
"'RABBITMQ_QUEUE_NAME'",
",",
"RABBITMQ_QUEUE_NA... | Factory method that returns an instance of channel
:param str connection_type: This field can be `blocking`
`asyncore`, `libev`, `select`, `tornado`, or `twisted`
See pika documentation for more details:
TODO: put pika url regarding connection type
Parameters is a dictionary that can
include the following values:
:param str host: Hostname or IP Address to connect to
:param int port: TCP port to connect to
:param str virtual_host: RabbitMQ virtual host to use
:param pika.credentials.Credentials credentials: auth credentials
:param int channel_max: Maximum number of channels to allow
:param int frame_max: The maximum byte size for an AMQP frame
:param int heartbeat_interval: How often to send heartbeats
:param bool ssl: Enable SSL
:param dict ssl_options: Arguments passed to ssl.wrap_socket as
:param int connection_attempts: Maximum number of retry attempts
:param int|float retry_delay: Time to wait in seconds, before the next
:param int|float socket_timeout: Use for high latency networks
:param str locale: Set the locale value
:param bool backpressure_detection: Toggle backpressure detection
:return: Channel object | [
"Factory",
"method",
"that",
"returns",
"an",
"instance",
"of",
"channel"
] | 5053b500aff1d6679cc0e3d3e338c2bf74fadc22 | https://github.com/roycehaynes/scrapy-rabbitmq/blob/5053b500aff1d6679cc0e3d3e338c2bf74fadc22/scrapy_rabbitmq/connection.py#L14-L59 | train | 32,217 |
roycehaynes/scrapy-rabbitmq | scrapy_rabbitmq/spiders.py | RabbitMQMixin.setup_rabbitmq | def setup_rabbitmq(self):
""" Setup RabbitMQ connection.
Call this method after spider has set its crawler object.
:return: None
"""
if not self.rabbitmq_key:
self.rabbitmq_key = '{}:start_urls'.format(self.name)
self.server = connection.from_settings(self.crawler.settings)
self.crawler.signals.connect(self.spider_idle, signal=signals.spider_idle)
self.crawler.signals.connect(self.item_scraped, signal=signals.item_scraped) | python | def setup_rabbitmq(self):
""" Setup RabbitMQ connection.
Call this method after spider has set its crawler object.
:return: None
"""
if not self.rabbitmq_key:
self.rabbitmq_key = '{}:start_urls'.format(self.name)
self.server = connection.from_settings(self.crawler.settings)
self.crawler.signals.connect(self.spider_idle, signal=signals.spider_idle)
self.crawler.signals.connect(self.item_scraped, signal=signals.item_scraped) | [
"def",
"setup_rabbitmq",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"rabbitmq_key",
":",
"self",
".",
"rabbitmq_key",
"=",
"'{}:start_urls'",
".",
"format",
"(",
"self",
".",
"name",
")",
"self",
".",
"server",
"=",
"connection",
".",
"from_settings"... | Setup RabbitMQ connection.
Call this method after spider has set its crawler object.
:return: None | [
"Setup",
"RabbitMQ",
"connection",
"."
] | 5053b500aff1d6679cc0e3d3e338c2bf74fadc22 | https://github.com/roycehaynes/scrapy-rabbitmq/blob/5053b500aff1d6679cc0e3d3e338c2bf74fadc22/scrapy_rabbitmq/spiders.py#L19-L31 | train | 32,218 |
roycehaynes/scrapy-rabbitmq | scrapy_rabbitmq/spiders.py | RabbitMQMixin.schedule_next_request | def schedule_next_request(self):
""" Schedules a request, if exists.
:return:
"""
req = self.next_request()
if req:
self.crawler.engine.crawl(req, spider=self) | python | def schedule_next_request(self):
""" Schedules a request, if exists.
:return:
"""
req = self.next_request()
if req:
self.crawler.engine.crawl(req, spider=self) | [
"def",
"schedule_next_request",
"(",
"self",
")",
":",
"req",
"=",
"self",
".",
"next_request",
"(",
")",
"if",
"req",
":",
"self",
".",
"crawler",
".",
"engine",
".",
"crawl",
"(",
"req",
",",
"spider",
"=",
"self",
")"
] | Schedules a request, if exists.
:return: | [
"Schedules",
"a",
"request",
"if",
"exists",
"."
] | 5053b500aff1d6679cc0e3d3e338c2bf74fadc22 | https://github.com/roycehaynes/scrapy-rabbitmq/blob/5053b500aff1d6679cc0e3d3e338c2bf74fadc22/scrapy_rabbitmq/spiders.py#L46-L54 | train | 32,219 |
bwhite/hadoopy | hadoopy/thirdparty/pyinstaller/PyInstaller/lib/pefile.py | set_flags | def set_flags(obj, flag_field, flags):
"""Will process the flags and set attributes in the object accordingly.
The object "obj" will gain attributes named after the flags provided in
"flags" and valued True/False, matching the results of applying each
flag value from "flags" to flag_field.
"""
for flag in flags:
if flag[1] & flag_field:
#setattr(obj, flag[0], True)
obj.__dict__[flag[0]] = True
else:
#setattr(obj, flag[0], False)
obj.__dict__[flag[0]] = False | python | def set_flags(obj, flag_field, flags):
"""Will process the flags and set attributes in the object accordingly.
The object "obj" will gain attributes named after the flags provided in
"flags" and valued True/False, matching the results of applying each
flag value from "flags" to flag_field.
"""
for flag in flags:
if flag[1] & flag_field:
#setattr(obj, flag[0], True)
obj.__dict__[flag[0]] = True
else:
#setattr(obj, flag[0], False)
obj.__dict__[flag[0]] = False | [
"def",
"set_flags",
"(",
"obj",
",",
"flag_field",
",",
"flags",
")",
":",
"for",
"flag",
"in",
"flags",
":",
"if",
"flag",
"[",
"1",
"]",
"&",
"flag_field",
":",
"#setattr(obj, flag[0], True)",
"obj",
".",
"__dict__",
"[",
"flag",
"[",
"0",
"]",
"]",
... | Will process the flags and set attributes in the object accordingly.
The object "obj" will gain attributes named after the flags provided in
"flags" and valued True/False, matching the results of applying each
flag value from "flags" to flag_field. | [
"Will",
"process",
"the",
"flags",
"and",
"set",
"attributes",
"in",
"the",
"object",
"accordingly",
"."
] | ff39b4e6d4e6efaf1f571cf0f2c0e0d7ab28c2d6 | https://github.com/bwhite/hadoopy/blob/ff39b4e6d4e6efaf1f571cf0f2c0e0d7ab28c2d6/hadoopy/thirdparty/pyinstaller/PyInstaller/lib/pefile.py#L559-L573 | train | 32,220 |
bwhite/hadoopy | hadoopy/thirdparty/pyinstaller/PyInstaller/lib/pefile.py | UnicodeStringWrapperPostProcessor.ask_pascal_16 | def ask_pascal_16(self, next_rva_ptr):
"""The next RVA is taken to be the one immediately following this one.
Such RVA could indicate the natural end of the string and will be checked
with the possible length contained in the first word.
"""
length = self.__get_pascal_16_length()
if length == (next_rva_ptr - (self.rva_ptr+2)) / 2:
self.length = length
return True
return False | python | def ask_pascal_16(self, next_rva_ptr):
"""The next RVA is taken to be the one immediately following this one.
Such RVA could indicate the natural end of the string and will be checked
with the possible length contained in the first word.
"""
length = self.__get_pascal_16_length()
if length == (next_rva_ptr - (self.rva_ptr+2)) / 2:
self.length = length
return True
return False | [
"def",
"ask_pascal_16",
"(",
"self",
",",
"next_rva_ptr",
")",
":",
"length",
"=",
"self",
".",
"__get_pascal_16_length",
"(",
")",
"if",
"length",
"==",
"(",
"next_rva_ptr",
"-",
"(",
"self",
".",
"rva_ptr",
"+",
"2",
")",
")",
"/",
"2",
":",
"self",
... | The next RVA is taken to be the one immediately following this one.
Such RVA could indicate the natural end of the string and will be checked
with the possible length contained in the first word. | [
"The",
"next",
"RVA",
"is",
"taken",
"to",
"be",
"the",
"one",
"immediately",
"following",
"this",
"one",
".",
"Such",
"RVA",
"could",
"indicate",
"the",
"natural",
"end",
"of",
"the",
"string",
"and",
"will",
"be",
"checked",
"with",
"the",
"possible",
... | ff39b4e6d4e6efaf1f571cf0f2c0e0d7ab28c2d6 | https://github.com/bwhite/hadoopy/blob/ff39b4e6d4e6efaf1f571cf0f2c0e0d7ab28c2d6/hadoopy/thirdparty/pyinstaller/PyInstaller/lib/pefile.py#L663-L676 | train | 32,221 |
bwhite/hadoopy | hadoopy/thirdparty/pyinstaller/PyInstaller/lib/pefile.py | Dump.add | def add(self, txt, indent=0):
"""Adds some text, no newline will be appended.
The text can be indented with the optional argument 'indent'.
"""
if isinstance(txt, unicode):
try:
txt = str(txt)
except UnicodeEncodeError:
s = []
for c in txt:
try:
s.append(str(c))
except UnicodeEncodeError:
s.append(repr(c))
txt = ''.join(s)
self.text.append( ' '*indent + txt ) | python | def add(self, txt, indent=0):
"""Adds some text, no newline will be appended.
The text can be indented with the optional argument 'indent'.
"""
if isinstance(txt, unicode):
try:
txt = str(txt)
except UnicodeEncodeError:
s = []
for c in txt:
try:
s.append(str(c))
except UnicodeEncodeError:
s.append(repr(c))
txt = ''.join(s)
self.text.append( ' '*indent + txt ) | [
"def",
"add",
"(",
"self",
",",
"txt",
",",
"indent",
"=",
"0",
")",
":",
"if",
"isinstance",
"(",
"txt",
",",
"unicode",
")",
":",
"try",
":",
"txt",
"=",
"str",
"(",
"txt",
")",
"except",
"UnicodeEncodeError",
":",
"s",
"=",
"[",
"]",
"for",
... | Adds some text, no newline will be appended.
The text can be indented with the optional argument 'indent'. | [
"Adds",
"some",
"text",
"no",
"newline",
"will",
"be",
"appended",
".",
"The",
"text",
"can",
"be",
"indented",
"with",
"the",
"optional",
"argument",
"indent",
"."
] | ff39b4e6d4e6efaf1f571cf0f2c0e0d7ab28c2d6 | https://github.com/bwhite/hadoopy/blob/ff39b4e6d4e6efaf1f571cf0f2c0e0d7ab28c2d6/hadoopy/thirdparty/pyinstaller/PyInstaller/lib/pefile.py#L756-L775 | train | 32,222 |
bwhite/hadoopy | hadoopy/thirdparty/pyinstaller/PyInstaller/lib/pefile.py | SectionStructure.contains_offset | def contains_offset(self, offset):
"""Check whether the section contains the file offset provided."""
if self.PointerToRawData is None:
# bss and other sections containing only uninitialized data must have 0
# and do not take space in the file
return False
return ( adjust_FileAlignment( self.PointerToRawData,
self.pe.OPTIONAL_HEADER.FileAlignment ) <=
offset <
adjust_FileAlignment( self.PointerToRawData,
self.pe.OPTIONAL_HEADER.FileAlignment ) +
self.SizeOfRawData ) | python | def contains_offset(self, offset):
"""Check whether the section contains the file offset provided."""
if self.PointerToRawData is None:
# bss and other sections containing only uninitialized data must have 0
# and do not take space in the file
return False
return ( adjust_FileAlignment( self.PointerToRawData,
self.pe.OPTIONAL_HEADER.FileAlignment ) <=
offset <
adjust_FileAlignment( self.PointerToRawData,
self.pe.OPTIONAL_HEADER.FileAlignment ) +
self.SizeOfRawData ) | [
"def",
"contains_offset",
"(",
"self",
",",
"offset",
")",
":",
"if",
"self",
".",
"PointerToRawData",
"is",
"None",
":",
"# bss and other sections containing only uninitialized data must have 0",
"# and do not take space in the file",
"return",
"False",
"return",
"(",
"adj... | Check whether the section contains the file offset provided. | [
"Check",
"whether",
"the",
"section",
"contains",
"the",
"file",
"offset",
"provided",
"."
] | ff39b4e6d4e6efaf1f571cf0f2c0e0d7ab28c2d6 | https://github.com/bwhite/hadoopy/blob/ff39b4e6d4e6efaf1f571cf0f2c0e0d7ab28c2d6/hadoopy/thirdparty/pyinstaller/PyInstaller/lib/pefile.py#L1052-L1064 | train | 32,223 |
bwhite/hadoopy | hadoopy/thirdparty/pyinstaller/PyInstaller/lib/pefile.py | PE.parse_resource_entry | def parse_resource_entry(self, rva):
"""Parse a directory entry from the resources directory."""
try:
data = self.get_data( rva, Structure(self.__IMAGE_RESOURCE_DIRECTORY_ENTRY_format__).sizeof() )
except PEFormatError, excp:
# A warning will be added by the caller if this method returns None
return None
resource = self.__unpack_data__(
self.__IMAGE_RESOURCE_DIRECTORY_ENTRY_format__, data,
file_offset = self.get_offset_from_rva(rva) )
if resource is None:
return None
#resource.NameIsString = (resource.Name & 0x80000000L) >> 31
resource.NameOffset = resource.Name & 0x7FFFFFFFL
resource.__pad = resource.Name & 0xFFFF0000L
resource.Id = resource.Name & 0x0000FFFFL
resource.DataIsDirectory = (resource.OffsetToData & 0x80000000L) >> 31
resource.OffsetToDirectory = resource.OffsetToData & 0x7FFFFFFFL
return resource | python | def parse_resource_entry(self, rva):
"""Parse a directory entry from the resources directory."""
try:
data = self.get_data( rva, Structure(self.__IMAGE_RESOURCE_DIRECTORY_ENTRY_format__).sizeof() )
except PEFormatError, excp:
# A warning will be added by the caller if this method returns None
return None
resource = self.__unpack_data__(
self.__IMAGE_RESOURCE_DIRECTORY_ENTRY_format__, data,
file_offset = self.get_offset_from_rva(rva) )
if resource is None:
return None
#resource.NameIsString = (resource.Name & 0x80000000L) >> 31
resource.NameOffset = resource.Name & 0x7FFFFFFFL
resource.__pad = resource.Name & 0xFFFF0000L
resource.Id = resource.Name & 0x0000FFFFL
resource.DataIsDirectory = (resource.OffsetToData & 0x80000000L) >> 31
resource.OffsetToDirectory = resource.OffsetToData & 0x7FFFFFFFL
return resource | [
"def",
"parse_resource_entry",
"(",
"self",
",",
"rva",
")",
":",
"try",
":",
"data",
"=",
"self",
".",
"get_data",
"(",
"rva",
",",
"Structure",
"(",
"self",
".",
"__IMAGE_RESOURCE_DIRECTORY_ENTRY_format__",
")",
".",
"sizeof",
"(",
")",
")",
"except",
"P... | Parse a directory entry from the resources directory. | [
"Parse",
"a",
"directory",
"entry",
"from",
"the",
"resources",
"directory",
"."
] | ff39b4e6d4e6efaf1f571cf0f2c0e0d7ab28c2d6 | https://github.com/bwhite/hadoopy/blob/ff39b4e6d4e6efaf1f571cf0f2c0e0d7ab28c2d6/hadoopy/thirdparty/pyinstaller/PyInstaller/lib/pefile.py#L2790-L2815 | train | 32,224 |
bwhite/hadoopy | hadoopy/thirdparty/pyinstaller/PyInstaller/lib/pefile.py | PE.parse_imports | def parse_imports(self, original_first_thunk, first_thunk, forwarder_chain):
"""Parse the imported symbols.
It will fill a list, which will be available as the dictionary
attribute "imports". Its keys will be the DLL names and the values
all the symbols imported from that object.
"""
imported_symbols = []
# The following has been commented as a PE does not
# need to have the import data necessarily witin
# a section, it can keep it in gaps between sections
# or overlapping other data.
#
#imports_section = self.get_section_by_rva(first_thunk)
#if not imports_section:
# raise PEFormatError, 'Invalid/corrupt imports.'
# Import Lookup Table. Contains ordinals or pointers to strings.
ilt = self.get_import_table(original_first_thunk)
# Import Address Table. May have identical content to ILT if
# PE file is not bounded, Will contain the address of the
# imported symbols once the binary is loaded or if it is already
# bound.
iat = self.get_import_table(first_thunk)
# OC Patch:
# Would crash if IAT or ILT had None type
if (not iat or len(iat)==0) and (not ilt or len(ilt)==0):
raise PEFormatError(
'Invalid Import Table information. ' +
'Both ILT and IAT appear to be broken.')
table = None
if ilt:
table = ilt
elif iat:
table = iat
else:
return None
if self.PE_TYPE == OPTIONAL_HEADER_MAGIC_PE:
ordinal_flag = IMAGE_ORDINAL_FLAG
elif self.PE_TYPE == OPTIONAL_HEADER_MAGIC_PE_PLUS:
ordinal_flag = IMAGE_ORDINAL_FLAG64
for idx in xrange(len(table)):
imp_ord = None
imp_hint = None
imp_name = None
name_offset = None
hint_name_table_rva = None
if table[idx].AddressOfData:
# If imported by ordinal, we will append the ordinal number
#
if table[idx].AddressOfData & ordinal_flag:
import_by_ordinal = True
imp_ord = table[idx].AddressOfData & 0xffff
imp_name = None
name_offset = None
else:
import_by_ordinal = False
try:
hint_name_table_rva = table[idx].AddressOfData & 0x7fffffff
data = self.get_data(hint_name_table_rva, 2)
# Get the Hint
imp_hint = self.get_word_from_data(data, 0)
imp_name = self.get_string_at_rva(table[idx].AddressOfData+2)
if not is_valid_function_name(imp_name):
imp_name = '*invalid*'
name_offset = self.get_offset_from_rva(table[idx].AddressOfData+2)
except PEFormatError, e:
pass
# by nriva: we want the ThunkRVA and ThunkOffset
thunk_offset = table[idx].get_file_offset()
thunk_rva = self.get_rva_from_offset(thunk_offset)
imp_address = first_thunk + self.OPTIONAL_HEADER.ImageBase + idx * 4
struct_iat = None
try:
if iat and ilt and ilt[idx].AddressOfData != iat[idx].AddressOfData:
imp_bound = iat[idx].AddressOfData
struct_iat = iat[idx]
else:
imp_bound = None
except IndexError:
imp_bound = None
# The file with hashes:
#
# MD5: bfe97192e8107d52dd7b4010d12b2924
# SHA256: 3d22f8b001423cb460811ab4f4789f277b35838d45c62ec0454c877e7c82c7f5
#
# has an invalid table built in a way that it's parseable but contains invalid
# entries that lead pefile to take extremely long amounts of time to
# parse. It also leads to extreme memory consumption.
# To prevent similar cases, if invalid entries are found in the middle of a
# table the parsing will be aborted
#
if imp_ord == None and imp_name == None:
raise PEFormatError( 'Invalid entries in the Import Table. Aborting parsing.' )
if imp_name != '' and (imp_ord or imp_name):
imported_symbols.append(
ImportData(
pe = self,
struct_table = table[idx],
struct_iat = struct_iat, # for bound imports if any
import_by_ordinal = import_by_ordinal,
ordinal = imp_ord,
ordinal_offset = table[idx].get_file_offset(),
hint = imp_hint,
name = imp_name,
name_offset = name_offset,
bound = imp_bound,
address = imp_address,
hint_name_table_rva = hint_name_table_rva,
thunk_offset = thunk_offset,
thunk_rva = thunk_rva ))
return imported_symbols | python | def parse_imports(self, original_first_thunk, first_thunk, forwarder_chain):
"""Parse the imported symbols.
It will fill a list, which will be available as the dictionary
attribute "imports". Its keys will be the DLL names and the values
all the symbols imported from that object.
"""
imported_symbols = []
# The following has been commented as a PE does not
# need to have the import data necessarily witin
# a section, it can keep it in gaps between sections
# or overlapping other data.
#
#imports_section = self.get_section_by_rva(first_thunk)
#if not imports_section:
# raise PEFormatError, 'Invalid/corrupt imports.'
# Import Lookup Table. Contains ordinals or pointers to strings.
ilt = self.get_import_table(original_first_thunk)
# Import Address Table. May have identical content to ILT if
# PE file is not bounded, Will contain the address of the
# imported symbols once the binary is loaded or if it is already
# bound.
iat = self.get_import_table(first_thunk)
# OC Patch:
# Would crash if IAT or ILT had None type
if (not iat or len(iat)==0) and (not ilt or len(ilt)==0):
raise PEFormatError(
'Invalid Import Table information. ' +
'Both ILT and IAT appear to be broken.')
table = None
if ilt:
table = ilt
elif iat:
table = iat
else:
return None
if self.PE_TYPE == OPTIONAL_HEADER_MAGIC_PE:
ordinal_flag = IMAGE_ORDINAL_FLAG
elif self.PE_TYPE == OPTIONAL_HEADER_MAGIC_PE_PLUS:
ordinal_flag = IMAGE_ORDINAL_FLAG64
for idx in xrange(len(table)):
imp_ord = None
imp_hint = None
imp_name = None
name_offset = None
hint_name_table_rva = None
if table[idx].AddressOfData:
# If imported by ordinal, we will append the ordinal number
#
if table[idx].AddressOfData & ordinal_flag:
import_by_ordinal = True
imp_ord = table[idx].AddressOfData & 0xffff
imp_name = None
name_offset = None
else:
import_by_ordinal = False
try:
hint_name_table_rva = table[idx].AddressOfData & 0x7fffffff
data = self.get_data(hint_name_table_rva, 2)
# Get the Hint
imp_hint = self.get_word_from_data(data, 0)
imp_name = self.get_string_at_rva(table[idx].AddressOfData+2)
if not is_valid_function_name(imp_name):
imp_name = '*invalid*'
name_offset = self.get_offset_from_rva(table[idx].AddressOfData+2)
except PEFormatError, e:
pass
# by nriva: we want the ThunkRVA and ThunkOffset
thunk_offset = table[idx].get_file_offset()
thunk_rva = self.get_rva_from_offset(thunk_offset)
imp_address = first_thunk + self.OPTIONAL_HEADER.ImageBase + idx * 4
struct_iat = None
try:
if iat and ilt and ilt[idx].AddressOfData != iat[idx].AddressOfData:
imp_bound = iat[idx].AddressOfData
struct_iat = iat[idx]
else:
imp_bound = None
except IndexError:
imp_bound = None
# The file with hashes:
#
# MD5: bfe97192e8107d52dd7b4010d12b2924
# SHA256: 3d22f8b001423cb460811ab4f4789f277b35838d45c62ec0454c877e7c82c7f5
#
# has an invalid table built in a way that it's parseable but contains invalid
# entries that lead pefile to take extremely long amounts of time to
# parse. It also leads to extreme memory consumption.
# To prevent similar cases, if invalid entries are found in the middle of a
# table the parsing will be aborted
#
if imp_ord == None and imp_name == None:
raise PEFormatError( 'Invalid entries in the Import Table. Aborting parsing.' )
if imp_name != '' and (imp_ord or imp_name):
imported_symbols.append(
ImportData(
pe = self,
struct_table = table[idx],
struct_iat = struct_iat, # for bound imports if any
import_by_ordinal = import_by_ordinal,
ordinal = imp_ord,
ordinal_offset = table[idx].get_file_offset(),
hint = imp_hint,
name = imp_name,
name_offset = name_offset,
bound = imp_bound,
address = imp_address,
hint_name_table_rva = hint_name_table_rva,
thunk_offset = thunk_offset,
thunk_rva = thunk_rva ))
return imported_symbols | [
"def",
"parse_imports",
"(",
"self",
",",
"original_first_thunk",
",",
"first_thunk",
",",
"forwarder_chain",
")",
":",
"imported_symbols",
"=",
"[",
"]",
"# The following has been commented as a PE does not",
"# need to have the import data necessarily witin",
"# a section, it c... | Parse the imported symbols.
It will fill a list, which will be available as the dictionary
attribute "imports". Its keys will be the DLL names and the values
all the symbols imported from that object. | [
"Parse",
"the",
"imported",
"symbols",
".",
"It",
"will",
"fill",
"a",
"list",
"which",
"will",
"be",
"available",
"as",
"the",
"dictionary",
"attribute",
"imports",
".",
"Its",
"keys",
"will",
"be",
"the",
"DLL",
"names",
"and",
"the",
"values",
"all",
... | ff39b4e6d4e6efaf1f571cf0f2c0e0d7ab28c2d6 | https://github.com/bwhite/hadoopy/blob/ff39b4e6d4e6efaf1f571cf0f2c0e0d7ab28c2d6/hadoopy/thirdparty/pyinstaller/PyInstaller/lib/pefile.py#L3437-L3565 | train | 32,225 |
bwhite/hadoopy | hadoopy/thirdparty/pyinstaller/PyInstaller/lib/pefile.py | PE.get_data | def get_data(self, rva=0, length=None):
"""Get data regardless of the section where it lies on.
Given a RVA and the size of the chunk to retrieve, this method
will find the section where the data lies and return the data.
"""
s = self.get_section_by_rva(rva)
if length:
end = rva + length
else:
end = None
if not s:
if rva < len(self.header):
return self.header[rva:end]
# Before we give up we check whether the file might
# contain the data anyway. There are cases of PE files
# without sections that rely on windows loading the first
# 8291 bytes into memory and assume the data will be
# there
# A functional file with these characteristics is:
# MD5: 0008892cdfbc3bda5ce047c565e52295
# SHA-1: c7116b9ff950f86af256defb95b5d4859d4752a9
#
if rva < len(self.__data__):
return self.__data__[rva:end]
raise PEFormatError, 'data at RVA can\'t be fetched. Corrupt header?'
return s.get_data(rva, length) | python | def get_data(self, rva=0, length=None):
"""Get data regardless of the section where it lies on.
Given a RVA and the size of the chunk to retrieve, this method
will find the section where the data lies and return the data.
"""
s = self.get_section_by_rva(rva)
if length:
end = rva + length
else:
end = None
if not s:
if rva < len(self.header):
return self.header[rva:end]
# Before we give up we check whether the file might
# contain the data anyway. There are cases of PE files
# without sections that rely on windows loading the first
# 8291 bytes into memory and assume the data will be
# there
# A functional file with these characteristics is:
# MD5: 0008892cdfbc3bda5ce047c565e52295
# SHA-1: c7116b9ff950f86af256defb95b5d4859d4752a9
#
if rva < len(self.__data__):
return self.__data__[rva:end]
raise PEFormatError, 'data at RVA can\'t be fetched. Corrupt header?'
return s.get_data(rva, length) | [
"def",
"get_data",
"(",
"self",
",",
"rva",
"=",
"0",
",",
"length",
"=",
"None",
")",
":",
"s",
"=",
"self",
".",
"get_section_by_rva",
"(",
"rva",
")",
"if",
"length",
":",
"end",
"=",
"rva",
"+",
"length",
"else",
":",
"end",
"=",
"None",
"if"... | Get data regardless of the section where it lies on.
Given a RVA and the size of the chunk to retrieve, this method
will find the section where the data lies and return the data. | [
"Get",
"data",
"regardless",
"of",
"the",
"section",
"where",
"it",
"lies",
"on",
".",
"Given",
"a",
"RVA",
"and",
"the",
"size",
"of",
"the",
"chunk",
"to",
"retrieve",
"this",
"method",
"will",
"find",
"the",
"section",
"where",
"the",
"data",
"lies",
... | ff39b4e6d4e6efaf1f571cf0f2c0e0d7ab28c2d6 | https://github.com/bwhite/hadoopy/blob/ff39b4e6d4e6efaf1f571cf0f2c0e0d7ab28c2d6/hadoopy/thirdparty/pyinstaller/PyInstaller/lib/pefile.py#L3711-L3743 | train | 32,226 |
bwhite/hadoopy | hadoopy/thirdparty/pyinstaller/PyInstaller/lib/pefile.py | PE.get_rva_from_offset | def get_rva_from_offset(self, offset):
"""Get the RVA corresponding to this file offset. """
s = self.get_section_by_offset(offset)
if not s:
if self.sections:
lowest_rva = min( [ adjust_SectionAlignment( s.VirtualAddress,
self.OPTIONAL_HEADER.SectionAlignment, self.OPTIONAL_HEADER.FileAlignment ) for s in self.sections] )
if offset < lowest_rva:
# We will assume that the offset lies within the headers, or
# at least points before where the earliest section starts
# and we will simply return the offset as the RVA
#
# The case illustrating this behavior can be found at:
# http://corkami.blogspot.com/2010/01/hey-hey-hey-whats-in-your-head.html
# where the import table is not contained by any section
# hence the RVA needs to be resolved to a raw offset
return offset
else:
return offset
#raise PEFormatError("specified offset (0x%x) doesn't belong to any section." % offset)
return s.get_rva_from_offset(offset) | python | def get_rva_from_offset(self, offset):
"""Get the RVA corresponding to this file offset. """
s = self.get_section_by_offset(offset)
if not s:
if self.sections:
lowest_rva = min( [ adjust_SectionAlignment( s.VirtualAddress,
self.OPTIONAL_HEADER.SectionAlignment, self.OPTIONAL_HEADER.FileAlignment ) for s in self.sections] )
if offset < lowest_rva:
# We will assume that the offset lies within the headers, or
# at least points before where the earliest section starts
# and we will simply return the offset as the RVA
#
# The case illustrating this behavior can be found at:
# http://corkami.blogspot.com/2010/01/hey-hey-hey-whats-in-your-head.html
# where the import table is not contained by any section
# hence the RVA needs to be resolved to a raw offset
return offset
else:
return offset
#raise PEFormatError("specified offset (0x%x) doesn't belong to any section." % offset)
return s.get_rva_from_offset(offset) | [
"def",
"get_rva_from_offset",
"(",
"self",
",",
"offset",
")",
":",
"s",
"=",
"self",
".",
"get_section_by_offset",
"(",
"offset",
")",
"if",
"not",
"s",
":",
"if",
"self",
".",
"sections",
":",
"lowest_rva",
"=",
"min",
"(",
"[",
"adjust_SectionAlignment"... | Get the RVA corresponding to this file offset. | [
"Get",
"the",
"RVA",
"corresponding",
"to",
"this",
"file",
"offset",
"."
] | ff39b4e6d4e6efaf1f571cf0f2c0e0d7ab28c2d6 | https://github.com/bwhite/hadoopy/blob/ff39b4e6d4e6efaf1f571cf0f2c0e0d7ab28c2d6/hadoopy/thirdparty/pyinstaller/PyInstaller/lib/pefile.py#L3746-L3767 | train | 32,227 |
bwhite/hadoopy | hadoopy/thirdparty/pyinstaller/PyInstaller/lib/pefile.py | PE.get_offset_from_rva | def get_offset_from_rva(self, rva):
"""Get the file offset corresponding to this RVA.
Given a RVA , this method will find the section where the
data lies and return the offset within the file.
"""
s = self.get_section_by_rva(rva)
if not s:
# If not found within a section assume it might
# point to overlay data or otherwise data present
# but not contained in any section. In those
# cases the RVA should equal the offset
if rva<len(self.__data__):
return rva
raise PEFormatError, 'data at RVA can\'t be fetched. Corrupt header?'
return s.get_offset_from_rva(rva) | python | def get_offset_from_rva(self, rva):
"""Get the file offset corresponding to this RVA.
Given a RVA , this method will find the section where the
data lies and return the offset within the file.
"""
s = self.get_section_by_rva(rva)
if not s:
# If not found within a section assume it might
# point to overlay data or otherwise data present
# but not contained in any section. In those
# cases the RVA should equal the offset
if rva<len(self.__data__):
return rva
raise PEFormatError, 'data at RVA can\'t be fetched. Corrupt header?'
return s.get_offset_from_rva(rva) | [
"def",
"get_offset_from_rva",
"(",
"self",
",",
"rva",
")",
":",
"s",
"=",
"self",
".",
"get_section_by_rva",
"(",
"rva",
")",
"if",
"not",
"s",
":",
"# If not found within a section assume it might",
"# point to overlay data or otherwise data present",
"# but not contain... | Get the file offset corresponding to this RVA.
Given a RVA , this method will find the section where the
data lies and return the offset within the file. | [
"Get",
"the",
"file",
"offset",
"corresponding",
"to",
"this",
"RVA",
".",
"Given",
"a",
"RVA",
"this",
"method",
"will",
"find",
"the",
"section",
"where",
"the",
"data",
"lies",
"and",
"return",
"the",
"offset",
"within",
"the",
"file",
"."
] | ff39b4e6d4e6efaf1f571cf0f2c0e0d7ab28c2d6 | https://github.com/bwhite/hadoopy/blob/ff39b4e6d4e6efaf1f571cf0f2c0e0d7ab28c2d6/hadoopy/thirdparty/pyinstaller/PyInstaller/lib/pefile.py#L3769-L3788 | train | 32,228 |
bwhite/hadoopy | hadoopy/thirdparty/pyinstaller/PyInstaller/lib/pefile.py | PE.get_string_at_rva | def get_string_at_rva(self, rva):
"""Get an ASCII string located at the given address."""
s = self.get_section_by_rva(rva)
if not s:
return self.get_string_from_data(0, self.__data__[rva:rva+MAX_STRING_LENGTH])
return self.get_string_from_data( 0, s.get_data(rva, length=MAX_STRING_LENGTH) ) | python | def get_string_at_rva(self, rva):
"""Get an ASCII string located at the given address."""
s = self.get_section_by_rva(rva)
if not s:
return self.get_string_from_data(0, self.__data__[rva:rva+MAX_STRING_LENGTH])
return self.get_string_from_data( 0, s.get_data(rva, length=MAX_STRING_LENGTH) ) | [
"def",
"get_string_at_rva",
"(",
"self",
",",
"rva",
")",
":",
"s",
"=",
"self",
".",
"get_section_by_rva",
"(",
"rva",
")",
"if",
"not",
"s",
":",
"return",
"self",
".",
"get_string_from_data",
"(",
"0",
",",
"self",
".",
"__data__",
"[",
"rva",
":",
... | Get an ASCII string located at the given address. | [
"Get",
"an",
"ASCII",
"string",
"located",
"at",
"the",
"given",
"address",
"."
] | ff39b4e6d4e6efaf1f571cf0f2c0e0d7ab28c2d6 | https://github.com/bwhite/hadoopy/blob/ff39b4e6d4e6efaf1f571cf0f2c0e0d7ab28c2d6/hadoopy/thirdparty/pyinstaller/PyInstaller/lib/pefile.py#L3791-L3798 | train | 32,229 |
bwhite/hadoopy | hadoopy/thirdparty/pyinstaller/PyInstaller/lib/pefile.py | PE.get_string_from_data | def get_string_from_data(self, offset, data):
"""Get an ASCII string from within the data."""
# OC Patch
b = None
try:
b = data[offset]
except IndexError:
return ''
s = ''
while ord(b):
s += b
offset += 1
try:
b = data[offset]
except IndexError:
break
return s | python | def get_string_from_data(self, offset, data):
"""Get an ASCII string from within the data."""
# OC Patch
b = None
try:
b = data[offset]
except IndexError:
return ''
s = ''
while ord(b):
s += b
offset += 1
try:
b = data[offset]
except IndexError:
break
return s | [
"def",
"get_string_from_data",
"(",
"self",
",",
"offset",
",",
"data",
")",
":",
"# OC Patch",
"b",
"=",
"None",
"try",
":",
"b",
"=",
"data",
"[",
"offset",
"]",
"except",
"IndexError",
":",
"return",
"''",
"s",
"=",
"''",
"while",
"ord",
"(",
"b",... | Get an ASCII string from within the data. | [
"Get",
"an",
"ASCII",
"string",
"from",
"within",
"the",
"data",
"."
] | ff39b4e6d4e6efaf1f571cf0f2c0e0d7ab28c2d6 | https://github.com/bwhite/hadoopy/blob/ff39b4e6d4e6efaf1f571cf0f2c0e0d7ab28c2d6/hadoopy/thirdparty/pyinstaller/PyInstaller/lib/pefile.py#L3801-L3821 | train | 32,230 |
bwhite/hadoopy | hadoopy/thirdparty/pyinstaller/PyInstaller/lib/pefile.py | PE.set_bytes_at_rva | def set_bytes_at_rva(self, rva, data):
"""Overwrite, with the given string, the bytes at the file offset corresponding to the given RVA.
Return True if successful, False otherwise. It can fail if the
offset is outside the file's boundaries.
"""
offset = self.get_physical_by_rva(rva)
if not offset:
raise False
return self.set_bytes_at_offset(offset, data) | python | def set_bytes_at_rva(self, rva, data):
"""Overwrite, with the given string, the bytes at the file offset corresponding to the given RVA.
Return True if successful, False otherwise. It can fail if the
offset is outside the file's boundaries.
"""
offset = self.get_physical_by_rva(rva)
if not offset:
raise False
return self.set_bytes_at_offset(offset, data) | [
"def",
"set_bytes_at_rva",
"(",
"self",
",",
"rva",
",",
"data",
")",
":",
"offset",
"=",
"self",
".",
"get_physical_by_rva",
"(",
"rva",
")",
"if",
"not",
"offset",
":",
"raise",
"False",
"return",
"self",
".",
"set_bytes_at_offset",
"(",
"offset",
",",
... | Overwrite, with the given string, the bytes at the file offset corresponding to the given RVA.
Return True if successful, False otherwise. It can fail if the
offset is outside the file's boundaries. | [
"Overwrite",
"with",
"the",
"given",
"string",
"the",
"bytes",
"at",
"the",
"file",
"offset",
"corresponding",
"to",
"the",
"given",
"RVA",
".",
"Return",
"True",
"if",
"successful",
"False",
"otherwise",
".",
"It",
"can",
"fail",
"if",
"the",
"offset",
"i... | ff39b4e6d4e6efaf1f571cf0f2c0e0d7ab28c2d6 | https://github.com/bwhite/hadoopy/blob/ff39b4e6d4e6efaf1f571cf0f2c0e0d7ab28c2d6/hadoopy/thirdparty/pyinstaller/PyInstaller/lib/pefile.py#L4367-L4378 | train | 32,231 |
bwhite/hadoopy | hadoopy/thirdparty/pyinstaller/PyInstaller/lib/pefile.py | PE.merge_modified_section_data | def merge_modified_section_data(self):
"""Update the PE image content with any individual section data that has been modified."""
for section in self.sections:
section_data_start = adjust_FileAlignment( section.PointerToRawData,
self.OPTIONAL_HEADER.FileAlignment )
section_data_end = section_data_start+section.SizeOfRawData
if section_data_start < len(self.__data__) and section_data_end < len(self.__data__):
self.__data__ = self.__data__[:section_data_start] + section.get_data() + self.__data__[section_data_end:] | python | def merge_modified_section_data(self):
"""Update the PE image content with any individual section data that has been modified."""
for section in self.sections:
section_data_start = adjust_FileAlignment( section.PointerToRawData,
self.OPTIONAL_HEADER.FileAlignment )
section_data_end = section_data_start+section.SizeOfRawData
if section_data_start < len(self.__data__) and section_data_end < len(self.__data__):
self.__data__ = self.__data__[:section_data_start] + section.get_data() + self.__data__[section_data_end:] | [
"def",
"merge_modified_section_data",
"(",
"self",
")",
":",
"for",
"section",
"in",
"self",
".",
"sections",
":",
"section_data_start",
"=",
"adjust_FileAlignment",
"(",
"section",
".",
"PointerToRawData",
",",
"self",
".",
"OPTIONAL_HEADER",
".",
"FileAlignment",
... | Update the PE image content with any individual section data that has been modified. | [
"Update",
"the",
"PE",
"image",
"content",
"with",
"any",
"individual",
"section",
"data",
"that",
"has",
"been",
"modified",
"."
] | ff39b4e6d4e6efaf1f571cf0f2c0e0d7ab28c2d6 | https://github.com/bwhite/hadoopy/blob/ff39b4e6d4e6efaf1f571cf0f2c0e0d7ab28c2d6/hadoopy/thirdparty/pyinstaller/PyInstaller/lib/pefile.py#L4399-L4407 | train | 32,232 |
bwhite/hadoopy | hadoopy/thirdparty/pyinstaller/PyInstaller/lib/pefile.py | PE.is_driver | def is_driver(self):
"""Check whether the file is a Windows driver.
This will return true only if there are reliable indicators of the image
being a driver.
"""
# Checking that the ImageBase field of the OptionalHeader is above or
# equal to 0x80000000 (that is, whether it lies in the upper 2GB of
# the address space, normally belonging to the kernel) is not a
# reliable enough indicator. For instance, PEs that play the invalid
# ImageBase trick to get relocated could be incorrectly assumed to be
# drivers.
# This is not reliable either...
#
# if any( (section.Characteristics & SECTION_CHARACTERISTICS['IMAGE_SCN_MEM_NOT_PAGED']) for section in self.sections ):
# return True
if hasattr(self, 'DIRECTORY_ENTRY_IMPORT'):
# If it imports from "ntoskrnl.exe" or other kernel components it should be a driver
#
if set( ('ntoskrnl.exe', 'hal.dll', 'ndis.sys', 'bootvid.dll', 'kdcom.dll' ) ).intersection( [ imp.dll.lower() for imp in self.DIRECTORY_ENTRY_IMPORT ] ):
return True
return False | python | def is_driver(self):
"""Check whether the file is a Windows driver.
This will return true only if there are reliable indicators of the image
being a driver.
"""
# Checking that the ImageBase field of the OptionalHeader is above or
# equal to 0x80000000 (that is, whether it lies in the upper 2GB of
# the address space, normally belonging to the kernel) is not a
# reliable enough indicator. For instance, PEs that play the invalid
# ImageBase trick to get relocated could be incorrectly assumed to be
# drivers.
# This is not reliable either...
#
# if any( (section.Characteristics & SECTION_CHARACTERISTICS['IMAGE_SCN_MEM_NOT_PAGED']) for section in self.sections ):
# return True
if hasattr(self, 'DIRECTORY_ENTRY_IMPORT'):
# If it imports from "ntoskrnl.exe" or other kernel components it should be a driver
#
if set( ('ntoskrnl.exe', 'hal.dll', 'ndis.sys', 'bootvid.dll', 'kdcom.dll' ) ).intersection( [ imp.dll.lower() for imp in self.DIRECTORY_ENTRY_IMPORT ] ):
return True
return False | [
"def",
"is_driver",
"(",
"self",
")",
":",
"# Checking that the ImageBase field of the OptionalHeader is above or",
"# equal to 0x80000000 (that is, whether it lies in the upper 2GB of",
"# the address space, normally belonging to the kernel) is not a",
"# reliable enough indicator. For instance,... | Check whether the file is a Windows driver.
This will return true only if there are reliable indicators of the image
being a driver. | [
"Check",
"whether",
"the",
"file",
"is",
"a",
"Windows",
"driver",
".",
"This",
"will",
"return",
"true",
"only",
"if",
"there",
"are",
"reliable",
"indicators",
"of",
"the",
"image",
"being",
"a",
"driver",
"."
] | ff39b4e6d4e6efaf1f571cf0f2c0e0d7ab28c2d6 | https://github.com/bwhite/hadoopy/blob/ff39b4e6d4e6efaf1f571cf0f2c0e0d7ab28c2d6/hadoopy/thirdparty/pyinstaller/PyInstaller/lib/pefile.py#L4580-L4606 | train | 32,233 |
bwhite/hadoopy | hadoopy/_freeze.py | _copytree | def _copytree(src, dst):
"""Similar to shutils.copytree, except that dst is already there
"""
try:
os.makedirs(dst)
except OSError:
pass # It must already exist
for file in os.listdir(src):
try:
shutil.copy2('%s/%s' % (src, file), '%s/%s' % (dst, file))
except IOError, e:
try:
shutil.copytree('%s/%s' % (src, file), '%s/%s' % (dst, file))
except OSError: # Not a directory, reraise copy2 exception
raise e | python | def _copytree(src, dst):
"""Similar to shutils.copytree, except that dst is already there
"""
try:
os.makedirs(dst)
except OSError:
pass # It must already exist
for file in os.listdir(src):
try:
shutil.copy2('%s/%s' % (src, file), '%s/%s' % (dst, file))
except IOError, e:
try:
shutil.copytree('%s/%s' % (src, file), '%s/%s' % (dst, file))
except OSError: # Not a directory, reraise copy2 exception
raise e | [
"def",
"_copytree",
"(",
"src",
",",
"dst",
")",
":",
"try",
":",
"os",
".",
"makedirs",
"(",
"dst",
")",
"except",
"OSError",
":",
"pass",
"# It must already exist",
"for",
"file",
"in",
"os",
".",
"listdir",
"(",
"src",
")",
":",
"try",
":",
"shuti... | Similar to shutils.copytree, except that dst is already there | [
"Similar",
"to",
"shutils",
".",
"copytree",
"except",
"that",
"dst",
"is",
"already",
"there"
] | ff39b4e6d4e6efaf1f571cf0f2c0e0d7ab28c2d6 | https://github.com/bwhite/hadoopy/blob/ff39b4e6d4e6efaf1f571cf0f2c0e0d7ab28c2d6/hadoopy/_freeze.py#L53-L67 | train | 32,234 |
bwhite/hadoopy | hadoopy/_freeze.py | _md5_file | def _md5_file(fn, block_size=1048576):
"""Builds the MD5 of a file block by block
Args:
fn: File path
block_size: Size of the blocks to consider (default 1048576)
Returns:
File MD5
"""
h = hashlib.md5()
with open(fn) as fp:
d = 1
while d:
d = fp.read(block_size)
h.update(d)
return h.hexdigest() | python | def _md5_file(fn, block_size=1048576):
"""Builds the MD5 of a file block by block
Args:
fn: File path
block_size: Size of the blocks to consider (default 1048576)
Returns:
File MD5
"""
h = hashlib.md5()
with open(fn) as fp:
d = 1
while d:
d = fp.read(block_size)
h.update(d)
return h.hexdigest() | [
"def",
"_md5_file",
"(",
"fn",
",",
"block_size",
"=",
"1048576",
")",
":",
"h",
"=",
"hashlib",
".",
"md5",
"(",
")",
"with",
"open",
"(",
"fn",
")",
"as",
"fp",
":",
"d",
"=",
"1",
"while",
"d",
":",
"d",
"=",
"fp",
".",
"read",
"(",
"block... | Builds the MD5 of a file block by block
Args:
fn: File path
block_size: Size of the blocks to consider (default 1048576)
Returns:
File MD5 | [
"Builds",
"the",
"MD5",
"of",
"a",
"file",
"block",
"by",
"block"
] | ff39b4e6d4e6efaf1f571cf0f2c0e0d7ab28c2d6 | https://github.com/bwhite/hadoopy/blob/ff39b4e6d4e6efaf1f571cf0f2c0e0d7ab28c2d6/hadoopy/_freeze.py#L70-L86 | train | 32,235 |
bwhite/hadoopy | hadoopy/_freeze.py | freeze_script | def freeze_script(script_path, cache=True, temp_path='_hadoopy_temp'):
"""Freezes a script, puts it on hdfs, and gives you the path
'frozen_tar_path' can be given to launch_frozen and it will use that
instead of making its own, this is useful for repeated calls. If a
file with the same md5 already exists in the temp_path, it is used
instead of putting a new copy there to avoid the file transfer. The
files are put into a temporary file based on the timestamp first, then
moved to a location that is only a function of their md5 to prevent partial
files.
Args:
script_path: Path to a hadoopy script
cache: If True (default) then use previously frozen scripts. Cache is stored in memory (not persistent).
temp_path: HDFS temporary path (default is '_hadoopy_temp')
Returns:
{'cmds': commands_ran, 'frozen_tar_path': frozen_tar_path}
Raises:
ValueError: Script cannot be found
"""
script_abspath = os.path.abspath(script_path)
if not os.path.exists(script_abspath):
raise ValueError('Script [%s] does not exist.' % script_abspath)
try:
if not cache:
raise KeyError # NOTE(brandyn): Don't use cache item
cmds, frozen_tar_path = FREEZE_CACHE[script_abspath]
except KeyError:
tmp_frozen_tar_path = temp_path + '/%f.tar' % time.time()
freeze_fp = tempfile.NamedTemporaryFile(suffix='.tar')
cmds = hadoopy._freeze.freeze_to_tar(os.path.abspath(script_path), freeze_fp.name)
md5 = _md5_file(freeze_fp.name)
frozen_tar_path = temp_path + '/%s.tar' % md5
if not hadoopy.exists(frozen_tar_path):
if not hadoopy.exists(temp_path): # CDH4 Fix
hadoopy.mkdir(temp_path)
hadoopy.put(freeze_fp.name, tmp_frozen_tar_path)
try:
hadoopy.mv(tmp_frozen_tar_path, frozen_tar_path)
except IOError:
if not hadoopy.exists(frozen_tar_path): # Check again
raise
FREEZE_CACHE[script_abspath] = cmds, frozen_tar_path
return {'cmds': cmds, 'frozen_tar_path': frozen_tar_path} | python | def freeze_script(script_path, cache=True, temp_path='_hadoopy_temp'):
"""Freezes a script, puts it on hdfs, and gives you the path
'frozen_tar_path' can be given to launch_frozen and it will use that
instead of making its own, this is useful for repeated calls. If a
file with the same md5 already exists in the temp_path, it is used
instead of putting a new copy there to avoid the file transfer. The
files are put into a temporary file based on the timestamp first, then
moved to a location that is only a function of their md5 to prevent partial
files.
Args:
script_path: Path to a hadoopy script
cache: If True (default) then use previously frozen scripts. Cache is stored in memory (not persistent).
temp_path: HDFS temporary path (default is '_hadoopy_temp')
Returns:
{'cmds': commands_ran, 'frozen_tar_path': frozen_tar_path}
Raises:
ValueError: Script cannot be found
"""
script_abspath = os.path.abspath(script_path)
if not os.path.exists(script_abspath):
raise ValueError('Script [%s] does not exist.' % script_abspath)
try:
if not cache:
raise KeyError # NOTE(brandyn): Don't use cache item
cmds, frozen_tar_path = FREEZE_CACHE[script_abspath]
except KeyError:
tmp_frozen_tar_path = temp_path + '/%f.tar' % time.time()
freeze_fp = tempfile.NamedTemporaryFile(suffix='.tar')
cmds = hadoopy._freeze.freeze_to_tar(os.path.abspath(script_path), freeze_fp.name)
md5 = _md5_file(freeze_fp.name)
frozen_tar_path = temp_path + '/%s.tar' % md5
if not hadoopy.exists(frozen_tar_path):
if not hadoopy.exists(temp_path): # CDH4 Fix
hadoopy.mkdir(temp_path)
hadoopy.put(freeze_fp.name, tmp_frozen_tar_path)
try:
hadoopy.mv(tmp_frozen_tar_path, frozen_tar_path)
except IOError:
if not hadoopy.exists(frozen_tar_path): # Check again
raise
FREEZE_CACHE[script_abspath] = cmds, frozen_tar_path
return {'cmds': cmds, 'frozen_tar_path': frozen_tar_path} | [
"def",
"freeze_script",
"(",
"script_path",
",",
"cache",
"=",
"True",
",",
"temp_path",
"=",
"'_hadoopy_temp'",
")",
":",
"script_abspath",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"script_path",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"... | Freezes a script, puts it on hdfs, and gives you the path
'frozen_tar_path' can be given to launch_frozen and it will use that
instead of making its own, this is useful for repeated calls. If a
file with the same md5 already exists in the temp_path, it is used
instead of putting a new copy there to avoid the file transfer. The
files are put into a temporary file based on the timestamp first, then
moved to a location that is only a function of their md5 to prevent partial
files.
Args:
script_path: Path to a hadoopy script
cache: If True (default) then use previously frozen scripts. Cache is stored in memory (not persistent).
temp_path: HDFS temporary path (default is '_hadoopy_temp')
Returns:
{'cmds': commands_ran, 'frozen_tar_path': frozen_tar_path}
Raises:
ValueError: Script cannot be found | [
"Freezes",
"a",
"script",
"puts",
"it",
"on",
"hdfs",
"and",
"gives",
"you",
"the",
"path"
] | ff39b4e6d4e6efaf1f571cf0f2c0e0d7ab28c2d6 | https://github.com/bwhite/hadoopy/blob/ff39b4e6d4e6efaf1f571cf0f2c0e0d7ab28c2d6/hadoopy/_freeze.py#L92-L137 | train | 32,236 |
bwhite/hadoopy | hadoopy/_freeze.py | freeze | def freeze(script_path, target_dir='frozen', **kw):
"""Wraps pyinstaller and provides an easy to use interface
Args:
script_path: Absolute path to python script to be frozen.
Returns:
List of freeze commands ran
Raises:
subprocess.CalledProcessError: Freeze error.
OSError: Freeze not found.
"""
cmds = []
freeze_start_time = time.time()
logging.debug('/\\%s%s Output%s/\\' % ('-' * 10, 'Pyinstaller', '-' * 10))
orig_dir = os.path.abspath('.')
script_path = os.path.abspath(script_path)
try:
os.chdir(target_dir)
cmds += _freeze_config()
pyinst_path = '%s/thirdparty/pyinstaller' % __path__[0]
cur_cmd = 'python -O %s/pyinstaller.py %s --skip-configure' % (pyinst_path, script_path)
cmds.append(cur_cmd)
if _run(cur_cmd): # If there is a problem, try removing the config and re-doing
_freeze_config(force=True)
cur_cmd = 'python -O %s/pyinstaller.py %s' % (pyinst_path, script_path)
_run(cur_cmd)
finally:
os.chdir(orig_dir)
logging.debug('\\/%s%s Output%s\\/' % ('-' * 10, 'Pyinstaller', '-' * 10))
logging.info('Pyinstaller took [%f] seconds' % (time.time() - freeze_start_time))
return cmds | python | def freeze(script_path, target_dir='frozen', **kw):
"""Wraps pyinstaller and provides an easy to use interface
Args:
script_path: Absolute path to python script to be frozen.
Returns:
List of freeze commands ran
Raises:
subprocess.CalledProcessError: Freeze error.
OSError: Freeze not found.
"""
cmds = []
freeze_start_time = time.time()
logging.debug('/\\%s%s Output%s/\\' % ('-' * 10, 'Pyinstaller', '-' * 10))
orig_dir = os.path.abspath('.')
script_path = os.path.abspath(script_path)
try:
os.chdir(target_dir)
cmds += _freeze_config()
pyinst_path = '%s/thirdparty/pyinstaller' % __path__[0]
cur_cmd = 'python -O %s/pyinstaller.py %s --skip-configure' % (pyinst_path, script_path)
cmds.append(cur_cmd)
if _run(cur_cmd): # If there is a problem, try removing the config and re-doing
_freeze_config(force=True)
cur_cmd = 'python -O %s/pyinstaller.py %s' % (pyinst_path, script_path)
_run(cur_cmd)
finally:
os.chdir(orig_dir)
logging.debug('\\/%s%s Output%s\\/' % ('-' * 10, 'Pyinstaller', '-' * 10))
logging.info('Pyinstaller took [%f] seconds' % (time.time() - freeze_start_time))
return cmds | [
"def",
"freeze",
"(",
"script_path",
",",
"target_dir",
"=",
"'frozen'",
",",
"*",
"*",
"kw",
")",
":",
"cmds",
"=",
"[",
"]",
"freeze_start_time",
"=",
"time",
".",
"time",
"(",
")",
"logging",
".",
"debug",
"(",
"'/\\\\%s%s Output%s/\\\\'",
"%",
"(",
... | Wraps pyinstaller and provides an easy to use interface
Args:
script_path: Absolute path to python script to be frozen.
Returns:
List of freeze commands ran
Raises:
subprocess.CalledProcessError: Freeze error.
OSError: Freeze not found. | [
"Wraps",
"pyinstaller",
"and",
"provides",
"an",
"easy",
"to",
"use",
"interface"
] | ff39b4e6d4e6efaf1f571cf0f2c0e0d7ab28c2d6 | https://github.com/bwhite/hadoopy/blob/ff39b4e6d4e6efaf1f571cf0f2c0e0d7ab28c2d6/hadoopy/_freeze.py#L156-L188 | train | 32,237 |
bwhite/hadoopy | hadoopy/_freeze.py | freeze_to_tar | def freeze_to_tar(script_path, freeze_fn, extra_files=None):
"""Freezes a script to a .tar or .tar.gz file
The script contains all of the files at the root of the tar
Args:
script_path: Path to python script to be frozen.
freeze_fn: Tar filename (must end in .tar or .tar.gz)
extra_files: List of paths to add to the tar (default is None)
Returns:
List of freeze commands ran
Raises:
subprocess.CalledProcessError: freeze error.
OSError: freeze not found.
NameError: Tar must end in .tar or .tar.gz
"""
if not extra_files:
extra_files = []
freeze_dir = tempfile.mkdtemp()
try:
cmds = freeze(script_path, target_dir=freeze_dir)
if freeze_fn.endswith('.tar.gz'):
mode = 'w|gz'
elif freeze_fn.endswith('.tar'):
mode = 'w'
else:
raise NameError('[%s] must end in .tar or .tar.gz' % freeze_fn)
fp = tarfile.open(freeze_fn, mode)
proj_name = os.path.basename(script_path)
proj_name = proj_name[:proj_name.rfind('.')] # Remove extension
for x in glob.glob('%s/dist/%s/*' % (freeze_dir, proj_name)) + extra_files:
fp.add(x, arcname=os.path.basename(x))
fp.close()
finally:
shutil.rmtree(freeze_dir)
return cmds | python | def freeze_to_tar(script_path, freeze_fn, extra_files=None):
"""Freezes a script to a .tar or .tar.gz file
The script contains all of the files at the root of the tar
Args:
script_path: Path to python script to be frozen.
freeze_fn: Tar filename (must end in .tar or .tar.gz)
extra_files: List of paths to add to the tar (default is None)
Returns:
List of freeze commands ran
Raises:
subprocess.CalledProcessError: freeze error.
OSError: freeze not found.
NameError: Tar must end in .tar or .tar.gz
"""
if not extra_files:
extra_files = []
freeze_dir = tempfile.mkdtemp()
try:
cmds = freeze(script_path, target_dir=freeze_dir)
if freeze_fn.endswith('.tar.gz'):
mode = 'w|gz'
elif freeze_fn.endswith('.tar'):
mode = 'w'
else:
raise NameError('[%s] must end in .tar or .tar.gz' % freeze_fn)
fp = tarfile.open(freeze_fn, mode)
proj_name = os.path.basename(script_path)
proj_name = proj_name[:proj_name.rfind('.')] # Remove extension
for x in glob.glob('%s/dist/%s/*' % (freeze_dir, proj_name)) + extra_files:
fp.add(x, arcname=os.path.basename(x))
fp.close()
finally:
shutil.rmtree(freeze_dir)
return cmds | [
"def",
"freeze_to_tar",
"(",
"script_path",
",",
"freeze_fn",
",",
"extra_files",
"=",
"None",
")",
":",
"if",
"not",
"extra_files",
":",
"extra_files",
"=",
"[",
"]",
"freeze_dir",
"=",
"tempfile",
".",
"mkdtemp",
"(",
")",
"try",
":",
"cmds",
"=",
"fre... | Freezes a script to a .tar or .tar.gz file
The script contains all of the files at the root of the tar
Args:
script_path: Path to python script to be frozen.
freeze_fn: Tar filename (must end in .tar or .tar.gz)
extra_files: List of paths to add to the tar (default is None)
Returns:
List of freeze commands ran
Raises:
subprocess.CalledProcessError: freeze error.
OSError: freeze not found.
NameError: Tar must end in .tar or .tar.gz | [
"Freezes",
"a",
"script",
"to",
"a",
".",
"tar",
"or",
".",
"tar",
".",
"gz",
"file"
] | ff39b4e6d4e6efaf1f571cf0f2c0e0d7ab28c2d6 | https://github.com/bwhite/hadoopy/blob/ff39b4e6d4e6efaf1f571cf0f2c0e0d7ab28c2d6/hadoopy/_freeze.py#L191-L228 | train | 32,238 |
rehandalal/flask-mobility | flask_mobility/decorators.py | mobile_template | def mobile_template(template):
"""
Mark a function as mobile-ready and pass a mobile template if MOBILE.
For example::
@mobile_template('a/{mobile/}b.html')
def view(template=None):
...
if ``request.MOBILE=True`` the template will be `a/mobile/b.html`.
if ``request.MOBILE=False`` the template will be `a/b.html`.
This function is useful if the mobile view uses the same context but a
different template.
"""
def decorator(f):
@functools.wraps(f)
def wrapper(*args, **kwargs):
ctx = stack.top
if ctx is not None and hasattr(ctx, 'request'):
request = ctx.request
is_mobile = getattr(request, 'MOBILE', None)
kwargs['template'] = re.sub(r'{(.+?)}',
r'\1' if is_mobile else '',
template)
return f(*args, **kwargs)
return wrapper
return decorator | python | def mobile_template(template):
"""
Mark a function as mobile-ready and pass a mobile template if MOBILE.
For example::
@mobile_template('a/{mobile/}b.html')
def view(template=None):
...
if ``request.MOBILE=True`` the template will be `a/mobile/b.html`.
if ``request.MOBILE=False`` the template will be `a/b.html`.
This function is useful if the mobile view uses the same context but a
different template.
"""
def decorator(f):
@functools.wraps(f)
def wrapper(*args, **kwargs):
ctx = stack.top
if ctx is not None and hasattr(ctx, 'request'):
request = ctx.request
is_mobile = getattr(request, 'MOBILE', None)
kwargs['template'] = re.sub(r'{(.+?)}',
r'\1' if is_mobile else '',
template)
return f(*args, **kwargs)
return wrapper
return decorator | [
"def",
"mobile_template",
"(",
"template",
")",
":",
"def",
"decorator",
"(",
"f",
")",
":",
"@",
"functools",
".",
"wraps",
"(",
"f",
")",
"def",
"wrapper",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"ctx",
"=",
"stack",
".",
"top",
"i... | Mark a function as mobile-ready and pass a mobile template if MOBILE.
For example::
@mobile_template('a/{mobile/}b.html')
def view(template=None):
...
if ``request.MOBILE=True`` the template will be `a/mobile/b.html`.
if ``request.MOBILE=False`` the template will be `a/b.html`.
This function is useful if the mobile view uses the same context but a
different template. | [
"Mark",
"a",
"function",
"as",
"mobile",
"-",
"ready",
"and",
"pass",
"a",
"mobile",
"template",
"if",
"MOBILE",
"."
] | 6fff3272baf870f2267c05c611df2ba505230321 | https://github.com/rehandalal/flask-mobility/blob/6fff3272baf870f2267c05c611df2ba505230321/flask_mobility/decorators.py#L7-L37 | train | 32,239 |
rehandalal/flask-mobility | flask_mobility/decorators.py | mobilized | def mobilized(normal_fn):
"""
Replace a view function with a normal and mobile view.
For example::
def view():
...
@mobilized(view)
def view():
...
The second function is the mobile version of view. The original
function is overwritten, and the decorator will choose the correct
function based on ``request.MOBILE``.
"""
def decorator(mobile_fn):
@functools.wraps(mobile_fn)
def wrapper(*args, **kwargs):
ctx = stack.top
if ctx is not None and hasattr(ctx, 'request'):
request = ctx.request
if not request.MOBILE:
return normal_fn(*args, **kwargs)
return mobile_fn(*args, **kwargs)
return wrapper
return decorator | python | def mobilized(normal_fn):
"""
Replace a view function with a normal and mobile view.
For example::
def view():
...
@mobilized(view)
def view():
...
The second function is the mobile version of view. The original
function is overwritten, and the decorator will choose the correct
function based on ``request.MOBILE``.
"""
def decorator(mobile_fn):
@functools.wraps(mobile_fn)
def wrapper(*args, **kwargs):
ctx = stack.top
if ctx is not None and hasattr(ctx, 'request'):
request = ctx.request
if not request.MOBILE:
return normal_fn(*args, **kwargs)
return mobile_fn(*args, **kwargs)
return wrapper
return decorator | [
"def",
"mobilized",
"(",
"normal_fn",
")",
":",
"def",
"decorator",
"(",
"mobile_fn",
")",
":",
"@",
"functools",
".",
"wraps",
"(",
"mobile_fn",
")",
"def",
"wrapper",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"ctx",
"=",
"stack",
".",
... | Replace a view function with a normal and mobile view.
For example::
def view():
...
@mobilized(view)
def view():
...
The second function is the mobile version of view. The original
function is overwritten, and the decorator will choose the correct
function based on ``request.MOBILE``. | [
"Replace",
"a",
"view",
"function",
"with",
"a",
"normal",
"and",
"mobile",
"view",
"."
] | 6fff3272baf870f2267c05c611df2ba505230321 | https://github.com/rehandalal/flask-mobility/blob/6fff3272baf870f2267c05c611df2ba505230321/flask_mobility/decorators.py#L40-L69 | train | 32,240 |
achiku/jungle | jungle/rds.py | format_output | def format_output(instances, flag):
"""return formatted string per instance"""
out = []
line_format = '{0}\t{1}\t{2}\t{3}'
name_len = _get_max_name_len(instances) + 3
if flag:
line_format = '{0:<' + str(name_len+5) + '}{1:<16}{2:<65}{3:<16}'
for i in instances:
endpoint = "{0}:{1}".format(i['Endpoint']['Address'], i['Endpoint']['Port'])
out.append(
line_format.format(i['DBInstanceIdentifier'], i['DBInstanceStatus'], endpoint, i['Engine']))
return out | python | def format_output(instances, flag):
"""return formatted string per instance"""
out = []
line_format = '{0}\t{1}\t{2}\t{3}'
name_len = _get_max_name_len(instances) + 3
if flag:
line_format = '{0:<' + str(name_len+5) + '}{1:<16}{2:<65}{3:<16}'
for i in instances:
endpoint = "{0}:{1}".format(i['Endpoint']['Address'], i['Endpoint']['Port'])
out.append(
line_format.format(i['DBInstanceIdentifier'], i['DBInstanceStatus'], endpoint, i['Engine']))
return out | [
"def",
"format_output",
"(",
"instances",
",",
"flag",
")",
":",
"out",
"=",
"[",
"]",
"line_format",
"=",
"'{0}\\t{1}\\t{2}\\t{3}'",
"name_len",
"=",
"_get_max_name_len",
"(",
"instances",
")",
"+",
"3",
"if",
"flag",
":",
"line_format",
"=",
"'{0:<'",
"+",... | return formatted string per instance | [
"return",
"formatted",
"string",
"per",
"instance"
] | fb63f845cfa9e9c0dfbabd8cfa3ebca8177a11ca | https://github.com/achiku/jungle/blob/fb63f845cfa9e9c0dfbabd8cfa3ebca8177a11ca/jungle/rds.py#L6-L18 | train | 32,241 |
achiku/jungle | jungle/rds.py | ls | def ls(ctx, list_formatted):
"""List RDS instances"""
session = create_session(ctx.obj['AWS_PROFILE_NAME'])
rds = session.client('rds')
instances = rds.describe_db_instances()
out = format_output(instances['DBInstances'], list_formatted)
click.echo('\n'.join(out)) | python | def ls(ctx, list_formatted):
"""List RDS instances"""
session = create_session(ctx.obj['AWS_PROFILE_NAME'])
rds = session.client('rds')
instances = rds.describe_db_instances()
out = format_output(instances['DBInstances'], list_formatted)
click.echo('\n'.join(out)) | [
"def",
"ls",
"(",
"ctx",
",",
"list_formatted",
")",
":",
"session",
"=",
"create_session",
"(",
"ctx",
".",
"obj",
"[",
"'AWS_PROFILE_NAME'",
"]",
")",
"rds",
"=",
"session",
".",
"client",
"(",
"'rds'",
")",
"instances",
"=",
"rds",
".",
"describe_db_i... | List RDS instances | [
"List",
"RDS",
"instances"
] | fb63f845cfa9e9c0dfbabd8cfa3ebca8177a11ca | https://github.com/achiku/jungle/blob/fb63f845cfa9e9c0dfbabd8cfa3ebca8177a11ca/jungle/rds.py#L39-L46 | train | 32,242 |
achiku/jungle | jungle/emr.py | ls | def ls(ctx, name):
"""List EMR instances"""
session = create_session(ctx.obj['AWS_PROFILE_NAME'])
client = session.client('emr')
results = client.list_clusters(
ClusterStates=['RUNNING', 'STARTING', 'BOOTSTRAPPING', 'WAITING']
)
for cluster in results['Clusters']:
click.echo("{0}\t{1}\t{2}".format(cluster['Id'], cluster['Name'], cluster['Status']['State'])) | python | def ls(ctx, name):
"""List EMR instances"""
session = create_session(ctx.obj['AWS_PROFILE_NAME'])
client = session.client('emr')
results = client.list_clusters(
ClusterStates=['RUNNING', 'STARTING', 'BOOTSTRAPPING', 'WAITING']
)
for cluster in results['Clusters']:
click.echo("{0}\t{1}\t{2}".format(cluster['Id'], cluster['Name'], cluster['Status']['State'])) | [
"def",
"ls",
"(",
"ctx",
",",
"name",
")",
":",
"session",
"=",
"create_session",
"(",
"ctx",
".",
"obj",
"[",
"'AWS_PROFILE_NAME'",
"]",
")",
"client",
"=",
"session",
".",
"client",
"(",
"'emr'",
")",
"results",
"=",
"client",
".",
"list_clusters",
"... | List EMR instances | [
"List",
"EMR",
"instances"
] | fb63f845cfa9e9c0dfbabd8cfa3ebca8177a11ca | https://github.com/achiku/jungle/blob/fb63f845cfa9e9c0dfbabd8cfa3ebca8177a11ca/jungle/emr.py#L20-L29 | train | 32,243 |
achiku/jungle | jungle/emr.py | ssh | def ssh(ctx, cluster_id, key_file):
"""SSH login to EMR master node"""
session = create_session(ctx.obj['AWS_PROFILE_NAME'])
client = session.client('emr')
result = client.describe_cluster(ClusterId=cluster_id)
target_dns = result['Cluster']['MasterPublicDnsName']
ssh_options = '-o StrictHostKeyChecking=no -o ServerAliveInterval=10'
cmd = 'ssh {ssh_options} -i {key_file} hadoop@{target_dns}'.format(
ssh_options=ssh_options, key_file=key_file, target_dns=target_dns)
subprocess.call(cmd, shell=True) | python | def ssh(ctx, cluster_id, key_file):
"""SSH login to EMR master node"""
session = create_session(ctx.obj['AWS_PROFILE_NAME'])
client = session.client('emr')
result = client.describe_cluster(ClusterId=cluster_id)
target_dns = result['Cluster']['MasterPublicDnsName']
ssh_options = '-o StrictHostKeyChecking=no -o ServerAliveInterval=10'
cmd = 'ssh {ssh_options} -i {key_file} hadoop@{target_dns}'.format(
ssh_options=ssh_options, key_file=key_file, target_dns=target_dns)
subprocess.call(cmd, shell=True) | [
"def",
"ssh",
"(",
"ctx",
",",
"cluster_id",
",",
"key_file",
")",
":",
"session",
"=",
"create_session",
"(",
"ctx",
".",
"obj",
"[",
"'AWS_PROFILE_NAME'",
"]",
")",
"client",
"=",
"session",
".",
"client",
"(",
"'emr'",
")",
"result",
"=",
"client",
... | SSH login to EMR master node | [
"SSH",
"login",
"to",
"EMR",
"master",
"node"
] | fb63f845cfa9e9c0dfbabd8cfa3ebca8177a11ca | https://github.com/achiku/jungle/blob/fb63f845cfa9e9c0dfbabd8cfa3ebca8177a11ca/jungle/emr.py#L36-L46 | train | 32,244 |
achiku/jungle | jungle/emr.py | rm | def rm(ctx, cluster_id):
"""Terminate a EMR cluster"""
session = create_session(ctx.obj['AWS_PROFILE_NAME'])
client = session.client('emr')
try:
result = client.describe_cluster(ClusterId=cluster_id)
target_dns = result['Cluster']['MasterPublicDnsName']
flag = click.prompt(
"Are you sure you want to terminate {0}: {1}? [y/Y]".format(
cluster_id, target_dns), type=str, default='n')
if flag.lower() == 'y':
result = client.terminate_job_flows(JobFlowIds=[cluster_id])
except ClientError as e:
click.echo(e, err=True) | python | def rm(ctx, cluster_id):
"""Terminate a EMR cluster"""
session = create_session(ctx.obj['AWS_PROFILE_NAME'])
client = session.client('emr')
try:
result = client.describe_cluster(ClusterId=cluster_id)
target_dns = result['Cluster']['MasterPublicDnsName']
flag = click.prompt(
"Are you sure you want to terminate {0}: {1}? [y/Y]".format(
cluster_id, target_dns), type=str, default='n')
if flag.lower() == 'y':
result = client.terminate_job_flows(JobFlowIds=[cluster_id])
except ClientError as e:
click.echo(e, err=True) | [
"def",
"rm",
"(",
"ctx",
",",
"cluster_id",
")",
":",
"session",
"=",
"create_session",
"(",
"ctx",
".",
"obj",
"[",
"'AWS_PROFILE_NAME'",
"]",
")",
"client",
"=",
"session",
".",
"client",
"(",
"'emr'",
")",
"try",
":",
"result",
"=",
"client",
".",
... | Terminate a EMR cluster | [
"Terminate",
"a",
"EMR",
"cluster"
] | fb63f845cfa9e9c0dfbabd8cfa3ebca8177a11ca | https://github.com/achiku/jungle/blob/fb63f845cfa9e9c0dfbabd8cfa3ebca8177a11ca/jungle/emr.py#L52-L66 | train | 32,245 |
achiku/jungle | jungle/ec2.py | get_tag_value | def get_tag_value(x, key):
"""Get a value from tag"""
if x is None:
return ''
result = [y['Value'] for y in x if y['Key'] == key]
if result:
return result[0]
return '' | python | def get_tag_value(x, key):
"""Get a value from tag"""
if x is None:
return ''
result = [y['Value'] for y in x if y['Key'] == key]
if result:
return result[0]
return '' | [
"def",
"get_tag_value",
"(",
"x",
",",
"key",
")",
":",
"if",
"x",
"is",
"None",
":",
"return",
"''",
"result",
"=",
"[",
"y",
"[",
"'Value'",
"]",
"for",
"y",
"in",
"x",
"if",
"y",
"[",
"'Key'",
"]",
"==",
"key",
"]",
"if",
"result",
":",
"r... | Get a value from tag | [
"Get",
"a",
"value",
"from",
"tag"
] | fb63f845cfa9e9c0dfbabd8cfa3ebca8177a11ca | https://github.com/achiku/jungle/blob/fb63f845cfa9e9c0dfbabd8cfa3ebca8177a11ca/jungle/ec2.py#L43-L50 | train | 32,246 |
achiku/jungle | jungle/ec2.py | ls | def ls(ctx, name, list_formatted):
"""List EC2 instances"""
session = create_session(ctx.obj['AWS_PROFILE_NAME'])
ec2 = session.resource('ec2')
if name == '*':
instances = ec2.instances.filter()
else:
condition = {'Name': 'tag:Name', 'Values': [name]}
instances = ec2.instances.filter(Filters=[condition])
out = format_output(instances, list_formatted)
click.echo('\n'.join(out)) | python | def ls(ctx, name, list_formatted):
"""List EC2 instances"""
session = create_session(ctx.obj['AWS_PROFILE_NAME'])
ec2 = session.resource('ec2')
if name == '*':
instances = ec2.instances.filter()
else:
condition = {'Name': 'tag:Name', 'Values': [name]}
instances = ec2.instances.filter(Filters=[condition])
out = format_output(instances, list_formatted)
click.echo('\n'.join(out)) | [
"def",
"ls",
"(",
"ctx",
",",
"name",
",",
"list_formatted",
")",
":",
"session",
"=",
"create_session",
"(",
"ctx",
".",
"obj",
"[",
"'AWS_PROFILE_NAME'",
"]",
")",
"ec2",
"=",
"session",
".",
"resource",
"(",
"'ec2'",
")",
"if",
"name",
"==",
"'*'",
... | List EC2 instances | [
"List",
"EC2",
"instances"
] | fb63f845cfa9e9c0dfbabd8cfa3ebca8177a11ca | https://github.com/achiku/jungle/blob/fb63f845cfa9e9c0dfbabd8cfa3ebca8177a11ca/jungle/ec2.py#L65-L75 | train | 32,247 |
achiku/jungle | jungle/ec2.py | up | def up(ctx, instance_id):
"""Start EC2 instance"""
session = create_session(ctx.obj['AWS_PROFILE_NAME'])
ec2 = session.resource('ec2')
try:
instance = ec2.Instance(instance_id)
instance.start()
except botocore.exceptions.ClientError as e:
click.echo("Invalid instance ID {0} ({1})".format(instance_id, e), err=True)
sys.exit(2) | python | def up(ctx, instance_id):
"""Start EC2 instance"""
session = create_session(ctx.obj['AWS_PROFILE_NAME'])
ec2 = session.resource('ec2')
try:
instance = ec2.Instance(instance_id)
instance.start()
except botocore.exceptions.ClientError as e:
click.echo("Invalid instance ID {0} ({1})".format(instance_id, e), err=True)
sys.exit(2) | [
"def",
"up",
"(",
"ctx",
",",
"instance_id",
")",
":",
"session",
"=",
"create_session",
"(",
"ctx",
".",
"obj",
"[",
"'AWS_PROFILE_NAME'",
"]",
")",
"ec2",
"=",
"session",
".",
"resource",
"(",
"'ec2'",
")",
"try",
":",
"instance",
"=",
"ec2",
".",
... | Start EC2 instance | [
"Start",
"EC2",
"instance"
] | fb63f845cfa9e9c0dfbabd8cfa3ebca8177a11ca | https://github.com/achiku/jungle/blob/fb63f845cfa9e9c0dfbabd8cfa3ebca8177a11ca/jungle/ec2.py#L81-L90 | train | 32,248 |
achiku/jungle | jungle/ec2.py | create_ssh_command | def create_ssh_command(session, instance_id, instance_name, username, key_file, port, ssh_options,
use_private_ip, gateway_instance_id, gateway_username):
"""Create SSH Login command string"""
ec2 = session.resource('ec2')
if instance_id is not None:
try:
instance = ec2.Instance(instance_id)
hostname = _get_instance_ip_address(instance, use_private_ip)
except botocore.exceptions.ClientError as e:
click.echo("Invalid instance ID {0} ({1})".format(instance_id, e), err=True)
sys.exit(2)
elif instance_name is not None:
try:
conditions = [
{'Name': 'tag:Name', 'Values': [instance_name]},
{'Name': 'instance-state-name', 'Values': ['running']},
]
instances = ec2.instances.filter(Filters=conditions)
target_instances = []
for idx, i in enumerate(instances):
target_instances.append(i)
if len(target_instances) == 1:
instance = target_instances[0]
hostname = _get_instance_ip_address(instance, use_private_ip)
else:
for idx, i in enumerate(instances):
tag_name = get_tag_value(i.tags, 'Name')
click.echo('[{0}]: {1}\t{2}\t{3}\t{4}\t{5}'.format(
idx, i.id, i.public_ip_address, i.state['Name'], tag_name, i.key_name))
selected_idx = click.prompt("Please enter a valid number", type=int, default=0)
if len(target_instances) - 1 < selected_idx or selected_idx < 0:
click.echo("selected number [{0}] is invalid".format(selected_idx), err=True)
sys.exit(2)
click.echo("{0} is selected.".format(selected_idx))
instance = target_instances[selected_idx]
hostname = _get_instance_ip_address(instance, use_private_ip)
except botocore.exceptions.ClientError as e:
click.echo("Invalid instance ID {0} ({1})".format(instance_id, e), err=True)
sys.exit(2)
# TODO: need to refactor and make it testable
if key_file is None:
key_file_option = ''
else:
key_file_option = ' -i {0}'.format(key_file)
gateway_username_option = build_option_username(gateway_username)
username_option = build_option_username(username)
if ssh_options is None:
ssh_options = ''
else:
ssh_options = ' {0}'.format(ssh_options)
if gateway_instance_id is not None:
gateway_instance = ec2.Instance(gateway_instance_id)
gateway_public_ip = gateway_instance.public_ip_address
hostname = instance.private_ip_address
cmd = 'ssh -tt{0} {1}{2} -p {3}{4} ssh{5} {6}'.format(
gateway_username_option, gateway_public_ip, key_file_option,
port, ssh_options, username_option, hostname)
else:
cmd = 'ssh{0} {1}{2} -p {3}{4}'.format(username_option, hostname, key_file_option, port, ssh_options)
return cmd | python | def create_ssh_command(session, instance_id, instance_name, username, key_file, port, ssh_options,
use_private_ip, gateway_instance_id, gateway_username):
"""Create SSH Login command string"""
ec2 = session.resource('ec2')
if instance_id is not None:
try:
instance = ec2.Instance(instance_id)
hostname = _get_instance_ip_address(instance, use_private_ip)
except botocore.exceptions.ClientError as e:
click.echo("Invalid instance ID {0} ({1})".format(instance_id, e), err=True)
sys.exit(2)
elif instance_name is not None:
try:
conditions = [
{'Name': 'tag:Name', 'Values': [instance_name]},
{'Name': 'instance-state-name', 'Values': ['running']},
]
instances = ec2.instances.filter(Filters=conditions)
target_instances = []
for idx, i in enumerate(instances):
target_instances.append(i)
if len(target_instances) == 1:
instance = target_instances[0]
hostname = _get_instance_ip_address(instance, use_private_ip)
else:
for idx, i in enumerate(instances):
tag_name = get_tag_value(i.tags, 'Name')
click.echo('[{0}]: {1}\t{2}\t{3}\t{4}\t{5}'.format(
idx, i.id, i.public_ip_address, i.state['Name'], tag_name, i.key_name))
selected_idx = click.prompt("Please enter a valid number", type=int, default=0)
if len(target_instances) - 1 < selected_idx or selected_idx < 0:
click.echo("selected number [{0}] is invalid".format(selected_idx), err=True)
sys.exit(2)
click.echo("{0} is selected.".format(selected_idx))
instance = target_instances[selected_idx]
hostname = _get_instance_ip_address(instance, use_private_ip)
except botocore.exceptions.ClientError as e:
click.echo("Invalid instance ID {0} ({1})".format(instance_id, e), err=True)
sys.exit(2)
# TODO: need to refactor and make it testable
if key_file is None:
key_file_option = ''
else:
key_file_option = ' -i {0}'.format(key_file)
gateway_username_option = build_option_username(gateway_username)
username_option = build_option_username(username)
if ssh_options is None:
ssh_options = ''
else:
ssh_options = ' {0}'.format(ssh_options)
if gateway_instance_id is not None:
gateway_instance = ec2.Instance(gateway_instance_id)
gateway_public_ip = gateway_instance.public_ip_address
hostname = instance.private_ip_address
cmd = 'ssh -tt{0} {1}{2} -p {3}{4} ssh{5} {6}'.format(
gateway_username_option, gateway_public_ip, key_file_option,
port, ssh_options, username_option, hostname)
else:
cmd = 'ssh{0} {1}{2} -p {3}{4}'.format(username_option, hostname, key_file_option, port, ssh_options)
return cmd | [
"def",
"create_ssh_command",
"(",
"session",
",",
"instance_id",
",",
"instance_name",
",",
"username",
",",
"key_file",
",",
"port",
",",
"ssh_options",
",",
"use_private_ip",
",",
"gateway_instance_id",
",",
"gateway_username",
")",
":",
"ec2",
"=",
"session",
... | Create SSH Login command string | [
"Create",
"SSH",
"Login",
"command",
"string"
] | fb63f845cfa9e9c0dfbabd8cfa3ebca8177a11ca | https://github.com/achiku/jungle/blob/fb63f845cfa9e9c0dfbabd8cfa3ebca8177a11ca/jungle/ec2.py#L108-L169 | train | 32,249 |
achiku/jungle | jungle/ec2.py | ssh | def ssh(ctx, instance_id, instance_name, username, key_file, port, ssh_options, private_ip,
gateway_instance_id, gateway_username, dry_run):
"""SSH to EC2 instance"""
session = create_session(ctx.obj['AWS_PROFILE_NAME'])
if instance_id is None and instance_name is None:
click.echo(
"One of --instance-id/-i or --instance-name/-n"
" has to be specified.", err=True)
sys.exit(1)
elif instance_id is not None and instance_name is not None:
click.echo(
"Both --instance-id/-i and --instance-name/-n "
"can't to be specified at the same time.", err=True)
sys.exit(1)
cmd = create_ssh_command(
session, instance_id, instance_name, username, key_file, port, ssh_options, private_ip,
gateway_instance_id, gateway_username)
if not dry_run:
subprocess.call(cmd, shell=True)
else:
click.echo(cmd) | python | def ssh(ctx, instance_id, instance_name, username, key_file, port, ssh_options, private_ip,
gateway_instance_id, gateway_username, dry_run):
"""SSH to EC2 instance"""
session = create_session(ctx.obj['AWS_PROFILE_NAME'])
if instance_id is None and instance_name is None:
click.echo(
"One of --instance-id/-i or --instance-name/-n"
" has to be specified.", err=True)
sys.exit(1)
elif instance_id is not None and instance_name is not None:
click.echo(
"Both --instance-id/-i and --instance-name/-n "
"can't to be specified at the same time.", err=True)
sys.exit(1)
cmd = create_ssh_command(
session, instance_id, instance_name, username, key_file, port, ssh_options, private_ip,
gateway_instance_id, gateway_username)
if not dry_run:
subprocess.call(cmd, shell=True)
else:
click.echo(cmd) | [
"def",
"ssh",
"(",
"ctx",
",",
"instance_id",
",",
"instance_name",
",",
"username",
",",
"key_file",
",",
"port",
",",
"ssh_options",
",",
"private_ip",
",",
"gateway_instance_id",
",",
"gateway_username",
",",
"dry_run",
")",
":",
"session",
"=",
"create_ses... | SSH to EC2 instance | [
"SSH",
"to",
"EC2",
"instance"
] | fb63f845cfa9e9c0dfbabd8cfa3ebca8177a11ca | https://github.com/achiku/jungle/blob/fb63f845cfa9e9c0dfbabd8cfa3ebca8177a11ca/jungle/ec2.py#L191-L212 | train | 32,250 |
achiku/jungle | jungle/elb.py | ls | def ls(ctx, name, list_instances):
"""List ELB instances"""
session = create_session(ctx.obj['AWS_PROFILE_NAME'])
client = session.client('elb')
inst = {'LoadBalancerDescriptions': []}
if name == '*':
inst = client.describe_load_balancers()
else:
try:
inst = client.describe_load_balancers(LoadBalancerNames=[name])
except ClientError as e:
click.echo(e, err=True)
for i in inst['LoadBalancerDescriptions']:
click.echo(i['LoadBalancerName'])
if list_instances:
for ec2 in i['Instances']:
health = client.describe_instance_health(
LoadBalancerName=name,
Instances=[ec2]
)
click.echo('{0}\t{1}'.format(ec2['InstanceId'], health['InstanceStates'][0]['State'])) | python | def ls(ctx, name, list_instances):
"""List ELB instances"""
session = create_session(ctx.obj['AWS_PROFILE_NAME'])
client = session.client('elb')
inst = {'LoadBalancerDescriptions': []}
if name == '*':
inst = client.describe_load_balancers()
else:
try:
inst = client.describe_load_balancers(LoadBalancerNames=[name])
except ClientError as e:
click.echo(e, err=True)
for i in inst['LoadBalancerDescriptions']:
click.echo(i['LoadBalancerName'])
if list_instances:
for ec2 in i['Instances']:
health = client.describe_instance_health(
LoadBalancerName=name,
Instances=[ec2]
)
click.echo('{0}\t{1}'.format(ec2['InstanceId'], health['InstanceStates'][0]['State'])) | [
"def",
"ls",
"(",
"ctx",
",",
"name",
",",
"list_instances",
")",
":",
"session",
"=",
"create_session",
"(",
"ctx",
".",
"obj",
"[",
"'AWS_PROFILE_NAME'",
"]",
")",
"client",
"=",
"session",
".",
"client",
"(",
"'elb'",
")",
"inst",
"=",
"{",
"'LoadBa... | List ELB instances | [
"List",
"ELB",
"instances"
] | fb63f845cfa9e9c0dfbabd8cfa3ebca8177a11ca | https://github.com/achiku/jungle/blob/fb63f845cfa9e9c0dfbabd8cfa3ebca8177a11ca/jungle/elb.py#L19-L41 | train | 32,251 |
achiku/jungle | jungle/asg.py | ls | def ls(ctx, name, list_formatted):
"""List AutoScaling groups"""
session = create_session(ctx.obj['AWS_PROFILE_NAME'])
client = session.client('autoscaling')
if name == "*":
groups = client.describe_auto_scaling_groups()
else:
groups = client.describe_auto_scaling_groups(
AutoScalingGroupNames=[
name,
]
)
out = format_output(groups, list_formatted)
click.echo('\n'.join(out)) | python | def ls(ctx, name, list_formatted):
"""List AutoScaling groups"""
session = create_session(ctx.obj['AWS_PROFILE_NAME'])
client = session.client('autoscaling')
if name == "*":
groups = client.describe_auto_scaling_groups()
else:
groups = client.describe_auto_scaling_groups(
AutoScalingGroupNames=[
name,
]
)
out = format_output(groups, list_formatted)
click.echo('\n'.join(out)) | [
"def",
"ls",
"(",
"ctx",
",",
"name",
",",
"list_formatted",
")",
":",
"session",
"=",
"create_session",
"(",
"ctx",
".",
"obj",
"[",
"'AWS_PROFILE_NAME'",
"]",
")",
"client",
"=",
"session",
".",
"client",
"(",
"'autoscaling'",
")",
"if",
"name",
"==",
... | List AutoScaling groups | [
"List",
"AutoScaling",
"groups"
] | fb63f845cfa9e9c0dfbabd8cfa3ebca8177a11ca | https://github.com/achiku/jungle/blob/fb63f845cfa9e9c0dfbabd8cfa3ebca8177a11ca/jungle/asg.py#L35-L49 | train | 32,252 |
wooyek/flask-social-blueprint | src/flask_social_blueprint/core.py | SocialBlueprint.authenticate | def authenticate(self, provider):
"""
Starts OAuth authorization flow, will redirect to 3rd party site.
"""
callback_url = url_for(".callback", provider=provider, _external=True)
provider = self.get_provider(provider)
session['next'] = request.args.get('next') or ''
return provider.authorize(callback_url) | python | def authenticate(self, provider):
"""
Starts OAuth authorization flow, will redirect to 3rd party site.
"""
callback_url = url_for(".callback", provider=provider, _external=True)
provider = self.get_provider(provider)
session['next'] = request.args.get('next') or ''
return provider.authorize(callback_url) | [
"def",
"authenticate",
"(",
"self",
",",
"provider",
")",
":",
"callback_url",
"=",
"url_for",
"(",
"\".callback\"",
",",
"provider",
"=",
"provider",
",",
"_external",
"=",
"True",
")",
"provider",
"=",
"self",
".",
"get_provider",
"(",
"provider",
")",
"... | Starts OAuth authorization flow, will redirect to 3rd party site. | [
"Starts",
"OAuth",
"authorization",
"flow",
"will",
"redirect",
"to",
"3rd",
"party",
"site",
"."
] | fc0ea5bc7a2efb833d78366cdaa4a1b7add807a4 | https://github.com/wooyek/flask-social-blueprint/blob/fc0ea5bc7a2efb833d78366cdaa4a1b7add807a4/src/flask_social_blueprint/core.py#L30-L37 | train | 32,253 |
wooyek/flask-social-blueprint | src/flask_social_blueprint/core.py | SocialBlueprint.callback | def callback(self, provider):
"""
Handles 3rd party callback and processes it's data
"""
provider = self.get_provider(provider)
try:
return provider.authorized_handler(self.login)(provider=provider)
except OAuthException as ex:
logging.error("Data: %s", ex.data)
raise | python | def callback(self, provider):
"""
Handles 3rd party callback and processes it's data
"""
provider = self.get_provider(provider)
try:
return provider.authorized_handler(self.login)(provider=provider)
except OAuthException as ex:
logging.error("Data: %s", ex.data)
raise | [
"def",
"callback",
"(",
"self",
",",
"provider",
")",
":",
"provider",
"=",
"self",
".",
"get_provider",
"(",
"provider",
")",
"try",
":",
"return",
"provider",
".",
"authorized_handler",
"(",
"self",
".",
"login",
")",
"(",
"provider",
"=",
"provider",
... | Handles 3rd party callback and processes it's data | [
"Handles",
"3rd",
"party",
"callback",
"and",
"processes",
"it",
"s",
"data"
] | fc0ea5bc7a2efb833d78366cdaa4a1b7add807a4 | https://github.com/wooyek/flask-social-blueprint/blob/fc0ea5bc7a2efb833d78366cdaa4a1b7add807a4/src/flask_social_blueprint/core.py#L39-L48 | train | 32,254 |
ecometrica/django-vinaigrette | vinaigrette/__init__.py | register | def register(model, fields, restrict_to=None, manager=None, properties=None, contexts=None):
"""Tell vinaigrette which fields on a Django model should be translated.
Arguments:
model -- The relevant model class
fields -- A list or tuple of field names. e.g. ['name', 'nickname']
restrict_to -- Optional. A django.db.models.Q object representing the subset
of objects to collect translation strings from.
manager -- Optional. A reference to a manager -- e.g. Person.objects -- to use
when collecting translation strings.
properties -- A dictionary of "read only" properties that are composed by more that one field
e.g. {'full_name': ['first_name', 'last_name']}
contexts -- A dictionary including any (pgettext) context that may need
to be applied to each field.
e.g. {'name': 'db category name', 'description': 'db detailed category description'}
Note that both restrict_to and manager are only used when collecting translation
strings. Gettext lookups will always be performed on relevant fields for all
objects on registered models.
"""
if not contexts:
contexts = {}
global _REGISTRY
_REGISTRY[model] = {
'fields': fields,
'contexts': contexts,
'restrict_to': restrict_to,
'manager': manager,
'properties': properties,
}
for field in fields:
setattr(model, field, VinaigretteDescriptor(field, contexts.get(field, None)))
model.untranslated = lambda self, fieldname: self.__dict__[fieldname]
pre_save.connect(_vinaigrette_pre_save, sender=model)
post_save.connect(_vinaigrette_post_save, sender=model) | python | def register(model, fields, restrict_to=None, manager=None, properties=None, contexts=None):
"""Tell vinaigrette which fields on a Django model should be translated.
Arguments:
model -- The relevant model class
fields -- A list or tuple of field names. e.g. ['name', 'nickname']
restrict_to -- Optional. A django.db.models.Q object representing the subset
of objects to collect translation strings from.
manager -- Optional. A reference to a manager -- e.g. Person.objects -- to use
when collecting translation strings.
properties -- A dictionary of "read only" properties that are composed by more that one field
e.g. {'full_name': ['first_name', 'last_name']}
contexts -- A dictionary including any (pgettext) context that may need
to be applied to each field.
e.g. {'name': 'db category name', 'description': 'db detailed category description'}
Note that both restrict_to and manager are only used when collecting translation
strings. Gettext lookups will always be performed on relevant fields for all
objects on registered models.
"""
if not contexts:
contexts = {}
global _REGISTRY
_REGISTRY[model] = {
'fields': fields,
'contexts': contexts,
'restrict_to': restrict_to,
'manager': manager,
'properties': properties,
}
for field in fields:
setattr(model, field, VinaigretteDescriptor(field, contexts.get(field, None)))
model.untranslated = lambda self, fieldname: self.__dict__[fieldname]
pre_save.connect(_vinaigrette_pre_save, sender=model)
post_save.connect(_vinaigrette_post_save, sender=model) | [
"def",
"register",
"(",
"model",
",",
"fields",
",",
"restrict_to",
"=",
"None",
",",
"manager",
"=",
"None",
",",
"properties",
"=",
"None",
",",
"contexts",
"=",
"None",
")",
":",
"if",
"not",
"contexts",
":",
"contexts",
"=",
"{",
"}",
"global",
"... | Tell vinaigrette which fields on a Django model should be translated.
Arguments:
model -- The relevant model class
fields -- A list or tuple of field names. e.g. ['name', 'nickname']
restrict_to -- Optional. A django.db.models.Q object representing the subset
of objects to collect translation strings from.
manager -- Optional. A reference to a manager -- e.g. Person.objects -- to use
when collecting translation strings.
properties -- A dictionary of "read only" properties that are composed by more that one field
e.g. {'full_name': ['first_name', 'last_name']}
contexts -- A dictionary including any (pgettext) context that may need
to be applied to each field.
e.g. {'name': 'db category name', 'description': 'db detailed category description'}
Note that both restrict_to and manager are only used when collecting translation
strings. Gettext lookups will always be performed on relevant fields for all
objects on registered models. | [
"Tell",
"vinaigrette",
"which",
"fields",
"on",
"a",
"Django",
"model",
"should",
"be",
"translated",
"."
] | abfb3396407f06150af9ea8c333afa286ad08235 | https://github.com/ecometrica/django-vinaigrette/blob/abfb3396407f06150af9ea8c333afa286ad08235/vinaigrette/__init__.py#L29-L66 | train | 32,255 |
MicroPyramid/django-simple-pagination | simple_pagination/utils.py | get_page_numbers | def get_page_numbers(current_page, num_pages):
"""Default callable for page listing.
Produce a Digg-style pagination.
"""
if current_page <= 2:
start_page = 1
else:
start_page = current_page - 2
if num_pages <= 4:
end_page = num_pages
else:
end_page = start_page + 4
if end_page > num_pages:
end_page = num_pages
pages = []
if current_page != 1:
pages.append('first')
pages.append('previous')
pages.extend([i for i in range(start_page, end_page + 1)])
if current_page != num_pages:
pages.append('next')
pages.append('last')
return pages | python | def get_page_numbers(current_page, num_pages):
"""Default callable for page listing.
Produce a Digg-style pagination.
"""
if current_page <= 2:
start_page = 1
else:
start_page = current_page - 2
if num_pages <= 4:
end_page = num_pages
else:
end_page = start_page + 4
if end_page > num_pages:
end_page = num_pages
pages = []
if current_page != 1:
pages.append('first')
pages.append('previous')
pages.extend([i for i in range(start_page, end_page + 1)])
if current_page != num_pages:
pages.append('next')
pages.append('last')
return pages | [
"def",
"get_page_numbers",
"(",
"current_page",
",",
"num_pages",
")",
":",
"if",
"current_page",
"<=",
"2",
":",
"start_page",
"=",
"1",
"else",
":",
"start_page",
"=",
"current_page",
"-",
"2",
"if",
"num_pages",
"<=",
"4",
":",
"end_page",
"=",
"num_pag... | Default callable for page listing.
Produce a Digg-style pagination. | [
"Default",
"callable",
"for",
"page",
"listing",
".",
"Produce",
"a",
"Digg",
"-",
"style",
"pagination",
"."
] | 8cb68b2b51b06cf8f72a13afa64564b1c49c3b41 | https://github.com/MicroPyramid/django-simple-pagination/blob/8cb68b2b51b06cf8f72a13afa64564b1c49c3b41/simple_pagination/utils.py#L42-L67 | train | 32,256 |
ecometrica/django-vinaigrette | vinaigrette/management/commands/makemessages.py | _get_po_paths | def _get_po_paths(locales=[]):
"""Returns paths to all relevant po files in the current project."""
basedirs = [os.path.join('conf', 'locale'), 'locale']
if os.environ.get('DJANGO_SETTINGS_MODULE'):
from django.conf import settings
basedirs.extend(settings.LOCALE_PATHS)
# Gather existing directories.
basedirs = set(map(os.path.abspath, filter(os.path.isdir, basedirs)))
if not basedirs:
raise CommandError("This script should be run from the Django SVN tree or your project or app tree, or with the settings module specified.")
po_paths = []
for basedir in basedirs:
for locale in locales:
basedir_locale = os.path.join(basedir, locale, 'LC_MESSAGES')
for dirpath, dirnames, filenames in os.walk(basedir_locale):
for f in filenames:
if f.endswith('.po'):
po_paths.append(os.path.join(dirpath, f))
return po_paths | python | def _get_po_paths(locales=[]):
"""Returns paths to all relevant po files in the current project."""
basedirs = [os.path.join('conf', 'locale'), 'locale']
if os.environ.get('DJANGO_SETTINGS_MODULE'):
from django.conf import settings
basedirs.extend(settings.LOCALE_PATHS)
# Gather existing directories.
basedirs = set(map(os.path.abspath, filter(os.path.isdir, basedirs)))
if not basedirs:
raise CommandError("This script should be run from the Django SVN tree or your project or app tree, or with the settings module specified.")
po_paths = []
for basedir in basedirs:
for locale in locales:
basedir_locale = os.path.join(basedir, locale, 'LC_MESSAGES')
for dirpath, dirnames, filenames in os.walk(basedir_locale):
for f in filenames:
if f.endswith('.po'):
po_paths.append(os.path.join(dirpath, f))
return po_paths | [
"def",
"_get_po_paths",
"(",
"locales",
"=",
"[",
"]",
")",
":",
"basedirs",
"=",
"[",
"os",
".",
"path",
".",
"join",
"(",
"'conf'",
",",
"'locale'",
")",
",",
"'locale'",
"]",
"if",
"os",
".",
"environ",
".",
"get",
"(",
"'DJANGO_SETTINGS_MODULE'",
... | Returns paths to all relevant po files in the current project. | [
"Returns",
"paths",
"to",
"all",
"relevant",
"po",
"files",
"in",
"the",
"current",
"project",
"."
] | abfb3396407f06150af9ea8c333afa286ad08235 | https://github.com/ecometrica/django-vinaigrette/blob/abfb3396407f06150af9ea8c333afa286ad08235/vinaigrette/management/commands/makemessages.py#L14-L35 | train | 32,257 |
MicroPyramid/django-simple-pagination | simple_pagination/templatetags/paginate.py | show_pageitems | def show_pageitems(_, token):
"""Show page items.
Usage:
.. code-block:: html+django
{% show_pageitems per_page %}
"""
# Validate args.
if len(token.contents.split()) != 1:
msg = '%r tag takes no arguments' % token.contents.split()[0]
raise template.TemplateSyntaxError(msg)
# Call the node.
return ShowPageItemsNode() | python | def show_pageitems(_, token):
"""Show page items.
Usage:
.. code-block:: html+django
{% show_pageitems per_page %}
"""
# Validate args.
if len(token.contents.split()) != 1:
msg = '%r tag takes no arguments' % token.contents.split()[0]
raise template.TemplateSyntaxError(msg)
# Call the node.
return ShowPageItemsNode() | [
"def",
"show_pageitems",
"(",
"_",
",",
"token",
")",
":",
"# Validate args.",
"if",
"len",
"(",
"token",
".",
"contents",
".",
"split",
"(",
")",
")",
"!=",
"1",
":",
"msg",
"=",
"'%r tag takes no arguments'",
"%",
"token",
".",
"contents",
".",
"split"... | Show page items.
Usage:
.. code-block:: html+django
{% show_pageitems per_page %} | [
"Show",
"page",
"items",
"."
] | 8cb68b2b51b06cf8f72a13afa64564b1c49c3b41 | https://github.com/MicroPyramid/django-simple-pagination/blob/8cb68b2b51b06cf8f72a13afa64564b1c49c3b41/simple_pagination/templatetags/paginate.py#L359-L374 | train | 32,258 |
ColtonProvias/sqlalchemy-jsonapi | sqlalchemy_jsonapi/errors.py | user_error | def user_error(status_code, title, detail, pointer):
"""Create and return a general user error response that is jsonapi compliant.
Required args:
status_code: The HTTP status code associated with the problem.
title: A short summary of the problem.
detail: An explanation specific to the occurence of the problem.
pointer: The request path associated with the source of the problem.
"""
response = {
'errors': [{
'status': status_code,
'source': {'pointer': '{0}'.format(pointer)},
'title': title,
'detail': detail,
}],
'jsonapi': {
'version': '1.0'
},
'meta': {
'sqlalchemy_jsonapi_version': __version__
}
}
return json.dumps(response), status_code | python | def user_error(status_code, title, detail, pointer):
"""Create and return a general user error response that is jsonapi compliant.
Required args:
status_code: The HTTP status code associated with the problem.
title: A short summary of the problem.
detail: An explanation specific to the occurence of the problem.
pointer: The request path associated with the source of the problem.
"""
response = {
'errors': [{
'status': status_code,
'source': {'pointer': '{0}'.format(pointer)},
'title': title,
'detail': detail,
}],
'jsonapi': {
'version': '1.0'
},
'meta': {
'sqlalchemy_jsonapi_version': __version__
}
}
return json.dumps(response), status_code | [
"def",
"user_error",
"(",
"status_code",
",",
"title",
",",
"detail",
",",
"pointer",
")",
":",
"response",
"=",
"{",
"'errors'",
":",
"[",
"{",
"'status'",
":",
"status_code",
",",
"'source'",
":",
"{",
"'pointer'",
":",
"'{0}'",
".",
"format",
"(",
"... | Create and return a general user error response that is jsonapi compliant.
Required args:
status_code: The HTTP status code associated with the problem.
title: A short summary of the problem.
detail: An explanation specific to the occurence of the problem.
pointer: The request path associated with the source of the problem. | [
"Create",
"and",
"return",
"a",
"general",
"user",
"error",
"response",
"that",
"is",
"jsonapi",
"compliant",
"."
] | 40f8b5970d44935b27091c2bf3224482d23311bb | https://github.com/ColtonProvias/sqlalchemy-jsonapi/blob/40f8b5970d44935b27091c2bf3224482d23311bb/sqlalchemy_jsonapi/errors.py#L146-L169 | train | 32,259 |
ColtonProvias/sqlalchemy-jsonapi | sqlalchemy_jsonapi/flaskext.py | override | def override(original, results):
"""
If a receiver to a signal returns a value, we override the original value
with the last returned value.
:param original: The original value
:param results: The results from the signal
"""
overrides = [v for fn, v in results if v is not None]
if len(overrides) == 0:
return original
return overrides[-1] | python | def override(original, results):
"""
If a receiver to a signal returns a value, we override the original value
with the last returned value.
:param original: The original value
:param results: The results from the signal
"""
overrides = [v for fn, v in results if v is not None]
if len(overrides) == 0:
return original
return overrides[-1] | [
"def",
"override",
"(",
"original",
",",
"results",
")",
":",
"overrides",
"=",
"[",
"v",
"for",
"fn",
",",
"v",
"in",
"results",
"if",
"v",
"is",
"not",
"None",
"]",
"if",
"len",
"(",
"overrides",
")",
"==",
"0",
":",
"return",
"original",
"return... | If a receiver to a signal returns a value, we override the original value
with the last returned value.
:param original: The original value
:param results: The results from the signal | [
"If",
"a",
"receiver",
"to",
"a",
"signal",
"returns",
"a",
"value",
"we",
"override",
"the",
"original",
"value",
"with",
"the",
"last",
"returned",
"value",
"."
] | 40f8b5970d44935b27091c2bf3224482d23311bb | https://github.com/ColtonProvias/sqlalchemy-jsonapi/blob/40f8b5970d44935b27091c2bf3224482d23311bb/sqlalchemy_jsonapi/flaskext.py#L49-L60 | train | 32,260 |
ColtonProvias/sqlalchemy-jsonapi | sqlalchemy_jsonapi/flaskext.py | JSONAPIEncoder.default | def default(self, value):
"""
Handle UUID, datetime, and callables.
:param value: Value to encode
"""
if isinstance(value, uuid.UUID):
return str(value)
elif isinstance(value, datetime.datetime):
return value.isoformat()
elif callable(value):
return str(value)
return json.JSONEncoder.default(self, value) | python | def default(self, value):
"""
Handle UUID, datetime, and callables.
:param value: Value to encode
"""
if isinstance(value, uuid.UUID):
return str(value)
elif isinstance(value, datetime.datetime):
return value.isoformat()
elif callable(value):
return str(value)
return json.JSONEncoder.default(self, value) | [
"def",
"default",
"(",
"self",
",",
"value",
")",
":",
"if",
"isinstance",
"(",
"value",
",",
"uuid",
".",
"UUID",
")",
":",
"return",
"str",
"(",
"value",
")",
"elif",
"isinstance",
"(",
"value",
",",
"datetime",
".",
"datetime",
")",
":",
"return",... | Handle UUID, datetime, and callables.
:param value: Value to encode | [
"Handle",
"UUID",
"datetime",
"and",
"callables",
"."
] | 40f8b5970d44935b27091c2bf3224482d23311bb | https://github.com/ColtonProvias/sqlalchemy-jsonapi/blob/40f8b5970d44935b27091c2bf3224482d23311bb/sqlalchemy_jsonapi/flaskext.py#L24-L36 | train | 32,261 |
ColtonProvias/sqlalchemy-jsonapi | sqlalchemy_jsonapi/flaskext.py | FlaskJSONAPI.init_app | def init_app(self, app, sqla, namespace='api', route_prefix='/api'):
"""
Initialize the adapter if it hasn't already been initialized.
:param app: Flask application
:param sqla: Flask-SQLAlchemy instance
:param namespace: Prefixes all generated routes
:param route_prefix: The base path for the generated routes
"""
self.app = app
self.sqla = sqla
self._setup_adapter(namespace, route_prefix) | python | def init_app(self, app, sqla, namespace='api', route_prefix='/api'):
"""
Initialize the adapter if it hasn't already been initialized.
:param app: Flask application
:param sqla: Flask-SQLAlchemy instance
:param namespace: Prefixes all generated routes
:param route_prefix: The base path for the generated routes
"""
self.app = app
self.sqla = sqla
self._setup_adapter(namespace, route_prefix) | [
"def",
"init_app",
"(",
"self",
",",
"app",
",",
"sqla",
",",
"namespace",
"=",
"'api'",
",",
"route_prefix",
"=",
"'/api'",
")",
":",
"self",
".",
"app",
"=",
"app",
"self",
".",
"sqla",
"=",
"sqla",
"self",
".",
"_setup_adapter",
"(",
"namespace",
... | Initialize the adapter if it hasn't already been initialized.
:param app: Flask application
:param sqla: Flask-SQLAlchemy instance
:param namespace: Prefixes all generated routes
:param route_prefix: The base path for the generated routes | [
"Initialize",
"the",
"adapter",
"if",
"it",
"hasn",
"t",
"already",
"been",
"initialized",
"."
] | 40f8b5970d44935b27091c2bf3224482d23311bb | https://github.com/ColtonProvias/sqlalchemy-jsonapi/blob/40f8b5970d44935b27091c2bf3224482d23311bb/sqlalchemy_jsonapi/flaskext.py#L106-L118 | train | 32,262 |
ColtonProvias/sqlalchemy-jsonapi | sqlalchemy_jsonapi/flaskext.py | FlaskJSONAPI.wrap_handler | def wrap_handler(self, api_types, methods, endpoints):
"""
Allow for a handler to be wrapped in a chain.
:param api_types: Types to wrap handlers for
:param methods: Methods to wrap handlers for
:param endpoints: Endpoints to wrap handlers for
"""
def wrapper(fn):
@wraps(fn)
def wrapped(*args, **kwargs):
return fn(*args, **kwargs)
for api_type in api_types:
for method in methods:
for endpoint in endpoints:
key = (api_type, method, endpoint)
self._handler_chains.setdefault(key, [])
self._handler_chains[key].append(wrapped)
return wrapped
return wrapper | python | def wrap_handler(self, api_types, methods, endpoints):
"""
Allow for a handler to be wrapped in a chain.
:param api_types: Types to wrap handlers for
:param methods: Methods to wrap handlers for
:param endpoints: Endpoints to wrap handlers for
"""
def wrapper(fn):
@wraps(fn)
def wrapped(*args, **kwargs):
return fn(*args, **kwargs)
for api_type in api_types:
for method in methods:
for endpoint in endpoints:
key = (api_type, method, endpoint)
self._handler_chains.setdefault(key, [])
self._handler_chains[key].append(wrapped)
return wrapped
return wrapper | [
"def",
"wrap_handler",
"(",
"self",
",",
"api_types",
",",
"methods",
",",
"endpoints",
")",
":",
"def",
"wrapper",
"(",
"fn",
")",
":",
"@",
"wraps",
"(",
"fn",
")",
"def",
"wrapped",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
... | Allow for a handler to be wrapped in a chain.
:param api_types: Types to wrap handlers for
:param methods: Methods to wrap handlers for
:param endpoints: Endpoints to wrap handlers for | [
"Allow",
"for",
"a",
"handler",
"to",
"be",
"wrapped",
"in",
"a",
"chain",
"."
] | 40f8b5970d44935b27091c2bf3224482d23311bb | https://github.com/ColtonProvias/sqlalchemy-jsonapi/blob/40f8b5970d44935b27091c2bf3224482d23311bb/sqlalchemy_jsonapi/flaskext.py#L120-L142 | train | 32,263 |
ColtonProvias/sqlalchemy-jsonapi | sqlalchemy_jsonapi/flaskext.py | FlaskJSONAPI._call_next | def _call_next(self, handler_chain):
"""
Generates an express-like chain for handling requests.
:param handler_chain: The current chain of handlers
"""
def wrapped(*args, **kwargs):
if len(handler_chain) == 1:
return handler_chain[0](*args, **kwargs)
else:
return handler_chain[0](self._call_next(handler_chain[1:]),
*args, **kwargs)
return wrapped | python | def _call_next(self, handler_chain):
"""
Generates an express-like chain for handling requests.
:param handler_chain: The current chain of handlers
"""
def wrapped(*args, **kwargs):
if len(handler_chain) == 1:
return handler_chain[0](*args, **kwargs)
else:
return handler_chain[0](self._call_next(handler_chain[1:]),
*args, **kwargs)
return wrapped | [
"def",
"_call_next",
"(",
"self",
",",
"handler_chain",
")",
":",
"def",
"wrapped",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"len",
"(",
"handler_chain",
")",
"==",
"1",
":",
"return",
"handler_chain",
"[",
"0",
"]",
"(",
"*",
"ar... | Generates an express-like chain for handling requests.
:param handler_chain: The current chain of handlers | [
"Generates",
"an",
"express",
"-",
"like",
"chain",
"for",
"handling",
"requests",
"."
] | 40f8b5970d44935b27091c2bf3224482d23311bb | https://github.com/ColtonProvias/sqlalchemy-jsonapi/blob/40f8b5970d44935b27091c2bf3224482d23311bb/sqlalchemy_jsonapi/flaskext.py#L144-L158 | train | 32,264 |
ColtonProvias/sqlalchemy-jsonapi | sqlalchemy_jsonapi/flaskext.py | FlaskJSONAPI._setup_adapter | def _setup_adapter(self, namespace, route_prefix):
"""
Initialize the serializer and loop through the views to generate them.
:param namespace: Prefix for generated endpoints
:param route_prefix: Prefix for route patterns
"""
self.serializer = JSONAPI(
self.sqla.Model, prefix='{}://{}{}'.format(
self.app.config['PREFERRED_URL_SCHEME'],
self.app.config['SERVER_NAME'], route_prefix))
for view in views:
method, endpoint = view
pattern = route_prefix + endpoint.value
name = '{}_{}_{}'.format(namespace, method.name, endpoint.name)
view = self._generate_view(method, endpoint)
self.app.add_url_rule(pattern + '/',
name + '_slashed',
view,
methods=[method.name],
strict_slashes=False)
self.app.add_url_rule(pattern, name, view, methods=[method.name]) | python | def _setup_adapter(self, namespace, route_prefix):
"""
Initialize the serializer and loop through the views to generate them.
:param namespace: Prefix for generated endpoints
:param route_prefix: Prefix for route patterns
"""
self.serializer = JSONAPI(
self.sqla.Model, prefix='{}://{}{}'.format(
self.app.config['PREFERRED_URL_SCHEME'],
self.app.config['SERVER_NAME'], route_prefix))
for view in views:
method, endpoint = view
pattern = route_prefix + endpoint.value
name = '{}_{}_{}'.format(namespace, method.name, endpoint.name)
view = self._generate_view(method, endpoint)
self.app.add_url_rule(pattern + '/',
name + '_slashed',
view,
methods=[method.name],
strict_slashes=False)
self.app.add_url_rule(pattern, name, view, methods=[method.name]) | [
"def",
"_setup_adapter",
"(",
"self",
",",
"namespace",
",",
"route_prefix",
")",
":",
"self",
".",
"serializer",
"=",
"JSONAPI",
"(",
"self",
".",
"sqla",
".",
"Model",
",",
"prefix",
"=",
"'{}://{}{}'",
".",
"format",
"(",
"self",
".",
"app",
".",
"c... | Initialize the serializer and loop through the views to generate them.
:param namespace: Prefix for generated endpoints
:param route_prefix: Prefix for route patterns | [
"Initialize",
"the",
"serializer",
"and",
"loop",
"through",
"the",
"views",
"to",
"generate",
"them",
"."
] | 40f8b5970d44935b27091c2bf3224482d23311bb | https://github.com/ColtonProvias/sqlalchemy-jsonapi/blob/40f8b5970d44935b27091c2bf3224482d23311bb/sqlalchemy_jsonapi/flaskext.py#L160-L181 | train | 32,265 |
ColtonProvias/sqlalchemy-jsonapi | sqlalchemy_jsonapi/flaskext.py | FlaskJSONAPI._generate_view | def _generate_view(self, method, endpoint):
"""
Generate a view for the specified method and endpoint.
:param method: HTTP Method
:param endpoint: Pattern
"""
def new_view(**kwargs):
if method == Method.GET:
data = request.args
else:
content_length = request.headers.get('content-length', 0)
if content_length and int(content_length) > 0:
content_type = request.headers.get('content-type', None)
if content_type != 'application/vnd.api+json':
data = MissingContentTypeError().data
data = json.dumps(data, cls=JSONAPIEncoder)
response = make_response(data)
response.status_code = 409
response.content_type = 'application/vnd.api+json'
return response
data = request.get_json(force=True)
else:
data = None
event_kwargs = {
'method': method,
'endpoint': endpoint,
'data': data,
'req_args': kwargs
}
results = self.on_request.send(self, **event_kwargs)
data = override(data, results)
args = [self.sqla.session, data, kwargs['api_type']]
if 'obj_id' in kwargs.keys():
args.append(kwargs['obj_id'])
if 'relationship' in kwargs.keys():
args.append(kwargs['relationship'])
try:
attr = '{}_{}'.format(method.name, endpoint.name).lower()
handler = getattr(self.serializer, attr)
handler_chain = list(self._handler_chains.get((
kwargs['api_type'], method, endpoint), []))
handler_chain.append(handler)
chained_handler = self._call_next(handler_chain)
response = chained_handler(*args)
results = self.on_success.send(self,
response=response,
**event_kwargs)
response = override(response, results)
except BaseError as exc:
self.sqla.session.rollback()
results = self.on_error.send(self, error=exc, **event_kwargs)
response = override(exc, results)
rendered_response = make_response('')
if response.status_code != 204:
data = json.dumps(response.data, cls=self.json_encoder)
rendered_response = make_response(data)
rendered_response.status_code = response.status_code
rendered_response.content_type = 'application/vnd.api+json'
results = self.on_response.send(self,
response=rendered_response,
**event_kwargs)
return override(rendered_response, results)
def _generate_view(self, method, endpoint):
    """
    Generate a Flask view function for the specified method and endpoint.

    :param method: HTTP Method (an enum-like object with a ``name``)
    :param endpoint: Pattern (an enum-like object with a ``name``)
    :return: a view callable that dispatches to the serializer handler
    """
    def new_view(**kwargs):
        # Parse the request payload: GET carries data in the query string,
        # everything else must send a JSON API body.
        if method == Method.GET:
            data = request.args
        else:
            content_length = request.headers.get('content-length', 0)
            if content_length and int(content_length) > 0:
                content_type = request.headers.get('content-type', None)
                if content_type != 'application/vnd.api+json':
                    # Wrong media type: short-circuit with a 409 error doc.
                    data = MissingContentTypeError().data
                    data = json.dumps(data, cls=JSONAPIEncoder)
                    response = make_response(data)
                    response.status_code = 409
                    response.content_type = 'application/vnd.api+json'
                    return response
                data = request.get_json(force=True)
            else:
                data = None
        event_kwargs = {
            'method': method,
            'endpoint': endpoint,
            'data': data,
            'req_args': kwargs
        }
        # Let on_request subscribers replace the parsed payload.
        results = self.on_request.send(self, **event_kwargs)
        data = override(data, results)
        args = [self.sqla.session, data, kwargs['api_type']]
        if 'obj_id' in kwargs.keys():
            args.append(kwargs['obj_id'])
        if 'relationship' in kwargs.keys():
            args.append(kwargs['relationship'])
        try:
            # Serializer handler is looked up by name, e.g. 'get_collection'.
            attr = '{}_{}'.format(method.name, endpoint.name).lower()
            handler = getattr(self.serializer, attr)
            # Registered middleware handlers run before the serializer one.
            handler_chain = list(self._handler_chains.get((
                kwargs['api_type'], method, endpoint), []))
            handler_chain.append(handler)
            chained_handler = self._call_next(handler_chain)
            response = chained_handler(*args)
            results = self.on_success.send(self,
                                           response=response,
                                           **event_kwargs)
            response = override(response, results)
        except BaseError as exc:
            # Any API error rolls the session back; subscribers may
            # substitute their own response object.
            self.sqla.session.rollback()
            results = self.on_error.send(self, error=exc, **event_kwargs)
            response = override(exc, results)
        # 204 responses must not carry a body.
        rendered_response = make_response('')
        if response.status_code != 204:
            data = json.dumps(response.data, cls=self.json_encoder)
            rendered_response = make_response(data)
        rendered_response.status_code = response.status_code
        rendered_response.content_type = 'application/vnd.api+json'
        results = self.on_response.send(self,
                                        response=rendered_response,
                                        **event_kwargs)
        return override(rendered_response, results)
    return new_view
"def",
"_generate_view",
"(",
"self",
",",
"method",
",",
"endpoint",
")",
":",
"def",
"new_view",
"(",
"*",
"*",
"kwargs",
")",
":",
"if",
"method",
"==",
"Method",
".",
"GET",
":",
"data",
"=",
"request",
".",
"args",
"else",
":",
"content_length",
... | Generate a view for the specified method and endpoint.
:param method: HTTP Method
:param endpoint: Pattern | [
"Generate",
"a",
"view",
"for",
"the",
"specified",
"method",
"and",
"endpoint",
"."
] | 40f8b5970d44935b27091c2bf3224482d23311bb | https://github.com/ColtonProvias/sqlalchemy-jsonapi/blob/40f8b5970d44935b27091c2bf3224482d23311bb/sqlalchemy_jsonapi/flaskext.py#L183-L251 | train | 32,266 |
ColtonProvias/sqlalchemy-jsonapi | sqlalchemy_jsonapi/declarative/serializer.py | JSONAPISerializer._render_resource | def _render_resource(self, resource):
"""Renders a resource's top level members based on json-api spec.
Top level members include:
'id', 'type', 'attributes', 'relationships'
"""
if not resource:
return None
# Must not render a resource that has same named
# attributes as different model.
if not isinstance(resource, self.model):
raise TypeError(
'Resource(s) type must be the same as the serializer model type.')
top_level_members = {}
try:
top_level_members['id'] = str(getattr(resource, self.primary_key))
except AttributeError:
raise
top_level_members['type'] = resource.__tablename__
top_level_members['attributes'] = self._render_attributes(resource)
top_level_members['relationships'] = self._render_relationships(
resource)
return top_level_members | python | def _render_resource(self, resource):
"""Renders a resource's top level members based on json-api spec.
Top level members include:
'id', 'type', 'attributes', 'relationships'
"""
if not resource:
return None
# Must not render a resource that has same named
# attributes as different model.
if not isinstance(resource, self.model):
raise TypeError(
'Resource(s) type must be the same as the serializer model type.')
top_level_members = {}
try:
top_level_members['id'] = str(getattr(resource, self.primary_key))
except AttributeError:
raise
top_level_members['type'] = resource.__tablename__
top_level_members['attributes'] = self._render_attributes(resource)
top_level_members['relationships'] = self._render_relationships(
resource)
return top_level_members | [
"def",
"_render_resource",
"(",
"self",
",",
"resource",
")",
":",
"if",
"not",
"resource",
":",
"return",
"None",
"# Must not render a resource that has same named",
"# attributes as different model.",
"if",
"not",
"isinstance",
"(",
"resource",
",",
"self",
".",
"mo... | Renders a resource's top level members based on json-api spec.
Top level members include:
'id', 'type', 'attributes', 'relationships' | [
"Renders",
"a",
"resource",
"s",
"top",
"level",
"members",
"based",
"on",
"json",
"-",
"api",
"spec",
"."
] | 40f8b5970d44935b27091c2bf3224482d23311bb | https://github.com/ColtonProvias/sqlalchemy-jsonapi/blob/40f8b5970d44935b27091c2bf3224482d23311bb/sqlalchemy_jsonapi/declarative/serializer.py#L45-L68 | train | 32,267 |
ColtonProvias/sqlalchemy-jsonapi | sqlalchemy_jsonapi/declarative/serializer.py | JSONAPISerializer._render_attributes | def _render_attributes(self, resource):
"""Render the resources's attributes."""
attributes = {}
attrs_to_ignore = set()
for key, relationship in resource.__mapper__.relationships.items():
attrs_to_ignore.update(set(
[column.name for column in relationship.local_columns]).union(
{key}))
if self.dasherize:
mapped_fields = {x: dasherize(underscore(x)) for x in self.fields}
else:
mapped_fields = {x: x for x in self.fields}
for attribute in self.fields:
if attribute == self.primary_key:
continue
# Per json-api spec, we cannot render foreign keys
# or relationsips in attributes.
if attribute in attrs_to_ignore:
raise AttributeError
try:
value = getattr(resource, attribute)
if isinstance(value, datetime.datetime):
attributes[mapped_fields[attribute]] = value.isoformat()
else:
attributes[mapped_fields[attribute]] = value
except AttributeError:
raise
return attributes | python | def _render_attributes(self, resource):
"""Render the resources's attributes."""
attributes = {}
attrs_to_ignore = set()
for key, relationship in resource.__mapper__.relationships.items():
attrs_to_ignore.update(set(
[column.name for column in relationship.local_columns]).union(
{key}))
if self.dasherize:
mapped_fields = {x: dasherize(underscore(x)) for x in self.fields}
else:
mapped_fields = {x: x for x in self.fields}
for attribute in self.fields:
if attribute == self.primary_key:
continue
# Per json-api spec, we cannot render foreign keys
# or relationsips in attributes.
if attribute in attrs_to_ignore:
raise AttributeError
try:
value = getattr(resource, attribute)
if isinstance(value, datetime.datetime):
attributes[mapped_fields[attribute]] = value.isoformat()
else:
attributes[mapped_fields[attribute]] = value
except AttributeError:
raise
return attributes | [
"def",
"_render_attributes",
"(",
"self",
",",
"resource",
")",
":",
"attributes",
"=",
"{",
"}",
"attrs_to_ignore",
"=",
"set",
"(",
")",
"for",
"key",
",",
"relationship",
"in",
"resource",
".",
"__mapper__",
".",
"relationships",
".",
"items",
"(",
")",... | Render the resources's attributes. | [
"Render",
"the",
"resources",
"s",
"attributes",
"."
] | 40f8b5970d44935b27091c2bf3224482d23311bb | https://github.com/ColtonProvias/sqlalchemy-jsonapi/blob/40f8b5970d44935b27091c2bf3224482d23311bb/sqlalchemy_jsonapi/declarative/serializer.py#L70-L101 | train | 32,268 |
ColtonProvias/sqlalchemy-jsonapi | sqlalchemy_jsonapi/declarative/serializer.py | JSONAPISerializer._render_relationships | def _render_relationships(self, resource):
"""Render the resource's relationships."""
relationships = {}
related_models = resource.__mapper__.relationships.keys()
primary_key_val = getattr(resource, self.primary_key)
if self.dasherize:
mapped_relationships = {
x: dasherize(underscore(x)) for x in related_models}
else:
mapped_relationships = {x: x for x in related_models}
for model in related_models:
relationships[mapped_relationships[model]] = {
'links': {
'self': '/{}/{}/relationships/{}'.format(
resource.__tablename__,
primary_key_val,
mapped_relationships[model]),
'related': '/{}/{}/{}'.format(
resource.__tablename__,
primary_key_val,
mapped_relationships[model])
}
}
return relationships | python | def _render_relationships(self, resource):
"""Render the resource's relationships."""
relationships = {}
related_models = resource.__mapper__.relationships.keys()
primary_key_val = getattr(resource, self.primary_key)
if self.dasherize:
mapped_relationships = {
x: dasherize(underscore(x)) for x in related_models}
else:
mapped_relationships = {x: x for x in related_models}
for model in related_models:
relationships[mapped_relationships[model]] = {
'links': {
'self': '/{}/{}/relationships/{}'.format(
resource.__tablename__,
primary_key_val,
mapped_relationships[model]),
'related': '/{}/{}/{}'.format(
resource.__tablename__,
primary_key_val,
mapped_relationships[model])
}
}
return relationships | [
"def",
"_render_relationships",
"(",
"self",
",",
"resource",
")",
":",
"relationships",
"=",
"{",
"}",
"related_models",
"=",
"resource",
".",
"__mapper__",
".",
"relationships",
".",
"keys",
"(",
")",
"primary_key_val",
"=",
"getattr",
"(",
"resource",
",",
... | Render the resource's relationships. | [
"Render",
"the",
"resource",
"s",
"relationships",
"."
] | 40f8b5970d44935b27091c2bf3224482d23311bb | https://github.com/ColtonProvias/sqlalchemy-jsonapi/blob/40f8b5970d44935b27091c2bf3224482d23311bb/sqlalchemy_jsonapi/declarative/serializer.py#L103-L128 | train | 32,269 |
def attr_descriptor(action, *names):
    """
    Wrap a function that allows for getting or setting of an attribute. This
    allows for specific handling of an attribute when it comes to serializing
    and deserializing.

    :param action: The AttributeActions that this descriptor performs
    :param names: A list of names of the attributes this references
    """
    # Normalize a single action into a list so it can be unioned below.
    actions = [action] if isinstance(action, AttributeActions) else action

    def wrapped(fn):
        # First decoration on this function: create the marker sets.
        if not hasattr(fn, '__jsonapi_action__'):
            fn.__jsonapi_action__ = set()
            fn.__jsonapi_desc_for_attrs__ = set()
        fn.__jsonapi_desc_for_attrs__ |= set(names)
        fn.__jsonapi_action__ |= set(actions)
        return fn
    return wrapped
"""
Wrap a function that allows for getting or setting of an attribute. This
allows for specific handling of an attribute when it comes to serializing
and deserializing.
:param action: The AttributeActions that this descriptor performs
:param names: A list of names of the attributes this references
"""
if isinstance(action, AttributeActions):
action = [action]
def wrapped(fn):
if not hasattr(fn, '__jsonapi_action__'):
fn.__jsonapi_action__ = set()
fn.__jsonapi_desc_for_attrs__ = set()
fn.__jsonapi_desc_for_attrs__ |= set(names)
fn.__jsonapi_action__ |= set(action)
return fn
return wrapped | [
"def",
"attr_descriptor",
"(",
"action",
",",
"*",
"names",
")",
":",
"if",
"isinstance",
"(",
"action",
",",
"AttributeActions",
")",
":",
"action",
"=",
"[",
"action",
"]",
"def",
"wrapped",
"(",
"fn",
")",
":",
"if",
"not",
"hasattr",
"(",
"fn",
"... | Wrap a function that allows for getting or setting of an attribute. This
allows for specific handling of an attribute when it comes to serializing
and deserializing.
:param action: The AttributeActions that this descriptor performs
:param names: A list of names of the attributes this references | [
"Wrap",
"a",
"function",
"that",
"allows",
"for",
"getting",
"or",
"setting",
"of",
"an",
"attribute",
".",
"This",
"allows",
"for",
"specific",
"handling",
"of",
"an",
"attribute",
"when",
"it",
"comes",
"to",
"serializing",
"and",
"deserializing",
"."
] | 40f8b5970d44935b27091c2bf3224482d23311bb | https://github.com/ColtonProvias/sqlalchemy-jsonapi/blob/40f8b5970d44935b27091c2bf3224482d23311bb/sqlalchemy_jsonapi/serializer.py#L65-L85 | train | 32,270 |
def relationship_descriptor(action, *names):
    """
    Wrap a function for modification of a relationship. This allows for
    specific handling for serialization and deserialization.

    :param action: The RelationshipActions that this descriptor performs
    :param names: A list of names of the relationships this references
    """
    # Normalize a single action into a list so it can be unioned below.
    actions = [action] if isinstance(action, RelationshipActions) else action

    def wrapped(fn):
        # First decoration on this function: create the marker sets.
        if not hasattr(fn, '__jsonapi_action__'):
            fn.__jsonapi_action__ = set()
            fn.__jsonapi_desc_for_rels__ = set()
        fn.__jsonapi_desc_for_rels__ |= set(names)
        fn.__jsonapi_action__ |= set(actions)
        return fn
    return wrapped
"""
Wrap a function for modification of a relationship. This allows for
specific handling for serialization and deserialization.
:param action: The RelationshipActions that this descriptor performs
:param names: A list of names of the relationships this references
"""
if isinstance(action, RelationshipActions):
action = [action]
def wrapped(fn):
if not hasattr(fn, '__jsonapi_action__'):
fn.__jsonapi_action__ = set()
fn.__jsonapi_desc_for_rels__ = set()
fn.__jsonapi_desc_for_rels__ |= set(names)
fn.__jsonapi_action__ |= set(action)
return fn
return wrapped | [
"def",
"relationship_descriptor",
"(",
"action",
",",
"*",
"names",
")",
":",
"if",
"isinstance",
"(",
"action",
",",
"RelationshipActions",
")",
":",
"action",
"=",
"[",
"action",
"]",
"def",
"wrapped",
"(",
"fn",
")",
":",
"if",
"not",
"hasattr",
"(",
... | Wrap a function for modification of a relationship. This allows for
specific handling for serialization and deserialization.
:param action: The RelationshipActions that this descriptor performs
:param names: A list of names of the relationships this references | [
"Wrap",
"a",
"function",
"for",
"modification",
"of",
"a",
"relationship",
".",
"This",
"allows",
"for",
"specific",
"handling",
"for",
"serialization",
"and",
"deserialization",
"."
] | 40f8b5970d44935b27091c2bf3224482d23311bb | https://github.com/ColtonProvias/sqlalchemy-jsonapi/blob/40f8b5970d44935b27091c2bf3224482d23311bb/sqlalchemy_jsonapi/serializer.py#L88-L107 | train | 32,271 |
def check_permission(instance, field, permission):
    """
    Check a permission for a given instance or field. Raises an error if
    denied.

    :param instance: The instance to check
    :param field: The field name to check or None for instance
    :param permission: The permission to check
    :raises PermissionDeniedError: if the permission test fails
    """
    test = get_permission_test(instance, field, permission)
    if not test(instance):
        raise PermissionDeniedError(permission, instance, instance, field)
"""
Check a permission for a given instance or field. Raises an error if
denied.
:param instance: The instance to check
:param field: The field name to check or None for instance
:param permission: The permission to check
"""
if not get_permission_test(instance, field, permission)(instance):
raise PermissionDeniedError(permission, instance, instance, field) | [
"def",
"check_permission",
"(",
"instance",
",",
"field",
",",
"permission",
")",
":",
"if",
"not",
"get_permission_test",
"(",
"instance",
",",
"field",
",",
"permission",
")",
"(",
"instance",
")",
":",
"raise",
"PermissionDeniedError",
"(",
"permission",
",... | Check a permission for a given instance or field. Raises an error if
denied.
:param instance: The instance to check
:param field: The field name to check or None for instance
:param permission: The permission to check | [
"Check",
"a",
"permission",
"for",
"a",
"given",
"instance",
"or",
"field",
".",
"Raises",
"an",
"error",
"if",
"denied",
"."
] | 40f8b5970d44935b27091c2bf3224482d23311bb | https://github.com/ColtonProvias/sqlalchemy-jsonapi/blob/40f8b5970d44935b27091c2bf3224482d23311bb/sqlalchemy_jsonapi/serializer.py#L170-L180 | train | 32,272 |
def get_attr_desc(instance, attribute, action):
    """
    Fetch the appropriate descriptor for the attribute.

    :param instance: Model instance
    :param attribute: Name of the attribute
    :param action: AttributeAction
    :return: the registered descriptor, or a plain getattr/setattr default
    """
    descriptors = instance.__jsonapi_attribute_descriptors__.get(attribute, {})
    if action == AttributeActions.GET:
        check_permission(instance, attribute, Permissions.VIEW)
        default = lambda x: getattr(x, attribute)
    else:
        # Everything other than GET mutates, so EDIT is required.
        check_permission(instance, attribute, Permissions.EDIT)
        default = lambda x, v: setattr(x, attribute, v)
    return descriptors.get(action, default)
"""
Fetch the appropriate descriptor for the attribute.
:param instance: Model instance
:param attribute: Name of the attribute
:param action: AttributeAction
"""
descs = instance.__jsonapi_attribute_descriptors__.get(attribute, {})
if action == AttributeActions.GET:
check_permission(instance, attribute, Permissions.VIEW)
return descs.get(action, lambda x: getattr(x, attribute))
check_permission(instance, attribute, Permissions.EDIT)
return descs.get(action, lambda x, v: setattr(x, attribute, v)) | [
"def",
"get_attr_desc",
"(",
"instance",
",",
"attribute",
",",
"action",
")",
":",
"descs",
"=",
"instance",
".",
"__jsonapi_attribute_descriptors__",
".",
"get",
"(",
"attribute",
",",
"{",
"}",
")",
"if",
"action",
"==",
"AttributeActions",
".",
"GET",
":... | Fetch the appropriate descriptor for the attribute.
:param instance: Model instance
:param attribute: Name of the attribute
:param action: AttributeAction | [
"Fetch",
"the",
"appropriate",
"descriptor",
"for",
"the",
"attribute",
"."
] | 40f8b5970d44935b27091c2bf3224482d23311bb | https://github.com/ColtonProvias/sqlalchemy-jsonapi/blob/40f8b5970d44935b27091c2bf3224482d23311bb/sqlalchemy_jsonapi/serializer.py#L183-L196 | train | 32,273 |
def get_rel_desc(instance, key, action):
    """
    Fetch the appropriate descriptor for the relationship.

    :param instance: Model instance
    :param key: Name of the relationship
    :param action: RelationshipAction
    :return: the registered descriptor, or a default that manipulates
        the relationship collection/attribute directly
    """
    descriptors = instance.__jsonapi_rel_desc__.get(key, {})
    # Each action maps to a permission check plus a sensible default.
    if action == RelationshipActions.GET:
        check_permission(instance, key, Permissions.VIEW)
        default = lambda x: getattr(x, key)
    elif action == RelationshipActions.APPEND:
        check_permission(instance, key, Permissions.CREATE)
        default = lambda x, v: getattr(x, key).append(v)
    elif action == RelationshipActions.SET:
        check_permission(instance, key, Permissions.EDIT)
        default = lambda x, v: setattr(x, key, v)
    else:
        check_permission(instance, key, Permissions.DELETE)
        default = lambda x, v: getattr(x, key).remove(v)
    return descriptors.get(action, default)
"""
Fetch the appropriate descriptor for the relationship.
:param instance: Model instance
:param key: Name of the relationship
:param action: RelationshipAction
"""
descs = instance.__jsonapi_rel_desc__.get(key, {})
if action == RelationshipActions.GET:
check_permission(instance, key, Permissions.VIEW)
return descs.get(action, lambda x: getattr(x, key))
elif action == RelationshipActions.APPEND:
check_permission(instance, key, Permissions.CREATE)
return descs.get(action, lambda x, v: getattr(x, key).append(v))
elif action == RelationshipActions.SET:
check_permission(instance, key, Permissions.EDIT)
return descs.get(action, lambda x, v: setattr(x, key, v))
else:
check_permission(instance, key, Permissions.DELETE)
return descs.get(action, lambda x, v: getattr(x, key).remove(v)) | [
"def",
"get_rel_desc",
"(",
"instance",
",",
"key",
",",
"action",
")",
":",
"descs",
"=",
"instance",
".",
"__jsonapi_rel_desc__",
".",
"get",
"(",
"key",
",",
"{",
"}",
")",
"if",
"action",
"==",
"RelationshipActions",
".",
"GET",
":",
"check_permission"... | Fetch the appropriate descriptor for the relationship.
:param instance: Model instance
:param key: Name of the relationship
:param action: RelationshipAction | [
"Fetch",
"the",
"appropriate",
"descriptor",
"for",
"the",
"relationship",
"."
] | 40f8b5970d44935b27091c2bf3224482d23311bb | https://github.com/ColtonProvias/sqlalchemy-jsonapi/blob/40f8b5970d44935b27091c2bf3224482d23311bb/sqlalchemy_jsonapi/serializer.py#L199-L219 | train | 32,274 |
ColtonProvias/sqlalchemy-jsonapi | sqlalchemy_jsonapi/serializer.py | JSONAPI._check_json_data | def _check_json_data(self, json_data):
"""
Ensure that the request body is both a hash and has a data key.
:param json_data: The json data provided with the request
"""
if not isinstance(json_data, dict):
raise BadRequestError('Request body should be a JSON hash')
if 'data' not in json_data.keys():
raise BadRequestError('Request should contain data key') | python | def _check_json_data(self, json_data):
"""
Ensure that the request body is both a hash and has a data key.
:param json_data: The json data provided with the request
"""
if not isinstance(json_data, dict):
raise BadRequestError('Request body should be a JSON hash')
if 'data' not in json_data.keys():
raise BadRequestError('Request should contain data key') | [
"def",
"_check_json_data",
"(",
"self",
",",
"json_data",
")",
":",
"if",
"not",
"isinstance",
"(",
"json_data",
",",
"dict",
")",
":",
"raise",
"BadRequestError",
"(",
"'Request body should be a JSON hash'",
")",
"if",
"'data'",
"not",
"in",
"json_data",
".",
... | Ensure that the request body is both a hash and has a data key.
:param json_data: The json data provided with the request | [
"Ensure",
"that",
"the",
"request",
"body",
"is",
"both",
"a",
"hash",
"and",
"has",
"a",
"data",
"key",
"."
] | 40f8b5970d44935b27091c2bf3224482d23311bb | https://github.com/ColtonProvias/sqlalchemy-jsonapi/blob/40f8b5970d44935b27091c2bf3224482d23311bb/sqlalchemy_jsonapi/serializer.py#L321-L330 | train | 32,275 |
def _fetch_resource(self, session, api_type, obj_id, permission):
    """
    Fetch a resource by type and id, also doing a permission check.

    :param session: SQLAlchemy session
    :param api_type: The type
    :param obj_id: ID for the resource
    :param permission: Permission to check
    :raises ResourceTypeNotFoundError: if ``api_type`` is unknown
    :raises ResourceNotFoundError: if no row exists for ``obj_id``
    """
    if api_type not in self.models:
        raise ResourceTypeNotFoundError(api_type)
    model = self.models[api_type]
    obj = session.query(model).get(obj_id)
    if obj is None:
        raise ResourceNotFoundError(model, obj_id)
    check_permission(obj, None, permission)
    return obj
"""
Fetch a resource by type and id, also doing a permission check.
:param session: SQLAlchemy session
:param api_type: The type
:param obj_id: ID for the resource
:param permission: Permission to check
"""
if api_type not in self.models.keys():
raise ResourceTypeNotFoundError(api_type)
obj = session.query(self.models[api_type]).get(obj_id)
if obj is None:
raise ResourceNotFoundError(self.models[api_type], obj_id)
check_permission(obj, None, permission)
return obj | [
"def",
"_fetch_resource",
"(",
"self",
",",
"session",
",",
"api_type",
",",
"obj_id",
",",
"permission",
")",
":",
"if",
"api_type",
"not",
"in",
"self",
".",
"models",
".",
"keys",
"(",
")",
":",
"raise",
"ResourceTypeNotFoundError",
"(",
"api_type",
")"... | Fetch a resource by type and id, also doing a permission check.
:param session: SQLAlchemy session
:param api_type: The type
:param obj_id: ID for the resource
:param permission: Permission to check | [
"Fetch",
"a",
"resource",
"by",
"type",
"and",
"id",
"also",
"doing",
"a",
"permission",
"check",
"."
] | 40f8b5970d44935b27091c2bf3224482d23311bb | https://github.com/ColtonProvias/sqlalchemy-jsonapi/blob/40f8b5970d44935b27091c2bf3224482d23311bb/sqlalchemy_jsonapi/serializer.py#L332-L347 | train | 32,276 |
def _render_short_instance(self, instance):
    """
    Render the minimal resource-identifier form of an instance.

    :param instance: The instance to render
    :raises PermissionDeniedError: if VIEW permission is denied
    """
    check_permission(instance, None, Permissions.VIEW)
    return {
        'type': instance.__jsonapi_type__,
        'id': instance.id,
    }
"""
For those very short versions of resources, we have this.
:param instance: The instance to render
"""
check_permission(instance, None, Permissions.VIEW)
return {'type': instance.__jsonapi_type__, 'id': instance.id} | [
"def",
"_render_short_instance",
"(",
"self",
",",
"instance",
")",
":",
"check_permission",
"(",
"instance",
",",
"None",
",",
"Permissions",
".",
"VIEW",
")",
"return",
"{",
"'type'",
":",
"instance",
".",
"__jsonapi_type__",
",",
"'id'",
":",
"instance",
... | For those very short versions of resources, we have this.
:param instance: The instance to render | [
"For",
"those",
"very",
"short",
"versions",
"of",
"resources",
"we",
"have",
"this",
"."
] | 40f8b5970d44935b27091c2bf3224482d23311bb | https://github.com/ColtonProvias/sqlalchemy-jsonapi/blob/40f8b5970d44935b27091c2bf3224482d23311bb/sqlalchemy_jsonapi/serializer.py#L349-L356 | train | 32,277 |
def _check_instance_relationships_for_delete(self, instance):
    """
    Ensure we are authorized to delete this and all cascaded resources.

    Walks every relationship; for delete-cascading ones, recurses into
    the related instance(s) so the whole cascade is permission-checked
    before anything is removed.

    :param instance: The instance to check the relationships of.
    """
    check_permission(instance, None, Permissions.DELETE)
    for rel_key, rel in instance.__mapper__.relationships.items():
        check_permission(instance, rel_key, Permissions.EDIT)
        if not rel.cascade.delete:
            continue
        related = getattr(instance, rel_key)
        if rel.direction == MANYTOONE:
            self._check_instance_relationships_for_delete(related)
        else:
            for child in related:
                self._check_instance_relationships_for_delete(child)
"""
Ensure we are authorized to delete this and all cascaded resources.
:param instance: The instance to check the relationships of.
"""
check_permission(instance, None, Permissions.DELETE)
for rel_key, rel in instance.__mapper__.relationships.items():
check_permission(instance, rel_key, Permissions.EDIT)
if rel.cascade.delete:
if rel.direction == MANYTOONE:
related = getattr(instance, rel_key)
self._check_instance_relationships_for_delete(related)
else:
instances = getattr(instance, rel_key)
for to_check in instances:
self._check_instance_relationships_for_delete(to_check) | [
"def",
"_check_instance_relationships_for_delete",
"(",
"self",
",",
"instance",
")",
":",
"check_permission",
"(",
"instance",
",",
"None",
",",
"Permissions",
".",
"DELETE",
")",
"for",
"rel_key",
",",
"rel",
"in",
"instance",
".",
"__mapper__",
".",
"relation... | Ensure we are authorized to delete this and all cascaded resources.
:param instance: The instance to check the relationships of. | [
"Ensure",
"we",
"are",
"authorized",
"to",
"delete",
"this",
"and",
"all",
"cascaded",
"resources",
"."
] | 40f8b5970d44935b27091c2bf3224482d23311bb | https://github.com/ColtonProvias/sqlalchemy-jsonapi/blob/40f8b5970d44935b27091c2bf3224482d23311bb/sqlalchemy_jsonapi/serializer.py#L462-L479 | train | 32,278 |
ColtonProvias/sqlalchemy-jsonapi | sqlalchemy_jsonapi/serializer.py | JSONAPI._parse_fields | def _parse_fields(self, query):
"""
Parse the querystring args for fields.
:param query: Dict of query args
"""
field_args = {
k: v
for k, v in query.items() if k.startswith('fields[')
}
fields = {}
for k, v in field_args.items():
fields[k[7:-1]] = v.split(',')
return fields | python | def _parse_fields(self, query):
"""
Parse the querystring args for fields.
:param query: Dict of query args
"""
field_args = {
k: v
for k, v in query.items() if k.startswith('fields[')
}
fields = {}
for k, v in field_args.items():
fields[k[7:-1]] = v.split(',')
return fields | [
"def",
"_parse_fields",
"(",
"self",
",",
"query",
")",
":",
"field_args",
"=",
"{",
"k",
":",
"v",
"for",
"k",
",",
"v",
"in",
"query",
".",
"items",
"(",
")",
"if",
"k",
".",
"startswith",
"(",
"'fields['",
")",
"}",
"fields",
"=",
"{",
"}",
... | Parse the querystring args for fields.
:param query: Dict of query args | [
"Parse",
"the",
"querystring",
"args",
"for",
"fields",
"."
] | 40f8b5970d44935b27091c2bf3224482d23311bb | https://github.com/ColtonProvias/sqlalchemy-jsonapi/blob/40f8b5970d44935b27091c2bf3224482d23311bb/sqlalchemy_jsonapi/serializer.py#L481-L497 | train | 32,279 |
ColtonProvias/sqlalchemy-jsonapi | sqlalchemy_jsonapi/serializer.py | JSONAPI._parse_include | def _parse_include(self, include):
"""
Parse the querystring args or parent includes for includes.
:param include: Dict of query args or includes
"""
ret = {}
for item in include:
if '.' in item:
local, remote = item.split('.', 1)
else:
local = item
remote = None
ret.setdefault(local, [])
if remote:
ret[local].append(remote)
return ret | python | def _parse_include(self, include):
"""
Parse the querystring args or parent includes for includes.
:param include: Dict of query args or includes
"""
ret = {}
for item in include:
if '.' in item:
local, remote = item.split('.', 1)
else:
local = item
remote = None
ret.setdefault(local, [])
if remote:
ret[local].append(remote)
return ret | [
"def",
"_parse_include",
"(",
"self",
",",
"include",
")",
":",
"ret",
"=",
"{",
"}",
"for",
"item",
"in",
"include",
":",
"if",
"'.'",
"in",
"item",
":",
"local",
",",
"remote",
"=",
"item",
".",
"split",
"(",
"'.'",
",",
"1",
")",
"else",
":",
... | Parse the querystring args or parent includes for includes.
:param include: Dict of query args or includes | [
"Parse",
"the",
"querystring",
"args",
"or",
"parent",
"includes",
"for",
"includes",
"."
] | 40f8b5970d44935b27091c2bf3224482d23311bb | https://github.com/ColtonProvias/sqlalchemy-jsonapi/blob/40f8b5970d44935b27091c2bf3224482d23311bb/sqlalchemy_jsonapi/serializer.py#L499-L517 | train | 32,280 |
ColtonProvias/sqlalchemy-jsonapi | sqlalchemy_jsonapi/serializer.py | JSONAPI._parse_page | def _parse_page(self, query):
"""
Parse the querystring args for pagination.
:param query: Dict of query args
"""
args = {k[5:-1]: v for k, v in query.items() if k.startswith('page[')}
if {'number', 'size'} == set(args.keys()):
if not args['number'].isdecimal() or not args['size'].isdecimal():
raise BadRequestError('Page query parameters must be integers')
number = int(args['number'])
size = int(args['size'])
start = number * size
return start, start + size - 1
if {'limit', 'offset'} == set(args.keys()):
if not args['limit'].isdecimal() or not args['offset'].isdecimal():
raise BadRequestError('Page query parameters must be integers')
limit = int(args['limit'])
offset = int(args['offset'])
return offset, offset + limit - 1
return 0, None | python | def _parse_page(self, query):
"""
Parse the querystring args for pagination.
:param query: Dict of query args
"""
args = {k[5:-1]: v for k, v in query.items() if k.startswith('page[')}
if {'number', 'size'} == set(args.keys()):
if not args['number'].isdecimal() or not args['size'].isdecimal():
raise BadRequestError('Page query parameters must be integers')
number = int(args['number'])
size = int(args['size'])
start = number * size
return start, start + size - 1
if {'limit', 'offset'} == set(args.keys()):
if not args['limit'].isdecimal() or not args['offset'].isdecimal():
raise BadRequestError('Page query parameters must be integers')
limit = int(args['limit'])
offset = int(args['offset'])
return offset, offset + limit - 1
return 0, None | [
"def",
"_parse_page",
"(",
"self",
",",
"query",
")",
":",
"args",
"=",
"{",
"k",
"[",
"5",
":",
"-",
"1",
"]",
":",
"v",
"for",
"k",
",",
"v",
"in",
"query",
".",
"items",
"(",
")",
"if",
"k",
".",
"startswith",
"(",
"'page['",
")",
"}",
"... | Parse the querystring args for pagination.
:param query: Dict of query args | [
"Parse",
"the",
"querystring",
"args",
"for",
"pagination",
"."
] | 40f8b5970d44935b27091c2bf3224482d23311bb | https://github.com/ColtonProvias/sqlalchemy-jsonapi/blob/40f8b5970d44935b27091c2bf3224482d23311bb/sqlalchemy_jsonapi/serializer.py#L519-L546 | train | 32,281 |
ColtonProvias/sqlalchemy-jsonapi | sqlalchemy_jsonapi/serializer.py | JSONAPI.delete_relationship | def delete_relationship(self, session, data, api_type, obj_id, rel_key):
"""
Delete a resource or multiple resources from a to-many relationship.
:param session: SQLAlchemy session
:param data: JSON data provided with the request
:param api_type: Type of the resource
:param obj_id: ID of the resource
:param rel_key: Key of the relationship to fetch
"""
model = self._fetch_model(api_type)
resource = self._fetch_resource(session, api_type, obj_id,
Permissions.EDIT)
relationship = self._get_relationship(resource, rel_key,
Permissions.DELETE)
self._check_json_data(data)
if not isinstance(data['data'], list):
raise ValidationError('Provided data must be an array.')
if relationship.direction == MANYTOONE:
return ToManyExpectedError(model, resource, relationship)
response = JSONAPIResponse()
response.data = {'data': []}
session.add(resource)
remove = get_rel_desc(resource, relationship.key,
RelationshipActions.DELETE)
reverse_side = relationship.back_populates
for item in data['data']:
item = self._fetch_resource(session, item['type'], item['id'],
Permissions.EDIT)
if reverse_side:
reverse_rel = item.__mapper__.relationships[reverse_side]
if reverse_rel.direction == MANYTOONE:
permission = Permissions.EDIT
else:
permission = Permissions.DELETE
check_permission(item, reverse_side, permission)
remove(resource, item)
session.commit()
session.refresh(resource)
get = get_rel_desc(resource, relationship.key, RelationshipActions.GET)
for item in get(resource):
response.data['data'].append(self._render_short_instance(item))
        return response | python | def delete_relationship(self, session, data, api_type, obj_id, rel_key):
        """
        Delete a resource or multiple resources from a to-many relationship.

        :param session: SQLAlchemy session
        :param data: JSON data provided with the request
        :param obj_id: ID of the resource
        :param api_type: Type of the resource
        :param rel_key: Key of the relationship to fetch
        :return: JSONAPIResponse listing the remaining related resources
        :raise ValidationError: if /data is not an array
        """
        model = self._fetch_model(api_type)
        # Editing the parent resource requires EDIT; removing members from
        # this relationship requires DELETE on the relationship itself.
        resource = self._fetch_resource(session, api_type, obj_id,
                                        Permissions.EDIT)
        relationship = self._get_relationship(resource, rel_key,
                                              Permissions.DELETE)
        self._check_json_data(data)
        if not isinstance(data['data'], list):
            raise ValidationError('Provided data must be an array.')
        if relationship.direction == MANYTOONE:
            # Deleting members only makes sense on a to-many side.
            # NOTE(review): this error is *returned*, not raised — callers
            # must treat the return value as the response.
            return ToManyExpectedError(model, resource, relationship)
        response = JSONAPIResponse()
        response.data = {'data': []}
        session.add(resource)
        # Descriptor that actually unlinks an item from this relationship.
        remove = get_rel_desc(resource, relationship.key,
                              RelationshipActions.DELETE)
        reverse_side = relationship.back_populates
        for item in data['data']:
            item = self._fetch_resource(session, item['type'], item['id'],
                                        Permissions.EDIT)
            if reverse_side:
                # Unlinking also mutates the other side: editing a to-one
                # backref needs EDIT, removing from a to-many needs DELETE.
                reverse_rel = item.__mapper__.relationships[reverse_side]
                if reverse_rel.direction == MANYTOONE:
                    permission = Permissions.EDIT
                else:
                    permission = Permissions.DELETE
                check_permission(item, reverse_side, permission)
            remove(resource, item)
        session.commit()
        # Re-read after commit so the response reflects persisted state
        # rather than the in-memory collection.
        session.refresh(resource)
        get = get_rel_desc(resource, relationship.key, RelationshipActions.GET)
        for item in get(resource):
            response.data['data'].append(self._render_short_instance(item))
        return response | [
"def",
"delete_relationship",
"(",
"self",
",",
"session",
",",
"data",
",",
"api_type",
",",
"obj_id",
",",
"rel_key",
")",
":",
"model",
"=",
"self",
".",
"_fetch_model",
"(",
"api_type",
")",
"resource",
"=",
"self",
".",
"_fetch_resource",
"(",
"sessio... | Delete a resource or multiple resources from a to-many relationship.
:param session: SQLAlchemy session
:param data: JSON data provided with the request
:param api_type: Type of the resource
:param obj_id: ID of the resource
:param rel_key: Key of the relationship to fetch | [
"Delete",
"a",
"resource",
"or",
"multiple",
"resources",
"from",
"a",
"to",
"-",
"many",
"relationship",
"."
] | 40f8b5970d44935b27091c2bf3224482d23311bb | https://github.com/ColtonProvias/sqlalchemy-jsonapi/blob/40f8b5970d44935b27091c2bf3224482d23311bb/sqlalchemy_jsonapi/serializer.py#L548-L604 | train | 32,282 |
ColtonProvias/sqlalchemy-jsonapi | sqlalchemy_jsonapi/serializer.py | JSONAPI.get_collection | def get_collection(self, session, query, api_key):
"""
Fetch a collection of resources of a specified type.
:param session: SQLAlchemy session
:param query: Dict of query args
:param api_type: The type of the model
"""
model = self._fetch_model(api_key)
include = self._parse_include(query.get('include', '').split(','))
fields = self._parse_fields(query)
included = {}
sorts = query.get('sort', '').split(',')
order_by = []
collection = session.query(model)
for attr in sorts:
if attr == '':
break
attr_name, is_asc = [attr[1:], False]\
if attr[0] == '-'\
else [attr, True]
if attr_name not in model.__mapper__.all_orm_descriptors.keys()\
or not hasattr(model, attr_name)\
or attr_name in model.__mapper__.relationships.keys():
return NotSortableError(model, attr_name)
attr = getattr(model, attr_name)
if not hasattr(attr, 'asc'):
# pragma: no cover
return NotSortableError(model, attr_name)
check_permission(model, attr_name, Permissions.VIEW)
order_by.append(attr.asc() if is_asc else attr.desc())
if len(order_by) > 0:
collection = collection.order_by(*order_by)
pos = -1
start, end = self._parse_page(query)
response = JSONAPIResponse()
response.data['data'] = []
for instance in collection:
try:
check_permission(instance, None, Permissions.VIEW)
except PermissionDeniedError:
continue
pos += 1
if end is not None and (pos < start or pos > end):
continue
built = self._render_full_resource(instance, include, fields)
included.update(built.pop('included'))
response.data['data'].append(built)
response.data['included'] = list(included.values())
        return response | python | def get_collection(self, session, query, api_key):
        """
        Fetch a collection of resources of a specified type.

        :param session: SQLAlchemy session
        :param query: Dict of query args
        :param api_type: The type of the model
        :return: JSONAPIResponse with the serialized, filtered collection
        """
        model = self._fetch_model(api_key)
        include = self._parse_include(query.get('include', '').split(','))
        fields = self._parse_fields(query)
        included = {}
        sorts = query.get('sort', '').split(',')
        order_by = []
        collection = session.query(model)
        for attr in sorts:
            # NOTE(review): a blank entry stops processing of *all*
            # remaining sort keys (break, not continue) — confirm intended.
            if attr == '':
                break
            # A leading '-' requests descending order per JSON:API.
            attr_name, is_asc = [attr[1:], False]\
                if attr[0] == '-'\
                else [attr, True]
            # Only plain mapped columns are sortable: reject unknown
            # names and relationship keys.
            if attr_name not in model.__mapper__.all_orm_descriptors.keys()\
                    or not hasattr(model, attr_name)\
                    or attr_name in model.__mapper__.relationships.keys():
                return NotSortableError(model, attr_name)
            attr = getattr(model, attr_name)
            if not hasattr(attr, 'asc'):
                # pragma: no cover
                return NotSortableError(model, attr_name)
            check_permission(model, attr_name, Permissions.VIEW)
            order_by.append(attr.asc() if is_asc else attr.desc())
        if len(order_by) > 0:
            collection = collection.order_by(*order_by)
        pos = -1
        start, end = self._parse_page(query)
        response = JSONAPIResponse()
        response.data['data'] = []
        for instance in collection:
            # Resources the requester cannot view are silently skipped.
            try:
                check_permission(instance, None, Permissions.VIEW)
            except PermissionDeniedError:
                continue
            # Pagination is applied *after* permission filtering, so page
            # windows count only visible resources.
            pos += 1
            if end is not None and (pos < start or pos > end):
                continue
            built = self._render_full_resource(instance, include, fields)
            included.update(built.pop('included'))
            response.data['data'].append(built)
        response.data['included'] = list(included.values())
        return response | [
"def",
"get_collection",
"(",
"self",
",",
"session",
",",
"query",
",",
"api_key",
")",
":",
"model",
"=",
"self",
".",
"_fetch_model",
"(",
"api_key",
")",
"include",
"=",
"self",
".",
"_parse_include",
"(",
"query",
".",
"get",
"(",
"'include'",
",",
... | Fetch a collection of resources of a specified type.
:param session: SQLAlchemy session
:param query: Dict of query args
:param api_type: The type of the model | [
"Fetch",
"a",
"collection",
"of",
"resources",
"of",
"a",
"specified",
"type",
"."
] | 40f8b5970d44935b27091c2bf3224482d23311bb | https://github.com/ColtonProvias/sqlalchemy-jsonapi/blob/40f8b5970d44935b27091c2bf3224482d23311bb/sqlalchemy_jsonapi/serializer.py#L627-L690 | train | 32,283 |
ColtonProvias/sqlalchemy-jsonapi | sqlalchemy_jsonapi/serializer.py | JSONAPI.get_relationship | def get_relationship(self, session, query, api_type, obj_id, rel_key):
"""
Fetch a collection of related resource types and ids.
:param session: SQLAlchemy session
:param query: Dict of query args
:param api_type: Type of the resource
:param obj_id: ID of the resource
:param rel_key: Key of the relationship to fetch
"""
resource = self._fetch_resource(session, api_type, obj_id,
Permissions.VIEW)
if rel_key not in resource.__jsonapi_map_to_py__.keys():
raise RelationshipNotFoundError(resource, resource, rel_key)
py_key = resource.__jsonapi_map_to_py__[rel_key]
relationship = self._get_relationship(resource, py_key,
Permissions.VIEW)
response = JSONAPIResponse()
related = get_rel_desc(resource, relationship.key,
RelationshipActions.GET)(resource)
if relationship.direction == MANYTOONE:
if related is None:
response.data['data'] = None
else:
try:
response.data['data'] = self._render_short_instance(
related)
except PermissionDeniedError:
response.data['data'] = None
else:
response.data['data'] = []
for item in related:
try:
response.data['data'].append(
self._render_short_instance(item))
except PermissionDeniedError:
continue
        return response | python | def get_relationship(self, session, query, api_type, obj_id, rel_key):
        """
        Fetch a collection of related resource types and ids.

        :param session: SQLAlchemy session
        :param query: Dict of query args
        :param api_type: Type of the resource
        :param obj_id: ID of the resource
        :param rel_key: Key of the relationship to fetch
        :return: JSONAPIResponse holding resource identifier objects
        :raise RelationshipNotFoundError: if rel_key is not a relationship
        """
        resource = self._fetch_resource(session, api_type, obj_id,
                                        Permissions.VIEW)
        if rel_key not in resource.__jsonapi_map_to_py__.keys():
            raise RelationshipNotFoundError(resource, resource, rel_key)
        # Translate the API-facing key into the python attribute name.
        py_key = resource.__jsonapi_map_to_py__[rel_key]
        relationship = self._get_relationship(resource, py_key,
                                              Permissions.VIEW)
        response = JSONAPIResponse()
        related = get_rel_desc(resource, relationship.key,
                               RelationshipActions.GET)(resource)
        if relationship.direction == MANYTOONE:
            # To-one: data is a single identifier or None.  A permission
            # failure on the related resource is masked as null rather
            # than surfaced as an error.
            if related is None:
                response.data['data'] = None
            else:
                try:
                    response.data['data'] = self._render_short_instance(
                        related)
                except PermissionDeniedError:
                    response.data['data'] = None
        else:
            # To-many: silently drop members the requester cannot view.
            response.data['data'] = []
            for item in related:
                try:
                    response.data['data'].append(
                        self._render_short_instance(item))
                except PermissionDeniedError:
                    continue
        return response | [
"def",
"get_relationship",
"(",
"self",
",",
"session",
",",
"query",
",",
"api_type",
",",
"obj_id",
",",
"rel_key",
")",
":",
"resource",
"=",
"self",
".",
"_fetch_resource",
"(",
"session",
",",
"api_type",
",",
"obj_id",
",",
"Permissions",
".",
"VIEW"... | Fetch a collection of related resource types and ids.
:param session: SQLAlchemy session
:param query: Dict of query args
:param api_type: Type of the resource
:param obj_id: ID of the resource
:param rel_key: Key of the relationship to fetch | [
"Fetch",
"a",
"collection",
"of",
"related",
"resource",
"types",
"and",
"ids",
"."
] | 40f8b5970d44935b27091c2bf3224482d23311bb | https://github.com/ColtonProvias/sqlalchemy-jsonapi/blob/40f8b5970d44935b27091c2bf3224482d23311bb/sqlalchemy_jsonapi/serializer.py#L758-L798 | train | 32,284 |
ColtonProvias/sqlalchemy-jsonapi | sqlalchemy_jsonapi/serializer.py | JSONAPI.patch_relationship | def patch_relationship(self, session, json_data, api_type, obj_id,
rel_key):
"""
Replacement of relationship values.
:param session: SQLAlchemy session
:param json_data: Request JSON Data
:param api_type: Type of the resource
:param obj_id: ID of the resource
:param rel_key: Key of the relationship to fetch
"""
model = self._fetch_model(api_type)
resource = self._fetch_resource(session, api_type, obj_id,
Permissions.EDIT)
if rel_key not in resource.__jsonapi_map_to_py__.keys():
raise RelationshipNotFoundError(resource, resource, rel_key)
py_key = resource.__jsonapi_map_to_py__[rel_key]
relationship = self._get_relationship(resource, py_key,
Permissions.EDIT)
self._check_json_data(json_data)
session.add(resource)
remote_side = relationship.back_populates
try:
if relationship.direction == MANYTOONE:
if not isinstance(json_data['data'], dict)\
and json_data['data'] is not None:
raise ValidationError('Provided data must be a hash.')
related = getattr(resource, relationship.key)
check_permission(related, None, Permissions.EDIT)
check_permission(related, remote_side, Permissions.EDIT)
setter = get_rel_desc(resource, relationship.key,
RelationshipActions.SET)
if json_data['data'] is None:
setter(resource, None)
else:
to_relate = self._fetch_resource(
session, json_data['data']['type'],
json_data['data']['id'], Permissions.EDIT)
check_permission(to_relate, remote_side, Permissions.EDIT)
setter(resource, to_relate)
else:
if not isinstance(json_data['data'], list):
raise ValidationError('Provided data must be an array.')
related = getattr(resource, relationship.key)
remover = get_rel_desc(resource, relationship.key,
RelationshipActions.DELETE)
appender = get_rel_desc(resource, relationship.key,
RelationshipActions.APPEND)
for item in related:
check_permission(item, None, Permissions.EDIT)
remote = item.__mapper__.relationships[remote_side]
if remote.direction == MANYTOONE:
check_permission(item, remote_side, Permissions.EDIT)
else:
check_permission(item, remote_side, Permissions.DELETE)
remover(resource, item)
for item in json_data['data']:
to_relate = self._fetch_resource(
session, item['type'], item['id'], Permissions.EDIT)
remote = to_relate.__mapper__.relationships[remote_side]
if remote.direction == MANYTOONE:
check_permission(to_relate, remote_side,
Permissions.EDIT)
else:
check_permission(to_relate, remote_side,
Permissions.CREATE)
appender(resource, to_relate)
session.commit()
except KeyError:
raise ValidationError('Incompatible Type')
return self.get_relationship(session, {}, model.__jsonapi_type__,
                                     resource.id, rel_key) | python | def patch_relationship(self, session, json_data, api_type, obj_id,
                           rel_key):
        """
        Replacement of relationship values.

        :param session: SQLAlchemy session
        :param json_data: Request JSON Data
        :param api_type: Type of the resource
        :param obj_id: ID of the resource
        :param rel_key: Key of the relationship to fetch
        :return: JSONAPIResponse with the relationship's new state
        :raise ValidationError: on malformed payloads or missing type/id
        """
        model = self._fetch_model(api_type)
        resource = self._fetch_resource(session, api_type, obj_id,
                                        Permissions.EDIT)
        if rel_key not in resource.__jsonapi_map_to_py__.keys():
            raise RelationshipNotFoundError(resource, resource, rel_key)
        # Translate the API-facing key into the python attribute name.
        py_key = resource.__jsonapi_map_to_py__[rel_key]
        relationship = self._get_relationship(resource, py_key,
                                              Permissions.EDIT)
        self._check_json_data(json_data)
        session.add(resource)
        remote_side = relationship.back_populates
        try:
            if relationship.direction == MANYTOONE:
                # To-one: replace (or clear) the single related resource.
                if not isinstance(json_data['data'], dict)\
                        and json_data['data'] is not None:
                    raise ValidationError('Provided data must be a hash.')
                related = getattr(resource, relationship.key)
                # Both the currently-related resource and its
                # back-reference are mutated by the swap.
                check_permission(related, None, Permissions.EDIT)
                check_permission(related, remote_side, Permissions.EDIT)
                setter = get_rel_desc(resource, relationship.key,
                                      RelationshipActions.SET)
                if json_data['data'] is None:
                    setter(resource, None)
                else:
                    to_relate = self._fetch_resource(
                        session, json_data['data']['type'],
                        json_data['data']['id'], Permissions.EDIT)
                    check_permission(to_relate, remote_side, Permissions.EDIT)
                    setter(resource, to_relate)
            else:
                # To-many: full replacement — unlink everything currently
                # related, then link exactly the provided identifiers.
                if not isinstance(json_data['data'], list):
                    raise ValidationError('Provided data must be an array.')
                related = getattr(resource, relationship.key)
                remover = get_rel_desc(resource, relationship.key,
                                       RelationshipActions.DELETE)
                appender = get_rel_desc(resource, relationship.key,
                                        RelationshipActions.APPEND)
                for item in related:
                    check_permission(item, None, Permissions.EDIT)
                    # Removing also mutates the other side: a to-one
                    # backref is edited, a to-many backref loses a member.
                    remote = item.__mapper__.relationships[remote_side]
                    if remote.direction == MANYTOONE:
                        check_permission(item, remote_side, Permissions.EDIT)
                    else:
                        check_permission(item, remote_side, Permissions.DELETE)
                    remover(resource, item)
                for item in json_data['data']:
                    to_relate = self._fetch_resource(
                        session, item['type'], item['id'], Permissions.EDIT)
                    # Appending mutates the other side: a to-one backref is
                    # edited, a to-many backref gains a member.
                    remote = to_relate.__mapper__.relationships[remote_side]
                    if remote.direction == MANYTOONE:
                        check_permission(to_relate, remote_side,
                                         Permissions.EDIT)
                    else:
                        check_permission(to_relate, remote_side,
                                         Permissions.CREATE)
                    appender(resource, to_relate)
            session.commit()
        except KeyError:
            # Payload items missing 'type'/'id' keys land here.
            raise ValidationError('Incompatible Type')
        return self.get_relationship(session, {}, model.__jsonapi_type__,
                                     resource.id, rel_key) | [
"def",
"patch_relationship",
"(",
"self",
",",
"session",
",",
"json_data",
",",
"api_type",
",",
"obj_id",
",",
"rel_key",
")",
":",
"model",
"=",
"self",
".",
"_fetch_model",
"(",
"api_type",
")",
"resource",
"=",
"self",
".",
"_fetch_resource",
"(",
"se... | Replacement of relationship values.
:param session: SQLAlchemy session
:param json_data: Request JSON Data
:param api_type: Type of the resource
:param obj_id: ID of the resource
:param rel_key: Key of the relationship to fetch | [
"Replacement",
"of",
"relationship",
"values",
"."
] | 40f8b5970d44935b27091c2bf3224482d23311bb | https://github.com/ColtonProvias/sqlalchemy-jsonapi/blob/40f8b5970d44935b27091c2bf3224482d23311bb/sqlalchemy_jsonapi/serializer.py#L800-L880 | train | 32,285 |
ColtonProvias/sqlalchemy-jsonapi | sqlalchemy_jsonapi/serializer.py | JSONAPI.post_relationship | def post_relationship(self, session, json_data, api_type, obj_id, rel_key):
"""
Append to a relationship.
:param session: SQLAlchemy session
:param json_data: Request JSON Data
:param api_type: Type of the resource
:param obj_id: ID of the resource
:param rel_key: Key of the relationship to fetch
"""
model = self._fetch_model(api_type)
resource = self._fetch_resource(session, api_type, obj_id,
Permissions.EDIT)
if rel_key not in resource.__jsonapi_map_to_py__.keys():
raise RelationshipNotFoundError(resource, resource, rel_key)
py_key = resource.__jsonapi_map_to_py__[rel_key]
relationship = self._get_relationship(resource, py_key,
Permissions.CREATE)
if relationship.direction == MANYTOONE:
raise ValidationError('Cannot post to to-one relationship')
if not isinstance(json_data['data'], list):
raise ValidationError('/data must be an array')
remote_side = relationship.back_populates
try:
for item in json_data['data']:
setter = get_rel_desc(resource, relationship.key,
RelationshipActions.APPEND)
if not isinstance(json_data['data'], list):
raise BadRequestError(
'{} must be an array'.format(relationship.key))
for item in json_data['data']:
if {'type', 'id'} != set(item.keys()):
raise BadRequestError(
'{} must have type and id keys'
.format(relationship.key))
to_relate = self._fetch_resource(
session, item['type'], item['id'], Permissions.EDIT)
rem = to_relate.__mapper__.relationships[remote_side]
if rem.direction == MANYTOONE:
check_permission(to_relate, remote_side,
Permissions.EDIT)
else:
check_permission(to_relate, remote_side,
Permissions.CREATE)
setter(resource, to_relate)
session.add(resource)
session.commit()
except KeyError:
raise ValidationError('Incompatible type provided')
return self.get_relationship(
            session, {}, model.__jsonapi_type__, resource.id, rel_key) | python | def post_relationship(self, session, json_data, api_type, obj_id, rel_key):
        """
        Append to a relationship.

        :param session: SQLAlchemy session
        :param json_data: Request JSON Data
        :param api_type: Type of the resource
        :param obj_id: ID of the resource
        :param rel_key: Key of the relationship to fetch
        :return: JSONAPIResponse with the relationship's new state
        :raise ValidationError: for to-one targets or malformed payloads
        """
        model = self._fetch_model(api_type)
        resource = self._fetch_resource(session, api_type, obj_id,
                                        Permissions.EDIT)
        if rel_key not in resource.__jsonapi_map_to_py__.keys():
            raise RelationshipNotFoundError(resource, resource, rel_key)
        # Translate the API-facing key into the python attribute name.
        py_key = resource.__jsonapi_map_to_py__[rel_key]
        relationship = self._get_relationship(resource, py_key,
                                              Permissions.CREATE)
        if relationship.direction == MANYTOONE:
            raise ValidationError('Cannot post to to-one relationship')
        if not isinstance(json_data['data'], list):
            raise ValidationError('/data must be an array')
        remote_side = relationship.back_populates
        try:
            # NOTE(review): the outer loop re-runs the entire inner append
            # loop once per payload item, so every resource is appended
            # len(json_data['data']) times and the inner loop rebinds the
            # same `item` name.  Looks like a refactoring leftover —
            # confirm whether the outer loop should be removed.
            for item in json_data['data']:
                setter = get_rel_desc(resource, relationship.key,
                                      RelationshipActions.APPEND)
                if not isinstance(json_data['data'], list):
                    raise BadRequestError(
                        '{} must be an array'.format(relationship.key))
                for item in json_data['data']:
                    if {'type', 'id'} != set(item.keys()):
                        raise BadRequestError(
                            '{} must have type and id keys'
                            .format(relationship.key))
                    to_relate = self._fetch_resource(
                        session, item['type'], item['id'], Permissions.EDIT)
                    # Appending mutates the other side: a to-one backref is
                    # edited, a to-many backref gains a member.
                    rem = to_relate.__mapper__.relationships[remote_side]
                    if rem.direction == MANYTOONE:
                        check_permission(to_relate, remote_side,
                                         Permissions.EDIT)
                    else:
                        check_permission(to_relate, remote_side,
                                         Permissions.CREATE)
                    setter(resource, to_relate)
            session.add(resource)
            session.commit()
        except KeyError:
            raise ValidationError('Incompatible type provided')
        return self.get_relationship(
            session, {}, model.__jsonapi_type__, resource.id, rel_key) | [
"def",
"post_relationship",
"(",
"self",
",",
"session",
",",
"json_data",
",",
"api_type",
",",
"obj_id",
",",
"rel_key",
")",
":",
"model",
"=",
"self",
".",
"_fetch_model",
"(",
"api_type",
")",
"resource",
"=",
"self",
".",
"_fetch_resource",
"(",
"ses... | Append to a relationship.
:param session: SQLAlchemy session
:param json_data: Request JSON Data
:param api_type: Type of the resource
:param obj_id: ID of the resource
:param rel_key: Key of the relationship to fetch | [
"Append",
"to",
"a",
"relationship",
"."
] | 40f8b5970d44935b27091c2bf3224482d23311bb | https://github.com/ColtonProvias/sqlalchemy-jsonapi/blob/40f8b5970d44935b27091c2bf3224482d23311bb/sqlalchemy_jsonapi/serializer.py#L1107-L1170 | train | 32,286 |
MisterWil/abodepy | abodepy/helpers/timeline.py | map_event_code | def map_event_code(event_code):
"""Map a specific event_code to an event group."""
event_code = int(event_code)
# Honestly, these are just guessing based on the below event list.
# It could be wrong, I have no idea.
if 1100 <= event_code <= 1199:
return ALARM_GROUP
elif 3100 <= event_code <= 3199:
return ALARM_END_GROUP
elif 1300 <= event_code <= 1399:
return PANEL_FAULT_GROUP
elif 3300 <= event_code <= 3399:
return PANEL_RESTORE_GROUP
elif 1400 <= event_code <= 1499:
return DISARM_GROUP
elif 3400 <= event_code <= 3799:
return ARM_GROUP
elif 1600 <= event_code <= 1699:
return TEST_GROUP
elif 5000 <= event_code <= 5099:
return CAPTURE_GROUP
elif 5100 <= event_code <= 5199:
return DEVICE_GROUP
elif 5200 <= event_code <= 5299:
return AUTOMATION_GROUP
def map_event_code(event_code):
    """Map a specific event_code to an event group."""
    code = int(event_code)
    # Honestly, these are just guessing based on the below event list.
    # It could be wrong, I have no idea.
    group_ranges = (
        (1100, 1199, ALARM_GROUP),
        (3100, 3199, ALARM_END_GROUP),
        (1300, 1399, PANEL_FAULT_GROUP),
        (3300, 3399, PANEL_RESTORE_GROUP),
        (1400, 1499, DISARM_GROUP),
        (3400, 3799, ARM_GROUP),
        (1600, 1699, TEST_GROUP),
        (5000, 5099, CAPTURE_GROUP),
        (5100, 5199, DEVICE_GROUP),
        (5200, 5299, AUTOMATION_GROUP),
    )
    for low, high, group in group_ranges:
        if low <= code <= high:
            return group
    # Codes outside every known range have no group.
    return None
"def",
"map_event_code",
"(",
"event_code",
")",
":",
"event_code",
"=",
"int",
"(",
"event_code",
")",
"# Honestly, these are just guessing based on the below event list.",
"# It could be wrong, I have no idea.",
"if",
"1100",
"<=",
"event_code",
"<=",
"1199",
":",
"return... | Map a specific event_code to an event group. | [
"Map",
"a",
"specific",
"event_code",
"to",
"an",
"event",
"group",
"."
] | 6f84bb428fd1da98855f55083cd427bebbcc57ae | https://github.com/MisterWil/abodepy/blob/6f84bb428fd1da98855f55083cd427bebbcc57ae/abodepy/helpers/timeline.py#L23-L59 | train | 32,287 |
MisterWil/abodepy | abodepy/automation.py | AbodeAutomation.set_active | def set_active(self, active):
"""Activate and deactivate an automation."""
url = CONST.AUTOMATION_EDIT_URL
url = url.replace(
'$AUTOMATIONID$', self.automation_id)
self._automation['is_active'] = str(int(active))
response = self._abode.send_request(
method="put", url=url, data=self._automation)
response_object = json.loads(response.text)
if isinstance(response_object, (tuple, list)):
response_object = response_object[0]
if (str(response_object['id']) != str(self._automation['id']) or
response_object['is_active'] != self._automation['is_active']):
raise AbodeException((ERROR.INVALID_AUTOMATION_EDIT_RESPONSE))
self.update(response_object)
def set_active(self, active):
    """Activate and deactivate an automation."""
    url = CONST.AUTOMATION_EDIT_URL.replace(
        '$AUTOMATIONID$', self.automation_id)
    # The API expects the flag as a '0'/'1' string.
    self._automation['is_active'] = str(int(active))
    response = self._abode.send_request(
        method="put", url=url, data=self._automation)
    response_object = json.loads(response.text)
    # Some endpoints wrap the payload in a single-element list/tuple.
    if isinstance(response_object, (tuple, list)):
        response_object = response_object[0]
    id_matches = str(response_object['id']) == str(self._automation['id'])
    state_matches = (response_object['is_active'] ==
                     self._automation['is_active'])
    if not (id_matches and state_matches):
        raise AbodeException((ERROR.INVALID_AUTOMATION_EDIT_RESPONSE))
    self.update(response_object)
    return True
"def",
"set_active",
"(",
"self",
",",
"active",
")",
":",
"url",
"=",
"CONST",
".",
"AUTOMATION_EDIT_URL",
"url",
"=",
"url",
".",
"replace",
"(",
"'$AUTOMATIONID$'",
",",
"self",
".",
"automation_id",
")",
"self",
".",
"_automation",
"[",
"'is_active'",
... | Activate and deactivate an automation. | [
"Activate",
"and",
"deactivate",
"an",
"automation",
"."
] | 6f84bb428fd1da98855f55083cd427bebbcc57ae | https://github.com/MisterWil/abodepy/blob/6f84bb428fd1da98855f55083cd427bebbcc57ae/abodepy/automation.py#L18-L40 | train | 32,288 |
MisterWil/abodepy | abodepy/automation.py | AbodeAutomation.trigger | def trigger(self, only_manual=True):
"""Trigger a quick-action automation."""
if not self.is_quick_action and only_manual:
raise AbodeException((ERROR.TRIGGER_NON_QUICKACTION))
url = CONST.AUTOMATION_APPLY_URL
url = url.replace(
'$AUTOMATIONID$', self.automation_id)
self._abode.send_request(
method="put", url=url, data=self._automation)
def trigger(self, only_manual=True):
    """Trigger a quick-action automation."""
    # Unless explicitly overridden, only quick-actions may be triggered.
    if not self.is_quick_action and only_manual:
        raise AbodeException((ERROR.TRIGGER_NON_QUICKACTION))
    url = CONST.AUTOMATION_APPLY_URL.replace(
        '$AUTOMATIONID$', self.automation_id)
    self._abode.send_request(method="put", url=url, data=self._automation)
    return True
"def",
"trigger",
"(",
"self",
",",
"only_manual",
"=",
"True",
")",
":",
"if",
"not",
"self",
".",
"is_quick_action",
"and",
"only_manual",
":",
"raise",
"AbodeException",
"(",
"(",
"ERROR",
".",
"TRIGGER_NON_QUICKACTION",
")",
")",
"url",
"=",
"CONST",
"... | Trigger a quick-action automation. | [
"Trigger",
"a",
"quick",
"-",
"action",
"automation",
"."
] | 6f84bb428fd1da98855f55083cd427bebbcc57ae | https://github.com/MisterWil/abodepy/blob/6f84bb428fd1da98855f55083cd427bebbcc57ae/abodepy/automation.py#L42-L54 | train | 32,289 |
MisterWil/abodepy | abodepy/automation.py | AbodeAutomation.refresh | def refresh(self):
"""Refresh the automation."""
url = CONST.AUTOMATION_ID_URL
url = url.replace(
'$AUTOMATIONID$', self.automation_id)
response = self._abode.send_request(method="get", url=url)
response_object = json.loads(response.text)
if isinstance(response_object, (tuple, list)):
response_object = response_object[0]
if str(response_object['id']) != self.automation_id:
raise AbodeException((ERROR.INVALID_AUTOMATION_REFRESH_RESPONSE))
def refresh(self):
    """Refresh the automation."""
    url = CONST.AUTOMATION_ID_URL.replace(
        '$AUTOMATIONID$', self.automation_id)
    response = self._abode.send_request(method="get", url=url)
    response_object = json.loads(response.text)
    # Some endpoints wrap the payload in a single-element list/tuple.
    if isinstance(response_object, (tuple, list)):
        response_object = response_object[0]
    # Sanity check: the API must have echoed back the same automation.
    if str(response_object['id']) != self.automation_id:
        raise AbodeException((ERROR.INVALID_AUTOMATION_REFRESH_RESPONSE))
    self.update(response_object)
"def",
"refresh",
"(",
"self",
")",
":",
"url",
"=",
"CONST",
".",
"AUTOMATION_ID_URL",
"url",
"=",
"url",
".",
"replace",
"(",
"'$AUTOMATIONID$'",
",",
"self",
".",
"automation_id",
")",
"response",
"=",
"self",
".",
"_abode",
".",
"send_request",
"(",
... | Refresh the automation. | [
"Refresh",
"the",
"automation",
"."
] | 6f84bb428fd1da98855f55083cd427bebbcc57ae | https://github.com/MisterWil/abodepy/blob/6f84bb428fd1da98855f55083cd427bebbcc57ae/abodepy/automation.py#L56-L71 | train | 32,290 |
MisterWil/abodepy | abodepy/automation.py | AbodeAutomation.update | def update(self, automation):
"""Update the internal automation json."""
self._automation.update(
def update(self, automation):
    """Merge fields from *automation* into the internal automation json.

    Only keys already tracked in the internal json are copied, so
    unexpected fields from the API response are ignored.

    :param automation: dict of automation fields returned by the API
    """
    # Use a membership test rather than truthiness of the stored value:
    # ``self._automation.get(k)`` would also skip keys whose current
    # value is falsy (0, '', None), leaving them stale after an update.
    self._automation.update(
        {k: automation[k] for k in automation if k in self._automation})
"def",
"update",
"(",
"self",
",",
"automation",
")",
":",
"self",
".",
"_automation",
".",
"update",
"(",
"{",
"k",
":",
"automation",
"[",
"k",
"]",
"for",
"k",
"in",
"automation",
"if",
"self",
".",
"_automation",
".",
"get",
"(",
"k",
")",
"}",... | Update the internal automation json. | [
"Update",
"the",
"internal",
"automation",
"json",
"."
] | 6f84bb428fd1da98855f55083cd427bebbcc57ae | https://github.com/MisterWil/abodepy/blob/6f84bb428fd1da98855f55083cd427bebbcc57ae/abodepy/automation.py#L73-L76 | train | 32,291 |
MisterWil/abodepy | abodepy/automation.py | AbodeAutomation.desc | def desc(self):
"""Get a short description of the automation."""
# Auto Away (1) - Location - Enabled
active = 'inactive'
if self.is_active:
active = 'active'
return '{0} (ID: {1}) - {2} - {3}'.format(
def desc(self):
    """Get a short description of the automation."""
    # Example: Auto Away (ID: 1) - Location - active
    state = 'active' if self.is_active else 'inactive'
    return '{0} (ID: {1}) - {2} - {3}'.format(
        self.name, self.automation_id, self.type, state)
"def",
"desc",
"(",
"self",
")",
":",
"# Auto Away (1) - Location - Enabled",
"active",
"=",
"'inactive'",
"if",
"self",
".",
"is_active",
":",
"active",
"=",
"'active'",
"return",
"'{0} (ID: {1}) - {2} - {3}'",
".",
"format",
"(",
"self",
".",
"name",
",",
"sel... | Get a short description of the automation. | [
"Get",
"a",
"short",
"description",
"of",
"the",
"automation",
"."
] | 6f84bb428fd1da98855f55083cd427bebbcc57ae | https://github.com/MisterWil/abodepy/blob/6f84bb428fd1da98855f55083cd427bebbcc57ae/abodepy/automation.py#L117-L125 | train | 32,292 |
MisterWil/abodepy | abodepy/devices/sensor.py | AbodeSensor._get_numeric_status | def _get_numeric_status(self, key):
"""Extract the numeric value from the statuses object."""
value = self._get_status(key)
if value and any(i.isdigit() for i in value):
return float(re.sub("[^0-9.]", "", value))
return None | python | def _get_numeric_status(self, key):
"""Extract the numeric value from the statuses object."""
value = self._get_status(key)
if value and any(i.isdigit() for i in value):
return float(re.sub("[^0-9.]", "", value))
return None | [
"def",
"_get_numeric_status",
"(",
"self",
",",
"key",
")",
":",
"value",
"=",
"self",
".",
"_get_status",
"(",
"key",
")",
"if",
"value",
"and",
"any",
"(",
"i",
".",
"isdigit",
"(",
")",
"for",
"i",
"in",
"value",
")",
":",
"return",
"float",
"("... | Extract the numeric value from the statuses object. | [
"Extract",
"the",
"numeric",
"value",
"from",
"the",
"statuses",
"object",
"."
] | 6f84bb428fd1da98855f55083cd427bebbcc57ae | https://github.com/MisterWil/abodepy/blob/6f84bb428fd1da98855f55083cd427bebbcc57ae/abodepy/devices/sensor.py#L14-L20 | train | 32,293 |
MisterWil/abodepy | abodepy/devices/sensor.py | AbodeSensor.temp_unit | def temp_unit(self):
"""Get unit of temp."""
if CONST.UNIT_FAHRENHEIT in self._get_status(CONST.TEMP_STATUS_KEY):
return CONST.UNIT_FAHRENHEIT
elif CONST.UNIT_CELSIUS in self._get_status(CONST.TEMP_STATUS_KEY):
return CONST.UNIT_CELSIUS
return None | python | def temp_unit(self):
"""Get unit of temp."""
if CONST.UNIT_FAHRENHEIT in self._get_status(CONST.TEMP_STATUS_KEY):
return CONST.UNIT_FAHRENHEIT
elif CONST.UNIT_CELSIUS in self._get_status(CONST.TEMP_STATUS_KEY):
return CONST.UNIT_CELSIUS
return None | [
"def",
"temp_unit",
"(",
"self",
")",
":",
"if",
"CONST",
".",
"UNIT_FAHRENHEIT",
"in",
"self",
".",
"_get_status",
"(",
"CONST",
".",
"TEMP_STATUS_KEY",
")",
":",
"return",
"CONST",
".",
"UNIT_FAHRENHEIT",
"elif",
"CONST",
".",
"UNIT_CELSIUS",
"in",
"self",... | Get unit of temp. | [
"Get",
"unit",
"of",
"temp",
"."
] | 6f84bb428fd1da98855f55083cd427bebbcc57ae | https://github.com/MisterWil/abodepy/blob/6f84bb428fd1da98855f55083cd427bebbcc57ae/abodepy/devices/sensor.py#L28-L34 | train | 32,294 |
MisterWil/abodepy | abodepy/devices/sensor.py | AbodeSensor.humidity_unit | def humidity_unit(self):
"""Get unit of humidity."""
if CONST.UNIT_PERCENT in self._get_status(CONST.HUMI_STATUS_KEY):
return CONST.UNIT_PERCENT
return None | python | def humidity_unit(self):
"""Get unit of humidity."""
if CONST.UNIT_PERCENT in self._get_status(CONST.HUMI_STATUS_KEY):
return CONST.UNIT_PERCENT
return None | [
"def",
"humidity_unit",
"(",
"self",
")",
":",
"if",
"CONST",
".",
"UNIT_PERCENT",
"in",
"self",
".",
"_get_status",
"(",
"CONST",
".",
"HUMI_STATUS_KEY",
")",
":",
"return",
"CONST",
".",
"UNIT_PERCENT",
"return",
"None"
] | Get unit of humidity. | [
"Get",
"unit",
"of",
"humidity",
"."
] | 6f84bb428fd1da98855f55083cd427bebbcc57ae | https://github.com/MisterWil/abodepy/blob/6f84bb428fd1da98855f55083cd427bebbcc57ae/abodepy/devices/sensor.py#L42-L46 | train | 32,295 |
MisterWil/abodepy | abodepy/devices/sensor.py | AbodeSensor.lux_unit | def lux_unit(self):
"""Get unit of lux."""
if CONST.UNIT_LUX in self._get_status(CONST.LUX_STATUS_KEY):
return CONST.LUX
return None | python | def lux_unit(self):
"""Get unit of lux."""
if CONST.UNIT_LUX in self._get_status(CONST.LUX_STATUS_KEY):
return CONST.LUX
return None | [
"def",
"lux_unit",
"(",
"self",
")",
":",
"if",
"CONST",
".",
"UNIT_LUX",
"in",
"self",
".",
"_get_status",
"(",
"CONST",
".",
"LUX_STATUS_KEY",
")",
":",
"return",
"CONST",
".",
"LUX",
"return",
"None"
] | Get unit of lux. | [
"Get",
"unit",
"of",
"lux",
"."
] | 6f84bb428fd1da98855f55083cd427bebbcc57ae | https://github.com/MisterWil/abodepy/blob/6f84bb428fd1da98855f55083cd427bebbcc57ae/abodepy/devices/sensor.py#L54-L58 | train | 32,296 |
MisterWil/abodepy | abodepy/devices/valve.py | AbodeValve.switch_on | def switch_on(self):
"""Open the valve."""
success = self.set_status(CONST.STATUS_ON_INT)
if success:
self._json_state['status'] = CONST.STATUS_OPEN
return success | python | def switch_on(self):
"""Open the valve."""
success = self.set_status(CONST.STATUS_ON_INT)
if success:
self._json_state['status'] = CONST.STATUS_OPEN
return success | [
"def",
"switch_on",
"(",
"self",
")",
":",
"success",
"=",
"self",
".",
"set_status",
"(",
"CONST",
".",
"STATUS_ON_INT",
")",
"if",
"success",
":",
"self",
".",
"_json_state",
"[",
"'status'",
"]",
"=",
"CONST",
".",
"STATUS_OPEN",
"return",
"success"
] | Open the valve. | [
"Open",
"the",
"valve",
"."
] | 6f84bb428fd1da98855f55083cd427bebbcc57ae | https://github.com/MisterWil/abodepy/blob/6f84bb428fd1da98855f55083cd427bebbcc57ae/abodepy/devices/valve.py#L10-L17 | train | 32,297 |
MisterWil/abodepy | abodepy/devices/valve.py | AbodeValve.switch_off | def switch_off(self):
"""Close the valve."""
success = self.set_status(CONST.STATUS_OFF_INT)
if success:
self._json_state['status'] = CONST.STATUS_CLOSED
return success | python | def switch_off(self):
"""Close the valve."""
success = self.set_status(CONST.STATUS_OFF_INT)
if success:
self._json_state['status'] = CONST.STATUS_CLOSED
return success | [
"def",
"switch_off",
"(",
"self",
")",
":",
"success",
"=",
"self",
".",
"set_status",
"(",
"CONST",
".",
"STATUS_OFF_INT",
")",
"if",
"success",
":",
"self",
".",
"_json_state",
"[",
"'status'",
"]",
"=",
"CONST",
".",
"STATUS_CLOSED",
"return",
"success"... | Close the valve. | [
"Close",
"the",
"valve",
"."
] | 6f84bb428fd1da98855f55083cd427bebbcc57ae | https://github.com/MisterWil/abodepy/blob/6f84bb428fd1da98855f55083cd427bebbcc57ae/abodepy/devices/valve.py#L19-L26 | train | 32,298 |
MisterWil/abodepy | abodepy/socketio.py | SocketIO.set_origin | def set_origin(self, origin=None):
"""Set the Origin header."""
if origin:
self._origin = origin.encode()
else:
self._origin = None | python | def set_origin(self, origin=None):
"""Set the Origin header."""
if origin:
self._origin = origin.encode()
else:
self._origin = None | [
"def",
"set_origin",
"(",
"self",
",",
"origin",
"=",
"None",
")",
":",
"if",
"origin",
":",
"self",
".",
"_origin",
"=",
"origin",
".",
"encode",
"(",
")",
"else",
":",
"self",
".",
"_origin",
"=",
"None"
] | Set the Origin header. | [
"Set",
"the",
"Origin",
"header",
"."
] | 6f84bb428fd1da98855f55083cd427bebbcc57ae | https://github.com/MisterWil/abodepy/blob/6f84bb428fd1da98855f55083cd427bebbcc57ae/abodepy/socketio.py#L84-L89 | train | 32,299 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.