repository_name
stringlengths
5
67
func_path_in_repository
stringlengths
4
234
func_name
stringlengths
0
314
whole_func_string
stringlengths
52
3.87M
language
stringclasses
6 values
func_code_string
stringlengths
52
3.87M
func_code_tokens
listlengths
15
672k
func_documentation_string
stringlengths
1
47.2k
func_documentation_tokens
listlengths
1
3.92k
split_name
stringclasses
1 value
func_code_url
stringlengths
85
339
THLO/map
map/mapper.py
MapExecutor.replaceInCommand
def replaceInCommand(self,command, pattern, replacement, replacementAtBeginning): """ This is in internal method that replaces a certain 'pattern' in the provided command with a 'replacement'. A different replacement can be specified when the pattern occurs right at the beginning of the command. """ # Turn the command into a list: commandAsList = list(command) # Get the indices of the pattern in the list: indices = [index.start() for index in re.finditer(pattern, command)] # Replace at the indices, unless the preceding character is the # escape character: for index in indices: if index == 0: commandAsList[index] = replacementAtBeginning elif commandAsList[index-1] != MapConstants.escape_char: commandAsList[index] = replacement # Put the pieces of the new command together: newCommand = ''.join(commandAsList) # Remove superfluous slashes and return: return newCommand.replace("//","/")
python
def replaceInCommand(self,command, pattern, replacement, replacementAtBeginning): """ This is in internal method that replaces a certain 'pattern' in the provided command with a 'replacement'. A different replacement can be specified when the pattern occurs right at the beginning of the command. """ # Turn the command into a list: commandAsList = list(command) # Get the indices of the pattern in the list: indices = [index.start() for index in re.finditer(pattern, command)] # Replace at the indices, unless the preceding character is the # escape character: for index in indices: if index == 0: commandAsList[index] = replacementAtBeginning elif commandAsList[index-1] != MapConstants.escape_char: commandAsList[index] = replacement # Put the pieces of the new command together: newCommand = ''.join(commandAsList) # Remove superfluous slashes and return: return newCommand.replace("//","/")
[ "def", "replaceInCommand", "(", "self", ",", "command", ",", "pattern", ",", "replacement", ",", "replacementAtBeginning", ")", ":", "# Turn the command into a list:", "commandAsList", "=", "list", "(", "command", ")", "# Get the indices of the pattern in the list:", "ind...
This is an internal method that replaces a certain 'pattern' in the provided command with a 'replacement'. A different replacement can be specified when the pattern occurs right at the beginning of the command.
[ "This", "is", "in", "internal", "method", "that", "replaces", "a", "certain", "pattern", "in", "the", "provided", "command", "with", "a", "replacement", ".", "A", "different", "replacement", "can", "be", "specified", "when", "the", "pattern", "occurs", "right"...
train
https://github.com/THLO/map/blob/6c1571187662bbf2e66faaf96b11a3e151ed4c87/map/mapper.py#L152-L173
THLO/map
map/mapper.py
MapExecutor.escapePlaceholders
def escapePlaceholders(self,inputString):
    """Escape all placeholders defined in MapConstants.py.

    Each placeholder marker found in 'inputString' is prefixed with a
    backslash so that later substitution passes leave it untouched.
    The markers are processed in a fixed order (general placeholder first,
    then file name, path, extension, and counter).
    """
    result = inputString
    for marker in (MapConstants.placeholder,
                   MapConstants.placeholderFileName,
                   MapConstants.placeholderPath,
                   MapConstants.placeholderExtension,
                   MapConstants.placeholderCounter):
        result = result.replace(marker, '\\' + marker)
    return result
python
def escapePlaceholders(self,inputString): """ This is an internal method that escapes all the placeholders defined in MapConstants.py. """ escaped = inputString.replace(MapConstants.placeholder,'\\'+MapConstants.placeholder) escaped = escaped.replace(MapConstants.placeholderFileName,'\\'+MapConstants.placeholderFileName) escaped = escaped.replace(MapConstants.placeholderPath,'\\'+MapConstants.placeholderPath) escaped = escaped.replace(MapConstants.placeholderExtension,'\\'+MapConstants.placeholderExtension) escaped = escaped.replace(MapConstants.placeholderCounter,'\\'+MapConstants.placeholderCounter) return escaped
[ "def", "escapePlaceholders", "(", "self", ",", "inputString", ")", ":", "escaped", "=", "inputString", ".", "replace", "(", "MapConstants", ".", "placeholder", ",", "'\\\\'", "+", "MapConstants", ".", "placeholder", ")", "escaped", "=", "escaped", ".", "replac...
This is an internal method that escapes all the placeholders defined in MapConstants.py.
[ "This", "is", "an", "internal", "method", "that", "escapes", "all", "the", "placeholders", "defined", "in", "MapConstants", ".", "py", "." ]
train
https://github.com/THLO/map/blob/6c1571187662bbf2e66faaf96b11a3e151ed4c87/map/mapper.py#L175-L185
THLO/map
map/mapper.py
MapExecutor.buildPart
def buildPart(self,commandPart,fileNameWithPath,count,args): """ This is in internal method that builds a part of the command, see buildCommand(). """ # Get the path to the file: filePath = os.path.split(fileNameWithPath)[0] # Append '/' if there is a path, i.e., the file is not in the local directory: if filePath != '': filePath = filePath +'/' # Get the file name without the path: fileNameWithoutPath = os.path.basename(fileNameWithPath) # Get the file name without the path and without the extension: plainFileName = os.path.splitext(fileNameWithoutPath)[0] # Get the extension: fileExtension = os.path.splitext(fileNameWithoutPath)[1] # The original command part is retained: originalCommandPart = commandPart # Replace the file placeholder character with the file: commandPart = self.replaceInCommand(commandPart,MapConstants.placeholder,fileNameWithoutPath,fileNameWithPath) # Replace the path placeholder with the path: commandPart = self.replaceInCommand(commandPart,MapConstants.placeholderPath,filePath,filePath) # Replace the plain file placeholder with the plain file: commandPart = self.replaceInCommand(commandPart,MapConstants.placeholderFileName,plainFileName,plainFileName) # Replace the extension placeholder with the extension: commandPart = self.replaceInCommand(commandPart,MapConstants.placeholderExtension,fileExtension,fileExtension) # Replace the placeholder for the counter with the actual count: if args.number_length == 0: replacementString = str(count) else: replacementString = ('{0:0'+str(args.number_length)+'d}').format(count) commandPart = self.replaceInCommand(commandPart,MapConstants.placeholderCounter,replacementString,replacementString) # If the command part changed, it is put in quotes to avoid problems with special characters: if originalCommandPart != commandPart: commandPart = '\"' + commandPart + '\"' return commandPart
python
def buildPart(self,commandPart,fileNameWithPath,count,args): """ This is in internal method that builds a part of the command, see buildCommand(). """ # Get the path to the file: filePath = os.path.split(fileNameWithPath)[0] # Append '/' if there is a path, i.e., the file is not in the local directory: if filePath != '': filePath = filePath +'/' # Get the file name without the path: fileNameWithoutPath = os.path.basename(fileNameWithPath) # Get the file name without the path and without the extension: plainFileName = os.path.splitext(fileNameWithoutPath)[0] # Get the extension: fileExtension = os.path.splitext(fileNameWithoutPath)[1] # The original command part is retained: originalCommandPart = commandPart # Replace the file placeholder character with the file: commandPart = self.replaceInCommand(commandPart,MapConstants.placeholder,fileNameWithoutPath,fileNameWithPath) # Replace the path placeholder with the path: commandPart = self.replaceInCommand(commandPart,MapConstants.placeholderPath,filePath,filePath) # Replace the plain file placeholder with the plain file: commandPart = self.replaceInCommand(commandPart,MapConstants.placeholderFileName,plainFileName,plainFileName) # Replace the extension placeholder with the extension: commandPart = self.replaceInCommand(commandPart,MapConstants.placeholderExtension,fileExtension,fileExtension) # Replace the placeholder for the counter with the actual count: if args.number_length == 0: replacementString = str(count) else: replacementString = ('{0:0'+str(args.number_length)+'d}').format(count) commandPart = self.replaceInCommand(commandPart,MapConstants.placeholderCounter,replacementString,replacementString) # If the command part changed, it is put in quotes to avoid problems with special characters: if originalCommandPart != commandPart: commandPart = '\"' + commandPart + '\"' return commandPart
[ "def", "buildPart", "(", "self", ",", "commandPart", ",", "fileNameWithPath", ",", "count", ",", "args", ")", ":", "# Get the path to the file:", "filePath", "=", "os", ".", "path", ".", "split", "(", "fileNameWithPath", ")", "[", "0", "]", "# Append '/' if th...
This is an internal method that builds a part of the command, see buildCommand().
[ "This", "is", "in", "internal", "method", "that", "builds", "a", "part", "of", "the", "command", "see", "buildCommand", "()", "." ]
train
https://github.com/THLO/map/blob/6c1571187662bbf2e66faaf96b11a3e151ed4c87/map/mapper.py#L193-L230
THLO/map
map/mapper.py
MapExecutor.buildCommand
def buildCommand(self,fileName,count,args):
    """Build the complete command for a single input file.

    The file path is first escaped so that placeholder characters occurring
    in the path itself survive substitution; the command template is then
    processed part by part (parts are separated by single blank spaces) and
    reassembled, with the escaping undone at the end.
    """
    escapedPath = self.escapePlaceholders(fileName)
    # Process every blank-separated part of the command template:
    pieces = [self.buildPart(piece, escapedPath, count, args)
              for piece in args.command.split(' ')]
    # Reassemble and remove the temporary escaping:
    return self.unescapePlaceholders(' '.join(pieces))
python
def buildCommand(self,fileName,count,args): """ This is an internal method, building the command for a particular file. """ # Escape all placeholders in the file path: fileNameWithPath = self.escapePlaceholders(fileName) # The command is split into 'parts', which are separated by blank spaces: commandParts = args.command.split(' ') processedParts = [] # Each part of the command is processed separately: for part in commandParts: processedParts.append(self.buildPart(part,fileNameWithPath,count,args)) # The parts are put together at the end and the new command is returned: return self.unescapePlaceholders(' '.join(processedParts))
[ "def", "buildCommand", "(", "self", ",", "fileName", ",", "count", ",", "args", ")", ":", "# Escape all placeholders in the file path:", "fileNameWithPath", "=", "self", ".", "escapePlaceholders", "(", "fileName", ")", "# The command is split into 'parts', which are separat...
This is an internal method, building the command for a particular file.
[ "This", "is", "an", "internal", "method", "building", "the", "command", "for", "a", "particular", "file", "." ]
train
https://github.com/THLO/map/blob/6c1571187662bbf2e66faaf96b11a3e151ed4c87/map/mapper.py#L232-L246
THLO/map
map/mapper.py
MapExecutor.buildCommands
def buildCommands(self,files,args):
    """Build all the commands for the given list of (input) files.

    This is one of the two key methods of MapExecutor.  The per-file
    counter starts at args.count_from and increases by one per file.
    """
    return [self.buildCommand(name, number, args)
            for number, name in enumerate(files, args.count_from)]
python
def buildCommands(self,files,args): """ Given a list of (input) files, buildCommands builds all the commands. This is one of the two key methods of MapExecutor. """ commands = [] count = args.count_from # For each file, a command is created: for fileName in files: commands.append(self.buildCommand(fileName,count,args)) count = count+1 return commands
[ "def", "buildCommands", "(", "self", ",", "files", ",", "args", ")", ":", "commands", "=", "[", "]", "count", "=", "args", ".", "count_from", "# For each file, a command is created:", "for", "fileName", "in", "files", ":", "commands", ".", "append", "(", "se...
Given a list of (input) files, buildCommands builds all the commands. This is one of the two key methods of MapExecutor.
[ "Given", "a", "list", "of", "(", "input", ")", "files", "buildCommands", "builds", "all", "the", "commands", ".", "This", "is", "one", "of", "the", "two", "key", "methods", "of", "MapExecutor", "." ]
train
https://github.com/THLO/map/blob/6c1571187662bbf2e66faaf96b11a3e151ed4c87/map/mapper.py#L248-L259
THLO/map
map/mapper.py
MapExecutor.runCommands
def runCommands(self,commands,args):
    """Execute the given list of commands sequentially.

    This is one of the two key methods of MapExecutor.  With args.list set,
    the commands are only printed (dry run).  Execution stops at the first
    failing command unless args.ignore_errors is set; a summary of the
    error count is printed at the end.

    Fix: the original body mixed Python-2 print statements with one print
    function call; all prints are normalized to single-argument print(...)
    calls, which produce identical output under Python 2 and Python 3.
    """
    errorCounter = 0
    if args.list:
        # Dry-run mode: only show the commands, do not execute them.
        print('\n'.join(commands))
    else:
        # Each command is executed sequentially.
        # NOTE(review): shell=True runs the command string through the
        # shell; commands are built from user-supplied file names, so this
        # is only as safe as the quoting applied upstream.
        for command in commands:
            process = subprocess.Popen(command, stdout=subprocess.PIPE,stderr=subprocess.PIPE,shell=True)
            stream = process.communicate()
            output = stream[0]
            erroroutput = stream[1]
            returncode = process.returncode
            if args.verbose:
                print('Executing command: '+command)
            if returncode != 0:
                errorCounter = errorCounter + 1
                if args.verbose or not args.ignore_errors:
                    print('An error occurred:\n')
                    print(erroroutput)
                if not args.ignore_errors:
                    print('Terminating map process.')
                    break
            if returncode == 0 and len(output) > 0:
                sys.stdout.write(output)
            if args.verbose:
                print('Process completed successfully.')
    if errorCounter > 0:
        if errorCounter > 1:
            print(str(errorCounter) + ' errors occurred during the process.')
        else:
            print(str(errorCounter) + ' error occurred during the process.')
python
def runCommands(self,commands,args): """ Given a list of commands, runCommands executes them. This is one of the two key methods of MapExecutor. """ errorCounter = 0 if args.list: print '\n'.join(commands) else: # Each command is executed sequentially: for command in commands: process = subprocess.Popen(command, stdout=subprocess.PIPE,stderr=subprocess.PIPE,shell=True) stream = process.communicate() output = stream[0] erroroutput = stream[1] returncode = process.returncode if args.verbose: print 'Executing command: '+command if returncode != 0: errorCounter = errorCounter + 1 if args.verbose or not args.ignore_errors: print 'An error occurred:\n' print erroroutput if not args.ignore_errors: print('Terminating map process.') break if returncode == 0 and len(output) > 0: sys.stdout.write(output) if args.verbose: print 'Process completed successfully.' if errorCounter > 0: if errorCounter > 1: print str(errorCounter) + ' errors occurred during the process.' else: print str(errorCounter) + ' error occurred during the process.'
[ "def", "runCommands", "(", "self", ",", "commands", ",", "args", ")", ":", "errorCounter", "=", "0", "if", "args", ".", "list", ":", "print", "'\\n'", ".", "join", "(", "commands", ")", "else", ":", "# Each command is executed sequentially:", "for", "command...
Given a list of commands, runCommands executes them. This is one of the two key methods of MapExecutor.
[ "Given", "a", "list", "of", "commands", "runCommands", "executes", "them", ".", "This", "is", "one", "of", "the", "two", "key", "methods", "of", "MapExecutor", "." ]
train
https://github.com/THLO/map/blob/6c1571187662bbf2e66faaf96b11a3e151ed4c87/map/mapper.py#L261-L295
MoseleyBioinformaticsLab/nmrstarlib
nmrstarlib/fileio.py
_generate_filenames
def _generate_filenames(sources):
    """Generate filenames.

    :param tuple sources: Sequence of strings representing path to file(s).
    :return: Path to file(s).
    :rtype: :py:class:`str`
    """
    for source in sources:
        if os.path.isdir(source):
            # Walk the directory tree, yielding every non-compressed file:
            for path, dirlist, filelist in os.walk(source):
                for fname in filelist:
                    if nmrstarlib.VERBOSE:
                        print("Processing file: {}".format(os.path.abspath(fname)))
                    if GenericFilePath.is_compressed(fname):
                        if nmrstarlib.VERBOSE:
                            print("Skipping compressed file: {}".format(os.path.abspath(fname)))
                        continue
                    else:
                        yield os.path.join(path, fname)
        elif os.path.isfile(source):
            yield source
        elif GenericFilePath.is_url(source):
            yield source
        elif source.isdigit():
            # All-digit ids are tried against BMRB first, then PDB:
            try:
                urlopen(nmrstarlib.BMRB_REST + source)
                yield nmrstarlib.BMRB_REST + source
            except HTTPError:
                urlopen(nmrstarlib.PDB_REST + source + ".cif")
                yield nmrstarlib.PDB_REST + source + ".cif"
        elif re.fullmatch(r"\w{4}", source):
            # Fix: the original pattern "[\w\d]{4}" was a non-raw string
            # (invalid-escape warning), \d is redundant inside \w, and
            # re.match is unanchored at the end -- any source with at least
            # four leading word characters matched.  fullmatch restricts
            # this branch to exactly-four-character PDB ids as intended.
            yield nmrstarlib.PDB_REST + source + ".cif"
        else:
            raise TypeError("Unknown file source.")
python
def _generate_filenames(sources): """Generate filenames. :param tuple sources: Sequence of strings representing path to file(s). :return: Path to file(s). :rtype: :py:class:`str` """ for source in sources: if os.path.isdir(source): for path, dirlist, filelist in os.walk(source): for fname in filelist: if nmrstarlib.VERBOSE: print("Processing file: {}".format(os.path.abspath(fname))) if GenericFilePath.is_compressed(fname): if nmrstarlib.VERBOSE: print("Skipping compressed file: {}".format(os.path.abspath(fname))) continue else: yield os.path.join(path, fname) elif os.path.isfile(source): yield source elif GenericFilePath.is_url(source): yield source elif source.isdigit(): try: urlopen(nmrstarlib.BMRB_REST + source) yield nmrstarlib.BMRB_REST + source except HTTPError: urlopen(nmrstarlib.PDB_REST + source + ".cif") yield nmrstarlib.PDB_REST + source + ".cif" elif re.match("[\w\d]{4}", source): yield nmrstarlib.PDB_REST + source + ".cif" else: raise TypeError("Unknown file source.")
[ "def", "_generate_filenames", "(", "sources", ")", ":", "for", "source", "in", "sources", ":", "if", "os", ".", "path", ".", "isdir", "(", "source", ")", ":", "for", "path", ",", "dirlist", ",", "filelist", "in", "os", ".", "walk", "(", "source", ")"...
Generate filenames. :param tuple sources: Sequence of strings representing path to file(s). :return: Path to file(s). :rtype: :py:class:`str`
[ "Generate", "filenames", "." ]
train
https://github.com/MoseleyBioinformaticsLab/nmrstarlib/blob/f2adabbca04d5a134ce6ba3211099d1457787ff2/nmrstarlib/fileio.py#L39-L79
MoseleyBioinformaticsLab/nmrstarlib
nmrstarlib/fileio.py
_generate_handles
def _generate_handles(filenames):
    """Open a sequence of filenames one at time producing file objects.
    The file is closed immediately when proceeding to the next iteration.

    :param generator filenames: Generator object that yields the path to each file, one at a time.
    :return: Filehandle to be processed into a :class:`~nmrstarlib.nmrstarlib.StarFile` instance.
    """
    for fname in filenames:
        if nmrstarlib.VERBOSE:
            print("Processing file: {}".format(os.path.abspath(fname)))
        path = GenericFilePath(fname)
        # A single path may expand to several handles (e.g. archives):
        for filehandle, source in path.open():
            yield filehandle, source
            # NOTE(review): close() only runs when the consumer resumes the
            # generator; if iteration is abandoned mid-stream, the handle is
            # released by the generator's finalizer, not deterministically.
            filehandle.close()
python
def _generate_handles(filenames): """Open a sequence of filenames one at time producing file objects. The file is closed immediately when proceeding to the next iteration. :param generator filenames: Generator object that yields the path to each file, one at a time. :return: Filehandle to be processed into a :class:`~nmrstarlib.nmrstarlib.StarFile` instance. """ for fname in filenames: if nmrstarlib.VERBOSE: print("Processing file: {}".format(os.path.abspath(fname))) path = GenericFilePath(fname) for filehandle, source in path.open(): yield filehandle, source filehandle.close()
[ "def", "_generate_handles", "(", "filenames", ")", ":", "for", "fname", "in", "filenames", ":", "if", "nmrstarlib", ".", "VERBOSE", ":", "print", "(", "\"Processing file: {}\"", ".", "format", "(", "os", ".", "path", ".", "abspath", "(", "fname", ")", ")",...
Open a sequence of filenames one at time producing file objects. The file is closed immediately when proceeding to the next iteration. :param generator filenames: Generator object that yields the path to each file, one at a time. :return: Filehandle to be processed into a :class:`~nmrstarlib.nmrstarlib.StarFile` instance.
[ "Open", "a", "sequence", "of", "filenames", "one", "at", "time", "producing", "file", "objects", ".", "The", "file", "is", "closed", "immediately", "when", "proceeding", "to", "the", "next", "iteration", "." ]
train
https://github.com/MoseleyBioinformaticsLab/nmrstarlib/blob/f2adabbca04d5a134ce6ba3211099d1457787ff2/nmrstarlib/fileio.py#L82-L95
MoseleyBioinformaticsLab/nmrstarlib
nmrstarlib/fileio.py
read_files
def read_files(*sources):
    """Construct a generator that yields :class:`~nmrstarlib.nmrstarlib.StarFile` instances.

    :param sources: One or more strings representing path to file(s).
    :return: :class:`~nmrstarlib.nmrstarlib.StarFile` instance(s).
    :rtype: :class:`~nmrstarlib.nmrstarlib.StarFile`
    """
    # Compose the two generator stages and parse each handle lazily:
    handles = _generate_handles(_generate_filenames(sources))
    for filehandle, source in handles:
        yield nmrstarlib.StarFile.read(filehandle, source)
python
def read_files(*sources): """Construct a generator that yields :class:`~nmrstarlib.nmrstarlib.StarFile` instances. :param sources: One or more strings representing path to file(s). :return: :class:`~nmrstarlib.nmrstarlib.StarFile` instance(s). :rtype: :class:`~nmrstarlib.nmrstarlib.StarFile` """ filenames = _generate_filenames(sources) filehandles = _generate_handles(filenames) for fh, source in filehandles: starfile = nmrstarlib.StarFile.read(fh, source) yield starfile
[ "def", "read_files", "(", "*", "sources", ")", ":", "filenames", "=", "_generate_filenames", "(", "sources", ")", "filehandles", "=", "_generate_handles", "(", "filenames", ")", "for", "fh", ",", "source", "in", "filehandles", ":", "starfile", "=", "nmrstarlib...
Construct a generator that yields :class:`~nmrstarlib.nmrstarlib.StarFile` instances. :param sources: One or more strings representing path to file(s). :return: :class:`~nmrstarlib.nmrstarlib.StarFile` instance(s). :rtype: :class:`~nmrstarlib.nmrstarlib.StarFile`
[ "Construct", "a", "generator", "that", "yields", ":", "class", ":", "~nmrstarlib", ".", "nmrstarlib", ".", "StarFile", "instances", "." ]
train
https://github.com/MoseleyBioinformaticsLab/nmrstarlib/blob/f2adabbca04d5a134ce6ba3211099d1457787ff2/nmrstarlib/fileio.py#L98-L109
datasift/datasift-python
datasift/pylon.py
Pylon.validate
def validate(self, csdl, service='facebook'):
    """
    Validate the given CSDL

    :param csdl: The CSDL to be validated for analysis
    :type csdl: str
    :param service: The service for this API call (facebook, etc)
                    (accepted for API symmetry; the request always posts
                    to the 'validate' endpoint and does not use it)
    :type service: str
    :return: dict of REST API output with headers attached
    :rtype: :class:`~datasift.request.DictResponse`
    :raises: :class:`~datasift.exceptions.DataSiftApiException`,
        :class:`requests.exceptions.HTTPError`
    """
    payload = dict(csdl=csdl)
    return self.request.post('validate', data=payload)
python
def validate(self, csdl, service='facebook'): """ Validate the given CSDL :param csdl: The CSDL to be validated for analysis :type csdl: str :param service: The service for this API call (facebook, etc) :type service: str :return: dict of REST API output with headers attached :rtype: :class:`~datasift.request.DictResponse` :raises: :class:`~datasift.exceptions.DataSiftApiException`, :class:`requests.exceptions.HTTPError` """ return self.request.post('validate', data=dict(csdl=csdl))
[ "def", "validate", "(", "self", ",", "csdl", ",", "service", "=", "'facebook'", ")", ":", "return", "self", ".", "request", ".", "post", "(", "'validate'", ",", "data", "=", "dict", "(", "csdl", "=", "csdl", ")", ")" ]
Validate the given CSDL :param csdl: The CSDL to be validated for analysis :type csdl: str :param service: The service for this API call (facebook, etc) :type service: str :return: dict of REST API output with headers attached :rtype: :class:`~datasift.request.DictResponse` :raises: :class:`~datasift.exceptions.DataSiftApiException`, :class:`requests.exceptions.HTTPError`
[ "Validate", "the", "given", "CSDL" ]
train
https://github.com/datasift/datasift-python/blob/bfaca1a47501a18e11065ecf630d9c31df818f65/datasift/pylon.py#L9-L21
datasift/datasift-python
datasift/pylon.py
Pylon.start
def start(self, hash, name=None, service='facebook'):
    """
    Start a recording for the provided hash

    :param hash: The hash to start recording with
    :type hash: str
    :param name: The name of the recording
    :type name: str
    :param service: The service for this API call (facebook, etc)
    :type service: str
    :return: dict of REST API output with headers attached
    :rtype: :class:`~datasift.request.DictResponse`
    :raises: :class:`~datasift.exceptions.DataSiftApiException`,
        :class:`requests.exceptions.HTTPError`
    """
    payload = dict(hash=hash)
    if name:
        payload['name'] = name
    return self.request.post(service + '/start', payload)
python
def start(self, hash, name=None, service='facebook'): """ Start a recording for the provided hash :param hash: The hash to start recording with :type hash: str :param name: The name of the recording :type name: str :param service: The service for this API call (facebook, etc) :type service: str :return: dict of REST API output with headers attached :rtype: :class:`~datasift.request.DictResponse` :raises: :class:`~datasift.exceptions.DataSiftApiException`, :class:`requests.exceptions.HTTPError` """ params = {'hash': hash} if name: params['name'] = name return self.request.post(service + '/start', params)
[ "def", "start", "(", "self", ",", "hash", ",", "name", "=", "None", ",", "service", "=", "'facebook'", ")", ":", "params", "=", "{", "'hash'", ":", "hash", "}", "if", "name", ":", "params", "[", "'name'", "]", "=", "name", "return", "self", ".", ...
Start a recording for the provided hash :param hash: The hash to start recording with :type hash: str :param name: The name of the recording :type name: str :param service: The service for this API call (facebook, etc) :type service: str :return: dict of REST API output with headers attached :rtype: :class:`~datasift.request.DictResponse` :raises: :class:`~datasift.exceptions.DataSiftApiException`, :class:`requests.exceptions.HTTPError`
[ "Start", "a", "recording", "for", "the", "provided", "hash" ]
train
https://github.com/datasift/datasift-python/blob/bfaca1a47501a18e11065ecf630d9c31df818f65/datasift/pylon.py#L37-L57
datasift/datasift-python
datasift/pylon.py
Pylon.stop
def stop(self, id, service='facebook'):
    """
    Stop the recording for the provided id

    :param id: The hash to start recording with
    :type id: str
    :param service: The service for this API call (facebook, etc)
    :type service: str
    :rtype: :class:`~datasift.request.DictResponse`
    :raises: :class:`~datasift.exceptions.DataSiftApiException`,
        :class:`requests.exceptions.HTTPError`
    """
    payload = dict(id=id)
    return self.request.post(service + '/stop', data=payload)
python
def stop(self, id, service='facebook'): """ Stop the recording for the provided id :param id: The hash to start recording with :type id: str :param service: The service for this API call (facebook, etc) :type service: str :rtype: :class:`~datasift.request.DictResponse` :raises: :class:`~datasift.exceptions.DataSiftApiException`, :class:`requests.exceptions.HTTPError` """ return self.request.post(service + '/stop', data=dict(id=id))
[ "def", "stop", "(", "self", ",", "id", ",", "service", "=", "'facebook'", ")", ":", "return", "self", ".", "request", ".", "post", "(", "service", "+", "'/stop'", ",", "data", "=", "dict", "(", "id", "=", "id", ")", ")" ]
Stop the recording for the provided id :param id: The hash to start recording with :type id: str :param service: The service for this API call (facebook, etc) :type service: str :rtype: :class:`~datasift.request.DictResponse` :raises: :class:`~datasift.exceptions.DataSiftApiException`, :class:`requests.exceptions.HTTPError`
[ "Stop", "the", "recording", "for", "the", "provided", "id" ]
train
https://github.com/datasift/datasift-python/blob/bfaca1a47501a18e11065ecf630d9c31df818f65/datasift/pylon.py#L59-L70
datasift/datasift-python
datasift/pylon.py
Pylon.analyze
def analyze(self, id, parameters, filter=None, start=None, end=None, service='facebook'):
    """
    Analyze the recorded data for a given hash

    :param id: The id of the recording
    :type id: str
    :param parameters: To set settings such as threshold and target
    :type parameters: dict
    :param filter: An optional secondary filter
    :type filter: str
    :param start: Determines time period of the analyze
    :type start: int
    :param end: Determines time period of the analyze
    :type end: int
    :param service: The service for this API call (facebook, etc)
    :type service: str
    :return: dict of REST API output with headers attached
    :rtype: :class:`~datasift.request.DictResponse`
    :raises: :class:`~datasift.exceptions.DataSiftApiException`,
        :class:`requests.exceptions.HTTPError`
    """
    payload = {'id': id, 'parameters': parameters}
    # Truthy optional arguments are forwarded; falsy ones are omitted:
    for key, value in (('filter', filter), ('start', start), ('end', end)):
        if value:
            payload[key] = value
    return self.request.post(service + '/analyze', payload)
python
def analyze(self, id, parameters, filter=None, start=None, end=None, service='facebook'): """ Analyze the recorded data for a given hash :param id: The id of the recording :type id: str :param parameters: To set settings such as threshold and target :type parameters: dict :param filter: An optional secondary filter :type filter: str :param start: Determines time period of the analyze :type start: int :param end: Determines time period of the analyze :type end: int :param service: The service for this API call (facebook, etc) :type service: str :return: dict of REST API output with headers attached :rtype: :class:`~datasift.request.DictResponse` :raises: :class:`~datasift.exceptions.DataSiftApiException`, :class:`requests.exceptions.HTTPError` """ params = {'id': id, 'parameters': parameters} if filter: params['filter'] = filter if start: params['start'] = start if end: params['end'] = end return self.request.post(service + '/analyze', params)
[ "def", "analyze", "(", "self", ",", "id", ",", "parameters", ",", "filter", "=", "None", ",", "start", "=", "None", ",", "end", "=", "None", ",", "service", "=", "'facebook'", ")", ":", "params", "=", "{", "'id'", ":", "id", ",", "'parameters'", ":...
Analyze the recorded data for a given hash :param id: The id of the recording :type id: str :param parameters: To set settings such as threshold and target :type parameters: dict :param filter: An optional secondary filter :type filter: str :param start: Determines time period of the analyze :type start: int :param end: Determines time period of the analyze :type end: int :param service: The service for this API call (facebook, etc) :type service: str :return: dict of REST API output with headers attached :rtype: :class:`~datasift.request.DictResponse` :raises: :class:`~datasift.exceptions.DataSiftApiException`, :class:`requests.exceptions.HTTPError`
[ "Analyze", "the", "recorded", "data", "for", "a", "given", "hash" ]
train
https://github.com/datasift/datasift-python/blob/bfaca1a47501a18e11065ecf630d9c31df818f65/datasift/pylon.py#L72-L104
datasift/datasift-python
datasift/pylon.py
Pylon.get
def get(self, id, service='facebook'):
    """
    Get the existing analysis for a given hash

    :param service: The service for this API call (facebook, etc)
    :type service: str
    :param id: The optional hash to get recordings with
    :type id: str
    :return: dict of REST API output with headers attached
    :rtype: :class:`~datasift.request.DictResponse`
    :raises: :class:`~datasift.exceptions.DataSiftApiException`,
        :class:`requests.exceptions.HTTPError`
    """
    return self.request.get(service + '/get', {'id': id})
python
def get(self, id, service='facebook'): """ Get the existing analysis for a given hash :param service: The service for this API call (facebook, etc) :type service: str :param id: The optional hash to get recordings with :type id: str :return: dict of REST API output with headers attached :rtype: :class:`~datasift.request.DictResponse` :raises: :class:`~datasift.exceptions.DataSiftApiException`, :class:`requests.exceptions.HTTPError` """ params = {'id': id} return self.request.get(service + '/get', params)
[ "def", "get", "(", "self", ",", "id", ",", "service", "=", "'facebook'", ")", ":", "params", "=", "{", "'id'", ":", "id", "}", "return", "self", ".", "request", ".", "get", "(", "service", "+", "'/get'", ",", "params", ")" ]
Get the existing analysis for a given hash :param service: The service for this API call (facebook, etc) :type service: str :param id: The optional hash to get recordings with :type id: str :return: dict of REST API output with headers attached :rtype: :class:`~datasift.request.DictResponse` :raises: :class:`~datasift.exceptions.DataSiftApiException`, :class:`requests.exceptions.HTTPError`
[ "Get", "the", "existing", "analysis", "for", "a", "given", "hash" ]
train
https://github.com/datasift/datasift-python/blob/bfaca1a47501a18e11065ecf630d9c31df818f65/datasift/pylon.py#L106-L121
datasift/datasift-python
datasift/pylon.py
Pylon.list
def list(self, page=None, per_page=None, order_by='created_at', order_dir='DESC', service='facebook'): """ List pylon recordings :param page: page number for pagination :type page: int :param per_page: number of items per page, default 20 :type per_page: int :param order_by: field to order by, default request_time :type order_by: str :param order_dir: direction to order by, asc or desc, default desc :type order_dir: str :param service: The service for this API call (facebook, etc) :type service: str :return: dict of REST API output with headers attached :rtype: :class:`~datasift.request.DictResponse` :raises: :class:`~datasift.exceptions.DataSiftApiException`, :class:`requests.exceptions.HTTPError` """ params = {} if page: params['page'] = page if per_page: params['per_page'] = per_page if order_by: params['order_by'] = order_by if order_dir: params['order_dir'] = order_dir return self.request.get(service + '/get', params)
python
def list(self, page=None, per_page=None, order_by='created_at', order_dir='DESC', service='facebook'): """ List pylon recordings :param page: page number for pagination :type page: int :param per_page: number of items per page, default 20 :type per_page: int :param order_by: field to order by, default request_time :type order_by: str :param order_dir: direction to order by, asc or desc, default desc :type order_dir: str :param service: The service for this API call (facebook, etc) :type service: str :return: dict of REST API output with headers attached :rtype: :class:`~datasift.request.DictResponse` :raises: :class:`~datasift.exceptions.DataSiftApiException`, :class:`requests.exceptions.HTTPError` """ params = {} if page: params['page'] = page if per_page: params['per_page'] = per_page if order_by: params['order_by'] = order_by if order_dir: params['order_dir'] = order_dir return self.request.get(service + '/get', params)
[ "def", "list", "(", "self", ",", "page", "=", "None", ",", "per_page", "=", "None", ",", "order_by", "=", "'created_at'", ",", "order_dir", "=", "'DESC'", ",", "service", "=", "'facebook'", ")", ":", "params", "=", "{", "}", "if", "page", ":", "param...
List pylon recordings :param page: page number for pagination :type page: int :param per_page: number of items per page, default 20 :type per_page: int :param order_by: field to order by, default request_time :type order_by: str :param order_dir: direction to order by, asc or desc, default desc :type order_dir: str :param service: The service for this API call (facebook, etc) :type service: str :return: dict of REST API output with headers attached :rtype: :class:`~datasift.request.DictResponse` :raises: :class:`~datasift.exceptions.DataSiftApiException`, :class:`requests.exceptions.HTTPError`
[ "List", "pylon", "recordings" ]
train
https://github.com/datasift/datasift-python/blob/bfaca1a47501a18e11065ecf630d9c31df818f65/datasift/pylon.py#L123-L155
datasift/datasift-python
datasift/pylon.py
Pylon.tags
def tags(self, id, service='facebook'): """ Get the existing analysis for a given hash :param id: The hash to get tag analysis for :type id: str :param service: The service for this API call (facebook, etc) :type service: str :return: dict of REST API output with headers attached :rtype: :class:`~datasift.request.DictResponse` :raises: :class:`~datasift.exceptions.DataSiftApiException`, :class:`requests.exceptions.HTTPError` """ return self.request.get(service + '/tags', params=dict(id=id))
python
def tags(self, id, service='facebook'): """ Get the existing analysis for a given hash :param id: The hash to get tag analysis for :type id: str :param service: The service for this API call (facebook, etc) :type service: str :return: dict of REST API output with headers attached :rtype: :class:`~datasift.request.DictResponse` :raises: :class:`~datasift.exceptions.DataSiftApiException`, :class:`requests.exceptions.HTTPError` """ return self.request.get(service + '/tags', params=dict(id=id))
[ "def", "tags", "(", "self", ",", "id", ",", "service", "=", "'facebook'", ")", ":", "return", "self", ".", "request", ".", "get", "(", "service", "+", "'/tags'", ",", "params", "=", "dict", "(", "id", "=", "id", ")", ")" ]
Get the existing analysis for a given hash :param id: The hash to get tag analysis for :type id: str :param service: The service for this API call (facebook, etc) :type service: str :return: dict of REST API output with headers attached :rtype: :class:`~datasift.request.DictResponse` :raises: :class:`~datasift.exceptions.DataSiftApiException`, :class:`requests.exceptions.HTTPError`
[ "Get", "the", "existing", "analysis", "for", "a", "given", "hash" ]
train
https://github.com/datasift/datasift-python/blob/bfaca1a47501a18e11065ecf630d9c31df818f65/datasift/pylon.py#L157-L169
datasift/datasift-python
datasift/pylon.py
Pylon.sample
def sample(self, id, count=None, start=None, end=None, filter=None, service='facebook'): """ Get sample interactions for a given hash :param id: The hash to get tag analysis for :type id: str :param start: Determines time period of the sample data :type start: int :param end: Determines time period of the sample data :type end: int :param filter: An optional secondary filter :type filter: str :param service: The service for this API call (facebook, etc) :type service: str :return: dict of REST API output with headers attached :rtype: :class:`~datasift.request.DictResponse` :raises: :class:`~datasift.exceptions.DataSiftApiException`, :class:`requests.exceptions.HTTPError` """ params = {'id': id} if count: params['count'] = count if start: params['start'] = start if end: params['end'] = end if filter: params['filter'] = filter return self.request.get(service + '/sample', params)
python
def sample(self, id, count=None, start=None, end=None, filter=None, service='facebook'): """ Get sample interactions for a given hash :param id: The hash to get tag analysis for :type id: str :param start: Determines time period of the sample data :type start: int :param end: Determines time period of the sample data :type end: int :param filter: An optional secondary filter :type filter: str :param service: The service for this API call (facebook, etc) :type service: str :return: dict of REST API output with headers attached :rtype: :class:`~datasift.request.DictResponse` :raises: :class:`~datasift.exceptions.DataSiftApiException`, :class:`requests.exceptions.HTTPError` """ params = {'id': id} if count: params['count'] = count if start: params['start'] = start if end: params['end'] = end if filter: params['filter'] = filter return self.request.get(service + '/sample', params)
[ "def", "sample", "(", "self", ",", "id", ",", "count", "=", "None", ",", "start", "=", "None", ",", "end", "=", "None", ",", "filter", "=", "None", ",", "service", "=", "'facebook'", ")", ":", "params", "=", "{", "'id'", ":", "id", "}", "if", "...
Get sample interactions for a given hash :param id: The hash to get tag analysis for :type id: str :param start: Determines time period of the sample data :type start: int :param end: Determines time period of the sample data :type end: int :param filter: An optional secondary filter :type filter: str :param service: The service for this API call (facebook, etc) :type service: str :return: dict of REST API output with headers attached :rtype: :class:`~datasift.request.DictResponse` :raises: :class:`~datasift.exceptions.DataSiftApiException`, :class:`requests.exceptions.HTTPError`
[ "Get", "sample", "interactions", "for", "a", "given", "hash" ]
train
https://github.com/datasift/datasift-python/blob/bfaca1a47501a18e11065ecf630d9c31df818f65/datasift/pylon.py#L171-L202
datasift/datasift-python
datasift/limit.py
Limit.get
def get(self, identity_id, service): """ Get the limit for the given identity and service :param identity_id: The ID of the identity to retrieve :param service: The service that the limit is linked to :return: dict of REST API output with headers attached :rtype: :class:`~datasift.request.DictResponse` :raises: :class:`~datasift.exceptions.DataSiftApiException`, :class:`requests.exceptions.HTTPError` """ return self.request.get(str(identity_id) + '/limit/' + service)
python
def get(self, identity_id, service): """ Get the limit for the given identity and service :param identity_id: The ID of the identity to retrieve :param service: The service that the limit is linked to :return: dict of REST API output with headers attached :rtype: :class:`~datasift.request.DictResponse` :raises: :class:`~datasift.exceptions.DataSiftApiException`, :class:`requests.exceptions.HTTPError` """ return self.request.get(str(identity_id) + '/limit/' + service)
[ "def", "get", "(", "self", ",", "identity_id", ",", "service", ")", ":", "return", "self", ".", "request", ".", "get", "(", "str", "(", "identity_id", ")", "+", "'/limit/'", "+", "service", ")" ]
Get the limit for the given identity and service :param identity_id: The ID of the identity to retrieve :param service: The service that the limit is linked to :return: dict of REST API output with headers attached :rtype: :class:`~datasift.request.DictResponse` :raises: :class:`~datasift.exceptions.DataSiftApiException`, :class:`requests.exceptions.HTTPError`
[ "Get", "the", "limit", "for", "the", "given", "identity", "and", "service" ]
train
https://github.com/datasift/datasift-python/blob/bfaca1a47501a18e11065ecf630d9c31df818f65/datasift/limit.py#L10-L21
datasift/datasift-python
datasift/limit.py
Limit.list
def list(self, service, per_page=20, page=1): """ Get a list of limits for the given service :param service: The service that the limit is linked to :param per_page: The number of results per page returned :param page: The page number of the results :return: dict of REST API output with headers attached :rtype: :class:`~datasift.request.DictResponse` :raises: :class:`~datasift.exceptions.DataSiftApiException`, :class:`requests.exceptions.HTTPError` """ params = {'per_page': per_page, 'page': page} return self.request.get('limit/' + service, params)
python
def list(self, service, per_page=20, page=1): """ Get a list of limits for the given service :param service: The service that the limit is linked to :param per_page: The number of results per page returned :param page: The page number of the results :return: dict of REST API output with headers attached :rtype: :class:`~datasift.request.DictResponse` :raises: :class:`~datasift.exceptions.DataSiftApiException`, :class:`requests.exceptions.HTTPError` """ params = {'per_page': per_page, 'page': page} return self.request.get('limit/' + service, params)
[ "def", "list", "(", "self", ",", "service", ",", "per_page", "=", "20", ",", "page", "=", "1", ")", ":", "params", "=", "{", "'per_page'", ":", "per_page", ",", "'page'", ":", "page", "}", "return", "self", ".", "request", ".", "get", "(", "'limit/...
Get a list of limits for the given service :param service: The service that the limit is linked to :param per_page: The number of results per page returned :param page: The page number of the results :return: dict of REST API output with headers attached :rtype: :class:`~datasift.request.DictResponse` :raises: :class:`~datasift.exceptions.DataSiftApiException`, :class:`requests.exceptions.HTTPError`
[ "Get", "a", "list", "of", "limits", "for", "the", "given", "service" ]
train
https://github.com/datasift/datasift-python/blob/bfaca1a47501a18e11065ecf630d9c31df818f65/datasift/limit.py#L23-L37
datasift/datasift-python
datasift/limit.py
Limit.create
def create(self, identity_id, service, total_allowance=None, analyze_queries=None): """ Create the limit :param identity_id: The ID of the identity to retrieve :param service: The service that the token is linked to :param total_allowance: The total allowance for this token's limit :param analyze_queries: The number of analyze calls :return: dict of REST API output with headers attached :rtype: :class:`~datasift.request.DictResponse` :raises: :class:`~datasift.exceptions.DataSiftApiException`, :class:`requests.exceptions.HTTPError` """ params = {'service': service} if total_allowance is not None: params['total_allowance'] = total_allowance if analyze_queries is not None: params['analyze_queries'] = analyze_queries return self.request.post(str(identity_id) + '/limit/', params)
python
def create(self, identity_id, service, total_allowance=None, analyze_queries=None): """ Create the limit :param identity_id: The ID of the identity to retrieve :param service: The service that the token is linked to :param total_allowance: The total allowance for this token's limit :param analyze_queries: The number of analyze calls :return: dict of REST API output with headers attached :rtype: :class:`~datasift.request.DictResponse` :raises: :class:`~datasift.exceptions.DataSiftApiException`, :class:`requests.exceptions.HTTPError` """ params = {'service': service} if total_allowance is not None: params['total_allowance'] = total_allowance if analyze_queries is not None: params['analyze_queries'] = analyze_queries return self.request.post(str(identity_id) + '/limit/', params)
[ "def", "create", "(", "self", ",", "identity_id", ",", "service", ",", "total_allowance", "=", "None", ",", "analyze_queries", "=", "None", ")", ":", "params", "=", "{", "'service'", ":", "service", "}", "if", "total_allowance", "is", "not", "None", ":", ...
Create the limit :param identity_id: The ID of the identity to retrieve :param service: The service that the token is linked to :param total_allowance: The total allowance for this token's limit :param analyze_queries: The number of analyze calls :return: dict of REST API output with headers attached :rtype: :class:`~datasift.request.DictResponse` :raises: :class:`~datasift.exceptions.DataSiftApiException`, :class:`requests.exceptions.HTTPError`
[ "Create", "the", "limit" ]
train
https://github.com/datasift/datasift-python/blob/bfaca1a47501a18e11065ecf630d9c31df818f65/datasift/limit.py#L39-L59
datasift/datasift-python
datasift/limit.py
Limit.update
def update(self, identity_id, service, total_allowance=None, analyze_queries=None): """ Update the limit :param identity_id: The ID of the identity to retrieve :param service: The service that the token is linked to :param total_allowance: The total allowance for this token's limit :param analyze_queries: The number of analyze calls :return: dict of REST API output with headers attached :rtype: :class:`~datasift.request.DictResponse` :raises: :class:`~datasift.exceptions.DataSiftApiException`, :class:`requests.exceptions.HTTPError` """ params = {'service': service} if total_allowance is not None: params['total_allowance'] = total_allowance if analyze_queries is not None: params['analyze_queries'] = analyze_queries return self.request.put(str(identity_id) + '/limit/' + service, params)
python
def update(self, identity_id, service, total_allowance=None, analyze_queries=None): """ Update the limit :param identity_id: The ID of the identity to retrieve :param service: The service that the token is linked to :param total_allowance: The total allowance for this token's limit :param analyze_queries: The number of analyze calls :return: dict of REST API output with headers attached :rtype: :class:`~datasift.request.DictResponse` :raises: :class:`~datasift.exceptions.DataSiftApiException`, :class:`requests.exceptions.HTTPError` """ params = {'service': service} if total_allowance is not None: params['total_allowance'] = total_allowance if analyze_queries is not None: params['analyze_queries'] = analyze_queries return self.request.put(str(identity_id) + '/limit/' + service, params)
[ "def", "update", "(", "self", ",", "identity_id", ",", "service", ",", "total_allowance", "=", "None", ",", "analyze_queries", "=", "None", ")", ":", "params", "=", "{", "'service'", ":", "service", "}", "if", "total_allowance", "is", "not", "None", ":", ...
Update the limit :param identity_id: The ID of the identity to retrieve :param service: The service that the token is linked to :param total_allowance: The total allowance for this token's limit :param analyze_queries: The number of analyze calls :return: dict of REST API output with headers attached :rtype: :class:`~datasift.request.DictResponse` :raises: :class:`~datasift.exceptions.DataSiftApiException`, :class:`requests.exceptions.HTTPError`
[ "Update", "the", "limit" ]
train
https://github.com/datasift/datasift-python/blob/bfaca1a47501a18e11065ecf630d9c31df818f65/datasift/limit.py#L61-L79
datasift/datasift-python
datasift/limit.py
Limit.delete
def delete(self, identity_id, service): """ Delete the limit for the given identity and service :param identity_id: The ID of the identity to retrieve :param service: The service that the token is linked to :return: dict of REST API output with headers attached :rtype: :class:`~datasift.request.DictResponse` :raises: :class:`~datasift.exceptions.DataSiftApiException`, :class:`requests.exceptions.HTTPError` """ return self.request.delete(str(identity_id) + '/limit/' + service)
python
def delete(self, identity_id, service): """ Delete the limit for the given identity and service :param identity_id: The ID of the identity to retrieve :param service: The service that the token is linked to :return: dict of REST API output with headers attached :rtype: :class:`~datasift.request.DictResponse` :raises: :class:`~datasift.exceptions.DataSiftApiException`, :class:`requests.exceptions.HTTPError` """ return self.request.delete(str(identity_id) + '/limit/' + service)
[ "def", "delete", "(", "self", ",", "identity_id", ",", "service", ")", ":", "return", "self", ".", "request", ".", "delete", "(", "str", "(", "identity_id", ")", "+", "'/limit/'", "+", "service", ")" ]
Delete the limit for the given identity and service :param identity_id: The ID of the identity to retrieve :param service: The service that the token is linked to :return: dict of REST API output with headers attached :rtype: :class:`~datasift.request.DictResponse` :raises: :class:`~datasift.exceptions.DataSiftApiException`, :class:`requests.exceptions.HTTPError`
[ "Delete", "the", "limit", "for", "the", "given", "identity", "and", "service" ]
train
https://github.com/datasift/datasift-python/blob/bfaca1a47501a18e11065ecf630d9c31df818f65/datasift/limit.py#L81-L92
yunojuno-archive/django-package-monitor
package_monitor/views.py
reload
def reload(request): """Reload local requirements file.""" refresh_packages.clean() refresh_packages.local() refresh_packages.remote() url = request.META.get('HTTP_REFERER') if url: return HttpResponseRedirect(url) else: return HttpResponse('Local requirements list has been reloaded.')
python
def reload(request): """Reload local requirements file.""" refresh_packages.clean() refresh_packages.local() refresh_packages.remote() url = request.META.get('HTTP_REFERER') if url: return HttpResponseRedirect(url) else: return HttpResponse('Local requirements list has been reloaded.')
[ "def", "reload", "(", "request", ")", ":", "refresh_packages", ".", "clean", "(", ")", "refresh_packages", ".", "local", "(", ")", "refresh_packages", ".", "remote", "(", ")", "url", "=", "request", ".", "META", ".", "get", "(", "'HTTP_REFERER'", ")", "i...
Reload local requirements file.
[ "Reload", "local", "requirements", "file", "." ]
train
https://github.com/yunojuno-archive/django-package-monitor/blob/534aa35ccfe187d2c55aeca0cb52b8278254e437/package_monitor/views.py#L8-L17
mattaustin/django-storages-s3upload
s3upload/forms.py
ValidateS3UploadForm._generate_processed_key_name
def _generate_processed_key_name(process_to, upload_name): """Returns a key name to use after processing based on timestamp and upload key name.""" timestamp = datetime.now().strftime('%Y%m%d%H%M%S%f') name, extension = os.path.splitext(upload_name) digest = md5(''.join([timestamp, upload_name])).hexdigest() return os.path.join(process_to, '{0}.{1}'.format(digest, extension))
python
def _generate_processed_key_name(process_to, upload_name): """Returns a key name to use after processing based on timestamp and upload key name.""" timestamp = datetime.now().strftime('%Y%m%d%H%M%S%f') name, extension = os.path.splitext(upload_name) digest = md5(''.join([timestamp, upload_name])).hexdigest() return os.path.join(process_to, '{0}.{1}'.format(digest, extension))
[ "def", "_generate_processed_key_name", "(", "process_to", ",", "upload_name", ")", ":", "timestamp", "=", "datetime", ".", "now", "(", ")", ".", "strftime", "(", "'%Y%m%d%H%M%S%f'", ")", "name", ",", "extension", "=", "os", ".", "path", ".", "splitext", "(",...
Returns a key name to use after processing based on timestamp and upload key name.
[ "Returns", "a", "key", "name", "to", "use", "after", "processing", "based", "on", "timestamp", "and", "upload", "key", "name", "." ]
train
https://github.com/mattaustin/django-storages-s3upload/blob/d4e6b4799aa32de469a00e75732a18716845d6b8/s3upload/forms.py#L269-L275
mattaustin/django-storages-s3upload
s3upload/forms.py
ValidateS3UploadForm.clean_bucket_name
def clean_bucket_name(self): """Validates that the bucket name in the provided data matches the bucket name from the storage backend.""" bucket_name = self.cleaned_data['bucket_name'] if not bucket_name == self.get_bucket_name(): raise forms.ValidationError('Bucket name does not validate.') return bucket_name
python
def clean_bucket_name(self): """Validates that the bucket name in the provided data matches the bucket name from the storage backend.""" bucket_name = self.cleaned_data['bucket_name'] if not bucket_name == self.get_bucket_name(): raise forms.ValidationError('Bucket name does not validate.') return bucket_name
[ "def", "clean_bucket_name", "(", "self", ")", ":", "bucket_name", "=", "self", ".", "cleaned_data", "[", "'bucket_name'", "]", "if", "not", "bucket_name", "==", "self", ".", "get_bucket_name", "(", ")", ":", "raise", "forms", ".", "ValidationError", "(", "'B...
Validates that the bucket name in the provided data matches the bucket name from the storage backend.
[ "Validates", "that", "the", "bucket", "name", "in", "the", "provided", "data", "matches", "the", "bucket", "name", "from", "the", "storage", "backend", "." ]
train
https://github.com/mattaustin/django-storages-s3upload/blob/d4e6b4799aa32de469a00e75732a18716845d6b8/s3upload/forms.py#L292-L298
mattaustin/django-storages-s3upload
s3upload/forms.py
ValidateS3UploadForm.clean_key_name
def clean_key_name(self): """Validates that the key in the provided data starts with the required prefix, and that it exists in the bucket.""" key = self.cleaned_data['key_name'] # Ensure key starts with prefix if not key.startswith(self.get_key_prefix()): raise forms.ValidationError('Key does not have required prefix.') # Ensure key exists if not self.get_upload_key(): raise forms.ValidationError('Key does not exist.') return key
python
def clean_key_name(self): """Validates that the key in the provided data starts with the required prefix, and that it exists in the bucket.""" key = self.cleaned_data['key_name'] # Ensure key starts with prefix if not key.startswith(self.get_key_prefix()): raise forms.ValidationError('Key does not have required prefix.') # Ensure key exists if not self.get_upload_key(): raise forms.ValidationError('Key does not exist.') return key
[ "def", "clean_key_name", "(", "self", ")", ":", "key", "=", "self", ".", "cleaned_data", "[", "'key_name'", "]", "# Ensure key starts with prefix", "if", "not", "key", ".", "startswith", "(", "self", ".", "get_key_prefix", "(", ")", ")", ":", "raise", "forms...
Validates that the key in the provided data starts with the required prefix, and that it exists in the bucket.
[ "Validates", "that", "the", "key", "in", "the", "provided", "data", "starts", "with", "the", "required", "prefix", "and", "that", "it", "exists", "in", "the", "bucket", "." ]
train
https://github.com/mattaustin/django-storages-s3upload/blob/d4e6b4799aa32de469a00e75732a18716845d6b8/s3upload/forms.py#L300-L310
mattaustin/django-storages-s3upload
s3upload/forms.py
ValidateS3UploadForm.get_processed_key_name
def get_processed_key_name(self): """Return the full path to use for the processed file.""" if not hasattr(self, '_processed_key_name'): path, upload_name = os.path.split(self.get_upload_key().name) key_name = self._generate_processed_key_name( self.process_to, upload_name) self._processed_key_name = os.path.join( self.get_storage().location, key_name) return self._processed_key_name
python
def get_processed_key_name(self): """Return the full path to use for the processed file.""" if not hasattr(self, '_processed_key_name'): path, upload_name = os.path.split(self.get_upload_key().name) key_name = self._generate_processed_key_name( self.process_to, upload_name) self._processed_key_name = os.path.join( self.get_storage().location, key_name) return self._processed_key_name
[ "def", "get_processed_key_name", "(", "self", ")", ":", "if", "not", "hasattr", "(", "self", ",", "'_processed_key_name'", ")", ":", "path", ",", "upload_name", "=", "os", ".", "path", ".", "split", "(", "self", ".", "get_upload_key", "(", ")", ".", "nam...
Return the full path to use for the processed file.
[ "Return", "the", "full", "path", "to", "use", "for", "the", "processed", "file", "." ]
train
https://github.com/mattaustin/django-storages-s3upload/blob/d4e6b4799aa32de469a00e75732a18716845d6b8/s3upload/forms.py#L316-L324
mattaustin/django-storages-s3upload
s3upload/forms.py
ValidateS3UploadForm.get_processed_path
def get_processed_path(self): """Returns the processed file path from the storage backend. :returns: File path from the storage backend. :rtype: :py:class:`unicode` """ location = self.get_storage().location return self.get_processed_key_name()[len(location):]
python
def get_processed_path(self): """Returns the processed file path from the storage backend. :returns: File path from the storage backend. :rtype: :py:class:`unicode` """ location = self.get_storage().location return self.get_processed_key_name()[len(location):]
[ "def", "get_processed_path", "(", "self", ")", ":", "location", "=", "self", ".", "get_storage", "(", ")", ".", "location", "return", "self", ".", "get_processed_key_name", "(", ")", "[", "len", "(", "location", ")", ":", "]" ]
Returns the processed file path from the storage backend. :returns: File path from the storage backend. :rtype: :py:class:`unicode`
[ "Returns", "the", "processed", "file", "path", "from", "the", "storage", "backend", "." ]
train
https://github.com/mattaustin/django-storages-s3upload/blob/d4e6b4799aa32de469a00e75732a18716845d6b8/s3upload/forms.py#L326-L334
mattaustin/django-storages-s3upload
s3upload/forms.py
ValidateS3UploadForm.process_upload
def process_upload(self, set_content_type=True): """Process the uploaded file.""" metadata = self.get_upload_key_metadata() if set_content_type: content_type = self.get_upload_content_type() metadata.update({b'Content-Type': b'{0}'.format(content_type)}) upload_key = self.get_upload_key() processed_key_name = self.get_processed_key_name() processed_key = upload_key.copy(upload_key.bucket.name, processed_key_name, metadata) processed_key.set_acl(self.get_processed_acl()) upload_key.delete() return processed_key
python
def process_upload(self, set_content_type=True): """Process the uploaded file.""" metadata = self.get_upload_key_metadata() if set_content_type: content_type = self.get_upload_content_type() metadata.update({b'Content-Type': b'{0}'.format(content_type)}) upload_key = self.get_upload_key() processed_key_name = self.get_processed_key_name() processed_key = upload_key.copy(upload_key.bucket.name, processed_key_name, metadata) processed_key.set_acl(self.get_processed_acl()) upload_key.delete() return processed_key
[ "def", "process_upload", "(", "self", ",", "set_content_type", "=", "True", ")", ":", "metadata", "=", "self", ".", "get_upload_key_metadata", "(", ")", "if", "set_content_type", ":", "content_type", "=", "self", ".", "get_upload_content_type", "(", ")", "metada...
Process the uploaded file.
[ "Process", "the", "uploaded", "file", "." ]
train
https://github.com/mattaustin/django-storages-s3upload/blob/d4e6b4799aa32de469a00e75732a18716845d6b8/s3upload/forms.py#L336-L350
mattaustin/django-storages-s3upload
s3upload/forms.py
ValidateS3UploadForm.get_upload_content_type
def get_upload_content_type(self): """Determine the actual content type of the upload.""" if not hasattr(self, '_upload_content_type'): with self.get_storage().open(self.get_upload_path()) as upload: content_type = Magic(mime=True).from_buffer(upload.read(1024)) self._upload_content_type = content_type return self._upload_content_type
python
def get_upload_content_type(self): """Determine the actual content type of the upload.""" if not hasattr(self, '_upload_content_type'): with self.get_storage().open(self.get_upload_path()) as upload: content_type = Magic(mime=True).from_buffer(upload.read(1024)) self._upload_content_type = content_type return self._upload_content_type
[ "def", "get_upload_content_type", "(", "self", ")", ":", "if", "not", "hasattr", "(", "self", ",", "'_upload_content_type'", ")", ":", "with", "self", ".", "get_storage", "(", ")", ".", "open", "(", "self", ".", "get_upload_path", "(", ")", ")", "as", "u...
Determine the actual content type of the upload.
[ "Determine", "the", "actual", "content", "type", "of", "the", "upload", "." ]
train
https://github.com/mattaustin/django-storages-s3upload/blob/d4e6b4799aa32de469a00e75732a18716845d6b8/s3upload/forms.py#L353-L359
mattaustin/django-storages-s3upload
s3upload/forms.py
ValidateS3UploadForm.get_upload_key
def get_upload_key(self): """Get the `Key` from the S3 bucket for the uploaded file. :returns: Key (object) of the uploaded file. :rtype: :py:class:`boto.s3.key.Key` """ if not hasattr(self, '_upload_key'): self._upload_key = self.get_storage().bucket.get_key( self.cleaned_data['key_name']) return self._upload_key
python
def get_upload_key(self): """Get the `Key` from the S3 bucket for the uploaded file. :returns: Key (object) of the uploaded file. :rtype: :py:class:`boto.s3.key.Key` """ if not hasattr(self, '_upload_key'): self._upload_key = self.get_storage().bucket.get_key( self.cleaned_data['key_name']) return self._upload_key
[ "def", "get_upload_key", "(", "self", ")", ":", "if", "not", "hasattr", "(", "self", ",", "'_upload_key'", ")", ":", "self", ".", "_upload_key", "=", "self", ".", "get_storage", "(", ")", ".", "bucket", ".", "get_key", "(", "self", ".", "cleaned_data", ...
Get the `Key` from the S3 bucket for the uploaded file. :returns: Key (object) of the uploaded file. :rtype: :py:class:`boto.s3.key.Key`
[ "Get", "the", "Key", "from", "the", "S3", "bucket", "for", "the", "uploaded", "file", "." ]
train
https://github.com/mattaustin/django-storages-s3upload/blob/d4e6b4799aa32de469a00e75732a18716845d6b8/s3upload/forms.py#L361-L372
mattaustin/django-storages-s3upload
s3upload/forms.py
ValidateS3UploadForm.get_upload_key_metadata
def get_upload_key_metadata(self): """Generate metadata dictionary from a bucket key.""" key = self.get_upload_key() metadata = key.metadata.copy() # Some http header properties which are stored on the key need to be # copied to the metadata when updating headers = { # http header name, key attribute name 'Cache-Control': 'cache_control', 'Content-Type': 'content_type', 'Content-Disposition': 'content_disposition', 'Content-Encoding': 'content_encoding', } for header_name, attribute_name in headers.items(): attribute_value = getattr(key, attribute_name, False) if attribute_value: metadata.update({b'{0}'.format(header_name): b'{0}'.format(attribute_value)}) return metadata
python
def get_upload_key_metadata(self): """Generate metadata dictionary from a bucket key.""" key = self.get_upload_key() metadata = key.metadata.copy() # Some http header properties which are stored on the key need to be # copied to the metadata when updating headers = { # http header name, key attribute name 'Cache-Control': 'cache_control', 'Content-Type': 'content_type', 'Content-Disposition': 'content_disposition', 'Content-Encoding': 'content_encoding', } for header_name, attribute_name in headers.items(): attribute_value = getattr(key, attribute_name, False) if attribute_value: metadata.update({b'{0}'.format(header_name): b'{0}'.format(attribute_value)}) return metadata
[ "def", "get_upload_key_metadata", "(", "self", ")", ":", "key", "=", "self", ".", "get_upload_key", "(", ")", "metadata", "=", "key", ".", "metadata", ".", "copy", "(", ")", "# Some http header properties which are stored on the key need to be", "# copied to the metadat...
Generate metadata dictionary from a bucket key.
[ "Generate", "metadata", "dictionary", "from", "a", "bucket", "key", "." ]
train
https://github.com/mattaustin/django-storages-s3upload/blob/d4e6b4799aa32de469a00e75732a18716845d6b8/s3upload/forms.py#L374-L394
mattaustin/django-storages-s3upload
s3upload/forms.py
ValidateS3UploadForm.get_upload_path
def get_upload_path(self): """Returns the uploaded file path from the storage backend. :returns: File path from the storage backend. :rtype: :py:class:`unicode` """ location = self.get_storage().location return self.cleaned_data['key_name'][len(location):]
python
def get_upload_path(self): """Returns the uploaded file path from the storage backend. :returns: File path from the storage backend. :rtype: :py:class:`unicode` """ location = self.get_storage().location return self.cleaned_data['key_name'][len(location):]
[ "def", "get_upload_path", "(", "self", ")", ":", "location", "=", "self", ".", "get_storage", "(", ")", ".", "location", "return", "self", ".", "cleaned_data", "[", "'key_name'", "]", "[", "len", "(", "location", ")", ":", "]" ]
Returns the uploaded file path from the storage backend. :returns: File path from the storage backend. :rtype: :py:class:`unicode`
[ "Returns", "the", "uploaded", "file", "path", "from", "the", "storage", "backend", "." ]
train
https://github.com/mattaustin/django-storages-s3upload/blob/d4e6b4799aa32de469a00e75732a18716845d6b8/s3upload/forms.py#L396-L404
brunobord/md2ebook
md2ebook/ui.py
ask
def ask(question, escape=True): "Return the answer" answer = raw_input(question) if escape: answer.replace('"', '\\"') return answer.decode('utf')
python
def ask(question, escape=True): "Return the answer" answer = raw_input(question) if escape: answer.replace('"', '\\"') return answer.decode('utf')
[ "def", "ask", "(", "question", ",", "escape", "=", "True", ")", ":", "answer", "=", "raw_input", "(", "question", ")", "if", "escape", ":", "answer", ".", "replace", "(", "'\"'", ",", "'\\\\\"'", ")", "return", "answer", ".", "decode", "(", "'utf'", ...
Return the answer
[ "Return", "the", "answer" ]
train
https://github.com/brunobord/md2ebook/blob/31e0d06b77f2d986e6af1115c9e613dfec0591a9/md2ebook/ui.py#L14-L19
trisk/pysesame
pysesame/sesame.py
Sesame.update_state
def update_state(self, cache=True): """Update the internal state of the Sesame.""" self.use_cached_state = cache endpoint = API_SESAME_ENDPOINT.format(self._device_id) response = self.account.request('GET', endpoint) if response is None or response.status_code != 200: return state = json.loads(response.text) self._nickname = state['nickname'] self._is_unlocked = state['is_unlocked'] self._api_enabled = state['api_enabled'] self._battery = state['battery']
python
def update_state(self, cache=True): """Update the internal state of the Sesame.""" self.use_cached_state = cache endpoint = API_SESAME_ENDPOINT.format(self._device_id) response = self.account.request('GET', endpoint) if response is None or response.status_code != 200: return state = json.loads(response.text) self._nickname = state['nickname'] self._is_unlocked = state['is_unlocked'] self._api_enabled = state['api_enabled'] self._battery = state['battery']
[ "def", "update_state", "(", "self", ",", "cache", "=", "True", ")", ":", "self", ".", "use_cached_state", "=", "cache", "endpoint", "=", "API_SESAME_ENDPOINT", ".", "format", "(", "self", ".", "_device_id", ")", "response", "=", "self", ".", "account", "."...
Update the internal state of the Sesame.
[ "Update", "the", "internal", "state", "of", "the", "Sesame", "." ]
train
https://github.com/trisk/pysesame/blob/8f9df4a478cf8f328ec8185bcac7c8704cbd9c01/pysesame/sesame.py#L31-L44
trisk/pysesame
pysesame/sesame.py
Sesame.lock
def lock(self): """Lock the Sesame. Return True on success, else False.""" endpoint = API_SESAME_CONTROL_ENDPOINT.format(self.device_id) payload = {'type': 'lock'} response = self.account.request('POST', endpoint, payload=payload) if response is None: return False if response.status_code == 200 or response.status_code == 204: return True return False
python
def lock(self): """Lock the Sesame. Return True on success, else False.""" endpoint = API_SESAME_CONTROL_ENDPOINT.format(self.device_id) payload = {'type': 'lock'} response = self.account.request('POST', endpoint, payload=payload) if response is None: return False if response.status_code == 200 or response.status_code == 204: return True return False
[ "def", "lock", "(", "self", ")", ":", "endpoint", "=", "API_SESAME_CONTROL_ENDPOINT", ".", "format", "(", "self", ".", "device_id", ")", "payload", "=", "{", "'type'", ":", "'lock'", "}", "response", "=", "self", ".", "account", ".", "request", "(", "'PO...
Lock the Sesame. Return True on success, else False.
[ "Lock", "the", "Sesame", ".", "Return", "True", "on", "success", "else", "False", "." ]
train
https://github.com/trisk/pysesame/blob/8f9df4a478cf8f328ec8185bcac7c8704cbd9c01/pysesame/sesame.py#L87-L96
ClimateImpactLab/DataFS
datafs/config/config_file.py
ConfigFile.write_config_from_api
def write_config_from_api(self, api, config_file=None, profile=None): ''' Create/update the config file from a DataAPI object Parameters ---------- api : object The :py:class:`datafs.DataAPI` object from which to create the config profile profile : str Name of the profile to use in the config file (default "default-profile") config_file : str or file Path or file in which to write config (default is your OS's default datafs application directory) Examples -------- Create a simple API and then write the config to a buffer: .. code-block:: python >>> from datafs import DataAPI >>> from datafs.managers.manager_mongo import MongoDBManager >>> from fs.osfs import OSFS >>> from fs.tempfs import TempFS >>> import os >>> import tempfile >>> import shutil >>> >>> api = DataAPI( ... username='My Name', ... contact = 'me@demo.com') >>> >>> manager = MongoDBManager( ... database_name = 'MyDatabase', ... table_name = 'DataFiles') >>> >>> manager.create_archive_table( ... 'DataFiles', ... raise_on_err=False) >>> >>> api.attach_manager(manager) >>> >>> tmpdir = tempfile.mkdtemp() >>> local = OSFS(tmpdir) >>> >>> api.attach_authority('local', local) >>> >>> # Create a StringIO object for the config file ... >>> try: ... from StringIO import StringIO ... except ImportError: ... from io import StringIO ... >>> conf = StringIO() >>> >>> config_file = ConfigFile(default_profile='my-api') >>> config_file.write_config_from_api( ... api, ... profile='my-api', ... config_file=conf) >>> >>> print(conf.getvalue()) # doctest: +SKIP default-profile: my-api profiles: my-api: api: user_config: {contact: me@demo.com, username: My Name} authorities: local: args: [...] service: OSFS kwargs: {} manager: args: [] class: MongoDBManager kwargs: client_kwargs: {} database_name: MyDatabase table_name: DataFiles <BLANKLINE> >>> conf.close() >>> local.close() >>> shutil.rmtree(tmpdir) At this point, we can retrieve the api object from the configuration file: .. code-block:: python >>> try: ... 
from StringIO import StringIO ... except ImportError: ... from io import StringIO ... >>> conf = StringIO(""" ... default-profile: my-api ... profiles: ... my-api: ... api: ... user_config: {contact: me@demo.com, username: My Name} ... authorities: ... local: ... args: [] ... service: TempFS ... kwargs: {} ... manager: ... args: [] ... class: MongoDBManager ... kwargs: ... client_kwargs: {} ... database_name: MyDatabase ... table_name: DataFiles ... """) >>> >>> import datafs >>> from fs.tempfs import TempFS >>> api = datafs.get_api(profile='my-api', config_file=conf) >>> >>> cache = TempFS() >>> api.attach_cache(cache) >>> >>> conf2 = StringIO() >>> >>> config_file = ConfigFile(default_profile='my-api') >>> config_file.write_config_from_api( ... api, ... profile='my-api', ... config_file=conf2) >>> >>> print(conf2.getvalue()) # doctest: +SKIP default-profile: my-api profiles: my-api: api: user_config: {contact: me@demo.com, username: My Name} authorities: local: args: [] service: TempFS kwargs: {} cache: args: [] service: TempFS kwargs: {} manager: args: [] class: MongoDBManager kwargs: client_kwargs: {} database_name: MyDatabase table_name: DataFiles <BLANKLINE> ''' if profile is None: profile = self.default_profile self.get_config_from_api(api, profile) self.write_config(config_file)
python
def write_config_from_api(self, api, config_file=None, profile=None): ''' Create/update the config file from a DataAPI object Parameters ---------- api : object The :py:class:`datafs.DataAPI` object from which to create the config profile profile : str Name of the profile to use in the config file (default "default-profile") config_file : str or file Path or file in which to write config (default is your OS's default datafs application directory) Examples -------- Create a simple API and then write the config to a buffer: .. code-block:: python >>> from datafs import DataAPI >>> from datafs.managers.manager_mongo import MongoDBManager >>> from fs.osfs import OSFS >>> from fs.tempfs import TempFS >>> import os >>> import tempfile >>> import shutil >>> >>> api = DataAPI( ... username='My Name', ... contact = 'me@demo.com') >>> >>> manager = MongoDBManager( ... database_name = 'MyDatabase', ... table_name = 'DataFiles') >>> >>> manager.create_archive_table( ... 'DataFiles', ... raise_on_err=False) >>> >>> api.attach_manager(manager) >>> >>> tmpdir = tempfile.mkdtemp() >>> local = OSFS(tmpdir) >>> >>> api.attach_authority('local', local) >>> >>> # Create a StringIO object for the config file ... >>> try: ... from StringIO import StringIO ... except ImportError: ... from io import StringIO ... >>> conf = StringIO() >>> >>> config_file = ConfigFile(default_profile='my-api') >>> config_file.write_config_from_api( ... api, ... profile='my-api', ... config_file=conf) >>> >>> print(conf.getvalue()) # doctest: +SKIP default-profile: my-api profiles: my-api: api: user_config: {contact: me@demo.com, username: My Name} authorities: local: args: [...] service: OSFS kwargs: {} manager: args: [] class: MongoDBManager kwargs: client_kwargs: {} database_name: MyDatabase table_name: DataFiles <BLANKLINE> >>> conf.close() >>> local.close() >>> shutil.rmtree(tmpdir) At this point, we can retrieve the api object from the configuration file: .. code-block:: python >>> try: ... 
from StringIO import StringIO ... except ImportError: ... from io import StringIO ... >>> conf = StringIO(""" ... default-profile: my-api ... profiles: ... my-api: ... api: ... user_config: {contact: me@demo.com, username: My Name} ... authorities: ... local: ... args: [] ... service: TempFS ... kwargs: {} ... manager: ... args: [] ... class: MongoDBManager ... kwargs: ... client_kwargs: {} ... database_name: MyDatabase ... table_name: DataFiles ... """) >>> >>> import datafs >>> from fs.tempfs import TempFS >>> api = datafs.get_api(profile='my-api', config_file=conf) >>> >>> cache = TempFS() >>> api.attach_cache(cache) >>> >>> conf2 = StringIO() >>> >>> config_file = ConfigFile(default_profile='my-api') >>> config_file.write_config_from_api( ... api, ... profile='my-api', ... config_file=conf2) >>> >>> print(conf2.getvalue()) # doctest: +SKIP default-profile: my-api profiles: my-api: api: user_config: {contact: me@demo.com, username: My Name} authorities: local: args: [] service: TempFS kwargs: {} cache: args: [] service: TempFS kwargs: {} manager: args: [] class: MongoDBManager kwargs: client_kwargs: {} database_name: MyDatabase table_name: DataFiles <BLANKLINE> ''' if profile is None: profile = self.default_profile self.get_config_from_api(api, profile) self.write_config(config_file)
[ "def", "write_config_from_api", "(", "self", ",", "api", ",", "config_file", "=", "None", ",", "profile", "=", "None", ")", ":", "if", "profile", "is", "None", ":", "profile", "=", "self", ".", "default_profile", "self", ".", "get_config_from_api", "(", "a...
Create/update the config file from a DataAPI object Parameters ---------- api : object The :py:class:`datafs.DataAPI` object from which to create the config profile profile : str Name of the profile to use in the config file (default "default-profile") config_file : str or file Path or file in which to write config (default is your OS's default datafs application directory) Examples -------- Create a simple API and then write the config to a buffer: .. code-block:: python >>> from datafs import DataAPI >>> from datafs.managers.manager_mongo import MongoDBManager >>> from fs.osfs import OSFS >>> from fs.tempfs import TempFS >>> import os >>> import tempfile >>> import shutil >>> >>> api = DataAPI( ... username='My Name', ... contact = 'me@demo.com') >>> >>> manager = MongoDBManager( ... database_name = 'MyDatabase', ... table_name = 'DataFiles') >>> >>> manager.create_archive_table( ... 'DataFiles', ... raise_on_err=False) >>> >>> api.attach_manager(manager) >>> >>> tmpdir = tempfile.mkdtemp() >>> local = OSFS(tmpdir) >>> >>> api.attach_authority('local', local) >>> >>> # Create a StringIO object for the config file ... >>> try: ... from StringIO import StringIO ... except ImportError: ... from io import StringIO ... >>> conf = StringIO() >>> >>> config_file = ConfigFile(default_profile='my-api') >>> config_file.write_config_from_api( ... api, ... profile='my-api', ... config_file=conf) >>> >>> print(conf.getvalue()) # doctest: +SKIP default-profile: my-api profiles: my-api: api: user_config: {contact: me@demo.com, username: My Name} authorities: local: args: [...] service: OSFS kwargs: {} manager: args: [] class: MongoDBManager kwargs: client_kwargs: {} database_name: MyDatabase table_name: DataFiles <BLANKLINE> >>> conf.close() >>> local.close() >>> shutil.rmtree(tmpdir) At this point, we can retrieve the api object from the configuration file: .. code-block:: python >>> try: ... from StringIO import StringIO ... except ImportError: ... from io import StringIO ... 
>>> conf = StringIO(""" ... default-profile: my-api ... profiles: ... my-api: ... api: ... user_config: {contact: me@demo.com, username: My Name} ... authorities: ... local: ... args: [] ... service: TempFS ... kwargs: {} ... manager: ... args: [] ... class: MongoDBManager ... kwargs: ... client_kwargs: {} ... database_name: MyDatabase ... table_name: DataFiles ... """) >>> >>> import datafs >>> from fs.tempfs import TempFS >>> api = datafs.get_api(profile='my-api', config_file=conf) >>> >>> cache = TempFS() >>> api.attach_cache(cache) >>> >>> conf2 = StringIO() >>> >>> config_file = ConfigFile(default_profile='my-api') >>> config_file.write_config_from_api( ... api, ... profile='my-api', ... config_file=conf2) >>> >>> print(conf2.getvalue()) # doctest: +SKIP default-profile: my-api profiles: my-api: api: user_config: {contact: me@demo.com, username: My Name} authorities: local: args: [] service: TempFS kwargs: {} cache: args: [] service: TempFS kwargs: {} manager: args: [] class: MongoDBManager kwargs: client_kwargs: {} database_name: MyDatabase table_name: DataFiles <BLANKLINE>
[ "Create", "/", "update", "the", "config", "file", "from", "a", "DataAPI", "object" ]
train
https://github.com/ClimateImpactLab/DataFS/blob/0d32c2b4e18d300a11b748a552f6adbc3dd8f59d/datafs/config/config_file.py#L146-L313
ampledata/pypirc
pypirc/pypirc.py
PyPiRC.save
def save(self): """Saves pypirc file with new configuration information.""" for server, conf in self.servers.iteritems(): self._add_index_server() for conf_k, conf_v in conf.iteritems(): if not self.conf.has_section(server): self.conf.add_section(server) self.conf.set(server, conf_k, conf_v) with open(self.rc_file, 'wb') as configfile: self.conf.write(configfile) self.conf.read(self.rc_file)
python
def save(self): """Saves pypirc file with new configuration information.""" for server, conf in self.servers.iteritems(): self._add_index_server() for conf_k, conf_v in conf.iteritems(): if not self.conf.has_section(server): self.conf.add_section(server) self.conf.set(server, conf_k, conf_v) with open(self.rc_file, 'wb') as configfile: self.conf.write(configfile) self.conf.read(self.rc_file)
[ "def", "save", "(", "self", ")", ":", "for", "server", ",", "conf", "in", "self", ".", "servers", ".", "iteritems", "(", ")", ":", "self", ".", "_add_index_server", "(", ")", "for", "conf_k", ",", "conf_v", "in", "conf", ".", "iteritems", "(", ")", ...
Saves pypirc file with new configuration information.
[ "Saves", "pypirc", "file", "with", "new", "configuration", "information", "." ]
train
https://github.com/ampledata/pypirc/blob/c10397a4cf82e40591a075bcc79a5ececcaef4a4/pypirc/pypirc.py#L57-L68
ampledata/pypirc
pypirc/pypirc.py
PyPiRC._get_index_servers
def _get_index_servers(self): """Gets index-servers current configured in pypirc.""" idx_srvs = [] if 'index-servers' in self.conf.options('distutils'): idx = self.conf.get('distutils', 'index-servers') idx_srvs = [srv.strip() for srv in idx.split('\n') if srv.strip()] return idx_srvs
python
def _get_index_servers(self): """Gets index-servers current configured in pypirc.""" idx_srvs = [] if 'index-servers' in self.conf.options('distutils'): idx = self.conf.get('distutils', 'index-servers') idx_srvs = [srv.strip() for srv in idx.split('\n') if srv.strip()] return idx_srvs
[ "def", "_get_index_servers", "(", "self", ")", ":", "idx_srvs", "=", "[", "]", "if", "'index-servers'", "in", "self", ".", "conf", ".", "options", "(", "'distutils'", ")", ":", "idx", "=", "self", ".", "conf", ".", "get", "(", "'distutils'", ",", "'ind...
Gets index-servers current configured in pypirc.
[ "Gets", "index", "-", "servers", "current", "configured", "in", "pypirc", "." ]
train
https://github.com/ampledata/pypirc/blob/c10397a4cf82e40591a075bcc79a5ececcaef4a4/pypirc/pypirc.py#L70-L76
ampledata/pypirc
pypirc/pypirc.py
PyPiRC._add_index_server
def _add_index_server(self): """Adds index-server to 'distutil's 'index-servers' param.""" index_servers = '\n\t'.join(self.servers.keys()) self.conf.set('distutils', 'index-servers', index_servers)
python
def _add_index_server(self): """Adds index-server to 'distutil's 'index-servers' param.""" index_servers = '\n\t'.join(self.servers.keys()) self.conf.set('distutils', 'index-servers', index_servers)
[ "def", "_add_index_server", "(", "self", ")", ":", "index_servers", "=", "'\\n\\t'", ".", "join", "(", "self", ".", "servers", ".", "keys", "(", ")", ")", "self", ".", "conf", ".", "set", "(", "'distutils'", ",", "'index-servers'", ",", "index_servers", ...
Adds index-server to 'distutil's 'index-servers' param.
[ "Adds", "index", "-", "server", "to", "distutil", "s", "index", "-", "servers", "param", "." ]
train
https://github.com/ampledata/pypirc/blob/c10397a4cf82e40591a075bcc79a5ececcaef4a4/pypirc/pypirc.py#L78-L81
datasift/datasift-python
datasift/output_mapper.py
OutputMapper.outputmap
def outputmap(self, data): """ Internal function used to traverse a data structure and map the contents onto python-friendly objects inplace. This uses recursion, so try not to pass in anything that's over 255 objects deep. :param data: data structure :type data: any :param prefix: endpoint family, eg. sources, historics :type prefix: str :param endpoint: endpoint being called on the API :type endpoint: str :returns: Nothing, edits inplace """ if isinstance(data, list): for item in data: self.outputmap(item) elif isinstance(data, dict): for map_target in self.output_map: if map_target in data: data[map_target] = getattr(self, self.output_map[map_target])(data[map_target]) for item in data.values(): self.outputmap(item)
python
def outputmap(self, data): """ Internal function used to traverse a data structure and map the contents onto python-friendly objects inplace. This uses recursion, so try not to pass in anything that's over 255 objects deep. :param data: data structure :type data: any :param prefix: endpoint family, eg. sources, historics :type prefix: str :param endpoint: endpoint being called on the API :type endpoint: str :returns: Nothing, edits inplace """ if isinstance(data, list): for item in data: self.outputmap(item) elif isinstance(data, dict): for map_target in self.output_map: if map_target in data: data[map_target] = getattr(self, self.output_map[map_target])(data[map_target]) for item in data.values(): self.outputmap(item)
[ "def", "outputmap", "(", "self", ",", "data", ")", ":", "if", "isinstance", "(", "data", ",", "list", ")", ":", "for", "item", "in", "data", ":", "self", ".", "outputmap", "(", "item", ")", "elif", "isinstance", "(", "data", ",", "dict", ")", ":", ...
Internal function used to traverse a data structure and map the contents onto python-friendly objects inplace. This uses recursion, so try not to pass in anything that's over 255 objects deep. :param data: data structure :type data: any :param prefix: endpoint family, eg. sources, historics :type prefix: str :param endpoint: endpoint being called on the API :type endpoint: str :returns: Nothing, edits inplace
[ "Internal", "function", "used", "to", "traverse", "a", "data", "structure", "and", "map", "the", "contents", "onto", "python", "-", "friendly", "objects", "inplace", "." ]
train
https://github.com/datasift/datasift-python/blob/bfaca1a47501a18e11065ecf630d9c31df818f65/datasift/output_mapper.py#L43-L64
ECESeniorDesign/greenhouse_envmgmt
greenhouse_envmgmt/i2c_utility.py
TCA_select
def TCA_select(bus, addr, channel): """ This function will write to the control register of the TCA module to select the channel that will be exposed on the TCA module. After doing this, the desired module can be used as it would be normally. (The caller should use the address of the I2C sensor module. The TCA module is only written to when the channel is switched.) addr contains address of the TCA module channel specifies the desired channel on the TCA that will be used. Usage - Enable a channel TCA_select(bus, self.mux_addr, channel_to_enable) Channel to enable begins at 0 (enables first channel) ends at 3 (enables fourth channel) Usage - Disable all channels TCA_select(bus, self.mux_addr, "off") This call must be made whenever the sensor node is no longer being accessed. If this is not done, there will be addressing conflicts. """ if addr < 0x70 or addr > 0x77: print("The TCA address(" + str(addr) + ") is invalid. Aborting") return False if channel == "off": bus.write_byte(addr, 0) elif channel < 0 or channel > 3: print("The requested channel does not exist.") return False else: bus.write_byte(addr, 1 << channel) status = bus.read_byte(addr) return status
python
def TCA_select(bus, addr, channel): """ This function will write to the control register of the TCA module to select the channel that will be exposed on the TCA module. After doing this, the desired module can be used as it would be normally. (The caller should use the address of the I2C sensor module. The TCA module is only written to when the channel is switched.) addr contains address of the TCA module channel specifies the desired channel on the TCA that will be used. Usage - Enable a channel TCA_select(bus, self.mux_addr, channel_to_enable) Channel to enable begins at 0 (enables first channel) ends at 3 (enables fourth channel) Usage - Disable all channels TCA_select(bus, self.mux_addr, "off") This call must be made whenever the sensor node is no longer being accessed. If this is not done, there will be addressing conflicts. """ if addr < 0x70 or addr > 0x77: print("The TCA address(" + str(addr) + ") is invalid. Aborting") return False if channel == "off": bus.write_byte(addr, 0) elif channel < 0 or channel > 3: print("The requested channel does not exist.") return False else: bus.write_byte(addr, 1 << channel) status = bus.read_byte(addr) return status
[ "def", "TCA_select", "(", "bus", ",", "addr", ",", "channel", ")", ":", "if", "addr", "<", "0x70", "or", "addr", ">", "0x77", ":", "print", "(", "\"The TCA address(\"", "+", "str", "(", "addr", ")", "+", "\") is invalid. Aborting\"", ")", "return", "Fals...
This function will write to the control register of the TCA module to select the channel that will be exposed on the TCA module. After doing this, the desired module can be used as it would be normally. (The caller should use the address of the I2C sensor module. The TCA module is only written to when the channel is switched.) addr contains address of the TCA module channel specifies the desired channel on the TCA that will be used. Usage - Enable a channel TCA_select(bus, self.mux_addr, channel_to_enable) Channel to enable begins at 0 (enables first channel) ends at 3 (enables fourth channel) Usage - Disable all channels TCA_select(bus, self.mux_addr, "off") This call must be made whenever the sensor node is no longer being accessed. If this is not done, there will be addressing conflicts.
[ "This", "function", "will", "write", "to", "the", "control", "register", "of", "the", "TCA", "module", "to", "select", "the", "channel", "that", "will", "be", "exposed", "on", "the", "TCA", "module", ".", "After", "doing", "this", "the", "desired", "module...
train
https://github.com/ECESeniorDesign/greenhouse_envmgmt/blob/864e0ce98bfb220f9954026913a5470536de9818/greenhouse_envmgmt/i2c_utility.py#L6-L40
ECESeniorDesign/greenhouse_envmgmt
greenhouse_envmgmt/i2c_utility.py
get_ADC_value
def get_ADC_value(bus, addr, channel): """ This method selects a channel and initiates conversion The ADC operates at 240 SPS (12 bits) with 1x gain One shot conversions are used, meaning a wait period is needed in order to acquire new data. This is done via a constant poll of the ready bit. Upon completion, a voltage value is returned to the caller. Usage - ADC_start(bus, SensorCluster.ADC_addr, channel_to_read) IMPORTANT NOTE: The ADC uses a 2.048V voltage reference """ if channel == 1: INIT = 0b10000000 elif channel == 2: INIT = 0b10100000 elif channel == 3: INIT = 0b11000000 elif channel == 4: INIT = 0b11100000 bus.write_byte(addr, INIT) data = bus.read_i2c_block_data(addr, 0, 3) status = (data[2] & 0b10000000) >> 7 while(status == 1): data = bus.read_i2c_block_data(addr, 0, 3) status = (data[2] & 0b10000000) >> 7 sign = data[0] & 0b00001000 val = ((data[0] & 0b0000111) << 8) | (data[1]) if (sign == 1): val = (val ^ 0x3ff) + 1 # compute 2s complement for 12 bit val # Convert val to a ratiomerical ADC reading return float(val) * 2.048 / float(2047)
python
def get_ADC_value(bus, addr, channel): """ This method selects a channel and initiates conversion The ADC operates at 240 SPS (12 bits) with 1x gain One shot conversions are used, meaning a wait period is needed in order to acquire new data. This is done via a constant poll of the ready bit. Upon completion, a voltage value is returned to the caller. Usage - ADC_start(bus, SensorCluster.ADC_addr, channel_to_read) IMPORTANT NOTE: The ADC uses a 2.048V voltage reference """ if channel == 1: INIT = 0b10000000 elif channel == 2: INIT = 0b10100000 elif channel == 3: INIT = 0b11000000 elif channel == 4: INIT = 0b11100000 bus.write_byte(addr, INIT) data = bus.read_i2c_block_data(addr, 0, 3) status = (data[2] & 0b10000000) >> 7 while(status == 1): data = bus.read_i2c_block_data(addr, 0, 3) status = (data[2] & 0b10000000) >> 7 sign = data[0] & 0b00001000 val = ((data[0] & 0b0000111) << 8) | (data[1]) if (sign == 1): val = (val ^ 0x3ff) + 1 # compute 2s complement for 12 bit val # Convert val to a ratiomerical ADC reading return float(val) * 2.048 / float(2047)
[ "def", "get_ADC_value", "(", "bus", ",", "addr", ",", "channel", ")", ":", "if", "channel", "==", "1", ":", "INIT", "=", "0b10000000", "elif", "channel", "==", "2", ":", "INIT", "=", "0b10100000", "elif", "channel", "==", "3", ":", "INIT", "=", "0b11...
This method selects a channel and initiates conversion The ADC operates at 240 SPS (12 bits) with 1x gain One shot conversions are used, meaning a wait period is needed in order to acquire new data. This is done via a constant poll of the ready bit. Upon completion, a voltage value is returned to the caller. Usage - ADC_start(bus, SensorCluster.ADC_addr, channel_to_read) IMPORTANT NOTE: The ADC uses a 2.048V voltage reference
[ "This", "method", "selects", "a", "channel", "and", "initiates", "conversion", "The", "ADC", "operates", "at", "240", "SPS", "(", "12", "bits", ")", "with", "1x", "gain", "One", "shot", "conversions", "are", "used", "meaning", "a", "wait", "period", "is", ...
train
https://github.com/ECESeniorDesign/greenhouse_envmgmt/blob/864e0ce98bfb220f9954026913a5470536de9818/greenhouse_envmgmt/i2c_utility.py#L43-L77
ECESeniorDesign/greenhouse_envmgmt
greenhouse_envmgmt/i2c_utility.py
IO_expander_output
def IO_expander_output(bus, addr, bank, mask): """ Method for controlling the GPIO expander via I2C which accepts a bank - A(0) or B(1) and a mask to push to the pins of the expander. The method also assumes the the expander is operating in sequential mode. If this mode is not used, the register addresses will need to be changed. Usage: GPIO_out(bus, GPIO_addr, 0, 0b00011111) This call would turn on A0 through A4. """ IODIR_map = [0x00, 0x01] output_map = [0x14, 0x15] if (bank != 0) and (bank != 1): print() raise InvalidIOUsage("An invalid IO bank has been selected") IO_direction = IODIR_map[bank] output_reg = output_map[bank] current_status = bus.read_byte_data(addr, output_reg) if current_status == mask: # This means nothing needs to happen print("Current control status matches requested controls. " + "No action is required.") return True bus.write_byte_data(addr, IO_direction, 0) bus.write_byte_data(addr, output_reg, mask)
python
def IO_expander_output(bus, addr, bank, mask): """ Method for controlling the GPIO expander via I2C which accepts a bank - A(0) or B(1) and a mask to push to the pins of the expander. The method also assumes the the expander is operating in sequential mode. If this mode is not used, the register addresses will need to be changed. Usage: GPIO_out(bus, GPIO_addr, 0, 0b00011111) This call would turn on A0 through A4. """ IODIR_map = [0x00, 0x01] output_map = [0x14, 0x15] if (bank != 0) and (bank != 1): print() raise InvalidIOUsage("An invalid IO bank has been selected") IO_direction = IODIR_map[bank] output_reg = output_map[bank] current_status = bus.read_byte_data(addr, output_reg) if current_status == mask: # This means nothing needs to happen print("Current control status matches requested controls. " + "No action is required.") return True bus.write_byte_data(addr, IO_direction, 0) bus.write_byte_data(addr, output_reg, mask)
[ "def", "IO_expander_output", "(", "bus", ",", "addr", ",", "bank", ",", "mask", ")", ":", "IODIR_map", "=", "[", "0x00", ",", "0x01", "]", "output_map", "=", "[", "0x14", ",", "0x15", "]", "if", "(", "bank", "!=", "0", ")", "and", "(", "bank", "!...
Method for controlling the GPIO expander via I2C which accepts a bank - A(0) or B(1) and a mask to push to the pins of the expander. The method also assumes the the expander is operating in sequential mode. If this mode is not used, the register addresses will need to be changed. Usage: GPIO_out(bus, GPIO_addr, 0, 0b00011111) This call would turn on A0 through A4.
[ "Method", "for", "controlling", "the", "GPIO", "expander", "via", "I2C", "which", "accepts", "a", "bank", "-", "A", "(", "0", ")", "or", "B", "(", "1", ")", "and", "a", "mask", "to", "push", "to", "the", "pins", "of", "the", "expander", ".", "The",...
train
https://github.com/ECESeniorDesign/greenhouse_envmgmt/blob/864e0ce98bfb220f9954026913a5470536de9818/greenhouse_envmgmt/i2c_utility.py#L80-L114
ECESeniorDesign/greenhouse_envmgmt
greenhouse_envmgmt/i2c_utility.py
get_IO_reg
def get_IO_reg(bus, addr, bank): """ Method retrieves the register corresponding to respective bank (0 or 1) """ output_map = [0x14, 0x15] if (bank != 0) and (bank != 1): print() raise InvalidIOUsage("An invalid IO bank has been selected") output_reg = output_map[bank] current_status = bus.read_byte_data(addr, output_reg) return current_status
python
def get_IO_reg(bus, addr, bank): """ Method retrieves the register corresponding to respective bank (0 or 1) """ output_map = [0x14, 0x15] if (bank != 0) and (bank != 1): print() raise InvalidIOUsage("An invalid IO bank has been selected") output_reg = output_map[bank] current_status = bus.read_byte_data(addr, output_reg) return current_status
[ "def", "get_IO_reg", "(", "bus", ",", "addr", ",", "bank", ")", ":", "output_map", "=", "[", "0x14", ",", "0x15", "]", "if", "(", "bank", "!=", "0", ")", "and", "(", "bank", "!=", "1", ")", ":", "print", "(", ")", "raise", "InvalidIOUsage", "(", ...
Method retrieves the register corresponding to respective bank (0 or 1)
[ "Method", "retrieves", "the", "register", "corresponding", "to", "respective", "bank", "(", "0", "or", "1", ")" ]
train
https://github.com/ECESeniorDesign/greenhouse_envmgmt/blob/864e0ce98bfb220f9954026913a5470536de9818/greenhouse_envmgmt/i2c_utility.py#L116-L127
ECESeniorDesign/greenhouse_envmgmt
greenhouse_envmgmt/i2c_utility.py
import_i2c_addr
def import_i2c_addr(bus, opt="sensors"): """ import_i2c_addresses will return a list of the currently connected I2C devices. This can be used a means to automatically detect the number of connected sensor modules. Modules are between int(112) and int(119) By default, the method will return a list of sensor addresses. """ i2c_list = [] for device in range(128): try: bus.read_byte(device) i2c_list.append((device)) except IOError: pass if opt == "sensors": sensor_list = [] for module in range(112,120): try: indx = i2c_list.index(module) sensor_list.append(module) except ValueError: pass return sensor_list else: return i2c_list
python
def import_i2c_addr(bus, opt="sensors"): """ import_i2c_addresses will return a list of the currently connected I2C devices. This can be used a means to automatically detect the number of connected sensor modules. Modules are between int(112) and int(119) By default, the method will return a list of sensor addresses. """ i2c_list = [] for device in range(128): try: bus.read_byte(device) i2c_list.append((device)) except IOError: pass if opt == "sensors": sensor_list = [] for module in range(112,120): try: indx = i2c_list.index(module) sensor_list.append(module) except ValueError: pass return sensor_list else: return i2c_list
[ "def", "import_i2c_addr", "(", "bus", ",", "opt", "=", "\"sensors\"", ")", ":", "i2c_list", "=", "[", "]", "for", "device", "in", "range", "(", "128", ")", ":", "try", ":", "bus", ".", "read_byte", "(", "device", ")", "i2c_list", ".", "append", "(", ...
import_i2c_addresses will return a list of the currently connected I2C devices. This can be used a means to automatically detect the number of connected sensor modules. Modules are between int(112) and int(119) By default, the method will return a list of sensor addresses.
[ "import_i2c_addresses", "will", "return", "a", "list", "of", "the", "currently", "connected", "I2C", "devices", ".", "This", "can", "be", "used", "a", "means", "to", "automatically", "detect", "the", "number", "of", "connected", "sensor", "modules", ".", "Modu...
train
https://github.com/ECESeniorDesign/greenhouse_envmgmt/blob/864e0ce98bfb220f9954026913a5470536de9818/greenhouse_envmgmt/i2c_utility.py#L129-L160
snipsco/snipsmanagercore
snipsmanagercore/tts.py
GTTS.speak
def speak(self, sentence): """ Speak a sentence using Google TTS. :param sentence: the sentence to speak. """ temp_dir = "/tmp/" filename = "gtts.mp3" file_path = "{}/{}".format(temp_dir, filename) if not os.path.exists(temp_dir): os.makedirs(temp_dir) def delete_file(): try: os.remove(file_path) if not os.listdir(temp_dir): try: os.rmdir(temp_dir) except OSError: pass except: pass if self.logger is not None: self.logger.info("Google TTS: {}".format(sentence)) tts = gTTS(text=sentence, lang=self.locale) tts.save(file_path) AudioPlayer.play_async(file_path, delete_file)
python
def speak(self, sentence): """ Speak a sentence using Google TTS. :param sentence: the sentence to speak. """ temp_dir = "/tmp/" filename = "gtts.mp3" file_path = "{}/{}".format(temp_dir, filename) if not os.path.exists(temp_dir): os.makedirs(temp_dir) def delete_file(): try: os.remove(file_path) if not os.listdir(temp_dir): try: os.rmdir(temp_dir) except OSError: pass except: pass if self.logger is not None: self.logger.info("Google TTS: {}".format(sentence)) tts = gTTS(text=sentence, lang=self.locale) tts.save(file_path) AudioPlayer.play_async(file_path, delete_file)
[ "def", "speak", "(", "self", ",", "sentence", ")", ":", "temp_dir", "=", "\"/tmp/\"", "filename", "=", "\"gtts.mp3\"", "file_path", "=", "\"{}/{}\"", ".", "format", "(", "temp_dir", ",", "filename", ")", "if", "not", "os", ".", "path", ".", "exists", "("...
Speak a sentence using Google TTS. :param sentence: the sentence to speak.
[ "Speak", "a", "sentence", "using", "Google", "TTS", ".", ":", "param", "sentence", ":", "the", "sentence", "to", "speak", "." ]
train
https://github.com/snipsco/snipsmanagercore/blob/93eaaa665887f790a30ba86af5ffee394bfd8ede/snipsmanagercore/tts.py#L23-L49
ECESeniorDesign/greenhouse_envmgmt
greenhouse_envmgmt/sense.py
get_lux_count
def get_lux_count(lux_byte): """ Method to convert data from the TSL2550D lux sensor into more easily usable ADC count values. """ LUX_VALID_MASK = 0b10000000 LUX_CHORD_MASK = 0b01110000 LUX_STEP_MASK = 0b00001111 valid = lux_byte & LUX_VALID_MASK if valid != 0: step_num = (lux_byte & LUX_STEP_MASK) # Shift to normalize value chord_num = (lux_byte & LUX_CHORD_MASK) >> 4 step_val = 2**chord_num chord_val = int(16.5 * (step_val - 1)) count = chord_val + step_val * step_num return count else: raise SensorError("Invalid lux sensor data.")
python
def get_lux_count(lux_byte): """ Method to convert data from the TSL2550D lux sensor into more easily usable ADC count values. """ LUX_VALID_MASK = 0b10000000 LUX_CHORD_MASK = 0b01110000 LUX_STEP_MASK = 0b00001111 valid = lux_byte & LUX_VALID_MASK if valid != 0: step_num = (lux_byte & LUX_STEP_MASK) # Shift to normalize value chord_num = (lux_byte & LUX_CHORD_MASK) >> 4 step_val = 2**chord_num chord_val = int(16.5 * (step_val - 1)) count = chord_val + step_val * step_num return count else: raise SensorError("Invalid lux sensor data.")
[ "def", "get_lux_count", "(", "lux_byte", ")", ":", "LUX_VALID_MASK", "=", "0b10000000", "LUX_CHORD_MASK", "=", "0b01110000", "LUX_STEP_MASK", "=", "0b00001111", "valid", "=", "lux_byte", "&", "LUX_VALID_MASK", "if", "valid", "!=", "0", ":", "step_num", "=", "(",...
Method to convert data from the TSL2550D lux sensor into more easily usable ADC count values.
[ "Method", "to", "convert", "data", "from", "the", "TSL2550D", "lux", "sensor", "into", "more", "easily", "usable", "ADC", "count", "values", "." ]
train
https://github.com/ECESeniorDesign/greenhouse_envmgmt/blob/864e0ce98bfb220f9954026913a5470536de9818/greenhouse_envmgmt/sense.py#L282-L300
ECESeniorDesign/greenhouse_envmgmt
greenhouse_envmgmt/sense.py
SensorCluster.update_lux
def update_lux(self, extend=0): """ Communicates with the TSL2550D light sensor and returns a lux value. Note that this method contains approximately 1 second of total delay. This delay is necessary in order to obtain full resolution compensated lux values. Alternatively, the device could be put in extended mode, which drops some resolution in favor of shorter delays. """ DEVICE_REG_OUT = 0x1d LUX_PWR_ON = 0x03 if extend == 1: LUX_MODE = 0x1d delay = .08 scale = 5 else: LUX_MODE = 0x18 delay = .4 scale = 1 LUX_READ_CH0 = 0x43 LUX_READ_CH1 = 0x83 # Select correct I2C mux channel on TCA module TCA_select(SensorCluster.bus, self.mux_addr, SensorCluster.lux_chan) # Make sure lux sensor is powered up. SensorCluster.bus.write_byte(SensorCluster.lux_addr, LUX_PWR_ON) lux_on = SensorCluster.bus.read_byte_data(SensorCluster.lux_addr, LUX_PWR_ON) # Check for successful powerup if (lux_on == LUX_PWR_ON): # Send command to initiate ADC on each channel # Read each channel after the new data is ready SensorCluster.bus.write_byte(SensorCluster.lux_addr, LUX_MODE) SensorCluster.bus.write_byte(SensorCluster.lux_addr, LUX_READ_CH0) sleep(delay) adc_ch0 = SensorCluster.bus.read_byte(SensorCluster.lux_addr) count0 = get_lux_count(adc_ch0) * scale # 5x for extended mode SensorCluster.bus.write_byte(SensorCluster.lux_addr, LUX_READ_CH1) sleep(delay) adc_ch1 = SensorCluster.bus.read_byte(SensorCluster.lux_addr) count1 = get_lux_count(adc_ch1) * scale # 5x for extended mode ratio = count1 / (count0 - count1) lux = (count0 - count1) * .39 * e**(-.181 * (ratio**2)) self.light_ratio = float(count1)/float(count0) print("Light ratio Ch1/Ch0: ", self.light_ratio) self.lux = round(lux, 3) return TCA_select(SensorCluster.bus, self.mux_addr, "off") else: raise SensorError("The lux sensor is powered down.")
python
def update_lux(self, extend=0): """ Communicates with the TSL2550D light sensor and returns a lux value. Note that this method contains approximately 1 second of total delay. This delay is necessary in order to obtain full resolution compensated lux values. Alternatively, the device could be put in extended mode, which drops some resolution in favor of shorter delays. """ DEVICE_REG_OUT = 0x1d LUX_PWR_ON = 0x03 if extend == 1: LUX_MODE = 0x1d delay = .08 scale = 5 else: LUX_MODE = 0x18 delay = .4 scale = 1 LUX_READ_CH0 = 0x43 LUX_READ_CH1 = 0x83 # Select correct I2C mux channel on TCA module TCA_select(SensorCluster.bus, self.mux_addr, SensorCluster.lux_chan) # Make sure lux sensor is powered up. SensorCluster.bus.write_byte(SensorCluster.lux_addr, LUX_PWR_ON) lux_on = SensorCluster.bus.read_byte_data(SensorCluster.lux_addr, LUX_PWR_ON) # Check for successful powerup if (lux_on == LUX_PWR_ON): # Send command to initiate ADC on each channel # Read each channel after the new data is ready SensorCluster.bus.write_byte(SensorCluster.lux_addr, LUX_MODE) SensorCluster.bus.write_byte(SensorCluster.lux_addr, LUX_READ_CH0) sleep(delay) adc_ch0 = SensorCluster.bus.read_byte(SensorCluster.lux_addr) count0 = get_lux_count(adc_ch0) * scale # 5x for extended mode SensorCluster.bus.write_byte(SensorCluster.lux_addr, LUX_READ_CH1) sleep(delay) adc_ch1 = SensorCluster.bus.read_byte(SensorCluster.lux_addr) count1 = get_lux_count(adc_ch1) * scale # 5x for extended mode ratio = count1 / (count0 - count1) lux = (count0 - count1) * .39 * e**(-.181 * (ratio**2)) self.light_ratio = float(count1)/float(count0) print("Light ratio Ch1/Ch0: ", self.light_ratio) self.lux = round(lux, 3) return TCA_select(SensorCluster.bus, self.mux_addr, "off") else: raise SensorError("The lux sensor is powered down.")
[ "def", "update_lux", "(", "self", ",", "extend", "=", "0", ")", ":", "DEVICE_REG_OUT", "=", "0x1d", "LUX_PWR_ON", "=", "0x03", "if", "extend", "==", "1", ":", "LUX_MODE", "=", "0x1d", "delay", "=", ".08", "scale", "=", "5", "else", ":", "LUX_MODE", "...
Communicates with the TSL2550D light sensor and returns a lux value. Note that this method contains approximately 1 second of total delay. This delay is necessary in order to obtain full resolution compensated lux values. Alternatively, the device could be put in extended mode, which drops some resolution in favor of shorter delays.
[ "Communicates", "with", "the", "TSL2550D", "light", "sensor", "and", "returns", "a", "lux", "value", ".", "Note", "that", "this", "method", "contains", "approximately", "1", "second", "of", "total", "delay", ".", "This", "delay", "is", "necessary", "in", "or...
train
https://github.com/ECESeniorDesign/greenhouse_envmgmt/blob/864e0ce98bfb220f9954026913a5470536de9818/greenhouse_envmgmt/sense.py#L64-L115
ECESeniorDesign/greenhouse_envmgmt
greenhouse_envmgmt/sense.py
SensorCluster.update_humidity_temp
def update_humidity_temp(self): """ This method utilizes the HIH7xxx sensor to read humidity and temperature in one call. """ # Create mask for STATUS (first two bits of 64 bit wide result) STATUS = 0b11 << 6 TCA_select(SensorCluster.bus, self.mux_addr, SensorCluster.humidity_chan) SensorCluster.bus.write_quick(SensorCluster.humidity_addr) # Begin conversion sleep(.25) # wait 100ms to make sure the conversion takes place. data = SensorCluster.bus.read_i2c_block_data(SensorCluster.humidity_addr, 0, 4) status = (data[0] & STATUS) >> 6 if status == 0 or status == 1: # will always pass for now. humidity = round((((data[0] & 0x3f) << 8) | data[1]) * 100.0 / (2**14 - 2), 3) self.humidity = humidity self.temp = (round((((data[2] << 6) + ((data[3] & 0xfc) >> 2)) * 165.0 / 16382.0 - 40.0), 3) * 9/5) + 32 return TCA_select(SensorCluster.bus, self.mux_addr, "off") else: raise I2CBusError("Unable to retrieve humidity")
python
def update_humidity_temp(self): """ This method utilizes the HIH7xxx sensor to read humidity and temperature in one call. """ # Create mask for STATUS (first two bits of 64 bit wide result) STATUS = 0b11 << 6 TCA_select(SensorCluster.bus, self.mux_addr, SensorCluster.humidity_chan) SensorCluster.bus.write_quick(SensorCluster.humidity_addr) # Begin conversion sleep(.25) # wait 100ms to make sure the conversion takes place. data = SensorCluster.bus.read_i2c_block_data(SensorCluster.humidity_addr, 0, 4) status = (data[0] & STATUS) >> 6 if status == 0 or status == 1: # will always pass for now. humidity = round((((data[0] & 0x3f) << 8) | data[1]) * 100.0 / (2**14 - 2), 3) self.humidity = humidity self.temp = (round((((data[2] << 6) + ((data[3] & 0xfc) >> 2)) * 165.0 / 16382.0 - 40.0), 3) * 9/5) + 32 return TCA_select(SensorCluster.bus, self.mux_addr, "off") else: raise I2CBusError("Unable to retrieve humidity")
[ "def", "update_humidity_temp", "(", "self", ")", ":", "# Create mask for STATUS (first two bits of 64 bit wide result)\r", "STATUS", "=", "0b11", "<<", "6", "TCA_select", "(", "SensorCluster", ".", "bus", ",", "self", ".", "mux_addr", ",", "SensorCluster", ".", "humid...
This method utilizes the HIH7xxx sensor to read humidity and temperature in one call.
[ "This", "method", "utilizes", "the", "HIH7xxx", "sensor", "to", "read", "humidity", "and", "temperature", "in", "one", "call", "." ]
train
https://github.com/ECESeniorDesign/greenhouse_envmgmt/blob/864e0ce98bfb220f9954026913a5470536de9818/greenhouse_envmgmt/sense.py#L117-L139
ECESeniorDesign/greenhouse_envmgmt
greenhouse_envmgmt/sense.py
SensorCluster.update_soil_moisture
def update_soil_moisture(self): """ Method will select the ADC module, turn on the analog sensor, wait for voltage settle, and then digitize the sensor voltage. Voltage division/signal loss is accounted for by scaling up the sensor output. This may need to be adjusted if a different sensor is used """ SensorCluster.analog_sensor_power(SensorCluster.bus, "on") # turn on sensor sleep(.2) TCA_select(SensorCluster.bus, self.mux_addr, SensorCluster.adc_chan) moisture = get_ADC_value( SensorCluster.bus, SensorCluster.adc_addr, SensorCluster.moisture_chan) status = TCA_select(SensorCluster.bus, self.mux_addr, "off") # Turn off mux. SensorCluster.analog_sensor_power(SensorCluster.bus, "off") # turn off sensor if (moisture >= 0): soil_moisture = moisture/2.048 # Scale to a percentage value self.soil_moisture = round(soil_moisture,3) else: raise SensorError( "The soil moisture meter is not configured correctly.") return status
python
def update_soil_moisture(self): """ Method will select the ADC module, turn on the analog sensor, wait for voltage settle, and then digitize the sensor voltage. Voltage division/signal loss is accounted for by scaling up the sensor output. This may need to be adjusted if a different sensor is used """ SensorCluster.analog_sensor_power(SensorCluster.bus, "on") # turn on sensor sleep(.2) TCA_select(SensorCluster.bus, self.mux_addr, SensorCluster.adc_chan) moisture = get_ADC_value( SensorCluster.bus, SensorCluster.adc_addr, SensorCluster.moisture_chan) status = TCA_select(SensorCluster.bus, self.mux_addr, "off") # Turn off mux. SensorCluster.analog_sensor_power(SensorCluster.bus, "off") # turn off sensor if (moisture >= 0): soil_moisture = moisture/2.048 # Scale to a percentage value self.soil_moisture = round(soil_moisture,3) else: raise SensorError( "The soil moisture meter is not configured correctly.") return status
[ "def", "update_soil_moisture", "(", "self", ")", ":", "SensorCluster", ".", "analog_sensor_power", "(", "SensorCluster", ".", "bus", ",", "\"on\"", ")", "# turn on sensor\r", "sleep", "(", ".2", ")", "TCA_select", "(", "SensorCluster", ".", "bus", ",", "self", ...
Method will select the ADC module, turn on the analog sensor, wait for voltage settle, and then digitize the sensor voltage. Voltage division/signal loss is accounted for by scaling up the sensor output. This may need to be adjusted if a different sensor is used
[ "Method", "will", "select", "the", "ADC", "module", "turn", "on", "the", "analog", "sensor", "wait", "for", "voltage", "settle", "and", "then", "digitize", "the", "sensor", "voltage", ".", "Voltage", "division", "/", "signal", "loss", "is", "accounted", "for...
train
https://github.com/ECESeniorDesign/greenhouse_envmgmt/blob/864e0ce98bfb220f9954026913a5470536de9818/greenhouse_envmgmt/sense.py#L141-L162
ECESeniorDesign/greenhouse_envmgmt
greenhouse_envmgmt/sense.py
SensorCluster.update_instance_sensors
def update_instance_sensors(self, opt=None): """ Method runs through all sensor modules and updates to the latest sensor values. After running through each sensor module, The sensor head (the I2C multiplexer), is disabled in order to avoid address conflicts. Usage: plant_sensor_object.updateAllSensors(bus_object) """ self.update_count += 1 self.update_lux() self.update_humidity_temp() if opt == "all": try: self.update_soil_moisture() except SensorError: # This could be handled with a repeat request later. pass self.timestamp = time() # disable sensor module tca_status = TCA_select(SensorCluster.bus, self.mux_addr, "off") if tca_status != 0: raise I2CBusError( "Bus multiplexer was unable to switch off to prevent conflicts")
python
def update_instance_sensors(self, opt=None): """ Method runs through all sensor modules and updates to the latest sensor values. After running through each sensor module, The sensor head (the I2C multiplexer), is disabled in order to avoid address conflicts. Usage: plant_sensor_object.updateAllSensors(bus_object) """ self.update_count += 1 self.update_lux() self.update_humidity_temp() if opt == "all": try: self.update_soil_moisture() except SensorError: # This could be handled with a repeat request later. pass self.timestamp = time() # disable sensor module tca_status = TCA_select(SensorCluster.bus, self.mux_addr, "off") if tca_status != 0: raise I2CBusError( "Bus multiplexer was unable to switch off to prevent conflicts")
[ "def", "update_instance_sensors", "(", "self", ",", "opt", "=", "None", ")", ":", "self", ".", "update_count", "+=", "1", "self", ".", "update_lux", "(", ")", "self", ".", "update_humidity_temp", "(", ")", "if", "opt", "==", "\"all\"", ":", "try", ":", ...
Method runs through all sensor modules and updates to the latest sensor values. After running through each sensor module, The sensor head (the I2C multiplexer), is disabled in order to avoid address conflicts. Usage: plant_sensor_object.updateAllSensors(bus_object)
[ "Method", "runs", "through", "all", "sensor", "modules", "and", "updates", "to", "the", "latest", "sensor", "values", ".", "After", "running", "through", "each", "sensor", "module", "The", "sensor", "head", "(", "the", "I2C", "multiplexer", ")", "is", "disab...
train
https://github.com/ECESeniorDesign/greenhouse_envmgmt/blob/864e0ce98bfb220f9954026913a5470536de9818/greenhouse_envmgmt/sense.py#L164-L189
ECESeniorDesign/greenhouse_envmgmt
greenhouse_envmgmt/sense.py
SensorCluster.sensor_values
def sensor_values(self): """ Returns the values of all sensors for this cluster """ self.update_instance_sensors(opt="all") return { "light": self.lux, "water": self.soil_moisture, "humidity": self.humidity, "temperature": self.temp }
python
def sensor_values(self): """ Returns the values of all sensors for this cluster """ self.update_instance_sensors(opt="all") return { "light": self.lux, "water": self.soil_moisture, "humidity": self.humidity, "temperature": self.temp }
[ "def", "sensor_values", "(", "self", ")", ":", "self", ".", "update_instance_sensors", "(", "opt", "=", "\"all\"", ")", "return", "{", "\"light\"", ":", "self", ".", "lux", ",", "\"water\"", ":", "self", ".", "soil_moisture", ",", "\"humidity\"", ":", "sel...
Returns the values of all sensors for this cluster
[ "Returns", "the", "values", "of", "all", "sensors", "for", "this", "cluster" ]
train
https://github.com/ECESeniorDesign/greenhouse_envmgmt/blob/864e0ce98bfb220f9954026913a5470536de9818/greenhouse_envmgmt/sense.py#L191-L201
ECESeniorDesign/greenhouse_envmgmt
greenhouse_envmgmt/sense.py
SensorCluster.analog_sensor_power
def analog_sensor_power(cls, bus, operation): """ Method that turns on all of the analog sensor modules Includes all attached soil moisture sensors Note that all of the SensorCluster object should be attached in parallel and only 1 GPIO pin is available to toggle analog sensor power. The sensor power should be left on for at least 100ms in order to allow the sensors to stabilize before reading. Usage: SensorCluster.analog_sensor_power(bus,"high") OR SensorCluster.analog_sensor_power(bus,"low") This method should be removed if an off-board GPIO extender is used. """ # Set appropriate analog sensor power bit in GPIO mask # using the ControlCluster bank_mask to avoid overwriting any data reg_data = get_IO_reg(bus, 0x20, cls.power_bank) if operation == "on": reg_data = reg_data | 1 << cls.analog_power_pin elif operation == "off": reg_data = reg_data & (0b11111111 ^ (1 << cls.analog_power_pin)) else: raise SensorError( "Invalid command used while enabling analog sensors") # Send updated IO mask to output IO_expander_output(bus, 0x20, cls.power_bank, reg_data)
python
def analog_sensor_power(cls, bus, operation): """ Method that turns on all of the analog sensor modules Includes all attached soil moisture sensors Note that all of the SensorCluster object should be attached in parallel and only 1 GPIO pin is available to toggle analog sensor power. The sensor power should be left on for at least 100ms in order to allow the sensors to stabilize before reading. Usage: SensorCluster.analog_sensor_power(bus,"high") OR SensorCluster.analog_sensor_power(bus,"low") This method should be removed if an off-board GPIO extender is used. """ # Set appropriate analog sensor power bit in GPIO mask # using the ControlCluster bank_mask to avoid overwriting any data reg_data = get_IO_reg(bus, 0x20, cls.power_bank) if operation == "on": reg_data = reg_data | 1 << cls.analog_power_pin elif operation == "off": reg_data = reg_data & (0b11111111 ^ (1 << cls.analog_power_pin)) else: raise SensorError( "Invalid command used while enabling analog sensors") # Send updated IO mask to output IO_expander_output(bus, 0x20, cls.power_bank, reg_data)
[ "def", "analog_sensor_power", "(", "cls", ",", "bus", ",", "operation", ")", ":", "# Set appropriate analog sensor power bit in GPIO mask\r", "# using the ControlCluster bank_mask to avoid overwriting any data\r", "reg_data", "=", "get_IO_reg", "(", "bus", ",", "0x20", ",", "...
Method that turns on all of the analog sensor modules Includes all attached soil moisture sensors Note that all of the SensorCluster object should be attached in parallel and only 1 GPIO pin is available to toggle analog sensor power. The sensor power should be left on for at least 100ms in order to allow the sensors to stabilize before reading. Usage: SensorCluster.analog_sensor_power(bus,"high") OR SensorCluster.analog_sensor_power(bus,"low") This method should be removed if an off-board GPIO extender is used.
[ "Method", "that", "turns", "on", "all", "of", "the", "analog", "sensor", "modules", "Includes", "all", "attached", "soil", "moisture", "sensors", "Note", "that", "all", "of", "the", "SensorCluster", "object", "should", "be", "attached", "in", "parallel", "and"...
train
https://github.com/ECESeniorDesign/greenhouse_envmgmt/blob/864e0ce98bfb220f9954026913a5470536de9818/greenhouse_envmgmt/sense.py#L220-L244
ECESeniorDesign/greenhouse_envmgmt
greenhouse_envmgmt/sense.py
SensorCluster.get_water_level
def get_water_level(cls): """ This method uses the ADC on the control module to measure the current water tank level and returns the water volume remaining in the tank. For this method, it is assumed that a simple voltage divider is used to interface the sensor to the ADC module. Testing shows that the sensor response is not completely linear, though it is quite close. To make the results more accurate, a mapping method approximated by a linear fit to data is used. """ # ---------- # These values should be updated based on the real system parameters vref = 4.95 tank_height = 17.5 # in centimeters (height of container) rref = 2668 # Reference resistor # ---------- val = 0 for i in range(5): # Take five readings and do an average # Fetch value from ADC (0x69 - ch1) val = get_ADC_value(cls.bus, 0x6c, 1) + val avg = val / 5 water_sensor_res = rref * avg/(vref - avg) depth_cm = water_sensor_res * \ (-.0163) + 28.127 # measured transfer adjusted offset if depth_cm < 1.0: # Below 1cm, the values should not be trusted. depth_cm = 0 cls.water_remaining = depth_cm / tank_height # Return the current depth in case the user is interested in # that parameter alone. (IE for automatic shut-off) return depth_cm/tank_height
python
def get_water_level(cls): """ This method uses the ADC on the control module to measure the current water tank level and returns the water volume remaining in the tank. For this method, it is assumed that a simple voltage divider is used to interface the sensor to the ADC module. Testing shows that the sensor response is not completely linear, though it is quite close. To make the results more accurate, a mapping method approximated by a linear fit to data is used. """ # ---------- # These values should be updated based on the real system parameters vref = 4.95 tank_height = 17.5 # in centimeters (height of container) rref = 2668 # Reference resistor # ---------- val = 0 for i in range(5): # Take five readings and do an average # Fetch value from ADC (0x69 - ch1) val = get_ADC_value(cls.bus, 0x6c, 1) + val avg = val / 5 water_sensor_res = rref * avg/(vref - avg) depth_cm = water_sensor_res * \ (-.0163) + 28.127 # measured transfer adjusted offset if depth_cm < 1.0: # Below 1cm, the values should not be trusted. depth_cm = 0 cls.water_remaining = depth_cm / tank_height # Return the current depth in case the user is interested in # that parameter alone. (IE for automatic shut-off) return depth_cm/tank_height
[ "def", "get_water_level", "(", "cls", ")", ":", "# ----------\r", "# These values should be updated based on the real system parameters\r", "vref", "=", "4.95", "tank_height", "=", "17.5", "# in centimeters (height of container)\r", "rref", "=", "2668", "# Reference resistor\r", ...
This method uses the ADC on the control module to measure the current water tank level and returns the water volume remaining in the tank. For this method, it is assumed that a simple voltage divider is used to interface the sensor to the ADC module. Testing shows that the sensor response is not completely linear, though it is quite close. To make the results more accurate, a mapping method approximated by a linear fit to data is used.
[ "This", "method", "uses", "the", "ADC", "on", "the", "control", "module", "to", "measure", "the", "current", "water", "tank", "level", "and", "returns", "the", "water", "volume", "remaining", "in", "the", "tank", ".", "For", "this", "method", "it", "is", ...
train
https://github.com/ECESeniorDesign/greenhouse_envmgmt/blob/864e0ce98bfb220f9954026913a5470536de9818/greenhouse_envmgmt/sense.py#L247-L279
ClimateImpactLab/DataFS
datafs/core/versions.py
BumpableVersion.bump
def bump(self, kind=None, prerelease=None, inplace=True): ''' Increment the version and/or pre-release value Parameters ---------- kind : str Increment the version. Can be ``major``, ``minor``, or ``patch``, corresponding to the three segments of the version number (in order). A value of ``None`` will not increment the version number (default). prerelease : str Increment the version's pre-release value. Can be ``alpha`` or ``beta``. A prerelease value of ``None`` will remove a pre-release value if it exists (default). inplace : bool If false, returns a new ``BumpableVersion`` instance. If ``True`` (default), bumps the version in place. Examples -------- The ``kind`` argument increments the version: .. code-block:: python >>> v = BumpableVersion('1.0.1') >>> v.bump('patch') >>> v BumpableVersion ('1.0.2') >>> >>> v.bump('minor') >>> v BumpableVersion ('1.1') >>> >>> v.bump('minor') >>> v BumpableVersion ('1.2') >>> >>> v.bump('major') >>> v BumpableVersion ('2.0') >>> >>> v.bump('release') # doctest: +ELLIPSIS Traceback (most recent call last): ValueError: Bump kind "release" not understood The prerelease argument increments the pre-release value. If ``kind`` is not supplied simultaneously the version is bumped with a patch before entering pre-release: .. 
code-block:: python >>> v = BumpableVersion('1.0.0') >>> v.bump(prerelease='alpha') >>> v BumpableVersion ('1.0.1a1') >>> >>> v.bump(prerelease='alpha') >>> v BumpableVersion ('1.0.1a2') >>> >>> v.bump(prerelease='beta') >>> v BumpableVersion ('1.0.1b1') >>> >>> v.bump('minor') >>> v BumpableVersion ('1.1') >>> >>> v.bump('minor', prerelease='beta') >>> v BumpableVersion ('1.2b1') >>> >>> v.bump(prerelease='beta') >>> v BumpableVersion ('1.2b2') >>> >>> v.bump('minor') >>> v BumpableVersion ('1.2') >>> >>> v.bump('minor', prerelease='beta') >>> v BumpableVersion ('1.3b1') >>> >>> v.bump('major', prerelease='alpha') >>> v BumpableVersion ('2.0a1') >>> >>> v.bump('major') >>> v BumpableVersion ('3.0') >>> >>> v.bump('patch', prerelease='beta') >>> v BumpableVersion ('3.0.1b1') >>> >>> v.bump('patch') >>> v BumpableVersion ('3.0.1') >>> >>> v.bump(prerelease='gamma') # doctest: +ELLIPSIS Traceback (most recent call last): ValueError: Prerelease type "gamma" not understood Releases cannot move from beta to alpha without a new major/minor/patch bump: .. code-block:: python >>> v = BumpableVersion('0.2b1') >>> v.bump(prerelease='alpha') # doctest: \ +ELLIPSIS +NORMALIZE_WHITESPACE Traceback (most recent call last): ... ValueError: Cannot bump version "0.2b1" to prerelease stage \ "alpha" - version already in beta >>> v.bump('minor') >>> v BumpableVersion ('0.2') >>> Versions can return a new version or can be bumped in-place (default): .. 
code-block:: python >>> v = BumpableVersion('0.2') >>> v.bump('minor', inplace=False) BumpableVersion ('0.3') >>> v BumpableVersion ('0.2') ''' if kind is not None: # if already in pre-release and we want to move to pre-release, # increment version + prerelease if self.prerelease and prerelease: new_prerelease = self._increment_prerelease(None, prerelease) new_version = self._increment_version(self.version, kind) # if already in pre-release and we want to exit pre-release, # remove prerelease elif self.prerelease: new_prerelease = None if self.version[2] == 0: if kind == 'minor': new_version = self.version else: new_version = self._increment_version( self.version, kind) else: if kind == 'patch': new_version = self.version else: new_version = self._increment_version( self.version, kind) else: new_prerelease = self._increment_prerelease(None, prerelease) new_version = self._increment_version(self.version, kind) elif prerelease is not None: if self.prerelease: new_prerelease = self._increment_prerelease( self.prerelease, prerelease) new_version = self.version else: new_prerelease = self._increment_prerelease(None, prerelease) new_version = self._increment_version(self.version, 'patch') else: # default is bump patch new_prerelease = None new_version = self._increment_version(self.version, 'patch') if inplace: self.version = new_version self.prerelease = new_prerelease else: new = BumpableVersion() new.version = new_version new.prerelease = new_prerelease return new
python
def bump(self, kind=None, prerelease=None, inplace=True): ''' Increment the version and/or pre-release value Parameters ---------- kind : str Increment the version. Can be ``major``, ``minor``, or ``patch``, corresponding to the three segments of the version number (in order). A value of ``None`` will not increment the version number (default). prerelease : str Increment the version's pre-release value. Can be ``alpha`` or ``beta``. A prerelease value of ``None`` will remove a pre-release value if it exists (default). inplace : bool If false, returns a new ``BumpableVersion`` instance. If ``True`` (default), bumps the version in place. Examples -------- The ``kind`` argument increments the version: .. code-block:: python >>> v = BumpableVersion('1.0.1') >>> v.bump('patch') >>> v BumpableVersion ('1.0.2') >>> >>> v.bump('minor') >>> v BumpableVersion ('1.1') >>> >>> v.bump('minor') >>> v BumpableVersion ('1.2') >>> >>> v.bump('major') >>> v BumpableVersion ('2.0') >>> >>> v.bump('release') # doctest: +ELLIPSIS Traceback (most recent call last): ValueError: Bump kind "release" not understood The prerelease argument increments the pre-release value. If ``kind`` is not supplied simultaneously the version is bumped with a patch before entering pre-release: .. 
code-block:: python >>> v = BumpableVersion('1.0.0') >>> v.bump(prerelease='alpha') >>> v BumpableVersion ('1.0.1a1') >>> >>> v.bump(prerelease='alpha') >>> v BumpableVersion ('1.0.1a2') >>> >>> v.bump(prerelease='beta') >>> v BumpableVersion ('1.0.1b1') >>> >>> v.bump('minor') >>> v BumpableVersion ('1.1') >>> >>> v.bump('minor', prerelease='beta') >>> v BumpableVersion ('1.2b1') >>> >>> v.bump(prerelease='beta') >>> v BumpableVersion ('1.2b2') >>> >>> v.bump('minor') >>> v BumpableVersion ('1.2') >>> >>> v.bump('minor', prerelease='beta') >>> v BumpableVersion ('1.3b1') >>> >>> v.bump('major', prerelease='alpha') >>> v BumpableVersion ('2.0a1') >>> >>> v.bump('major') >>> v BumpableVersion ('3.0') >>> >>> v.bump('patch', prerelease='beta') >>> v BumpableVersion ('3.0.1b1') >>> >>> v.bump('patch') >>> v BumpableVersion ('3.0.1') >>> >>> v.bump(prerelease='gamma') # doctest: +ELLIPSIS Traceback (most recent call last): ValueError: Prerelease type "gamma" not understood Releases cannot move from beta to alpha without a new major/minor/patch bump: .. code-block:: python >>> v = BumpableVersion('0.2b1') >>> v.bump(prerelease='alpha') # doctest: \ +ELLIPSIS +NORMALIZE_WHITESPACE Traceback (most recent call last): ... ValueError: Cannot bump version "0.2b1" to prerelease stage \ "alpha" - version already in beta >>> v.bump('minor') >>> v BumpableVersion ('0.2') >>> Versions can return a new version or can be bumped in-place (default): .. 
code-block:: python >>> v = BumpableVersion('0.2') >>> v.bump('minor', inplace=False) BumpableVersion ('0.3') >>> v BumpableVersion ('0.2') ''' if kind is not None: # if already in pre-release and we want to move to pre-release, # increment version + prerelease if self.prerelease and prerelease: new_prerelease = self._increment_prerelease(None, prerelease) new_version = self._increment_version(self.version, kind) # if already in pre-release and we want to exit pre-release, # remove prerelease elif self.prerelease: new_prerelease = None if self.version[2] == 0: if kind == 'minor': new_version = self.version else: new_version = self._increment_version( self.version, kind) else: if kind == 'patch': new_version = self.version else: new_version = self._increment_version( self.version, kind) else: new_prerelease = self._increment_prerelease(None, prerelease) new_version = self._increment_version(self.version, kind) elif prerelease is not None: if self.prerelease: new_prerelease = self._increment_prerelease( self.prerelease, prerelease) new_version = self.version else: new_prerelease = self._increment_prerelease(None, prerelease) new_version = self._increment_version(self.version, 'patch') else: # default is bump patch new_prerelease = None new_version = self._increment_version(self.version, 'patch') if inplace: self.version = new_version self.prerelease = new_prerelease else: new = BumpableVersion() new.version = new_version new.prerelease = new_prerelease return new
[ "def", "bump", "(", "self", ",", "kind", "=", "None", ",", "prerelease", "=", "None", ",", "inplace", "=", "True", ")", ":", "if", "kind", "is", "not", "None", ":", "# if already in pre-release and we want to move to pre-release,", "# increment version + prerelease"...
Increment the version and/or pre-release value Parameters ---------- kind : str Increment the version. Can be ``major``, ``minor``, or ``patch``, corresponding to the three segments of the version number (in order). A value of ``None`` will not increment the version number (default). prerelease : str Increment the version's pre-release value. Can be ``alpha`` or ``beta``. A prerelease value of ``None`` will remove a pre-release value if it exists (default). inplace : bool If false, returns a new ``BumpableVersion`` instance. If ``True`` (default), bumps the version in place. Examples -------- The ``kind`` argument increments the version: .. code-block:: python >>> v = BumpableVersion('1.0.1') >>> v.bump('patch') >>> v BumpableVersion ('1.0.2') >>> >>> v.bump('minor') >>> v BumpableVersion ('1.1') >>> >>> v.bump('minor') >>> v BumpableVersion ('1.2') >>> >>> v.bump('major') >>> v BumpableVersion ('2.0') >>> >>> v.bump('release') # doctest: +ELLIPSIS Traceback (most recent call last): ValueError: Bump kind "release" not understood The prerelease argument increments the pre-release value. If ``kind`` is not supplied simultaneously the version is bumped with a patch before entering pre-release: .. 
code-block:: python >>> v = BumpableVersion('1.0.0') >>> v.bump(prerelease='alpha') >>> v BumpableVersion ('1.0.1a1') >>> >>> v.bump(prerelease='alpha') >>> v BumpableVersion ('1.0.1a2') >>> >>> v.bump(prerelease='beta') >>> v BumpableVersion ('1.0.1b1') >>> >>> v.bump('minor') >>> v BumpableVersion ('1.1') >>> >>> v.bump('minor', prerelease='beta') >>> v BumpableVersion ('1.2b1') >>> >>> v.bump(prerelease='beta') >>> v BumpableVersion ('1.2b2') >>> >>> v.bump('minor') >>> v BumpableVersion ('1.2') >>> >>> v.bump('minor', prerelease='beta') >>> v BumpableVersion ('1.3b1') >>> >>> v.bump('major', prerelease='alpha') >>> v BumpableVersion ('2.0a1') >>> >>> v.bump('major') >>> v BumpableVersion ('3.0') >>> >>> v.bump('patch', prerelease='beta') >>> v BumpableVersion ('3.0.1b1') >>> >>> v.bump('patch') >>> v BumpableVersion ('3.0.1') >>> >>> v.bump(prerelease='gamma') # doctest: +ELLIPSIS Traceback (most recent call last): ValueError: Prerelease type "gamma" not understood Releases cannot move from beta to alpha without a new major/minor/patch bump: .. code-block:: python >>> v = BumpableVersion('0.2b1') >>> v.bump(prerelease='alpha') # doctest: \ +ELLIPSIS +NORMALIZE_WHITESPACE Traceback (most recent call last): ... ValueError: Cannot bump version "0.2b1" to prerelease stage \ "alpha" - version already in beta >>> v.bump('minor') >>> v BumpableVersion ('0.2') >>> Versions can return a new version or can be bumped in-place (default): .. code-block:: python >>> v = BumpableVersion('0.2') >>> v.bump('minor', inplace=False) BumpableVersion ('0.3') >>> v BumpableVersion ('0.2')
[ "Increment", "the", "version", "and", "/", "or", "pre", "-", "release", "value" ]
train
https://github.com/ClimateImpactLab/DataFS/blob/0d32c2b4e18d300a11b748a552f6adbc3dd8f59d/datafs/core/versions.py#L65-L259
dominicrodger/django-tinycontent
tinycontent/utils/importer.py
_imported_symbol
def _imported_symbol(import_path): """Resolve a dotted path into a symbol, and return that. For example... >>> _imported_symbol('django.db.models.Model') <class 'django.db.models.base.Model'> Raise ImportError if there's no such module, AttributeError if no such symbol. """ module_name, symbol_name = import_path.rsplit('.', 1) module = import_module(module_name) return getattr(module, symbol_name)
python
def _imported_symbol(import_path): """Resolve a dotted path into a symbol, and return that. For example... >>> _imported_symbol('django.db.models.Model') <class 'django.db.models.base.Model'> Raise ImportError if there's no such module, AttributeError if no such symbol. """ module_name, symbol_name = import_path.rsplit('.', 1) module = import_module(module_name) return getattr(module, symbol_name)
[ "def", "_imported_symbol", "(", "import_path", ")", ":", "module_name", ",", "symbol_name", "=", "import_path", ".", "rsplit", "(", "'.'", ",", "1", ")", "module", "=", "import_module", "(", "module_name", ")", "return", "getattr", "(", "module", ",", "symbo...
Resolve a dotted path into a symbol, and return that. For example... >>> _imported_symbol('django.db.models.Model') <class 'django.db.models.base.Model'> Raise ImportError if there's no such module, AttributeError if no such symbol.
[ "Resolve", "a", "dotted", "path", "into", "a", "symbol", "and", "return", "that", "." ]
train
https://github.com/dominicrodger/django-tinycontent/blob/2ecfce0bcc2849b97eedb8b21f33bfc8ff7b1659/tinycontent/utils/importer.py#L5-L19
timstaley/voevent-parse
src/voeventparse/misc.py
Param
def Param(name, value=None, unit=None, ucd=None, dataType=None, utype=None, ac=True): """ 'Parameter', used as a general purpose key-value entry in the 'What' section. May be assembled into a :class:`Group`. NB ``name`` is not mandated by schema, but *is* mandated in full spec. Args: value(str): String representing parameter value. Or, if ``ac`` is true, then 'autoconversion' is attempted, in which case ``value`` can also be an instance of one of the following: * :py:obj:`bool` * :py:obj:`int` * :py:obj:`float` * :py:class:`datetime.datetime` This allows you to create Params without littering your code with string casts, or worrying if the passed value is a float or a string, etc. NB the value is always *stored* as a string representation, as per VO spec. unit(str): Units of value. See :class:`.definitions.units` ucd(str): `unified content descriptor <http://arxiv.org/abs/1110.0525>`_. For a list of valid UCDs, see: http://vocabularies.referata.com/wiki/Category:IVOA_UCD. dataType(str): Denotes type of ``value``; restricted to 3 options: ``string`` (default), ``int`` , or ``float``. (NB *not* to be confused with standard XML Datatypes, which have many more possible values.) utype(str): See http://wiki.ivoa.net/twiki/bin/view/IVOA/Utypes ac(bool): Attempt automatic conversion of passed ``value`` to string, and set ``dataType`` accordingly (only attempted if ``dataType`` is the default, i.e. ``None``). (NB only supports types listed in _datatypes_autoconversion dict) """ # We use locals() to allow concise looping over the arguments. atts = locals() atts.pop('ac') temp_dict = {} temp_dict.update(atts) for k in temp_dict.keys(): if atts[k] is None: del atts[k] if (ac and value is not None and (not isinstance(value, string_types)) and dataType is None ): if type(value) in _datatypes_autoconversion: datatype, func = _datatypes_autoconversion[type(value)] atts['dataType'] = datatype atts['value'] = func(value) return objectify.Element('Param', attrib=atts)
python
def Param(name, value=None, unit=None, ucd=None, dataType=None, utype=None, ac=True): """ 'Parameter', used as a general purpose key-value entry in the 'What' section. May be assembled into a :class:`Group`. NB ``name`` is not mandated by schema, but *is* mandated in full spec. Args: value(str): String representing parameter value. Or, if ``ac`` is true, then 'autoconversion' is attempted, in which case ``value`` can also be an instance of one of the following: * :py:obj:`bool` * :py:obj:`int` * :py:obj:`float` * :py:class:`datetime.datetime` This allows you to create Params without littering your code with string casts, or worrying if the passed value is a float or a string, etc. NB the value is always *stored* as a string representation, as per VO spec. unit(str): Units of value. See :class:`.definitions.units` ucd(str): `unified content descriptor <http://arxiv.org/abs/1110.0525>`_. For a list of valid UCDs, see: http://vocabularies.referata.com/wiki/Category:IVOA_UCD. dataType(str): Denotes type of ``value``; restricted to 3 options: ``string`` (default), ``int`` , or ``float``. (NB *not* to be confused with standard XML Datatypes, which have many more possible values.) utype(str): See http://wiki.ivoa.net/twiki/bin/view/IVOA/Utypes ac(bool): Attempt automatic conversion of passed ``value`` to string, and set ``dataType`` accordingly (only attempted if ``dataType`` is the default, i.e. ``None``). (NB only supports types listed in _datatypes_autoconversion dict) """ # We use locals() to allow concise looping over the arguments. atts = locals() atts.pop('ac') temp_dict = {} temp_dict.update(atts) for k in temp_dict.keys(): if atts[k] is None: del atts[k] if (ac and value is not None and (not isinstance(value, string_types)) and dataType is None ): if type(value) in _datatypes_autoconversion: datatype, func = _datatypes_autoconversion[type(value)] atts['dataType'] = datatype atts['value'] = func(value) return objectify.Element('Param', attrib=atts)
[ "def", "Param", "(", "name", ",", "value", "=", "None", ",", "unit", "=", "None", ",", "ucd", "=", "None", ",", "dataType", "=", "None", ",", "utype", "=", "None", ",", "ac", "=", "True", ")", ":", "# We use locals() to allow concise looping over the argum...
'Parameter', used as a general purpose key-value entry in the 'What' section. May be assembled into a :class:`Group`. NB ``name`` is not mandated by schema, but *is* mandated in full spec. Args: value(str): String representing parameter value. Or, if ``ac`` is true, then 'autoconversion' is attempted, in which case ``value`` can also be an instance of one of the following: * :py:obj:`bool` * :py:obj:`int` * :py:obj:`float` * :py:class:`datetime.datetime` This allows you to create Params without littering your code with string casts, or worrying if the passed value is a float or a string, etc. NB the value is always *stored* as a string representation, as per VO spec. unit(str): Units of value. See :class:`.definitions.units` ucd(str): `unified content descriptor <http://arxiv.org/abs/1110.0525>`_. For a list of valid UCDs, see: http://vocabularies.referata.com/wiki/Category:IVOA_UCD. dataType(str): Denotes type of ``value``; restricted to 3 options: ``string`` (default), ``int`` , or ``float``. (NB *not* to be confused with standard XML Datatypes, which have many more possible values.) utype(str): See http://wiki.ivoa.net/twiki/bin/view/IVOA/Utypes ac(bool): Attempt automatic conversion of passed ``value`` to string, and set ``dataType`` accordingly (only attempted if ``dataType`` is the default, i.e. ``None``). (NB only supports types listed in _datatypes_autoconversion dict)
[ "Parameter", "used", "as", "a", "general", "purpose", "key", "-", "value", "entry", "in", "the", "What", "section", "." ]
train
https://github.com/timstaley/voevent-parse/blob/58fc1eb3af5eca23d9e819c727204950615402a7/src/voeventparse/misc.py#L36-L92
timstaley/voevent-parse
src/voeventparse/misc.py
Group
def Group(params, name=None, type=None): """Groups together Params for adding under the 'What' section. Args: params(list of :func:`Param`): Parameter elements to go in this group. name(str): Group name. NB ``None`` is valid, since the group may be best identified by its type. type(str): Type of group, e.g. 'complex' (for real and imaginary). """ atts = {} if name: atts['name'] = name if type: atts['type'] = type g = objectify.Element('Group', attrib=atts) for p in params: g.append(p) return g
python
def Group(params, name=None, type=None): """Groups together Params for adding under the 'What' section. Args: params(list of :func:`Param`): Parameter elements to go in this group. name(str): Group name. NB ``None`` is valid, since the group may be best identified by its type. type(str): Type of group, e.g. 'complex' (for real and imaginary). """ atts = {} if name: atts['name'] = name if type: atts['type'] = type g = objectify.Element('Group', attrib=atts) for p in params: g.append(p) return g
[ "def", "Group", "(", "params", ",", "name", "=", "None", ",", "type", "=", "None", ")", ":", "atts", "=", "{", "}", "if", "name", ":", "atts", "[", "'name'", "]", "=", "name", "if", "type", ":", "atts", "[", "'type'", "]", "=", "type", "g", "...
Groups together Params for adding under the 'What' section. Args: params(list of :func:`Param`): Parameter elements to go in this group. name(str): Group name. NB ``None`` is valid, since the group may be best identified by its type. type(str): Type of group, e.g. 'complex' (for real and imaginary).
[ "Groups", "together", "Params", "for", "adding", "under", "the", "What", "section", "." ]
train
https://github.com/timstaley/voevent-parse/blob/58fc1eb3af5eca23d9e819c727204950615402a7/src/voeventparse/misc.py#L95-L112
timstaley/voevent-parse
src/voeventparse/misc.py
Reference
def Reference(uri, meaning=None): """ Represents external information, typically original obs data and metadata. Args: uri(str): Uniform resource identifier for external data, e.g. FITS file. meaning(str): The nature of the document referenced, e.g. what instrument and filter was used to create the data? """ attrib = {'uri': uri} if meaning is not None: attrib['meaning'] = meaning return objectify.Element('Reference', attrib)
python
def Reference(uri, meaning=None): """ Represents external information, typically original obs data and metadata. Args: uri(str): Uniform resource identifier for external data, e.g. FITS file. meaning(str): The nature of the document referenced, e.g. what instrument and filter was used to create the data? """ attrib = {'uri': uri} if meaning is not None: attrib['meaning'] = meaning return objectify.Element('Reference', attrib)
[ "def", "Reference", "(", "uri", ",", "meaning", "=", "None", ")", ":", "attrib", "=", "{", "'uri'", ":", "uri", "}", "if", "meaning", "is", "not", "None", ":", "attrib", "[", "'meaning'", "]", "=", "meaning", "return", "objectify", ".", "Element", "(...
Represents external information, typically original obs data and metadata. Args: uri(str): Uniform resource identifier for external data, e.g. FITS file. meaning(str): The nature of the document referenced, e.g. what instrument and filter was used to create the data?
[ "Represents", "external", "information", "typically", "original", "obs", "data", "and", "metadata", "." ]
train
https://github.com/timstaley/voevent-parse/blob/58fc1eb3af5eca23d9e819c727204950615402a7/src/voeventparse/misc.py#L115-L127
timstaley/voevent-parse
src/voeventparse/misc.py
Inference
def Inference(probability=None, relation=None, name=None, concept=None): """Represents a probable cause / relation between this event and some prior. Args: probability(float): Value 0.0 to 1.0. relation(str): e.g. 'associated' or 'identified' (see Voevent spec) name(str): e.g. name of identified progenitor. concept(str): One of a 'formal UCD-like vocabulary of astronomical concepts', e.g. http://ivoat.ivoa.net/stars.supernova.Ia - see VOEvent spec. """ atts = {} if probability is not None: atts['probability'] = str(probability) if relation is not None: atts['relation'] = relation inf = objectify.Element('Inference', attrib=atts) if name is not None: inf.Name = name if concept is not None: inf.Concept = concept return inf
python
def Inference(probability=None, relation=None, name=None, concept=None): """Represents a probable cause / relation between this event and some prior. Args: probability(float): Value 0.0 to 1.0. relation(str): e.g. 'associated' or 'identified' (see Voevent spec) name(str): e.g. name of identified progenitor. concept(str): One of a 'formal UCD-like vocabulary of astronomical concepts', e.g. http://ivoat.ivoa.net/stars.supernova.Ia - see VOEvent spec. """ atts = {} if probability is not None: atts['probability'] = str(probability) if relation is not None: atts['relation'] = relation inf = objectify.Element('Inference', attrib=atts) if name is not None: inf.Name = name if concept is not None: inf.Concept = concept return inf
[ "def", "Inference", "(", "probability", "=", "None", ",", "relation", "=", "None", ",", "name", "=", "None", ",", "concept", "=", "None", ")", ":", "atts", "=", "{", "}", "if", "probability", "is", "not", "None", ":", "atts", "[", "'probability'", "]...
Represents a probable cause / relation between this event and some prior. Args: probability(float): Value 0.0 to 1.0. relation(str): e.g. 'associated' or 'identified' (see Voevent spec) name(str): e.g. name of identified progenitor. concept(str): One of a 'formal UCD-like vocabulary of astronomical concepts', e.g. http://ivoat.ivoa.net/stars.supernova.Ia - see VOEvent spec.
[ "Represents", "a", "probable", "cause", "/", "relation", "between", "this", "event", "and", "some", "prior", "." ]
train
https://github.com/timstaley/voevent-parse/blob/58fc1eb3af5eca23d9e819c727204950615402a7/src/voeventparse/misc.py#L130-L151
timstaley/voevent-parse
src/voeventparse/misc.py
EventIvorn
def EventIvorn(ivorn, cite_type): """ Used to cite earlier VOEvents. Use in conjunction with :func:`.add_citations` Args: ivorn(str): It is assumed this will be copied verbatim from elsewhere, and so these should have any prefix (e.g. 'ivo://','http://') already in place - the function will not alter the value. cite_type (:class:`.definitions.cite_types`): String conforming to one of the standard citation types. """ # This is an ugly hack around the limitations of the lxml.objectify API: c = objectify.StringElement(cite=cite_type) c._setText(ivorn) c.tag = "EventIVORN" return c
python
def EventIvorn(ivorn, cite_type): """ Used to cite earlier VOEvents. Use in conjunction with :func:`.add_citations` Args: ivorn(str): It is assumed this will be copied verbatim from elsewhere, and so these should have any prefix (e.g. 'ivo://','http://') already in place - the function will not alter the value. cite_type (:class:`.definitions.cite_types`): String conforming to one of the standard citation types. """ # This is an ugly hack around the limitations of the lxml.objectify API: c = objectify.StringElement(cite=cite_type) c._setText(ivorn) c.tag = "EventIVORN" return c
[ "def", "EventIvorn", "(", "ivorn", ",", "cite_type", ")", ":", "# This is an ugly hack around the limitations of the lxml.objectify API:", "c", "=", "objectify", ".", "StringElement", "(", "cite", "=", "cite_type", ")", "c", ".", "_setText", "(", "ivorn", ")", "c",...
Used to cite earlier VOEvents. Use in conjunction with :func:`.add_citations` Args: ivorn(str): It is assumed this will be copied verbatim from elsewhere, and so these should have any prefix (e.g. 'ivo://','http://') already in place - the function will not alter the value. cite_type (:class:`.definitions.cite_types`): String conforming to one of the standard citation types.
[ "Used", "to", "cite", "earlier", "VOEvents", "." ]
train
https://github.com/timstaley/voevent-parse/blob/58fc1eb3af5eca23d9e819c727204950615402a7/src/voeventparse/misc.py#L154-L172
timstaley/voevent-parse
src/voeventparse/misc.py
Citation
def Citation(ivorn, cite_type): """ Deprecated alias of :func:`.EventIvorn` """ import warnings warnings.warn( """ `Citation` class has been renamed `EventIvorn` to reflect naming conventions in the VOEvent standard. As such this class name is a deprecated alias and may be removed in a future release. """, FutureWarning) return EventIvorn(ivorn, cite_type)
python
def Citation(ivorn, cite_type): """ Deprecated alias of :func:`.EventIvorn` """ import warnings warnings.warn( """ `Citation` class has been renamed `EventIvorn` to reflect naming conventions in the VOEvent standard. As such this class name is a deprecated alias and may be removed in a future release. """, FutureWarning) return EventIvorn(ivorn, cite_type)
[ "def", "Citation", "(", "ivorn", ",", "cite_type", ")", ":", "import", "warnings", "warnings", ".", "warn", "(", "\"\"\"\n `Citation` class has been renamed `EventIvorn` to reflect naming\n conventions in the VOEvent standard.\n As such this class name is a deprecate...
Deprecated alias of :func:`.EventIvorn`
[ "Deprecated", "alias", "of", ":", "func", ":", ".", "EventIvorn" ]
train
https://github.com/timstaley/voevent-parse/blob/58fc1eb3af5eca23d9e819c727204950615402a7/src/voeventparse/misc.py#L175-L188
datasift/datasift-python
datasift/odp.py
Odp.batch
def batch(self, source_id, data): """ Upload data to the given soruce :param source_id: The ID of the source to upload to :type source_id: str :param data: The data to upload to the source :type data: list :return: dict of REST API output with headers attached :rtype: :class:`~datasift.request.DictResponse` :raises: :class:`~datasift.exceptions.DataSiftApiException`, :class:`requests.exceptions.HTTPError`, :class:`~datasift.exceptions.BadRequest` """ if type(data) is not list or type(data[0]) is not dict: raise BadRequest("Ingestion data must be a list of dicts") data = "\r\n".join(map(json.dumps, data)) return self.request.post(source_id, data, {'Accept-Encoding': 'application/text'})
python
def batch(self, source_id, data): """ Upload data to the given soruce :param source_id: The ID of the source to upload to :type source_id: str :param data: The data to upload to the source :type data: list :return: dict of REST API output with headers attached :rtype: :class:`~datasift.request.DictResponse` :raises: :class:`~datasift.exceptions.DataSiftApiException`, :class:`requests.exceptions.HTTPError`, :class:`~datasift.exceptions.BadRequest` """ if type(data) is not list or type(data[0]) is not dict: raise BadRequest("Ingestion data must be a list of dicts") data = "\r\n".join(map(json.dumps, data)) return self.request.post(source_id, data, {'Accept-Encoding': 'application/text'})
[ "def", "batch", "(", "self", ",", "source_id", ",", "data", ")", ":", "if", "type", "(", "data", ")", "is", "not", "list", "or", "type", "(", "data", "[", "0", "]", ")", "is", "not", "dict", ":", "raise", "BadRequest", "(", "\"Ingestion data must be ...
Upload data to the given soruce :param source_id: The ID of the source to upload to :type source_id: str :param data: The data to upload to the source :type data: list :return: dict of REST API output with headers attached :rtype: :class:`~datasift.request.DictResponse` :raises: :class:`~datasift.exceptions.DataSiftApiException`, :class:`requests.exceptions.HTTPError`, :class:`~datasift.exceptions.BadRequest`
[ "Upload", "data", "to", "the", "given", "soruce" ]
train
https://github.com/datasift/datasift-python/blob/bfaca1a47501a18e11065ecf630d9c31df818f65/datasift/odp.py#L13-L29
yunojuno-archive/django-package-monitor
package_monitor/management/commands/refresh_packages.py
create_package_version
def create_package_version(requirement): """Create a new PackageVersion from a requirement. Handles errors.""" try: PackageVersion(requirement=requirement).save() logger.info("Package '%s' added.", requirement.name) # noqa except IntegrityError: logger.info("Package '%s' already exists.", requirement.name)
python
def create_package_version(requirement): """Create a new PackageVersion from a requirement. Handles errors.""" try: PackageVersion(requirement=requirement).save() logger.info("Package '%s' added.", requirement.name) # noqa except IntegrityError: logger.info("Package '%s' already exists.", requirement.name)
[ "def", "create_package_version", "(", "requirement", ")", ":", "try", ":", "PackageVersion", "(", "requirement", "=", "requirement", ")", ".", "save", "(", ")", "logger", ".", "info", "(", "\"Package '%s' added.\"", ",", "requirement", ".", "name", ")", "# noq...
Create a new PackageVersion from a requirement. Handles errors.
[ "Create", "a", "new", "PackageVersion", "from", "a", "requirement", ".", "Handles", "errors", "." ]
train
https://github.com/yunojuno-archive/django-package-monitor/blob/534aa35ccfe187d2c55aeca0cb52b8278254e437/package_monitor/management/commands/refresh_packages.py#L19-L25
yunojuno-archive/django-package-monitor
package_monitor/management/commands/refresh_packages.py
local
def local(): """Load local requirements file.""" logger.info("Loading requirements from local file.") with open(REQUIREMENTS_FILE, 'r') as f: requirements = parse(f) for r in requirements: logger.debug("Creating new package: %r", r) create_package_version(r)
python
def local(): """Load local requirements file.""" logger.info("Loading requirements from local file.") with open(REQUIREMENTS_FILE, 'r') as f: requirements = parse(f) for r in requirements: logger.debug("Creating new package: %r", r) create_package_version(r)
[ "def", "local", "(", ")", ":", "logger", ".", "info", "(", "\"Loading requirements from local file.\"", ")", "with", "open", "(", "REQUIREMENTS_FILE", ",", "'r'", ")", "as", "f", ":", "requirements", "=", "parse", "(", "f", ")", "for", "r", "in", "requirem...
Load local requirements file.
[ "Load", "local", "requirements", "file", "." ]
train
https://github.com/yunojuno-archive/django-package-monitor/blob/534aa35ccfe187d2c55aeca0cb52b8278254e437/package_monitor/management/commands/refresh_packages.py#L28-L35
yunojuno-archive/django-package-monitor
package_monitor/management/commands/refresh_packages.py
remote
def remote(): """Update package info from PyPI.""" logger.info("Fetching latest data from PyPI.") results = defaultdict(list) packages = PackageVersion.objects.exclude(is_editable=True) for pv in packages: pv.update_from_pypi() results[pv.diff_status].append(pv) logger.debug("Updated package from PyPI: %r", pv) results['refreshed_at'] = tz_now() return results
python
def remote(): """Update package info from PyPI.""" logger.info("Fetching latest data from PyPI.") results = defaultdict(list) packages = PackageVersion.objects.exclude(is_editable=True) for pv in packages: pv.update_from_pypi() results[pv.diff_status].append(pv) logger.debug("Updated package from PyPI: %r", pv) results['refreshed_at'] = tz_now() return results
[ "def", "remote", "(", ")", ":", "logger", ".", "info", "(", "\"Fetching latest data from PyPI.\"", ")", "results", "=", "defaultdict", "(", "list", ")", "packages", "=", "PackageVersion", ".", "objects", ".", "exclude", "(", "is_editable", "=", "True", ")", ...
Update package info from PyPI.
[ "Update", "package", "info", "from", "PyPI", "." ]
train
https://github.com/yunojuno-archive/django-package-monitor/blob/534aa35ccfe187d2c55aeca0cb52b8278254e437/package_monitor/management/commands/refresh_packages.py#L38-L48
yunojuno-archive/django-package-monitor
package_monitor/management/commands/refresh_packages.py
Command.handle
def handle(self, *args, **options): """Run the managemement command.""" if options['clean']: clean() if options['local']: local() if options['remote']: results = remote() render = lambda t: render_to_string(t, results) if options['notify']: send_mail( options['subject'], render('summary.txt'), options['from'], [options['notify']], html_message=render('summary.html'), fail_silently=False, )
python
def handle(self, *args, **options): """Run the managemement command.""" if options['clean']: clean() if options['local']: local() if options['remote']: results = remote() render = lambda t: render_to_string(t, results) if options['notify']: send_mail( options['subject'], render('summary.txt'), options['from'], [options['notify']], html_message=render('summary.html'), fail_silently=False, )
[ "def", "handle", "(", "self", ",", "*", "args", ",", "*", "*", "options", ")", ":", "if", "options", "[", "'clean'", "]", ":", "clean", "(", ")", "if", "options", "[", "'local'", "]", ":", "local", "(", ")", "if", "options", "[", "'remote'", "]",...
Run the managemement command.
[ "Run", "the", "managemement", "command", "." ]
train
https://github.com/yunojuno-archive/django-package-monitor/blob/534aa35ccfe187d2c55aeca0cb52b8278254e437/package_monitor/management/commands/refresh_packages.py#L101-L120
datasift/datasift-python
datasift/historics_preview.py
HistoricsPreview.create
def create(self, stream, start, parameters, sources, end=None): """ Create a hitorics preview job. Uses API documented at http://dev.datasift.com/docs/api/rest-api/endpoints/previewcreate :param stream: hash of the CSDL filter to create the job for :type stream: str :param start: Unix timestamp for the start of the period :type start: int :param parameters: list of historics preview parameters, can be found at http://dev.datasift.com/docs/api/rest-api/endpoints/previewcreate :type parameters: list :param sources: list of sources to include, eg. ['tumblr','facebook'] :type sources: list :param end: (optional) Unix timestamp for the end of the period, defaults to min(start+24h, now-1h) :type end: int :return: dict of REST API output with headers attached :rtype: :class:`~datasift.request.DictResponse` :raises: :class:`~datasift.exceptions.HistoricSourcesRequired`, :class:`~datasift.exceptions.DataSiftApiException`, :class:`requests.exceptions.HTTPError` """ if len(sources) == 0: raise HistoricSourcesRequired() if isinstance(sources, six.string_types): sources = [sources] params = {'hash': stream, 'start': start, 'sources': ','.join(sources), 'parameters': ','.join(parameters)} if end: params['end'] = end return self.request.post('create', params)
python
def create(self, stream, start, parameters, sources, end=None): """ Create a hitorics preview job. Uses API documented at http://dev.datasift.com/docs/api/rest-api/endpoints/previewcreate :param stream: hash of the CSDL filter to create the job for :type stream: str :param start: Unix timestamp for the start of the period :type start: int :param parameters: list of historics preview parameters, can be found at http://dev.datasift.com/docs/api/rest-api/endpoints/previewcreate :type parameters: list :param sources: list of sources to include, eg. ['tumblr','facebook'] :type sources: list :param end: (optional) Unix timestamp for the end of the period, defaults to min(start+24h, now-1h) :type end: int :return: dict of REST API output with headers attached :rtype: :class:`~datasift.request.DictResponse` :raises: :class:`~datasift.exceptions.HistoricSourcesRequired`, :class:`~datasift.exceptions.DataSiftApiException`, :class:`requests.exceptions.HTTPError` """ if len(sources) == 0: raise HistoricSourcesRequired() if isinstance(sources, six.string_types): sources = [sources] params = {'hash': stream, 'start': start, 'sources': ','.join(sources), 'parameters': ','.join(parameters)} if end: params['end'] = end return self.request.post('create', params)
[ "def", "create", "(", "self", ",", "stream", ",", "start", ",", "parameters", ",", "sources", ",", "end", "=", "None", ")", ":", "if", "len", "(", "sources", ")", "==", "0", ":", "raise", "HistoricSourcesRequired", "(", ")", "if", "isinstance", "(", ...
Create a hitorics preview job. Uses API documented at http://dev.datasift.com/docs/api/rest-api/endpoints/previewcreate :param stream: hash of the CSDL filter to create the job for :type stream: str :param start: Unix timestamp for the start of the period :type start: int :param parameters: list of historics preview parameters, can be found at http://dev.datasift.com/docs/api/rest-api/endpoints/previewcreate :type parameters: list :param sources: list of sources to include, eg. ['tumblr','facebook'] :type sources: list :param end: (optional) Unix timestamp for the end of the period, defaults to min(start+24h, now-1h) :type end: int :return: dict of REST API output with headers attached :rtype: :class:`~datasift.request.DictResponse` :raises: :class:`~datasift.exceptions.HistoricSourcesRequired`, :class:`~datasift.exceptions.DataSiftApiException`, :class:`requests.exceptions.HTTPError`
[ "Create", "a", "hitorics", "preview", "job", "." ]
train
https://github.com/datasift/datasift-python/blob/bfaca1a47501a18e11065ecf630d9c31df818f65/datasift/historics_preview.py#L12-L38
datasift/datasift-python
datasift/historics_preview.py
HistoricsPreview.get
def get(self, preview_id): """ Retrieve a Historics preview job. Warning: previews expire after 24 hours. Uses API documented at http://dev.datasift.com/docs/api/rest-api/endpoints/previewget :param preview_id: historics preview job hash of the job to retrieve :type preview_id: str :return: dict of REST API output with headers attached :rtype: :class:`~datasift.request.DictResponse` :raises: :class:`~datasift.exceptions.DataSiftApiException`, :class:`requests.exceptions.HTTPError` """ return self.request.get('get', params=dict(id=preview_id))
python
def get(self, preview_id): """ Retrieve a Historics preview job. Warning: previews expire after 24 hours. Uses API documented at http://dev.datasift.com/docs/api/rest-api/endpoints/previewget :param preview_id: historics preview job hash of the job to retrieve :type preview_id: str :return: dict of REST API output with headers attached :rtype: :class:`~datasift.request.DictResponse` :raises: :class:`~datasift.exceptions.DataSiftApiException`, :class:`requests.exceptions.HTTPError` """ return self.request.get('get', params=dict(id=preview_id))
[ "def", "get", "(", "self", ",", "preview_id", ")", ":", "return", "self", ".", "request", ".", "get", "(", "'get'", ",", "params", "=", "dict", "(", "id", "=", "preview_id", ")", ")" ]
Retrieve a Historics preview job. Warning: previews expire after 24 hours. Uses API documented at http://dev.datasift.com/docs/api/rest-api/endpoints/previewget :param preview_id: historics preview job hash of the job to retrieve :type preview_id: str :return: dict of REST API output with headers attached :rtype: :class:`~datasift.request.DictResponse` :raises: :class:`~datasift.exceptions.DataSiftApiException`, :class:`requests.exceptions.HTTPError`
[ "Retrieve", "a", "Historics", "preview", "job", "." ]
train
https://github.com/datasift/datasift-python/blob/bfaca1a47501a18e11065ecf630d9c31df818f65/datasift/historics_preview.py#L40-L53
datasift/datasift-python
datasift/historics.py
Historics.prepare
def prepare(self, hash, start, end, name, sources, sample=None): """ Prepare a historics query which can later be started. Uses API documented at http://dev.datasift.com/docs/api/rest-api/endpoints/historicsprepare :param hash: The hash of a CSDL create the query for :type hash: str :param start: when to start querying data from - unix timestamp :type start: int :param end: when the query should end - unix timestamp :type end: int :param name: the name of the query :type name: str :param sources: list of sources e.g. ['facebook','bitly','tumblr'] :type sources: list :param sample: percentage to sample, either 10 or 100 :type sample: int :return: dict of REST API output with headers attached :rtype: :class:`~datasift.request.DictResponse` :raises: :class:`~datasift.exceptions.HistoricSourcesRequired`, :class:`~datasift.exceptions.DataSiftApiException`, :class:`requests.exceptions.HTTPError` """ if len(sources) == 0: raise HistoricSourcesRequired() if not isinstance(sources, list): sources = [sources] params = {'hash': hash, 'start': start, 'end': end, 'name': name, 'sources': ','.join(sources)} if sample: params['sample'] = sample return self.request.post('prepare', params)
python
def prepare(self, hash, start, end, name, sources, sample=None): """ Prepare a historics query which can later be started. Uses API documented at http://dev.datasift.com/docs/api/rest-api/endpoints/historicsprepare :param hash: The hash of a CSDL create the query for :type hash: str :param start: when to start querying data from - unix timestamp :type start: int :param end: when the query should end - unix timestamp :type end: int :param name: the name of the query :type name: str :param sources: list of sources e.g. ['facebook','bitly','tumblr'] :type sources: list :param sample: percentage to sample, either 10 or 100 :type sample: int :return: dict of REST API output with headers attached :rtype: :class:`~datasift.request.DictResponse` :raises: :class:`~datasift.exceptions.HistoricSourcesRequired`, :class:`~datasift.exceptions.DataSiftApiException`, :class:`requests.exceptions.HTTPError` """ if len(sources) == 0: raise HistoricSourcesRequired() if not isinstance(sources, list): sources = [sources] params = {'hash': hash, 'start': start, 'end': end, 'name': name, 'sources': ','.join(sources)} if sample: params['sample'] = sample return self.request.post('prepare', params)
[ "def", "prepare", "(", "self", ",", "hash", ",", "start", ",", "end", ",", "name", ",", "sources", ",", "sample", "=", "None", ")", ":", "if", "len", "(", "sources", ")", "==", "0", ":", "raise", "HistoricSourcesRequired", "(", ")", "if", "not", "i...
Prepare a historics query which can later be started. Uses API documented at http://dev.datasift.com/docs/api/rest-api/endpoints/historicsprepare :param hash: The hash of a CSDL create the query for :type hash: str :param start: when to start querying data from - unix timestamp :type start: int :param end: when the query should end - unix timestamp :type end: int :param name: the name of the query :type name: str :param sources: list of sources e.g. ['facebook','bitly','tumblr'] :type sources: list :param sample: percentage to sample, either 10 or 100 :type sample: int :return: dict of REST API output with headers attached :rtype: :class:`~datasift.request.DictResponse` :raises: :class:`~datasift.exceptions.HistoricSourcesRequired`, :class:`~datasift.exceptions.DataSiftApiException`, :class:`requests.exceptions.HTTPError`
[ "Prepare", "a", "historics", "query", "which", "can", "later", "be", "started", "." ]
train
https://github.com/datasift/datasift-python/blob/bfaca1a47501a18e11065ecf630d9c31df818f65/datasift/historics.py#L10-L39
datasift/datasift-python
datasift/historics.py
Historics.start
def start(self, historics_id): """ Start the historics job with the given ID. Uses API documented at http://dev.datasift.com/docs/api/rest-api/endpoints/historicsstart :param historics_id: hash of the job to start :type historics_id: str :return: dict of REST API output with headers attached :rtype: :class:`~datasift.request.DictResponse` :raises: :class:`~datasift.exceptions.DataSiftApiException`, :class:`requests.exceptions.HTTPError` """ return self.request.post('start', data=dict(id=historics_id))
python
def start(self, historics_id): """ Start the historics job with the given ID. Uses API documented at http://dev.datasift.com/docs/api/rest-api/endpoints/historicsstart :param historics_id: hash of the job to start :type historics_id: str :return: dict of REST API output with headers attached :rtype: :class:`~datasift.request.DictResponse` :raises: :class:`~datasift.exceptions.DataSiftApiException`, :class:`requests.exceptions.HTTPError` """ return self.request.post('start', data=dict(id=historics_id))
[ "def", "start", "(", "self", ",", "historics_id", ")", ":", "return", "self", ".", "request", ".", "post", "(", "'start'", ",", "data", "=", "dict", "(", "id", "=", "historics_id", ")", ")" ]
Start the historics job with the given ID. Uses API documented at http://dev.datasift.com/docs/api/rest-api/endpoints/historicsstart :param historics_id: hash of the job to start :type historics_id: str :return: dict of REST API output with headers attached :rtype: :class:`~datasift.request.DictResponse` :raises: :class:`~datasift.exceptions.DataSiftApiException`, :class:`requests.exceptions.HTTPError`
[ "Start", "the", "historics", "job", "with", "the", "given", "ID", "." ]
train
https://github.com/datasift/datasift-python/blob/bfaca1a47501a18e11065ecf630d9c31df818f65/datasift/historics.py#L41-L52
datasift/datasift-python
datasift/historics.py
Historics.update
def update(self, historics_id, name): """ Update the name of the given Historics query. Uses API documented at http://dev.datasift.com/docs/api/rest-api/endpoints/historicsupdate :param historics_id: playback id of the job to start :type historics_id: str :param name: new name of the stream :type name: str :return: dict of REST API output with headers attached :rtype: :class:`~datasift.request.DictResponse` :raises: :class:`~datasift.exceptions.DataSiftApiException`, :class:`requests.exceptions.HTTPError` """ return self.request.post('update', data=dict(id=historics_id, name=name))
python
def update(self, historics_id, name): """ Update the name of the given Historics query. Uses API documented at http://dev.datasift.com/docs/api/rest-api/endpoints/historicsupdate :param historics_id: playback id of the job to start :type historics_id: str :param name: new name of the stream :type name: str :return: dict of REST API output with headers attached :rtype: :class:`~datasift.request.DictResponse` :raises: :class:`~datasift.exceptions.DataSiftApiException`, :class:`requests.exceptions.HTTPError` """ return self.request.post('update', data=dict(id=historics_id, name=name))
[ "def", "update", "(", "self", ",", "historics_id", ",", "name", ")", ":", "return", "self", ".", "request", ".", "post", "(", "'update'", ",", "data", "=", "dict", "(", "id", "=", "historics_id", ",", "name", "=", "name", ")", ")" ]
Update the name of the given Historics query. Uses API documented at http://dev.datasift.com/docs/api/rest-api/endpoints/historicsupdate :param historics_id: playback id of the job to start :type historics_id: str :param name: new name of the stream :type name: str :return: dict of REST API output with headers attached :rtype: :class:`~datasift.request.DictResponse` :raises: :class:`~datasift.exceptions.DataSiftApiException`, :class:`requests.exceptions.HTTPError`
[ "Update", "the", "name", "of", "the", "given", "Historics", "query", "." ]
train
https://github.com/datasift/datasift-python/blob/bfaca1a47501a18e11065ecf630d9c31df818f65/datasift/historics.py#L54-L67
datasift/datasift-python
datasift/historics.py
Historics.stop
def stop(self, historics_id, reason=''): """ Stop an existing Historics query. Uses API documented at http://dev.datasift.com/docs/api/rest-api/endpoints/historicsstop :param historics_id: playback id of the job to stop :type historics_id: str :param reason: optional reason for stopping the job :type reason: str :return: dict of REST API output with headers attached :rtype: :class:`~datasift.request.DictResponse` :raises: :class:`~datasift.exceptions.DataSiftApiException`, :class:`requests.exceptions.HTTPError` """ return self.request.post('stop', data=dict(id=historics_id, reason=reason))
python
def stop(self, historics_id, reason=''): """ Stop an existing Historics query. Uses API documented at http://dev.datasift.com/docs/api/rest-api/endpoints/historicsstop :param historics_id: playback id of the job to stop :type historics_id: str :param reason: optional reason for stopping the job :type reason: str :return: dict of REST API output with headers attached :rtype: :class:`~datasift.request.DictResponse` :raises: :class:`~datasift.exceptions.DataSiftApiException`, :class:`requests.exceptions.HTTPError` """ return self.request.post('stop', data=dict(id=historics_id, reason=reason))
[ "def", "stop", "(", "self", ",", "historics_id", ",", "reason", "=", "''", ")", ":", "return", "self", ".", "request", ".", "post", "(", "'stop'", ",", "data", "=", "dict", "(", "id", "=", "historics_id", ",", "reason", "=", "reason", ")", ")" ]
Stop an existing Historics query. Uses API documented at http://dev.datasift.com/docs/api/rest-api/endpoints/historicsstop :param historics_id: playback id of the job to stop :type historics_id: str :param reason: optional reason for stopping the job :type reason: str :return: dict of REST API output with headers attached :rtype: :class:`~datasift.request.DictResponse` :raises: :class:`~datasift.exceptions.DataSiftApiException`, :class:`requests.exceptions.HTTPError`
[ "Stop", "an", "existing", "Historics", "query", "." ]
train
https://github.com/datasift/datasift-python/blob/bfaca1a47501a18e11065ecf630d9c31df818f65/datasift/historics.py#L69-L82
datasift/datasift-python
datasift/historics.py
Historics.status
def status(self, start, end, sources=None): """ Check the data coverage in the Historics archive for a given interval. Uses API documented at http://dev.datasift.com/docs/api/rest-api/endpoints/historicsstatus :param start: Unix timestamp for the start time :type start: int :param end: Unix timestamp for the start time :type end: int :param sources: list of data sources to include. :type sources: list :return: dict of REST API output with headers attached :rtype: :class:`~datasift.request.DictResponse` :raises: :class:`~datasift.exceptions.DataSiftApiException`, :class:`requests.exceptions.HTTPError` """ params = {'start': start, 'end': end} if sources: params['sources'] = ','.join(sources) return self.request.get('status', params=params)
python
def status(self, start, end, sources=None): """ Check the data coverage in the Historics archive for a given interval. Uses API documented at http://dev.datasift.com/docs/api/rest-api/endpoints/historicsstatus :param start: Unix timestamp for the start time :type start: int :param end: Unix timestamp for the start time :type end: int :param sources: list of data sources to include. :type sources: list :return: dict of REST API output with headers attached :rtype: :class:`~datasift.request.DictResponse` :raises: :class:`~datasift.exceptions.DataSiftApiException`, :class:`requests.exceptions.HTTPError` """ params = {'start': start, 'end': end} if sources: params['sources'] = ','.join(sources) return self.request.get('status', params=params)
[ "def", "status", "(", "self", ",", "start", ",", "end", ",", "sources", "=", "None", ")", ":", "params", "=", "{", "'start'", ":", "start", ",", "'end'", ":", "end", "}", "if", "sources", ":", "params", "[", "'sources'", "]", "=", "','", ".", "jo...
Check the data coverage in the Historics archive for a given interval. Uses API documented at http://dev.datasift.com/docs/api/rest-api/endpoints/historicsstatus :param start: Unix timestamp for the start time :type start: int :param end: Unix timestamp for the start time :type end: int :param sources: list of data sources to include. :type sources: list :return: dict of REST API output with headers attached :rtype: :class:`~datasift.request.DictResponse` :raises: :class:`~datasift.exceptions.DataSiftApiException`, :class:`requests.exceptions.HTTPError`
[ "Check", "the", "data", "coverage", "in", "the", "Historics", "archive", "for", "a", "given", "interval", "." ]
train
https://github.com/datasift/datasift-python/blob/bfaca1a47501a18e11065ecf630d9c31df818f65/datasift/historics.py#L84-L102
datasift/datasift-python
datasift/historics.py
Historics.delete
def delete(self, historics_id): """ Delete one specified playback query. If the query is currently running, stop it. status_code is set to 204 on success Uses API documented at http://dev.datasift.com/docs/api/rest-api/endpoints/historicsdelete :param historics_id: playback id of the query to delete :type historics_id: str :return: dict of REST API output with headers attached :rtype: :class:`~datasift.request.DictResponse` :raises: :class:`~datasift.exceptions.DataSiftApiException`, :class:`requests.exceptions.HTTPError` """ return self.request.post('delete', data=dict(id=historics_id))
python
def delete(self, historics_id): """ Delete one specified playback query. If the query is currently running, stop it. status_code is set to 204 on success Uses API documented at http://dev.datasift.com/docs/api/rest-api/endpoints/historicsdelete :param historics_id: playback id of the query to delete :type historics_id: str :return: dict of REST API output with headers attached :rtype: :class:`~datasift.request.DictResponse` :raises: :class:`~datasift.exceptions.DataSiftApiException`, :class:`requests.exceptions.HTTPError` """ return self.request.post('delete', data=dict(id=historics_id))
[ "def", "delete", "(", "self", ",", "historics_id", ")", ":", "return", "self", ".", "request", ".", "post", "(", "'delete'", ",", "data", "=", "dict", "(", "id", "=", "historics_id", ")", ")" ]
Delete one specified playback query. If the query is currently running, stop it. status_code is set to 204 on success Uses API documented at http://dev.datasift.com/docs/api/rest-api/endpoints/historicsdelete :param historics_id: playback id of the query to delete :type historics_id: str :return: dict of REST API output with headers attached :rtype: :class:`~datasift.request.DictResponse` :raises: :class:`~datasift.exceptions.DataSiftApiException`, :class:`requests.exceptions.HTTPError`
[ "Delete", "one", "specified", "playback", "query", ".", "If", "the", "query", "is", "currently", "running", "stop", "it", "." ]
train
https://github.com/datasift/datasift-python/blob/bfaca1a47501a18e11065ecf630d9c31df818f65/datasift/historics.py#L104-L117
datasift/datasift-python
datasift/historics.py
Historics.get_for
def get_for(self, historics_id, with_estimate=None): """ Get the historic query for the given ID Uses API documented at http://dev.datasift.com/docs/api/rest-api/endpoints/historicsget :param historics_id: playback id of the query :type historics_id: str :return: dict of REST API output with headers attached :rtype: :class:`~datasift.request.DictResponse` :raises: :class:`~datasift.exceptions.DataSiftApiException`, :class:`requests.exceptions.HTTPError` """ return self.get(historics_id, maximum=None, page=None, with_estimate=with_estimate)
python
def get_for(self, historics_id, with_estimate=None): """ Get the historic query for the given ID Uses API documented at http://dev.datasift.com/docs/api/rest-api/endpoints/historicsget :param historics_id: playback id of the query :type historics_id: str :return: dict of REST API output with headers attached :rtype: :class:`~datasift.request.DictResponse` :raises: :class:`~datasift.exceptions.DataSiftApiException`, :class:`requests.exceptions.HTTPError` """ return self.get(historics_id, maximum=None, page=None, with_estimate=with_estimate)
[ "def", "get_for", "(", "self", ",", "historics_id", ",", "with_estimate", "=", "None", ")", ":", "return", "self", ".", "get", "(", "historics_id", ",", "maximum", "=", "None", ",", "page", "=", "None", ",", "with_estimate", "=", "with_estimate", ")" ]
Get the historic query for the given ID Uses API documented at http://dev.datasift.com/docs/api/rest-api/endpoints/historicsget :param historics_id: playback id of the query :type historics_id: str :return: dict of REST API output with headers attached :rtype: :class:`~datasift.request.DictResponse` :raises: :class:`~datasift.exceptions.DataSiftApiException`, :class:`requests.exceptions.HTTPError`
[ "Get", "the", "historic", "query", "for", "the", "given", "ID" ]
train
https://github.com/datasift/datasift-python/blob/bfaca1a47501a18e11065ecf630d9c31df818f65/datasift/historics.py#L119-L130
datasift/datasift-python
datasift/historics.py
Historics.get
def get(self, historics_id=None, maximum=None, page=None, with_estimate=None): """ Get the historics query with the given ID, if no ID is provided then get a list of historics queries. Uses API documented at http://dev.datasift.com/docs/api/rest-api/endpoints/historicsget :param historics_id: (optional) ID of the query to retrieve :type historics_id: str :param maximum: (optional) maximum number of queries to recieve (default 20) :type maximum: int :param page: (optional) page to retrieve for paginated queries :type page: int :param with_estimate: include estimate of completion time in output :type with_estimate: bool :param historics_id: playback id of the query :type historics_id: str :return: dict of REST API output with headers attached :rtype: :class:`~datasift.request.DictResponse` :raises: :class:`~datasift.exceptions.DataSiftApiException`, :class:`requests.exceptions.HTTPError` """ params = {'id': historics_id} if maximum: params['max'] = maximum if page: params['page'] = page params['with_estimate'] = 1 if with_estimate else 0 return self.request.get('get', params=params)
python
def get(self, historics_id=None, maximum=None, page=None, with_estimate=None): """ Get the historics query with the given ID, if no ID is provided then get a list of historics queries. Uses API documented at http://dev.datasift.com/docs/api/rest-api/endpoints/historicsget :param historics_id: (optional) ID of the query to retrieve :type historics_id: str :param maximum: (optional) maximum number of queries to recieve (default 20) :type maximum: int :param page: (optional) page to retrieve for paginated queries :type page: int :param with_estimate: include estimate of completion time in output :type with_estimate: bool :param historics_id: playback id of the query :type historics_id: str :return: dict of REST API output with headers attached :rtype: :class:`~datasift.request.DictResponse` :raises: :class:`~datasift.exceptions.DataSiftApiException`, :class:`requests.exceptions.HTTPError` """ params = {'id': historics_id} if maximum: params['max'] = maximum if page: params['page'] = page params['with_estimate'] = 1 if with_estimate else 0 return self.request.get('get', params=params)
[ "def", "get", "(", "self", ",", "historics_id", "=", "None", ",", "maximum", "=", "None", ",", "page", "=", "None", ",", "with_estimate", "=", "None", ")", ":", "params", "=", "{", "'id'", ":", "historics_id", "}", "if", "maximum", ":", "params", "["...
Get the historics query with the given ID, if no ID is provided then get a list of historics queries. Uses API documented at http://dev.datasift.com/docs/api/rest-api/endpoints/historicsget :param historics_id: (optional) ID of the query to retrieve :type historics_id: str :param maximum: (optional) maximum number of queries to recieve (default 20) :type maximum: int :param page: (optional) page to retrieve for paginated queries :type page: int :param with_estimate: include estimate of completion time in output :type with_estimate: bool :param historics_id: playback id of the query :type historics_id: str :return: dict of REST API output with headers attached :rtype: :class:`~datasift.request.DictResponse` :raises: :class:`~datasift.exceptions.DataSiftApiException`, :class:`requests.exceptions.HTTPError`
[ "Get", "the", "historics", "query", "with", "the", "given", "ID", "if", "no", "ID", "is", "provided", "then", "get", "a", "list", "of", "historics", "queries", "." ]
train
https://github.com/datasift/datasift-python/blob/bfaca1a47501a18e11065ecf630d9c31df818f65/datasift/historics.py#L132-L158
datasift/datasift-python
datasift/historics.py
Historics.pause
def pause(self, historics_id, reason=""): """ Pause an existing Historics query. Uses API documented at http://dev.datasift.com/docs/api/rest-api/endpoints/historicspause :param historics_id: id of the job to pause :type historics_id: str :param reason: optional reason for pausing it :type reason: str :return: dict of REST API output with headers attached :rtype: :class:`~datasift.request.DictResponse` :raises: :class:`~datasift.exceptions.DataSiftApiException`, :class:`requests.exceptions.HTTPError` """ params = {"id": historics_id} if reason != "": params["reason"] = reason return self.request.post('pause', data=params)
python
def pause(self, historics_id, reason=""): """ Pause an existing Historics query. Uses API documented at http://dev.datasift.com/docs/api/rest-api/endpoints/historicspause :param historics_id: id of the job to pause :type historics_id: str :param reason: optional reason for pausing it :type reason: str :return: dict of REST API output with headers attached :rtype: :class:`~datasift.request.DictResponse` :raises: :class:`~datasift.exceptions.DataSiftApiException`, :class:`requests.exceptions.HTTPError` """ params = {"id": historics_id} if reason != "": params["reason"] = reason return self.request.post('pause', data=params)
[ "def", "pause", "(", "self", ",", "historics_id", ",", "reason", "=", "\"\"", ")", ":", "params", "=", "{", "\"id\"", ":", "historics_id", "}", "if", "reason", "!=", "\"\"", ":", "params", "[", "\"reason\"", "]", "=", "reason", "return", "self", ".", ...
Pause an existing Historics query. Uses API documented at http://dev.datasift.com/docs/api/rest-api/endpoints/historicspause :param historics_id: id of the job to pause :type historics_id: str :param reason: optional reason for pausing it :type reason: str :return: dict of REST API output with headers attached :rtype: :class:`~datasift.request.DictResponse` :raises: :class:`~datasift.exceptions.DataSiftApiException`, :class:`requests.exceptions.HTTPError`
[ "Pause", "an", "existing", "Historics", "query", "." ]
train
https://github.com/datasift/datasift-python/blob/bfaca1a47501a18e11065ecf630d9c31df818f65/datasift/historics.py#L160-L176
datasift/datasift-python
datasift/historics.py
Historics.resume
def resume(self, historics_id):
    """ Resume a paused Historics query.

        Uses API documented at http://dev.datasift.com/docs/api/rest-api/endpoints/historicsresume

        :param historics_id: id of the job to resume
        :type historics_id: str
        :return: dict of REST API output with headers attached
        :rtype: :class:`~datasift.request.DictResponse`
        :raises: :class:`~datasift.exceptions.DataSiftApiException`,
            :class:`requests.exceptions.HTTPError`
    """
    payload = {"id": historics_id}
    return self.request.post('resume', data=payload)
python
def resume(self, historics_id): """ Resume a paused Historics query. Uses API documented at http://dev.datasift.com/docs/api/rest-api/endpoints/historicsresume :param historics_id: id of the job to resume :type historics_id: str :return: dict of REST API output with headers attached :rtype: :class:`~datasift.request.DictResponse` :raises: :class:`~datasift.exceptions.DataSiftApiException`, :class:`requests.exceptions.HTTPError` """ return self.request.post('resume', data=dict(id=historics_id))
[ "def", "resume", "(", "self", ",", "historics_id", ")", ":", "return", "self", ".", "request", ".", "post", "(", "'resume'", ",", "data", "=", "dict", "(", "id", "=", "historics_id", ")", ")" ]
Resume a paused Historics query. Uses API documented at http://dev.datasift.com/docs/api/rest-api/endpoints/historicsresume :param historics_id: id of the job to resume :type historics_id: str :return: dict of REST API output with headers attached :rtype: :class:`~datasift.request.DictResponse` :raises: :class:`~datasift.exceptions.DataSiftApiException`, :class:`requests.exceptions.HTTPError`
[ "Resume", "a", "paused", "Historics", "query", "." ]
train
https://github.com/datasift/datasift-python/blob/bfaca1a47501a18e11065ecf630d9c31df818f65/datasift/historics.py#L178-L189
datasift/datasift-python
datasift/managed_sources.py
Resource.remove
def remove(self, source_id, resource_ids):
    """ Remove one or more resources from a Managed Source.

        Uses API documented at http://dev.datasift.com/docs/api/rest-api/endpoints/sourceresourceremove

        :param source_id: target Source ID
        :type source_id: str
        :param resource_ids: IDs of the resources to remove
        :type resource_ids: list of str
        :return: dict of REST API output with headers attached
        :rtype: :class:`~datasift.request.DictResponse`
        :raises: :class:`~datasift.exceptions.DataSiftApiException`,
            :class:`requests.exceptions.HTTPError`
    """
    return self.request.post('remove', {'id': source_id, 'resource_ids': resource_ids})
python
def remove(self, source_id, resource_ids): """ Remove one or more resources from a Managed Source Uses API documented at http://dev.datasift.com/docs/api/rest-api/endpoints/sourceresourceremove :param source_id: target Source ID :type source_id: str :param resources: An array of the resource IDs that you would like to remove.. :type resources: array of str :return: dict of REST API output with headers attached :rtype: :class:`~datasift.request.DictResponse` :raises: :class:`~datasift.exceptions.DataSiftApiException`, :class:`requests.exceptions.HTTPError` """ params = {'id': source_id, 'resource_ids': resource_ids} return self.request.post('remove', params)
[ "def", "remove", "(", "self", ",", "source_id", ",", "resource_ids", ")", ":", "params", "=", "{", "'id'", ":", "source_id", ",", "'resource_ids'", ":", "resource_ids", "}", "return", "self", ".", "request", ".", "post", "(", "'remove'", ",", "params", "...
Remove one or more resources from a Managed Source Uses API documented at http://dev.datasift.com/docs/api/rest-api/endpoints/sourceresourceremove :param source_id: target Source ID :type source_id: str :param resources: An array of the resource IDs that you would like to remove.. :type resources: array of str :return: dict of REST API output with headers attached :rtype: :class:`~datasift.request.DictResponse` :raises: :class:`~datasift.exceptions.DataSiftApiException`, :class:`requests.exceptions.HTTPError`
[ "Remove", "one", "or", "more", "resources", "from", "a", "Managed", "Source" ]
train
https://github.com/datasift/datasift-python/blob/bfaca1a47501a18e11065ecf630d9c31df818f65/datasift/managed_sources.py#L26-L40
datasift/datasift-python
datasift/managed_sources.py
Auth.add
def add(self, source_id, auth, validate=True):
    """ Add one or more sets of authorization credentials to a Managed Source.

        Uses API documented at http://dev.datasift.com/docs/api/rest-api/endpoints/sourceauthadd

        :param source_id: target Source ID
        :type source_id: str
        :param auth: source-specific authorization credential sets to add
        :type auth: list of str
        :param validate: suppress credential validation when False (default True)
        :type validate: bool
        :return: dict of REST API output with headers attached
        :rtype: :class:`~datasift.request.DictResponse`
        :raises: :class:`~datasift.exceptions.DataSiftApiException`,
            :class:`requests.exceptions.HTTPError`
    """
    payload = {
        'id': source_id,
        'auth': auth,
        'validate': validate,
    }
    return self.request.post('add', payload)
python
def add(self, source_id, auth, validate=True): """ Add one or more sets of authorization credentials to a Managed Source Uses API documented at http://dev.datasift.com/docs/api/rest-api/endpoints/sourceauthadd :param source_id: target Source ID :type source_id: str :param auth: An array of the source-specific authorization credential sets that you're adding. :type auth: array of strings :param validate: Allows you to suppress the validation of the authorization credentials, defaults to true. :type validate: bool :return: dict of REST API output with headers attached :rtype: :class:`~datasift.request.DictResponse` :raises: :class:`~datasift.exceptions.DataSiftApiException`, :class:`requests.exceptions.HTTPError` """ params = {'id': source_id, 'auth': auth, 'validate': validate} return self.request.post('add', params)
[ "def", "add", "(", "self", ",", "source_id", ",", "auth", ",", "validate", "=", "True", ")", ":", "params", "=", "{", "'id'", ":", "source_id", ",", "'auth'", ":", "auth", ",", "'validate'", ":", "validate", "}", "return", "self", ".", "request", "."...
Add one or more sets of authorization credentials to a Managed Source Uses API documented at http://dev.datasift.com/docs/api/rest-api/endpoints/sourceauthadd :param source_id: target Source ID :type source_id: str :param auth: An array of the source-specific authorization credential sets that you're adding. :type auth: array of strings :param validate: Allows you to suppress the validation of the authorization credentials, defaults to true. :type validate: bool :return: dict of REST API output with headers attached :rtype: :class:`~datasift.request.DictResponse` :raises: :class:`~datasift.exceptions.DataSiftApiException`, :class:`requests.exceptions.HTTPError`
[ "Add", "one", "or", "more", "sets", "of", "authorization", "credentials", "to", "a", "Managed", "Source" ]
train
https://github.com/datasift/datasift-python/blob/bfaca1a47501a18e11065ecf630d9c31df818f65/datasift/managed_sources.py#L50-L66
datasift/datasift-python
datasift/managed_sources.py
Auth.remove
def remove(self, source_id, auth_ids):
    """ Remove one or more sets of authorization credentials from a Managed Source.

        Uses API documented at http://dev.datasift.com/docs/api/rest-api/endpoints/sourceauthremove

        :param source_id: target Source ID
        :type source_id: str
        :param auth_ids: IDs of the authorization credential sets to remove
        :type auth_ids: list of str
        :return: dict of REST API output with headers attached
        :rtype: :class:`~datasift.request.DictResponse`
        :raises: :class:`~datasift.exceptions.DataSiftApiException`,
            :class:`requests.exceptions.HTTPError`
    """
    payload = {'id': source_id, 'auth_ids': auth_ids}
    return self.request.post('remove', payload)
python
def remove(self, source_id, auth_ids): """ Remove one or more sets of authorization credentials from a Managed Source Uses API documented at http://dev.datasift.com/docs/api/rest-api/endpoints/sourceauthremove :param source_id: target Source ID :type source_id: str :param resources: An array of the authorization credential set IDs that you would like to remove. :type resources: array of str :return: dict of REST API output with headers attached :rtype: :class:`~datasift.request.DictResponse` :raises: :class:`~datasift.exceptions.DataSiftApiException`, :class:`requests.exceptions.HTTPError` """ params = {'id': source_id, 'auth_ids': auth_ids} return self.request.post('remove', params)
[ "def", "remove", "(", "self", ",", "source_id", ",", "auth_ids", ")", ":", "params", "=", "{", "'id'", ":", "source_id", ",", "'auth_ids'", ":", "auth_ids", "}", "return", "self", ".", "request", ".", "post", "(", "'remove'", ",", "params", ")" ]
Remove one or more sets of authorization credentials from a Managed Source Uses API documented at http://dev.datasift.com/docs/api/rest-api/endpoints/sourceauthremove :param source_id: target Source ID :type source_id: str :param resources: An array of the authorization credential set IDs that you would like to remove. :type resources: array of str :return: dict of REST API output with headers attached :rtype: :class:`~datasift.request.DictResponse` :raises: :class:`~datasift.exceptions.DataSiftApiException`, :class:`requests.exceptions.HTTPError`
[ "Remove", "one", "or", "more", "sets", "of", "authorization", "credentials", "from", "a", "Managed", "Source" ]
train
https://github.com/datasift/datasift-python/blob/bfaca1a47501a18e11065ecf630d9c31df818f65/datasift/managed_sources.py#L68-L82
datasift/datasift-python
datasift/managed_sources.py
ManagedSources.create
def create(self, source_type, name, resources, auth=None, parameters=None, validate=True):
    """ Create a Managed Source.

        Uses API documented at http://dev.datasift.com/docs/api/rest-api/endpoints/sourcecreate

        :param source_type: data source name e.g. facebook_page, googleplus, instagram, yammer
        :type source_type: str
        :param name: name used to identify the managed source being created
        :type name: str
        :param resources: source-specific config dicts (at least one required)
        :type resources: list
        :param auth: (optional) source-specific authentication dicts
        :type auth: list
        :param parameters: (optional) config describing how each resource is treated
        :type parameters: dict
        :param validate: perform validation on the source (default True)
        :type validate: bool
        :return: dict of REST API output with headers attached
        :rtype: :class:`~datasift.request.DictResponse`
        :raises: :class:`~datasift.exceptions.DataSiftApiException`,
            :class:`requests.exceptions.HTTPError`
    """
    assert resources, "Need at least one resource"
    payload = {
        'source_type': source_type,
        'name': name,
        'resources': resources,
        'validate': validate,
    }
    # Optional sections are only sent when truthy, matching the API contract.
    for key, value in (('auth', auth), ('parameters', parameters)):
        if value:
            payload[key] = value
    return self.request.post('create', payload)
python
def create(self, source_type, name, resources, auth=None, parameters=None, validate=True): """ Create a managed source Uses API documented at http://dev.datasift.com/docs/api/rest-api/endpoints/sourcecreate :param source_type: data source name e.g. facebook_page, googleplus, instagram, yammer :type source_type: str :param name: name to use to identify the managed source being created :type name: str :param resources: list of source-specific config dicts :type resources: list :param auth: list of source-specific authentication dicts :type auth: list :param parameters: (optional) dict with config information on how to treat each resource :type parameters: dict :param validate: bool to determine if validation should be performed on the source :type validate: bool :return: dict of REST API output with headers attached :rtype: :class:`~datasift.request.DictResponse` :raises: :class:`~datasift.exceptions.DataSiftApiException`, :class:`requests.exceptions.HTTPError` """ assert resources, "Need at least one resource" params = { 'source_type': source_type, 'name': name, 'resources': resources, 'validate': validate } if auth: params['auth'] = auth if parameters: params['parameters'] = parameters return self.request.post('create', params)
[ "def", "create", "(", "self", ",", "source_type", ",", "name", ",", "resources", ",", "auth", "=", "None", ",", "parameters", "=", "None", ",", "validate", "=", "True", ")", ":", "assert", "resources", ",", "\"Need at least one resource\"", "params", "=", ...
Create a managed source Uses API documented at http://dev.datasift.com/docs/api/rest-api/endpoints/sourcecreate :param source_type: data source name e.g. facebook_page, googleplus, instagram, yammer :type source_type: str :param name: name to use to identify the managed source being created :type name: str :param resources: list of source-specific config dicts :type resources: list :param auth: list of source-specific authentication dicts :type auth: list :param parameters: (optional) dict with config information on how to treat each resource :type parameters: dict :param validate: bool to determine if validation should be performed on the source :type validate: bool :return: dict of REST API output with headers attached :rtype: :class:`~datasift.request.DictResponse` :raises: :class:`~datasift.exceptions.DataSiftApiException`, :class:`requests.exceptions.HTTPError`
[ "Create", "a", "managed", "source" ]
train
https://github.com/datasift/datasift-python/blob/bfaca1a47501a18e11065ecf630d9c31df818f65/datasift/managed_sources.py#L94-L129
datasift/datasift-python
datasift/managed_sources.py
ManagedSources.update
def update(self, source_id, source_type, name, resources, auth, parameters=None, validate=True):
    """ Update a Managed Source.

        Uses API documented at http://dev.datasift.com/docs/api/rest-api/endpoints/sourceupdate

        :param source_id: ID of the source to update
        :type source_id: str
        :param source_type: data source name e.g. facebook_page, googleplus, instagram, yammer
        :type source_type: str
        :param name: name used to identify the managed source
        :type name: str
        :param resources: source-specific config dicts (at least one required)
        :type resources: list
        :param auth: source-specific authentication dicts (at least one required)
        :type auth: list
        :param parameters: (optional) config describing how each resource is treated
        :type parameters: dict
        :param validate: perform validation on the source (default True)
        :type validate: bool
        :return: dict of REST API output with headers attached
        :rtype: :class:`~datasift.request.DictResponse`
        :raises: :class:`~datasift.exceptions.DataSiftApiException`,
            :class:`requests.exceptions.HTTPError`
    """
    assert resources, "Need at least one resource"
    assert auth, "Need at least one authentication token"
    payload = dict(id=source_id,
                   source_type=source_type,
                   name=name,
                   resources=resources,
                   auth=auth,
                   validate=validate)
    if parameters:
        payload['parameters'] = parameters
    return self.request.post('update', payload)
python
def update(self, source_id, source_type, name, resources, auth, parameters=None, validate=True): """ Update a managed source Uses API documented at http://dev.datasift.com/docs/api/rest-api/endpoints/sourceupdate :param source_type: data source name e.g. facebook_page, googleplus, instagram, yammer :type source_type: str :param name: name to use to identify the managed source being created :type name: str :param resources: list of source-specific config dicts :type resources: list :param auth: list of source-specific authentication dicts :type auth: list :param parameters: (optional) dict with config information on how to treat each resource :type parameters: dict :return: dict of REST API output with headers attached :rtype: :class:`~datasift.request.DictResponse` :raises: :class:`~datasift.exceptions.DataSiftApiException`, :class:`requests.exceptions.HTTPError` """ assert resources, "Need at least one resource" assert auth, "Need at least one authentication token" params = {'id': source_id, 'source_type': source_type, 'name': name, 'resources': resources, 'auth': auth, 'validate': validate} if parameters: params['parameters'] = parameters return self.request.post('update', params)
[ "def", "update", "(", "self", ",", "source_id", ",", "source_type", ",", "name", ",", "resources", ",", "auth", ",", "parameters", "=", "None", ",", "validate", "=", "True", ")", ":", "assert", "resources", ",", "\"Need at least one resource\"", "assert", "a...
Update a managed source Uses API documented at http://dev.datasift.com/docs/api/rest-api/endpoints/sourceupdate :param source_type: data source name e.g. facebook_page, googleplus, instagram, yammer :type source_type: str :param name: name to use to identify the managed source being created :type name: str :param resources: list of source-specific config dicts :type resources: list :param auth: list of source-specific authentication dicts :type auth: list :param parameters: (optional) dict with config information on how to treat each resource :type parameters: dict :return: dict of REST API output with headers attached :rtype: :class:`~datasift.request.DictResponse` :raises: :class:`~datasift.exceptions.DataSiftApiException`, :class:`requests.exceptions.HTTPError`
[ "Update", "a", "managed", "source" ]
train
https://github.com/datasift/datasift-python/blob/bfaca1a47501a18e11065ecf630d9c31df818f65/datasift/managed_sources.py#L131-L156
datasift/datasift-python
datasift/managed_sources.py
ManagedSources.log
def log(self, source_id, page=None, per_page=None):
    """ Get the log for a specific Managed Source.

        Uses API documented at http://dev.datasift.com/docs/api/rest-api/endpoints/sourcelog

        :param source_id: target Source ID
        :type source_id: str
        :param page: (optional) page number for pagination
        :type page: int
        :param per_page: (optional) number of items per page, default 20
        :type per_page: int
        :return: dict of REST API output with headers attached
        :rtype: :class:`~datasift.request.DictResponse`
        :raises: :class:`~datasift.exceptions.DataSiftApiException`,
            :class:`requests.exceptions.HTTPError`
    """
    query = {'id': source_id}
    # Pagination keys are only sent when truthy (the API supplies defaults).
    for key, value in (('page', page), ('per_page', per_page)):
        if value:
            query[key] = value
    return self.request.get('log', params=query)
python
def log(self, source_id, page=None, per_page=None): """ Get the log for a specific Managed Source. Uses API documented at http://dev.datasift.com/docs/api/rest-api/endpoints/sourcelog :param source_id: target Source ID :type source_id: str :param page: (optional) page number for pagination :type page: int :param per_page: (optional) number of items per page, default 20 :type per_page: int :return: dict of REST API output with headers attached :rtype: :class:`~datasift.request.DictResponse` :raises: :class:`~datasift.exceptions.DataSiftApiException`, :class:`requests.exceptions.HTTPError` """ params = {'id': source_id} if page: params['page'] = page if per_page: params['per_page'] = per_page return self.request.get('log', params=params)
[ "def", "log", "(", "self", ",", "source_id", ",", "page", "=", "None", ",", "per_page", "=", "None", ")", ":", "params", "=", "{", "'id'", ":", "source_id", "}", "if", "page", ":", "params", "[", "'page'", "]", "=", "page", "if", "per_page", ":", ...
Get the log for a specific Managed Source. Uses API documented at http://dev.datasift.com/docs/api/rest-api/endpoints/sourcelog :param source_id: target Source ID :type source_id: str :param page: (optional) page number for pagination :type page: int :param per_page: (optional) number of items per page, default 20 :type per_page: int :return: dict of REST API output with headers attached :rtype: :class:`~datasift.request.DictResponse` :raises: :class:`~datasift.exceptions.DataSiftApiException`, :class:`requests.exceptions.HTTPError`
[ "Get", "the", "log", "for", "a", "specific", "Managed", "Source", "." ]
train
https://github.com/datasift/datasift-python/blob/bfaca1a47501a18e11065ecf630d9c31df818f65/datasift/managed_sources.py#L197-L218
datasift/datasift-python
datasift/managed_sources.py
ManagedSources.get
def get(self, source_id=None, source_type=None, page=None, per_page=None):
    """ Get a specific Managed Source, or a paginated list of them.

        Uses API documented at http://dev.datasift.com/docs/api/rest-api/endpoints/sourceget

        :param source_id: (optional) target Source ID
        :type source_id: str
        :param source_type: (optional) data source name e.g. facebook_page, googleplus, instagram, yammer
        :type source_type: str
        :param page: (optional) page number for pagination, default 1
        :type page: int
        :param per_page: (optional) number of items per page, default 20
        :type per_page: int
        :return: dict of REST API output with headers attached
        :rtype: :class:`~datasift.request.DictResponse`
        :raises: :class:`~datasift.exceptions.DataSiftApiException`,
            :class:`requests.exceptions.HTTPError`
    """
    query = {}
    # Every filter is optional; only truthy values are forwarded.
    for key, value in (('source_type', source_type),
                       ('id', source_id),
                       ('page', page),
                       ('per_page', per_page)):
        if value:
            query[key] = value
    return self.request.get('get', params=query)
python
def get(self, source_id=None, source_type=None, page=None, per_page=None): """ Get a specific managed source or a list of them. Uses API documented at http://dev.datasift.com/docs/api/rest-api/endpoints/sourceget :param source_id: (optional) target Source ID :type source_id: str :param source_type: (optional) data source name e.g. facebook_page, googleplus, instagram, yammer :type source_type: str :param page: (optional) page number for pagination, default 1 :type page: int :param per_page: (optional) number of items per page, default 20 :type per_page: int :return: dict of REST API output with headers attached :rtype: :class:`~datasift.request.DictResponse` :raises: :class:`~datasift.exceptions.DataSiftApiException`, :class:`requests.exceptions.HTTPError` """ params = {} if source_type: params['source_type'] = source_type if source_id: params['id'] = source_id if page: params['page'] = page if per_page: params['per_page'] = per_page return self.request.get('get', params=params)
[ "def", "get", "(", "self", ",", "source_id", "=", "None", ",", "source_type", "=", "None", ",", "page", "=", "None", ",", "per_page", "=", "None", ")", ":", "params", "=", "{", "}", "if", "source_type", ":", "params", "[", "'source_type'", "]", "=", ...
Get a specific managed source or a list of them. Uses API documented at http://dev.datasift.com/docs/api/rest-api/endpoints/sourceget :param source_id: (optional) target Source ID :type source_id: str :param source_type: (optional) data source name e.g. facebook_page, googleplus, instagram, yammer :type source_type: str :param page: (optional) page number for pagination, default 1 :type page: int :param per_page: (optional) number of items per page, default 20 :type per_page: int :return: dict of REST API output with headers attached :rtype: :class:`~datasift.request.DictResponse` :raises: :class:`~datasift.exceptions.DataSiftApiException`, :class:`requests.exceptions.HTTPError`
[ "Get", "a", "specific", "managed", "source", "or", "a", "list", "of", "them", "." ]
train
https://github.com/datasift/datasift-python/blob/bfaca1a47501a18e11065ecf630d9c31df818f65/datasift/managed_sources.py#L220-L247
pletzer/pnumpy
src/pnLaplacian.py
Laplacian.apply
def apply(self, localArray): """ Apply Laplacian stencil to data @param localArray local array @return new array on local proc """ # input dist array inp = gdaZeros(localArray.shape, localArray.dtype, numGhosts=1) # output array out = numpy.zeros(localArray.shape, localArray.dtype) # no displacement term weight = self.stencil[self.zeros] out[...] += weight * localArray for disp in self.srcLocalDomains: weight = self.stencil[disp] # no communication required here srcDom = self.srcLocalDomains[disp] dstDom = self.dstLocalDomains[disp] out[dstDom] += weight * localArray[srcDom] # # now the part that requires communication # # set the ghost values srcSlab = self.srcSlab[disp] # copy inp[srcSlab] = localArray[srcSlab] # send over to local process dstSlab = self.dstSlab[disp] winId = self.winIds[disp] rk = self.neighRk[disp] # remote fetch out[dstSlab] += weight * inp.getData(rk, winId) # some implementations require this inp.free() return out
python
def apply(self, localArray): """ Apply Laplacian stencil to data @param localArray local array @return new array on local proc """ # input dist array inp = gdaZeros(localArray.shape, localArray.dtype, numGhosts=1) # output array out = numpy.zeros(localArray.shape, localArray.dtype) # no displacement term weight = self.stencil[self.zeros] out[...] += weight * localArray for disp in self.srcLocalDomains: weight = self.stencil[disp] # no communication required here srcDom = self.srcLocalDomains[disp] dstDom = self.dstLocalDomains[disp] out[dstDom] += weight * localArray[srcDom] # # now the part that requires communication # # set the ghost values srcSlab = self.srcSlab[disp] # copy inp[srcSlab] = localArray[srcSlab] # send over to local process dstSlab = self.dstSlab[disp] winId = self.winIds[disp] rk = self.neighRk[disp] # remote fetch out[dstSlab] += weight * inp.getData(rk, winId) # some implementations require this inp.free() return out
[ "def", "apply", "(", "self", ",", "localArray", ")", ":", "# input dist array", "inp", "=", "gdaZeros", "(", "localArray", ".", "shape", ",", "localArray", ".", "dtype", ",", "numGhosts", "=", "1", ")", "# output array", "out", "=", "numpy", ".", "zeros", ...
Apply Laplacian stencil to data @param localArray local array @return new array on local proc
[ "Apply", "Laplacian", "stencil", "to", "data" ]
train
https://github.com/pletzer/pnumpy/blob/9e6d308be94a42637466b91ab1a7b4d64b4c29ae/src/pnLaplacian.py#L85-L131
carpedm20/ndrive
ndrive/models.py
ndrive.login
def login(self, user_id, password, svctype = "Android NDrive App ver", auth = 0): """Log in Naver and get cookie Agrs: user_id: Naver account's login id password: Naver account's login password Returns: True: Login success False: Login failed Remarks: self.cookie is a dictionary with 5 keys: path, domain, NID_AUT, nid_inf, NID_SES """ self.user_id = user_id self.password = password if self.user_id == None or self.password == None: print "[*] Error __init__: user_id and password is needed" return False try: cookie = naver_login(user_id, password) except: return False self.session.cookies.set('NID_AUT', cookie["NID_AUT"]) self.session.cookies.set('NID_SES', cookie["NID_SES"]) s = self.getRegisterUserInfo(svctype, auth) if s is True: return True else: print "[*] Error getRegisterUserInfo: failed" return False
python
def login(self, user_id, password, svctype = "Android NDrive App ver", auth = 0): """Log in Naver and get cookie Agrs: user_id: Naver account's login id password: Naver account's login password Returns: True: Login success False: Login failed Remarks: self.cookie is a dictionary with 5 keys: path, domain, NID_AUT, nid_inf, NID_SES """ self.user_id = user_id self.password = password if self.user_id == None or self.password == None: print "[*] Error __init__: user_id and password is needed" return False try: cookie = naver_login(user_id, password) except: return False self.session.cookies.set('NID_AUT', cookie["NID_AUT"]) self.session.cookies.set('NID_SES', cookie["NID_SES"]) s = self.getRegisterUserInfo(svctype, auth) if s is True: return True else: print "[*] Error getRegisterUserInfo: failed" return False
[ "def", "login", "(", "self", ",", "user_id", ",", "password", ",", "svctype", "=", "\"Android NDrive App ver\"", ",", "auth", "=", "0", ")", ":", "self", ".", "user_id", "=", "user_id", "self", ".", "password", "=", "password", "if", "self", ".", "user_i...
Log in Naver and get cookie Args: user_id: Naver account's login id password: Naver account's login password Returns: True: Login success False: Login failed Remarks: self.cookie is a dictionary with 5 keys: path, domain, NID_AUT, nid_inf, NID_SES
[ "Log", "in", "Naver", "and", "get", "cookie" ]
train
https://github.com/carpedm20/ndrive/blob/ac58eaf8a8d46292ad752bb38047f65838b8ad2b/ndrive/models.py#L185-L220
carpedm20/ndrive
ndrive/models.py
ndrive.getRegisterUserInfo
def getRegisterUserInfo(self, svctype = "Android NDrive App ver", auth = 0): """Get registerUserInfo Args: svctype: Platform information auth: ??? Returns: True: Success False: Failed """ data = {'userid': self.user_id, 'svctype': svctype, 'auth': auth} r = self.session.get(nurls['getRegisterUserInfo'], params = data) j = json.loads(r.text) if j['message'] != 'success': print "[*] Error getRegisterUserInfo: " + j['message'] return False else: self.useridx = j['resultvalue']['useridx'] return True
python
def getRegisterUserInfo(self, svctype = "Android NDrive App ver", auth = 0): """Get registerUserInfo Args: svctype: Platform information auth: ??? Returns: True: Success False: Failed """ data = {'userid': self.user_id, 'svctype': svctype, 'auth': auth} r = self.session.get(nurls['getRegisterUserInfo'], params = data) j = json.loads(r.text) if j['message'] != 'success': print "[*] Error getRegisterUserInfo: " + j['message'] return False else: self.useridx = j['resultvalue']['useridx'] return True
[ "def", "getRegisterUserInfo", "(", "self", ",", "svctype", "=", "\"Android NDrive App ver\"", ",", "auth", "=", "0", ")", ":", "data", "=", "{", "'userid'", ":", "self", ".", "user_id", ",", "'svctype'", ":", "svctype", ",", "'auth'", ":", "auth", "}", "...
Get registerUserInfo Args: svctype: Platform information auth: ??? Returns: True: Success False: Failed
[ "Get", "registerUserInfo" ]
train
https://github.com/carpedm20/ndrive/blob/ac58eaf8a8d46292ad752bb38047f65838b8ad2b/ndrive/models.py#L223-L246
carpedm20/ndrive
ndrive/models.py
ndrive.checkStatus
def checkStatus(self):
    """Check status

    Args:

    Returns:
        True: Success
        False: Failed
    """
    # NOTE(review): bare checkAccount() call -- presumably a module-level
    # helper, or a missing 'self.' prefix; raises NameError if neither
    # exists. TODO confirm against the rest of ndrive/models.py.
    checkAccount()

    data = {'userid': self.user_id,
            'useridx': self.useridx
            }
    r = self.session.post(nurls['checkStatus'], data = data)

    # The endpoint answers with XML; extract the <message> element with a
    # regex rather than a full XML parse.
    # NOTE(review): p.search() returns None when no <message> element is
    # present, which would raise AttributeError here -- TODO confirm the
    # endpoint always includes one.
    p = re.compile(r'\<message\>(?P<message>.+)\</message\>')
    message = p.search(r.text).group('message')

    if message == 'success':
        return True
    else:
        return False
python
def checkStatus(self): """Check status Args: Returns: True: Sucess False: Failed """ checkAccount() data = {'userid': self.user_id, 'useridx': self.useridx } r = self.session.post(nurls['checkStatus'], data = data) p = re.compile(r'\<message\>(?P<message>.+)\</message\>') message = p.search(r.text).group('message') if message == 'success': return True else: return False
[ "def", "checkStatus", "(", "self", ")", ":", "checkAccount", "(", ")", "data", "=", "{", "'userid'", ":", "self", ".", "user_id", ",", "'useridx'", ":", "self", ".", "useridx", "}", "r", "=", "self", ".", "session", ".", "post", "(", "nurls", "[", ...
Check status Args: Returns: True: Sucess False: Failed
[ "Check", "status" ]
train
https://github.com/carpedm20/ndrive/blob/ac58eaf8a8d46292ad752bb38047f65838b8ad2b/ndrive/models.py#L248-L271
carpedm20/ndrive
ndrive/models.py
ndrive.uploadFile
def uploadFile(self, file_path, upload_path = '', overwrite = False): """uploadFile Remarks: How Ndrive uploads a file to its server: 1. POST /CheckStatus.ndrive 2. POST /GetDiskSpace.ndrive 3. POST /CheckUpload.ndrive 4. PUT /FILE_PATH 5. POST /GetList.ndrive 6. POST /GetWasteInfo.ndrive 7. POST /GetDiskSpace.ndrive """ s = self.checkStatus() s = self.getDiskSpace() s = self.checkUpload(file_path, upload_path, overwrite) if s is True: self.put(file_path, upload_path)
python
def uploadFile(self, file_path, upload_path = '', overwrite = False): """uploadFile Remarks: How Ndrive uploads a file to its server: 1. POST /CheckStatus.ndrive 2. POST /GetDiskSpace.ndrive 3. POST /CheckUpload.ndrive 4. PUT /FILE_PATH 5. POST /GetList.ndrive 6. POST /GetWasteInfo.ndrive 7. POST /GetDiskSpace.ndrive """ s = self.checkStatus() s = self.getDiskSpace() s = self.checkUpload(file_path, upload_path, overwrite) if s is True: self.put(file_path, upload_path)
[ "def", "uploadFile", "(", "self", ",", "file_path", ",", "upload_path", "=", "''", ",", "overwrite", "=", "False", ")", ":", "s", "=", "self", ".", "checkStatus", "(", ")", "s", "=", "self", ".", "getDiskSpace", "(", ")", "s", "=", "self", ".", "ch...
uploadFile Remarks: How Ndrive uploads a file to its server: 1. POST /CheckStatus.ndrive 2. POST /GetDiskSpace.ndrive 3. POST /CheckUpload.ndrive 4. PUT /FILE_PATH 5. POST /GetList.ndrive 6. POST /GetWasteInfo.ndrive 7. POST /GetDiskSpace.ndrive
[ "uploadFile" ]
train
https://github.com/carpedm20/ndrive/blob/ac58eaf8a8d46292ad752bb38047f65838b8ad2b/ndrive/models.py#L282-L300
carpedm20/ndrive
ndrive/models.py
ndrive.getDiskSpace
def getDiskSpace(self, file_path, upload_path = '', overwrite = False): """getDiskSpace Args: file_path: Full path for a file you want to checkUpload upload_path: Ndrive path where you want to upload file ex) /Picture/ Returns: True: Possible to upload a file with a given file_size False: Impossible to upload a file with a given file_size """ self.checkAccount() url = nurls['checkUpload'] file_size = os.stat(file_path).st_size file_name = os.path.basename(file_path) now = datetime.datetime.now().isoformat() data = {'userid': self.user_id, 'useridx': self.useridx, 'getlastmodified': now, 'dstresource': upload_path + file_name, 'overwrite': overwrite, 'uploadsize': file_size, } r = self.session.post(nurls['getDiskSpace'], data = data) return resultManager(r.text)
python
def getDiskSpace(self, file_path, upload_path = '', overwrite = False): """getDiskSpace Args: file_path: Full path for a file you want to checkUpload upload_path: Ndrive path where you want to upload file ex) /Picture/ Returns: True: Possible to upload a file with a given file_size False: Impossible to upload a file with a given file_size """ self.checkAccount() url = nurls['checkUpload'] file_size = os.stat(file_path).st_size file_name = os.path.basename(file_path) now = datetime.datetime.now().isoformat() data = {'userid': self.user_id, 'useridx': self.useridx, 'getlastmodified': now, 'dstresource': upload_path + file_name, 'overwrite': overwrite, 'uploadsize': file_size, } r = self.session.post(nurls['getDiskSpace'], data = data) return resultManager(r.text)
[ "def", "getDiskSpace", "(", "self", ",", "file_path", ",", "upload_path", "=", "''", ",", "overwrite", "=", "False", ")", ":", "self", ".", "checkAccount", "(", ")", "url", "=", "nurls", "[", "'checkUpload'", "]", "file_size", "=", "os", ".", "stat", "...
getDiskSpace Args: file_path: Full path for a file you want to checkUpload upload_path: Ndrive path where you want to upload file ex) /Picture/ Returns: True: Possible to upload a file with a given file_size False: Impossible to upload a file with a given file_size
[ "getDiskSpace" ]
train
https://github.com/carpedm20/ndrive/blob/ac58eaf8a8d46292ad752bb38047f65838b8ad2b/ndrive/models.py#L302-L334
carpedm20/ndrive
ndrive/models.py
ndrive.put
def put(self, file_path, upload_path = ''): """PUT Args: file_path: Full path for a file you want to upload upload_path: Ndrive path where you want to upload file ex) /Picture/ Returns: True: Upload success False: Upload failed """ f = open(file_path, "r") c = f.read() file_name = os.path.basename(file_path) now = datetime.datetime.now().isoformat() url = nurls['put'] + upload_path + file_name headers = {'userid': self.user_id, 'useridx': self.useridx, 'MODIFYDATE': now, 'Content-Type': magic.from_file(file_path, mime=True), 'charset': 'UTF-8', 'Origin': 'http://ndrive2.naver.com', } r = self.session.put(url = url, data = c, headers = headers) return self.resultManager(r.text)
python
def put(self, file_path, upload_path = ''): """PUT Args: file_path: Full path for a file you want to upload upload_path: Ndrive path where you want to upload file ex) /Picture/ Returns: True: Upload success False: Upload failed """ f = open(file_path, "r") c = f.read() file_name = os.path.basename(file_path) now = datetime.datetime.now().isoformat() url = nurls['put'] + upload_path + file_name headers = {'userid': self.user_id, 'useridx': self.useridx, 'MODIFYDATE': now, 'Content-Type': magic.from_file(file_path, mime=True), 'charset': 'UTF-8', 'Origin': 'http://ndrive2.naver.com', } r = self.session.put(url = url, data = c, headers = headers) return self.resultManager(r.text)
[ "def", "put", "(", "self", ",", "file_path", ",", "upload_path", "=", "''", ")", ":", "f", "=", "open", "(", "file_path", ",", "\"r\"", ")", "c", "=", "f", ".", "read", "(", ")", "file_name", "=", "os", ".", "path", ".", "basename", "(", "file_pa...
PUT Args: file_path: Full path for a file you want to upload upload_path: Ndrive path where you want to upload file ex) /Picture/ Returns: True: Upload success False: Upload failed
[ "PUT" ]
train
https://github.com/carpedm20/ndrive/blob/ac58eaf8a8d46292ad752bb38047f65838b8ad2b/ndrive/models.py#L368-L398
carpedm20/ndrive
ndrive/models.py
ndrive.delete
def delete(self, file_path): """DELETE Args: file_path: Full path for a file you want to delete upload_path: Ndrive path where you want to delete file ex) /Picture/ Returns: True: Delete success False: Delete failed """ now = datetime.datetime.now().isoformat() url = nurls['put'] + upload_path + file_name headers = {'userid': self.user_id, 'useridx': self.useridx, 'Content-Type': "application/x-www-form-urlencoded; charset=UTF-8", 'charset': 'UTF-8', 'Origin': 'http://ndrive2.naver.com', } r = self.session.delete(url = url, headers = headers) return self.resultManager(r.text)
python
def delete(self, file_path): """DELETE Args: file_path: Full path for a file you want to delete upload_path: Ndrive path where you want to delete file ex) /Picture/ Returns: True: Delete success False: Delete failed """ now = datetime.datetime.now().isoformat() url = nurls['put'] + upload_path + file_name headers = {'userid': self.user_id, 'useridx': self.useridx, 'Content-Type': "application/x-www-form-urlencoded; charset=UTF-8", 'charset': 'UTF-8', 'Origin': 'http://ndrive2.naver.com', } r = self.session.delete(url = url, headers = headers) return self.resultManager(r.text)
[ "def", "delete", "(", "self", ",", "file_path", ")", ":", "now", "=", "datetime", ".", "datetime", ".", "now", "(", ")", ".", "isoformat", "(", ")", "url", "=", "nurls", "[", "'put'", "]", "+", "upload_path", "+", "file_name", "headers", "=", "{", ...
DELETE Args: file_path: Full path for a file you want to delete upload_path: Ndrive path where you want to delete file ex) /Picture/ Returns: True: Delete success False: Delete failed
[ "DELETE" ]
train
https://github.com/carpedm20/ndrive/blob/ac58eaf8a8d46292ad752bb38047f65838b8ad2b/ndrive/models.py#L400-L424
carpedm20/ndrive
ndrive/models.py
ndrive.getList
def getList(self, dummy = 56184, orgresource = '/', type = 1, dept = 0, sort = 'name', order = 'asc', startnum = 0, pagingrow = 1000): """GetList Args: dummy: ??? orgresource: Directory path to get the file list ex) /Picture/ type: 1 => only directories with idxfolder property 2 => only files 3 => directories and files with thumbnail info ex) viewHeight, viewWidth for Image file 4 => only directories except idxfolder 5 => directories and files without thumbnail info depth: Dept for file list sort: name => 이름 file => file type, 종류 length => size of file, 크기 date => edited date, 수정한 날짜 credate => creation date, 올린 날짜 protect => protect or not, 중요 표시 order: Order by (asc, desc) startnum: ??? pagingrow: start index ? Returns: FileInfo list: List of files for a path False: Failed to get list """ url = nurls['getList'] data = {'userid': self.user_id, 'useridx': self.useridx, 'dummy': dummy, 'orgresource': orgresource, 'type': type, 'dept': dept, 'sort': sort, 'order': order, 'startnum': startnum, 'pagingrow': pagingrow, } r = self.session.post(url = url, data = data) try: j = json.loads(r.text) except: print '[*] Success checkUpload: 0 result' return [] if j['message'] != 'success': print '[*] Error checkUpload: ' + j['message'] return False else: files = [] for i in j['resultvalue']: f = FileInfo() f.protect = i['protect'] f.resourceno = i['resourceno'] f.copyright = i['copyright'] f.subfoldercnt = i['subfoldercnt'] f.resourcetype = i['resourcetype'] f.fileuploadstatus = i['fileuploadstatus'] f.prority = i['priority'] f.filelink = i['filelink'] f.href = i['href'] f.thumbnailpath = i['thumbnailpath'] f.sharedinfo = i['sharedinfo'] f.getlastmodified = i['getlastmodified'] f.shareno = i['shareno'] f.lastmodifieduser = i['lastmodifieduser'] f.getcontentlength = i['getcontentlength'] f.lastaccessed = i['lastaccessed'] f.virusstatus = i['virusstatus'] f.idxfolder = i['idxfolder'] f.creationdate = i['creationdate'] f.nocache = i['nocache'] f.viewWidth = i['viewWidth'] 
f.viewHeight = i['viewHeight'] f.setJson(j['resultvalue']) files.append(f) return files
python
def getList(self, dummy = 56184, orgresource = '/', type = 1, dept = 0, sort = 'name', order = 'asc', startnum = 0, pagingrow = 1000): """GetList Args: dummy: ??? orgresource: Directory path to get the file list ex) /Picture/ type: 1 => only directories with idxfolder property 2 => only files 3 => directories and files with thumbnail info ex) viewHeight, viewWidth for Image file 4 => only directories except idxfolder 5 => directories and files without thumbnail info depth: Dept for file list sort: name => 이름 file => file type, 종류 length => size of file, 크기 date => edited date, 수정한 날짜 credate => creation date, 올린 날짜 protect => protect or not, 중요 표시 order: Order by (asc, desc) startnum: ??? pagingrow: start index ? Returns: FileInfo list: List of files for a path False: Failed to get list """ url = nurls['getList'] data = {'userid': self.user_id, 'useridx': self.useridx, 'dummy': dummy, 'orgresource': orgresource, 'type': type, 'dept': dept, 'sort': sort, 'order': order, 'startnum': startnum, 'pagingrow': pagingrow, } r = self.session.post(url = url, data = data) try: j = json.loads(r.text) except: print '[*] Success checkUpload: 0 result' return [] if j['message'] != 'success': print '[*] Error checkUpload: ' + j['message'] return False else: files = [] for i in j['resultvalue']: f = FileInfo() f.protect = i['protect'] f.resourceno = i['resourceno'] f.copyright = i['copyright'] f.subfoldercnt = i['subfoldercnt'] f.resourcetype = i['resourcetype'] f.fileuploadstatus = i['fileuploadstatus'] f.prority = i['priority'] f.filelink = i['filelink'] f.href = i['href'] f.thumbnailpath = i['thumbnailpath'] f.sharedinfo = i['sharedinfo'] f.getlastmodified = i['getlastmodified'] f.shareno = i['shareno'] f.lastmodifieduser = i['lastmodifieduser'] f.getcontentlength = i['getcontentlength'] f.lastaccessed = i['lastaccessed'] f.virusstatus = i['virusstatus'] f.idxfolder = i['idxfolder'] f.creationdate = i['creationdate'] f.nocache = i['nocache'] f.viewWidth = i['viewWidth'] 
f.viewHeight = i['viewHeight'] f.setJson(j['resultvalue']) files.append(f) return files
[ "def", "getList", "(", "self", ",", "dummy", "=", "56184", ",", "orgresource", "=", "'/'", ",", "type", "=", "1", ",", "dept", "=", "0", ",", "sort", "=", "'name'", ",", "order", "=", "'asc'", ",", "startnum", "=", "0", ",", "pagingrow", "=", "10...
GetList Args: dummy: ??? orgresource: Directory path to get the file list ex) /Picture/ type: 1 => only directories with idxfolder property 2 => only files 3 => directories and files with thumbnail info ex) viewHeight, viewWidth for Image file 4 => only directories except idxfolder 5 => directories and files without thumbnail info depth: Dept for file list sort: name => 이름 file => file type, 종류 length => size of file, 크기 date => edited date, 수정한 날짜 credate => creation date, 올린 날짜 protect => protect or not, 중요 표시 order: Order by (asc, desc) startnum: ??? pagingrow: start index ? Returns: FileInfo list: List of files for a path False: Failed to get list
[ "GetList" ]
train
https://github.com/carpedm20/ndrive/blob/ac58eaf8a8d46292ad752bb38047f65838b8ad2b/ndrive/models.py#L426-L519
carpedm20/ndrive
ndrive/models.py
ndrive.doMove
def doMove(self, orgresource, dstresource, dummy = 56184, stresource = 'F', bShareFireCopy = 'false'): """DoMove Args: dummy: ??? orgresource: Path for a file which you want to move dstresource: Destination path bShareFireCopy: ??? Returns: True: Move success False: Move failed """ url = nurls['doMove'] data = {'userid': self.user_id, 'useridx': self.useridx, 'dummy': dummy, 'orgresource': orgresource, 'dstresource': dstresource, 'overwrite': overwrite, 'bShareFireCopy': bShareFireCopy, } r = self.session.post(url = url, data = data) try: j = json.loads(r.text) except: print '[*] Success checkUpload: 0 result' return False return self.resultManager(r.text)
python
def doMove(self, orgresource, dstresource, dummy = 56184, stresource = 'F', bShareFireCopy = 'false'): """DoMove Args: dummy: ??? orgresource: Path for a file which you want to move dstresource: Destination path bShareFireCopy: ??? Returns: True: Move success False: Move failed """ url = nurls['doMove'] data = {'userid': self.user_id, 'useridx': self.useridx, 'dummy': dummy, 'orgresource': orgresource, 'dstresource': dstresource, 'overwrite': overwrite, 'bShareFireCopy': bShareFireCopy, } r = self.session.post(url = url, data = data) try: j = json.loads(r.text) except: print '[*] Success checkUpload: 0 result' return False return self.resultManager(r.text)
[ "def", "doMove", "(", "self", ",", "orgresource", ",", "dstresource", ",", "dummy", "=", "56184", ",", "stresource", "=", "'F'", ",", "bShareFireCopy", "=", "'false'", ")", ":", "url", "=", "nurls", "[", "'doMove'", "]", "data", "=", "{", "'userid'", "...
DoMove Args: dummy: ??? orgresource: Path for a file which you want to move dstresource: Destination path bShareFireCopy: ??? Returns: True: Move success False: Move failed
[ "DoMove" ]
train
https://github.com/carpedm20/ndrive/blob/ac58eaf8a8d46292ad752bb38047f65838b8ad2b/ndrive/models.py#L521-L556
carpedm20/ndrive
ndrive/models.py
ndrive.getProperty
def getProperty(self, orgresource, dummy = 56184): """GetProperty Args: dummy: ??? orgresource: File path Returns: FileInfo object: False: Failed to get property """ url = nurls['getProperty'] data = {'userid': self.user_id, 'useridx': self.useridx, 'dummy': dummy, 'orgresource': orgresource, } r = self.session.post(url = url, data = data) j = json.loads(r.text) if self.resultManager(r.text): f = FileInfo() result = j['resultvalue'] f.resourcetype = result['resourcetype'] f.resourceno = result['resourceno'] return f else: return False
python
def getProperty(self, orgresource, dummy = 56184): """GetProperty Args: dummy: ??? orgresource: File path Returns: FileInfo object: False: Failed to get property """ url = nurls['getProperty'] data = {'userid': self.user_id, 'useridx': self.useridx, 'dummy': dummy, 'orgresource': orgresource, } r = self.session.post(url = url, data = data) j = json.loads(r.text) if self.resultManager(r.text): f = FileInfo() result = j['resultvalue'] f.resourcetype = result['resourcetype'] f.resourceno = result['resourceno'] return f else: return False
[ "def", "getProperty", "(", "self", ",", "orgresource", ",", "dummy", "=", "56184", ")", ":", "url", "=", "nurls", "[", "'getProperty'", "]", "data", "=", "{", "'userid'", ":", "self", ".", "user_id", ",", "'useridx'", ":", "self", ".", "useridx", ",", ...
GetProperty Args: dummy: ??? orgresource: File path Returns: FileInfo object: False: Failed to get property
[ "GetProperty" ]
train
https://github.com/carpedm20/ndrive/blob/ac58eaf8a8d46292ad752bb38047f65838b8ad2b/ndrive/models.py#L558-L592
carpedm20/ndrive
ndrive/models.py
ndrive.getVersionListCount
def getVersionListCount(self, orgresource): """GetVersionListCount Args: orgresource: File path Returns: Integer number: # of version list False: Failed to get property """ url = nurls['getVersionListCount'] data = {'userid': self.user_id, 'useridx': self.useridx, 'orgresource': orgresource, } r = self.session.post(url = url, data = data) j = json.loads(r.text) if j['message'] != 'success': print "[*] Error getVersionListCount: " + j['message'] return False else: return int(j['resultvalue']['count'])
python
def getVersionListCount(self, orgresource): """GetVersionListCount Args: orgresource: File path Returns: Integer number: # of version list False: Failed to get property """ url = nurls['getVersionListCount'] data = {'userid': self.user_id, 'useridx': self.useridx, 'orgresource': orgresource, } r = self.session.post(url = url, data = data) j = json.loads(r.text) if j['message'] != 'success': print "[*] Error getVersionListCount: " + j['message'] return False else: return int(j['resultvalue']['count'])
[ "def", "getVersionListCount", "(", "self", ",", "orgresource", ")", ":", "url", "=", "nurls", "[", "'getVersionListCount'", "]", "data", "=", "{", "'userid'", ":", "self", ".", "user_id", ",", "'useridx'", ":", "self", ".", "useridx", ",", "'orgresource'", ...
GetVersionListCount Args: orgresource: File path Returns: Integer number: # of version list False: Failed to get property
[ "GetVersionListCount" ]
train
https://github.com/carpedm20/ndrive/blob/ac58eaf8a8d46292ad752bb38047f65838b8ad2b/ndrive/models.py#L633-L659
carpedm20/ndrive
ndrive/models.py
ndrive.setProperty
def setProperty(self, orgresource, protect, dummy = 7046): """SetProperty Args: orgresource: File path protect: 'Y' or 'N', 중요 표시 Returns: Integer number: # of version list False: Failed to get property """ url = nurls['setProperty'] data = {'userid': self.user_id, 'useridx': self.useridx, 'orgresource': orgresource, 'protect': protect, 'dummy': dummy, } r = self.session.post(url = url, data = data) return resultManager(r.text)
python
def setProperty(self, orgresource, protect, dummy = 7046): """SetProperty Args: orgresource: File path protect: 'Y' or 'N', 중요 표시 Returns: Integer number: # of version list False: Failed to get property """ url = nurls['setProperty'] data = {'userid': self.user_id, 'useridx': self.useridx, 'orgresource': orgresource, 'protect': protect, 'dummy': dummy, } r = self.session.post(url = url, data = data) return resultManager(r.text)
[ "def", "setProperty", "(", "self", ",", "orgresource", ",", "protect", ",", "dummy", "=", "7046", ")", ":", "url", "=", "nurls", "[", "'setProperty'", "]", "data", "=", "{", "'userid'", ":", "self", ".", "user_id", ",", "'useridx'", ":", "self", ".", ...
SetProperty Args: orgresource: File path protect: 'Y' or 'N', 중요 표시 Returns: Integer number: # of version list False: Failed to get property
[ "SetProperty" ]
train
https://github.com/carpedm20/ndrive/blob/ac58eaf8a8d46292ad752bb38047f65838b8ad2b/ndrive/models.py#L661-L685