code
stringlengths
75
104k
docstring
stringlengths
1
46.9k
text
stringlengths
164
112k
def credit_notes(request, form):
    """Render a report listing every credit note in the system.

    Pre-fetches the refund/application rows and the owning invoice's
    attendee profile so the report renders without per-row queries.
    """
    related = (
        "creditnoterefund",
        "creditnoteapplication",
        "invoice",
        "invoice__user__attendee__attendeeprofilebase",
    )
    queryset = commerce.CreditNote.objects.all().select_related(*related)
    columns = [
        "id",
        "invoice__user__attendee__attendeeprofilebase__invoice_recipient",
        "status",
        "value",
    ]
    return QuerysetReport(
        "Credit Notes",
        columns,
        queryset,
        headings=["id", "Owner", "Status", "Value"],
        link_view=views.credit_note,
    )
Shows all of the credit notes in the system.
Below is the the instruction that describes the task: ### Input: Shows all of the credit notes in the system. ### Response: def credit_notes(request, form): ''' Shows all of the credit notes in the system. ''' notes = commerce.CreditNote.objects.all().select_related( "creditnoterefund", "creditnoteapplication", "invoice", "invoice__user__attendee__attendeeprofilebase", ) return QuerysetReport( "Credit Notes", ["id", "invoice__user__attendee__attendeeprofilebase__invoice_recipient", "status", "value"], notes, headings=["id", "Owner", "Status", "Value"], link_view=views.credit_note, )
def main(argv=None):
    """Generate reStructuredText documentation for a signature pipeline.

    :param argv: argument list to parse; when ``None`` the arguments are
        taken from ``sys.argv`` (standard argparse behavior).

    Writes an RST file listing, in order, the docstring of every rule in
    the pipeline named by the ``pipeline`` positional argument.
    """
    parser = argparse.ArgumentParser(description=DESCRIPTION)
    parser.add_argument(
        'pipeline',
        help='Python dotted path to rules pipeline to document'
    )
    parser.add_argument('output', help='output file')

    if argv is None:
        args = parser.parse_args()
    else:
        args = parser.parse_args(argv)

    print('Generating documentation for %s in %s...' % (args.pipeline, args.output))

    rules = import_rules(args.pipeline)

    with open(args.output, 'w') as fp:
        # Fixed typo in the emitted marker: "AUTOGEMERATED" -> "AUTOGENERATED".
        fp.write('.. THIS IS AUTOGENERATED USING:\n')
        fp.write(' \n')
        # Record the exact command line that produced this file.
        fp.write(' %s\n' % (' '.join(sys.argv)))
        fp.write(' \n')
        fp.write('Signature generation rules pipeline\n')
        fp.write('===================================\n')
        fp.write('\n')
        fp.write('\n')
        fp.write(
            'This is the signature generation pipeline defined at ``%s``:\n' % args.pipeline
        )
        fp.write('\n')
        for i, rule in enumerate(rules):
            # Numbered-list item; continuation lines must be indented to
            # the width of the "N. " prefix for valid RST.
            li = '%s. ' % (i + 1)
            fp.write('%s%s\n' % (
                li,
                indent(get_doc(rule), ' ' * len(li))
            ))
            fp.write('\n')
Generates documentation for signature generation pipeline
Below is the the instruction that describes the task: ### Input: Generates documentation for signature generation pipeline ### Response: def main(argv=None): """Generates documentation for signature generation pipeline""" parser = argparse.ArgumentParser(description=DESCRIPTION) parser.add_argument( 'pipeline', help='Python dotted path to rules pipeline to document' ) parser.add_argument('output', help='output file') if argv is None: args = parser.parse_args() else: args = parser.parse_args(argv) print('Generating documentation for %s in %s...' % (args.pipeline, args.output)) rules = import_rules(args.pipeline) with open(args.output, 'w') as fp: fp.write('.. THIS IS AUTOGEMERATED USING:\n') fp.write(' \n') fp.write(' %s\n' % (' '.join(sys.argv))) fp.write(' \n') fp.write('Signature generation rules pipeline\n') fp.write('===================================\n') fp.write('\n') fp.write('\n') fp.write( 'This is the signature generation pipeline defined at ``%s``:\n' % args.pipeline ) fp.write('\n') for i, rule in enumerate(rules): li = '%s. ' % (i + 1) fp.write('%s%s\n' % ( li, indent(get_doc(rule), ' ' * len(li)) )) fp.write('\n')
def get_rlz(self, rlzstr):
    r"""Return the Realization matching a string 'rlz-\d+', or None.

    The numeric part of the string indexes into ``self.realizations``.
    Strings that do not start with the 'rlz-<number>' pattern yield None.
    """
    match = re.match(r'rlz-(\d+)', rlzstr)
    if match is None:
        return None
    index = int(match.group(1))
    return self.realizations[index]
r""" Get a Realization instance for a string of the form 'rlz-\d+'
Below is the the instruction that describes the task: ### Input: r""" Get a Realization instance for a string of the form 'rlz-\d+' ### Response: def get_rlz(self, rlzstr): r""" Get a Realization instance for a string of the form 'rlz-\d+' """ mo = re.match(r'rlz-(\d+)', rlzstr) if not mo: return return self.realizations[int(mo.group(1))]
def which(program, win_allow_cross_arch=True):
    """Identify the location of an executable file.

    If *program* contains a directory component it is checked directly;
    otherwise each directory on PATH is searched. On Windows, missing
    extensions are resolved via PATHEXT, and *win_allow_cross_arch*
    additionally consults Sysnative/SysWOW64 to cross the 32/64-bit
    redirection boundary. Returns the resolved path or None.
    """

    def is_executable(candidate):
        # A usable executable is a regular file with the execute bit set.
        return os.path.isfile(candidate) and os.access(candidate, os.X_OK)

    def search_dirs():
        return os.environ['PATH'].split(os.pathsep)

    if os.name == 'nt':

        def resolve(candidate):
            root, ext = os.path.splitext(candidate)
            if ext:
                # Explicit extension: accept the candidate only as given.
                if is_executable(candidate):
                    return candidate
            else:
                # No extension supplied: try each extension from PATHEXT.
                for pathext in os.environ['PATHEXT'].split(os.pathsep):
                    attempt = root + pathext.lower()
                    if is_executable(attempt):
                        return attempt
            return None

        def candidate_dirs():
            dirs = search_dirs()
            if win_allow_cross_arch:
                # Prefer the native system directory when visible
                # (32-bit process on 64-bit Windows), else fall back
                # to SysWOW64 at the end of the search order.
                sysnative = os.path.expandvars(r"$WINDIR\Sysnative")
                if os.path.isdir(sysnative):
                    dirs.insert(0, sysnative)
                else:
                    syswow = os.path.expandvars(r"$WINDIR\SysWOW64")
                    if os.path.isdir(syswow):
                        dirs.append(syswow)
            return dirs

    else:

        def resolve(candidate):
            return candidate if is_executable(candidate) else None

        candidate_dirs = search_dirs

    if os.path.split(program)[0]:
        # Program already carries a directory component: check it directly.
        found = resolve(program)
        if found:
            return found
    else:
        for directory in candidate_dirs():
            found = resolve(os.path.join(directory, program))
            if found:
                return found
    return None
Identify the location of an executable file.
Below is the the instruction that describes the task: ### Input: Identify the location of an executable file. ### Response: def which(program, win_allow_cross_arch=True): """Identify the location of an executable file.""" def is_exe(path): return os.path.isfile(path) and os.access(path, os.X_OK) def _get_path_list(): return os.environ['PATH'].split(os.pathsep) if os.name == 'nt': def find_exe(program): root, ext = os.path.splitext(program) if ext: if is_exe(program): return program else: for ext in os.environ['PATHEXT'].split(os.pathsep): program_path = root + ext.lower() if is_exe(program_path): return program_path return None def get_path_list(): paths = _get_path_list() if win_allow_cross_arch: alt_sys_path = os.path.expandvars(r"$WINDIR\Sysnative") if os.path.isdir(alt_sys_path): paths.insert(0, alt_sys_path) else: alt_sys_path = os.path.expandvars(r"$WINDIR\SysWOW64") if os.path.isdir(alt_sys_path): paths.append(alt_sys_path) return paths else: def find_exe(program): return program if is_exe(program) else None get_path_list = _get_path_list if os.path.split(program)[0]: program_path = find_exe(program) if program_path: return program_path else: for path in get_path_list(): program_path = find_exe(os.path.join(path, program)) if program_path: return program_path return None
def create_data_and_metadata_from_data(self, data: numpy.ndarray, intensity_calibration: Calibration.Calibration=None, dimensional_calibrations: typing.List[Calibration.Calibration]=None, metadata: dict=None, timestamp: str=None) -> DataAndMetadata.DataAndMetadata:
    """Create a data_and_metadata object from data.

    .. versionadded:: 1.0

    .. deprecated:: 1.1
       Use :py:meth:`~nion.swift.Facade.DataItem.create_data_and_metadata` instead.

    Scriptable: No
    """
    # NOTE(review): the visible body is only an ellipsis placeholder; as
    # written this returns None despite the annotated return type. The
    # deprecated implementation presumably lives elsewhere — confirm that
    # callers are routed to DataItem.create_data_and_metadata.
    ...
Create a data_and_metadata object from data. .. versionadded:: 1.0 .. deprecated:: 1.1 Use :py:meth:`~nion.swift.Facade.DataItem.create_data_and_metadata` instead. Scriptable: No
Below is the the instruction that describes the task: ### Input: Create a data_and_metadata object from data. .. versionadded:: 1.0 .. deprecated:: 1.1 Use :py:meth:`~nion.swift.Facade.DataItem.create_data_and_metadata` instead. Scriptable: No ### Response: def create_data_and_metadata_from_data(self, data: numpy.ndarray, intensity_calibration: Calibration.Calibration=None, dimensional_calibrations: typing.List[Calibration.Calibration]=None, metadata: dict=None, timestamp: str=None) -> DataAndMetadata.DataAndMetadata: """Create a data_and_metadata object from data. .. versionadded:: 1.0 .. deprecated:: 1.1 Use :py:meth:`~nion.swift.Facade.DataItem.create_data_and_metadata` instead. Scriptable: No """ ...
def _ensure_file_path(self):
    """
    Make sure the directory for ``self.file_path`` exists, and that the
    file itself is present with owner-only ("go-rwx", 0600) permissions.
    """
    directory = os.path.dirname(self.file_path)
    if directory and not os.path.isdir(directory):  # pragma: no cover
        os.makedirs(directory)
    if not os.path.isfile(self.file_path):
        # Touch the file, then restrict it to user read/write only so
        # group/world never get access.
        with open(self.file_path, 'w'):
            pass
        os.chmod(self.file_path, 0o600)
Ensure the storage path exists. If it doesn't, create it with "go-rwx" permissions.
Below is the the instruction that describes the task: ### Input: Ensure the storage path exists. If it doesn't, create it with "go-rwx" permissions. ### Response: def _ensure_file_path(self): """ Ensure the storage path exists. If it doesn't, create it with "go-rwx" permissions. """ storage_root = os.path.dirname(self.file_path) needs_storage_root = storage_root and not os.path.isdir(storage_root) if needs_storage_root: # pragma: no cover os.makedirs(storage_root) if not os.path.isfile(self.file_path): # create the file without group/world permissions with open(self.file_path, 'w'): pass user_read_write = 0o600 os.chmod(self.file_path, user_read_write)
def parents(self):
    """Return the chain of this category's ancestors, root first."""
    if self.parent is None:
        return []
    chain = []
    node = self
    while node.parent is not None:
        chain.append(node.parent)
        node = node.parent
    # Collected child-to-root; callers expect root-to-child order.
    chain.reverse()
    return chain
Returns a list of all the current category's parents.
Below is the the instruction that describes the task: ### Input: Returns a list of all the current category's parents. ### Response: def parents(self): """ Returns a list of all the current category's parents.""" parents = [] if self.parent is None: return [] category = self while category.parent is not None: parents.append(category.parent) category = category.parent return parents[::-1]
def set_power_state(self, is_on, bulb=ALL_BULBS, timeout=None):
    """ Sets the power state of one or more bulbs.

    :param is_on: truthy to turn the bulb(s) on, falsy to turn them off.
    :param bulb: target bulb address; defaults to broadcasting to all bulbs.
    :param timeout: how long to wait for the state update before giving up;
        passed through to the `_blocking` helper.
    :return: the (updated) cached power state.
    """
    # _blocking presumably holds self.lock and waits on light_state_event
    # until a state refresh arrives or the timeout elapses — confirm against
    # the helper's definition.
    with _blocking(self.lock, self.power_state, self.light_state_event, timeout):
        # Payload is a 2-byte big-endian flag: \x00\x01 = on, \x00\x00 = off.
        self.send(REQ_SET_POWER_STATE, bulb, '2s', '\x00\x01' if is_on else '\x00\x00')
        # Request a state broadcast so the cached state gets refreshed.
        self.send(REQ_GET_LIGHT_STATE, ALL_BULBS, '')
    return self.power_state
Sets the power state of one or more bulbs.
Below is the the instruction that describes the task: ### Input: Sets the power state of one or more bulbs. ### Response: def set_power_state(self, is_on, bulb=ALL_BULBS, timeout=None): """ Sets the power state of one or more bulbs. """ with _blocking(self.lock, self.power_state, self.light_state_event, timeout): self.send(REQ_SET_POWER_STATE, bulb, '2s', '\x00\x01' if is_on else '\x00\x00') self.send(REQ_GET_LIGHT_STATE, ALL_BULBS, '') return self.power_state
def _fold_line(self, line):
    """Write *line*, folding it into continuation lines when it exceeds
    the configured column width. Continuation lines begin with a single
    space, so each carries at most ``_cols - 1`` payload characters."""
    write = self._output_file.write
    width = self._cols
    if len(line) <= width:
        write(line)
        write(self._line_sep)
        return
    # First physical line uses the full width.
    write(line[:width])
    write(self._line_sep)
    pos = width
    total = len(line)
    while pos < total:
        # Leading marker for a folded continuation line.
        write(b' ')
        end = min(total, pos + width - 1)
        write(line[pos:end])
        write(self._line_sep)
        pos = end
Write string line as one or more folded lines.
Below is the the instruction that describes the task: ### Input: Write string line as one or more folded lines. ### Response: def _fold_line(self, line): """Write string line as one or more folded lines.""" if len(line) <= self._cols: self._output_file.write(line) self._output_file.write(self._line_sep) else: pos = self._cols self._output_file.write(line[0:self._cols]) self._output_file.write(self._line_sep) while pos < len(line): self._output_file.write(b' ') end = min(len(line), pos + self._cols - 1) self._output_file.write(line[pos:end]) self._output_file.write(self._line_sep) pos = end
def profit_construct(self):
    """Break the account profit into its components.

    Returns:
        dict -- profit breakdown table: net trading profit (gross profit
        minus commission and tax, rounded to 2 decimals) plus each raw
        component and the gross profit itself.
    """
    commission = self.total_commission
    tax = self.total_tax
    gross = self.profit_money
    net_trading = round(gross - commission - tax, 2)
    return {
        'total_buyandsell': net_trading,
        'total_tax': tax,
        'total_commission': commission,
        'total_profit': gross,
    }
利润构成 Returns: dict -- 利润构成表
Below is the the instruction that describes the task: ### Input: 利润构成 Returns: dict -- 利润构成表 ### Response: def profit_construct(self): """利润构成 Returns: dict -- 利润构成表 """ return { 'total_buyandsell': round( self.profit_money - self.total_commission - self.total_tax, 2 ), 'total_tax': self.total_tax, 'total_commission': self.total_commission, 'total_profit': self.profit_money }
def resume(self):
    """Resume tracing after a `pause`: restart every tracer and
    re-install the trace hook for newly created threads."""
    for active_tracer in self.tracers:
        active_tracer.start()
    threading.settrace(self._installation_trace)
Resume tracing after a `pause`.
Below is the the instruction that describes the task: ### Input: Resume tracing after a `pause`. ### Response: def resume(self): """Resume tracing after a `pause`.""" for tracer in self.tracers: tracer.start() threading.settrace(self._installation_trace)
def systemSettings(self):
    """
    Returns the settings associated with this application for all users.

    The instance is created lazily on first access and cached on
    ``self._systemSettings`` thereafter. The YAML settings file lives
    next to the platform Qt settings file when running compiled, and in
    the current working directory otherwise.

    :return     <projexui.xsettings.XSettings>
    """
    if not self._systemSettings:
        if self.isCompiled():
            # Use a throwaway QSettings instance purely to discover the
            # platform-specific system-scope settings directory.
            settings = QtCore.QSettings(XSettings.IniFormat,
                                        XSettings.SystemScope,
                                        self.organizationName(),
                                        self.applicationName())
            rootpath = os.path.dirname(settings.fileName())
        else:
            # Not compiled (e.g. running from source): store settings
            # relative to the current working directory.
            rootpath = os.path.abspath('.')
        name = self.applicationName()
        filename = os.path.join(rootpath, '{0}.yaml'.format(name))
        self._systemSettings = XSettings(XSettings.YamlFormat,
                                         XSettings.SystemScope,
                                         self.organizationName(),
                                         self.applicationName(),
                                         filename=filename)
    return self._systemSettings
Returns the settings associated with this application for all users. :return <projexui.xsettings.XSettings>
Below is the the instruction that describes the task: ### Input: Returns the settings associated with this application for all users. :return <projexui.xsettings.XSettings> ### Response: def systemSettings(self): """ Returns the settings associated with this application for all users. :return <projexui.xsettings.XSettings> """ if not self._systemSettings: if self.isCompiled(): settings = QtCore.QSettings(XSettings.IniFormat, XSettings.SystemScope, self.organizationName(), self.applicationName()) rootpath = os.path.dirname(settings.fileName()) else: rootpath = os.path.abspath('.') name = self.applicationName() filename = os.path.join(rootpath, '{0}.yaml'.format(name)) self._systemSettings = XSettings(XSettings.YamlFormat, XSettings.SystemScope, self.organizationName(), self.applicationName(), filename=filename) return self._systemSettings
def _weight_by_hue(self):
    """
    Return a list of (total weight, normalized total weight, hue,
    ranges)-tuples, sorted by total weight in descending order.

    ColorTheme is made up out of (color, range, weight) tuples.
    For consistency with XML-output in the old Prism format
    (i.e. <color>s made up of <shade>s) we need a group weight per
    different hue. The same is true for the swatch() draw method.

    Hues are grouped as a single unit (e.g. dark red, intense red, weak red)
    after which the dimensions (rows/columns) is determined.
    """
    grouped = {}
    for clr, rng, weight in self.ranges:
        h = clr.nearest_hue(primary=False)
        # Membership test instead of dict.has_key(), which was removed
        # in Python 3; "in" behaves identically on Python 2.
        if h in grouped:
            ranges, total_weight = grouped[h]
            ranges.append((clr, rng, weight))
            total_weight += weight
            grouped[h] = (ranges, total_weight)
        else:
            grouped[h] = ([(clr, rng, weight)], weight)

    # Calculate the normalized (0.0-1.0) weight for each hue,
    # and transform the dictionary to a list.
    s = 1.0 * sum(w for r, w in grouped.values())
    grouped = [(grouped[h][1], grouped[h][1] / s, h, grouped[h][0]) for h in grouped]
    # Tuple comparison sorts by total weight first; reverse for
    # heaviest-hue-first ordering (kept as sort+reverse to preserve
    # the original tie ordering exactly).
    grouped.sort()
    grouped.reverse()
    return grouped
Returns a list of (hue, ranges, total weight, normalized total weight)-tuples. ColorTheme is made up out of (color, range, weight) tuples. For consistency with XML-output in the old Prism format (i.e. <color>s made up of <shade>s) we need a group weight per different hue. The same is true for the swatch() draw method. Hues are grouped as a single unit (e.g. dark red, intense red, weak red) after which the dimensions (rows/columns) is determined.
Below is the the instruction that describes the task: ### Input: Returns a list of (hue, ranges, total weight, normalized total weight)-tuples. ColorTheme is made up out of (color, range, weight) tuples. For consistency with XML-output in the old Prism format (i.e. <color>s made up of <shade>s) we need a group weight per different hue. The same is true for the swatch() draw method. Hues are grouped as a single unit (e.g. dark red, intense red, weak red) after which the dimensions (rows/columns) is determined. ### Response: def _weight_by_hue(self): """ Returns a list of (hue, ranges, total weight, normalized total weight)-tuples. ColorTheme is made up out of (color, range, weight) tuples. For consistency with XML-output in the old Prism format (i.e. <color>s made up of <shade>s) we need a group weight per different hue. The same is true for the swatch() draw method. Hues are grouped as a single unit (e.g. dark red, intense red, weak red) after which the dimensions (rows/columns) is determined. """ grouped = {} weights = [] for clr, rng, weight in self.ranges: h = clr.nearest_hue(primary=False) if grouped.has_key(h): ranges, total_weight = grouped[h] ranges.append((clr, rng, weight)) total_weight += weight grouped[h] = (ranges, total_weight) else: grouped[h] = ([(clr, rng, weight)], weight) # Calculate the normalized (0.0-1.0) weight for each hue, # and transform the dictionary to a list. s = 1.0 * sum([w for r, w in grouped.values()]) grouped = [(grouped[h][1], grouped[h][1] / s, h, grouped[h][0]) for h in grouped] grouped.sort() grouped.reverse() return grouped
def TIF_to_jpg(fnameTiff, overwrite=False, saveAs=""):
    """
    given a TIF taken by our cameras, make it a pretty labeled JPG.

    if the filename contains "f10" or "f20", add appropriate scale bars.

    automatic contrast adjustment is different depending on if its a DIC
    image or fluorescent image (which is detected automatically).
    """
    if saveAs == "":
        # Default output path: same name with ".jpg" appended.
        saveAs=fnameTiff+".jpg"
    if overwrite is False and os.path.exists(saveAs):
        print("file exists, not overwriting...")
        return
    # load the image
    img=pylab.imread(fnameTiff)
    img=img/np.max(img) # now the data is from 0 to 1
    # determine the old histogram
    hist1,bins1=np.histogram(img.ravel(),bins=256, range=(0,1))
    #pylab.plot(bins[:-1],hist)
    # detect darkfield by average: a mostly-dark frame is treated as
    # fluorescence; repeated sqrt brightens it until the mean is >= .5
    if np.average(img)<.2:
        vmin=None
        vmax=None
        msg=" | FLU"
        while np.average(img)<.5:
            img=np.sqrt(img)
            msg+="^(.5)"
    else:
        # Bright frame: DIC. Clip contrast at the .005 / 99.995
        # percentiles to ignore outlier pixels.
        msg=" | DIC"
        percentile=.005
        vmin=np.percentile(img.ravel(),percentile)
        vmax=np.percentile(img.ravel(),100-percentile)
    # determine the new histogram
    hist2,bins2=np.histogram(img.ravel(),bins=256, range=(0,1))
    # plot it with resizing magic: strip all margins/ticks so the axes
    # fill the whole figure
    fig=pylab.figure(facecolor='r')
    fig.gca().imshow(img,cmap=pylab.gray(),vmin=vmin,vmax=vmax)
    pylab.subplots_adjust(top=1, bottom=0, right=1, left=0, hspace=0, wspace=0)
    pylab.gca().xaxis.set_major_locator(pylab.NullLocator())
    pylab.gca().yaxis.set_major_locator(pylab.NullLocator())
    pylab.axis('off')
    # resize it to the original size (savefig below uses dpi=100, so
    # inches = pixels/100 keeps a 1:1 pixel mapping)
    fig.set_size_inches(img.shape[1]/100, img.shape[0]/100)
    # add text: filename, file mtime, and the contrast-mode tag built above
    msg="%s | %s"%(os.path.basename(fnameTiff),
                   datetime.datetime.fromtimestamp(os.path.getmtime(fnameTiff)))+msg
    center=10
    pylab.text(center,center,"%s"%(msg),va="top",color='w',size='small',
               family='monospace',weight='bold',
               bbox=dict(facecolor='k', alpha=.5))
    # add scale bar (widths are per-objective pixel calibrations for the
    # f10/f20 filename tags)
    scaleWidthPx=False
    if "f10" in fnameTiff:
        scaleWidthPx,scaleBarText=39,"25 um"
    if "f20" in fnameTiff:
        scaleWidthPx,scaleBarText=31,"10 um"
    if scaleWidthPx:
        scaleBarPadding=10
        x2,y2=img.shape[1]-scaleBarPadding,img.shape[0]-scaleBarPadding
        x1,y1=x2-scaleWidthPx,y2
        # Draw twice: a dark offset shadow first, then the white bar/label.
        for offset,color,alpha in [[2,'k',.5],[0,'w',1]]:
            pylab.plot([x1+offset,x2+offset],[y1+offset,y2+offset],'-',
                       color=color,lw=4,alpha=alpha)
            pylab.text((x1+x2)/2+offset,y1-5+offset,scaleBarText,color=color,
                       ha="center",weight="bold",alpha=alpha,
                       size="small",va="bottom",family="monospace")
    # add histogram
    #pylab.plot(img.shape[1]-bins1[:-1][::-1]*200,-hist1/max(hist1)*100+110,color='g')
    #pylab.plot(img.shape[1]-bins2[:-1][::-1]*200,-hist2/max(hist2)*100+110,color='b')
    #pylab.show()
    # save it
    pylab.savefig(saveAs,dpi=100)
    # clean up
    pylab.close()
given a TIF taken by our cameras, make it a pretty labeled JPG. if the filename contains "f10" or "f20", add appropraite scale bars. automatic contrast adjustment is different depending on if its a DIC image or fluorescent image (which is detected automatically).
Below is the the instruction that describes the task: ### Input: given a TIF taken by our cameras, make it a pretty labeled JPG. if the filename contains "f10" or "f20", add appropraite scale bars. automatic contrast adjustment is different depending on if its a DIC image or fluorescent image (which is detected automatically). ### Response: def TIF_to_jpg(fnameTiff, overwrite=False, saveAs=""): """ given a TIF taken by our cameras, make it a pretty labeled JPG. if the filename contains "f10" or "f20", add appropraite scale bars. automatic contrast adjustment is different depending on if its a DIC image or fluorescent image (which is detected automatically). """ if saveAs == "": saveAs=fnameTiff+".jpg" if overwrite is False and os.path.exists(saveAs): print("file exists, not overwriting...") return # load the image img=pylab.imread(fnameTiff) img=img/np.max(img) # now the data is from 0 to 1 # determine the old histogram hist1,bins1=np.histogram(img.ravel(),bins=256, range=(0,1)) #pylab.plot(bins[:-1],hist) # detect darkfield by average: if np.average(img)<.2: vmin=None vmax=None msg=" | FLU" while np.average(img)<.5: img=np.sqrt(img) msg+="^(.5)" else: msg=" | DIC" percentile=.005 vmin=np.percentile(img.ravel(),percentile) vmax=np.percentile(img.ravel(),100-percentile) # determine the new histogram hist2,bins2=np.histogram(img.ravel(),bins=256, range=(0,1)) # plot it with resizing magic fig=pylab.figure(facecolor='r') fig.gca().imshow(img,cmap=pylab.gray(),vmin=vmin,vmax=vmax) pylab.subplots_adjust(top=1, bottom=0, right=1, left=0, hspace=0, wspace=0) pylab.gca().xaxis.set_major_locator(pylab.NullLocator()) pylab.gca().yaxis.set_major_locator(pylab.NullLocator()) pylab.axis('off') # resize it to the original size fig.set_size_inches(img.shape[1]/100, img.shape[0]/100) # add text msg="%s | %s"%(os.path.basename(fnameTiff), datetime.datetime.fromtimestamp(os.path.getmtime(fnameTiff)))+msg center=10 pylab.text(center,center,"%s"%(msg),va="top",color='w',size='small', 
family='monospace',weight='bold', bbox=dict(facecolor='k', alpha=.5)) # add scale bar scaleWidthPx=False if "f10" in fnameTiff: scaleWidthPx,scaleBarText=39,"25 um" if "f20" in fnameTiff: scaleWidthPx,scaleBarText=31,"10 um" if scaleWidthPx: scaleBarPadding=10 x2,y2=img.shape[1]-scaleBarPadding,img.shape[0]-scaleBarPadding x1,y1=x2-scaleWidthPx,y2 for offset,color,alpha in [[2,'k',.5],[0,'w',1]]: pylab.plot([x1+offset,x2+offset],[y1+offset,y2+offset],'-', color=color,lw=4,alpha=alpha) pylab.text((x1+x2)/2+offset,y1-5+offset,scaleBarText,color=color, ha="center",weight="bold",alpha=alpha, size="small",va="bottom",family="monospace") # add histogram #pylab.plot(img.shape[1]-bins1[:-1][::-1]*200,-hist1/max(hist1)*100+110,color='g') #pylab.plot(img.shape[1]-bins2[:-1][::-1]*200,-hist2/max(hist2)*100+110,color='b') #pylab.show() # save it pylab.savefig(saveAs,dpi=100) # clean up pylab.close()
def getNameByPosition(self, idx):
    """Return field name by its position in fields set.

    Parameters
    ----------
    idx: :py:class:`int`
        Field index

    Returns
    -------
    : :py:class:`str`
        Field name

    Raises
    ------
    : :class:`~pyasn1.error.PyAsn1Error`
        If the given position is out of the callee *NamedTypes* range
    """
    try:
        namedType = self.__namedTypes[idx]
    except IndexError:
        raise error.PyAsn1Error('Type position out of range')
    return namedType.name
Return field name by its position in fields set. Parameters ---------- idx: :py:class:`idx` Field index Returns ------- : :py:class:`str` Field name Raises ------ : :class:`~pyasn1.error.PyAsn1Error` If given field name is not present in callee *NamedTypes*
Below is the the instruction that describes the task: ### Input: Return field name by its position in fields set. Parameters ---------- idx: :py:class:`idx` Field index Returns ------- : :py:class:`str` Field name Raises ------ : :class:`~pyasn1.error.PyAsn1Error` If given field name is not present in callee *NamedTypes* ### Response: def getNameByPosition(self, idx): """Return field name by its position in fields set. Parameters ---------- idx: :py:class:`idx` Field index Returns ------- : :py:class:`str` Field name Raises ------ : :class:`~pyasn1.error.PyAsn1Error` If given field name is not present in callee *NamedTypes* """ try: return self.__namedTypes[idx].name except IndexError: raise error.PyAsn1Error('Type position out of range')
def _messageFromSender(self, sender, messageID):
    """
    Locate a previously queued message by a given sender and messageID.

    :param sender: address-like object with ``localpart`` and ``domain``
        attributes identifying the sender.
    :param messageID: identifier of the message to look up.
    :return: the matching ``_QueuedMessage`` item, or ``None`` when no
        message matches all three criteria.
    """
    # NOTE(review): findUnique presumably raises if several rows match;
    # confirm against the store API. default=None covers the no-match case.
    return self.store.findUnique(
        _QueuedMessage,
        AND(_QueuedMessage.senderUsername == sender.localpart,
            _QueuedMessage.senderDomain == sender.domain,
            _QueuedMessage.messageID == messageID),
        default=None)
Locate a previously queued message by a given sender and messageID.
Below is the the instruction that describes the task: ### Input: Locate a previously queued message by a given sender and messageID. ### Response: def _messageFromSender(self, sender, messageID): """ Locate a previously queued message by a given sender and messageID. """ return self.store.findUnique( _QueuedMessage, AND(_QueuedMessage.senderUsername == sender.localpart, _QueuedMessage.senderDomain == sender.domain, _QueuedMessage.messageID == messageID), default=None)
def connect(self, host, port):
    """Connect to a host on a given port.

    If the hostname ends with a colon (`:') followed by a number, and
    there is no port specified, that suffix will be stripped off and the
    number interpreted as the port number to use.

    Raises socket.error if the port suffix is non-numeric, or re-raises
    the last connection error if no address returned by getaddrinfo()
    accepts a connection.
    """
    # Ported from Python-2-only syntax (`raise cls, msg` and
    # `except cls, name`) to the form valid on both Python 2.6+ and 3.
    if not port and (host.find(':') == host.rfind(':')):
        # Exactly one colon and no explicit port: split "host:port".
        i = host.rfind(':')
        if i >= 0:
            host, port = host[:i], host[i+1:]
            try:
                port = int(port)
            except ValueError:
                raise socket.error("nonnumeric port")
    # Remember the most recent failure so it can be re-raised when every
    # candidate address fails (or getaddrinfo yields nothing).
    err = socket.error("getaddrinfo returns an empty list")
    self.sock = None
    for res in socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM):
        af, socktype, proto, canonname, sa = res
        try:
            self.sock = socket.socket(af, socktype, proto)
            self.sock.connect(sa)
        except socket.error as exc:
            # Python 3 unbinds the except target after the block, so
            # stash the exception before moving to the next address.
            err = exc
            self.close()
            continue
        break
    if not self.sock:
        raise err
Connect to a host on a given port. If the hostname ends with a colon (`:') followed by a number, and there is no port specified, that suffix will be stripped off and the number interpreted as the port number to use.
Below is the the instruction that describes the task: ### Input: Connect to a host on a given port. If the hostname ends with a colon (`:') followed by a number, and there is no port specified, that suffix will be stripped off and the number interpreted as the port number to use. ### Response: def connect(self, host, port): """Connect to a host on a given port. If the hostname ends with a colon (`:') followed by a number, and there is no port specified, that suffix will be stripped off and the number interpreted as the port number to use. """ if not port and (host.find(':') == host.rfind(':')): i = host.rfind(':') if i >= 0: host, port = host[:i], host[i+1:] try: port = int(port) except ValueError: raise socket.error, "nonnumeric port" #if self.verbose > 0: # print 'connect:', (host, port) msg = "getaddrinfo returns an empty list" self.sock = None for res in socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM): af, socktype, proto, canonname, sa = res try: self.sock = socket.socket(af, socktype, proto) #if self.debuglevel > 0: print 'connect:', (host, port) self.sock.connect(sa) except socket.error, msg: #if self.debuglevel > 0: print 'connect fail:', (host, port) self.close() continue break if not self.sock: raise socket.error, msg
def _detect(self):
    """
    Detect un-indexed ERC20 event parameters in all contracts.

    Iterates every analyzed contract, collects (event, parameter) pairs
    flagged by ``detect_erc20_unindexed_event_params``, and returns one
    JSON finding per offending contract.
    """
    results = []
    for c in self.contracts:
        unindexed_params = self.detect_erc20_unindexed_event_params(c)
        if unindexed_params:
            # Build a human-readable summary listing each offending
            # event/parameter pair for this contract.
            info = "{} ({}) does not mark important ERC20 parameters as 'indexed':\n"
            info = info.format(c.name, c.source_mapping_str)
            for (event, parameter) in unindexed_params:
                info += "\t-{} ({}) does not index parameter '{}'\n".format(event.name, event.source_mapping_str, parameter.name)
            # Add the events to the JSON (note: we do not add the params/vars as they have no source mapping).
            json = self.generate_json_result(info)
            self.add_functions_to_json([event for event, _ in unindexed_params], json)
            results.append(json)
    return results
Detect un-indexed ERC20 event parameters in all contracts.
Below is the the instruction that describes the task: ### Input: Detect un-indexed ERC20 event parameters in all contracts. ### Response: def _detect(self): """ Detect un-indexed ERC20 event parameters in all contracts. """ results = [] for c in self.contracts: unindexed_params = self.detect_erc20_unindexed_event_params(c) if unindexed_params: info = "{} ({}) does not mark important ERC20 parameters as 'indexed':\n" info = info.format(c.name, c.source_mapping_str) for (event, parameter) in unindexed_params: info += "\t-{} ({}) does not index parameter '{}'\n".format(event.name, event.source_mapping_str, parameter.name) # Add the events to the JSON (note: we do not add the params/vars as they have no source mapping). json = self.generate_json_result(info) self.add_functions_to_json([event for event, _ in unindexed_params], json) results.append(json) return results
def reverse_point(self, latitude, longitude, **kwargs):
    """Identify the address at a geographic point (reverse geocoding).

    Accepts an optional ``fields`` keyword listing extra response fields
    to request; all other keyword arguments are ignored.
    """
    requested_fields = kwargs.pop("fields", [])
    point_param = "{0},{1}".format(latitude, longitude)
    response = self._req(
        verb="reverse",
        params={"q": point_param, "fields": ",".join(requested_fields)}
    )
    if response.status_code == 200:
        return Location(response.json())
    return error_response(response)
Method for identifying an address from a geographic point
Below is the the instruction that describes the task: ### Input: Method for identifying an address from a geographic point ### Response: def reverse_point(self, latitude, longitude, **kwargs): """ Method for identifying an address from a geographic point """ fields = ",".join(kwargs.pop("fields", [])) point_param = "{0},{1}".format(latitude, longitude) response = self._req( verb="reverse", params={"q": point_param, "fields": fields} ) if response.status_code != 200: return error_response(response) return Location(response.json())
def move(self, partition, source, dest): """Return a new state that is the result of moving a single partition. :param partition: The partition index of the partition to move. :param source: The broker index of the broker to move the partition from. :param dest: The broker index of the broker to move the partition to. """ new_state = copy(self) # Update the partition replica tuple source_index = self.replicas[partition].index(source) new_state.replicas = tuple_alter( self.replicas, (partition, lambda replicas: tuple_replace( replicas, (source_index, dest), )), ) new_state.pending_partitions = self.pending_partitions + (partition, ) # Update the broker weights partition_weight = self.partition_weights[partition] new_state.broker_weights = tuple_alter( self.broker_weights, (source, lambda broker_weight: broker_weight - partition_weight), (dest, lambda broker_weight: broker_weight + partition_weight), ) # Update the broker partition count new_state.broker_partition_counts = tuple_alter( self.broker_partition_counts, (source, lambda partition_count: partition_count - 1), (dest, lambda partition_count: partition_count + 1), ) # Update the broker leader weights if source_index == 0: new_state.broker_leader_weights = tuple_alter( self.broker_leader_weights, (source, lambda lw: lw - partition_weight), (dest, lambda lw: lw + partition_weight), ) new_state.broker_leader_counts = tuple_alter( self.broker_leader_counts, (source, lambda leader_count: leader_count - 1), (dest, lambda leader_count: leader_count + 1), ) new_state.leader_movement_count += 1 # Update the topic broker counts topic = self.partition_topic[partition] new_state.topic_broker_count = tuple_alter( self.topic_broker_count, (topic, lambda broker_count: tuple_alter( broker_count, (source, lambda count: count - 1), (dest, lambda count: count + 1), )), ) # Update the topic broker imbalance new_state.topic_broker_imbalance = tuple_replace( self.topic_broker_imbalance, (topic, 
new_state._calculate_topic_imbalance(topic)), ) new_state._weighted_topic_broker_imbalance = ( self._weighted_topic_broker_imbalance + self.topic_weights[topic] * ( new_state.topic_broker_imbalance[topic] - self.topic_broker_imbalance[topic] ) ) # Update the replication group replica counts source_rg = self.broker_rg[source] dest_rg = self.broker_rg[dest] if source_rg != dest_rg: new_state.rg_replicas = tuple_alter( self.rg_replicas, (source_rg, lambda replica_counts: tuple_alter( replica_counts, (partition, lambda replica_count: replica_count - 1), )), (dest_rg, lambda replica_counts: tuple_alter( replica_counts, (partition, lambda replica_count: replica_count + 1), )), ) # Update the movement sizes new_state.movement_size += self.partition_sizes[partition] new_state.movement_count += 1 return new_state
Return a new state that is the result of moving a single partition. :param partition: The partition index of the partition to move. :param source: The broker index of the broker to move the partition from. :param dest: The broker index of the broker to move the partition to.
Below is the the instruction that describes the task: ### Input: Return a new state that is the result of moving a single partition. :param partition: The partition index of the partition to move. :param source: The broker index of the broker to move the partition from. :param dest: The broker index of the broker to move the partition to. ### Response: def move(self, partition, source, dest): """Return a new state that is the result of moving a single partition. :param partition: The partition index of the partition to move. :param source: The broker index of the broker to move the partition from. :param dest: The broker index of the broker to move the partition to. """ new_state = copy(self) # Update the partition replica tuple source_index = self.replicas[partition].index(source) new_state.replicas = tuple_alter( self.replicas, (partition, lambda replicas: tuple_replace( replicas, (source_index, dest), )), ) new_state.pending_partitions = self.pending_partitions + (partition, ) # Update the broker weights partition_weight = self.partition_weights[partition] new_state.broker_weights = tuple_alter( self.broker_weights, (source, lambda broker_weight: broker_weight - partition_weight), (dest, lambda broker_weight: broker_weight + partition_weight), ) # Update the broker partition count new_state.broker_partition_counts = tuple_alter( self.broker_partition_counts, (source, lambda partition_count: partition_count - 1), (dest, lambda partition_count: partition_count + 1), ) # Update the broker leader weights if source_index == 0: new_state.broker_leader_weights = tuple_alter( self.broker_leader_weights, (source, lambda lw: lw - partition_weight), (dest, lambda lw: lw + partition_weight), ) new_state.broker_leader_counts = tuple_alter( self.broker_leader_counts, (source, lambda leader_count: leader_count - 1), (dest, lambda leader_count: leader_count + 1), ) new_state.leader_movement_count += 1 # Update the topic broker counts topic = self.partition_topic[partition] 
new_state.topic_broker_count = tuple_alter( self.topic_broker_count, (topic, lambda broker_count: tuple_alter( broker_count, (source, lambda count: count - 1), (dest, lambda count: count + 1), )), ) # Update the topic broker imbalance new_state.topic_broker_imbalance = tuple_replace( self.topic_broker_imbalance, (topic, new_state._calculate_topic_imbalance(topic)), ) new_state._weighted_topic_broker_imbalance = ( self._weighted_topic_broker_imbalance + self.topic_weights[topic] * ( new_state.topic_broker_imbalance[topic] - self.topic_broker_imbalance[topic] ) ) # Update the replication group replica counts source_rg = self.broker_rg[source] dest_rg = self.broker_rg[dest] if source_rg != dest_rg: new_state.rg_replicas = tuple_alter( self.rg_replicas, (source_rg, lambda replica_counts: tuple_alter( replica_counts, (partition, lambda replica_count: replica_count - 1), )), (dest_rg, lambda replica_counts: tuple_alter( replica_counts, (partition, lambda replica_count: replica_count + 1), )), ) # Update the movement sizes new_state.movement_size += self.partition_sizes[partition] new_state.movement_count += 1 return new_state
def change_status(self, status): """ Change the user's status :param user: :param email: :return: """ def cb(): self.user.update(status=status) return status return signals.user_update(self, ACTIONS["STATUS"], cb, data={"status": self.status})
Change the user's status :param user: :param email: :return:
Below is the the instruction that describes the task: ### Input: Change the user's status :param user: :param email: :return: ### Response: def change_status(self, status): """ Change the user's status :param user: :param email: :return: """ def cb(): self.user.update(status=status) return status return signals.user_update(self, ACTIONS["STATUS"], cb, data={"status": self.status})
def setMaxSpeedLat(self, typeID, speed): """setMaxSpeedLat(string, double) -> None Sets the maximum lateral speed of this type. """ self._connection._sendDoubleCmd( tc.CMD_SET_VEHICLETYPE_VARIABLE, tc.VAR_MAXSPEED_LAT, typeID, speed)
setMaxSpeedLat(string, double) -> None Sets the maximum lateral speed of this type.
Below is the the instruction that describes the task: ### Input: setMaxSpeedLat(string, double) -> None Sets the maximum lateral speed of this type. ### Response: def setMaxSpeedLat(self, typeID, speed): """setMaxSpeedLat(string, double) -> None Sets the maximum lateral speed of this type. """ self._connection._sendDoubleCmd( tc.CMD_SET_VEHICLETYPE_VARIABLE, tc.VAR_MAXSPEED_LAT, typeID, speed)
def cast_primitive(state, value, to_type): """ Cast the value of primtive types. :param value: Bitvector storing the primitive value. :param to_type: Name of the targeted type. :return: Resized value. """ if to_type in ['float', 'double']: if value.symbolic: # TODO extend support for floating point types l.warning('No support for symbolic floating-point arguments.' 'Value gets concretized.') value = float(state.solver.eval(value)) sort = FSORT_FLOAT if to_type == 'float' else FSORT_DOUBLE return FPV(value, sort) elif to_type == 'int' and isinstance(value, FP): # TODO fix fpToIEEEBV in claripty l.warning('Converting FP to BV might provide incorrect results.') return fpToIEEEBV(value)[63:32] elif to_type == 'long' and isinstance(value, FP): # TODO fix fpToIEEEBV in claripty l.warning('Converting FP to BV might provide incorrect results.') return fpToIEEEBV(value) else: # lookup the type size and extract value value_size = ArchSoot.sizeof[to_type] value_extracted = value.reversed.get_bytes(index=0, size=value_size//8).reversed # determine size of Soot bitvector and resize bitvector # Note: smaller types than int's are stored in a 32-bit BV value_soot_size = value_size if value_size >= 32 else 32 if to_type in ['char', 'boolean']: # unsigned extend return value_extracted.zero_extend(value_soot_size-value_extracted.size()) # signed extend return value_extracted.sign_extend(value_soot_size-value_extracted.size())
Cast the value of primtive types. :param value: Bitvector storing the primitive value. :param to_type: Name of the targeted type. :return: Resized value.
Below is the the instruction that describes the task: ### Input: Cast the value of primtive types. :param value: Bitvector storing the primitive value. :param to_type: Name of the targeted type. :return: Resized value. ### Response: def cast_primitive(state, value, to_type): """ Cast the value of primtive types. :param value: Bitvector storing the primitive value. :param to_type: Name of the targeted type. :return: Resized value. """ if to_type in ['float', 'double']: if value.symbolic: # TODO extend support for floating point types l.warning('No support for symbolic floating-point arguments.' 'Value gets concretized.') value = float(state.solver.eval(value)) sort = FSORT_FLOAT if to_type == 'float' else FSORT_DOUBLE return FPV(value, sort) elif to_type == 'int' and isinstance(value, FP): # TODO fix fpToIEEEBV in claripty l.warning('Converting FP to BV might provide incorrect results.') return fpToIEEEBV(value)[63:32] elif to_type == 'long' and isinstance(value, FP): # TODO fix fpToIEEEBV in claripty l.warning('Converting FP to BV might provide incorrect results.') return fpToIEEEBV(value) else: # lookup the type size and extract value value_size = ArchSoot.sizeof[to_type] value_extracted = value.reversed.get_bytes(index=0, size=value_size//8).reversed # determine size of Soot bitvector and resize bitvector # Note: smaller types than int's are stored in a 32-bit BV value_soot_size = value_size if value_size >= 32 else 32 if to_type in ['char', 'boolean']: # unsigned extend return value_extracted.zero_extend(value_soot_size-value_extracted.size()) # signed extend return value_extracted.sign_extend(value_soot_size-value_extracted.size())
def get_services(self): """Returns a list of FritzService-objects.""" result = [] nodes = self.root.iterfind( './/ns:service', namespaces={'ns': self.namespace}) for node in nodes: result.append(FritzService( node.find(self.nodename('serviceType')).text, node.find(self.nodename('controlURL')).text, node.find(self.nodename('SCPDURL')).text)) return result
Returns a list of FritzService-objects.
Below is the the instruction that describes the task: ### Input: Returns a list of FritzService-objects. ### Response: def get_services(self): """Returns a list of FritzService-objects.""" result = [] nodes = self.root.iterfind( './/ns:service', namespaces={'ns': self.namespace}) for node in nodes: result.append(FritzService( node.find(self.nodename('serviceType')).text, node.find(self.nodename('controlURL')).text, node.find(self.nodename('SCPDURL')).text)) return result
def log_metrics(self, metrics_by_name, info): """Store new measurements into metrics.json. """ try: metrics_path = os.path.join(self.dir, "metrics.json") with open(metrics_path, 'r') as f: saved_metrics = json.load(f) except IOError: # We haven't recorded anything yet. Start Collecting. saved_metrics = {} for metric_name, metric_ptr in metrics_by_name.items(): if metric_name not in saved_metrics: saved_metrics[metric_name] = {"values": [], "steps": [], "timestamps": []} saved_metrics[metric_name]["values"] += metric_ptr["values"] saved_metrics[metric_name]["steps"] += metric_ptr["steps"] # Manually convert them to avoid passing a datetime dtype handler # when we're trying to convert into json. timestamps_norm = [ts.isoformat() for ts in metric_ptr["timestamps"]] saved_metrics[metric_name]["timestamps"] += timestamps_norm self.save_json(saved_metrics, 'metrics.json')
Store new measurements into metrics.json.
Below is the the instruction that describes the task: ### Input: Store new measurements into metrics.json. ### Response: def log_metrics(self, metrics_by_name, info): """Store new measurements into metrics.json. """ try: metrics_path = os.path.join(self.dir, "metrics.json") with open(metrics_path, 'r') as f: saved_metrics = json.load(f) except IOError: # We haven't recorded anything yet. Start Collecting. saved_metrics = {} for metric_name, metric_ptr in metrics_by_name.items(): if metric_name not in saved_metrics: saved_metrics[metric_name] = {"values": [], "steps": [], "timestamps": []} saved_metrics[metric_name]["values"] += metric_ptr["values"] saved_metrics[metric_name]["steps"] += metric_ptr["steps"] # Manually convert them to avoid passing a datetime dtype handler # when we're trying to convert into json. timestamps_norm = [ts.isoformat() for ts in metric_ptr["timestamps"]] saved_metrics[metric_name]["timestamps"] += timestamps_norm self.save_json(saved_metrics, 'metrics.json')
def RegisterRecordType(cls, record_class): """Register a known record type in KNOWN_CLASSES. Args: record_class (UpdateRecord): An update record subclass. """ record_type = record_class.MatchType() if record_type not in UpdateRecord.KNOWN_CLASSES: UpdateRecord.KNOWN_CLASSES[record_type] = [] UpdateRecord.KNOWN_CLASSES[record_type].append(record_class)
Register a known record type in KNOWN_CLASSES. Args: record_class (UpdateRecord): An update record subclass.
Below is the the instruction that describes the task: ### Input: Register a known record type in KNOWN_CLASSES. Args: record_class (UpdateRecord): An update record subclass. ### Response: def RegisterRecordType(cls, record_class): """Register a known record type in KNOWN_CLASSES. Args: record_class (UpdateRecord): An update record subclass. """ record_type = record_class.MatchType() if record_type not in UpdateRecord.KNOWN_CLASSES: UpdateRecord.KNOWN_CLASSES[record_type] = [] UpdateRecord.KNOWN_CLASSES[record_type].append(record_class)
def edges(self): """ Return the edge characters of this node. """ edge_str = ctypes.create_string_buffer(MAX_CHARS) cgaddag.gdg_edges(self.gdg, self.node, edge_str) return [char for char in edge_str.value.decode("ascii")]
Return the edge characters of this node.
Below is the the instruction that describes the task: ### Input: Return the edge characters of this node. ### Response: def edges(self): """ Return the edge characters of this node. """ edge_str = ctypes.create_string_buffer(MAX_CHARS) cgaddag.gdg_edges(self.gdg, self.node, edge_str) return [char for char in edge_str.value.decode("ascii")]
def docstring_to_markdown(docstring): """Convert a Python object's docstring to markdown Parameters ---------- docstring : str The docstring body. Returns ---------- clean_lst : list The markdown formatted docstring as lines (str) in a Python list. """ new_docstring_lst = [] for idx, line in enumerate(docstring.split('\n')): line = line.strip() if set(line) in ({'-'}, {'='}): new_docstring_lst[idx-1] = '**%s**' % new_docstring_lst[idx-1] elif line.startswith('>>>'): line = ' %s' % line new_docstring_lst.append(line) for idx, line in enumerate(new_docstring_lst[1:]): if line: if line.startswith('Description : '): new_docstring_lst[idx+1] = (new_docstring_lst[idx+1] .replace('Description : ', '')) elif ' : ' in line: line = line.replace(' : ', '` : ') new_docstring_lst[idx+1] = '\n- `%s\n' % line elif '**' in new_docstring_lst[idx-1] and '**' not in line: new_docstring_lst[idx+1] = '\n%s' % line.lstrip() elif '**' not in line: new_docstring_lst[idx+1] = ' %s' % line.lstrip() clean_lst = [] for line in new_docstring_lst: if set(line.strip()) not in ({'-'}, {'='}): clean_lst.append(line) return clean_lst
Convert a Python object's docstring to markdown Parameters ---------- docstring : str The docstring body. Returns ---------- clean_lst : list The markdown formatted docstring as lines (str) in a Python list.
Below is the the instruction that describes the task: ### Input: Convert a Python object's docstring to markdown Parameters ---------- docstring : str The docstring body. Returns ---------- clean_lst : list The markdown formatted docstring as lines (str) in a Python list. ### Response: def docstring_to_markdown(docstring): """Convert a Python object's docstring to markdown Parameters ---------- docstring : str The docstring body. Returns ---------- clean_lst : list The markdown formatted docstring as lines (str) in a Python list. """ new_docstring_lst = [] for idx, line in enumerate(docstring.split('\n')): line = line.strip() if set(line) in ({'-'}, {'='}): new_docstring_lst[idx-1] = '**%s**' % new_docstring_lst[idx-1] elif line.startswith('>>>'): line = ' %s' % line new_docstring_lst.append(line) for idx, line in enumerate(new_docstring_lst[1:]): if line: if line.startswith('Description : '): new_docstring_lst[idx+1] = (new_docstring_lst[idx+1] .replace('Description : ', '')) elif ' : ' in line: line = line.replace(' : ', '` : ') new_docstring_lst[idx+1] = '\n- `%s\n' % line elif '**' in new_docstring_lst[idx-1] and '**' not in line: new_docstring_lst[idx+1] = '\n%s' % line.lstrip() elif '**' not in line: new_docstring_lst[idx+1] = ' %s' % line.lstrip() clean_lst = [] for line in new_docstring_lst: if set(line.strip()) not in ({'-'}, {'='}): clean_lst.append(line) return clean_lst
def search( self, base=False, trim=False, objects=False, **kwargs ): """ Returns matching entries for search in ldap structured as [(dn, {attributes})] UNLESS searching by dn, in which case the first match is returned """ scope = pyldap.SCOPE_SUBTREE if not base: base = self.users filterstr ='' for key, value in kwargs.iteritems(): filterstr += '({0}={1})'.format(key,value) if key == 'dn': filterstr = '(objectClass=*)' base = value scope = pyldap.SCOPE_BASE break if len(kwargs) > 1: filterstr = '(&'+filterstr+')' result = self.ldap.search_s(base, pyldap.SCOPE_SUBTREE, filterstr, ['*','+']) if base == self.users: for member in result: groups = self.getGroups(member[0]) member[1]['groups'] = groups if 'eboard' in member[1]['groups']: member[1]['committee'] = self.search(base=self.committees, \ head=member[0])[0][1]['cn'][0] if objects: return self.memberObjects(result) finalResult = self.trimResult(result) if trim else result return finalResult
Returns matching entries for search in ldap structured as [(dn, {attributes})] UNLESS searching by dn, in which case the first match is returned
Below is the the instruction that describes the task: ### Input: Returns matching entries for search in ldap structured as [(dn, {attributes})] UNLESS searching by dn, in which case the first match is returned ### Response: def search( self, base=False, trim=False, objects=False, **kwargs ): """ Returns matching entries for search in ldap structured as [(dn, {attributes})] UNLESS searching by dn, in which case the first match is returned """ scope = pyldap.SCOPE_SUBTREE if not base: base = self.users filterstr ='' for key, value in kwargs.iteritems(): filterstr += '({0}={1})'.format(key,value) if key == 'dn': filterstr = '(objectClass=*)' base = value scope = pyldap.SCOPE_BASE break if len(kwargs) > 1: filterstr = '(&'+filterstr+')' result = self.ldap.search_s(base, pyldap.SCOPE_SUBTREE, filterstr, ['*','+']) if base == self.users: for member in result: groups = self.getGroups(member[0]) member[1]['groups'] = groups if 'eboard' in member[1]['groups']: member[1]['committee'] = self.search(base=self.committees, \ head=member[0])[0][1]['cn'][0] if objects: return self.memberObjects(result) finalResult = self.trimResult(result) if trim else result return finalResult
def _loadData(self, data): """ Load attribute values from Plex XML response. """ self._data = data self.fastKey = data.attrib.get('fastKey') self.key = data.attrib.get('key') self.thumb = data.attrib.get('thumb') self.title = data.attrib.get('title') self.type = data.attrib.get('type')
Load attribute values from Plex XML response.
Below is the the instruction that describes the task: ### Input: Load attribute values from Plex XML response. ### Response: def _loadData(self, data): """ Load attribute values from Plex XML response. """ self._data = data self.fastKey = data.attrib.get('fastKey') self.key = data.attrib.get('key') self.thumb = data.attrib.get('thumb') self.title = data.attrib.get('title') self.type = data.attrib.get('type')
def transcribe_word(self, word): ''' The heart of the transcription process. Similar to the system in in cltk.phonology.utils, the algorithm: 1) Applies digraphs and diphthongs to the text of the word. 2) Carries out a naive ("greedy", per @clemsciences) substitution of letters to phonemes, according to the alphabet. 3) Applies the conditions of the rules to the environment of each phoneme in turn. The first rule matched fires. There is no restart and later rules are not tested. Also, if a rule returns multiple phonemes, these are never re-tested by the rule set. ''' phonemes = [] i = 0 while i < len(word): # check for digraphs and dipththongs if i < len(word) - 1 and word[i:i + 2] in self.di: letter_pair = word[i:i + 2] replacement = self.di[letter_pair] replacement = replacement if isinstance(replacement, list) else [replacement] phonemes.extend(replacement) i += 2 else: phonemes.append(self[word[i]]) i += 1 # apply phonological rules. Note: no restart! i = 0 while i < len(phonemes): for rule in self.rules: phonemes = self._position_phonemes(phonemes) if rule.check_environment(phonemes, i): replacement = rule(phonemes, i) replacement = [replacement] if not isinstance(replacement, list) else replacement new_phonemes = [self._find_sound(p) for p in replacement] phonemes[i:i + 1] = new_phonemes i += len(replacement) - 1 break i += 1 return phonemes
The heart of the transcription process. Similar to the system in in cltk.phonology.utils, the algorithm: 1) Applies digraphs and diphthongs to the text of the word. 2) Carries out a naive ("greedy", per @clemsciences) substitution of letters to phonemes, according to the alphabet. 3) Applies the conditions of the rules to the environment of each phoneme in turn. The first rule matched fires. There is no restart and later rules are not tested. Also, if a rule returns multiple phonemes, these are never re-tested by the rule set.
Below is the the instruction that describes the task: ### Input: The heart of the transcription process. Similar to the system in in cltk.phonology.utils, the algorithm: 1) Applies digraphs and diphthongs to the text of the word. 2) Carries out a naive ("greedy", per @clemsciences) substitution of letters to phonemes, according to the alphabet. 3) Applies the conditions of the rules to the environment of each phoneme in turn. The first rule matched fires. There is no restart and later rules are not tested. Also, if a rule returns multiple phonemes, these are never re-tested by the rule set. ### Response: def transcribe_word(self, word): ''' The heart of the transcription process. Similar to the system in in cltk.phonology.utils, the algorithm: 1) Applies digraphs and diphthongs to the text of the word. 2) Carries out a naive ("greedy", per @clemsciences) substitution of letters to phonemes, according to the alphabet. 3) Applies the conditions of the rules to the environment of each phoneme in turn. The first rule matched fires. There is no restart and later rules are not tested. Also, if a rule returns multiple phonemes, these are never re-tested by the rule set. ''' phonemes = [] i = 0 while i < len(word): # check for digraphs and dipththongs if i < len(word) - 1 and word[i:i + 2] in self.di: letter_pair = word[i:i + 2] replacement = self.di[letter_pair] replacement = replacement if isinstance(replacement, list) else [replacement] phonemes.extend(replacement) i += 2 else: phonemes.append(self[word[i]]) i += 1 # apply phonological rules. Note: no restart! i = 0 while i < len(phonemes): for rule in self.rules: phonemes = self._position_phonemes(phonemes) if rule.check_environment(phonemes, i): replacement = rule(phonemes, i) replacement = [replacement] if not isinstance(replacement, list) else replacement new_phonemes = [self._find_sound(p) for p in replacement] phonemes[i:i + 1] = new_phonemes i += len(replacement) - 1 break i += 1 return phonemes
def csv_dump(request, uid): """ Returns a CSV dump of all of the specified metric's counts and cumulative counts. """ metric = Metric.objects.get(uid=uid) frequency = request.GET.get('frequency', settings.STATISTIC_FREQUENCY_DAILY) response = HttpResponse(mimetype='text/csv') response['Content-Disposition'] = 'attachment; filename=%s%s.csv' % (uid, datetime.datetime.now().strftime("%Y%m%d-%H%M")) writer = csv.writer(response) writer.writerow([_('Date/time'), _('Count'), _('Cumulative count')]) for stat in metric.statistics.filter(frequency=frequency).order_by('date_time'): writer.writerow([stat.date_time.strftime(settings.CSV_DATETIME_FORMAT), stat.count, stat.cumulative_count]) return response
Returns a CSV dump of all of the specified metric's counts and cumulative counts.
Below is the the instruction that describes the task: ### Input: Returns a CSV dump of all of the specified metric's counts and cumulative counts. ### Response: def csv_dump(request, uid): """ Returns a CSV dump of all of the specified metric's counts and cumulative counts. """ metric = Metric.objects.get(uid=uid) frequency = request.GET.get('frequency', settings.STATISTIC_FREQUENCY_DAILY) response = HttpResponse(mimetype='text/csv') response['Content-Disposition'] = 'attachment; filename=%s%s.csv' % (uid, datetime.datetime.now().strftime("%Y%m%d-%H%M")) writer = csv.writer(response) writer.writerow([_('Date/time'), _('Count'), _('Cumulative count')]) for stat in metric.statistics.filter(frequency=frequency).order_by('date_time'): writer.writerow([stat.date_time.strftime(settings.CSV_DATETIME_FORMAT), stat.count, stat.cumulative_count]) return response
def deserialize_iso(attr): """Deserialize ISO-8601 formatted string into Datetime object. :param str attr: response string to be deserialized. :rtype: Datetime :raises: DeserializationError if string format invalid. """ if isinstance(attr, ET.Element): attr = attr.text try: attr = attr.upper() match = Deserializer.valid_date.match(attr) if not match: raise ValueError("Invalid datetime string: " + attr) check_decimal = attr.split('.') if len(check_decimal) > 1: decimal_str = "" for digit in check_decimal[1]: if digit.isdigit(): decimal_str += digit else: break if len(decimal_str) > 6: attr = attr.replace(decimal_str, decimal_str[0:6]) date_obj = isodate.parse_datetime(attr) test_utc = date_obj.utctimetuple() if test_utc.tm_year > 9999 or test_utc.tm_year < 1: raise OverflowError("Hit max or min date") except(ValueError, OverflowError, AttributeError) as err: msg = "Cannot deserialize datetime object." raise_with_traceback(DeserializationError, msg, err) else: return date_obj
Deserialize ISO-8601 formatted string into Datetime object. :param str attr: response string to be deserialized. :rtype: Datetime :raises: DeserializationError if string format invalid.
Below is the the instruction that describes the task: ### Input: Deserialize ISO-8601 formatted string into Datetime object. :param str attr: response string to be deserialized. :rtype: Datetime :raises: DeserializationError if string format invalid. ### Response: def deserialize_iso(attr): """Deserialize ISO-8601 formatted string into Datetime object. :param str attr: response string to be deserialized. :rtype: Datetime :raises: DeserializationError if string format invalid. """ if isinstance(attr, ET.Element): attr = attr.text try: attr = attr.upper() match = Deserializer.valid_date.match(attr) if not match: raise ValueError("Invalid datetime string: " + attr) check_decimal = attr.split('.') if len(check_decimal) > 1: decimal_str = "" for digit in check_decimal[1]: if digit.isdigit(): decimal_str += digit else: break if len(decimal_str) > 6: attr = attr.replace(decimal_str, decimal_str[0:6]) date_obj = isodate.parse_datetime(attr) test_utc = date_obj.utctimetuple() if test_utc.tm_year > 9999 or test_utc.tm_year < 1: raise OverflowError("Hit max or min date") except(ValueError, OverflowError, AttributeError) as err: msg = "Cannot deserialize datetime object." raise_with_traceback(DeserializationError, msg, err) else: return date_obj
def get_search_result(self, ddoc_id, index_name, **query_params): """ Retrieves the raw JSON content from the remote database based on the search index on the server, using the query_params provided as query parameters. A ``query`` parameter containing the Lucene query syntax is mandatory. Example for search queries: .. code-block:: python # Assuming that 'searchindex001' exists as part of the # 'ddoc001' design document in the remote database... # Retrieve documents where the Lucene field name is 'name' and # the value is 'julia*' resp = db.get_search_result('ddoc001', 'searchindex001', query='name:julia*', include_docs=True) for row in resp['rows']: # Process search index data (in JSON format). Example if the search query requires grouping by using the ``group_field`` parameter: .. code-block:: python # Assuming that 'searchindex001' exists as part of the # 'ddoc001' design document in the remote database... # Retrieve JSON response content, limiting response to 10 documents resp = db.get_search_result('ddoc001', 'searchindex001', query='name:julia*', group_field='name', limit=10) for group in resp['groups']: for row in group['rows']: # Process search index data (in JSON format). :param str ddoc_id: Design document id used to get the search result. :param str index_name: Name used in part to identify the index. :param str bookmark: Optional string that enables you to specify which page of results you require. Only valid for queries that do not specify the ``group_field`` query parameter. :param list counts: Optional JSON array of field names for which counts should be produced. The response will contain counts for each unique value of this field name among the documents matching the search query. Requires the index to have faceting enabled. :param list drilldown: Optional list of fields that each define a pair of a field name and a value. This field can be used several times. The search will only match documents that have the given value in the field name. 
It differs from using ``query=fieldname:value`` only in that the values are not analyzed. :param str group_field: Optional string field by which to group search matches. Fields containing other data (numbers, objects, arrays) can not be used. :param int group_limit: Optional number with the maximum group count. This field can only be used if ``group_field`` query parameter is specified. :param group_sort: Optional JSON field that defines the order of the groups in a search using ``group_field``. The default sort order is relevance. This field can have the same values as the sort field, so single fields as well as arrays of fields are supported. :param int limit: Optional number to limit the maximum count of the returned documents. In case of a grouped search, this parameter limits the number of documents per group. :param query/q: A Lucene query in the form of ``name:value``. If name is omitted, the special value ``default`` is used. The ``query`` parameter can be abbreviated as ``q``. :param ranges: Optional JSON facet syntax that reuses the standard Lucene syntax to return counts of results which fit into each specified category. Inclusive range queries are denoted by brackets. Exclusive range queries are denoted by curly brackets. For example ``ranges={"price":{"cheap":"[0 TO 100]"}}`` has an inclusive range of 0 to 100. Requires the index to have faceting enabled. :param sort: Optional JSON string of the form ``fieldname<type>`` for ascending or ``-fieldname<type>`` for descending sort order. Fieldname is the name of a string or number field and type is either number or string or a JSON array of such strings. The type part is optional and defaults to number. :param str stale: Optional string to allow the results from a stale index to be used. This makes the request return immediately, even if the index has not been completely built yet. :param list highlight_fields: Optional list of fields which should be highlighted. 
:param str highlight_pre_tag: Optional string inserted before the highlighted word in the highlights output. Defaults to ``<em>``. :param str highlight_post_tag: Optional string inserted after the highlighted word in the highlights output. Defaults to ``</em>``. :param int highlight_number: Optional number of fragments returned in highlights. If the search term occurs less often than the number of fragments specified, longer fragments are returned. Default is 1. :param int highlight_size: Optional number of characters in each fragment for highlights. Defaults to 100 characters. :param list include_fields: Optional list of field names to include in search results. Any fields included must have been indexed with the ``store:true`` option. :returns: Search query result data in JSON format """ ddoc = DesignDocument(self, ddoc_id) return self._get_search_result( '/'.join((ddoc.document_url, '_search', index_name)), **query_params )
Retrieves the raw JSON content from the remote database based on the search index on the server, using the query_params provided as query parameters. A ``query`` parameter containing the Lucene query syntax is mandatory. Example for search queries: .. code-block:: python # Assuming that 'searchindex001' exists as part of the # 'ddoc001' design document in the remote database... # Retrieve documents where the Lucene field name is 'name' and # the value is 'julia*' resp = db.get_search_result('ddoc001', 'searchindex001', query='name:julia*', include_docs=True) for row in resp['rows']: # Process search index data (in JSON format). Example if the search query requires grouping by using the ``group_field`` parameter: .. code-block:: python # Assuming that 'searchindex001' exists as part of the # 'ddoc001' design document in the remote database... # Retrieve JSON response content, limiting response to 10 documents resp = db.get_search_result('ddoc001', 'searchindex001', query='name:julia*', group_field='name', limit=10) for group in resp['groups']: for row in group['rows']: # Process search index data (in JSON format). :param str ddoc_id: Design document id used to get the search result. :param str index_name: Name used in part to identify the index. :param str bookmark: Optional string that enables you to specify which page of results you require. Only valid for queries that do not specify the ``group_field`` query parameter. :param list counts: Optional JSON array of field names for which counts should be produced. The response will contain counts for each unique value of this field name among the documents matching the search query. Requires the index to have faceting enabled. :param list drilldown: Optional list of fields that each define a pair of a field name and a value. This field can be used several times. The search will only match documents that have the given value in the field name. 
It differs from using ``query=fieldname:value`` only in that the values are not analyzed. :param str group_field: Optional string field by which to group search matches. Fields containing other data (numbers, objects, arrays) can not be used. :param int group_limit: Optional number with the maximum group count. This field can only be used if ``group_field`` query parameter is specified. :param group_sort: Optional JSON field that defines the order of the groups in a search using ``group_field``. The default sort order is relevance. This field can have the same values as the sort field, so single fields as well as arrays of fields are supported. :param int limit: Optional number to limit the maximum count of the returned documents. In case of a grouped search, this parameter limits the number of documents per group. :param query/q: A Lucene query in the form of ``name:value``. If name is omitted, the special value ``default`` is used. The ``query`` parameter can be abbreviated as ``q``. :param ranges: Optional JSON facet syntax that reuses the standard Lucene syntax to return counts of results which fit into each specified category. Inclusive range queries are denoted by brackets. Exclusive range queries are denoted by curly brackets. For example ``ranges={"price":{"cheap":"[0 TO 100]"}}`` has an inclusive range of 0 to 100. Requires the index to have faceting enabled. :param sort: Optional JSON string of the form ``fieldname<type>`` for ascending or ``-fieldname<type>`` for descending sort order. Fieldname is the name of a string or number field and type is either number or string or a JSON array of such strings. The type part is optional and defaults to number. :param str stale: Optional string to allow the results from a stale index to be used. This makes the request return immediately, even if the index has not been completely built yet. :param list highlight_fields: Optional list of fields which should be highlighted. 
:param str highlight_pre_tag: Optional string inserted before the highlighted word in the highlights output. Defaults to ``<em>``. :param str highlight_post_tag: Optional string inserted after the highlighted word in the highlights output. Defaults to ``</em>``. :param int highlight_number: Optional number of fragments returned in highlights. If the search term occurs less often than the number of fragments specified, longer fragments are returned. Default is 1. :param int highlight_size: Optional number of characters in each fragment for highlights. Defaults to 100 characters. :param list include_fields: Optional list of field names to include in search results. Any fields included must have been indexed with the ``store:true`` option. :returns: Search query result data in JSON format
Below is the the instruction that describes the task: ### Input: Retrieves the raw JSON content from the remote database based on the search index on the server, using the query_params provided as query parameters. A ``query`` parameter containing the Lucene query syntax is mandatory. Example for search queries: .. code-block:: python # Assuming that 'searchindex001' exists as part of the # 'ddoc001' design document in the remote database... # Retrieve documents where the Lucene field name is 'name' and # the value is 'julia*' resp = db.get_search_result('ddoc001', 'searchindex001', query='name:julia*', include_docs=True) for row in resp['rows']: # Process search index data (in JSON format). Example if the search query requires grouping by using the ``group_field`` parameter: .. code-block:: python # Assuming that 'searchindex001' exists as part of the # 'ddoc001' design document in the remote database... # Retrieve JSON response content, limiting response to 10 documents resp = db.get_search_result('ddoc001', 'searchindex001', query='name:julia*', group_field='name', limit=10) for group in resp['groups']: for row in group['rows']: # Process search index data (in JSON format). :param str ddoc_id: Design document id used to get the search result. :param str index_name: Name used in part to identify the index. :param str bookmark: Optional string that enables you to specify which page of results you require. Only valid for queries that do not specify the ``group_field`` query parameter. :param list counts: Optional JSON array of field names for which counts should be produced. The response will contain counts for each unique value of this field name among the documents matching the search query. Requires the index to have faceting enabled. :param list drilldown: Optional list of fields that each define a pair of a field name and a value. This field can be used several times. The search will only match documents that have the given value in the field name. 
It differs from using ``query=fieldname:value`` only in that the values are not analyzed. :param str group_field: Optional string field by which to group search matches. Fields containing other data (numbers, objects, arrays) can not be used. :param int group_limit: Optional number with the maximum group count. This field can only be used if ``group_field`` query parameter is specified. :param group_sort: Optional JSON field that defines the order of the groups in a search using ``group_field``. The default sort order is relevance. This field can have the same values as the sort field, so single fields as well as arrays of fields are supported. :param int limit: Optional number to limit the maximum count of the returned documents. In case of a grouped search, this parameter limits the number of documents per group. :param query/q: A Lucene query in the form of ``name:value``. If name is omitted, the special value ``default`` is used. The ``query`` parameter can be abbreviated as ``q``. :param ranges: Optional JSON facet syntax that reuses the standard Lucene syntax to return counts of results which fit into each specified category. Inclusive range queries are denoted by brackets. Exclusive range queries are denoted by curly brackets. For example ``ranges={"price":{"cheap":"[0 TO 100]"}}`` has an inclusive range of 0 to 100. Requires the index to have faceting enabled. :param sort: Optional JSON string of the form ``fieldname<type>`` for ascending or ``-fieldname<type>`` for descending sort order. Fieldname is the name of a string or number field and type is either number or string or a JSON array of such strings. The type part is optional and defaults to number. :param str stale: Optional string to allow the results from a stale index to be used. This makes the request return immediately, even if the index has not been completely built yet. :param list highlight_fields: Optional list of fields which should be highlighted. 
:param str highlight_pre_tag: Optional string inserted before the highlighted word in the highlights output. Defaults to ``<em>``. :param str highlight_post_tag: Optional string inserted after the highlighted word in the highlights output. Defaults to ``</em>``. :param int highlight_number: Optional number of fragments returned in highlights. If the search term occurs less often than the number of fragments specified, longer fragments are returned. Default is 1. :param int highlight_size: Optional number of characters in each fragment for highlights. Defaults to 100 characters. :param list include_fields: Optional list of field names to include in search results. Any fields included must have been indexed with the ``store:true`` option. :returns: Search query result data in JSON format ### Response: def get_search_result(self, ddoc_id, index_name, **query_params): """ Retrieves the raw JSON content from the remote database based on the search index on the server, using the query_params provided as query parameters. A ``query`` parameter containing the Lucene query syntax is mandatory. Example for search queries: .. code-block:: python # Assuming that 'searchindex001' exists as part of the # 'ddoc001' design document in the remote database... # Retrieve documents where the Lucene field name is 'name' and # the value is 'julia*' resp = db.get_search_result('ddoc001', 'searchindex001', query='name:julia*', include_docs=True) for row in resp['rows']: # Process search index data (in JSON format). Example if the search query requires grouping by using the ``group_field`` parameter: .. code-block:: python # Assuming that 'searchindex001' exists as part of the # 'ddoc001' design document in the remote database... 
# Retrieve JSON response content, limiting response to 10 documents resp = db.get_search_result('ddoc001', 'searchindex001', query='name:julia*', group_field='name', limit=10) for group in resp['groups']: for row in group['rows']: # Process search index data (in JSON format). :param str ddoc_id: Design document id used to get the search result. :param str index_name: Name used in part to identify the index. :param str bookmark: Optional string that enables you to specify which page of results you require. Only valid for queries that do not specify the ``group_field`` query parameter. :param list counts: Optional JSON array of field names for which counts should be produced. The response will contain counts for each unique value of this field name among the documents matching the search query. Requires the index to have faceting enabled. :param list drilldown: Optional list of fields that each define a pair of a field name and a value. This field can be used several times. The search will only match documents that have the given value in the field name. It differs from using ``query=fieldname:value`` only in that the values are not analyzed. :param str group_field: Optional string field by which to group search matches. Fields containing other data (numbers, objects, arrays) can not be used. :param int group_limit: Optional number with the maximum group count. This field can only be used if ``group_field`` query parameter is specified. :param group_sort: Optional JSON field that defines the order of the groups in a search using ``group_field``. The default sort order is relevance. This field can have the same values as the sort field, so single fields as well as arrays of fields are supported. :param int limit: Optional number to limit the maximum count of the returned documents. In case of a grouped search, this parameter limits the number of documents per group. :param query/q: A Lucene query in the form of ``name:value``. 
If name is omitted, the special value ``default`` is used. The ``query`` parameter can be abbreviated as ``q``. :param ranges: Optional JSON facet syntax that reuses the standard Lucene syntax to return counts of results which fit into each specified category. Inclusive range queries are denoted by brackets. Exclusive range queries are denoted by curly brackets. For example ``ranges={"price":{"cheap":"[0 TO 100]"}}`` has an inclusive range of 0 to 100. Requires the index to have faceting enabled. :param sort: Optional JSON string of the form ``fieldname<type>`` for ascending or ``-fieldname<type>`` for descending sort order. Fieldname is the name of a string or number field and type is either number or string or a JSON array of such strings. The type part is optional and defaults to number. :param str stale: Optional string to allow the results from a stale index to be used. This makes the request return immediately, even if the index has not been completely built yet. :param list highlight_fields: Optional list of fields which should be highlighted. :param str highlight_pre_tag: Optional string inserted before the highlighted word in the highlights output. Defaults to ``<em>``. :param str highlight_post_tag: Optional string inserted after the highlighted word in the highlights output. Defaults to ``</em>``. :param int highlight_number: Optional number of fragments returned in highlights. If the search term occurs less often than the number of fragments specified, longer fragments are returned. Default is 1. :param int highlight_size: Optional number of characters in each fragment for highlights. Defaults to 100 characters. :param list include_fields: Optional list of field names to include in search results. Any fields included must have been indexed with the ``store:true`` option. 
:returns: Search query result data in JSON format """ ddoc = DesignDocument(self, ddoc_id) return self._get_search_result( '/'.join((ddoc.document_url, '_search', index_name)), **query_params )
def _select_date_range(self, lines): """Identify lines containing headers within the range begin_date to end_date. Parameters ----- lines: list list of lines from the IGRA2 data file. """ headers = [] num_lev = [] dates = [] # Get indices of headers, and make a list of dates and num_lev for idx, line in enumerate(lines): if line[0] == '#': year, month, day, hour = map(int, line[13:26].split()) # All soundings have YMD, most have hour try: date = datetime.datetime(year, month, day, hour) except ValueError: date = datetime.datetime(year, month, day) # Check date if self.begin_date <= date <= self.end_date: headers.append(idx) num_lev.append(int(line[32:36])) dates.append(date) if date > self.end_date: break if len(dates) == 0: # Break if no matched dates. # Could improve this later by showing the date range for the station. raise ValueError('No dates match selection.') # Compress body of data into a string begin_idx = min(headers) end_idx = max(headers) + num_lev[-1] # Make a boolean vector that selects only list indices within the time range selector = np.zeros(len(lines), dtype=bool) selector[begin_idx:end_idx + 1] = True selector[headers] = False body = ''.join([line for line in itertools.compress(lines, selector)]) selector[begin_idx:end_idx + 1] = ~selector[begin_idx:end_idx + 1] header = ''.join([line for line in itertools.compress(lines, selector)]) # expand date vector to match length of the body dataframe. dates_long = np.repeat(dates, num_lev) return body, header, dates_long, dates
Identify lines containing headers within the range begin_date to end_date. Parameters ----- lines: list list of lines from the IGRA2 data file.
Below is the the instruction that describes the task: ### Input: Identify lines containing headers within the range begin_date to end_date. Parameters ----- lines: list list of lines from the IGRA2 data file. ### Response: def _select_date_range(self, lines): """Identify lines containing headers within the range begin_date to end_date. Parameters ----- lines: list list of lines from the IGRA2 data file. """ headers = [] num_lev = [] dates = [] # Get indices of headers, and make a list of dates and num_lev for idx, line in enumerate(lines): if line[0] == '#': year, month, day, hour = map(int, line[13:26].split()) # All soundings have YMD, most have hour try: date = datetime.datetime(year, month, day, hour) except ValueError: date = datetime.datetime(year, month, day) # Check date if self.begin_date <= date <= self.end_date: headers.append(idx) num_lev.append(int(line[32:36])) dates.append(date) if date > self.end_date: break if len(dates) == 0: # Break if no matched dates. # Could improve this later by showing the date range for the station. raise ValueError('No dates match selection.') # Compress body of data into a string begin_idx = min(headers) end_idx = max(headers) + num_lev[-1] # Make a boolean vector that selects only list indices within the time range selector = np.zeros(len(lines), dtype=bool) selector[begin_idx:end_idx + 1] = True selector[headers] = False body = ''.join([line for line in itertools.compress(lines, selector)]) selector[begin_idx:end_idx + 1] = ~selector[begin_idx:end_idx + 1] header = ''.join([line for line in itertools.compress(lines, selector)]) # expand date vector to match length of the body dataframe. dates_long = np.repeat(dates, num_lev) return body, header, dates_long, dates
def JsonResponseModel(self): """In this context, return raw JSON instead of proto.""" old_model = self.response_type_model self.__response_type_model = 'json' yield self.__response_type_model = old_model
In this context, return raw JSON instead of proto.
Below is the the instruction that describes the task: ### Input: In this context, return raw JSON instead of proto. ### Response: def JsonResponseModel(self): """In this context, return raw JSON instead of proto.""" old_model = self.response_type_model self.__response_type_model = 'json' yield self.__response_type_model = old_model
def initialize(self, request, response): """Initialize. 1. call webapp init. 2. check request is indeed from taskqueue. 3. check the task has not been retried too many times. 4. run handler specific processing logic. 5. run error handling logic if precessing failed. Args: request: a webapp.Request instance. response: a webapp.Response instance. """ super(TaskQueueHandler, self).initialize(request, response) # Check request is from taskqueue. if "X-AppEngine-QueueName" not in self.request.headers: logging.error(self.request.headers) logging.error("Task queue handler received non-task queue request") self.response.set_status( 403, message="Task queue handler received non-task queue request") return # Check task has not been retried too many times. if self.task_retry_count() + 1 > parameters.config.TASK_MAX_ATTEMPTS: logging.error( "Task %s has been attempted %s times. Dropping it permanently.", self.request.headers["X-AppEngine-TaskName"], self.task_retry_count() + 1) self._drop_gracefully() return try: self._preprocess() self._preprocess_success = True # pylint: disable=bare-except except: self._preprocess_success = False logging.error( "Preprocess task %s failed. Dropping it permanently.", self.request.headers["X-AppEngine-TaskName"]) self._drop_gracefully()
Initialize. 1. call webapp init. 2. check request is indeed from taskqueue. 3. check the task has not been retried too many times. 4. run handler specific processing logic. 5. run error handling logic if precessing failed. Args: request: a webapp.Request instance. response: a webapp.Response instance.
Below is the the instruction that describes the task: ### Input: Initialize. 1. call webapp init. 2. check request is indeed from taskqueue. 3. check the task has not been retried too many times. 4. run handler specific processing logic. 5. run error handling logic if precessing failed. Args: request: a webapp.Request instance. response: a webapp.Response instance. ### Response: def initialize(self, request, response): """Initialize. 1. call webapp init. 2. check request is indeed from taskqueue. 3. check the task has not been retried too many times. 4. run handler specific processing logic. 5. run error handling logic if precessing failed. Args: request: a webapp.Request instance. response: a webapp.Response instance. """ super(TaskQueueHandler, self).initialize(request, response) # Check request is from taskqueue. if "X-AppEngine-QueueName" not in self.request.headers: logging.error(self.request.headers) logging.error("Task queue handler received non-task queue request") self.response.set_status( 403, message="Task queue handler received non-task queue request") return # Check task has not been retried too many times. if self.task_retry_count() + 1 > parameters.config.TASK_MAX_ATTEMPTS: logging.error( "Task %s has been attempted %s times. Dropping it permanently.", self.request.headers["X-AppEngine-TaskName"], self.task_retry_count() + 1) self._drop_gracefully() return try: self._preprocess() self._preprocess_success = True # pylint: disable=bare-except except: self._preprocess_success = False logging.error( "Preprocess task %s failed. Dropping it permanently.", self.request.headers["X-AppEngine-TaskName"]) self._drop_gracefully()
def _treat_star_format(self,arg_format_list,args): """ Deal with "*" format if specified. """ num_stars = len([a for a in arg_format_list if a == "*"]) if num_stars > 0: # Make sure the repeated format argument only occurs once, is last, # and that there is at least one format in addition to it. if num_stars == 1 and arg_format_list[-1] == "*" and len(arg_format_list) > 1: # Trim * from end arg_format_list = arg_format_list[:-1] # If we need extra arguments... if len(arg_format_list) < len(args): f = arg_format_list[-1] len_diff = len(args) - len(arg_format_list) tmp = list(arg_format_list) tmp.extend([f for i in range(len_diff)]) arg_format_list = "".join(tmp) else: err = "'*' format must occur only once, be at end of string, and be preceded by at least one other format." raise ValueError(err) return arg_format_list
Deal with "*" format if specified.
Below is the the instruction that describes the task: ### Input: Deal with "*" format if specified. ### Response: def _treat_star_format(self,arg_format_list,args): """ Deal with "*" format if specified. """ num_stars = len([a for a in arg_format_list if a == "*"]) if num_stars > 0: # Make sure the repeated format argument only occurs once, is last, # and that there is at least one format in addition to it. if num_stars == 1 and arg_format_list[-1] == "*" and len(arg_format_list) > 1: # Trim * from end arg_format_list = arg_format_list[:-1] # If we need extra arguments... if len(arg_format_list) < len(args): f = arg_format_list[-1] len_diff = len(args) - len(arg_format_list) tmp = list(arg_format_list) tmp.extend([f for i in range(len_diff)]) arg_format_list = "".join(tmp) else: err = "'*' format must occur only once, be at end of string, and be preceded by at least one other format." raise ValueError(err) return arg_format_list
def gradient(self): r"""Gradient of the KL functional. The gradient of `KullbackLeibler` with ``prior`` :math:`g` is given as .. math:: \nabla F(x) = 1 - \frac{g}{x}. The gradient is not defined in points where one or more components are non-positive. """ functional = self class KLGradient(Operator): """The gradient operator of this functional.""" def __init__(self): """Initialize a new instance.""" super(KLGradient, self).__init__( functional.domain, functional.domain, linear=False) def _call(self, x): """Apply the gradient operator to the given point. The gradient is not defined in points where one or more components are non-positive. """ if functional.prior is None: return (-1.0) / x + 1 else: return (-functional.prior) / x + 1 return KLGradient()
r"""Gradient of the KL functional. The gradient of `KullbackLeibler` with ``prior`` :math:`g` is given as .. math:: \nabla F(x) = 1 - \frac{g}{x}. The gradient is not defined in points where one or more components are non-positive.
Below is the the instruction that describes the task: ### Input: r"""Gradient of the KL functional. The gradient of `KullbackLeibler` with ``prior`` :math:`g` is given as .. math:: \nabla F(x) = 1 - \frac{g}{x}. The gradient is not defined in points where one or more components are non-positive. ### Response: def gradient(self): r"""Gradient of the KL functional. The gradient of `KullbackLeibler` with ``prior`` :math:`g` is given as .. math:: \nabla F(x) = 1 - \frac{g}{x}. The gradient is not defined in points where one or more components are non-positive. """ functional = self class KLGradient(Operator): """The gradient operator of this functional.""" def __init__(self): """Initialize a new instance.""" super(KLGradient, self).__init__( functional.domain, functional.domain, linear=False) def _call(self, x): """Apply the gradient operator to the given point. The gradient is not defined in points where one or more components are non-positive. """ if functional.prior is None: return (-1.0) / x + 1 else: return (-functional.prior) / x + 1 return KLGradient()
def createFieldDescription(self): """ Provides a field description dict for swarm description. :return: (dict) """ return { "fieldName": self.getName(), "fieldType": self._dataType, "minValue": self._min, "maxValue": self._max }
Provides a field description dict for swarm description. :return: (dict)
Below is the the instruction that describes the task: ### Input: Provides a field description dict for swarm description. :return: (dict) ### Response: def createFieldDescription(self): """ Provides a field description dict for swarm description. :return: (dict) """ return { "fieldName": self.getName(), "fieldType": self._dataType, "minValue": self._min, "maxValue": self._max }
def start_push_sync(self): """ Starts the detection of unsynced Git data. """ self.active_thread = True self.active_push = True self.thread_push_instance = Thread(target=self.thread_push) self.thread_push_instance.daemon = True self.thread_push_instance.start()
Starts the detection of unsynced Git data.
Below is the the instruction that describes the task: ### Input: Starts the detection of unsynced Git data. ### Response: def start_push_sync(self): """ Starts the detection of unsynced Git data. """ self.active_thread = True self.active_push = True self.thread_push_instance = Thread(target=self.thread_push) self.thread_push_instance.daemon = True self.thread_push_instance.start()
def path(self, which=None):
    """Extend ``nailgun.entity_mixins.Entity.path``.

    The format of the returned path depends on the value of ``which``:

    available_repositories
        /repository_sets/<id>/available_repositories
    enable
        /repository_sets/<id>/enable
    disable
        /repository_sets/<id>/disable

    ``super`` is called otherwise.

    """
    custom_suffixes = ('available_repositories', 'enable', 'disable')
    if which in custom_suffixes:
        base = super(RepositorySet, self).path(which='self')
        return '{0}/{1}'.format(base, which)
    return super(RepositorySet, self).path(which)
Extend ``nailgun.entity_mixins.Entity.path``. The format of the returned path depends on the value of ``which``: available_repositories /repository_sets/<id>/available_repositories enable /repository_sets/<id>/enable disable /repository_sets/<id>/disable ``super`` is called otherwise.
Below is the the instruction that describes the task: ### Input: Extend ``nailgun.entity_mixins.Entity.path``. The format of the returned path depends on the value of ``which``: available_repositories /repository_sets/<id>/available_repositories enable /repository_sets/<id>/enable disable /repository_sets/<id>/disable ``super`` is called otherwise. ### Response: def path(self, which=None): """Extend ``nailgun.entity_mixins.Entity.path``. The format of the returned path depends on the value of ``which``: available_repositories /repository_sets/<id>/available_repositories enable /repository_sets/<id>/enable disable /repository_sets/<id>/disable ``super`` is called otherwise. """ if which in ( 'available_repositories', 'enable', 'disable', ): return '{0}/{1}'.format( super(RepositorySet, self).path(which='self'), which ) return super(RepositorySet, self).path(which)
def cov(self, ddof=None, bias=0):
    '''Covariance matrix of the aggregated sample.

    :parameter ddof: If not ``None``, normalization is by (N - ddof),
        where N is the number of observations; this overrides the value
        implied by ``bias``. The default value is ``None``.
    :parameter bias: If nonzero, normalize by N rather than N - 1.
        Ignored when ``ddof`` is given.
    '''
    n = self.n
    # ddof, when supplied, takes precedence over bias.
    if ddof is not None:
        denom = n - ddof
    elif bias:
        denom = n
    else:
        denom = n - 1
    centered = self.sxx - outer(self.sx, self.sx) / n
    return centered / denom
The covariance matrix from the aggregate sample. It accepts an optional parameter for the degree of freedoms. :parameter ddof: If not ``None`` normalization is by (N - ddof), where N is the number of observations; this overrides the value implied by bias. The default value is None.
Below is the the instruction that describes the task: ### Input: The covariance matrix from the aggregate sample. It accepts an optional parameter for the degree of freedoms. :parameter ddof: If not ``None`` normalization is by (N - ddof), where N is the number of observations; this overrides the value implied by bias. The default value is None. ### Response: def cov(self, ddof=None, bias=0): '''The covariance matrix from the aggregate sample. It accepts an optional parameter for the degree of freedoms. :parameter ddof: If not ``None`` normalization is by (N - ddof), where N is the number of observations; this overrides the value implied by bias. The default value is None. ''' N = self.n M = N if bias else N-1 M = M if ddof is None else N-ddof return (self.sxx - outer(self.sx,self.sx)/N)/M
def setOverlayTransformTrackedDeviceComponent(self, ulOverlayHandle, unDeviceIndex, pchComponentName):
    """
    Draw the overlay on a render-model component mesh instead of a quad.
    The overlay is only drawn while the system is drawing the device;
    overlays using this transform type cannot receive mouse events.
    """
    # Delegate straight to the underlying OpenVR function table entry.
    return self.function_table.setOverlayTransformTrackedDeviceComponent(
        ulOverlayHandle, unDeviceIndex, pchComponentName)
Sets the transform to draw the overlay on a rendermodel component mesh instead of a quad. This will only draw when the system is drawing the device. Overlays with this transform type cannot receive mouse events.
Below is the the instruction that describes the task: ### Input: Sets the transform to draw the overlay on a rendermodel component mesh instead of a quad. This will only draw when the system is drawing the device. Overlays with this transform type cannot receive mouse events. ### Response: def setOverlayTransformTrackedDeviceComponent(self, ulOverlayHandle, unDeviceIndex, pchComponentName): """ Sets the transform to draw the overlay on a rendermodel component mesh instead of a quad. This will only draw when the system is drawing the device. Overlays with this transform type cannot receive mouse events. """ fn = self.function_table.setOverlayTransformTrackedDeviceComponent result = fn(ulOverlayHandle, unDeviceIndex, pchComponentName) return result
def new_from_bundle_config(self, config):
    """ Create a new bundle, or link to an existing one, based on the
    identity in config data.

    :param config: A Dict form of a bundle.yaml file
    :return: a committed :class:`Bundle` in the NEW state
    """
    identity = Identity.from_dict(config['identity'])
    # Reuse the dataset if the identity already exists in the database.
    dataset = self._db.dataset(identity.vid, exception=False)
    if not dataset:
        dataset = self._db.new_dataset(**identity.dict)

    bundle = Bundle(dataset, self)
    bundle.commit()
    bundle.state = Bundle.STATES.NEW
    bundle.set_last_access(Bundle.STATES.NEW)
    return bundle
Create a new bundle, or link to an existing one, based on the identity in config data. :param config: A Dict form of a bundle.yaml file :return:
Below is the the instruction that describes the task: ### Input: Create a new bundle, or link to an existing one, based on the identity in config data. :param config: A Dict form of a bundle.yaml file :return: ### Response: def new_from_bundle_config(self, config): """ Create a new bundle, or link to an existing one, based on the identity in config data. :param config: A Dict form of a bundle.yaml file :return: """ identity = Identity.from_dict(config['identity']) ds = self._db.dataset(identity.vid, exception=False) if not ds: ds = self._db.new_dataset(**identity.dict) b = Bundle(ds, self) b.commit() b.state = Bundle.STATES.NEW b.set_last_access(Bundle.STATES.NEW) # b.set_file_system(source_url=self._fs.source(ds.name), # build_url=self._fs.build(ds.name)) return b
def enable_category_lists_editor(self, request, editor_init_kwargs=None, additional_parents_aliases=None, lists_init_kwargs=None, handler_init_kwargs=None):
    """Enables editor functionality for categories of this object.

    :param Request request: Django request object
    :param dict editor_init_kwargs: Keyword args to initialize category lists editor with.
        See CategoryList.enable_editor()
    :param list additional_parents_aliases: Aliases of categories for editor to render
        even if this object has no tie to them.
    :param dict lists_init_kwargs: Keyword args to initialize CategoryList objects with
    :param dict handler_init_kwargs: Keyword args to initialize CategoryRequestHandler object with
    :return:
    """
    from .toolbox import CategoryRequestHandler

    parent_aliases = additional_parents_aliases or []
    lists_kwargs = lists_init_kwargs or {}
    editor_kwargs = editor_init_kwargs or {}
    handler_kwargs = handler_init_kwargs or {}

    handler = CategoryRequestHandler(request, self, **handler_kwargs)
    category_lists = self.get_category_lists(
        init_kwargs=lists_kwargs,
        additional_parents_aliases=parent_aliases)
    handler.register_lists(
        category_lists,
        lists_init_kwargs=lists_kwargs,
        editor_init_kwargs=editor_kwargs)

    # Expose the handler so get_category_lists() can adapt its behaviour.
    self._category_editor = handler

    return handler.listen()
Enables editor functionality for categories of this object. :param Request request: Django request object :param dict editor_init_kwargs: Keyword args to initialize category lists editor with. See CategoryList.enable_editor() :param list additional_parents_aliases: Aliases of categories for editor to render even if this object has no tie to them. :param dict lists_init_kwargs: Keyword args to initialize CategoryList objects with :param dict handler_init_kwargs: Keyword args to initialize CategoryRequestHandler object with :return:
Below is the the instruction that describes the task: ### Input: Enables editor functionality for categories of this object. :param Request request: Django request object :param dict editor_init_kwargs: Keyword args to initialize category lists editor with. See CategoryList.enable_editor() :param list additional_parents_aliases: Aliases of categories for editor to render even if this object has no tie to them. :param dict lists_init_kwargs: Keyword args to initialize CategoryList objects with :param dict handler_init_kwargs: Keyword args to initialize CategoryRequestHandler object with :return: ### Response: def enable_category_lists_editor(self, request, editor_init_kwargs=None, additional_parents_aliases=None, lists_init_kwargs=None, handler_init_kwargs=None): """Enables editor functionality for categories of this object. :param Request request: Django request object :param dict editor_init_kwargs: Keyword args to initialize category lists editor with. See CategoryList.enable_editor() :param list additional_parents_aliases: Aliases of categories for editor to render even if this object has no tie to them. :param dict lists_init_kwargs: Keyword args to initialize CategoryList objects with :param dict handler_init_kwargs: Keyword args to initialize CategoryRequestHandler object with :return: """ from .toolbox import CategoryRequestHandler additional_parents_aliases = additional_parents_aliases or [] lists_init_kwargs = lists_init_kwargs or {} editor_init_kwargs = editor_init_kwargs or {} handler_init_kwargs = handler_init_kwargs or {} handler = CategoryRequestHandler(request, self, **handler_init_kwargs) lists = self.get_category_lists( init_kwargs=lists_init_kwargs, additional_parents_aliases=additional_parents_aliases) handler.register_lists(lists, lists_init_kwargs=lists_init_kwargs, editor_init_kwargs=editor_init_kwargs) self._category_editor = handler # Set link to handler to mutate get_category_lists() behaviour. return handler.listen()
def has_port_by_ref(self, port_ref):
    '''Check if this component has a port by the given reference to a CORBA
    PortService object.

    :param port_ref: Reference to a CORBA PortService object.
    :return: True if a matching port exists, False otherwise.
    '''
    with self._mutex:
        # Bug fix: the original called ``self.get_port_by_ref(self, port_ref)``,
        # passing ``self`` twice — a bound method already receives ``self``
        # implicitly, so that call raises TypeError at runtime.
        if self.get_port_by_ref(port_ref):
            return True
        return False
Check if this component has a port by the given reference to a CORBA PortService object.
Below is the the instruction that describes the task: ### Input: Check if this component has a port by the given reference to a CORBA PortService object. ### Response: def has_port_by_ref(self, port_ref): '''Check if this component has a port by the given reference to a CORBA PortService object. ''' with self._mutex: if self.get_port_by_ref(self, port_ref): return True return False
def cublasDtpsv(handle, uplo, trans, diag, n, AP, x, incx):
    """
    Solve real triangular-packed system with one right-hand side.

    Thin wrapper over ``cublasDtpsv_v2``; string arguments are mapped to
    the cuBLAS enums and device pointers are coerced to integers before
    the FFI call. Raises via ``cublasCheckStatus`` on a non-zero status.
    """
    fill_mode = _CUBLAS_FILL_MODE[uplo]
    op_mode = _CUBLAS_OP[trans]
    diag_mode = _CUBLAS_DIAG[diag]
    status = _libcublas.cublasDtpsv_v2(
        handle, fill_mode, op_mode, diag_mode,
        n, int(AP), int(x), incx)
    cublasCheckStatus(status)
Solve real triangular-packed system with one right-hand side.
Below is the the instruction that describes the task: ### Input: Solve real triangular-packed system with one right-hand side. ### Response: def cublasDtpsv(handle, uplo, trans, diag, n, AP, x, incx): """ Solve real triangular-packed system with one right-hand side. """ status = _libcublas.cublasDtpsv_v2(handle, _CUBLAS_FILL_MODE[uplo], _CUBLAS_OP[trans], _CUBLAS_DIAG[diag], n, int(AP), int(x), incx) cublasCheckStatus(status)
def create_ctm_miohandler(fileobj, title=None, comment=None,
                          register_prefixes=True, register_templates=True,
                          detect_prefixes=False):
    """\
    Create and configure a `CTMHandler` that writes to `fileobj`.

    :param fileobj: File-like object the CTM output is written to.
    :param title: Optional title assigned to the handler.
    :param comment: Optional comment assigned to the handler.
    :param register_prefixes: If true, register the default prefixes.
    :param register_templates: If true, register the default cable
        association templates.
    :param detect_prefixes: Passed through to ``handler.detect_prefixes``.
    :return: The configured `CTMHandler`.
    """
    handler = CTMHandler(fileobj)
    handler.title = title
    handler.comment = comment
    handler.detect_prefixes = detect_prefixes
    if register_prefixes:
        register_default_prefixes(handler)
    if register_templates:
        _register_default_templates(handler)
    return handler


def _register_default_templates(handler):
    """Register the standard cable association templates on `handler`."""
    add = handler.add_association_template
    add(u'classified', psis.CLASSIFIED_AS_TYPE, psis.CABLE_TYPE,
        psis.CLASSIFICATION_TYPE)
    add(u'origin', psis.SENT_BY_TYPE, psis.CABLE_TYPE, psis.SENDER_TYPE)
    add(u'references', psis.REFERENCES_TYPE, psis.SOURCE_TYPE,
        psis.TARGET_TYPE)
    # 'to' and 'info' templates are registered with progressively more
    # optional role types (route, precedence, MCN), replacing eight
    # near-identical hand-written calls. Registration order matches the
    # original: all 'to' variants first, then all 'info' variants.
    extras = (psis.ROUTE_TYPE, psis.PRECEDENCE_TYPE, psis.MCN_TYPE)
    for i in range(len(extras) + 1):
        add(u'to', psis.RECIPIENT_TYPE, psis.CABLE_TYPE,
            psis.RECIPIENT_TYPE, *extras[:i])
    for i in range(len(extras) + 1):
        add(u'info', psis.INFO_RECIPIENT_TYPE, psis.CABLE_TYPE,
            psis.RECIPIENT_TYPE, *extras[:i])
    add(u'tagged', psis.TAGGED_TYPE, psis.CABLE_TYPE, psis.TAG_TYPE)
    add(u'is-partial', psis.IS_PARTIAL_TYPE, psis.PARTIAL_TYPE)
    add(u'signed-by', psis.SIGNED_TYPE, psis.CABLE_TYPE, psis.SIGNER_TYPE)
\
Below is the the instruction that describes the task: ### Input: \ ### Response: def create_ctm_miohandler(fileobj, title=None, comment=None, register_prefixes=True, register_templates=True, detect_prefixes=False): """\ """ handler = CTMHandler(fileobj) handler.title = title handler.comment = comment handler.detect_prefixes = detect_prefixes if register_prefixes: register_default_prefixes(handler) if register_templates: handler.add_association_template(u'classified', psis.CLASSIFIED_AS_TYPE, psis.CABLE_TYPE, psis.CLASSIFICATION_TYPE) handler.add_association_template(u'origin', psis.SENT_BY_TYPE, psis.CABLE_TYPE, psis.SENDER_TYPE) handler.add_association_template(u'references', psis.REFERENCES_TYPE, psis.SOURCE_TYPE, psis.TARGET_TYPE) handler.add_association_template(u'to', psis.RECIPIENT_TYPE, psis.CABLE_TYPE, psis.RECIPIENT_TYPE) handler.add_association_template(u'to', psis.RECIPIENT_TYPE, psis.CABLE_TYPE, psis.RECIPIENT_TYPE, psis.ROUTE_TYPE) handler.add_association_template(u'to', psis.RECIPIENT_TYPE, psis.CABLE_TYPE, psis.RECIPIENT_TYPE, psis.ROUTE_TYPE, psis.PRECEDENCE_TYPE) handler.add_association_template(u'to', psis.RECIPIENT_TYPE, psis.CABLE_TYPE, psis.RECIPIENT_TYPE, psis.ROUTE_TYPE, psis.PRECEDENCE_TYPE, psis.MCN_TYPE) handler.add_association_template(u'info', psis.INFO_RECIPIENT_TYPE, psis.CABLE_TYPE, psis.RECIPIENT_TYPE) handler.add_association_template(u'info', psis.INFO_RECIPIENT_TYPE, psis.CABLE_TYPE, psis.RECIPIENT_TYPE, psis.ROUTE_TYPE) handler.add_association_template(u'info', psis.INFO_RECIPIENT_TYPE, psis.CABLE_TYPE, psis.RECIPIENT_TYPE, psis.ROUTE_TYPE, psis.PRECEDENCE_TYPE) handler.add_association_template(u'info', psis.INFO_RECIPIENT_TYPE, psis.CABLE_TYPE, psis.RECIPIENT_TYPE, psis.ROUTE_TYPE, psis.PRECEDENCE_TYPE, psis.MCN_TYPE) handler.add_association_template(u'tagged', psis.TAGGED_TYPE, psis.CABLE_TYPE, psis.TAG_TYPE) handler.add_association_template(u'is-partial', psis.IS_PARTIAL_TYPE, psis.PARTIAL_TYPE) 
handler.add_association_template(u'signed-by', psis.SIGNED_TYPE, psis.CABLE_TYPE, psis.SIGNER_TYPE) return handler
def describe_field(field_definition):
    """Build descriptor for Field instance.

    Args:
      field_definition: Field instance to provide descriptor for.

    Returns:
      Initialized FieldDescriptor instance describing the Field instance.
    """
    descriptor = FieldDescriptor()
    descriptor.name = field_definition.name
    descriptor.number = field_definition.number
    descriptor.variant = field_definition.variant

    # Enum and message fields carry the definition name of their type.
    if isinstance(field_definition, messages.EnumField):
        descriptor.type_name = field_definition.type.definition_name()
    if isinstance(field_definition, messages.MessageField):
        descriptor.type_name = (
            field_definition.message_type.definition_name())

    default = field_definition.default
    if default is not None:
        to_string = _DEFAULT_TO_STRING_MAP[type(field_definition)]
        descriptor.default_value = to_string(default)

    # Set label.
    if field_definition.repeated:
        descriptor.label = FieldDescriptor.Label.REPEATED
    elif field_definition.required:
        descriptor.label = FieldDescriptor.Label.REQUIRED
    else:
        descriptor.label = FieldDescriptor.Label.OPTIONAL

    return descriptor
Build descriptor for Field instance. Args: field_definition: Field instance to provide descriptor for. Returns: Initialized FieldDescriptor instance describing the Field instance.
Below is the the instruction that describes the task: ### Input: Build descriptor for Field instance. Args: field_definition: Field instance to provide descriptor for. Returns: Initialized FieldDescriptor instance describing the Field instance. ### Response: def describe_field(field_definition): """Build descriptor for Field instance. Args: field_definition: Field instance to provide descriptor for. Returns: Initialized FieldDescriptor instance describing the Field instance. """ field_descriptor = FieldDescriptor() field_descriptor.name = field_definition.name field_descriptor.number = field_definition.number field_descriptor.variant = field_definition.variant if isinstance(field_definition, messages.EnumField): field_descriptor.type_name = field_definition.type.definition_name() if isinstance(field_definition, messages.MessageField): field_descriptor.type_name = ( field_definition.message_type.definition_name()) if field_definition.default is not None: field_descriptor.default_value = _DEFAULT_TO_STRING_MAP[ type(field_definition)](field_definition.default) # Set label. if field_definition.repeated: field_descriptor.label = FieldDescriptor.Label.REPEATED elif field_definition.required: field_descriptor.label = FieldDescriptor.Label.REQUIRED else: field_descriptor.label = FieldDescriptor.Label.OPTIONAL return field_descriptor
def _interpret_contents(contentstream, initial_shorthand=UNIT_SQUARE):
    """Interpret the PDF content stream.

    The stack represents the state of the PDF graphics stack.  We are only
    interested in the current transformation matrix (CTM) so we only track
    this object; a full implementation would need to track many other items.

    The CTM is initialized to the mapping from user space to device space.
    PDF units are 1/72".  In a PDF viewer or printer this matrix is initialized
    to the transformation to device space.  For example if set to
    (1/72, 0, 0, 1/72, 0, 0) then all units would be calculated in inches.

    Images are always considered to be (0, 0) -> (1, 1).  Before drawing an
    image there should be a 'cm' that sets up an image coordinate system
    where drawing from (0, 0) -> (1, 1) will draw on the desired area of the
    page.

    PDF units suit our needs so we initialize ctm to the identity matrix.

    According to the PDF specification, the maximum stack depth is 32.  Other
    viewers tolerate some amount beyond this.  We issue a warning if the stack
    depth exceeds the spec limit and set a hard limit beyond this to bound our
    memory requirements.  If the stack underflows behavior is undefined in the
    spec, but we just pretend nothing happened and leave the CTM unchanged.

    Returns a ContentsInfo with the settings of every drawn XObject and
    inline image, and whether any vector-drawing operator was seen.
    """
    stack = []
    ctm = PdfMatrix(initial_shorthand)
    xobject_settings = []
    inline_images = []
    found_vector = False
    vector_ops = set('S s f F f* B B* b b*'.split())
    image_ops = set('BI ID EI q Q Do cm'.split())
    operator_whitelist = ' '.join(vector_ops | image_ops)

    for n, graphobj in enumerate(
        _normalize_stack(
            pikepdf.parse_content_stream(contentstream, operator_whitelist)
        )
    ):
        operands, operator = graphobj
        if operator == 'q':  # save graphics state: push the CTM
            stack.append(ctm)
            if len(stack) > 32:  # See docstring
                if len(stack) > 128:
                    raise RuntimeError(
                        "PDF graphics stack overflowed hard limit, operator %i" % n
                    )
                warn("PDF graphics stack overflowed spec limit")
        elif operator == 'Q':  # restore graphics state: pop the CTM
            try:
                ctm = stack.pop()
            except IndexError:
                # Keeping the ctm the same seems to be the only sensible thing
                # to do. Just pretend nothing happened, keep calm and carry on.
                warn("PDF graphics stack underflowed - PDF may be malformed")
        elif operator == 'cm':  # concatenate a matrix onto the CTM
            ctm = PdfMatrix(operands) @ ctm
        elif operator == 'Do':  # draw a named XObject under the current CTM
            image_name = operands[0]
            settings = XobjectSettings(
                name=image_name, shorthand=ctm.shorthand, stack_depth=len(stack)
            )
            xobject_settings.append(settings)
        elif operator == 'INLINE IMAGE':  # BI/ID/EI are grouped into this
            iimage = operands[0]
            inline = InlineSettings(
                iimage=iimage, shorthand=ctm.shorthand, stack_depth=len(stack)
            )
            inline_images.append(inline)
        elif operator in vector_ops:
            found_vector = True

    return ContentsInfo(
        xobject_settings=xobject_settings,
        inline_images=inline_images,
        found_vector=found_vector,
    )
Interpret the PDF content stream. The stack represents the state of the PDF graphics stack. We are only interested in the current transformation matrix (CTM) so we only track this object; a full implementation would need to track many other items. The CTM is initialized to the mapping from user space to device space. PDF units are 1/72". In a PDF viewer or printer this matrix is initialized to the transformation to device space. For example if set to (1/72, 0, 0, 1/72, 0, 0) then all units would be calculated in inches. Images are always considered to be (0, 0) -> (1, 1). Before drawing an image there should be a 'cm' that sets up an image coordinate system where drawing from (0, 0) -> (1, 1) will draw on the desired area of the page. PDF units suit our needs so we initialize ctm to the identity matrix. According to the PDF specification, the maximum stack depth is 32. Other viewers tolerate some amount beyond this. We issue a warning if the stack depth exceeds the spec limit and set a hard limit beyond this to bound our memory requirements. If the stack underflows behavior is undefined in the spec, but we just pretend nothing happened and leave the CTM unchanged.
Below is the the instruction that describes the task: ### Input: Interpret the PDF content stream. The stack represents the state of the PDF graphics stack. We are only interested in the current transformation matrix (CTM) so we only track this object; a full implementation would need to track many other items. The CTM is initialized to the mapping from user space to device space. PDF units are 1/72". In a PDF viewer or printer this matrix is initialized to the transformation to device space. For example if set to (1/72, 0, 0, 1/72, 0, 0) then all units would be calculated in inches. Images are always considered to be (0, 0) -> (1, 1). Before drawing an image there should be a 'cm' that sets up an image coordinate system where drawing from (0, 0) -> (1, 1) will draw on the desired area of the page. PDF units suit our needs so we initialize ctm to the identity matrix. According to the PDF specification, the maximum stack depth is 32. Other viewers tolerate some amount beyond this. We issue a warning if the stack depth exceeds the spec limit and set a hard limit beyond this to bound our memory requirements. If the stack underflows behavior is undefined in the spec, but we just pretend nothing happened and leave the CTM unchanged. ### Response: def _interpret_contents(contentstream, initial_shorthand=UNIT_SQUARE): """Interpret the PDF content stream. The stack represents the state of the PDF graphics stack. We are only interested in the current transformation matrix (CTM) so we only track this object; a full implementation would need to track many other items. The CTM is initialized to the mapping from user space to device space. PDF units are 1/72". In a PDF viewer or printer this matrix is initialized to the transformation to device space. For example if set to (1/72, 0, 0, 1/72, 0, 0) then all units would be calculated in inches. Images are always considered to be (0, 0) -> (1, 1). 
Before drawing an image there should be a 'cm' that sets up an image coordinate system where drawing from (0, 0) -> (1, 1) will draw on the desired area of the page. PDF units suit our needs so we initialize ctm to the identity matrix. According to the PDF specification, the maximum stack depth is 32. Other viewers tolerate some amount beyond this. We issue a warning if the stack depth exceeds the spec limit and set a hard limit beyond this to bound our memory requirements. If the stack underflows behavior is undefined in the spec, but we just pretend nothing happened and leave the CTM unchanged. """ stack = [] ctm = PdfMatrix(initial_shorthand) xobject_settings = [] inline_images = [] found_vector = False vector_ops = set('S s f F f* B B* b b*'.split()) image_ops = set('BI ID EI q Q Do cm'.split()) operator_whitelist = ' '.join(vector_ops | image_ops) for n, graphobj in enumerate( _normalize_stack( pikepdf.parse_content_stream(contentstream, operator_whitelist) ) ): operands, operator = graphobj if operator == 'q': stack.append(ctm) if len(stack) > 32: # See docstring if len(stack) > 128: raise RuntimeError( "PDF graphics stack overflowed hard limit, operator %i" % n ) warn("PDF graphics stack overflowed spec limit") elif operator == 'Q': try: ctm = stack.pop() except IndexError: # Keeping the ctm the same seems to be the only sensible thing # to do. Just pretend nothing happened, keep calm and carry on. 
warn("PDF graphics stack underflowed - PDF may be malformed") elif operator == 'cm': ctm = PdfMatrix(operands) @ ctm elif operator == 'Do': image_name = operands[0] settings = XobjectSettings( name=image_name, shorthand=ctm.shorthand, stack_depth=len(stack) ) xobject_settings.append(settings) elif operator == 'INLINE IMAGE': # BI/ID/EI are grouped into this iimage = operands[0] inline = InlineSettings( iimage=iimage, shorthand=ctm.shorthand, stack_depth=len(stack) ) inline_images.append(inline) elif operator in vector_ops: found_vector = True return ContentsInfo( xobject_settings=xobject_settings, inline_images=inline_images, found_vector=found_vector, )
def gate(self, gate, apply_now=True):
    ''' Apply given gate and return new gated sample (with assigned data).

    Parameters
    ----------
    gate : {_gate_available_classes}
        Gate object; it is called with this measurement's data and must
        return the filtered data.
    apply_now : bool
        NOTE(review): accepted but currently unused by this
        implementation — the gate is always applied immediately.
        Confirm whether deferred application was intended.

    Returns
    -------
    FCMeasurement
        Sample with data that passes gates
    '''
    data = self.get_data()
    newdata = gate(data)
    # Copy the measurement so the original sample's data is untouched.
    newsample = self.copy()
    newsample.data = newdata
    return newsample
Apply given gate and return new gated sample (with assigned data). Parameters ---------- gate : {_gate_available_classes} Returns ------- FCMeasurement Sample with data that passes gates
Below is the the instruction that describes the task: ### Input: Apply given gate and return new gated sample (with assigned data). Parameters ---------- gate : {_gate_available_classes} Returns ------- FCMeasurement Sample with data that passes gates ### Response: def gate(self, gate, apply_now=True): ''' Apply given gate and return new gated sample (with assigned data). Parameters ---------- gate : {_gate_available_classes} Returns ------- FCMeasurement Sample with data that passes gates ''' data = self.get_data() newdata = gate(data) newsample = self.copy() newsample.data = newdata return newsample
def set_settings(instance=None):
    """Pick correct settings instance and set it to a global variable.

    Resolution order:

    1. An explicit ``instance`` dotted path passed by the caller.
    2. The ``INSTANCE_FOR_DYNACONF`` environment variable.
    3. A Flask app (``FLASK_APP`` set) — uses the app's ``config``.
    4. A Django project (``DJANGO_SETTINGS_MODULE`` set).
    5. Fallback: a fresh ``LazySettings()``.

    :param instance: optional dotted path to a settings object to import.
    """
    global settings
    settings = None
    if instance:
        settings = import_settings(instance)
    elif "INSTANCE_FOR_DYNACONF" in os.environ:
        settings = import_settings(os.environ["INSTANCE_FOR_DYNACONF"])
    elif "FLASK_APP" in os.environ:  # pragma: no cover
        # Best effort: silently fall through if Flask is missing or the
        # app cannot be loaded.
        with suppress(ImportError, click.UsageError):
            from flask.cli import ScriptInfo

            flask_app = ScriptInfo().load_app()
            settings = flask_app.config
            click.echo(
                click.style(
                    "Flask app detected", fg="white", bg="bright_black"
                )
            )
    elif "DJANGO_SETTINGS_MODULE" in os.environ:  # pragma: no cover
        sys.path.insert(0, os.path.abspath(os.getcwd()))
        try:
            # Django extension v2
            # NOTE: this import deliberately rebinds the global ``settings``.
            from django.conf import settings

            settings.DYNACONF.configure()
        except (ImportError, AttributeError):
            # Backwards compatible with old django extension (pre 2.0.0)
            import dynaconf.contrib.django_dynaconf  # noqa
            from django.conf import settings as django_settings

            django_settings.configure()
            settings = django_settings

        if settings is not None:
            click.echo(
                click.style(
                    "Django app detected", fg="white", bg="bright_black"
                )
            )

    if settings is None:
        settings = LazySettings()
Pick correct settings instance and set it to a global variable.
Below is the the instruction that describes the task: ### Input: Pick correct settings instance and set it to a global variable. ### Response: def set_settings(instance=None): """Pick correct settings instance and set it to a global variable.""" global settings settings = None if instance: settings = import_settings(instance) elif "INSTANCE_FOR_DYNACONF" in os.environ: settings = import_settings(os.environ["INSTANCE_FOR_DYNACONF"]) elif "FLASK_APP" in os.environ: # pragma: no cover with suppress(ImportError, click.UsageError): from flask.cli import ScriptInfo flask_app = ScriptInfo().load_app() settings = flask_app.config click.echo( click.style( "Flask app detected", fg="white", bg="bright_black" ) ) elif "DJANGO_SETTINGS_MODULE" in os.environ: # pragma: no cover sys.path.insert(0, os.path.abspath(os.getcwd())) try: # Django extension v2 from django.conf import settings settings.DYNACONF.configure() except (ImportError, AttributeError): # Backwards compatible with old django extension (pre 2.0.0) import dynaconf.contrib.django_dynaconf # noqa from django.conf import settings as django_settings django_settings.configure() settings = django_settings if settings is not None: click.echo( click.style( "Django app detected", fg="white", bg="bright_black" ) ) if settings is None: settings = LazySettings()
def remove_network(self, net_id):
    """
    Delete a network. Similar to the ``docker network rm`` command.

    Args:
        net_id (str): The network's id
    """
    response = self._delete(self._url("/networks/{0}", net_id))
    self._raise_for_status(response)
Remove a network. Similar to the ``docker network rm`` command. Args: net_id (str): The network's id
Below is the the instruction that describes the task: ### Input: Remove a network. Similar to the ``docker network rm`` command. Args: net_id (str): The network's id ### Response: def remove_network(self, net_id): """ Remove a network. Similar to the ``docker network rm`` command. Args: net_id (str): The network's id """ url = self._url("/networks/{0}", net_id) res = self._delete(url) self._raise_for_status(res)
def _has_only_files(local_folder): """ Return whether a folder contains only files. This will be False if the folder contains any subdirectories. :param local_folder: full path to the local folder :type local_folder: string :returns: True if the folder contains only files :rtype: bool """ return not any(os.path.isdir(os.path.join(local_folder, entry)) for entry in os.listdir(local_folder))
Return whether a folder contains only files. This will be False if the folder contains any subdirectories. :param local_folder: full path to the local folder :type local_folder: string :returns: True if the folder contains only files :rtype: bool
Below is the the instruction that describes the task: ### Input: Return whether a folder contains only files. This will be False if the folder contains any subdirectories. :param local_folder: full path to the local folder :type local_folder: string :returns: True if the folder contains only files :rtype: bool ### Response: def _has_only_files(local_folder): """ Return whether a folder contains only files. This will be False if the folder contains any subdirectories. :param local_folder: full path to the local folder :type local_folder: string :returns: True if the folder contains only files :rtype: bool """ return not any(os.path.isdir(os.path.join(local_folder, entry)) for entry in os.listdir(local_folder))
def _run(name, cmd, exec_driver=None, output=None, stdin=None, python_shell=True, output_loglevel='debug', ignore_retcode=False, use_vt=False, keep_env=None): ''' Common logic for docker.run functions ''' if exec_driver is None: exec_driver = _get_exec_driver() ret = __salt__['container_resource.run']( name, cmd, container_type=__virtualname__, exec_driver=exec_driver, output=output, stdin=stdin, python_shell=python_shell, output_loglevel=output_loglevel, ignore_retcode=ignore_retcode, use_vt=use_vt, keep_env=keep_env) if output in (None, 'all'): return ret else: return ret[output]
Common logic for docker.run functions
Below is the the instruction that describes the task: ### Input: Common logic for docker.run functions ### Response: def _run(name, cmd, exec_driver=None, output=None, stdin=None, python_shell=True, output_loglevel='debug', ignore_retcode=False, use_vt=False, keep_env=None): ''' Common logic for docker.run functions ''' if exec_driver is None: exec_driver = _get_exec_driver() ret = __salt__['container_resource.run']( name, cmd, container_type=__virtualname__, exec_driver=exec_driver, output=output, stdin=stdin, python_shell=python_shell, output_loglevel=output_loglevel, ignore_retcode=ignore_retcode, use_vt=use_vt, keep_env=keep_env) if output in (None, 'all'): return ret else: return ret[output]
def addPygletListener(self,event_type,handler): """ Registers an event handler. The specified callable handler will be called every time an event with the same ``event_type`` is encountered. All event arguments are passed as positional arguments. This method should be used to listen for pyglet events. For new code, it is recommended to use :py:meth:`addEventListener()` instead. See :py:meth:`handleEvent()` for information about tunneled pyglet events. For custom events, use :py:meth:`addEventListener()` instead. """ if self.cfg["debug.events.register"]: print("Registered Event: %s Handler: %s"%(event_type,handler)) if event_type not in self.pygletEventHandlers: self.pygletEventHandlers[event_type]=[] # Only a weak reference is kept if inspect.ismethod(handler): handler = weakref.WeakMethod(handler) else: handler = weakref.ref(handler) self.pygletEventHandlers[event_type].append(handler)
Registers an event handler. The specified callable handler will be called every time an event with the same ``event_type`` is encountered. All event arguments are passed as positional arguments. This method should be used to listen for pyglet events. For new code, it is recommended to use :py:meth:`addEventListener()` instead. See :py:meth:`handleEvent()` for information about tunneled pyglet events. For custom events, use :py:meth:`addEventListener()` instead.
Below is the the instruction that describes the task: ### Input: Registers an event handler. The specified callable handler will be called every time an event with the same ``event_type`` is encountered. All event arguments are passed as positional arguments. This method should be used to listen for pyglet events. For new code, it is recommended to use :py:meth:`addEventListener()` instead. See :py:meth:`handleEvent()` for information about tunneled pyglet events. For custom events, use :py:meth:`addEventListener()` instead. ### Response: def addPygletListener(self,event_type,handler): """ Registers an event handler. The specified callable handler will be called every time an event with the same ``event_type`` is encountered. All event arguments are passed as positional arguments. This method should be used to listen for pyglet events. For new code, it is recommended to use :py:meth:`addEventListener()` instead. See :py:meth:`handleEvent()` for information about tunneled pyglet events. For custom events, use :py:meth:`addEventListener()` instead. """ if self.cfg["debug.events.register"]: print("Registered Event: %s Handler: %s"%(event_type,handler)) if event_type not in self.pygletEventHandlers: self.pygletEventHandlers[event_type]=[] # Only a weak reference is kept if inspect.ismethod(handler): handler = weakref.WeakMethod(handler) else: handler = weakref.ref(handler) self.pygletEventHandlers[event_type].append(handler)
def add_settings_parser(subparsers, parent_parser): """Creates the args parser needed for the settings command and its subcommands. """ # The following parser is for the settings subsection of commands. These # commands display information about the currently applied on-chain # settings. settings_parser = subparsers.add_parser( 'settings', help='Displays on-chain settings', description='Displays the values of currently active on-chain ' 'settings.') settings_parsers = settings_parser.add_subparsers( title='settings', dest='settings_cmd') settings_parsers.required = True list_parser = settings_parsers.add_parser( 'list', help='Lists the current keys and values of on-chain settings', description='List the current keys and values of on-chain ' 'settings. The content can be exported to various ' 'formats for external consumption.' ) list_parser.add_argument( '--url', type=str, help="identify the URL of a validator's REST API", default='http://localhost:8008') list_parser.add_argument( '--filter', type=str, default='', help='filters keys that begin with this value') list_parser.add_argument( '--format', default='default', choices=['default', 'csv', 'json', 'yaml'], help='choose the output format')
Creates the args parser needed for the settings command and its subcommands.
Below is the the instruction that describes the task: ### Input: Creates the args parser needed for the settings command and its subcommands. ### Response: def add_settings_parser(subparsers, parent_parser): """Creates the args parser needed for the settings command and its subcommands. """ # The following parser is for the settings subsection of commands. These # commands display information about the currently applied on-chain # settings. settings_parser = subparsers.add_parser( 'settings', help='Displays on-chain settings', description='Displays the values of currently active on-chain ' 'settings.') settings_parsers = settings_parser.add_subparsers( title='settings', dest='settings_cmd') settings_parsers.required = True list_parser = settings_parsers.add_parser( 'list', help='Lists the current keys and values of on-chain settings', description='List the current keys and values of on-chain ' 'settings. The content can be exported to various ' 'formats for external consumption.' ) list_parser.add_argument( '--url', type=str, help="identify the URL of a validator's REST API", default='http://localhost:8008') list_parser.add_argument( '--filter', type=str, default='', help='filters keys that begin with this value') list_parser.add_argument( '--format', default='default', choices=['default', 'csv', 'json', 'yaml'], help='choose the output format')
def formatter(self, api_client, data, newval): """Get audio-related fields Try to find fields for the audio url for specified preferred quality level, or next-lowest available quality url otherwise. """ url_map = data.get("audioUrlMap") audio_url = data.get("audioUrl") # Only an audio URL, not a quality map. This happens for most of the # mobile client tokens and some of the others now. In this case # substitute the empirically determined default values in the format # used by the rest of the function so downstream consumers continue to # work. if audio_url and not url_map: url_map = { BaseAPIClient.HIGH_AUDIO_QUALITY: { "audioUrl": audio_url, "bitrate": 64, "encoding": "aacplus", } } elif not url_map: # No audio url available (e.g. ad tokens) return None valid_audio_formats = [BaseAPIClient.HIGH_AUDIO_QUALITY, BaseAPIClient.MED_AUDIO_QUALITY, BaseAPIClient.LOW_AUDIO_QUALITY] # Only iterate over sublist, starting at preferred audio quality, or # from the beginning of the list if nothing is found. Ensures that the # bitrate used will always be the same or lower quality than was # specified to prevent audio from skipping for slow connections. preferred_quality = api_client.default_audio_quality if preferred_quality in valid_audio_formats: i = valid_audio_formats.index(preferred_quality) valid_audio_formats = valid_audio_formats[i:] for quality in valid_audio_formats: audio_url = url_map.get(quality) if audio_url: return audio_url[self.field] return audio_url[self.field] if audio_url else None
Get audio-related fields Try to find fields for the audio url for specified preferred quality level, or next-lowest available quality url otherwise.
Below is the the instruction that describes the task: ### Input: Get audio-related fields Try to find fields for the audio url for specified preferred quality level, or next-lowest available quality url otherwise. ### Response: def formatter(self, api_client, data, newval): """Get audio-related fields Try to find fields for the audio url for specified preferred quality level, or next-lowest available quality url otherwise. """ url_map = data.get("audioUrlMap") audio_url = data.get("audioUrl") # Only an audio URL, not a quality map. This happens for most of the # mobile client tokens and some of the others now. In this case # substitute the empirically determined default values in the format # used by the rest of the function so downstream consumers continue to # work. if audio_url and not url_map: url_map = { BaseAPIClient.HIGH_AUDIO_QUALITY: { "audioUrl": audio_url, "bitrate": 64, "encoding": "aacplus", } } elif not url_map: # No audio url available (e.g. ad tokens) return None valid_audio_formats = [BaseAPIClient.HIGH_AUDIO_QUALITY, BaseAPIClient.MED_AUDIO_QUALITY, BaseAPIClient.LOW_AUDIO_QUALITY] # Only iterate over sublist, starting at preferred audio quality, or # from the beginning of the list if nothing is found. Ensures that the # bitrate used will always be the same or lower quality than was # specified to prevent audio from skipping for slow connections. preferred_quality = api_client.default_audio_quality if preferred_quality in valid_audio_formats: i = valid_audio_formats.index(preferred_quality) valid_audio_formats = valid_audio_formats[i:] for quality in valid_audio_formats: audio_url = url_map.get(quality) if audio_url: return audio_url[self.field] return audio_url[self.field] if audio_url else None
def window_handles(self): """ Returns the handles of all windows within the current session. :Usage: :: driver.window_handles """ if self.w3c: return self.execute(Command.W3C_GET_WINDOW_HANDLES)['value'] else: return self.execute(Command.GET_WINDOW_HANDLES)['value']
Returns the handles of all windows within the current session. :Usage: :: driver.window_handles
Below is the the instruction that describes the task: ### Input: Returns the handles of all windows within the current session. :Usage: :: driver.window_handles ### Response: def window_handles(self): """ Returns the handles of all windows within the current session. :Usage: :: driver.window_handles """ if self.w3c: return self.execute(Command.W3C_GET_WINDOW_HANDLES)['value'] else: return self.execute(Command.GET_WINDOW_HANDLES)['value']
def ParseMessage(self, parser_mediator, key, date_time, tokens): """Produces an event from a syslog body that matched one of the grammars. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. key (str): name of the matching grammar. date_time (dfdatetime.DateTimeValues): date and time values. tokens (dict[str, str]): tokens derived from a syslog message based on the defined grammar. Raises: ValueError: If an unknown key is provided. """ if key not in ('failed_connection', 'login', 'opened_connection'): raise ValueError('Unknown grammar key: {0:s}'.format(key)) if key == 'login': event_data = SSHLoginEventData() elif key == 'failed_connection': event_data = SSHFailedConnectionEventData() elif key == 'opened_connection': event_data = SSHOpenedConnectionEventData() event_data.address = tokens.get('address', None) event_data.authentication_method = tokens.get( 'authentication_method', None) event_data.body = tokens.get('body', None) event_data.fingerprint = tokens.get('fingerprint', None) event_data.hostname = tokens.get('hostname', None) # TODO: pass line number to offset or remove. event_data.offset = 0 event_data.pid = tokens.get('pid', None) event_data.protocol = tokens.get('protocol', None) event_data.port = tokens.get('port', None) event_data.reporter = tokens.get('reporter', None) event_data.severity = tokens.get('severity', None) event_data.username = tokens.get('username', None) event = time_events.DateTimeValuesEvent( date_time, definitions.TIME_DESCRIPTION_WRITTEN) parser_mediator.ProduceEventWithEventData(event, event_data)
Produces an event from a syslog body that matched one of the grammars. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. key (str): name of the matching grammar. date_time (dfdatetime.DateTimeValues): date and time values. tokens (dict[str, str]): tokens derived from a syslog message based on the defined grammar. Raises: ValueError: If an unknown key is provided.
Below is the the instruction that describes the task: ### Input: Produces an event from a syslog body that matched one of the grammars. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. key (str): name of the matching grammar. date_time (dfdatetime.DateTimeValues): date and time values. tokens (dict[str, str]): tokens derived from a syslog message based on the defined grammar. Raises: ValueError: If an unknown key is provided. ### Response: def ParseMessage(self, parser_mediator, key, date_time, tokens): """Produces an event from a syslog body that matched one of the grammars. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. key (str): name of the matching grammar. date_time (dfdatetime.DateTimeValues): date and time values. tokens (dict[str, str]): tokens derived from a syslog message based on the defined grammar. Raises: ValueError: If an unknown key is provided. """ if key not in ('failed_connection', 'login', 'opened_connection'): raise ValueError('Unknown grammar key: {0:s}'.format(key)) if key == 'login': event_data = SSHLoginEventData() elif key == 'failed_connection': event_data = SSHFailedConnectionEventData() elif key == 'opened_connection': event_data = SSHOpenedConnectionEventData() event_data.address = tokens.get('address', None) event_data.authentication_method = tokens.get( 'authentication_method', None) event_data.body = tokens.get('body', None) event_data.fingerprint = tokens.get('fingerprint', None) event_data.hostname = tokens.get('hostname', None) # TODO: pass line number to offset or remove. 
event_data.offset = 0 event_data.pid = tokens.get('pid', None) event_data.protocol = tokens.get('protocol', None) event_data.port = tokens.get('port', None) event_data.reporter = tokens.get('reporter', None) event_data.severity = tokens.get('severity', None) event_data.username = tokens.get('username', None) event = time_events.DateTimeValuesEvent( date_time, definitions.TIME_DESCRIPTION_WRITTEN) parser_mediator.ProduceEventWithEventData(event, event_data)
def get_children(self, tree_alias, item): """Returns item's children. :param str|unicode tree_alias: :param TreeItemBase|None item: :rtype: list """ if not self.current_app_is_admin(): # We do not need i18n for a tree rendered in Admin dropdown. tree_alias = self.resolve_tree_i18n_alias(tree_alias) return self.cache.get_entry('parents', tree_alias)[item]
Returns item's children. :param str|unicode tree_alias: :param TreeItemBase|None item: :rtype: list
Below is the the instruction that describes the task: ### Input: Returns item's children. :param str|unicode tree_alias: :param TreeItemBase|None item: :rtype: list ### Response: def get_children(self, tree_alias, item): """Returns item's children. :param str|unicode tree_alias: :param TreeItemBase|None item: :rtype: list """ if not self.current_app_is_admin(): # We do not need i18n for a tree rendered in Admin dropdown. tree_alias = self.resolve_tree_i18n_alias(tree_alias) return self.cache.get_entry('parents', tree_alias)[item]
def determine_coords(list_of_variable_dicts): # type: (List[Dict]) -> Tuple[Set, Set] """Given a list of dicts with xarray object values, identify coordinates. Parameters ---------- list_of_variable_dicts : list of dict or Dataset objects Of the same form as the arguments to expand_variable_dicts. Returns ------- coord_names : set of variable names noncoord_names : set of variable names All variable found in the input should appear in either the set of coordinate or non-coordinate names. """ from .dataarray import DataArray from .dataset import Dataset coord_names = set() # type: set noncoord_names = set() # type: set for variables in list_of_variable_dicts: if isinstance(variables, Dataset): coord_names.update(variables.coords) noncoord_names.update(variables.data_vars) else: for name, var in variables.items(): if isinstance(var, DataArray): coords = set(var._coords) # use private API for speed # explicitly overwritten variables should take precedence coords.discard(name) coord_names.update(coords) return coord_names, noncoord_names
Given a list of dicts with xarray object values, identify coordinates. Parameters ---------- list_of_variable_dicts : list of dict or Dataset objects Of the same form as the arguments to expand_variable_dicts. Returns ------- coord_names : set of variable names noncoord_names : set of variable names All variable found in the input should appear in either the set of coordinate or non-coordinate names.
Below is the the instruction that describes the task: ### Input: Given a list of dicts with xarray object values, identify coordinates. Parameters ---------- list_of_variable_dicts : list of dict or Dataset objects Of the same form as the arguments to expand_variable_dicts. Returns ------- coord_names : set of variable names noncoord_names : set of variable names All variable found in the input should appear in either the set of coordinate or non-coordinate names. ### Response: def determine_coords(list_of_variable_dicts): # type: (List[Dict]) -> Tuple[Set, Set] """Given a list of dicts with xarray object values, identify coordinates. Parameters ---------- list_of_variable_dicts : list of dict or Dataset objects Of the same form as the arguments to expand_variable_dicts. Returns ------- coord_names : set of variable names noncoord_names : set of variable names All variable found in the input should appear in either the set of coordinate or non-coordinate names. """ from .dataarray import DataArray from .dataset import Dataset coord_names = set() # type: set noncoord_names = set() # type: set for variables in list_of_variable_dicts: if isinstance(variables, Dataset): coord_names.update(variables.coords) noncoord_names.update(variables.data_vars) else: for name, var in variables.items(): if isinstance(var, DataArray): coords = set(var._coords) # use private API for speed # explicitly overwritten variables should take precedence coords.discard(name) coord_names.update(coords) return coord_names, noncoord_names
def data_find_all(data, path, dyn_cls=False): """Find and return all element-as-tuples in tuple ``data`` using simplified XPath ``path``. """ path_parts = path.split("/") try: sub_elms = tuple( el for el in data if isinstance(el, (tuple, list)) and el[0] == path_parts[0] ) except IndexError: return None if len(path_parts) > 1: ret = [] for sub_elm in sub_elms: for x in data_find_all(sub_elm, "/".join(path_parts[1:])): ret.append(x) ret = tuple(ret) else: ret = sub_elms if ret and dyn_cls: cls = generate_element_class(ret[0]) return tuple(cls(data=tuple_) for tuple_ in ret) return ret
Find and return all element-as-tuples in tuple ``data`` using simplified XPath ``path``.
Below is the the instruction that describes the task: ### Input: Find and return all element-as-tuples in tuple ``data`` using simplified XPath ``path``. ### Response: def data_find_all(data, path, dyn_cls=False): """Find and return all element-as-tuples in tuple ``data`` using simplified XPath ``path``. """ path_parts = path.split("/") try: sub_elms = tuple( el for el in data if isinstance(el, (tuple, list)) and el[0] == path_parts[0] ) except IndexError: return None if len(path_parts) > 1: ret = [] for sub_elm in sub_elms: for x in data_find_all(sub_elm, "/".join(path_parts[1:])): ret.append(x) ret = tuple(ret) else: ret = sub_elms if ret and dyn_cls: cls = generate_element_class(ret[0]) return tuple(cls(data=tuple_) for tuple_ in ret) return ret
def register_hooked(self, hooks, # type: Union[Type[Hook], Sequence[Type[Hook]]] func, # type: Hooked args_gen=None # type: Optional[ArgsGen] ): # type: (Type[Hook], Callable, Optional[Callable]) -> None """Register func to be run when any of the hooks are run by parent Args: hooks: A Hook class or list of Hook classes of interest func: The callable that should be run on that Hook args_gen: Optionally specify the argument names that should be passed to func. If not given then use func.call_types.keys """ if self.hooked is None: self.hooked = {} if args_gen is None: args_gen = getattr(func, "call_types", {}).keys if not isinstance(hooks, Sequence): hooks = [hooks] for hook_cls in hooks: self.hooked[hook_cls] = (func, args_gen)
Register func to be run when any of the hooks are run by parent Args: hooks: A Hook class or list of Hook classes of interest func: The callable that should be run on that Hook args_gen: Optionally specify the argument names that should be passed to func. If not given then use func.call_types.keys
Below is the the instruction that describes the task: ### Input: Register func to be run when any of the hooks are run by parent Args: hooks: A Hook class or list of Hook classes of interest func: The callable that should be run on that Hook args_gen: Optionally specify the argument names that should be passed to func. If not given then use func.call_types.keys ### Response: def register_hooked(self, hooks, # type: Union[Type[Hook], Sequence[Type[Hook]]] func, # type: Hooked args_gen=None # type: Optional[ArgsGen] ): # type: (Type[Hook], Callable, Optional[Callable]) -> None """Register func to be run when any of the hooks are run by parent Args: hooks: A Hook class or list of Hook classes of interest func: The callable that should be run on that Hook args_gen: Optionally specify the argument names that should be passed to func. If not given then use func.call_types.keys """ if self.hooked is None: self.hooked = {} if args_gen is None: args_gen = getattr(func, "call_types", {}).keys if not isinstance(hooks, Sequence): hooks = [hooks] for hook_cls in hooks: self.hooked[hook_cls] = (func, args_gen)
def getProcessDatabase(self): """Returns number of processes discriminated by database name. @return: Dictionary mapping database name to number of processes. """ info_dict = {} cur = self._conn.cursor() cur.execute("""SHOW FULL PROCESSLIST;""") rows = cur.fetchall() if rows: for row in rows: db = row[3] info_dict[db] = info_dict.get(db, 0) + 1 return info_dict
Returns number of processes discriminated by database name. @return: Dictionary mapping database name to number of processes.
Below is the the instruction that describes the task: ### Input: Returns number of processes discriminated by database name. @return: Dictionary mapping database name to number of processes. ### Response: def getProcessDatabase(self): """Returns number of processes discriminated by database name. @return: Dictionary mapping database name to number of processes. """ info_dict = {} cur = self._conn.cursor() cur.execute("""SHOW FULL PROCESSLIST;""") rows = cur.fetchall() if rows: for row in rows: db = row[3] info_dict[db] = info_dict.get(db, 0) + 1 return info_dict
def _match_objects_against_atlas_footprint( self, orbfitEph, ra, dec): """*match the orbfit generated object positions against atlas exposure footprint* **Key Arguments:** - ``orbfitEph`` -- the orbfit ephemerides - ``ra`` -- the ATLAS exposure RA (degrees) - ``dec`` -- the ATLAS exposure DEC (degrees) **Return:** - ``matchedEph`` -- the ephemerides of objects falling within the ATLAS exposure footprint """ self.log.info( 'starting the ``_match_objects_against_atlas_footprint`` method') # GET THE ORBFIT MAG LIMIT magLimit = float(self.settings["orbfit"]["magnitude limit"]) tileSide = float(self.settings["orbfit"]["atlas exposure match side"]) pi = (4 * math.atan(1.0)) DEG_TO_RAD_FACTOR = pi / 180.0 RAD_TO_DEG_FACTOR = 180.0 / pi nside = 1024 raFc = ra decFc = dec eph = [] raArray = [] decArray = [] for o in orbfitEph: if float(o["apparent_mag"]) < magLimit: eph.append(o) raArray.append(float(o["ra_deg"])) decArray.append(float(o["dec_deg"])) raArray = np.array(raArray) decArray = np.array(decArray) healpix = hp.ang2pix(nside, theta=raArray, phi=decArray, lonlat=True) # GENERATE THE EXPOSURE HEALPIX ID MAP decCorners = (decFc - tileSide / 2, decFc + tileSide / 2) corners = [] for d in decCorners: if d > 90.: d = 180. - d elif d < -90.: d = -180 - d raCorners = (raFc - (tileSide / 2) / np.cos(d * DEG_TO_RAD_FACTOR), raFc + (tileSide / 2) / np.cos(d * DEG_TO_RAD_FACTOR)) for r in raCorners: if r > 360.: r = 720. - r elif r < 0.: r = 360. 
+ r corners.append(hp.ang2vec(r, d, lonlat=True)) # NEAR THE POLES RETURN SQUARE INTO TRIANGE - ALMOST DEGENERATE pole = False for d in decCorners: if d > 87.0 or d < -87.0: pole = True if pole == True: corners = corners[1:] else: # FLIP CORNERS 3 & 4 SO HEALPY UNDERSTANDS POLYGON SHAPE corners = [corners[0], corners[1], corners[3], corners[2]] # RETURN HEALPIXELS IN EXPOSURE AREA expPixels = hp.query_polygon(nside, np.array( corners)) # DICTIONARY STORES ALL THE RELEVANT COORDINATES dicto = defaultdict(list) for ind, (p, r, d, o) in enumerate(zip(healpix, raArray, decArray, eph)): dicto[p].append(o) matchedEph = [] for ele in expPixels: if ele in dicto: matchedEph.append(dicto[ele][0]) self.log.info( 'completed the ``_match_objects_against_atlas_footprint`` method') return matchedEph
*match the orbfit generated object positions against atlas exposure footprint* **Key Arguments:** - ``orbfitEph`` -- the orbfit ephemerides - ``ra`` -- the ATLAS exposure RA (degrees) - ``dec`` -- the ATLAS exposure DEC (degrees) **Return:** - ``matchedEph`` -- the ephemerides of objects falling within the ATLAS exposure footprint
Below is the the instruction that describes the task: ### Input: *match the orbfit generated object positions against atlas exposure footprint* **Key Arguments:** - ``orbfitEph`` -- the orbfit ephemerides - ``ra`` -- the ATLAS exposure RA (degrees) - ``dec`` -- the ATLAS exposure DEC (degrees) **Return:** - ``matchedEph`` -- the ephemerides of objects falling within the ATLAS exposure footprint ### Response: def _match_objects_against_atlas_footprint( self, orbfitEph, ra, dec): """*match the orbfit generated object positions against atlas exposure footprint* **Key Arguments:** - ``orbfitEph`` -- the orbfit ephemerides - ``ra`` -- the ATLAS exposure RA (degrees) - ``dec`` -- the ATLAS exposure DEC (degrees) **Return:** - ``matchedEph`` -- the ephemerides of objects falling within the ATLAS exposure footprint """ self.log.info( 'starting the ``_match_objects_against_atlas_footprint`` method') # GET THE ORBFIT MAG LIMIT magLimit = float(self.settings["orbfit"]["magnitude limit"]) tileSide = float(self.settings["orbfit"]["atlas exposure match side"]) pi = (4 * math.atan(1.0)) DEG_TO_RAD_FACTOR = pi / 180.0 RAD_TO_DEG_FACTOR = 180.0 / pi nside = 1024 raFc = ra decFc = dec eph = [] raArray = [] decArray = [] for o in orbfitEph: if float(o["apparent_mag"]) < magLimit: eph.append(o) raArray.append(float(o["ra_deg"])) decArray.append(float(o["dec_deg"])) raArray = np.array(raArray) decArray = np.array(decArray) healpix = hp.ang2pix(nside, theta=raArray, phi=decArray, lonlat=True) # GENERATE THE EXPOSURE HEALPIX ID MAP decCorners = (decFc - tileSide / 2, decFc + tileSide / 2) corners = [] for d in decCorners: if d > 90.: d = 180. - d elif d < -90.: d = -180 - d raCorners = (raFc - (tileSide / 2) / np.cos(d * DEG_TO_RAD_FACTOR), raFc + (tileSide / 2) / np.cos(d * DEG_TO_RAD_FACTOR)) for r in raCorners: if r > 360.: r = 720. - r elif r < 0.: r = 360. 
+ r corners.append(hp.ang2vec(r, d, lonlat=True)) # NEAR THE POLES RETURN SQUARE INTO TRIANGE - ALMOST DEGENERATE pole = False for d in decCorners: if d > 87.0 or d < -87.0: pole = True if pole == True: corners = corners[1:] else: # FLIP CORNERS 3 & 4 SO HEALPY UNDERSTANDS POLYGON SHAPE corners = [corners[0], corners[1], corners[3], corners[2]] # RETURN HEALPIXELS IN EXPOSURE AREA expPixels = hp.query_polygon(nside, np.array( corners)) # DICTIONARY STORES ALL THE RELEVANT COORDINATES dicto = defaultdict(list) for ind, (p, r, d, o) in enumerate(zip(healpix, raArray, decArray, eph)): dicto[p].append(o) matchedEph = [] for ele in expPixels: if ele in dicto: matchedEph.append(dicto[ele][0]) self.log.info( 'completed the ``_match_objects_against_atlas_footprint`` method') return matchedEph
def create(vm_): ''' To create a single VM in the VMware environment. Sample profile and arguments that can be specified in it can be found :ref:`here. <vmware-cloud-profile>` CLI Example: .. code-block:: bash salt-cloud -p vmware-centos6.5 vmname ''' try: # Check for required profile parameters before sending any API calls. if (vm_['profile'] and config.is_profile_configured(__opts__, __active_provider_name__ or 'vmware', vm_['profile'], vm_=vm_) is False): return False except AttributeError: pass __utils__['cloud.fire_event']( 'event', 'starting create', 'salt/cloud/{0}/creating'.format(vm_['name']), args=__utils__['cloud.filter_event']('creating', vm_, ['name', 'profile', 'provider', 'driver']), sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) vm_name = config.get_cloud_config_value( 'name', vm_, __opts__, default=None ) folder = config.get_cloud_config_value( 'folder', vm_, __opts__, default=None ) datacenter = config.get_cloud_config_value( 'datacenter', vm_, __opts__, default=None ) resourcepool = config.get_cloud_config_value( 'resourcepool', vm_, __opts__, default=None ) cluster = config.get_cloud_config_value( 'cluster', vm_, __opts__, default=None ) datastore = config.get_cloud_config_value( 'datastore', vm_, __opts__, default=None ) host = config.get_cloud_config_value( 'host', vm_, __opts__, default=None ) template = config.get_cloud_config_value( 'template', vm_, __opts__, default=False ) num_cpus = config.get_cloud_config_value( 'num_cpus', vm_, __opts__, default=None ) cores_per_socket = config.get_cloud_config_value( 'cores_per_socket', vm_, __opts__, default=None ) memory = config.get_cloud_config_value( 'memory', vm_, __opts__, default=None ) devices = config.get_cloud_config_value( 'devices', vm_, __opts__, default=None ) extra_config = config.get_cloud_config_value( 'extra_config', vm_, __opts__, default=None ) annotation = config.get_cloud_config_value( 'annotation', vm_, __opts__, default=None ) power = 
config.get_cloud_config_value( 'power_on', vm_, __opts__, default=True ) key_filename = config.get_cloud_config_value( 'private_key', vm_, __opts__, search_global=False, default=None ) deploy = config.get_cloud_config_value( 'deploy', vm_, __opts__, search_global=True, default=True ) wait_for_ip_timeout = config.get_cloud_config_value( 'wait_for_ip_timeout', vm_, __opts__, default=20 * 60 ) domain = config.get_cloud_config_value( 'domain', vm_, __opts__, search_global=False, default='local' ) hardware_version = config.get_cloud_config_value( 'hardware_version', vm_, __opts__, search_global=False, default=None ) guest_id = config.get_cloud_config_value( 'image', vm_, __opts__, search_global=False, default=None ) customization = config.get_cloud_config_value( 'customization', vm_, __opts__, search_global=False, default=True ) customization_spec = config.get_cloud_config_value( 'customization_spec', vm_, __opts__, search_global=False, default=None ) win_password = config.get_cloud_config_value( 'win_password', vm_, __opts__, search_global=False, default=None ) win_organization_name = config.get_cloud_config_value( 'win_organization_name', vm_, __opts__, search_global=False, default='Organization' ) plain_text = config.get_cloud_config_value( 'plain_text', vm_, __opts__, search_global=False, default=False ) win_user_fullname = config.get_cloud_config_value( 'win_user_fullname', vm_, __opts__, search_global=False, default='Windows User' ) win_run_once = config.get_cloud_config_value( 'win_run_once', vm_, __opts__, search_global=False, default=None ) win_ad_domain = config.get_cloud_config_value( 'win_ad_domain', vm_, __opts__, search_global=False, default='' ) win_ad_user = config.get_cloud_config_value( 'win_ad_user', vm_, __opts__, search_global=False, default='' ) win_ad_password = config.get_cloud_config_value( 'win_ad_password', vm_, __opts__, search_global=False, default='' ) win_autologon = config.get_cloud_config_value( 'win_autologon', vm_, __opts__, 
search_global=False, default=True ) timezone = config.get_cloud_config_value( 'timezone', vm_, __opts__, search_global=False, default='' ) hw_clock_utc = config.get_cloud_config_value( 'hw_clock_utc', vm_, __opts__, search_global=False, default='' ) clonefrom_datacenter = config.get_cloud_config_value( 'clonefrom_datacenter', vm_, __opts__, search_global=False, default=datacenter ) # Get service instance object si = _get_si() container_ref = None clonefrom_datacenter_ref = None # If datacenter is specified, set the container reference to start search from it instead if datacenter: datacenter_ref = salt.utils.vmware.get_mor_by_property(_get_si(), vim.Datacenter, datacenter) container_ref = datacenter_ref if datacenter_ref else None if 'clonefrom' in vm_: # If datacenter is specified, set the container reference to start search from it instead if datacenter: datacenter_ref = salt.utils.vmware.get_mor_by_property( si, vim.Datacenter, datacenter ) container_ref = datacenter_ref if datacenter_ref else None clonefrom_container_ref = datacenter_ref if datacenter_ref else None # allow specifying a different datacenter that the template lives in if clonefrom_datacenter: clonefrom_datacenter_ref = salt.utils.vmware.get_mor_by_property( si, vim.Datacenter, clonefrom_datacenter ) clonefrom_container_ref = clonefrom_datacenter_ref if clonefrom_datacenter_ref else None # Clone VM/template from specified VM/template object_ref = salt.utils.vmware.get_mor_by_property( si, vim.VirtualMachine, vm_['clonefrom'], container_ref=clonefrom_container_ref ) if object_ref: clone_type = "template" if object_ref.config.template else "vm" else: raise SaltCloudSystemExit( 'The VM/template that you have specified under clonefrom does not exist.' ) else: clone_type = None object_ref = None # Either a cluster, or a resource pool must be specified when cloning from template or creating. 
if resourcepool: resourcepool_ref = salt.utils.vmware.get_mor_by_property( si, vim.ResourcePool, resourcepool, container_ref=container_ref ) if not resourcepool_ref: log.error("Specified resource pool: '%s' does not exist", resourcepool) if not clone_type or clone_type == "template": raise SaltCloudSystemExit('You must specify a resource pool that exists.') elif cluster: cluster_ref = salt.utils.vmware.get_mor_by_property( si, vim.ClusterComputeResource, cluster, container_ref=container_ref ) if not cluster_ref: log.error("Specified cluster: '%s' does not exist", cluster) if not clone_type or clone_type == "template": raise SaltCloudSystemExit('You must specify a cluster that exists.') else: resourcepool_ref = cluster_ref.resourcePool elif clone_type == "template": raise SaltCloudSystemExit( 'You must either specify a cluster or a resource pool when cloning from a template.' ) elif not clone_type: raise SaltCloudSystemExit( 'You must either specify a cluster or a resource pool when creating.' ) else: log.debug("Using resource pool used by the %s %s", clone_type, vm_['clonefrom']) # Either a datacenter or a folder can be optionally specified when cloning, required when creating. # If not specified when cloning, the existing VM/template\'s parent folder is used. 
if folder: folder_parts = folder.split('/') search_reference = container_ref for folder_part in folder_parts: if folder_part: folder_ref = salt.utils.vmware.get_mor_by_property( si, vim.Folder, folder_part, container_ref=search_reference ) search_reference = folder_ref if not folder_ref: log.error("Specified folder: '%s' does not exist", folder) log.debug("Using folder in which %s %s is present", clone_type, vm_['clonefrom']) folder_ref = object_ref.parent elif datacenter: if not datacenter_ref: log.error("Specified datacenter: '%s' does not exist", datacenter) log.debug("Using datacenter folder in which %s %s is present", clone_type, vm_['clonefrom']) folder_ref = object_ref.parent else: folder_ref = datacenter_ref.vmFolder elif not clone_type: raise SaltCloudSystemExit( 'You must either specify a folder or a datacenter when creating not cloning.' ) else: log.debug("Using folder in which %s %s is present", clone_type, vm_['clonefrom']) folder_ref = object_ref.parent if 'clonefrom' in vm_: # Create the relocation specs reloc_spec = vim.vm.RelocateSpec() if (resourcepool and resourcepool_ref) or (cluster and cluster_ref): reloc_spec.pool = resourcepool_ref # Either a datastore/datastore cluster can be optionally specified. # If not specified, the current datastore is used. 
if datastore: datastore_ref = salt.utils.vmware.get_mor_by_property( si, vim.Datastore, datastore, container_ref=container_ref ) if datastore_ref: # specific datastore has been specified reloc_spec.datastore = datastore_ref else: datastore_cluster_ref = salt.utils.vmware.get_mor_by_property(si, vim.StoragePod, datastore, container_ref=container_ref) if not datastore_cluster_ref: log.error("Specified datastore/datastore cluster: '%s' does not exist", datastore) log.debug("Using datastore used by the %s %s", clone_type, vm_['clonefrom']) else: log.debug("No datastore/datastore cluster specified") log.debug("Using datastore used by the %s %s", clone_type, vm_['clonefrom']) if host: host_ref = salt.utils.vmware.get_mor_by_property( si, vim.HostSystem, host, container_ref=container_ref ) if host_ref: reloc_spec.host = host_ref else: log.error("Specified host: '%s' does not exist", host) else: if not datastore: raise SaltCloudSystemExit( 'You must specify a datastore when creating not cloning.' 
) else: datastore_ref = salt.utils.vmware.get_mor_by_property( si, vim.Datastore, datastore ) if not datastore_ref: raise SaltCloudSystemExit("Specified datastore: '{0}' does not exist".format(datastore)) if host: host_ref = salt.utils.vmware.get_mor_by_property( _get_si(), vim.HostSystem, host, container_ref=container_ref ) if not host_ref: log.error("Specified host: '%s' does not exist", host) # Create the config specs config_spec = vim.vm.ConfigSpec() # If the hardware version is specified and if it is different from the current # hardware version, then schedule a hardware version upgrade if hardware_version and object_ref is not None: hardware_version = 'vmx-{0:02}'.format(hardware_version) if hardware_version != object_ref.config.version: log.debug( "Scheduling hardware version upgrade from %s to %s", object_ref.config.version, hardware_version ) scheduled_hardware_upgrade = vim.vm.ScheduledHardwareUpgradeInfo() scheduled_hardware_upgrade.upgradePolicy = 'always' scheduled_hardware_upgrade.versionKey = hardware_version config_spec.scheduledHardwareUpgradeInfo = scheduled_hardware_upgrade else: log.debug("Virtual hardware version already set to %s", hardware_version) if num_cpus: log.debug("Setting cpu to: %s", num_cpus) config_spec.numCPUs = int(num_cpus) if cores_per_socket: log.debug("Setting cores per socket to: %s", cores_per_socket) config_spec.numCoresPerSocket = int(cores_per_socket) if memory: try: memory_num, memory_unit = re.findall(r"[^\W\d_]+|\d+.\d+|\d+", memory) if memory_unit.lower() == "mb": memory_mb = int(memory_num) elif memory_unit.lower() == "gb": memory_mb = int(float(memory_num)*1024.0) else: err_msg = "Invalid memory type specified: '{0}'".format(memory_unit) log.error(err_msg) return {'Error': err_msg} except (TypeError, ValueError): memory_mb = int(memory) log.debug("Setting memory to: %s MB", memory_mb) config_spec.memoryMB = memory_mb if devices: specs = _manage_devices(devices, vm=object_ref, container_ref=container_ref, 
new_vm_name=vm_name) config_spec.deviceChange = specs['device_specs'] if extra_config: for key, value in six.iteritems(extra_config): option = vim.option.OptionValue(key=key, value=value) config_spec.extraConfig.append(option) if annotation: config_spec.annotation = six.text_type(annotation) if 'clonefrom' in vm_: clone_spec = handle_snapshot( config_spec, object_ref, reloc_spec, template, vm_ ) if not clone_spec: clone_spec = build_clonespec(config_spec, object_ref, reloc_spec, template) if customization and customization_spec: customization_spec = salt.utils.vmware.get_customizationspec_ref(si=si, customization_spec_name=customization_spec) clone_spec.customization = customization_spec.spec elif customization and (devices and 'network' in list(devices.keys())): global_ip = vim.vm.customization.GlobalIPSettings() if 'dns_servers' in list(vm_.keys()): global_ip.dnsServerList = vm_['dns_servers'] non_hostname_chars = re.compile(r'[^\w-]') if re.search(non_hostname_chars, vm_name): host_name = re.split(non_hostname_chars, vm_name, maxsplit=1)[0] domain_name = re.split(non_hostname_chars, vm_name, maxsplit=1)[-1] else: host_name = vm_name domain_name = domain if 'Windows' not in object_ref.config.guestFullName: identity = vim.vm.customization.LinuxPrep() identity.hostName = vim.vm.customization.FixedName(name=host_name) identity.domain = domain_name if timezone: identity.timeZone = timezone if isinstance(hw_clock_utc, bool): identity.hwClockUTC = hw_clock_utc else: identity = vim.vm.customization.Sysprep() identity.guiUnattended = vim.vm.customization.GuiUnattended() identity.guiUnattended.autoLogon = win_autologon if win_autologon: identity.guiUnattended.autoLogonCount = 1 else: identity.guiUnattended.autoLogonCount = 0 identity.guiUnattended.password = vim.vm.customization.Password() identity.guiUnattended.password.value = win_password identity.guiUnattended.password.plainText = plain_text if timezone: identity.guiUnattended.timeZone = timezone if win_run_once: 
identity.guiRunOnce = vim.vm.customization.GuiRunOnce() identity.guiRunOnce.commandList = win_run_once identity.userData = vim.vm.customization.UserData() identity.userData.fullName = win_user_fullname identity.userData.orgName = win_organization_name identity.userData.computerName = vim.vm.customization.FixedName() identity.userData.computerName.name = host_name identity.identification = vim.vm.customization.Identification() if win_ad_domain and win_ad_user and win_ad_password: identity.identification.joinDomain = win_ad_domain identity.identification.domainAdmin = win_ad_user identity.identification.domainAdminPassword = vim.vm.customization.Password() identity.identification.domainAdminPassword.value = win_ad_password identity.identification.domainAdminPassword.plainText = plain_text custom_spec = vim.vm.customization.Specification( globalIPSettings=global_ip, identity=identity, nicSettingMap=specs['nics_map'] ) clone_spec.customization = custom_spec if not template: clone_spec.powerOn = power log.debug('clone_spec set to:\n%s', pprint.pformat(clone_spec)) else: config_spec.name = vm_name config_spec.files = vim.vm.FileInfo() config_spec.files.vmPathName = '[{0}] {1}/{1}.vmx'.format(datastore, vm_name) config_spec.guestId = guest_id log.debug('config_spec set to:\n%s', pprint.pformat(config_spec)) event_kwargs = vm_.copy() if event_kwargs.get('password'): del event_kwargs['password'] try: __utils__['cloud.fire_event']( 'event', 'requesting instance', 'salt/cloud/{0}/requesting'.format(vm_['name']), args=__utils__['cloud.filter_event']('requesting', event_kwargs, list(event_kwargs)), sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) if 'clonefrom' in vm_: log.info("Creating %s from %s(%s)", vm_['name'], clone_type, vm_['clonefrom']) if datastore and not datastore_ref and datastore_cluster_ref: # datastore cluster has been specified so apply Storage DRS recommendations pod_spec = vim.storageDrs.PodSelectionSpec(storagePod=datastore_cluster_ref) 
storage_spec = vim.storageDrs.StoragePlacementSpec( type='clone', vm=object_ref, podSelectionSpec=pod_spec, cloneSpec=clone_spec, cloneName=vm_name, folder=folder_ref ) # get recommended datastores recommended_datastores = si.content.storageResourceManager.RecommendDatastores(storageSpec=storage_spec) # apply storage DRS recommendations task = si.content.storageResourceManager.ApplyStorageDrsRecommendation_Task(recommended_datastores.recommendations[0].key) salt.utils.vmware.wait_for_task(task, vm_name, 'apply storage DRS recommendations', 5, 'info') else: # clone the VM/template task = object_ref.Clone(folder_ref, vm_name, clone_spec) salt.utils.vmware.wait_for_task(task, vm_name, 'clone', 5, 'info') else: log.info('Creating %s', vm_['name']) if host: task = folder_ref.CreateVM_Task(config_spec, resourcepool_ref, host_ref) else: task = folder_ref.CreateVM_Task(config_spec, resourcepool_ref) salt.utils.vmware.wait_for_task(task, vm_name, "create", 15, 'info') except Exception as exc: err_msg = 'Error creating {0}: {1}'.format(vm_['name'], exc) log.error( err_msg, # Show the traceback if the debug logging level is enabled exc_info_on_loglevel=logging.DEBUG ) return {'Error': err_msg} new_vm_ref = salt.utils.vmware.get_mor_by_property(si, vim.VirtualMachine, vm_name, container_ref=container_ref) # Find how to power on in CreateVM_Task (if possible), for now this will do try: if not clone_type and power: task = new_vm_ref.PowerOn() salt.utils.vmware.wait_for_task(task, vm_name, 'power', 5, 'info') except Exception as exc: log.info('Powering on the VM threw this exception. 
Ignoring.') log.info(exc) # If it a template or if it does not need to be powered on then do not wait for the IP out = None if not template and power: ip = _wait_for_ip(new_vm_ref, wait_for_ip_timeout) if ip: log.info("[ %s ] IPv4 is: %s", vm_name, ip) # ssh or smb using ip and install salt only if deploy is True if deploy: vm_['key_filename'] = key_filename # if specified, prefer ssh_host to the discovered ip address if 'ssh_host' not in vm_: vm_['ssh_host'] = ip log.info("[ %s ] Deploying to %s", vm_name, vm_['ssh_host']) out = __utils__['cloud.bootstrap'](vm_, __opts__) data = show_instance(vm_name, call='action') if deploy and isinstance(out, dict): data['deploy_kwargs'] = out.get('deploy_kwargs', {}) __utils__['cloud.fire_event']( 'event', 'created instance', 'salt/cloud/{0}/created'.format(vm_['name']), args=__utils__['cloud.filter_event']('created', vm_, ['name', 'profile', 'provider', 'driver']), sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) return data
To create a single VM in the VMware environment. Sample profile and arguments that can be specified in it can be found :ref:`here. <vmware-cloud-profile>` CLI Example: .. code-block:: bash salt-cloud -p vmware-centos6.5 vmname
Below is the the instruction that describes the task: ### Input: To create a single VM in the VMware environment. Sample profile and arguments that can be specified in it can be found :ref:`here. <vmware-cloud-profile>` CLI Example: .. code-block:: bash salt-cloud -p vmware-centos6.5 vmname ### Response: def create(vm_): ''' To create a single VM in the VMware environment. Sample profile and arguments that can be specified in it can be found :ref:`here. <vmware-cloud-profile>` CLI Example: .. code-block:: bash salt-cloud -p vmware-centos6.5 vmname ''' try: # Check for required profile parameters before sending any API calls. if (vm_['profile'] and config.is_profile_configured(__opts__, __active_provider_name__ or 'vmware', vm_['profile'], vm_=vm_) is False): return False except AttributeError: pass __utils__['cloud.fire_event']( 'event', 'starting create', 'salt/cloud/{0}/creating'.format(vm_['name']), args=__utils__['cloud.filter_event']('creating', vm_, ['name', 'profile', 'provider', 'driver']), sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) vm_name = config.get_cloud_config_value( 'name', vm_, __opts__, default=None ) folder = config.get_cloud_config_value( 'folder', vm_, __opts__, default=None ) datacenter = config.get_cloud_config_value( 'datacenter', vm_, __opts__, default=None ) resourcepool = config.get_cloud_config_value( 'resourcepool', vm_, __opts__, default=None ) cluster = config.get_cloud_config_value( 'cluster', vm_, __opts__, default=None ) datastore = config.get_cloud_config_value( 'datastore', vm_, __opts__, default=None ) host = config.get_cloud_config_value( 'host', vm_, __opts__, default=None ) template = config.get_cloud_config_value( 'template', vm_, __opts__, default=False ) num_cpus = config.get_cloud_config_value( 'num_cpus', vm_, __opts__, default=None ) cores_per_socket = config.get_cloud_config_value( 'cores_per_socket', vm_, __opts__, default=None ) memory = config.get_cloud_config_value( 'memory', vm_, __opts__, 
default=None ) devices = config.get_cloud_config_value( 'devices', vm_, __opts__, default=None ) extra_config = config.get_cloud_config_value( 'extra_config', vm_, __opts__, default=None ) annotation = config.get_cloud_config_value( 'annotation', vm_, __opts__, default=None ) power = config.get_cloud_config_value( 'power_on', vm_, __opts__, default=True ) key_filename = config.get_cloud_config_value( 'private_key', vm_, __opts__, search_global=False, default=None ) deploy = config.get_cloud_config_value( 'deploy', vm_, __opts__, search_global=True, default=True ) wait_for_ip_timeout = config.get_cloud_config_value( 'wait_for_ip_timeout', vm_, __opts__, default=20 * 60 ) domain = config.get_cloud_config_value( 'domain', vm_, __opts__, search_global=False, default='local' ) hardware_version = config.get_cloud_config_value( 'hardware_version', vm_, __opts__, search_global=False, default=None ) guest_id = config.get_cloud_config_value( 'image', vm_, __opts__, search_global=False, default=None ) customization = config.get_cloud_config_value( 'customization', vm_, __opts__, search_global=False, default=True ) customization_spec = config.get_cloud_config_value( 'customization_spec', vm_, __opts__, search_global=False, default=None ) win_password = config.get_cloud_config_value( 'win_password', vm_, __opts__, search_global=False, default=None ) win_organization_name = config.get_cloud_config_value( 'win_organization_name', vm_, __opts__, search_global=False, default='Organization' ) plain_text = config.get_cloud_config_value( 'plain_text', vm_, __opts__, search_global=False, default=False ) win_user_fullname = config.get_cloud_config_value( 'win_user_fullname', vm_, __opts__, search_global=False, default='Windows User' ) win_run_once = config.get_cloud_config_value( 'win_run_once', vm_, __opts__, search_global=False, default=None ) win_ad_domain = config.get_cloud_config_value( 'win_ad_domain', vm_, __opts__, search_global=False, default='' ) win_ad_user = 
config.get_cloud_config_value( 'win_ad_user', vm_, __opts__, search_global=False, default='' ) win_ad_password = config.get_cloud_config_value( 'win_ad_password', vm_, __opts__, search_global=False, default='' ) win_autologon = config.get_cloud_config_value( 'win_autologon', vm_, __opts__, search_global=False, default=True ) timezone = config.get_cloud_config_value( 'timezone', vm_, __opts__, search_global=False, default='' ) hw_clock_utc = config.get_cloud_config_value( 'hw_clock_utc', vm_, __opts__, search_global=False, default='' ) clonefrom_datacenter = config.get_cloud_config_value( 'clonefrom_datacenter', vm_, __opts__, search_global=False, default=datacenter ) # Get service instance object si = _get_si() container_ref = None clonefrom_datacenter_ref = None # If datacenter is specified, set the container reference to start search from it instead if datacenter: datacenter_ref = salt.utils.vmware.get_mor_by_property(_get_si(), vim.Datacenter, datacenter) container_ref = datacenter_ref if datacenter_ref else None if 'clonefrom' in vm_: # If datacenter is specified, set the container reference to start search from it instead if datacenter: datacenter_ref = salt.utils.vmware.get_mor_by_property( si, vim.Datacenter, datacenter ) container_ref = datacenter_ref if datacenter_ref else None clonefrom_container_ref = datacenter_ref if datacenter_ref else None # allow specifying a different datacenter that the template lives in if clonefrom_datacenter: clonefrom_datacenter_ref = salt.utils.vmware.get_mor_by_property( si, vim.Datacenter, clonefrom_datacenter ) clonefrom_container_ref = clonefrom_datacenter_ref if clonefrom_datacenter_ref else None # Clone VM/template from specified VM/template object_ref = salt.utils.vmware.get_mor_by_property( si, vim.VirtualMachine, vm_['clonefrom'], container_ref=clonefrom_container_ref ) if object_ref: clone_type = "template" if object_ref.config.template else "vm" else: raise SaltCloudSystemExit( 'The VM/template that you have 
specified under clonefrom does not exist.' ) else: clone_type = None object_ref = None # Either a cluster, or a resource pool must be specified when cloning from template or creating. if resourcepool: resourcepool_ref = salt.utils.vmware.get_mor_by_property( si, vim.ResourcePool, resourcepool, container_ref=container_ref ) if not resourcepool_ref: log.error("Specified resource pool: '%s' does not exist", resourcepool) if not clone_type or clone_type == "template": raise SaltCloudSystemExit('You must specify a resource pool that exists.') elif cluster: cluster_ref = salt.utils.vmware.get_mor_by_property( si, vim.ClusterComputeResource, cluster, container_ref=container_ref ) if not cluster_ref: log.error("Specified cluster: '%s' does not exist", cluster) if not clone_type or clone_type == "template": raise SaltCloudSystemExit('You must specify a cluster that exists.') else: resourcepool_ref = cluster_ref.resourcePool elif clone_type == "template": raise SaltCloudSystemExit( 'You must either specify a cluster or a resource pool when cloning from a template.' ) elif not clone_type: raise SaltCloudSystemExit( 'You must either specify a cluster or a resource pool when creating.' ) else: log.debug("Using resource pool used by the %s %s", clone_type, vm_['clonefrom']) # Either a datacenter or a folder can be optionally specified when cloning, required when creating. # If not specified when cloning, the existing VM/template\'s parent folder is used. 
if folder: folder_parts = folder.split('/') search_reference = container_ref for folder_part in folder_parts: if folder_part: folder_ref = salt.utils.vmware.get_mor_by_property( si, vim.Folder, folder_part, container_ref=search_reference ) search_reference = folder_ref if not folder_ref: log.error("Specified folder: '%s' does not exist", folder) log.debug("Using folder in which %s %s is present", clone_type, vm_['clonefrom']) folder_ref = object_ref.parent elif datacenter: if not datacenter_ref: log.error("Specified datacenter: '%s' does not exist", datacenter) log.debug("Using datacenter folder in which %s %s is present", clone_type, vm_['clonefrom']) folder_ref = object_ref.parent else: folder_ref = datacenter_ref.vmFolder elif not clone_type: raise SaltCloudSystemExit( 'You must either specify a folder or a datacenter when creating not cloning.' ) else: log.debug("Using folder in which %s %s is present", clone_type, vm_['clonefrom']) folder_ref = object_ref.parent if 'clonefrom' in vm_: # Create the relocation specs reloc_spec = vim.vm.RelocateSpec() if (resourcepool and resourcepool_ref) or (cluster and cluster_ref): reloc_spec.pool = resourcepool_ref # Either a datastore/datastore cluster can be optionally specified. # If not specified, the current datastore is used. 
if datastore: datastore_ref = salt.utils.vmware.get_mor_by_property( si, vim.Datastore, datastore, container_ref=container_ref ) if datastore_ref: # specific datastore has been specified reloc_spec.datastore = datastore_ref else: datastore_cluster_ref = salt.utils.vmware.get_mor_by_property(si, vim.StoragePod, datastore, container_ref=container_ref) if not datastore_cluster_ref: log.error("Specified datastore/datastore cluster: '%s' does not exist", datastore) log.debug("Using datastore used by the %s %s", clone_type, vm_['clonefrom']) else: log.debug("No datastore/datastore cluster specified") log.debug("Using datastore used by the %s %s", clone_type, vm_['clonefrom']) if host: host_ref = salt.utils.vmware.get_mor_by_property( si, vim.HostSystem, host, container_ref=container_ref ) if host_ref: reloc_spec.host = host_ref else: log.error("Specified host: '%s' does not exist", host) else: if not datastore: raise SaltCloudSystemExit( 'You must specify a datastore when creating not cloning.' 
) else: datastore_ref = salt.utils.vmware.get_mor_by_property( si, vim.Datastore, datastore ) if not datastore_ref: raise SaltCloudSystemExit("Specified datastore: '{0}' does not exist".format(datastore)) if host: host_ref = salt.utils.vmware.get_mor_by_property( _get_si(), vim.HostSystem, host, container_ref=container_ref ) if not host_ref: log.error("Specified host: '%s' does not exist", host) # Create the config specs config_spec = vim.vm.ConfigSpec() # If the hardware version is specified and if it is different from the current # hardware version, then schedule a hardware version upgrade if hardware_version and object_ref is not None: hardware_version = 'vmx-{0:02}'.format(hardware_version) if hardware_version != object_ref.config.version: log.debug( "Scheduling hardware version upgrade from %s to %s", object_ref.config.version, hardware_version ) scheduled_hardware_upgrade = vim.vm.ScheduledHardwareUpgradeInfo() scheduled_hardware_upgrade.upgradePolicy = 'always' scheduled_hardware_upgrade.versionKey = hardware_version config_spec.scheduledHardwareUpgradeInfo = scheduled_hardware_upgrade else: log.debug("Virtual hardware version already set to %s", hardware_version) if num_cpus: log.debug("Setting cpu to: %s", num_cpus) config_spec.numCPUs = int(num_cpus) if cores_per_socket: log.debug("Setting cores per socket to: %s", cores_per_socket) config_spec.numCoresPerSocket = int(cores_per_socket) if memory: try: memory_num, memory_unit = re.findall(r"[^\W\d_]+|\d+.\d+|\d+", memory) if memory_unit.lower() == "mb": memory_mb = int(memory_num) elif memory_unit.lower() == "gb": memory_mb = int(float(memory_num)*1024.0) else: err_msg = "Invalid memory type specified: '{0}'".format(memory_unit) log.error(err_msg) return {'Error': err_msg} except (TypeError, ValueError): memory_mb = int(memory) log.debug("Setting memory to: %s MB", memory_mb) config_spec.memoryMB = memory_mb if devices: specs = _manage_devices(devices, vm=object_ref, container_ref=container_ref, 
new_vm_name=vm_name) config_spec.deviceChange = specs['device_specs'] if extra_config: for key, value in six.iteritems(extra_config): option = vim.option.OptionValue(key=key, value=value) config_spec.extraConfig.append(option) if annotation: config_spec.annotation = six.text_type(annotation) if 'clonefrom' in vm_: clone_spec = handle_snapshot( config_spec, object_ref, reloc_spec, template, vm_ ) if not clone_spec: clone_spec = build_clonespec(config_spec, object_ref, reloc_spec, template) if customization and customization_spec: customization_spec = salt.utils.vmware.get_customizationspec_ref(si=si, customization_spec_name=customization_spec) clone_spec.customization = customization_spec.spec elif customization and (devices and 'network' in list(devices.keys())): global_ip = vim.vm.customization.GlobalIPSettings() if 'dns_servers' in list(vm_.keys()): global_ip.dnsServerList = vm_['dns_servers'] non_hostname_chars = re.compile(r'[^\w-]') if re.search(non_hostname_chars, vm_name): host_name = re.split(non_hostname_chars, vm_name, maxsplit=1)[0] domain_name = re.split(non_hostname_chars, vm_name, maxsplit=1)[-1] else: host_name = vm_name domain_name = domain if 'Windows' not in object_ref.config.guestFullName: identity = vim.vm.customization.LinuxPrep() identity.hostName = vim.vm.customization.FixedName(name=host_name) identity.domain = domain_name if timezone: identity.timeZone = timezone if isinstance(hw_clock_utc, bool): identity.hwClockUTC = hw_clock_utc else: identity = vim.vm.customization.Sysprep() identity.guiUnattended = vim.vm.customization.GuiUnattended() identity.guiUnattended.autoLogon = win_autologon if win_autologon: identity.guiUnattended.autoLogonCount = 1 else: identity.guiUnattended.autoLogonCount = 0 identity.guiUnattended.password = vim.vm.customization.Password() identity.guiUnattended.password.value = win_password identity.guiUnattended.password.plainText = plain_text if timezone: identity.guiUnattended.timeZone = timezone if win_run_once: 
identity.guiRunOnce = vim.vm.customization.GuiRunOnce() identity.guiRunOnce.commandList = win_run_once identity.userData = vim.vm.customization.UserData() identity.userData.fullName = win_user_fullname identity.userData.orgName = win_organization_name identity.userData.computerName = vim.vm.customization.FixedName() identity.userData.computerName.name = host_name identity.identification = vim.vm.customization.Identification() if win_ad_domain and win_ad_user and win_ad_password: identity.identification.joinDomain = win_ad_domain identity.identification.domainAdmin = win_ad_user identity.identification.domainAdminPassword = vim.vm.customization.Password() identity.identification.domainAdminPassword.value = win_ad_password identity.identification.domainAdminPassword.plainText = plain_text custom_spec = vim.vm.customization.Specification( globalIPSettings=global_ip, identity=identity, nicSettingMap=specs['nics_map'] ) clone_spec.customization = custom_spec if not template: clone_spec.powerOn = power log.debug('clone_spec set to:\n%s', pprint.pformat(clone_spec)) else: config_spec.name = vm_name config_spec.files = vim.vm.FileInfo() config_spec.files.vmPathName = '[{0}] {1}/{1}.vmx'.format(datastore, vm_name) config_spec.guestId = guest_id log.debug('config_spec set to:\n%s', pprint.pformat(config_spec)) event_kwargs = vm_.copy() if event_kwargs.get('password'): del event_kwargs['password'] try: __utils__['cloud.fire_event']( 'event', 'requesting instance', 'salt/cloud/{0}/requesting'.format(vm_['name']), args=__utils__['cloud.filter_event']('requesting', event_kwargs, list(event_kwargs)), sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) if 'clonefrom' in vm_: log.info("Creating %s from %s(%s)", vm_['name'], clone_type, vm_['clonefrom']) if datastore and not datastore_ref and datastore_cluster_ref: # datastore cluster has been specified so apply Storage DRS recommendations pod_spec = vim.storageDrs.PodSelectionSpec(storagePod=datastore_cluster_ref) 
storage_spec = vim.storageDrs.StoragePlacementSpec( type='clone', vm=object_ref, podSelectionSpec=pod_spec, cloneSpec=clone_spec, cloneName=vm_name, folder=folder_ref ) # get recommended datastores recommended_datastores = si.content.storageResourceManager.RecommendDatastores(storageSpec=storage_spec) # apply storage DRS recommendations task = si.content.storageResourceManager.ApplyStorageDrsRecommendation_Task(recommended_datastores.recommendations[0].key) salt.utils.vmware.wait_for_task(task, vm_name, 'apply storage DRS recommendations', 5, 'info') else: # clone the VM/template task = object_ref.Clone(folder_ref, vm_name, clone_spec) salt.utils.vmware.wait_for_task(task, vm_name, 'clone', 5, 'info') else: log.info('Creating %s', vm_['name']) if host: task = folder_ref.CreateVM_Task(config_spec, resourcepool_ref, host_ref) else: task = folder_ref.CreateVM_Task(config_spec, resourcepool_ref) salt.utils.vmware.wait_for_task(task, vm_name, "create", 15, 'info') except Exception as exc: err_msg = 'Error creating {0}: {1}'.format(vm_['name'], exc) log.error( err_msg, # Show the traceback if the debug logging level is enabled exc_info_on_loglevel=logging.DEBUG ) return {'Error': err_msg} new_vm_ref = salt.utils.vmware.get_mor_by_property(si, vim.VirtualMachine, vm_name, container_ref=container_ref) # Find how to power on in CreateVM_Task (if possible), for now this will do try: if not clone_type and power: task = new_vm_ref.PowerOn() salt.utils.vmware.wait_for_task(task, vm_name, 'power', 5, 'info') except Exception as exc: log.info('Powering on the VM threw this exception. 
Ignoring.') log.info(exc) # If it a template or if it does not need to be powered on then do not wait for the IP out = None if not template and power: ip = _wait_for_ip(new_vm_ref, wait_for_ip_timeout) if ip: log.info("[ %s ] IPv4 is: %s", vm_name, ip) # ssh or smb using ip and install salt only if deploy is True if deploy: vm_['key_filename'] = key_filename # if specified, prefer ssh_host to the discovered ip address if 'ssh_host' not in vm_: vm_['ssh_host'] = ip log.info("[ %s ] Deploying to %s", vm_name, vm_['ssh_host']) out = __utils__['cloud.bootstrap'](vm_, __opts__) data = show_instance(vm_name, call='action') if deploy and isinstance(out, dict): data['deploy_kwargs'] = out.get('deploy_kwargs', {}) __utils__['cloud.fire_event']( 'event', 'created instance', 'salt/cloud/{0}/created'.format(vm_['name']), args=__utils__['cloud.filter_event']('created', vm_, ['name', 'profile', 'provider', 'driver']), sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) return data
def _validate_config():
    '''
    Validate the azurefs configuration.

    Returns True when ``__opts__['azurefs']`` is a list of dicts that each
    contain both ``account_name`` and ``container_name``; otherwise logs
    the problem and returns False.
    '''
    containers = __opts__['azurefs']
    if not isinstance(containers, list):
        log.error('azurefs configuration is not formed as a list, skipping azurefs')
        return False
    for entry in containers:
        # Every entry must be a dict describing one storage container.
        if not isinstance(entry, dict):
            log.error(
                'One or more entries in the azurefs configuration list are '
                'not formed as a dict. Skipping azurefs: %s', entry
            )
            return False
        # Both keys are mandatory for a container to be usable.
        if 'account_name' not in entry or 'container_name' not in entry:
            log.error(
                'An azurefs container configuration is missing either an '
                'account_name or a container_name: %s', entry
            )
            return False
    return True
Validate azurefs config, return False if it doesn't validate
Below is the the instruction that describes the task: ### Input: Validate azurefs config, return False if it doesn't validate ### Response: def _validate_config(): ''' Validate azurefs config, return False if it doesn't validate ''' if not isinstance(__opts__['azurefs'], list): log.error('azurefs configuration is not formed as a list, skipping azurefs') return False for container in __opts__['azurefs']: if not isinstance(container, dict): log.error( 'One or more entries in the azurefs configuration list are ' 'not formed as a dict. Skipping azurefs: %s', container ) return False if 'account_name' not in container or 'container_name' not in container: log.error( 'An azurefs container configuration is missing either an ' 'account_name or a container_name: %s', container ) return False return True
def user_agent_info(sdk_version, custom_user_agent):
    # type: (str, str) -> str
    """Build the User-Agent string with SDK and Python version info.

    :param sdk_version: Version of the SDK being used.
    :type sdk_version: str
    :param custom_user_agent: Custom User Agent string provided by the
        developer, or ``None``.
    :type custom_user_agent: str
    :return: User Agent Info string
    :rtype: str
    """
    py_version = ".".join(str(part) for part in sys.version_info[0:3])
    base_agent = "ask-python/{} Python/{}".format(sdk_version, py_version)
    if custom_user_agent is None:
        return base_agent
    return base_agent + " {}".format(custom_user_agent)
Return the user agent info along with the SDK and Python Version information. :param sdk_version: Version of the SDK being used. :type sdk_version: str :param custom_user_agent: Custom User Agent string provided by the developer. :type custom_user_agent: str :return: User Agent Info string :rtype: str
Below is the the instruction that describes the task: ### Input: Return the user agent info along with the SDK and Python Version information. :param sdk_version: Version of the SDK being used. :type sdk_version: str :param custom_user_agent: Custom User Agent string provided by the developer. :type custom_user_agent: str :return: User Agent Info string :rtype: str ### Response: def user_agent_info(sdk_version, custom_user_agent): # type: (str, str) -> str """Return the user agent info along with the SDK and Python Version information. :param sdk_version: Version of the SDK being used. :type sdk_version: str :param custom_user_agent: Custom User Agent string provided by the developer. :type custom_user_agent: str :return: User Agent Info string :rtype: str """ python_version = ".".join(str(x) for x in sys.version_info[0:3]) user_agent = "ask-python/{} Python/{}".format( sdk_version, python_version) if custom_user_agent is None: return user_agent else: return user_agent + " {}".format(custom_user_agent)
def t_LITERAL(self, t):
    # NOTE: in ply, a token rule's docstring IS the regular expression used
    # to match the token -- it must not be edited as documentation. The
    # pattern matches a double- or single-quoted string that may contain
    # backslash escape sequences.
    r'(\"([^\\\n]|(\\.))*?\")|\'([^\\\n]|(\\.))*?\''
    # Strip the surrounding quote characters from the matched lexeme.
    s = t.value[1:-1]
    # Supported escape sequences -> the literal character they denote.
    maps = {
        't': '\t',
        'r': '\r',
        'n': '\n',
        '\\': '\\',
        '\'': '\'',
        '"': '\"'
    }
    i = 0
    length = len(s)
    val = ''
    while i < length:
        if s[i] == '\\':
            # Consume the backslash and inspect the escaped character.
            i += 1
            if s[i] in maps:
                val += maps[s[i]]
            else:
                # Any escape outside the table above is a parse error.
                msg = 'Cannot escape character: %s' % s[i]
                raise ThriftParserError(msg)
        else:
            val += s[i]
        i += 1

    # Replace the raw lexeme with the fully unescaped string value.
    t.value = val
    return t
r'(\"([^\\\n]|(\\.))*?\")|\'([^\\\n]|(\\.))*?\
Below is the the instruction that describes the task: ### Input: r'(\"([^\\\n]|(\\.))*?\")|\'([^\\\n]|(\\.))*?\ ### Response: def t_LITERAL(self, t): r'(\"([^\\\n]|(\\.))*?\")|\'([^\\\n]|(\\.))*?\'' s = t.value[1:-1] maps = { 't': '\t', 'r': '\r', 'n': '\n', '\\': '\\', '\'': '\'', '"': '\"' } i = 0 length = len(s) val = '' while i < length: if s[i] == '\\': i += 1 if s[i] in maps: val += maps[s[i]] else: msg = 'Cannot escape character: %s' % s[i] raise ThriftParserError(msg) else: val += s[i] i += 1 t.value = val return t
def medial_axis(self, resolution=None, clip=None):
    """
    Find the approximate medial axis based on a voronoi diagram
    of evenly spaced points on the boundary of the polygon.

    Parameters
    ----------
    resolution : None or float
      Distance between each sample on the polygon boundary;
      defaults to ``self.scale / 1000.0``.
    clip : None, or (2,) float
      Min, max number of samples

    Returns
    ----------
    medial : Path2D object
      Contains only the medial axis of the path.
    """
    from .exchange.misc import edges_to_path

    if resolution is None:
        resolution = self.scale / 1000.0

    # Build one Path2D per closed region, then merge them.
    medials = []
    for polygon in self.polygons_full:
        edges, vertices = polygons.medial_axis(polygon, resolution, clip)
        medials.append(Path2D(**edges_to_path(edges=edges,
                                              vertices=vertices)))
    return concatenate(medials)
Find the approximate medial axis based on a voronoi diagram of evenly spaced points on the boundary of the polygon. Parameters ---------- resolution : None or float Distance between each sample on the polygon boundary clip : None, or (2,) float Min, max number of samples Returns ---------- medial : Path2D object Contains only medial axis of Path
Below is the the instruction that describes the task: ### Input: Find the approximate medial axis based on a voronoi diagram of evenly spaced points on the boundary of the polygon. Parameters ---------- resolution : None or float Distance between each sample on the polygon boundary clip : None, or (2,) float Min, max number of samples Returns ---------- medial : Path2D object Contains only medial axis of Path ### Response: def medial_axis(self, resolution=None, clip=None): """ Find the approximate medial axis based on a voronoi diagram of evenly spaced points on the boundary of the polygon. Parameters ---------- resolution : None or float Distance between each sample on the polygon boundary clip : None, or (2,) float Min, max number of samples Returns ---------- medial : Path2D object Contains only medial axis of Path """ if resolution is None: resolution = self.scale / 1000.0 # convert the edges to Path2D kwargs from .exchange.misc import edges_to_path # edges and vertices edge_vert = [polygons.medial_axis(i, resolution, clip) for i in self.polygons_full] # create a Path2D object for each region medials = [Path2D(**edges_to_path( edges=e, vertices=v)) for e, v in edge_vert] # get a single Path2D of medial axis medial = concatenate(medials) return medial
def mixedToUnder(s):  # pragma: no cover
    """
    Convert a MixedCase name to under_score form.

    Sample:
    >>> mixedToUnder("FooBarBaz")
    'foo_bar_baz'

    Special case for ID:
    >>> mixedToUnder("FooBarID")
    'foo_bar_id'
    """
    # Normalize a trailing 'ID' first so it becomes '_id' rather than 'i_d'.
    if s.endswith('ID'):
        return mixedToUnder(s[:-2] + "_id")
    result = _mixedToUnderRE.sub(mixedToUnderSub, s)
    # A leading capital produces a spurious leading underscore; drop it.
    return result[1:] if result.startswith('_') else result
Sample: >>> mixedToUnder("FooBarBaz") 'foo_bar_baz' Special case for ID: >>> mixedToUnder("FooBarID") 'foo_bar_id'
Below is the the instruction that describes the task: ### Input: Sample: >>> mixedToUnder("FooBarBaz") 'foo_bar_baz' Special case for ID: >>> mixedToUnder("FooBarID") 'foo_bar_id' ### Response: def mixedToUnder(s): # pragma: no cover """ Sample: >>> mixedToUnder("FooBarBaz") 'foo_bar_baz' Special case for ID: >>> mixedToUnder("FooBarID") 'foo_bar_id' """ if s.endswith('ID'): return mixedToUnder(s[:-2] + "_id") trans = _mixedToUnderRE.sub(mixedToUnderSub, s) if trans.startswith('_'): trans = trans[1:] return trans
def get_val(dataset, timestamp=None):
    """
    Turn the string value of a dataset into an appropriate value,
    be it a decimal value, array or time series.

    If a timestamp is passed to this function, return the values
    appropriate to the requested times.

    If the timestamp is *before* the start of the timeseries data,
    return None.

    If the timestamp is *after* the end of the timeseries data,
    return the last value (forward-filled).

    NOTE(review): an earlier version of this docstring described a
    ``raw`` flag; no such parameter exists in this implementation.
    """
    if dataset.type == 'array':
        #TODO: design a mechanism to retrieve this data if it's stored externally
        return json.loads(dataset.value)
    elif dataset.type == 'descriptor':
        return str(dataset.value)
    elif dataset.type == 'scalar':
        return Decimal(str(dataset.value))
    elif dataset.type == 'timeseries':
        #TODO: design a mechanism to retrieve this data if it's stored externally
        val = dataset.value
        # Seasonal series are stored with a placeholder year (seasonal_key,
        # default '9999') which is swapped for seasonal_year (default '1678',
        # the lowest year pandas accepts) before parsing.
        seasonal_year = config.get('DEFAULT','seasonal_year', '1678')
        seasonal_key = config.get('DEFAULT', 'seasonal_key', '9999')
        val = dataset.value.replace(seasonal_key, seasonal_year)
        timeseries = pd.read_json(val, convert_axes=True)
        if timestamp is None:
            # No timestamp requested: hand back the whole DataFrame.
            return timeseries
        else:
            try:
                idx = timeseries.index
                #Seasonal timeseries are stored in the year
                #1678 (the lowest year pandas allows for valid times).
                #Therefore if the timeseries is seasonal,
                #the request must be a seasonal request, not a
                #standard request
                if type(idx) == pd.DatetimeIndex:
                    if set(idx.year) == set([int(seasonal_year)]):
                        # Rewrite the requested timestamps into the
                        # seasonal year so they can match the index.
                        if isinstance(timestamp, list):
                            seasonal_timestamp = []
                            for t in timestamp:
                                t_1900 = t.replace(year=int(seasonal_year))
                                seasonal_timestamp.append(t_1900)
                            timestamp = seasonal_timestamp
                        else:
                            timestamp = [timestamp.replace(year=int(seasonal_year))]
                # Forward-fill: a timestamp between samples takes the
                # previous sample; one before the first sample yields NaN.
                pandas_ts = timeseries.reindex(timestamp, method='ffill')
                #If there are no values at all, just return None
                if len(pandas_ts.dropna()) == 0:
                    return None
                #Replace all numpy NAN values with None
                pandas_ts = pandas_ts.where(pandas_ts.notnull(), None)
                # Multiple columns means each row is an array value.
                val_is_array = False
                if len(pandas_ts.columns) > 1:
                    val_is_array = True
                if val_is_array:
                    if type(timestamp) is list and len(timestamp) == 1:
                        ret_val = pandas_ts.loc[timestamp[0]].values.tolist()
                    else:
                        ret_val = pandas_ts.loc[timestamp].values.tolist()
                else:
                    col_name = pandas_ts.loc[timestamp].columns[0]
                    if type(timestamp) is list and len(timestamp) == 1:
                        ret_val = pandas_ts.loc[timestamp[0]].loc[col_name]
                    else:
                        ret_val = pandas_ts.loc[timestamp][col_name].values.tolist()
                return ret_val
            except Exception as e:
                # Best-effort: bad timestamps are logged, and None is
                # returned implicitly rather than raising to the caller.
                log.critical("Unable to retrive data. Check timestamps.")
                log.critical(e)
Turn the string value of a dataset into an appropriate value, be it a decimal value, array or time series. If a timestamp is passed to this function, return the values appropriate to the requested times. If the timestamp is *before* the start of the timeseries data, return None If the timestamp is *after* the end of the timeseries data, return the last value. The raw flag indicates whether timeseries should be returned raw -- exactly as they are in the DB (a timeseries being a list of timeseries data objects, for example) or as a single python dictionary
Below is the the instruction that describes the task: ### Input: Turn the string value of a dataset into an appropriate value, be it a decimal value, array or time series. If a timestamp is passed to this function, return the values appropriate to the requested times. If the timestamp is *before* the start of the timeseries data, return None If the timestamp is *after* the end of the timeseries data, return the last value. The raw flag indicates whether timeseries should be returned raw -- exactly as they are in the DB (a timeseries being a list of timeseries data objects, for example) or as a single python dictionary ### Response: def get_val(dataset, timestamp=None): """ Turn the string value of a dataset into an appropriate value, be it a decimal value, array or time series. If a timestamp is passed to this function, return the values appropriate to the requested times. If the timestamp is *before* the start of the timeseries data, return None If the timestamp is *after* the end of the timeseries data, return the last value. 
The raw flag indicates whether timeseries should be returned raw -- exactly as they are in the DB (a timeseries being a list of timeseries data objects, for example) or as a single python dictionary """ if dataset.type == 'array': #TODO: design a mechansim to retrieve this data if it's stored externally return json.loads(dataset.value) elif dataset.type == 'descriptor': return str(dataset.value) elif dataset.type == 'scalar': return Decimal(str(dataset.value)) elif dataset.type == 'timeseries': #TODO: design a mechansim to retrieve this data if it's stored externally val = dataset.value seasonal_year = config.get('DEFAULT','seasonal_year', '1678') seasonal_key = config.get('DEFAULT', 'seasonal_key', '9999') val = dataset.value.replace(seasonal_key, seasonal_year) timeseries = pd.read_json(val, convert_axes=True) if timestamp is None: return timeseries else: try: idx = timeseries.index #Seasonal timeseries are stored in the year #1678 (the lowest year pandas allows for valid times). #Therefore if the timeseries is seasonal, #the request must be a seasonal request, not a #standard request if type(idx) == pd.DatetimeIndex: if set(idx.year) == set([int(seasonal_year)]): if isinstance(timestamp, list): seasonal_timestamp = [] for t in timestamp: t_1900 = t.replace(year=int(seasonal_year)) seasonal_timestamp.append(t_1900) timestamp = seasonal_timestamp else: timestamp = [timestamp.replace(year=int(seasonal_year))] pandas_ts = timeseries.reindex(timestamp, method='ffill') #If there are no values at all, just return None if len(pandas_ts.dropna()) == 0: return None #Replace all numpy NAN values with None pandas_ts = pandas_ts.where(pandas_ts.notnull(), None) val_is_array = False if len(pandas_ts.columns) > 1: val_is_array = True if val_is_array: if type(timestamp) is list and len(timestamp) == 1: ret_val = pandas_ts.loc[timestamp[0]].values.tolist() else: ret_val = pandas_ts.loc[timestamp].values.tolist() else: col_name = pandas_ts.loc[timestamp].columns[0] if 
type(timestamp) is list and len(timestamp) == 1: ret_val = pandas_ts.loc[timestamp[0]].loc[col_name] else: ret_val = pandas_ts.loc[timestamp][col_name].values.tolist() return ret_val except Exception as e: log.critical("Unable to retrive data. Check timestamps.") log.critical(e)
def event(self, interface_id, address, value_key, value):
    """If a device emits some sort event, we will handle it here."""
    key_upper = value_key.upper()
    LOG.debug("RPCFunctions.event: interface_id = %s, address = %s, value_key = %s, value = %s" % (
        interface_id, address, key_upper, str(value)))
    # The trailing token of the interface id selects the remote.
    remote = interface_id.split('-')[-1]
    self.devices_all[remote][address].event(interface_id, key_upper, value)
    if self.eventcallback:
        self.eventcallback(interface_id=interface_id,
                           address=address,
                           value_key=key_upper,
                           value=value)
    return True
If a device emits some sort event, we will handle it here.
Below is the the instruction that describes the task: ### Input: If a device emits some sort event, we will handle it here. ### Response: def event(self, interface_id, address, value_key, value): """If a device emits some sort event, we will handle it here.""" LOG.debug("RPCFunctions.event: interface_id = %s, address = %s, value_key = %s, value = %s" % ( interface_id, address, value_key.upper(), str(value))) self.devices_all[interface_id.split( '-')[-1]][address].event(interface_id, value_key.upper(), value) if self.eventcallback: self.eventcallback(interface_id=interface_id, address=address, value_key=value_key.upper(), value=value) return True
def txn_useNonce(self, server_url, timestamp, salt):
    """Return whether this nonce is present, and if it is, then
    remove it from the set.

    str -> bool"""
    # Reject nonces whose timestamp is too far from now in either direction.
    if abs(timestamp - time.time()) > nonce.SKEW:
        return False
    try:
        self.db_add_nonce(server_url, timestamp, salt)
    except self.exceptions.IntegrityError:
        # Uniqueness violation: this nonce has already been used.
        return False
    # Insert succeeded, so the nonce was fresh.
    return True
Return whether this nonce is present, and if it is, then remove it from the set. str -> bool
Below is the the instruction that describes the task: ### Input: Return whether this nonce is present, and if it is, then remove it from the set. str -> bool ### Response: def txn_useNonce(self, server_url, timestamp, salt): """Return whether this nonce is present, and if it is, then remove it from the set. str -> bool""" if abs(timestamp - time.time()) > nonce.SKEW: return False try: self.db_add_nonce(server_url, timestamp, salt) except self.exceptions.IntegrityError: # The key uniqueness check failed return False else: # The nonce was successfully added return True
def arg_string_from_dict(arg_dict, **kwds):
    """
    Build a GraphQL argument string from a dictionary of filters,
    optionally merged with extra keyword arguments (keywords win on
    key collisions).
    """
    # Merge the explicit dict with any keyword overrides.
    combined = dict(arg_dict)
    combined.update(kwds)
    # Values are JSON-encoded so strings get quoted correctly.
    pairs = ["{}: {}".format(key, json.dumps(value))
             for key, value in combined.items()]
    return ", ".join(pairs)
This function takes a series of ditionaries and creates an argument string for a graphql query
Below is the the instruction that describes the task: ### Input: This function takes a series of ditionaries and creates an argument string for a graphql query ### Response: def arg_string_from_dict(arg_dict, **kwds): """ This function takes a series of ditionaries and creates an argument string for a graphql query """ # the filters dictionary filters = { **arg_dict, **kwds, } # return the correctly formed string return ", ".join("{}: {}".format(key, json.dumps(value)) for key,value in filters.items())
def cmd(self, command, args, prefix=None):
    """
    Sends a command to the server.

    :param command: IRC code to send.
    :type command: unicode
    :param args: arguments to pass with the command.
    :type args: basestring
    :param prefix: optional prefix to prepend to the command.
    :type prefix: str or None
    """
    # An absent prefix contributes nothing after stripping.
    prefix = u'' if prefix is None else prefix
    self._send(u'{0} {1} {2}'.format(prefix, command, args).strip())
Sends a command to the server. :param command: IRC code to send. :type command: unicode :param args: arguments to pass with the command. :type args: basestring :param prefix: optional prefix to prepend to the command. :type prefix: str or None
Below is the the instruction that describes the task: ### Input: Sends a command to the server. :param command: IRC code to send. :type command: unicode :param args: arguments to pass with the command. :type args: basestring :param prefix: optional prefix to prepend to the command. :type prefix: str or None ### Response: def cmd(self, command, args, prefix=None): """ Sends a command to the server. :param command: IRC code to send. :type command: unicode :param args: arguments to pass with the command. :type args: basestring :param prefix: optional prefix to prepend to the command. :type prefix: str or None """ if prefix is None: prefix = u'' raw_cmd = u'{0} {1} {2}'.format(prefix, command, args).strip() self._send(raw_cmd)
def array_metadata_to_n5(array_metadata):
    '''Convert array metadata from zarr to N5 format.

    The *array_metadata* dict is modified in place and also returned.
    Raises ``TypeError`` for dtypes numpy cannot resolve and
    ``ValueError`` for zarr features N5 does not support (non-zero
    fill value, non-C order, filters).
    '''
    # Rename zarr keys to their N5 equivalents.
    for f, t in zarr_to_n5_keys:
        array_metadata[t] = array_metadata[f]
        del array_metadata[f]
    del array_metadata['zarr_format']
    try:
        dtype = np.dtype(array_metadata['dataType'])
    except TypeError:  # pragma: no cover
        raise TypeError(
            "data type %s not supported by N5" % array_metadata['dataType'])
    # Normalize the dtype to its canonical numpy name.
    array_metadata['dataType'] = dtype.name
    # N5 lists dimensions and block sizes in the reverse order to zarr.
    array_metadata['dimensions'] = array_metadata['dimensions'][::-1]
    array_metadata['blockSize'] = array_metadata['blockSize'][::-1]
    if 'fill_value' in array_metadata:
        if array_metadata['fill_value'] != 0 and array_metadata['fill_value'] is not None:
            raise ValueError("N5 only supports fill_value == 0 (for now)")
        del array_metadata['fill_value']
    if 'order' in array_metadata:
        if array_metadata['order'] != 'C':
            raise ValueError("zarr N5 storage only stores arrays in C order (for now)")
        del array_metadata['order']
    if 'filters' in array_metadata:
        if array_metadata['filters'] != [] and array_metadata['filters'] is not None:
            raise ValueError("N5 storage does not support zarr filters")
        del array_metadata['filters']
    assert 'compression' in array_metadata
    # Translate the compressor configuration into N5's representation.
    compressor_config = array_metadata['compression']
    compressor_config = compressor_config_to_n5(compressor_config)
    array_metadata['compression'] = compressor_config
    return array_metadata
Convert array metadata from zarr to N5 format.
Below is the the instruction that describes the task: ### Input: Convert array metadata from zarr to N5 format. ### Response: def array_metadata_to_n5(array_metadata): '''Convert array metadata from zarr to N5 format.''' for f, t in zarr_to_n5_keys: array_metadata[t] = array_metadata[f] del array_metadata[f] del array_metadata['zarr_format'] try: dtype = np.dtype(array_metadata['dataType']) except TypeError: # pragma: no cover raise TypeError( "data type %s not supported by N5" % array_metadata['dataType']) array_metadata['dataType'] = dtype.name array_metadata['dimensions'] = array_metadata['dimensions'][::-1] array_metadata['blockSize'] = array_metadata['blockSize'][::-1] if 'fill_value' in array_metadata: if array_metadata['fill_value'] != 0 and array_metadata['fill_value'] is not None: raise ValueError("N5 only supports fill_value == 0 (for now)") del array_metadata['fill_value'] if 'order' in array_metadata: if array_metadata['order'] != 'C': raise ValueError("zarr N5 storage only stores arrays in C order (for now)") del array_metadata['order'] if 'filters' in array_metadata: if array_metadata['filters'] != [] and array_metadata['filters'] is not None: raise ValueError("N5 storage does not support zarr filters") del array_metadata['filters'] assert 'compression' in array_metadata compressor_config = array_metadata['compression'] compressor_config = compressor_config_to_n5(compressor_config) array_metadata['compression'] = compressor_config return array_metadata
def sbd_to_steem(self, sbd=0, price=0, account=None):
    '''
    Uses the ticker to get the lowest ask and moves
    the sbd at that price.
    '''
    account = account or self.mainaccount
    # Bail out early if the balances cannot be confirmed.
    if not self.check_balances(account):
        return False
    if sbd == 0:
        # Zero means "sell the whole SBD balance".
        sbd = self.sbdbal
    elif sbd > self.sbdbal:
        self.msg.error_message("INSUFFICIENT FUNDS. CURRENT SBD BAL: "
                               + str(self.sbdbal))
        return False
    if price == 0:
        # Zero means "derive the price from the current lowest ask".
        price = 1 / self.dex_ticker()['lowest_ask']
    try:
        self.dex.sell(sbd, "SBD", price, account=account)
    except Exception as e:
        self.msg.error_message("COULD NOT SELL SBD FOR STEEM: " + str(e))
        return False
    self.msg.message("TRANSFERED " + str(sbd)
                     + " SBD TO STEEM AT THE PRICE OF: $" + str(price))
    return True
Uses the ticker to get the lowest ask and moves the sbd at that price.
Below is the the instruction that describes the task: ### Input: Uses the ticker to get the lowest ask and moves the sbd at that price. ### Response: def sbd_to_steem(self, sbd=0, price=0, account=None): ''' Uses the ticker to get the lowest ask and moves the sbd at that price. ''' if not account: account = self.mainaccount if self.check_balances(account): if sbd == 0: sbd = self.sbdbal elif sbd > self.sbdbal: self.msg.error_message("INSUFFICIENT FUNDS. CURRENT SBD BAL: " + str(self.sbdbal)) return False if price == 0: price = 1 / self.dex_ticker()['lowest_ask'] try: self.dex.sell(sbd, "SBD", price, account=account) except Exception as e: self.msg.error_message("COULD NOT SELL SBD FOR STEEM: " + str(e)) return False else: self.msg.message("TRANSFERED " + str(sbd) + " SBD TO STEEM AT THE PRICE OF: $" + str(price)) return True else: return False
def load_params(self, filename, ctx=None, allow_missing=False, ignore_extra=False):
    """[Deprecated] Please use load_parameters.

    Load parameters from file.

    Parameters
    ----------
    filename : str
        Path to parameter file.
    ctx : Context or list of Context, default cpu()
        Context(s) to initialize loaded parameters on.
    allow_missing : bool, default False
        Whether to silently skip loading parameters not represented in the file.
    ignore_extra : bool, default False
        Whether to silently ignore parameters from the file that are not
        present in this Block.
    """
    # Warn on every call, then delegate to the replacement API unchanged.
    warnings.warn("load_params is deprecated. Please use load_parameters.")
    self.load_parameters(filename, ctx, allow_missing, ignore_extra)
[Deprecated] Please use load_parameters. Load parameters from file. filename : str Path to parameter file. ctx : Context or list of Context, default cpu() Context(s) to initialize loaded parameters on. allow_missing : bool, default False Whether to silently skip loading parameters not represents in the file. ignore_extra : bool, default False Whether to silently ignore parameters from the file that are not present in this Block.
Below is the the instruction that describes the task: ### Input: [Deprecated] Please use load_parameters. Load parameters from file. filename : str Path to parameter file. ctx : Context or list of Context, default cpu() Context(s) to initialize loaded parameters on. allow_missing : bool, default False Whether to silently skip loading parameters not represents in the file. ignore_extra : bool, default False Whether to silently ignore parameters from the file that are not present in this Block. ### Response: def load_params(self, filename, ctx=None, allow_missing=False, ignore_extra=False): """[Deprecated] Please use load_parameters. Load parameters from file. filename : str Path to parameter file. ctx : Context or list of Context, default cpu() Context(s) to initialize loaded parameters on. allow_missing : bool, default False Whether to silently skip loading parameters not represents in the file. ignore_extra : bool, default False Whether to silently ignore parameters from the file that are not present in this Block. """ warnings.warn("load_params is deprecated. Please use load_parameters.") self.load_parameters(filename, ctx, allow_missing, ignore_extra)
def create_from_response_pdu(resp_pdu, req_pdu):
    """ Create instance from response PDU.

    The request PDU is required as well: the quantity of registers read
    is taken from its last two bytes.

    :param resp_pdu: Byte array with response PDU.
    :param req_pdu: Byte array with request PDU.
    :return: Instance of :class:`ReadInputRegisters`.
    """
    read_input_registers = ReadInputRegisters()
    # Quantity is the final big-endian unsigned short of the request PDU.
    read_input_registers.quantity = struct.unpack('>H', req_pdu[-2:])[0]

    # One TYPE_CHAR per register; skip the 2-byte response header.
    fmt = '>' + (conf.TYPE_CHAR * read_input_registers.quantity)
    read_input_registers.data = list(struct.unpack(fmt, resp_pdu[2:]))

    return read_input_registers
Create instance from response PDU. Response PDU is required together with the number of registers read. :param resp_pdu: Byte array with request PDU. :param quantity: Number of coils read. :return: Instance of :class:`ReadCoils`.
Below is the the instruction that describes the task: ### Input: Create instance from response PDU. Response PDU is required together with the number of registers read. :param resp_pdu: Byte array with request PDU. :param quantity: Number of coils read. :return: Instance of :class:`ReadCoils`. ### Response: def create_from_response_pdu(resp_pdu, req_pdu): """ Create instance from response PDU. Response PDU is required together with the number of registers read. :param resp_pdu: Byte array with request PDU. :param quantity: Number of coils read. :return: Instance of :class:`ReadCoils`. """ read_input_registers = ReadInputRegisters() read_input_registers.quantity = struct.unpack('>H', req_pdu[-2:])[0] fmt = '>' + (conf.TYPE_CHAR * read_input_registers.quantity) read_input_registers.data = list(struct.unpack(fmt, resp_pdu[2:])) return read_input_registers
def replace_payment_transaction_by_id(cls, payment_transaction_id, payment_transaction, **kwargs):
    """Replace PaymentTransaction

    Replace all attributes of PaymentTransaction
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async=True
    >>> thread = api.replace_payment_transaction_by_id(payment_transaction_id, payment_transaction, async=True)
    >>> result = thread.get()

    :param async bool
    :param str payment_transaction_id: ID of paymentTransaction to replace (required)
    :param PaymentTransaction payment_transaction: Attributes of paymentTransaction to replace (required)
    :return: PaymentTransaction
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    # The helper inspects the async kwarg itself: it returns a request
    # thread when async=True and the response data otherwise, so a single
    # call covers both cases.
    return cls._replace_payment_transaction_by_id_with_http_info(
        payment_transaction_id, payment_transaction, **kwargs)
Replace PaymentTransaction Replace all attributes of PaymentTransaction This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.replace_payment_transaction_by_id(payment_transaction_id, payment_transaction, async=True) >>> result = thread.get() :param async bool :param str payment_transaction_id: ID of paymentTransaction to replace (required) :param PaymentTransaction payment_transaction: Attributes of paymentTransaction to replace (required) :return: PaymentTransaction If the method is called asynchronously, returns the request thread.
Below is the the instruction that describes the task: ### Input: Replace PaymentTransaction Replace all attributes of PaymentTransaction This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.replace_payment_transaction_by_id(payment_transaction_id, payment_transaction, async=True) >>> result = thread.get() :param async bool :param str payment_transaction_id: ID of paymentTransaction to replace (required) :param PaymentTransaction payment_transaction: Attributes of paymentTransaction to replace (required) :return: PaymentTransaction If the method is called asynchronously, returns the request thread. ### Response: def replace_payment_transaction_by_id(cls, payment_transaction_id, payment_transaction, **kwargs): """Replace PaymentTransaction Replace all attributes of PaymentTransaction This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.replace_payment_transaction_by_id(payment_transaction_id, payment_transaction, async=True) >>> result = thread.get() :param async bool :param str payment_transaction_id: ID of paymentTransaction to replace (required) :param PaymentTransaction payment_transaction: Attributes of paymentTransaction to replace (required) :return: PaymentTransaction If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._replace_payment_transaction_by_id_with_http_info(payment_transaction_id, payment_transaction, **kwargs) else: (data) = cls._replace_payment_transaction_by_id_with_http_info(payment_transaction_id, payment_transaction, **kwargs) return data
def timeline(ctx, pager, limit, twtfile, sorting, timeout, porcelain, source, cache, force_update): """Retrieve your personal timeline.""" if source: source_obj = ctx.obj["conf"].get_source_by_nick(source) if not source_obj: logger.debug("Not following {0}, trying as URL".format(source)) source_obj = Source(source, source) sources = [source_obj] else: sources = ctx.obj["conf"].following tweets = [] if cache: try: with Cache.discover(update_interval=ctx.obj["conf"].timeline_update_interval) as cache: force_update = force_update or not cache.is_valid if force_update: tweets = get_remote_tweets(sources, limit, timeout, cache) else: logger.debug("Multiple calls to 'timeline' within {0} seconds. Skipping update".format( cache.update_interval)) # Behold, almighty list comprehensions! (I might have gone overboard here…) tweets = list(chain.from_iterable([cache.get_tweets(source.url) for source in sources])) except OSError as e: logger.debug(e) tweets = get_remote_tweets(sources, limit, timeout) else: tweets = get_remote_tweets(sources, limit, timeout) if twtfile and not source: source = Source(ctx.obj["conf"].nick, ctx.obj["conf"].twturl, file=twtfile) tweets.extend(get_local_tweets(source, limit)) if not tweets: return tweets = sort_and_truncate_tweets(tweets, sorting, limit) if pager: click.echo_via_pager(style_timeline(tweets, porcelain)) else: click.echo(style_timeline(tweets, porcelain))
Retrieve your personal timeline.
Below is the the instruction that describes the task: ### Input: Retrieve your personal timeline. ### Response: def timeline(ctx, pager, limit, twtfile, sorting, timeout, porcelain, source, cache, force_update): """Retrieve your personal timeline.""" if source: source_obj = ctx.obj["conf"].get_source_by_nick(source) if not source_obj: logger.debug("Not following {0}, trying as URL".format(source)) source_obj = Source(source, source) sources = [source_obj] else: sources = ctx.obj["conf"].following tweets = [] if cache: try: with Cache.discover(update_interval=ctx.obj["conf"].timeline_update_interval) as cache: force_update = force_update or not cache.is_valid if force_update: tweets = get_remote_tweets(sources, limit, timeout, cache) else: logger.debug("Multiple calls to 'timeline' within {0} seconds. Skipping update".format( cache.update_interval)) # Behold, almighty list comprehensions! (I might have gone overboard here…) tweets = list(chain.from_iterable([cache.get_tweets(source.url) for source in sources])) except OSError as e: logger.debug(e) tweets = get_remote_tweets(sources, limit, timeout) else: tweets = get_remote_tweets(sources, limit, timeout) if twtfile and not source: source = Source(ctx.obj["conf"].nick, ctx.obj["conf"].twturl, file=twtfile) tweets.extend(get_local_tweets(source, limit)) if not tweets: return tweets = sort_and_truncate_tweets(tweets, sorting, limit) if pager: click.echo_via_pager(style_timeline(tweets, porcelain)) else: click.echo(style_timeline(tweets, porcelain))
def request(self, method, url, bearer_auth=True, **req_kwargs): ''' A loose wrapper around Requests' :class:`~requests.sessions.Session` which injects OAuth 2.0 parameters. :param method: A string representation of the HTTP method to be used. :type method: str :param url: The resource to be requested. :type url: str :param bearer_auth: Whether to use Bearer Authentication or not, defaults to `True`. :type bearer_auth: bool :param \*\*req_kwargs: Keyworded args to be passed down to Requests. :type \*\*req_kwargs: dict ''' req_kwargs.setdefault('params', {}) url = self._set_url(url) if is_basestring(req_kwargs['params']): req_kwargs['params'] = dict(parse_qsl(req_kwargs['params'])) if bearer_auth and self.access_token is not None: req_kwargs['auth'] = OAuth2Auth(self.access_token) else: req_kwargs['params'].update({self.access_token_key: self.access_token}) req_kwargs.setdefault('timeout', OAUTH2_DEFAULT_TIMEOUT) return super(OAuth2Session, self).request(method, url, **req_kwargs)
A loose wrapper around Requests' :class:`~requests.sessions.Session` which injects OAuth 2.0 parameters. :param method: A string representation of the HTTP method to be used. :type method: str :param url: The resource to be requested. :type url: str :param bearer_auth: Whether to use Bearer Authentication or not, defaults to `True`. :type bearer_auth: bool :param \*\*req_kwargs: Keyworded args to be passed down to Requests. :type \*\*req_kwargs: dict
Below is the the instruction that describes the task: ### Input: A loose wrapper around Requests' :class:`~requests.sessions.Session` which injects OAuth 2.0 parameters. :param method: A string representation of the HTTP method to be used. :type method: str :param url: The resource to be requested. :type url: str :param bearer_auth: Whether to use Bearer Authentication or not, defaults to `True`. :type bearer_auth: bool :param \*\*req_kwargs: Keyworded args to be passed down to Requests. :type \*\*req_kwargs: dict ### Response: def request(self, method, url, bearer_auth=True, **req_kwargs): ''' A loose wrapper around Requests' :class:`~requests.sessions.Session` which injects OAuth 2.0 parameters. :param method: A string representation of the HTTP method to be used. :type method: str :param url: The resource to be requested. :type url: str :param bearer_auth: Whether to use Bearer Authentication or not, defaults to `True`. :type bearer_auth: bool :param \*\*req_kwargs: Keyworded args to be passed down to Requests. :type \*\*req_kwargs: dict ''' req_kwargs.setdefault('params', {}) url = self._set_url(url) if is_basestring(req_kwargs['params']): req_kwargs['params'] = dict(parse_qsl(req_kwargs['params'])) if bearer_auth and self.access_token is not None: req_kwargs['auth'] = OAuth2Auth(self.access_token) else: req_kwargs['params'].update({self.access_token_key: self.access_token}) req_kwargs.setdefault('timeout', OAUTH2_DEFAULT_TIMEOUT) return super(OAuth2Session, self).request(method, url, **req_kwargs)
def get_what_txt(self): """ Overrides the base behaviour defined in ValidationError in order to add details about the function. :return: """ return 'input [{var}] for function [{func}]'.format(var=self.get_variable_str(), func=self.validator.get_validated_func_display_name())
Overrides the base behaviour defined in ValidationError in order to add details about the function. :return:
Below is the the instruction that describes the task: ### Input: Overrides the base behaviour defined in ValidationError in order to add details about the function. :return: ### Response: def get_what_txt(self): """ Overrides the base behaviour defined in ValidationError in order to add details about the function. :return: """ return 'input [{var}] for function [{func}]'.format(var=self.get_variable_str(), func=self.validator.get_validated_func_display_name())
def run(data): """Quantitaive isoforms expression by eXpress""" name = dd.get_sample_name(data) in_bam = dd.get_transcriptome_bam(data) config = data['config'] if not in_bam: logger.info("Transcriptome-mapped BAM file not found, skipping eXpress.") return data out_dir = os.path.join(dd.get_work_dir(data), "express", name) out_file = os.path.join(out_dir, name + ".xprs") express = config_utils.get_program("express", data['config']) strand = _set_stranded_flag(in_bam, data) if not file_exists(out_file): gtf_fasta = gtf.gtf_to_fasta(dd.get_gtf_file(data), dd.get_ref_file(data)) with tx_tmpdir(data) as tmp_dir: with file_transaction(data, out_dir) as tx_out_dir: bam_file = _prepare_bam_file(in_bam, tmp_dir, config) cmd = ("{express} --no-update-check -o {tx_out_dir} {strand} {gtf_fasta} {bam_file}") do.run(cmd.format(**locals()), "Run express on %s." % in_bam, {}) shutil.move(os.path.join(out_dir, "results.xprs"), out_file) eff_count_file = _get_column(out_file, out_file.replace(".xprs", "_eff.counts"), 7, data=data) tpm_file = _get_column(out_file, out_file.replace("xprs", "tpm"), 14, data=data) fpkm_file = _get_column(out_file, out_file.replace("xprs", "fpkm"), 10, data=data) data = dd.set_express_counts(data, eff_count_file) data = dd.set_express_tpm(data, tpm_file) data = dd.set_express_fpkm(data, fpkm_file) return data
Quantitaive isoforms expression by eXpress
Below is the the instruction that describes the task: ### Input: Quantitaive isoforms expression by eXpress ### Response: def run(data): """Quantitaive isoforms expression by eXpress""" name = dd.get_sample_name(data) in_bam = dd.get_transcriptome_bam(data) config = data['config'] if not in_bam: logger.info("Transcriptome-mapped BAM file not found, skipping eXpress.") return data out_dir = os.path.join(dd.get_work_dir(data), "express", name) out_file = os.path.join(out_dir, name + ".xprs") express = config_utils.get_program("express", data['config']) strand = _set_stranded_flag(in_bam, data) if not file_exists(out_file): gtf_fasta = gtf.gtf_to_fasta(dd.get_gtf_file(data), dd.get_ref_file(data)) with tx_tmpdir(data) as tmp_dir: with file_transaction(data, out_dir) as tx_out_dir: bam_file = _prepare_bam_file(in_bam, tmp_dir, config) cmd = ("{express} --no-update-check -o {tx_out_dir} {strand} {gtf_fasta} {bam_file}") do.run(cmd.format(**locals()), "Run express on %s." % in_bam, {}) shutil.move(os.path.join(out_dir, "results.xprs"), out_file) eff_count_file = _get_column(out_file, out_file.replace(".xprs", "_eff.counts"), 7, data=data) tpm_file = _get_column(out_file, out_file.replace("xprs", "tpm"), 14, data=data) fpkm_file = _get_column(out_file, out_file.replace("xprs", "fpkm"), 10, data=data) data = dd.set_express_counts(data, eff_count_file) data = dd.set_express_tpm(data, tpm_file) data = dd.set_express_fpkm(data, fpkm_file) return data
def new(cls, access_token, environment='prod'): '''Create new storage service client. Arguments: environment(str): The service environment to be used for the client. 'prod' or 'dev'. access_token(str): The access token used to authenticate with the service Returns: A storage_service.Client instance ''' api_client = ApiClient.new(access_token, environment) return cls(api_client)
Create new storage service client. Arguments: environment(str): The service environment to be used for the client. 'prod' or 'dev'. access_token(str): The access token used to authenticate with the service Returns: A storage_service.Client instance
Below is the the instruction that describes the task: ### Input: Create new storage service client. Arguments: environment(str): The service environment to be used for the client. 'prod' or 'dev'. access_token(str): The access token used to authenticate with the service Returns: A storage_service.Client instance ### Response: def new(cls, access_token, environment='prod'): '''Create new storage service client. Arguments: environment(str): The service environment to be used for the client. 'prod' or 'dev'. access_token(str): The access token used to authenticate with the service Returns: A storage_service.Client instance ''' api_client = ApiClient.new(access_token, environment) return cls(api_client)
def flush_template(context, declaration=None, reconstruct=True): """Emit the code needed to flush the buffer. Will only emit the yield and clear if the buffer is known to be dirty. """ if declaration is None: declaration = Line(0, '') if {'text', 'dirty'}.issubset(context.flag): yield declaration.clone(line='yield "".join(_buffer)') context.flag.remove('text') # This will force a new buffer to be constructed. context.flag.remove('dirty') if reconstruct: for i in ensure_buffer(context): yield i if declaration.stripped == 'yield': yield declaration
Emit the code needed to flush the buffer. Will only emit the yield and clear if the buffer is known to be dirty.
Below is the the instruction that describes the task: ### Input: Emit the code needed to flush the buffer. Will only emit the yield and clear if the buffer is known to be dirty. ### Response: def flush_template(context, declaration=None, reconstruct=True): """Emit the code needed to flush the buffer. Will only emit the yield and clear if the buffer is known to be dirty. """ if declaration is None: declaration = Line(0, '') if {'text', 'dirty'}.issubset(context.flag): yield declaration.clone(line='yield "".join(_buffer)') context.flag.remove('text') # This will force a new buffer to be constructed. context.flag.remove('dirty') if reconstruct: for i in ensure_buffer(context): yield i if declaration.stripped == 'yield': yield declaration
def uppass(tree, feature): """ UPPASS traverses the tree starting from the tips and going up till the root, and assigns to each parent node a state based on the states of its child nodes. if N is a tip: S(N) <- state of N else: L, R <- left and right children of N UPPASS(L) UPPASS(R) if S(L) intersects with S(R): S(N) <- intersection(S(L), S(R)) else: S(N) <- union(S(L), S(R)) :param tree: ete3.Tree, the tree of interest :param feature: str, character for which the parsimonious states are reconstructed :return: void, adds get_personalized_feature_name(feature, BU_PARS_STATES) feature to the tree nodes """ ps_feature = get_personalized_feature_name(feature, BU_PARS_STATES) for node in tree.traverse('postorder'): if not node.is_leaf(): children_states = get_most_common_states(getattr(child, ps_feature) for child in node.children) node_states = getattr(node, ps_feature) state_intersection = node_states & children_states node.add_feature(ps_feature, state_intersection if state_intersection else node_states)
UPPASS traverses the tree starting from the tips and going up till the root, and assigns to each parent node a state based on the states of its child nodes. if N is a tip: S(N) <- state of N else: L, R <- left and right children of N UPPASS(L) UPPASS(R) if S(L) intersects with S(R): S(N) <- intersection(S(L), S(R)) else: S(N) <- union(S(L), S(R)) :param tree: ete3.Tree, the tree of interest :param feature: str, character for which the parsimonious states are reconstructed :return: void, adds get_personalized_feature_name(feature, BU_PARS_STATES) feature to the tree nodes
Below is the the instruction that describes the task: ### Input: UPPASS traverses the tree starting from the tips and going up till the root, and assigns to each parent node a state based on the states of its child nodes. if N is a tip: S(N) <- state of N else: L, R <- left and right children of N UPPASS(L) UPPASS(R) if S(L) intersects with S(R): S(N) <- intersection(S(L), S(R)) else: S(N) <- union(S(L), S(R)) :param tree: ete3.Tree, the tree of interest :param feature: str, character for which the parsimonious states are reconstructed :return: void, adds get_personalized_feature_name(feature, BU_PARS_STATES) feature to the tree nodes ### Response: def uppass(tree, feature): """ UPPASS traverses the tree starting from the tips and going up till the root, and assigns to each parent node a state based on the states of its child nodes. if N is a tip: S(N) <- state of N else: L, R <- left and right children of N UPPASS(L) UPPASS(R) if S(L) intersects with S(R): S(N) <- intersection(S(L), S(R)) else: S(N) <- union(S(L), S(R)) :param tree: ete3.Tree, the tree of interest :param feature: str, character for which the parsimonious states are reconstructed :return: void, adds get_personalized_feature_name(feature, BU_PARS_STATES) feature to the tree nodes """ ps_feature = get_personalized_feature_name(feature, BU_PARS_STATES) for node in tree.traverse('postorder'): if not node.is_leaf(): children_states = get_most_common_states(getattr(child, ps_feature) for child in node.children) node_states = getattr(node, ps_feature) state_intersection = node_states & children_states node.add_feature(ps_feature, state_intersection if state_intersection else node_states)
def saltenviron(environ): ''' Make Salt's opts dict and the APIClient available in the WSGI environ ''' if '__opts__' not in locals(): import salt.config __opts__ = salt.config.client_config( os.environ.get('SALT_MASTER_CONFIG', '/etc/salt/master')) environ['SALT_OPTS'] = __opts__ environ['SALT_APIClient'] = salt.netapi.NetapiClient(__opts__)
Make Salt's opts dict and the APIClient available in the WSGI environ
Below is the the instruction that describes the task: ### Input: Make Salt's opts dict and the APIClient available in the WSGI environ ### Response: def saltenviron(environ): ''' Make Salt's opts dict and the APIClient available in the WSGI environ ''' if '__opts__' not in locals(): import salt.config __opts__ = salt.config.client_config( os.environ.get('SALT_MASTER_CONFIG', '/etc/salt/master')) environ['SALT_OPTS'] = __opts__ environ['SALT_APIClient'] = salt.netapi.NetapiClient(__opts__)
def op_paths(self, path_base): # type: (Union[str, UrlPath]) -> Generator[Tuple[UrlPath, Operation]] """ Return all operations stored in containers. """ path_base += self.path_prefix for operation in self._operations: for op_path in operation.op_paths(path_base): yield op_path
Return all operations stored in containers.
Below is the the instruction that describes the task: ### Input: Return all operations stored in containers. ### Response: def op_paths(self, path_base): # type: (Union[str, UrlPath]) -> Generator[Tuple[UrlPath, Operation]] """ Return all operations stored in containers. """ path_base += self.path_prefix for operation in self._operations: for op_path in operation.op_paths(path_base): yield op_path
def add_html_link(app, pagename, templatename, context, doctree): """As each page is built, collect page names for the sitemap""" app.sitemap_links.append(pagename + ".html")
As each page is built, collect page names for the sitemap
Below is the the instruction that describes the task: ### Input: As each page is built, collect page names for the sitemap ### Response: def add_html_link(app, pagename, templatename, context, doctree): """As each page is built, collect page names for the sitemap""" app.sitemap_links.append(pagename + ".html")
def result(self, timeout=None): """Return the result of the call that the future represents. Args: timeout: The number of seconds to wait for the result if the future isn't done. If None, then there is no limit on the wait time. Returns: The result of the call that the future represents. Raises: TimeoutError: If the future didn't finish executing before the given timeout. exceptions.Exception: If the call raised then that exception will be raised. """ if self._state == self.RUNNING: self._context.wait_all_futures([self], timeout) return self.__get_result()
Return the result of the call that the future represents. Args: timeout: The number of seconds to wait for the result if the future isn't done. If None, then there is no limit on the wait time. Returns: The result of the call that the future represents. Raises: TimeoutError: If the future didn't finish executing before the given timeout. exceptions.Exception: If the call raised then that exception will be raised.
Below is the the instruction that describes the task: ### Input: Return the result of the call that the future represents. Args: timeout: The number of seconds to wait for the result if the future isn't done. If None, then there is no limit on the wait time. Returns: The result of the call that the future represents. Raises: TimeoutError: If the future didn't finish executing before the given timeout. exceptions.Exception: If the call raised then that exception will be raised. ### Response: def result(self, timeout=None): """Return the result of the call that the future represents. Args: timeout: The number of seconds to wait for the result if the future isn't done. If None, then there is no limit on the wait time. Returns: The result of the call that the future represents. Raises: TimeoutError: If the future didn't finish executing before the given timeout. exceptions.Exception: If the call raised then that exception will be raised. """ if self._state == self.RUNNING: self._context.wait_all_futures([self], timeout) return self.__get_result()
def midi_outputs(self): """ :return: A list of MIDI output :class:`Ports`. """ return self.client.get_ports(is_midi=True, is_physical=True, is_output=True)
:return: A list of MIDI output :class:`Ports`.
Below is the the instruction that describes the task: ### Input: :return: A list of MIDI output :class:`Ports`. ### Response: def midi_outputs(self): """ :return: A list of MIDI output :class:`Ports`. """ return self.client.get_ports(is_midi=True, is_physical=True, is_output=True)
def unregister(self): """ Unregister this system from the insights service """ machine_id = generate_machine_id() try: logger.debug("Unregistering %s", machine_id) url = self.api_url + "/v1/systems/" + machine_id net_logger.info("DELETE %s", url) self.session.delete(url) logger.info( "Successfully unregistered from the Red Hat Insights Service") return True except requests.ConnectionError as e: logger.debug(e) logger.error("Could not unregister this system") return False
Unregister this system from the insights service
Below is the the instruction that describes the task: ### Input: Unregister this system from the insights service ### Response: def unregister(self): """ Unregister this system from the insights service """ machine_id = generate_machine_id() try: logger.debug("Unregistering %s", machine_id) url = self.api_url + "/v1/systems/" + machine_id net_logger.info("DELETE %s", url) self.session.delete(url) logger.info( "Successfully unregistered from the Red Hat Insights Service") return True except requests.ConnectionError as e: logger.debug(e) logger.error("Could not unregister this system") return False
def set_transform(self, type_, *args, **kwargs): """ Create a new transform of *type* and assign it to this node. All extra arguments are used in the construction of the transform. Parameters ---------- type_ : str The transform type. *args : tuple Arguments. **kwargs : dict Keywoard arguments. """ self.transform = create_transform(type_, *args, **kwargs)
Create a new transform of *type* and assign it to this node. All extra arguments are used in the construction of the transform. Parameters ---------- type_ : str The transform type. *args : tuple Arguments. **kwargs : dict Keywoard arguments.
Below is the the instruction that describes the task: ### Input: Create a new transform of *type* and assign it to this node. All extra arguments are used in the construction of the transform. Parameters ---------- type_ : str The transform type. *args : tuple Arguments. **kwargs : dict Keywoard arguments. ### Response: def set_transform(self, type_, *args, **kwargs): """ Create a new transform of *type* and assign it to this node. All extra arguments are used in the construction of the transform. Parameters ---------- type_ : str The transform type. *args : tuple Arguments. **kwargs : dict Keywoard arguments. """ self.transform = create_transform(type_, *args, **kwargs)
def get_image(image_id, flags=FLAGS.ALL, **conn): """ Orchestrates all the calls required to fully build out an EC2 Image (AMI, AKI, ARI) { "Architecture": "x86_64", "Arn": "arn:aws:ec2:us-east-1::image/ami-11111111", "BlockDeviceMappings": [], "CreationDate": "2013-07-11T16:04:06.000Z", "Description": "...", "Hypervisor": "xen", "ImageId": "ami-11111111", "ImageLocation": "111111111111/...", "ImageType": "machine", "KernelId": "aki-88888888", "LaunchPermissions": [], "Name": "...", "OwnerId": "111111111111", "ProductCodes": [], "Public": false, "RamdiskId": {}, "RootDeviceName": "/dev/sda1", "RootDeviceType": "ebs", "SriovNetSupport": "simple", "State": "available", "Tags": [], "VirtualizationType": "hvm", "_version": 1 } :param image_id: str ami id :param flags: By default, set to ALL fields :param conn: dict containing enough information to make a connection to the desired account. Must at least have 'assume_role' key. :return: dict containing a fully built out image. """ image = dict(ImageId=image_id) conn['region'] = conn.get('region', 'us-east-1') return registry.build_out(flags, image, **conn)
Orchestrates all the calls required to fully build out an EC2 Image (AMI, AKI, ARI) { "Architecture": "x86_64", "Arn": "arn:aws:ec2:us-east-1::image/ami-11111111", "BlockDeviceMappings": [], "CreationDate": "2013-07-11T16:04:06.000Z", "Description": "...", "Hypervisor": "xen", "ImageId": "ami-11111111", "ImageLocation": "111111111111/...", "ImageType": "machine", "KernelId": "aki-88888888", "LaunchPermissions": [], "Name": "...", "OwnerId": "111111111111", "ProductCodes": [], "Public": false, "RamdiskId": {}, "RootDeviceName": "/dev/sda1", "RootDeviceType": "ebs", "SriovNetSupport": "simple", "State": "available", "Tags": [], "VirtualizationType": "hvm", "_version": 1 } :param image_id: str ami id :param flags: By default, set to ALL fields :param conn: dict containing enough information to make a connection to the desired account. Must at least have 'assume_role' key. :return: dict containing a fully built out image.
Below is the the instruction that describes the task: ### Input: Orchestrates all the calls required to fully build out an EC2 Image (AMI, AKI, ARI) { "Architecture": "x86_64", "Arn": "arn:aws:ec2:us-east-1::image/ami-11111111", "BlockDeviceMappings": [], "CreationDate": "2013-07-11T16:04:06.000Z", "Description": "...", "Hypervisor": "xen", "ImageId": "ami-11111111", "ImageLocation": "111111111111/...", "ImageType": "machine", "KernelId": "aki-88888888", "LaunchPermissions": [], "Name": "...", "OwnerId": "111111111111", "ProductCodes": [], "Public": false, "RamdiskId": {}, "RootDeviceName": "/dev/sda1", "RootDeviceType": "ebs", "SriovNetSupport": "simple", "State": "available", "Tags": [], "VirtualizationType": "hvm", "_version": 1 } :param image_id: str ami id :param flags: By default, set to ALL fields :param conn: dict containing enough information to make a connection to the desired account. Must at least have 'assume_role' key. :return: dict containing a fully built out image. ### Response: def get_image(image_id, flags=FLAGS.ALL, **conn): """ Orchestrates all the calls required to fully build out an EC2 Image (AMI, AKI, ARI) { "Architecture": "x86_64", "Arn": "arn:aws:ec2:us-east-1::image/ami-11111111", "BlockDeviceMappings": [], "CreationDate": "2013-07-11T16:04:06.000Z", "Description": "...", "Hypervisor": "xen", "ImageId": "ami-11111111", "ImageLocation": "111111111111/...", "ImageType": "machine", "KernelId": "aki-88888888", "LaunchPermissions": [], "Name": "...", "OwnerId": "111111111111", "ProductCodes": [], "Public": false, "RamdiskId": {}, "RootDeviceName": "/dev/sda1", "RootDeviceType": "ebs", "SriovNetSupport": "simple", "State": "available", "Tags": [], "VirtualizationType": "hvm", "_version": 1 } :param image_id: str ami id :param flags: By default, set to ALL fields :param conn: dict containing enough information to make a connection to the desired account. Must at least have 'assume_role' key. :return: dict containing a fully built out image. 
""" image = dict(ImageId=image_id) conn['region'] = conn.get('region', 'us-east-1') return registry.build_out(flags, image, **conn)
def feincms_breadcrumbs(page, include_self=True): """ Generate a list of the page's ancestors suitable for use as breadcrumb navigation. By default, generates an unordered list with the id "breadcrumbs" - override breadcrumbs.html to change this. :: {% feincms_breadcrumbs feincms_page %} """ if not page or not isinstance(page, Page): raise ValueError("feincms_breadcrumbs must be called with a valid Page object") ancs = page.get_ancestors() bc = [(anc.get_absolute_url(), anc.short_title()) for anc in ancs] if include_self: bc.append((None, page.short_title())) return {"trail": bc}
Generate a list of the page's ancestors suitable for use as breadcrumb navigation. By default, generates an unordered list with the id "breadcrumbs" - override breadcrumbs.html to change this. :: {% feincms_breadcrumbs feincms_page %}
Below is the the instruction that describes the task: ### Input: Generate a list of the page's ancestors suitable for use as breadcrumb navigation. By default, generates an unordered list with the id "breadcrumbs" - override breadcrumbs.html to change this. :: {% feincms_breadcrumbs feincms_page %} ### Response: def feincms_breadcrumbs(page, include_self=True): """ Generate a list of the page's ancestors suitable for use as breadcrumb navigation. By default, generates an unordered list with the id "breadcrumbs" - override breadcrumbs.html to change this. :: {% feincms_breadcrumbs feincms_page %} """ if not page or not isinstance(page, Page): raise ValueError("feincms_breadcrumbs must be called with a valid Page object") ancs = page.get_ancestors() bc = [(anc.get_absolute_url(), anc.short_title()) for anc in ancs] if include_self: bc.append((None, page.short_title())) return {"trail": bc}
def append(self, data): """Append data to a file.""" data_length = len(data) if self._size + data_length > self._flush_size: self.flush() if not self._exclusive and data_length > _FILE_POOL_MAX_SIZE: raise errors.Error( "Too big input %s (%s)." % (data_length, _FILE_POOL_MAX_SIZE)) else: self._buffer.append(data) self._size += data_length if self._size > self._flush_size: self.flush()
Append data to a file.
Below is the the instruction that describes the task: ### Input: Append data to a file. ### Response: def append(self, data): """Append data to a file.""" data_length = len(data) if self._size + data_length > self._flush_size: self.flush() if not self._exclusive and data_length > _FILE_POOL_MAX_SIZE: raise errors.Error( "Too big input %s (%s)." % (data_length, _FILE_POOL_MAX_SIZE)) else: self._buffer.append(data) self._size += data_length if self._size > self._flush_size: self.flush()