function
stringlengths
11
56k
repo_name
stringlengths
5
60
features
list
def n_meas(self):
    """
    The sum of the entries of every tuple in this domain.

    :rtype: `int`
    """
    return self._n_meas
QInfer/python-qinfer
[ 91, 32, 91, 22, 1344992565 ]
def n_elements(self):
    """
    The length of each tuple in this domain.

    :rtype: `int`
    """
    return self._n_elements
QInfer/python-qinfer
[ 91, 32, 91, 22, 1344992565 ]
def is_continuous(self):
    """
    Whether the domain has uncountably many values.
    This domain is discrete, so this is always ``False``.

    :type: `bool`
    """
    return False
QInfer/python-qinfer
[ 91, 32, 91, 22, 1344992565 ]
def is_finite(self):
    """
    Whether the domain contains finitely many points.
    This domain is finite, so this is always ``True``.

    :type: `bool`
    """
    return True
QInfer/python-qinfer
[ 91, 32, 91, 22, 1344992565 ]
def dtype(self):
    """
    The numpy dtype of a single element of the domain.

    Each element is a length-``n_elements`` integer array stored in a
    single structured field named ``'k'``.

    :type: `np.dtype`
    """
    # ``np.int`` was deprecated in NumPy 1.20 and removed in 1.24; the
    # builtin ``int`` is the equivalent (platform default integer) dtype.
    return np.dtype([('k', int, self.n_elements)])
QInfer/python-qinfer
[ 91, 32, 91, 22, 1344992565 ]
def n_members(self):
    """
    Returns the number of members in the domain if it `is_finite`,
    otherwise, returns `None`.

    :type: ``int``
    """
    # Stars-and-bars count of length-``n_elements`` tuples of non-negative
    # integers summing to ``n_meas``.  ``math.comb`` is exact integer
    # arithmetic, whereas ``scipy.special.binom`` goes through floats and
    # can round incorrectly for large arguments.
    from math import comb
    return comb(self.n_meas + self.n_elements - 1, self.n_elements - 1)
QInfer/python-qinfer
[ 91, 32, 91, 22, 1344992565 ]
def example_point(self):
    """
    A single point known to lie in the domain (no other guarantees),
    handy for testing.  Returned as a size-1 ``np.array`` of type
    ``dtype``.

    :type: ``np.ndarray``
    """
    # Put the whole measurement count in the first slot, zeros elsewhere.
    tup = [self.n_meas] + [0] * (self.n_elements - 1)
    return np.array([(tup,)], dtype=self.dtype)
QInfer/python-qinfer
[ 91, 32, 91, 22, 1344992565 ]
def values(self):
    """
    Returns an `np.array` of type `self.dtype` containing some values from
    the domain.  For domains where ``is_finite`` is ``True``, every element
    of the domain appears exactly once.

    :rtype: `np.ndarray`
    """
    # Enumeration scheme due to Jared Goguen,
    # http://stackoverflow.com/a/37712597/1082565
    basis = np.identity(self.n_elements, dtype=int)
    partitions = np.empty((self.n_members, self.n_elements), dtype=int)
    combos = combinations_with_replacement(basis, self.n_meas)
    for row, combo in enumerate(combos):
        partitions[row, :] = sum(combo)
    # Convert to dtype before returning.
    return self.from_regular_array(partitions)
QInfer/python-qinfer
[ 91, 32, 91, 22, 1344992565 ]
def to_regular_array(self, A):
    """
    Converts an array of type `self.dtype` into a plain `int` array with
    an extra trailing axis indexing the tuple entries.

    :param np.ndarray A: An `np.array` of type `self.dtype`.

    :rtype: `np.ndarray`
    """
    # Could be a static method, but kept as an instance method to mirror
    # from_regular_array.
    n_fields = len(A.dtype.names)
    flat_view = A.view((int, n_fields))
    return flat_view.reshape(A.shape + (-1,))
QInfer/python-qinfer
[ 91, 32, 91, 22, 1344992565 ]
def __init__(self, request, exc_type, exc_value, frames):
    # Initialize the base Django ExceptionReporter with no traceback object
    # (None), then attach the pre-captured frame list directly.
    # NOTE(review): assumes ExceptionReporter tolerates tb=None — confirm
    # against the Django version in use.
    ExceptionReporter.__init__(self, request, exc_type, exc_value, None)
    self.frames = frames
dcramer/django-db-log
[ 116, 25, 116, 8, 1253216083 ]
def get_traceback_html(self):
    "Return HTML code for traceback."

    # Flag template-related errors so the 500 template renders its extra
    # debugging sections.
    if issubclass(self.exc_type, TemplateDoesNotExist):
        self.template_does_not_exist = True
    if (settings.TEMPLATE_DEBUG and hasattr(self.exc_value, 'source') and
        isinstance(self.exc_value, TemplateSyntaxError)):
        self.get_template_exception_info()

    frames = self.get_traceback_frames()

    # For UnicodeErrors, extract a short snippet around the offending
    # characters to display as a hint.
    unicode_hint = ''
    if issubclass(self.exc_type, UnicodeError):
        start = getattr(self.exc_value, 'start', None)
        end = getattr(self.exc_value, 'end', None)
        if start is not None and end is not None:
            unicode_str = self.exc_value.args[1]
            unicode_hint = smart_unicode(unicode_str[max(start-5, 0):min(end+5, len(unicode_str))], 'ascii', errors='replace')

    # Render Django's technical 500 template with the collected context.
    # NOTE(review): frames[-1] raises IndexError when frames is empty —
    # presumably frames is always non-empty here; confirm.
    t = Template(TECHNICAL_500_TEMPLATE, name='Technical 500 template')
    c = Context({
        'exception_type': self.exc_type.__name__,
        'exception_value': smart_unicode(self.exc_value, errors='replace'),
        'unicode_hint': unicode_hint,
        'frames': frames,
        'lastframe': frames[-1],
        'request': self.request,
        'template_info': self.template_info,
        'template_does_not_exist': self.template_does_not_exist,
    })
    return t.render(c)
dcramer/django-db-log
[ 116, 25, 116, 8, 1253216083 ]
def ResponseFromHandler(self, path):
    """Build a canned, non-cacheable HTML response for any handler path."""
    greeting = "Hello from handler"
    return self.MakeResponse(greeting, "text/html", False)
catapult-project/catapult
[ 1835, 570, 1835, 1039, 1429033745 ]
def setUp(self):
    # Record the size of a known media file so size-dependent assertions
    # (e.g. Content-Range checks) can compare against the real byte count.
    super(MemoryCacheHTTPServerTest, self).setUp()
    self._test_filename = 'bear.webm'
    test_file = os.path.join(util.GetUnittestDataDir(), 'bear.webm')
    self._test_file_size = os.stat(test_file).st_size
catapult-project/catapult
[ 1835, 570, 1835, 1039, 1429033745 ]
def CheckContentHeaders(self, content_range_request, content_range_response,
                        content_length_response):
    # Issue a ranged XHR from the test page and verify the server's
    # Content-Range and Content-Length response headers.
    self._tab.ExecuteJavaScript(
        """
        var loaded = false;
        var xmlhttp = new XMLHttpRequest();
        xmlhttp.onload = function(e) {
          loaded = true;
        };
        // Avoid cached content by appending unique URL param.
        xmlhttp.open('GET', {{ url }} + "?t=" + Date.now(), true);
        xmlhttp.setRequestHeader('Range', {{ range }});
        xmlhttp.send();
        """,
        url=self.UrlOfUnittestFile(self._test_filename),
        range='bytes=%s' % content_range_request)
    self._tab.WaitForJavaScriptCondition('loaded', timeout=5)
    content_range = self._tab.EvaluateJavaScript(
        'xmlhttp.getResponseHeader("Content-Range");')
    # Expected Content-Range embeds the actual file size, e.g. 'bytes 0-10/N'.
    content_range_response = 'bytes %s/%d' % (content_range_response,
                                              self._test_file_size)
    self.assertEqual(content_range, content_range_response)
    content_length = self._tab.EvaluateJavaScript(
        'xmlhttp.getResponseHeader("Content-Length");')
    self.assertEqual(content_length, str(content_length_response))
catapult-project/catapult
[ 1835, 570, 1835, 1039, 1429033745 ]
def __init__(self, project):
    # Bind this loader to its project and start with empty memoization
    # caches, keyed by filename or module/stylesheet/script name.
    self.project = project
    self.stripped_js_by_filename = {}
    self.loaded_modules = {}
    self.loaded_raw_scripts = {}
    self.loaded_style_sheets = {}
    self.loaded_images = {}
endlessm/chromium-browser
[ 21, 16, 21, 3, 1435959644 ]
def source_paths(self):
    """Base directories that module lookups search, as a list."""
    owning_project = self.project
    return owning_project.source_paths
endlessm/chromium-browser
[ 21, 16, 21, 3, 1435959644 ]
def FindResourceGivenAbsolutePath(self, absolute_path, binary=False):
    """Returns a Resource for the given absolute path.

    Among all source paths that are prefixes of *absolute_path*, the
    longest (most specific) one wins; returns None when nothing matches.
    """
    candidate_paths = [p for p in self.source_paths
                       if absolute_path.startswith(p)]
    if not candidate_paths:
        return None
    # Longest match wins.  The original used ``sort(lambda x, y: ...)``,
    # a Python 2 cmp function, which raises TypeError under Python 3;
    # ``max(key=len)`` is equivalent (ties are impossible, since two
    # equal-length prefixes of the same string are identical).
    longest_candidate = max(candidate_paths, key=len)
    return resource_module.Resource(longest_candidate, absolute_path, binary)
endlessm/chromium-browser
[ 21, 16, 21, 3, 1435959644 ]
def _FindResourceGivenNameAndSuffix(self, requested_name, extension,
                                    return_resource=False):
    """Searches for a file and reads its contents.

    Args:
      requested_name: The name of the resource that was requested.
      extension: The extension for this requested resource.

    Returns:
      A (path, contents) pair, or the Resource itself when
      return_resource is True.
    """
    # Dotted name to relative path: 'a.b.c' -> 'a/b/c' + extension.
    relative_path = requested_name.replace('.', os.sep) + extension
    resource = self.FindResourceGivenRelativePath(relative_path)
    if return_resource:
        return resource
    if not resource:
        return None, None
    return _read_file(resource.absolute_path)
endlessm/chromium-browser
[ 21, 16, 21, 3, 1435959644 ]
def LoadModule(self, module_name=None, module_filename=None,
               excluded_scripts=None):
    # Load (and cache) an HTML module identified either by name or by
    # filename; exactly one of the two must be provided.
    assert bool(module_name) ^ bool(module_filename), (
        'Must provide either module_name or module_filename.')
    if module_filename:
        resource = self.FindResource(module_filename)
        if not resource:
            raise Exception('Could not find %s in %s' % (
                module_filename, repr(self.source_paths)))
        module_name = resource.name
    else:
        resource = None  # Will be set if we end up needing to load.
    if module_name in self.loaded_modules:
        assert self.loaded_modules[module_name].contents
        return self.loaded_modules[module_name]
    if not resource:  # happens when module_name was given
        resource = self.FindModuleResource(module_name)
        if not resource:
            raise module.DepsException('No resource for module "%s"' %
                                       module_name)
    m = html_module.HTMLModule(self, module_name, resource)
    # Cache before parsing so that recursive loads see this module.
    self.loaded_modules[module_name] = m

    # Fake it, this is probably either polymer.min.js or platform.js which are
    # actually .js files....
    if resource.absolute_path.endswith('.js'):
        return m

    m.Parse(excluded_scripts)
    m.Load(excluded_scripts)
    return m
endlessm/chromium-browser
[ 21, 16, 21, 3, 1435959644 ]
def LoadStyleSheet(self, name):
    """Return the StyleSheet for *name*, loading and caching it on first
    use; raises module.DepsException when no .css file is found."""
    if name in self.loaded_style_sheets:
        return self.loaded_style_sheets[name]
    resource = self._FindResourceGivenNameAndSuffix(
        name, '.css', return_resource=True)
    if not resource:
        raise module.DepsException(
            'Could not find a file for stylesheet %s' % name)
    sheet = style_sheet_module.StyleSheet(self, name, resource)
    sheet.load()
    self.loaded_style_sheets[name] = sheet
    return sheet
endlessm/chromium-browser
[ 21, 16, 21, 3, 1435959644 ]
def GetStrippedJSForFilename(self, filename, early_out_if_no_py_vulcanize):
    """Return comment-stripped JS for *filename*, memoized per filename.

    When *early_out_if_no_py_vulcanize* is set and the leading 4096 bytes
    do not mention 'py_vulcanize', returns None without stripping.
    """
    if filename in self.stripped_js_by_filename:
        return self.stripped_js_by_filename[filename]

    # NOTE: only the first 4096 bytes of the file are read and stripped.
    with open(filename, 'r') as f:
        contents = f.read(4096)
    if early_out_if_no_py_vulcanize and ('py_vulcanize' not in contents):
        return None

    stripped = strip_js_comments.StripJSComments(contents)
    self.stripped_js_by_filename[filename] = stripped
    return stripped
endlessm/chromium-browser
[ 21, 16, 21, 3, 1435959644 ]
def main():
    """
    This Python script displays a web page with test created with the
    video_quality_measurement program, which is a tool in WebRTC.

    The script requires on two external files and one Python library:
    - A HTML template file with layout and references to the json variables
      defined in this script
    - A data file in Python format, containing the following:
      - test_configuration - a dictionary of test configuration names and
        values.
      - frame_data_types - a dictionary that maps the different metrics to
        their data types.
      - frame_data - a list of dictionaries where each dictionary maps a
        metric to it's value.
    - The gviz_api.py of the Google Visualization Python API, available at
      http://code.google.com/p/google-visualization-python/

    The HTML file is shipped with the script, while the data file must be
    generated by running video_quality_measurement with the --python flag
    specified.
    """
    # NOTE: this is a Python 2 CGI script (execfile below is py2-only).
    # Parenthesized print works identically under py2 and py3.
    print('Content-type: text/html\n')  # the newline is required!

    page_template_filename = '../templates/chart_page_template.html'
    # The data files must be located in the project tree for app engine being
    # able to access them.
    data_filenames = ['../data/vp8_sw.py', '../data/vp8_hw.py']
    # Will contain info/error messages to be displayed on the resulting page.
    messages = []

    # Load the page HTML template.
    try:
        f = open(page_template_filename)
        page_template = f.read()
        f.close()
    except IOError as e:
        ShowErrorPage('Cannot open page template file: %s<br>Details: %s' %
                      (page_template_filename, e))
        return

    # Read data from external Python script files. First check that they exist.
    # BUG FIX: the original removed entries from data_filenames while
    # iterating over it, which silently skips the element after any missing
    # file; filter into a new list instead.
    existing_filenames = []
    for filename in data_filenames:
        if not os.path.exists(filename):
            messages.append('Cannot open data file: %s' % filename)
        else:
            existing_filenames.append(filename)
    data_filenames = existing_filenames

    # Read data from all existing input files.
    data_list = []
    test_configurations = []
    names = []

    for filename in data_filenames:
        read_vars = {}  # empty dictionary to load the data into.
        execfile(filename, read_vars, read_vars)

        test_configuration = read_vars['test_configuration']
        table_description = read_vars['frame_data_types']
        table_data = read_vars['frame_data']

        # Verify the data in the file loaded properly.
        if not table_description or not table_data:
            messages.append('Invalid input file: %s. Missing description list '
                            'or data dictionary variables.' % filename)
            continue

        # Frame numbers appear as number type in the data, but Chart API
        # requires values of the X-axis to be of string type.
        # Change the frame_number column data type:
        table_description['frame_number'] = ('string', 'Frame number')
        # Convert all the values to string types:
        for row in table_data:
            row['frame_number'] = str(row['frame_number'])

        # Store the unique data from this file in the high level lists.
        test_configurations.append(test_configuration)
        data_list.append(table_data)

        # Name of the test run must be present.
        test_name = FindConfiguration(test_configuration, 'name')
        if not test_name:
            # BUG FIX: the original passed filename as a second argument to
            # list.append (a TypeError); interpolate it instead.
            messages.append('Invalid input file: %s. Missing configuration '
                            'key "name"' % filename)
            continue
        names.append(test_name)

    # Create data helper and build data tables for each graph.
    helper = webrtc.data_helper.DataHelper(data_list, table_description,
                                           names, messages)

    # Loading it into gviz_api.DataTable objects and create JSON strings.
    description, data = helper.CreateConfigurationTable(test_configurations)
    configurations = gviz_api.DataTable(description, data)
    json_configurations = configurations.ToJSon()  # pylint: disable=W0612

    description, data = helper.CreateData('ssim')
    ssim = gviz_api.DataTable(description, data)
    # pylint: disable=W0612
    json_ssim_data = ssim.ToJSon(helper.GetOrdering(description))

    description, data = helper.CreateData('psnr')
    psnr = gviz_api.DataTable(description, data)
    # pylint: disable=W0612
    json_psnr_data = psnr.ToJSon(helper.GetOrdering(description))

    description, data = helper.CreateData('packets_dropped')
    packet_loss = gviz_api.DataTable(description, data)
    # pylint: disable=W0612
    json_packet_loss_data = packet_loss.ToJSon(helper.GetOrdering(description))

    description, data = helper.CreateData('bit_rate')
    # Add a column of data points for the desired bit rate to be plotted.
    # (uses test configuration from the last data set, assuming it is the same
    # for all of them)
    desired_bit_rate = FindConfiguration(test_configuration, 'bit_rate_in_kbps')
    if not desired_bit_rate:
        # BUG FIX: original message was garbled ('Cannot configuration
        # field named ...').
        ShowErrorPage('Cannot find configuration field named '
                      '"bit_rate_in_kbps"')
        return
    desired_bit_rate = int(desired_bit_rate)
    # Add new column data type description.
    description['desired_bit_rate'] = ('number', 'Desired bit rate (kbps)')
    for row in data:
        row['desired_bit_rate'] = desired_bit_rate
    bit_rate = gviz_api.DataTable(description, data)
    # pylint: disable=W0612
    json_bit_rate_data = bit_rate.ToJSon(helper.GetOrdering(description))

    # Format the messages list with newlines.
    messages = '\n'.join(messages)

    # Put the variables as JSon strings into the template.
    print(page_template % vars())
golden1232004/webrtc_new
[ 7, 5, 7, 1, 1467039620 ]
def ShowErrorPage(error_message):
    """Print a minimal HTML page wrapping *error_message* to stdout.

    The parenthesized print form behaves identically as a statement under
    Python 2 and a function call under Python 3, unlike the original
    ``print '...'`` which is a SyntaxError on Python 3.
    """
    print('<html><body>%s</body></html>' % error_message)
golden1232004/webrtc_new
[ 7, 5, 7, 1, 1467039620 ]
def GabeToCsv(filepath, csvpath):
    """
    Convert a GABE (Generic Array Binary Exchange) file into CSV
    (Comma Separated Values).

    :param filepath: path of the GABE file to read.
    :param csvpath: path of the CSV file to write.
    """
    # Instantiate the GABE reader.
    reader = ls.Gabe_rw()
    # Load the binary file; on failure nothing is written.
    if reader.Load(filepath):
        data = reader.ToList()
        # Transpose the data: columns become rows.
        data = zip(*data)
        # ``with`` guarantees the handle is closed even if a write fails
        # (the original leaked it on error), and str()+join replaces the
        # manual first-column bookkeeping.
        with open(csvpath, 'w') as out:
            for line in data:
                # NOTE: values are written verbatim — fields containing
                # commas are not quoted (same as the original behaviour).
                out.write(",".join(str(col) for col in line))
                out.write("\n")
Ifsttar/I-Simpa
[ 185, 54, 185, 114, 1395306706 ]
def get_media_url(url): try: HTTP_HEADER = { 'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:39.0) Gecko/20100101 Firefox/39.0', 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8', 'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.3', 'Accept-Encoding': 'none', 'Accept-Language': 'en-US,en;q=0.8', 'Referer': url} # 'Connection': 'keep-alive' html = net.http_GET(url, headers=HTTP_HEADER).content hiddenurl = HTMLParser().unescape(re.search('hiddenurl">(.+?)<\/span>', html, re.IGNORECASE).group(1))
mrknow/filmkodi
[ 68, 68, 68, 206, 1444160337 ]
def make_casa_testimage(infile, outname):
    # Convert a FITS file to an on-disk CASA .image using the casatools
    # ``image`` tool, copying per-channel restoring beams when the source
    # cube has varying resolution.  Requires a CASA environment.
    infile = str(infile)
    outname = str(outname)

    if not CASA_INSTALLED:
        raise Exception("Attempted to make a CASA test image in a non-CASA "
                        "environment")

    ia = image()

    ia.fromfits(infile=infile, outfile=outname, overwrite=True)
    ia.unlock()
    ia.close()
    ia.done()

    cube = SpectralCube.read(infile)

    if isinstance(cube, VaryingResolutionSpectralCube):
        ia.open(outname)
        # populate restoring beam emptily
        ia.setrestoringbeam(major={'value':1.0, 'unit':'arcsec'},
                            minor={'value':1.0, 'unit':'arcsec'},
                            pa={'value':90.0, 'unit':'deg'},
                            channel=len(cube.beams)-1,
                            polarization=-1,
                           )
        # populate each beam (hard assumption of 1 poln)
        for channum, beam in enumerate(cube.beams):
            casabdict = {'major': {'value':beam.major.to(u.deg).value, 'unit':'deg'},
                         'minor': {'value':beam.minor.to(u.deg).value, 'unit':'deg'},
                         'positionangle': {'value':beam.pa.to(u.deg).value, 'unit':'deg'}
                        }
            ia.setrestoringbeam(beam=casabdict, channel=channum, polarization=0)

        ia.unlock()
        ia.close()
        ia.done()
radio-astro-tools/spectral-cube
[ 84, 57, 84, 183, 1397489630 ]
def filename(request):
    """Indirect pytest fixture: resolve the fixture named by ``request.param``."""
    fixture_name = request.param
    return request.getfixturevalue(fixture_name)
radio-astro-tools/spectral-cube
[ 84, 57, 84, 183, 1397489630 ]
def test_casa_read_basic(memmap, bigendian):
    # Check that SpectralCube.read works for an example CASA dataset stored
    # in the tests directory. This test should NOT require CASA, whereas a
    # number of tests below require CASA to generate test datasets. The present
    # test is to ensure CASA is not required for reading.

    if bigendian:
        cube = SpectralCube.read(os.path.join(DATA, 'basic_bigendian.image'),
                                 memmap=memmap)
    else:
        cube = SpectralCube.read(os.path.join(DATA, 'basic.image'),
                                 memmap=memmap)

    assert cube.shape == (3, 4, 5)
    assert_allclose(cube.wcs.pixel_to_world_values(1, 2, 3),
                    [2.406271e+01, 2.993521e+01, 1.421911e+09])

    # Carry out an operation to make sure the underlying data array works
    cube.moment0()

    # Slice the dataset
    assert_quantity_allclose(cube.unmasked_data[0, 0, :],
                             [1, 1, 1, 1, 1] * u.Jy / u.beam)
    assert_quantity_allclose(cube.unmasked_data[0, 1, 2],
                             1 * u.Jy / u.beam)
radio-astro-tools/spectral-cube
[ 84, 57, 84, 183, 1397489630 ]
def test_casa_read_basic_nomask():
    # Make sure things work well if there is no mask in the data
    cube = SpectralCube.read(os.path.join(DATA, 'nomask.image'))
    assert cube.shape == (3, 4, 5)
    assert_allclose(cube.wcs.pixel_to_world_values(1, 2, 3),
                    [2.406271e+01, 2.993521e+01, 1.421911e+09])

    # Carry out an operation to make sure the underlying data array works
    cube.moment0()

    # Slice the dataset
    assert_quantity_allclose(cube.unmasked_data[0, 0, :],
                             [1, 1, 1, 1, 1] * u.Jy / u.beam)
    assert_quantity_allclose(cube.unmasked_data[0, 1, 2],
                             1 * u.Jy / u.beam)

    # Slice the cube
    assert_quantity_allclose(cube[:, 0, 0],
                             [1, 1, 1] * u.Jy / u.beam)
radio-astro-tools/spectral-cube
[ 84, 57, 84, 183, 1397489630 ]
def test_casa_read(filename, tmp_path):
    # Check that SpectralCube.read returns data with the same shape and values
    # if read from CASA as if read from FITS.
    cube = SpectralCube.read(filename)

    # Round-trip: FITS -> CASA image on disk -> SpectralCube.
    make_casa_testimage(filename, tmp_path / 'casa.image')

    casacube = SpectralCube.read(tmp_path / 'casa.image')

    assert casacube.shape == cube.shape
    assert_allclose(casacube.unmasked_data[:].value,
                    cube.unmasked_data[:].value)
radio-astro-tools/spectral-cube
[ 84, 57, 84, 183, 1397489630 ]
def test_casa_read_nomask(filename, tmp_path):
    # As for test_casa_read, but we remove the mask to make sure
    # that we can still read in the cubes
    cube = SpectralCube.read(filename)

    make_casa_testimage(filename, tmp_path / 'casa.image')

    # Delete the on-disk mask subtable to simulate an unmasked image.
    shutil.rmtree(tmp_path / 'casa.image' / 'mask0')

    casacube = SpectralCube.read(tmp_path / 'casa.image')

    assert casacube.shape == cube.shape
    assert_allclose(casacube.unmasked_data[:].value,
                    cube.unmasked_data[:].value)
radio-astro-tools/spectral-cube
[ 84, 57, 84, 183, 1397489630 ]
def test_casa_read_stokes(data_advs, tmp_path):
    # Check that StokesSpectralCube.read returns data with the same shape and values
    # if read from CASA as if read from FITS.
    cube = StokesSpectralCube.read(data_advs)

    make_casa_testimage(data_advs, tmp_path / 'casa.image')
    casacube = StokesSpectralCube.read(tmp_path / 'casa.image')

    # Compare only the I component of the Stokes cube.
    assert casacube.I.shape == cube.I.shape
    assert_allclose(casacube.I.unmasked_data[:].value,
                    cube.I.unmasked_data[:].value)
radio-astro-tools/spectral-cube
[ 84, 57, 84, 183, 1397489630 ]
def test_casa_mask(data_adv, tmp_path):
    # This tests the make_casa_mask function which can be used to create a mask
    # file in an existing image.

    cube = SpectralCube.read(data_adv)

    mask_array = np.array([[True, False], [False, False], [True, True]])
    bool_mask = BooleanArrayMask(mask=mask_array, wcs=cube._wcs,
                                 shape=cube.shape)
    cube = cube.with_mask(bool_mask)

    make_casa_mask(cube, str(tmp_path / 'casa.mask'), add_stokes=False,
                   append_to_image=False, overwrite=True)

    ia = casatools.image()

    ia.open(str(tmp_path / 'casa.mask'))

    casa_mask = ia.getchunk()

    coords = ia.coordsys()

    ia.unlock()
    ia.close()
    ia.done()

    # Test masks
    # Mask array is broadcasted to the cube shape. Mimic this, switch to ints,
    # and transpose to match CASA image.
    compare_mask = np.tile(mask_array, (4, 1, 1)).astype('int16').T
    assert np.all(compare_mask == casa_mask)

    # Test WCS info

    # Convert back to an astropy wcs object so transforms are dealt with.
    casa_wcs = coordsys_to_astropy_wcs(coords.torecord())
    header = casa_wcs.to_header()  # Invokes transform

    # Compare some basic properties EXCLUDING the spectral axis
    assert_allclose(cube.wcs.wcs.crval[:2], casa_wcs.wcs.crval[:2])
    assert_allclose(cube.wcs.wcs.cdelt[:2], casa_wcs.wcs.cdelt[:2])
    assert np.all(list(cube.wcs.wcs.cunit)[:2] == list(casa_wcs.wcs.cunit)[:2])
    assert np.all(list(cube.wcs.wcs.ctype)[:2] == list(casa_wcs.wcs.ctype)[:2])

    assert_allclose(cube.wcs.wcs.crpix, casa_wcs.wcs.crpix)
radio-astro-tools/spectral-cube
[ 84, 57, 84, 183, 1397489630 ]
def test_casa_mask_append(data_adv, tmp_path):
    # This tests the append option for the make_casa_mask function

    cube = SpectralCube.read(data_adv)

    mask_array = np.array([[True, False], [False, False], [True, True]])
    bool_mask = BooleanArrayMask(mask=mask_array, wcs=cube._wcs,
                                 shape=cube.shape)
    cube = cube.with_mask(bool_mask)

    make_casa_testimage(data_adv, tmp_path / 'casa.image')

    # in this case, casa.mask is the name of the mask, not its path
    make_casa_mask(cube, 'casa.mask', append_to_image=True,
                   img=str(tmp_path / 'casa.image'),
                   add_stokes=False, overwrite=True)

    # The mask should have been created inside the image directory.
    assert os.path.exists(tmp_path / 'casa.image/casa.mask')
radio-astro-tools/spectral-cube
[ 84, 57, 84, 183, 1397489630 ]
def capwords(s, sep=None):
    """capwords(s [,sep]) -> string

    Split the argument into words using split, capitalize each
    word using capitalize, and join the capitalized words using
    join.  If the optional second argument sep is absent or None,
    runs of whitespace characters are replaced by a single space
    and leading and trailing whitespace are removed, otherwise
    sep is used to split and join the words.
    """
    words = s.split(sep)
    capitalized = [word.capitalize() for word in words]
    return (sep or ' ').join(capitalized)
jameswatt2008/jameswatt2008.github.io
[ 1, 1, 1, 1, 1414399890 ]
def __init__(cls, name, bases, dct):
    # Compile the per-class placeholder regex once, at class-creation time.
    super(_TemplateMetaclass, cls).__init__(name, bases, dct)
    if 'pattern' in dct:
        # The subclass supplied a full custom pattern; use it verbatim.
        pattern = cls.pattern
    else:
        # Build the default pattern from the class's delimiter/idpattern.
        pattern = _TemplateMetaclass.pattern % {
            'delim' : _re.escape(cls.delimiter),
            'id' : cls.idpattern,
            }
    cls.pattern = _re.compile(pattern, cls.flags | _re.VERBOSE)
jameswatt2008/jameswatt2008.github.io
[ 1, 1, 1, 1, 1414399890 ]
def __init__(self, template):
    # Store the raw template string; substitution happens lazily in
    # substitute()/safe_substitute().
    self.template = template
jameswatt2008/jameswatt2008.github.io
[ 1, 1, 1, 1, 1414399890 ]
def _invalid(self, mo):
    """Raise ValueError pinpointing the line/column of a bad placeholder."""
    offset = mo.start('invalid')
    preceding = self.template[:offset].splitlines(keepends=True)
    if preceding:
        lineno = len(preceding)
        colno = offset - len(''.join(preceding[:-1]))
    else:
        lineno = colno = 1
    raise ValueError('Invalid placeholder in string: line %d, col %d' %
                     (lineno, colno))
jameswatt2008/jameswatt2008.github.io
[ 1, 1, 1, 1, 1414399890 ]
def convert(mo):
    # Substitution callback passed to pattern.sub() by Template.substitute.
    # NOTE(review): relies on ``mapping`` and ``self`` from the enclosing
    # scope — this is an inner helper lifted out of substitute().
    # Check the most common path first.
    named = mo.group('named') or mo.group('braced')
    if named is not None:
        val = mapping[named]
        # We use this idiom instead of str() because the latter will
        # fail if val is a Unicode containing non-ASCII characters.
        return '%s' % (val,)
    if mo.group('escaped') is not None:
        # '$$' collapses to a single delimiter.
        return self.delimiter
    if mo.group('invalid') is not None:
        self._invalid(mo)
    raise ValueError('Unrecognized named group in pattern',
                     self.pattern)
jameswatt2008/jameswatt2008.github.io
[ 1, 1, 1, 1, 1414399890 ]
def safe_substitute(*args, **kws):
    # Like substitute(), but never raises for missing keys or malformed
    # placeholders — they are left unchanged in the output.  ``self`` is
    # extracted from *args so a mapping key named 'self' still works.
    if not args:
        raise TypeError("descriptor 'safe_substitute' of 'Template' object "
                        "needs an argument")
    self, *args = args  # allow the "self" keyword be passed
    if len(args) > 1:
        raise TypeError('Too many positional arguments')
    if not args:
        mapping = kws
    elif kws:
        # Keyword arguments shadow the positional mapping.
        mapping = _ChainMap(kws, args[0])
    else:
        mapping = args[0]
    # Helper function for .sub()
    def convert(mo):
        named = mo.group('named') or mo.group('braced')
        if named is not None:
            try:
                # We use this idiom instead of str() because the latter
                # will fail if val is a Unicode containing non-ASCII
                return '%s' % (mapping[named],)
            except KeyError:
                # Missing key: leave the placeholder untouched.
                return mo.group()
        if mo.group('escaped') is not None:
            return self.delimiter
        if mo.group('invalid') is not None:
            # Malformed placeholder: leave it untouched instead of raising.
            return mo.group()
        raise ValueError('Unrecognized named group in pattern',
                         self.pattern)
    return self.pattern.sub(convert, self.template)
jameswatt2008/jameswatt2008.github.io
[ 1, 1, 1, 1, 1414399890 ]
def format(*args, **kwargs):
    # ``self`` and ``format_string`` are pulled out of *args positionally so
    # that user fields with those names can still be passed via **kwargs.
    if not args:
        raise TypeError("descriptor 'format' of 'Formatter' object "
                        "needs an argument")
    self, *args = args  # allow the "self" keyword be passed
    try:
        format_string, *args = args  # allow the "format_string" keyword be passed
    except ValueError:
        if 'format_string' in kwargs:
            # Deprecated spelling: format(format_string=...).
            format_string = kwargs.pop('format_string')
            import warnings
            warnings.warn("Passing 'format_string' as keyword argument is "
                          "deprecated", DeprecationWarning, stacklevel=2)
        else:
            raise TypeError("format() missing 1 required positional "
                            "argument: 'format_string'") from None
    return self.vformat(format_string, args, kwargs)
jameswatt2008/jameswatt2008.github.io
[ 1, 1, 1, 1, 1414399890 ]
def _vformat(self, format_string, args, kwargs, used_args, recursion_depth,
             auto_arg_index=0):
    # Core recursive formatter: renders ``format_string`` against
    # args/kwargs, tracking which arguments were used and how deep nested
    # format specs recurse.  ``auto_arg_index`` implements automatic '{}'
    # field numbering; it is False once manual numbering has been seen.
    if recursion_depth < 0:
        raise ValueError('Max string recursion exceeded')
    result = []
    for literal_text, field_name, format_spec, conversion in \
            self.parse(format_string):

        # output the literal text
        if literal_text:
            result.append(literal_text)

        # if there's a field, output it
        if field_name is not None:
            # this is some markup, find the object and do
            #  the formatting

            # handle arg indexing when empty field_names are given.
            if field_name == '':
                if auto_arg_index is False:
                    raise ValueError('cannot switch from manual field '
                                     'specification to automatic field '
                                     'numbering')
                field_name = str(auto_arg_index)
                auto_arg_index += 1
            elif field_name.isdigit():
                if auto_arg_index:
                    raise ValueError('cannot switch from manual field '
                                     'specification to automatic field '
                                     'numbering')
                # disable auto arg incrementing, if it gets
                # used later on, then an exception will be raised
                auto_arg_index = False

            # given the field_name, find the object it references
            #  and the argument it came from
            obj, arg_used = self.get_field(field_name, args, kwargs)
            used_args.add(arg_used)

            # do any conversion on the resulting object
            obj = self.convert_field(obj, conversion)

            # expand the format spec, if needed
            format_spec, auto_arg_index = self._vformat(
                format_spec, args, kwargs,
                used_args, recursion_depth-1,
                auto_arg_index=auto_arg_index)

            # format the object and append to the result
            result.append(self.format_field(obj, format_spec))

    return ''.join(result), auto_arg_index
jameswatt2008/jameswatt2008.github.io
[ 1, 1, 1, 1, 1414399890 ]
def check_unused_args(self, used_args, args, kwargs):
    """Hook for subclasses to flag unused arguments; the default does
    nothing."""
    return None
jameswatt2008/jameswatt2008.github.io
[ 1, 1, 1, 1, 1414399890 ]
def convert_field(self, value, conversion):
    """Apply a !s/!r/!a conversion to *value*; None means no conversion."""
    converters = {None: lambda v: v, 's': str, 'r': repr, 'a': ascii}
    converter = converters.get(conversion)
    if converter is None:
        raise ValueError("Unknown conversion specifier {0!s}".format(conversion))
    return converter(value)
jameswatt2008/jameswatt2008.github.io
[ 1, 1, 1, 1, 1414399890 ]
def parse(self, format_string):
    """Yield (literal_text, field_name, format_spec, conversion) tuples,
    delegating tokenization to the C helper in ``_string``."""
    tokenizer = _string.formatter_parser
    return tokenizer(format_string)
jameswatt2008/jameswatt2008.github.io
[ 1, 1, 1, 1, 1414399890 ]
def __init__ (self):
    # Functional test: configure rule 2560 to add every (header, value)
    # pair from HEADERS, expecting a plain 200 response.
    TestBase.__init__ (self, __file__)
    self.name = "Header Ops: Add multiple headers"
    self.request = "GET /%s/ HTTP/1.0\r\n" %(DIR)
    self.expected_error = 200
    self.conf = CONF%(globals())

    # header_op entries start at index 2; one add-op per header.
    n = 2
    for h,v in HEADERS:
        self.conf += "vserver!1!rule!2560!header_op!%d!type = add\n" %(n)
        self.conf += "vserver!1!rule!2560!header_op!%d!header = %s\n" %(n, h)
        self.conf += "vserver!1!rule!2560!header_op!%d!value = %s\n" %(n, v)
        n += 1
cherokee/webserver
[ 561, 103, 561, 433, 1318335564 ]
def setUp(self):
    # Build two users (old_user = currently logged in, new_user = target of
    # the LTI switch), a saved consumer, and an LtiUser linking new_user to
    # an LTI identity.
    super(UserManagementHelperTest, self).setUp()
    self.request = RequestFactory().post('/')
    self.old_user = UserFactory.create()
    self.new_user = UserFactory.create()
    self.new_user.save()
    self.request.user = self.old_user
    self.lti_consumer = LtiConsumer(
        consumer_name='TestConsumer',
        consumer_key='TestKey',
        consumer_secret='TestSecret'
    )
    self.lti_consumer.save()
    # NOTE(review): lti_user is intentionally not saved here — presumably
    # individual tests save it when needed; confirm.
    self.lti_user = LtiUser(
        lti_user_id='lti_user_id',
        edx_user=self.new_user
    )
miptliot/edx-platform
[ 1, 7, 1, 5, 1382087527 ]
def test_permission_denied_for_unknown_user(self, _authenticate_mock):
    # switch_user must refuse (PermissionDenied) when authenticate()
    # rejects the user.
    with self.assertRaises(PermissionDenied):
        users.switch_user(self.request, self.lti_user, self.lti_consumer)
miptliot/edx-platform
[ 1, 7, 1, 5, 1382087527 ]
def test_authenticate_called(self, _login_mock):
    # switch_user should forward username plus the LTI identifiers to
    # authenticate().
    with patch('lti_provider.users.authenticate', return_value=self.new_user) as authenticate:
        users.switch_user(self.request, self.lti_user, self.lti_consumer)
        authenticate.assert_called_with(
            username=self.new_user.username,
            lti_user_id=self.lti_user.lti_user_id,
            lti_consumer=self.lti_consumer
        )
miptliot/edx-platform
[ 1, 7, 1, 5, 1382087527 ]
def test_login_called(self, login_mock):
    # After a successful authenticate(), switch_user must log the new
    # user into the request's session.
    with patch('lti_provider.users.authenticate', return_value=self.new_user):
        users.switch_user(self.request, self.lti_user, self.lti_consumer)
        login_mock.assert_called_with(self.request, self.new_user)
miptliot/edx-platform
[ 1, 7, 1, 5, 1382087527 ]
def setUp(self):
    # Provide a saved consumer, LTI/edX ids, and a request issued by an
    # already-logged-in (old) user for the authenticate_lti_user tests.
    super(AuthenticateLtiUserTest, self).setUp()
    self.lti_consumer = LtiConsumer(
        consumer_name='TestConsumer',
        consumer_key='TestKey',
        consumer_secret='TestSecret'
    )
    self.lti_consumer.save()
    self.lti_user_id = 'lti_user_id'
    self.edx_user_id = 'edx_user_id'
    self.old_user = UserFactory.create()
    self.request = RequestFactory().post('/')
    self.request.user = self.old_user
miptliot/edx-platform
[ 1, 7, 1, 5, 1382087527 ]
def test_authentication_with_new_user(self, _create_user, switch_user):
    # When no LtiUser exists yet, authenticate_lti_user should create one
    # and then switch the session to it.
    lti_user = MagicMock()
    lti_user.edx_user_id = self.edx_user_id
    with patch('lti_provider.users.create_lti_user', return_value=lti_user) as create_user:
        users.authenticate_lti_user(self.request, self.lti_user_id, self.lti_consumer)
        create_user.assert_called_with(self.lti_user_id, self.lti_consumer)
        switch_user.assert_called_with(self.request, lti_user, self.lti_consumer)
miptliot/edx-platform
[ 1, 7, 1, 5, 1382087527 ]
def test_authentication_with_unauthenticated_user(self, create_user, switch_user):
    # An existing LtiUser whose session is not authenticated should be
    # switched (logged in) without creating a new user.
    lti_user = self.create_lti_user_model()
    self.request.user = lti_user.edx_user
    self.request.user.is_authenticated = MagicMock(return_value=False)
    users.authenticate_lti_user(self.request, self.lti_user_id, self.lti_consumer)
    self.assertFalse(create_user.called)
    switch_user.assert_called_with(self.request, lti_user, self.lti_consumer)
miptliot/edx-platform
[ 1, 7, 1, 5, 1382087527 ]
def setUp(self):
    # Only a saved consumer is needed; users are minted by the code
    # under test (create_lti_user).
    super(CreateLtiUserTest, self).setUp()
    self.lti_consumer = LtiConsumer(
        consumer_name='TestConsumer',
        consumer_key='TestKey',
        consumer_secret='TestSecret'
    )
    self.lti_consumer.save()
miptliot/edx-platform
[ 1, 7, 1, 5, 1382087527 ]
def test_create_lti_user_creates_correct_user(self, uuid_mock, _username_mock):
    # create_lti_user should mint exactly one edX user named from the
    # mocked uuid, with a matching @lti.example.com email.
    users.create_lti_user('lti_user_id', self.lti_consumer)
    self.assertEqual(User.objects.count(), 1)
    user = User.objects.get(username='edx_id')
    self.assertEqual(user.email, 'edx_id@lti.example.com')
    uuid_mock.assert_called_with()
miptliot/edx-platform
[ 1, 7, 1, 5, 1382087527 ]
def test_unique_username_created(self, username_mock):
    """A username collision retries the generator until a free name is found."""
    # Pre-create a user that collides with the first generated username.
    User(username='edx_id').save()
    users.create_lti_user('lti_user_id', self.lti_consumer)
    # First generated name collided, so the generator must be called twice.
    self.assertEqual(username_mock.call_count, 2)
    self.assertEqual(User.objects.count(), 2)
    user = User.objects.get(username='new_edx_id')
    self.assertEqual(user.email, 'new_edx_id@lti.example.com')
miptliot/edx-platform
[ 1, 7, 1, 5, 1382087527 ]
def setUp(self):
    """Create an edX user linked to an LTI identity for backend lookups."""
    super(LtiBackendTest, self).setUp()
    self.edx_user = UserFactory.create()
    self.edx_user.save()
    self.lti_consumer = LtiConsumer(
        consumer_key="Consumer Key",
        consumer_secret="Consumer Secret"
    )
    self.lti_consumer.save()
    self.lti_user_id = 'LTI User ID'
    # Link the LTI identity to the edX account so authenticate() can find it.
    LtiUser(
        lti_consumer=self.lti_consumer,
        lti_user_id=self.lti_user_id,
        edx_user=self.edx_user
    ).save()
miptliot/edx-platform
[ 1, 7, 1, 5, 1382087527 ]
def test_missing_user_returns_none(self):
    """authenticate() yields None when the LTI user id is unknown."""
    backend = users.LtiBackend()
    result = backend.authenticate(
        username=self.edx_user.username,
        lti_user_id='Invalid Username',
        lti_consumer=self.lti_consumer
    )
    self.assertIsNone(result)
miptliot/edx-platform
[ 1, 7, 1, 5, 1382087527 ]
def test_missing_lti_id_returns_null(self):
    """authenticate() yields None when no lti_user_id is supplied at all."""
    backend = users.LtiBackend()
    result = backend.authenticate(
        username=self.edx_user.username,
        lti_consumer=self.lti_consumer
    )
    self.assertIsNone(result)
miptliot/edx-platform
[ 1, 7, 1, 5, 1382087527 ]
def test_existing_user_returned_by_get_user(self):
    """get_user should look the edX user up by primary key."""
    backend = users.LtiBackend()
    fetched = backend.get_user(self.edx_user.id)
    self.assertEqual(fetched, self.edx_user)
miptliot/edx-platform
[ 1, 7, 1, 5, 1382087527 ]
def main(self):
    """Emit str(self.value) on the "outbox" forever, sleeping self.sleep
    seconds between sends."""
    while True:
        self.send(str(self.value), "outbox")
        time.sleep(self.sleep)
sparkslabs/kamaelia_
[ 13, 3, 13, 2, 1348148442 ]
def assert_same_cluster(self, lhs, rhs):
    """Assert that two cluster specifications describe the same cluster."""
    left = server_lib.ClusterSpec(lhs).as_dict()
    right = server_lib.ClusterSpec(rhs).as_dict()
    self.assertEqual(left, right)
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]
def testClusterDefAsInput(self):
    """normalize_cluster_spec should accept a ClusterDef proto unchanged."""
    cluster_def = cluster_pb2.ClusterDef()
    jobs = {
        "chief": ["127.0.0.1:1234"],
        "worker": ["127.0.0.1:8964", "127.0.0.1:2333"],
        "ps": ["127.0.0.1:1926", "127.0.0.1:3141"],
    }
    # Build the proto from the table above instead of repeating add/assign.
    for name, tasks in jobs.items():
        job = cluster_def.job.add()
        job.name = name
        for index, address in enumerate(tasks):
            job.tasks[index] = address
    self.assert_same_cluster(
        cluster_def, multi_worker_util.normalize_cluster_spec(cluster_def))
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]
def testUnexpectedInput(self):
    """Passing a plain list must raise ValueError with the documented message."""
    cluster_spec = ["127.0.0.1:8964", "127.0.0.1:2333"]
    with self.assertRaisesRegex(
        ValueError,
        # NOTE(review): the mismatched backtick/apostrophe around
        # `cluster_spec' is in the production message; match it verbatim.
        "`cluster_spec' should be dict or a `tf.train.ClusterSpec` or a "
        "`tf.train.ClusterDef` object"):
        multi_worker_util.normalize_cluster_spec(cluster_spec)
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]
def testClusterWithChief(self):
    """is_chief is True only for the chief task when a chief job exists."""
    spec = {
        "chief": ["127.0.0.1:1234"],
        "worker": ["127.0.0.1:8964", "127.0.0.1:2333"],
        "ps": ["127.0.0.1:1926", "127.0.0.1:3141"],
    }
    self.assertTrue(multi_worker_util.is_chief(spec, "chief", 0))
    self.assertFalse(multi_worker_util.is_chief(spec, "worker", 0))
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]
def testEvaluatorIsChief(self):
    """The evaluator task is treated as its own chief."""
    spec = {
        "chief": ["127.0.0.1:1234"],
        "worker": ["127.0.0.1:8964", "127.0.0.1:2333"],
        "evaluator": ["127.0.0.1:2019"],
    }
    self.assertTrue(multi_worker_util.is_chief(spec, "evaluator", 0))
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]
def testCountWorker(self):
    """worker_count reports 3 whether queried for "chief" or "worker".

    NOTE(review): both queries return the same total, which suggests the
    chief is counted together with the workers — confirm in
    multi_worker_util.worker_count.
    """
    cluster_spec = {
        "chief": ["127.0.0.1:1234"],
        "worker": ["127.0.0.1:8964", "127.0.0.1:2333"],
        "ps": ["127.0.0.1:1926", "127.0.0.1:3141"]
    }
    self.assertEqual(
        multi_worker_util.worker_count(cluster_spec, task_type="chief"), 3)
    self.assertEqual(
        multi_worker_util.worker_count(cluster_spec, task_type="worker"), 3)
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]
def testTaskTypeNotFound(self):
    """Querying a task type absent from the spec raises ValueError."""
    empty_spec = {}
    with self.assertRaisesRegex(
        ValueError, "`task_type` 'worker' not found in cluster_spec."):
        multi_worker_util.worker_count(empty_spec, task_type="worker")
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]
def testChiefId(self):
    """The chief task's in-cluster id is 0."""
    spec = {
        "chief": ["127.0.0.1:1234"],
        "worker": ["127.0.0.1:8964", "127.0.0.1:2333"],
        "ps": ["127.0.0.1:1926", "127.0.0.1:3141"],
    }
    chief_id = multi_worker_util.id_in_cluster(spec, "chief", 0)
    self.assertEqual(chief_id, 0)
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]
def testEvaluatorId(self):
    """The evaluator task's in-cluster id is 0."""
    spec = {
        "chief": ["127.0.0.1:1234"],
        "worker": ["127.0.0.1:8964", "127.0.0.1:2333"],
        "evaluator": ["127.0.0.1:7566"],
    }
    evaluator_id = multi_worker_util.id_in_cluster(spec, "evaluator", 0)
    self.assertEqual(evaluator_id, 0)
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]
def testMultipleChiefs(self):
    """A spec with more than one chief task must be rejected."""
    cluster_spec = {
        "chief": ["127.0.0.1:8258", "127.0.0.1:7566"],
    }
    with self.assertRaisesRegex(ValueError,
                                "There must be at most one 'chief' job."):
        multi_worker_util.id_in_cluster(cluster_spec, "chief", 0)
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]
def testChiefAsLeader(self):
    """Workers use the chief task as the collective leader."""
    spec = {
        "chief": ["127.0.0.1:1234"],
        "worker": ["127.0.0.1:8964", "127.0.0.1:2333"],
        "ps": ["127.0.0.1:1926", "127.0.0.1:3141"],
    }
    leader = multi_worker_util.collective_leader(spec, "worker", 0)
    self.assertEqual(leader, "/job:chief/replica:0/task:0")
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]
def testLeaderForEvaluator(self):
    """The evaluator's collective leader is the empty string.

    NOTE(review): presumably an empty leader means the evaluator coordinates
    with no other task — confirm the semantics in multi_worker_util.
    """
    cluster_spec = {
        "chief": ["127.0.0.1:1234"],
        "worker": ["127.0.0.1:8964", "127.0.0.1:2333"],
        "ps": ["127.0.0.1:1926", "127.0.0.1:3141"],
        "evaluator": ["127.0.0.1:2019"]
    }
    self.assertEqual(
        multi_worker_util.collective_leader(cluster_spec, "evaluator", 0), "")
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]
def testEvaluatorNotInCluster(self):
    """_validate_cluster_spec accepts all these task types for this spec.

    NOTE(review): "evaluator" validates successfully even though it is
    absent from the spec — presumably evaluators are exempt from the
    membership check; confirm in multi_worker_util._validate_cluster_spec.
    """
    cluster_spec = {
        "chief": ["127.0.0.1:1234"],
        "worker": ["127.0.0.1:8964", "127.0.0.1:2333"],
        "ps": ["127.0.0.1:1926", "127.0.0.1:3141"]
    }
    multi_worker_util._validate_cluster_spec(cluster_spec, "chief", 0)
    multi_worker_util._validate_cluster_spec(cluster_spec, "worker", 0)
    multi_worker_util._validate_cluster_spec(cluster_spec, "ps", 0)
    multi_worker_util._validate_cluster_spec(cluster_spec, "evaluator", 0)
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]
def add(self, x, y):
    """Return the sum of x and y."""
    result = x + y
    return result
normanmaurer/autobahntestsuite-maven-plugin
[ 5, 7, 5, 2, 1354609119 ]
def sub(self, x, y):
    """Return x minus y."""
    difference = x - y
    return difference
normanmaurer/autobahntestsuite-maven-plugin
[ 5, 7, 5, 2, 1354609119 ]
def square(self, x):
    """Return x squared; inputs above 1000 raise a custom RPC error."""
    if x > 1000:
        # Application-defined error: (error URI, human-readable detail).
        raise Exception(
            "http://example.com/error#number_too_big",
            "number %d too big to square" % x)
    return x * x
normanmaurer/autobahntestsuite-maven-plugin
[ 5, 7, 5, 2, 1354609119 ]
def sum(self, list):
    """Return the sum of all numbers in *list*.

    Raises TypeError on an empty list (reduce has no initial value),
    matching the original behaviour.
    """
    # Bug fix: ``reduce`` is no longer a builtin in Python 3; import it
    # explicitly so this endpoint does not raise NameError.
    from functools import reduce
    return reduce(lambda x, y: x + y, list)
normanmaurer/autobahntestsuite-maven-plugin
[ 5, 7, 5, 2, 1354609119 ]
def pickySum(self, list):
    """Sum the numbers in *list*, rejecting any that are multiples of 3.

    Raises an application-defined error carrying the offending values if
    one or more inputs are divisible by 3; otherwise returns the sum.
    """
    # Collect all offending values so the client sees every bad input.
    errs = [i for i in list if i % 3 == 0]
    if len(errs) > 0:
        raise Exception("http://example.com/error#invalid_numbers",
                        "one or more numbers are multiples of 3",
                        errs)
    # Bug fix: ``reduce`` is no longer a builtin in Python 3; import it
    # explicitly so this endpoint does not raise NameError.
    from functools import reduce
    return reduce(lambda x, y: x + y, list)
normanmaurer/autobahntestsuite-maven-plugin
[ 5, 7, 5, 2, 1354609119 ]
def sqrt(self, x):
    """Return the square root of x as a float."""
    root = math.sqrt(x)
    return root
normanmaurer/autobahntestsuite-maven-plugin
[ 5, 7, 5, 2, 1354609119 ]
def asyncSum(self, list):
    """Return a Deferred that fires with the sum of *list* after 3 seconds.

    NOTE(review): self.sum(list) is evaluated synchronously here — only the
    delivery of the result is delayed, not the computation itself.
    """
    ## Simulate a slow function.
    d = defer.Deferred()
    reactor.callLater(3, d.callback, self.sum(list))
    return d
normanmaurer/autobahntestsuite-maven-plugin
[ 5, 7, 5, 2, 1354609119 ]
def __init__(self, filename):
    """Open (creating if needed) the persistent shelve file backing this
    key-value store."""
    self.store = shelve.open(filename)
normanmaurer/autobahntestsuite-maven-plugin
[ 5, 7, 5, 2, 1354609119 ]
def set(self, key = None, value = None):
    """Set, delete, or clear entries in the store.

    - key and value given: store *value* under str(key).
    - key given, value None: delete the entry if it exists.
    - key None: clear the whole store.
    """
    if key is not None:
        k = str(key)
        if value is not None:
            self.store[k] = value
        else:
            # Bug fix: dict.has_key() was removed in Python 3; the ``in``
            # membership test is equivalent and works on both versions.
            if k in self.store:
                del self.store[k]
    else:
        self.store.clear()
normanmaurer/autobahntestsuite-maven-plugin
[ 5, 7, 5, 2, 1354609119 ]
def get(self, key = None):
    """Return the value stored under str(key) (None if absent), or all
    (key, value) pairs when key is None."""
    if key is None:
        return self.store.items()
    return self.store.get(str(key), None)
normanmaurer/autobahntestsuite-maven-plugin
[ 5, 7, 5, 2, 1354609119 ]
def keys(self):
    """Return all keys currently present in the backing store."""
    return self.store.keys()
normanmaurer/autobahntestsuite-maven-plugin
[ 5, 7, 5, 2, 1354609119 ]
def __init__(self):
    """Start from a cleared calculator state (delegates to self.clear(),
    defined elsewhere in the class)."""
    self.clear()
normanmaurer/autobahntestsuite-maven-plugin
[ 5, 7, 5, 2, 1354609119 ]
def calc(self, arg):
    """Process one calculator input event and return the display value.

    *arg* is a dict with:
      - "op":  one of "+", "-", "*", "/", "=", or "C" (clear)
      - "num": the operand, parsed as decimal.Decimal (ignored for "C")

    The operator supplied with a number is *pending*: it is applied when
    the next number arrives (classic infix calculator behaviour).
    Returns the accumulator as a string.
    """
    op = arg["op"]
    if op == "C":
        # Clear resets state and echoes the (reset) accumulator.
        self.clear()
        return str(self.current)
    num = decimal.Decimal(arg["num"])
    if self.op:
        # Apply the operator left pending by the previous event.
        if self.op == "+":
            self.current += num
        elif self.op == "-":
            self.current -= num
        elif self.op == "*":
            self.current *= num
        elif self.op == "/":
            self.current /= num
        self.op = op
    else:
        # First input of a calculation: remember the operator, seed the
        # accumulator with the number.
        self.op = op
        self.current = num
    res = str(self.current)
    if op == "=":
        # "=" finishes the calculation; reset for the next one.
        self.clear()
    return res
normanmaurer/autobahntestsuite-maven-plugin
[ 5, 7, 5, 2, 1354609119 ]
def __init__(self, allowedTopicIds):
    """Remember which topic ids clients may subscribe to."""
    self.allowedTopicIds = allowedTopicIds
    # Monotonically increasing serial stamped onto published events.
    self.serial = 0
normanmaurer/autobahntestsuite-maven-plugin
[ 5, 7, 5, 2, 1354609119 ]
def subscribe(self, topicUriPrefix, topicUriSuffix):
    """
    Custom topic subscription handler.

    Permit subscription only when the topic suffix parses as an integer
    contained in self.allowedTopicIds. Returns True to allow, False to deny.
    """
    # Bug fix: Python-2 ``print`` statements converted to print() calls,
    # and the bare ``except`` narrowed to the conversion errors int() raises.
    print("client wants to subscribe to %s%s" % (topicUriPrefix, topicUriSuffix))
    try:
        i = int(topicUriSuffix)
    except (ValueError, TypeError):
        print("illegal topic - skipped subscription")
        return False
    if i in self.allowedTopicIds:
        print("Subscribing client to topic Foobar %d" % i)
        return True
    else:
        print("Client not allowed to subscribe to topic Foobar %d" % i)
        return False
normanmaurer/autobahntestsuite-maven-plugin
[ 5, 7, 5, 2, 1354609119 ]
def publish(self, topicUriPrefix, topicUriSuffix, event):
    """
    Custom topic publication handler.

    Accept only events on integer-suffixed topics that are dicts with a
    positive "count"; enrich accepted events with a monotonically
    increasing "serial" and return them. Returning None vetoes publication.
    """
    # Bug fixes: Python-2 ``print`` statements -> print() calls,
    # dict.has_key() (removed in Python 3) -> ``in``,
    # type(event) == dict -> isinstance, bare except narrowed to int() errors.
    print("client wants to publish to %s%s" % (topicUriPrefix, topicUriSuffix))
    try:
        int(topicUriSuffix)
    except (ValueError, TypeError):
        print("illegal topic - skipped publication of event")
        return None
    if isinstance(event, dict) and "count" in event:
        if event["count"] > 0:
            self.serial += 1
            event["serial"] = self.serial
            print("ok, published enriched event")
            return event
        else:
            print("event count attribute is negative")
            return None
    else:
        print("event is not dict or misses count attribute")
        return None
normanmaurer/autobahntestsuite-maven-plugin
[ 5, 7, 5, 2, 1354609119 ]
def onSessionOpen(self):
    """Wire up all demo services when a WAMP session is established."""
    self.initSimpleRpc()
    self.initKeyValue()
    self.initCalculator()
    self.initSimplePubSub()
    self.initPubSubAuth()
normanmaurer/autobahntestsuite-maven-plugin
[ 5, 7, 5, 2, 1354609119 ]
def initKeyValue(self):
    """Expose the factory's shared key-value store over RPC."""
    ## Key-Value Store
    self.registerForRpc(self.factory.keyvalue,
                        "http://example.com/simple/keyvalue#")
normanmaurer/autobahntestsuite-maven-plugin
[ 5, 7, 5, 2, 1354609119 ]
def initSimplePubSub(self):
    """Register the PubSub topics served by this session."""
    ## register a single, fixed URI as PubSub topic
    self.registerForPubSub("http://example.com/simple")

    ## register a URI and all URIs having the string as prefix as PubSub topic
    self.registerForPubSub("http://example.com/event#", True)

    ## register any URI (string) as topic
    #self.registerForPubSub("", True)
normanmaurer/autobahntestsuite-maven-plugin
[ 5, 7, 5, 2, 1354609119 ]
def upgrade_charm():
    """Charm upgrade hook: migrate old installs and schedule reconfiguration."""
    # Trigger removal of PPA docker installation if it was previously set.
    set_state('config.changed.install_from_upstream')
    hookenv.atexit(remove_state, 'config.changed.install_from_upstream')

    cleanup_pre_snap_services()
    check_resources_for_upgrade_needed()

    # Remove the RC for nginx ingress if it exists
    if hookenv.config().get('ingress'):
        kubectl_success('delete', 'rc', 'nginx-ingress-controller')

    # Remove gpu.enabled state so we can reconfigure gpu-related kubelet flags,
    # since they can differ between k8s versions
    remove_state('kubernetes-worker.gpu.enabled')

    # Dropping these states forces the reactive handlers to re-run the
    # corresponding install/config steps on the new charm revision.
    remove_state('kubernetes-worker.cni-plugins.installed')
    remove_state('kubernetes-worker.config.created')
    remove_state('kubernetes-worker.ingress.available')
    set_state('kubernetes-worker.restart-needed')
cncf/cross-cloud
[ 164, 57, 164, 40, 1494451575 ]
def set_upgrade_needed():
    """Flag the worker snaps for upgrade.

    The upgrade is additionally marked "specified" (allowed to proceed
    without operator action) unless a previous channel exists and the
    operator has required manual upgrades.
    """
    set_state('kubernetes-worker.snaps.upgrade-needed')
    config = hookenv.config()
    previous_channel = config.previous('channel')
    require_manual = config.get('require-manual-upgrade')
    # First install (no previous channel) or manual upgrades disabled:
    # let the upgrade proceed automatically.
    if previous_channel is None or not require_manual:
        set_state('kubernetes-worker.snaps.upgrade-specified')
cncf/cross-cloud
[ 164, 57, 164, 40, 1494451575 ]
def channel_changed():
    """React to a snap-channel config change by flagging an upgrade."""
    set_upgrade_needed()
cncf/cross-cloud
[ 164, 57, 164, 40, 1494451575 ]
def upgrade_needed_status():
    """Set a blocked status prompting the operator to run the upgrade action."""
    hookenv.status_set('blocked',
                       'Needs manual upgrade, run the upgrade action')
cncf/cross-cloud
[ 164, 57, 164, 40, 1494451575 ]
def install_snaps():
    """Install the worker snaps from the configured channel and flag a restart."""
    check_resources_for_upgrade_needed()
    channel = hookenv.config('channel')
    # Install each worker snap, reporting progress via the unit status.
    for snap_name in ('kubectl', 'kubelet', 'kube-proxy'):
        hookenv.status_set('maintenance', 'Installing %s snap' % snap_name)
        snap.install(snap_name, channel=channel, classic=True)
    set_state('kubernetes-worker.snaps.installed')
    set_state('kubernetes-worker.restart-needed')
    remove_state('kubernetes-worker.snaps.upgrade-needed')
    remove_state('kubernetes-worker.snaps.upgrade-specified')
cncf/cross-cloud
[ 164, 57, 164, 40, 1494451575 ]
def shutdown():
    '''
    When this unit is destroyed:
    - delete the current node
    - stop the worker services
    '''
    try:
        # Only deregister if a kubeconfig was ever written (node joined).
        if os.path.isfile(kubeconfig_path):
            kubectl('delete', 'node', gethostname().lower())
    except CalledProcessError:
        # Best effort: the API server may already be gone during teardown.
        hookenv.log('Failed to unregister node.')
    service_stop('snap.kubelet.daemon')
    service_stop('snap.kube-proxy.daemon')
cncf/cross-cloud
[ 164, 57, 164, 40, 1494451575 ]
def install_cni_plugins():
    ''' Unpack the cni-plugins resource.

    Fetches the architecture-specific cni resource, validates it (present
    and at least ~1 MB), unpacks it into the charm directory, and installs
    the bundled binaries into /opt/cni/bin. Sets a blocked status and
    returns early on any fetch/validation failure.
    '''
    charm_dir = os.getenv('CHARM_DIR')

    # Get the resource via resource_get
    try:
        resource_name = 'cni-{}'.format(arch())
        archive = hookenv.resource_get(resource_name)
    except Exception:
        message = 'Error fetching the cni resource.'
        hookenv.log(message)
        hookenv.status_set('blocked', message)
        return

    if not archive:
        hookenv.log('Missing cni resource.')
        hookenv.status_set('blocked', 'Missing cni resource.')
        return

    # Handle null resource publication, we check if filesize < 1mb
    filesize = os.stat(archive).st_size
    if filesize < 1000000:
        hookenv.status_set('blocked', 'Incomplete cni resource.')
        return

    hookenv.status_set('maintenance', 'Unpacking cni resource.')

    unpack_path = '{}/files/cni'.format(charm_dir)
    os.makedirs(unpack_path, exist_ok=True)
    cmd = ['tar', 'xfvz', archive, '-C', unpack_path]
    hookenv.log(cmd)
    check_call(cmd)

    apps = [
        {'name': 'loopback', 'path': '/opt/cni/bin'}
    ]

    # Copy each unpacked binary into its destination with install(1),
    # creating parent directories as needed (-D).
    for app in apps:
        unpacked = '{}/{}'.format(unpack_path, app['name'])
        app_path = os.path.join(app['path'], app['name'])
        install = ['install', '-v', '-D', unpacked, app_path]
        hookenv.log(install)
        check_call(install)

    # Used by the "registry" action. The action is run on a single worker, but
    # the registry pod can end up on any worker, so we need this directory on
    # all the workers.
    os.makedirs('/srv/registry', exist_ok=True)

    set_state('kubernetes-worker.cni-plugins.installed')
cncf/cross-cloud
[ 164, 57, 164, 40, 1494451575 ]