function
stringlengths
11
56k
repo_name
stringlengths
5
60
features
list
def test_create_token_response(self):
    """Happy-path token issuing, plus two client-authentication failure modes."""
    bearer = BearerToken(self.mock_validator)
    headers, body, status_code = self.auth.create_token_response(
        self.request, bearer)
    token = json.loads(body)
    # Exactly one token must have been persisted through the validator.
    self.assertEqual(self.mock_validator.save_token.call_count, 1)
    self.assertIn('access_token', token)
    self.assertIn('token_type', token)
    self.assertIn('expires_in', token)
    self.assertIn('refresh_token', token)
    # ensure client_authentication_required() is properly called
    self.mock_validator.client_authentication_required.assert_called_once_with(self.request)
    # fail client authentication: expect 401 and no token saved
    self.mock_validator.reset_mock()
    self.mock_validator.validate_user.return_value = True
    self.mock_validator.authenticate_client.return_value = False
    status_code = self.auth.create_token_response(self.request, bearer)[2]
    self.assertEqual(status_code, 401)
    self.assertEqual(self.mock_validator.save_token.call_count, 0)
    # mock client_authentication_required() returning False then fail
    # the client-id-only authentication path: expect 401 as well
    self.mock_validator.reset_mock()
    self.mock_validator.client_authentication_required.return_value = False
    self.mock_validator.authenticate_client_id.return_value = False
    status_code = self.auth.create_token_response(self.request, bearer)[2]
    self.assertEqual(status_code, 401)
    self.assertEqual(self.mock_validator.save_token.call_count, 0)
idan/oauthlib
[ 2555, 477, 2555, 82, 1321744131 ]
def test_custom_auth_validators_unsupported(self):
    """This grant type must reject authorization validators outright.

    Both the constructor keywords (pre_auth/post_auth) and later appends to
    custom_validators must fail.
    """
    authval1, authval2 = mock.Mock(), mock.Mock()
    expected = ('ResourceOwnerPasswordCredentialsGrant does not '
                'support authorization validators. Use token '
                'validators instead.')
    with self.assertRaises(ValueError) as caught:
        ResourceOwnerPasswordCredentialsGrant(self.mock_validator,
                                              pre_auth=[authval1])
    self.assertEqual(caught.exception.args[0], expected)
    with self.assertRaises(ValueError) as caught:
        ResourceOwnerPasswordCredentialsGrant(self.mock_validator,
                                              post_auth=[authval2])
    self.assertEqual(caught.exception.args[0], expected)
    with self.assertRaises(AttributeError):
        self.auth.custom_validators.pre_auth.append(authval1)
    # Fixed copy-paste bug: the original appended authval2 to pre_auth a
    # second time, leaving the post_auth rejection path untested.
    with self.assertRaises(AttributeError):
        self.auth.custom_validators.post_auth.append(authval2)
idan/oauthlib
[ 2555, 477, 2555, 82, 1321744131 ]
def test_error_response(self):
    # TODO: placeholder — error-response behaviour is not asserted yet.
    pass
idan/oauthlib
[ 2555, 477, 2555, 82, 1321744131 ]
def test_invalid_request_missing_params(self):
    """A token request lacking grant_type must raise InvalidRequestError."""
    del self.request.grant_type
    with self.assertRaises(errors.InvalidRequestError):
        self.auth.validate_token_request(self.request)
idan/oauthlib
[ 2555, 477, 2555, 82, 1321744131 ]
def test_invalid_grant_type(self):
    """An unknown grant_type value must raise UnsupportedGrantTypeError."""
    self.request.grant_type = 'foo'
    with self.assertRaises(errors.UnsupportedGrantTypeError):
        self.auth.validate_token_request(self.request)
idan/oauthlib
[ 2555, 477, 2555, 82, 1321744131 ]
def test_client_id_missing(self):
    """Deleting client_id makes validation hit an unimplemented code path.

    NOTE(review): the expected NotImplementedError presumably originates in
    the default RequestValidator stub — confirm against the validator in use.
    """
    del self.request.client.client_id
    self.assertRaises(NotImplementedError, self.auth.validate_token_request,
                      self.request)
idan/oauthlib
[ 2555, 477, 2555, 82, 1321744131 ]
def __init__(self, app: ASGIApp, dispatch: DispatchFunction = None) -> None:
    """Store the wrapped ASGI app and select the dispatch callable.

    When no explicit ``dispatch`` function is supplied, the instance's own
    ``dispatch`` method is used.
    """
    self.app = app
    if dispatch is None:
        self.dispatch_func = self.dispatch
    else:
        self.dispatch_func = dispatch
encode/starlette
[ 7965, 724, 7965, 41, 1529932581 ]
def __init__(self, **kwargs): """ Initializes a CTranslationMap instance Notes: You can specify all parameters while calling this methods. A special argument named `data` will enable you to load the object from a Python dictionary Examples: >>> ctranslationmap = NUCTranslationMap(id=u'xxxx-xxx-xxx-xxx', name=u'CTranslationMap') >>> ctranslationmap = NUCTranslationMap(data=my_dict) """ super(NUCTranslationMap, self).__init__() # Read/Write Attributes
nuagenetworks/vspk-python
[ 19, 18, 19, 7, 1457133058 ]
def mapping_type(self): """ Get mapping_type value. Notes: NAT for 1:1 mapping or PAT for *:1 mappings.
nuagenetworks/vspk-python
[ 19, 18, 19, 7, 1457133058 ]
def mapping_type(self, value): """ Set mapping_type value. Notes: NAT for 1:1 mapping or PAT for *:1 mappings.
nuagenetworks/vspk-python
[ 19, 18, 19, 7, 1457133058 ]
def last_updated_by(self): """ Get last_updated_by value. Notes: ID of the user who last updated the object.
nuagenetworks/vspk-python
[ 19, 18, 19, 7, 1457133058 ]
def last_updated_by(self, value): """ Set last_updated_by value. Notes: ID of the user who last updated the object.
nuagenetworks/vspk-python
[ 19, 18, 19, 7, 1457133058 ]
def entity_scope(self): """ Get entity_scope value. Notes: Specify if scope of entity is Data center or Enterprise level
nuagenetworks/vspk-python
[ 19, 18, 19, 7, 1457133058 ]
def entity_scope(self, value): """ Set entity_scope value. Notes: Specify if scope of entity is Data center or Enterprise level
nuagenetworks/vspk-python
[ 19, 18, 19, 7, 1457133058 ]
def associated_domain_id(self): """ Get associated_domain_id value. Notes: Domain associated to this address mapping.
nuagenetworks/vspk-python
[ 19, 18, 19, 7, 1457133058 ]
def associated_domain_id(self, value): """ Set associated_domain_id value. Notes: Domain associated to this address mapping.
nuagenetworks/vspk-python
[ 19, 18, 19, 7, 1457133058 ]
def customer_alias_ip(self): """ Get customer_alias_ip value. Notes: Customer public IP in the provider domain.
nuagenetworks/vspk-python
[ 19, 18, 19, 7, 1457133058 ]
def customer_alias_ip(self, value): """ Set customer_alias_ip value. Notes: Customer public IP in the provider domain.
nuagenetworks/vspk-python
[ 19, 18, 19, 7, 1457133058 ]
def customer_ip(self): """ Get customer_ip value. Notes: Customer private IP in the customer domain.
nuagenetworks/vspk-python
[ 19, 18, 19, 7, 1457133058 ]
def customer_ip(self, value): """ Set customer_ip value. Notes: Customer private IP in the customer domain.
nuagenetworks/vspk-python
[ 19, 18, 19, 7, 1457133058 ]
def external_id(self): """ Get external_id value. Notes: External object ID. Used for integration with third party systems
nuagenetworks/vspk-python
[ 19, 18, 19, 7, 1457133058 ]
def external_id(self, value): """ Set external_id value. Notes: External object ID. Used for integration with third party systems
nuagenetworks/vspk-python
[ 19, 18, 19, 7, 1457133058 ]
def add_overlaplcwp(self, parameters, timeslots=None, matrixsize=None,
                    blockid=0, key=("ov", "ovkin", "ovpot")):
    r"""Add storage for various overlap matrices. We can store one matrix
    type per key.

    ========= ======
    Key name  Matrix
    ========= ======
    ``ov``    :math:`\langle\Upsilon | \Upsilon\rangle`
    ``ovkin`` :math:`\langle\Upsilon | T | \Upsilon\rangle`
    ``ovpot`` :math:`\langle\Upsilon | V(\underline{x}) | \Upsilon\rangle`
    ========= ======

    Note that 'strange' errors occur if we later try to load or save
    matrices for a key we did not initialise with this function.

    :param parameters: A :py:class:`ParameterProvider` instance. It can be
                       empty and is not used at the moment.
    :param timeslots: The number of time slots we need. Can be set to
                      ``None`` to get automatically growing datasets.
    :param matrixsize: The (maximal) size of each of the overlap matrices.
                       If specified this remains fixed for all timeslots.
                       Can be set to ``None`` (default) to get automatically
                       growing datasets.
    :type matrixsize: Pair of integers or ``None``.
    :param blockid: The ID of the data block to operate on.
    :param key: Specify which overlap matrices to save. All are independent.
    :type key: Tuple of valid identifier strings that are ``ov``, ``ovkin``
               and ``ovpot``. Default is ``("ov", "ovkin", "ovpot")``.
    """
    valid_keys = ("ov", "ovkin", "ovpot")
    # Create the dataset with appropriate parameters
    grp_ov = self._srf[self._prefixb + str(blockid)].create_group("overlaplcwp")
    if timeslots is None:
        # No timeslot count given: time axis grows on demand.
        T = 0
        Ts = None
        csTs = 128
    else:
        T = timeslots
        Ts = timeslots
        csTs = min(128, Ts)
    if matrixsize is None:
        # No matrix size given: row/column axes grow on demand too.
        Jr = 0
        Jc = 0
        Jrs = None
        Jcs = None
        csJrs = 128
        csJcs = 128
    else:
        Jr, Jc = matrixsize
        Jrs, Jcs = matrixsize
        csJrs = min(128, Jrs)
        csJcs = min(128, Jcs)
    for k in key:
        if k not in valid_keys:
            raise ValueError("Unknown key value " + str(k))
        # Dataset names use the key minus its "ov" prefix ("", "kin", "pot").
        name = k[2:]
        daset_tg = grp_ov.create_dataset("timegrid" + name, (T,),
                                         dtype=np.integer, chunks=True,
                                         maxshape=(Ts,), fillvalue=-1)
        grp_ov.create_dataset("shape" + name, (T, 2), dtype=np.integer,
                              chunks=(csTs, 2), maxshape=(Ts, 2))
        grp_ov.create_dataset("overlap" + name, (T, Jr, Jc),
                              dtype=np.complexfloating,
                              chunks=(1, csJrs, csJcs),
                              maxshape=(Ts, Jrs, Jcs))
        # "pointer" presumably tracks the next write position in the
        # timegrid — confirm against the save routines of this class.
        daset_tg.attrs["pointer"] = 0
WaveBlocks/WaveBlocksND
[ 6, 8, 6, 34, 1332703340 ]
def has_overlaplcwp(self, blockid=0, key=("ov", "ovkin", "ovpot")):
    r"""Ask if the specified data block has the desired data tensor.

    :param blockid: The ID of the data block to operate on.
    :param key: Specify which overlap matrices to look for. All are
                independent.
    :type key: Tuple of valid identifier strings that are ``ov``, ``ovkin``
               and ``ovpot``. Default is ``("ov", "ovkin", "ovpot")``.
    """
    block = self._srf[self._prefixb + str(blockid)]
    present = "overlaplcwp" in block.keys()
    # Each requested key maps to a dataset name inside the group.
    for k, dataset in (("ov", "overlap"),
                       ("ovpot", "overlappot"),
                       ("ovkin", "overlapkin")):
        if present and k in key:
            present = present and (dataset in block["overlaplcwp"].keys())
    return present
WaveBlocks/WaveBlocksND
[ 6, 8, 6, 34, 1332703340 ]
def load_overlaplcwp_timegrid(self, blockid=0, key=("ov", "ovkin", "ovpot")):
    r"""Load the timegrid corresponding to the overlap matrices specified.

    :param blockid: The ID of the data block to operate on.
    :param key: Specify which overlap matrices to load. All are independent.
    :type key: Tuple of valid identifier strings that are ``ov``, ``ovkin``
               and ``ovpot``. Default is ``("ov", "ovkin", "ovpot")``.
    :raise ValueError: If an unknown key value is given.
    :return: A single timegrid array when exactly one key is requested,
             otherwise a tuple with one array per key.
    """
    # Map each key to the dataset-name suffix used by add_overlaplcwp().
    suffixes = {"ov": "", "ovkin": "kin", "ovpot": "pot"}
    tg = []
    for item in key:
        if item not in suffixes:
            raise ValueError("Unknown key value {}".format(item))
        pathtg = ("/" + self._prefixb + str(blockid)
                  + "/overlaplcwp/timegrid" + suffixes[item])
        tg.append(self._srf[pathtg][:])
    if len(tg) == 1:
        # Fixed: a leftover debug print(tg) was emitted here in the original.
        return tg[0]
    else:
        return tuple(tg)
WaveBlocks/WaveBlocksND
[ 6, 8, 6, 34, 1332703340 ]
def __init__(self):
    """Connect every configured POST_SAVE_FILTERS hook to Dataset_File saves.

    Registration is this "middleware"'s only job: it finishes by raising
    MiddlewareNotUsed (see the original '# disable middleware' comment) so it
    is dropped from the request chain after the hooks are wired up.
    """
    from tardis.tardis_portal.models import Dataset_File
    for f in settings.POST_SAVE_FILTERS:
        cls = f[0]
        args = []
        kw = {}
        if len(f) == 2:
            args = f[1]
        # NOTE(review): when len(f) == 3 only kw is taken and the positional
        # args in f[1] are ignored — looks unintended; confirm the expected
        # POST_SAVE_FILTERS tuple layout.
        if len(f) == 3:
            kw = f[2]
        hook = self._safe_import(cls, args, kw)
        # XXX seems to require a strong ref else it won't fire,
        # could be because some hooks are classes not functions.
        post_save.connect(hook, sender=Dataset_File, weak=False)
        logger.debug('Initialised postsave hook %s' % post_save.receivers)
    # disable middleware
    raise MiddlewareNotUsed()
aaryani/CoreTardis
[ 3, 1, 3, 1, 1310021778 ]
def normalize_spectrum(wave, flux, norm_range, dwave,
                       p=(1E-6, 1E-6), q=0.5, ivar=None, eps=1e-10,
                       rsv_frac=1.):
    """A double smooth normalization of a spectrum.

    Converted from Chao Liu's normSpectrum.m. Updated by Bo Zhang.

    Parameters
    ----------
    wave: ndarray (n_pix, )
        wavelength array
    flux: ndarray (n_pix, )
        flux array
    norm_range: tuple
        a tuple consisting (wave_start, wave_stop)
    dwave: float
        binning width
    p: tuple of 2 ps
        smoothing parameter between 0 and 1:
        0 -> LS-straight line
        1 -> cubic spline interpolant
    q: float
        percentile *fraction*, strictly between 0 and 1
        (multiplied by 100 before calling np.nanpercentile)
    ivar: ndarray (n_pix, ) | None
        inverse-variance array; None means even weighting
    eps: float
        the ivar floor value
    rsv_frac: float
        the fraction of pixels reserved in terms of std. default is 1.

    Returns
    -------
    flux_norm: ndarray
        normalized flux
    flux_cont: ndarray
        continuum flux

    Example
    -------
    >>> flux_norm, flux_cont = normalize_spectrum(
    >>>     wave, flux, (4000., 8000.), 100., p=(1E-8, 1E-7), q=0.5,
    >>>     rsv_frac=2.0)
    """
    import warnings  # local import: only needed for the fallback below

    if ivar is not None:
        # ivar is set: zero it outside the normalization range, floor at eps
        ivar = np.where(np.logical_or(wave < norm_range[0],
                                      wave > norm_range[1]), 0, ivar)
        ivar = np.where(ivar <= eps, eps, ivar)
        var = 1. / ivar
    else:
        # default config is even weight
        var = np.ones_like(flux)

    # check q region (q is a fraction, not a percent)
    assert 0. < q < 1.

    # number of bins spanning the normalization range
    # (np.int/np.bool were removed in numpy >= 1.24; use the builtins)
    n_bin = int(np.fix(np.diff(norm_range) / dwave) + 1)
    wave1 = norm_range[0]

    # SMOOTH 1: initial smooth curve through the positive-flux pixels
    if ivar is not None:
        ind_good_init = 1. * (ivar > 0.) * (flux > 0.)
    else:
        ind_good_init = 1. * (flux > 0.)
    ind_good_init = ind_good_init.astype(bool)
    flux_smoothed1 = SmoothSpline(wave[ind_good_init], flux[ind_good_init],
                                  p=p[0], var=var[ind_good_init])(wave)
    dflux = flux - flux_smoothed1

    # collecting continuum pixels --> ITERATION 1
    ind_good = np.zeros(wave.shape, dtype=bool)
    for i_bin in range(n_bin):
        ind_bin = np.logical_and(wave > wave1 + (i_bin - 0.5) * dwave,
                                 wave <= wave1 + (i_bin + 0.5) * dwave)
        if np.sum(ind_bin) > 0:
            # median & robust sigma of the residuals
            bin_median = np.median(dflux[ind_bin])
            # NOTE(review): bin_std uses residuals of the *whole* spectrum,
            # not just this bin — preserved as-is; confirm intent.
            bin_std = np.median(np.abs(dflux - bin_median))
            # keep pixels within rsv_frac*sigma of the q-percentile
            ind_good_ = ind_bin * (
                np.abs(dflux - np.nanpercentile(dflux[ind_bin], q * 100.)) < (
                    rsv_frac * bin_std))
            ind_good = np.logical_or(ind_good, ind_good_)
    ind_good = np.logical_and(ind_good, ind_good_init)

    # If no continuum pixels were found, fall back to using all pixels.
    # (The original used try/assert, which is stripped under -O, and built a
    # Warning() object without ever emitting it — now actually warn.)
    if np.sum(ind_good) == 0:
        warnings.warn("@Keenan.normalize_spectrum(): unable to find continuum! ")
        ind_good = np.ones(wave.shape, dtype=bool)

    # SMOOTH 2: continuum flux through the selected pixels
    flux_smoothed2 = SmoothSpline(wave[ind_good], flux[ind_good],
                                  p=p[1], var=var[ind_good])(wave)

    # normalized flux
    flux_norm = flux / flux_smoothed2
    return flux_norm, flux_smoothed2
hypergravity/hrs
[ 5, 1, 5, 2, 1479823121 ]
def get_stable_pixels(pixel_disp, wave_arm=100, frac=0.20):
    """Flag pixels whose dispersion is low relative to their neighborhood.

    Parameters
    ----------
    pixel_disp: np.ndarray
        dispersion array
    wave_arm: int
        the arm (half-window) length in terms of pixels
    frac: float
        the reserved fraction, between 0.00 and 1.00

    Returns
    -------
    ind_stable: np.ndarray of bool
        True where pixel_disp[i] is at or below the frac*100 percentile of
        its local window.
    """
    # np.bool was removed in numpy >= 1.24; use the builtin bool dtype.
    ind_stable = np.zeros_like(pixel_disp, dtype=bool)
    n_pix = len(pixel_disp)
    for i in range(n_pix):
        edge_l = max(i - wave_arm, 0)
        # NOTE: the right edge is exclusive in the slice below, so the window
        # is asymmetric by one pixel — preserved from the original.
        edge_r = min(i + wave_arm, n_pix)
        if pixel_disp[i] <= \
                np.percentile(pixel_disp[edge_l:edge_r], frac * 100.):
            ind_stable[i] = True
    return ind_stable
hypergravity/hrs
[ 5, 1, 5, 2, 1479823121 ]
def normalize_spectra(wave_flux_tuple_list, norm_range, dwave,
                      p=(1E-6, 1E-6), q=50, n_jobs=1, verbose=False):
    """ normalize multiple spectra using the same configuration

    Parameters
    ----------
    wave_flux_tuple_list: list[n_obs]
        a list of (wave, flux) tuple
    norm_range: tuple
        a tuple consisting (wave_start, wave_stop)
    dwave: float
        binning width
    p: tuple of 2 ps
        smoothing parameter between 0 and 1:
        0 -> LS-straight line
        1 -> cubic spline interpolant
    q: float in range of [0, 100]
        percentile, between 0 and 1
    n_jobs: int
        number of processes launched by joblib
    verbose: int / bool
        verbose level

    Returns
    -------
    flux_norm: ndarray
        normalized flux
    """
    # TODO: unimplemented stub — currently returns None, contradicting the
    # documented return value. Also note q defaults to 50 here while
    # normalize_spectrum asserts 0 < q < 1 — confirm the intended scale.
    pass
hypergravity/hrs
[ 5, 1, 5, 2, 1479823121 ]
def setUp(self):
    # Build a fresh sample community, its members and chairmen for each
    # test. (This class spells the attribute "chairmens".)
    (self.community, self.members,
     self.chairmens) = create_sample_community()
hasadna/OpenCommunity
[ 8, 16, 8, 18, 1372881176 ]
def test_send_invitation(self):
    """Sending an invitation emails the invitee with the community name and link."""
    i = Invitation.objects.create(community=self.community,
                                  created_by=self.members[0],
                                  email="xxx@xyz.com")
    i.send(self.members[0])
    # Exactly one message lands in Django's test outbox.
    self.assertEqual(len(mail.outbox), 1)
    self.assertIn(self.community.name, mail.outbox[0].subject)
    self.assertIn(i.get_absolute_url(), mail.outbox[0].body)
hasadna/OpenCommunity
[ 8, 16, 8, 18, 1372881176 ]
def setUp(self):
    # Build a fresh sample community, its members and chairmen for each
    # test. (This class spells the attribute "chairmen".)
    (self.community, self.members,
     self.chairmen) = create_sample_community()
hasadna/OpenCommunity
[ 8, 16, 8, 18, 1372881176 ]
def post_invite(self, data=None):
    """POST an invitation form to the community members view.

    When no payload is given, a default valid invitation is submitted.
    """
    payload = data or {"email": "sample@email.com",
                       "default_group_name": DefaultGroups.MEMBER,
                       "message": "the message"}
    url = reverse("members", kwargs={"community_id": self.community.id})
    return self.client.post(url, payload)
hasadna/OpenCommunity
[ 8, 16, 8, 18, 1372881176 ]
def test_view(self):
    """Posting a valid invite creates it, emails the invitee and renders the member row."""
    self.login_chairmen()
    response = self.post_invite({"email": "sample@email.com",
                                 "default_group_name": DefaultGroups.MEMBER,
                                 "message": "the message"})
    self.assertEqual(Invitation.objects.all().count(), 1)
    invitation = Invitation.objects.all()[0]
    self.assertEqual(invitation.community, self.community)
    self.assertEqual(invitation.created_by, self.chairmen[0])
    self.assertEqual(invitation.message, "the message")
    self.assertEqual(len(mail.outbox), 1)
    self.assertEqual(response.status_code, 200)
    #the response is an ajax response the show the user as added
    #to the list of members
    # NOTE(review): comparing response.content against str works on
    # Python 2 only; under Python 3 content is bytes — confirm target.
    self.assertIn("delete-invitation", response.content)
    self.assertIn("sample@email.com", response.content)
hasadna/OpenCommunity
[ 8, 16, 8, 18, 1372881176 ]
def test_bad_email(self):
    """An invalid email address must be rejected with HTTP 400 and an error body."""
    self.login_chairmen()
    response = self.post_invite({"email": "not a real email",
                                 "default_group_name": DefaultGroups.MEMBER,
                                 "message": "the message"})
    self.assertEqual(response.status_code, 400)
    self.assertEqual(_("Form error. Please supply a valid email."),
                     response.content)
hasadna/OpenCommunity
[ 8, 16, 8, 18, 1372881176 ]
def calc_auc(predictions):
    """Compute the ROC AUC from whitespace-separated prediction lines.

    Each line holds "<score> <label>": column 0 is the predicted score,
    column 1 is the true label.

    Parameters
    ----------
    predictions : iterable of str

    Returns
    -------
    float
        sklearn.metrics.roc_auc_score over the parsed labels and scores.
    """
    y_true = []
    y_score = []
    for line in predictions:
        # split() instead of split(" "): tolerates tabs and repeated
        # spaces, which previously produced empty fields and float('').
        values = line.split()
        y_true.append(float(values[1]))
        y_score.append(float(values[0]))
    return sklearn.metrics.roc_auc_score(y_true, y_score)
gnina/scripts
[ 17, 80, 17, 1, 1456588807 ]
def dispatch(self, request, *args, **kwargs):
    """Restrict this view to department administrators.

    Any requestuser whose devilryrole (for the current cradmin instance) is
    not GROUPTYPE_DEPARTMENTADMIN gets Http404 rather than a 403.
    """
    requestuser_devilryrole = request.cradmin_instance.get_devilryrole_for_requestuser()
    if requestuser_devilryrole != PermissionGroup.GROUPTYPE_DEPARTMENTADMIN:
        raise Http404()
    return super(AbstractTypeInUsersView, self).dispatch(
        request=request, *args, **kwargs)
devilry/devilry-django
[ 48, 23, 48, 82, 1264339874 ]
def get_backlink_label(self):
    """Return the label for the back-link. Subclasses must override this."""
    raise NotImplementedError()
devilry/devilry-django
[ 48, 23, 48, 82, 1264339874 ]
def split_users_blob(cls, users_blob):
    """
    Split the given string of users by ``,`` and whitespace.

    Leading/trailing separators make the regex split produce empty strings
    at the ends; those are stripped before building the result.

    Returns a set.
    """
    users_blob_split = cls.users_blob_split_pattern.split(users_blob)
    if len(users_blob_split) == 0:
        # Fixed: returned a list here, contradicting the documented
        # "Returns a set" contract.
        return set()
    if users_blob_split[0] == '':
        del users_blob_split[0]
    if len(users_blob_split) > 0 and users_blob_split[-1] == '':
        del users_blob_split[-1]
    return set(users_blob_split)
devilry/devilry-django
[ 48, 23, 48, 82, 1264339874 ]
def __get_users_blob_placeholder(self):
    """Placeholder text for the users_blob textarea.

    Shows example email addresses when the email auth backend is enabled,
    otherwise example usernames.
    """
    if settings.CRADMIN_LEGACY_USE_EMAIL_AUTH_BACKEND:
        return gettext_lazy('jane@example.com\njohn@example.com')
    else:
        return gettext_lazy('jane\njohn')
devilry/devilry-django
[ 48, 23, 48, 82, 1264339874 ]
def __validate_users_blob_emails(self, emails):
    """Validate each address; report every invalid one in a single form error."""
    def _is_valid(email):
        try:
            validate_email(email)
        except ValidationError:
            return False
        return True

    invalid_emails = [email for email in emails if not _is_valid(email)]
    if invalid_emails:
        self.add_error(
            'users_blob',
            gettext_lazy('Invalid email addresses: %(emails)s') % {
                'emails': ', '.join(sorted(invalid_emails))
            }
        )
devilry/devilry-django
[ 48, 23, 48, 82, 1264339874 ]
def clean(self):
    """Validate the pasted users blob and stash the parsed entries.

    Splits users_blob into a set, validates the entries as emails or
    usernames depending on the auth backend, and stores the result in
    self.cleaned_users_set.
    """
    cleaned_data = super(UserImportForm, self).clean()
    users_blob = cleaned_data.get('users_blob', None)
    if users_blob:
        users = AbstractTypeInUsersView.split_users_blob(users_blob)
        if settings.CRADMIN_LEGACY_USE_EMAIL_AUTH_BACKEND:
            self.__validate_users_blob_emails(emails=users)
        else:
            self.__validate_users_blob_usernames(usernames=users)
        self.cleaned_users_set = users
devilry/devilry-django
[ 48, 23, 48, 82, 1264339874 ]
def get_field_layout(self):
    """Crispy-forms layout: one users_blob field with a screen-reader-only label."""
    return [
        layout.Div(
            layout.Field('users_blob',
                         placeholder=self.__get_users_blob_placeholder()),
            css_class='cradmin-globalfields cradmin-legacy-formfield-label-sr-only')
    ]
devilry/devilry-django
[ 48, 23, 48, 82, 1264339874 ]
def get_success_url(self):
    """Redirect back to the app index after a successful import."""
    app = self.request.cradmin_app
    return app.reverse_appindexurl()
devilry/devilry-django
[ 48, 23, 48, 82, 1264339874 ]
def import_users_from_usernames(self, usernames):
    """Import users for the given usernames. Subclasses must implement this."""
    raise NotImplementedError()
devilry/devilry-django
[ 48, 23, 48, 82, 1264339874 ]
def get_keywords():
    """Get the keywords needed to look up the version information."""
    # These placeholder strings are substituted by git during git-archive.
    # setup.py/versioneer.py greps for the variable names, so each one must
    # be defined on a line of its own; _version.py just calls get_keywords().
    git_refnames = "$Format:%d$"
    git_full = "$Format:%H$"
    git_date = "$Format:%ci$"
    return {"refnames": git_refnames, "full": git_full, "date": git_date}
yt-project/unyt
[ 308, 44, 308, 33, 1522273616 ]
def get_config(): """Create, populate and return the VersioneerConfig() object.""" # these strings are filled in when 'setup.py versioneer' creates # _version.py cfg = VersioneerConfig() cfg.VCS = "git" cfg.style = "pep440" cfg.tag_prefix = "" cfg.parentdir_prefix = "unyt-" cfg.versionfile_source = "unyt/_version.py" cfg.verbose = False return cfg
yt-project/unyt
[ 308, 44, 308, 33, 1522273616 ]
def register_vcs_handler(vcs, method):  # decorator
    """Decorator to mark a method as the handler for a particular VCS."""
    def decorate(f):
        """Store f in HANDLERS[vcs][method]."""
        HANDLERS.setdefault(vcs, {})[method] = f
        return f
    return decorate
yt-project/unyt
[ 308, 44, 308, 33, 1522273616 ]
def versions_from_parentdir(parentdir_prefix, root, verbose):
    """Try to determine the version from the parent directory name.

    Source tarballs conventionally unpack into a directory that includes
    both the project name and a version string. Also walks up to two extra
    directory levels looking for an appropriately named parent directory.
    """
    tried = []
    for _ in range(3):
        dirname = os.path.basename(root)
        if dirname.startswith(parentdir_prefix):
            return {"version": dirname[len(parentdir_prefix):],
                    "full-revisionid": None,
                    "dirty": False, "error": None, "date": None}
        tried.append(root)
        root = os.path.dirname(root)  # up a level
    if verbose:
        print("Tried directories %s but none started with prefix %s" %
              (str(tried), parentdir_prefix))
    raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
yt-project/unyt
[ 308, 44, 308, 33, 1522273616 ]
def git_get_keywords(versionfile_abs):
    """Extract version information from the given file.

    The code embedded in _version.py can just fetch the value of these
    keywords. When used from setup.py, we don't want to import _version.py,
    so we do it with a regexp instead. This function is not used from
    _version.py.
    """
    keywords = {}
    try:
        # 'with' guarantees the handle is closed even if a read or regex
        # fails mid-loop (the original leaked the descriptor in that case).
        with open(versionfile_abs, "r") as f:
            for line in f.readlines():
                if line.strip().startswith("git_refnames ="):
                    mo = re.search(r'=\s*"(.*)"', line)
                    if mo:
                        keywords["refnames"] = mo.group(1)
                if line.strip().startswith("git_full ="):
                    mo = re.search(r'=\s*"(.*)"', line)
                    if mo:
                        keywords["full"] = mo.group(1)
                if line.strip().startswith("git_date ="):
                    mo = re.search(r'=\s*"(.*)"', line)
                    if mo:
                        keywords["date"] = mo.group(1)
    except EnvironmentError:
        # Missing/unreadable file: fall through and return whatever we have.
        pass
    return keywords
yt-project/unyt
[ 308, 44, 308, 33, 1522273616 ]
def git_versions_from_keywords(keywords, tag_prefix, verbose):
    """Get version information from git keywords."""
    if not keywords:
        raise NotThisMethod("no keywords at all, weird")
    date = keywords.get("date")
    if date is not None:
        # git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant
        # datestamp. However we prefer "%ci" (which expands to an "ISO-8601
        # -like" string, which we must then edit to make compliant), because
        # it's been around since git-1.5.3, and it's too difficult to
        # discover which version we're using, or to work around using an
        # older one.
        date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
    refnames = keywords["refnames"].strip()
    if refnames.startswith("$Format"):
        # Keywords never got substituted: not a git-archive tarball.
        if verbose:
            print("keywords are unexpanded, not using")
        raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
    refs = set([r.strip() for r in refnames.strip("()").split(",")])
    # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
    # just "foo-1.0". If we see a "tag: " prefix, prefer those.
    TAG = "tag: "
    tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
    if not tags:
        # Either we're using git < 1.8.3, or there really are no tags. We use
        # a heuristic: assume all version tags have a digit. The old git %d
        # expansion behaves like git log --decorate=short and strips out the
        # refs/heads/ and refs/tags/ prefixes that would let us distinguish
        # between branches and tags. By ignoring refnames without digits, we
        # filter out many common branch names like "release" and
        # "stabilization", as well as "HEAD" and "master".
        tags = set([r for r in refs if re.search(r'\d', r)])
        if verbose:
            print("discarding '%s', no digits" % ",".join(refs - tags))
    if verbose:
        print("likely tags: %s" % ",".join(sorted(tags)))
    for ref in sorted(tags):
        # sorting will prefer e.g. "2.0" over "2.0rc1"
        if ref.startswith(tag_prefix):
            r = ref[len(tag_prefix):]
            if verbose:
                print("picking %s" % r)
            return {"version": r,
                    "full-revisionid": keywords["full"].strip(),
                    "dirty": False, "error": None,
                    "date": date}
    # no suitable tags, so version is "0+unknown", but full hex is still there
    if verbose:
        print("no suitable tags, using unknown + full revision id")
    return {"version": "0+unknown",
            "full-revisionid": keywords["full"].strip(),
            "dirty": False, "error": "no suitable tags", "date": None}
yt-project/unyt
[ 308, 44, 308, 33, 1522273616 ]
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
    """Get version from 'git describe' in the root of the source tree.

    This only gets called if the git-archive 'subst' keywords were *not*
    expanded, and _version.py hasn't already been rewritten with a short
    version string, meaning we're inside a checked out source tree.
    """
    GITS = ["git"]
    if sys.platform == "win32":
        GITS = ["git.cmd", "git.exe"]
    out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root,
                          hide_stderr=True)
    if rc != 0:
        if verbose:
            print("Directory %s not under git control" % root)
        raise NotThisMethod("'git rev-parse --git-dir' returned error")
    # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
    # if there isn't one, this yields HEX[-dirty] (no NUM)
    describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty",
                                          "--always", "--long",
                                          "--match", "%s*" % tag_prefix],
                                   cwd=root)
    # --long was added in git-1.5.5
    if describe_out is None:
        raise NotThisMethod("'git describe' failed")
    describe_out = describe_out.strip()
    full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
    if full_out is None:
        raise NotThisMethod("'git rev-parse' failed")
    full_out = full_out.strip()
    pieces = {}
    pieces["long"] = full_out
    pieces["short"] = full_out[:7]  # maybe improved later
    pieces["error"] = None
    # parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
    # TAG might have hyphens.
    git_describe = describe_out
    # look for -dirty suffix
    dirty = git_describe.endswith("-dirty")
    pieces["dirty"] = dirty
    if dirty:
        git_describe = git_describe[:git_describe.rindex("-dirty")]
    # now we have TAG-NUM-gHEX or HEX
    if "-" in git_describe:
        # TAG-NUM-gHEX
        mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
        if not mo:
            # unparseable. Maybe git-describe is misbehaving?
            pieces["error"] = ("unable to parse git-describe output: '%s'"
                               % describe_out)
            return pieces
        # tag
        full_tag = mo.group(1)
        if not full_tag.startswith(tag_prefix):
            if verbose:
                fmt = "tag '%s' doesn't start with prefix '%s'"
                print(fmt % (full_tag, tag_prefix))
            pieces["error"] = ("tag '%s' doesn't start with prefix '%s'"
                               % (full_tag, tag_prefix))
            return pieces
        pieces["closest-tag"] = full_tag[len(tag_prefix):]
        # distance: number of commits since tag
        pieces["distance"] = int(mo.group(2))
        # commit: short hex revision ID
        pieces["short"] = mo.group(3)
    else:
        # HEX: no tags
        pieces["closest-tag"] = None
        count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"],
                                    cwd=root)
        pieces["distance"] = int(count_out)  # total number of commits
    # commit date: see ISO-8601 comment in git_versions_from_keywords()
    date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"],
                       cwd=root)[0].strip()
    pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
    return pieces
yt-project/unyt
[ 308, 44, 308, 33, 1522273616 ]
def render_pep440(pieces):
    """Build up version string, with post-release "local version identifier".

    Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you get a tagged
    build and then dirty it, you'll get TAG+0.gHEX.dirty

    Exceptions:
    1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
    """
    dirty_suffix = ".dirty" if pieces["dirty"] else ""
    tag = pieces["closest-tag"]
    if not tag:
        # exception #1: no tag was ever found
        return ("0+untagged.%d.g%s" % (pieces["distance"], pieces["short"])
                + dirty_suffix)
    if pieces["distance"] or pieces["dirty"]:
        return (tag + plus_or_dot(pieces)
                + "%d.g%s" % (pieces["distance"], pieces["short"])
                + dirty_suffix)
    # clean tagged build: the tag alone is the version
    return tag
yt-project/unyt
[ 308, 44, 308, 33, 1522273616 ]
def render_pep440_post(pieces):
    """TAG[.postDISTANCE[.dev0]+gHEX] .

    The ".dev0" means dirty. Note that .dev0 sorts backwards (a dirty tree
    will appear "older" than the corresponding clean one), but you shouldn't
    be releasing software with -dirty anyways.

    Exceptions:
    1: no tags. 0.postDISTANCE[.dev0]
    """
    tag = pieces["closest-tag"]
    if tag:
        rendered = tag
        if pieces["distance"] or pieces["dirty"]:
            rendered += ".post%d" % pieces["distance"]
            if pieces["dirty"]:
                rendered += ".dev0"
            rendered += plus_or_dot(pieces) + "g%s" % pieces["short"]
        return rendered
    # exception #1: no tags at all
    rendered = "0.post%d" % pieces["distance"]
    if pieces["dirty"]:
        rendered += ".dev0"
    return rendered + "+g%s" % pieces["short"]
yt-project/unyt
[ 308, 44, 308, 33, 1522273616 ]
def render_git_describe(pieces):
    """TAG[-DISTANCE-gHEX][-dirty].

    Like 'git describe --tags --dirty --always'.

    Exceptions:
    1: no tags. HEX[-dirty] (note: no 'g' prefix)
    """
    tag = pieces["closest-tag"]
    if tag:
        rendered = tag
        if pieces["distance"]:
            rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
    else:
        # exception #1: bare hex, no 'g' prefix
        rendered = pieces["short"]
    return rendered + ("-dirty" if pieces["dirty"] else "")
yt-project/unyt
[ 308, 44, 308, 33, 1522273616 ]
def render(pieces, style):
    """Render the given version pieces into the requested style."""
    if pieces["error"]:
        # Unparseable state: report it instead of a version string.
        return {"version": "unknown",
                "full-revisionid": pieces.get("long"),
                "dirty": None,
                "error": pieces["error"],
                "date": None}
    if not style or style == "default":
        style = "pep440"  # the default
    renderers = {"pep440": render_pep440,
                 "pep440-pre": render_pep440_pre,
                 "pep440-post": render_pep440_post,
                 "pep440-old": render_pep440_old,
                 "git-describe": render_git_describe,
                 "git-describe-long": render_git_describe_long}
    if style not in renderers:
        raise ValueError("unknown style '%s'" % style)
    rendered = renderers[style](pieces)
    return {"version": rendered,
            "full-revisionid": pieces["long"],
            "dirty": pieces["dirty"],
            "error": None,
            "date": pieces.get("date")}
yt-project/unyt
[ 308, 44, 308, 33, 1522273616 ]
def _GetProcessStartTime(pid):
  """Return the creation time of process |pid| as reported by psutil."""
  process = psutil.Process(pid)
  create_time = process.create_time
  # Newer psutil exposes create_time() as a method; in old versions it was a
  # property, in which case the attribute access above already produced the
  # value.
  return create_time() if inspect.ismethod(create_time) else create_time
endlessm/chromium-browser
[ 21, 16, 21, 3, 1435959644 ]
def _LogMapFailureDiagnostics(device): _DumpHostLog() # The device forwarder daemon logs to the logcat, so print the end of that. try: logger.info('Last 50 lines of logcat:') for logcat_line in device.adb.Logcat(dump=True)[-50:]: logger.info(' %s', logcat_line) except (device_errors.CommandFailedError, device_errors.DeviceUnreachableError): # Grabbing the device forwarder log is also best-effort. Ignore all errors. logger.warning('Failed to get the contents of the logcat.') # Log alive device forwarders. try: ps_out = device.RunShellCommand(['ps'], check_return=True) logger.info('Currently running device_forwarders:') for line in ps_out: if 'device_forwarder' in line: logger.info(' %s', line) except (device_errors.CommandFailedError, device_errors.DeviceUnreachableError): logger.warning('Failed to list currently running device_forwarder ' 'instances.')
endlessm/chromium-browser
[ 21, 16, 21, 3, 1435959644 ]
def __init__(self, path): self._fd = -1 self._path = path
endlessm/chromium-browser
[ 21, 16, 21, 3, 1435959644 ]
def __exit__(self, _exception_type, _exception_value, traceback): fcntl.flock(self._fd, fcntl.LOCK_UN) os.close(self._fd)
endlessm/chromium-browser
[ 21, 16, 21, 3, 1435959644 ]
def __init__(self, message): super(HostForwarderError, self).__init__(message)
endlessm/chromium-browser
[ 21, 16, 21, 3, 1435959644 ]
def Map(port_pairs, device, tool=None): """Runs the forwarder. Args: port_pairs: A list of tuples (device_port, host_port) to forward. Note that you can specify 0 as a device_port, in which case a port will by dynamically assigned on the device. You can get the number of the assigned port using the DevicePortForHostPort method. device: A DeviceUtils instance. tool: Tool class to use to get wrapper, if necessary, for executing the forwarder (see valgrind_tools.py). Raises: Exception on failure to forward the port. """ if not tool: tool = base_tool.BaseTool() with _FileLock(Forwarder._LOCK_PATH): instance = Forwarder._GetInstanceLocked(tool) instance._InitDeviceLocked(device, tool) device_serial = str(device) map_arg_lists = [[ '--adb=' + adb_wrapper.AdbWrapper.GetAdbPath(), '--serial-id=' + device_serial, '--map', str(device_port), str(host_port) ] for device_port, host_port in port_pairs] logger.info('Forwarding using commands: %s', map_arg_lists) for map_arg_list in map_arg_lists: try: map_cmd = [instance._host_forwarder_path] + map_arg_list (exit_code, output) = cmd_helper.GetCmdStatusAndOutputWithTimeout( map_cmd, Forwarder._TIMEOUT) except cmd_helper.TimeoutError as e: raise HostForwarderError( '`%s` timed out:\n%s' % (' '.join(map_cmd), e.output)) except OSError as e: if e.errno == 2: raise HostForwarderError('Unable to start host forwarder. ' 'Make sure you have built host_forwarder.') else: raise if exit_code != 0: try: instance._KillDeviceLocked(device, tool) except (device_errors.CommandFailedError, device_errors.DeviceUnreachableError): # We don't want the failure to kill the device forwarder to # supersede the original failure to map. 
logger.warning( 'Failed to kill the device forwarder after map failure: %s', str(e)) _LogMapFailureDiagnostics(device) formatted_output = ('\n'.join(output) if isinstance(output, list) else output) raise HostForwarderError( '`%s` exited with %d:\n%s' % (' '.join(map_cmd), exit_code, formatted_output)) tokens = output.split(':') if len(tokens) != 2: raise HostForwarderError('Unexpected host forwarder output "%s", ' 'expected "device_port:host_port"' % output) device_port = int(tokens[0]) host_port = int(tokens[1]) serial_with_port = (device_serial, device_port) instance._device_to_host_port_map[serial_with_port] = host_port instance._host_to_device_port_map[host_port] = serial_with_port logger.info('Forwarding device port: %d to host port: %d.', device_port, host_port)
endlessm/chromium-browser
[ 21, 16, 21, 3, 1435959644 ]
def UnmapDevicePort(device_port, device): """Unmaps a previously forwarded device port. Args: device: A DeviceUtils instance. device_port: A previously forwarded port (through Map()). """ with _FileLock(Forwarder._LOCK_PATH): Forwarder._UnmapDevicePortLocked(device_port, device)
endlessm/chromium-browser
[ 21, 16, 21, 3, 1435959644 ]
def UnmapAllDevicePorts(device): """Unmaps all the previously forwarded ports for the provided device. Args: device: A DeviceUtils instance. port_pairs: A list of tuples (device_port, host_port) to unmap. """ with _FileLock(Forwarder._LOCK_PATH): instance = Forwarder._GetInstanceLocked(None) unmap_all_cmd = [ instance._host_forwarder_path, '--adb=%s' % adb_wrapper.AdbWrapper.GetAdbPath(), '--serial-id=%s' % device.serial, '--unmap-all' ] try: exit_code, output = cmd_helper.GetCmdStatusAndOutputWithTimeout( unmap_all_cmd, Forwarder._TIMEOUT) except cmd_helper.TimeoutError as e: raise HostForwarderError( '`%s` timed out:\n%s' % (' '.join(unmap_all_cmd), e.output)) if exit_code != 0: error_msg = [ '`%s` exited with %d' % (' '.join(unmap_all_cmd), exit_code) ] if isinstance(output, list): error_msg += output else: error_msg += [output] raise HostForwarderError('\n'.join(error_msg)) # Clean out any entries from the device & host map. device_map = instance._device_to_host_port_map host_map = instance._host_to_device_port_map for device_serial_and_port, host_port in device_map.items(): device_serial = device_serial_and_port[0] if device_serial == device.serial: del device_map[device_serial_and_port] del host_map[host_port] # Kill the device forwarder. tool = base_tool.BaseTool() instance._KillDeviceLocked(device, tool)
endlessm/chromium-browser
[ 21, 16, 21, 3, 1435959644 ]
def DevicePortForHostPort(host_port): """Returns the device port that corresponds to a given host port.""" with _FileLock(Forwarder._LOCK_PATH): serial_and_port = Forwarder._GetInstanceLocked( None)._host_to_device_port_map.get(host_port) return serial_and_port[1] if serial_and_port else None
endlessm/chromium-browser
[ 21, 16, 21, 3, 1435959644 ]
def RemoveHostLog(): if os.path.exists(Forwarder._HOST_FORWARDER_LOG): os.unlink(Forwarder._HOST_FORWARDER_LOG)
endlessm/chromium-browser
[ 21, 16, 21, 3, 1435959644 ]
def GetHostLog(): if not os.path.exists(Forwarder._HOST_FORWARDER_LOG): return '' with file(Forwarder._HOST_FORWARDER_LOG, 'r') as f: return f.read()
endlessm/chromium-browser
[ 21, 16, 21, 3, 1435959644 ]
def _GetInstanceLocked(tool): """Returns the singleton instance. Note that the global lock must be acquired before calling this method. Args: tool: Tool class to use to get wrapper, if necessary, for executing the forwarder (see valgrind_tools.py). """ if not Forwarder._instance: Forwarder._instance = Forwarder(tool) return Forwarder._instance
endlessm/chromium-browser
[ 21, 16, 21, 3, 1435959644 ]
def _UnmapDevicePortLocked(device_port, device): """Internal method used by UnmapDevicePort(). Note that the global lock must be acquired before calling this method. """ instance = Forwarder._GetInstanceLocked(None) serial = str(device) serial_with_port = (serial, device_port) if serial_with_port not in instance._device_to_host_port_map: logger.error('Trying to unmap non-forwarded port %d', device_port) return host_port = instance._device_to_host_port_map[serial_with_port] del instance._device_to_host_port_map[serial_with_port] del instance._host_to_device_port_map[host_port] unmap_cmd = [ instance._host_forwarder_path, '--adb=%s' % adb_wrapper.AdbWrapper.GetAdbPath(), '--serial-id=%s' % serial, '--unmap', str(device_port) ] try: (exit_code, output) = cmd_helper.GetCmdStatusAndOutputWithTimeout( unmap_cmd, Forwarder._TIMEOUT) except cmd_helper.TimeoutError as e: raise HostForwarderError( '`%s` timed out:\n%s' % (' '.join(unmap_cmd), e.output)) if exit_code != 0: logger.error('`%s` exited with %d:\n%s', ' '.join(unmap_cmd), exit_code, '\n'.join(output) if isinstance(output, list) else output)
endlessm/chromium-browser
[ 21, 16, 21, 3, 1435959644 ]
def _GetPidForLock(): """Returns the PID used for host_forwarder initialization. The PID of the "sharder" is used to handle multiprocessing. The "sharder" is the initial process that forks that is the parent process. """ return os.getpgrp()
endlessm/chromium-browser
[ 21, 16, 21, 3, 1435959644 ]
def _InitDeviceLocked(self, device, tool): """Initializes the device_forwarder daemon for a specific device (once). Note that the global lock must be acquired before calling this method. This method kills any existing device_forwarder daemon on the device that could be stale, pushes the latest version of the daemon (to the device) and starts it. Args: device: A DeviceUtils instance. tool: Tool class to use to get wrapper, if necessary, for executing the forwarder (see valgrind_tools.py). """ device_serial = str(device) if device_serial in self._initialized_devices: return try: self._KillDeviceLocked(device, tool) except device_errors.CommandFailedError: logger.warning('Failed to kill device forwarder. Rebooting.') device.Reboot() forwarder_device_path_on_host = devil_env.config.FetchPath( 'forwarder_device', device=device) forwarder_device_path_on_device = ( Forwarder._DEVICE_FORWARDER_FOLDER if os.path.isdir(forwarder_device_path_on_host) else Forwarder._DEVICE_FORWARDER_PATH) device.PushChangedFiles([(forwarder_device_path_on_host, forwarder_device_path_on_device)]) cmd = [Forwarder._DEVICE_FORWARDER_PATH] wrapper = tool.GetUtilWrapper() if wrapper: cmd.insert(0, wrapper) device.RunShellCommand( cmd, env={'LD_LIBRARY_PATH': Forwarder._DEVICE_FORWARDER_FOLDER}, check_return=True) self._initialized_devices.add(device_serial)
endlessm/chromium-browser
[ 21, 16, 21, 3, 1435959644 ]
def KillHost(): """Kills the forwarder process running on the host.""" with _FileLock(Forwarder._LOCK_PATH): Forwarder._GetInstanceLocked(None)._KillHostLocked()
endlessm/chromium-browser
[ 21, 16, 21, 3, 1435959644 ]
def KillDevice(device, tool=None): """Kills the forwarder process running on the device. Args: device: Instance of DeviceUtils for talking to the device. tool: Wrapper tool (e.g. valgrind) that can be used to execute the device forwarder (see valgrind_tools.py). """ with _FileLock(Forwarder._LOCK_PATH): Forwarder._GetInstanceLocked(None)._KillDeviceLocked( device, tool or base_tool.BaseTool())
endlessm/chromium-browser
[ 21, 16, 21, 3, 1435959644 ]
def __init__(self, opts, numTests, progressBar=None): self.opts = opts self.numTests = numTests self.current = None self.progressBar = progressBar self.completed = 0
JianpingZeng/xcc
[ 36, 3, 36, 2, 1484539704 ]
def update(self, test): self.completed += 1 if self.opts.incremental: update_incremental_cache(test) if self.progressBar: self.progressBar.update(float(self.completed)/self.numTests, test.getFullName()) shouldShow = test.result.code.isFailure or \ self.opts.showAllOutput or \ (not self.opts.quiet and not self.opts.succinct) if not shouldShow: return if self.progressBar: self.progressBar.clear() # Show the test result line. test_name = test.getFullName() print('%s: %s (%d of %d)' % (test.result.code.name, test_name, self.completed, self.numTests)) # Show the test failure output, if requested. if (test.result.code.isFailure and self.opts.showOutput) or \ self.opts.showAllOutput: if test.result.code.isFailure: print("%s TEST '%s' FAILED %s" % ('*'*20, test.getFullName(), '*'*20)) print(test.result.output) print("*" * 20) # Report test metrics, if present. if test.result.metrics: print("%s TEST '%s' RESULTS %s" % ('*'*10, test.getFullName(), '*'*10)) items = sorted(test.result.metrics.items()) for metric_name, value in items: print('%s: %s ' % (metric_name, value.format())) print("*" * 10) # Ensure the output is flushed. sys.stdout.flush()
JianpingZeng/xcc
[ 36, 3, 36, 2, 1484539704 ]
def update_incremental_cache(test): if not test.result.code.isFailure: return fname = test.getFilePath() os.utime(fname, None)
JianpingZeng/xcc
[ 36, 3, 36, 2, 1484539704 ]
def sortIndex(test): fname = test.getFilePath() try: return -os.path.getmtime(fname) except: return 0
JianpingZeng/xcc
[ 36, 3, 36, 2, 1484539704 ]
def main(builtinParameters = {}): # Use processes by default on Unix platforms. isWindows = platform.system() == 'Windows' useProcessesIsDefault = not isWindows global options from optparse import OptionParser, OptionGroup parser = OptionParser("usage: %prog [options] {file-or-path}") parser.add_option("", "--version", dest="show_version", help="Show version and exit", action="store_true", default=False) parser.add_option("-j", "--threads", dest="numThreads", metavar="N", help="Number of testing threads", type=int, action="store", default=None) parser.add_option("", "--config-prefix", dest="configPrefix", metavar="NAME", help="Prefix for 'lit' config files", action="store", default=None) parser.add_option("-D", "--param", dest="userParameters", metavar="NAME=VAL", help="Add 'NAME' = 'VAL' to the user defined parameters", type=str, action="append", default=[]) group = OptionGroup(parser, "Output Format") # FIXME: I find these names very confusing, although I like the # functionality. group.add_option("-q", "--quiet", dest="quiet", help="Suppress no error output", action="store_true", default=False) group.add_option("-s", "--succinct", dest="succinct", help="Reduce amount of output", action="store_true", default=False) group.add_option("-v", "--verbose", dest="showOutput", help="Show test output for failures", action="store_true", default=False) group.add_option("-a", "--show-all", dest="showAllOutput", help="Display all commandlines and output", action="store_true", default=False) group.add_option("-o", "--output", dest="output_path", help="Write test results to the provided path", action="store", type=str, metavar="PATH") group.add_option("", "--no-progress-bar", dest="useProgressBar", help="Do not use curses based progress bar", action="store_false", default=True) group.add_option("", "--show-unsupported", dest="show_unsupported", help="Show unsupported tests", action="store_true", default=False) group.add_option("", "--show-xfail", dest="show_xfail", help="Show 
tests that were expected to fail", action="store_true", default=False) parser.add_option_group(group) group = OptionGroup(parser, "Test Execution") group.add_option("", "--path", dest="path", help="Additional paths to add to testing environment", action="append", type=str, default=[]) group.add_option("", "--vg", dest="useValgrind", help="Run tests under valgrind", action="store_true", default=False) group.add_option("", "--vg-leak", dest="valgrindLeakCheck", help="Check for memory leaks under valgrind", action="store_true", default=False) group.add_option("", "--vg-arg", dest="valgrindArgs", metavar="ARG", help="Specify an extra argument for valgrind", type=str, action="append", default=[]) group.add_option("", "--time-tests", dest="timeTests", help="Track elapsed wall time for each test", action="store_true", default=False) group.add_option("", "--no-execute", dest="noExecute", help="Don't execute any tests (assume PASS)", action="store_true", default=False) group.add_option("", "--xunit-xml-output", dest="xunit_output_file", help=("Write XUnit-compatible XML test reports to the" " specified file"), default=None) group.add_option("", "--timeout", dest="maxIndividualTestTime", help="Maximum time to spend running a single test (in seconds)." "0 means no time limit. 
[Default: 0]", type=int, default=None) parser.add_option_group(group) group = OptionGroup(parser, "Test Selection") group.add_option("", "--max-tests", dest="maxTests", metavar="N", help="Maximum number of tests to run", action="store", type=int, default=None) group.add_option("", "--max-time", dest="maxTime", metavar="N", help="Maximum time to spend testing (in seconds)", action="store", type=float, default=None) group.add_option("", "--shuffle", dest="shuffle", help="Run tests in random order", action="store_true", default=False) group.add_option("-i", "--incremental", dest="incremental", help="Run modified and failing tests first (updates " "mtimes)", action="store_true", default=False) group.add_option("", "--filter", dest="filter", metavar="REGEX", help=("Only run tests with paths matching the given " "regular expression"), action="store", default=None) parser.add_option_group(group) group = OptionGroup(parser, "Debug and Experimental Options") group.add_option("", "--debug", dest="debug", help="Enable debugging (for 'lit' development)", action="store_true", default=False) group.add_option("", "--show-suites", dest="showSuites", help="Show discovered test suites", action="store_true", default=False) group.add_option("", "--show-tests", dest="showTests", help="Show all discovered tests", action="store_true", default=False) group.add_option("", "--use-processes", dest="useProcesses", help="Run tests in parallel with processes (not threads)", action="store_true", default=useProcessesIsDefault) group.add_option("", "--use-threads", dest="useProcesses", help="Run tests in parallel with threads (not processes)", action="store_false", default=useProcessesIsDefault) parser.add_option_group(group) (opts, args) = parser.parse_args() if opts.show_version: print("lit %s" % (lit.__version__,)) return if not args: parser.error('No inputs specified') if opts.numThreads is None:
JianpingZeng/xcc
[ 36, 3, 36, 2, 1484539704 ]
def home(request): """ Displays a list of messages to be translated """ def fix_nls(in_, out_): """Fixes submitted translations by filtering carriage returns and pairing newlines at the begging and end of the translated string with the original """ if 0 == len(in_) or 0 == len(out_): return out_ if "\r" in out_ and "\r" not in in_: out_ = out_.replace("\r", '') if "\n" == in_[0] and "\n" != out_[0]: out_ = "\n" + out_ elif "\n" != in_[0] and "\n" == out_[0]: out_ = out_.lstrip() if "\n" == in_[-1] and "\n" != out_[-1]: out_ = out_ + "\n" elif "\n" != in_[-1] and "\n" == out_[-1]: out_ = out_.rstrip() return out_ storage = get_storage(request) version = rosetta.get_version(True) if storage.has('rosetta_i18n_fn'): rosetta_i18n_fn = storage.get('rosetta_i18n_fn') rosetta_i18n_app = get_app_name(rosetta_i18n_fn) rosetta_i18n_lang_code = storage.get('rosetta_i18n_lang_code') rosetta_i18n_lang_bidi = rosetta_i18n_lang_code.split('-')[0] in settings.LANGUAGES_BIDI rosetta_i18n_write = storage.get('rosetta_i18n_write', True) if rosetta_i18n_write: rosetta_i18n_pofile = pofile(rosetta_i18n_fn, wrapwidth=rosetta_settings.POFILE_WRAP_WIDTH) for entry in rosetta_i18n_pofile: entry.md5hash = hashlib.md5( entry.msgid.encode("utf8") + entry.msgstr.encode("utf8") + (entry.msgctxt and entry.msgctxt.encode("utf8") or "") ).hexdigest() else: rosetta_i18n_pofile = storage.get('rosetta_i18n_pofile') if 'filter' in request.GET: if request.GET.get('filter') in ('untranslated', 'translated', 'fuzzy', 'all'): filter_ = request.GET.get('filter') storage.set('rosetta_i18n_filter', filter_) return HttpResponseRedirect(reverse('rosetta-home')) rosetta_i18n_filter = storage.get('rosetta_i18n_filter', 'all') if '_next' in request.POST: rx = re.compile(r'^m_([0-9a-f]+)') rx_plural = re.compile(r'^m_([0-9a-f]+)_([0-9]+)') file_change = False for key, value in request.POST.items(): md5hash = None plural_id = None if rx_plural.match(key): md5hash = str(rx_plural.match(key).groups()[0]) # polib 
parses .po files into unicode strings, but # doesn't bother to convert plural indexes to int, # so we need unicode here. plural_id = unicode(rx_plural.match(key).groups()[1]) elif rx.match(key): md5hash = str(rx.match(key).groups()[0]) if md5hash is not None: entry = rosetta_i18n_pofile.find(md5hash, 'md5hash') # If someone did a makemessage, some entries might # have been removed, so we need to check. if entry: old_msgstr = entry.msgstr if plural_id is not None: #plural_string = fix_nls(entry.msgstr_plural[plural_id], value) plural_string = fix_nls(entry.msgid_plural, value) entry.msgstr_plural[plural_id] = plural_string else: entry.msgstr = fix_nls(entry.msgid, value) is_fuzzy = bool(request.POST.get('f_%s' % md5hash, False)) old_fuzzy = 'fuzzy' in entry.flags if old_fuzzy and not is_fuzzy: entry.flags.remove('fuzzy') elif not old_fuzzy and is_fuzzy: entry.flags.append('fuzzy') file_change = True if old_msgstr != value or old_fuzzy != is_fuzzy: entry_changed.send(sender=entry, user=request.user, old_msgstr=old_msgstr, old_fuzzy=old_fuzzy, pofile=rosetta_i18n_fn, language_code=rosetta_i18n_lang_code, ) else: storage.set('rosetta_last_save_error', True) if file_change and rosetta_i18n_write: try: # Provide defaults in case authorization is not required. 
request.user.first_name = getattr(request.user, 'first_name', 'Anonymous') request.user.last_name = getattr(request.user, 'last_name', 'User') request.user.email = getattr(request.user, 'email', 'anonymous@user.tld') rosetta_i18n_pofile.metadata['Last-Translator'] = unicodedata.normalize('NFKD', u"%s %s <%s>" % (request.user.first_name, request.user.last_name, request.user.email)).encode('ascii', 'ignore') rosetta_i18n_pofile.metadata['X-Translated-Using'] = u"django-rosetta %s" % rosetta.get_version(False) rosetta_i18n_pofile.metadata['PO-Revision-Date'] = datetime.datetime.now().strftime('%Y-%m-%d %H:%M%z') except UnicodeDecodeError: pass try: rosetta_i18n_pofile.save() po_filepath, ext = os.path.splitext(rosetta_i18n_fn) save_as_mo_filepath = po_filepath + '.mo' rosetta_i18n_pofile.save_as_mofile(save_as_mo_filepath) post_save.send(sender=None, language_code=rosetta_i18n_lang_code, request=request) # Try auto-reloading via the WSGI daemon mode reload mechanism if rosetta_settings.WSGI_AUTO_RELOAD and \ 'mod_wsgi.process_group' in request.environ and \ request.environ.get('mod_wsgi.process_group', None) and \ 'SCRIPT_FILENAME' in request.environ and \ int(request.environ.get('mod_wsgi.script_reloading', '0')): try: os.utime(request.environ.get('SCRIPT_FILENAME'), None) except OSError: pass # Try auto-reloading via uwsgi daemon reload mechanism if rosetta_settings.UWSGI_AUTO_RELOAD: try: import uwsgi # pretty easy right? 
uwsgi.reload() except: # we may not be running under uwsgi :P pass except: storage.set('rosetta_i18n_write', False) storage.set('rosetta_i18n_pofile', rosetta_i18n_pofile) # Retain query arguments query_arg = '?_next=1' if 'query' in request.GET or 'query' in request.POST: query_arg += '&query=%s' % request.REQUEST.get('query') if 'page' in request.GET: query_arg += '&page=%d&_next=1' % int(request.GET.get('page')) return HttpResponseRedirect(reverse('rosetta-home') + iri_to_uri(query_arg)) rosetta_i18n_lang_name = _(storage.get('rosetta_i18n_lang_name')) rosetta_i18n_lang_code = storage.get('rosetta_i18n_lang_code') if 'query' in request.REQUEST and request.REQUEST.get('query', '').strip(): query = request.REQUEST.get('query').strip() rx = re.compile(re.escape(query), re.IGNORECASE) paginator = Paginator([e for e in rosetta_i18n_pofile if not e.obsolete and rx.search(smart_unicode(e.msgstr) + smart_unicode(e.msgid) + u''.join([o[0] for o in e.occurrences]))], rosetta_settings.MESSAGES_PER_PAGE) else: if rosetta_i18n_filter == 'untranslated': paginator = Paginator(rosetta_i18n_pofile.untranslated_entries(), rosetta_settings.MESSAGES_PER_PAGE) elif rosetta_i18n_filter == 'translated': paginator = Paginator(rosetta_i18n_pofile.translated_entries(), rosetta_settings.MESSAGES_PER_PAGE) elif rosetta_i18n_filter == 'fuzzy': paginator = Paginator([e for e in rosetta_i18n_pofile.fuzzy_entries() if not e.obsolete], rosetta_settings.MESSAGES_PER_PAGE) else: paginator = Paginator([e for e in rosetta_i18n_pofile if not e.obsolete], rosetta_settings.MESSAGES_PER_PAGE) if 'page' in request.GET and int(request.GET.get('page')) <= paginator.num_pages and int(request.GET.get('page')) > 0: page = int(request.GET.get('page')) else: page = 1 if '_next' in request.GET or '_next' in request.POST: page += 1 if page > paginator.num_pages: page = 1 query_arg = '?page=%d' % page return HttpResponseRedirect(reverse('rosetta-home') + iri_to_uri(query_arg)) rosetta_messages = 
paginator.page(page).object_list if rosetta_settings.MAIN_LANGUAGE and rosetta_settings.MAIN_LANGUAGE != rosetta_i18n_lang_code: main_language = None for language in settings.LANGUAGES: if language[0] == rosetta_settings.MAIN_LANGUAGE: main_language = _(language[1]) break fl = ("/%s/" % rosetta_settings.MAIN_LANGUAGE).join(rosetta_i18n_fn.split("/%s/" % rosetta_i18n_lang_code)) po = pofile(fl) main_messages = [] for message in rosetta_messages: message.main_lang = po.find(message.msgid).msgstr needs_pagination = paginator.num_pages > 1 if needs_pagination: if paginator.num_pages >= 10: page_range = pagination_range(1, paginator.num_pages, page) else: page_range = range(1, 1 + paginator.num_pages) try: ADMIN_MEDIA_PREFIX = settings.ADMIN_MEDIA_PREFIX ADMIN_IMAGE_DIR = ADMIN_MEDIA_PREFIX + 'img/admin/' except AttributeError: ADMIN_MEDIA_PREFIX = settings.STATIC_URL + 'admin/' ADMIN_IMAGE_DIR = ADMIN_MEDIA_PREFIX + 'img/' ENABLE_TRANSLATION_SUGGESTIONS = rosetta_settings.BING_APP_ID and rosetta_settings.ENABLE_TRANSLATION_SUGGESTIONS BING_APP_ID = rosetta_settings.BING_APP_ID MESSAGES_SOURCE_LANGUAGE_NAME = rosetta_settings.MESSAGES_SOURCE_LANGUAGE_NAME MESSAGES_SOURCE_LANGUAGE_CODE = rosetta_settings.MESSAGES_SOURCE_LANGUAGE_CODE if storage.has('rosetta_last_save_error'): storage.delete('rosetta_last_save_error') rosetta_last_save_error = True return render_to_response('pofile.html', locals(), context_instance=RequestContext(request)) else: return list_languages(request, do_session_warn=True)
20tab/upy
[ 9, 6, 9, 8, 1342021737 ]
def download_file(request): import zipfile from StringIO import StringIO storage = get_storage(request) # original filename rosetta_i18n_fn = storage.get('rosetta_i18n_fn', None) # in-session modified catalog rosetta_i18n_pofile = storage.get('rosetta_i18n_pofile', None) # language code rosetta_i18n_lang_code = storage.get('rosetta_i18n_lang_code', None) if not rosetta_i18n_lang_code or not rosetta_i18n_pofile or not rosetta_i18n_fn: return HttpResponseRedirect(reverse('rosetta-home')) try: if len(rosetta_i18n_fn.split('/')) >= 5: offered_fn = '_'.join(rosetta_i18n_fn.split('/')[-5:]) else: offered_fn = rosetta_i18n_fn.split('/')[-1] po_fn = str(rosetta_i18n_fn.split('/')[-1]) mo_fn = str(po_fn.replace('.po', '.mo')) # not so smart, huh zipdata = StringIO() zipf = zipfile.ZipFile(zipdata, mode="w") zipf.writestr(po_fn, unicode(rosetta_i18n_pofile).encode("utf8")) zipf.writestr(mo_fn, rosetta_i18n_pofile.to_binary()) zipf.close() zipdata.seek(0) response = HttpResponse(zipdata.read()) response['Content-Disposition'] = 'attachment; filename=%s.%s.zip' % (offered_fn, rosetta_i18n_lang_code) response['Content-Type'] = 'application/x-zip' return response except Exception: return HttpResponseRedirect(reverse('rosetta-home'))
20tab/upy
[ 9, 6, 9, 8, 1342021737 ]
def list_languages(request, do_session_warn=False): """ Lists the languages for the current project, the gettext catalog files that can be translated and their translation progress """ storage = get_storage(request) languages = [] if 'filter' in request.GET: if request.GET.get('filter') in ('project', 'third-party', 'django', 'all'): filter_ = request.GET.get('filter') storage.set('rosetta_i18n_catalog_filter', filter_) return HttpResponseRedirect(reverse('rosetta-pick-file')) rosetta_i18n_catalog_filter = storage.get('rosetta_i18n_catalog_filter', 'project') third_party_apps = rosetta_i18n_catalog_filter in ('all', 'third-party') django_apps = rosetta_i18n_catalog_filter in ('all', 'django') project_apps = rosetta_i18n_catalog_filter in ('all', 'project') has_pos = False for language in settings.LANGUAGES: pos = find_pos(language[0], project_apps=project_apps, django_apps=django_apps, third_party_apps=third_party_apps) has_pos = has_pos or len(pos) languages.append( (language[0], _(language[1]), [(get_app_name(l), os.path.realpath(l), pofile(l)) for l in pos], ) ) try: ADMIN_MEDIA_PREFIX = settings.ADMIN_MEDIA_PREFIX except AttributeError: ADMIN_MEDIA_PREFIX = settings.STATIC_URL + 'admin/' version = rosetta.get_version(True) do_session_warn = do_session_warn and 'SessionRosettaStorage' in rosetta_settings.STORAGE_CLASS and 'signed_cookies' in settings.SESSION_ENGINE return render_to_response('rosetta_languages.html', locals(), context_instance=RequestContext(request))
20tab/upy
[ 9, 6, 9, 8, 1342021737 ]
def get_app_name(path): app = path.split("/locale")[0].split("/")[-1] return app
20tab/upy
[ 9, 6, 9, 8, 1342021737 ]
def set_test_params(self): self.num_nodes = 2
nlgcoin/guldencoin-official
[ 136, 50, 136, 34, 1439462804 ]
def main(): print('Parsing rfc file...') item = None items = [] out = open('irc3/_rfc.py', 'w') with open('irc3/rfc1459.txt') as fd: for line in fd: line = line.replace('<host> * <host>', '<host> * <host1>') line = line.replace('<# visible>', '<visible>') line = line.replace('<H|G>[*][@|+]', '<modes>') line = line.replace('<nick!user|*!*>@<host|server>', '<mask>') match = _re_num.search(line) if match is not None: if item: items.append((int(item['num']), item)) item = defaultdict(list) match = match.groupdict() if '_' in match: match.pop('_') item.update(match) match = _re_mask.search(line) if match is not None: item['mask'].append(match.groupdict()['_']) _re_sub = re.compile('(?P<m><[^>]+>)') out.write('''
gawel/irc3
[ 201, 47, 201, 4, 1385813388 ]
def repl(v): v = v.lower() v = v.replace('nickname', 'nick') v = v.replace('nicks', 'nicknames') for c in '!@*': v = v.replace(c, '') for c in '| ': v = v.replace(c, '_') v = v.strip(' _') if v.endswith('_name'): v = v[:-5] if v == 'client_ip_address_in_dot_form': v = 'clientip' if v == 'integer': for k in 'xyz': if k not in params: v = k break if v == 'command': v = 'cmd' if v == 'real': v = 'realname' if v == 'name' and 'nick' not in params: v = 'nick' if v == 'user': if 'nick' not in params and num not in ('352',): v = 'nick' else: v = 'username' return v
gawel/irc3
[ 201, 47, 201, 4, 1385813388 ]
def msub(m): v = m.groupdict()['m'].strip('<>') v = repl(v) params.append(v) return '(?P<%s>\S+)' % v
gawel/irc3
[ 201, 47, 201, 4, 1385813388 ]
def sha3(seed): return sha3_256(bytes(seed)).digest()
HydraChain/hydrachain
[ 364, 107, 364, 37, 1440942763 ]
def DEBUG(*args, **kargs): print(FAIL + repr(args) + repr(kargs) + ENDC)
HydraChain/hydrachain
[ 364, 107, 364, 37, 1440942763 ]
def cstr(num, txt): if isinstance(num, bytes): num = big_endian_to_int(num) return '%s%s%s' % (colors[num % len(colors)], txt, ENDC)
HydraChain/hydrachain
[ 364, 107, 364, 37, 1440942763 ]
def phx(x): return x.encode('hex')[:8]
HydraChain/hydrachain
[ 364, 107, 364, 37, 1440942763 ]
def password(self): raise AttributeError('password is not a readable attribute')
abdesslem/CTF
[ 47, 18, 47, 11, 1445619879 ]
def password(self, password): self.password_hash = generate_password_hash(password)
abdesslem/CTF
[ 47, 18, 47, 11, 1445619879 ]
def __repr__(self): return '<User %r>' % self.username
abdesslem/CTF
[ 47, 18, 47, 11, 1445619879 ]
def test_scan_code_push_event(self): from wechatpy.events import ScanCodePushEvent xml = """<xml> <ToUserName><![CDATA[gh_e136c6e50636]]></ToUserName> <FromUserName><![CDATA[oMgHVjngRipVsoxg6TuX3vz6glDg]]></FromUserName> <CreateTime>1408090502</CreateTime> <MsgType><![CDATA[event]]></MsgType> <Event><![CDATA[scancode_push]]></Event> <EventKey><![CDATA[6]]></EventKey> <ScanCodeInfo><ScanType><![CDATA[qrcode]]></ScanType> <ScanResult><![CDATA[1]]></ScanResult> </ScanCodeInfo> </xml>""" event = parse_message(xml) self.assertTrue(isinstance(event, ScanCodePushEvent)) self.assertEqual("qrcode", event.scan_type) self.assertEqual("1", event.scan_result)
jxtech/wechatpy
[ 3364, 745, 3364, 44, 1410527008 ]
def test_pic_sysphoto_event(self):
    """pic_sysphoto events list the MD5 of each photo taken via the menu."""
    from wechatpy.events import PicSysPhotoEvent

    xml = """<xml>
    <ToUserName><![CDATA[gh_e136c6e50636]]></ToUserName>
    <FromUserName><![CDATA[oMgHVjngRipVsoxg6TuX3vz6glDg]]></FromUserName>
    <CreateTime>1408090651</CreateTime>
    <MsgType><![CDATA[event]]></MsgType>
    <Event><![CDATA[pic_sysphoto]]></Event>
    <EventKey><![CDATA[6]]></EventKey>
    <SendPicsInfo><Count>1</Count>
    <PicList>
    <item>
    <PicMd5Sum><![CDATA[1b5f7c23b5bf75682a53e7b6d163e185]]></PicMd5Sum>
    </item>
    </PicList>
    </SendPicsInfo>
    </xml>"""

    parsed = parse_message(xml)
    self.assertIsInstance(parsed, PicSysPhotoEvent)
    self.assertEqual(1, parsed.count)
    first_pic = parsed.pictures[0]
    self.assertEqual("1b5f7c23b5bf75682a53e7b6d163e185", first_pic["PicMd5Sum"])
jxtech/wechatpy
[ 3364, 745, 3364, 44, 1410527008 ]
def test_pic_wechat_event(self):
    """pic_weixin events list the MD5 of each picture chosen from albums."""
    from wechatpy.events import PicWeChatEvent

    xml = """<xml>
    <ToUserName><![CDATA[gh_e136c6e50636]]></ToUserName>
    <FromUserName><![CDATA[oMgHVjngRipVsoxg6TuX3vz6glDg]]></FromUserName>
    <CreateTime>1408090816</CreateTime>
    <MsgType><![CDATA[event]]></MsgType>
    <Event><![CDATA[pic_weixin]]></Event>
    <EventKey><![CDATA[6]]></EventKey>
    <SendPicsInfo><Count>1</Count>
    <PicList>
    <item>
    <PicMd5Sum><![CDATA[5a75aaca956d97be686719218f275c6b]]></PicMd5Sum>
    </item>
    </PicList>
    </SendPicsInfo>
    </xml>"""

    parsed = parse_message(xml)
    self.assertIsInstance(parsed, PicWeChatEvent)
    self.assertEqual(1, parsed.count)
    first_pic = parsed.pictures[0]
    self.assertEqual("5a75aaca956d97be686719218f275c6b", first_pic["PicMd5Sum"])
jxtech/wechatpy
[ 3364, 745, 3364, 44, 1410527008 ]
def test_merchant_order_event(self):
    """merchant_order events expose order id/status, product id and SKU."""
    from wechatpy.events import MerchantOrderEvent

    xml = """<xml>
    <ToUserName><![CDATA[weixin_media1]]></ToUserName>
    <FromUserName><![CDATA[oDF3iYyVlek46AyTBbMRVV8VZVlI]]></FromUserName>
    <CreateTime>1398144192</CreateTime>
    <MsgType><![CDATA[event]]></MsgType>
    <Event><![CDATA[merchant_order]]></Event>
    <OrderId><![CDATA[test_order_id]]></OrderId>
    <OrderStatus>2</OrderStatus>
    <ProductId><![CDATA[test_product_id]]></ProductId>
    <SkuInfo><![CDATA[10001:1000012;10002:100021]]></SkuInfo>
    </xml>"""

    parsed = parse_message(xml)
    self.assertIsInstance(parsed, MerchantOrderEvent)
    self.assertEqual("test_order_id", parsed.order_id)
    self.assertEqual(2, parsed.order_status)
    self.assertEqual("test_product_id", parsed.product_id)
    self.assertEqual("10001:1000012;10002:100021", parsed.sku_info)
jxtech/wechatpy
[ 3364, 745, 3364, 44, 1410527008 ]
def test_kf_close_session_event(self):
    """kf_close_session events carry the customer-service account name."""
    from wechatpy.events import KfCloseSessionEvent

    xml = """<xml>
    <ToUserName><![CDATA[touser]]></ToUserName>
    <FromUserName><![CDATA[fromuser]]></FromUserName>
    <CreateTime>1399197672</CreateTime>
    <MsgType><![CDATA[event]]></MsgType>
    <Event><![CDATA[kf_close_session]]></Event>
    <KfAccount><![CDATA[test1@test]]></KfAccount>
    </xml>"""

    parsed = parse_message(xml)
    self.assertIsInstance(parsed, KfCloseSessionEvent)
    self.assertEqual("test1@test", parsed.account)
jxtech/wechatpy
[ 3364, 745, 3364, 44, 1410527008 ]
def test_template_send_job_finish_event(self):
    """TEMPLATESENDJOBFINISH events carry the message id and send status."""
    from wechatpy.events import TemplateSendJobFinishEvent

    xml = """<xml>
    <ToUserName><![CDATA[touser]]></ToUserName>
    <FromUserName><![CDATA[fromuser]]></FromUserName>
    <CreateTime>1395658920</CreateTime>
    <MsgType><![CDATA[event]]></MsgType>
    <Event><![CDATA[TEMPLATESENDJOBFINISH]]></Event>
    <MsgID>200163836</MsgID>
    <Status><![CDATA[success]]></Status>
    </xml>"""

    parsed = parse_message(xml)
    self.assertIsInstance(parsed, TemplateSendJobFinishEvent)
    self.assertEqual(200163836, parsed.id)
    self.assertEqual("success", parsed.status)
jxtech/wechatpy
[ 3364, 745, 3364, 44, 1410527008 ]
def test_template_subscribe_msg_change_event(self):
    """subscribe_msg_change_event lists per-template subscription changes."""
    from wechatpy.events import SubscribeMsgChangeEvent

    xml = """<xml>
    <ToUserName><![CDATA[gh_123456789abc]]></ToUserName>
    <FromUserName><![CDATA[otFpruAK8D-E6EfStSYonYSBZ8_4]]></FromUserName>
    <CreateTime>1610969440</CreateTime>
    <MsgType><![CDATA[event]]></MsgType>
    <Event><![CDATA[subscribe_msg_change_event]]></Event>
    <SubscribeMsgChangeEvent>
    <List>
    <TemplateId><![CDATA[VRR0UEO9VJOLs0MHlU0OilqX6MVFDwH3_3gz3Oc0NIc]]></TemplateId>
    <SubscribeStatusString><![CDATA[reject]]></SubscribeStatusString>
    </List>
    </SubscribeMsgChangeEvent>
    </xml>"""

    parsed = parse_message(xml)
    self.assertIsInstance(parsed, SubscribeMsgChangeEvent)
    self.assertEqual(1, len(parsed.subscribes))
    entry = parsed.subscribes[0]
    self.assertEqual("VRR0UEO9VJOLs0MHlU0OilqX6MVFDwH3_3gz3Oc0NIc", entry["TemplateId"])
    self.assertEqual("reject", entry["SubscribeStatusString"])
jxtech/wechatpy
[ 3364, 745, 3364, 44, 1410527008 ]