language
stringclasses
1 value
repo
stringclasses
346 values
path
stringlengths
6
201
class_span
dict
source
stringlengths
21
2.38M
target
stringlengths
1
96
python
doocs__leetcode
solution/2400-2499/2490.Circular Sentence/Solution.py
{ "start": 0, "end": 197 }
class ____: def isCircularSentence(self, sentence: str) -> bool: ss = sentence.split() n = len(ss) return all(s[-1] == ss[(i + 1) % n][0] for i, s in enumerate(ss))
Solution
python
huggingface__transformers
src/transformers/models/sew/modeling_sew.py
{ "start": 41072, "end": 46010 }
class ____(SEWPreTrainedModel): def __init__(self, config): super().__init__(config) if hasattr(config, "add_adapter") and config.add_adapter: raise ValueError( "Sequence classification does not support the use of SEW adapters (config.add_adapter=True)" ) self.sew = SEWModel(config) num_layers = config.num_hidden_layers + 1 # transformer layers + input embeddings if config.use_weighted_layer_sum: self.layer_weights = nn.Parameter(torch.ones(num_layers) / num_layers) self.projector = nn.Linear(config.hidden_size, config.classifier_proj_size) self.classifier = nn.Linear(config.classifier_proj_size, config.num_labels) # Initialize weights and apply final processing self.post_init() def freeze_feature_encoder(self): """ Calling this function will disable the gradient computation for the feature encoder so that its parameter will not be updated during training. """ self.sew.feature_extractor._freeze_parameters() def freeze_base_model(self): """ Calling this function will disable the gradient computation for the base model so that its parameters will not be updated during training. Only the classification head will be updated. """ for param in self.sew.parameters(): param.requires_grad = False @auto_docstring def forward( self, input_values: Optional[torch.Tensor], attention_mask: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, labels: Optional[torch.Tensor] = None, ) -> Union[tuple, SequenceClassifierOutput]: r""" input_values (`torch.FloatTensor` of shape `(batch_size, sequence_length)`): Float values of input raw speech waveform. Values can be obtained by loading a `.flac` or `.wav` audio file into an array of type `list[float]`, a `numpy.ndarray` or a `torch.Tensor`, *e.g.* via the torchcodec library (`pip install torchcodec`) or the soundfile library (`pip install soundfile`). 
To prepare the array into `input_values`, the [`AutoProcessor`] should be used for padding and conversion into a tensor of type `torch.FloatTensor`. See [`SEWProcessor.__call__`] for details. labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict output_hidden_states = True if self.config.use_weighted_layer_sum else output_hidden_states outputs = self.sew( input_values, attention_mask=attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) if self.config.use_weighted_layer_sum: hidden_states = outputs[_HIDDEN_STATES_START_POSITION] hidden_states = torch.stack(hidden_states, dim=1) norm_weights = nn.functional.softmax(self.layer_weights, dim=-1) hidden_states = (hidden_states * norm_weights.view(-1, 1, 1)).sum(dim=1) else: hidden_states = outputs[0] hidden_states = self.projector(hidden_states) if attention_mask is None: pooled_output = hidden_states.mean(dim=1) else: padding_mask = self._get_feature_vector_attention_mask(hidden_states.shape[1], attention_mask) expand_padding_mask = padding_mask.unsqueeze(-1).repeat(1, 1, hidden_states.shape[2]) hidden_states[~expand_padding_mask] = 0.0 pooled_output = hidden_states.sum(dim=1) / padding_mask.sum(dim=1).view(-1, 1) logits = self.classifier(pooled_output) loss = None if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.config.num_labels), labels.view(-1)) if not return_dict: output = (logits,) + outputs[_HIDDEN_STATES_START_POSITION:] return ((loss,) + output) if loss is not None else output return SequenceClassifierOutput( loss=loss, 
logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) __all__ = ["SEWForCTC", "SEWForSequenceClassification", "SEWModel", "SEWPreTrainedModel"]
SEWForSequenceClassification
python
scikit-image__scikit-image
tests/skimage/graph/test_connect.py
{ "start": 196, "end": 2367 }
class ____(mcp.MCP_Connect): def _reset(self): """Reset the id map.""" mcp.MCP_Connect._reset(self) self._conn = {} self._bestconn = {} def create_connection(self, id1, id2, pos1, pos2, cost1, cost2): # Process data hash = min(id1, id2), max(id1, id2) val = min(pos1, pos2), max(pos1, pos2) cost = min(cost1, cost2) # Add to total list self._conn.setdefault(hash, []).append(val) # Keep track of connection with lowest cost curcost = self._bestconn.get(hash, (np.inf,))[0] if cost < curcost: self._bestconn[hash] = (cost,) + val def test_connections(): # Create MCP object with three seed points mcp = MCP(a) costs, traceback = mcp.find_costs([(1, 1), (7, 7), (1, 7)]) # Test that all three seed points are connected connections = set(mcp._conn.keys()) assert (0, 1) in connections assert (1, 2) in connections assert (0, 2) in connections # Test that any two neighbors have only been connected once for position_tuples in mcp._conn.values(): n1 = len(position_tuples) n2 = len(set(position_tuples)) assert n1 == n2 # For seed 0 and 1 cost, pos1, pos2 = mcp._bestconn[(0, 1)] # Test meeting points assert (pos1, pos2) == ((3, 3), (4, 4)) # Test the whole path path = mcp.traceback(pos1) + list(reversed(mcp.traceback(pos2))) assert_array_equal(path, [(1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6), (7, 7)]) # For seed 1 and 2 cost, pos1, pos2 = mcp._bestconn[(1, 2)] # Test meeting points assert (pos1, pos2) == ((3, 7), (4, 7)) # Test the whole path path = mcp.traceback(pos1) + list(reversed(mcp.traceback(pos2))) assert_array_equal(path, [(1, 7), (2, 7), (3, 7), (4, 7), (5, 7), (6, 7), (7, 7)]) # For seed 0 and 2 cost, pos1, pos2 = mcp._bestconn[(0, 2)] # Test meeting points assert (pos1, pos2) == ((1, 3), (1, 4)) # Test the whole path path = mcp.traceback(pos1) + list(reversed(mcp.traceback(pos2))) assert_array_equal(path, [(1, 1), (1, 2), (1, 3), (1, 4), (1, 5), (1, 6), (1, 7)])
MCP
python
dagster-io__dagster
python_modules/dagster-graphql/dagster_graphql/schema/auto_materialize_asset_evaluations.py
{ "start": 9923, "end": 10181 }
class ____(graphene.ObjectType): message = graphene.NonNull(graphene.String) class Meta: interfaces = (GrapheneError,) name = "AutoMaterializeAssetEvaluationNeedsMigrationError"
GrapheneAutoMaterializeAssetEvaluationNeedsMigrationError
python
graphql-python__graphene
graphene/validation/tests/test_disable_introspection.py
{ "start": 144, "end": 842 }
class ____(ObjectType): name = String(required=True) @staticmethod def resolve_name(root, info): return "Hello world!" schema = Schema(query=Query) def run_query(query: str): document = parse(query) return validate( schema=schema.graphql_schema, document_ast=document, rules=(DisableIntrospection,), ) def test_disallows_introspection_queries(): errors = run_query("{ __schema { queryType { name } } }") assert len(errors) == 1 assert errors[0].message == "Cannot query '__schema': introspection is disabled." def test_allows_non_introspection_queries(): errors = run_query("{ name }") assert len(errors) == 0
Query
python
pennersr__django-allauth
allauth/socialaccount/providers/cilogon/provider.py
{ "start": 509, "end": 1694 }
class ____(OAuth2Provider): id = "cilogon" name = "CILogon" account_class = CILogonAccount oauth2_adapter_class = CILogonOAuth2Adapter def get_default_scope(self): scope = [Scope.PROFILE, Scope.USERINFO, Scope.OPENID] if QUERY_EMAIL: scope.append(Scope.EMAIL) return scope def get_auth_params_from_request(self, request, action): ret = super().get_auth_params_from_request(request, action) if action == AuthAction.REAUTHENTICATE: ret["prompt"] = "select_account consent" return ret def extract_uid(self, data): return str(data.get("sub")) def extract_common_fields(self, data): return dict( email=data.get("email"), last_name=data.get("family_name"), first_name=data.get("given_name"), eppn=data.get("eppn"), ) def extract_email_addresses(self, data): ret = [] email = data.get("email") if email and data.get("verified_email"): ret.append(EmailAddress(email=email, verified=True, primary=True)) return ret provider_classes = [CILogonProvider]
CILogonProvider
python
lepture__authlib
authlib/oauth2/rfc7523/jwt_bearer.py
{ "start": 479, "end": 6970 }
class ____(BaseGrant, TokenEndpointMixin): GRANT_TYPE = JWT_BEARER_GRANT_TYPE #: Options for verifying JWT payload claims. Developers MAY #: overwrite this constant to create a more strict options. CLAIMS_OPTIONS = { "iss": {"essential": True}, "aud": {"essential": True}, "exp": {"essential": True}, } # A small allowance of time, typically no more than a few minutes, # to account for clock skew. The default is 60 seconds. LEEWAY = 60 @staticmethod def sign( key, issuer, audience, subject=None, issued_at=None, expires_at=None, claims=None, **kwargs, ): return sign_jwt_bearer_assertion( key, issuer, audience, subject, issued_at, expires_at, claims, **kwargs ) def process_assertion_claims(self, assertion): """Extract JWT payload claims from request "assertion", per `Section 3.1`_. :param assertion: assertion string value in the request :return: JWTClaims :raise: InvalidGrantError .. _`Section 3.1`: https://tools.ietf.org/html/rfc7523#section-3.1 """ try: claims = jwt.decode( assertion, self.resolve_public_key, claims_options=self.CLAIMS_OPTIONS ) claims.validate(leeway=self.LEEWAY) except JoseError as e: log.debug("Assertion Error: %r", e) raise InvalidGrantError(description=e.description) from e return claims def resolve_public_key(self, headers, payload): client = self.resolve_issuer_client(payload["iss"]) return self.resolve_client_key(client, headers, payload) def validate_token_request(self): """The client makes a request to the token endpoint by sending the following parameters using the "application/x-www-form-urlencoded" format per `Section 2.1`_: grant_type REQUIRED. Value MUST be set to "urn:ietf:params:oauth:grant-type:jwt-bearer". assertion REQUIRED. Value MUST contain a single JWT. scope OPTIONAL. The following example demonstrates an access token request with a JWT as an authorization grant: .. 
code-block:: http POST /token.oauth2 HTTP/1.1 Host: as.example.com Content-Type: application/x-www-form-urlencoded grant_type=urn%3Aietf%3Aparams%3Aoauth%3Agrant-type%3Ajwt-bearer &assertion=eyJhbGciOiJFUzI1NiIsImtpZCI6IjE2In0. eyJpc3Mi[...omitted for brevity...]. J9l-ZhwP[...omitted for brevity...] .. _`Section 2.1`: https://tools.ietf.org/html/rfc7523#section-2.1 """ assertion = self.request.form.get("assertion") if not assertion: raise InvalidRequestError("Missing 'assertion' in request") claims = self.process_assertion_claims(assertion) client = self.resolve_issuer_client(claims["iss"]) log.debug("Validate token request of %s", client) if not client.check_grant_type(self.GRANT_TYPE): raise UnauthorizedClientError( f"The client is not authorized to use 'grant_type={self.GRANT_TYPE}'" ) self.request.client = client self.validate_requested_scope() subject = claims.get("sub") if subject: user = self.authenticate_user(subject) if not user: raise InvalidGrantError(description="Invalid 'sub' value in assertion") log.debug("Check client(%s) permission to User(%s)", client, user) if not self.has_granted_permission(client, user): raise InvalidClientError( description="Client has no permission to access user data" ) self.request.user = user def create_token_response(self): """If valid and authorized, the authorization server issues an access token. """ token = self.generate_token( scope=self.request.payload.scope, user=self.request.user, include_refresh_token=False, ) log.debug("Issue token %r to %r", token, self.request.client) self.save_token(token) return 200, token, self.TOKEN_RESPONSE_HEADER def resolve_issuer_client(self, issuer): """Fetch client via "iss" in assertion claims. 
Developers MUST implement this method in subclass, e.g.:: def resolve_issuer_client(self, issuer): return Client.query_by_iss(issuer) :param issuer: "iss" value in assertion :return: Client instance """ raise NotImplementedError() def resolve_client_key(self, client, headers, payload): """Resolve client key to decode assertion data. Developers MUST implement this method in subclass. For instance, there is a "jwks" column on client table, e.g.:: def resolve_client_key(self, client, headers, payload): # from authlib.jose import JsonWebKey key_set = JsonWebKey.import_key_set(client.jwks) return key_set.find_by_kid(headers["kid"]) :param client: instance of OAuth client model :param headers: headers part of the JWT :param payload: payload part of the JWT :return: ``authlib.jose.Key`` instance """ raise NotImplementedError() def authenticate_user(self, subject): """Authenticate user with the given assertion claims. Developers MUST implement it in subclass, e.g.:: def authenticate_user(self, subject): return User.get_by_sub(subject) :param subject: "sub" value in claims :return: User instance """ raise NotImplementedError() def has_granted_permission(self, client, user): """Check if the client has permission to access the given user's resource. Developers MUST implement it in subclass, e.g.:: def has_granted_permission(self, client, user): permission = ClientUserGrant.query(client=client, user=user) return permission.granted :param client: instance of OAuth client model :param user: instance of User model :return: bool """ raise NotImplementedError()
JWTBearerGrant
python
microsoft__pyright
packages/pyright-internal/src/tests/samples/protocol41.py
{ "start": 1237, "end": 1335 }
class ____(Protocol[AnyStr_contra]): def write(self, __b: AnyStr_contra) -> Any: ...
WriteBuffer
python
allegroai__clearml
clearml/backend_api/services/v2_20/tasks.py
{ "start": 127374, "end": 130112 }
class ____(Response): """ Response of tasks.completed endpoint. :param updated: Number of tasks updated (0 or 1) :type updated: int :param fields: Updated fields names and values :type fields: dict :param published: Number of tasks published (0 or 1) :type published: int """ _service = "tasks" _action = "completed" _version = "2.20" _schema = { "definitions": {}, "properties": { "fields": { "additionalProperties": True, "description": "Updated fields names and values", "type": ["object", "null"], }, "published": { "description": "Number of tasks published (0 or 1)", "enum": [0, 1], "type": ["integer", "null"], }, "updated": { "description": "Number of tasks updated (0 or 1)", "enum": [0, 1], "type": ["integer", "null"], }, }, "type": "object", } def __init__( self, updated: Optional[int] = None, fields: Optional[dict] = None, published: Optional[int] = None, **kwargs: Any ) -> None: super(CompletedResponse, self).__init__(**kwargs) self.updated = updated self.fields = fields self.published = published @schema_property("updated") def updated(self) -> Optional[int]: return self._property_updated @updated.setter def updated(self, value: Optional[int]) -> None: if value is None: self._property_updated = None return if isinstance(value, float) and value.is_integer(): value = int(value) self.assert_isinstance(value, "updated", six.integer_types) self._property_updated = value @schema_property("fields") def fields(self) -> Optional[dict]: return self._property_fields @fields.setter def fields(self, value: Optional[dict]) -> None: if value is None: self._property_fields = None return self.assert_isinstance(value, "fields", (dict,)) self._property_fields = value @schema_property("published") def published(self) -> Optional[int]: return self._property_published @published.setter def published(self, value: Optional[int]) -> None: if value is None: self._property_published = None return if isinstance(value, float) and value.is_integer(): value = int(value) 
self.assert_isinstance(value, "published", six.integer_types) self._property_published = value
CompletedResponse
python
wandb__wandb
wandb/apis/public/history.py
{ "start": 636, "end": 4656 }
class ____: """Iterator for scanning complete run history. <!-- lazydoc-ignore-class: internal --> """ def __init__( self, api: public.Api, run: runs.Run, min_step: int, max_step: int, keys: list[str] | None = None, page_size: int = 1000, ): self.run = run self.min_step = min_step self.max_step = max_step self.keys = keys self.page_size = page_size self._api = api # Tell wandb-core to initialize resources to scan the run's history. scan_run_history_init = apb.ScanRunHistoryInit( entity=self.run.entity, project=self.run.project, run_id=self.run.id, keys=self.keys, ) scan_run_history_init_request = apb.ReadRunHistoryRequest( scan_run_history_init=scan_run_history_init ) api_request = apb.ApiRequest( read_run_history_request=scan_run_history_init_request ) response: apb.ApiResponse = self._api._send_api_request(api_request) self._scan_request_id = ( response.read_run_history_response.scan_run_history_init.request_id ) self.scan_offset = 0 self.rows = [] self.keys = keys # Add cleanup hook to clean up resources in wandb-core # when this scan object is deleted. # # Using weakref.finalize ensures that references to objects needed during cleanup # are not garbage collected before being used. 
# see: https://docs.python.org/3/library/weakref.html#comparing-finalizers-with-del-methods weakref.finalize( self, self.cleanup, self._api, self._scan_request_id, ) def __iter__(self): self.scan_offset = 0 self.page_offset = self.min_step self.rows = [] return self def __next__(self): while True: if self.scan_offset < len(self.rows): row = self.rows[self.scan_offset] self.scan_offset += 1 return row if self.page_offset >= self.max_step: raise StopIteration() if self.page_offset >= self.run.lastHistoryStep: raise StopIteration() self._load_next() def _load_next(self): from wandb.proto import wandb_api_pb2 as apb max_step = min(self.page_offset + self.page_size, self.max_step) read_run_history_request = apb.ReadRunHistoryRequest( scan_run_history=apb.ScanRunHistory( min_step=self.page_offset, max_step=max_step, request_id=self._scan_request_id, ), ) api_request = apb.ApiRequest(read_run_history_request=read_run_history_request) response: apb.ApiResponse = self._api._send_api_request(api_request) run_history: apb.RunHistoryResponse = ( response.read_run_history_response.run_history ) self.rows = [ self._convert_history_row_to_dict(row) for row in run_history.history_rows ] self.page_offset += self.page_size self.scan_offset = 0 def _convert_history_row_to_dict(self, history_row): return { item.key: json.loads(item.value_json) for item in history_row.history_items } @staticmethod def cleanup( api: public.Api, request_id: int, ): scan_run_history_cleanup = apb.ScanRunHistoryCleanup( request_id=request_id, ) scan_run_history_cleanup_request = apb.ReadRunHistoryRequest( scan_run_history_cleanup=scan_run_history_cleanup ) with contextlib.suppress(ConnectionResetError): api._send_api_request( apb.ApiRequest( read_run_history_request=scan_run_history_cleanup_request ) )
BetaHistoryScan
python
Textualize__textual
src/textual/css/_style_properties.py
{ "start": 30008, "end": 30366 }
class ____(StringEnumProperty): """Descriptor for overflow styles that forces widgets to refresh scrollbars.""" def _before_refresh(self, obj: StylesBase, value: str | None) -> None: from textual.widget import Widget # Avoid circular import if isinstance(obj.node, Widget): obj.node._refresh_scrollbars()
OverflowProperty
python
kamyu104__LeetCode-Solutions
Python/find-longest-awesome-substring.py
{ "start": 37, "end": 602 }
class ____(object): def longestAwesome(self, s): """ :type s: str :rtype: int """ ALPHABET_SIZE = 10 result, mask, lookup = 0, 0, [len(s)]*(2**ALPHABET_SIZE) lookup[0] = -1 for i, ch in enumerate(s): mask ^= 2**(ord(ch)-ord('0')) if lookup[mask] == len(s): lookup[mask] = i result = max(result, i - lookup[mask]) for d in xrange(ALPHABET_SIZE): result = max(result, i - lookup[mask^(2**d)]) return result
Solution
python
ray-project__ray
python/ray/train/v2/_internal/execution/local_mode/torch.py
{ "start": 1244, "end": 3190 }
class ____(LocalController): def _set_train_fn_utils(self) -> None: world_size = 1 global_rank = 0 local_rank = 0 nproc_per_node = 1 node_rank = 0 if has_torchrun_env(): assert not dist.is_initialized(), "torch.distributed is already initialized" torch.distributed.init_process_group( backend="nccl" if torch.cuda.is_available() else "gloo" ) world_size = torch.distributed.get_world_size() global_rank = torch.distributed.get_rank() local_rank = int(os.environ["LOCAL_RANK"]) if torch.cuda.is_available(): torch.cuda.set_device(local_rank) nproc_per_node = int(os.environ.get("LOCAL_WORLD_SIZE")) node_rank = global_rank // nproc_per_node if world_size != 1: assert ( self.datasets is None or len(self.datasets) == 0 ), "Ray Data is not supported in local mode with multiple workers." set_train_fn_utils( LocalTrainFnUtils( experiment_name=self.experiment_name, world_size=world_size, world_rank=global_rank, local_rank=local_rank, local_world_size=nproc_per_node, node_rank=node_rank, dataset_shards=self.datasets, ) ) def run(self, train_func: Callable[[], None]) -> Result: self._set_train_fn_utils() train_func() train_fn_utils = get_train_fn_utils() assert isinstance(train_fn_utils, LocalTrainFnUtils) result = Result( metrics=train_fn_utils._get_last_metrics(), checkpoint=train_fn_utils.get_checkpoint(), path=None, error=None, ) if dist.is_initialized(): dist.destroy_process_group() return result
LocalTorchController
python
davidhalter__jedi
jedi/inference/compiled/value.py
{ "start": 13462, "end": 13979 }
class ____(ParamNameInterface, AbstractNameDefinition): def __init__(self, compiled_value, name, default): self.parent_context = compiled_value.parent_context self.string_name = name self._default = default def get_kind(self): return Parameter.POSITIONAL_ONLY def to_string(self): string = self.string_name if self._default: string += '=' + self._default return string def infer(self): return NO_VALUES
UnresolvableParamName
python
kamyu104__LeetCode-Solutions
Python/flip-square-submatrix-vertically.py
{ "start": 39, "end": 440 }
class ____(object): def reverseSubmatrix(self, grid, x, y, k): """ :type grid: List[List[int]] :type x: int :type y: int :type k: int :rtype: List[List[int]] """ for i in xrange(k//2): for j in xrange(k): grid[x+i][y+j], grid[x+(k-1-i)][y+j] = grid[x+(k-1-i)][y+j], grid[x+i][y+j] return grid
Solution
python
scikit-learn__scikit-learn
sklearn/mixture/_gaussian_mixture.py
{ "start": 18675, "end": 35776 }
class ____(BaseMixture): """Gaussian Mixture. Representation of a Gaussian mixture model probability distribution. This class allows to estimate the parameters of a Gaussian mixture distribution. Read more in the :ref:`User Guide <gmm>`. .. versionadded:: 0.18 Parameters ---------- n_components : int, default=1 The number of mixture components. covariance_type : {'full', 'tied', 'diag', 'spherical'}, default='full' String describing the type of covariance parameters to use. Must be one of: - 'full': each component has its own general covariance matrix. - 'tied': all components share the same general covariance matrix. - 'diag': each component has its own diagonal covariance matrix. - 'spherical': each component has its own single variance. For an example of using `covariance_type`, refer to :ref:`sphx_glr_auto_examples_mixture_plot_gmm_selection.py`. tol : float, default=1e-3 The convergence threshold. EM iterations will stop when the lower bound average gain is below this threshold. reg_covar : float, default=1e-6 Non-negative regularization added to the diagonal of covariance. Allows to assure that the covariance matrices are all positive. max_iter : int, default=100 The number of EM iterations to perform. n_init : int, default=1 The number of initializations to perform. The best results are kept. init_params : {'kmeans', 'k-means++', 'random', 'random_from_data'}, \ default='kmeans' The method used to initialize the weights, the means and the precisions. String must be one of: - 'kmeans' : responsibilities are initialized using kmeans. - 'k-means++' : use the k-means++ method to initialize. - 'random' : responsibilities are initialized randomly. - 'random_from_data' : initial means are randomly selected data points. .. versionchanged:: v1.1 `init_params` now accepts 'random_from_data' and 'k-means++' as initialization methods. weights_init : array-like of shape (n_components, ), default=None The user-provided initial weights. 
If it is None, weights are initialized using the `init_params` method. means_init : array-like of shape (n_components, n_features), default=None The user-provided initial means, If it is None, means are initialized using the `init_params` method. precisions_init : array-like, default=None The user-provided initial precisions (inverse of the covariance matrices). If it is None, precisions are initialized using the 'init_params' method. The shape depends on 'covariance_type':: (n_components,) if 'spherical', (n_features, n_features) if 'tied', (n_components, n_features) if 'diag', (n_components, n_features, n_features) if 'full' random_state : int, RandomState instance or None, default=None Controls the random seed given to the method chosen to initialize the parameters (see `init_params`). In addition, it controls the generation of random samples from the fitted distribution (see the method `sample`). Pass an int for reproducible output across multiple function calls. See :term:`Glossary <random_state>`. warm_start : bool, default=False If 'warm_start' is True, the solution of the last fitting is used as initialization for the next call of fit(). This can speed up convergence when fit is called several times on similar problems. In that case, 'n_init' is ignored and only a single initialization occurs upon the first call. See :term:`the Glossary <warm_start>`. verbose : int, default=0 Enable verbose output. If 1 then it prints the current initialization and each iteration step. If greater than 1 then it prints also the log probability and the time needed for each step. verbose_interval : int, default=10 Number of iteration done before the next print. Attributes ---------- weights_ : array-like of shape (n_components,) The weights of each mixture components. means_ : array-like of shape (n_components, n_features) The mean of each mixture component. covariances_ : array-like The covariance of each mixture component. 
The shape depends on `covariance_type`:: (n_components,) if 'spherical', (n_features, n_features) if 'tied', (n_components, n_features) if 'diag', (n_components, n_features, n_features) if 'full' For an example of using covariances, refer to :ref:`sphx_glr_auto_examples_mixture_plot_gmm_covariances.py`. precisions_ : array-like The precision matrices for each component in the mixture. A precision matrix is the inverse of a covariance matrix. A covariance matrix is symmetric positive definite so the mixture of Gaussian can be equivalently parameterized by the precision matrices. Storing the precision matrices instead of the covariance matrices makes it more efficient to compute the log-likelihood of new samples at test time. The shape depends on `covariance_type`:: (n_components,) if 'spherical', (n_features, n_features) if 'tied', (n_components, n_features) if 'diag', (n_components, n_features, n_features) if 'full' precisions_cholesky_ : array-like The Cholesky decomposition of the precision matrices of each mixture component. A precision matrix is the inverse of a covariance matrix. A covariance matrix is symmetric positive definite so the mixture of Gaussian can be equivalently parameterized by the precision matrices. Storing the precision matrices instead of the covariance matrices makes it more efficient to compute the log-likelihood of new samples at test time. The shape depends on `covariance_type`:: (n_components,) if 'spherical', (n_features, n_features) if 'tied', (n_components, n_features) if 'diag', (n_components, n_features, n_features) if 'full' converged_ : bool True when convergence of the best fit of EM was reached, False otherwise. n_iter_ : int Number of step used by the best fit of EM to reach the convergence. lower_bound_ : float Lower bound value on the log-likelihood (of the training data with respect to the model) of the best fit of EM. 
lower_bounds_ : array-like of shape (`n_iter_`,) The list of lower bound values on the log-likelihood from each iteration of the best fit of EM. n_features_in_ : int Number of features seen during :term:`fit`. .. versionadded:: 0.24 feature_names_in_ : ndarray of shape (`n_features_in_`,) Names of features seen during :term:`fit`. Defined only when `X` has feature names that are all strings. .. versionadded:: 1.0 See Also -------- BayesianGaussianMixture : Gaussian mixture model fit with a variational inference. Examples -------- >>> import numpy as np >>> from sklearn.mixture import GaussianMixture >>> X = np.array([[1, 2], [1, 4], [1, 0], [10, 2], [10, 4], [10, 0]]) >>> gm = GaussianMixture(n_components=2, random_state=0).fit(X) >>> gm.means_ array([[10., 2.], [ 1., 2.]]) >>> gm.predict([[0, 0], [12, 3]]) array([1, 0]) For a comparison of Gaussian Mixture with other clustering algorithms, see :ref:`sphx_glr_auto_examples_cluster_plot_cluster_comparison.py`. For an illustration of the negative log-likelihood surface of a :class:`~sklearn.mixture.GaussianMixture` Model, see :ref:`sphx_glr_auto_examples_mixture_plot_gmm_pdf.py`. 
""" _parameter_constraints: dict = { **BaseMixture._parameter_constraints, "covariance_type": [StrOptions({"full", "tied", "diag", "spherical"})], "weights_init": ["array-like", None], "means_init": ["array-like", None], "precisions_init": ["array-like", None], } def __init__( self, n_components=1, *, covariance_type="full", tol=1e-3, reg_covar=1e-6, max_iter=100, n_init=1, init_params="kmeans", weights_init=None, means_init=None, precisions_init=None, random_state=None, warm_start=False, verbose=0, verbose_interval=10, ): super().__init__( n_components=n_components, tol=tol, reg_covar=reg_covar, max_iter=max_iter, n_init=n_init, init_params=init_params, random_state=random_state, warm_start=warm_start, verbose=verbose, verbose_interval=verbose_interval, ) self.covariance_type = covariance_type self.weights_init = weights_init self.means_init = means_init self.precisions_init = precisions_init def _check_parameters(self, X, xp=None): """Check the Gaussian mixture parameters are well defined.""" _, n_features = X.shape if self.weights_init is not None: self.weights_init = _check_weights( self.weights_init, self.n_components, xp=xp ) if self.means_init is not None: self.means_init = _check_means( self.means_init, self.n_components, n_features, xp=xp ) if self.precisions_init is not None: self.precisions_init = _check_precisions( self.precisions_init, self.covariance_type, self.n_components, n_features, xp=xp, ) allowed_init_params = ["random", "random_from_data"] if ( get_config()["array_api_dispatch"] and self.init_params not in allowed_init_params ): raise NotImplementedError( f"Allowed `init_params` are {allowed_init_params} if " f"'array_api_dispatch' is enabled. You passed " f"init_params={self.init_params!r}, which are not implemented to work " "with 'array_api_dispatch' enabled. Please disable " f"'array_api_dispatch' to use init_params={self.init_params!r}." 
) def _initialize_parameters(self, X, random_state, xp=None): # If all the initial parameters are all provided, then there is no need to run # the initialization. compute_resp = ( self.weights_init is None or self.means_init is None or self.precisions_init is None ) if compute_resp: super()._initialize_parameters(X, random_state, xp=xp) else: self._initialize(X, None, xp=xp) def _initialize(self, X, resp, xp=None): """Initialization of the Gaussian mixture parameters. Parameters ---------- X : array-like of shape (n_samples, n_features) resp : array-like of shape (n_samples, n_components) """ xp, _, device_ = get_namespace_and_device(X, xp=xp) n_samples, _ = X.shape weights, means, covariances = None, None, None if resp is not None: weights, means, covariances = _estimate_gaussian_parameters( X, resp, self.reg_covar, self.covariance_type, xp=xp ) if self.weights_init is None: weights /= n_samples self.weights_ = weights if self.weights_init is None else self.weights_init self.weights_ = xp.asarray(self.weights_, device=device_) self.means_ = means if self.means_init is None else self.means_init if self.precisions_init is None: self.covariances_ = covariances self.precisions_cholesky_ = _compute_precision_cholesky( covariances, self.covariance_type, xp=xp ) else: self.precisions_cholesky_ = _compute_precision_cholesky_from_precisions( self.precisions_init, self.covariance_type, xp=xp ) def _m_step(self, X, log_resp, xp=None): """M step. Parameters ---------- X : array-like of shape (n_samples, n_features) log_resp : array-like of shape (n_samples, n_components) Logarithm of the posterior probabilities (or responsibilities) of the point of each sample in X. 
""" xp, _ = get_namespace(X, log_resp, xp=xp) self.weights_, self.means_, self.covariances_ = _estimate_gaussian_parameters( X, xp.exp(log_resp), self.reg_covar, self.covariance_type, xp=xp ) self.weights_ /= xp.sum(self.weights_) self.precisions_cholesky_ = _compute_precision_cholesky( self.covariances_, self.covariance_type, xp=xp ) def _estimate_log_prob(self, X, xp=None): return _estimate_log_gaussian_prob( X, self.means_, self.precisions_cholesky_, self.covariance_type, xp=xp ) def _estimate_log_weights(self, xp=None): xp, _ = get_namespace(self.weights_, xp=xp) return xp.log(self.weights_) def _compute_lower_bound(self, _, log_prob_norm): return log_prob_norm def _get_parameters(self): return ( self.weights_, self.means_, self.covariances_, self.precisions_cholesky_, ) def _set_parameters(self, params, xp=None): xp, _, device_ = get_namespace_and_device(params, xp=xp) ( self.weights_, self.means_, self.covariances_, self.precisions_cholesky_, ) = params # Attributes computation if self.covariance_type == "full": self.precisions_ = xp.empty_like(self.precisions_cholesky_, device=device_) for k in range(self.precisions_cholesky_.shape[0]): prec_chol = self.precisions_cholesky_[k, :, :] self.precisions_[k, :, :] = prec_chol @ prec_chol.T elif self.covariance_type == "tied": self.precisions_ = self.precisions_cholesky_ @ self.precisions_cholesky_.T else: self.precisions_ = self.precisions_cholesky_**2 def _n_parameters(self): """Return the number of free parameters in the model.""" _, n_features = self.means_.shape if self.covariance_type == "full": cov_params = self.n_components * n_features * (n_features + 1) / 2.0 elif self.covariance_type == "diag": cov_params = self.n_components * n_features elif self.covariance_type == "tied": cov_params = n_features * (n_features + 1) / 2.0 elif self.covariance_type == "spherical": cov_params = self.n_components mean_params = n_features * self.n_components return int(cov_params + mean_params + self.n_components - 1) def 
bic(self, X): """Bayesian information criterion for the current model on the input X. You can refer to this :ref:`mathematical section <aic_bic>` for more details regarding the formulation of the BIC used. For an example of GMM selection using `bic` information criterion, refer to :ref:`sphx_glr_auto_examples_mixture_plot_gmm_selection.py`. Parameters ---------- X : array of shape (n_samples, n_dimensions) The input samples. Returns ------- bic : float The lower the better. """ return -2 * self.score(X) * X.shape[0] + self._n_parameters() * math.log( X.shape[0] ) def aic(self, X): """Akaike information criterion for the current model on the input X. You can refer to this :ref:`mathematical section <aic_bic>` for more details regarding the formulation of the AIC used. Parameters ---------- X : array of shape (n_samples, n_dimensions) The input samples. Returns ------- aic : float The lower the better. """ return -2 * self.score(X) * X.shape[0] + 2 * self._n_parameters() def __sklearn_tags__(self): tags = super().__sklearn_tags__() tags.array_api_support = ( self.init_params in ["random", "random_from_data"] and not self.warm_start ) return tags
GaussianMixture
python
google__jax
tests/pallas/tpu_pallas_async_test.py
{ "start": 6279, "end": 25086 }
class ____(parameterized.TestCase): # TODO(b/368123537): add more tests def setUp(self): super().setUp() if not jtu.is_device_tpu_at_least(4): self.skipTest('DMAs only guaranteed to work ou TPU v4+') def test_basic_async_copy(self): @jax.jit def f(x): copy_start, copy_done = make_async_copy() x, fut = copy_start(x) y = copy_done(x, fut) return y x = jax.random.normal(jax.random.key(0), (8, 128), dtype=jnp.float32) y = f(x) np.testing.assert_array_equal(y, x) def test_multiple_async_copy(self): @jax.jit def f(x): copy_start, copy_done = make_async_copy() x, fut = copy_start(x) x2, fut2 = copy_start(x) y = copy_done(x, fut) y2 = copy_done(x2, fut2) return y, y2 x = jax.random.normal(jax.random.key(0), (8, 128), dtype=jnp.float32) y, y2 = f(x) np.testing.assert_array_equal(y, x) np.testing.assert_array_equal(y2, x) def test_async_slice(self): @jax.jit def f(x): async_slice_start, async_slice_done = make_async_slice(2) x, fut = async_slice_start(x) y = async_slice_done(x, fut) return y x = jax.random.normal(jax.random.key(0), (4, 8, 128), dtype=jnp.float32) y = f(x) np.testing.assert_array_equal(y, x[2]) def test_async_dynamic_slice(self): @jax.jit def f(x, i): async_slice_start, async_slice_done = make_async_dynamic_slice(i) x, fut = async_slice_start(x) y = async_slice_done(x, fut) return y x = jax.random.normal(jax.random.key(0), (4, 8, 128), dtype=jnp.float32) y = f(x, 2) np.testing.assert_array_equal(y, x[2]) def test_multi_async_dynamic_slice(self): @jax.jit def f(x, i, j): async_slice_start, async_slice_done = make_async_dynamic_slice(i) async_slice_start2, async_slice_done2 = make_async_dynamic_slice(j) x, fut = async_slice_start(x) x2, fut2 = async_slice_start2(x) y = async_slice_done(x, fut) y2 = async_slice_done2(x2, fut2) return y, y2 x = jax.random.normal(jax.random.key(0), (4, 8, 128), dtype=jnp.float32) y, y2 = f(x, 2, 3) np.testing.assert_array_equal(y, x[2]) np.testing.assert_array_equal(y2, x[3]) def test_basic_async_copy_into_vmem(self): @jax.jit def 
f(x): copy_start, copy_done = make_async_copy(pltpu.VMEM) x, fut = copy_start(x) y = copy_done(x, fut) return y if not jtu.is_device_tpu_at_least(5): self.skipTest('TPU v5+ required for async copy into VMEM') x = jax.random.normal(jax.random.key(0), (8, 128), dtype=jnp.float32) y = f(x) np.testing.assert_array_equal(y, x) def test_multiple_async_copy_into_vmem(self): @jax.jit def f(x): copy_start, copy_done = make_async_copy(pltpu.VMEM) x1, fut = copy_start(x) x2, fut2 = copy_start(x) y = copy_done(x1, fut) y2 = copy_done(x2, fut2) return y, y2 if not jtu.is_device_tpu_at_least(5): self.skipTest('TPU v5+ required for async copy into VMEM') x = jax.random.normal(jax.random.key(0), (8, 128), dtype=jnp.float32) y, y2 = f(x) np.testing.assert_array_equal(y, x) np.testing.assert_array_equal(y2, x) def test_copy_in_a_loop(self): @jax.jit def f(x): def body(_, carry): x = carry copy_start, copy_done = make_async_copy() x, fut = copy_start(x) y = copy_done(x, fut) return y x = jax.lax.fori_loop(0, x.shape[0], body, x) return x x = jax.random.normal(jax.random.key(0), (16, 8, 128), dtype=jnp.float32) y = f(x) np.testing.assert_array_equal(y, x) def test_staggered_copy_in_a_loop(self): @jax.jit def f(x): copy_start, copy_done = make_async_copy() x, fut = copy_start(x) def body(_, carry): x, fut = carry y = copy_done(x, fut) y, fut = copy_start(y) return y, fut # We *must* use unroll > 2 here because of aliasing constraints. XLA will # introduce copies of the active buffer with unroll=1. 
y, fut = jax.lax.fori_loop(0, x.shape[0] - 1, body, (x, fut), unroll=2) x = copy_done(y, fut) return x x = jax.random.normal(jax.random.key(0), (16, 8, 128), dtype=jnp.float32) y = f(x) np.testing.assert_array_equal(y, x) def test_full_copy_in_a_loop(self): @jax.jit def f(x): y = jnp.zeros_like(x) def body(i, carry): x, ys = carry copy_start, copy_done = make_async_dynamic_slice(i) x, fut = copy_start(x) y = copy_done(x, fut) ys = ys.at[i].set(y) return x, ys _, y = jax.lax.fori_loop(0, x.shape[0], body, (x, y)) return y x = jax.random.normal(jax.random.key(0), (16, 8, 128), dtype=jnp.float32) y = f(x) np.testing.assert_array_equal(y, x) def test_staggered_full_copy_in_a_loop(self): @jax.jit def f(x): y = jnp.zeros_like(x) copy_start, _ = make_async_dynamic_slice(jnp.array(0)) x, fut = copy_start(x) def body(i, carry): x, fut, ys = carry _, copy_done = make_async_dynamic_slice(i) y = copy_done(x, fut) copy_start, _ = make_async_dynamic_slice(i + 1) ys = ys.at[i].set(y) x, fut = copy_start(x) return x, fut, ys # We can use unroll=1 here because we have the ys.at[i].set(y) in the # middle x, fut, ys = jax.lax.fori_loop(0, x.shape[0] - 1, body, (x, fut, y), unroll=1) _, copy_done = make_async_dynamic_slice(x.shape[0] - 1) y = copy_done(x, fut) ys = ys.at[x.shape[0] - 1].set(y) return ys x = jax.random.normal(jax.random.key(0), (16, 8, 128), dtype=jnp.float32) y = f(x) np.testing.assert_array_equal(y, x) @parameterized.product(joint_axis=[True, False]) def test_device_id_as_axis_dict(self, joint_axis): if jax.device_count() < 2: self.skipTest('Requires at least 2 devices for a 2d mesh.') xdim, ydim = 2, jax.device_count() // 2 mesh = jax.make_mesh( (xdim, ydim), ('x', 'y'), axis_types=(jax.sharding.AxisType.Auto,) * 2, ) xlocal, ylocal = 8, 128 if joint_axis: axis_name = ('x', 'y') pspec = P(('x', 'y'), None) input_arr = jax.device_put( jax.random.uniform(jax.random.key(0), (xlocal * xdim * ydim, ylocal)), jax.sharding.NamedSharding(mesh, pspec), ) else: axis_name = 
'x' pspec = P('x', 'y') input_arr = jax.device_put( jax.random.uniform(jax.random.key(0), (xlocal * xdim, ylocal * ydim)), jax.sharding.NamedSharding(mesh, pspec), ) def copy_kernel(input_ref, output_ref, send_sem, recv_sem, local_copy_sem): xid = jax.lax.axis_index(axis_name) x0_local_copy = pltpu.make_async_copy( src_ref=input_ref, dst_ref=output_ref, sem=local_copy_sem ) copy_x0_to_x1 = pltpu.make_async_remote_copy( src_ref=input_ref, dst_ref=output_ref, send_sem=send_sem, recv_sem=recv_sem, device_id={axis_name: 1}, ) @pl.when(xid == 0) def _(): copy_x0_to_x1.start() x0_local_copy.start() x0_local_copy.wait() copy_x0_to_x1.wait_send() @pl.when(xid == 1) def _(): copy_x0_to_x1.wait_recv() copy = pl.pallas_call( copy_kernel, out_shape=jax.ShapeDtypeStruct((xlocal, ylocal), jnp.float32), in_specs=[pl.BlockSpec(memory_space=pltpu.MemorySpace.ANY),], out_specs=pl.BlockSpec(memory_space=pltpu.MemorySpace.ANY), scratch_shapes=[pltpu.SemaphoreType.DMA] * 3, ) # Wrap the kernel within a shard_map to call. pallas_out = jax.jit( jax.shard_map( copy, mesh=mesh, in_specs=pspec, out_specs=pspec, check_vma=False ) )(input_arr) # x=1 devices are flushed with x=0 device contents np.testing.assert_array_equal(input_arr[:xlocal], pallas_out[:xlocal]) np.testing.assert_array_equal(pallas_out[:xlocal], pallas_out[xlocal:(2*xlocal)]) def test_axis_dict_with_core_single_device(self): if jax.device_count() > 2 or (jax.devices()[0].num_cores) != 2: self.skipTest('Testing single device two cores') mesh = jax.make_mesh( (jax.device_count(),), ('device',), axis_types=(jax.sharding.AxisType.Auto,), ) ddim = jax.device_count() tcmesh = pltpu.create_tensorcore_mesh('core') pspec = P('device', None) sharding = jax.sharding.NamedSharding(mesh, pspec) # Array is fully sharded. 
xlocal, ylocal = 8, 256 input_arr = jnp.arange(xlocal * ddim * ylocal, dtype=jnp.int32).reshape( (xlocal * ddim, ylocal) ) input_arr = jax.device_put(input_arr, sharding) def core_copy(refs): in_ref, out_ref = refs @pl.core_map(tcmesh, compiler_params=pltpu.CompilerParams(collective_id=7)) def _(): num_cores = jax.lax.axis_size('core') slc_size = ylocal // num_cores vmem_shape = (xlocal, slc_size) # This runs on every core, for every vmem iterations def alloc(out_vmem_ref, sem, send_sem, recv_sem): core_index = jax.lax.axis_index('core') slc = pl.ds(core_index * slc_size, slc_size) # Make sure all cores have entered run_scoped. sem0 = pltpu.get_barrier_semaphore() for i in range(ddim): for j in range(num_cores): pltpu.semaphore_signal( sem0, 1, device_id={'device': i, 'core': j}, device_id_type=pltpu.DeviceIdType.MESH) pltpu.semaphore_wait(sem0, ddim * num_cores) # Identity function by default pltpu.async_copy(in_ref.at[:, slc], out_ref.at[:, slc], sem).wait() copy_c0_to_c1 = pltpu.make_async_remote_copy( src_ref=in_ref.at[:, slc], dst_ref=out_vmem_ref, send_sem=send_sem, recv_sem=recv_sem, device_id={'core': 1}, device_id_type=pltpu.DeviceIdType.MESH, ) @pl.when(core_index == 0) def _(): copy_c0_to_c1.start() copy_c0_to_c1.wait_send() @pl.when(core_index == 1) def _(): copy_c0_to_c1.wait_recv() pltpu.async_copy(out_vmem_ref, out_ref.at[:, slc], sem).wait() pl.run_scoped( alloc, pltpu.VMEM(vmem_shape, out_ref.dtype), *([pltpu.SemaphoreType.DMA] * 3), ) @partial(jax.shard_map, mesh=mesh, in_specs=pspec, out_specs=pspec, check_vma=False) def run_core_kernel(input): output = jnp.zeros_like(input) _, output = pl.run_state(core_copy)((input, output)) return output pallas_out = jax.jit(run_core_kernel)(input_arr) # The device=0 core=1 slice was flushed with device=0 core=0 contents np.testing.assert_array_equal(pallas_out[:, 128:], input_arr[:, :128]) np.testing.assert_array_equal(pallas_out[:, :128], input_arr[:, :128]) def make_async_remote_copy(axis_name: str, 
direction: str = 'right', target_memory_space=None): if target_memory_space is None: target_memory_space = pltpu.ANY @jax.named_call def copy_start(x: jax.Array) -> tuple[jax.Array, Future]: def copy_start_kernel(x_ref, aliased_x_ref, o_ref, send_sem, recv_sem): del aliased_x_ref axis_size = jax.lax.axis_size(axis_name) left_neighbor = jax.lax.rem( jax.lax.axis_index(axis_name) - 1 + axis_size, axis_size ) right_neighbor = jax.lax.rem( jax.lax.axis_index(axis_name) + 1, axis_size ) if direction == 'right': src_neighbor = left_neighbor dst_neighbor = right_neighbor else: src_neighbor = right_neighbor dst_neighbor = left_neighbor barrier_sem = pltpu.get_barrier_semaphore() pltpu.semaphore_signal(barrier_sem, device_id=src_neighbor) pltpu.semaphore_wait(barrier_sem, 1) pltpu.make_async_remote_copy( x_ref, o_ref, send_sem, recv_sem, device_id=dst_neighbor, ).start() x, out, send_sem, recv_sem = pl.pallas_call( copy_start_kernel, out_shape=( jax.ShapeDtypeStruct(x.shape, x.dtype), # aliased x target_memory_space(x.shape, x.dtype), # out pltpu.SemaphoreType.DMA(()), # send_sem pltpu.SemaphoreType.DMA(()), # recv_sem ), in_specs=[ pl.BlockSpec(memory_space=pltpu.ANY), ], out_specs=( pl.BlockSpec(memory_space=pltpu.ANY), pl.BlockSpec(memory_space=target_memory_space), pl.BlockSpec(memory_space=pltpu.SEMAPHORE), pl.BlockSpec(memory_space=pltpu.SEMAPHORE), ), input_output_aliases={0: 0}, compiler_params=pltpu.CompilerParams( collective_id=0, has_side_effects=True ), )(x) return x, (out, send_sem, recv_sem) @jax.named_call def send_done(x: jax.Array, future: Future) -> jax.Array: _, send_sem, _ = future def send_done_kernel(x_ref, send_sem, aliased_o_ref): del aliased_o_ref pltpu.make_async_copy(x_ref, x_ref, send_sem).wait() x = pl.pallas_call( send_done_kernel, out_shape=jax.ShapeDtypeStruct(x.shape, x.dtype), # out in_specs=[ pl.BlockSpec(memory_space=pltpu.ANY), pl.BlockSpec(memory_space=pltpu.SEMAPHORE), ], out_specs=pl.BlockSpec(memory_space=pltpu.ANY), 
input_output_aliases={0: 0}, )(x, send_sem) return x @jax.named_call def recv_done(x: jax.Array, future: Future) -> jax.Array: out, _, recv_sem = future def send_done_kernel(x_ref, o_ref, send_sem, aliased_o_ref): del aliased_o_ref pltpu.make_async_copy(x_ref, o_ref, send_sem).wait() out = pl.pallas_call( send_done_kernel, out_shape=target_memory_space(x.shape, x.dtype), # out in_specs=[ pl.BlockSpec(memory_space=pltpu.ANY), pl.BlockSpec(memory_space=target_memory_space), pl.BlockSpec(memory_space=pltpu.SEMAPHORE), ], out_specs=pl.BlockSpec(memory_space=target_memory_space), input_output_aliases={1: 0}, )(x, out, recv_sem) return out return copy_start, send_done, recv_done def make_bidi_collective_permute(axis_name: str): @jax.named_call def copy_start(x: jax.Array) -> tuple[jax.Array, Future]: def copy_start_kernel(x_ref, aliased_x_ref, o_ref, left_sems, right_sems): del aliased_x_ref axis_size = jax.lax.axis_size(axis_name) left_neighbor = jax.lax.rem( jax.lax.axis_index(axis_name) - 1 + axis_size, axis_size ) right_neighbor = jax.lax.rem( jax.lax.axis_index(axis_name) + 1, axis_size ) barrier_sem = pltpu.get_barrier_semaphore() pltpu.semaphore_signal(barrier_sem, device_id=left_neighbor) pltpu.semaphore_signal(barrier_sem, device_id=right_neighbor) pltpu.semaphore_wait(barrier_sem, 2) assert x.shape[0] % 2 == 0, x.shape pltpu.make_async_remote_copy( x_ref.at[pl.ds(0, x.shape[0] // 2)], o_ref.at[pl.ds(0, x.shape[0] // 2)], right_sems[0], right_sems[1], device_id=right_neighbor, ).start() pltpu.make_async_remote_copy( x_ref.at[pl.ds(x.shape[0] // 2, x.shape[0] // 2)], o_ref.at[pl.ds(x.shape[0] // 2, x.shape[0] // 2)], left_sems[0], left_sems[1], device_id=left_neighbor, ).start() x, out, left_sems, right_sems = pl.pallas_call( copy_start_kernel, out_shape=( jax.ShapeDtypeStruct(x.shape, x.dtype), # aliased x pltpu.ANY(x.shape, x.dtype), # out (pltpu.SemaphoreType.DMA(()),) * 2, # left_sems (pltpu.SemaphoreType.DMA(()),) * 2, # right_sems ), in_specs=[ 
pl.BlockSpec(memory_space=pltpu.ANY), ], out_specs=( pl.BlockSpec(memory_space=pltpu.ANY), pl.BlockSpec(memory_space=pltpu.ANY), (pl.BlockSpec(memory_space=pltpu.SEMAPHORE),) * 2, (pl.BlockSpec(memory_space=pltpu.SEMAPHORE),) * 2, ), input_output_aliases={0: 0}, compiler_params=pltpu.CompilerParams( collective_id=0, has_side_effects=False ), )(x) return x, (out, left_sems, right_sems) @jax.named_call def send_done(x: jax.Array, future: Future) -> jax.Array: _, (send_left_sem, _), (send_right_sem, _) = future def send_done_kernel(x_ref, send_left_sem, send_right_sem, aliased_o_ref): del aliased_o_ref pltpu.make_async_copy( x_ref.at[x_ref.shape[0] // 2 :], x_ref.at[x_ref.shape[0] // 2 :], send_left_sem, ).wait() pltpu.make_async_copy( x_ref.at[x_ref.shape[0] // 2 :], x_ref.at[x_ref.shape[0] // 2 :], send_right_sem, ).wait() x = pl.pallas_call( send_done_kernel, out_shape=jax.ShapeDtypeStruct(x.shape, x.dtype), # out in_specs=[ pl.BlockSpec(memory_space=pltpu.ANY), pl.BlockSpec(memory_space=pltpu.SEMAPHORE), pl.BlockSpec(memory_space=pltpu.SEMAPHORE), ], out_specs=pl.BlockSpec(memory_space=pltpu.ANY), input_output_aliases={0: 0}, )(x, send_left_sem, send_right_sem) return x @jax.named_call def recv_done(x: jax.Array, future: Future) -> jax.Array: out, (_, recv_left_sem), (_, recv_right_sem) = future def recv_done_kernel(o_ref, x_ref, recv_left_sem, recv_right_sem, aliased_o_ref): del aliased_o_ref pltpu.make_async_copy( x_ref.at[o_ref.shape[0] // 2 :], o_ref.at[o_ref.shape[0] // 2 :], recv_left_sem, ).wait() pltpu.make_async_copy( x_ref.at[o_ref.shape[0] // 2 :], o_ref.at[o_ref.shape[0] // 2 :], recv_right_sem, ).wait() out = pl.pallas_call( recv_done_kernel, out_shape=jax.ShapeDtypeStruct(x.shape, x.dtype), # out in_specs=[ pl.BlockSpec(memory_space=pltpu.ANY), pl.BlockSpec(memory_space=pltpu.ANY), pl.BlockSpec(memory_space=pltpu.SEMAPHORE), pl.BlockSpec(memory_space=pltpu.SEMAPHORE), ], out_specs=pl.BlockSpec(memory_space=pltpu.ANY), input_output_aliases={0: 0}, 
)(out, x, recv_left_sem, recv_right_sem) return out return copy_start, send_done, recv_done
PallasCallAsyncCopyTest
python
tensorflow__tensorflow
tensorflow/python/keras/callbacks.py
{ "start": 106644, "end": 109715 }
class LambdaCallback(Callback):
  r"""Callback for creating simple, custom callbacks on-the-fly.

  Each hook is supplied as an anonymous function and is invoked with
  positional arguments at the matching point of
  `Model.{fit | evaluate | predict}`:

  - `on_epoch_begin` / `on_epoch_end` are called with `epoch, logs`
  - `on_batch_begin` / `on_batch_end` are called with `batch, logs`
  - `on_train_begin` / `on_train_end` are called with `logs`

  Any hook left as `None` becomes a no-op. Extra keyword arguments are
  attached verbatim as attributes of the callback instance.

  Args:
    on_epoch_begin: called at the beginning of every epoch.
    on_epoch_end: called at the end of every epoch.
    on_batch_begin: called at the beginning of every batch.
    on_batch_end: called at the end of every batch.
    on_train_begin: called at the beginning of model training.
    on_train_end: called at the end of model training.

  Example:

  ```python
  # Print the batch number at the beginning of every batch.
  batch_print_callback = LambdaCallback(
      on_batch_begin=lambda batch, logs: print(batch))

  # Stream the epoch loss to a file in JSON format (one object per line).
  import json
  json_log = open('loss_log.json', mode='wt', buffering=1)
  json_logging_callback = LambdaCallback(
      on_epoch_end=lambda epoch, logs: json_log.write(
          json.dumps({'epoch': epoch, 'loss': logs['loss']}) + '\n'),
      on_train_end=lambda logs: json_log.close()
  )

  model.fit(..., callbacks=[batch_print_callback, json_logging_callback])
  ```
  """

  def __init__(self,
               on_epoch_begin=None,
               on_epoch_end=None,
               on_batch_begin=None,
               on_batch_end=None,
               on_train_begin=None,
               on_train_end=None,
               **kwargs):
    super(LambdaCallback, self).__init__()
    self.__dict__.update(kwargs)
    # Shared no-op fallbacks, one per hook arity.
    noop_two = lambda a, b: None
    noop_one = lambda a: None
    self.on_epoch_begin = on_epoch_begin if on_epoch_begin is not None else noop_two
    self.on_epoch_end = on_epoch_end if on_epoch_end is not None else noop_two
    self.on_batch_begin = on_batch_begin if on_batch_begin is not None else noop_two
    self.on_batch_end = on_batch_end if on_batch_end is not None else noop_two
    self.on_train_begin = on_train_begin if on_train_begin is not None else noop_one
    self.on_train_end = on_train_end if on_train_end is not None else noop_one
LambdaCallback
python
fluentpython__example-code
attic/metaprog/spreadsheet2.py
{ "start": 794, "end": 1385 }
class ____: def __init__(self, **tools): self._cells = {} self._tools = {'__builtins__' : {}} self._tools.update(tools) def __setitem__(self, key, formula): try: compile(formula, '<__setitem__>', 'eval') except SyntaxError as exc: msg = '{} [{!r}] = {!r}'.format(exc.msg, key, formula) raise SyntaxError(msg) self._cells[key] = formula def getformula(self, key): return self._cells[key] def __getitem__(self, key): return eval(self._cells[key], self._tools, self)
Spreadsheet
python
sqlalchemy__sqlalchemy
lib/sqlalchemy/dialects/oracle/cx_oracle.py
{ "start": 24742, "end": 25017 }
class ____(_LOBDataType, sqltypes.Text): def get_dbapi_type(self, dbapi): # previously, this was dbapi.CLOB. # DB_TYPE_NVARCHAR will instead be passed to setinputsizes() # when this datatype is used. return dbapi.DB_TYPE_NVARCHAR
_OracleText
python
cython__cython
Cython/Compiler/ExprNodes.py
{ "start": 152065, "end": 152238 }
class ____(TempNode): # TempNode holding a Python value. def __init__(self, pos, env): TempNode.__init__(self, pos, PyrexTypes.py_object_type, env)
PyTempNode
python
django-compressor__django-compressor
compressor/storage.py
{ "start": 645, "end": 2163 }
class ____(FileSystemStorage): """ Standard file system storage for files handled by django-compressor. The defaults for ``location`` and ``base_url`` are ``COMPRESS_ROOT`` and ``COMPRESS_URL``. """ def __init__(self, location=None, base_url=None, *args, **kwargs): if location is None: location = settings.COMPRESS_ROOT if base_url is None: base_url = settings.COMPRESS_URL super().__init__(location, base_url, *args, **kwargs) def accessed_time(self, name): return datetime.fromtimestamp(os.path.getatime(self.path(name))) def created_time(self, name): return datetime.fromtimestamp(os.path.getctime(self.path(name))) def modified_time(self, name): return datetime.fromtimestamp(os.path.getmtime(self.path(name))) def save(self, filename, content): temp_filename = super().save(filename, content) # If a file already exists in the target location, FileSystemStorage # will generate an unique filename and save content there instead. # When that happens, we move the file to the intended location using # os.replace() (which is an atomic operation): if temp_filename != filename: os.replace(self.path(temp_filename), self.path(filename)) return filename compressor_file_storage = SimpleLazyObject( lambda: storages.create_storage( {"BACKEND": "compressor.storage.CompressorFileStorage"} ) )
CompressorFileStorage
python
wandb__wandb
wandb/vendor/pygments/lexers/theorem.py
{ "start": 479, "end": 6399 }
class ____(RegexLexer): """ For the `Coq <http://coq.inria.fr/>`_ theorem prover. .. versionadded:: 1.5 """ name = 'Coq' aliases = ['coq'] filenames = ['*.v'] mimetypes = ['text/x-coq'] keywords1 = ( # Vernacular commands 'Section', 'Module', 'End', 'Require', 'Import', 'Export', 'Variable', 'Variables', 'Parameter', 'Parameters', 'Axiom', 'Hypothesis', 'Hypotheses', 'Notation', 'Local', 'Tactic', 'Reserved', 'Scope', 'Open', 'Close', 'Bind', 'Delimit', 'Definition', 'Let', 'Ltac', 'Fixpoint', 'CoFixpoint', 'Morphism', 'Relation', 'Implicit', 'Arguments', 'Set', 'Unset', 'Contextual', 'Strict', 'Prenex', 'Implicits', 'Inductive', 'CoInductive', 'Record', 'Structure', 'Canonical', 'Coercion', 'Theorem', 'Lemma', 'Corollary', 'Proposition', 'Fact', 'Remark', 'Example', 'Proof', 'Goal', 'Save', 'Qed', 'Defined', 'Hint', 'Resolve', 'Rewrite', 'View', 'Search', 'Show', 'Print', 'Printing', 'All', 'Graph', 'Projections', 'inside', 'outside', 'Check', 'Global', 'Instance', 'Class', 'Existing', 'Universe', 'Polymorphic', 'Monomorphic', 'Context' ) keywords2 = ( # Gallina 'forall', 'exists', 'exists2', 'fun', 'fix', 'cofix', 'struct', 'match', 'end', 'in', 'return', 'let', 'if', 'is', 'then', 'else', 'for', 'of', 'nosimpl', 'with', 'as', ) keywords3 = ( # Sorts 'Type', 'Prop', ) keywords4 = ( # Tactics 'pose', 'set', 'move', 'case', 'elim', 'apply', 'clear', 'hnf', 'intro', 'intros', 'generalize', 'rename', 'pattern', 'after', 'destruct', 'induction', 'using', 'refine', 'inversion', 'injection', 'rewrite', 'congr', 'unlock', 'compute', 'ring', 'field', 'replace', 'fold', 'unfold', 'change', 'cutrewrite', 'simpl', 'have', 'suff', 'wlog', 'suffices', 'without', 'loss', 'nat_norm', 'assert', 'cut', 'trivial', 'revert', 'bool_congr', 'nat_congr', 'symmetry', 'transitivity', 'auto', 'split', 'left', 'right', 'autorewrite', 'tauto', 'setoid_rewrite', 'intuition', 'eauto', 'eapply', 'econstructor', 'etransitivity', 'constructor', 'erewrite', 'red', 'cbv', 'lazy', 'vm_compute', 
'native_compute', 'subst', ) keywords5 = ( # Terminators 'by', 'done', 'exact', 'reflexivity', 'tauto', 'romega', 'omega', 'assumption', 'solve', 'contradiction', 'discriminate', 'congruence', ) keywords6 = ( # Control 'do', 'last', 'first', 'try', 'idtac', 'repeat', ) # 'as', 'assert', 'begin', 'class', 'constraint', 'do', 'done', # 'downto', 'else', 'end', 'exception', 'external', 'false', # 'for', 'fun', 'function', 'functor', 'if', 'in', 'include', # 'inherit', 'initializer', 'lazy', 'let', 'match', 'method', # 'module', 'mutable', 'new', 'object', 'of', 'open', 'private', # 'raise', 'rec', 'sig', 'struct', 'then', 'to', 'true', 'try', # 'type', 'val', 'virtual', 'when', 'while', 'with' keyopts = ( '!=', '#', '&', '&&', r'\(', r'\)', r'\*', r'\+', ',', '-', r'-\.', '->', r'\.', r'\.\.', ':', '::', ':=', ':>', ';', ';;', '<', '<-', '<->', '=', '>', '>]', r'>\}', r'\?', r'\?\?', r'\[', r'\[<', r'\[>', r'\[\|', ']', '_', '`', r'\{', r'\{<', r'\|', r'\|]', r'\}', '~', '=>', r'/\\', r'\\/', r'\{\|', r'\|\}', u'Π', u'λ', ) operators = r'[!$%&*+\./:<=>?@^|~-]' prefix_syms = r'[!?~]' infix_syms = r'[=<>@^|&+\*/$%-]' primitives = ('unit', 'nat', 'bool', 'string', 'ascii', 'list') tokens = { 'root': [ (r'\s+', Text), (r'false|true|\(\)|\[\]', Name.Builtin.Pseudo), (r'\(\*', Comment, 'comment'), (words(keywords1, prefix=r'\b', suffix=r'\b'), Keyword.Namespace), (words(keywords2, prefix=r'\b', suffix=r'\b'), Keyword), (words(keywords3, prefix=r'\b', suffix=r'\b'), Keyword.Type), (words(keywords4, prefix=r'\b', suffix=r'\b'), Keyword), (words(keywords5, prefix=r'\b', suffix=r'\b'), Keyword.Pseudo), (words(keywords6, prefix=r'\b', suffix=r'\b'), Keyword.Reserved), # (r'\b([A-Z][\w\']*)(\.)', Name.Namespace, 'dotted'), (r'\b([A-Z][\w\']*)', Name), (r'(%s)' % '|'.join(keyopts[::-1]), Operator), (r'(%s|%s)?%s' % (infix_syms, prefix_syms, operators), Operator), (r'\b(%s)\b' % '|'.join(primitives), Keyword.Type), (r"[^\W\d][\w']*", Name), (r'\d[\d_]*', Number.Integer), 
(r'0[xX][\da-fA-F][\da-fA-F_]*', Number.Hex), (r'0[oO][0-7][0-7_]*', Number.Oct), (r'0[bB][01][01_]*', Number.Bin), (r'-?\d[\d_]*(.[\d_]*)?([eE][+\-]?\d[\d_]*)', Number.Float), (r"'(?:(\\[\\\"'ntbr ])|(\\[0-9]{3})|(\\x[0-9a-fA-F]{2}))'", String.Char), (r"'.'", String.Char), (r"'", Keyword), # a stray quote is another syntax element (r'"', String.Double, 'string'), (r'[~?][a-z][\w\']*:', Name), ], 'comment': [ (r'[^(*)]+', Comment), (r'\(\*', Comment, '#push'), (r'\*\)', Comment, '#pop'), (r'[(*)]', Comment), ], 'string': [ (r'[^"]+', String.Double), (r'""', String.Double), (r'"', String.Double, '#pop'), ], 'dotted': [ (r'\s+', Text), (r'\.', Punctuation), (r'[A-Z][\w\']*(?=\s*\.)', Name.Namespace), (r'[A-Z][\w\']*', Name.Class, '#pop'), (r'[a-z][a-z0-9_\']*', Name, '#pop'), default('#pop') ], } def analyse_text(text): if text.startswith('(*'): return True
CoqLexer
python
kamyu104__LeetCode-Solutions
Python/frequencies-of-shortest-supersequences.py
{ "start": 118, "end": 2205 }
class ____(object): def supersequences(self, words): """ :type words: List[str] :rtype: List[List[int]] """ def f(x): x = ord(x)-ord('a') if char_to_int[x] == -1: int_to_char[len(indegree)] = x char_to_int[x] = len(indegree) indegree.append(0) return char_to_int[x] def topological_sort(cnt): total = sum(cnt) if total > ans[0]: return new_cnt = cnt[:] new_indgree = indegree[:] lookup = [False]*len(cnt) q = [] for u in xrange(len(indegree)): if not new_indgree[u] or new_cnt[u] == 2: new_cnt[u] -= 1 lookup[u] = True q.append(u) while q: new_q = [] for u in q: for v in adj[u]: new_indgree[v] -= 1 if new_indgree[v]: continue new_cnt[v] -= 1 if lookup[v]: continue lookup[v] = True new_q.append(v) q = new_q if any(new_cnt): return if total < ans[0]: ans[0] = total ans[1][:] = [] ans[1].append(cnt) adj = [[] for _ in xrange(26)] char_to_int, int_to_char, indegree = [-1]*26, [0]*26, [] for w in words: adj[f(w[0])].append(f(w[1])) indegree[f(w[1])] += 1 ans = [float("inf"), []] for mask in xrange(1<<len(indegree)): topological_sort([2 if mask&(1<<i) else 1 for i in xrange(len(indegree))]) result = [] for cnt in ans[1]: new_cnt = [0]*26 for i, x in enumerate(cnt): new_cnt[int_to_char[i]] = x result.append(new_cnt) return result
Solution
python
gevent__gevent
src/gevent/tests/test__pool.py
{ "start": 7940, "end": 8817 }
class ____(object): def __init__(self, func): self.func = func self.elapsed = None def __call__(self, *args, **kwds): t = time() try: return self.func(*args, **kwds) finally: self.elapsed = time() - t def sqr(x, wait=0.0): gevent.sleep(wait) return x * x def squared(x): return x * x def sqr_random_sleep(x): gevent.sleep(random.random() * 0.1) return x * x def final_sleep(): yield from range(3) gevent.sleep(0.2) TIMEOUT1, TIMEOUT2, TIMEOUT3 = 0.082, 0.035, 0.14 SMALL_RANGE = 10 LARGE_RANGE = 1000 if (greentest.PYPY and greentest.WIN) or greentest.RUN_LEAKCHECKS or greentest.RUN_COVERAGE: # See comments in test__threadpool.py. LARGE_RANGE = 25 elif greentest.RUNNING_ON_CI or greentest.EXPECT_POOR_TIMER_RESOLUTION: LARGE_RANGE = 100
TimingWrapper
python
xlwings__xlwings
xlwings/constants.py
{ "start": 112679, "end": 112951 }
class ____: xlChart = -4109 # from enum XlSheetType xlDialogSheet = -4116 # from enum XlSheetType xlExcel4IntlMacroSheet = 4 # from enum XlSheetType xlExcel4MacroSheet = 3 # from enum XlSheetType xlWorksheet = -4167 # from enum XlSheetType
SheetType
python
getsentry__sentry
src/sentry/api/endpoints/api_tokens.py
{ "start": 2553, "end": 5163 }
class ____(Endpoint): owner = ApiOwner.SECURITY publish_status = { "DELETE": ApiPublishStatus.PRIVATE, "GET": ApiPublishStatus.PRIVATE, "POST": ApiPublishStatus.PRIVATE, } authentication_classes = (SessionNoAuthTokenAuthentication,) permission_classes = (SentryIsAuthenticated,) @method_decorator(never_cache) def get(self, request: Request) -> Response: user_id = get_appropriate_user_id(request=request) token_list = list( ApiToken.objects.filter(application__isnull=True, user_id=user_id).select_related( "application" ) ) return Response(serialize(token_list, request.user, include_token=False)) @method_decorator(never_cache) def post(self, request: Request) -> Response: serializer = ApiTokenSerializer(data=request.data) if serializer.is_valid(): result = serializer.validated_data token = ApiToken.objects.create( user_id=request.user.id, name=result.get("name", None), token_type=AuthTokenType.USER, scope_list=result["scopes"], expires_at=None, ) capture_security_activity( account=request.user, type="api-token-generated", actor=request.user, ip_address=request.META["REMOTE_ADDR"], context={}, send_email=True, ) analytics.record(ApiTokenCreated(user_id=request.user.id)) return Response(serialize(token, request.user), status=201) return Response(serializer.errors, status=400) @method_decorator(never_cache) def delete(self, request: Request): user_id = get_appropriate_user_id(request=request) token_id = request.data.get("tokenId", None) # Account for token_id being 0, which can be considered valid if token_id is None: return Response({"tokenId": token_id}, status=400) with outbox_context(transaction.atomic(router.db_for_write(ApiToken)), flush=False): token_to_delete: ApiToken | None = ApiToken.objects.filter( id=token_id, application__isnull=True, user_id=user_id ).first() if token_to_delete is None: return Response({"tokenId": token_id, "userId": user_id}, status=400) token_to_delete.delete() analytics.record(ApiTokenDeleted(user_id=request.user.id)) return 
Response(status=204)
ApiTokensEndpoint
python
great-expectations__great_expectations
great_expectations/expectations/metrics/query_metrics/query_table/base_query_table.py
{ "start": 132, "end": 288 }
class ____(QueryTable): metric_name = "base_query.table" value_keys = ("base_query",) query_param_name: ClassVar[str] = "base_query"
BaseQueryTable
python
numpy__numpy
numpy/lib/tests/test_arraypad.py
{ "start": 3513, "end": 4533 }
class ____: @pytest.mark.parametrize("mode", _all_modes.keys()) def test_zero_padding_shortcuts(self, mode): test = np.arange(120).reshape(4, 5, 6) pad_amt = [(0, 0) for _ in test.shape] assert_array_equal(test, np.pad(test, pad_amt, mode=mode)) @pytest.mark.parametrize("mode", ['maximum', 'mean', 'median', 'minimum',]) def test_shallow_statistic_range(self, mode): test = np.arange(120).reshape(4, 5, 6) pad_amt = [(1, 1) for _ in test.shape] assert_array_equal(np.pad(test, pad_amt, mode='edge'), np.pad(test, pad_amt, mode=mode, stat_length=1)) @pytest.mark.parametrize("mode", ['maximum', 'mean', 'median', 'minimum',]) def test_clip_statistic_range(self, mode): test = np.arange(30).reshape(5, 6) pad_amt = [(3, 3) for _ in test.shape] assert_array_equal(np.pad(test, pad_amt, mode=mode), np.pad(test, pad_amt, mode=mode, stat_length=30))
TestConditionalShortcuts
python
huggingface__transformers
src/transformers/models/vipllava/modeling_vipllava.py
{ "start": 4207, "end": 5256 }
class ____(nn.Module): def __init__(self, config: VipLlavaConfig): super().__init__() num_feature_layers = 1 if isinstance(config.vision_feature_layers, int) else len(config.vision_feature_layers) self.projector_layernorm = nn.LayerNorm( num_feature_layers * config.vision_config.hidden_size, eps=config.projector_layernorm_eps ) self.linear_1 = nn.Linear( num_feature_layers * config.vision_config.hidden_size, config.text_config.hidden_size, bias=True, ) self.act = ACT2FN[config.projector_hidden_act] self.linear_2 = nn.Linear(config.text_config.hidden_size, config.text_config.hidden_size, bias=True) def forward(self, hidden_states): hidden_states = self.projector_layernorm(hidden_states) hidden_states = self.linear_1(hidden_states) hidden_states = self.act(hidden_states) hidden_states = self.linear_2(hidden_states) return hidden_states @auto_docstring
VipLlavaMultiModalProjector
python
pypa__setuptools
setuptools/_distutils/tests/test_install_lib.py
{ "start": 353, "end": 3612 }
class ____( support.TempdirManager, ): def test_finalize_options(self): dist = self.create_dist()[1] cmd = install_lib(dist) cmd.finalize_options() assert cmd.compile == 1 assert cmd.optimize == 0 # optimize must be 0, 1, or 2 cmd.optimize = 'foo' with pytest.raises(DistutilsOptionError): cmd.finalize_options() cmd.optimize = '4' with pytest.raises(DistutilsOptionError): cmd.finalize_options() cmd.optimize = '2' cmd.finalize_options() assert cmd.optimize == 2 @pytest.mark.skipif('sys.dont_write_bytecode') def test_byte_compile(self): project_dir, dist = self.create_dist() os.chdir(project_dir) cmd = install_lib(dist) cmd.compile = cmd.optimize = 1 f = os.path.join(project_dir, 'foo.py') self.write_file(f, '# python file') cmd.byte_compile([f]) pyc_file = importlib.util.cache_from_source('foo.py', optimization='') pyc_opt_file = importlib.util.cache_from_source( 'foo.py', optimization=cmd.optimize ) assert os.path.exists(pyc_file) assert os.path.exists(pyc_opt_file) def test_get_outputs(self): project_dir, dist = self.create_dist() os.chdir(project_dir) os.mkdir('spam') cmd = install_lib(dist) # setting up a dist environment cmd.compile = cmd.optimize = 1 cmd.install_dir = self.mkdtemp() f = os.path.join(project_dir, 'spam', '__init__.py') self.write_file(f, '# python package') cmd.distribution.ext_modules = [Extension('foo', ['xxx'])] cmd.distribution.packages = ['spam'] cmd.distribution.script_name = 'setup.py' # get_outputs should return 4 elements: spam/__init__.py and .pyc, # foo.import-tag-abiflags.so / foo.pyd outputs = cmd.get_outputs() assert len(outputs) == 4, outputs def test_get_inputs(self): project_dir, dist = self.create_dist() os.chdir(project_dir) os.mkdir('spam') cmd = install_lib(dist) # setting up a dist environment cmd.compile = cmd.optimize = 1 cmd.install_dir = self.mkdtemp() f = os.path.join(project_dir, 'spam', '__init__.py') self.write_file(f, '# python package') cmd.distribution.ext_modules = [Extension('foo', ['xxx'])] 
cmd.distribution.packages = ['spam'] cmd.distribution.script_name = 'setup.py' # get_inputs should return 2 elements: spam/__init__.py and # foo.import-tag-abiflags.so / foo.pyd inputs = cmd.get_inputs() assert len(inputs) == 2, inputs def test_dont_write_bytecode(self, caplog): # makes sure byte_compile is not used dist = self.create_dist()[1] cmd = install_lib(dist) cmd.compile = True cmd.optimize = 1 old_dont_write_bytecode = sys.dont_write_bytecode sys.dont_write_bytecode = True try: cmd.byte_compile([]) finally: sys.dont_write_bytecode = old_dont_write_bytecode assert 'byte-compiling is disabled' in caplog.messages[0]
TestInstallLib
python
django__django
tests/backends/postgresql/tests.py
{ "start": 984, "end": 24747 }
class ____(TestCase): databases = {"default", "other"} def test_nodb_cursor(self): """ The _nodb_cursor() fallbacks to the default connection database when access to the 'postgres' database is not granted. """ orig_connect = BaseDatabaseWrapper.connect def mocked_connect(self): if self.settings_dict["NAME"] is None: raise DatabaseError() return orig_connect(self) with connection._nodb_cursor() as cursor: self.assertIs(cursor.closed, False) self.assertIsNotNone(cursor.db.connection) self.assertIsNone(cursor.db.settings_dict["NAME"]) self.assertIs(cursor.closed, True) self.assertIsNone(cursor.db.connection) # Now assume the 'postgres' db isn't available msg = ( "Normally Django will use a connection to the 'postgres' database " "to avoid running initialization queries against the production " "database when it's not needed (for example, when running tests). " "Django was unable to create a connection to the 'postgres' " "database and will use the first PostgreSQL database instead." ) with self.assertWarnsMessage(RuntimeWarning, msg): with mock.patch( "django.db.backends.base.base.BaseDatabaseWrapper.connect", side_effect=mocked_connect, autospec=True, ): with mock.patch.object( connection, "settings_dict", {**connection.settings_dict, "NAME": "postgres"}, ): with connection._nodb_cursor() as cursor: self.assertIs(cursor.closed, False) self.assertIsNotNone(cursor.db.connection) self.assertIs(cursor.closed, True) self.assertIsNone(cursor.db.connection) self.assertIsNotNone(cursor.db.settings_dict["NAME"]) self.assertEqual( cursor.db.settings_dict["NAME"], connections["other"].settings_dict["NAME"] ) # Cursor is yielded only for the first PostgreSQL database. 
with self.assertWarnsMessage(RuntimeWarning, msg): with mock.patch( "django.db.backends.base.base.BaseDatabaseWrapper.connect", side_effect=mocked_connect, autospec=True, ): with connection._nodb_cursor() as cursor: self.assertIs(cursor.closed, False) self.assertIsNotNone(cursor.db.connection) def test_nodb_cursor_raises_postgres_authentication_failure(self): """ _nodb_cursor() re-raises authentication failure to the 'postgres' db when other connection to the PostgreSQL database isn't available. """ def mocked_connect(self): raise DatabaseError() def mocked_all(self): test_connection = copy.copy(connections[DEFAULT_DB_ALIAS]) test_connection.settings_dict = copy.deepcopy(connection.settings_dict) test_connection.settings_dict["NAME"] = "postgres" return [test_connection] msg = ( "Normally Django will use a connection to the 'postgres' database " "to avoid running initialization queries against the production " "database when it's not needed (for example, when running tests). " "Django was unable to create a connection to the 'postgres' " "database and will use the first PostgreSQL database instead." 
) with self.assertWarnsMessage(RuntimeWarning, msg): mocker_connections_all = mock.patch( "django.utils.connection.BaseConnectionHandler.all", side_effect=mocked_all, autospec=True, ) mocker_connect = mock.patch( "django.db.backends.base.base.BaseDatabaseWrapper.connect", side_effect=mocked_connect, autospec=True, ) with mocker_connections_all, mocker_connect: with self.assertRaises(DatabaseError): with connection._nodb_cursor(): pass def test_nodb_cursor_reraise_exceptions(self): with self.assertRaisesMessage(DatabaseError, "exception"): with connection._nodb_cursor(): raise DatabaseError("exception") def test_database_name_too_long(self): from django.db.backends.postgresql.base import DatabaseWrapper settings = connection.settings_dict.copy() max_name_length = connection.ops.max_name_length() settings["NAME"] = "a" + (max_name_length * "a") msg = ( "The database name '%s' (%d characters) is longer than " "PostgreSQL's limit of %s characters. Supply a shorter NAME in " "settings.DATABASES." ) % (settings["NAME"], max_name_length + 1, max_name_length) with self.assertRaisesMessage(ImproperlyConfigured, msg): DatabaseWrapper(settings).get_connection_params() def test_database_name_empty(self): from django.db.backends.postgresql.base import DatabaseWrapper settings = connection.settings_dict.copy() settings["NAME"] = "" msg = ( "settings.DATABASES is improperly configured. Please supply the " "NAME or OPTIONS['service'] value." 
) with self.assertRaisesMessage(ImproperlyConfigured, msg): DatabaseWrapper(settings).get_connection_params() def test_service_name(self): from django.db.backends.postgresql.base import DatabaseWrapper settings = connection.settings_dict.copy() settings["OPTIONS"] = {"service": "my_service"} settings["NAME"] = "" params = DatabaseWrapper(settings).get_connection_params() self.assertEqual(params["service"], "my_service") self.assertNotIn("database", params) def test_service_name_default_db(self): # None is used to connect to the default 'postgres' db. from django.db.backends.postgresql.base import DatabaseWrapper settings = connection.settings_dict.copy() settings["NAME"] = None settings["OPTIONS"] = {"service": "django_test"} params = DatabaseWrapper(settings).get_connection_params() self.assertEqual(params["dbname"], "postgres") self.assertNotIn("service", params) def test_connect_and_rollback(self): """ PostgreSQL shouldn't roll back SET TIME ZONE, even if the first transaction is rolled back (#17062). """ new_connection = no_pool_connection() try: # Ensure the database default time zone is different than # the time zone in new_connection.settings_dict. We can # get the default time zone by reset & show. with new_connection.cursor() as cursor: cursor.execute("RESET TIMEZONE") cursor.execute("SHOW TIMEZONE") db_default_tz = cursor.fetchone()[0] new_tz = "Europe/Paris" if db_default_tz == "UTC" else "UTC" new_connection.close() # Invalidate timezone name cache, because the setting_changed # handler cannot know about new_connection. del new_connection.timezone_name # Fetch a new connection with the new_tz as default # time zone, run a query and rollback. with self.settings(TIME_ZONE=new_tz): new_connection.set_autocommit(False) new_connection.rollback() # Now let's see if the rollback rolled back the SET TIME ZONE. 
with new_connection.cursor() as cursor: cursor.execute("SHOW TIMEZONE") tz = cursor.fetchone()[0] self.assertEqual(new_tz, tz) finally: new_connection.close() def test_connect_non_autocommit(self): """ The connection wrapper shouldn't believe that autocommit is enabled after setting the time zone when AUTOCOMMIT is False (#21452). """ new_connection = no_pool_connection() new_connection.settings_dict["AUTOCOMMIT"] = False try: # Open a database connection. with new_connection.cursor(): self.assertFalse(new_connection.get_autocommit()) finally: new_connection.close() @unittest.skipUnless(is_psycopg3, "psycopg3 specific test") def test_connect_pool(self): from psycopg_pool import PoolTimeout new_connection = no_pool_connection(alias="default_pool") new_connection.settings_dict["OPTIONS"]["pool"] = { "min_size": 0, "max_size": 2, "timeout": 5, } self.assertIsNotNone(new_connection.pool) connections = [] def get_connection(): # copy() reuses the existing alias and as such the same pool. conn = new_connection.copy() conn.connect() connections.append(conn) return conn try: connection_1 = get_connection() # First connection. connection_1_backend_pid = connection_1.connection.info.backend_pid get_connection() # Get the second connection. with self.assertRaises(PoolTimeout): # The pool has a maximum of 2 connections. get_connection() connection_1.close() # Release back to the pool. connection_3 = get_connection() # Reuses the first connection as it is available. self.assertEqual( connection_3.connection.info.backend_pid, connection_1_backend_pid ) finally: # Release all connections back to the pool. 
for conn in connections: conn.close() new_connection.close_pool() @unittest.skipUnless(is_psycopg3, "psycopg3 specific test") def test_connect_pool_set_to_true(self): new_connection = no_pool_connection(alias="default_pool") new_connection.settings_dict["OPTIONS"]["pool"] = True try: self.assertIsNotNone(new_connection.pool) finally: new_connection.close_pool() @unittest.skipUnless(is_psycopg3, "psycopg3 specific test") def test_connect_pool_with_timezone(self): new_time_zone = "Africa/Nairobi" new_connection = no_pool_connection(alias="default_pool") try: with new_connection.cursor() as cursor: cursor.execute("SHOW TIMEZONE") tz = cursor.fetchone()[0] self.assertNotEqual(new_time_zone, tz) finally: new_connection.close() del new_connection.timezone_name new_connection.settings_dict["OPTIONS"]["pool"] = True try: with self.settings(TIME_ZONE=new_time_zone): with new_connection.cursor() as cursor: cursor.execute("SHOW TIMEZONE") tz = cursor.fetchone()[0] self.assertEqual(new_time_zone, tz) finally: new_connection.close() new_connection.close_pool() @unittest.skipUnless(is_psycopg3, "psycopg3 specific test") def test_pooling_health_checks(self): new_connection = no_pool_connection(alias="default_pool") new_connection.settings_dict["OPTIONS"]["pool"] = True new_connection.settings_dict["CONN_HEALTH_CHECKS"] = False try: self.assertIsNone(new_connection.pool._check) finally: new_connection.close_pool() new_connection.settings_dict["CONN_HEALTH_CHECKS"] = True try: self.assertIsNotNone(new_connection.pool._check) finally: new_connection.close_pool() @unittest.skipUnless(is_psycopg3, "psycopg3 specific test") def test_cannot_open_new_connection_in_atomic_block(self): new_connection = no_pool_connection(alias="default_pool") new_connection.settings_dict["OPTIONS"]["pool"] = True msg = "Cannot open a new connection in an atomic block." 
new_connection.in_atomic_block = True new_connection.closed_in_transaction = True with self.assertRaisesMessage(ProgrammingError, msg): new_connection.ensure_connection() @unittest.skipUnless(is_psycopg3, "psycopg3 specific test") def test_pooling_not_support_persistent_connections(self): new_connection = no_pool_connection(alias="default_pool") new_connection.settings_dict["OPTIONS"]["pool"] = True new_connection.settings_dict["CONN_MAX_AGE"] = 10 msg = "Pooling doesn't support persistent connections." with self.assertRaisesMessage(ImproperlyConfigured, msg): new_connection.pool @unittest.skipIf(is_psycopg3, "psycopg2 specific test") def test_connect_pool_setting_ignored_for_psycopg2(self): new_connection = no_pool_connection() new_connection.settings_dict["OPTIONS"]["pool"] = True msg = "Database pooling requires psycopg >= 3" with self.assertRaisesMessage(ImproperlyConfigured, msg): new_connection.connect() def test_connect_isolation_level(self): """ The transaction level can be configured with DATABASES ['OPTIONS']['isolation_level']. """ from django.db.backends.postgresql.psycopg_any import IsolationLevel # Since this is a django.test.TestCase, a transaction is in progress # and the isolation level isn't reported as 0. This test assumes that # PostgreSQL is configured with the default isolation level. # Check the level on the psycopg connection, not the Django wrapper. self.assertIsNone(connection.connection.isolation_level) new_connection = no_pool_connection() new_connection.settings_dict["OPTIONS"][ "isolation_level" ] = IsolationLevel.SERIALIZABLE try: # Start a transaction so the isolation level isn't reported as 0. new_connection.set_autocommit(False) # Check the level on the psycopg connection, not the Django # wrapper. 
self.assertEqual( new_connection.connection.isolation_level, IsolationLevel.SERIALIZABLE, ) finally: new_connection.close() def test_connect_invalid_isolation_level(self): self.assertIsNone(connection.connection.isolation_level) new_connection = no_pool_connection() new_connection.settings_dict["OPTIONS"]["isolation_level"] = -1 msg = ( "Invalid transaction isolation level -1 specified. Use one of the " "psycopg.IsolationLevel values." ) with self.assertRaisesMessage(ImproperlyConfigured, msg): new_connection.ensure_connection() def test_connect_role(self): """ The session role can be configured with DATABASES ["OPTIONS"]["assume_role"]. """ try: custom_role = "django_nonexistent_role" new_connection = no_pool_connection() new_connection.settings_dict["OPTIONS"]["assume_role"] = custom_role msg = f'role "{custom_role}" does not exist' with self.assertRaisesMessage(errors.InvalidParameterValue, msg): new_connection.connect() finally: new_connection.close() @unittest.skipUnless(is_psycopg3, "psycopg3 specific test") def test_connect_server_side_binding(self): """ The server-side parameters binding role can be enabled with DATABASES ["OPTIONS"]["server_side_binding"]. """ from django.db.backends.postgresql.base import ServerBindingCursor new_connection = no_pool_connection() new_connection.settings_dict["OPTIONS"]["server_side_binding"] = True try: new_connection.connect() self.assertEqual( new_connection.connection.cursor_factory, ServerBindingCursor, ) finally: new_connection.close() def test_connect_custom_cursor_factory(self): """ A custom cursor factory can be configured with DATABASES["options"] ["cursor_factory"]. 
""" from django.db.backends.postgresql.base import Cursor class MyCursor(Cursor): pass new_connection = no_pool_connection() new_connection.settings_dict["OPTIONS"]["cursor_factory"] = MyCursor try: new_connection.connect() self.assertEqual(new_connection.connection.cursor_factory, MyCursor) finally: new_connection.close() def test_connect_no_is_usable_checks(self): new_connection = no_pool_connection() try: with mock.patch.object(new_connection, "is_usable") as is_usable: new_connection.connect() is_usable.assert_not_called() finally: new_connection.close() def test_client_encoding_utf8_enforce(self): new_connection = no_pool_connection() new_connection.settings_dict["OPTIONS"]["client_encoding"] = "iso-8859-2" try: new_connection.connect() if is_psycopg3: self.assertEqual(new_connection.connection.info.encoding, "utf-8") else: self.assertEqual(new_connection.connection.encoding, "UTF8") finally: new_connection.close() def _select(self, val): with connection.cursor() as cursor: cursor.execute("SELECT %s::text[]", (val,)) return cursor.fetchone()[0] def test_select_ascii_array(self): a = ["awef"] b = self._select(a) self.assertEqual(a[0], b[0]) def test_select_unicode_array(self): a = ["ᄲawef"] b = self._select(a) self.assertEqual(a[0], b[0]) def test_lookup_cast(self): from django.db.backends.postgresql.operations import DatabaseOperations do = DatabaseOperations(connection=None) lookups = ( "iexact", "contains", "icontains", "startswith", "istartswith", "endswith", "iendswith", "regex", "iregex", ) for lookup in lookups: with self.subTest(lookup=lookup): self.assertIn("::text", do.lookup_cast(lookup)) def test_lookup_cast_isnull_noop(self): from django.db.backends.postgresql.operations import DatabaseOperations do = DatabaseOperations(connection=None) # Using __isnull lookup doesn't require casting. 
tests = [ "CharField", "EmailField", "TextField", ] for field_type in tests: with self.subTest(field_type=field_type): self.assertEqual(do.lookup_cast("isnull", field_type), "%s") def test_correct_extraction_psycopg_version(self): from django.db.backends.postgresql.base import Database, psycopg_version psycopg_version.cache_clear() with mock.patch.object(Database, "__version__", "4.2.1 (dt dec pq3 ext lo64)"): self.addCleanup(psycopg_version.cache_clear) self.assertEqual(psycopg_version(), (4, 2, 1)) psycopg_version.cache_clear() with mock.patch.object( Database, "__version__", "4.2b0.dev1 (dt dec pq3 ext lo64)" ): self.assertEqual(psycopg_version(), (4, 2)) @override_settings(DEBUG=True) @unittest.skipIf(is_psycopg3, "psycopg2 specific test") def test_copy_to_expert_cursors(self): out = StringIO() copy_expert_sql = "COPY django_session TO STDOUT (FORMAT CSV, HEADER)" with connection.cursor() as cursor: cursor.copy_expert(copy_expert_sql, out) cursor.copy_to(out, "django_session") self.assertEqual( [q["sql"] for q in connection.queries], [copy_expert_sql, "COPY django_session TO STDOUT"], ) @override_settings(DEBUG=True) @unittest.skipUnless(is_psycopg3, "psycopg3 specific test") def test_copy_cursors(self): copy_sql = "COPY django_session TO STDOUT (FORMAT CSV, HEADER)" with connection.cursor() as cursor: with cursor.copy(copy_sql) as copy: for row in copy: pass self.assertEqual([q["sql"] for q in connection.queries], [copy_sql]) def test_get_database_version(self): new_connection = no_pool_connection() new_connection.pg_version = 150009 self.assertEqual(new_connection.get_database_version(), (15, 9)) @mock.patch.object(connection, "get_database_version", return_value=(14,)) def test_check_database_version_supported(self, mocked_get_database_version): msg = "PostgreSQL 15 or later is required (found 14)." 
with self.assertRaisesMessage(NotSupportedError, msg): connection.check_database_version_supported() self.assertTrue(mocked_get_database_version.called) def test_compose_sql_when_no_connection(self): new_connection = no_pool_connection() try: self.assertEqual( new_connection.ops.compose_sql("SELECT %s", ["test"]), "SELECT 'test'", ) finally: new_connection.close() def test_bypass_timezone_configuration(self): from django.db.backends.postgresql.base import DatabaseWrapper class CustomDatabaseWrapper(DatabaseWrapper): def _configure_timezone(self, connection): return False for Wrapper, commit in [ (DatabaseWrapper, True), (CustomDatabaseWrapper, False), ]: with self.subTest(wrapper=Wrapper, commit=commit): new_connection = no_pool_connection() self.addCleanup(new_connection.close) # Set the database default time zone to be different from # the time zone in new_connection.settings_dict. with new_connection.cursor() as cursor: cursor.execute("RESET TIMEZONE") cursor.execute("SHOW TIMEZONE") db_default_tz = cursor.fetchone()[0] new_tz = "Europe/Paris" if db_default_tz == "UTC" else "UTC" new_connection.timezone_name = new_tz settings = new_connection.settings_dict.copy() conn = new_connection.connection self.assertIs(Wrapper(settings)._configure_connection(conn), commit) def test_bypass_role_configuration(self): from django.db.backends.postgresql.base import DatabaseWrapper class CustomDatabaseWrapper(DatabaseWrapper): def _configure_role(self, connection): return False new_connection = no_pool_connection() self.addCleanup(new_connection.close) new_connection.connect() settings = new_connection.settings_dict.copy() settings["OPTIONS"]["assume_role"] = "django_nonexistent_role" conn = new_connection.connection self.assertIs( CustomDatabaseWrapper(settings)._configure_connection(conn), False )
Tests
python
tensorflow__tensorflow
tensorflow/python/ops/ragged/ragged_where_op_test.py
{ "start": 9492, "end": 16974 }
class ____(test_util.TensorFlowTestCase, parameterized.TestCase): @parameterized.parameters([ #========================================================================= # Coordinate-retrieval mode #========================================================================= dict( # shape=[D1] condition=[True, False, True, False, True], expected=[[0], [2], [4]]), dict( # shape=[D1, D2] condition=[[True, False], [False, True]], expected=[[0, 0], [1, 1]]), dict( # shape=[D1, (D2)] condition=ragged_factory_ops.constant_value( [[True, False, True], [False, True]]), expected=[[0, 0], [0, 2], [1, 1]]), dict( # shape=[D1, (D2), (D3)] condition=ragged_factory_ops.constant_value([ [[True, False, True], [False, True]], [[True], [], [False], [False, True, False]] ]), expected=[[0, 0, 0], [0, 0, 2], [0, 1, 1], [1, 0, 0], [1, 3, 1]]), dict( # shape=[D1, (D2), D3] condition=ragged_factory_ops.constant_value([ [[True, False], [False, True]], [[True, False], [False, False], [True, False], [False, True]] ], ragged_rank=1), expected=[[0, 0, 0], [0, 1, 1], [1, 0, 0], [1, 2, 0], [1, 3, 1]]), dict( # shape=[D1, (D2), (D3), (D4)] condition=ragged_factory_ops.constant_value([ [[[], [True]]], [[[True, False, True], [False, True]], [[True], [], [False], [False, True, False]]] ]), expected=[[0, 0, 1, 0], [1, 0, 0, 0], [1, 0, 0, 2], [1, 0, 1, 1], [1, 1, 0, 0], [1, 1, 3, 1]]), #========================================================================= # Elementwise multiplexing #========================================================================= dict( # shape=[] condition=True, x='A', y='a', expected=b'A'), dict( # shape=[] condition=False, x='A', y='a', expected=b'a'), dict( # shape=[D1] condition=[True, False, True], x=['A', 'B', 'C'], y=['a', 'b', 'c'], expected=[b'A', b'b', b'C']), dict( # shape=[D1, D2] condition=[[True, False], [False, True]], x=[['A', 'B'], ['D', 'E']], y=[['a', 'b'], ['d', 'e']], expected=[[b'A', b'b'], [b'd', b'E']]), dict( # shape=[D1, (D2)] 
condition=ragged_factory_ops.constant_value( [[True, False, True], [False, True]]), x=ragged_factory_ops.constant_value([['A', 'B', 'C'], ['D', 'E']]), y=ragged_factory_ops.constant_value([['a', 'b', 'c'], ['d', 'e']]), expected=ragged_factory_ops.constant_value( [[b'A', b'b', b'C'], [b'd', b'E']])), dict( # shape=[D1, (D2), D3] condition=ragged_factory_ops.constant_value([ [[True, False], [False, True]], [[True, False], [False, False], [True, False], [False, True]] ], ragged_rank=1), x=ragged_factory_ops.constant_value([ [['A', 'B'], ['C', 'D']], [['E', 'F'], ['G', 'H'], ['I', 'J'], ['K', 'L']] ], ragged_rank=1), y=ragged_factory_ops.constant_value([ [['a', 'b'], ['c', 'd']], [['e', 'f'], ['g', 'h'], ['i', 'j'], ['k', 'l']] ], ragged_rank=1), expected=ragged_factory_ops.constant_value([ [[b'A', b'b'], [b'c', b'D']], [[b'E', b'f'], [b'g', b'h'], [b'I', b'j'], [b'k', b'L']] ], ragged_rank=1)), dict( # shape=[D1, (D2), (D3), (D4)] condition=ragged_factory_ops.constant_value([ [[[], [True]]], [[[True, False, True], [False, True]], [[True], [], [False], [False, True, False]]] ]), x=ragged_factory_ops.constant_value([ [[[], ['A']]], [[['B', 'C', 'D'], ['E', 'F']], [['G'], [], ['H'], ['I', 'J', 'K']]] ]), y=ragged_factory_ops.constant_value([ [[[], ['a']]], [[['b', 'c', 'd'], ['e', 'f']], [['g'], [], ['h'], ['i', 'j', 'k']]] ]), expected=ragged_factory_ops.constant_value([ [[[], [b'A']]], [[[b'B', b'c', b'D'], [b'e', b'F']], [[b'G'], [], [b'h'], [b'i', b'J', b'k']]] ])), #========================================================================= # Broadcasting #========================================================================= dict( # c.shape=[D1], x.shape=[D1, D2], y.shape=[D1, D2] condition=[[True], [False], [True]], x=[['A', 'B'], ['C', 'D'], ['E', 'F']], y=[['a', 'b'], ['c', 'd'], ['e', 'f']], expected=[[b'A', b'B'], [b'c', b'd'], [b'E', b'F']]), dict( # c.shape=[D1], x.shape=[D1, (D2)], y.shape=[D1, (D2)] condition=[[True], [False], [True]], 
x=ragged_factory_ops.constant_value( [['A', 'B', 'C'], ['D', 'E'], ['F', 'G']]), y=ragged_factory_ops.constant_value( [['a', 'b', 'c'], ['d', 'e'], ['f', 'g']]), expected=ragged_factory_ops.constant_value( [[b'A', b'B', b'C'], [b'd', b'e'], [b'F', b'G']])), dict( # c.shape=[D1, None], x.shape=[], y.shape=[] condition=ragged_factory_ops.constant_value( [[True, False, True, True], [True, False]]), x=10, y=20, expected=ragged_factory_ops.constant_value( [[10, 20, 10, 10], [10, 20]])), dict( # c.shape=[D1, D2], x.shape=[D1, 1], y.shape=[1, D2] condition=[[True, False], [True, False], [False, True]], x=[[10], [20], [30]], y=[[40, 50]], expected=[[10, 50], [20, 50], [40, 30]]), dict( # c.shape=[D1, (D2), D3], x.shape=[D1, (D2), 1], y.shape=[D3] condition=ragged_factory_ops.constant_value( [[[True, False], [False, True]], [[True, True]]], ragged_rank=1), x=ragged_factory_ops.constant_value([[[10], [20]], [[30]]], ragged_rank=1), y=np.array([[[40, 50]]]), expected=[[[10, 50], [40, 20]], [[30, 30]]]), ]) # pyformat: disable def testRaggedWhere(self, condition, expected, x=None, y=None): result = ragged_where_op.where_v2(condition, x, y) self.assertAllEqual(result, expected) @parameterized.parameters([ dict( condition=[True, False], x=[1, 2], error=ValueError, message='x and y must be either both None or both non-None'), dict( condition=ragged_factory_ops.constant_value([[True, False, True], [False, True]]), x=ragged_factory_ops.constant_value([['A', 'B', 'C'], ['D', 'E']]), y=[['a', 'b'], ['d', 'e']], error=errors.InvalidArgumentError, message=r'must be broadcastable|Unable to broadcast'), ]) def testRaggedWhereErrors(self, condition, error, message, x=None, y=None): with self.assertRaisesRegex(error, message): self.evaluate(ragged_where_op.where_v2(condition, x, y)) if __name__ == '__main__': googletest.main()
RaggedWhereV2OpTest
python
kamyu104__LeetCode-Solutions
Python/process-restricted-friend-requests.py
{ "start": 758, "end": 1444 }
class ____(object): def friendRequests(self, n, restrictions, requests): """ :type n: int :type restrictions: List[List[int]] :type requests: List[List[int]] :rtype: List[bool] """ result = [] uf = UnionFind(n) for u, v in requests: pu, pv = uf.find_set(u), uf.find_set(v) ok = True for x, y in restrictions: px, py = uf.find_set(x), uf.find_set(y) if {px, py} == {pu, pv}: ok = False break result.append(ok) if ok: uf.union_set(u, v) return result
Solution
python
pytorch__pytorch
test/test_testing.py
{ "start": 64296, "end": 73446 }
class ____(TestCase): def test_default_names(self): class TestParametrized(TestCase): @parametrize("x", range(5)) def test_default_names(self, x): pass @parametrize("x,y", [(1, 2), (2, 3), (3, 4)]) def test_two_things_default_names(self, x, y): pass instantiate_parametrized_tests(TestParametrized) expected_test_names = [ 'TestParametrized.test_default_names_x_0', 'TestParametrized.test_default_names_x_1', 'TestParametrized.test_default_names_x_2', 'TestParametrized.test_default_names_x_3', 'TestParametrized.test_default_names_x_4', 'TestParametrized.test_two_things_default_names_x_1_y_2', 'TestParametrized.test_two_things_default_names_x_2_y_3', 'TestParametrized.test_two_things_default_names_x_3_y_4', ] test_names = _get_test_names_for_test_class(TestParametrized) self.assertEqual(expected_test_names, test_names) def test_name_fn(self): class TestParametrized(TestCase): @parametrize("bias", [False, True], name_fn=lambda b: 'bias' if b else 'no_bias') def test_custom_names(self, bias): pass @parametrize("x", [1, 2], name_fn=str) @parametrize("y", [3, 4], name_fn=str) @parametrize("z", [5, 6], name_fn=str) def test_three_things_composition_custom_names(self, x, y, z): pass @parametrize("x,y", [(1, 2), (1, 3), (1, 4)], name_fn=lambda x, y: f'{x}__{y}') def test_two_things_custom_names_alternate(self, x, y): pass instantiate_parametrized_tests(TestParametrized) expected_test_names = [ 'TestParametrized.test_custom_names_bias', 'TestParametrized.test_custom_names_no_bias', 'TestParametrized.test_three_things_composition_custom_names_1_3_5', 'TestParametrized.test_three_things_composition_custom_names_1_3_6', 'TestParametrized.test_three_things_composition_custom_names_1_4_5', 'TestParametrized.test_three_things_composition_custom_names_1_4_6', 'TestParametrized.test_three_things_composition_custom_names_2_3_5', 'TestParametrized.test_three_things_composition_custom_names_2_3_6', 'TestParametrized.test_three_things_composition_custom_names_2_4_5', 
'TestParametrized.test_three_things_composition_custom_names_2_4_6', 'TestParametrized.test_two_things_custom_names_alternate_1__2', 'TestParametrized.test_two_things_custom_names_alternate_1__3', 'TestParametrized.test_two_things_custom_names_alternate_1__4', ] test_names = _get_test_names_for_test_class(TestParametrized) self.assertEqual(expected_test_names, test_names) def test_reparametrize(self): def include_is_even_arg(test_name, param_kwargs): x = param_kwargs["x"] is_even = x % 2 == 0 new_param_kwargs = dict(param_kwargs) new_param_kwargs["is_even"] = is_even is_even_suffix = "_even" if is_even else "_odd" new_test_name = f"{test_name}{is_even_suffix}" yield (new_test_name, new_param_kwargs) def exclude_odds(test_name, param_kwargs): x = param_kwargs["x"] is_even = x % 2 == 0 yield None if not is_even else (test_name, param_kwargs) class TestParametrized(TestCase): @reparametrize(parametrize("x", range(5)), include_is_even_arg) def test_foo(self, x, is_even): pass @reparametrize(parametrize("x", range(5)), exclude_odds) def test_bar(self, x): pass instantiate_parametrized_tests(TestParametrized) expected_test_names = [ 'TestParametrized.test_bar_x_0', 'TestParametrized.test_bar_x_2', 'TestParametrized.test_bar_x_4', 'TestParametrized.test_foo_x_0_even', 'TestParametrized.test_foo_x_1_odd', 'TestParametrized.test_foo_x_2_even', 'TestParametrized.test_foo_x_3_odd', 'TestParametrized.test_foo_x_4_even', ] test_names = _get_test_names_for_test_class(TestParametrized) self.assertEqual(expected_test_names, test_names) def test_subtest_names(self): class TestParametrized(TestCase): @parametrize("bias", [subtest(True, name='bias'), subtest(False, name='no_bias')]) def test_custom_names(self, bias): pass @parametrize("x,y", [subtest((1, 2), name='double'), subtest((1, 3), name='triple'), subtest((1, 4), name='quadruple')]) def test_two_things_custom_names(self, x, y): pass instantiate_parametrized_tests(TestParametrized) expected_test_names = [ 
'TestParametrized.test_custom_names_bias', 'TestParametrized.test_custom_names_no_bias', 'TestParametrized.test_two_things_custom_names_double', 'TestParametrized.test_two_things_custom_names_quadruple', 'TestParametrized.test_two_things_custom_names_triple', ] test_names = _get_test_names_for_test_class(TestParametrized) self.assertEqual(expected_test_names, test_names) def test_apply_param_specific_decorators(self): # Test that decorators can be applied on a per-param basis. def test_dec(func): func._decorator_applied = True return func class TestParametrized(TestCase): @parametrize("x", [subtest(1, name='one'), subtest(2, name='two', decorators=[test_dec]), subtest(3, name='three')]) def test_param(self, x): pass instantiate_parametrized_tests(TestParametrized) for test_func, name in _get_test_funcs_for_test_class(TestParametrized): self.assertEqual(hasattr(test_func, '_decorator_applied'), name == 'test_param_two') def test_compose_param_specific_decorators(self): # Test that multiple per-param decorators compose correctly. def test_dec(func): func._decorator_applied = True return func class TestParametrized(TestCase): @parametrize("x", [subtest(1), subtest(2, decorators=[test_dec]), subtest(3)]) @parametrize("y", [subtest(False, decorators=[test_dec]), subtest(True)]) def test_param(self, x, y): pass instantiate_parametrized_tests(TestParametrized) for test_func, name in _get_test_funcs_for_test_class(TestParametrized): # Decorator should be applied whenever either x == 2 or y == False. should_apply = ('x_2' in name) or ('y_False' in name) self.assertEqual(hasattr(test_func, '_decorator_applied'), should_apply) def test_modules_decorator_misuse_error(self): # Test that @modules errors out when used with instantiate_parametrized_tests(). 
class TestParametrized(TestCase): @modules(module_db) def test_modules(self, module_info): pass with self.assertRaisesRegex(RuntimeError, 'intended to be used in a device-specific context'): instantiate_parametrized_tests(TestParametrized) def test_ops_decorator_misuse_error(self): # Test that @ops errors out when used with instantiate_parametrized_tests(). class TestParametrized(TestCase): @ops(op_db) def test_ops(self, module_info): pass with self.assertRaisesRegex(RuntimeError, 'intended to be used in a device-specific context'): instantiate_parametrized_tests(TestParametrized) def test_multiple_handling_of_same_param_error(self): # Test that multiple decorators handling the same param errors out. class TestParametrized(TestCase): @parametrize("x", range(3)) @parametrize("x", range(5)) def test_param(self, x): pass with self.assertRaisesRegex(RuntimeError, 'multiple parametrization decorators'): instantiate_parametrized_tests(TestParametrized) @parametrize("x", [1, subtest(2, decorators=[unittest.expectedFailure]), 3]) def test_subtest_expected_failure(self, x): if x == 2: raise RuntimeError('Boom') @parametrize("x", [subtest(1, decorators=[unittest.expectedFailure]), 2, 3]) @parametrize("y", [4, 5, subtest(6, decorators=[unittest.expectedFailure])]) def test_two_things_subtest_expected_failure(self, x, y): if x == 1 or y == 6: raise RuntimeError('Boom')
TestTestParametrization
python
huggingface__transformers
src/transformers/models/regnet/convert_regnet_seer_10b_to_pytorch.py
{ "start": 1575, "end": 2574 }
class ____: module: nn.Module traced: list[nn.Module] = field(default_factory=list) handles: list = field(default_factory=list) name2module: dict[str, nn.Module] = field(default_factory=OrderedDict) def _forward_hook(self, m, inputs: Tensor, outputs: Tensor, name: str): has_not_submodules = len(list(m.modules())) == 1 or isinstance(m, (nn.Conv2d, nn.BatchNorm2d)) if has_not_submodules: self.traced.append(m) self.name2module[name] = m def __call__(self, x: Tensor): for name, m in self.module.named_modules(): self.handles.append(m.register_forward_hook(partial(self._forward_hook, name=name))) self.module(x) [x.remove() for x in self.handles] return self @property def parametrized(self): # check the len of the state_dict keys to see if we have learnable params return {k: v for k, v in self.name2module.items() if len(list(v.state_dict().keys())) > 0}
Tracker
python
buildout__buildout
zc.recipe.egg_/src/zc/recipe/egg/egg.py
{ "start": 5513, "end": 8614 }
class ____(Eggs): def __init__(self, buildout, name, options): super(Scripts, self).__init__(buildout, name, options) options['bin-directory'] = buildout['buildout']['bin-directory'] options['_b'] = options['bin-directory'] # backward compat. self.extra_paths = [ os.path.join(buildout['buildout']['directory'], p.strip()) for p in options.get('extra-paths', '').split('\n') if p.strip() ] if self.extra_paths: options['extra-paths'] = '\n'.join(self.extra_paths) relative_paths = options.get( 'relative-paths', buildout['buildout'].get('relative-paths', 'false') ) if relative_paths == 'true': options['buildout-directory'] = buildout['buildout']['directory'] self._relative_paths = options['buildout-directory'] else: self._relative_paths = '' assert relative_paths == 'false' parse_entry_point = re.compile( r'([^=]+)=(\w+(?:[.]\w+)*):(\w+(?:[.]\w+)*)$' ).match def install(self): reqs, ws = self.working_set() options = self.options scripts = options.get('scripts') if scripts or scripts is None: if scripts is not None: scripts = scripts.split() scripts = dict([ ('=' in s) and s.split('=', 1) or (s, s) for s in scripts ]) for s in options.get('entry-points', '').split(): parsed = self.parse_entry_point(s) if not parsed: logging.getLogger(self.name).error( "Cannot parse the entry point %s.", s) raise zc.buildout.UserError("Invalid entry point") reqs.append(parsed.groups()) if get_bool(options, 'dependent-scripts'): # Generate scripts for all packages in the working set, # except setuptools. 
reqs = list(reqs) for dist in ws: name = dist.project_name if name != 'setuptools' and name not in reqs: reqs.append(name) return zc.buildout.easy_install.scripts( reqs, ws, sys.executable, options['bin-directory'], scripts=scripts, extra_paths=self.extra_paths, interpreter=options.get('interpreter'), initialization=options.get('initialization', ''), arguments=options.get('arguments', ''), relative_paths=self._relative_paths, ) return () update = install def get_bool(options, name, default=False): value = options.get(name) if not value: return default if value == 'true': return True elif value == 'false': return False else: raise zc.buildout.UserError( "Invalid value for %s option: %s" % (name, value)) Egg = Scripts
Scripts
python
oauthlib__oauthlib
oauthlib/oauth1/rfc5849/errors.py
{ "start": 2245, "end": 2334 }
class ____(OAuth1Error): error = 'invalid_signature_method'
InvalidSignatureMethodError
python
pyca__cryptography
tests/x509/test_ocsp.py
{ "start": 3058, "end": 6738 }
class ____: def test_bad_request(self): with pytest.raises(ValueError): ocsp.load_der_ocsp_request(b"invalid") def test_load_request(self): req = _load_data( os.path.join("x509", "ocsp", "req-sha1.der"), ocsp.load_der_ocsp_request, ) assert isinstance(req, ocsp.OCSPRequest) assert req.issuer_name_hash == ( b"8\xcaF\x8c\x07D\x8d\xf4\x81\x96\xc7mmLpQ\x9e`\xa7\xbd" ) assert req.issuer_key_hash == ( b"yu\xbb\x84:\xcb,\xdez\t\xbe1\x1bC\xbc\x1c*MSX" ) assert isinstance(req.hash_algorithm, hashes.SHA1) assert req.serial_number == int( "98D9E5C0B4C373552DF77C5D0F1EB5128E4945F9", 16 ) assert len(req.extensions) == 0 def test_load_request_with_extensions(self): req = _load_data( os.path.join("x509", "ocsp", "req-ext-nonce.der"), ocsp.load_der_ocsp_request, ) assert len(req.extensions) == 1 ext = req.extensions[0] assert ext.critical is False assert ext.value == x509.OCSPNonce( b"{\x80Z\x1d7&\xb8\xb8OH\xd2\xf8\xbf\xd7-\xfd" ) def test_load_request_with_acceptable_responses(self): req = _load_data( os.path.join("x509", "ocsp", "req-acceptable-responses.der"), ocsp.load_der_ocsp_request, ) assert len(req.extensions) == 1 ext = req.extensions[0] assert ext.critical is False assert ext.value == x509.OCSPAcceptableResponses( [x509.ObjectIdentifier("1.3.6.1.5.5.7.48.1.1")] ) def test_load_request_with_unknown_extension(self): req = _load_data( os.path.join("x509", "ocsp", "req-ext-unknown-oid.der"), ocsp.load_der_ocsp_request, ) assert len(req.extensions) == 1 ext = req.extensions[0] assert ext.critical is False assert ext.value == x509.UnrecognizedExtension( x509.ObjectIdentifier("1.3.6.1.5.5.7.48.1.2213"), b"\x04\x10{\x80Z\x1d7&\xb8\xb8OH\xd2\xf8\xbf\xd7-\xfd", ) def test_load_request_with_duplicate_extension(self): req = _load_data( os.path.join("x509", "ocsp", "req-duplicate-ext.der"), ocsp.load_der_ocsp_request, ) with pytest.raises(x509.DuplicateExtension): req.extensions def test_load_request_two_requests(self): with pytest.raises(NotImplementedError): _load_data( 
os.path.join("x509", "ocsp", "req-multi-sha1.der"), ocsp.load_der_ocsp_request, ) def test_invalid_hash_algorithm(self): req = _load_data( os.path.join("x509", "ocsp", "req-invalid-hash-alg.der"), ocsp.load_der_ocsp_request, ) with raises_unsupported_algorithm(None): req.hash_algorithm def test_serialize_request(self): req_bytes = load_vectors_from_file( filename=os.path.join("x509", "ocsp", "req-sha1.der"), loader=lambda data: data.read(), mode="rb", ) req = ocsp.load_der_ocsp_request(req_bytes) assert req.public_bytes(serialization.Encoding.DER) == req_bytes def test_invalid_serialize_encoding(self): req = _load_data( os.path.join("x509", "ocsp", "req-sha1.der"), ocsp.load_der_ocsp_request, ) with pytest.raises(ValueError): req.public_bytes("invalid") with pytest.raises(ValueError): req.public_bytes(serialization.Encoding.PEM)
TestOCSPRequest
python
lepture__authlib
authlib/oidc/core/grants/code.py
{ "start": 3696, "end": 5681 }
class ____(OpenIDToken): """An extension from OpenID Connect for "grant_type=code" request. Developers MUST implement the missing methods:: class MyOpenIDCode(OpenIDCode): def get_jwt_config(self, grant): return {...} def exists_nonce(self, nonce, request): return check_if_nonce_in_cache(request.payload.client_id, nonce) def generate_user_info(self, user, scope): return {...} The register this extension with AuthorizationCodeGrant:: authorization_server.register_grant( AuthorizationCodeGrant, extensions=[MyOpenIDCode()] ) """ def __init__(self, require_nonce=False): self.require_nonce = require_nonce def exists_nonce(self, nonce, request): """Check if the given nonce is existing in your database. Developers MUST implement this method in subclass, e.g.:: def exists_nonce(self, nonce, request): exists = AuthorizationCode.query.filter_by( client_id=request.payload.client_id, nonce=nonce ).first() return bool(exists) :param nonce: A string of "nonce" parameter in request :param request: OAuth2Request instance :return: Boolean """ raise NotImplementedError() def validate_openid_authorization_request(self, grant, redirect_uri): validate_nonce(grant.request, self.exists_nonce, self.require_nonce) def __call__(self, grant): grant.register_hook("after_create_token_response", self.process_token) if is_openid_scope(grant.request.payload.scope): grant.register_hook( "after_validate_authorization_request_payload", self.validate_openid_authorization_request, ) grant.register_hook( "after_validate_consent_request", validate_request_prompt )
OpenIDCode
python
realpython__materials
emacs-the-best-python-editor/PyEval/pyeval_operand.py
{ "start": 104, "end": 439 }
class ____: """ Common operator class used by the evaluator. """ def __init__(self, operand_string): """Create a new operator object.""" # String to hold the operand literal self.op_string = operand_string # Integer value of the operand self.op_value = int(operand_string)
Operand
python
scipy__scipy
scipy/integrate/_ivp/common.py
{ "start": 3899, "end": 15745 }
class ____: """Continuous ODE solution. It is organized as a collection of `DenseOutput` objects which represent local interpolants. It provides an algorithm to select a right interpolant for each given point. The interpolants cover the range between `t_min` and `t_max` (see Attributes below). Evaluation outside this interval is not forbidden, but the accuracy is not guaranteed. When evaluating at a breakpoint (one of the values in `ts`) a segment with the lower index is selected. Parameters ---------- ts : array_like, shape (n_segments + 1,) Time instants between which local interpolants are defined. Must be strictly increasing or decreasing (zero segment with two points is also allowed). interpolants : list of DenseOutput with n_segments elements Local interpolants. An i-th interpolant is assumed to be defined between ``ts[i]`` and ``ts[i + 1]``. alt_segment : boolean Requests the alternative interpolant segment selection scheme. At each solver integration point, two interpolant segments are available. The default (False) and alternative (True) behaviours select the segment for which the requested time corresponded to ``t`` and ``t_old``, respectively. This functionality is only relevant for testing the interpolants' accuracy: different integrators use different construction strategies. Attributes ---------- t_min, t_max : float Time range of the interpolation. """ def __init__(self, ts, interpolants, alt_segment=False): ts = np.asarray(ts) d = np.diff(ts) # The first case covers integration on zero segment. 
if not ((ts.size == 2 and ts[0] == ts[-1]) or np.all(d > 0) or np.all(d < 0)): raise ValueError("`ts` must be strictly increasing or decreasing.") self.n_segments = len(interpolants) if ts.shape != (self.n_segments + 1,): raise ValueError("Numbers of time stamps and interpolants " "don't match.") self.ts = ts self.interpolants = interpolants if ts[-1] >= ts[0]: self.t_min = ts[0] self.t_max = ts[-1] self.ascending = True self.side = "right" if alt_segment else "left" self.ts_sorted = ts else: self.t_min = ts[-1] self.t_max = ts[0] self.ascending = False self.side = "left" if alt_segment else "right" self.ts_sorted = ts[::-1] def _call_single(self, t): # Here we preserve a certain symmetry that when t is in self.ts, # if alt_segment=False, then we prioritize a segment with a lower # index. ind = np.searchsorted(self.ts_sorted, t, side=self.side) segment = min(max(ind - 1, 0), self.n_segments - 1) if not self.ascending: segment = self.n_segments - 1 - segment return self.interpolants[segment](t) def __call__(self, t): """Evaluate the solution. Parameters ---------- t : float or array_like with shape (n_points,) Points to evaluate at. Returns ------- y : ndarray, shape (n_states,) or (n_states, n_points) Computed values. Shape depends on whether `t` is a scalar or a 1-D array. """ t = np.asarray(t) if t.ndim == 0: return self._call_single(t) order = np.argsort(t) reverse = np.empty_like(order) reverse[order] = np.arange(order.shape[0]) t_sorted = t[order] # See comment in self._call_single. 
segments = np.searchsorted(self.ts_sorted, t_sorted, side=self.side) segments -= 1 segments[segments < 0] = 0 segments[segments > self.n_segments - 1] = self.n_segments - 1 if not self.ascending: segments = self.n_segments - 1 - segments ys = [] group_start = 0 for segment, group in groupby(segments): group_end = group_start + len(list(group)) y = self.interpolants[segment](t_sorted[group_start:group_end]) ys.append(y) group_start = group_end ys = np.hstack(ys) ys = ys[:, reverse] return ys NUM_JAC_DIFF_REJECT = EPS ** 0.875 NUM_JAC_DIFF_SMALL = EPS ** 0.75 NUM_JAC_DIFF_BIG = EPS ** 0.25 NUM_JAC_MIN_FACTOR = 1e3 * EPS NUM_JAC_FACTOR_INCREASE = 10 NUM_JAC_FACTOR_DECREASE = 0.1 def num_jac(fun, t, y, f, threshold, factor, sparsity=None): """Finite differences Jacobian approximation tailored for ODE solvers. This function computes finite difference approximation to the Jacobian matrix of `fun` with respect to `y` using forward differences. The Jacobian matrix has shape (n, n) and its element (i, j) is equal to ``d f_i / d y_j``. A special feature of this function is the ability to correct the step size from iteration to iteration. The main idea is to keep the finite difference significantly separated from its round-off error which approximately equals ``EPS * np.abs(f)``. It reduces a possibility of a huge error and assures that the estimated derivative are reasonably close to the true values (i.e., the finite difference approximation is at least qualitatively reflects the structure of the true Jacobian). Parameters ---------- fun : callable Right-hand side of the system implemented in a vectorized fashion. t : float Current time. y : ndarray, shape (n,) Current state. f : ndarray, shape (n,) Value of the right hand side at (t, y). threshold : float Threshold for `y` value used for computing the step size as ``factor * np.maximum(np.abs(y), threshold)``. Typically, the value of absolute tolerance (atol) for a solver should be passed as `threshold`. 
factor : ndarray with shape (n,) or None Factor to use for computing the step size. Pass None for the very evaluation, then use the value returned from this function. sparsity : tuple (structure, groups) or None Sparsity structure of the Jacobian, `structure` must be csc_matrix. Returns ------- J : ndarray or csc_matrix, shape (n, n) Jacobian matrix. factor : ndarray, shape (n,) Suggested `factor` for the next evaluation. """ y = np.asarray(y) n = y.shape[0] if n == 0: return np.empty((0, 0)), factor if factor is None: factor = np.full(n, EPS ** 0.5) else: factor = factor.copy() # Direct the step as ODE dictates, hoping that such a step won't lead to # a problematic region. For complex ODEs it makes sense to use the real # part of f as we use steps along real axis. f_sign = 2 * (np.real(f) >= 0).astype(float) - 1 y_scale = f_sign * np.maximum(threshold, np.abs(y)) h = (y + factor * y_scale) - y # Make sure that the step is not 0 to start with. Not likely it will be # executed often. for i in np.nonzero(h == 0)[0]: while h[i] == 0: factor[i] *= 10 h[i] = (y[i] + factor[i] * y_scale[i]) - y[i] if sparsity is None: return _dense_num_jac(fun, t, y, f, h, factor, y_scale) else: structure, groups = sparsity return _sparse_num_jac(fun, t, y, f, h, factor, y_scale, structure, groups) def _dense_num_jac(fun, t, y, f, h, factor, y_scale): n = y.shape[0] h_vecs = np.diag(h) f_new = fun(t, y[:, None] + h_vecs) diff = f_new - f[:, None] max_ind = np.argmax(np.abs(diff), axis=0) r = np.arange(n) max_diff = np.abs(diff[max_ind, r]) scale = np.maximum(np.abs(f[max_ind]), np.abs(f_new[max_ind, r])) diff_too_small = max_diff < NUM_JAC_DIFF_REJECT * scale if np.any(diff_too_small): ind, = np.nonzero(diff_too_small) new_factor = NUM_JAC_FACTOR_INCREASE * factor[ind] h_new = (y[ind] + new_factor * y_scale[ind]) - y[ind] h_vecs[ind, ind] = h_new f_new = fun(t, y[:, None] + h_vecs[:, ind]) diff_new = f_new - f[:, None] max_ind = np.argmax(np.abs(diff_new), axis=0) r = 
np.arange(ind.shape[0]) max_diff_new = np.abs(diff_new[max_ind, r]) scale_new = np.maximum(np.abs(f[max_ind]), np.abs(f_new[max_ind, r])) update = max_diff[ind] * scale_new < max_diff_new * scale[ind] if np.any(update): update, = np.nonzero(update) update_ind = ind[update] factor[update_ind] = new_factor[update] h[update_ind] = h_new[update] diff[:, update_ind] = diff_new[:, update] scale[update_ind] = scale_new[update] max_diff[update_ind] = max_diff_new[update] diff /= h factor[max_diff < NUM_JAC_DIFF_SMALL * scale] *= NUM_JAC_FACTOR_INCREASE factor[max_diff > NUM_JAC_DIFF_BIG * scale] *= NUM_JAC_FACTOR_DECREASE factor = np.maximum(factor, NUM_JAC_MIN_FACTOR) return diff, factor def _sparse_num_jac(fun, t, y, f, h, factor, y_scale, structure, groups): n = y.shape[0] n_groups = np.max(groups) + 1 h_vecs = np.empty((n_groups, n)) for group in range(n_groups): e = np.equal(group, groups) h_vecs[group] = h * e h_vecs = h_vecs.T f_new = fun(t, y[:, None] + h_vecs) df = f_new - f[:, None] i, j, _ = find(structure) diff = coo_matrix((df[i, groups[j]], (i, j)), shape=(n, n)).tocsc() max_ind = np.array(abs(diff).argmax(axis=0)).ravel() r = np.arange(n) max_diff = np.asarray(np.abs(diff[max_ind, r])).ravel() scale = np.maximum(np.abs(f[max_ind]), np.abs(f_new[max_ind, groups[r]])) diff_too_small = max_diff < NUM_JAC_DIFF_REJECT * scale if np.any(diff_too_small): ind, = np.nonzero(diff_too_small) new_factor = NUM_JAC_FACTOR_INCREASE * factor[ind] h_new = (y[ind] + new_factor * y_scale[ind]) - y[ind] h_new_all = np.zeros(n) h_new_all[ind] = h_new groups_unique = np.unique(groups[ind]) groups_map = np.empty(n_groups, dtype=int) h_vecs = np.empty((groups_unique.shape[0], n)) for k, group in enumerate(groups_unique): e = np.equal(group, groups) h_vecs[k] = h_new_all * e groups_map[group] = k h_vecs = h_vecs.T f_new = fun(t, y[:, None] + h_vecs) df = f_new - f[:, None] i, j, _ = find(structure[:, ind]) diff_new = coo_matrix((df[i, groups_map[groups[ind[j]]]], (i, j)), shape=(n, 
ind.shape[0])).tocsc() max_ind_new = np.array(abs(diff_new).argmax(axis=0)).ravel() r = np.arange(ind.shape[0]) max_diff_new = np.asarray(np.abs(diff_new[max_ind_new, r])).ravel() scale_new = np.maximum( np.abs(f[max_ind_new]), np.abs(f_new[max_ind_new, groups_map[groups[ind]]])) update = max_diff[ind] * scale_new < max_diff_new * scale[ind] if np.any(update): update, = np.nonzero(update) update_ind = ind[update] factor[update_ind] = new_factor[update] h[update_ind] = h_new[update] diff[:, update_ind] = diff_new[:, update] scale[update_ind] = scale_new[update] max_diff[update_ind] = max_diff_new[update] diff.data /= np.repeat(h, np.diff(diff.indptr)) factor[max_diff < NUM_JAC_DIFF_SMALL * scale] *= NUM_JAC_FACTOR_INCREASE factor[max_diff > NUM_JAC_DIFF_BIG * scale] *= NUM_JAC_FACTOR_DECREASE factor = np.maximum(factor, NUM_JAC_MIN_FACTOR) return diff, factor
OdeSolution
python
getsentry__sentry
src/sentry/issues/endpoints/group_stats.py
{ "start": 526, "end": 1174 }
class ____(GroupEndpoint, StatsMixin): publish_status = { "GET": ApiPublishStatus.UNKNOWN, } def get(self, request: Request, group) -> Response: try: environment_id = get_environment_id(request, group.project.organization_id) except Environment.DoesNotExist: raise ResourceDoesNotExist data = tsdb.backend.get_range( model=TSDBModel.group, keys=[group.id], **self._parse_args(request, environment_id), tenant_ids={"organization_id": group.project.organization_id}, )[group.id] return Response(data)
GroupStatsEndpoint
python
huggingface__transformers
tests/models/beit/test_image_processing_beit.py
{ "start": 1105, "end": 3604 }
class ____: def __init__( self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_center_crop=True, crop_size=None, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], do_reduce_labels=False, ): size = size if size is not None else {"height": 20, "width": 20} crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18} self.parent = parent self.batch_size = batch_size self.num_channels = num_channels self.image_size = image_size self.min_resolution = min_resolution self.max_resolution = max_resolution self.do_resize = do_resize self.size = size self.do_center_crop = do_center_crop self.crop_size = crop_size self.do_normalize = do_normalize self.image_mean = image_mean self.image_std = image_std self.do_reduce_labels = do_reduce_labels def prepare_image_processor_dict(self): return { "do_resize": self.do_resize, "size": self.size, "do_center_crop": self.do_center_crop, "crop_size": self.crop_size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_reduce_labels": self.do_reduce_labels, } def expected_output_image_shape(self, images): return self.num_channels, self.crop_size["height"], self.crop_size["width"] def prepare_image_inputs(self, equal_resolution=False, numpify=False, torchify=False): return prepare_image_inputs( batch_size=self.batch_size, num_channels=self.num_channels, min_resolution=self.min_resolution, max_resolution=self.max_resolution, equal_resolution=equal_resolution, numpify=numpify, torchify=torchify, ) def prepare_semantic_single_inputs(): ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test") example = ds[0] return example["image"], example["map"] def prepare_semantic_batch_inputs(): ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test") return list(ds["image"][:2]), list(ds["map"][:2]) @require_torch @require_vision
BeitImageProcessingTester
python
pennersr__django-allauth
allauth/socialaccount/providers/edmodo/provider.py
{ "start": 219, "end": 436 }
class ____(ProviderAccount): def get_profile_url(self): return self.account.extra_data.get("profile_url") def get_avatar_url(self): return self.account.extra_data.get("avatar_url")
EdmodoAccount
python
django-guardian__django-guardian
guardian/testapp/tests/conf.py
{ "start": 349, "end": 897 }
class ____: def setUp(self): super().setUp() from django.contrib.auth import get_user_model from django.contrib.auth.models import Group User = get_user_model() Group.objects.create(pk=1, name="admins") jack_group = Group.objects.create(pk=2, name="jackGroup") User.objects.get_or_create(username=guardian_settings.ANONYMOUS_USER_NAME) jack = User.objects.create(username="jack", is_active=True, is_superuser=False, is_staff=False) jack.groups.add(jack_group)
TestDataMixin
python
huggingface__transformers
src/transformers/models/mobilevit/modeling_mobilevit.py
{ "start": 31281, "end": 34909 }
class ____(MobileViTPreTrainedModel): def __init__(self, config: MobileViTConfig) -> None: super().__init__(config) self.num_labels = config.num_labels self.mobilevit = MobileViTModel(config, expand_output=False) self.segmentation_head = MobileViTDeepLabV3(config) # Initialize weights and apply final processing self.post_init() @auto_docstring def forward( self, pixel_values: Optional[torch.Tensor] = None, labels: Optional[torch.Tensor] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[tuple, SemanticSegmenterOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size, height, width)`, *optional*): Ground truth semantic segmentation maps for computing the loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels > 1`, a classification loss is computed (Cross-Entropy). Examples: ```python >>> import requests >>> import torch >>> from PIL import Image >>> from transformers import AutoImageProcessor, MobileViTForSemanticSegmentation >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> image_processor = AutoImageProcessor.from_pretrained("apple/deeplabv3-mobilevit-small") >>> model = MobileViTForSemanticSegmentation.from_pretrained("apple/deeplabv3-mobilevit-small") >>> inputs = image_processor(images=image, return_tensors="pt") >>> with torch.no_grad(): ... 
outputs = model(**inputs) >>> # logits are of shape (batch_size, num_labels, height, width) >>> logits = outputs.logits ```""" output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if labels is not None and self.config.num_labels == 1: raise ValueError("The number of labels should be greater than one") outputs = self.mobilevit( pixel_values, output_hidden_states=True, # we need the intermediate hidden states return_dict=return_dict, ) encoder_hidden_states = outputs.hidden_states if return_dict else outputs[1] logits = self.segmentation_head(encoder_hidden_states) loss = None if labels is not None: # upsample logits to the images' original size upsampled_logits = nn.functional.interpolate( logits, size=labels.shape[-2:], mode="bilinear", align_corners=False ) loss_fct = CrossEntropyLoss(ignore_index=self.config.semantic_loss_ignore_index) loss = loss_fct(upsampled_logits, labels) if not return_dict: if output_hidden_states: output = (logits,) + outputs[1:] else: output = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return SemanticSegmenterOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states if output_hidden_states else None, attentions=None, ) __all__ = [ "MobileViTForImageClassification", "MobileViTForSemanticSegmentation", "MobileViTModel", "MobileViTPreTrainedModel", ]
MobileViTForSemanticSegmentation
python
dask__dask
dask/tests/test_typing.py
{ "start": 3356, "end": 5665 }
class ____(DaskMethodsMixin): def __init__(self, based_on: DaskCollection) -> None: self.based_on = based_on def __dask_graph__(self) -> Graph: return self.based_on.__dask_graph__() def __dask_keys__(self) -> NestedKeys: return self.based_on.__dask_keys__() def __dask_postcompute__(self) -> tuple[PostComputeCallable, tuple]: return finalize, () def __dask_postpersist__(self) -> tuple[PostPersistCallable, tuple]: return self.based_on.__dask_postpersist__() def __dask_tokenize__(self) -> Hashable: return tokenize(self.based_on) __dask_scheduler__ = staticmethod(get2) __dask_optimize__ = globalmethod( dont_optimize, key="collection_optim", falsey=dont_optimize, ) def increment_(x: int) -> int: return x + 1 increment: Delayed = delayed(increment_) def assert_isinstance(coll: DaskCollection, protocol: Any) -> None: assert isinstance(coll, protocol) @pytest.mark.parametrize("protocol", [DaskCollection, HLGDaskCollection]) def test_isinstance_core(protocol): arr = da.ones(10) bag = db.from_sequence([1, 2, 3, 4, 5], npartitions=2) dobj = increment(2) assert_isinstance(arr, protocol) assert_isinstance(bag, protocol) assert_isinstance(dobj, protocol) def test_isinstance_custom() -> None: a = da.ones(10) hlgc = HLGCollection(a) nhlgc = NotHLGCollection(a) assert isinstance(hlgc, DaskCollection) assert isinstance(nhlgc, DaskCollection) assert isinstance(nhlgc, DaskCollection) assert not isinstance(nhlgc, HLGDaskCollection) def compute(coll: DaskCollection) -> Any: return coll.compute() def compute2(coll: DaskCollection) -> Any: return coll.compute() def test_parameter_passing() -> None: from dask.array import Array a: Delayed = increment(2) hlgc = HLGCollection(a) assert compute(hlgc) == 3 assert compute2(hlgc) == 3 d: Delayed = increment(3) assert compute(d) == 4 assert compute2(d) == 4 array: Array = da.ones(10) assert compute(array).shape == (10,) assert compute2(array).shape == (10,) def test_inheriting_class() -> None: inheriting: Inheriting = Inheriting(increment(2)) 
assert isinstance(inheriting, Inheriting)
NotHLGCollection
python
huggingface__transformers
tests/models/cpmant/test_modeling_cpmant.py
{ "start": 6907, "end": 8883 }
class ____(unittest.TestCase): @tooslow def test_inference_causal(self): texts = "今天天气真好!" model_path = "openbmb/cpm-ant-10b" model = CpmAntForCausalLM.from_pretrained(model_path) tokenizer = CpmAntTokenizer.from_pretrained(model_path) inputs = tokenizer(texts, return_tensors="pt") hidden_states = model(**inputs).logits expected_slice = torch.tensor( [[[-6.4267, -6.4083, -6.3958], [-5.8802, -5.9447, -5.7811], [-5.3896, -5.4820, -5.4295]]], ) torch.testing.assert_close(hidden_states[:, :3, :3], expected_slice, rtol=1e-2, atol=1e-2) @tooslow def test_simple_generation(self): model_path = "openbmb/cpm-ant-10b" model = CpmAntForCausalLM.from_pretrained(model_path) tokenizer = CpmAntTokenizer.from_pretrained(model_path) texts = "今天天气不错," expected_output = "今天天气不错,阳光明媚,我和妈妈一起去超市买东西。\n在超市里,我看到了一个很好玩的玩具,它的名字叫“机器人”。它有一个圆圆的脑袋,两只圆圆的眼睛,还有一个圆圆的" model_inputs = tokenizer(texts, return_tensors="pt") token_ids = model.generate(**model_inputs) output_texts = tokenizer.batch_decode(token_ids) self.assertEqual(expected_output, output_texts) @tooslow def test_batch_generation(self): model_path = "openbmb/cpm-ant-10b" model = CpmAntForCausalLM.from_pretrained(model_path) tokenizer = CpmAntTokenizer.from_pretrained(model_path) texts = ["今天天气不错,", "新年快乐,万事如意!"] expected_output = [ "今天天气不错,阳光明媚,我和妈妈一起去超市买东西。\n在超市里,我看到了一个很好玩的玩具,它的名字叫“机器人”。它有一个圆圆的脑袋,两只圆圆的眼睛,还有一个圆圆的", "新年快乐,万事如意!在这辞旧迎新的美好时刻,我谨代表《农村新技术》杂志社全体同仁,向一直以来关心、支持《农村新技术》杂志发展的各级领导、各界朋友和广大读者致以最诚挚的", ] model_inputs = tokenizer(texts, return_tensors="pt", padding=True) token_ids = model.generate(**model_inputs) output_texts = tokenizer.batch_decode(token_ids) self.assertEqual(expected_output, output_texts)
CpmAntForCausalLMlIntegrationTest
python
sympy__sympy
sympy/functions/special/error_functions.py
{ "start": 71214, "end": 76488 }
class ____(FresnelIntegral): r""" Fresnel integral C. Explanation =========== This function is defined by .. math:: \operatorname{C}(z) = \int_0^z \cos{\frac{\pi}{2} t^2} \mathrm{d}t. It is an entire function. Examples ======== >>> from sympy import I, oo, fresnelc >>> from sympy.abc import z Several special values are known: >>> fresnelc(0) 0 >>> fresnelc(oo) 1/2 >>> fresnelc(-oo) -1/2 >>> fresnelc(I*oo) I/2 >>> fresnelc(-I*oo) -I/2 In general one can pull out factors of -1 and $i$ from the argument: >>> fresnelc(-z) -fresnelc(z) >>> fresnelc(I*z) I*fresnelc(z) The Fresnel C integral obeys the mirror symmetry $\overline{C(z)} = C(\bar{z})$: >>> from sympy import conjugate >>> conjugate(fresnelc(z)) fresnelc(conjugate(z)) Differentiation with respect to $z$ is supported: >>> from sympy import diff >>> diff(fresnelc(z), z) cos(pi*z**2/2) Defining the Fresnel functions via an integral: >>> from sympy import integrate, pi, cos, expand_func >>> integrate(cos(pi*z**2/2), z) fresnelc(z)*gamma(1/4)/(4*gamma(5/4)) >>> expand_func(integrate(cos(pi*z**2/2), z)) fresnelc(z) We can numerically evaluate the Fresnel integral to arbitrary precision on the whole complex plane: >>> fresnelc(2).evalf(30) 0.488253406075340754500223503357 >>> fresnelc(-2*I).evalf(30) -0.488253406075340754500223503357*I See Also ======== fresnels: Fresnel sine integral. References ========== .. [1] https://en.wikipedia.org/wiki/Fresnel_integral .. [2] https://dlmf.nist.gov/7 .. [3] https://mathworld.wolfram.com/FresnelIntegrals.html .. [4] https://functions.wolfram.com/GammaBetaErf/FresnelC .. [5] The converging factors for the fresnel integrals by John W. Wrench Jr. 
and Vicki Alley """ _trigfunc = cos _sign = S.One @staticmethod @cacheit def taylor_term(n, x, *previous_terms): if n < 0: return S.Zero else: x = sympify(x) if len(previous_terms) > 1: p = previous_terms[-1] return (-pi**2*x**4*(4*n - 3)/(8*n*(2*n - 1)*(4*n + 1))) * p else: return x * (-x**4)**n * (S(2)**(-2*n)*pi**(2*n)) / ((4*n + 1)*factorial(2*n)) def _eval_rewrite_as_erf(self, z, **kwargs): return (S.One - I)/4 * (erf((S.One + I)/2*sqrt(pi)*z) + I*erf((S.One - I)/2*sqrt(pi)*z)) def _eval_rewrite_as_hyper(self, z, **kwargs): return z * hyper([Rational(1, 4)], [S.Half, Rational(5, 4)], -pi**2*z**4/16) def _eval_rewrite_as_meijerg(self, z, **kwargs): return (pi*z**Rational(3, 4) / (sqrt(2)*root(z**2, 4)*root(-z, 4)) * meijerg([], [1], [Rational(1, 4)], [Rational(3, 4), 0], -pi**2*z**4/16)) def _eval_rewrite_as_Integral(self, z, **kwargs): from sympy.integrals.integrals import Integral t = Dummy(uniquely_named_symbol('t', [z]).name) return Integral(cos(pi*t**2/2), (t, 0, z)) def _eval_as_leading_term(self, x, logx, cdir): from sympy.series.order import Order arg = self.args[0].as_leading_term(x, logx=logx, cdir=cdir) arg0 = arg.subs(x, 0) if arg0 is S.ComplexInfinity: arg0 = arg.limit(x, 0, dir='-' if re(cdir).is_negative else '+') if arg0.is_zero: return arg elif arg0 in [S.Infinity, S.NegativeInfinity]: s = 1 if arg0 is S.Infinity else -1 return s*S.Half + Order(x, x) else: return self.func(arg0) def _eval_aseries(self, n, args0, x, logx): from sympy.series.order import Order point = args0[0] # Expansion at oo if point in [S.Infinity, -S.Infinity]: z = self.args[0] # expansion of C(x) = C1(x*sqrt(pi/2)), see reference[5] page 1-8 # as only real infinities are dealt with, sin and cos are O(1) p = [S.NegativeOne**k * factorial(4*k + 1) / (2**(2*k + 2) * z**(4*k + 3) * 2**(2*k)*factorial(2*k)) for k in range(n) if 4*k + 3 < n] q = [1/(2*z)] + [S.NegativeOne**k * factorial(4*k - 1) / (2**(2*k + 1) * z**(4*k + 1) * 2**(2*k - 1)*factorial(2*k - 1)) for k in range(1, 
n) if 4*k + 1 < n] p = [-sqrt(2/pi)*t for t in p] q = [ sqrt(2/pi)*t for t in q] s = 1 if point is S.Infinity else -1 # The expansion at oo is 1/2 + some odd powers of z # To get the expansion at -oo, replace z by -z and flip the sign # The result -1/2 + the same odd powers of z as before. return s*S.Half + (cos(z**2)*Add(*p) + sin(z**2)*Add(*q) ).subs(x, sqrt(2/pi)*x) + Order(1/z**n, x) # All other points are not handled return super()._eval_aseries(n, args0, x, logx) ############################################################################### #################### HELPER FUNCTIONS ######################################### ###############################################################################
fresnelc
python
gevent__gevent
src/gevent/tests/test__queue.py
{ "start": 16111, "end": 16518 }
class ____(AbstractGenericGetTestCase): kind = queue.SimpleQueue Timeout = Full def setUp(self): super(TestPutInterrupt, self).setUp() self.queue = self._makeOne() def wait(self, timeout): while not self.queue.full(): self.queue.put(1) return self.queue.put(2, timeout=timeout) def _makeOne(self): return self.kind(1)
TestPutInterrupt
python
pytorch__pytorch
torch/_dynamo/source.py
{ "start": 15840, "end": 16281 }
class ____(Source): desc: Optional[str] = None def guard_source(self) -> GuardSource: return GuardSource.EPHEMERAL def name(self) -> str: return f"<ephemeral{': ' + self.desc if self.desc is not None else ''}>" def make_guard(self, fn: Callable[..., Any]) -> Guard: raise NotImplementedError def is_ephemeral(self) -> bool: return True @dataclasses.dataclass(frozen=True)
EphemeralSource
python
kamyu104__LeetCode-Solutions
Python/longest-uncommon-subsequence-i.py
{ "start": 37, "end": 259 }
class ____(object): def findLUSlength(self, a, b): """ :type a: str :type b: str :rtype: int """ if a == b: return -1 return max(len(a), len(b))
Solution
python
huggingface__transformers
src/transformers/models/mpnet/modeling_mpnet.py
{ "start": 20751, "end": 24085 }
class ____(MPNetPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.mpnet = MPNetModel(config, add_pooling_layer=False) self.classifier = MPNetClassificationHead(config) # Initialize weights and apply final processing self.post_init() @auto_docstring def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, position_ids: Optional[torch.LongTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[tuple[torch.Tensor], SequenceClassifierOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). 
""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.mpnet( input_ids, attention_mask=attention_mask, position_ids=position_ids, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] logits = self.classifier(sequence_output) loss = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = "regression" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = "single_label_classification" else: self.config.problem_type = "multi_label_classification" if self.config.problem_type == "regression": loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(logits.squeeze(), labels.squeeze()) else: loss = loss_fct(logits, labels) elif self.config.problem_type == "single_label_classification": loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type == "multi_label_classification": loss_fct = BCEWithLogitsLoss() loss = loss_fct(logits, labels) if not return_dict: output = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return SequenceClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @auto_docstring
MPNetForSequenceClassification
python
apache__airflow
providers/google/src/airflow/providers/google/cloud/operators/dataproc_metastore.py
{ "start": 39394, "end": 45684 }
class ____(GoogleCloudBaseOperator): """ Restore a service from a backup. :param project_id: Required. The ID of the Google Cloud project that the service belongs to. :param region: Required. The ID of the Google Cloud region that the service belongs to. :param service_id: Required. The ID of the metastore service, which is used as the final component of the metastore service's name. This value must be between 2 and 63 characters long inclusive, begin with a letter, end with a letter or number, and consist of alphanumeric ASCII characters or hyphens. This corresponds to the ``service_id`` field on the ``request`` instance; if ``request`` is provided, this should not be set. :param backup_project_id: Required. The ID of the Google Cloud project that the metastore service backup to restore from. :param backup_region: Required. The ID of the Google Cloud region that the metastore service backup to restore from. :param backup_service_id: Required. The ID of the metastore service backup to restore from, which is used as the final component of the metastore service's name. This value must be between 2 and 63 characters long inclusive, begin with a letter, end with a letter or number, and consist of alphanumeric ASCII characters or hyphens. :param backup_id: Required. The ID of the metastore service backup to restore from :param restore_type: Optional. The type of restore. If unspecified, defaults to ``METADATA_ONLY`` :param request_id: Optional. A unique id used to identify the request. :param retry: Optional. Designation of what errors, if any, should be retried. :param timeout: Optional. The timeout for this request. :param metadata: Optional. Strings which should be sent along with the request as metadata. :param gcp_conn_id: The connection ID to use connecting to Google Cloud. 
:param impersonation_chain: Optional service account to impersonate using short-term credentials, or chained list of accounts required to get the access_token of the last account in the list, which will be impersonated in the request. If set as a string, the account must grant the originating account the Service Account Token Creator IAM role. If set as a sequence, the identities from the list must grant Service Account Token Creator IAM role to the directly preceding identity, with first account from the list granting this role to the originating account (templated). """ template_fields: Sequence[str] = ( "project_id", "impersonation_chain", ) operator_extra_links = (DataprocMetastoreLink(),) def __init__( self, *, project_id: str, region: str, service_id: str, backup_project_id: str, backup_region: str, backup_service_id: str, backup_id: str, restore_type: Restore | None = None, request_id: str | None = None, retry: Retry | _MethodDefault = DEFAULT, timeout: float | None = None, metadata: Sequence[tuple[str, str]] = (), gcp_conn_id: str = "google_cloud_default", impersonation_chain: str | Sequence[str] | None = None, **kwargs, ) -> None: super().__init__(**kwargs) self.project_id = project_id self.region = region self.service_id = service_id self.backup_project_id = backup_project_id self.backup_region = backup_region self.backup_service_id = backup_service_id self.backup_id = backup_id self.restore_type = restore_type self.request_id = request_id self.retry = retry self.timeout = timeout self.metadata = metadata self.gcp_conn_id = gcp_conn_id self.impersonation_chain = impersonation_chain @property def extra_links_params(self) -> dict[str, Any]: return { "region": self.region, "service_id": self.service_id, "project_id": self.project_id, } def execute(self, context: Context): hook = DataprocMetastoreHook( gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain ) self.log.info( "Restoring Dataproc Metastore service: %s from backup: %s", 
self.service_id, self.backup_id ) hook.restore_service( project_id=self.project_id, region=self.region, service_id=self.service_id, backup_project_id=self.backup_project_id, backup_region=self.backup_region, backup_service_id=self.backup_service_id, backup_id=self.backup_id, restore_type=self.restore_type, request_id=self.request_id, retry=self.retry, timeout=self.timeout, metadata=self.metadata, ) self._wait_for_restore_service(hook) self.log.info("Service %s restored from backup %s", self.service_id, self.backup_id) DataprocMetastoreLink.persist(context=context, url=METASTORE_SERVICE_LINK) def _wait_for_restore_service(self, hook: DataprocMetastoreHook): """ Check that export was created successfully. This is a workaround to an issue parsing result to MetadataExport inside the SDK. """ for time_to_wait in exponential_sleep_generator(initial=10, maximum=120): time.sleep(time_to_wait) service = hook.get_service( region=self.region, project_id=self.project_id, service_id=self.service_id, retry=self.retry, timeout=self.timeout, metadata=self.metadata, ) activities: MetadataManagementActivity = service.metadata_management_activity restore_service: Restore = activities.restores[0] if restore_service.state == Restore.State.SUCCEEDED: return restore_service if restore_service.state == Restore.State.FAILED: raise AirflowException("Restoring service FAILED")
DataprocMetastoreRestoreServiceOperator
python
davidhalter__parso
parso/python/tree.py
{ "start": 22545, "end": 22997 }
class ____(Flow): type = 'try_stmt' __slots__ = () def get_except_clause_tests(self): """ Returns the ``test`` nodes found in ``except_clause`` nodes. Returns ``[None]`` for except clauses without an exception given. """ for node in self.children: if node.type == 'except_clause': yield node.children[1] elif node == 'except': yield None
TryStmt
python
ethereum__web3.py
web3/main.py
{ "start": 11446, "end": 16163 }
class ____(BaseWeb3, Generic[AsyncProviderT]): # mypy Types eth: AsyncEth net: AsyncNet geth: AsyncGeth # Providers AsyncHTTPProvider = AsyncHTTPProvider WebSocketProvider = WebSocketProvider AsyncEthereumTesterProvider = AsyncEthereumTesterProvider def __init__( self, provider: AsyncProviderT | None = None, middleware: Sequence[Any] | None = None, modules: dict[str, type[Module] | Sequence[Any]] | None = None, external_modules: None | (dict[str, type[Module] | Sequence[Any]]) = None, ens: Union[AsyncENS, "Empty"] = empty, ) -> None: _validate_provider(self, provider) self.manager = self.RequestManager(self, provider, middleware) self.codec = ABICodec(build_strict_registry()) self._modules = get_async_default_modules() if modules is None else modules self._external_modules = None if external_modules is None else external_modules self.attach_modules(self._modules) if external_modules is not None: self.attach_modules(external_modules) self.ens = ens async def is_connected(self, show_traceback: bool = False) -> bool: return await self.provider.is_connected(show_traceback) @property def provider(self) -> AsyncProviderT: return cast(AsyncProviderT, self.manager.provider) @provider.setter def provider(self, provider: AsyncProviderT) -> None: self.manager.provider = provider @property async def client_version(self) -> str: return await self.manager.coro_request(RPC.web3_clientVersion, []) @property def ens(self) -> Union[AsyncENS, "Empty"]: if self._ens is empty: ns = AsyncENS.from_web3(self) ns.w3 = self return ns return self._ens @ens.setter def ens(self, new_ens: Union[AsyncENS, "Empty"]) -> None: if new_ens: new_ens.w3 = self # set self object reference for ``AsyncENS.w3`` self._ens = new_ens # -- persistent connection settings -- # _subscription_manager: SubscriptionManager | None = None _persistent_connection: Optional["PersistentConnection"] = None @property @persistent_connection_provider_method() def subscription_manager(self) -> SubscriptionManager: """ Access 
the subscription manager for the current PersistentConnectionProvider. """ if not self._subscription_manager: self._subscription_manager = SubscriptionManager(self) return self._subscription_manager @property @persistent_connection_provider_method() def socket(self) -> PersistentConnection: if self._persistent_connection is None: self._persistent_connection = PersistentConnection(self) return self._persistent_connection # w3 = await AsyncWeb3(PersistentConnectionProvider(...)) @persistent_connection_provider_method( "Provider must inherit from ``PersistentConnectionProvider`` class " "when instantiating via ``await``." ) def __await__(self) -> Generator[Any, None, Self]: async def __async_init__() -> Self: provider = cast("PersistentConnectionProvider", self.provider) await provider.connect() # set signal handlers since not within a context manager provider._set_signal_handlers() return self return __async_init__().__await__() # async with AsyncWeb3(PersistentConnectionProvider(...)) as w3: @persistent_connection_provider_method( message="Provider must inherit from ``PersistentConnectionProvider`` class " "when instantiating via ``async with``." ) async def __aenter__(self) -> Self: await self.provider.connect() return self @persistent_connection_provider_method() async def __aexit__( self, exc_type: type[BaseException], exc_val: BaseException, exc_tb: TracebackType, ) -> None: await self.provider.disconnect() # async for w3 in AsyncWeb3(PersistentConnectionProvider(...)): @persistent_connection_provider_method( message="Provider must inherit from ``PersistentConnectionProvider`` class " "when instantiating via ``async for``." ) async def __aiter__(self) -> AsyncIterator[Self]: provider = self.provider while True: await provider.connect() yield self cast("PersistentConnectionProvider", provider).logger.error( "Connection interrupted, attempting to reconnect..." ) await provider.disconnect()
AsyncWeb3
python
walkccc__LeetCode
solutions/3034. Number of Subarrays That Match a Pattern I/3034.py
{ "start": 0, "end": 1341 }
class ____: def countMatchingSubarrays(self, nums: list[int], pattern: list[int]) -> int: def getNum(a: int, b: int) -> int: if a < b: return 1 if a > b: return -1 return 0 numsPattern = [getNum(a, b) for a, b in itertools.pairwise(nums)] return self._kmp(numsPattern, pattern) def _kmp(self, nums: list[int], pattern: list[int]) -> int: """Returns the number of occurrences of the pattern in `nums`.""" def getLPS(nums: list[int]) -> list[int]: """ Returns the lps array, where lps[i] is the length of the longest prefix of nums[0..i] which is also a suffix of this substring. """ lps = [0] * len(nums) j = 0 for i in range(1, len(nums)): while j > 0 and nums[j] != nums[i]: j = lps[j - 1] if nums[i] == nums[j]: lps[i] = j + 1 j += 1 return lps lps = getLPS(pattern) res = 0 i = 0 # s' index j = 0 # pattern's index while i < len(nums): if nums[i] == pattern[j]: i += 1 j += 1 if j == len(pattern): res += 1 j = lps[j - 1] elif j != 0: # Mismatch after j matches. # Don't match lps[0..lps[j - 1]] since they will match anyway. j = lps[j - 1] else: i += 1 return res
Solution
python
PyCQA__pylint
doc/data/messages/i/invalid-class-object/bad.py
{ "start": 0, "end": 70 }
class ____: pass Apple.__class__ = 1 # [invalid-class-object]
Apple
python
doocs__leetcode
lcof/面试题32 - III. 从上到下打印二叉树 III/Solution.py
{ "start": 164, "end": 707 }
class ____: def levelOrder(self, root: TreeNode) -> List[List[int]]: ans = [] if root is None: return ans q = deque([root]) ans = [] while q: t = [] for _ in range(len(q)): node = q.popleft() t.append(node.val) if node.left: q.append(node.left) if node.right: q.append(node.right) ans.append(t[::-1] if len(ans) & 1 else t) return ans
Solution
python
django__django
django/contrib/messages/storage/cookie.py
{ "start": 1571, "end": 1812 }
class ____: def dumps(self, obj): return [ json.dumps( o, separators=(",", ":"), cls=MessageEncoder, ) for o in obj ]
MessagePartSerializer
python
keras-team__keras
keras/src/backend/common/variables_test.py
{ "start": 18705, "end": 21585 }
class ____(test_case.TestCase): """tests for dtype, shape, ndim, __repr__""" def test_variable_dtype(self): """Test retrieving the dtype of a variable.""" v = backend.Variable( initializer=np.array([1.0, 2.0, 3.0], dtype=np.float32) ) self.assertEqual(v.dtype, "float32") def test_variable_shape(self): """Test retrieving the shape of a variable.""" v = backend.Variable(initializer=np.array([[1.0, 2.0], [3.0, 4.0]])) self.assertEqual(v.shape, (2, 2)) def test_variable_ndim(self): """Test retrieving the number of dimensions of a variable.""" v = backend.Variable(initializer=np.array([[1.0, 2.0], [3.0, 4.0]])) self.assertEqual(v.ndim, 2) def test_variable_repr(self): """Test the string representation of a variable.""" v = backend.Variable( initializer=np.array([1.0, 2.0, 3.0], dtype=np.float32), name="test_var", ) expected_repr = ( "<Variable path=test_var, shape=(3,), dtype=float32, " "value=[1. 2. 3.]>" ) self.assertEqual(repr(v), expected_repr) # Test with `backend.StatelessScope()` with backend.StatelessScope(): v = backend.Variable( initializer="zeros", shape=(3,), name="test_var" ) expected_repr = ( "<Variable path=test_var, shape=(3,), dtype=float32>" ) self.assertEqual(repr(v), expected_repr) def test_variable_getitem(self): """Test getting an item from a variable.""" v = backend.Variable(initializer=np.array([1.0, 2.0, 3.0])) self.assertEqual(v[0], 1) def test_variable_initialize(self): """Test initializing a variable.""" v = backend.Variable(initializer=np.array([1.0, 2.0, 3.0])) init_value = np.array([4.0, 5.0, 6.0]) v._initialize(value=init_value) self.assertAllClose(v.value, init_value) def test_variable_convert_to_tensor(self): """Test converting a variable to a tensor.""" v = backend.Variable(initializer=np.array([1.0, 2.0, 3.0])) self.assertAllClose( v._convert_to_tensor(v.value), np.array([1.0, 2.0, 3.0]) ) def test_variable_convert_to_tensor_with_dtype(self): """Test converting a variable to a tensor with a dtype.""" v = 
backend.Variable(initializer=np.array([1.0, 2.0, 3.0])) self.assertAllClose( v._convert_to_tensor(v.value, dtype="float32"), np.array([1.0, 2.0, 3.0]), ) def test_variable_array(self): """Test converting a variable to an array.""" v = backend.Variable(initializer=np.array([1.0, 2.0, 3.0])) self.assertAllClose(v.__array__(), np.array([1.0, 2.0, 3.0]))
VariableDtypeShapeNdimRepr
python
tensorflow__tensorflow
tensorflow/python/compiler/tensorrt/test/concatenation_test.py
{ "start": 1140, "end": 2888 }
class ____(trt_test.TfTrtIntegrationTestBase): """Testing Concatenation in TF-TRT conversion.""" def GraphFn(self, x): dtype = x.dtype # scale a = constant_op.constant(np.random.randn(3, 1, 1), dtype=dtype) r1 = x / a a = constant_op.constant(np.random.randn(3, 1, 1), dtype=dtype) r2 = a / x a = constant_op.constant(np.random.randn(1, 3, 1), dtype=dtype) r3 = a + x a = constant_op.constant(np.random.randn(1, 3, 1), dtype=dtype) r4 = x * a a = constant_op.constant(np.random.randn(3, 1, 1), dtype=dtype) r5 = x - a a = constant_op.constant(np.random.randn(3, 1, 1), dtype=dtype) r6 = a - x a = constant_op.constant(np.random.randn(3, 1), dtype=dtype) r7 = x - a a = constant_op.constant(np.random.randn(3, 1), dtype=dtype) r8 = a - x a = constant_op.constant(np.random.randn(3, 1, 1), dtype=dtype) r9 = gen_math_ops.maximum(x, a) a = constant_op.constant(np.random.randn(3, 1), dtype=dtype) r10 = gen_math_ops.minimum(a, x) a = constant_op.constant(np.random.randn(3), dtype=dtype) r11 = x * a a = constant_op.constant(np.random.randn(1), dtype=dtype) r12 = a * x concat1 = array_ops.concat([r1, r2, r3, r4, r5, r6], axis=-1) concat2 = array_ops.concat([r7, r8, r9, r10, r11, r12], axis=3) x = array_ops.concat([concat1, concat2], axis=-1) return gen_array_ops.reshape(x, [2, -1], name="output_0") def GetParams(self): return self.BuildParams(self.GraphFn, dtypes.float32, [[2, 3, 3, 1]], [[2, 126]]) def ExpectedEnginesToBuild(self, run_params): """Return the expected engines to build.""" return ["TRTEngineOp_000"] if __name__ == "__main__": test.main()
ConcatenationTest
python
django-debug-toolbar__django-debug-toolbar
debug_toolbar/_stubs.py
{ "start": 471, "end": 592 }
class ____(dj_template.RequestContext): template: dj_template.Template render_context: RenderContext
RequestContext
python
jmcnamara__XlsxWriter
xlsxwriter/test/styles/test_styles01.py
{ "start": 380, "end": 3101 }
class ____(unittest.TestCase): """ Test assembling a complete Styles file. """ def test_assemble_xml_file(self): """Test for styles.xml file with default styles.""" self.maxDiff = None fh = StringIO() style = Styles() style._set_filehandle(fh) workbook = Workbook() workbook._prepare_format_properties() style._set_style_properties( [ workbook.xf_formats, workbook.palette, workbook.font_count, workbook.num_formats, workbook.border_count, workbook.fill_count, workbook.custom_colors, workbook.dxf_formats, workbook.has_comments, ] ) style._assemble_xml_file() workbook.fileclosed = 1 exp = _xml_to_list( """ <?xml version="1.0" encoding="UTF-8" standalone="yes"?> <styleSheet xmlns="http://schemas.openxmlformats.org/spreadsheetml/2006/main"> <fonts count="1"> <font> <sz val="11"/> <color theme="1"/> <name val="Calibri"/> <family val="2"/> <scheme val="minor"/> </font> </fonts> <fills count="2"> <fill> <patternFill patternType="none"/> </fill> <fill> <patternFill patternType="gray125"/> </fill> </fills> <borders count="1"> <border> <left/> <right/> <top/> <bottom/> <diagonal/> </border> </borders> <cellStyleXfs count="1"> <xf numFmtId="0" fontId="0" fillId="0" borderId="0"/> </cellStyleXfs> <cellXfs count="1"> <xf numFmtId="0" fontId="0" fillId="0" borderId="0" xfId="0"/> </cellXfs> <cellStyles count="1"> <cellStyle name="Normal" xfId="0" builtinId="0"/> </cellStyles> <dxfs count="0"/> <tableStyles count="0" defaultTableStyle="TableStyleMedium9" defaultPivotStyle="PivotStyleLight16"/> </styleSheet> """ ) got = _xml_to_list(fh.getvalue()) self.assertEqual(exp, got)
TestAssembleStyles
python
getsentry__sentry
src/sentry/discover/compare_tables.py
{ "start": 1155, "end": 1476 }
class ____(Enum): BOTH_FAILED = "both_requests_failed" EAP_FAILED = "eap_failed" FIELD_NOT_FOUND = "field_not_found" METRICS_FAILED = "metrics_failed" NO_DATA = "no_data" NO_FIELDS = "no_fields" NO_PROJECT = "no_project" PASSED = "passed" QUERY_FAILED = "query_failed"
CompareTableResult
python
ray-project__ray
python/ray/train/_internal/worker_group.py
{ "start": 1671, "end": 2815 }
class ____: """Class representing a Worker.""" actor: ActorHandle metadata: WorkerMetadata def create_executable_class(executable_cls: Optional[Type] = None) -> Type: """Create the executable class to use as the Ray actors.""" if not executable_cls: return RayTrainWorker elif issubclass(executable_cls, RayTrainWorker): return executable_cls else: class _WrappedExecutable(executable_cls, RayTrainWorker): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) return _WrappedExecutable def construct_metadata() -> WorkerMetadata: """Creates metadata for this worker. This function is expected to be run on the actor. """ node_id = ray.get_runtime_context().get_node_id() node_ip = ray.util.get_node_ip_address() hostname = socket.gethostname() accelerator_ids = ray.get_runtime_context().get_accelerator_ids() pid = os.getpid() return WorkerMetadata( node_id=node_id, node_ip=node_ip, hostname=hostname, resource_ids=accelerator_ids, pid=pid, )
Worker
python
huggingface__transformers
src/transformers/models/vit_mae/modeling_vit_mae.py
{ "start": 22540, "end": 25010 }
class ____(ViTMAEPreTrainedModel): def __init__(self, config): super().__init__(config) self.config = config self.embeddings = ViTMAEEmbeddings(config) self.encoder = ViTMAEEncoder(config) self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.embeddings.patch_embeddings @check_model_inputs(tie_last_hidden_states=False) @auto_docstring def forward( self, pixel_values: Optional[torch.FloatTensor] = None, noise: Optional[torch.FloatTensor] = None, interpolate_pos_encoding: bool = False, **kwargs: Unpack[TransformersKwargs], ) -> ViTMAEModelOutput: r""" noise (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*): Mainly used for testing purposes to control randomness and maintain the reproducibility interpolate_pos_encoding (`bool`, *optional*, default `False`): Whether to interpolate the pre-trained position encodings. This is mainly used to use the model on higher resolution images. 
Examples: ```python >>> from transformers import AutoImageProcessor, ViTMAEModel >>> from PIL import Image >>> import requests >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> image_processor = AutoImageProcessor.from_pretrained("facebook/vit-mae-base") >>> model = ViTMAEModel.from_pretrained("facebook/vit-mae-base") >>> inputs = image_processor(images=image, return_tensors="pt") >>> outputs = model(**inputs) >>> last_hidden_states = outputs.last_hidden_state ```""" if pixel_values is None: raise ValueError("You have to specify pixel_values") embedding_output, mask, ids_restore = self.embeddings( pixel_values, noise=noise, interpolate_pos_encoding=interpolate_pos_encoding ) encoder_outputs: BaseModelOutput = self.encoder(embedding_output) sequence_output = encoder_outputs.last_hidden_state sequence_output = self.layernorm(sequence_output) return ViTMAEModelOutput(last_hidden_state=sequence_output, mask=mask, ids_restore=ids_restore)
ViTMAEModel
python
encode__starlette
starlette/formparsers.py
{ "start": 1479, "end": 1595 }
class ____(Exception): def __init__(self, message: str) -> None: self.message = message
MultiPartException
python
pexpect__pexpect
pexpect/pty_spawn.py
{ "start": 642, "end": 37382 }
class ____(SpawnBase): '''This is the main class interface for Pexpect. Use this class to start and control child applications. ''' # This is purely informational now - changing it has no effect use_native_pty_fork = use_native_pty_fork def __init__(self, command, args=[], timeout=30, maxread=2000, searchwindowsize=None, logfile=None, cwd=None, env=None, ignore_sighup=False, echo=True, preexec_fn=None, encoding=None, codec_errors='strict', dimensions=None, use_poll=False): '''This is the constructor. The command parameter may be a string that includes a command and any arguments to the command. For example:: child = pexpect.spawn('/usr/bin/ftp') child = pexpect.spawn('/usr/bin/ssh user@example.com') child = pexpect.spawn('ls -latr /tmp') You may also construct it with a list of arguments like so:: child = pexpect.spawn('/usr/bin/ftp', []) child = pexpect.spawn('/usr/bin/ssh', ['user@example.com']) child = pexpect.spawn('ls', ['-latr', '/tmp']) After this the child application will be created and will be ready to talk to. For normal use, see expect() and send() and sendline(). Remember that Pexpect does NOT interpret shell meta characters such as redirect, pipe, or wild cards (``>``, ``|``, or ``*``). This is a common mistake. If you want to run a command and pipe it through another command then you must also start a shell. For example:: child = pexpect.spawn('/bin/bash -c "ls -l | grep LOG > logs.txt"') child.expect(pexpect.EOF) The second form of spawn (where you pass a list of arguments) is useful in situations where you wish to spawn a command and pass it its own argument list. This can make syntax more clear. For example, the following is equivalent to the previous example:: shell_cmd = 'ls -l | grep LOG > logs.txt' child = pexpect.spawn('/bin/bash', ['-c', shell_cmd]) child.expect(pexpect.EOF) The maxread attribute sets the read buffer size. This is maximum number of bytes that Pexpect will try to read from a TTY at one time. 
Setting the maxread size to 1 will turn off buffering. Setting the maxread value higher may help performance in cases where large amounts of output are read back from the child. This feature is useful in conjunction with searchwindowsize. When the keyword argument *searchwindowsize* is None (default), the full buffer is searched at each iteration of receiving incoming data. The default number of bytes scanned at each iteration is very large and may be reduced to collaterally reduce search cost. After :meth:`~.expect` returns, the full buffer attribute remains up to size *maxread* irrespective of *searchwindowsize* value. When the keyword argument ``timeout`` is specified as a number, (default: *30*), then :class:`TIMEOUT` will be raised after the value specified has elapsed, in seconds, for any of the :meth:`~.expect` family of method calls. When None, TIMEOUT will not be raised, and :meth:`~.expect` may block indefinitely until match. The logfile member turns on or off logging. All input and output will be copied to the given file object. Set logfile to None to stop logging. This is the default. Set logfile to sys.stdout to echo everything to standard output. The logfile is flushed after each write. Example log input and output to a file:: child = pexpect.spawn('some_command') fout = open('mylog.txt','wb') child.logfile = fout Example log to stdout:: # In Python 2: child = pexpect.spawn('some_command') child.logfile = sys.stdout # In Python 3, we'll use the ``encoding`` argument to decode data # from the subprocess and handle it as unicode: child = pexpect.spawn('some_command', encoding='utf-8') child.logfile = sys.stdout The logfile_read and logfile_send members can be used to separately log the input from the child and output sent to the child. Sometimes you don't want to see everything you write to the child. You only want to log what the child sends back. 
For example:: child = pexpect.spawn('some_command') child.logfile_read = sys.stdout You will need to pass an encoding to spawn in the above code if you are using Python 3. To separately log output sent to the child use logfile_send:: child.logfile_send = fout If ``ignore_sighup`` is True, the child process will ignore SIGHUP signals. The default is False from Pexpect 4.0, meaning that SIGHUP will be handled normally by the child. The delaybeforesend helps overcome a weird behavior that many users were experiencing. The typical problem was that a user would expect() a "Password:" prompt and then immediately call sendline() to send the password. The user would then see that their password was echoed back to them. Passwords don't normally echo. The problem is caused by the fact that most applications print out the "Password" prompt and then turn off stdin echo, but if you send your password before the application turned off echo, then you get your password echoed. Normally this wouldn't be a problem when interacting with a human at a real keyboard. If you introduce a slight delay just before writing then this seems to clear up the problem. This was such a common problem for many users that I decided that the default pexpect behavior should be to sleep just before writing to the child application. 1/20th of a second (50 ms) seems to be enough to clear up the problem. You can set delaybeforesend to None to return to the old behavior. Note that spawn is clever about finding commands on your path. It uses the same logic that "which" uses to find executables. If you wish to get the exit status of the child you must call the close() method. The exit or signal status of the child will be stored in self.exitstatus or self.signalstatus. If the child exited normally then exitstatus will store the exit return code and signalstatus will be None. 
If the child was terminated abnormally with a signal then signalstatus will store the signal value and exitstatus will be None:: child = pexpect.spawn('some_command') child.close() print(child.exitstatus, child.signalstatus) If you need more detail you can also read the self.status member which stores the status returned by os.waitpid. You can interpret this using os.WIFEXITED/os.WEXITSTATUS or os.WIFSIGNALED/os.TERMSIG. The echo attribute may be set to False to disable echoing of input. As a pseudo-terminal, all input echoed by the "keyboard" (send() or sendline()) will be repeated to output. For many cases, it is not desirable to have echo enabled, and it may be later disabled using setecho(False) followed by waitnoecho(). However, for some platforms such as Solaris, this is not possible, and should be disabled immediately on spawn. If preexec_fn is given, it will be called in the child process before launching the given command. This is useful to e.g. reset inherited signal handlers. The dimensions attribute specifies the size of the pseudo-terminal as seen by the subprocess, and is specified as a two-entry tuple (rows, columns). If this is unspecified, the defaults in ptyprocess will apply. The use_poll attribute enables using select.poll() over select.select() for socket handling. 
This is handy if your system could have > 1024 fds ''' super(spawn, self).__init__(timeout=timeout, maxread=maxread, searchwindowsize=searchwindowsize, logfile=logfile, encoding=encoding, codec_errors=codec_errors) self.STDIN_FILENO = pty.STDIN_FILENO self.STDOUT_FILENO = pty.STDOUT_FILENO self.STDERR_FILENO = pty.STDERR_FILENO self.str_last_chars = 100 self.cwd = cwd self.env = env self.echo = echo self.ignore_sighup = ignore_sighup self.__irix_hack = sys.platform.lower().startswith('irix') if command is None: self.command = None self.args = None self.name = '<pexpect factory incomplete>' else: self._spawn(command, args, preexec_fn, dimensions) self.use_poll = use_poll def __str__(self): '''This returns a human-readable string that represents the state of the object. ''' s = [] s.append(repr(self)) s.append('command: ' + str(self.command)) s.append('args: %r' % (self.args,)) s.append('buffer (last %s chars): %r' % (self.str_last_chars,self.buffer[-self.str_last_chars:])) s.append('before (last %s chars): %r' % (self.str_last_chars,self.before[-self.str_last_chars:] if self.before else '')) s.append('after: %r' % (self.after,)) s.append('match: %r' % (self.match,)) s.append('match_index: ' + str(self.match_index)) s.append('exitstatus: ' + str(self.exitstatus)) if hasattr(self, 'ptyproc'): s.append('flag_eof: ' + str(self.flag_eof)) s.append('pid: ' + str(self.pid)) s.append('child_fd: ' + str(self.child_fd)) s.append('closed: ' + str(self.closed)) s.append('timeout: ' + str(self.timeout)) s.append('delimiter: ' + str(self.delimiter)) s.append('logfile: ' + str(self.logfile)) s.append('logfile_read: ' + str(self.logfile_read)) s.append('logfile_send: ' + str(self.logfile_send)) s.append('maxread: ' + str(self.maxread)) s.append('ignorecase: ' + str(self.ignorecase)) s.append('searchwindowsize: ' + str(self.searchwindowsize)) s.append('delaybeforesend: ' + str(self.delaybeforesend)) s.append('delayafterclose: ' + str(self.delayafterclose)) 
s.append('delayafterterminate: ' + str(self.delayafterterminate)) return '\n'.join(s) def _spawn(self, command, args=[], preexec_fn=None, dimensions=None): '''This starts the given command in a child process. This does all the fork/exec type of stuff for a pty. This is called by __init__. If args is empty then command will be parsed (split on spaces) and args will be set to parsed arguments. ''' # The pid and child_fd of this object get set by this method. # Note that it is difficult for this method to fail. # You cannot detect if the child process cannot start. # So the only way you can tell if the child process started # or not is to try to read from the file descriptor. If you get # EOF immediately then it means that the child is already dead. # That may not necessarily be bad because you may have spawned a child # that performs some task; creates no stdout output; and then dies. # If command is an int type then it may represent a file descriptor. if isinstance(command, type(0)): raise ExceptionPexpect('Command is an int type. ' + 'If this is a file descriptor then maybe you want to ' + 'use fdpexpect.fdspawn which takes an existing ' + 'file descriptor instead of a command string.') if not isinstance(args, type([])): raise TypeError('The argument, args, must be a list.') if args == []: self.args = split_command_line(command) self.command = self.args[0] else: # Make a shallow copy of the args list. self.args = args[:] self.args.insert(0, command) self.command = command command_with_path = which(self.command, env=self.env) if command_with_path is None: raise ExceptionPexpect('The command was not found or was not ' + 'executable: %s.' % self.command) self.command = command_with_path self.args[0] = self.command self.name = '<' + ' '.join(self.args) + '>' assert self.pid is None, 'The pid member must be None.' assert self.command is not None, 'The command member must not be None.' 
kwargs = {'echo': self.echo, 'preexec_fn': preexec_fn} if self.ignore_sighup: def preexec_wrapper(): "Set SIGHUP to be ignored, then call the real preexec_fn" signal.signal(signal.SIGHUP, signal.SIG_IGN) if preexec_fn is not None: preexec_fn() kwargs['preexec_fn'] = preexec_wrapper if dimensions is not None: kwargs['dimensions'] = dimensions if self.encoding is not None: # Encode command line using the specified encoding self.args = [a if isinstance(a, bytes) else a.encode(self.encoding) for a in self.args] self.ptyproc = self._spawnpty(self.args, env=self.env, cwd=self.cwd, **kwargs) self.pid = self.ptyproc.pid self.child_fd = self.ptyproc.fd self.terminated = False self.closed = False def _spawnpty(self, args, **kwargs): '''Spawn a pty and return an instance of PtyProcess.''' return ptyprocess.PtyProcess.spawn(args, **kwargs) def close(self, force=True): '''This closes the connection with the child application. Note that calling close() more than once is valid. This emulates standard Python behavior with files. Set force to True if you want to make sure that the child is terminated (SIGKILL is sent if the child ignores SIGHUP and SIGINT). ''' self.flush() with _wrap_ptyprocess_err(): # PtyProcessError may be raised if it is not possible to terminate # the child. self.ptyproc.close(force=force) self.isalive() # Update exit status from ptyproc self.child_fd = -1 self.closed = True def isatty(self): '''This returns True if the file descriptor is open and connected to a tty(-like) device, else False. On SVR4-style platforms implementing streams, such as SunOS and HP-UX, the child pty may not appear as a terminal device. This means methods such as setecho(), setwinsize(), getwinsize() may raise an IOError. ''' return os.isatty(self.child_fd) def waitnoecho(self, timeout=-1): '''This waits until the terminal ECHO flag is set False. This returns True if the echo mode is off. This returns False if the ECHO flag was not set False before the timeout. 
This can be used to detect when the child is waiting for a password. Usually a child application will turn off echo mode when it is waiting for the user to enter a password. For example, instead of expecting the "password:" prompt you can wait for the child to set ECHO off:: p = pexpect.spawn('ssh user@example.com') p.waitnoecho() p.sendline(mypassword) If timeout==-1 then this method will use the value in self.timeout. If timeout==None then this method to block until ECHO flag is False. ''' if timeout == -1: timeout = self.timeout if timeout is not None: end_time = time.time() + timeout while True: if not self.getecho(): return True if timeout < 0 and timeout is not None: return False if timeout is not None: timeout = end_time - time.time() time.sleep(0.1) def getecho(self): '''This returns the terminal echo mode. This returns True if echo is on or False if echo is off. Child applications that are expecting you to enter a password often set ECHO False. See waitnoecho(). Not supported on platforms where ``isatty()`` returns False. ''' return self.ptyproc.getecho() def setecho(self, state): '''This sets the terminal echo mode on or off. Note that anything the child sent before the echo will be lost, so you should be sure that your input buffer is empty before you call setecho(). For example, the following will work as expected:: p = pexpect.spawn('cat') # Echo is on by default. p.sendline('1234') # We expect see this twice from the child... p.expect(['1234']) # ... once from the tty echo... p.expect(['1234']) # ... and again from cat itself. p.setecho(False) # Turn off tty echo p.sendline('abcd') # We will set this only once (echoed by cat). 
p.sendline('wxyz') # We will set this only once (echoed by cat) p.expect(['abcd']) p.expect(['wxyz']) The following WILL NOT WORK because the lines sent before the setecho will be lost:: p = pexpect.spawn('cat') p.sendline('1234') p.setecho(False) # Turn off tty echo p.sendline('abcd') # We will set this only once (echoed by cat). p.sendline('wxyz') # We will set this only once (echoed by cat) p.expect(['1234']) p.expect(['1234']) p.expect(['abcd']) p.expect(['wxyz']) Not supported on platforms where ``isatty()`` returns False. ''' return self.ptyproc.setecho(state) def read_nonblocking(self, size=1, timeout=-1): '''This reads at most size characters from the child application. It includes a timeout. If the read does not complete within the timeout period then a TIMEOUT exception is raised. If the end of file is read then an EOF exception will be raised. If a logfile is specified, a copy is written to that log. If timeout is None then the read may block indefinitely. If timeout is -1 then the self.timeout value is used. If timeout is 0 then the child is polled and if there is no data immediately ready then this will raise a TIMEOUT exception. The timeout refers only to the amount of time to read at least one character. This is not affected by the 'size' parameter, so if you call read_nonblocking(size=100, timeout=30) and only one character is available right away then one character will be returned immediately. It will not wait for 30 seconds for another 99 characters to come in. On the other hand, if there are bytes available to read immediately, all those bytes will be read (up to the buffer size). So, if the buffer size is 1 megabyte and there is 1 megabyte of data available to read, the buffer will be filled, regardless of timeout. This is a wrapper around os.read(). It uses select.select() or select.poll() to implement the timeout. 
''' if self.closed: raise ValueError('I/O operation on closed file.') if self.use_poll: def select(timeout): return poll_ignore_interrupts([self.child_fd], timeout) else: def select(timeout): return select_ignore_interrupts([self.child_fd], [], [], timeout)[0] # If there is data available to read right now, read as much as # we can. We do this to increase performance if there are a lot # of bytes to be read. This also avoids calling isalive() too # often. See also: # * https://github.com/pexpect/pexpect/pull/304 # * http://trac.sagemath.org/ticket/10295 if select(0): try: incoming = super(spawn, self).read_nonblocking(size) except EOF: # Maybe the child is dead: update some attributes in that case self.isalive() raise while len(incoming) < size and select(0): try: incoming += super(spawn, self).read_nonblocking(size - len(incoming)) except EOF: # Maybe the child is dead: update some attributes in that case self.isalive() # Don't raise EOF, just return what we read so far. return incoming return incoming if timeout == -1: timeout = self.timeout if not self.isalive(): # The process is dead, but there may or may not be data # available to read. Note that some systems such as Solaris # do not give an EOF when the child dies. In fact, you can # still try to read from the child_fd -- it will block # forever or until TIMEOUT. For that reason, it's important # to do this check before calling select() with timeout. if select(0): return super(spawn, self).read_nonblocking(size) self.flag_eof = True raise EOF('End Of File (EOF). Braindead platform.') elif self.__irix_hack: # Irix takes a long time before it realizes a child was terminated. # Make sure that the timeout is at least 2 seconds. # FIXME So does this mean Irix systems are forced to always have # FIXME a 2 second delay when calling read_nonblocking? That sucks. if timeout is not None and timeout < 2: timeout = 2 # Because of the select(0) check above, we know that no data # is available right now. 
But if a non-zero timeout is given # (possibly timeout=None), we call select() with a timeout. if (timeout != 0) and select(timeout): return super(spawn, self).read_nonblocking(size) if not self.isalive(): # Some platforms, such as Irix, will claim that their # processes are alive; timeout on the select; and # then finally admit that they are not alive. self.flag_eof = True raise EOF('End of File (EOF). Very slow platform.') else: raise TIMEOUT('Timeout exceeded.') def write(self, s): '''This is similar to send() except that there is no return value. ''' self.send(s) def writelines(self, sequence): '''This calls write() for each element in the sequence. The sequence can be any iterable object producing strings, typically a list of strings. This does not add line separators. There is no return value. ''' for s in sequence: self.write(s) def send(self, s): '''Sends string ``s`` to the child process, returning the number of bytes written. If a logfile is specified, a copy is written to that log. The default terminal input mode is canonical processing unless set otherwise by the child process. This allows backspace and other line processing to be performed prior to transmitting to the receiving program. As this is buffered, there is a limited size of such buffer. On Linux systems, this is 4096 (defined by N_TTY_BUF_SIZE). All other systems honor the POSIX.1 definition PC_MAX_CANON -- 1024 on OSX, 256 on OpenSolaris, and 1920 on FreeBSD. This value may be discovered using fpathconf(3):: >>> from os import fpathconf >>> print(fpathconf(0, 'PC_MAX_CANON')) 256 On such a system, only 256 bytes may be received per line. Any subsequent bytes received will be discarded. BEL (``'\a'``) is then sent to output if IMAXBEL (termios.h) is set by the tty driver. This is usually enabled by default. Linux does not honor this as an option -- it behaves as though it is always set on. 
Canonical input processing may be disabled altogether by executing a shell, then stty(1), before executing the final program:: >>> bash = pexpect.spawn('/bin/bash', echo=False) >>> bash.sendline('stty -icanon') >>> bash.sendline('base64') >>> bash.sendline('x' * 5000) ''' if self.delaybeforesend is not None: time.sleep(self.delaybeforesend) s = self._coerce_send_string(s) self._log(s, 'send') b = self._encoder.encode(s, final=False) return os.write(self.child_fd, b) def sendline(self, s=''): '''Wraps send(), sending string ``s`` to child process, with ``os.linesep`` automatically appended. Returns number of bytes written. Only a limited number of bytes may be sent for each line in the default terminal mode, see docstring of :meth:`send`. ''' s = self._coerce_send_string(s) return self.send(s + self.linesep) def _log_control(self, s): """Write control characters to the appropriate log files""" if self.encoding is not None: s = s.decode(self.encoding, 'replace') self._log(s, 'send') def sendcontrol(self, char): '''Helper method that wraps send() with mnemonic access for sending control character to the child (such as Ctrl-C or Ctrl-D). For example, to send Ctrl-G (ASCII 7, bell, '\a'):: child.sendcontrol('g') See also, sendintr() and sendeof(). ''' n, byte = self.ptyproc.sendcontrol(char) self._log_control(byte) return n def sendeof(self): '''This sends an EOF to the child. This sends a character which causes the pending parent output buffer to be sent to the waiting child program without waiting for end-of-line. If it is the first character of the line, the read() in the user program returns 0, which signifies end-of-file. This means to work as expected a sendeof() has to be called at the beginning of a line. This method does not send a newline. It is the responsibility of the caller to ensure the eof is sent at the beginning of a line. ''' n, byte = self.ptyproc.sendeof() self._log_control(byte) def sendintr(self): '''This sends a SIGINT to the child. 
It does not require the SIGINT to be the first character on a line. ''' n, byte = self.ptyproc.sendintr() self._log_control(byte) @property def flag_eof(self): return self.ptyproc.flag_eof @flag_eof.setter def flag_eof(self, value): self.ptyproc.flag_eof = value def eof(self): '''This returns True if the EOF exception was ever raised. ''' return self.flag_eof def terminate(self, force=False): '''This forces a child process to terminate. It starts nicely with SIGHUP and SIGINT. If "force" is True then moves onto SIGKILL. This returns True if the child was terminated. This returns False if the child could not be terminated. ''' if not self.isalive(): return True try: self.kill(signal.SIGHUP) time.sleep(self.delayafterterminate) if not self.isalive(): return True self.kill(signal.SIGCONT) time.sleep(self.delayafterterminate) if not self.isalive(): return True self.kill(signal.SIGINT) time.sleep(self.delayafterterminate) if not self.isalive(): return True if force: self.kill(signal.SIGKILL) time.sleep(self.delayafterterminate) if not self.isalive(): return True else: return False return False except OSError: # I think there are kernel timing issues that sometimes cause # this to happen. I think isalive() reports True, but the # process is dead to the kernel. # Make one last attempt to see if the kernel is up to date. time.sleep(self.delayafterterminate) if not self.isalive(): return True else: return False def wait(self): '''This waits until the child exits. This is a blocking call. This will not read any data from the child, so this will block forever if the child has unread output and has terminated. In other words, the child may have printed output then called exit(), but, the child is technically still alive until its output is read by the parent. This method is non-blocking if :meth:`wait` has already been called previously or :meth:`isalive` method returns False. It simply returns the previously determined exit status. 
''' ptyproc = self.ptyproc with _wrap_ptyprocess_err(): # exception may occur if "Is some other process attempting # "job control with our child pid?" exitstatus = ptyproc.wait() self.status = ptyproc.status self.exitstatus = ptyproc.exitstatus self.signalstatus = ptyproc.signalstatus self.terminated = True return exitstatus def isalive(self): '''This tests if the child process is running or not. This is non-blocking. If the child was terminated then this will read the exitstatus or signalstatus of the child. This returns True if the child process appears to be running or False if not. It can take literally SECONDS for Solaris to return the right status. ''' ptyproc = self.ptyproc with _wrap_ptyprocess_err(): alive = ptyproc.isalive() if not alive: self.status = ptyproc.status self.exitstatus = ptyproc.exitstatus self.signalstatus = ptyproc.signalstatus self.terminated = True return alive def kill(self, sig): '''This sends the given signal to the child application. In keeping with UNIX tradition it has a misleading name. It does not necessarily kill the child unless you send the right signal. ''' # Same as os.kill, but the pid is given for you. if self.isalive(): os.kill(self.pid, sig) def getwinsize(self): '''This returns the terminal window size of the child tty. The return value is a tuple of (rows, cols). ''' return self.ptyproc.getwinsize() def setwinsize(self, rows, cols): '''This sets the terminal window size of the child tty. This will cause a SIGWINCH signal to be sent to the child. This does not change the physical window size. It changes the size reported to TTY-aware applications like vi or curses -- applications that respond to the SIGWINCH signal. ''' return self.ptyproc.setwinsize(rows, cols) def interact(self, escape_character=chr(29), input_filter=None, output_filter=None): '''This gives control of the child process to the interactive user (the human at the keyboard). 
Keystrokes are sent to the child process, and the stdout and stderr output of the child process is printed. This simply echos the child stdout and child stderr to the real stdout and it echos the real stdin to the child stdin. When the user types the escape_character this method will return None. The escape_character will not be transmitted. The default for escape_character is entered as ``Ctrl - ]``, the very same as BSD telnet. To prevent escaping, escape_character may be set to None. If a logfile is specified, then the data sent and received from the child process in interact mode is duplicated to the given log. You may pass in optional input and output filter functions. These functions should take bytes array and return bytes array too. Even with ``encoding='utf-8'`` support, meth:`interact` will always pass input_filter and output_filter bytes. You may need to wrap your function to decode and encode back to UTF-8. The output_filter will be passed all the output from the child process. The input_filter will be passed all the keyboard input from the user. The input_filter is run BEFORE the check for the escape_character. Note that if you change the window size of the parent the SIGWINCH signal will not be passed through to the child. If you want the child window size to change when the parent's window size changes then do something like the following example:: import pexpect, struct, fcntl, termios, signal, sys def sigwinch_passthrough (sig, data): s = struct.pack("HHHH", 0, 0, 0, 0) a = struct.unpack('hhhh', fcntl.ioctl(sys.stdout.fileno(), termios.TIOCGWINSZ , s)) if not p.closed: p.setwinsize(a[0],a[1]) # Note this 'p' is global and used in sigwinch_passthrough. p = pexpect.spawn('/bin/bash') signal.signal(signal.SIGWINCH, sigwinch_passthrough) p.interact() ''' # Flush the buffer. 
self.write_to_stdout(self.buffer) self.stdout.flush() self._buffer = self.buffer_type() mode = tty.tcgetattr(self.STDIN_FILENO) tty.setraw(self.STDIN_FILENO) if escape_character is not None and PY3: escape_character = escape_character.encode('latin-1') try: self.__interact_copy(escape_character, input_filter, output_filter) finally: tty.tcsetattr(self.STDIN_FILENO, tty.TCSAFLUSH, mode) def __interact_writen(self, fd, data): '''This is used by the interact() method. ''' while data != b'' and self.isalive(): n = os.write(fd, data) data = data[n:] def __interact_read(self, fd): '''This is used by the interact() method. ''' return os.read(fd, 1000) def __interact_copy( self, escape_character=None, input_filter=None, output_filter=None ): '''This is used by the interact() method. ''' while self.isalive(): if self.use_poll: r = poll_ignore_interrupts([self.child_fd, self.STDIN_FILENO]) else: r, w, e = select_ignore_interrupts( [self.child_fd, self.STDIN_FILENO], [], [] ) if self.child_fd in r: try: data = self.__interact_read(self.child_fd) except OSError as err: if err.args[0] == errno.EIO: # Linux-style EOF break raise if data == b'': # BSD-style EOF break if output_filter: data = output_filter(data) self._log(data, 'read') os.write(self.STDOUT_FILENO, data) if self.STDIN_FILENO in r: data = self.__interact_read(self.STDIN_FILENO) if input_filter: data = input_filter(data) i = -1 if escape_character is not None: i = data.rfind(escape_character) if i != -1: data = data[:i] if data: self._log(data, 'send') self.__interact_writen(self.child_fd, data) break self._log(data, 'send') self.__interact_writen(self.child_fd, data) def spawnu(*args, **kwargs): """Deprecated: pass encoding to spawn() instead.""" kwargs.setdefault('encoding', 'utf-8') return spawn(*args, **kwargs)
spawn
python
getsentry__sentry
tests/sentry/workflow_engine/migrations/test_0104_action_data_fallthrough_type.py
{ "start": 234, "end": 1473 }
class ____(TestMigrations): migrate_from = "0103_add_unique_constraint" migrate_to = "0104_action_data_fallthrough_type" app = "workflow_engine" def setup_initial_state(self) -> None: self.org = self.create_organization(name="test-org") self.project = self.create_project(organization=self.org) self.action = Action.objects.create( type="email", data={"fallthroughType": FallthroughChoiceType.ACTIVE_MEMBERS}, config={ "target_type": ActionTarget.ISSUE_OWNERS, "target_identifier": None, }, ) self.action_no_fallthrough = Action.objects.create( type="email", data={}, config={ "target_type": ActionTarget.USER, "target_identifier": str(self.user.id), }, ) def test_migration(self) -> None: fallthrough_action = Action.objects.filter( data={"fallthrough_type": FallthroughChoiceType.ACTIVE_MEMBERS} ) assert len(fallthrough_action) == 1 no_fallthrough_action = Action.objects.filter(data={}) assert len(no_fallthrough_action) == 1
TestActionDataFallthroughType
python
huggingface__transformers
src/transformers/models/chameleon/modeling_chameleon.py
{ "start": 28247, "end": 32206 }
class ____(nn.Module): def __init__(self, config): super().__init__() self.num_resolutions = len(config.channel_multiplier) self.num_res_blocks = config.num_res_blocks base_channels = config.base_channels resolution = config.resolution in_channels = config.in_channels double_latent = config.double_latent latent_channels = config.latent_channels channel_multiplier = config.channel_multiplier self.conv_in = torch.nn.Conv2d(in_channels, base_channels, kernel_size=3, stride=1, padding=1) curr_res = resolution in_channel_multiplier = (1,) + tuple(channel_multiplier) self.in_channel_multiplier = in_channel_multiplier self.down = nn.ModuleList() for i_level in range(self.num_resolutions): block = nn.ModuleList() attn = nn.ModuleList() block_in = base_channels * in_channel_multiplier[i_level] block_out = base_channels * channel_multiplier[i_level] for i_block in range(self.num_res_blocks): block.append( ChameleonVQVAEEncoderResnetBlock( config=config, in_channels=block_in, out_channels=block_out, ) ) block_in = block_out if ( config.attn_resolutions is not None and curr_res in config.attn_resolutions and config.attn_type == "vanilla" ): attn.append(ChameleonVQVAEEncoderAttnBlock(block_in)) down = nn.Module() down.block = block down.attn = attn if i_level != self.num_resolutions - 1: down.downsample = ChameleonVQVAEEncoderConvDownsample(block_in) curr_res = curr_res // 2 self.down.append(down) self.mid = nn.Module() self.mid.block_1 = ChameleonVQVAEEncoderResnetBlock( config=config, in_channels=block_in, out_channels=block_in, ) self.mid.attn_1 = ChameleonVQVAEEncoderAttnBlock(block_in) if config.attn_type == "vanilla" else nn.Identity() self.mid.block_2 = ChameleonVQVAEEncoderResnetBlock( config=config, in_channels=block_in, out_channels=block_in, ) self.norm_out = torch.nn.GroupNorm(num_groups=32, num_channels=block_in, eps=1e-6, affine=True) self.conv_out = torch.nn.Conv2d( block_in, 2 * latent_channels if double_latent else latent_channels, kernel_size=3, stride=1, 
padding=1, ) def forward(self, pixel_values: torch.LongTensor): # downsampling hidden_states = [self.conv_in(pixel_values)] for i_level in range(self.num_resolutions): for i_block in range(self.num_res_blocks): hidden_state = self.down[i_level].block[i_block]( hidden_states[-1], ) if len(self.down[i_level].attn) > 0: hidden_state = self.down[i_level].attn[i_block](hidden_state) hidden_states.append(hidden_state) if i_level != self.num_resolutions - 1: hidden_states.append(self.down[i_level].downsample(hidden_states[-1])) # middle last_hidden_state = hidden_states[-1] last_hidden_state = self.mid.block_1(last_hidden_state) last_hidden_state = self.mid.attn_1(last_hidden_state) last_hidden_state = self.mid.block_2(last_hidden_state) # end last_hidden_state = self.norm_out(last_hidden_state) last_hidden_state *= torch.sigmoid(last_hidden_state) last_hidden_state = self.conv_out(last_hidden_state) return last_hidden_state
ChameleonVQVAEEncoder
python
tensorflow__tensorflow
tensorflow/python/ops/clustering_ops.py
{ "start": 2105, "end": 25455 }
class ____: """Creates the graph for k-means clustering.""" def __init__(self, inputs, num_clusters, initial_clusters=RANDOM_INIT, distance_metric=SQUARED_EUCLIDEAN_DISTANCE, use_mini_batch=False, mini_batch_steps_per_iteration=1, random_seed=0, kmeans_plus_plus_num_retries=2, kmc2_chain_length=200): """Creates an object for generating KMeans clustering graph. This class implements the following variants of K-means algorithm: If use_mini_batch is False, it runs standard full batch K-means. Each step runs a single iteration of K-Means. This step can be run sharded across multiple workers by passing a list of sharded inputs to this class. Note however that a single step needs to process the full input at once. If use_mini_batch is True, it runs a generalization of the mini-batch K-means algorithm. It runs multiple iterations, where each iteration is composed of mini_batch_steps_per_iteration steps. Two copies of cluster centers are maintained: one that is updated at the end of each iteration, and one that is updated every step. The first copy is used to compute cluster allocations for each step, and for inference, while the second copy is the one updated each step using the mini-batch update rule. After each iteration is complete, this second copy is copied back the first copy. Note that for use_mini_batch=True, when mini_batch_steps_per_iteration=1, the algorithm reduces to the standard mini-batch algorithm. Also by setting mini_batch_steps_per_iteration = num_inputs / batch_size, the algorithm becomes an asynchronous version of the full-batch algorithm. Note however that there is no guarantee by this implementation that each input is seen exactly once per iteration. Also, different updates are applied asynchronously without locking. So this asynchronous version may not behave exactly like a full-batch version. Args: inputs: An input tensor or list of input tensors. It is assumed that the data points have been previously randomly permuted. 
num_clusters: An integer tensor specifying the number of clusters. This argument is ignored if initial_clusters is a tensor or numpy array. initial_clusters: Specifies the clusters used during initialization. One of the following: - a tensor or numpy array with the initial cluster centers. - a function f(inputs, k) that returns up to k centers from `inputs`. - "random": Choose centers randomly from `inputs`. - "kmeans_plus_plus": Use kmeans++ to choose centers from `inputs`. - "kmc2": Use the fast k-MC2 algorithm to choose centers from `inputs`. In the last three cases, one batch of `inputs` may not yield `num_clusters` centers, in which case initialization will require multiple batches until enough centers are chosen. In the case of "random" or "kmeans_plus_plus", if the input size is <= `num_clusters` then the entire batch is chosen to be cluster centers. distance_metric: Distance metric used for clustering. Supported options: "squared_euclidean", "cosine". use_mini_batch: If true, use the mini-batch k-means algorithm. Else assume full batch. mini_batch_steps_per_iteration: Number of steps after which the updated cluster centers are synced back to a master copy. random_seed: Seed for PRNG used to initialize seeds. kmeans_plus_plus_num_retries: For each point that is sampled during kmeans++ initialization, this parameter specifies the number of additional points to draw from the current distribution before selecting the best. If a negative value is specified, a heuristic is used to sample O(log(num_to_sample)) additional points. kmc2_chain_length: Determines how many candidate points are used by the k-MC2 algorithm to produce one new cluster centers. If a (mini-)batch contains less points, one new cluster center is generated from the (mini-)batch. Raises: ValueError: An invalid argument was passed to initial_clusters or distance_metric. 
""" initialization_algorithms = [RANDOM_INIT, KMEANS_PLUS_PLUS_INIT, KMC2_INIT] if isinstance(initial_clusters, str) and initial_clusters not in initialization_algorithms: raise ValueError( f'Unsupported initialization algorithm `{initial_clusters}`,' f'must be one of `{initialization_algorithms}`.') distance_metrics = [SQUARED_EUCLIDEAN_DISTANCE, COSINE_DISTANCE] if distance_metric not in distance_metrics: raise ValueError(f'Unsupported distance metric `{distance_metric}`,' f'must be one of `{distance_metrics}`.') self._inputs = inputs if isinstance(inputs, list) else [inputs] self._num_clusters = num_clusters self._initial_clusters = initial_clusters self._distance_metric = distance_metric self._use_mini_batch = use_mini_batch self._mini_batch_steps_per_iteration = int(mini_batch_steps_per_iteration) self._seed = random_seed_ops.get_seed(random_seed)[0] self._kmeans_plus_plus_num_retries = kmeans_plus_plus_num_retries self._kmc2_chain_length = kmc2_chain_length @classmethod def _distance_graph(cls, inputs, clusters, distance_metric): """Computes distance between each input and each cluster center. Args: inputs: list of input Tensors. clusters: cluster Tensor. distance_metric: distance metric used for clustering Returns: list of Tensors, where each element corresponds to each element in inputs. The value is the distance of each row to all the cluster centers. Currently only Euclidean distance and cosine distance are supported. """ assert isinstance(inputs, list) if distance_metric == SQUARED_EUCLIDEAN_DISTANCE: return cls._compute_euclidean_distance(inputs, clusters) elif distance_metric == COSINE_DISTANCE: return cls._compute_cosine_distance( inputs, clusters, inputs_normalized=True) else: assert False, str(distance_metric) @classmethod def _compute_euclidean_distance(cls, inputs, clusters): """Computes Euclidean distance between each input and each cluster center. Args: inputs: list of input Tensors. clusters: cluster Tensor. 
Returns: list of Tensors, where each element corresponds to each element in inputs. The value is the distance of each row to all the cluster centers. """ output = [] for inp in inputs: with ops.colocate_with(inp, ignore_existing=True): # Computes Euclidean distance. Note the first and third terms are # broadcast additions. squared_distance = ( math_ops.reduce_sum(math_ops.square(inp), 1, keepdims=True) - 2 * math_ops.matmul(inp, clusters, transpose_b=True) + array_ops.transpose( math_ops.reduce_sum( math_ops.square(clusters), 1, keepdims=True))) output.append(squared_distance) return output @classmethod def _compute_cosine_distance(cls, inputs, clusters, inputs_normalized=True): """Computes cosine distance between each input and each cluster center. Args: inputs: list of input Tensor. clusters: cluster Tensor inputs_normalized: if True, it assumes that inp and clusters are normalized and computes the dot product which is equivalent to the cosine distance. Else it L2 normalizes the inputs first. Returns: list of Tensors, where each element corresponds to each element in inp. The value is the distance of each row to all the cluster centers. """ output = [] if not inputs_normalized: with ops.colocate_with(clusters, ignore_existing=True): clusters = nn_impl.l2_normalize(clusters, axis=1) for inp in inputs: with ops.colocate_with(inp, ignore_existing=True): if not inputs_normalized: inp = nn_impl.l2_normalize(inp, axis=1) output.append(1 - math_ops.matmul(inp, clusters, transpose_b=True)) return output def _infer_graph(self, inputs, clusters): """Maps input to closest cluster and the score. Args: inputs: list of input Tensors. clusters: Tensor of cluster centers. Returns: List of tuple, where each value in tuple corresponds to a value in inp. The tuple has following three elements: all_scores: distance of each input to each cluster center. score: distance of each input to closest cluster center. cluster_idx: index of cluster center closest to the corresponding input. 
""" assert isinstance(inputs, list) # Pairwise distances are used only by transform(). In all other cases, this # sub-graph is not evaluated. scores = self._distance_graph(inputs, clusters, self._distance_metric) output = [] if (self._distance_metric == COSINE_DISTANCE and not self._clusters_l2_normalized()): # The cosine distance between normalized vectors x and y is the same as # 2 * squared_euclidean_distance. We are using this fact and reusing the # nearest_neighbors op. # TODO(ands): Support COSINE distance in nearest_neighbors and remove # this. with ops.colocate_with(clusters, ignore_existing=True): clusters = nn_impl.l2_normalize(clusters, axis=1) for inp, score in zip(inputs, scores): with ops.colocate_with(inp, ignore_existing=True): (indices, distances) = gen_clustering_ops.nearest_neighbors(inp, clusters, 1) if self._distance_metric == COSINE_DISTANCE: distances *= 0.5 output.append( (score, array_ops.squeeze(distances, [-1]), array_ops.squeeze(indices, [-1]))) return zip(*output) def _clusters_l2_normalized(self): """Returns True if clusters centers are kept normalized.""" return (self._distance_metric == COSINE_DISTANCE and (not self._use_mini_batch or self._mini_batch_steps_per_iteration > 1)) def _create_variables(self, num_clusters): """Creates variables. Args: num_clusters: an integer Tensor providing the number of clusters. Returns: Tuple with following elements: - cluster_centers: a Tensor for storing cluster centers - cluster_centers_initialized: bool Variable indicating whether clusters are initialized. - cluster_counts: a Tensor for storing counts of points assigned to this cluster. This is used by mini-batch training. - cluster_centers_updated: Tensor representing copy of cluster centers that are updated every step. - update_in_steps: numbers of steps left before we sync cluster_centers_updated back to cluster_centers. 
""" init_value = array_ops.placeholder_with_default([], shape=None) cluster_centers = variable_v1.VariableV1( init_value, name=CLUSTERS_VAR_NAME, validate_shape=False) cluster_centers_initialized = variable_v1.VariableV1( False, dtype=dtypes.bool, name='initialized') if self._use_mini_batch and self._mini_batch_steps_per_iteration > 1: # Copy of cluster centers actively updated each step according to # mini-batch update rule. cluster_centers_updated = variable_v1.VariableV1( init_value, name='clusters_updated', validate_shape=False) # How many steps till we copy the updated clusters to cluster_centers. update_in_steps = variable_v1.VariableV1( self._mini_batch_steps_per_iteration, dtype=dtypes.int64, name='update_in_steps') # Count of points assigned to cluster_centers_updated. cluster_counts = variable_v1.VariableV1( array_ops.zeros([num_clusters], dtype=dtypes.int64)) else: cluster_centers_updated = cluster_centers update_in_steps = None cluster_counts = ( variable_v1.VariableV1( array_ops.ones([num_clusters], dtype=dtypes.int64)) if self._use_mini_batch else None) return (cluster_centers, cluster_centers_initialized, cluster_counts, cluster_centers_updated, update_in_steps) @classmethod def _l2_normalize_data(cls, inputs): """Normalized the input data.""" output = [] for inp in inputs: with ops.colocate_with(inp, ignore_existing=True): output.append(nn_impl.l2_normalize(inp, dim=1)) return output def training_graph(self): """Generate a training graph for kmeans algorithm. This returns, among other things, an op that chooses initial centers (init_op), a boolean variable that is set to True when the initial centers are chosen (cluster_centers_initialized), and an op to perform either an entire Lloyd iteration or a mini-batch of a Lloyd iteration (training_op). The caller should use these components as follows. A single worker should execute init_op multiple times until cluster_centers_initialized becomes True. 
Then multiple workers may execute training_op any number of times. Returns: A tuple consisting of: all_scores: A matrix (or list of matrices) of dimensions (num_input, num_clusters) where the value is the distance of an input vector and a cluster center. cluster_idx: A vector (or list of vectors). Each element in the vector corresponds to an input row in 'inp' and specifies the cluster id corresponding to the input. scores: Similar to cluster_idx but specifies the distance to the assigned cluster instead. cluster_centers_initialized: scalar indicating whether clusters have been initialized. init_op: an op to initialize the clusters. training_op: an op that runs an iteration of training. """ # Implementation of kmeans. if (isinstance(self._initial_clusters, str) or callable(self._initial_clusters)): initial_clusters = self._initial_clusters num_clusters = ops.convert_to_tensor(self._num_clusters) else: initial_clusters = ops.convert_to_tensor(self._initial_clusters) num_clusters = array_ops.shape(initial_clusters)[0] inputs = self._inputs (cluster_centers_var, cluster_centers_initialized, total_counts, cluster_centers_updated, update_in_steps) = self._create_variables(num_clusters) init_op = _InitializeClustersOpFactory( self._inputs, num_clusters, initial_clusters, self._distance_metric, self._seed, self._kmeans_plus_plus_num_retries, self._kmc2_chain_length, cluster_centers_var, cluster_centers_updated, cluster_centers_initialized).op() cluster_centers = cluster_centers_var if self._distance_metric == COSINE_DISTANCE: inputs = self._l2_normalize_data(inputs) if not self._clusters_l2_normalized(): cluster_centers = nn_impl.l2_normalize(cluster_centers, dim=1) all_scores, scores, cluster_idx = self._infer_graph(inputs, cluster_centers) if self._use_mini_batch: sync_updates_op = self._mini_batch_sync_updates_op( update_in_steps, cluster_centers_var, cluster_centers_updated, total_counts) assert sync_updates_op is not None with 
ops.control_dependencies([sync_updates_op]): training_op = self._mini_batch_training_op(inputs, cluster_idx, cluster_centers_updated, total_counts) else: assert cluster_centers == cluster_centers_var training_op = self._full_batch_training_op(inputs, num_clusters, cluster_idx, cluster_centers_var) return (all_scores, cluster_idx, scores, cluster_centers_initialized, init_op, training_op) def _mini_batch_sync_updates_op(self, update_in_steps, cluster_centers_var, cluster_centers_updated, total_counts): if self._use_mini_batch and self._mini_batch_steps_per_iteration > 1: assert update_in_steps is not None with ops.colocate_with(update_in_steps, ignore_existing=True): def _f(): # Note that there is a race condition here, so we do a best effort # updates here. We reset update_in_steps first so that other workers # don't duplicate the updates. Also we update cluster_center_vars # before resetting total_counts to avoid large updates to # cluster_centers_updated based on partially updated # cluster_center_vars. with ops.control_dependencies([ state_ops.assign(update_in_steps, self._mini_batch_steps_per_iteration - 1) ]): with ops.colocate_with( cluster_centers_updated, ignore_existing=True): if self._distance_metric == COSINE_DISTANCE: cluster_centers = nn_impl.l2_normalize( cluster_centers_updated, dim=1) else: cluster_centers = cluster_centers_updated with ops.colocate_with(cluster_centers_var, ignore_existing=True): with ops.control_dependencies( [state_ops.assign(cluster_centers_var, cluster_centers)]): with ops.colocate_with(None, ignore_existing=True): with ops.control_dependencies([ state_ops.assign(total_counts, array_ops.zeros_like(total_counts)) ]): return array_ops.identity(update_in_steps) return cond.cond( update_in_steps <= 0, _f, lambda: state_ops.assign_sub(update_in_steps, 1)) else: return control_flow_ops.no_op() def _mini_batch_training_op(self, inputs, cluster_idx_list, cluster_centers, total_counts): """Creates an op for training for mini batch case. 
Args: inputs: list of input Tensors. cluster_idx_list: A vector (or list of vectors). Each element in the vector corresponds to an input row in 'inp' and specifies the cluster id corresponding to the input. cluster_centers: Tensor Ref of cluster centers. total_counts: Tensor Ref of cluster counts. Returns: An op for doing an update of mini-batch k-means. """ update_ops = [] for inp, cluster_idx in zip(inputs, cluster_idx_list): with ops.colocate_with(inp, ignore_existing=True): assert total_counts is not None cluster_idx = array_ops.reshape(cluster_idx, [-1]) # Dedupe the unique ids of cluster_centers being updated so that updates # can be locally aggregated. unique_ids, unique_idx = array_ops.unique(cluster_idx) num_unique_cluster_idx = array_ops.size(unique_ids) # Fetch the old values of counts and cluster_centers. with ops.colocate_with(total_counts, ignore_existing=True): old_counts = array_ops.gather(total_counts, unique_ids) # TODO(agarwal): This colocation seems to run into problems. Fix it. with ops.colocate_with(cluster_centers, ignore_existing=True): old_cluster_centers = array_ops.gather(cluster_centers, unique_ids) # Locally aggregate the increment to counts. count_updates = math_ops.unsorted_segment_sum( array_ops.ones_like(unique_idx, dtype=total_counts.dtype), unique_idx, num_unique_cluster_idx) # Locally compute the sum of inputs mapped to each id. # For a cluster with old cluster value x, old count n, and with data # d_1,...d_k newly assigned to it, we recompute the new value as # \\(x += (sum_i(d_i) - k * x) / (n + k)\\). # Compute \\(sum_i(d_i)\\), see comment above. cluster_center_updates = math_ops.unsorted_segment_sum( inp, unique_idx, num_unique_cluster_idx) # Shape to enable broadcasting count_updates and learning_rate to inp. # It extends the shape with 1's to match the rank of inp. 
broadcast_shape = array_ops.concat([ array_ops.reshape(num_unique_cluster_idx, [1]), array_ops.ones( array_ops.reshape(array_ops.rank(inp) - 1, [1]), dtype=dtypes.int32) ], 0) # Subtract k * x, see comment above. cluster_center_updates -= math_ops.cast( array_ops.reshape(count_updates, broadcast_shape), inp.dtype) * old_cluster_centers learning_rate = math_ops.reciprocal( math_ops.cast(old_counts + count_updates, inp.dtype)) learning_rate = array_ops.reshape(learning_rate, broadcast_shape) # scale by 1 / (n + k), see comment above. cluster_center_updates *= learning_rate # Apply the updates. update_counts = state_ops.scatter_add(total_counts, unique_ids, count_updates) update_cluster_centers = state_ops.scatter_add(cluster_centers, unique_ids, cluster_center_updates) update_ops.extend([update_counts, update_cluster_centers]) return control_flow_ops.group(*update_ops) def _full_batch_training_op(self, inputs, num_clusters, cluster_idx_list, cluster_centers): """Creates an op for training for full batch case. Args: inputs: list of input Tensors. num_clusters: an integer Tensor providing the number of clusters. cluster_idx_list: A vector (or list of vectors). Each element in the vector corresponds to an input row in 'inp' and specifies the cluster id corresponding to the input. cluster_centers: Tensor Ref of cluster centers. Returns: An op for doing an update of mini-batch k-means. 
""" cluster_sums = [] cluster_counts = [] epsilon = constant_op.constant(1e-6, dtype=inputs[0].dtype) for inp, cluster_idx in zip(inputs, cluster_idx_list): with ops.colocate_with(inp, ignore_existing=True): cluster_sums.append( math_ops.unsorted_segment_sum(inp, cluster_idx, num_clusters)) cluster_counts.append( math_ops.unsorted_segment_sum( array_ops.reshape( array_ops.ones( array_ops.reshape(array_ops.shape(inp)[0], [-1])), [-1, 1]), cluster_idx, num_clusters)) with ops.colocate_with(cluster_centers, ignore_existing=True): new_clusters_centers = math_ops.add_n(cluster_sums) / ( math_ops.cast(math_ops.add_n(cluster_counts), cluster_sums[0].dtype) + epsilon) if self._clusters_l2_normalized(): new_clusters_centers = nn_impl.l2_normalize(new_clusters_centers, dim=1) return state_ops.assign(cluster_centers, new_clusters_centers)
KMeans
python
realpython__materials
python-double-underscore/shapes.py
{ "start": 178, "end": 478 }
class ____: def __init__(self, side): self.side = _validate(side) def calculate_area(self): return round(self.side**2, 2) def _validate(value): if not isinstance(value, int | float) or value <= 0: raise ValueError("positive number expected") return value
Square
python
tensorflow__tensorflow
tensorflow/python/distribute/combinations.py
{ "start": 4860, "end": 5482 }
class ____(combinations_lib.TestCombination):
  """Sets up distribution strategy for tests."""

  def should_execute_combination(self, kwargs):
    # Gather every distribution-strategy argument in this combination.
    strategies = [
        value for value in kwargs.values()
        if isinstance(value, NamedDistribution)
    ]
    skip_under_xla = test_util.is_xla_enabled() and any(
        strategy.no_xla for strategy in strategies)
    if skip_under_xla:
      # At least one strategy opted out of XLA and we are in an XLA test.
      return (
          False,
          "n/a: skipping strategy combination with no_xla=True in XLA tests")
    return (True, None)

  def parameter_modifiers(self):
    # Resolve NamedDistribution parameters and expose the optional
    # use_var_policy knob to combinations.
    modifiers = [DistributionParameter()]
    modifiers.append(combinations_lib.OptionalParameter("use_var_policy"))
    return modifiers
DistributionCombination
python
ray-project__ray
release/long_running_tests/workloads/serve_failure.py
{ "start": 1600, "end": 2839 }
class ____:
    """Chaos-testing helper that periodically kills a random Serve actor."""

    def __init__(self, kill_period_s=1):
        # Seconds to wait between consecutive kills.
        self.kill_period_s = kill_period_s
        # Application names whose replicas are protected from killing.
        self.sanctuary = set()

    async def run(self):
        """Forever: pick one Serve actor at random and kill it (with restart)."""
        while True:
            victim = random.choice(self._get_serve_actors())
            print(f"Killing {victim}")
            ray.kill(victim, no_restart=False)
            await asyncio.sleep(self.kill_period_s)

    async def spare(self, app_name: str):
        """Protect an application's replicas from being killed."""
        print(f'Sparing application "{app_name}" replicas.')
        self.sanctuary.add(app_name)

    async def stop_spare(self, app_name: str):
        """Make an application's replicas killable again."""
        print(f'No longer sparing application "{app_name}" replicas.')
        self.sanctuary.discard(app_name)

    def _get_serve_actors(self):
        # Candidates: every HTTP proxy, the controller itself, and every
        # replica of applications that are not in the sanctuary.
        controller = _get_global_client()._controller
        handles = list(ray.get(controller.get_proxies.remote()).values())
        handles.append(controller)
        replicas_by_deployment = ray.get(
            controller._all_running_replicas.remote()
        )
        for deployment_id, replicas in replicas_by_deployment.items():
            if deployment_id.app in self.sanctuary:
                continue
            handles.extend(info.actor_handle for info in replicas)
        return handles
RandomKiller
python
neetcode-gh__leetcode
python/0045-jump-game-ii.py
{ "start": 0, "end": 331 }
class ____:
    def jump(self, nums: List[int]) -> int:
        """Return the minimum number of jumps needed to reach the last index.

        Greedy BFS over index windows: each round expands the current
        window [lo, hi] to the farthest index reachable from anywhere
        inside it and counts one jump. O(n) time, O(1) extra space.
        """
        jumps = 0
        lo = hi = 0
        last = len(nums) - 1
        while hi < last:
            # Farthest index reachable from the current window.
            farthest = max(i + nums[i] for i in range(lo, hi + 1))
            lo, hi = hi + 1, farthest
            jumps += 1
        return jumps
Solution
python
pytorch__pytorch
torch/_export/serde/schema.py
{ "start": 1730, "end": 1852 }
class ____:
    """Serialized symbolic expression: its string form plus an optional hint."""

    # NOTE(review): the integer metadata inside each ``Annotated`` (10, 20)
    # presumably encodes a stable field id for the serialization schema —
    # confirm before renumbering.
    expr_str: Annotated[str, 10]
    # Optional concrete value hint for the expression; absent by default.
    hint: Annotated[Optional[SymExprHint], 20] = None


# Decorator applied to the next schema class (definition continues out of view).
@_union_dataclass
SymExpr
python
kamyu104__LeetCode-Solutions
Python/projection-area-of-3d-shapes.py
{ "start": 31, "end": 530 }
class ____(object):
    def projectionArea(self, grid):
        """
        :type grid: List[List[int]]
        :rtype: int

        Total projection area = top view + front view + side view:
          top:   one unit per non-empty cell
          front: per row, the tallest stack
          side:  per column, the tallest stack
        """
        # ``range`` instead of ``xrange``: xrange is a NameError on
        # Python 3, and on Python 2 the list-vs-iterator difference is
        # irrelevant for these small square grids.
        result = 0
        for i in range(len(grid)):
            max_row, max_col = 0, 0
            for j in range(len(grid)):
                if grid[i][j]:
                    result += 1  # top view: cell (i, j) is occupied
                max_row = max(max_row, grid[i][j])  # tallest stack in row i
                max_col = max(max_col, grid[j][i])  # tallest stack in column i
            result += max_row + max_col
        return result
Solution
python
pytorch__pytorch
torch/_refs/fft.py
{ "start": 13042, "end": 17980 }
class ____(NamedTuple):
    """Result bundle for :func:`_canonicalize_fft_c2r_shape_and_dim_args`."""

    # Canonicalized transform shape; the last entry is the complex *input*
    # length for the half-spectrum (last_dim_size // 2 + 1).
    shape: tuple[int, ...]
    # Canonicalized (validated, wrapped) dimensions to transform over.
    dim: tuple[int, ...]
    # Length of the last transformed dimension in the real output.
    last_dim_size: int


def _canonicalize_fft_c2r_shape_and_dim_args(
    fname: str,
    input: TensorLikeType,
    s: Optional[ShapeType],
    dim: Optional[DimsType],
) -> _CanonicalizeC2rReturn:
    """Canonicalize shape and dim arguments for n-dimensional c2r transforms,
    as well as calculating the last_dim_size which is shape[dim[-1]] for the
    output"""
    (shape, dim) = _canonicalize_fft_shape_and_dim_args(input, s, dim)
    torch._check(len(shape) > 0, lambda: f"{fname} must transform at least one axis")
    # When the caller did not pin the output length, infer the full real
    # length from the half-spectrum input: n = 2 * (input_len - 1).
    if s is None or s[-1] == -1:
        last_dim_size = 2 * (input.shape[dim[-1]] - 1)
    else:
        last_dim_size = shape[-1]
    torch._check(
        last_dim_size >= 1,
        lambda: f"Invalid number of data points ({last_dim_size}) specified",
    )
    # The complex input only stores the non-redundant half of the last axis.
    shape_list = list(shape)
    shape_list[-1] = last_dim_size // 2 + 1
    return _CanonicalizeC2rReturn(
        shape=tuple(shape_list), dim=dim, last_dim_size=last_dim_size
    )


@register_decomposition(aten.fft_irfftn)
@out_wrapper()
def irfftn(
    input: TensorLikeType,
    s: Optional[ShapeType] = None,
    dim: Optional[DimsType] = None,
    norm: NormType = None,
) -> TensorLikeType:
    """n-dimensional inverse real FFT: complex half-spectrum in, real out."""
    shape, dim, last_dim_size = _canonicalize_fft_c2r_shape_and_dim_args(
        "irfftn", input, s, dim
    )
    input = _maybe_promote_tensor_fft(input, require_complex=True)
    input = _resize_fft_input(input, dim, shape)
    out = prims.fft_c2r(input, dim=dim, last_dim_size=last_dim_size)
    # Inverse transform: normalize by the total number of output elements
    # across the transformed dims.
    return _apply_norm(out, norm, _prod(out.shape[d] for d in dim), forward=False)


@register_decomposition(aten.fft_hfftn)
@out_wrapper()
def hfftn(
    input: TensorLikeType,
    s: Optional[ShapeType] = None,
    dim: Optional[DimsType] = None,
    norm: NormType = None,
) -> TensorLikeType:
    """n-dimensional FFT of a Hermitian-symmetric signal, producing real output."""
    shape, dim, last_dim_size = _canonicalize_fft_c2r_shape_and_dim_args(
        "hfftn", input, s, dim
    )
    input = _maybe_promote_tensor_fft(input, require_complex=True)
    input = _resize_fft_input(input, dim, shape)
    # Forward c2c over all but the last dim, then express the last-dim
    # transform as a c2r of the conjugate (hfft(x) == irfft-style c2r of
    # conj(x) up to normalization).
    tmp = prims.fft_c2c(input, dim=dim[:-1], forward=True) if len(dim) > 1 else input
    tmp = _apply_norm(tmp, norm, _prod(shape[:-1]), forward=True)
    tmp = prims.conj_physical(tmp)
    out = prims.fft_c2r(tmp, dim=dim[-1:], last_dim_size=last_dim_size)
    return _apply_norm(out, norm, last_dim_size, forward=True)


# The 2-D variants below simply delegate to their n-dimensional
# counterparts with ``dim`` defaulting to the last two axes.


@register_decomposition(aten.fft_fft2)
@out_wrapper()
def fft2(
    input: TensorLikeType,
    s: Optional[ShapeType] = None,
    dim: Optional[DimsType] = (-2, -1),
    norm: NormType = None,
) -> TensorLikeType:
    return torch.fft.fftn(input, s=s, dim=dim, norm=norm)


@register_decomposition(aten.fft_ifft2)
@out_wrapper()
def ifft2(
    input: TensorLikeType,
    s: Optional[ShapeType] = None,
    dim: Optional[DimsType] = (-2, -1),
    norm: NormType = None,
) -> TensorLikeType:
    return torch.fft.ifftn(input, s=s, dim=dim, norm=norm)


@register_decomposition(aten.fft_rfft2)
@out_wrapper()
def rfft2(
    input: TensorLikeType,
    s: Optional[ShapeType] = None,
    dim: Optional[DimsType] = (-2, -1),
    norm: NormType = None,
) -> TensorLikeType:
    return torch.fft.rfftn(input, s=s, dim=dim, norm=norm)


@register_decomposition(aten.fft_irfft2)
@out_wrapper()
def irfft2(
    input: TensorLikeType,
    s: Optional[ShapeType] = None,
    dim: Optional[DimsType] = (-2, -1),
    norm: NormType = None,
) -> TensorLikeType:
    return torch.fft.irfftn(input, s=s, dim=dim, norm=norm)


@register_decomposition(aten.fft_hfft2)
@out_wrapper()
def hfft2(
    input: TensorLikeType,
    s: Optional[ShapeType] = None,
    dim: Optional[DimsType] = (-2, -1),
    norm: NormType = None,
) -> TensorLikeType:
    return torch.fft.hfftn(input, s=s, dim=dim, norm=norm)


@register_decomposition(aten.fft_ihfft2)
@out_wrapper()
def ihfft2(
    input: TensorLikeType,
    s: Optional[ShapeType] = None,
    dim: Optional[DimsType] = (-2, -1),
    norm: NormType = None,
) -> TensorLikeType:
    return torch.fft.ihfftn(input, s=s, dim=dim, norm=norm)


def _default_alldims(dim: Optional[DimsType], x: TensorLikeType) -> list[int]:
    """Convert Optional[DimsType] to a simple list, defaulting to all dimensions"""
    if dim is None:
        return list(range(x.ndim))
    elif not isinstance(dim, Sequence):
        # A single integer dim becomes a one-element list.
        return [dim]
    else:
        return list(dim)
@register_decomposition(aten.fft_fftshift)
def fftshift(input: TensorLikeType, dim: Optional[DimsType] = None) -> TensorLikeType:
    """Roll each selected axis by floor(n/2) so the zero-frequency
    component moves to the center of the spectrum."""
    axes = _default_alldims(dim, input)
    offsets = []
    for axis in axes:
        offsets.append(input.shape[axis] // 2)
    return torch.roll(input, offsets, axes)


@register_decomposition(aten.fft_ifftshift)
def ifftshift(input: TensorLikeType, dim: Optional[DimsType] = None) -> TensorLikeType:
    """Undo :func:`fftshift` by rolling each axis by ceil(n/2); the two
    shifts differ only for odd-length axes."""
    axes = _default_alldims(dim, input)
    offsets = [(input.shape[axis] + 1) // 2 for axis in axes]
    return torch.roll(input, offsets, axes)
_CanonicalizeC2rReturn
python
streamlit__streamlit
lib/tests/streamlit/elements/markdown_test.py
{ "start": 5491, "end": 8217 }
class ____(DeltaGeneratorTestCase):
    """Test st.caption APIs."""

    def test_st_caption_with_help(self):
        """Test st.caption with help."""
        st.caption("some caption", help="help text")

        el = self.get_delta_from_queue().new_element
        assert el.markdown.help == "help text"

    def test_st_caption_with_width(self):
        """Test st.caption with different width types."""
        # (width argument, expected width_spec oneof, proto field name,
        #  expected proto field value)
        test_cases = [
            (400, WidthConfigFields.PIXEL_WIDTH.value, "pixel_width", 400),
            ("stretch", WidthConfigFields.USE_STRETCH.value, "use_stretch", True),
            ("content", WidthConfigFields.USE_CONTENT.value, "use_content", True),
        ]

        for width_value, expected_width_spec, field_name, field_value in test_cases:
            with self.subTest(width_value=width_value):
                st.caption("some caption", width=width_value)

                el = self.get_delta_from_queue().new_element
                assert el.markdown.body == "some caption"
                assert el.width_config.WhichOneof("width_spec") == expected_width_spec
                assert getattr(el.width_config, field_name) == field_value

    def test_st_caption_with_invalid_width(self):
        """Test st.caption with invalid width values."""
        # Each invalid width must raise with exactly this user-facing message.
        test_cases = [
            (
                "invalid",
                "Invalid width value: 'invalid'. Width must be either an integer (pixels), 'stretch', or 'content'.",
            ),
            (
                -50,
                "Invalid width value: -50. Width must be either an integer (pixels), 'stretch', or 'content'.",
            ),
            (
                0,
                "Invalid width value: 0. Width must be either an integer (pixels), 'stretch', or 'content'.",
            ),
            (
                75.5,
                "Invalid width value: 75.5. Width must be either an integer (pixels), 'stretch', or 'content'.",
            ),
        ]

        for width_value, expected_error_message in test_cases:
            with self.subTest(width_value=width_value):
                with pytest.raises(StreamlitAPIException) as exc:
                    st.caption("some caption", width=width_value)
                # Compare the full message, not a substring, to catch wording drift.
                assert str(exc.value) == expected_error_message

    def test_st_caption_default_width(self):
        """Test that st.caption defaults to stretch width."""
        st.caption("some caption")

        el = self.get_delta_from_queue().new_element
        assert el.markdown.body == "some caption"
        assert (
            el.width_config.WhichOneof("width_spec")
            == WidthConfigFields.USE_STRETCH.value
        )
        assert el.width_config.use_stretch is True
StCaptionAPITest
python
scipy__scipy
scipy/fftpack/tests/test_real_transforms.py
{ "start": 17133, "end": 17275 }
class ____(_TestIDSTBase):
    """IDST type-I tests specialized to double precision."""

    def setup_method(self):
        # DST type I, float64 inputs, ~12 decimal places of agreement.
        self.type = 1
        self.rdt = np.float64
        self.dec = 12
TestIDSTIDouble
python
django-haystack__django-haystack
test_haystack/whoosh_tests/testcases.py
{ "start": 93, "end": 1258 }
class ____(TestCase):
    """Shared base for Whoosh backend tests.

    Each file-backed Whoosh connection gets a freshly wiped, newly
    set-up index before the class runs, and its index directory is
    removed again afterwards.
    """

    fixtures = ["base_data"]

    @staticmethod
    def _uses_file_whoosh(conn_settings):
        # Only file-storage Whoosh connections own an on-disk index dir.
        if conn_settings["ENGINE"] != "haystack.backends.whoosh_backend.WhooshEngine":
            return False
        return conn_settings.get("STORAGE", "file") == "file"

    @classmethod
    def setUpClass(cls):
        for name, conn_settings in settings.HAYSTACK_CONNECTIONS.items():
            if not cls._uses_file_whoosh(conn_settings):
                continue

            # Start from a clean index directory.
            if os.path.exists(conn_settings["PATH"]):
                shutil.rmtree(conn_settings["PATH"])

            from haystack import connections

            connections[name].get_backend().setup()

        super().setUpClass()

    @classmethod
    def tearDownClass(cls):
        for conn_settings in settings.HAYSTACK_CONNECTIONS.values():
            if not cls._uses_file_whoosh(conn_settings):
                continue

            # Remove the index directory created for this class.
            if os.path.exists(conn_settings["PATH"]):
                shutil.rmtree(conn_settings["PATH"])

        super().tearDownClass()
WhooshTestCase
python
dagster-io__dagster
examples/docs_snippets/docs_snippets/concepts/resources/resources.py
{ "start": 3230, "end": 5509 }
class ____:
    """Toy connection object constructed by the resource examples below."""

    def __init__(self, connection: str):
        self.connection = connection


@resource(config_schema={"connection": str})
def db_resource(init_context):
    connection = init_context.resource_config["connection"]
    return DatabaseConnection(connection)


# end_resource_config


# Stub helpers referenced by the context-manager resource snippet below.
def get_db_connection():
    return "foo"


def cleanup_db_connection(_db_conn):
    pass


# start_cm_resource
from contextlib import contextmanager


@resource
@contextmanager
def db_connection():
    try:
        db_conn = get_db_connection()
        yield db_conn
    finally:
        cleanup_db_connection(db_conn)


# end_cm_resource


# start_cm_resource_op
@op(required_resource_keys={"db_connection"})
def use_db_connection(context: OpExecutionContext):
    db_conn = context.resources.db_connection
    ...


# end_cm_resource_op


# Placeholder job/helpers; bodies intentionally elided in the docs.
@job
def the_job():
    ...


def get_the_db_connection(_):
    ...


# start_build_resources_example
from dagster import resource, build_resources


@resource
def the_credentials():
    ...


@resource(required_resource_keys={"credentials"})
def the_db_connection(init_context):
    get_the_db_connection(init_context.resources.credentials)


def uses_db_connection():
    with build_resources(
        {"db_connection": the_db_connection, "credentials": the_credentials}
    ) as resources:
        conn = resources.db_connection
        ...


# end_build_resources_example


def do_something_with_resource(_):
    pass


# start_asset_use_resource
from dagster import asset, AssetExecutionContext


@asset(required_resource_keys={"foo"})
def asset_requires_resource(context: AssetExecutionContext):
    do_something_with_resource(context.resources.foo)


# end_asset_use_resource


# Resource supplied to the asset in the provide-resource snippets.
@resource
def foo_resource():
    ...
# start_asset_provide_resource from dagster import Definitions defs = Definitions( assets=[asset_requires_resource], resources={"foo": foo_resource}, ) # end_asset_provide_resource # start_asset_provide_resource_using_repository from dagster import repository, with_resources @repository def repo(): return [ *with_resources( definitions=[asset_requires_resource], resource_defs={"foo": foo_resource}, ) ] # end_asset_provide_resource_using_repository
DatabaseConnection
python
scrapy__scrapy
tests/test_loader.py
{ "start": 945, "end": 1036 }
class ____(NameItemLoader):
    # Input processor for the ``name`` field: MapCompose applies the
    # callable to every extracted value independently, so each value is
    # title-cased before being collected.
    name_in = MapCompose(lambda v: v.title())
ProcessorItemLoader
python
numba__numba
numba/tests/test_debug.py
{ "start": 7445, "end": 12092 }
class ____(TestCase):
    """
    Tests debug options associated with parfors
    """
    # mutates env with os.environ so must be run serially
    _numba_parallel_test_ = False

    def check_parfors_warning(self, warn_list):
        # Substring match: the warning may carry extra context around the
        # core message.
        msg = ("'parallel=True' was specified but no transformation for "
               "parallel execution was possible.")
        warning_found = False
        for w in warn_list:
            if msg in str(w.message):
                warning_found = True
                break
        self.assertTrue(warning_found, "Warning message should be found.")

    def check_parfors_unsupported_prange_warning(self, warn_list):
        # Same substring-match approach as check_parfors_warning above.
        msg = ("prange or pndindex loop will not be executed in parallel "
               "due to there being more than one entry to or exit from the "
               "loop (e.g., an assertion).")
        warning_found = False
        for w in warn_list:
            if msg in str(w.message):
                warning_found = True
                break
        self.assertTrue(warning_found, "Warning message should be found.")

    @needs_blas
    @skip_parfors_unsupported
    def test_warns(self):
        """
        Test that using parallel=True on a function that does not have
        parallel semantics warns.
        """
        arr_ty = types.Array(types.float64, 2, "C")
        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter("always", NumbaPerformanceWarning)
            njit((arr_ty, arr_ty), parallel=True)(unsupported_parfor)
        self.check_parfors_warning(w)

    @needs_blas
    @skip_parfors_unsupported
    def test_unsupported_prange_warns(self):
        """
        Test that prange with multiple exits issues a warning
        """
        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter("always", NumbaPerformanceWarning)
            njit((types.int64,), parallel=True)(unsupported_prange)
        self.check_parfors_unsupported_prange_warning(w)

    @skip_parfors_unsupported
    def test_array_debug_opt_stats(self):
        """
        Test that NUMBA_DEBUG_ARRAY_OPT_STATS produces valid output
        """
        # deliberately trigger a compilation loop to increment the
        # Parfor class state, this is to ensure the test works based
        # on indices computed based on this state and not hard coded
        # indices.
        njit((types.int64,), parallel=True)(supported_parfor)
        with override_env_config('NUMBA_DEBUG_ARRAY_OPT_STATS', '1'):
            with captured_stdout() as out:
                njit((types.int64,), parallel=True)(supported_parfor)
        # grab the various parts out the output
        output = out.getvalue().split('\n')
        parallel_loop_output = \
            [x for x in output if 'is produced from pattern' in x]
        fuse_output = \
            [x for x in output if 'is fused into' in x]
        after_fusion_output = \
            [x for x in output if 'After fusion, function' in x]
        # Parfor's have a shared state index, grab the current value
        # as it will be used as an offset for all loop messages
        parfor_state = int(re.compile(r'#([0-9]+)').search(
            parallel_loop_output[0]).group(1))
        bounds = range(parfor_state, parfor_state + len(parallel_loop_output))
        # Check the Parallel for-loop <index> is produced from <pattern>
        # works first
        pattern = ("('ones function', 'NumPy mapping')",
                   ('prange', 'user', ''))
        fmt = 'Parallel for-loop #{} is produced from pattern \'{}\' at'
        for i, trials, lpattern in zip(bounds, parallel_loop_output, pattern):
            to_match = fmt.format(i, lpattern)
            self.assertIn(to_match, trials)
        # Check the fusion statements are correct
        pattern = (parfor_state + 1, parfor_state + 0)
        fmt = 'Parallel for-loop #{} is fused into for-loop #{}.'
        for trials in fuse_output:
            to_match = fmt.format(*pattern)
            self.assertIn(to_match, trials)
        # Check the post fusion statements are correct
        pattern = (supported_parfor.__name__, 1, set([parfor_state]))
        fmt = 'After fusion, function {} has {} parallel for-loop(s) #{}.'
        for trials in after_fusion_output:
            to_match = fmt.format(*pattern)
            self.assertIn(to_match, trials)


if __name__ == '__main__':
    unittest.main()
TestParforsDebug
python
joke2k__faker
faker/providers/person/es_CO/__init__.py
{ "start": 83, "end": 35579 }
class ____(PersonProvider): formats_female = [ "{{first_name_female}} {{last_name}}", "{{first_name_female}} {{first_name_female}} {{last_name}}", "{{first_name_female}} {{last_name}} {{last_name}}", "{{first_name_female}} {{first_name_female}} {{last_name}} {{last_name}}", ] formats_male = [ "{{first_name_male}} {{last_name}}", "{{first_name_male}} {{first_name_male}} {{last_name}}", "{{first_name_male}} {{last_name}} {{last_name}}", "{{first_name_male}} {{first_name_male}} {{last_name}} {{last_name}}", ] formats = formats_female + formats_male # 300 female first names # Source: Most frequent female first names from the dataset "Ciudadanía Digital" # <https://www.datos.gov.co/Ciencia-Tecnolog-a-e-Innovaci-n/Ciudadan-a-Digital/g4cd-bvpd> # Weightings derived from total number on each name first_names_female = OrderedDict( [ ("María", 0.091410602), ("Luz", 0.034645603), ("Patricia", 0.023351346), ("Ana", 0.020151805), ("Andrea", 0.018010166), ("Diana", 0.016136401), ("Sandra", 0.015622408), ("Martha", 0.014981956), ("Paola", 0.014810625), ("Carmen", 0.013346151), ("Marcela", 0.013283602), ("Isabel", 0.013113631), ("Milena", 0.012810402), ("Lucía", 0.012599637), ("Cecilia", 0.012194425), ("Claudia", 0.011997259), ("Carolina", 0.011933349), ("Gloria", 0.011810970), ("Rosa", 0.011619242), ("Liliana", 0.011109328), ("Elena", 0.010636128), ("Alejandra", 0.010181965), ("Adriana", 0.009594544), ("Laura", 0.009359303), ("Fernanda", 0.008134150), ("Marina", 0.008090637), ("Inés", 0.007652791), ("Lorena", 0.007152395), ("Ángela", 0.007043613), ("Cristina", 0.006926673), ("Leidy", 0.006914435), ("Daniela", 0.006910356), ("Olga", 0.006802934), ("Esther", 0.006773019), ("Tatiana", 0.006690073), ("Johana", 0.006411320), ("Rocío", 0.006339252), ("Beatriz", 0.006067298), ("Lina", 0.006034663), ("Mónica", 0.006007468), ("Alba", 0.006002029), ("Luisa", 0.005946278), ("Esperanza", 0.005904125), ("Yaneth", 0.005904125), ("Blanca", 0.005881009), ("Amparo", 0.005864692), ("Alexandra", 
0.005845655), ("Nancy", 0.005670245), ("Margarita", 0.005626732), ("Elizabeth", 0.005584579), ("Stella", 0.005570981), ("Karen", 0.005569622), ("Angie", 0.005380613), ("Viviana", 0.005365656), ("Natalia", 0.005086903), ("Mercedes", 0.005077385), ("Eugenia", 0.004825827), ("Valentina", 0.004602825), ("Yolanda", 0.004540275), ("Angélica", 0.004498122), ("Paula", 0.004454609), ("Camila", 0.004389340), ("Teresa", 0.004377103), ("Sofía", 0.004315913), ("Vanessa", 0.004121466), ("Nelly", 0.004039879), ("Mary", 0.004038520), ("Gladys", 0.003903902), ("Ruth", 0.003796480), ("Flor", 0.003716254), ("Nubia", 0.003708095), ("Johanna", 0.003701296), ("Erika", 0.003684979), ("Doris", 0.003572118), ("Judith", 0.003490532), ("Dayana", 0.003472855), ("Sonia", 0.003355915), ("Maritza", 0.003334158), ("Edith", 0.003267529), ("Clara", 0.003244413), ("Consuelo", 0.003215858), ("Julieth", 0.003123394), ("Juliana", 0.003107077), ("Dora", 0.003092119), ("Victoria", 0.002935745), ("Aura", 0.002856879), ("Eliana", 0.002855519), ("Astrid", 0.002844641), ("Alicia", 0.002841921), ("Katherine", 0.002833763), ("Janeth", 0.002782091), ("Leonor", 0.002742658), ("Myriam", 0.002676029), ("Lizeth", 0.002648834), ("Mery", 0.002616199), ("Nidia", 0.002609400), ("Jenny", 0.002511497), ("Kelly", 0.002495180), ("Constanza", 0.002423112), ("Ximena", 0.002387758), ("Yulieth", 0.002376880), ("Ligia", 0.002375520), ("Sara", 0.002361922), ("Socorro", 0.002338806), ("Ingrid", 0.002322489), ("Helena", 0.002319769), ("Dary", 0.002318409), ("Rosalba", 0.002280336), ("Catalina", 0.002254500), ("Fanny", 0.002216427), ("Pilar", 0.002137560), ("Yenny", 0.002126682), ("Mariela", 0.002080449), ("Yamile", 0.002062772), ("Lilia", 0.002038297), ("Karina", 0.002011101), ("Mariana", 0.001977107), ("Silvia", 0.001953991), ("Julia", 0.001939033), ("Maribel", 0.001898240), ("Amanda", 0.001891441), ("Lucy", 0.001888722), ("Elsa", 0.001884642), ("Jessica", 0.001869685), ("Rosario", 0.001865606), ("Fabiola", 0.001847929), 
("Marleny", 0.001808495), ("Marta", 0.001805776), ("Elvira", 0.001788099), ("Omaira", 0.001769062), ("Yuliana", 0.001756824), ("Mireya", 0.001752745), ("Marisol", 0.001695634), ("Piedad", 0.001673878), ("Rubiela", 0.001668439), ("Deisy", 0.001665719), ("Estela", 0.001595011), ("Miriam", 0.001552858), ("Manuela", 0.001537901), ("Jennifer", 0.001536541), ("Graciela", 0.001497108), ("Yadira", 0.001494388), ("Elisa", 0.001488949), ("Yolima", 0.001472632), ("Ruby", 0.001450876), ("Gabriela", 0.001448156), ("Libia", 0.001445436), ("Yohana", 0.001430479), ("Karol", 0.001422320), ("Bibiana", 0.001397844), ("Katerine", 0.001396485), ("Edilma", 0.001389686), ("Nohora", 0.001378808), ("Shirley", 0.001365210), ("Xiomara", 0.001321697), ("Francy", 0.001316258), ("Betty", 0.001305380), ("Melissa", 0.001297221), ("Estella", 0.001272746), ("Carmenza", 0.001271386), ("Edna", 0.001261867), ("Mayra", 0.001260508), ("Vanesa", 0.001259148), ("Lida", 0.001245550), ("Raquel", 0.001244190), ("Estefanía", 0.001231952), ("Hilda", 0.001230593), ("Mabel", 0.001222434), ("Cindy", 0.001212916), ("Liseth", 0.001208836), ("Wendy", 0.001199318), ("Lady", 0.001185720), ("Yésica", 0.001180281), ("Mayerly", 0.001173482), ("Verónica", 0.001173482), ("Norma", 0.001157165), ("Gina", 0.001150366), ("Susana", 0.001150366), ("Yesenia", 0.001144927), ("Maira", 0.001142208), ("Nora", 0.001134049), ("Marlene", 0.001128610), ("Valeria", 0.001124531), ("Elvia", 0.001116372), ("Yasmín", 0.001116372), ("Bertha", 0.001113652), ("Aida", 0.001112293), ("Tania", 0.001102774), ("Ester", 0.001071499), ("Yineth", 0.001067420), ("Dolores", 0.001045664), ("Irma", 0.001041585), ("Matilde", 0.001036145), ("Aurora", 0.001029347), ("Magda", 0.001022548), ("Miryam", 0.001022548), ("Esmeralda", 0.001007590), ("Lucero", 0.001006230), ("Lucila", 0.001003511), ("Gladis", 0.001000791), ("Juana", 0.000984474), ("Jimena", 0.000983114), ("Margoth", 0.000965437), ("Antonia", 0.000961358), ("Yuri", 0.000953199), ("Josefina", 
0.000950480), ("Liceth", 0.000939602), ("Yuli", 0.000932803), ("Nury", 0.000930083), ("Nataly", 0.000924644), ("Vilma", 0.000921925), ("Yéssica", 0.000921925), ("Yudy", 0.000921925), ("Yuly", 0.000916486), ("Heidy", 0.000913766), ("Nelcy", 0.000890650), ("Ofelia", 0.000889290), ("Jhoana", 0.000887930), ("Gilma", 0.000875692), ("Zoraida", 0.000868894), ("Enith", 0.000856656), ("Elsy", 0.000853936), ("Clemencia", 0.000828100), ("Danna", 0.000824021), ("Emilia", 0.000818582), ("Cielo", 0.000817222), ("Linda", 0.000817222), ("Damaris", 0.000815863), ("Delia", 0.000811783), ("Irene", 0.000810423), ("Virginia", 0.000803625), ("Paulina", 0.000796826), ("Alcira", 0.000790027), ("Magaly", 0.000785948), ("Ivonne", 0.000779149), ("Oliva", 0.000768271), ("Yina", 0.000753313), ("Carol", 0.000745154), ("Geraldine", 0.000745154), ("Yeimy", 0.000738356), ("Magdalena", 0.000732917), ("Yanet", 0.000731557), ("Yazmín", 0.000730197), ("Sindy", 0.000728837), ("Dahiana", 0.000726118), ("Alix", 0.000724758), ("Rita", 0.000724758), ("Lidia", 0.000719319), ("Marlén", 0.000716599), ("Michel", 0.000715239), ("Yeny", 0.000708441), ("Marly", 0.000703002), ("Dolly", 0.000698922), ("Leydi", 0.000694843), ("Nayibe", 0.000694843), ("Yury", 0.000693483), ("Denis", 0.000690764), ("Derly", 0.000689404), ("Yurany", 0.000688044), ("Emilce", 0.000685325), ("Tulia", 0.000683965), ("Yenifer", 0.000681245), ("Anyi", 0.000677166), ("Francia", 0.000673087), ("Jazmín", 0.000671727), ("Josefa", 0.000671727), ("Janneth", 0.000669007), ("Emilse", 0.000662208), ("Jackeline", 0.000649970), ("Concepción", 0.000645891), ("Dina", 0.000644531), ("Lisbeth", 0.000640452), ("Nathalia", 0.000640452), ("Amelia", 0.000639092), ("Emma", 0.000637733), ("Jacqueline", 0.000637733), ("Zulma", 0.000637733), ("Maricela", 0.000632293), ("Adela", 0.000629574), ("Ibeth", 0.000629574), ("Candelaria", 0.000615976), ("Mirian", 0.000613257), ("Magnolia", 0.000611897), ("Elcy", 0.000606458), ("Aleida", 0.000603738), ("Eva", 0.000598299), 
("Gisela", 0.000598299), ("Yurani", 0.000596939), ("Azucena", 0.000595580), ("Amalia", 0.000591500), ("Deicy", 0.000590141), ("Nelsy", 0.000588781), ("Iris", 0.000584701), ("Katherin", 0.000584701), ("Lilian", 0.000584701), ("Leticia", 0.000575183), ("Deyanira", 0.000573823), ("Melba", 0.000573823), ("Adiela", 0.000567024), ("Catherine", 0.000557506), ] ) # 300 male first names # Source: Most frequent male first names from the dataset "Ciudadanía Digital" # <https://www.datos.gov.co/Ciencia-Tecnolog-a-e-Innovaci-n/Ciudadan-a-Digital/g4cd-bvpd> # Weightings derived from total number on each name first_names_male = OrderedDict( [ ("José", 0.048691700), ("Andrés", 0.039716926), ("Luis", 0.038832502), ("Carlos", 0.037462599), ("Juan", 0.032670871), ("Alberto", 0.021566442), ("David", 0.020957759), ("Antonio", 0.019540921), ("Fernando", 0.019087709), ("Jorge", 0.016852449), ("Jesús", 0.016303901), ("Alexander", 0.015906424), ("Enrique", 0.015438545), ("Javier", 0.014926665), ("Manuel", 0.013744500), ("Eduardo", 0.013292754), ("Jhon", 0.012868877), ("Diego", 0.012004987), ("Camilo", 0.011381637), ("Alejandro", 0.011356703), ("Óscar", 0.010821355), ("Daniel", 0.010819889), ("Miguel", 0.010749487), ("Felipe", 0.010291874), ("Jairo", 0.010011734), ("Mauricio", 0.009859196), ("César", 0.009266647), ("Alfonso", 0.008726899), ("Rafael", 0.008559695), ("Cristian", 0.008083016), ("Sebastián", 0.007534468), ("Ángel", 0.007062188), ("Iván", 0.007059255), ("Jaime", 0.007024054), ("Julio", 0.006956586), ("Julián", 0.006823115), ("Fabián", 0.006730713), ("Dario", 0.006658844), ("William", 0.006591376), ("Orlando", 0.006400704), ("Francisco", 0.006356703), ("Ricardo", 0.006339102), ("Mario", 0.006192432), ("Edwin", 0.006070695), ("John", 0.006014960), ("Víctor", 0.005956292), ("Leonardo", 0.005865356), ("Armando", 0.005843356), ("Augusto", 0.005786154), ("Pablo", 0.005552948), ("Álvaro", 0.005506013), ("Hernán", 0.005488413), ("Fredy", 0.005476679), ("Pedro", 0.005412144), ("Héctor", 
0.005325609), ("Santiago", 0.005315342), ("Edgar", 0.005305075), ("Gustavo", 0.005247873), ("Hernando", 0.005161338), ("Guillermo", 0.005145204), ("Esteban", 0.005055735), ("Humberto", 0.004832796), ("Nelson", 0.004813728), ("Wilson", 0.004730126), ("Arturo", 0.004656791), ("Gabriel", 0.004545321), ("Alfredo", 0.004297448), ("Omar", 0.004282781), ("Germán", 0.004224113), ("Henry", 0.003967439), ("Fabio", 0.003961572), ("Alonso", 0.003751833), ("Brayan", 0.003709299), ("Hugo", 0.003684365), ("Sergio", 0.003650631), ("Alexis", 0.003374890), ("Adolfo", 0.003335289), ("Stiven", 0.003276621), ("Kevin", 0.003134350), ("Johan", 0.003075682), ("Yesid", 0.003056615), ("Ernesto", 0.002924611), ("Raúl", 0.002763274), ("Rodrigo", 0.002694339), ("Roberto", 0.002585802), ("Rubén", 0.002560868), ("Anderson", 0.002525667), ("Eliecer", 0.002511000), ("Nicolás", 0.002471399), ("Ferney", 0.002395131), ("Steven", 0.002390730), ("Emilio", 0.002324729), ("Yeison", 0.002311528), ("Arley", 0.002222059), ("Néstor", 0.002200059), ("Albeiro", 0.002125257), ("Wilmer", 0.002101789), ("Gerardo", 0.002095923), ("Jair", 0.002091522), ("Jonathan", 0.002032854), ("Marco", 0.001984453), ("Elkin", 0.001971253), ("Harold", 0.001940452), ("Martín", 0.001915518), ("Elías", 0.001912584), ("Robinson", 0.001908184), ("Alirio", 0.001884717), ("Edison", 0.001871517), ("Adrián", 0.001861250), ("Edinson", 0.001840716), ("Ramiro", 0.001831916), ("Jhonatan", 0.001828982), ("León", 0.001820182), ("Milton", 0.001770314), ("Oswaldo", 0.001726313), ("Ignacio", 0.001714579), ("Freddy", 0.001692578), ("Segundo", 0.001663244), ("Ramón", 0.001651511), ("Duván", 0.001632444), ("Samuel", 0.001554708), ("Gilberto", 0.001535641), ("Walter", 0.001453505), ("Alex", 0.001428571), ("Libardo", 0.001422705), ("James", 0.001421238), ("Vicente", 0.001394837), ("Ariel", 0.001378703), ("Danilo", 0.001377237), ("Giovanny", 0.001353769), ("Gregorio", 0.001334702), ("Gonzalo", 0.001325902), ("Michael", 0.001267234), ("Marlon", 
0.001251100), ("Efrain", 0.001186565), ("Mateo", 0.001180698), ("Bernardo", 0.001167498), ("Leandro", 0.001164564), ("Jhoan", 0.001158698), ("Rodolfo", 0.001155764), ("Joaquín", 0.001127897), ("Felix", 0.001089762), ("Jeison", 0.001080962), ("Leonel", 0.001078029), ("Estiven", 0.001070695), ("Wilmar", 0.001063362), ("Edward", 0.001057495), ("Christian", 0.001054561), ("Jefferson", 0.001054561), ("Ronald", 0.000992960), ("René", 0.000990026), ("Aníbal", 0.000972426), ("Richard", 0.000966559), ("Andrey", 0.000959226), ("Jean", 0.000956292), ("Jaider", 0.000954825), ("Darwin", 0.000951892), ("Ever", 0.000951892), ("Arnulfo", 0.000950425), ("Giovanni", 0.000940158), ("Emiro", 0.000934292), ("Uriel", 0.000929891), ("Franklin", 0.000924025), ("Edilberto", 0.000912291), ("Smith", 0.000897624), ("Octavio", 0.000890290), ("Cristhian", 0.000875623), ("Tulio", 0.000875623), ("Eduar", 0.000862423), ("Junior", 0.000859490), ("Didier", 0.000855089), ("Reinaldo", 0.000847756), ("Fidel", 0.000830155), ("Willian", 0.000819889), ("Jimmy", 0.000815488), ("Eder", 0.000758287), ("Isaac", 0.000758287), ("Saúl", 0.000746553), ("Danny", 0.000745087), ("Marcos", 0.000740686), ("Yair", 0.000730419), ("Moisés", 0.000724553), ("Edwar", 0.000723086), ("Jhonny", 0.000718686), ("Miller", 0.000717219), ("Santos", 0.000717219), ("Esneider", 0.000715752), ("Franco", 0.000714286), ("Abel", 0.000705485), ("Dairo", 0.000701085), ("Roger", 0.000701085), ("Aldemar", 0.000695219), ("Rolando", 0.000693752), ("Hermes", 0.000689352), ("Jeferson", 0.000684952), ("Efrén", 0.000679085), ("Jeisson", 0.000679085), ("Ismael", 0.000674685), ("Edgardo", 0.000673218), ("Maicol", 0.000661484), ("Brandon", 0.000654151), ("Bryan", 0.000654151), ("Robert", 0.000649751), ("Eduard", 0.000636550), ("Nilson", 0.000635084), ("Agustín", 0.000632150), ("Tomás", 0.000630683), ("Edilson", 0.000621883), ("Aurelio", 0.000618950), ("Domingo", 0.000618950), ("Arbey", 0.000616016), ("Joan", 0.000614550), ("Yeferson", 0.000607216), 
("Samir", 0.000602816), ("Wilder", 0.000602816), ("Ciro", 0.000599883), ("Josué", 0.000598416), ("Joel", 0.000596949), ("Horacio", 0.000591082), ("Jader", 0.000591082), ("Gerson", 0.000583749), ("Marino", 0.000579349), ("Erick", 0.000572015), ("Eugenio", 0.000561748), ("Benjamín", 0.000558815), ("Norberto", 0.000557348), ("Alcides", 0.000555881), ("Israel", 0.000539748), ("Yamid", 0.000535348), ("Emerson", 0.000525081), ("Frank", 0.000504547), ("Geovanny", 0.000504547), ("Gildardo", 0.000495747), ("Vladimir", 0.000495747), ("Silvio", 0.000485480), ("Dagoberto", 0.000479613), ("Misael", 0.000472279), ("Adalberto", 0.000464946), ("Elmer", 0.000464946), ("Campo", 0.000460546), ("Herney", 0.000456145), ("Eider", 0.000454679), ("Farid", 0.000451745), ("Edisson", 0.000447345), ("Evelio", 0.000442945), ("Bladimir", 0.000429745), ("Heriberto", 0.000423878), ("Sneider", 0.000422411), ("Nel", 0.000419478), ("Rigoberto", 0.000419478), ("Jhony", 0.000416544), ("Salvador", 0.000415078), ("Argemiro", 0.000410678), ("Brian", 0.000407744), ("Abelardo", 0.000404811), ("Federico", 0.000401877), ("Jonatan", 0.000398944), ("Wilfredo", 0.000397477), ("Faber", 0.000396011), ("Osvaldo", 0.000394544), ("Simón", 0.000394544), ("Elver", 0.000390144), ("Alveiro", 0.000388677), ("Jerson", 0.000385744), ("Ovidio", 0.000381344), ("Elvis", 0.000375477), ("Norbey", 0.000375477), ("Wilman", 0.000374010), ("Johnny", 0.000372543), ("Cristobal", 0.000368143), ("Harvey", 0.000366676), ("Ancizar", 0.000363743), ("Yerson", 0.000363743), ("Román", 0.000362276), ("Ronal", 0.000362276), ("Reinel", 0.000360810), ("Albert", 0.000359343), ("Darío", 0.000359343), ("Edier", 0.000357876), ("Neider", 0.000353476), ("Harol", 0.000352009), ("Paulo", 0.000352009), ("Deiby", 0.000347609), ("Dany", 0.000346143), ("Leider", 0.000341742), ("Damián", 0.000340276), ("Aldair", 0.000335876), ("Gallego", 0.000335876), ("Abraham", 0.000332942), ("Yecid", 0.000331476), ("Ocampo", 0.000324142), ("Wilfrido", 0.000324142), 
("Lorenzo", 0.000318275), ("Paul", 0.000318275), ("Wilber", 0.000316808), ("Bayron", 0.000315342), ("Dubán", 0.000312408), ("Jhan", 0.000312408), ("Isaías", 0.000310942), ("Isidro", 0.000310942), ] ) first_names = first_names_female.copy() first_names.update(first_names_male) # 300 last names # Source: Most frequent last names from the dataset "Ciudadanía Digital" # <https://www.datos.gov.co/Ciencia-Tecnolog-a-e-Innovaci-n/Ciudadan-a-Digital/g4cd-bvpd> # Weightings derived from total number on each name last_names = OrderedDict( [ ("Rodríguez", 0.027384697), ("Gómez", 0.020422368), ("Martínez", 0.020115369), ("García", 0.019433148), ("López", 0.019162104), ("González", 0.018265076), ("Hernández", 0.01699467), ("Sánchez", 0.016893259), ("Pérez", 0.016406486), ("Díaz", 0.015069702), ("Ramírez", 0.014970134), ("Rojas", 0.012601722), ("Torres", 0.012484639), ("Moreno", 0.01134238), ("Vargas", 0.010733913), ("Muñoz", 0.010541231), ("Ortiz", 0.01009871), ("Castro", 0.009097505), ("Gutiérrez", 0.008656827), ("Jiménez", 0.008560948), ("Suárez", 0.008066799), ("Álvarez", 0.008056658), ("Ruiz", 0.007958934), ("Valencia", 0.007941418), ("Quintero", 0.00766392), ("Herrera", 0.007485989), ("Romero", 0.00748138), ("Mosquera", 0.007114455), ("Morales", 0.007082188), ("Arias", 0.006243241), ("Rivera", 0.006023824), ("Flórez", 0.005914116), ("Giraldo", 0.005782281), ("Medina", 0.005736185), ("Castillo", 0.005722356), ("Parra", 0.005665197), ("Peña", 0.005635696), ("Guerrero", 0.005407982), ("Salazar", 0.005365573), ("Osorio", 0.005327775), ("Mejía", 0.005317634), ("Mendoza", 0.005201472), ("Marín", 0.005053043), ("Cardona", 0.00496546), ("Cárdenas", 0.004892629), ("Cruz", 0.004795827), ("Restrepo", 0.004729449), ("Correa", 0.004724839), ("Ortega", 0.004712854), ("Acosta", 0.004640023), ("Ramos", 0.004636335), ("Reyes", 0.004593005), ("Rincón", 0.004554284), ("Zapata", 0.004487906), ("Sierra", 0.004380963), ("Mora", 0.004333945), ("Palacios", 0.004313663), ("Molina", 0.004285083), 
("Delgado", 0.004150483), ("Guzmán", 0.004148639), ("Silva", 0.00413942), ("Contreras", 0.004136654), ("Lozano", 0.004089636), ("Montoya", 0.004063823), ("Ríos", 0.003995601), ("Vásquez", 0.003978084), ("Caicedo", 0.003936598), ("Cortés", 0.003899721), ("Velásquez", 0.003888658), ("Londoño", 0.003881283), ("Ospina", 0.003877595), ("Jaramillo", 0.003845328), ("Córdoba", 0.003807529), ("Escobar", 0.003759589), ("Murillo", 0.003740229), ("Orozco", 0.00373101), ("Fernández", 0.003705196), ("Vega", 0.003632364), ("Hurtado", 0.003556767), ("Carvajal", 0.003517124), ("Agudelo", 0.00351528), ("Calderón", 0.003481169), ("León", 0.003475638), ("Ávila", 0.003279269), ("Garzón", 0.003224876), ("Beltrán", 0.0032175), ("Trujillo", 0.00320275), ("Pineda", 0.003086588), ("Méndez", 0.003059852), ("Barrera", 0.003041414), ("Acevedo", 0.002976879), ("Henao", 0.002974113), ("Bedoya", 0.002971348), ("Franco", 0.002971348), ("Jaimes", 0.002916954), ("Bernal", 0.002892063), ("Arango", 0.002830294), ("Hoyos", 0.002818309), ("Navarro", 0.002813699), ("Durán", 0.002772213), ("Vergara", 0.002759306), ("Soto", 0.002739024), ("Camacho", 0.002672646), ("Sandoval", 0.002652363), ("Gil", 0.002644988), ("Buitrago", 0.002634847), ("Duarte", 0.002609033), ("Carrillo", 0.002527904), ("Duque", 0.002526982), ("Pacheco", 0.002519607), ("Barrios", 0.002500247), ("Aguirre", 0.002496559), ("Vélez", 0.002459682), ("Benavides", 0.002455994), ("Bermúdez", 0.002447697), ("Narváez", 0.002442166), ("Rueda", 0.002432025), ("Toro", 0.002431103), ("Blanco", 0.002385007), ("Amaya", 0.002381319), ("Forero", 0.002380397), ("Becerra", 0.002371178), ("Pinzón", 0.002364724), ("Camargo", 0.002363802), ("Vanegas", 0.002347208), ("Bonilla", 0.002326004), ("Padilla", 0.002326004), ("Ariza", 0.00231955), ("Ardila", 0.002313097), ("Galvis", 0.0023048), ("Daza", 0.002289127), ("Mena", 0.002284517), ("Villamizar", 0.002254094), ("Sarmiento", 0.002245797), ("Cano", 0.002234734), ("Zambrano", 0.00223289), ("Espinosa", 0.00222828), 
("Gallego", 0.00222828), ("Tovar", 0.002224593), ("Uribe", 0.002219061), ("Ochoa", 0.002190482), ("Castellanos", 0.002181262), ("Cabrera", 0.002177575), ("Castañeda", 0.002145307), ("Solano", 0.002143464), ("Fuentes", 0.002104743), ("Perdomo", 0.002103821), ("Guevara", 0.002101977), ("Castaño", 0.002077085), ("Patiño", 0.002046662), ("Ocampo", 0.002029146), ("Páez", 0.002020848), ("Serna", 0.002020848), ("Mesa", 0.002005176), ("Angulo", 0.001987659), ("Mercado", 0.001982128), ("Alzate", 0.001969221), ("Rosero", 0.001952626), ("Bautista", 0.001948939), ("Vera", 0.001932344), ("Meneses", 0.0019305), ("Arenas", 0.001922203), ("Cifuentes", 0.001902843), ("Arévalo", 0.001896389), ("Montes", 0.001878873), ("Arrieta", 0.001876107), ("Guerra", 0.001869653), ("Aguilar", 0.001855825), ("Ayala", 0.001849371), ("Figueroa", 0.001845684), ("Fonseca", 0.001840152), ("Pinto", 0.001832777), ("Bravo", 0.001805119), ("Luna", 0.001801431), ("Niño", 0.001798666), ("Salcedo", 0.00179129), ("Serrano", 0.001790368), ("Roa", 0.001773774), ("Palacio", 0.001770086), ("Perea", 0.001765476), ("Velasco", 0.001761789), ("Villa", 0.001760867), ("Sepúlveda", 0.001755335), ("Benítez", 0.001744272), ("Meza", 0.001741507), ("Sanabria", 0.001737819), ("Miranda", 0.001722146), ("Gaitán", 0.001714771), ("Melo", 0.00170463), ("Márquez", 0.001678816), ("Ordóñez", 0.001675128), ("Zuluaga", 0.001673285), ("Andrade", 0.001663143), ("Estrada", 0.00165669), ("Prieto", 0.00162811), ("Alvarado", 0.001624423), ("Leal", 0.001623501), ("Gaviria", 0.001616125), ("Salas", 0.001603219), ("Polo", 0.001597687), ("Bohórquez", 0.001585702), ("Arboleda", 0.001580171), ("Pulido", 0.001580171), ("Pardo", 0.001579249), ("Monsalve", 0.001575561), ("Cuéllar", 0.001573717), ("Rangel", 0.001571873), ("Nieto", 0.001570029), ("Loaiza", 0.00156542), ("Rivas", 0.001562654), ("Murcia", 0.001561732), ("Campo", 0.001555279), ("Naranjo", 0.001555279), ("Galindo", 0.001538684), ("Santos", 0.001537762), ("Lara", 0.001532231), ("Triana", 
0.001510105), ("Burbano", 0.001485213), ("Maldonado", 0.001485213), ("Galeano", 0.001476916), ("Pabón", 0.001464931), ("Rentería", 0.001462165), ("Espitia", 0.001458477), ("Fajardo", 0.001457555), ("Gamboa", 0.001455711), ("Chávez", 0.001436351), ("Vallejo", 0.001435429), ("Barreto", 0.001431742), ("Caro", 0.001415147), ("Ceballos", 0.001407772), ("Alarcón", 0.001405006), ("Prada", 0.00140224), ("Villegas", 0.001384724), ("Cáceres", 0.001381958), ("Caballero", 0.001380114), ("Salgado", 0.001380114), ("Velandia", 0.001373661), ("Carmona", 0.001365363), ("Chaparro", 0.001364441), ("Oviedo", 0.001360754), ("Granados", 0.001348769), ("Montenegro", 0.001348769), ("Saavedra", 0.00133955), ("Betancur", 0.001338628), ("Rubio", 0.001335862), ("Cuesta", 0.001312814), ("Rico", 0.001300829), ("Ballesteros", 0.001299907), ("Ibarra", 0.001298985), ("Valderrama", 0.001283312), ("Barbosa", 0.001277781), ("Garcés", 0.001269484), ("Monroy", 0.001253811), ("Erazo", 0.001251045), ("Núñez", 0.001245514), ("Quiroga", 0.001231685), ("Angarita", 0.001230763), ("Cantillo", 0.001227997), ("Posada", 0.001214168), ("Pedraza", 0.001210481), ("Arteaga", 0.001204027), ("Yepes", 0.001204027), ("Bustos", 0.001198496), ("Olaya", 0.001196652), ("Salamanca", 0.001189277), ("Burgos", 0.001186511), ("Corredor", 0.001180979), ("Alfonso", 0.001173604), ("Paz", 0.001168072), ("Parada", 0.001161619), ("Bolaños", 0.001150556), ("Tamayo", 0.001149634), ("Manrique", 0.001144103), ("Domínguez", 0.001138571), ("Cardozo", 0.001134883), ("Quiroz", 0.001134883), ("Bastidas", 0.001127508), ("Obando", 0.001112757), ("Rendón", 0.001112757), ("Mantilla", 0.001109991), ("Gonzáles", 0.001107226), ("Puentes", 0.00110446), ("Bejarano", 0.001088787), ("Riascos", 0.001086943), ("Castrillón", 0.001086022), ("Bustamante", 0.0010851), ("Rengifo", 0.0010851), ("Ospino", 0.001083256), ("Ojeda", 0.001081412), ("Villamil", 0.001073115), ("Cerón", 0.00105652), ("Arroyo", 0.001055598), ("Ángel", 0.001053754), ("Chacón", 
0.001050067), ("Portilla", 0.001042691), ("Barragán", 0.001041769), ("Orjuela", 0.001039926), ("Bolívar", 0.001024253), ("Molano", 0.001021487), ("Anaya", 0.001016878), ] ) prefixes_female = OrderedDict( [ ("Sra.", 0.5), ("Srta.", 0.2), ("Dra.", 0.2), ("Doña", 0.05), ("Dña.", 0.05), ] ) prefixes_male = OrderedDict( [ ("Sr.", 0.7), ("Dr.", 0.2), ("Don", 0.05), ("D.", 0.05), ] )
Provider
python
django__django
tests/test_runner_apps/failures/tests_failures.py
{ "start": 49, "end": 142 }
class ____(TestCase): def test_sample(self): self.assertEqual(0, 1)
FailureTestCase
python
joke2k__faker
tests/providers/test_ssn.py
{ "start": 7006, "end": 7852 }
class ____(unittest.TestCase): def setUp(self): self.fake = Faker("de_AT") Faker.seed(0) def test_vat_id(self): for _ in range(100): assert re.search(r"^ATU\d{8}$", self.fake.vat_id()) def test_ssn(self): for _ in range(100): ssn: str = self.fake.ssn() assert len(ssn) == 10 assert len(self.fake.ssn(self.fake.date_of_birth())) == 10 def test_ssn_checkdigit(self): for _ in range(100): ssn: str = self.fake.ssn() ssn_digits: list[int] = [int(char) for char in ssn[:3] + ssn[4:]] factors: list[int] = [3, 7, 9, 5, 8, 4, 2, 1, 6] sum: int = 0 for index, digit in enumerate(ssn_digits): sum += digit * factors[index] assert sum % 11 == int(ssn[3])
TestDeAT
python
pymupdf__PyMuPDF
src/__init__.py
{ "start": 646392, "end": 821574 }
class ____(FileDataError): """Raised when creating documents from zero-length data.""" pass # propagate exception class to C-level code #_set_FileDataError(FileDataError) csRGB = Colorspace(CS_RGB) csGRAY = Colorspace(CS_GRAY) csCMYK = Colorspace(CS_CMYK) # These don't appear to be visible in classic, but are used # internally. # dictkey_align = "align" dictkey_asc = "ascender" dictkey_bidi = "bidi" dictkey_bbox = "bbox" dictkey_blocks = "blocks" dictkey_bpc = "bpc" dictkey_c = "c" dictkey_chars = "chars" dictkey_color = "color" dictkey_colorspace = "colorspace" dictkey_content = "content" dictkey_creationDate = "creationDate" dictkey_cs_name = "cs-name" dictkey_da = "da" dictkey_dashes = "dashes" dictkey_descr = "description" dictkey_desc = "descender" dictkey_dir = "dir" dictkey_effect = "effect" dictkey_ext = "ext" dictkey_filename = "filename" dictkey_fill = "fill" dictkey_flags = "flags" dictkey_char_flags = "char_flags" dictkey_font = "font" dictkey_glyph = "glyph" dictkey_height = "height" dictkey_id = "id" dictkey_image = "image" dictkey_items = "items" dictkey_length = "length" dictkey_lines = "lines" dictkey_matrix = "transform" dictkey_modDate = "modDate" dictkey_name = "name" dictkey_number = "number" dictkey_origin = "origin" dictkey_rect = "rect" dictkey_size = "size" dictkey_smask = "smask" dictkey_spans = "spans" dictkey_stroke = "stroke" dictkey_style = "style" dictkey_subject = "subject" dictkey_text = "text" dictkey_title = "title" dictkey_type = "type" dictkey_ufilename = "ufilename" dictkey_width = "width" dictkey_wmode = "wmode" dictkey_xref = "xref" dictkey_xres = "xres" dictkey_yres = "yres" try: from pymupdf_fonts import fontdescriptors, fontbuffers fitz_fontdescriptors = fontdescriptors.copy() for k in fitz_fontdescriptors.keys(): fitz_fontdescriptors[k]["loader"] = fontbuffers[k] del fontdescriptors, fontbuffers except ImportError: fitz_fontdescriptors = {} symbol_glyphs = ( # Glyph list for the built-in font 'Symbol' (183, 0.46), (183, 
0.46), (183, 0.46), (183, 0.46), (183, 0.46), (183, 0.46), (183, 0.46), (183, 0.46), (183, 0.46), (183, 0.46), (183, 0.46), (183, 0.46), (183, 0.46), (183, 0.46), (183, 0.46), (183, 0.46), (183, 0.46), (183, 0.46), (183, 0.46), (183, 0.46), (183, 0.46), (183, 0.46), (183, 0.46), (183, 0.46), (183, 0.46), (183, 0.46), (183, 0.46), (183, 0.46), (183, 0.46), (183, 0.46), (183, 0.46), (183, 0.46), (32, 0.25), (33, 0.333), (34, 0.713), (35, 0.5), (36, 0.549), (37, 0.833), (38, 0.778), (39, 0.439), (40, 0.333), (41, 0.333), (42, 0.5), (43, 0.549), (44, 0.25), (45, 0.549), (46, 0.25), (47, 0.278), (48, 0.5), (49, 0.5), (50, 0.5), (51, 0.5), (52, 0.5), (53, 0.5), (54, 0.5), (55, 0.5), (56, 0.5), (57, 0.5), (58, 0.278), (59, 0.278), (60, 0.549), (61, 0.549), (62, 0.549), (63, 0.444), (64, 0.549), (65, 0.722), (66, 0.667), (67, 0.722), (68, 0.612), (69, 0.611), (70, 0.763), (71, 0.603), (72, 0.722), (73, 0.333), (74, 0.631), (75, 0.722), (76, 0.686), (77, 0.889), (78, 0.722), (79, 0.722), (80, 0.768), (81, 0.741), (82, 0.556), (83, 0.592), (84, 0.611), (85, 0.69), (86, 0.439), (87, 0.768), (88, 0.645), (89, 0.795), (90, 0.611), (91, 0.333), (92, 0.863), (93, 0.333), (94, 0.658), (95, 0.5), (96, 0.5), (97, 0.631), (98, 0.549), (99, 0.549), (100, 0.494), (101, 0.439), (102, 0.521), (103, 0.411), (104, 0.603), (105, 0.329), (106, 0.603), (107, 0.549), (108, 0.549), (109, 0.576), (110, 0.521), (111, 0.549), (112, 0.549), (113, 0.521), (114, 0.549), (115, 0.603), (116, 0.439), (117, 0.576), (118, 0.713), (119, 0.686), (120, 0.493), (121, 0.686), (122, 0.494), (123, 0.48), (124, 0.2), (125, 0.48), (126, 0.549), (183, 0.46), (183, 0.46), (183, 0.46), (183, 0.46), (183, 0.46), (183, 0.46), (183, 0.46), (183, 0.46), (183, 0.46), (183, 0.46), (183, 0.46), (183, 0.46), (183, 0.46), (183, 0.46), (183, 0.46), (183, 0.46), (183, 0.46), (183, 0.46), (183, 0.46), (183, 0.46), (183, 0.46), (183, 0.46), (183, 0.46), (183, 0.46), (183, 0.46), (183, 0.46), (183, 0.46), (183, 0.46), (183, 0.46), 
(183, 0.46), (183, 0.46), (183, 0.46), (183, 0.46), (160, 0.25), (161, 0.62), (162, 0.247), (163, 0.549), (164, 0.167), (165, 0.713), (166, 0.5), (167, 0.753), (168, 0.753), (169, 0.753), (170, 0.753), (171, 1.042), (172, 0.713), (173, 0.603), (174, 0.987), (175, 0.603), (176, 0.4), (177, 0.549), (178, 0.411), (179, 0.549), (180, 0.549), (181, 0.576), (182, 0.494), (183, 0.46), (184, 0.549), (185, 0.549), (186, 0.549), (187, 0.549), (188, 1), (189, 0.603), (190, 1), (191, 0.658), (192, 0.823), (193, 0.686), (194, 0.795), (195, 0.987), (196, 0.768), (197, 0.768), (198, 0.823), (199, 0.768), (200, 0.768), (201, 0.713), (202, 0.713), (203, 0.713), (204, 0.713), (205, 0.713), (206, 0.713), (207, 0.713), (208, 0.768), (209, 0.713), (210, 0.79), (211, 0.79), (212, 0.89), (213, 0.823), (214, 0.549), (215, 0.549), (216, 0.713), (217, 0.603), (218, 0.603), (219, 1.042), (220, 0.987), (221, 0.603), (222, 0.987), (223, 0.603), (224, 0.494), (225, 0.329), (226, 0.79), (227, 0.79), (228, 0.786), (229, 0.713), (230, 0.384), (231, 0.384), (232, 0.384), (233, 0.384), (234, 0.384), (235, 0.384), (236, 0.494), (237, 0.494), (238, 0.494), (239, 0.494), (183, 0.46), (241, 0.329), (242, 0.274), (243, 0.686), (244, 0.686), (245, 0.686), (246, 0.384), (247, 0.549), (248, 0.384), (249, 0.384), (250, 0.384), (251, 0.384), (252, 0.494), (253, 0.494), (254, 0.494), (183, 0.46), ) zapf_glyphs = ( # Glyph list for the built-in font 'ZapfDingbats' (183, 0.788), (183, 0.788), (183, 0.788), (183, 0.788), (183, 0.788), (183, 0.788), (183, 0.788), (183, 0.788), (183, 0.788), (183, 0.788), (183, 0.788), (183, 0.788), (183, 0.788), (183, 0.788), (183, 0.788), (183, 0.788), (183, 0.788), (183, 0.788), (183, 0.788), (183, 0.788), (183, 0.788), (183, 0.788), (183, 0.788), (183, 0.788), (183, 0.788), (183, 0.788), (183, 0.788), (183, 0.788), (183, 0.788), (183, 0.788), (183, 0.788), (183, 0.788), (32, 0.278), (33, 0.974), (34, 0.961), (35, 0.974), (36, 0.98), (37, 0.719), (38, 0.789), (39, 0.79), (40, 
0.791), (41, 0.69), (42, 0.96), (43, 0.939), (44, 0.549), (45, 0.855), (46, 0.911), (47, 0.933), (48, 0.911), (49, 0.945), (50, 0.974), (51, 0.755), (52, 0.846), (53, 0.762), (54, 0.761), (55, 0.571), (56, 0.677), (57, 0.763), (58, 0.76), (59, 0.759), (60, 0.754), (61, 0.494), (62, 0.552), (63, 0.537), (64, 0.577), (65, 0.692), (66, 0.786), (67, 0.788), (68, 0.788), (69, 0.79), (70, 0.793), (71, 0.794), (72, 0.816), (73, 0.823), (74, 0.789), (75, 0.841), (76, 0.823), (77, 0.833), (78, 0.816), (79, 0.831), (80, 0.923), (81, 0.744), (82, 0.723), (83, 0.749), (84, 0.79), (85, 0.792), (86, 0.695), (87, 0.776), (88, 0.768), (89, 0.792), (90, 0.759), (91, 0.707), (92, 0.708), (93, 0.682), (94, 0.701), (95, 0.826), (96, 0.815), (97, 0.789), (98, 0.789), (99, 0.707), (100, 0.687), (101, 0.696), (102, 0.689), (103, 0.786), (104, 0.787), (105, 0.713), (106, 0.791), (107, 0.785), (108, 0.791), (109, 0.873), (110, 0.761), (111, 0.762), (112, 0.762), (113, 0.759), (114, 0.759), (115, 0.892), (116, 0.892), (117, 0.788), (118, 0.784), (119, 0.438), (120, 0.138), (121, 0.277), (122, 0.415), (123, 0.392), (124, 0.392), (125, 0.668), (126, 0.668), (183, 0.788), (183, 0.788), (183, 0.788), (183, 0.788), (183, 0.788), (183, 0.788), (183, 0.788), (183, 0.788), (183, 0.788), (183, 0.788), (183, 0.788), (183, 0.788), (183, 0.788), (183, 0.788), (183, 0.788), (183, 0.788), (183, 0.788), (183, 0.788), (183, 0.788), (183, 0.788), (183, 0.788), (183, 0.788), (183, 0.788), (183, 0.788), (183, 0.788), (183, 0.788), (183, 0.788), (183, 0.788), (183, 0.788), (183, 0.788), (183, 0.788), (183, 0.788), (183, 0.788), (183, 0.788), (161, 0.732), (162, 0.544), (163, 0.544), (164, 0.91), (165, 0.667), (166, 0.76), (167, 0.76), (168, 0.776), (169, 0.595), (170, 0.694), (171, 0.626), (172, 0.788), (173, 0.788), (174, 0.788), (175, 0.788), (176, 0.788), (177, 0.788), (178, 0.788), (179, 0.788), (180, 0.788), (181, 0.788), (182, 0.788), (183, 0.788), (184, 0.788), (185, 0.788), (186, 0.788), (187, 0.788), 
(188, 0.788), (189, 0.788), (190, 0.788), (191, 0.788), (192, 0.788), (193, 0.788), (194, 0.788), (195, 0.788), (196, 0.788), (197, 0.788), (198, 0.788), (199, 0.788), (200, 0.788), (201, 0.788), (202, 0.788), (203, 0.788), (204, 0.788), (205, 0.788), (206, 0.788), (207, 0.788), (208, 0.788), (209, 0.788), (210, 0.788), (211, 0.788), (212, 0.894), (213, 0.838), (214, 1.016), (215, 0.458), (216, 0.748), (217, 0.924), (218, 0.748), (219, 0.918), (220, 0.927), (221, 0.928), (222, 0.928), (223, 0.834), (224, 0.873), (225, 0.828), (226, 0.924), (227, 0.924), (228, 0.917), (229, 0.93), (230, 0.931), (231, 0.463), (232, 0.883), (233, 0.836), (234, 0.836), (235, 0.867), (236, 0.867), (237, 0.696), (238, 0.696), (239, 0.874), (183, 0.788), (241, 0.874), (242, 0.76), (243, 0.946), (244, 0.771), (245, 0.865), (246, 0.771), (247, 0.888), (248, 0.967), (249, 0.888), (250, 0.831), (251, 0.873), (252, 0.927), (253, 0.97), (183, 0.788), (183, 0.788), ) # Functions # def _rect_area(width, height, args): # Used by IRect.get_area() and Rect.get_area(). unit = args[0] if args else 'px' u = {"px": (1, 1), "in": (1.0, 72.0), "cm": (2.54, 72.0), "mm": (25.4, 72.0)} f = (u[unit][0] / u[unit][1]) ** 2 return f * width * height def _read_samples( pixmap, offset, n): # fixme: need to be able to get a sample in one call, as a Python # bytes or similar. ret = [] if not pixmap.samples(): # mupdf.fz_samples_get() gives a segv if pixmap->samples is null. 
return ret for i in range( n): ret.append( mupdf.fz_samples_get( pixmap, offset + i)) return bytes( ret) def _INRANGE(v, low, high): return low <= v and v <= high def _remove_dest_range(pdf, numbers): pagecount = mupdf.pdf_count_pages(pdf) for i in range(pagecount): n1 = i if n1 in numbers: continue pageref = mupdf.pdf_lookup_page_obj( pdf, i) annots = mupdf.pdf_dict_get( pageref, PDF_NAME('Annots')) if not annots.m_internal: continue len_ = mupdf.pdf_array_len(annots) for j in range(len_ - 1, -1, -1): o = mupdf.pdf_array_get( annots, j) if not mupdf.pdf_name_eq( mupdf.pdf_dict_get( o, PDF_NAME('Subtype')), PDF_NAME('Link')): continue action = mupdf.pdf_dict_get( o, PDF_NAME('A')) dest = mupdf.pdf_dict_get( o, PDF_NAME('Dest')) if action.m_internal: if not mupdf.pdf_name_eq( mupdf.pdf_dict_get( action, PDF_NAME('S')), PDF_NAME('GoTo')): continue dest = mupdf.pdf_dict_get( action, PDF_NAME('D')) pno = -1 if mupdf.pdf_is_array( dest): target = mupdf.pdf_array_get( dest, 0) pno = mupdf.pdf_lookup_page_number( pdf, target) elif mupdf.pdf_is_string( dest): location, _, _ = mupdf.fz_resolve_link( pdf.super(), mupdf.pdf_to_text_string( dest)) pno = location.page if pno < 0: # page number lookup did not work continue n1 = pno if n1 in numbers: mupdf.pdf_array_delete( annots, j) def ASSERT_PDF(cond): assert isinstance(cond, (mupdf.PdfPage, mupdf.PdfDocument)), f'{type(cond)=} {cond=}' if not cond.m_internal: raise Exception(MSG_IS_NO_PDF) def EMPTY_IRECT(): return IRect(FZ_MAX_INF_RECT, FZ_MAX_INF_RECT, FZ_MIN_INF_RECT, FZ_MIN_INF_RECT) def EMPTY_QUAD(): return EMPTY_RECT().quad def EMPTY_RECT(): return Rect(FZ_MAX_INF_RECT, FZ_MAX_INF_RECT, FZ_MIN_INF_RECT, FZ_MIN_INF_RECT) def ENSURE_OPERATION(pdf): if not JM_have_operation(pdf): raise Exception("No journalling operation started") def INFINITE_IRECT(): return IRect(FZ_MIN_INF_RECT, FZ_MIN_INF_RECT, FZ_MAX_INF_RECT, FZ_MAX_INF_RECT) def INFINITE_QUAD(): return INFINITE_RECT().quad def INFINITE_RECT(): return 
Rect(FZ_MIN_INF_RECT, FZ_MIN_INF_RECT, FZ_MAX_INF_RECT, FZ_MAX_INF_RECT) def JM_BinFromBuffer(buffer_): ''' Turn fz_buffer into a Python bytes object ''' assert isinstance(buffer_, mupdf.FzBuffer) ret = mupdf.fz_buffer_extract_copy(buffer_) return ret def JM_EscapeStrFromStr(c): # `c` is typically from SWIG which will have converted a `const char*` from # C into a Python `str` using `PyUnicode_DecodeUTF8(carray, static_cast< # Py_ssize_t >(size), "surrogateescape")`. This gives us a Python `str` # with some characters encoded as a \0xdcXY sequence, where `XY` are hex # digits for an invalid byte in the original `const char*`. # # This is actually a reasonable way of representing arbitrary # strings from C, but we want to mimic what PyMuPDF does. It uses # `PyUnicode_DecodeRawUnicodeEscape(c, (Py_ssize_t) strlen(c), "replace")` # which gives a string containing actual unicode characters for any invalid # bytes. # # We mimic this by converting the `str` to a `bytes` with 'surrogateescape' # to recognise \0xdcXY sequences, then convert the individual bytes into a # `str` using `chr()`. # # Would be good to have a more efficient way to do this. # if c is None: return '' assert isinstance(c, str), f'{type(c)=}' b = c.encode('utf8', 'surrogateescape') ret = '' for bb in b: ret += chr(bb) return ret def JM_BufferFromBytes(stream): ''' Make fz_buffer from a PyBytes, PyByteArray or io.BytesIO object. If a text io.BytesIO, we convert to binary by encoding as utf8. 
''' if isinstance(stream, (bytes, bytearray)): data = stream elif hasattr(stream, 'getvalue'): data = stream.getvalue() if isinstance(data, str): data = data.encode('utf-8') if not isinstance(data, (bytes, bytearray)): raise Exception(f'.getvalue() returned unexpected type: {type(data)}') else: return mupdf.FzBuffer() return mupdf.fz_new_buffer_from_copied_data(data) def JM_FLOAT_ITEM(obj, idx): if not PySequence_Check(obj): return None return float(obj[idx]) def JM_INT_ITEM(obj, idx): if idx < len(obj): temp = obj[idx] if isinstance(temp, (int, float)): return 0, temp return 1, None def JM_pixmap_from_page(doc, page, ctm, cs, alpha, annots, clip): ''' Pixmap creation directly using a short-lived displaylist, so we can support separations. ''' SPOTS_NONE = 0 SPOTS_OVERPRINT_SIM = 1 SPOTS_FULL = 2 FZ_ENABLE_SPOT_RENDERING = True # fixme: this is a build-time setting in MuPDF's config.h. if FZ_ENABLE_SPOT_RENDERING: spots = SPOTS_OVERPRINT_SIM else: spots = SPOTS_NONE seps = None colorspace = cs matrix = JM_matrix_from_py(ctm) rect = mupdf.fz_bound_page(page) rclip = JM_rect_from_py(clip) rect = mupdf.fz_intersect_rect(rect, rclip) # no-op if clip is not given rect = mupdf.fz_transform_rect(rect, matrix) bbox = mupdf.fz_round_rect(rect) # Pixmap of the document's /OutputIntents ("output intents") oi = mupdf.fz_document_output_intent(doc) # if present and compatible, use it instead of the parameter if oi.m_internal: if mupdf.fz_colorspace_n(oi) == mupdf.fz_colorspace_n(cs): colorspace = mupdf.fz_keep_colorspace(oi) # check if spots rendering is available and if so use separations if spots != SPOTS_NONE: seps = mupdf.fz_page_separations(page) if seps.m_internal: n = mupdf.fz_count_separations(seps) if spots == SPOTS_FULL: for i in range(n): mupdf.fz_set_separation_behavior(seps, i, mupdf.FZ_SEPARATION_SPOT) else: for i in range(n): mupdf.fz_set_separation_behavior(seps, i, mupdf.FZ_SEPARATION_COMPOSITE) elif mupdf.fz_page_uses_overprint(page): # This page uses 
overprint, so we need an empty # sep object to force the overprint simulation on. seps = mupdf.fz_new_separations(0) elif oi.m_internal and mupdf.fz_colorspace_n(oi) != mupdf.fz_colorspace_n(colorspace): # We have an output intent, and it's incompatible # with the colorspace our device needs. Force the # overprint simulation on, because this ensures that # we 'simulate' the output intent too. seps = mupdf.fz_new_separations(0) pix = mupdf.fz_new_pixmap_with_bbox(colorspace, bbox, seps, alpha) if alpha: mupdf.fz_clear_pixmap(pix) else: mupdf.fz_clear_pixmap_with_value(pix, 0xFF) dev = mupdf.fz_new_draw_device(matrix, pix) if annots: mupdf.fz_run_page(page, dev, mupdf.FzMatrix(), mupdf.FzCookie()) else: mupdf.fz_run_page_contents(page, dev, mupdf.FzMatrix(), mupdf.FzCookie()) mupdf.fz_close_device(dev) return pix def JM_StrAsChar(x): # fixme: should encode, but swig doesn't pass bytes to C as const char*. return x #return x.encode('utf8') def JM_TUPLE(o: typing.Sequence) -> tuple: return tuple(map(lambda x: round(x, 5) if abs(x) >= 1e-4 else 0, o)) def JM_TUPLE3(o: typing.Sequence) -> tuple: return tuple(map(lambda x: round(x, 3) if abs(x) >= 1e-3 else 0, o)) def JM_UnicodeFromStr(s): if s is None: return '' if isinstance(s, bytes): s = s.decode('utf8') assert isinstance(s, str), f'{type(s)=} {s=}' return s def JM_add_annot_id(annot, stem): ''' Add a unique /NM key to an annotation or widget. Append a number to 'stem' such that the result is a unique name. 
''' assert isinstance(annot, mupdf.PdfAnnot) page = _pdf_annot_page(annot) annot_obj = mupdf.pdf_annot_obj( annot) names = JM_get_annot_id_list(page) i = 0 while 1: stem_id = f'{JM_annot_id_stem}-{stem}{i}' if stem_id not in names: break i += 1 response = JM_StrAsChar(stem_id) name = mupdf.pdf_new_string( response, len(response)) mupdf.pdf_dict_puts(annot_obj, "NM", name) page.doc().m_internal.resynth_required = 0 def JM_add_oc_object(pdf, ref, xref): ''' Add OC object reference to a dictionary ''' indobj = mupdf.pdf_new_indirect(pdf, xref, 0) if not mupdf.pdf_is_dict(indobj): RAISEPY(MSG_BAD_OC_REF, PyExc_ValueError) type_ = mupdf.pdf_dict_get(indobj, PDF_NAME('Type')) if (mupdf.pdf_objcmp(type_, PDF_NAME('OCG')) == 0 or mupdf.pdf_objcmp(type_, PDF_NAME('OCMD')) == 0 ): mupdf.pdf_dict_put(ref, PDF_NAME('OC'), indobj) else: RAISEPY(MSG_BAD_OC_REF, PyExc_ValueError) def JM_annot_border(annot_obj): dash_py = list() style = None width = -1 clouds = -1 obj = None obj = mupdf.pdf_dict_get( annot_obj, PDF_NAME('Border')) if mupdf.pdf_is_array( obj): width = mupdf.pdf_to_real( mupdf.pdf_array_get( obj, 2)) if mupdf.pdf_array_len( obj) == 4: dash = mupdf.pdf_array_get( obj, 3) for i in range( mupdf.pdf_array_len( dash)): val = mupdf.pdf_to_int( mupdf.pdf_array_get( dash, i)) dash_py.append( val) bs_o = mupdf.pdf_dict_get( annot_obj, PDF_NAME('BS')) if bs_o.m_internal: width = mupdf.pdf_to_real( mupdf.pdf_dict_get( bs_o, PDF_NAME('W'))) style = mupdf.pdf_to_name( mupdf.pdf_dict_get( bs_o, PDF_NAME('S'))) if style == '': style = None obj = mupdf.pdf_dict_get( bs_o, PDF_NAME('D')) if obj.m_internal: for i in range( mupdf.pdf_array_len( obj)): val = mupdf.pdf_to_int( mupdf.pdf_array_get( obj, i)) dash_py.append( val) obj = mupdf.pdf_dict_get( annot_obj, PDF_NAME('BE')) if obj.m_internal: clouds = mupdf.pdf_to_int( mupdf.pdf_dict_get( obj, PDF_NAME('I'))) res = dict() res[ dictkey_width] = width res[ dictkey_dashes] = tuple( dash_py) res[ dictkey_style] = style res[ 'clouds'] = 
clouds return res def JM_annot_colors(annot_obj): res = dict() bc = list() # stroke colors fc =list() # fill colors o = mupdf.pdf_dict_get(annot_obj, mupdf.PDF_ENUM_NAME_C) if mupdf.pdf_is_array(o): n = mupdf.pdf_array_len(o) for i in range(n): col = mupdf.pdf_to_real( mupdf.pdf_array_get(o, i)) bc.append(col) res[dictkey_stroke] = bc o = mupdf.pdf_dict_gets(annot_obj, "IC") if mupdf.pdf_is_array(o): n = mupdf.pdf_array_len(o) for i in range(n): col = mupdf.pdf_to_real( mupdf.pdf_array_get(o, i)) fc.append(col) res[dictkey_fill] = fc return res def JM_annot_set_border( border, doc, annot_obj): assert isinstance(border, dict) obj = None dashlen = 0 nwidth = border.get( dictkey_width) # new width ndashes = border.get( dictkey_dashes) # new dashes nstyle = border.get( dictkey_style) # new style nclouds = border.get( 'clouds', -1) # new clouds value # get old border properties oborder = JM_annot_border( annot_obj) # delete border-related entries mupdf.pdf_dict_del( annot_obj, PDF_NAME('BS')) mupdf.pdf_dict_del( annot_obj, PDF_NAME('BE')) mupdf.pdf_dict_del( annot_obj, PDF_NAME('Border')) # populate border items: keep old values for any omitted new ones if nwidth < 0: nwidth = oborder.get( dictkey_width) # no new width: keep current if ndashes is None: ndashes = oborder.get( dictkey_dashes) # no new dashes: keep old if nstyle is None: nstyle = oborder.get( dictkey_style) # no new style: keep old if nclouds < 0: nclouds = oborder.get( "clouds", -1) # no new clouds: keep old if isinstance( ndashes, tuple) and len( ndashes) > 0: dashlen = len( ndashes) darr = mupdf.pdf_new_array( doc, dashlen) for d in ndashes: mupdf.pdf_array_push_int( darr, d) mupdf.pdf_dict_putl( annot_obj, darr, PDF_NAME('BS'), PDF_NAME('D')) mupdf.pdf_dict_putl( annot_obj, mupdf.pdf_new_real( nwidth), PDF_NAME('BS'), PDF_NAME('W'), ) if dashlen == 0: obj = JM_get_border_style( nstyle) else: obj = PDF_NAME('D') mupdf.pdf_dict_putl( annot_obj, obj, PDF_NAME('BS'), PDF_NAME('S')) if nclouds > 0: 
mupdf.pdf_dict_put_dict( annot_obj, PDF_NAME('BE'), 2) obj = mupdf.pdf_dict_get( annot_obj, PDF_NAME('BE')) mupdf.pdf_dict_put( obj, PDF_NAME('S'), PDF_NAME('C')) mupdf.pdf_dict_put_int( obj, PDF_NAME('I'), nclouds) def make_escape(ch): if ch == 92: return "\\u005c" elif 32 <= ch <= 127 or ch == 10: return chr(ch) elif 0xd800 <= ch <= 0xdfff: # orphaned surrogate return "\\ufffd" elif ch <= 0xffff: return "\\u%04x" % ch else: return "\\U%08x" % ch def JM_append_rune(buff, ch): """ APPEND non-ascii runes in unicode escape format to fz_buffer. """ mupdf.fz_append_string(buff, make_escape(ch)) def JM_append_word(lines, buff, wbbox, block_n, line_n, word_n): ''' Functions for wordlist output ''' s = JM_EscapeStrFromBuffer(buff) litem = ( wbbox.x0, wbbox.y0, wbbox.x1, wbbox.y1, s, block_n, line_n, word_n, ) lines.append(litem) return word_n + 1, mupdf.FzRect(mupdf.FzRect.Fixed_EMPTY) # word counter def JM_add_layer_config( pdf, name, creator, ON): ''' Add OC configuration to the PDF catalog ''' ocp = JM_ensure_ocproperties( pdf) configs = mupdf.pdf_dict_get( ocp, PDF_NAME('Configs')) if not mupdf.pdf_is_array( configs): configs = mupdf.pdf_dict_put_array( ocp, PDF_NAME('Configs'), 1) D = mupdf.pdf_new_dict( pdf, 5) mupdf.pdf_dict_put_text_string( D, PDF_NAME('Name'), name) if creator is not None: mupdf.pdf_dict_put_text_string( D, PDF_NAME('Creator'), creator) mupdf.pdf_dict_put( D, PDF_NAME('BaseState'), PDF_NAME('OFF')) onarray = mupdf.pdf_dict_put_array( D, PDF_NAME('ON'), 5) if not ON: pass else: ocgs = mupdf.pdf_dict_get( ocp, PDF_NAME('OCGs')) n = len(ON) for i in range(n): xref = 0 e, xref = JM_INT_ITEM(ON, i) if e == 1: continue ind = mupdf.pdf_new_indirect( pdf, xref, 0) if mupdf.pdf_array_contains( ocgs, ind): mupdf.pdf_array_push( onarray, ind) mupdf.pdf_array_push( configs, D) def JM_char_bbox(line, ch): ''' return rect of char quad ''' q = JM_char_quad(line, ch) r = mupdf.fz_rect_from_quad(q) if not line.m_internal.wmode: return r if r.y1 < r.y0 + 
ch.m_internal.size: r.y0 = r.y1 - ch.m_internal.size return r def JM_char_font_flags(font, line, ch): flags = 0 if line and ch: flags += detect_super_script(line, ch) flags += mupdf.fz_font_is_italic(font) * TEXT_FONT_ITALIC flags += mupdf.fz_font_is_serif(font) * TEXT_FONT_SERIFED flags += mupdf.fz_font_is_monospaced(font) * TEXT_FONT_MONOSPACED flags += mupdf.fz_font_is_bold(font) * TEXT_FONT_BOLD return flags def JM_char_quad(line, ch): ''' re-compute char quad if ascender/descender values make no sense ''' if 1 and g_use_extra: # This reduces time taken to extract text from PyMuPDF.pdf from 20s to # 15s. return mupdf.FzQuad(extra.JM_char_quad( line.m_internal, ch.m_internal)) assert isinstance(line, mupdf.FzStextLine) assert isinstance(ch, mupdf.FzStextChar) if _globals.skip_quad_corrections: # no special handling return ch.quad if line.m_internal.wmode: # never touch vertical write mode return ch.quad font = mupdf.FzFont(mupdf.ll_fz_keep_font(ch.m_internal.font)) asc = JM_font_ascender(font) dsc = JM_font_descender(font) fsize = ch.m_internal.size asc_dsc = asc - dsc + FLT_EPSILON if asc_dsc >= 1 and _globals.small_glyph_heights == 0: # no problem return mupdf.FzQuad(ch.m_internal.quad) # Re-compute quad with adjusted ascender / descender values: # Move ch->origin to (0,0) and de-rotate quad, then adjust the corners, # re-rotate and move back to ch->origin location. fsize = ch.m_internal.size bbox = mupdf.fz_font_bbox(font) fwidth = bbox.x1 - bbox.x0 if asc < 1e-3: # probably Tesseract glyphless font dsc = -0.1 asc = 0.9 asc_dsc = 1.0 if _globals.small_glyph_heights or asc_dsc < 1: dsc = dsc / asc_dsc asc = asc / asc_dsc asc_dsc = asc - dsc asc = asc * fsize / asc_dsc dsc = dsc * fsize / asc_dsc # Re-compute quad with the adjusted ascender / descender values: # Move ch->origin to (0,0) and de-rotate quad, then adjust the corners, # re-rotate and move back to ch->origin location. 
c = line.m_internal.dir.x # cosine s = line.m_internal.dir.y # sine trm1 = mupdf.fz_make_matrix(c, -s, s, c, 0, 0) # derotate trm2 = mupdf.fz_make_matrix(c, s, -s, c, 0, 0) # rotate if (c == -1): # left-right flip trm1.d = 1 trm2.d = 1 xlate1 = mupdf.fz_make_matrix(1, 0, 0, 1, -ch.m_internal.origin.x, -ch.m_internal.origin.y) xlate2 = mupdf.fz_make_matrix(1, 0, 0, 1, ch.m_internal.origin.x, ch.m_internal.origin.y) quad = mupdf.fz_transform_quad(mupdf.FzQuad(ch.m_internal.quad), xlate1) # move origin to (0,0) quad = mupdf.fz_transform_quad(quad, trm1) # de-rotate corners # adjust vertical coordinates if c == 1 and quad.ul.y > 0: # up-down flip quad.ul.y = asc quad.ur.y = asc quad.ll.y = dsc quad.lr.y = dsc else: quad.ul.y = -asc quad.ur.y = -asc quad.ll.y = -dsc quad.lr.y = -dsc # adjust horizontal coordinates that are too crazy: # (1) left x must be >= 0 # (2) if bbox width is 0, lookup char advance in font. if quad.ll.x < 0: quad.ll.x = 0 quad.ul.x = 0 cwidth = quad.lr.x - quad.ll.x if cwidth < FLT_EPSILON: glyph = mupdf.fz_encode_character( font, ch.m_internal.c) if glyph: fwidth = mupdf.fz_advance_glyph( font, glyph, line.m_internal.wmode) quad.lr.x = quad.ll.x + fwidth * fsize quad.ur.x = quad.lr.x quad = mupdf.fz_transform_quad(quad, trm2) # rotate back quad = mupdf.fz_transform_quad(quad, xlate2) # translate back return quad def JM_choice_options(annot): ''' return list of choices for list or combo boxes ''' annot_obj = mupdf.pdf_annot_obj( annot.this) opts = mupdf.pdf_choice_widget_options2( annot, 0) n = len( opts) if n == 0: return # wrong widget type optarr = mupdf.pdf_dict_get( annot_obj, PDF_NAME('Opt')) liste = [] for i in range( n): m = mupdf.pdf_array_len( mupdf.pdf_array_get( optarr, i)) if m == 2: val = ( mupdf.pdf_to_text_string( mupdf.pdf_array_get( mupdf.pdf_array_get( optarr, i), 0)), mupdf.pdf_to_text_string( mupdf.pdf_array_get( mupdf.pdf_array_get( optarr, i), 1)), ) liste.append( val) else: val = mupdf.pdf_to_text_string( 
mupdf.pdf_array_get( optarr, i)) liste.append( val) return liste def JM_clear_pixmap_rect_with_value(dest, value, b): ''' Clear a pixmap rectangle - my version also supports non-alpha pixmaps ''' b = mupdf.fz_intersect_irect(b, mupdf.fz_pixmap_bbox(dest)) w = b.x1 - b.x0 y = b.y1 - b.y0 if w <= 0 or y <= 0: return 0 destspan = dest.stride() destp = destspan * (b.y0 - dest.y()) + dest.n() * (b.x0 - dest.x()) # CMYK needs special handling (and potentially any other subtractive colorspaces) if mupdf.fz_colorspace_n(dest.colorspace()) == 4: value = 255 - value while 1: s = destp for x in range(0, w): mupdf.fz_samples_set(dest, s, 0) s += 1 mupdf.fz_samples_set(dest, s, 0) s += 1 mupdf.fz_samples_set(dest, s, 0) s += 1 mupdf.fz_samples_set(dest, s, value) s += 1 if dest.alpha(): mupdf.fz_samples_set(dest, s, 255) s += 1 destp += destspan if y == 0: break y -= 1 return 1 while 1: s = destp for x in range(w): for k in range(dest.n()-1): mupdf.fz_samples_set(dest, s, value) s += 1 if dest.alpha(): mupdf.fz_samples_set(dest, s, 255) s += 1 else: mupdf.fz_samples_set(dest, s, value) s += 1 destp += destspan if y == 0: break y -= 1 return 1 def JM_color_FromSequence(color): if isinstance(color, (int, float)): # maybe just a single float color = [color] if not isinstance( color, (list, tuple)): return -1, [] if len(color) not in (0, 1, 3, 4): return -1, [] ret = color[:] for i in range(len(ret)): if ret[i] < 0 or ret[i] > 1: ret[i] = 1 return len(ret), ret def JM_color_count( pm, clip): if 1 or g_use_extra: return extra.ll_JM_color_count(pm.m_internal, clip) rc = dict() cnt = 0 irect = mupdf.fz_pixmap_bbox( pm) irect = mupdf.fz_intersect_irect(irect, mupdf.fz_round_rect(JM_rect_from_py(clip))) stride = pm.stride() width = irect.x1 - irect.x0 height = irect.y1 - irect.y0 n = pm.n() substride = width * n s = stride * (irect.y0 - pm.y()) + (irect.x0 - pm.x()) * n oldpix = _read_samples( pm, s, n) cnt = 0 if mupdf.fz_is_empty_irect(irect): return rc for i in range( height): for j 
in range( 0, substride, n): newpix = _read_samples( pm, s + j, n) if newpix != oldpix: pixel = oldpix c = rc.get( pixel, None) if c is not None: cnt += c rc[ pixel] = cnt cnt = 1 oldpix = newpix else: cnt += 1 s += stride pixel = oldpix c = rc.get( pixel) if c is not None: cnt += c rc[ pixel] = cnt return rc def JM_compress_buffer(inbuffer): ''' compress char* into a new buffer ''' data, compressed_length = mupdf.fz_new_deflated_data_from_buffer( inbuffer, mupdf.FZ_DEFLATE_BEST, ) #log( '{=data compressed_length}') if not data or compressed_length == 0: return None buf = mupdf.FzBuffer(mupdf.fz_new_buffer_from_data(data, compressed_length)) mupdf.fz_resize_buffer(buf, compressed_length) return buf def JM_copy_rectangle(page, area): need_new_line = 0 buffer = io.StringIO() for block in page: if block.m_internal.type != mupdf.FZ_STEXT_BLOCK_TEXT: continue for line in block: line_had_text = 0 for ch in line: r = JM_char_bbox(line, ch) if JM_rects_overlap(area, r): line_had_text = 1 if need_new_line: buffer.write("\n") need_new_line = 0 buffer.write(make_escape(ch.m_internal.c)) if line_had_text: need_new_line = 1 s = buffer.getvalue() # take over the data return s def JM_convert_to_pdf(doc, fp, tp, rotate): ''' Convert any MuPDF document to a PDF Returns bytes object containing the PDF, created via 'write' function. ''' pdfout = mupdf.PdfDocument() incr = 1 s = fp e = tp if fp > tp: incr = -1 # count backwards s = tp # adjust ... e = fp # ... 
range rot = JM_norm_rotation(rotate) i = fp while 1: # interpret & write document pages as PDF pages if not _INRANGE(i, s, e): break page = mupdf.fz_load_page(doc, i) mediabox = mupdf.fz_bound_page(page) dev, resources, contents = mupdf.pdf_page_write(pdfout, mediabox) mupdf.fz_run_page(page, dev, mupdf.FzMatrix(), mupdf.FzCookie()) mupdf.fz_close_device(dev) dev = None page_obj = mupdf.pdf_add_page(pdfout, mediabox, rot, resources, contents) mupdf.pdf_insert_page(pdfout, -1, page_obj) i += incr # PDF created - now write it to Python bytearray # prepare write options structure opts = mupdf.PdfWriteOptions() opts.do_garbage = 4 opts.do_compress = 1 opts.do_compress_images = 1 opts.do_compress_fonts = 1 opts.do_sanitize = 1 opts.do_incremental = 0 opts.do_ascii = 0 opts.do_decompress = 0 opts.do_linear = 0 opts.do_clean = 1 opts.do_pretty = 0 res = mupdf.fz_new_buffer(8192) out = mupdf.FzOutput(res) mupdf.pdf_write_document(pdfout, out, opts) out.fz_close_output() c = mupdf.fz_buffer_extract_copy(res) assert isinstance(c, bytes) return c # Copied from MuPDF v1.14 # Create widget def JM_create_widget(doc, page, type, fieldname): old_sigflags = mupdf.pdf_to_int(mupdf.pdf_dict_getp(mupdf.pdf_trailer(doc), "Root/AcroForm/SigFlags")) #log( '*** JM_create_widget()') #log( f'{mupdf.pdf_create_annot_raw=}') #log( f'{page=}') #log( f'{mupdf.PDF_ANNOT_WIDGET=}') annot = mupdf.pdf_create_annot_raw(page, mupdf.PDF_ANNOT_WIDGET) annot_obj = mupdf.pdf_annot_obj(annot) try: JM_set_field_type(doc, annot_obj, type) mupdf.pdf_dict_put_text_string(annot_obj, PDF_NAME('T'), fieldname) if type == mupdf.PDF_WIDGET_TYPE_SIGNATURE: sigflags = old_sigflags | (SigFlag_SignaturesExist | SigFlag_AppendOnly) mupdf.pdf_dict_putl( mupdf.pdf_trailer(doc), mupdf.pdf_new_int(sigflags), PDF_NAME('Root'), PDF_NAME('AcroForm'), PDF_NAME('SigFlags'), ) # pdf_create_annot will have linked the new widget into the page's # annot array. 
We also need it linked into the document's form form = mupdf.pdf_dict_getp(mupdf.pdf_trailer(doc), "Root/AcroForm/Fields") if not form.m_internal: form = mupdf.pdf_new_array(doc, 1) mupdf.pdf_dict_putl( mupdf.pdf_trailer(doc), form, PDF_NAME('Root'), PDF_NAME('AcroForm'), PDF_NAME('Fields'), ) mupdf.pdf_array_push(form, annot_obj) # Cleanup relies on this statement being last except Exception: if g_exceptions_verbose: exception_info() mupdf.pdf_delete_annot(page, annot) if type == mupdf.PDF_WIDGET_TYPE_SIGNATURE: mupdf.pdf_dict_putl( mupdf.pdf_trailer(doc), mupdf.pdf_new_int(old_sigflags), PDF_NAME('Root'), PDF_NAME('AcroForm'), PDF_NAME('SigFlags'), ) raise return annot def JM_cropbox(page_obj): ''' return a PDF page's CropBox ''' if g_use_extra: return extra.JM_cropbox(page_obj) mediabox = JM_mediabox(page_obj) cropbox = mupdf.pdf_to_rect( mupdf.pdf_dict_get_inheritable(page_obj, PDF_NAME('CropBox')) ) if mupdf.fz_is_infinite_rect(cropbox) or mupdf.fz_is_empty_rect(cropbox): cropbox = mediabox y0 = mediabox.y1 - cropbox.y1 y1 = mediabox.y1 - cropbox.y0 cropbox.y0 = y0 cropbox.y1 = y1 return cropbox def JM_cropbox_size(page_obj): rect = JM_cropbox(page_obj) w = abs(rect.x1 - rect.x0) h = abs(rect.y1 - rect.y0) size = mupdf.fz_make_point(w, h) return size def JM_derotate_page_matrix(page): ''' just the inverse of rotation ''' mp = JM_rotate_page_matrix(page) return mupdf.fz_invert_matrix(mp) def JM_embed_file( pdf, buf, filename, ufilename, desc, compress, ): ''' embed a new file in a PDF (not only /EmbeddedFiles entries) ''' len_ = 0 val = mupdf.pdf_new_dict(pdf, 6) mupdf.pdf_dict_put_dict(val, PDF_NAME('CI'), 4) ef = mupdf.pdf_dict_put_dict(val, PDF_NAME('EF'), 4) mupdf.pdf_dict_put_text_string(val, PDF_NAME('F'), filename) mupdf.pdf_dict_put_text_string(val, PDF_NAME('UF'), ufilename) mupdf.pdf_dict_put_text_string(val, PDF_NAME('Desc'), desc) mupdf.pdf_dict_put(val, PDF_NAME('Type'), PDF_NAME('Filespec')) bs = b' ' f = mupdf.pdf_add_stream( pdf, 
#mupdf.fz_fz_new_buffer_from_copied_data(bs), mupdf.fz_new_buffer_from_copied_data(bs), mupdf.PdfObj(), 0, ) mupdf.pdf_dict_put(ef, PDF_NAME('F'), f) JM_update_stream(pdf, f, buf, compress) len_, _ = mupdf.fz_buffer_storage(buf) mupdf.pdf_dict_put_int(f, PDF_NAME('DL'), len_) mupdf.pdf_dict_put_int(f, PDF_NAME('Length'), len_) params = mupdf.pdf_dict_put_dict(f, PDF_NAME('Params'), 4) mupdf.pdf_dict_put_int(params, PDF_NAME('Size'), len_) return val def JM_embedded_clean(pdf): ''' perform some cleaning if we have /EmbeddedFiles: (1) remove any /Limits if /Names exists (2) remove any empty /Collection (3) set /PageMode/UseAttachments ''' root = mupdf.pdf_dict_get( mupdf.pdf_trailer( pdf), PDF_NAME('Root')) # remove any empty /Collection entry coll = mupdf.pdf_dict_get(root, PDF_NAME('Collection')) if coll.m_internal and mupdf.pdf_dict_len(coll) == 0: mupdf.pdf_dict_del(root, PDF_NAME('Collection')) efiles = mupdf.pdf_dict_getl( root, PDF_NAME('Names'), PDF_NAME('EmbeddedFiles'), PDF_NAME('Names'), ) if efiles.m_internal: mupdf.pdf_dict_put_name(root, PDF_NAME('PageMode'), "UseAttachments") def JM_EscapeStrFromBuffer(buff): if not buff.m_internal: return '' s = mupdf.fz_buffer_extract_copy(buff) val = PyUnicode_DecodeRawUnicodeEscape(s, errors='replace') return val def JM_ensure_identity(pdf): ''' Store ID in PDF trailer ''' id_ = mupdf.pdf_dict_get( mupdf.pdf_trailer(pdf), PDF_NAME('ID')) if not id_.m_internal: rnd0 = mupdf.fz_memrnd2(16) # Need to convert raw bytes into a str to send to # mupdf.pdf_new_string(). chr() seems to work for this. 
        # Convert the raw random bytes to a str one char at a time, so the
        # value can be passed to mupdf.pdf_new_string().
        rnd = ''
        for i in rnd0:
            rnd += chr(i)
        # /ID is an array of two byte strings; store the same value twice.
        id_ = mupdf.pdf_dict_put_array( mupdf.pdf_trailer( pdf), PDF_NAME('ID'), 2)
        mupdf.pdf_array_push( id_, mupdf.pdf_new_string( rnd, len(rnd)))
        mupdf.pdf_array_push( id_, mupdf.pdf_new_string( rnd, len(rnd)))


def JM_ensure_ocproperties(pdf):
    '''
    Ensure OCProperties, return /OCProperties key.

    Returns the existing /Root/OCProperties dict if present; otherwise
    creates it with an empty /OCGs array and a default configuration /D
    holding empty /ON, /OFF, /Order and /RBGroups arrays.
    '''
    ocp = mupdf.pdf_dict_get(mupdf.pdf_dict_get(mupdf.pdf_trailer(pdf), PDF_NAME('Root')), PDF_NAME('OCProperties'))
    if ocp.m_internal:
        return ocp  # already present - nothing to do
    root = mupdf.pdf_dict_get(mupdf.pdf_trailer(pdf), PDF_NAME('Root'))
    ocp = mupdf.pdf_dict_put_dict(root, PDF_NAME('OCProperties'), 2)
    mupdf.pdf_dict_put_array(ocp, PDF_NAME('OCGs'), 0)
    # Default configuration dict /D with the standard empty arrays.
    D = mupdf.pdf_dict_put_dict(ocp, PDF_NAME('D'), 5)
    mupdf.pdf_dict_put_array(D, PDF_NAME('ON'), 0)
    mupdf.pdf_dict_put_array(D, PDF_NAME('OFF'), 0)
    mupdf.pdf_dict_put_array(D, PDF_NAME('Order'), 0)
    mupdf.pdf_dict_put_array(D, PDF_NAME('RBGroups'), 0)
    return ocp


def JM_expand_fname(name):
    '''
    Make the font name part of an annotation /DA string.

    Only the first two characters of `name` are significant (checked in
    both capitalisations); anything unrecognised falls back to Helvetica.
    '''
    if not name:
        return "Helv"
    if name.startswith("Co"):
        return "Cour"
    if name.startswith("co"):
        return "Cour"
    if name.startswith("Ti"):
        return "TiRo"
    if name.startswith("ti"):
        return "TiRo"
    if name.startswith("Sy"):
        return "Symb"
    if name.startswith("sy"):
        return "Symb"
    if name.startswith("Za"):
        return "ZaDb"
    if name.startswith("za"):
        return "ZaDb"
    return "Helv"


def JM_field_type_text(wtype):
    '''
    Return a human-readable string for a PDF widget type constant.
    '''
    if wtype == mupdf.PDF_WIDGET_TYPE_BUTTON:
        return "Button"
    if wtype == mupdf.PDF_WIDGET_TYPE_CHECKBOX:
        return "CheckBox"
    if wtype == mupdf.PDF_WIDGET_TYPE_RADIOBUTTON:
        return "RadioButton"
    if wtype == mupdf.PDF_WIDGET_TYPE_TEXT:
        return "Text"
    if wtype == mupdf.PDF_WIDGET_TYPE_LISTBOX:
        return "ListBox"
    if wtype == mupdf.PDF_WIDGET_TYPE_COMBOBOX:
        return "ComboBox"
    if wtype == mupdf.PDF_WIDGET_TYPE_SIGNATURE:
        return "Signature"
    return "unknown"


def JM_fill_pixmap_rect_with_color(dest, col, b):
    assert isinstance(dest, mupdf.FzPixmap)
    # fill a rect with a color tuple
    b = 
mupdf.fz_intersect_irect(b, mupdf.fz_pixmap_bbox( dest)) w = b.x1 - b.x0 y = b.y1 - b.y0 if w <= 0 or y <= 0: return 0 destspan = dest.stride() destp = destspan * (b.y0 - dest.y()) + dest.n() * (b.x0 - dest.x()) while 1: s = destp for x in range(w): for i in range( dest.n()): mupdf.fz_samples_set(dest, s, col[i]) s += 1 destp += destspan y -= 1 if y == 0: break return 1 def JM_find_annot_irt(annot): ''' Return the first annotation whose /IRT key ("In Response To") points to annot. Used to remove the response chain of a given annotation. ''' assert isinstance(annot, mupdf.PdfAnnot) irt_annot = None # returning this annot_obj = mupdf.pdf_annot_obj(annot) found = 0 # loop thru MuPDF's internal annots array page = _pdf_annot_page(annot) irt_annot = mupdf.pdf_first_annot(page) while 1: assert isinstance(irt_annot, mupdf.PdfAnnot) if not irt_annot.m_internal: break irt_annot_obj = mupdf.pdf_annot_obj(irt_annot) o = mupdf.pdf_dict_gets(irt_annot_obj, 'IRT') if o.m_internal: if not mupdf.pdf_objcmp(o, annot_obj): found = 1 break irt_annot = mupdf.pdf_next_annot(irt_annot) if found: return irt_annot def JM_font_ascender(font): ''' need own versions of ascender / descender ''' assert isinstance(font, mupdf.FzFont) if _globals.skip_quad_corrections: return 0.8 return mupdf.fz_font_ascender(font) def JM_font_descender(font): ''' need own versions of ascender / descender ''' assert isinstance(font, mupdf.FzFont) if _globals.skip_quad_corrections: return -0.2 ret = mupdf.fz_font_descender(font) return ret def JM_is_word_delimiter(ch, delimiters): """Check if ch is an extra word delimiting character. 
""" if (0 or ch <= 32 or ch == 160 or 0x202a <= ch <= 0x202e ): # covers any whitespace plus unicodes that switch between # right-to-left and left-to-right languages return True if not delimiters: # no extra delimiters provided return False char = chr(ch) for d in delimiters: if d == char: return True return False def JM_is_rtl_char(ch): if ch < 0x590 or ch > 0x900: return False return True def JM_font_name(font): assert isinstance(font, mupdf.FzFont) name = mupdf.fz_font_name(font) s = name.find('+') if _globals.subset_fontnames or s == -1 or s != 6: return name return name[s + 1:] def JM_gather_fonts(pdf, dict_, fontlist, stream_xref): rc = 1 n = mupdf.pdf_dict_len(dict_) for i in range(n): refname = mupdf.pdf_dict_get_key(dict_, i) fontdict = mupdf.pdf_dict_get_val(dict_, i) if not mupdf.pdf_is_dict(fontdict): mupdf.fz_warn( f"'{mupdf.pdf_to_name(refname)}' is no font dict ({mupdf.pdf_to_num(fontdict)} 0 R)") continue subtype = mupdf.pdf_dict_get(fontdict, mupdf.PDF_ENUM_NAME_Subtype) basefont = mupdf.pdf_dict_get(fontdict, mupdf.PDF_ENUM_NAME_BaseFont) if not basefont.m_internal or mupdf.pdf_is_null(basefont): name = mupdf.pdf_dict_get(fontdict, mupdf.PDF_ENUM_NAME_Name) else: name = basefont encoding = mupdf.pdf_dict_get(fontdict, mupdf.PDF_ENUM_NAME_Encoding) if mupdf.pdf_is_dict(encoding): encoding = mupdf.pdf_dict_get(encoding, mupdf.PDF_ENUM_NAME_BaseEncoding) xref = mupdf.pdf_to_num(fontdict) ext = "n/a" if xref: ext = JM_get_fontextension(pdf, xref) entry = ( xref, ext, mupdf.pdf_to_name(subtype), JM_EscapeStrFromStr(mupdf.pdf_to_name(name)), mupdf.pdf_to_name(refname), mupdf.pdf_to_name(encoding), stream_xref, ) fontlist.append(entry) return rc def JM_gather_forms(doc, dict_: mupdf.PdfObj, imagelist, stream_xref: int): ''' Store info of a /Form xobject in Python list ''' assert isinstance(doc, mupdf.PdfDocument) rc = 1 n = mupdf.pdf_dict_len(dict_) for i in range(n): refname = mupdf.pdf_dict_get_key( dict_, i) imagedict = mupdf.pdf_dict_get_val(dict_, 
i) if not mupdf.pdf_is_dict(imagedict): mupdf.fz_warn( f"'{mupdf.pdf_to_name(refname)}' is no form dict ({mupdf.pdf_to_num(imagedict)} 0 R)") continue type_ = mupdf.pdf_dict_get(imagedict, PDF_NAME('Subtype')) if not mupdf.pdf_name_eq(type_, PDF_NAME('Form')): continue o = mupdf.pdf_dict_get(imagedict, PDF_NAME('BBox')) m = mupdf.pdf_dict_get(imagedict, PDF_NAME('Matrix')) if m.m_internal: mat = mupdf.pdf_to_matrix(m) else: mat = mupdf.FzMatrix() if o.m_internal: bbox = mupdf.fz_transform_rect( mupdf.pdf_to_rect(o), mat) else: bbox = mupdf.FzRect(mupdf.FzRect.Fixed_INFINITE) xref = mupdf.pdf_to_num(imagedict) entry = ( xref, mupdf.pdf_to_name( refname), stream_xref, JM_py_from_rect(bbox), ) imagelist.append(entry) return rc def JM_gather_images(doc: mupdf.PdfDocument, dict_: mupdf.PdfObj, imagelist, stream_xref: int): ''' Store info of an image in Python list ''' rc = 1 n = mupdf.pdf_dict_len( dict_) for i in range(n): refname = mupdf.pdf_dict_get_key(dict_, i) imagedict = mupdf.pdf_dict_get_val(dict_, i) if not mupdf.pdf_is_dict(imagedict): mupdf.fz_warn(f"'{mupdf.pdf_to_name(refname)}' is no image dict ({mupdf.pdf_to_num(imagedict)} 0 R)") continue type_ = mupdf.pdf_dict_get(imagedict, PDF_NAME('Subtype')) if not mupdf.pdf_name_eq(type_, PDF_NAME('Image')): continue xref = mupdf.pdf_to_num(imagedict) gen = 0 smask = mupdf.pdf_dict_geta(imagedict, PDF_NAME('SMask'), PDF_NAME('Mask')) if smask.m_internal: gen = mupdf.pdf_to_num(smask) filter_ = mupdf.pdf_dict_geta(imagedict, PDF_NAME('Filter'), PDF_NAME('F')) if mupdf.pdf_is_array(filter_): filter_ = mupdf.pdf_array_get(filter_, 0) altcs = mupdf.PdfObj(0) cs = mupdf.pdf_dict_geta(imagedict, PDF_NAME('ColorSpace'), PDF_NAME('CS')) if mupdf.pdf_is_array(cs): cses = cs cs = mupdf.pdf_array_get(cses, 0) if (mupdf.pdf_name_eq(cs, PDF_NAME('DeviceN')) or mupdf.pdf_name_eq(cs, PDF_NAME('Separation')) ): altcs = mupdf.pdf_array_get(cses, 2) if mupdf.pdf_is_array(altcs): altcs = mupdf.pdf_array_get(altcs, 0) width = 
mupdf.pdf_dict_geta(imagedict, PDF_NAME('Width'), PDF_NAME('W')) height = mupdf.pdf_dict_geta(imagedict, PDF_NAME('Height'), PDF_NAME('H')) bpc = mupdf.pdf_dict_geta(imagedict, PDF_NAME('BitsPerComponent'), PDF_NAME('BPC')) entry = ( xref, gen, mupdf.pdf_to_int(width), mupdf.pdf_to_int(height), mupdf.pdf_to_int(bpc), JM_EscapeStrFromStr(mupdf.pdf_to_name(cs)), JM_EscapeStrFromStr(mupdf.pdf_to_name(altcs)), JM_EscapeStrFromStr(mupdf.pdf_to_name(refname)), JM_EscapeStrFromStr(mupdf.pdf_to_name(filter_)), stream_xref, ) imagelist.append(entry) return rc def JM_get_annot_by_xref(page, xref): ''' retrieve annot by its xref ''' assert isinstance(page, mupdf.PdfPage) found = 0 # loop thru MuPDF's internal annots array annot = mupdf.pdf_first_annot(page) while 1: if not annot.m_internal: break if xref == mupdf.pdf_to_num(mupdf.pdf_annot_obj(annot)): found = 1 break annot = mupdf.pdf_next_annot( annot) if not found: raise Exception("xref %d is not an annot of this page" % xref) return annot def JM_get_annot_by_name(page, name): ''' retrieve annot by name (/NM key) ''' assert isinstance(page, mupdf.PdfPage) if not name: return found = 0 # loop thru MuPDF's internal annots and widget arrays annot = mupdf.pdf_first_annot(page) while 1: if not annot.m_internal: break response, len_ = mupdf.pdf_to_string(mupdf.pdf_dict_gets(mupdf.pdf_annot_obj(annot), "NM")) if name == response: found = 1 break annot = mupdf.pdf_next_annot(annot) if not found: raise Exception("'%s' is not an annot of this page" % name) return annot def JM_get_annot_id_list(page): names = [] annots = mupdf.pdf_dict_get( page.obj(), mupdf.PDF_ENUM_NAME_Annots) if not annots.m_internal: return names for i in range( mupdf.pdf_array_len(annots)): annot_obj = mupdf.pdf_array_get(annots, i) name = mupdf.pdf_dict_gets(annot_obj, "NM") if name.m_internal: names.append( mupdf.pdf_to_text_string(name) ) return names def JM_get_annot_xref_list( page_obj): ''' return the xrefs and /NM ids of a page's annots, links and fields 
    '''
    if g_use_extra:
        # delegate to the native implementation when available
        names = extra.JM_get_annot_xref_list( page_obj)
        return names
    names = []
    annots = mupdf.pdf_dict_get( page_obj, PDF_NAME('Annots'))
    n = mupdf.pdf_array_len( annots)
    for i in range( n):
        annot_obj = mupdf.pdf_array_get( annots, i)
        xref = mupdf.pdf_to_num( annot_obj)
        subtype = mupdf.pdf_dict_get( annot_obj, PDF_NAME('Subtype'))
        if not subtype.m_internal:
            continue    # subtype is required
        type_ = mupdf.pdf_annot_type_from_string( mupdf.pdf_to_name( subtype))
        if type_ == mupdf.PDF_ANNOT_UNKNOWN:
            continue    # only accept valid annot types
        id_ = mupdf.pdf_dict_gets( annot_obj, "NM")
        names.append( (xref, type_, mupdf.pdf_to_text_string( id_)))
    return names


def JM_get_annot_xref_list2(page):
    # Convenience wrapper: resolve the underlying PDF page; a non-PDF page
    # yields an empty list instead of raising.
    page = page._pdf_page(required=False)
    if not page.m_internal:
        return list()
    return JM_get_annot_xref_list( page.obj())


def JM_get_border_style(style):
    '''
    Return pdf_obj "border style" from Python str.

    Only the first letter is significant (either capitalisation);
    anything else defaults to /S (solid).
    '''
    val = mupdf.PDF_ENUM_NAME_S
    if style is None:
        return val
    s = style
    if s.startswith("b") or s.startswith("B"):
        val = mupdf.PDF_ENUM_NAME_B
    elif s.startswith("d") or s.startswith("D"):
        val = mupdf.PDF_ENUM_NAME_D
    elif s.startswith("i") or s.startswith("I"):
        val = mupdf.PDF_ENUM_NAME_I
    elif s.startswith("u") or s.startswith("U"):
        val = mupdf.PDF_ENUM_NAME_U
    elif s.startswith("s") or s.startswith("S"):
        val = mupdf.PDF_ENUM_NAME_S
    return val


def JM_get_font(
        fontname,
        fontfile,
        fontbuffer,
        script,
        lang,
        ordering,
        is_bold,
        is_italic,
        is_serif,
        embed,
        ):
    '''
    return a fz_font from a number of parameters

    Sources are tried in order: font file path, font buffer, CJK ordering,
    built-in/base-14 name, then NOTO/fallback lookup by script+lang.
    '''
    def fertig(font):
        # common exit: validate the font and apply the embedding flag
        if not font.m_internal:
            raise RuntimeError(MSG_FONT_FAILED)
        # if font allows this, set embedding
        if not font.m_internal.flags.never_embed:
            mupdf.fz_set_font_embedding(font, embed)
        return font
    index = 0
    font = None
    if fontfile:
        #goto have_file;
        font = mupdf.fz_new_font_from_file( None, fontfile, index, 0)
        return fertig(font)
    if fontbuffer:
        #goto have_buffer;
        res = JM_BufferFromBytes(fontbuffer)
        font = mupdf.fz_new_font_from_buffer( None, res, index, 0)
        return 
fertig(font) if ordering > -1: # goto have_cjk; font = mupdf.fz_new_cjk_font(ordering) return fertig(font) if fontname: # goto have_base14; # Base-14 or a MuPDF builtin font font = mupdf.fz_new_base14_font(fontname) if font.m_internal: return fertig(font) font = mupdf.fz_new_builtin_font(fontname, is_bold, is_italic) return fertig(font) # Check for NOTO font #have_noto:; data, size, index = mupdf.fz_lookup_noto_font( script, lang) font = None if data: font = mupdf.fz_new_font_from_memory( None, data, size, index, 0) if font.m_internal: return fertig(font) font = mupdf.fz_load_fallback_font( script, lang, is_serif, is_bold, is_italic) return fertig(font) def JM_get_fontbuffer(doc, xref): ''' Return the contents of a font file, identified by xref ''' if xref < 1: return o = mupdf.pdf_load_object(doc, xref) desft = mupdf.pdf_dict_get(o, PDF_NAME('DescendantFonts')) if desft.m_internal: obj = mupdf.pdf_resolve_indirect(mupdf.pdf_array_get(desft, 0)) obj = mupdf.pdf_dict_get(obj, PDF_NAME('FontDescriptor')) else: obj = mupdf.pdf_dict_get(o, PDF_NAME('FontDescriptor')) if not obj.m_internal: message(f"invalid font - FontDescriptor missing") return o = obj stream = None obj = mupdf.pdf_dict_get(o, PDF_NAME('FontFile')) if obj.m_internal: stream = obj # ext = "pfa" obj = mupdf.pdf_dict_get(o, PDF_NAME('FontFile2')) if obj.m_internal: stream = obj # ext = "ttf" obj = mupdf.pdf_dict_get(o, PDF_NAME('FontFile3')) if obj.m_internal: stream = obj obj = mupdf.pdf_dict_get(obj, PDF_NAME('Subtype')) if obj.m_internal and not mupdf.pdf_is_name(obj): message("invalid font descriptor subtype") return if mupdf.pdf_name_eq(obj, PDF_NAME('Type1C')): pass # Prev code did: ext = "cff", but this has no effect. elif mupdf.pdf_name_eq(obj, PDF_NAME('CIDFontType0C')): pass # Prev code did: ext = "cid", but this has no effect. elif mupdf.pdf_name_eq(obj, PDF_NAME('OpenType')): pass # Prev code did: ext = "otf", but this has no effect. 
*/ else: message('warning: unhandled font type {pdf_to_name(ctx, obj)!r}') if not stream: message('warning: unhandled font type') return return mupdf.pdf_load_stream(stream) def JM_get_resource_properties(ref): ''' Return the items of Resources/Properties (used for Marked Content) Argument may be e.g. a page object or a Form XObject ''' properties = mupdf.pdf_dict_getl(ref, PDF_NAME('Resources'), PDF_NAME('Properties')) if not properties.m_internal: return () else: n = mupdf.pdf_dict_len(properties) if n < 1: return () rc = [] for i in range(n): key = mupdf.pdf_dict_get_key(properties, i) val = mupdf.pdf_dict_get_val(properties, i) c = mupdf.pdf_to_name(key) xref = mupdf.pdf_to_num(val) rc.append((c, xref)) return rc def JM_get_widget_by_xref( page, xref): ''' retrieve widget by its xref ''' found = False annot = mupdf.pdf_first_widget( page) while annot.m_internal: annot_obj = mupdf.pdf_annot_obj( annot) if xref == mupdf.pdf_to_num( annot_obj): found = True break annot = mupdf.pdf_next_widget( annot) if not found: raise Exception( f"xref {xref} is not a widget of this page") return Annot( annot) def JM_get_widget_properties(annot, Widget): ''' Populate a Python Widget object with the values from a PDF form field. Called by "Page.first_widget" and "Widget.next". ''' #log( '{type(annot)=}') annot_obj = mupdf.pdf_annot_obj(annot.this) #log( 'Have called mupdf.pdf_annot_obj()') page = _pdf_annot_page(annot.this) pdf = page.doc() tw = annot def SETATTR(key, value): setattr(Widget, key, value) def SETATTR_DROP(mod, key, value): # Original C code for this function deletes if PyObject* is NULL. We # don't have a representation for that in Python - e.g. None is not # represented by NULL. 
setattr(mod, key, value) #log( '=== + mupdf.pdf_widget_type(tw)') field_type = mupdf.pdf_widget_type(tw.this) #log( '=== - mupdf.pdf_widget_type(tw)') Widget.field_type = field_type if field_type == mupdf.PDF_WIDGET_TYPE_SIGNATURE: if mupdf.pdf_signature_is_signed(pdf, annot_obj): SETATTR("is_signed", True) else: SETATTR("is_signed",False) else: SETATTR("is_signed", None) SETATTR_DROP(Widget, "border_style", JM_UnicodeFromStr(mupdf.pdf_field_border_style(annot_obj))) SETATTR_DROP(Widget, "field_type_string", JM_UnicodeFromStr(JM_field_type_text(field_type))) field_name = mupdf.pdf_load_field_name(annot_obj) SETATTR_DROP(Widget, "field_name", field_name) def pdf_dict_get_inheritable_nonempty_label(node, key): ''' This is a modified version of MuPDF's pdf_dict_get_inheritable(), with some changes: * Returns string from pdf_to_text_string() or None if not found. * Recurses to parent if current node exists but with empty string value. ''' slow = node halfbeat = 11 # Don't start moving slow pointer for a while. while 1: if not node.m_internal: return val = mupdf.pdf_dict_get(node, key) if val.m_internal: label = mupdf.pdf_to_text_string(val) if label: return label node = mupdf.pdf_dict_get(node, PDF_NAME('Parent')) if node.m_internal == slow.m_internal: raise Exception("cycle in resources") halfbeat -= 1 if halfbeat == 0: slow = mupdf.pdf_dict_get(slow, PDF_NAME('Parent')) halfbeat = 2 # In order to address #3950, we use our modified pdf_dict_get_inheritable() # to ignore empty-string child values. 
label = pdf_dict_get_inheritable_nonempty_label(annot_obj, PDF_NAME('TU')) if label is not None: SETATTR_DROP(Widget, "field_label", label) fvalue = None if field_type == mupdf.PDF_WIDGET_TYPE_RADIOBUTTON: obj = mupdf.pdf_dict_get( annot_obj, PDF_NAME('Parent')) # owning RB group if obj.m_internal: SETATTR_DROP(Widget, "rb_parent", mupdf.pdf_to_num( obj)) obj = mupdf.pdf_dict_get(annot_obj, PDF_NAME('AS')) if obj.m_internal: fvalue = mupdf.pdf_to_name(obj) if not fvalue: fvalue = mupdf.pdf_field_value(annot_obj) SETATTR_DROP(Widget, "field_value", JM_UnicodeFromStr(fvalue)) SETATTR_DROP(Widget, "field_display", mupdf.pdf_field_display(annot_obj)) border_width = mupdf.pdf_to_real(mupdf.pdf_dict_getl(annot_obj, PDF_NAME('BS'), PDF_NAME('W'))) if border_width == 0: border_width = 1 SETATTR_DROP(Widget, "border_width", border_width) obj = mupdf.pdf_dict_getl(annot_obj, PDF_NAME('BS'), PDF_NAME('D')) if mupdf.pdf_is_array(obj): n = mupdf.pdf_array_len(obj) d = [0] * n for i in range(n): d[i] = mupdf.pdf_to_int(mupdf.pdf_array_get(obj, i)) SETATTR_DROP(Widget, "border_dashes", d) SETATTR_DROP(Widget, "text_maxlen", mupdf.pdf_text_widget_max_len(tw.this)) SETATTR_DROP(Widget, "text_format", mupdf.pdf_text_widget_format(tw.this)) obj = mupdf.pdf_dict_getl(annot_obj, PDF_NAME('MK'), PDF_NAME('BG')) if mupdf.pdf_is_array(obj): n = mupdf.pdf_array_len(obj) col = [0] * n for i in range(n): col[i] = mupdf.pdf_to_real(mupdf.pdf_array_get(obj, i)) SETATTR_DROP(Widget, "fill_color", col) obj = mupdf.pdf_dict_getl(annot_obj, PDF_NAME('MK'), PDF_NAME('BC')) if mupdf.pdf_is_array(obj): n = mupdf.pdf_array_len(obj) col = [0] * n for i in range(n): col[i] = mupdf.pdf_to_real(mupdf.pdf_array_get(obj, i)) SETATTR_DROP(Widget, "border_color", col) SETATTR_DROP(Widget, "choice_values", JM_choice_options(annot)) da = mupdf.pdf_to_text_string(mupdf.pdf_dict_get_inheritable(annot_obj, PDF_NAME('DA'))) SETATTR_DROP(Widget, "_text_da", JM_UnicodeFromStr(da)) obj = mupdf.pdf_dict_getl(annot_obj, 
PDF_NAME('MK'), PDF_NAME('CA')) if obj.m_internal: SETATTR_DROP(Widget, "button_caption", JM_UnicodeFromStr(mupdf.pdf_to_text_string(obj))) SETATTR_DROP(Widget, "field_flags", mupdf.pdf_field_flags(annot_obj)) # call Py method to reconstruct text color, font name, size Widget._parse_da() # extract JavaScript action texts s = mupdf.pdf_dict_get(annot_obj, PDF_NAME('A')) ss = JM_get_script(s) SETATTR_DROP(Widget, "script", ss) SETATTR_DROP(Widget, "script_stroke", JM_get_script(mupdf.pdf_dict_getl(annot_obj, PDF_NAME('AA'), PDF_NAME('K'))) ) SETATTR_DROP(Widget, "script_format", JM_get_script(mupdf.pdf_dict_getl(annot_obj, PDF_NAME('AA'), PDF_NAME('F'))) ) SETATTR_DROP(Widget, "script_change", JM_get_script(mupdf.pdf_dict_getl(annot_obj, PDF_NAME('AA'), PDF_NAME('V'))) ) SETATTR_DROP(Widget, "script_calc", JM_get_script(mupdf.pdf_dict_getl(annot_obj, PDF_NAME('AA'), PDF_NAME('C'))) ) SETATTR_DROP(Widget, "script_blur", JM_get_script(mupdf.pdf_dict_getl(annot_obj, PDF_NAME('AA'), mupdf.pdf_new_name('Bl'))) ) SETATTR_DROP(Widget, "script_focus", JM_get_script(mupdf.pdf_dict_getl(annot_obj, PDF_NAME('AA'), mupdf.pdf_new_name('Fo'))) ) def JM_get_fontextension(doc, xref): ''' Return the file extension of a font file, identified by xref ''' if xref < 1: return "n/a" o = mupdf.pdf_load_object(doc, xref) desft = mupdf.pdf_dict_get(o, PDF_NAME('DescendantFonts')) if desft.m_internal: obj = mupdf.pdf_resolve_indirect(mupdf.pdf_array_get(desft, 0)) obj = mupdf.pdf_dict_get(obj, PDF_NAME('FontDescriptor')) else: obj = mupdf.pdf_dict_get(o, PDF_NAME('FontDescriptor')) if not obj.m_internal: return "n/a" # this is a base-14 font o = obj # we have the FontDescriptor obj = mupdf.pdf_dict_get(o, PDF_NAME('FontFile')) if obj.m_internal: return "pfa" obj = mupdf.pdf_dict_get(o, PDF_NAME('FontFile2')) if obj.m_internal: return "ttf" obj = mupdf.pdf_dict_get(o, PDF_NAME('FontFile3')) if obj.m_internal: obj = mupdf.pdf_dict_get(obj, PDF_NAME('Subtype')) if obj.m_internal and not 
mupdf.pdf_is_name(obj): message("invalid font descriptor subtype") return "n/a" if mupdf.pdf_name_eq(obj, PDF_NAME('Type1C')): return "cff" elif mupdf.pdf_name_eq(obj, PDF_NAME('CIDFontType0C')): return "cid" elif mupdf.pdf_name_eq(obj, PDF_NAME('OpenType')): return "otf" else: message("unhandled font type '%s'", mupdf.pdf_to_name(obj)) return "n/a" def JM_get_ocg_arrays_imp(arr): ''' Get OCG arrays from OC configuration Returns dict {"basestate":name, "on":list, "off":list, "rbg":list, "locked":list} ''' list_ = list() if mupdf.pdf_is_array( arr): n = mupdf.pdf_array_len( arr) for i in range(n): obj = mupdf.pdf_array_get( arr, i) item = mupdf.pdf_to_num( obj) if item not in list_: list_.append(item) return list_ def JM_get_ocg_arrays(conf): rc = dict() arr = mupdf.pdf_dict_get( conf, PDF_NAME('ON')) list_ = JM_get_ocg_arrays_imp( arr) if list_: rc["on"] = list_ arr = mupdf.pdf_dict_get( conf, PDF_NAME('OFF')) list_ = JM_get_ocg_arrays_imp( arr) if list_: rc["off"] = list_ arr = mupdf.pdf_dict_get( conf, PDF_NAME('Locked')) list_ = JM_get_ocg_arrays_imp( arr) if list_: rc['locked'] = list_ list_ = list() arr = mupdf.pdf_dict_get( conf, PDF_NAME('RBGroups')) if mupdf.pdf_is_array( arr): n = mupdf.pdf_array_len( arr) for i in range(n): obj = mupdf.pdf_array_get( arr, i) list1 = JM_get_ocg_arrays_imp( obj) list_.append(list1) if list_: rc["rbgroups"] = list_ obj = mupdf.pdf_dict_get( conf, PDF_NAME('BaseState')) if obj.m_internal: state = mupdf.pdf_to_name( obj) rc["basestate"] = state return rc def JM_get_page_labels(liste, nums): n = mupdf.pdf_array_len(nums) for i in range(0, n, 2): key = mupdf.pdf_resolve_indirect( mupdf.pdf_array_get(nums, i)) pno = mupdf.pdf_to_int(key) val = mupdf.pdf_resolve_indirect( mupdf.pdf_array_get(nums, i + 1)) res = JM_object_to_buffer(val, 1, 0) c = mupdf.fz_buffer_extract(res) assert isinstance(c, bytes) c = c.decode('utf-8') liste.append( (pno, c)) def JM_get_script(key): ''' JavaScript extractor Returns either the script source or 
    None.

    Parameter is a PDF action dictionary, which must have keys /S and /JS.
    The value of /S must be '/JavaScript'. The value of /JS is returned.
    '''
    if not key.m_internal:
        return
    j = mupdf.pdf_dict_get(key, PDF_NAME('S'))
    jj = mupdf.pdf_to_name(j)
    if jj == "JavaScript":
        js = mupdf.pdf_dict_get(key, PDF_NAME('JS'))
        if not js.m_internal:
            return
    else:
        return
    # /JS may be either a direct string or a stream object.
    if mupdf.pdf_is_string(js):
        script = JM_UnicodeFromStr(mupdf.pdf_to_text_string(js))
    elif mupdf.pdf_is_stream(js):
        res = mupdf.pdf_load_stream(js)
        script = JM_EscapeStrFromBuffer(res)
    else:
        return
    if script:
        # do not return an empty script
        return script
    return


def JM_have_operation(pdf):
    '''
    Ensure valid journalling state.

    Returns 0 if a journal exists but no undo/redo step is available,
    else 1.
    '''
    if pdf.m_internal.journal and not mupdf.pdf_undoredo_step(pdf, 0):
        return 0
    return 1


def JM_image_extension(type_):
    '''
    Return the file extension string for a MuPDF image type constant,
    or "n/a" if the type is not recognised.
    '''
    if type_ == mupdf.FZ_IMAGE_FAX:
        return "fax"
    if type_ == mupdf.FZ_IMAGE_RAW:
        return "raw"
    if type_ == mupdf.FZ_IMAGE_FLATE:
        return "flate"
    if type_ == mupdf.FZ_IMAGE_LZW:
        return "lzw"
    if type_ == mupdf.FZ_IMAGE_RLD:
        return "rld"
    if type_ == mupdf.FZ_IMAGE_BMP:
        return "bmp"
    if type_ == mupdf.FZ_IMAGE_GIF:
        return "gif"
    if type_ == mupdf.FZ_IMAGE_JBIG2:
        return "jb2"
    if type_ == mupdf.FZ_IMAGE_JPEG:
        return "jpeg"
    if type_ == mupdf.FZ_IMAGE_JPX:
        return "jpx"
    if type_ == mupdf.FZ_IMAGE_JXR:
        return "jxr"
    if type_ == mupdf.FZ_IMAGE_PNG:
        return "png"
    if type_ == mupdf.FZ_IMAGE_PNM:
        return "pnm"
    if type_ == mupdf.FZ_IMAGE_TIFF:
        return "tiff"
    #if type_ == mupdf.FZ_IMAGE_PSD: return "psd"
    return "n/a"


# fixme: need to avoid using a global for this.
g_img_info = None def JM_image_filter(opaque, ctm, name, image): assert isinstance(ctm, mupdf.FzMatrix) r = mupdf.FzRect(mupdf.FzRect.Fixed_UNIT) q = mupdf.fz_transform_quad( mupdf.fz_quad_from_rect(r), ctm) q = mupdf.fz_transform_quad( q, g_img_info_matrix) temp = name, JM_py_from_quad(q) g_img_info.append(temp) def JM_image_profile( imagedata, keep_image): ''' Return basic properties of an image provided as bytes or bytearray The function creates an fz_image and optionally returns it. ''' if not imagedata: return None # nothing given len_ = len( imagedata) if len_ < 8: message( "bad image data") return None c = imagedata #log( 'calling mfz_recognize_image_format with {c!r=}') type_ = mupdf.fz_recognize_image_format( c) if type_ == mupdf.FZ_IMAGE_UNKNOWN: return None if keep_image: res = mupdf.fz_new_buffer_from_copied_data( c, len_) else: res = mupdf.fz_new_buffer_from_shared_data( c, len_) image = mupdf.fz_new_image_from_buffer( res) ctm = mupdf.fz_image_orientation_matrix( image) xres, yres = mupdf.fz_image_resolution(image) orientation = mupdf.fz_image_orientation( image) cs_name = mupdf.fz_colorspace_name( image.colorspace()) result = dict() result[ dictkey_width] = image.w() result[ dictkey_height] = image.h() result[ "orientation"] = orientation result[ dictkey_matrix] = JM_py_from_matrix(ctm) result[ dictkey_xres] = xres result[ dictkey_yres] = yres result[ dictkey_colorspace] = image.n() result[ dictkey_bpc] = image.bpc() result[ dictkey_ext] = JM_image_extension(type_) result[ dictkey_cs_name] = cs_name if keep_image: result[ dictkey_image] = image return result def JM_image_reporter(page): doc = page.doc() global g_img_info_matrix g_img_info_matrix = mupdf.FzMatrix() mediabox = mupdf.FzRect() mupdf.pdf_page_transform(page, mediabox, g_img_info_matrix) class SanitizeFilterOptions(mupdf.PdfSanitizeFilterOptions2): def __init__(self): super().__init__() self.use_virtual_image_filter() def image_filter(self, ctx, ctm, name, image, scissor): 
JM_image_filter(None, mupdf.FzMatrix(ctm), name, image) sanitize_filter_options = SanitizeFilterOptions() filter_options = _make_PdfFilterOptions( instance_forms=1, ascii=1, no_update=1, sanitize=1, sopts=sanitize_filter_options, ) global g_img_info g_img_info = [] mupdf.pdf_filter_page_contents( doc, page, filter_options) rc = tuple(g_img_info) g_img_info = [] return rc def JM_fitz_config(): have_TOFU = not hasattr(mupdf, 'TOFU') have_TOFU_BASE14 = not hasattr(mupdf, 'TOFU_BASE14') have_TOFU_CJK = not hasattr(mupdf, 'TOFU_CJK') have_TOFU_CJK_EXT = not hasattr(mupdf, 'TOFU_CJK_EXT') have_TOFU_CJK_LANG = not hasattr(mupdf, 'TOFU_CJK_LANG') have_TOFU_EMOJI = not hasattr(mupdf, 'TOFU_EMOJI') have_TOFU_HISTORIC = not hasattr(mupdf, 'TOFU_HISTORIC') have_TOFU_SIL = not hasattr(mupdf, 'TOFU_SIL') have_TOFU_SYMBOL = not hasattr(mupdf, 'TOFU_SYMBOL') ret = dict() ret["base14"] = have_TOFU_BASE14 ret["cbz"] = bool(mupdf.FZ_ENABLE_CBZ) ret["epub"] = bool(mupdf.FZ_ENABLE_EPUB) ret["html"] = bool(mupdf.FZ_ENABLE_HTML) ret["icc"] = bool(mupdf.FZ_ENABLE_ICC) ret["img"] = bool(mupdf.FZ_ENABLE_IMG) ret["jpx"] = bool(mupdf.FZ_ENABLE_JPX) ret["js"] = bool(mupdf.FZ_ENABLE_JS) ret["pdf"] = bool(mupdf.FZ_ENABLE_PDF) ret["plotter-cmyk"] = bool(mupdf.FZ_PLOTTERS_CMYK) ret["plotter-g"] = bool(mupdf.FZ_PLOTTERS_G) ret["plotter-n"] = bool(mupdf.FZ_PLOTTERS_N) ret["plotter-rgb"] = bool(mupdf.FZ_PLOTTERS_RGB) ret["py-memory"] = bool(JM_MEMORY) ret["svg"] = bool(mupdf.FZ_ENABLE_SVG) ret["tofu"] = have_TOFU ret["tofu-cjk"] = have_TOFU_CJK ret["tofu-cjk-ext"] = have_TOFU_CJK_EXT ret["tofu-cjk-lang"] = have_TOFU_CJK_LANG ret["tofu-emoji"] = have_TOFU_EMOJI ret["tofu-historic"] = have_TOFU_HISTORIC ret["tofu-sil"] = have_TOFU_SIL ret["tofu-symbol"] = have_TOFU_SYMBOL ret["xps"] = bool(mupdf.FZ_ENABLE_XPS) return ret def JM_insert_contents(pdf, pageref, newcont, overlay): ''' Insert a buffer as a new separate /Contents object of a page. 1. Create a new stream object from buffer 'newcont' 2. 
If /Contents already is an array, then just prepend or append this object 3. Else, create new array and put old content obj and this object into it. If the page had no /Contents before, just create a 1-item array. ''' contents = mupdf.pdf_dict_get(pageref, PDF_NAME('Contents')) newconts = mupdf.pdf_add_stream(pdf, newcont, mupdf.PdfObj(), 0) xref = mupdf.pdf_to_num(newconts) if mupdf.pdf_is_array(contents): if overlay: # append new object mupdf.pdf_array_push(contents, newconts) else: # prepend new object mupdf.pdf_array_insert(contents, newconts, 0) else: carr = mupdf.pdf_new_array(pdf, 5) if overlay: if contents.m_internal: mupdf.pdf_array_push(carr, contents) mupdf.pdf_array_push(carr, newconts) else: mupdf.pdf_array_push(carr, newconts) if contents.m_internal: mupdf.pdf_array_push(carr, contents) mupdf.pdf_dict_put(pageref, PDF_NAME('Contents'), carr) return xref def JM_insert_font(pdf, bfname, fontfile, fontbuffer, set_simple, idx, wmode, serif, encoding, ordering): ''' Insert a font in a PDF ''' font = None res = None data = None ixref = 0 index = 0 simple = 0 value=None name=None subt=None exto = None ENSURE_OPERATION(pdf) # check for CJK font if ordering > -1: data, size, index = mupdf.fz_lookup_cjk_font(ordering) if data: font = mupdf.fz_new_font_from_memory(None, data, size, index, 0) font_obj = mupdf.pdf_add_cjk_font(pdf, font, ordering, wmode, serif) exto = "n/a" simple = 0 #goto weiter; else: # check for PDF Base-14 font if bfname: data, size = mupdf.fz_lookup_base14_font(bfname) if data: font = mupdf.fz_new_font_from_memory(bfname, data, size, 0, 0) font_obj = mupdf.pdf_add_simple_font(pdf, font, encoding) exto = "n/a" simple = 1 #goto weiter; else: if fontfile: font = mupdf.fz_new_font_from_file(None, fontfile, idx, 0) else: res = JM_BufferFromBytes(fontbuffer) if not res.m_internal: RAISEPY(MSG_FILE_OR_BUFFER, PyExc_ValueError) font = mupdf.fz_new_font_from_buffer(None, res, idx, 0) if not set_simple: font_obj = mupdf.pdf_add_cid_font(pdf, font) 
                simple = 0
            else:
                font_obj = mupdf.pdf_add_simple_font(pdf, font, encoding)
                simple = 2
    # All branches above join here (the C original used 'goto weiter').
    #weiter: ;
    ixref = mupdf.pdf_to_num(font_obj)
    name = JM_EscapeStrFromStr(
            mupdf.pdf_to_name(
                mupdf.pdf_dict_get(font_obj, PDF_NAME('BaseFont'))))
    subt = JM_UnicodeFromStr(
            mupdf.pdf_to_name(
                mupdf.pdf_dict_get(
                    font_obj,
                    PDF_NAME('Subtype'))))
    if not exto:
        exto = JM_UnicodeFromStr(JM_get_fontextension(pdf, ixref))
    asc = mupdf.fz_font_ascender(font)
    dsc = mupdf.fz_font_descender(font)
    # Return (xref, info-dict) describing the inserted font.
    value = [
            ixref,
            {
                "name": name,        # base font name
                "type": subt,        # subtype
                "ext": exto,        # file extension
                "simple": bool(simple), # simple font?
                "ordering": ordering,   # CJK font?
                "ascender": asc,
                "descender": dsc,
            },
            ]
    return value


def JM_irect_from_py(r):
    '''
    PySequence to mupdf.FzIrect. Default: infinite irect
    '''
    if isinstance(r, mupdf.FzIrect):
        return r
    if isinstance(r, IRect):
        r = mupdf.FzIrect( r.x0, r.y0, r.x1, r.y1)
        return r
    if isinstance(r, Rect):
        ret = mupdf.FzRect(r.x0, r.y0, r.x1, r.y1)
        ret = mupdf.FzIrect(ret)    # Uses fz_irect_from_rect().
        return ret
    if isinstance(r, mupdf.FzRect):
        ret = mupdf.FzIrect(r)  # Uses fz_irect_from_rect().
        return ret
    # Generic 4-item sequence: clamp each coordinate into the valid
    # "infinite rect" coordinate range before building the irect.
    if not r or not PySequence_Check(r) or PySequence_Size(r) != 4:
        return mupdf.FzIrect(mupdf.fz_infinite_irect)
    f = [0, 0, 0, 0]
    for i in range(4):
        f[i] = r[i]
        if f[i] is None:
            return mupdf.FzIrect(mupdf.fz_infinite_irect)
        if f[i] < FZ_MIN_INF_RECT:
            f[i] = FZ_MIN_INF_RECT
        if f[i] > FZ_MAX_INF_RECT:
            f[i] = FZ_MAX_INF_RECT
    return mupdf.fz_make_irect(f[0], f[1], f[2], f[3])


def JM_listbox_value( annot):
    '''
    ListBox retrieve value
    '''
    # may be single value or array
    annot_obj = mupdf.pdf_annot_obj( annot)
    optarr = mupdf.pdf_dict_get( annot_obj, PDF_NAME('V'))
    if mupdf.pdf_is_string( optarr):    # a single string
        return mupdf.pdf_to_text_string( optarr)
    # value is an array (may have len 0)
    n = mupdf.pdf_array_len( optarr)
    liste = []
    # extract a list of strings
    # each entry may again be an array: take second entry then
    for i in range( n):
        elem = mupdf.pdf_array_get( optarr, i)
        if mupdf.pdf_is_array( elem):
            elem = mupdf.pdf_array_get( elem, 1)
        liste.append( JM_UnicodeFromStr( mupdf.pdf_to_text_string( elem)))
    return liste


def JM_make_annot_DA(annot, ncol, col, fontname, fontsize):
    # PyMuPDF uses a fz_buffer to build up the string, but it's non-trivial to
    # convert the fz_buffer's `unsigned char*` into a `const char*` suitable
    # for passing to pdf_dict_put_text_string(). So instead we build up the
    # string directly in Python.
    # Build the /DA (default appearance) string in PDF graphics syntax:
    # color operator (g / rg / k) followed by the font selection (Tf).
    buf = ''
    if ncol < 1:
        buf += f'0 g '
    elif ncol == 1:
        buf += f'{col[0]:g} g '
    elif ncol == 2:
        # NOTE(review): a 2-component color is not representable here and is
        # rejected hard — confirm callers can never pass ncol == 2.
        assert 0
    elif ncol == 3:
        buf += f'{col[0]:g} {col[1]:g} {col[2]:g} rg '
    else:
        buf += f'{col[0]:g} {col[1]:g} {col[2]:g} {col[3]:g} k '
    buf += f'/{JM_expand_fname(fontname)} {fontsize} Tf'
    mupdf.pdf_dict_put_text_string(mupdf.pdf_annot_obj(annot), mupdf.PDF_ENUM_NAME_DA, buf)


def JM_make_spanlist(line_dict, line, raw, buff, tp_rect):
    # Build the list of spans for one stext line, storing it in line_dict.
    # Characters sharing the same style are merged into one span; returns
    # the union rectangle of all produced spans.
    if 1 or g_use_extra:
        # C++ implementation is always used; the Python code below is kept
        # as reference only.
        return extra.JM_make_spanlist(line_dict, line, raw, buff, tp_rect)
    char_list = None
    span_list = []
    mupdf.fz_clear_buffer(buff)
    span_rect = mupdf.FzRect(mupdf.FzRect.Fixed_EMPTY)
    line_rect = mupdf.FzRect(mupdf.FzRect.Fixed_EMPTY)

    class char_style:
        # Value-object holding the style attributes of one character; a span
        # break happens whenever the current style differs from the previous.
        def __init__(self, rhs=None):
            if rhs:
                self.size = rhs.size
                self.flags = rhs.flags
                if mupdf_version_tuple >= (1, 25, 2):
                    self.char_flags = rhs.char_flags
                self.font = rhs.font
                self.argb = rhs.argb
                self.asc = rhs.asc
                self.desc = rhs.desc
                self.bidi = rhs.bidi
            else:
                self.size = -1
                self.flags = -1
                if mupdf_version_tuple >= (1, 25, 2):
                    self.char_flags = -1
                self.font = ''
                self.argb = -1
                self.asc = 0
                self.desc = 0
                self.bidi = 0
        def __str__(self):
            # NOTE(review): 'self.color' is never set on this class (the
            # attribute is 'argb') — calling __str__ would raise. Confirm
            # this is dead debug code.
            ret = f'{self.size} {self.flags}'
            if mupdf_version_tuple >= (1, 25, 2):
                ret += f' {self.char_flags}'
            ret += f' {self.font} {self.color} {self.asc} {self.desc}'
            return ret
    old_style = char_style()
    style = char_style()
    span = None
    span_origin = None
    for ch in line:
        # start-trace
        r = JM_char_bbox(line, ch)
        if (not JM_rects_overlap(tp_rect, r)
                and not mupdf.fz_is_infinite_rect(tp_rect)
                ):
            continue
        # Info from:
        # detect_super_script()
        # fz_font_is_italic()
        # fz_font_is_serif()
        # fz_font_is_monospaced()
        # fz_font_is_bold()
        flags = JM_char_font_flags(mupdf.FzFont(mupdf.ll_fz_keep_font(ch.m_internal.font)), line, ch)
        origin = mupdf.FzPoint(ch.m_internal.origin)
        style.size = ch.m_internal.size
        style.flags = flags
        if mupdf_version_tuple >= (1, 25, 2):
            # FZ_STEXT_SYNTHETIC is per-char, not per-span.
            style.char_flags = ch.m_internal.flags & ~mupdf.FZ_STEXT_SYNTHETIC
        style.font = JM_font_name(mupdf.FzFont(mupdf.ll_fz_keep_font(ch.m_internal.font)))
        style.argb = ch.m_internal.argb
        style.asc = JM_font_ascender(mupdf.FzFont(mupdf.ll_fz_keep_font(ch.m_internal.font)))
        style.desc = JM_font_descender(mupdf.FzFont(mupdf.ll_fz_keep_font(ch.m_internal.font)))
        style.bidi = ch.m_internal.bidi
        # Any style change starts a new span; flush the previous one first.
        if (style.size != old_style.size
                or style.flags != old_style.flags
                or (mupdf_version_tuple >= (1, 25, 2)
                    and (style.char_flags != old_style.char_flags)
                    )
                or style.argb != old_style.argb
                or style.font != old_style.font
                or style.bidi != old_style.bidi
                ):
            if old_style.size >= 0:
                # not first one, output previous
                if raw:
                    # put character list in the span
                    span[dictkey_chars] = char_list
                    char_list = None
                else:
                    # put text string in the span
                    span[dictkey_text] = JM_EscapeStrFromBuffer( buff)
                    mupdf.fz_clear_buffer(buff)
                span[dictkey_origin] = JM_py_from_point(span_origin)
                span[dictkey_bbox] = JM_py_from_rect(span_rect)
                line_rect = mupdf.fz_union_rect(line_rect, span_rect)
                span_list.append( span)
                span = None
            span = dict()
            asc = style.asc
            desc = style.desc
            # Guard against degenerate font metrics: substitute a plausible
            # 0.9 / -0.1 split when the ascender is (near) zero.
            if style.asc < 1e-3:
                asc = 0.9
                desc = -0.1
            span[dictkey_size] = style.size
            span[dictkey_flags] = style.flags
            span[dictkey_bidi] = style.bidi
            if mupdf_version_tuple >= (1, 25, 2):
                span[dictkey_char_flags] = style.char_flags
            span[dictkey_font] = JM_EscapeStrFromStr(style.font)
            span[dictkey_color] = style.argb & 0xffffff
            if mupdf_version_tuple >= (1, 25, 0):
                span['alpha'] = style.argb >> 24
            span["ascender"] = asc
            span["descender"] = desc
            # Need to be careful here - doing 'old_style=style' does a shallow
            # copy, but we need to keep old_style as a distinct instance.
            old_style = char_style(style)
            span_rect = r
            span_origin = origin
        span_rect = mupdf.fz_union_rect(span_rect, r)
        if raw:
            # make and append a char dict
            char_dict = dict()
            char_dict[dictkey_origin] = JM_py_from_point( ch.m_internal.origin)
            char_dict[dictkey_bbox] = JM_py_from_rect(r)
            char_dict[dictkey_c] = chr(ch.m_internal.c)
            char_dict['synthetic'] = bool(ch.m_internal.flags & mupdf.FZ_STEXT_SYNTHETIC)
            if char_list is None:
                char_list = []
            char_list.append(char_dict)
        else:
            # add character byte to buffer
            JM_append_rune(buff, ch.m_internal.c)
    # all characters processed, now flush remaining span
    if span:
        if raw:
            span[dictkey_chars] = char_list
            char_list = None
        else:
            span[dictkey_text] = JM_EscapeStrFromBuffer(buff)
            mupdf.fz_clear_buffer(buff)
        span[dictkey_origin] = JM_py_from_point(span_origin)
        span[dictkey_bbox] = JM_py_from_rect(span_rect)
        if not mupdf.fz_is_empty_rect(span_rect):
            span_list.append(span)
            line_rect = mupdf.fz_union_rect(line_rect, span_rect)
        span = None
    # NOTE(review): both branches below are identical — the distinction
    # between empty and non-empty line_rect appears to be leftover.
    if not mupdf.fz_is_empty_rect(line_rect):
        line_dict[dictkey_spans] = span_list
    else:
        line_dict[dictkey_spans] = span_list
    return line_rect


def _make_image_dict(img, img_dict):
    """Populate a dictionary with information extracted from a given image.

    Used by 'Document.extract_image' and by 'JM_make_image_block'. Both
    of these functions will add some more specific information.
""" img_type = img.fz_compressed_image_type() ext = JM_image_extension(img_type) # compressed image buffer if present, else None ll_cbuf = mupdf.ll_fz_compressed_image_buffer(img.m_internal) if (0 or not ll_cbuf or img_type in (mupdf.FZ_IMAGE_JBIG2, mupdf.FZ_IMAGE_UNKNOWN) or img_type < mupdf.FZ_IMAGE_BMP ): # not an image with a compressed buffer: convert to PNG res = mupdf.fz_new_buffer_from_image_as_png( img, mupdf.FzColorParams(mupdf.fz_default_color_params), ) ext = "png" elif ext == "jpeg" and img.n() == 4: # JPEG with CMYK: invert colors res = mupdf.fz_new_buffer_from_image_as_jpeg( img, mupdf.FzColorParams(mupdf.fz_default_color_params), 95, 1) else: # copy the compressed buffer res = mupdf.FzBuffer(mupdf.ll_fz_keep_buffer(ll_cbuf.buffer)) bytes_ = JM_BinFromBuffer(res) img_dict[dictkey_width] = img.w() img_dict[dictkey_height] = img.h() img_dict[dictkey_ext] = ext img_dict[dictkey_colorspace] = img.n() img_dict[dictkey_xres] = img.xres() img_dict[dictkey_yres] = img.yres() img_dict[dictkey_bpc] = img.bpc() img_dict[dictkey_size] = len(bytes_) img_dict[dictkey_image] = bytes_ def JM_make_image_block(block, block_dict): img = block.i_image() _make_image_dict(img, block_dict) # if the image has a mask, store it as a PNG buffer mask = img.mask() if mask.m_internal: buff = mask.fz_new_buffer_from_image_as_png(mupdf.FzColorParams(mupdf.fz_default_color_params)) block_dict["mask"] = buff.fz_buffer_extract() else: block_dict["mask"] = None block_dict[dictkey_matrix] = JM_py_from_matrix(block.i_transform()) def JM_make_text_block(block, block_dict, raw, buff, tp_rect): if 1 or g_use_extra: return extra.JM_make_text_block(block.m_internal, block_dict, raw, buff.m_internal, tp_rect.m_internal) line_list = [] block_rect = mupdf.FzRect(mupdf.FzRect.Fixed_EMPTY) #log(f'{block=}') for line in block: #log(f'{line=}') if (mupdf.fz_is_empty_rect(mupdf.fz_intersect_rect(tp_rect, mupdf.FzRect(line.m_internal.bbox))) and not mupdf.fz_is_infinite_rect(tp_rect) ): continue 
        line_dict = dict()
        line_rect = JM_make_spanlist(line_dict, line, raw, buff, tp_rect)
        block_rect = mupdf.fz_union_rect(block_rect, line_rect)
        line_dict[dictkey_wmode] = line.m_internal.wmode
        line_dict[dictkey_dir] = JM_py_from_point(line.m_internal.dir)
        line_dict[dictkey_bbox] = JM_py_from_rect(line_rect)
        line_list.append(line_dict)
    block_dict[dictkey_bbox] = JM_py_from_rect(block_rect)
    block_dict[dictkey_lines] = line_list


def JM_make_textpage_dict(tp, page_dict, raw):
    # Build the 'dict' / 'rawdict' representation of a textpage: a list of
    # block dicts (text or image) stored under page_dict[dictkey_blocks].
    if 1 or g_use_extra:
        # C++ implementation is always used; Python code kept as reference.
        return extra.JM_make_textpage_dict(tp.m_internal, page_dict, raw)
    text_buffer = mupdf.fz_new_buffer(128)
    block_list = []
    tp_rect = mupdf.FzRect(tp.m_internal.mediabox)
    block_n = -1
    #log( 'JM_make_textpage_dict {=tp}')
    for block in tp:
        block_n += 1
        # Image blocks must lie fully inside the textpage rect.
        if (not mupdf.fz_contains_rect(tp_rect, mupdf.FzRect(block.m_internal.bbox))
                and not mupdf.fz_is_infinite_rect(tp_rect)
                and block.m_internal.type == mupdf.FZ_STEXT_BLOCK_IMAGE
                ):
            continue
        # Any block must at least intersect the textpage rect.
        if (not mupdf.fz_is_infinite_rect(tp_rect)
                and mupdf.fz_is_empty_rect(mupdf.fz_intersect_rect(tp_rect, mupdf.FzRect(block.m_internal.bbox)))
                ):
            continue
        block_dict = dict()
        block_dict[dictkey_number] = block_n
        block_dict[dictkey_type] = block.m_internal.type
        if block.m_internal.type == mupdf.FZ_STEXT_BLOCK_IMAGE:
            block_dict[dictkey_bbox] = JM_py_from_rect(block.m_internal.bbox)
            JM_make_image_block(block, block_dict)
        else:
            JM_make_text_block(block, block_dict, raw, text_buffer, tp_rect)
        block_list.append(block_dict)
    page_dict[dictkey_blocks] = block_list


def JM_matrix_from_py(m):
    # PySequence (or Matrix / FzMatrix) to mupdf.FzMatrix; identity-like
    # empty FzMatrix() on invalid input.
    a = [0, 0, 0, 0, 0, 0]
    if isinstance(m, mupdf.FzMatrix):
        return m
    if isinstance(m, Matrix):
        return mupdf.FzMatrix(m.a, m.b, m.c, m.d, m.e, m.f)
    if not m or not PySequence_Check(m) or PySequence_Size(m) != 6:
        return mupdf.FzMatrix()
    for i in range(6):
        a[i] = JM_FLOAT_ITEM(m, i)
        if a[i] is None:
            # NOTE(review): returns an FzRect from a matrix conversion —
            # every other error path returns FzMatrix(); looks like a bug.
            return mupdf.FzRect()
    return mupdf.FzMatrix(a[0], a[1], a[2], a[3], a[4], a[5])


def JM_mediabox(page_obj):
    '''
    return a PDF page's MediaBox
    '''
    page_mediabox = mupdf.FzRect(mupdf.FzRect.Fixed_UNIT)
    mediabox = mupdf.pdf_to_rect(
            mupdf.pdf_dict_get_inheritable(page_obj, PDF_NAME('MediaBox'))
            )
    # Missing or degenerate /MediaBox: fall back to US Letter (612 x 792).
    if mupdf.fz_is_empty_rect(mediabox) or mupdf.fz_is_infinite_rect(mediabox):
        mediabox.x0 = 0
        mediabox.y0 = 0
        mediabox.x1 = 612
        mediabox.y1 = 792
    # Normalize so x0 <= x1 and y0 <= y1.
    page_mediabox = mupdf.FzRect(
            mupdf.fz_min(mediabox.x0, mediabox.x1),
            mupdf.fz_min(mediabox.y0, mediabox.y1),
            mupdf.fz_max(mediabox.x0, mediabox.x1),
            mupdf.fz_max(mediabox.y0, mediabox.y1),
            )
    if (page_mediabox.x1 - page_mediabox.x0 < 1
            or page_mediabox.y1 - page_mediabox.y0 < 1
            ):
        page_mediabox = mupdf.FzRect(mupdf.FzRect.Fixed_UNIT)
    return page_mediabox


def JM_merge_range(
        doc_des,
        doc_src,
        spage,
        epage,
        apage,
        rotate,
        links,
        annots,
        show_progress,
        graft_map,
        ):
    '''
    Copy a range of pages (spage, epage) from a source PDF to a specified
    location (apage) of the target PDF.
    If spage > epage, the sequence of source pages is reversed.
    '''
    if g_use_extra:
        return extra.JM_merge_range(
                doc_des,
                doc_src,
                spage,
                epage,
                apage,
                rotate,
                links,
                annots,
                show_progress,
                graft_map,
                )
    afterpage = apage
    counter = 0  # copied pages counter
    total = mupdf.fz_absi(epage - spage) + 1    # total pages to copy
    if spage < epage:
        # forward copy
        page = spage
        while page <= epage:
            page_merge(doc_des, doc_src, page, afterpage, rotate, links, annots, graft_map)
            counter += 1
            if show_progress > 0 and counter % show_progress == 0:
                message(f"Inserted {counter} of {total} pages.")
            page += 1
            afterpage += 1
    else:
        # reversed copy
        page = spage
        while page >= epage:
            page_merge(doc_des, doc_src, page, afterpage, rotate, links, annots, graft_map)
            counter += 1
            if show_progress > 0 and counter % show_progress == 0:
                message(f"Inserted {counter} of {total} pages.")
            page -= 1
            afterpage += 1


def JM_merge_resources( page, temp_res):
    '''
    Merge the /Resources object created by a text pdf device into the page.
    The device may have created multiple /ExtGState/Alp? and /Font/F? objects.
    These need to be renamed (renumbered) to not overwrite existing page
    objects from previous executions.
    Returns the next available numbers n, m for objects /Alp<n>, /F<m>.
    '''
    # page objects /Resources, /Resources/ExtGState, /Resources/Font
    resources = mupdf.pdf_dict_get(page.obj(), PDF_NAME('Resources'))
    if not resources.m_internal:
        resources = mupdf.pdf_dict_put_dict(page.obj(), PDF_NAME('Resources'), 5)
    main_extg = mupdf.pdf_dict_get(resources, PDF_NAME('ExtGState'))
    main_fonts = mupdf.pdf_dict_get(resources, PDF_NAME('Font'))
    # text pdf device objects /ExtGState, /Font
    temp_extg = mupdf.pdf_dict_get(temp_res, PDF_NAME('ExtGState'))
    temp_fonts = mupdf.pdf_dict_get(temp_res, PDF_NAME('Font'))
    max_alp = -1
    max_fonts = -1
    # Handle /Alp objects
    if mupdf.pdf_is_dict(temp_extg):    # any created at all?
        n = mupdf.pdf_dict_len(temp_extg)
        if mupdf.pdf_is_dict(main_extg):    # does page have /ExtGState yet?
            for i in range(mupdf.pdf_dict_len(main_extg)):
                # get highest number of objects named /Alpxxx
                alp = mupdf.pdf_to_name( mupdf.pdf_dict_get_key(main_extg, i))
                if not alp.startswith('Alp'):
                    continue
                j = mupdf.fz_atoi(alp[3:])
                if j > max_alp:
                    max_alp = j
        else:   # create a /ExtGState for the page
            main_extg = mupdf.pdf_dict_put_dict(resources, PDF_NAME('ExtGState'), n)
        max_alp += 1
        for i in range(n):  # copy over renumbered /Alp objects
            alp = mupdf.pdf_to_name( mupdf.pdf_dict_get_key( temp_extg, i))
            j = mupdf.fz_atoi(alp[3:]) + max_alp
            text = f'Alp{j}'
            val = mupdf.pdf_dict_get_val( temp_extg, i)
            mupdf.pdf_dict_puts(main_extg, text, val)

    # Handle /F fonts analogously.
    if mupdf.pdf_is_dict(main_fonts):   # has page any fonts yet?
        for i in range(mupdf.pdf_dict_len(main_fonts)): # get max font number
            font = mupdf.pdf_to_name( mupdf.pdf_dict_get_key( main_fonts, i))
            if not font.startswith("F"):
                continue
            j = mupdf.fz_atoi(font[1:])
            if j > max_fonts:
                max_fonts = j
    else:   # create a Resources/Font for the page
        main_fonts = mupdf.pdf_dict_put_dict(resources, PDF_NAME('Font'), 2)
    max_fonts += 1
    for i in range(mupdf.pdf_dict_len(temp_fonts)): # copy renumbered fonts
        font = mupdf.pdf_to_name( mupdf.pdf_dict_get_key( temp_fonts, i))
        j = mupdf.fz_atoi(font[1:]) + max_fonts
        text = f'F{j}'
        val = mupdf.pdf_dict_get_val(temp_fonts, i)
        mupdf.pdf_dict_puts(main_fonts, text, val)
    return (max_alp, max_fonts) # next available numbers


def JM_mupdf_warning( text):
    '''
    redirect MuPDF warnings
    '''
    JM_mupdf_warnings_store.append(text)
    if JM_mupdf_show_warnings:
        message(f'MuPDF warning: {text}')


def JM_mupdf_error( text):
    # redirect MuPDF errors; stored in the same list as warnings
    JM_mupdf_warnings_store.append(text)
    if JM_mupdf_show_errors:
        message(f'MuPDF error: {text}\n')


def JM_new_bbox_device(rc, inc_layers):
    # Create a bbox-collecting device; rc receives the rectangles.
    assert isinstance(rc, list)
    return JM_new_bbox_device_Device( rc, inc_layers)


def JM_new_buffer_from_stext_page(page):
    '''
    make a buffer from an stext_page's text
    '''
    assert isinstance(page, mupdf.FzStextPage)
    rect = mupdf.FzRect(page.m_internal.mediabox)
    buf = mupdf.fz_new_buffer(256)
    for block in page:
        if block.m_internal.type == mupdf.FZ_STEXT_BLOCK_TEXT:
            for line in block:
                for ch in line:
                    # skip characters outside the page rect (unless infinite)
                    if (not JM_rects_overlap(rect, JM_char_bbox(line, ch))
                            and not mupdf.fz_is_infinite_rect(rect)
                            ):
                        continue
                    mupdf.fz_append_rune(buf, ch.m_internal.c)
                mupdf.fz_append_byte(buf, ord('\n'))
        mupdf.fz_append_byte(buf, ord('\n'))
    return buf


def JM_new_javascript(pdf, value):
    '''
    make new PDF action object from JavaScript source
    Parameters are a PDF document and a Python string.
    Returns a PDF action object.
''' if value is None: # no argument given return data = JM_StrAsChar(value) if data is None: # not convertible to char* return res = mupdf.fz_new_buffer_from_copied_data(data.encode('utf8')) source = mupdf.pdf_add_stream(pdf, res, mupdf.PdfObj(), 0) newaction = mupdf.pdf_add_new_dict(pdf, 4) mupdf.pdf_dict_put(newaction, PDF_NAME('S'), mupdf.pdf_new_name('JavaScript')) mupdf.pdf_dict_put(newaction, PDF_NAME('JS'), source) return newaction def JM_new_output_fileptr(bio): return JM_new_output_fileptr_Output( bio) def JM_norm_rotation(rotate): ''' # return normalized /Rotate value:one of 0, 90, 180, 270 ''' while rotate < 0: rotate += 360 while rotate >= 360: rotate -= 360 if rotate % 90 != 0: return 0 return rotate def JM_object_to_buffer(what, compress, ascii): res = mupdf.fz_new_buffer(512) out = mupdf.FzOutput(res) mupdf.pdf_print_obj(out, what, compress, ascii) out.fz_close_output() mupdf.fz_terminate_buffer(res) return res def JM_outline_xrefs(obj, xrefs): ''' Return list of outline xref numbers. Recursive function. 
    Arguments:
    'obj' first OL item
    'xrefs' empty Python list
    '''
    if not obj.m_internal:
        return xrefs
    thisobj = obj
    while thisobj.m_internal:
        newxref = mupdf.pdf_to_num( thisobj)
        # An xref already seen means a circular reference; a /Type key
        # marks the outline root — both terminate the walk.
        if newxref in xrefs or mupdf.pdf_dict_get( thisobj, PDF_NAME('Type')).m_internal:
            # circular ref or top of chain: terminate
            break
        xrefs.append( newxref)
        first = mupdf.pdf_dict_get( thisobj, PDF_NAME('First'))    # try go down
        if mupdf.pdf_is_dict( first):
            xrefs = JM_outline_xrefs( first, xrefs)
        thisobj = mupdf.pdf_dict_get( thisobj, PDF_NAME('Next'))    # try go next
        parent = mupdf.pdf_dict_get( thisobj, PDF_NAME('Parent'))    # get parent
        if not mupdf.pdf_is_dict( thisobj):
            # no sibling left: continue with the parent
            thisobj = parent
    return xrefs


def JM_page_rotation(page):
    '''
    return a PDF page's /Rotate value: one of (0, 90, 180, 270)
    '''
    rotate = 0
    obj = mupdf.pdf_dict_get_inheritable( page.obj(), mupdf.PDF_ENUM_NAME_Rotate)
    rotate = mupdf.pdf_to_int(obj)
    rotate = JM_norm_rotation(rotate)
    return rotate


def JM_pdf_obj_from_str(doc, src):
    '''
    create PDF object from given string (new in v1.14.0: MuPDF dropped it)
    '''
    # fixme: seems inefficient to convert to bytes instance then make another
    # copy inside fz_new_buffer_from_copied_data(), but no other way?
# buffer_ = mupdf.fz_new_buffer_from_copied_data(bytes(src, 'utf8')) stream = mupdf.fz_open_buffer(buffer_) lexbuf = mupdf.PdfLexbuf(mupdf.PDF_LEXBUF_SMALL) result = mupdf.pdf_parse_stm_obj(doc, stream, lexbuf) return result def JM_pixmap_from_display_list( list_, ctm, cs, alpha, clip, seps, ): ''' Version of fz_new_pixmap_from_display_list (util.c) to also support rendering of only the 'clip' part of the displaylist rectangle ''' assert isinstance(list_, mupdf.FzDisplayList) if seps is None: seps = mupdf.FzSeparations() assert seps is None or isinstance(seps, mupdf.FzSeparations), f'{type(seps)=}: {seps}' rect = mupdf.fz_bound_display_list(list_) matrix = JM_matrix_from_py(ctm) rclip = JM_rect_from_py(clip) rect = mupdf.fz_intersect_rect(rect, rclip) # no-op if clip is not given rect = mupdf.fz_transform_rect(rect, matrix) irect = mupdf.fz_round_rect(rect) assert isinstance( cs, mupdf.FzColorspace) pix = mupdf.fz_new_pixmap_with_bbox(cs, irect, seps, alpha) if alpha: mupdf.fz_clear_pixmap(pix) else: mupdf.fz_clear_pixmap_with_value(pix, 0xFF) if not mupdf.fz_is_infinite_rect(rclip): dev = mupdf.fz_new_draw_device_with_bbox(matrix, pix, irect) mupdf.fz_run_display_list(list_, dev, mupdf.FzMatrix(), rclip, mupdf.FzCookie()) else: dev = mupdf.fz_new_draw_device(matrix, pix) mupdf.fz_run_display_list(list_, dev, mupdf.FzMatrix(), mupdf.FzRect(mupdf.FzRect.Fixed_INFINITE), mupdf.FzCookie()) mupdf.fz_close_device(dev) # Use special raw Pixmap constructor so we don't set alpha to true. return Pixmap( 'raw', pix) def JM_point_from_py(p): ''' PySequence to fz_point. 
    Default: (FZ_MIN_INF_RECT, FZ_MIN_INF_RECT)
    '''
    if isinstance(p, mupdf.FzPoint):
        return p
    if isinstance(p, Point):
        return mupdf.FzPoint(p.x, p.y)
    if g_use_extra:
        return extra.JM_point_from_py( p)
    p0 = mupdf.FzPoint(0, 0)
    x = JM_FLOAT_ITEM(p, 0)
    y = JM_FLOAT_ITEM(p, 1)
    if x is None or y is None:
        return p0
    # clamp coordinates into the valid "infinite rect" range
    x = max( x, FZ_MIN_INF_RECT)
    y = max( y, FZ_MIN_INF_RECT)
    x = min( x, FZ_MAX_INF_RECT)
    y = min( y, FZ_MAX_INF_RECT)
    return mupdf.FzPoint(x, y)


def JM_print_stext_page_as_text(res, page):
    '''
    Plain text output. An identical copy of fz_print_stext_page_as_text,
    but lines within a block are concatenated by space instead a new-line
    character (which else leads to 2 new-lines).
    '''
    if 1 and g_use_extra:
        # C++ implementation is always used; Python code kept as reference.
        return extra.JM_print_stext_page_as_text(res, page)
    assert isinstance(res, mupdf.FzBuffer)
    assert isinstance(page, mupdf.FzStextPage)
    rect = mupdf.FzRect(page.m_internal.mediabox)
    last_char = 0
    # NOTE(review): the counters below are computed but never used —
    # looks like leftover instrumentation; confirm before removal.
    n_blocks = 0
    n_lines = 0
    n_chars = 0
    for n_blocks2, block in enumerate( page):
        if block.m_internal.type == mupdf.FZ_STEXT_BLOCK_TEXT:
            for n_lines2, line in enumerate( block):
                for n_chars2, ch in enumerate( line):
                    pass
                n_chars += n_chars2
            n_lines += n_lines2
        n_blocks += n_blocks2
    for block in page:
        if block.m_internal.type == mupdf.FZ_STEXT_BLOCK_TEXT:
            for line in block:
                last_char = 0
                for ch in line:
                    chbbox = JM_char_bbox(line, ch)
                    if (mupdf.fz_is_infinite_rect(rect)
                            or JM_rects_overlap(rect, chbbox)
                            ):
                        #raw += chr(ch.m_internal.c)
                        last_char = ch.m_internal.c
                        #log( '{=last_char!r utf!r}')
                        JM_append_rune(res, last_char)
                # terminate the line unless it already ended with \n
                if last_char != 10 and last_char > 0:
                    mupdf.fz_append_string(res, "\n")


def JM_put_script(annot_obj, key1, key2, value):
    '''
    Create a JavaScript PDF action.
    Usable for all object types which support PDF actions, even if the
    argument name suggests annotations. Up to 2 key values can be specified,
    so JavaScript actions can be stored for '/A' and '/AA/?' keys.
    '''
    key1_obj = mupdf.pdf_dict_get(annot_obj, key1)
    pdf = mupdf.pdf_get_bound_document(annot_obj)    # owning PDF
    # if no new script given, just delete corresponding key
    if not value:
        if key2 is None or not key2.m_internal:
            mupdf.pdf_dict_del(annot_obj, key1)
        elif key1_obj.m_internal:
            mupdf.pdf_dict_del(key1_obj, key2)
        return
    # read any existing script as a PyUnicode string
    if not key2.m_internal or not key1_obj.m_internal:
        script = JM_get_script(key1_obj)
    else:
        script = JM_get_script(mupdf.pdf_dict_get(key1_obj, key2))
    # replace old script, if different from new one
    if value != script:
        newaction = JM_new_javascript(pdf, value)
        if not key2.m_internal:
            mupdf.pdf_dict_put(annot_obj, key1, newaction)
        else:
            mupdf.pdf_dict_putl(annot_obj, newaction, key1, key2)


def JM_py_from_irect(r):
    # fz_irect -> Python 4-tuple
    return r.x0, r.y0, r.x1, r.y1


def JM_py_from_matrix(m):
    # fz_matrix -> Python 6-tuple
    return m.a, m.b, m.c, m.d, m.e, m.f


def JM_py_from_point(p):
    # fz_point -> Python 2-tuple
    return p.x, p.y


def JM_py_from_quad(q):
    '''
    PySequence from fz_quad.
    '''
    return (
            (q.ul.x, q.ul.y),
            (q.ur.x, q.ur.y),
            (q.ll.x, q.ll.y),
            (q.lr.x, q.lr.y),
            )


def JM_py_from_rect(r):
    # fz_rect -> Python 4-tuple
    return r.x0, r.y0, r.x1, r.y1


def JM_quad_from_py(r):
    if isinstance(r, mupdf.FzQuad):
        return r
    # cover all cases of 4-float-sequences
    if hasattr(r, "__getitem__") and len(r) == 4 and hasattr(r[0], "__float__"):
        r = mupdf.FzRect(*tuple(r))
    if isinstance( r, mupdf.FzRect):
        return mupdf.fz_quad_from_rect( r)
    if isinstance( r, Quad):
        return mupdf.fz_make_quad(
                r.ul.x, r.ul.y,
                r.ur.x, r.ur.y,
                r.ll.x, r.ll.y,
                r.lr.x, r.lr.y,
                )
    q = mupdf.fz_make_quad(0, 0, 0, 0, 0, 0, 0, 0)
    # NOTE(review): p holds plain ints, yet 'p[i].x = ...' below assigns an
    # attribute on them — this path would raise AttributeError for a
    # sequence of 4 points. Presumably the entries should be point objects;
    # confirm against callers.
    p = [0,0,0,0]
    if not r or not isinstance(r, (tuple, list)) or len(r) != 4:
        return q
    if JM_FLOAT_ITEM(r, 0) is None:
        return mupdf.fz_quad_from_rect(JM_rect_from_py(r))
    for i in range(4):
        if i >= len(r):
            return q    # invalid: cancel the rest
        obj = r[i]  # next point item
        if not PySequence_Check(obj) or PySequence_Size(obj) != 2:
            return q    # invalid: cancel the rest
        p[i].x = JM_FLOAT_ITEM(obj, 0)
        p[i].y = JM_FLOAT_ITEM(obj, 1)
        if p[i].x is None or p[i].y is None:
            return q
        p[i].x = max( p[i].x, FZ_MIN_INF_RECT)
        p[i].y = max( p[i].y, FZ_MIN_INF_RECT)
        p[i].x = min( p[i].x, FZ_MAX_INF_RECT)
        p[i].y = min( p[i].y, FZ_MAX_INF_RECT)
    q.ul = p[0]
    q.ur = p[1]
    q.ll = p[2]
    q.lr = p[3]
    return q


def JM_read_contents(pageref):
    '''
    Read and concatenate a PDF page's /Contents object(s) in a buffer
    '''
    assert isinstance(pageref, mupdf.PdfObj), f'{type(pageref)}'
    contents = mupdf.pdf_dict_get(pageref, mupdf.PDF_ENUM_NAME_Contents)
    if mupdf.pdf_is_array(contents):
        # multiple content streams: join them, separated by a space (32)
        res = mupdf.FzBuffer(1024)
        for i in range(mupdf.pdf_array_len(contents)):
            if i > 0:
                mupdf.fz_append_byte(res, 32)
            obj = mupdf.pdf_array_get(contents, i)
            if mupdf.pdf_is_stream(obj):
                nres = mupdf.pdf_load_stream(obj)
                mupdf.fz_append_buffer(res, nres)
    elif contents.m_internal:
        res = mupdf.pdf_load_stream(contents)
    else:
        res = mupdf.FzBuffer(0)
    return res


def JM_rect_from_py(r):
    # Rect-like Python object or 4-sequence -> mupdf.FzRect;
    # infinite rect on invalid input, coordinates clamped to valid range.
    if isinstance(r, mupdf.FzRect):
        return r
    if isinstance(r, mupdf.FzIrect):
        return mupdf.FzRect(r)
    if isinstance(r, Rect):
        return mupdf.fz_make_rect(r.x0, r.y0, r.x1, r.y1)
    if isinstance(r, IRect):
        return mupdf.fz_make_rect(r.x0, r.y0, r.x1, r.y1)
    if not r or not PySequence_Check(r) or PySequence_Size(r) != 4:
        return mupdf.FzRect(mupdf.FzRect.Fixed_INFINITE)
    f = [0, 0, 0, 0]
    for i in range(4):
        f[i] = JM_FLOAT_ITEM(r, i)
        if f[i] is None:
            return mupdf.FzRect(mupdf.FzRect.Fixed_INFINITE)
        if f[i] < FZ_MIN_INF_RECT:
            f[i] = FZ_MIN_INF_RECT
        if f[i] > FZ_MAX_INF_RECT:
            f[i] = FZ_MAX_INF_RECT
    return mupdf.fz_make_rect(f[0], f[1], f[2], f[3])


def JM_rects_overlap(a, b):
    # 1 if rectangles a and b intersect with non-empty area, else 0.
    if (0
            or a.x0 >= b.x1
            or a.y0 >= b.y1
            or a.x1 <= b.x0
            or a.y1 <= b.y0
            ):
        return 0
    return 1


def JM_refresh_links( page):
    '''
    refreshes the link and annotation tables of a page
    '''
    if page is None or not page.m_internal:
        return
    obj = mupdf.pdf_dict_get( page.obj(), PDF_NAME('Annots'))
    if obj.m_internal:
        pdf = page.doc()
        number = mupdf.pdf_lookup_page_number( pdf, page.obj())
        page_mediabox = mupdf.FzRect()
        page_ctm = mupdf.FzMatrix()
mupdf.pdf_page_transform( page, page_mediabox, page_ctm) link = mupdf.pdf_load_link_annots( pdf, page, obj, number, page_ctm) page.m_internal.links = mupdf.ll_fz_keep_link( link.m_internal) def JM_rotate_page_matrix(page): ''' calculate page rotation matrices ''' if not page.m_internal: return mupdf.FzMatrix() # no valid pdf page given rotation = JM_page_rotation(page) #log( '{rotation=}') if rotation == 0: return mupdf.FzMatrix() # no rotation cb_size = JM_cropbox_size(page.obj()) w = cb_size.x h = cb_size.y #log( '{=h w}') if rotation == 90: m = mupdf.fz_make_matrix(0, 1, -1, 0, h, 0) elif rotation == 180: m = mupdf.fz_make_matrix(-1, 0, 0, -1, w, h) else: m = mupdf.fz_make_matrix(0, -1, 1, 0, 0, w) #log( 'returning {m=}') return m def JM_search_stext_page(page, needle): if 1 or g_use_extra: return extra.JM_search_stext_page(page.m_internal, needle) rect = mupdf.FzRect(page.m_internal.mediabox) if not needle: return quads = [] class Hits: def __str__(self): return f'Hits(len={self.len} quads={self.quads} hfuzz={self.hfuzz} vfuzz={self.vfuzz}' hits = Hits() hits.len = 0 hits.quads = quads hits.hfuzz = 0.2 # merge kerns but not large gaps hits.vfuzz = 0.1 buffer_ = JM_new_buffer_from_stext_page(page) haystack_string = mupdf.fz_string_from_buffer(buffer_) haystack = 0 begin, end = find_string(haystack_string[haystack:], needle) if begin is None: #goto no_more_matches; return quads begin += haystack end += haystack inside = 0 i = 0 for block in page: if block.m_internal.type != mupdf.FZ_STEXT_BLOCK_TEXT: continue for line in block: for ch in line: i += 1 if not mupdf.fz_is_infinite_rect(rect): r = JM_char_bbox(line, ch) if not JM_rects_overlap(rect, r): #goto next_char; continue while 1: #try_new_match: if not inside: if haystack >= begin: inside = 1 if inside: if haystack < end: on_highlight_char(hits, line, ch) break else: inside = 0 begin, end = find_string(haystack_string[haystack:], needle) if begin is None: #goto no_more_matches; return quads else: #goto 
try_new_match; begin += haystack end += haystack continue break haystack += 1 #next_char:; assert haystack_string[haystack] == '\n', \ f'{haystack=} {haystack_string[haystack]=}' haystack += 1 assert haystack_string[haystack] == '\n', \ f'{haystack=} {haystack_string[haystack]=}' haystack += 1 #no_more_matches:; return quads def JM_scan_resources(pdf, rsrc, liste, what, stream_xref, tracer): ''' Step through /Resources, looking up image, xobject or font information ''' if mupdf.pdf_mark_obj(rsrc): mupdf.fz_warn('Circular dependencies! Consider page cleaning.') return # Circular dependencies! try: xobj = mupdf.pdf_dict_get(rsrc, mupdf.PDF_ENUM_NAME_XObject) if what == 1: # lookup fonts font = mupdf.pdf_dict_get(rsrc, mupdf.PDF_ENUM_NAME_Font) JM_gather_fonts(pdf, font, liste, stream_xref) elif what == 2: # look up images JM_gather_images(pdf, xobj, liste, stream_xref) elif what == 3: # look up form xobjects JM_gather_forms(pdf, xobj, liste, stream_xref) else: # should never happen return # check if we need to recurse into Form XObjects n = mupdf.pdf_dict_len(xobj) for i in range(n): obj = mupdf.pdf_dict_get_val(xobj, i) if mupdf.pdf_is_stream(obj): sxref = mupdf.pdf_to_num(obj) else: sxref = 0 subrsrc = mupdf.pdf_dict_get(obj, mupdf.PDF_ENUM_NAME_Resources) if subrsrc.m_internal: sxref_t = sxref if sxref_t not in tracer: tracer.append(sxref_t) JM_scan_resources( pdf, subrsrc, liste, what, sxref, tracer) else: mupdf.fz_warn('Circular dependencies! 
Consider page cleaning.') return finally: mupdf.pdf_unmark_obj(rsrc) def JM_set_choice_options(annot, liste): ''' set ListBox / ComboBox values ''' if not liste: return assert isinstance( liste, (tuple, list)) n = len( liste) if n == 0: return annot_obj = mupdf.pdf_annot_obj( annot) pdf = mupdf.pdf_get_bound_document( annot_obj) optarr = mupdf.pdf_new_array( pdf, n) for i in range(n): val = liste[i] opt = val if isinstance(opt, str): mupdf.pdf_array_push_text_string( optarr, opt) else: assert isinstance( val, (tuple, list)) and len( val) == 2, 'bad choice field list' opt1, opt2 = val assert opt1 and opt2, 'bad choice field list' optarrsub = mupdf.pdf_array_push_array( optarr, 2) mupdf.pdf_array_push_text_string( optarrsub, opt1) mupdf.pdf_array_push_text_string( optarrsub, opt2) mupdf.pdf_dict_put( annot_obj, PDF_NAME('Opt'), optarr) def JM_set_field_type(doc, obj, type): ''' Set the field type ''' setbits = 0 clearbits = 0 typename = None if type == mupdf.PDF_WIDGET_TYPE_BUTTON: typename = PDF_NAME('Btn') setbits = mupdf.PDF_BTN_FIELD_IS_PUSHBUTTON elif type == mupdf.PDF_WIDGET_TYPE_RADIOBUTTON: typename = PDF_NAME('Btn') clearbits = mupdf.PDF_BTN_FIELD_IS_PUSHBUTTON setbits = mupdf.PDF_BTN_FIELD_IS_RADIO elif type == mupdf.PDF_WIDGET_TYPE_CHECKBOX: typename = PDF_NAME('Btn') clearbits = (mupdf.PDF_BTN_FIELD_IS_PUSHBUTTON | mupdf.PDF_BTN_FIELD_IS_RADIO) elif type == mupdf.PDF_WIDGET_TYPE_TEXT: typename = PDF_NAME('Tx') elif type == mupdf.PDF_WIDGET_TYPE_LISTBOX: typename = PDF_NAME('Ch') clearbits = mupdf.PDF_CH_FIELD_IS_COMBO elif type == mupdf.PDF_WIDGET_TYPE_COMBOBOX: typename = PDF_NAME('Ch') setbits = mupdf.PDF_CH_FIELD_IS_COMBO elif type == mupdf.PDF_WIDGET_TYPE_SIGNATURE: typename = PDF_NAME('Sig') if typename is not None and typename.m_internal: mupdf.pdf_dict_put(obj, PDF_NAME('FT'), typename) if setbits != 0 or clearbits != 0: bits = mupdf.pdf_dict_get_int(obj, PDF_NAME('Ff')) bits &= ~clearbits bits |= setbits mupdf.pdf_dict_put_int(obj, PDF_NAME('Ff'), 
bits) def JM_set_object_value(obj, key, value): ''' Set a PDF dict key to some value ''' eyecatcher = "fitz: replace me!" pdf = mupdf.pdf_get_bound_document(obj) # split PDF key at path seps and take last key part list_ = key.split('/') len_ = len(list_) i = len_ - 1 skey = list_[i] del list_[i] # del the last sub-key len_ = len(list_) # remaining length testkey = mupdf.pdf_dict_getp(obj, key) # check if key already exists if not testkey.m_internal: #No, it will be created here. But we cannot allow this happening if #indirect objects are referenced. So we check all higher level #sub-paths for indirect references. while len_ > 0: t = '/'.join(list_) # next high level if mupdf.pdf_is_indirect(mupdf.pdf_dict_getp(obj, JM_StrAsChar(t))): raise Exception("path to '%s' has indirects", JM_StrAsChar(skey)) del list_[len_ - 1] # del last sub-key len_ = len(list_) # remaining length # Insert our eyecatcher. Will create all sub-paths in the chain, or # respectively remove old value of key-path. mupdf.pdf_dict_putp(obj, key, mupdf.pdf_new_text_string(eyecatcher)) testkey = mupdf.pdf_dict_getp(obj, key) if not mupdf.pdf_is_string(testkey): raise Exception("cannot insert value for '%s'", key) temp = mupdf.pdf_to_text_string(testkey) if temp != eyecatcher: raise Exception("cannot insert value for '%s'", key) # read the result as a string res = JM_object_to_buffer(obj, 1, 0) objstr = JM_EscapeStrFromBuffer(res) # replace 'eyecatcher' by desired 'value' nullval = "/%s(%s)" % ( skey, eyecatcher) newval = "/%s %s" % (skey, value) newstr = objstr.replace(nullval, newval, 1) # make PDF object from resulting string new_obj = JM_pdf_obj_from_str(pdf, newstr) return new_obj def JM_set_ocg_arrays(conf, basestate, on, off, rbgroups, locked): if basestate: mupdf.pdf_dict_put_name( conf, PDF_NAME('BaseState'), basestate) if on is not None: mupdf.pdf_dict_del( conf, PDF_NAME('ON')) if on: arr = mupdf.pdf_dict_put_array( conf, PDF_NAME('ON'), 1) JM_set_ocg_arrays_imp( arr, on) if off is not 
None: mupdf.pdf_dict_del( conf, PDF_NAME('OFF')) if off: arr = mupdf.pdf_dict_put_array( conf, PDF_NAME('OFF'), 1) JM_set_ocg_arrays_imp( arr, off) if locked is not None: mupdf.pdf_dict_del( conf, PDF_NAME('Locked')) if locked: arr = mupdf.pdf_dict_put_array( conf, PDF_NAME('Locked'), 1) JM_set_ocg_arrays_imp( arr, locked) if rbgroups is not None: mupdf.pdf_dict_del( conf, PDF_NAME('RBGroups')) if rbgroups: arr = mupdf.pdf_dict_put_array( conf, PDF_NAME('RBGroups'), 1) n =len(rbgroups) for i in range(n): item0 = rbgroups[i] obj = mupdf.pdf_array_push_array( arr, 1) JM_set_ocg_arrays_imp( obj, item0) def JM_set_ocg_arrays_imp(arr, list_): ''' Set OCG arrays from dict of Python lists Works with dict like {"basestate":name, "on":list, "off":list, "rbg":list} ''' pdf = mupdf.pdf_get_bound_document(arr) for xref in list_: obj = mupdf.pdf_new_indirect(pdf, xref, 0) mupdf.pdf_array_push(arr, obj) def JM_set_resource_property(ref, name, xref): ''' Insert an item into Resources/Properties (used for Marked Content) Arguments: (1) e.g. page object, Form XObject (2) marked content name (3) xref of the referenced object (insert as indirect reference) ''' pdf = mupdf.pdf_get_bound_document(ref) ind = mupdf.pdf_new_indirect(pdf, xref, 0) if not ind.m_internal: RAISEPY(MSG_BAD_XREF, PyExc_ValueError) resources = mupdf.pdf_dict_get(ref, PDF_NAME('Resources')) if not resources.m_internal: resources = mupdf.pdf_dict_put_dict(ref, PDF_NAME('Resources'), 1) properties = mupdf.pdf_dict_get(resources, PDF_NAME('Properties')) if not properties.m_internal: properties = mupdf.pdf_dict_put_dict(resources, PDF_NAME('Properties'), 1) mupdf.pdf_dict_put(properties, mupdf.pdf_new_name(name), ind) def JM_set_widget_properties(annot, Widget): ''' Update the PDF form field with the properties from a Python Widget object. Called by "Page.add_widget" and "Annot.update_widget". 
''' if isinstance( annot, Annot): annot = annot.this assert isinstance( annot, mupdf.PdfAnnot), f'{type(annot)=} {type=}' page = _pdf_annot_page(annot) assert page.m_internal, 'Annot is not bound to a page' annot_obj = mupdf.pdf_annot_obj(annot) pdf = page.doc() def GETATTR(name): return getattr(Widget, name, None) value = GETATTR("field_type") field_type = value # rectangle -------------------------------------------------------------- value = GETATTR("rect") rect = JM_rect_from_py(value) rot_mat = JM_rotate_page_matrix(page) rect = mupdf.fz_transform_rect(rect, rot_mat) mupdf.pdf_set_annot_rect(annot, rect) # fill color ------------------------------------------------------------- value = GETATTR("fill_color") if value and PySequence_Check(value): n = len(value) fill_col = mupdf.pdf_new_array(pdf, n) col = 0 for i in range(n): col = value[i] mupdf.pdf_array_push_real(fill_col, col) mupdf.pdf_field_set_fill_color(annot_obj, fill_col) # dashes ----------------------------------------------------------------- value = GETATTR("border_dashes") if value and PySequence_Check(value): n = len(value) dashes = mupdf.pdf_new_array(pdf, n) for i in range(n): mupdf.pdf_array_push_int(dashes, value[i]) mupdf.pdf_dict_putl(annot_obj, dashes, PDF_NAME('BS'), PDF_NAME('D')) # border color ----------------------------------------------------------- value = GETATTR("border_color") if value and PySequence_Check(value): n = len(value) border_col = mupdf.pdf_new_array(pdf, n) col = 0 for i in range(n): col = value[i] mupdf.pdf_array_push_real(border_col, col) mupdf.pdf_dict_putl(annot_obj, border_col, PDF_NAME('MK'), PDF_NAME('BC')) # entry ignored - may be used later # #int text_format = (int) PyInt_AsLong(GETATTR("text_format")); # # field label ----------------------------------------------------------- value = GETATTR("field_label") if value is not None: label = JM_StrAsChar(value) mupdf.pdf_dict_put_text_string(annot_obj, PDF_NAME('TU'), label) # field name 
------------------------------------------------------------- value = GETATTR("field_name") if value is not None: name = JM_StrAsChar(value) old_name = mupdf.pdf_load_field_name(annot_obj) if name != old_name: mupdf.pdf_dict_put_text_string(annot_obj, PDF_NAME('T'), name) # max text len ----------------------------------------------------------- if field_type == mupdf.PDF_WIDGET_TYPE_TEXT: value = GETATTR("text_maxlen") text_maxlen = value if text_maxlen: mupdf.pdf_dict_put_int(annot_obj, PDF_NAME('MaxLen'), text_maxlen) value = GETATTR("field_display") d = value mupdf.pdf_field_set_display(annot_obj, d) # choice values ---------------------------------------------------------- if field_type in (mupdf.PDF_WIDGET_TYPE_LISTBOX, mupdf.PDF_WIDGET_TYPE_COMBOBOX): value = GETATTR("choice_values") JM_set_choice_options(annot, value) # border style ----------------------------------------------------------- value = GETATTR("border_style") val = JM_get_border_style(value) mupdf.pdf_dict_putl(annot_obj, val, PDF_NAME('BS'), PDF_NAME('S')) # border width ----------------------------------------------------------- value = GETATTR("border_width") border_width = value mupdf.pdf_dict_putl( annot_obj, mupdf.pdf_new_real(border_width), PDF_NAME('BS'), PDF_NAME('W'), ) # /DA string ------------------------------------------------------------- value = GETATTR("_text_da") da = JM_StrAsChar(value) mupdf.pdf_dict_put_text_string(annot_obj, PDF_NAME('DA'), da) mupdf.pdf_dict_del(annot_obj, PDF_NAME('DS')) # not supported by MuPDF mupdf.pdf_dict_del(annot_obj, PDF_NAME('RC')) # not supported by MuPDF # field flags ------------------------------------------------------------ field_flags = GETATTR("field_flags") if field_flags is not None: if field_type == mupdf.PDF_WIDGET_TYPE_COMBOBOX: field_flags |= mupdf.PDF_CH_FIELD_IS_COMBO elif field_type == mupdf.PDF_WIDGET_TYPE_RADIOBUTTON: field_flags |= mupdf.PDF_BTN_FIELD_IS_RADIO elif field_type == mupdf.PDF_WIDGET_TYPE_BUTTON: field_flags |= 
mupdf.PDF_BTN_FIELD_IS_PUSHBUTTON mupdf.pdf_dict_put_int( annot_obj, PDF_NAME('Ff'), field_flags) # button caption --------------------------------------------------------- value = GETATTR("button_caption") ca = JM_StrAsChar(value) if ca: mupdf.pdf_field_set_button_caption(annot_obj, ca) # script (/A) ------------------------------------------------------- value = GETATTR("script") JM_put_script(annot_obj, PDF_NAME('A'), mupdf.PdfObj(), value) # script (/AA/K) ------------------------------------------------------- value = GETATTR("script_stroke") JM_put_script(annot_obj, PDF_NAME('AA'), PDF_NAME('K'), value) # script (/AA/F) ------------------------------------------------------- value = GETATTR("script_format") JM_put_script(annot_obj, PDF_NAME('AA'), PDF_NAME('F'), value) # script (/AA/V) ------------------------------------------------------- value = GETATTR("script_change") JM_put_script(annot_obj, PDF_NAME('AA'), PDF_NAME('V'), value) # script (/AA/C) ------------------------------------------------------- value = GETATTR("script_calc") JM_put_script(annot_obj, PDF_NAME('AA'), PDF_NAME('C'), value) # script (/AA/Bl) ------------------------------------------------------- value = GETATTR("script_blur") JM_put_script(annot_obj, PDF_NAME('AA'), mupdf.pdf_new_name('Bl'), value) # script (/AA/Fo) codespell:ignore -------------------------------------- value = GETATTR("script_focus") JM_put_script(annot_obj, PDF_NAME('AA'), mupdf.pdf_new_name('Fo'), value) # field value ------------------------------------------------------------ value = GETATTR("field_value") # field value text = JM_StrAsChar(value) # convert to text (may fail!) 
if field_type == mupdf.PDF_WIDGET_TYPE_RADIOBUTTON: if not value: mupdf.pdf_set_field_value(pdf, annot_obj, "Off", 1) mupdf.pdf_dict_put_name(annot_obj, PDF_NAME('AS'), "Off") else: # TODO check if another button in the group is ON and if so set it Off onstate = mupdf.pdf_button_field_on_state(annot_obj) if onstate.m_internal: on = mupdf.pdf_to_name(onstate) mupdf.pdf_set_field_value(pdf, annot_obj, on, 1) mupdf.pdf_dict_put_name(annot_obj, PDF_NAME('AS'), on) elif text: mupdf.pdf_dict_put_name(annot_obj, PDF_NAME('AS'), text) elif field_type == mupdf.PDF_WIDGET_TYPE_CHECKBOX: onstate = mupdf.pdf_button_field_on_state(annot_obj) on = onstate.pdf_to_name() if value in (True, on) or text == 'Yes': mupdf.pdf_set_field_value(pdf, annot_obj, on, 1) mupdf.pdf_dict_put_name(annot_obj, PDF_NAME('AS'), on) mupdf.pdf_dict_put_name(annot_obj, PDF_NAME('V'), on) else: mupdf.pdf_dict_put_name( annot_obj, PDF_NAME('AS'), 'Off') mupdf.pdf_dict_put_name( annot_obj, PDF_NAME('V'), 'Off') else: if text: mupdf.pdf_set_field_value(pdf, annot_obj, text, 1) if field_type in (mupdf.PDF_WIDGET_TYPE_COMBOBOX, mupdf.PDF_WIDGET_TYPE_LISTBOX): mupdf.pdf_dict_del(annot_obj, PDF_NAME('I')) mupdf.pdf_dirty_annot(annot) mupdf.pdf_set_annot_hot(annot, 1) mupdf.pdf_set_annot_active(annot, 1) mupdf.pdf_update_annot(annot) def JM_show_string_cs( text, user_font, trm, s, wmode, bidi_level, markup_dir, language, ): i = 0 while i < len(s): l, ucs = mupdf.fz_chartorune(s[i:]) i += l gid = mupdf.fz_encode_character_sc(user_font, ucs) if gid == 0: gid, font = mupdf.fz_encode_character_with_fallback(user_font, ucs, 0, language) else: font = user_font mupdf.fz_show_glyph(text, font, trm, gid, ucs, wmode, bidi_level, markup_dir, language) adv = mupdf.fz_advance_glyph(font, gid, wmode) if wmode == 0: trm = mupdf.fz_pre_translate(trm, adv, 0) else: trm = mupdf.fz_pre_translate(trm, 0, -adv) return trm def JM_UnicodeFromBuffer(buff): buff_bytes = mupdf.fz_buffer_extract_copy(buff) val = 
buff_bytes.decode(errors='replace') z = val.find(chr(0)) if z >= 0: val = val[:z] return val def message_warning(text): ''' Generate a warning. ''' message(f'warning: {text}') def JM_update_stream(doc, obj, buffer_, compress): ''' update a stream object compress stream when beneficial ''' if compress: length, _ = mupdf.fz_buffer_storage(buffer_) if length > 30: # ignore small stuff buffer_compressed = JM_compress_buffer(buffer_) assert isinstance(buffer_compressed, mupdf.FzBuffer) if buffer_compressed.m_internal: length_compressed, _ = mupdf.fz_buffer_storage(buffer_compressed) if length_compressed < length: # was it worth the effort? mupdf.pdf_dict_put( obj, mupdf.PDF_ENUM_NAME_Filter, mupdf.PDF_ENUM_NAME_FlateDecode, ) mupdf.pdf_update_stream(doc, obj, buffer_compressed, 1) return mupdf.pdf_update_stream(doc, obj, buffer_, 0) def JM_xobject_from_page(pdfout, fsrcpage, xref, gmap): ''' Make an XObject from a PDF page For a positive xref assume that its object can be used instead ''' assert isinstance(gmap, mupdf.PdfGraftMap), f'{type(gmap)=}' if xref > 0: xobj1 = mupdf.pdf_new_indirect(pdfout, xref, 0) else: srcpage = _as_pdf_page(fsrcpage.this) spageref = srcpage.obj() mediabox = mupdf.pdf_to_rect(mupdf.pdf_dict_get_inheritable(spageref, PDF_NAME('MediaBox'))) # Deep-copy resources object of source page o = mupdf.pdf_dict_get_inheritable(spageref, PDF_NAME('Resources')) if gmap.m_internal: # use graftmap when possible resources = mupdf.pdf_graft_mapped_object(gmap, o) else: resources = mupdf.pdf_graft_object(pdfout, o) # get spgage contents source res = JM_read_contents(spageref) #------------------------------------------------------------- # create XObject representing the source page #------------------------------------------------------------- xobj1 = mupdf.pdf_new_xobject(pdfout, mediabox, mupdf.FzMatrix(), mupdf.PdfObj(0), res) # store spage contents JM_update_stream(pdfout, xobj1, res, 1) # store spage resources mupdf.pdf_dict_put(xobj1, 
PDF_NAME('Resources'), resources) return xobj1 def PySequence_Check(s): return isinstance(s, (tuple, list)) def PySequence_Size(s): return len(s) # constants: error messages. These are also in extra.i. # MSG_BAD_ANNOT_TYPE = "bad annot type" MSG_BAD_APN = "bad or missing annot AP/N" MSG_BAD_ARG_INK_ANNOT = "arg must be seq of seq of float pairs" MSG_BAD_ARG_POINTS = "bad seq of points" MSG_BAD_BUFFER = "bad type: 'buffer'" MSG_BAD_COLOR_SEQ = "bad color sequence" MSG_BAD_DOCUMENT = "cannot open broken document" MSG_BAD_FILETYPE = "bad filetype" MSG_BAD_LOCATION = "bad location" MSG_BAD_OC_CONFIG = "bad config number" MSG_BAD_OC_LAYER = "bad layer number" MSG_BAD_OC_REF = "bad 'oc' reference" MSG_BAD_PAGEID = "bad page id" MSG_BAD_PAGENO = "bad page number(s)" MSG_BAD_PDFROOT = "PDF has no root" MSG_BAD_RECT = "rect is infinite or empty" MSG_BAD_TEXT = "bad type: 'text'" MSG_BAD_XREF = "bad xref" MSG_COLOR_COUNT_FAILED = "color count failed" MSG_FILE_OR_BUFFER = "need font file or buffer" MSG_FONT_FAILED = "cannot create font" MSG_IS_NO_ANNOT = "is no annotation" MSG_IS_NO_IMAGE = "is no image" MSG_IS_NO_PDF = "is no PDF" MSG_IS_NO_DICT = "object is no PDF dict" MSG_PIX_NOALPHA = "source pixmap has no alpha" MSG_PIXEL_OUTSIDE = "pixel(s) outside image" JM_Exc_FileDataError = 'FileDataError' PyExc_ValueError = 'ValueError' def RAISEPY( msg, exc): #JM_Exc_CurrentException=exc #fz_throw(context, FZ_ERROR_GENERIC, msg) raise Exception( msg) def PyUnicode_DecodeRawUnicodeEscape(s, errors='strict'): # FIXED: handle raw unicode escape sequences if not s: return "" if isinstance(s, str): rc = s.encode("utf8", errors=errors) elif isinstance(s, bytes): rc = s[:] ret = rc.decode('raw_unicode_escape', errors=errors) return ret def CheckColor(c: OptSeq): if c: if ( type(c) not in (list, tuple) or len(c) not in (1, 3, 4) or min(c) < 0 or max(c) > 1 ): raise ValueError("need 1, 3 or 4 color components in range 0 to 1") def CheckFont(page: Page, fontname: str) -> tuple: """Return 
an entry in the page's font list if reference name matches. """ for f in page.get_fonts(): if f[4] == fontname: return f def CheckFontInfo(doc: Document, xref: int) -> list: """Return a font info if present in the document. """ for f in doc.FontInfos: if xref == f[0]: return f def CheckMarkerArg(quads: typing.Any) -> tuple: if CheckRect(quads): r = Rect(quads) return (r.quad,) if CheckQuad(quads): return (quads,) for q in quads: if not (CheckRect(q) or CheckQuad(q)): raise ValueError("bad quads entry") return quads def CheckMorph(o: typing.Any) -> bool: if not bool(o): return False if not (type(o) in (list, tuple) and len(o) == 2): raise ValueError("morph must be a sequence of length 2") if not (len(o[0]) == 2 and len(o[1]) == 6): raise ValueError("invalid morph param 0") if not o[1][4] == o[1][5] == 0: raise ValueError("invalid morph param 1") return True def CheckParent(o: typing.Any): return if not hasattr(o, "parent") or o.parent is None: raise ValueError(f"orphaned object {type(o)=}: parent is None") def CheckQuad(q: typing.Any) -> bool: """Check whether an object is convex, not empty quad-like. It must be a sequence of 4 number pairs. """ try: q0 = Quad(q) except Exception: if g_exceptions_verbose > 1: exception_info() return False return q0.is_convex def CheckRect(r: typing.Any) -> bool: """Check whether an object is non-degenerate rect-like. It must be a sequence of 4 numbers. 
""" try: r = Rect(r) except Exception: if g_exceptions_verbose > 1: exception_info() return False return not (r.is_empty or r.is_infinite) def ColorCode(c: typing.Union[list, tuple, float, None], f: str) -> str: if not c: return "" if hasattr(c, "__float__"): c = (c,) CheckColor(c) if len(c) == 1: s = _format_g(c[0]) + " " return s + "G " if f == "c" else s + "g " if len(c) == 3: s = _format_g(tuple(c)) + " " return s + "RG " if f == "c" else s + "rg " s = _format_g(tuple(c)) + " " return s + "K " if f == "c" else s + "k " def Page__add_text_marker(self, quads, annot_type): pdfpage = self._pdf_page() rotation = JM_page_rotation(pdfpage) def final(): if rotation != 0: mupdf.pdf_dict_put_int(pdfpage.obj(), PDF_NAME('Rotate'), rotation) try: if rotation != 0: mupdf.pdf_dict_put_int(pdfpage.obj(), PDF_NAME('Rotate'), 0) annot = mupdf.pdf_create_annot(pdfpage, annot_type) for item in quads: q = JM_quad_from_py(item) mupdf.pdf_add_annot_quad_point(annot, q) mupdf.pdf_update_annot(annot) JM_add_annot_id(annot, "A") final() except Exception: if g_exceptions_verbose: exception_info() final() return return Annot(annot) def PDF_NAME(x): assert isinstance(x, str) ret = getattr(mupdf, f'PDF_ENUM_NAME_{x}') # Note that we return a (swig proxy for) pdf_obj*, not a mupdf.PdfObj. In # the C++ API, the constructor PdfObj::PdfObj(pdf_obj*) is marked as # explicit, but this seems to be ignored by SWIG. If SWIG started to # generate code that respected `explicit`, we would need to do `return # mupdf.PdfObj(ret)`. # # [Compare with extra.i, where we define our own PDF_NAME2() macro that # returns a mupdf::PdfObj.] return ret def UpdateFontInfo(doc: Document, info: typing.Sequence): xref = info[0] found = False for i, fi in enumerate(doc.FontInfos): if fi[0] == xref: found = True break if found: doc.FontInfos[i] = info else: doc.FontInfos.append(info) def args_match(args, *types): ''' Returns true if <args> matches <types>. Each item in <types> is a type or tuple of types. 
Any of these types will match an item in <args>. `None` will match anything in <args>. `type(None)` will match an arg whose value is `None`. ''' j = 0 for i in range(len(types)): type_ = types[i] if j >= len(args): if isinstance(type_, tuple) and None in type_: # arg is missing but has default value. continue else: return False if type_ is not None and not isinstance(args[j], type_): return False j += 1 if j != len(args): return False return True def calc_image_matrix(width, height, tr, rotate, keep): ''' # compute image insertion matrix ''' trect = JM_rect_from_py(tr) rot = mupdf.fz_rotate(rotate) trw = trect.x1 - trect.x0 trh = trect.y1 - trect.y0 w = trw h = trh if keep: large = max(width, height) fw = width / large fh = height / large else: fw = fh = 1 small = min(fw, fh) if rotate != 0 and rotate != 180: f = fw fw = fh fh = f if fw < 1: if trw / fw > trh / fh: w = trh * small h = trh else: w = trw h = trw / small elif fw != fh: if trw / fw > trh / fh: w = trh / small h = trh else: w = trw h = trw * small else: w = trw h = trh tmp = mupdf.fz_make_point( (trect.x0 + trect.x1) / 2, (trect.y0 + trect.y1) / 2, ) mat = mupdf.fz_make_matrix(1, 0, 0, 1, -0.5, -0.5) mat = mupdf.fz_concat(mat, rot) mat = mupdf.fz_concat(mat, mupdf.fz_scale(w, h)) mat = mupdf.fz_concat(mat, mupdf.fz_translate(tmp.x, tmp.y)) return mat def detect_super_script(line, ch): if line.m_internal.wmode == 0 and line.m_internal.dir.x == 1 and line.m_internal.dir.y == 0: return ch.m_internal.origin.y < line.m_internal.first_char.origin.y - ch.m_internal.size * 0.1 return 0 def dir_str(x): ret = f'{x} {type(x)} ({len(dir(x))}):\n' for i in dir(x): ret += f' {i}\n' return ret def getTJstr(text: str, glyphs: typing.Union[list, tuple, None], simple: bool, ordering: int) -> str: """ Return a PDF string enclosed in [] brackets, suitable for the PDF TJ operator. Notes: The input string is converted to either 2 or 4 hex digits per character. 
Args: simple: no glyphs: 2-chars, use char codes as the glyph glyphs: 2-chars, use glyphs instead of char codes (Symbol, ZapfDingbats) not simple: ordering < 0: 4-chars, use glyphs not char codes ordering >=0: a CJK font! 4 chars, use char codes as glyphs """ if text.startswith("[<") and text.endswith(">]"): # already done return text if not bool(text): return "[<>]" if simple: # each char or its glyph is coded as a 2-byte hex if glyphs is None: # not Symbol, not ZapfDingbats: use char code otxt = "".join(["%02x" % ord(c) if ord(c) < 256 else "b7" for c in text]) else: # Symbol or ZapfDingbats: use glyphs otxt = "".join( ["%02x" % glyphs[ord(c)][0] if ord(c) < 256 else "b7" for c in text] ) return "[<" + otxt + ">]" # non-simple fonts: each char or its glyph is coded as 4-byte hex if ordering < 0: # not a CJK font: use the glyphs otxt = "".join(["%04x" % glyphs[ord(c)][0] for c in text]) else: # CJK: use the char codes otxt = "".join(["%04x" % ord(c) for c in text]) return "[<" + otxt + ">]" def get_pdf_str(s: str) -> str: """ Return a PDF string depending on its coding. Notes: Returns a string bracketed with either "()" or "<>" for hex values. If only ascii then "(original)" is returned, else if only 8 bit chars then "(original)" with interspersed octal strings \nnn is returned, else a string "<FEFF[hexstring]>" is returned, where [hexstring] is the UTF-16BE encoding of the original. """ if not bool(s): return "()" def make_utf16be(s): r = bytearray([254, 255]) + bytearray(s, "UTF-16BE") return "<" + r.hex() + ">" # brackets indicate hex # The following either returns the original string with mixed-in # octal numbers \nnn for chars outside the ASCII range, or returns # the UTF-16BE BOM version of the string. 
r = "" for c in s: oc = ord(c) if oc > 255: # shortcut if beyond 8-bit code range return make_utf16be(s) if oc > 31 and oc < 127: # in ASCII range if c in ("(", ")", "\\"): # these need to be escaped r += "\\" r += c continue if oc > 127: # beyond ASCII r += "\\%03o" % oc continue # now the white spaces if oc == 8: # backspace r += "\\b" elif oc == 9: # tab r += "\\t" elif oc == 10: # line feed r += "\\n" elif oc == 12: # form feed r += "\\f" elif oc == 13: # carriage return r += "\\r" else: r += "\\267" # unsupported: replace by 0xB7 return "(" + r + ")" def get_tessdata(tessdata=None): """Detect Tesseract language support folder. This function is used to enable OCR via Tesseract even if the language support folder is not specified directly or in environment variable TESSDATA_PREFIX. * If <tessdata> is set we return it directly. * Otherwise we return `os.environ['TESSDATA_PREFIX']` if set. * Otherwise we search for a Tesseract installation and return its language support folder. * Otherwise we raise an exception. """ if tessdata: return tessdata tessdata = os.getenv("TESSDATA_PREFIX") if tessdata: # use environment variable if set return tessdata # Try to locate the tesseract-ocr installation. import subprocess cp = subprocess.run('tesseract --list-langs', shell=1, capture_output=1, check=0, text=True) if cp.returncode == 0: m = re.search('List of available languages in "(.+)"', cp.stdout) if m: tessdata = m.group(1) return tessdata # Windows systems: if sys.platform == "win32": cp = subprocess.run("where tesseract", shell=1, capture_output=1, check=0, text=True) response = cp.stdout.strip() if cp.returncode or not response: raise RuntimeError("No tessdata specified and Tesseract is not installed") dirname = os.path.dirname(response) # path of tesseract.exe tessdata = os.path.join(dirname, "tessdata") # language support if os.path.exists(tessdata): # all ok? return tessdata else: # should not happen! 
raise RuntimeError("No tessdata specified and Tesseract installation has no {tessdata} folder") # Unix-like systems: attempts = list() for path in 'tesseract-ocr', 'tesseract': cp = subprocess.run(f'whereis {path}', shell=1, capture_output=1, check=0, text=True) if cp.returncode == 0: response = cp.stdout.strip().split() if len(response) == 2: # search tessdata in folder structure dirname = response[1] # contains tesseract-ocr installation folder pattern = f"{dirname}/*/tessdata" attempts.append(pattern) tessdatas = glob.glob(pattern) tessdatas.sort() if tessdatas: return tessdatas[-1] if attempts: text = 'No tessdata specified and no match for:\n' for attempt in attempts: text += f' {attempt}' raise RuntimeError(text) else: raise RuntimeError('No tessdata specified and Tesseract is not installed') def css_for_pymupdf_font( fontcode: str, *, CSS: OptStr = None, archive: AnyType = None, name: OptStr = None ) -> str: """Create @font-face items for the given fontcode of pymupdf-fonts. Adds @font-face support for fonts contained in package pymupdf-fonts. Creates a CSS font-family for all fonts starting with string 'fontcode'. Note: The font naming convention in package pymupdf-fonts is "fontcode<sf>", where the suffix "sf" is either empty or one of "it", "bo" or "bi". These suffixes thus represent the regular, italic, bold or bold-italic variants of a font. For example, font code "notos" refers to fonts "notos" - "Noto Sans Regular" "notosit" - "Noto Sans Italic" "notosbo" - "Noto Sans Bold" "notosbi" - "Noto Sans Bold Italic" This function creates four CSS @font-face definitions and collectively assigns the font-family name "notos" to them (or the "name" value). All fitting font buffers of the pymupdf-fonts package are placed / added to the archive provided as parameter. To use the font in pymupdf.Story, execute 'set_font(fontcode)'. The correct font weight (bold) or style (italic) will automatically be selected. 
Expects and returns the CSS source, with the new CSS definitions appended. Args: fontcode: (str) font code for naming the font variants to include. E.g. "fig" adds notos, notosi, notosb, notosbi fonts. A maximum of 4 font variants is accepted. CSS: (str) CSS string to add @font-face definitions to. archive: (Archive, mandatory) where to place the font buffers. name: (str) use this as family-name instead of 'fontcode'. Returns: Modified CSS, with appended @font-face statements for each font variant of fontcode. Fontbuffers associated with "fontcode" will be added to 'archive'. """ # @font-face template string CSSFONT = "\n@font-face {font-family: %s; src: url(%s);%s%s}\n" if not type(archive) is Archive: raise ValueError("'archive' must be an Archive") if CSS is None: CSS = "" # select font codes starting with the pass-in string font_keys = [k for k in fitz_fontdescriptors.keys() if k.startswith(fontcode)] if font_keys == []: raise ValueError(f"No font code '{fontcode}' found in pymupdf-fonts.") if len(font_keys) > 4: raise ValueError("fontcode too short") if name is None: # use this name for font-family name = fontcode for fkey in font_keys: font = fitz_fontdescriptors[fkey] bold = font["bold"] # determine font property italic = font["italic"] # determine font property fbuff = font["loader"]() # load the fontbuffer archive.add(fbuff, fkey) # update the archive bold_text = "font-weight: bold;" if bold else "" italic_text = "font-style: italic;" if italic else "" CSS += CSSFONT % (name, fkey, bold_text, italic_text) return CSS def get_text_length(text: str, fontname: str ="helv", fontsize: float =11, encoding: int =0) -> float: """Calculate length of a string for a built-in font. Args: fontname: name of the font. fontsize: font size points. encoding: encoding to use, 0=Latin (default), 1=Greek, 2=Cyrillic. Returns: (float) length of text. 
""" fontname = fontname.lower() basename = Base14_fontdict.get(fontname, None) glyphs = None if basename == "Symbol": glyphs = symbol_glyphs if basename == "ZapfDingbats": glyphs = zapf_glyphs if glyphs is not None: w = sum([glyphs[ord(c)][1] if ord(c) < 256 else glyphs[183][1] for c in text]) return w * fontsize if fontname in Base14_fontdict.keys(): return util_measure_string( text, Base14_fontdict[fontname], fontsize, encoding ) if fontname in ( "china-t", "china-s", "china-ts", "china-ss", "japan", "japan-s", "korea", "korea-s", ): return len(text) * fontsize raise ValueError("Font '%s' is unsupported" % fontname) def image_profile(img: ByteString) -> dict: """ Return basic properties of an image. Args: img: bytes, bytearray, io.BytesIO object or an opened image file. Returns: A dictionary with keys width, height, colorspace.n, bpc, type, ext and size, where 'type' is the MuPDF image type (0 to 14) and 'ext' the suitable file extension. """ if type(img) is io.BytesIO: stream = img.getvalue() elif hasattr(img, "read"): stream = img.read() elif type(img) in (bytes, bytearray): stream = img else: raise ValueError("bad argument 'img'") return TOOLS.image_profile(stream) def jm_append_merge(dev): ''' Append current path to list or merge into last path of the list. (1) Append if first path, different item lists or not a 'stroke' version of previous path (2) If new path has the same items, merge its content into previous path and change path["type"] to "fs". (3) If "out" is callable, skip the previous and pass dictionary to it. ''' #log(f'{getattr(dev, "pathdict", None)=}') assert isinstance(dev.out, list) #log( f'{dev.out=}') if callable(dev.method) or dev.method: # function or method # callback. if dev.method is None: # fixme, this surely cannot happen? 
assert 0 #resp = PyObject_CallFunctionObjArgs(out, dev.pathdict, NULL) else: #log(f'calling {dev.out=} {dev.method=} {dev.pathdict=}') resp = getattr(dev.out, dev.method)(dev.pathdict) if not resp: message("calling cdrawings callback function/method failed!") dev.pathdict = None return def append(): #log(f'jm_append_merge(): clearing dev.pathdict') dev.out.append(dev.pathdict.copy()) dev.pathdict.clear() assert isinstance(dev.out, list) len_ = len(dev.out) # len of output list so far #log('{len_=}') if len_ == 0: # always append first path return append() #log(f'{getattr(dev, "pathdict", None)=}') thistype = dev.pathdict[ dictkey_type] #log(f'{thistype=}') if thistype != 's': # if not stroke, then append return append() prev = dev.out[ len_-1] # get prev path #log( f'{prev=}') prevtype = prev[ dictkey_type] #log( f'{prevtype=}') if prevtype != 'f': # if previous not fill, append return append() # last check: there must be the same list of items for "f" and "s". previtems = prev[ dictkey_items] thisitems = dev.pathdict[ dictkey_items] if previtems != thisitems: return append() #rc = PyDict_Merge(prev, dev.pathdict, 0); // merge with no override try: for k, v in dev.pathdict.items(): if k not in prev: prev[k] = v rc = 0 except Exception: if g_exceptions_verbose: exception_info() #raise rc = -1 if rc == 0: prev[ dictkey_type] = 'fs' dev.pathdict.clear() else: message("could not merge stroke and fill path") append() def jm_bbox_add_rect( dev, ctx, rect, code): if not dev.layers: dev.result.append( (code, JM_py_from_rect(rect))) else: dev.result.append( (code, JM_py_from_rect(rect), dev.layer_name)) def jm_bbox_fill_image( dev, ctx, image, ctm, alpha, color_params): r = mupdf.FzRect(mupdf.FzRect.Fixed_UNIT) r = mupdf.ll_fz_transform_rect( r.internal(), ctm) jm_bbox_add_rect( dev, ctx, r, "fill-image") def jm_bbox_fill_image_mask( dev, ctx, image, ctm, colorspace, color, alpha, color_params): try: jm_bbox_add_rect( dev, ctx, mupdf.ll_fz_transform_rect(mupdf.fz_unit_rect, 
ctm), "fill-imgmask") except Exception: if g_exceptions_verbose: exception_info() raise def jm_bbox_fill_path( dev, ctx, path, even_odd, ctm, colorspace, color, alpha, color_params): even_odd = True if even_odd else False try: jm_bbox_add_rect( dev, ctx, mupdf.ll_fz_bound_path(path, None, ctm), "fill-path") except Exception: if g_exceptions_verbose: exception_info() raise def jm_bbox_fill_shade( dev, ctx, shade, ctm, alpha, color_params): try: jm_bbox_add_rect( dev, ctx, mupdf.ll_fz_bound_shade( shade, ctm), "fill-shade") except Exception: if g_exceptions_verbose: exception_info() raise def jm_bbox_stroke_text( dev, ctx, text, stroke, ctm, *args): try: jm_bbox_add_rect( dev, ctx, mupdf.ll_fz_bound_text( text, stroke, ctm), "stroke-text") except Exception: if g_exceptions_verbose: exception_info() raise def jm_bbox_fill_text( dev, ctx, text, ctm, *args): try: jm_bbox_add_rect( dev, ctx, mupdf.ll_fz_bound_text( text, None, ctm), "fill-text") except Exception: if g_exceptions_verbose: exception_info() raise def jm_bbox_ignore_text( dev, ctx, text, ctm): jm_bbox_add_rect( dev, ctx, mupdf.ll_fz_bound_text(text, None, ctm), "ignore-text") def jm_bbox_stroke_path( dev, ctx, path, stroke, ctm, colorspace, color, alpha, color_params): try: jm_bbox_add_rect( dev, ctx, mupdf.ll_fz_bound_path( path, stroke, ctm), "stroke-path") except Exception: if g_exceptions_verbose: exception_info() raise def jm_checkquad(dev): ''' Check whether the last 4 lines represent a quad. Because of how we count, the lines are a polyline already, i.e. last point of a line equals 1st point of next line. So we check for a polygon (last line's end point equals start point). If not true we return 0. 
''' #log(f'{getattr(dev, "pathdict", None)=}') items = dev.pathdict[ dictkey_items] len_ = len(items) f = [0] * 8 # coordinates of the 4 corners # fill the 8 floats in f, start from items[-4:] for i in range( 4): # store line start points line = items[ len_ - 4 + i] temp = JM_point_from_py( line[1]) f[i * 2] = temp.x f[i * 2 + 1] = temp.y lp = JM_point_from_py( line[ 2]) if lp.x != f[0] or lp.y != f[1]: # not a polygon! #dev.linecount -= 1 return 0 # we have detected a quad dev.linecount = 0 # reset this # a quad item is ("qu", (ul, ur, ll, lr)), where the tuple items # are pairs of floats representing a quad corner each. # relationship of float array to quad points: # (0, 1) = ul, (2, 3) = ll, (6, 7) = ur, (4, 5) = lr q = mupdf.fz_make_quad(f[0], f[1], f[6], f[7], f[2], f[3], f[4], f[5]) rect = ('qu', JM_py_from_quad(q)) items[ len_ - 4] = rect # replace item -4 by rect del items[ len_ - 3 : len_] # delete remaining 3 items return 1 def jm_checkrect(dev): ''' Check whether the last 3 path items represent a rectangle. Returns 1 if we have modified the path, otherwise 0. ''' #log(f'{getattr(dev, "pathdict", None)=}') dev.linecount = 0 # reset line count orientation = 0 # area orientation of rectangle items = dev.pathdict[ dictkey_items] len_ = len(items) line0 = items[ len_ - 3] ll = JM_point_from_py( line0[ 1]) lr = JM_point_from_py( line0[ 2]) # no need to extract "line1"! line2 = items[ len_ - 1] ur = JM_point_from_py( line2[ 1]) ul = JM_point_from_py( line2[ 2]) # Assumption: # When decomposing rects, MuPDF always starts with a horizontal line, # followed by a vertical line, followed by a horizontal line. # First line: (ll, lr), third line: (ul, ur). # If 1st line is below 3rd line, we record anti-clockwise (+1), else # clockwise (-1) orientation. if (0 or ll.y != lr.y or ll.x != ul.x or ur.y != ul.y or ur.x != lr.x ): return 0 # not a rectangle # we have a rect, replace last 3 "l" items by one "re" item. 
if ul.y < lr.y: r = mupdf.fz_make_rect(ul.x, ul.y, lr.x, lr.y) orientation = 1 else: r = mupdf.fz_make_rect(ll.x, ll.y, ur.x, ur.y) orientation = -1 rect = ( 're', JM_py_from_rect(r), orientation) items[ len_ - 3] = rect # replace item -3 by rect del items[ len_ - 2 : len_] # delete remaining 2 items return 1 def jm_trace_text( dev, text, type_, ctm, colorspace, color, alpha, seqno): span = text.head while 1: if not span: break jm_trace_text_span( dev, span, type_, ctm, colorspace, color, alpha, seqno) span = span.next def jm_trace_text_span(dev, span, type_, ctm, colorspace, color, alpha, seqno): ''' jm_trace_text_span(fz_context *ctx, PyObject *out, fz_text_span *span, int type, fz_matrix ctm, fz_colorspace *colorspace, const float *color, float alpha, size_t seqno) ''' out_font = None assert isinstance( span, mupdf.fz_text_span) span = mupdf.FzTextSpan( span) assert isinstance( ctm, mupdf.fz_matrix) ctm = mupdf.FzMatrix( ctm) fontname = JM_font_name( span.font()) #float rgb[3]; #PyObject *chars = PyTuple_New(span->len); mat = mupdf.fz_concat(span.trm(), ctm) # text transformation matrix dir = mupdf.fz_transform_vector(mupdf.fz_make_point(1, 0), mat) # writing direction fsize = math.sqrt(dir.x * dir.x + dir.y * dir.y) # font size dir = mupdf.fz_normalize_vector(dir) space_adv = 0 asc = JM_font_ascender( span.font()) dsc = JM_font_descender( span.font()) if asc < 1e-3: # probably Tesseract font dsc = -0.1 asc = 0.9 # compute effective ascender / descender ascsize = asc * fsize / (asc - dsc) dscsize = dsc * fsize / (asc - dsc) fflags = 0 # font flags mono = mupdf.fz_font_is_monospaced( span.font()) fflags += mono * TEXT_FONT_MONOSPACED fflags += mupdf.fz_font_is_italic( span.font()) * TEXT_FONT_ITALIC fflags += mupdf.fz_font_is_serif( span.font()) * TEXT_FONT_SERIFED fflags += mupdf.fz_font_is_bold( span.font()) * TEXT_FONT_BOLD last_adv = 0 # walk through characters of span span_bbox = mupdf.FzRect() rot = mupdf.fz_make_matrix(dir.x, dir.y, -dir.y, dir.x, 0, 0) if 
dir.x == -1: # left-right flip rot.d = 1 chars = [] for i in range( span.m_internal.len): adv = 0 if span.items(i).gid >= 0: adv = mupdf.fz_advance_glyph( span.font(), span.items(i).gid, span.m_internal.wmode) adv *= fsize last_adv = adv if span.items(i).ucs == 32: space_adv = adv char_orig = mupdf.fz_make_point(span.items(i).x, span.items(i).y) char_orig = mupdf.fz_transform_point(char_orig, ctm) m1 = mupdf.fz_make_matrix(1, 0, 0, 1, -char_orig.x, -char_orig.y) m1 = mupdf.fz_concat(m1, rot) m1 = mupdf.fz_concat(m1, mupdf.FzMatrix(1, 0, 0, 1, char_orig.x, char_orig.y)) x0 = char_orig.x x1 = x0 + adv if ( (mat.d > 0 and (dir.x == 1 or dir.x == -1)) or (mat.b != 0 and mat.b == -mat.c) ): # up-down flip y0 = char_orig.y + dscsize y1 = char_orig.y + ascsize else: y0 = char_orig.y - ascsize y1 = char_orig.y - dscsize char_bbox = mupdf.fz_make_rect(x0, y0, x1, y1) char_bbox = mupdf.fz_transform_rect(char_bbox, m1) chars.append( ( span.items(i).ucs, span.items(i).gid, ( char_orig.x, char_orig.y, ), ( char_bbox.x0, char_bbox.y0, char_bbox.x1, char_bbox.y1, ), ) ) if i > 0: span_bbox = mupdf.fz_union_rect(span_bbox, char_bbox) else: span_bbox = char_bbox chars = tuple(chars) if not space_adv: if not (fflags & TEXT_FONT_MONOSPACED): c, out_font = mupdf.fz_encode_character_with_fallback( span.font(), 32, 0, 0) space_adv = mupdf.fz_advance_glyph( span.font(), c, span.m_internal.wmode, ) space_adv *= fsize if not space_adv: space_adv = last_adv else: space_adv = last_adv # for mono, any char width suffices # make the span dictionary span_dict = dict() span_dict[ 'dir'] = JM_py_from_point(dir) span_dict[ 'font'] = JM_EscapeStrFromStr(fontname) span_dict[ 'wmode'] = span.m_internal.wmode span_dict[ 'flags'] =fflags span_dict[ "bidi_lvl"] =span.m_internal.bidi_level span_dict[ "bidi_dir"] = span.m_internal.markup_dir span_dict[ 'ascender'] = asc span_dict[ 'descender'] = dsc span_dict[ 'colorspace'] = 3 if colorspace: rgb = mupdf.fz_convert_color( mupdf.FzColorspace( 
mupdf.ll_fz_keep_colorspace( colorspace)), color, mupdf.fz_device_rgb(), mupdf.FzColorspace(), mupdf.FzColorParams(), ) rgb = rgb[:3] # mupdf.fz_convert_color() always returns 4 items. else: rgb = (0, 0, 0) if dev.linewidth > 0: # width of character border linewidth = dev.linewidth else: linewidth = fsize * 0.05 # default: 5% of font size #log(f'{dev.linewidth=:.4f} {fsize=:.4f} {linewidth=:.4f}') span_dict[ 'color'] = rgb span_dict[ 'size'] = fsize span_dict[ "opacity"] = alpha span_dict[ "linewidth"] = linewidth span_dict[ "spacewidth"] = space_adv span_dict[ 'type'] = type_ span_dict[ 'bbox'] = JM_py_from_rect(span_bbox) span_dict[ 'layer'] = dev.layer_name span_dict[ "seqno"] = seqno span_dict[ 'chars'] = chars #log(f'{span_dict=}') dev.out.append( span_dict) def jm_lineart_color(colorspace, color): #log(f' ') if colorspace: try: # Need to be careful to use a named Python object to ensure # that the `params` we pass to mupdf.ll_fz_convert_color() is # valid. E.g. doing: # # rgb = mupdf.ll_fz_convert_color(..., mupdf.FzColorParams().internal()) # # - seems to end up with a corrupted `params`. 
# cs = mupdf.FzColorspace( mupdf.FzColorspace.Fixed_RGB) cp = mupdf.FzColorParams() rgb = mupdf.ll_fz_convert_color( colorspace, color, cs.m_internal, None, cp.internal(), ) except Exception: if g_exceptions_verbose: exception_info() raise return rgb[:3] return () def jm_lineart_drop_device(dev, ctx): if isinstance(dev.out, list): dev.out = [] dev.scissors = [] def jm_lineart_fill_path( dev, ctx, path, even_odd, ctm, colorspace, color, alpha, color_params): #log(f'{getattr(dev, "pathdict", None)=}') #log(f'jm_lineart_fill_path(): {dev.seqno=}') even_odd = True if even_odd else False try: assert isinstance( ctm, mupdf.fz_matrix) dev.ctm = mupdf.FzMatrix( ctm) # fz_concat(ctm, dev_ptm); dev.path_type = trace_device_FILL_PATH jm_lineart_path( dev, ctx, path) if dev.pathdict is None: return #item_count = len(dev.pathdict[ dictkey_items]) #if item_count == 0: # return dev.pathdict[ dictkey_type] ="f" dev.pathdict[ "even_odd"] = even_odd dev.pathdict[ "fill_opacity"] = alpha #log(f'setting dev.pathdict[ "closePath"] to false') #dev.pathdict[ "closePath"] = False dev.pathdict[ "fill"] = jm_lineart_color( colorspace, color) dev.pathdict[ dictkey_rect] = JM_py_from_rect(dev.pathrect) dev.pathdict[ "seqno"] = dev.seqno #jm_append_merge(dev) dev.pathdict[ 'layer'] = dev.layer_name if dev.clips: dev.pathdict[ 'level'] = dev.depth jm_append_merge(dev) dev.seqno += 1 #log(f'jm_lineart_fill_path() end: {getattr(dev, "pathdict", None)=}') except Exception: if g_exceptions_verbose: exception_info() raise # There are 3 text trace types: # 0 - fill text (PDF Tr 0) # 1 - stroke text (PDF Tr 1) # 3 - ignore text (PDF Tr 3) def jm_lineart_fill_text( dev, ctx, text, ctm, colorspace, color, alpha, color_params): if 0: log(f'{type(ctx)=} {ctx=}') log(f'{type(dev)=} {dev=}') log(f'{type(text)=} {text=}') log(f'{type(ctm)=} {ctm=}') log(f'{type(colorspace)=} {colorspace=}') log(f'{type(color)=} {color=}') log(f'{type(alpha)=} {alpha=}') log(f'{type(color_params)=} {color_params=}') 
jm_trace_text(dev, text, 0, ctm, colorspace, color, alpha, dev.seqno) dev.seqno += 1 def jm_lineart_ignore_text(dev, text, ctm): #log(f'{getattr(dev, "pathdict", None)=}') jm_trace_text(dev, text, 3, ctm, None, None, 1, dev.seqno) dev.seqno += 1
EmptyFileError
python
huggingface__transformers
src/transformers/models/phimoe/modeling_phimoe.py
{ "start": 14191, "end": 20796 }
class ____(nn.Module): """Collection of expert weights stored as 3D tensors.""" def __init__(self, config: PhimoeConfig): super().__init__() self.num_experts = config.num_local_experts self.hidden_dim = config.hidden_size self.intermediate_dim = config.intermediate_size self.gate_up_proj = nn.Parameter(torch.empty(self.num_experts, 2 * self.intermediate_dim, self.hidden_dim)) self.down_proj = nn.Parameter(torch.empty(self.num_experts, self.hidden_dim, self.intermediate_dim)) self.act_fn = ACT2FN[config.hidden_act] def forward( self, hidden_states: torch.Tensor, top_k_index: torch.Tensor, top_k_weights: torch.Tensor, ) -> torch.Tensor: final_hidden_states = torch.zeros_like(hidden_states) num_experts = top_k_weights.shape[1] with torch.no_grad(): expert_mask = torch.nn.functional.one_hot(top_k_index, num_classes=num_experts + 1) expert_mask = expert_mask.permute(2, 1, 0) expert_hit = torch.greater(expert_mask.sum(dim=(-1, -2)), 0).nonzero() for expert_idx in expert_hit: expert_idx = expert_idx[0] if expert_idx == num_experts: continue _, token_idx = torch.where(expert_mask[expert_idx]) current_state = hidden_states[token_idx] gate, up = nn.functional.linear(current_state, self.gate_up_proj[expert_idx]).chunk(2, dim=-1) current_hidden_states = self.act_fn(gate) * up current_hidden_states = nn.functional.linear(current_hidden_states, self.down_proj[expert_idx]) current_hidden_states = current_hidden_states * top_k_weights[token_idx, expert_idx, None] final_hidden_states.index_add_(0, token_idx, current_hidden_states.to(final_hidden_states.dtype)) return final_hidden_states def sparsemixer(scores, jitter_eps, training, top_k=2): """ Sparse mixer function to select top-k experts and compute multipliers. Based on the paper: https://huggingface.co/papers/2409.12136 We first replace the TopK(·) function as random sampling of discrete variables in model training. Then, following Liu et al. (2023a) and Liu et al. 
(2023b), we apply Heun's third order method to approximate the expert routing gradient and construct a modified back-propagation to give a mathematically sound gradient estimation for expert routing. Args: scores (torch.Tensor): Input scores tensor. jitter_eps (float): Jitter epsilon for numerical stability. training (bool): Flag indicating if the model is in training mode. top_k (int): Number of top experts to select. Returns: tuple[torch.Tensor, torch.Tensor]: Multiplier and selected experts tensors. """ with torch.no_grad(): # Compute mask for sparsity mask_logits_threshold, max_ind = scores.max(dim=-1, keepdim=True) factor = scores.abs().clamp(min=mask_logits_threshold) mask_logits_threshold = ((mask_logits_threshold - scores) / factor) > (2 * jitter_eps) # Apply mask masked_gates = scores.masked_fill(mask_logits_threshold, float("-inf")) if training: selected_experts = ( ( masked_gates - torch.empty_like(masked_gates, memory_format=torch.legacy_contiguous_format).exponential_().log() ) .max(dim=-1)[1] .unsqueeze(-1) ) # Gumbel sampling, more robust than the multinomial method else: selected_experts = max_ind # Compute scores for gradients masked_gates = torch.softmax(masked_gates, dim=-1) multiplier_o = masked_gates.gather(dim=-1, index=selected_experts) if training: # Compute midpoint mask max_scores, max_ind = masked_gates.max(dim=-1, keepdim=True) mask_for_one = torch.logical_or( selected_experts == max_ind, torch.rand_like(max_scores) > 0.75, # Heun's third-order method ) # 1 -> 1.0 & 0 -> 1./3: lambda x: (x + 0.5) / 1.5 mask_for_one = torch.add(0.3333, mask_for_one, alpha=0.6667).type_as(masked_gates) multiplier = PhimoeMultiplier.apply( scores, multiplier_o, selected_experts, masked_gates, mask_for_one, ) else: multiplier = multiplier_o # Masked out first expert masked_scores = torch.scatter( scores, -1, selected_experts, float("-inf"), ) with torch.no_grad(): # Compute mask for sparsity mask_logits_threshold, max_ind = masked_scores.max(dim=-1, 
keepdim=True) factor = scores.abs().clamp(min=mask_logits_threshold) mask_logits_threshold = ((mask_logits_threshold - scores) / factor) > (2 * jitter_eps) # Apply mask masked_gates_top2 = masked_scores.masked_fill(mask_logits_threshold, float("-inf")) if training: selected_experts_top2 = ( ( masked_gates_top2 - torch.empty_like(masked_gates_top2, memory_format=torch.legacy_contiguous_format) .exponential_() .log() ) .max(dim=-1)[1] .unsqueeze(-1) ) # Gumbel sampling, more robust than the multinomial method else: selected_experts_top2 = max_ind # Compute scores for gradients masked_gates_top2 = torch.softmax(masked_gates_top2, dim=-1) multiplier_top2_o = masked_gates_top2.gather(dim=-1, index=selected_experts_top2) if training: # Compute midpoint mask max_scores, max_ind = masked_gates_top2.max(dim=-1, keepdim=True) mask_for_one_top2 = torch.logical_or( selected_experts_top2 == max_ind, torch.rand_like(max_scores).uniform_() > 0.75, # Heun's third-order method ) # 1 -> 1.0 & 0 -> 1./3: lambda x: (x + 0.5) / 1.5 mask_for_one_top2 = torch.add(0.3333, mask_for_one_top2, alpha=0.6667).type_as(masked_gates_top2) multiplier_top2 = PhimoeMultiplier.apply( scores, multiplier_top2_o, selected_experts_top2, masked_gates_top2, mask_for_one_top2, ) else: multiplier_top2 = multiplier_top2_o multiplier = torch.concat((multiplier, multiplier_top2), dim=-1) selected_experts = torch.concat((selected_experts, selected_experts_top2), dim=-1) return ( multiplier, selected_experts, )
PhimoeExperts
python
fluentpython__example-code-2e
23-descriptor/descriptorkinds.py
{ "start": 5580, "end": 5794 }
class ____: # <5> over = Overriding() over_no_get = OverridingNoGet() non_over = NonOverriding() def spam(self): # <6> print(f'-> Managed.spam({display(self)})') # end::DESCR_KINDS[]
Managed
python
airbytehq__airbyte
airbyte-integrations/connectors/destination-pgvector/destination_pgvector/common/sql/sql_processor.py
{ "start": 1874, "end": 1957 }
class ____(enum.Enum): APPEND = "append" REPLACE = "replace"
RecordDedupeMode
python
walkccc__LeetCode
solutions/1066. Campus Bikes II/1066.py
{ "start": 0, "end": 753 }
class ____: def assignBikes( self, workers: list[list[int]], bikes: list[list[int]], ) -> int: def dist(p1: list[int], p2: list[int]) -> int: return abs(p1[0] - p2[0]) + abs(p1[1] - p2[1]) @functools.lru_cache(None) def dp(workerIndex: int, used: int) -> int: """ Returns the minimum Manhattan distances to assign bikes to workers[workerIndex..n), where `used` is the bitmask of the used bikes. """ if workerIndex == len(workers): return 0 return min( (dist(workers[workerIndex], bike) + dp(workerIndex + 1, used | 1 << i) for i, bike in enumerate(bikes) if not used >> i & 1), default=math.inf) return dp(0, 0)
Solution
python
keras-team__keras
keras/src/backend/tensorflow/optimizer.py
{ "start": 465, "end": 9307 }
class ____(KerasAutoTrackable, base_optimizer.BaseOptimizer): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self._distribution_strategy = tf.distribute.get_strategy() def add_variable_from_reference( self, reference_variable, name=None, initializer="zeros" ): if isinstance(reference_variable, backend.Variable): colocate_var = reference_variable.value else: colocate_var = reference_variable with self._distribution_strategy.extended.colocate_vars_with( colocate_var ): return super().add_variable_from_reference( reference_variable, name=name, initializer=initializer ) def stateless_apply(self, optimizer_variables, grads, trainable_variables): # This is mainly due to the interaction with tf.distribute.Strategy, # which requires tf.Variable as the inputs for most of its APIs. raise ValueError( "stateless_apply is not supported with the TensorFlow backend " "(as it is incompatible with tf.distribute)." ) def assign(self, variable, value): if isinstance(variable, backend.Variable): variable = variable.value value = tf.cast(value, variable.dtype) if isinstance(value, tf.IndexedSlices): variable.scatter_update(value) else: variable.assign(value) def assign_add(self, variable, value): if isinstance(variable, backend.Variable): variable = variable.value value = tf.cast(value, variable.dtype) if isinstance(value, tf.IndexedSlices): variable.scatter_add(value) else: variable.assign_add(value) def assign_sub(self, variable, value): if isinstance(variable, backend.Variable): variable = variable.value value = tf.cast(value, variable.dtype) if isinstance(value, tf.IndexedSlices): variable.scatter_sub(value) else: variable.assign_sub(value) def _var_key(self, variable): if isinstance(variable, backend.Variable): variable = variable.value # Convert to tf.Variable if hasattr(variable, "_distributed_container"): variable = variable._distributed_container() elif ( isinstance(variable, tf.__internal__.CompositeTensor) and hasattr(variable, "handle") and 
hasattr(variable.handle, "_distributed_container") ): # For ResourceVariables, the _distributed_container attribute # is added to their handle tensors. variable = variable.handle._distributed_container() return variable._unique_id def _apply_weight_decay(self, variables): if self.weight_decay is None: return def distributed_apply_weight_decay(distribution, variables, **kwargs): def weight_decay_fn(variable): if self._use_weight_decay(variable): lr = tf.cast(self.learning_rate, variable.dtype) wd = tf.cast(self.weight_decay, variable.dtype) variable.assign_sub(variable * wd * lr) for variable in variables: if isinstance(variable, backend.Variable): variable = variable.value # Convert to tf.Variable distribution.extended.update( variable, weight_decay_fn, group=False ) tf.__internal__.distribute.interim.maybe_merge_call( distributed_apply_weight_decay, self._distribution_strategy, variables, ) def _backend_update_step(self, grads, trainable_variables, learning_rate): trainable_variables = [ v.value if isinstance(v, backend.Variable) else v for v in trainable_variables ] grads_and_vars = list(zip(grads, trainable_variables)) grads_and_vars = self._all_reduce_sum_gradients(grads_and_vars) tf.__internal__.distribute.interim.maybe_merge_call( self._distributed_tf_update_step, self._distribution_strategy, grads_and_vars, learning_rate, ) def _distributed_tf_update_step( self, distribution, grads_and_vars, learning_rate ): def apply_grad_to_update_var(var, grad, learning_rate): return self.update_step(grad, var, learning_rate) for grad, var in grads_and_vars: distribution.extended.update( var, apply_grad_to_update_var, args=(grad, learning_rate), group=False, ) def _all_reduce_sum_gradients(self, grads_and_vars): """Returns all-reduced gradients aggregated via summation. Args: grads_and_vars: List of (gradient, variable) pairs. Returns: List of (gradient, variable) pairs where gradients have been all-reduced. 
""" replica_context = tf.distribute.get_replica_context() if not replica_context: return grads_and_vars grads_and_vars = list(grads_and_vars) filtered_grads_and_vars = filter_empty_gradients(grads_and_vars) if filtered_grads_and_vars: grads = [pair[0] for pair in filtered_grads_and_vars] reduced = tf.distribute.get_replica_context().all_reduce( tf.distribute.ReduceOp.SUM, grads ) else: reduced = [] # Copy 'reduced' but add None gradients back in reduced_with_nones = [] reduced_pos = 0 for g, v in grads_and_vars: if g is None: reduced_with_nones.append((None, v)) else: reduced_with_nones.append((reduced[reduced_pos], v)) reduced_pos += 1 assert reduced_pos == len(reduced), "Failed to add all gradients" return reduced_with_nones def _overwrite_model_variables_with_average_value( self, trainable_variables ): """Overwrite model variables with their moving average values. This function overwrites variables on each device. Args: var_list: list of model variables. """ trainable_variables = [ v.value if isinstance(v, backend.Variable) else v for v in trainable_variables ] # Override model variable by the stored average value on all devices. for var, average_var in zip( trainable_variables, self._model_variables_moving_average ): self._distribution_strategy.extended.update( var, lambda a, b: a.assign(b), args=(average_var,) ) def _backend_increment_gradient_accumulators(self, grads, acc_grads): def update_accumulator(var, grad): var.assign(var + grad) accumulators = [v.value for v in acc_grads] def _distributed_tf_increment_grad_acc( distribution, grads, accumulators ): for grad, var in zip(grads, accumulators): distribution.extended.update( var, update_accumulator, args=(grad,), group=False ) tf.__internal__.distribute.interim.maybe_merge_call( _distributed_tf_increment_grad_acc, self._distribution_strategy, grads, accumulators, ) def _clip_by_norm(self, values, axes=None): # We need to use TF-specific OP to support the case, # when `values` are `tf.IndexedSlices`. 
return tf.clip_by_norm(values, self.clipnorm, axes) def filter_empty_gradients(grads_and_vars): """Filter out `(grad, var)` pairs that have a gradient equal to `None`.""" grads_and_vars = tuple(grads_and_vars) if not grads_and_vars: return grads_and_vars filtered = [] vars_with_empty_grads = [] for grad, var in grads_and_vars: if grad is None: vars_with_empty_grads.append(var) else: filtered.append((grad, var)) filtered = tuple(filtered) if not filtered: variable = ([v.name for _, v in grads_and_vars],) raise ValueError( f"No gradients provided for any variable: {variable}. " f"Provided `grads_and_vars` is {grads_and_vars}." ) if vars_with_empty_grads: warnings.warn( "Gradients do not exist for variables %s when minimizing the " "loss. If you're using `model.compile()`, did you forget to " "provide a `loss` argument?", ([v.name for v in vars_with_empty_grads]), ) return filtered
TFOptimizer
python
sympy__sympy
sympy/codegen/ast.py
{ "start": 28728, "end": 28786 }
class ____(String): """ Represents a comment. """
Comment
python
sympy__sympy
sympy/polys/polyoptions.py
{ "start": 16471, "end": 17250 }
class ____(Option, metaclass=OptionType): """``modulus`` option to polynomial manipulation functions. """ option = 'modulus' requires: list[str] = [] excludes = ['greedy', 'split', 'domain', 'gaussian', 'extension'] @classmethod def preprocess(cls, modulus): modulus = sympify(modulus) if modulus.is_Integer and modulus > 0: return int(modulus) else: raise OptionError( "'modulus' must a positive integer, got %s" % modulus) @classmethod def postprocess(cls, options): if 'modulus' in options: modulus = options['modulus'] symmetric = options.get('symmetric', True) options['domain'] = sympy.polys.domains.FF(modulus, symmetric)
Modulus
python
airbytehq__airbyte
airbyte-integrations/connectors/source-bing-ads/unit_tests/integrations/test_hourly_reports.py
{ "start": 22663, "end": 26697 }
class ____(HourlyReportsTestWithStateChangesAfterMigration): stream_name = "ad_performance_report_hourly" report_file = "ad_performance_report_hourly" records_number = 24 state_file = "hourly_reports_state" incremental_report_file = "ad_performance_report_hourly_incremental" def mock_report_apis(self): self.mock_user_query_api(response_template="user_query") self.mock_accounts_search_api( response_template="accounts_search_for_report", body=b'{"PageInfo": {"Index": 0, "Size": 1000}, "Predicates": [{"Field": "UserId", "Operator": "Equals", "Value": "123456789"}], "ReturnAdditionalFields": "TaxCertificate,AccountMode"}', ) self.mock_generate_report_api( endpoint="Submit", response_template="generate_report", body=b'{"ReportRequest": {"ExcludeColumnHeaders": false, "ExcludeReportFooter": true, "ExcludeReportHeader": true, "Format": "Csv", "FormatVersion": "2.0", "ReportName": "AdPerformanceReport", "ReturnOnlyCompleteData": false, "Type": "AdPerformanceReportRequest", "Aggregation": "Hourly", "Columns": ["AccountId", "CampaignId", "AdGroupId", "AdId", "TimePeriod", "AbsoluteTopImpressionRatePercent", "TopImpressionRatePercent", "CurrencyCode", "AdDistribution", "DeviceType", "Language", "Network", "DeviceOS", "TopVsOther", "BidMatchType", "DeliveredMatchType", "AccountName", "CampaignName", "CampaignType", "AdGroupName", "Impressions", "Clicks", "Ctr", "Spend", "CostPerConversion", "DestinationUrl", "Assists", "ReturnOnAdSpend", "CostPerAssist", "CustomParameters", "FinalAppUrl", "AdDescription", "AdDescription2", "ViewThroughConversions", "ViewThroughConversionsQualified", "AllCostPerConversion", "AllReturnOnAdSpend", "Conversions", "ConversionRate", "ConversionsQualified", "AverageCpc", "AveragePosition", "AverageCpm", "AllConversions", "AllConversionRate", "AllRevenue", "AllRevenuePerConversion", "Revenue", "RevenuePerConversion", "RevenuePerAssist"], "Scope": {"AccountIds": [180535609]}, "Time": {"CustomDateRangeStart": {"Day": 1, "Month": 1, "Year": 2024}, 
"CustomDateRangeEnd": {"Day": 6, "Month": 5, "Year": 2024}, "ReportTimeZone": "GreenwichMeanTimeDublinEdinburghLisbonLondon"}}}', ) # for second read self.mock_generate_report_api( endpoint="Submit", response_template="generate_report", body=b'{"ReportRequest": {"ExcludeColumnHeaders": false, "ExcludeReportFooter": true, "ExcludeReportHeader": true, "Format": "Csv", "FormatVersion": "2.0", "ReportName": "AdPerformanceReport", "ReturnOnlyCompleteData": false, "Type": "AdPerformanceReportRequest", "Aggregation": "Hourly", "Columns": ["AccountId", "CampaignId", "AdGroupId", "AdId", "TimePeriod", "AbsoluteTopImpressionRatePercent", "TopImpressionRatePercent", "CurrencyCode", "AdDistribution", "DeviceType", "Language", "Network", "DeviceOS", "TopVsOther", "BidMatchType", "DeliveredMatchType", "AccountName", "CampaignName", "CampaignType", "AdGroupName", "Impressions", "Clicks", "Ctr", "Spend", "CostPerConversion", "DestinationUrl", "Assists", "ReturnOnAdSpend", "CostPerAssist", "CustomParameters", "FinalAppUrl", "AdDescription", "AdDescription2", "ViewThroughConversions", "ViewThroughConversionsQualified", "AllCostPerConversion", "AllReturnOnAdSpend", "Conversions", "ConversionRate", "ConversionsQualified", "AverageCpc", "AveragePosition", "AverageCpm", "AllConversions", "AllConversionRate", "AllRevenue", "AllRevenuePerConversion", "Revenue", "RevenuePerConversion", "RevenuePerAssist"], "Scope": {"AccountIds": [180535609]}, "Time": {"CustomDateRangeStart": {"Day": 6, "Month": 5, "Year": 2024}, "CustomDateRangeEnd": {"Day": 8, "Month": 5, "Year": 2024}, "ReportTimeZone": "GreenwichMeanTimeDublinEdinburghLisbonLondon"}}}', ) self.mock_generate_report_api( endpoint="Poll", response_template="generate_report_poll", body=b'{"ReportRequestId": "thisisthereport_requestid"}' )
TestAdPerformanceReportHourlyStream
python
sqlalchemy__sqlalchemy
test/sql/test_deprecations.py
{ "start": 2624, "end": 3691 }
class ____(fixtures.TestBase, AssertsCompiledSQL): __dialect__ = "default" table1 = table( "mytable", column("myid", Integer), column("name", String), column("description", String), ) table2 = table( "myothertable", column("otherid", Integer), column("othername", String) ) def test_as_scalar(self): with testing.expect_deprecated( r"The SelectBase.as_scalar\(\) method is deprecated and " "will be removed in a future release." ): stmt = select(self.table1.c.myid).as_scalar() is_true(stmt.compare(select(self.table1.c.myid).scalar_subquery())) def test_as_scalar_from_subquery(self): with testing.expect_deprecated( r"The Subquery.as_scalar\(\) method, which was previously " r"``Alias.as_scalar\(\)`` prior to version 1.4" ): stmt = select(self.table1.c.myid).subquery().as_scalar() is_true(stmt.compare(select(self.table1.c.myid).scalar_subquery()))
SubqueryCoercionsTest
python
astropy__astropy
astropy/coordinates/polarization.py
{ "start": 349, "end": 2055 }
class ____(NamedTuple): """Symbol for a Stokes coordinate.""" symbol: str = "" description: str = "" # This is table 29 in the FITS 4.0 paper FITS_STOKES_VALUE_SYMBOL_MAP = { 1: StokesSymbol("I", "Standard Stokes unpolarized"), 2: StokesSymbol("Q", "Standard Stokes linear"), 3: StokesSymbol("U", "Standard Stokes linear"), 4: StokesSymbol("V", "Standard Stokes circular"), -1: StokesSymbol("RR", "Right-right circular: <RR*>"), -2: StokesSymbol("LL", "Left-left circular: <LL*>"), -3: StokesSymbol("RL", "Right-left cross-circular: Re(<RL*>))"), -4: StokesSymbol("LR", "Left-right cross-circular: Re(<LR*>)=Im(<RL*>)"), -5: StokesSymbol("XX", "X parallel linear: <XX*>"), -6: StokesSymbol("YY", "Y parallel linear: <YY*>"), -7: StokesSymbol("XY", "XY cross linear: Re(<XY*>)"), -8: StokesSymbol("YX", "YX cross linear: Im(<XY*>)"), } STOKES_VALUE_SYMBOL_MAP = FITS_STOKES_VALUE_SYMBOL_MAP.copy() UNKNOWN_STOKES_VALUE = -99999 @contextmanager def custom_stokes_symbol_mapping( mapping: dict[int, StokesSymbol], replace: bool = False ) -> None: """ Add a custom set of mappings from values to Stokes symbols. Parameters ---------- mappings A list of dictionaries with custom mappings between values (integers) and `.StokesSymbol` classes. replace Replace all mappings with this one. """ global STOKES_VALUE_SYMBOL_MAP original_mapping = STOKES_VALUE_SYMBOL_MAP.copy() if not replace: STOKES_VALUE_SYMBOL_MAP = {**original_mapping, **mapping} else: STOKES_VALUE_SYMBOL_MAP = mapping yield STOKES_VALUE_SYMBOL_MAP = original_mapping
StokesSymbol