language
stringclasses
1 value
repo
stringclasses
346 values
path
stringlengths
6
201
class_span
dict
source
stringlengths
21
2.38M
target
stringlengths
1
96
python
sympy__sympy
sympy/vector/orienters.py
{ "start": 7197, "end": 9089 }
class ____(ThreeAngleOrienter): """ Class to denote a space-orienter. """ _in_order = False def __new__(cls, angle1, angle2, angle3, rot_order): obj = ThreeAngleOrienter.__new__(cls, angle1, angle2, angle3, rot_order) return obj def __init__(self, angle1, angle2, angle3, rot_order): """ Space rotation is similar to Body rotation, but the rotations are applied in the opposite order. Parameters ========== angle1, angle2, angle3 : Expr Three successive angles to rotate the coordinate system by rotation_order : string String defining the order of axes for rotation See Also ======== BodyOrienter : Orienter to orient systems wrt Euler angles. Examples ======== >>> from sympy.vector import CoordSys3D, SpaceOrienter >>> from sympy import symbols >>> q1, q2, q3 = symbols('q1 q2 q3') >>> N = CoordSys3D('N') To orient a coordinate system D with respect to N, each sequential rotation is always about N's orthogonal unit vectors. For example, a '123' rotation will specify rotations about N.i, then N.j, then N.k. Therefore, >>> space_orienter = SpaceOrienter(q1, q2, q3, '312') >>> D = N.orient_new('D', (space_orienter, )) is same as >>> from sympy.vector import AxisOrienter >>> axis_orienter1 = AxisOrienter(q1, N.i) >>> B = N.orient_new('B', (axis_orienter1, )) >>> axis_orienter2 = AxisOrienter(q2, N.j) >>> C = B.orient_new('C', (axis_orienter2, )) >>> axis_orienter3 = AxisOrienter(q3, N.k) >>> D = C.orient_new('C', (axis_orienter3, )) """ # Dummy initializer for docstrings pass
SpaceOrienter
python
allegroai__clearml
clearml/backend_api/services/v2_20/models.py
{ "start": 124808, "end": 127662 }
class ____(Request): """ Set the model ready flag to True. If the model is an output model of a task then try to publish the task. :param model: Model id :type model: str :param force_publish_task: Publish the associated task (if exists) even if it is not in the 'stopped' state. Optional, the default value is False. :type force_publish_task: bool :param publish_task: Indicates that the associated task (if exists) should be published. Optional, the default value is True. :type publish_task: bool """ _service = "models" _action = "set_ready" _version = "2.20" _schema = { "definitions": {}, "properties": { "force_publish_task": { "description": "Publish the associated task (if exists) even if it is not in the 'stopped' state. Optional, the default value is False.", "type": "boolean", }, "model": {"description": "Model id", "type": "string"}, "publish_task": { "description": "Indicates that the associated task (if exists) should be published. Optional, the default value is True.", "type": "boolean", }, }, "required": ["model"], "type": "object", } def __init__( self, model: str, force_publish_task: Optional[bool] = None, publish_task: Optional[bool] = None, **kwargs: Any ) -> None: super(SetReadyRequest, self).__init__(**kwargs) self.model = model self.force_publish_task = force_publish_task self.publish_task = publish_task @schema_property("model") def model(self) -> str: return self._property_model @model.setter def model(self, value: str) -> None: if value is None: self._property_model = None return self.assert_isinstance(value, "model", six.string_types) self._property_model = value @schema_property("force_publish_task") def force_publish_task(self) -> Optional[bool]: return self._property_force_publish_task @force_publish_task.setter def force_publish_task(self, value: Optional[bool]) -> None: if value is None: self._property_force_publish_task = None return self.assert_isinstance(value, "force_publish_task", (bool,)) self._property_force_publish_task = value 
@schema_property("publish_task") def publish_task(self) -> Optional[bool]: return self._property_publish_task @publish_task.setter def publish_task(self, value: Optional[bool]) -> None: if value is None: self._property_publish_task = None return self.assert_isinstance(value, "publish_task", (bool,)) self._property_publish_task = value
SetReadyRequest
python
tiangolo__fastapi
docs_src/python_types/tutorial011_py39.py
{ "start": 89, "end": 492 }
class ____(BaseModel): id: int name: str = "John Doe" signup_ts: Union[datetime, None] = None friends: list[int] = [] external_data = { "id": "123", "signup_ts": "2017-06-01 12:22", "friends": [1, "2", b"3"], } user = User(**external_data) print(user) # > User id=123 name='John Doe' signup_ts=datetime.datetime(2017, 6, 1, 12, 22) friends=[1, 2, 3] print(user.id) # > 123
User
python
tiangolo__fastapi
docs_src/sql_databases/tutorial001_an_py310.py
{ "start": 160, "end": 1768 }
class ____(SQLModel, table=True): id: int | None = Field(default=None, primary_key=True) name: str = Field(index=True) age: int | None = Field(default=None, index=True) secret_name: str sqlite_file_name = "database.db" sqlite_url = f"sqlite:///{sqlite_file_name}" connect_args = {"check_same_thread": False} engine = create_engine(sqlite_url, connect_args=connect_args) def create_db_and_tables(): SQLModel.metadata.create_all(engine) def get_session(): with Session(engine) as session: yield session SessionDep = Annotated[Session, Depends(get_session)] app = FastAPI() @app.on_event("startup") def on_startup(): create_db_and_tables() @app.post("/heroes/") def create_hero(hero: Hero, session: SessionDep) -> Hero: session.add(hero) session.commit() session.refresh(hero) return hero @app.get("/heroes/") def read_heroes( session: SessionDep, offset: int = 0, limit: Annotated[int, Query(le=100)] = 100, ) -> list[Hero]: heroes = session.exec(select(Hero).offset(offset).limit(limit)).all() return heroes @app.get("/heroes/{hero_id}") def read_hero(hero_id: int, session: SessionDep) -> Hero: hero = session.get(Hero, hero_id) if not hero: raise HTTPException(status_code=404, detail="Hero not found") return hero @app.delete("/heroes/{hero_id}") def delete_hero(hero_id: int, session: SessionDep): hero = session.get(Hero, hero_id) if not hero: raise HTTPException(status_code=404, detail="Hero not found") session.delete(hero) session.commit() return {"ok": True}
Hero
python
PrefectHQ__prefect
tests/server/models/test_flow_runs.py
{ "start": 7661, "end": 12671 }
class ____: async def test_update_flow_run_succeeds( self, flow, session, ): job_vars = {"foo": "bar"} flow_run = await models.flow_runs.create_flow_run( session=session, flow_run=schemas.core.FlowRun(flow_id=flow.id, flow_version="1.0"), ) flow_run_id = flow_run.id update_result = await models.flow_runs.update_flow_run( session=session, flow_run_id=flow_run_id, flow_run=schemas.actions.FlowRunUpdate( flow_version="The next one", job_variables=job_vars, ), ) assert update_result updated_flow_run = await models.flow_runs.read_flow_run( session=session, flow_run_id=flow_run_id ) assert flow_run_id == updated_flow_run.id == flow_run.id assert updated_flow_run.flow_version == "The next one" assert updated_flow_run.job_variables == job_vars async def test_update_flow_run_does_not_update_if_nothing_set(self, flow, session): flow_run = await models.flow_runs.create_flow_run( session=session, flow_run=schemas.core.FlowRun(flow_id=flow.id, flow_version="1.0"), ) flow_run_id = flow_run.id update_result = await models.flow_runs.update_flow_run( session=session, flow_run_id=flow_run_id, flow_run=schemas.actions.FlowRunUpdate(), ) assert update_result updated_flow_run = await models.flow_runs.read_flow_run( session=session, flow_run_id=flow_run_id ) assert flow_run_id == updated_flow_run.id == flow_run.id assert updated_flow_run.flow_version == "1.0" async def test_update_flow_run_returns_false_if_flow_run_does_not_exist( self, session ): assert not ( await models.flow_runs.update_flow_run( session=session, flow_run_id=uuid4(), flow_run=schemas.actions.FlowRunUpdate(), ) ) async def test_update_flow_run_labels( self, flow: orm_models.Flow, session: AsyncSession ): """Test that flow run labels can be updated by patching existing labels""" # Create a flow run with initial labels initial_labels: KeyValueLabels = {"env": "test", "version": "1.0"} flow_run = await models.flow_runs.create_flow_run( session=session, flow_run=schemas.core.FlowRun(flow_id=flow.id, 
labels=initial_labels), ) # Update with new labels new_labels: KeyValueLabels = {"version": "2.0", "new_key": "new_value"} update_success = await models.flow_runs.update_flow_run_labels( session=session, flow_run_id=flow_run.id, labels=new_labels ) assert update_success is True # Read the flow run back and verify labels were merged correctly updated_flow_run = await models.flow_runs.read_flow_run( session=session, flow_run_id=flow_run.id ) assert updated_flow_run assert updated_flow_run.labels == { "prefect.flow.id": str(flow.id), "env": "test", # Kept from initial labels "version": "2.0", # Updated from new labels "new_key": "new_value", # Added from new labels } async def test_update_flow_run_labels_raises_if_flow_run_does_not_exist( self, session: AsyncSession, caplog: pytest.LogCaptureFixture ): """Test that updating labels for a non-existent flow run raises""" with pytest.raises(ObjectNotFoundError) as exc: await models.flow_runs.update_flow_run_labels( session=session, flow_run_id=uuid4(), labels={"test": "label"} ) assert "Flow run with id" in str(exc.value) async def test_update_flow_run_labels_with_empty_initial_labels( self, flow: orm_models.Flow, session: AsyncSession ): """Test that labels can be added to a flow run with no existing labels""" # Create a flow run with no labels flow_run = await models.flow_runs.create_flow_run( session=session, flow_run=schemas.core.FlowRun( flow_id=flow.id, ), ) # Update with new labels new_labels: KeyValueLabels = {"env": "test", "version": "1.0"} update_success = await models.flow_runs.update_flow_run_labels( session=session, flow_run_id=flow_run.id, labels=new_labels ) assert update_success is True # Read the flow run back and verify labels were added updated_flow_run = await models.flow_runs.read_flow_run( session=session, flow_run_id=flow_run.id ) assert updated_flow_run assert updated_flow_run.labels == { "prefect.flow.id": str(flow.id), **new_labels, }
TestUpdateFlowRun
python
getsentry__sentry
src/sentry/integrations/slack/analytics.py
{ "start": 162, "end": 294 }
class ____(analytics.Event): actor_id: int | None = None @analytics.eventclass("integrations.slack.status")
SlackIntegrationAssign
python
Lightning-AI__lightning
tests/tests_pytorch/models/test_hparams.py
{ "start": 2603, "end": 2805 }
class ____(BoringDataModule): """Tests that a model can take an object.""" def __init__(self, hparams): super().__init__() self.save_hyperparameters(hparams)
SaveHparamsDataModule
python
instagram__MonkeyType
monkeytype/tracing.py
{ "start": 6380, "end": 10532 }
class ____: """CallTracer captures the concrete types involved in a function invocation. On a per function call basis, CallTracer will record the types of arguments supplied, the type of the function's return value (if any), and the types of values yielded by the function (if any). It emits a CallTrace object that contains the captured types when the function returns. Use it like so: sys.setprofile(CallTracer(MyCallLogger())) """ def __init__( self, logger: CallTraceLogger, max_typed_dict_size: int, code_filter: Optional[CodeFilter] = None, sample_rate: Optional[int] = None, ) -> None: self.logger = logger self.traces: Dict[FrameType, CallTrace] = {} self.sample_rate = sample_rate self.cache: Dict[CodeType, Optional[Callable[..., Any]]] = {} self.should_trace = code_filter self.max_typed_dict_size = max_typed_dict_size def _get_func(self, frame: FrameType) -> Optional[Callable[..., Any]]: code = frame.f_code if code not in self.cache: self.cache[code] = get_func(frame) return self.cache[code] def handle_call(self, frame: FrameType) -> None: if self.sample_rate and random.randrange(self.sample_rate) != 0: return func = self._get_func(frame) if func is None: return code = frame.f_code # I can't figure out a way to access the value sent to a generator via # send() from a stack frame. if frame in self.traces: # resuming a generator; we've already seen this frame return arg_names = code.co_varnames[: code.co_argcount + code.co_kwonlyargcount] arg_types = {} for name in arg_names: if name in frame.f_locals: arg_types[name] = get_type( frame.f_locals[name], max_typed_dict_size=self.max_typed_dict_size ) self.traces[frame] = CallTrace(func, arg_types) def handle_return(self, frame: FrameType, arg: Any) -> None: # In the case of a 'return' event, arg contains the return value, or # None, if the block returned because of an unhandled exception. We # need to distinguish the exceptional case (not a valid return type) # from a function returning (or yielding) None. 
In the latter case, the # the last instruction that was executed should always be a return or a # yield. typ = get_type(arg, max_typed_dict_size=self.max_typed_dict_size) last_opcode = frame.f_code.co_code[frame.f_lasti] trace = self.traces.get(frame) if trace is None: return elif last_opcode == YIELD_VALUE_OPCODE: trace.add_yield_type(typ) else: if last_opcode == RETURN_VALUE_OPCODE: trace.return_type = typ del self.traces[frame] self.logger.log(trace) def __call__(self, frame: FrameType, event: str, arg: Any) -> "CallTracer": code = frame.f_code if ( event not in SUPPORTED_EVENTS or code.co_name == "trace_types" or self.should_trace and not self.should_trace(code) ): return self try: if event == EVENT_CALL: self.handle_call(frame) elif event == EVENT_RETURN: self.handle_return(frame, arg) else: logger.error("Cannot handle event %s", event) except Exception: logger.exception("Failed collecting trace") return self @contextmanager def trace_calls( logger: CallTraceLogger, max_typed_dict_size: int, code_filter: Optional[CodeFilter] = None, sample_rate: Optional[int] = None, ) -> Iterator[None]: """Enable call tracing for a block of code""" old_trace = sys.getprofile() sys.setprofile(CallTracer(logger, max_typed_dict_size, code_filter, sample_rate)) try: yield finally: sys.setprofile(old_trace) logger.flush()
CallTracer
python
apache__airflow
providers/amazon/tests/unit/amazon/aws/sensors/test_glue.py
{ "start": 1089, "end": 8664 }
class ____: @mock.patch.object(GlueJobHook, "print_job_logs") @mock.patch.object(GlueJobHook, "get_conn") @mock.patch.object(GlueJobHook, "get_job_state") def test_poke(self, mock_get_job_state, mock_conn, mock_print_job_logs): mock_conn.return_value.get_job_run() mock_get_job_state.return_value = "SUCCEEDED" op = GlueJobSensor( task_id="test_glue_job_sensor", job_name="aws_test_glue_job", run_id="5152fgsfsjhsh61661", poke_interval=1, timeout=5, ) assert op.poke({}) mock_print_job_logs.assert_not_called() @mock.patch.object(GlueJobHook, "print_job_logs") @mock.patch.object(GlueJobHook, "get_conn") @mock.patch.object(GlueJobHook, "get_job_state") def test_poke_with_verbose_logging(self, mock_get_job_state, mock_conn, mock_print_job_logs): mock_conn.return_value.get_job_run() mock_get_job_state.return_value = "SUCCEEDED" job_name = "job_name" job_run_id = "job_run_id" op = GlueJobSensor( task_id="test_glue_job_sensor", job_name=job_name, run_id=job_run_id, poke_interval=1, timeout=5, verbose=True, ) assert op.poke({}) mock_print_job_logs.assert_called_once_with( job_name=job_name, run_id=job_run_id, continuation_tokens=ANY, ) @mock.patch.object(GlueJobHook, "print_job_logs") @mock.patch.object(GlueJobHook, "get_conn") @mock.patch.object(GlueJobHook, "get_job_state") def test_poke_false(self, mock_get_job_state, mock_conn, mock_print_job_logs): mock_conn.return_value.get_job_run() mock_get_job_state.return_value = "RUNNING" op = GlueJobSensor( task_id="test_glue_job_sensor", job_name="aws_test_glue_job", run_id="5152fgsfsjhsh61661", poke_interval=1, timeout=5, ) assert not op.poke({}) mock_print_job_logs.assert_not_called() @mock.patch.object(GlueJobHook, "print_job_logs") @mock.patch.object(GlueJobHook, "get_conn") @mock.patch.object(GlueJobHook, "get_job_state") def test_poke_false_with_verbose_logging(self, mock_get_job_state, mock_conn, mock_print_job_logs): mock_conn.return_value.get_job_run() mock_get_job_state.return_value = "RUNNING" job_name = "job_name" 
job_run_id = "job_run_id" op = GlueJobSensor( task_id="test_glue_job_sensor", job_name=job_name, run_id=job_run_id, poke_interval=1, timeout=5, verbose=True, ) assert not op.poke({}) mock_print_job_logs.assert_called_once_with( job_name=job_name, run_id=job_run_id, continuation_tokens=ANY, ) @mock.patch.object(GlueJobHook, "print_job_logs") @mock.patch.object(GlueJobHook, "get_conn") @mock.patch.object(GlueJobHook, "get_job_state") def test_poke_failed_job_with_verbose_logging(self, mock_get_job_state, mock_conn, mock_print_job_logs): mock_conn.return_value.get_job_run() mock_get_job_state.return_value = "FAILED" job_name = "job_name" job_run_id = "job_run_id" op = GlueJobSensor( task_id="test_glue_job_sensor", job_name=job_name, run_id=job_run_id, poke_interval=1, timeout=5, verbose=True, ) with pytest.raises(AirflowException): assert not op.poke({}) mock_print_job_logs.assert_called_once_with( job_name=job_name, run_id=job_run_id, continuation_tokens=ANY, ) @mock.patch("airflow.providers.amazon.aws.hooks.glue.GlueJobHook.get_job_state") def test_fail_poke(self, get_job_state): job_name = "job_name" job_run_id = "job_run_id" op = GlueJobSensor( task_id="test_glue_job_sensor", job_name=job_name, run_id=job_run_id, poke_interval=1, timeout=5, verbose=True, ) op.verbose = False job_state = "FAILED" get_job_state.return_value = job_state job_error_message = "Exiting Job" with pytest.raises(AirflowException, match=job_error_message): op.poke(context={}) def test_deferrable_execute_raises_task_deferred(self): job_name = "job_name" job_run_id = "job_run_id" sensor = GlueJobSensor( task_id="test_glue_job_sensor", job_name=job_name, run_id=job_run_id, deferrable=True, poke_interval=1, timeout=5, ) with pytest.raises(TaskDeferred): sensor.execute({}) @mock.patch.object(GlueJobSensor, "defer") def test_default_timeout(self, mock_defer): mock_defer.side_effect = TaskDeferred(trigger=mock.Mock(), method_name="execute_complete") sensor = GlueJobSensor( 
task_id="test_glue_job_sensor", job_name="job_name", run_id="job_run_id", deferrable=True, poke_interval=5, max_retries=30, ) with pytest.raises(TaskDeferred): sensor.execute({}) call_kwargs = mock_defer.call_args.kwargs["trigger"] assert call_kwargs.attempts == 30 mock_defer.assert_called_once() def test_default_args(self): job_name = "job_name" job_run_id = "job_run_id" sensor = GlueJobSensor( task_id="test_glue_job_sensor", job_name=job_name, run_id=job_run_id, ) assert sensor.poke_interval == 120 assert sensor.verbose is False assert sensor.deferrable is False or isinstance(sensor.deferrable, bool) assert sensor.aws_conn_id == "aws_default" def test_custom_args(self): job_name = "job_name" job_run_id = "job_run_id" sensor = GlueJobSensor( task_id="test_glue_job_sensor", job_name=job_name, run_id=job_run_id, verbose=True, deferrable=True, poke_interval=10, aws_conn_id="custom_conn", max_retries=20, ) assert sensor.verbose is True assert sensor.deferrable is True assert sensor.poke_interval == 10 assert sensor.aws_conn_id == "custom_conn" assert sensor.max_retries == 20 def test_defferable_params_passed_to_trigger(self): job_name = "job_name" job_run_id = "job_run_id" sensor = GlueJobSensor( task_id="test_glue_job_sensor", job_name=job_name, run_id=job_run_id, verbose=True, deferrable=True, poke_interval=10, region_name="us-west-2", aws_conn_id="custom_conn", max_retries=20, ) with pytest.raises(TaskDeferred) as defer: sensor.execute({}) assert defer.value.trigger.job_name == job_name assert defer.value.trigger.run_id == job_run_id assert defer.value.trigger.region_name == "us-west-2" assert defer.value.trigger.verbose assert defer.value.trigger.waiter_delay == 10 assert defer.value.trigger.attempts == 20 assert defer.value.trigger.aws_conn_id == "custom_conn"
TestGlueJobSensor
python
pallets__jinja
tests/test_ext.py
{ "start": 10520, "end": 16845 }
class ____: def test_trans(self): tmpl = i18n_env.get_template("child.html") assert tmpl.render(LANGUAGE="de") == "<title>fehlend</title>pass auf" def test_trans_plural(self): tmpl = i18n_env.get_template("plural.html") assert tmpl.render(LANGUAGE="de", user_count=1) == "Ein Benutzer online" assert tmpl.render(LANGUAGE="de", user_count=2) == "2 Benutzer online" def test_trans_plural_with_functions(self): tmpl = i18n_env.get_template("plural2.html") def get_user_count(): get_user_count.called += 1 return 1 get_user_count.called = 0 assert tmpl.render(LANGUAGE="de", get_user_count=get_user_count) == "1s" assert get_user_count.called == 1 def test_complex_plural(self): tmpl = i18n_env.from_string( "{% trans foo=42, count=2 %}{{ count }} item{% " "pluralize count %}{{ count }} items{% endtrans %}" ) assert tmpl.render() == "2 items" pytest.raises( TemplateAssertionError, i18n_env.from_string, "{% trans foo %}...{% pluralize bar %}...{% endtrans %}", ) def test_trans_stringformatting(self): tmpl = i18n_env.get_template("stringformat.html") assert tmpl.render(LANGUAGE="de", user_count=5) == "Benutzer: 5" def test_trimmed(self): tmpl = i18n_env.from_string( "{%- trans trimmed %} hello\n world {% endtrans -%}" ) assert tmpl.render() == "hello world" def test_trimmed_policy(self): s = "{%- trans %} hello\n world {% endtrans -%}" tmpl = i18n_env.from_string(s) trimmed_tmpl = i18n_env_trimmed.from_string(s) assert tmpl.render() == " hello\n world " assert trimmed_tmpl.render() == "hello world" def test_trimmed_policy_override(self): tmpl = i18n_env_trimmed.from_string( "{%- trans notrimmed %} hello\n world {% endtrans -%}" ) assert tmpl.render() == " hello\n world " def test_trimmed_vars(self): tmpl = i18n_env.from_string( '{%- trans trimmed x="world" %} hello\n {{ x }} {% endtrans -%}' ) assert tmpl.render() == "hello world" def test_trimmed_varname_trimmed(self): # unlikely variable name, but when used as a variable # it should not enable trimming tmpl = 
i18n_env.from_string( "{%- trans trimmed = 'world' %} hello\n {{ trimmed }} {% endtrans -%}" ) assert tmpl.render() == " hello\n world " def test_extract(self): from jinja2.ext import babel_extract source = BytesIO( b""" {{ gettext('Hello World') }} {% trans %}Hello World{% endtrans %} {% trans %}{{ users }} user{% pluralize %}{{ users }} users{% endtrans %} """ ) assert list(babel_extract(source, ("gettext", "ngettext", "_"), [], {})) == [ (2, "gettext", "Hello World", []), (3, "gettext", "Hello World", []), (4, "ngettext", ("%(users)s user", "%(users)s users", None), []), ] def test_extract_trimmed(self): from jinja2.ext import babel_extract source = BytesIO( b""" {{ gettext(' Hello \n World') }} {% trans trimmed %} Hello \n World{% endtrans %} {% trans trimmed %}{{ users }} \n user {%- pluralize %}{{ users }} \n users{% endtrans %} """ ) assert list(babel_extract(source, ("gettext", "ngettext", "_"), [], {})) == [ (2, "gettext", " Hello \n World", []), (4, "gettext", "Hello World", []), (6, "ngettext", ("%(users)s user", "%(users)s users", None), []), ] def test_extract_trimmed_option(self): from jinja2.ext import babel_extract source = BytesIO( b""" {{ gettext(' Hello \n World') }} {% trans %} Hello \n World{% endtrans %} {% trans %}{{ users }} \n user {%- pluralize %}{{ users }} \n users{% endtrans %} """ ) opts = {"trimmed": "true"} assert list(babel_extract(source, ("gettext", "ngettext", "_"), [], opts)) == [ (2, "gettext", " Hello \n World", []), (4, "gettext", "Hello World", []), (6, "ngettext", ("%(users)s user", "%(users)s users", None), []), ] def test_comment_extract(self): from jinja2.ext import babel_extract source = BytesIO( b""" {# trans first #} {{ gettext('Hello World') }} {% trans %}Hello World{% endtrans %}{# trans second #} {#: third #} {% trans %}{{ users }} user{% pluralize %}{{ users }} users{% endtrans %} """ ) assert list( babel_extract(source, ("gettext", "ngettext", "_"), ["trans", ":"], {}) ) == [ (3, "gettext", "Hello World", 
["first"]), (4, "gettext", "Hello World", ["second"]), (6, "ngettext", ("%(users)s user", "%(users)s users", None), ["third"]), ] def test_extract_context(self): from jinja2.ext import babel_extract source = BytesIO( b""" {{ pgettext("babel", "Hello World") }} {{ npgettext("babel", "%(users)s user", "%(users)s users", users) }} """ ) assert list(babel_extract(source, ("pgettext", "npgettext", "_"), [], {})) == [ (2, "pgettext", ("babel", "Hello World"), []), (3, "npgettext", ("babel", "%(users)s user", "%(users)s users", None), []), ] def test_nested_trans_error(self): s = "{% trans %}foo{% trans %}{% endtrans %}" with pytest.raises(TemplateSyntaxError) as excinfo: i18n_env.from_string(s) assert "trans blocks can't be nested" in str(excinfo.value) def test_trans_block_error(self): s = "{% trans %}foo{% wibble bar %}{% endwibble %}{% endtrans %}" with pytest.raises(TemplateSyntaxError) as excinfo: i18n_env.from_string(s) assert "saw `wibble`" in str(excinfo.value)
TestInternationalization
python
pytorch__pytorch
torch/testing/_internal/distributed/nn/api/remote_module_test.py
{ "start": 18550, "end": 23067 }
class ____(CommonRemoteModuleTest): @property def world_size(self): # Override setting in CommonRemoteModuleTest return 3 @dist_utils.dist_init def test_send_remote_module_over_the_wire(self): if self.rank != 0: return dst_worker1_name = dist_utils.worker_name((self.rank + 1) % self.world_size) dst_worker2_name = dist_utils.worker_name((self.rank + 2) % self.world_size) # Unpickled attributes include both the inherent attributes of RemoteModule # (not inherited from the superclass) and two installed methods. expected_unpickled_attrs = list(_REMOTE_MODULE_PICKLED_ATTRIBUTES) expected_unpickled_attrs.append("forward_async") expected_unpickled_attrs.append("forward") # Create a remote module on worker1 and then pass it to worker2 over the RPC layer. for remote_module in self._create_remote_module_iter( dst_worker1_name, modes=[ModuleCreationMode.MODULE_CTOR] ): # Test querying some simple attributes from worker2. attrs = rpc.rpc_sync( dst_worker2_name, remote_module_attributes, (remote_module,) ) self.assertListEqual(list(attrs.keys()), expected_unpickled_attrs) self.assertEqual(attrs["on"], "worker1") self.assertEqual(attrs["device"], "cpu") self.assertFalse(attrs["is_device_map_set"]) self.assertFalse(attrs["is_scriptable"]) # Test the installed methods on worker1's can be initiated by worker2 over RPC layer. # NOTE: In practice a remote module should be directly stored on the worker that runs ``forward``` or ``forward_async``, # not have another worker to initiate forward over the RPC layer. 
args = (torch.ones(1), 2, "3") ret1 = rpc.rpc_sync(dst_worker2_name, remote_forward, (remote_module, args)) self.assertEqual(ret1, tuple(reversed(args))) ret2 = rpc.rpc_sync( dst_worker2_name, remote_forward_async, (remote_module, args) ) self.assertEqual(ret2, tuple(reversed(args))) @dist_utils.dist_init def test_send_remote_module_over_the_wire_script_not_supported(self): if self.rank != 0: return dst_worker1_name = dist_utils.worker_name((self.rank + 1) % self.world_size) dst_worker2_name = dist_utils.worker_name((self.rank + 2) % self.world_size) # Unpickled attributes include both the inherent attributes of RemoteModule # (not inherited from the superclass) and two installed methods. expected_unpickled_attrs = list(_REMOTE_MODULE_PICKLED_ATTRIBUTES) expected_unpickled_attrs.append("forward_async") expected_unpickled_attrs.append("forward") with self.assertRaisesRegex( RuntimeError, "Passing a script RemoteModule over RPC is not supported." ): # Create a remote module on worker1 and then pass it to worker2 over the RPC layer. for remote_module in self._create_remote_module_iter( dst_worker1_name, modes=[ModuleCreationMode.MODULE_CTOR_WITH_INTERFACE] ): # Test querying some simple attributes from worker2. rpc.rpc_sync( dst_worker2_name, remote_module_attributes, (remote_module,) ) @dist_utils.dist_init def test_create_remote_module_from_module_rref(self): if self.rank != 0: return dst_worker1_name = dist_utils.worker_name((self.rank + 1) % self.world_size) dst_worker2_name = dist_utils.worker_name((self.rank + 2) % self.world_size) # Create a remote module on worker1 and then pass its `module_rref` to worker2 over the RPC layer. 
for remote_module in self._create_remote_module_iter( dst_worker1_name, modes=[ModuleCreationMode.MODULE_CTOR] ): remote_module2 = rpc.rpc_sync( dst_worker2_name, RemoteModule.init_from_module_rref, (dst_worker2_name, remote_module.get_module_rref()), ) args = (torch.ones(1), 2, "3") ret1 = rpc.rpc_sync(dst_worker1_name, remote_forward, (remote_module, args)) ret2 = rpc.rpc_sync( dst_worker2_name, remote_forward, (remote_module2, args) ) self.assertEqual(ret1, ret2)
ThreeWorkersRemoteModuleTest
python
falconry__falcon
examples/ws_tutorial/ws_tutorial/app.py
{ "start": 1331, "end": 2052 }
class ____: def __init__(self, protected_routes: list[str] | None = None): if protected_routes is None: protected_routes = [] self.protected_routes = protected_routes async def process_request_ws(self, req: Request, ws: WebSocket): # Opening a connection so we can receive the token await ws.accept() # Check if the route is protected if req.path not in self.protected_routes: return token = await ws.receive_text() if token != 'very secure token': await ws.close(1008) return # Never log tokens in production logger.info('Client with token %r Authenticated', token)
AuthMiddleware
python
python__mypy
mypy/nodes.py
{ "start": 136393, "end": 146168 }
class ____(SymbolNode): """ A symbol node representing a type alias. Type alias is a static concept, in contrast to variables with types like Type[...]. Namely: * type aliases - can be used in type context (annotations) - cannot be re-assigned * variables with type Type[...] - cannot be used in type context - but can be re-assigned An alias can be defined only by an assignment to a name (not any other lvalues). Such assignment defines an alias by default. To define a variable, an explicit Type[...] annotation is required. As an exception, at non-global scope non-subscripted rvalue creates a variable even without an annotation. This exception exists to accommodate the common use case of class-valued attributes. See SemanticAnalyzerPass2.check_and_set_up_type_alias for details. Aliases can be generic. We use bound type variables for generic aliases, similar to classes. Essentially, type aliases work as macros that expand textually. The definition and expansion rules are following: 1. An alias targeting a generic class without explicit variables act as the given class (this doesn't apply to TypedDict, Tuple and Callable, which are not proper classes but special type constructors): A = List AA = List[Any] x: A # same as List[Any] x: A[int] # same as List[int] x: AA # same as List[Any] x: AA[int] # Error! C = Callable # Same as Callable[..., Any] T = Tuple # Same as Tuple[Any, ...] 2. An alias using explicit type variables in its rvalue expects replacements (type arguments) for these variables. If missing, they are treated as Any, like for other generics: B = List[Tuple[T, T]] x: B # same as List[Tuple[Any, Any]] x: B[int] # same as List[Tuple[int, int]] def f(x: B[T]) -> T: ... # without T, Any would be used here 3. An alias can be defined using another aliases. 
In the definition rvalue the Any substitution doesn't happen for top level unsubscripted generic classes: A = List B = A # here A is expanded to List, _not_ List[Any], # to match the Python runtime behaviour x: B[int] # same as List[int] C = List[A] # this expands to List[List[Any]] AA = List[T] D = AA # here AA expands to List[Any] x: D[int] # Error! Note: the fact that we support aliases like `A = List` means that the target type will be initially an instance type with wrong number of type arguments. Such instances are all fixed either during or after main semantic analysis passes. We therefore store the difference between `List` and `List[Any]` rvalues (targets) using the `no_args` flag. Meaning of other fields: target: The target type. For generic aliases contains bound type variables as nested types (currently TypeVar and ParamSpec are supported). _fullname: Qualified name of this type alias. This is used in particular to track fine-grained dependencies from aliases. module: Module where the alias was defined. alias_tvars: Type variables used to define this alias. normalized: Used to distinguish between `A = List`, and `A = list`. Both are internally stored using `builtins.list` (because `typing.List` is itself an alias), while the second cannot be subscripted because of Python runtime limitation. line and column: Line and column on the original alias definition. 
eager: If True, immediately expand alias when referred to (useful for aliases within functions that can't be looked up from the symbol table) """ __slots__ = ( "target", "_fullname", "module", "alias_tvars", "no_args", "normalized", "_is_recursive", "eager", "tvar_tuple_index", "python_3_12_type_alias", ) __match_args__ = ("name", "target", "alias_tvars", "no_args") def __init__( self, target: mypy.types.Type, fullname: str, module: str, line: int, column: int, *, alias_tvars: list[mypy.types.TypeVarLikeType] | None = None, no_args: bool = False, normalized: bool = False, eager: bool = False, python_3_12_type_alias: bool = False, ) -> None: self._fullname = fullname self.module = module self.target = target if alias_tvars is None: alias_tvars = [] self.alias_tvars = alias_tvars self.no_args = no_args self.normalized = normalized # This attribute is manipulated by TypeAliasType. If non-None, # it is the cached value. self._is_recursive: bool | None = None self.eager = eager self.python_3_12_type_alias = python_3_12_type_alias self.tvar_tuple_index = None for i, t in enumerate(alias_tvars): if isinstance(t, mypy.types.TypeVarTupleType): self.tvar_tuple_index = i super().__init__(line, column) @classmethod def from_tuple_type(cls, info: TypeInfo) -> TypeAlias: """Generate an alias to the tuple type described by a given TypeInfo. NOTE: this doesn't set type alias type variables (for generic tuple types), they must be set by the caller (when fully analyzed). """ assert info.tuple_type # TODO: is it possible to refactor this to set the correct type vars here? return TypeAlias( info.tuple_type.copy_modified( # Create an Instance similar to fill_typevars(). fallback=mypy.types.Instance( info, mypy.types.type_vars_as_args(info.defn.type_vars) ) ), info.fullname, info.module_name, info.line, info.column, ) @classmethod def from_typeddict_type(cls, info: TypeInfo) -> TypeAlias: """Generate an alias to the TypedDict type described by a given TypeInfo. 
NOTE: this doesn't set type alias type variables (for generic TypedDicts), they must be set by the caller (when fully analyzed). """ assert info.typeddict_type # TODO: is it possible to refactor this to set the correct type vars here? return TypeAlias( info.typeddict_type.copy_modified( # Create an Instance similar to fill_typevars(). fallback=mypy.types.Instance( info, mypy.types.type_vars_as_args(info.defn.type_vars) ) ), info.fullname, info.module_name, info.line, info.column, ) @property def name(self) -> str: return self._fullname.split(".")[-1] @property def fullname(self) -> str: return self._fullname @property def has_param_spec_type(self) -> bool: return any(isinstance(v, mypy.types.ParamSpecType) for v in self.alias_tvars) def accept(self, visitor: NodeVisitor[T]) -> T: return visitor.visit_type_alias(self) def serialize(self) -> JsonDict: data: JsonDict = { ".class": "TypeAlias", "fullname": self._fullname, "module": self.module, "target": self.target.serialize(), "alias_tvars": [v.serialize() for v in self.alias_tvars], "no_args": self.no_args, "normalized": self.normalized, "python_3_12_type_alias": self.python_3_12_type_alias, } return data @classmethod def deserialize(cls, data: JsonDict) -> TypeAlias: assert data[".class"] == "TypeAlias" fullname = data["fullname"] module = data["module"] alias_tvars = [mypy.types.deserialize_type(v) for v in data["alias_tvars"]] assert all(isinstance(t, mypy.types.TypeVarLikeType) for t in alias_tvars) target = mypy.types.deserialize_type(data["target"]) no_args = data["no_args"] normalized = data["normalized"] python_3_12_type_alias = data["python_3_12_type_alias"] return cls( target, fullname, module, -1, -1, alias_tvars=cast(list[mypy.types.TypeVarLikeType], alias_tvars), no_args=no_args, normalized=normalized, python_3_12_type_alias=python_3_12_type_alias, ) def write(self, data: WriteBuffer) -> None: write_tag(data, TYPE_ALIAS) write_str(data, self._fullname) write_str(data, self.module) 
self.target.write(data) mypy.types.write_type_list(data, self.alias_tvars) write_bool(data, self.no_args) write_bool(data, self.normalized) write_bool(data, self.python_3_12_type_alias) write_tag(data, END_TAG) @classmethod def read(cls, data: ReadBuffer) -> TypeAlias: fullname = read_str(data) module = read_str(data) target = mypy.types.read_type(data) alias_tvars = mypy.types.read_type_var_likes(data) ret = TypeAlias( target, fullname, module, -1, -1, alias_tvars=alias_tvars, no_args=read_bool(data), normalized=read_bool(data), python_3_12_type_alias=read_bool(data), ) assert read_tag(data) == END_TAG return ret
TypeAlias
python
Unity-Technologies__ml-agents
ml-agents/mlagents/trainers/cli_utils.py
{ "start": 279, "end": 660 }
class ____(argparse.Action): """ Internal custom Action to raise warning when argument is called. """ def __init__(self, nargs=0, **kwargs): super().__init__(nargs=nargs, **kwargs) def __call__(self, arg_parser, namespace, values, option_string=None): logger.warning(f"The command line argument {option_string} was removed.")
RaiseRemovedWarning
python
huggingface__transformers
src/transformers/models/csm/modular_csm.py
{ "start": 6351, "end": 10858 }
class ____(LlamaModel, CsmPreTrainedModel): config: CsmDepthDecoderConfig def __init__(self, config): super().__init__(config) self.embed_tokens = nn.Embedding((config.num_codebooks * config.vocab_size), config.backbone_hidden_size) self.inputs_embeds_projector = nn.Linear(config.backbone_hidden_size, config.hidden_size, bias=False) @check_model_inputs() @auto_docstring def forward( self, input_ids: Optional[torch.LongTensor] = None, backbone_last_hidden_state: Optional[torch.FloatTensor] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Cache] = None, inputs_embeds: Optional[torch.FloatTensor] = None, use_cache: Optional[bool] = None, cache_position: Optional[torch.LongTensor] = None, **kwargs: Unpack[TransformersKwargs], ) -> Union[tuple, BaseModelOutputWithPast]: r""" backbone_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, backbone_hidden_size)`, *optional*): The last hidden state of the backbone model. Such input is required when the first codebook token (the one generated by the backbone model) is provided in the `input_ids` argument. """ if position_ids is not None and not is_torchdynamo_compiling(): logger.warning_once( "Custom `position_ids` were provided but will be ignored. CSM depth decoder automatically determines position_ids " "from `cache_position` and as it requires them to be identical across the batch, the provided position_ids will be ignored." 
) position_ids = None if (input_ids is None) ^ (inputs_embeds is not None): raise ValueError("You must specify exactly one of input_ids or inputs_embeds.") if use_cache and past_key_values is None: past_key_values = DynamicCache(config=self.config) if cache_position is None: past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0 inputs_seq_length = inputs_embeds.shape[1] if inputs_embeds is not None else input_ids.shape[1] device = inputs_embeds.device if inputs_embeds is not None else input_ids.device cache_position = torch.arange(past_seen_tokens, past_seen_tokens + inputs_seq_length, device=device) if inputs_embeds is None: codebook_idxs = torch.clamp(cache_position - 1, min=0) offset = codebook_idxs * self.vocab_size inputs_embeds = self.embed_tokens(input_ids + offset) input_ids_are_first_codebook = cache_position[0] == 0 if backbone_last_hidden_state is not None: inputs_embeds[:, 0] = backbone_last_hidden_state else: if not is_torchdynamo_compiling() and input_ids_are_first_codebook: logger.warning( "When the first codebook token is provided, `backbone_last_hidden_state` should also be provided for correct inference." 
) inputs_embeds = self.inputs_embeds_projector(inputs_embeds) causal_mask = create_causal_mask( config=self.config, input_embeds=inputs_embeds, attention_mask=attention_mask, cache_position=cache_position, past_key_values=past_key_values, position_ids=position_ids, ) hidden_states = inputs_embeds # create position embeddings to be shared across the decoder layers position_ids = cache_position.unsqueeze(0) position_embeddings = self.rotary_emb(hidden_states, position_ids=position_ids) for decoder_layer in self.layers[: self.config.num_hidden_layers]: hidden_states = decoder_layer( hidden_states, attention_mask=causal_mask, position_ids=position_ids, past_key_values=past_key_values, use_cache=use_cache, cache_position=cache_position, position_embeddings=position_embeddings, **kwargs, ) hidden_states = self.norm(hidden_states) return BaseModelOutputWithPast( last_hidden_state=hidden_states, past_key_values=past_key_values if use_cache else None, )
CsmDepthDecoderModel
python
Textualize__textual
src/textual/eta.py
{ "start": 149, "end": 4645 }
class ____: """Calculate speed and estimate time to arrival.""" def __init__( self, estimation_period: float = 60, extrapolate_period: float = 30 ) -> None: """Create an ETA. Args: estimation_period: Period in seconds, used to calculate speed. extrapolate_period: Maximum number of seconds used to estimate progress after last sample. """ self.estimation_period = estimation_period self.max_extrapolate = extrapolate_period self._samples: list[tuple[float, float]] = [(0.0, 0.0)] self._add_count = 0 def __rich_repr__(self) -> rich.repr.Result: yield "speed", self.speed yield "eta", self.get_eta(monotonic()) @property def first_sample(self) -> tuple[float, float]: """First sample.""" assert self._samples, "Assumes samples not empty" return self._samples[0] @property def last_sample(self) -> tuple[float, float]: """Last sample.""" assert self._samples, "Assumes samples not empty" return self._samples[-1] def reset(self) -> None: """Start ETA calculations from current time.""" del self._samples[:] def add_sample(self, time: float, progress: float) -> None: """Add a new sample. Args: time: Time when sample occurred. progress: Progress ratio (0 is start, 1 is complete). 
""" if self._samples and self.last_sample[1] > progress: # If progress goes backwards, we need to reset calculations self.reset() self._samples.append((time, progress)) self._add_count += 1 if self._add_count % 100 == 0: # Prune periodically so we don't accumulate vast amounts of samples self._prune() def _prune(self) -> None: """Prune old samples.""" if len(self._samples) <= 10: # Keep at least 10 samples return prune_time = self._samples[-1][0] - self.estimation_period index = bisect.bisect_left(self._samples, (prune_time, 0)) del self._samples[:index] def _get_progress_at(self, time: float) -> tuple[float, float]: """Get the progress at a specific time.""" index = bisect.bisect_left(self._samples, (time, 0)) if index >= len(self._samples): return self.last_sample if index == 0: return self.first_sample # Linearly interpolate progress between two samples time1, progress1 = self._samples[index - 1] time2, progress2 = self._samples[index] factor = (time - time1) / (time2 - time1) intermediate_progress = progress1 + (progress2 - progress1) * factor return time, intermediate_progress @property def speed(self) -> float | None: """The current speed, or `None` if it couldn't be calculated.""" if len(self._samples) < 2: # Need at least 2 samples to calculate speed return None recent_sample_time, progress2 = self.last_sample progress_start_time, progress1 = self._get_progress_at( recent_sample_time - self.estimation_period ) if recent_sample_time - progress_start_time < 1: # Require at least a second span to calculate speed. return None time_delta = recent_sample_time - progress_start_time distance = progress2 - progress1 speed = distance / time_delta if time_delta else 0 return speed def get_eta(self, time: float) -> int | None: """Estimated seconds until completion, or `None` if no estimate can be made. Args: time: Current time. 
""" speed = self.speed if not speed: # Not enough samples to guess return None recent_time, recent_progress = self.last_sample remaining = 1.0 - recent_progress if remaining <= 0: # Complete return 0 # The bar is not complete, so we will extrapolate progress # This will give us a countdown, even with no samples time_since_sample = min(self.max_extrapolate, time - recent_time) extrapolate_progress = speed * time_since_sample # We don't want to extrapolate all the way to 0, as that would erroneously suggest it is finished eta = max(1.0, (remaining - extrapolate_progress) / speed) return ceil(eta)
ETA
python
google__pytype
pytype/abstract/_typing.py
{ "start": 21891, "end": 22046 }
class ____(_TypeVariable): """Parameter of a callable type (typing.ParamSpec).""" _INSTANCE_CLASS: type[ParamSpecInstance] = ParamSpecInstance
ParamSpec
python
spyder-ide__spyder
spyder/plugins/ipythonconsole/widgets/figurebrowser.py
{ "start": 470, "end": 2611 }
class ____(RichJupyterWidget): """ Widget with the necessary attributes and methods to intercept the figures sent by the kernel to the IPython Console and send it to the plots plugin. This widget can also block the plotting of inline figures in the IPython Console so that figures are only plotted in the plots plugin. """ _mute_inline_plotting = None sended_render_message = False def set_mute_inline_plotting(self, mute_inline_plotting): """Set mute_inline_plotting""" self._mute_inline_plotting = mute_inline_plotting # ---- Private API (overrode by us) def _handle_display_data(self, msg): """ Reimplemented to handle communications between the figure explorer and the kernel. """ img = None data = msg['content']['data'] if 'image/svg+xml' in data: fmt = 'image/svg+xml' img = data['image/svg+xml'] elif 'image/png' in data: # PNG data is base64 encoded as it passes over the network # in a JSON structure so we decode it. fmt = 'image/png' img = decodebytes(data['image/png'].encode('ascii')) elif 'image/jpeg' in data and self._jpg_supported: fmt = 'image/jpeg' img = decodebytes(data['image/jpeg'].encode('ascii')) if img is not None: self.sig_new_inline_figure.emit(img, fmt) if self._mute_inline_plotting: if not self.sended_render_message: self._append_html("<br>", before_prompt=True) self.append_html_message( _('Figures are displayed in the Plots pane by ' 'default. To make them also appear inline in the ' 'console, you need to uncheck "Mute inline ' 'plotting" under the options menu of Plots.'), before_prompt=True ) self.sended_render_message = True return return super()._handle_display_data(msg)
FigureBrowserWidget
python
apache__airflow
providers/amazon/src/airflow/providers/amazon/aws/operators/comprehend.py
{ "start": 3470, "end": 9825 }
class ____(ComprehendBaseOperator): """ Create a comprehend pii entities detection job for a collection of documents. .. seealso:: For more information on how to use this operator, take a look at the guide: :ref:`howto/operator:ComprehendStartPiiEntitiesDetectionJobOperator` :param input_data_config: The input properties for a PII entities detection job. (templated) :param output_data_config: Provides `configuration` parameters for the output of PII entity detection jobs. (templated) :param mode: Specifies whether the output provides the locations (offsets) of PII entities or a file in which PII entities are redacted. If you set the mode parameter to ONLY_REDACTION. In that case you must provide a RedactionConfig in start_pii_entities_kwargs. :param data_access_role_arn: The Amazon Resource Name (ARN) of the IAM role that grants Amazon Comprehend read access to your input data. (templated) :param language_code: The language of the input documents. (templated) :param start_pii_entities_kwargs: Any optional parameters to pass to the job. If JobName is not provided in start_pii_entities_kwargs, operator will create. :param wait_for_completion: Whether to wait for job to stop. (default: True) :param waiter_delay: Time in seconds to wait between status checks. (default: 60) :param waiter_max_attempts: Maximum number of attempts to check for job completion. (default: 20) :param deferrable: If True, the operator will wait asynchronously for the job to stop. This implies waiting for completion. This mode requires aiobotocore module to be installed. (default: False) :param aws_conn_id: The Airflow connection used for AWS credentials. If this is ``None`` or empty then the default boto3 behaviour is used. If running Airflow in a distributed manner and aws_conn_id is None or empty, then default boto3 configuration would be used (and must be maintained on each worker node). :param region_name: AWS region_name. If not specified then the default boto3 behaviour is used. 
:param verify: Whether to verify SSL certificates. See: https://boto3.amazonaws.com/v1/documentation/api/latest/reference/core/session.html :param botocore_config: Configuration dictionary (key-values) for botocore client. See: https://botocore.amazonaws.com/v1/documentation/api/latest/reference/config.html """ operator_extra_links = (ComprehendPiiEntitiesDetectionLink(),) def __init__( self, input_data_config: dict, output_data_config: dict, mode: str, data_access_role_arn: str, language_code: str, start_pii_entities_kwargs: dict[str, Any] | None = None, wait_for_completion: bool = True, waiter_delay: int = 60, waiter_max_attempts: int = 20, deferrable: bool = conf.getboolean("operators", "default_deferrable", fallback=False), **kwargs, ): super().__init__( input_data_config=input_data_config, output_data_config=output_data_config, data_access_role_arn=data_access_role_arn, language_code=language_code, **kwargs, ) self.mode = mode self.start_pii_entities_kwargs = start_pii_entities_kwargs or {} self.wait_for_completion = wait_for_completion self.waiter_delay = waiter_delay self.waiter_max_attempts = waiter_max_attempts self.deferrable = deferrable def execute(self, context: Context) -> str: if self.start_pii_entities_kwargs.get("JobName", None) is None: self.start_pii_entities_kwargs["JobName"] = ( f"start_pii_entities_detection_job-{int(utcnow().timestamp())}" ) self.log.info( "Submitting start pii entities detection job '%s'.", self.start_pii_entities_kwargs["JobName"] ) job_id = self.client.start_pii_entities_detection_job( InputDataConfig=self.input_data_config, OutputDataConfig=self.output_data_config, Mode=self.mode, DataAccessRoleArn=self.data_access_role_arn, LanguageCode=self.language_code, **self.start_pii_entities_kwargs, )["JobId"] job_url = ComprehendPiiEntitiesDetectionLink.format_str.format( aws_domain=ComprehendPiiEntitiesDetectionLink.get_aws_domain(self.hook.conn_partition), region_name=self.hook.conn_region_name, job_id=job_id, ) 
ComprehendPiiEntitiesDetectionLink.persist( context=context, operator=self, region_name=self.hook.conn_region_name, aws_partition=self.hook.conn_partition, job_id=job_id, ) self.log.info("You can view the PII entities detection job at %s", job_url) message_description = f"start pii entities detection job {job_id} to complete." if self.deferrable: self.log.info("Deferring %s", message_description) self.defer( trigger=ComprehendPiiEntitiesDetectionJobCompletedTrigger( job_id=job_id, waiter_delay=self.waiter_delay, waiter_max_attempts=self.waiter_max_attempts, aws_conn_id=self.aws_conn_id, ), method_name="execute_complete", ) elif self.wait_for_completion: self.log.info("Waiting for %s", message_description) self.hook.get_waiter("pii_entities_detection_job_complete").wait( JobId=job_id, WaiterConfig={"Delay": self.waiter_delay, "MaxAttempts": self.waiter_max_attempts}, ) return job_id def execute_complete(self, context: Context, event: dict[str, Any] | None = None) -> str: validated_event = validate_execute_complete_event(event) if validated_event["status"] != "success": raise AirflowException("Error while running job: %s", validated_event) self.log.info("Comprehend pii entities detection job `%s` complete.", validated_event["job_id"]) return validated_event["job_id"]
ComprehendStartPiiEntitiesDetectionJobOperator
python
Textualize__textual
docs/examples/how-to/layout.py
{ "start": 183, "end": 299 }
class ____(Placeholder): DEFAULT_CSS = """ Header { height: 3; dock: top; } """
Header
python
falconry__falcon
tests/asgi/_asgi_test_app.py
{ "start": 4463, "end": 7184 }
class ____: async def on_get(self, req, resp): async def emit(): s = 0 while s <= SSE_TEST_MAX_DELAY_SEC: yield falcon.asgi.SSEvent(text='hello world') await asyncio.sleep(s) s += SSE_TEST_MAX_DELAY_SEC / 4 resp.sse = emit() async def on_websocket(self, req, ws): # noqa: C901 recv_command = req.get_header('X-Command') == 'recv' send_mismatched = req.get_header('X-Mismatch') == 'send' recv_mismatched = req.get_header('X-Mismatch') == 'recv' mismatch_type = req.get_header('X-Mismatch-Type', default='text') raise_error = req.get_header('X-Raise-Error') close = req.get_header('X-Close') close_code = req.get_header('X-Close-Code') if close_code: close_code = int(close_code) accept = req.get_header('X-Accept', default='accept') if accept == 'accept': subprotocol = req.get_header('X-Subprotocol') if subprotocol == '*': subprotocol = ws.subprotocols[0] if subprotocol: await ws.accept(subprotocol) else: await ws.accept() elif accept == 'reject': if close: await ws.close() return if send_mismatched: if mismatch_type == 'text': await ws.send_text(b'fizzbuzz') else: await ws.send_data('fizzbuzz') if recv_mismatched: if mismatch_type == 'text': await ws.receive_text() else: await ws.receive_data() start = time.time() while time.time() - start < 1: try: msg = None if recv_command: msg = await ws.receive_media() else: msg = None await ws.send_text('hello world') print('on_websocket:send_text') if msg and msg['command'] == 'echo': await ws.send_text(msg['echo']) await ws.send_data(b'hello\x00world') await asyncio.sleep(0.2) except falcon.errors.WebSocketDisconnected: print('on_websocket:WebSocketDisconnected') raise if raise_error == 'generic': raise Exception('Test: Generic Unhandled Error') elif raise_error == 'http': raise falcon.HTTPBadRequest() if close: # NOTE(kgriffs): Tests that the default is used # when close_code is None. await ws.close(close_code)
Events
python
kamyu104__LeetCode-Solutions
Python/design-spreadsheet.py
{ "start": 149, "end": 972 }
class ____(object): def __init__(self, rows): """ :type rows: int """ self.__lookup = collections.defaultdict(int) def setCell(self, cell, value): """ :type cell: str :type value: int :rtype: None """ self.__lookup[cell] = value def resetCell(self, cell): """ :type cell: str :rtype: None """ if cell in self.__lookup: del self.__lookup[cell] def getValue(self, formula): """ :type formula: str :rtype: int """ left, right = formula[1 :].split('+') x = self.__lookup.get(left, 0) if left[0].isalpha() else int(left) y = self.__lookup.get(right, 0) if right[0].isalpha() else int(right) return x+y
Spreadsheet
python
numba__numba
numba/tests/test_parallel_backend.py
{ "start": 39187, "end": 41581 }
class ____(TestCase): _DEBUG = False def run_cmd(self, cmdline): popen = subprocess.Popen(cmdline, stdout=subprocess.PIPE, stderr=subprocess.PIPE,) # finish in _TEST_TIMEOUT seconds or kill it timeout = threading.Timer(_TEST_TIMEOUT, popen.kill) try: timeout.start() out, err = popen.communicate() if popen.returncode != 0: raise AssertionError( "process failed with code %s: stderr follows\n%s\n" % (popen.returncode, err.decode())) finally: timeout.cancel() return out.decode(), err.decode() @linux_only # only linux can leak semaphores def test_orphaned_semaphore(self): # sys path injection and separate usecase module to make sure everything # is importable by children of multiprocessing test_file = os.path.join(os.path.dirname(__file__), "orphaned_semaphore_usecase.py") cmdline = [sys.executable, test_file] out, err = self.run_cmd(cmdline) # assert no semaphore leaks reported on stderr self.assertNotIn("leaked semaphore", err) if self._DEBUG: print("OUT:", out) print("ERR:", err) def test_lazy_lock_init(self): # checks based on https://github.com/numba/numba/pull/5724 # looking for "lazy" process lock initialisation so as to avoid setting # a multiprocessing context as part of import. for meth in ('fork', 'spawn', 'forkserver'): # if a context is available on the host check it can be set as the # start method in a separate process try: multiprocessing.get_context(meth) except ValueError: continue cmd = ("import numba; import multiprocessing;" "multiprocessing.set_start_method('{}');" "print(multiprocessing.get_context().get_start_method())") cmdline = [sys.executable, "-c", cmd.format(meth)] out, err = self.run_cmd(cmdline) if self._DEBUG: print("OUT:", out) print("ERR:", err) self.assertIn(meth, out) @skip_parfors_unsupported @skip_no_omp
TestInitSafetyIssues
python
HypothesisWorks__hypothesis
hypothesis-python/src/hypothesis/database.py
{ "start": 28278, "end": 41324 }
class ____(ExampleDatabase): """ A file-based database loaded from a `GitHub Actions <https://docs.github.com/en/actions>`_ artifact. You can use this for sharing example databases between CI runs and developers, allowing the latter to get read-only access to the former. This is particularly useful for continuous fuzzing (i.e. with `HypoFuzz <https://hypofuzz.com/>`_), where the CI system can help find new failing examples through fuzzing, and developers can reproduce them locally without any manual effort. .. note:: You must provide ``GITHUB_TOKEN`` as an environment variable. In CI, Github Actions provides this automatically, but it needs to be set manually for local usage. In a developer machine, this would usually be a `Personal Access Token <https://docs.github.com/en/authentication/keeping-your-account-and-data-secure/managing-your-personal-access-tokens>`_. If the repository is private, it's necessary for the token to have ``repo`` scope in the case of a classic token, or ``actions:read`` in the case of a fine-grained token. In most cases, this will be used through the :class:`~hypothesis.database.MultiplexedDatabase`, by combining a local directory-based database with this one. For example: .. code-block:: python local = DirectoryBasedExampleDatabase(".hypothesis/examples") shared = ReadOnlyDatabase(GitHubArtifactDatabase("user", "repo")) settings.register_profile("ci", database=local) settings.register_profile("dev", database=MultiplexedDatabase(local, shared)) # We don't want to use the shared database in CI, only to populate its local one. # which the workflow should then upload as an artifact. settings.load_profile("ci" if os.environ.get("CI") else "dev") .. note:: Because this database is read-only, you always need to wrap it with the :class:`ReadOnlyDatabase`. A setup like this can be paired with a GitHub Actions workflow including something like the following: .. 
code-block:: yaml - name: Download example database uses: dawidd6/action-download-artifact@v9 with: name: hypothesis-example-db path: .hypothesis/examples if_no_artifact_found: warn workflow_conclusion: completed - name: Run tests run: pytest - name: Upload example database uses: actions/upload-artifact@v3 if: always() with: name: hypothesis-example-db path: .hypothesis/examples In this workflow, we use `dawidd6/action-download-artifact <https://github.com/dawidd6/action-download-artifact>`_ to download the latest artifact given that the official `actions/download-artifact <https://github.com/actions/download-artifact>`_ does not support downloading artifacts from previous workflow runs. The database automatically implements a simple file-based cache with a default expiration period of 1 day. You can adjust this through the ``cache_timeout`` property. For mono-repo support, you can provide a unique ``artifact_name`` (e.g. ``hypofuzz-example-db-frontend``). """ def __init__( self, owner: str, repo: str, artifact_name: str = "hypothesis-example-db", cache_timeout: timedelta = timedelta(days=1), path: StrPathT | None = None, ): super().__init__() self.owner = owner self.repo = repo self.artifact_name = artifact_name self.cache_timeout = cache_timeout # Get the GitHub token from the environment # It's unnecessary to use a token if the repo is public self.token: str | None = getenv("GITHUB_TOKEN") if path is None: self.path: Path = Path( storage_directory(f"github-artifacts/{self.artifact_name}/") ) else: self.path = Path(path) # We don't want to initialize the cache until we need to self._initialized: bool = False self._disabled: bool = False # This is the path to the artifact in usage # .hypothesis/github-artifacts/<artifact-name>/<modified_isoformat>.zip self._artifact: Path | None = None # This caches the artifact structure self._access_cache: dict[PurePath, set[PurePath]] | None = None # Message to display if user doesn't wrap around ReadOnlyDatabase 
self._read_only_message = ( "This database is read-only. " "Please wrap this class with ReadOnlyDatabase" "i.e. ReadOnlyDatabase(GitHubArtifactDatabase(...))." ) def __repr__(self) -> str: return ( f"GitHubArtifactDatabase(owner={self.owner!r}, " f"repo={self.repo!r}, artifact_name={self.artifact_name!r})" ) def __eq__(self, other: object) -> bool: return ( isinstance(other, GitHubArtifactDatabase) and self.owner == other.owner and self.repo == other.repo and self.artifact_name == other.artifact_name and self.path == other.path ) def _prepare_for_io(self) -> None: assert self._artifact is not None, "Artifact not loaded." if self._initialized: # pragma: no cover return # Test that the artifact is valid try: with ZipFile(self._artifact) as f: if f.testzip(): # pragma: no cover raise BadZipFile # Turns out that testzip() doesn't work quite well # doing the cache initialization here instead # will give us more coverage of the artifact. # Cache the files inside each keypath self._access_cache = {} with ZipFile(self._artifact) as zf: namelist = zf.namelist() # Iterate over files in the artifact for filename in namelist: fileinfo = zf.getinfo(filename) if fileinfo.is_dir(): self._access_cache[PurePath(filename)] = set() else: # Get the keypath from the filename keypath = PurePath(filename).parent # Add the file to the keypath self._access_cache[keypath].add(PurePath(filename)) except BadZipFile: warnings.warn( "The downloaded artifact from GitHub is invalid. " "This could be because the artifact was corrupted, " "or because the artifact was not created by Hypothesis. 
", HypothesisWarning, stacklevel=3, ) self._disabled = True self._initialized = True def _initialize_db(self) -> None: # Trigger warning that we suppressed earlier by intent_to_write=False storage_directory(self.path.name) # Create the cache directory if it doesn't exist self.path.mkdir(exist_ok=True, parents=True) # Get all artifacts cached_artifacts = sorted( self.path.glob("*.zip"), key=lambda a: datetime.fromisoformat(a.stem.replace("_", ":")), ) # Remove all but the latest artifact for artifact in cached_artifacts[:-1]: artifact.unlink() try: found_artifact = cached_artifacts[-1] except IndexError: found_artifact = None # Check if the latest artifact is a cache hit if found_artifact is not None and ( datetime.now(timezone.utc) - datetime.fromisoformat(found_artifact.stem.replace("_", ":")) < self.cache_timeout ): self._artifact = found_artifact else: # Download the latest artifact from GitHub new_artifact = self._fetch_artifact() if new_artifact: if found_artifact is not None: found_artifact.unlink() self._artifact = new_artifact elif found_artifact is not None: warnings.warn( "Using an expired artifact as a fallback for the database: " f"{found_artifact}", HypothesisWarning, stacklevel=2, ) self._artifact = found_artifact else: warnings.warn( "Couldn't acquire a new or existing artifact. Disabling database.", HypothesisWarning, stacklevel=2, ) self._disabled = True return self._prepare_for_io() def _get_bytes(self, url: str) -> bytes | None: # pragma: no cover request = Request( url, headers={ "Accept": "application/vnd.github+json", "X-GitHub-Api-Version": "2022-11-28 ", "Authorization": f"Bearer {self.token}", }, ) warning_message = None response_bytes: bytes | None = None try: with urlopen(request) as response: response_bytes = response.read() except HTTPError as e: if e.code == 401: warning_message = ( "Authorization failed when trying to download artifact from GitHub. " "Check that you have a valid GITHUB_TOKEN set in your environment." 
) else: warning_message = ( "Could not get the latest artifact from GitHub. " "This could be because because the repository " "or artifact does not exist. " ) # see https://github.com/python/cpython/issues/128734 e.close() except URLError: warning_message = "Could not connect to GitHub to get the latest artifact. " except TimeoutError: warning_message = ( "Could not connect to GitHub to get the latest artifact " "(connection timed out)." ) if warning_message is not None: warnings.warn(warning_message, HypothesisWarning, stacklevel=4) return None return response_bytes def _fetch_artifact(self) -> Path | None: # pragma: no cover # Get the list of artifacts from GitHub url = f"https://api.github.com/repos/{self.owner}/{self.repo}/actions/artifacts" response_bytes = self._get_bytes(url) if response_bytes is None: return None artifacts = json.loads(response_bytes)["artifacts"] artifacts = [a for a in artifacts if a["name"] == self.artifact_name] if not artifacts: return None # Get the latest artifact from the list artifact = max(artifacts, key=lambda a: a["created_at"]) url = artifact["archive_download_url"] # Download the artifact artifact_bytes = self._get_bytes(url) if artifact_bytes is None: return None # Save the artifact to the cache # We replace ":" with "_" to ensure the filenames are compatible # with Windows filesystems timestamp = datetime.now(timezone.utc).isoformat().replace(":", "_") artifact_path = self.path / f"{timestamp}.zip" try: artifact_path.write_bytes(artifact_bytes) except OSError: warnings.warn( "Could not save the latest artifact from GitHub. 
", HypothesisWarning, stacklevel=3, ) return None return artifact_path @staticmethod @lru_cache def _key_path(key: bytes) -> PurePath: return PurePath(_hash(key) + "/") def fetch(self, key: bytes) -> Iterable[bytes]: if self._disabled: return if not self._initialized: self._initialize_db() if self._disabled: return assert self._artifact is not None assert self._access_cache is not None kp = self._key_path(key) with ZipFile(self._artifact) as zf: # Get the all files in the the kp from the cache filenames = self._access_cache.get(kp, ()) for filename in filenames: with zf.open(filename.as_posix()) as f: yield f.read() # Read-only interface def save(self, key: bytes, value: bytes) -> None: raise RuntimeError(self._read_only_message) def move(self, src: bytes, dest: bytes, value: bytes) -> None: raise RuntimeError(self._read_only_message) def delete(self, key: bytes, value: bytes) -> None: raise RuntimeError(self._read_only_message)
GitHubArtifactDatabase
python
getsentry__sentry
tests/sentry/hybridcloud/test_organizationmembermapping.py
{ "start": 652, "end": 6759 }
class ____(TransactionTestCase, HybridCloudTestMixin): def test_upsert_stale_user_id(self) -> None: organizationmember_mapping_service.upsert_mapping( organization_id=self.organization.id, organizationmember_id=111111, mapping=RpcOrganizationMemberMappingUpdate( role=self.organization.default_role, user_id=10001, email=None, inviter_id=self.user.id, invite_status=None, ), ) omm = OrganizationMemberMapping.objects.get( organization_id=self.organization.id, organizationmember_id=111111 ) assert omm.user_id is None assert omm.inviter_id == self.user.id def test_upsert_stale_inviter_id(self) -> None: self.user self.organization with transaction.atomic(router.db_for_write(OrganizationMemberMapping)): organizationmember_mapping_service.upsert_mapping( organization_id=self.organization.id, organizationmember_id=111111, mapping=RpcOrganizationMemberMappingUpdate( role=self.organization.default_role, user_id=self.user.id, email=None, inviter_id=1000001, invite_status=None, ), ) omm = OrganizationMemberMapping.objects.get( organization_id=self.organization.id, organizationmember_id=111111 ) assert omm.user_id == self.user.id assert omm.inviter_id is None def test_upsert_email_invite(self) -> None: om = OrganizationMember( role="member", email="foo@example.com", organization_id=self.organization.id, ) rpc_orgmember_mapping = organizationmember_mapping_service.upsert_mapping( organization_id=self.organization.id, organizationmember_id=111111, mapping=RpcOrganizationMemberMappingUpdate.from_orm(om), ) assert rpc_orgmember_mapping is not None assert rpc_orgmember_mapping.email == "foo@example.com" assert rpc_orgmember_mapping.user_id is None assert rpc_orgmember_mapping.organization_id == self.organization.id om.user_id = self.create_user().id om.email = None rpc_orgmember_mapping = organizationmember_mapping_service.upsert_mapping( organization_id=self.organization.id, organizationmember_id=111111, mapping=RpcOrganizationMemberMappingUpdate.from_orm(om), ) assert 
rpc_orgmember_mapping is not None assert rpc_orgmember_mapping.user_id == om.user_id def test_upsert_happy_path(self) -> None: inviter = self.create_user("foo@example.com") with assume_test_silo_mode(SiloMode.REGION): om_id = OrganizationMember.objects.get( organization_id=self.organization.id, user_id=self.user.id ).id rpc_orgmember_mapping = organizationmember_mapping_service.upsert_mapping( organization_id=self.organization.id, organizationmember_id=om_id, mapping=RpcOrganizationMemberMappingUpdate( role="member", user_id=self.user.id, email=None, inviter_id=inviter.id, invite_status=InviteStatus.REQUESTED_TO_BE_INVITED.value, ), ) orgmember_mapping = OrganizationMemberMapping.objects.get( organization_id=self.organization.id ) assert rpc_orgmember_mapping is not None assert ( rpc_orgmember_mapping.organizationmember_id == orgmember_mapping.organizationmember_id ) assert rpc_orgmember_mapping.date_added == orgmember_mapping.date_added assert ( rpc_orgmember_mapping.organization_id == orgmember_mapping.organization_id == self.organization.id ) assert rpc_orgmember_mapping.role == orgmember_mapping.role == "member" assert rpc_orgmember_mapping.user_id == orgmember_mapping.user_id == self.user.id assert rpc_orgmember_mapping.email is orgmember_mapping.email is None assert rpc_orgmember_mapping.inviter_id == orgmember_mapping.inviter_id == inviter.id assert ( rpc_orgmember_mapping.invite_status == orgmember_mapping.invite_status == InviteStatus.REQUESTED_TO_BE_INVITED.value ) def test_create_mapping_updates_org_members(self) -> None: assert self.user.is_active self.user.is_active = False self.user.save() with outbox_runner(): org = self.create_organization("test", owner=self.user) with assume_test_silo_mode(SiloMode.REGION): om = OrganizationMember.objects.get(organization_id=org.id, user_id=self.user.id) assert not om.user_is_active def test_save_user_pushes_is_active(self) -> None: with outbox_runner(): org = self.create_organization("test", owner=self.user) with 
assume_test_silo_mode(SiloMode.REGION): om = OrganizationMember.objects.get(organization_id=org.id, user_id=self.user.id) assert om.user_is_active with outbox_runner(): self.user.is_active = False self.user.save() with assume_test_silo_mode(SiloMode.REGION): om.refresh_from_db() assert not om.user_is_active def test_update_user_pushes_is_active(self) -> None: with outbox_runner(): org = self.create_organization("test", owner=self.user) with assume_test_silo_mode(SiloMode.REGION): om = OrganizationMember.objects.get(organization_id=org.id, user_id=self.user.id) assert om.user_is_active with outbox_runner(): self.user.update(is_active=False) om.refresh_from_db() assert not om.user_is_active
OrganizationMappingTest
python
ansible__ansible
lib/ansible/modules/hostname.py
{ "start": 25933, "end": 26052 }
class ____(Hostname): platform = 'SunOS' distribution = None strategy_class = SolarisStrategy
SolarisHostname
python
pyinstaller__pyinstaller
PyInstaller/building/build_main.py
{ "start": 14974, "end": 17171 }
class ____(enum.IntFlag): """ Module collection mode flags. """ PYZ = enum.auto() # Collect byte-compiled .pyc into PYZ archive PYC = enum.auto() # Collect byte-compiled .pyc as external data file PY = enum.auto() # Collect source .py file as external data file _MODULE_COLLECTION_MODES = { "pyz": _ModuleCollectionMode.PYZ, "pyc": _ModuleCollectionMode.PYC, "py": _ModuleCollectionMode.PY, "pyz+py": _ModuleCollectionMode.PYZ | _ModuleCollectionMode.PY, "py+pyz": _ModuleCollectionMode.PYZ | _ModuleCollectionMode.PY, } def _get_module_collection_mode(mode_dict, name, noarchive=False): """ Determine the module/package collection mode for the given module name, based on the provided collection mode settings dictionary. """ # Default mode: collect into PYZ, unless noarchive is enabled. In that case, collect as pyc. mode_flags = _ModuleCollectionMode.PYC if noarchive else _ModuleCollectionMode.PYZ # If we have no collection mode settings, end here and now. if not mode_dict: return mode_flags # Search the parent modules/packages in top-down fashion, and take the last given setting. This ensures that # a setting given for the top-level package is recursively propagated to all its subpackages and submodules, # but also allows individual sub-modules to override the setting again. mode = 'pyz' name_parts = name.split('.') for i in range(len(name_parts)): modlevel = ".".join(name_parts[:i + 1]) modlevel_mode = mode_dict.get(modlevel, None) if modlevel_mode is not None: mode = modlevel_mode # Convert mode string to _ModuleCollectionMode flags try: mode_flags = _MODULE_COLLECTION_MODES[mode] except KeyError: raise ValueError(f"Unknown module collection mode for {name!r}: {mode!r}!") # noarchive flag being set means that we need to change _ModuleCollectionMode.PYZ into _ModuleCollectionMode.PYC if noarchive and _ModuleCollectionMode.PYZ in mode_flags: mode_flags ^= _ModuleCollectionMode.PYZ mode_flags |= _ModuleCollectionMode.PYC return mode_flags
_ModuleCollectionMode
python
django__django
tests/migrations/test_migrations_no_ancestor/0002_conflicting_second.py
{ "start": 43, "end": 618 }
class ____(migrations.Migration): dependencies = [] operations = [ migrations.DeleteModel("Tribble"), migrations.RemoveField("Author", "silly_field"), migrations.AddField("Author", "rating", models.IntegerField(default=0)), migrations.CreateModel( "Book", [ ("id", models.AutoField(primary_key=True)), ( "author", models.ForeignKey("migrations.Author", models.SET_NULL, null=True), ), ], ), ]
Migration
python
numba__numba
numba/tests/test_unicode.py
{ "start": 92138, "end": 93279 }
class ____(BaseTest): def test_unicode_iter(self): pyfunc = iter_usecase cfunc = njit(pyfunc) for a in UNICODE_EXAMPLES: self.assertPreciseEqual(pyfunc(a), cfunc(a)) def test_unicode_literal_iter(self): pyfunc = literal_iter_usecase cfunc = njit(pyfunc) self.assertPreciseEqual(pyfunc(), cfunc()) def test_unicode_enumerate_iter(self): pyfunc = enumerated_iter_usecase cfunc = njit(pyfunc) for a in UNICODE_EXAMPLES: self.assertPreciseEqual(pyfunc(a), cfunc(a)) def test_unicode_stopiteration_iter(self): self.disable_leak_check() pyfunc = iter_stopiteration_usecase cfunc = njit(pyfunc) for f in (pyfunc, cfunc): for a in UNICODE_EXAMPLES: with self.assertRaises(StopIteration): f(a) def test_unicode_literal_stopiteration_iter(self): pyfunc = literal_iter_stopiteration_usecase cfunc = njit(pyfunc) for f in (pyfunc, cfunc): with self.assertRaises(StopIteration): f()
TestUnicodeIteration
python
microsoft__pyright
packages/pyright-internal/src/tests/samples/protocol24.py
{ "start": 752, "end": 808 }
class ____(Protocol): def jump(self) -> int: ...
Jumps
python
allegroai__clearml
clearml/backend_api/services/v2_9/events.py
{ "start": 99006, "end": 101007 }
class ____(Request): """ Get histogram data of all the scalar metrics and variants in the task :param task: Task ID :type task: str :param metric: :type metric: str :param variant: :type variant: str """ _service = "events" _action = "vector_metrics_iter_histogram" _version = "2.9" _schema = { "definitions": {}, "properties": { "metric": {"description": "", "type": "string"}, "task": {"description": "Task ID", "type": "string"}, "variant": {"description": "", "type": "string"}, }, "required": ["task", "metric", "variant"], "type": "object", } def __init__(self, task: str, metric: str, variant: str, **kwargs: Any) -> None: super(VectorMetricsIterHistogramRequest, self).__init__(**kwargs) self.task = task self.metric = metric self.variant = variant @schema_property("task") def task(self) -> str: return self._property_task @task.setter def task(self, value: str) -> None: if value is None: self._property_task = None return self.assert_isinstance(value, "task", six.string_types) self._property_task = value @schema_property("metric") def metric(self) -> str: return self._property_metric @metric.setter def metric(self, value: str) -> None: if value is None: self._property_metric = None return self.assert_isinstance(value, "metric", six.string_types) self._property_metric = value @schema_property("variant") def variant(self) -> str: return self._property_variant @variant.setter def variant(self, value: str) -> None: if value is None: self._property_variant = None return self.assert_isinstance(value, "variant", six.string_types) self._property_variant = value
VectorMetricsIterHistogramRequest
python
airbytehq__airbyte
airbyte-integrations/connectors/source-github/source_github/github_schema.py
{ "start": 1236390, "end": 1242161 }
class ____(sgqlc.types.Type, Node, Closable, UniformResourceLocatable): """Represents a Milestone object on a given repository.""" __schema__ = github_schema __field_names__ = ( "created_at", "creator", "description", "due_on", "issues", "number", "progress_percentage", "pull_requests", "repository", "state", "title", "updated_at", ) created_at = sgqlc.types.Field(sgqlc.types.non_null(DateTime), graphql_name="createdAt") """Identifies the date and time when the object was created.""" creator = sgqlc.types.Field(Actor, graphql_name="creator") """Identifies the actor who created the milestone.""" description = sgqlc.types.Field(String, graphql_name="description") """Identifies the description of the milestone.""" due_on = sgqlc.types.Field(DateTime, graphql_name="dueOn") """Identifies the due date of the milestone.""" issues = sgqlc.types.Field( sgqlc.types.non_null(IssueConnection), graphql_name="issues", args=sgqlc.types.ArgDict( ( ("order_by", sgqlc.types.Arg(IssueOrder, graphql_name="orderBy", default=None)), ("labels", sgqlc.types.Arg(sgqlc.types.list_of(sgqlc.types.non_null(String)), graphql_name="labels", default=None)), ("states", sgqlc.types.Arg(sgqlc.types.list_of(sgqlc.types.non_null(IssueState)), graphql_name="states", default=None)), ("filter_by", sgqlc.types.Arg(IssueFilters, graphql_name="filterBy", default=None)), ("after", sgqlc.types.Arg(String, graphql_name="after", default=None)), ("before", sgqlc.types.Arg(String, graphql_name="before", default=None)), ("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)), ("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)), ) ), ) """A list of issues associated with the milestone. Arguments: * `order_by` (`IssueOrder`): Ordering options for issues returned from the connection. * `labels` (`[String!]`): A list of label names to filter the pull requests by. * `states` (`[IssueState!]`): A list of states to filter the issues by. 
* `filter_by` (`IssueFilters`): Filtering options for issues returned from the connection. * `after` (`String`): Returns the elements in the list that come after the specified cursor. * `before` (`String`): Returns the elements in the list that come before the specified cursor. * `first` (`Int`): Returns the first _n_ elements from the list. * `last` (`Int`): Returns the last _n_ elements from the list. """ number = sgqlc.types.Field(sgqlc.types.non_null(Int), graphql_name="number") """Identifies the number of the milestone.""" progress_percentage = sgqlc.types.Field(sgqlc.types.non_null(Float), graphql_name="progressPercentage") """Identifies the percentage complete for the milestone""" pull_requests = sgqlc.types.Field( sgqlc.types.non_null(PullRequestConnection), graphql_name="pullRequests", args=sgqlc.types.ArgDict( ( ( "states", sgqlc.types.Arg(sgqlc.types.list_of(sgqlc.types.non_null(PullRequestState)), graphql_name="states", default=None), ), ("labels", sgqlc.types.Arg(sgqlc.types.list_of(sgqlc.types.non_null(String)), graphql_name="labels", default=None)), ("head_ref_name", sgqlc.types.Arg(String, graphql_name="headRefName", default=None)), ("base_ref_name", sgqlc.types.Arg(String, graphql_name="baseRefName", default=None)), ("order_by", sgqlc.types.Arg(IssueOrder, graphql_name="orderBy", default=None)), ("after", sgqlc.types.Arg(String, graphql_name="after", default=None)), ("before", sgqlc.types.Arg(String, graphql_name="before", default=None)), ("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)), ("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)), ) ), ) """A list of pull requests associated with the milestone. Arguments: * `states` (`[PullRequestState!]`): A list of states to filter the pull requests by. * `labels` (`[String!]`): A list of label names to filter the pull requests by. * `head_ref_name` (`String`): The head ref name to filter the pull requests by. 
* `base_ref_name` (`String`): The base ref name to filter the pull requests by. * `order_by` (`IssueOrder`): Ordering options for pull requests returned from the connection. * `after` (`String`): Returns the elements in the list that come after the specified cursor. * `before` (`String`): Returns the elements in the list that come before the specified cursor. * `first` (`Int`): Returns the first _n_ elements from the list. * `last` (`Int`): Returns the last _n_ elements from the list. """ repository = sgqlc.types.Field(sgqlc.types.non_null("Repository"), graphql_name="repository") """The repository associated with this milestone.""" state = sgqlc.types.Field(sgqlc.types.non_null(MilestoneState), graphql_name="state") """Identifies the state of the milestone.""" title = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="title") """Identifies the title of the milestone.""" updated_at = sgqlc.types.Field(sgqlc.types.non_null(DateTime), graphql_name="updatedAt") """Identifies the date and time when the object was last updated."""
Milestone
python
django-crispy-forms__django-crispy-forms
crispy_forms/base.py
{ "start": 0, "end": 744 }
class ____: """ Context manager that receives a `django.template.Context` instance and a list of keys Once the context manager is exited, it removes `keys` from the context, to avoid side effects in later layout objects that may use the same context variables. Layout objects should use `extra_context` to introduce context variables, never touch context object themselves, that could introduce side effects. """ def __init__(self, context, keys): self.context = context self.keys = keys def __enter__(self): pass def __exit__(self, type, value, traceback): for key in list(self.keys): if key in self.context: del self.context[key]
KeepContext
python
tensorflow__tensorflow
tensorflow/python/kernel_tests/math_ops/reduction_ops_test.py
{ "start": 33892, "end": 39383 }
class ____(test.TestCase): def _compare(self, x, reduction_axes, keepdims, use_gpu=False): np_ans = x if reduction_axes is None: np_ans = np.amax(np_ans, keepdims=keepdims) else: for ra in reduction_axes[::-1]: np_ans = np.amax(np_ans, axis=ra, keepdims=keepdims) with self.cached_session(use_gpu=use_gpu): if reduction_axes is not None: reduction_axes = np.array(reduction_axes).astype(np.int32) tf_ans = math_ops.reduce_max(x, reduction_axes, keepdims) out = self.evaluate(tf_ans) self.assertAllClose(np_ans, out) self.assertShapeEqual(np_ans, tf_ans) def _compareAll(self, x, reduction_axes): self._compare(x, reduction_axes, False, use_gpu=True) self._compare(x, reduction_axes, True, use_gpu=True) def testAxesType(self): for dtype in [dtypes.int64, dtypes.int32]: with self.cached_session(): v = math_ops.reduce_max([0, 0], constant_op.constant(0, dtype=dtype)) tf_v = self.evaluate(v) self.assertAllEqual(tf_v, 0) @test_util.disable_xla("b/168718272") # XLA handling of NaN is inconsistent def testSpecialValues(self): for dtype in [np.float32, np.float64]: for size in range(1, 4): for arr in itertools.product([-np.inf, 1., np.nan, np.inf], repeat=size): self._compareAll(np.array(arr, dtype=dtype), None) def testInt64Reduce3D(self): # Create a 3D array of int64s and reduce across all possible # dimensions np_arr = np.arange(-31, -1).reshape([2, 3, 5]).astype(np.int64) self._compareAll(np_arr, None) self._compareAll(np_arr, []) self._compareAll(np_arr, [0]) self._compareAll(np_arr, [1]) self._compareAll(np_arr, [2]) self._compareAll(np_arr, [0, 1]) self._compareAll(np_arr, [1, 2]) self._compareAll(np_arr, [0, 2]) self._compareAll(np_arr, [0, 1, 2]) def testFloatReduce3D(self): # Create a 3D array of floats and reduce across all possible # dimensions np_arr = np.arange(-31, -1).reshape([2, 3, 5]).astype(np.float32) self._compareAll(np_arr, None) self._compareAll(np_arr, []) self._compareAll(np_arr, [0]) self._compareAll(np_arr, [1]) self._compareAll(np_arr, [2]) 
self._compareAll(np_arr, [0, 1]) self._compareAll(np_arr, [1, 2]) self._compareAll(np_arr, [0, 2]) self._compareAll(np_arr, [0, 1, 2]) def testDoubleReduce3D(self): # Create a 3D array of doubles and reduce across all possible # dimensions np_arr = np.arange(-31, -1).reshape([2, 3, 5]).astype(np.float64) self._compareAll(np_arr, None) self._compareAll(np_arr, []) self._compareAll(np_arr, [0]) self._compareAll(np_arr, [1]) self._compareAll(np_arr, [2]) self._compareAll(np_arr, [0, 1]) self._compareAll(np_arr, [1, 2]) self._compareAll(np_arr, [0, 2]) self._compareAll(np_arr, [0, 1, 2]) def testBfloat16Reduce3D(self): # Create a 3D array of floats and reduce across all possible # dimensions np_arr = np.arange(-31, -1).reshape([2, 3, 5]).astype(dtypes.bfloat16.as_numpy_dtype) self._compareAll(np_arr, None) self._compareAll(np_arr, []) self._compareAll(np_arr, [0]) self._compareAll(np_arr, [1]) self._compareAll(np_arr, [2]) self._compareAll(np_arr, [0, 1]) self._compareAll(np_arr, [1, 2]) self._compareAll(np_arr, [0, 2]) self._compareAll(np_arr, [0, 1, 2]) @test_util.run_deprecated_v1 def testGradient(self): s = [2, 3, 4, 2] x = np.arange(-49.0, -1.0).reshape(s).astype(np.float64) with self.cached_session(): t = ops.convert_to_tensor(x) su = math_ops.reduce_max(t, [1, 2]) jacob_t, jacob_n = gradient_checker.compute_gradient( t, s, su, [2, 2], x_init_value=x, delta=1) self.assertAllClose(jacob_t, jacob_n, rtol=1e-8, atol=1e-8) @test_util.run_deprecated_v1 def testGradient2(self): s = [2, 3, 4, 2] x = np.arange(-49.0, -1.0).reshape(s).astype(np.float64) with self.cached_session(): t = ops.convert_to_tensor(x) su = math_ops.reduce_max(t, [1]) jacob_t, jacob_n = gradient_checker.compute_gradient( t, s, su, [2, 4, 2], x_init_value=x, delta=1) self.assertAllClose(jacob_t, jacob_n, rtol=1e-8, atol=1e-8) @test_util.run_deprecated_v1 def testGradient3(self): s = [2, 3, 4, 2] x = np.arange(-49.0, -1.0).reshape(s).astype(np.float64) with self.cached_session(): t = 
ops.convert_to_tensor(x) su = math_ops.reduce_max(t, [2]) jacob_t, jacob_n = gradient_checker.compute_gradient( t, s, su, [2, 3, 2], x_init_value=x, delta=1) self.assertAllClose(jacob_t, jacob_n, rtol=1e-8, atol=1e-8) @test_util.run_deprecated_v1 def testGradient4(self): s = [2, 3, 4, 2] x = np.arange(-49.0, -1.0).reshape(s).astype(np.float64) with self.cached_session(): t = ops.convert_to_tensor(x) su = math_ops.reduce_max(t) jacob_t, jacob_n = gradient_checker.compute_gradient( t, s, su, [1], x_init_value=x, delta=1) self.assertAllClose(jacob_t, jacob_n, rtol=1e-8, atol=1e-8) @test_util.run_deprecated_v1 def testEmptyGradients(self): with self.cached_session(): x = array_ops.zeros([0, 3]) y = math_ops.reduce_max(x, [1]) error = gradient_checker.compute_gradient_error(x, [0, 3], y, [0]) self.assertEqual(error, 0)
MaxReductionTest
python
getsentry__sentry
tests/sentry/issue_detection/test_n_plus_one_db_span_detector.py
{ "start": 12491, "end": 13319 }
class ____(TestCase): def test_respects_project_option(self) -> None: project = self.create_project() event = get_event("n-plus-one-db/n-plus-one-in-django-index-view-activerecord") event["project_id"] = project.id settings = get_detection_settings(project.id) detector = NPlusOneDBSpanDetector(settings, event) assert detector.is_creation_allowed_for_project(project) ProjectOption.objects.set_value( project=project, key="sentry:performance_issue_settings", value={"n_plus_one_db_queries_detection_enabled": False}, ) settings = get_detection_settings(project.id) detector = NPlusOneDBSpanDetector(settings, event) assert not detector.is_creation_allowed_for_project(project)
NPlusOneDbSettingTest
python
pandas-dev__pandas
pandas/tests/indexing/interval/test_interval.py
{ "start": 5349, "end": 7538 }
class ____: def test_mi_intervalindex_slicing_with_scalar(self): # GH#27456 ii = IntervalIndex.from_arrays( [0, 1, 10, 11, 0, 1, 10, 11], [1, 2, 11, 12, 1, 2, 11, 12], name="MP" ) idx = pd.MultiIndex.from_arrays( [ pd.Index(["FC", "FC", "FC", "FC", "OWNER", "OWNER", "OWNER", "OWNER"]), pd.Index( ["RID1", "RID1", "RID2", "RID2", "RID1", "RID1", "RID2", "RID2"] ), ii, ] ) idx.names = ["Item", "RID", "MP"] df = DataFrame({"value": [1, 2, 3, 4, 5, 6, 7, 8]}) df.index = idx query_df = DataFrame( { "Item": ["FC", "OWNER", "FC", "OWNER", "OWNER"], "RID": ["RID1", "RID1", "RID1", "RID2", "RID2"], "MP": [0.2, 1.5, 1.6, 11.1, 10.9], } ) query_df = query_df.sort_index() idx = pd.MultiIndex.from_arrays([query_df.Item, query_df.RID, query_df.MP]) query_df.index = idx result = df.value.loc[query_df.index] # the IntervalIndex level is indexed with floats, which map to # the intervals containing them. Matching the behavior we would get # with _only_ an IntervalIndex, we get an IntervalIndex level back. sliced_level = ii.take([0, 1, 1, 3, 2]) expected_index = pd.MultiIndex.from_arrays( [idx.get_level_values(0), idx.get_level_values(1), sliced_level] ) expected = Series([1, 6, 2, 8, 7], index=expected_index, name="value") tm.assert_series_equal(result, expected) @pytest.mark.xfail(WASM, reason="GH 23440") @pytest.mark.parametrize("base", [101, 1010]) def test_reindex_behavior_with_interval_index(self, base): # GH 51826 ser = Series( range(base), index=IntervalIndex.from_arrays(range(base), range(1, base + 1)), ) expected_result = Series([np.nan, 0], index=[np.nan, 1.0], dtype=float) result = ser.reindex(index=[np.nan, 1.0]) tm.assert_series_equal(result, expected_result)
TestIntervalIndexInsideMultiIndex
python
milvus-io__pymilvus
tests/test_grpc_handler_mutations.py
{ "start": 16023, "end": 19312 }
class ____: def test_get_query_segment_info(self, channel: Any, client_thread: Any) -> None: handler = GrpcHandler(channel=channel) info_future = client_thread.submit( handler.get_query_segment_info, collection_name="test_collection", timeout=30 ) (invocation_metadata, request, rpc) = channel.take_unary_unary( descriptor.methods_by_name["GetQuerySegmentInfo"] ) rpc.send_initial_metadata(()) # Create segment info segment_info = milvus_pb2.QuerySegmentInfo( segmentID=1001, collectionID=100, partitionID=10, num_rows=1000, state=common_pb2.SegmentState.Sealed ) expected_result = milvus_pb2.GetQuerySegmentInfoResponse( status=common_pb2.Status(code=0), infos=[segment_info] ) rpc.terminate(expected_result, (), grpc.StatusCode.OK, "") result = info_future.result() assert result == [segment_info] def test_get_query_segment_info_error(self, channel: Any, client_thread: Any) -> None: handler = GrpcHandler(channel=channel) info_future = client_thread.submit( handler.get_query_segment_info, collection_name="test_collection", timeout=30 ) (invocation_metadata, request, rpc) = channel.take_unary_unary( descriptor.methods_by_name["GetQuerySegmentInfo"] ) rpc.send_initial_metadata(()) expected_result = milvus_pb2.GetQuerySegmentInfoResponse( status=common_pb2.Status(code=1, reason="Collection not found") ) rpc.terminate(expected_result, (), grpc.StatusCode.OK, "") with pytest.raises(MilvusException): info_future.result() def test_create_alias(self, channel: Any, client_thread: Any) -> None: handler = GrpcHandler(channel=channel) alias_future = client_thread.submit( handler.create_alias, collection_name="test_collection", alias="test_alias", timeout=10 ) (invocation_metadata, request, rpc) = channel.take_unary_unary( descriptor.methods_by_name["CreateAlias"] ) rpc.send_initial_metadata(()) expected_result = common_pb2.Status(code=0) rpc.terminate(expected_result, (), grpc.StatusCode.OK, "") result = alias_future.result() assert result is None def test_create_alias_error(self, 
channel: Any, client_thread: Any) -> None: handler = GrpcHandler(channel=channel) alias_future = client_thread.submit( handler.create_alias, collection_name="test_collection", alias="test_alias", timeout=10 ) (invocation_metadata, request, rpc) = channel.take_unary_unary( descriptor.methods_by_name["CreateAlias"] ) rpc.send_initial_metadata(()) expected_result = common_pb2.Status(code=1, reason="Alias already exists") rpc.terminate(expected_result, (), grpc.StatusCode.OK, "") with pytest.raises(MilvusException): alias_future.result()
TestGrpcHandlerSegmentAndAliasOperations
python
openai__openai-python
src/openai/types/batch_usage.py
{ "start": 192, "end": 413 }
class ____(BaseModel): cached_tokens: int """The number of tokens that were retrieved from the cache. [More on prompt caching](https://platform.openai.com/docs/guides/prompt-caching). """
InputTokensDetails
python
django__django
tests/auth_tests/test_views.py
{ "start": 3560, "end": 4578 }
class ____(AuthViewsTestCase): def test_named_urls(self): "Named URLs should be reversible" expected_named_urls = [ ("login", [], {}), ("logout", [], {}), ("password_change", [], {}), ("password_change_done", [], {}), ("password_reset", [], {}), ("password_reset_done", [], {}), ( "password_reset_confirm", [], { "uidb64": "aaaaaaa", "token": "1111-aaaaa", }, ), ("password_reset_complete", [], {}), ] for name, args, kwargs in expected_named_urls: with self.subTest(name=name): try: reverse(name, args=args, kwargs=kwargs) except NoReverseMatch: self.fail( "Reversal of url named '%s' failed with NoReverseMatch" % name )
AuthViewNamedURLTests
python
pydata__xarray
xarray/tests/test_backends.py
{ "start": 162176, "end": 172422 }
class ____(ZarrBase): @contextlib.contextmanager def create_zarr_target(self): if has_zarr_v3: yield zarr.storage.MemoryStore({}, read_only=False) else: yield {} def test_chunk_key_encoding_v2(self) -> None: encoding = {"name": "v2", "configuration": {"separator": "/"}} # Create a dataset with a variable name containing a period data = np.ones((4, 4)) original = Dataset({"var1": (("x", "y"), data)}) # Set up chunk key encoding with slash separator encoding = { "var1": { "chunk_key_encoding": encoding, "chunks": (2, 2), } } # Write to store with custom encoding with self.create_zarr_target() as store: original.to_zarr(store, encoding=encoding) # Verify the chunk keys in store use the slash separator if not has_zarr_v3: chunk_keys = [k for k in store.keys() if k.startswith("var1/")] assert len(chunk_keys) > 0 for key in chunk_keys: assert "/" in key assert "." not in key.split("/")[1:] # No dots in chunk coordinates # Read back and verify data with xr.open_zarr(store) as actual: assert_identical(original, actual) # Verify chunks are preserved assert actual["var1"].encoding["chunks"] == (2, 2) @pytest.mark.asyncio @requires_zarr_v3 async def test_async_load_multiple_variables(self) -> None: target_class = zarr.AsyncArray method_name = "getitem" original_method = getattr(target_class, method_name) # the indexed coordinate variables is not lazy, so the create_test_dataset has 4 lazy variables in total N_LAZY_VARS = 4 original = create_test_data() with self.create_zarr_target() as store: original.to_zarr(store, zarr_format=3, consolidated=False) with patch.object( target_class, method_name, wraps=original_method, autospec=True ) as mocked_meth: # blocks upon loading the coordinate variables here ds = xr.open_zarr(store, consolidated=False, chunks=None) # TODO we're not actually testing that these indexing methods are not blocking... 
result_ds = await ds.load_async() mocked_meth.assert_called() assert mocked_meth.call_count == N_LAZY_VARS mocked_meth.assert_awaited() xrt.assert_identical(result_ds, ds.load()) @pytest.mark.asyncio @requires_zarr_v3 @pytest.mark.parametrize("cls_name", ["Variable", "DataArray", "Dataset"]) async def test_concurrent_load_multiple_objects( self, cls_name, ) -> None: N_OBJECTS = 5 N_LAZY_VARS = { "Variable": 1, "DataArray": 1, "Dataset": 4, } # specific to the create_test_data() used target_class = zarr.AsyncArray method_name = "getitem" original_method = getattr(target_class, method_name) original = create_test_data() with self.create_zarr_target() as store: original.to_zarr(store, consolidated=False, zarr_format=3) with patch.object( target_class, method_name, wraps=original_method, autospec=True ) as mocked_meth: xr_obj = get_xr_obj(store, cls_name) # TODO we're not actually testing that these indexing methods are not blocking... coros = [xr_obj.load_async() for _ in range(N_OBJECTS)] results = await asyncio.gather(*coros) mocked_meth.assert_called() assert mocked_meth.call_count == N_OBJECTS * N_LAZY_VARS[cls_name] mocked_meth.assert_awaited() for result in results: xrt.assert_identical(result, xr_obj.load()) @pytest.mark.asyncio @requires_zarr_v3 @pytest.mark.parametrize("cls_name", ["Variable", "DataArray", "Dataset"]) @pytest.mark.parametrize( "indexer, method, target_zarr_class", [ pytest.param({}, "sel", "zarr.AsyncArray", id="no-indexing-sel"), pytest.param({}, "isel", "zarr.AsyncArray", id="no-indexing-isel"), pytest.param({"dim2": 1.0}, "sel", "zarr.AsyncArray", id="basic-int-sel"), pytest.param({"dim2": 2}, "isel", "zarr.AsyncArray", id="basic-int-isel"), pytest.param( {"dim2": slice(1.0, 3.0)}, "sel", "zarr.AsyncArray", id="basic-slice-sel", ), pytest.param( {"dim2": slice(1, 3)}, "isel", "zarr.AsyncArray", id="basic-slice-isel" ), pytest.param( {"dim2": [1.0, 3.0]}, "sel", "zarr.core.indexing.AsyncOIndex", id="outer-sel", ), pytest.param( {"dim2": [1, 
3]}, "isel", "zarr.core.indexing.AsyncOIndex", id="outer-isel", ), pytest.param( { "dim1": xr.Variable(data=[2, 3], dims="points"), "dim2": xr.Variable(data=[1.0, 2.0], dims="points"), }, "sel", "zarr.core.indexing.AsyncVIndex", id="vectorized-sel", ), pytest.param( { "dim1": xr.Variable(data=[2, 3], dims="points"), "dim2": xr.Variable(data=[1, 3], dims="points"), }, "isel", "zarr.core.indexing.AsyncVIndex", id="vectorized-isel", ), ], ) async def test_indexing( self, cls_name, method, indexer, target_zarr_class, ) -> None: if not has_zarr_v3_async_oindex and target_zarr_class in ( "zarr.core.indexing.AsyncOIndex", "zarr.core.indexing.AsyncVIndex", ): pytest.skip( "current version of zarr does not support orthogonal or vectorized async indexing" ) if cls_name == "Variable" and method == "sel": pytest.skip("Variable doesn't have a .sel method") # Each type of indexing ends up calling a different zarr indexing method # They all use a method named .getitem, but on a different internal zarr class def _resolve_class_from_string(class_path: str) -> type[Any]: """Resolve a string class path like 'zarr.AsyncArray' to the actual class.""" module_path, class_name = class_path.rsplit(".", 1) module = import_module(module_path) return getattr(module, class_name) target_class = _resolve_class_from_string(target_zarr_class) method_name = "getitem" original_method = getattr(target_class, method_name) original = create_test_data() with self.create_zarr_target() as store: original.to_zarr(store, consolidated=False, zarr_format=3) with patch.object( target_class, method_name, wraps=original_method, autospec=True ) as mocked_meth: xr_obj = get_xr_obj(store, cls_name) # TODO we're not actually testing that these indexing methods are not blocking... 
result = await getattr(xr_obj, method)(**indexer).load_async() mocked_meth.assert_called() mocked_meth.assert_awaited() assert mocked_meth.call_count > 0 expected = getattr(xr_obj, method)(**indexer).load() xrt.assert_identical(result, expected) @pytest.mark.asyncio @pytest.mark.parametrize( ("indexer", "expected_err_msg"), [ pytest.param( {"dim2": 2}, "basic async indexing", marks=pytest.mark.skipif( has_zarr_v3, reason="current version of zarr has basic async indexing", ), ), # tests basic indexing pytest.param( {"dim2": [1, 3]}, "orthogonal async indexing", marks=pytest.mark.skipif( has_zarr_v3_async_oindex, reason="current version of zarr has async orthogonal indexing", ), ), # tests oindexing pytest.param( { "dim1": xr.Variable(data=[2, 3], dims="points"), "dim2": xr.Variable(data=[1, 3], dims="points"), }, "vectorized async indexing", marks=pytest.mark.skipif( has_zarr_v3_async_oindex, reason="current version of zarr has async vectorized indexing", ), ), # tests vindexing ], ) @parametrize_zarr_format async def test_raise_on_older_zarr_version( self, indexer, expected_err_msg, zarr_format, ): """Test that trying to use async load with insufficiently new version of zarr raises a clear error""" original = create_test_data() with self.create_zarr_target() as store: original.to_zarr(store, consolidated=False, zarr_format=zarr_format) ds = xr.open_zarr(store, consolidated=False, chunks=None) var = ds["var1"].variable with pytest.raises(NotImplementedError, match=expected_err_msg): await var.isel(**indexer).load_async() def get_xr_obj( store: zarr.abc.store.Store, cls_name: Literal["Variable", "DataArray", "Dataset"] ): ds = xr.open_zarr(store, consolidated=False, chunks=None) match cls_name: case "Variable": return ds["var1"].variable case "DataArray": return ds["var1"] case "Dataset": return ds
TestZarrDictStore
python
jazzband__django-waffle
test_app/models.py
{ "start": 631, "end": 720 }
class ____(AbstractBaseSwitch): """Demonstrates custom switch behavior."""
CustomSwitch
python
cython__cython
docs/examples/tutorial/pure/A.py
{ "start": 94, "end": 225 }
class ____: def __init__(self, b=0): self.a = 3 self.b = b def foo(self, x): print(x + _helper(1.0))
A
python
kubernetes-client__python
kubernetes/client/models/v1_validating_admission_policy.py
{ "start": 383, "end": 7646 }
class ____(object): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. """ """ Attributes: openapi_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ openapi_types = { 'api_version': 'str', 'kind': 'str', 'metadata': 'V1ObjectMeta', 'spec': 'V1ValidatingAdmissionPolicySpec', 'status': 'V1ValidatingAdmissionPolicyStatus' } attribute_map = { 'api_version': 'apiVersion', 'kind': 'kind', 'metadata': 'metadata', 'spec': 'spec', 'status': 'status' } def __init__(self, api_version=None, kind=None, metadata=None, spec=None, status=None, local_vars_configuration=None): # noqa: E501 """V1ValidatingAdmissionPolicy - a model defined in OpenAPI""" # noqa: E501 if local_vars_configuration is None: local_vars_configuration = Configuration() self.local_vars_configuration = local_vars_configuration self._api_version = None self._kind = None self._metadata = None self._spec = None self._status = None self.discriminator = None if api_version is not None: self.api_version = api_version if kind is not None: self.kind = kind if metadata is not None: self.metadata = metadata if spec is not None: self.spec = spec if status is not None: self.status = status @property def api_version(self): """Gets the api_version of this V1ValidatingAdmissionPolicy. # noqa: E501 APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501 :return: The api_version of this V1ValidatingAdmissionPolicy. # noqa: E501 :rtype: str """ return self._api_version @api_version.setter def api_version(self, api_version): """Sets the api_version of this V1ValidatingAdmissionPolicy. 
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501 :param api_version: The api_version of this V1ValidatingAdmissionPolicy. # noqa: E501 :type: str """ self._api_version = api_version @property def kind(self): """Gets the kind of this V1ValidatingAdmissionPolicy. # noqa: E501 Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501 :return: The kind of this V1ValidatingAdmissionPolicy. # noqa: E501 :rtype: str """ return self._kind @kind.setter def kind(self, kind): """Sets the kind of this V1ValidatingAdmissionPolicy. Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501 :param kind: The kind of this V1ValidatingAdmissionPolicy. # noqa: E501 :type: str """ self._kind = kind @property def metadata(self): """Gets the metadata of this V1ValidatingAdmissionPolicy. # noqa: E501 :return: The metadata of this V1ValidatingAdmissionPolicy. # noqa: E501 :rtype: V1ObjectMeta """ return self._metadata @metadata.setter def metadata(self, metadata): """Sets the metadata of this V1ValidatingAdmissionPolicy. :param metadata: The metadata of this V1ValidatingAdmissionPolicy. # noqa: E501 :type: V1ObjectMeta """ self._metadata = metadata @property def spec(self): """Gets the spec of this V1ValidatingAdmissionPolicy. 
# noqa: E501 :return: The spec of this V1ValidatingAdmissionPolicy. # noqa: E501 :rtype: V1ValidatingAdmissionPolicySpec """ return self._spec @spec.setter def spec(self, spec): """Sets the spec of this V1ValidatingAdmissionPolicy. :param spec: The spec of this V1ValidatingAdmissionPolicy. # noqa: E501 :type: V1ValidatingAdmissionPolicySpec """ self._spec = spec @property def status(self): """Gets the status of this V1ValidatingAdmissionPolicy. # noqa: E501 :return: The status of this V1ValidatingAdmissionPolicy. # noqa: E501 :rtype: V1ValidatingAdmissionPolicyStatus """ return self._status @status.setter def status(self, status): """Sets the status of this V1ValidatingAdmissionPolicy. :param status: The status of this V1ValidatingAdmissionPolicy. # noqa: E501 :type: V1ValidatingAdmissionPolicyStatus """ self._status = status def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in six.iteritems(self.openapi_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, V1ValidatingAdmissionPolicy): return False return self.to_dict() == other.to_dict() def __ne__(self, other): """Returns true if both objects are not equal""" if not isinstance(other, V1ValidatingAdmissionPolicy): return True return self.to_dict() != other.to_dict()
V1ValidatingAdmissionPolicy
python
airbytehq__airbyte
airbyte-integrations/connectors/source-zoho-crm/source_zoho_crm/types.py
{ "start": 1873, "end": 2507 }
class ____: @classmethod def _field_names(cls) -> Iterable[str]: return [field.name for field in dataclasses.fields(cls)] @classmethod def _filter_by_names(cls, dct: Dict[Any, Any]) -> Dict[Any, Any]: return {key: val for key, val in dct.items() if key in cls._field_names()} @classmethod def from_dict(cls, dct: MutableMapping[Any, Any]) -> object: return cls(**cls._filter_by_names(dct)) def update_from_dict(self, dct: MutableMapping[Any, Any]): for key, val in self._filter_by_names(dct).items(): setattr(self, key, val) @dataclasses.dataclass
FromDictMixin
python
matplotlib__matplotlib
lib/matplotlib/tri/_triinterpolate.py
{ "start": 24071, "end": 41335 }
class ____: """ Implementation of reduced HCT triangular element with explicit shape functions. Computes z, dz, d2z and the element stiffness matrix for bending energy: E(f) = integral( (d2z/dx2 + d2z/dy2)**2 dA) *** Reference for the shape functions: *** [1] Basis functions for general Hsieh-Clough-Tocher _triangles, complete or reduced. Michel Bernadou, Kamal Hassan International Journal for Numerical Methods in Engineering. 17(5):784 - 789. 2.01 *** Element description: *** 9 dofs: z and dz given at 3 apex C1 (conform) """ # 1) Loads matrices to generate shape functions as a function of # triangle eccentricities - based on [1] p.11 ''' M = np.array([ [ 0.00, 0.00, 0.00, 4.50, 4.50, 0.00, 0.00, 0.00, 0.00, 0.00], [-0.25, 0.00, 0.00, 0.50, 1.25, 0.00, 0.00, 0.00, 0.00, 0.00], [-0.25, 0.00, 0.00, 1.25, 0.50, 0.00, 0.00, 0.00, 0.00, 0.00], [ 0.50, 1.00, 0.00, -1.50, 0.00, 3.00, 3.00, 0.00, 0.00, 3.00], [ 0.00, 0.00, 0.00, -0.25, 0.25, 0.00, 1.00, 0.00, 0.00, 0.50], [ 0.25, 0.00, 0.00, -0.50, -0.25, 1.00, 0.00, 0.00, 0.00, 1.00], [ 0.50, 0.00, 1.00, 0.00, -1.50, 0.00, 0.00, 3.00, 3.00, 3.00], [ 0.25, 0.00, 0.00, -0.25, -0.50, 0.00, 0.00, 0.00, 1.00, 1.00], [ 0.00, 0.00, 0.00, 0.25, -0.25, 0.00, 0.00, 1.00, 0.00, 0.50]]) M0 = np.array([ [ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00], [ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00], [ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00], [-1.00, 0.00, 0.00, 1.50, 1.50, 0.00, 0.00, 0.00, 0.00, -3.00], [-0.50, 0.00, 0.00, 0.75, 0.75, 0.00, 0.00, 0.00, 0.00, -1.50], [ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00], [ 1.00, 0.00, 0.00, -1.50, -1.50, 0.00, 0.00, 0.00, 0.00, 3.00], [ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00], [ 0.50, 0.00, 0.00, -0.75, -0.75, 0.00, 0.00, 0.00, 0.00, 1.50]]) M1 = np.array([ [-0.50, 0.00, 0.00, 1.50, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00], [ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00], [-0.25, 0.00, 0.00, 0.75, 
0.00, 0.00, 0.00, 0.00, 0.00, 0.00], [ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00], [ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00], [ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00], [ 0.50, 0.00, 0.00, -1.50, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00], [ 0.25, 0.00, 0.00, -0.75, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00], [ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00]]) M2 = np.array([ [ 0.50, 0.00, 0.00, 0.00, -1.50, 0.00, 0.00, 0.00, 0.00, 0.00], [ 0.25, 0.00, 0.00, 0.00, -0.75, 0.00, 0.00, 0.00, 0.00, 0.00], [ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00], [-0.50, 0.00, 0.00, 0.00, 1.50, 0.00, 0.00, 0.00, 0.00, 0.00], [ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00], [-0.25, 0.00, 0.00, 0.00, 0.75, 0.00, 0.00, 0.00, 0.00, 0.00], [ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00], [ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00], [ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00]]) # 2) Loads matrices to rotate components of gradient & Hessian # vectors in the reference basis of triangle first apex (a0) rotate_dV = np.array([[ 1., 0.], [ 0., 1.], [ 0., 1.], [-1., -1.], [-1., -1.], [ 1., 0.]]) rotate_d2V = np.array([[1., 0., 0.], [0., 1., 0.], [ 0., 0., 1.], [0., 1., 0.], [1., 1., 1.], [ 0., -2., -1.], [1., 1., 1.], [1., 0., 0.], [-2., 0., -1.]]) # 3) Loads Gauss points & weights on the 3 sub-_triangles for P2 # exact integral - 3 points on each subtriangles. # NOTE: as the 2nd derivative is discontinuous , we really need those 9 # points! n_gauss = 9 gauss_pts = np.array([[13./18., 4./18., 1./18.], [ 4./18., 13./18., 1./18.], [ 7./18., 7./18., 4./18.], [ 1./18., 13./18., 4./18.], [ 1./18., 4./18., 13./18.], [ 4./18., 7./18., 7./18.], [ 4./18., 1./18., 13./18.], [13./18., 1./18., 4./18.], [ 7./18., 4./18., 7./18.]], dtype=np.float64) gauss_w = np.ones([9], dtype=np.float64) / 9. 
# 4) Stiffness matrix for curvature energy E = np.array([[1., 0., 0.], [0., 1., 0.], [0., 0., 2.]]) # 5) Loads the matrix to compute DOF_rot from tri_J at apex 0 J0_to_J1 = np.array([[-1., 1.], [-1., 0.]]) J0_to_J2 = np.array([[ 0., -1.], [ 1., -1.]]) def get_function_values(self, alpha, ecc, dofs): """ Parameters ---------- alpha : is a (N x 3 x 1) array (array of column-matrices) of barycentric coordinates, ecc : is a (N x 3 x 1) array (array of column-matrices) of triangle eccentricities, dofs : is a (N x 1 x 9) arrays (arrays of row-matrices) of computed degrees of freedom. Returns ------- Returns the N-array of interpolated function values. """ subtri = np.argmin(alpha, axis=1)[:, 0] ksi = _roll_vectorized(alpha, -subtri, axis=0) E = _roll_vectorized(ecc, -subtri, axis=0) x = ksi[:, 0, 0] y = ksi[:, 1, 0] z = ksi[:, 2, 0] x_sq = x*x y_sq = y*y z_sq = z*z V = _to_matrix_vectorized([ [x_sq*x], [y_sq*y], [z_sq*z], [x_sq*z], [x_sq*y], [y_sq*x], [y_sq*z], [z_sq*y], [z_sq*x], [x*y*z]]) prod = self.M @ V prod += _scalar_vectorized(E[:, 0, 0], self.M0 @ V) prod += _scalar_vectorized(E[:, 1, 0], self.M1 @ V) prod += _scalar_vectorized(E[:, 2, 0], self.M2 @ V) s = _roll_vectorized(prod, 3*subtri, axis=0) return (dofs @ s)[:, 0, 0] def get_function_derivatives(self, alpha, J, ecc, dofs): """ Parameters ---------- *alpha* is a (N x 3 x 1) array (array of column-matrices of barycentric coordinates) *J* is a (N x 2 x 2) array of jacobian matrices (jacobian matrix at triangle first apex) *ecc* is a (N x 3 x 1) array (array of column-matrices of triangle eccentricities) *dofs* is a (N x 1 x 9) arrays (arrays of row-matrices) of computed degrees of freedom. Returns ------- Returns the values of interpolated function derivatives [dz/dx, dz/dy] in global coordinates at locations alpha, as a column-matrices of shape (N x 2 x 1). 
""" subtri = np.argmin(alpha, axis=1)[:, 0] ksi = _roll_vectorized(alpha, -subtri, axis=0) E = _roll_vectorized(ecc, -subtri, axis=0) x = ksi[:, 0, 0] y = ksi[:, 1, 0] z = ksi[:, 2, 0] x_sq = x*x y_sq = y*y z_sq = z*z dV = _to_matrix_vectorized([ [ -3.*x_sq, -3.*x_sq], [ 3.*y_sq, 0.], [ 0., 3.*z_sq], [ -2.*x*z, -2.*x*z+x_sq], [-2.*x*y+x_sq, -2.*x*y], [ 2.*x*y-y_sq, -y_sq], [ 2.*y*z, y_sq], [ z_sq, 2.*y*z], [ -z_sq, 2.*x*z-z_sq], [ x*z-y*z, x*y-y*z]]) # Puts back dV in first apex basis dV = dV @ _extract_submatrices( self.rotate_dV, subtri, block_size=2, axis=0) prod = self.M @ dV prod += _scalar_vectorized(E[:, 0, 0], self.M0 @ dV) prod += _scalar_vectorized(E[:, 1, 0], self.M1 @ dV) prod += _scalar_vectorized(E[:, 2, 0], self.M2 @ dV) dsdksi = _roll_vectorized(prod, 3*subtri, axis=0) dfdksi = dofs @ dsdksi # In global coordinates: # Here we try to deal with the simplest colinear cases, returning a # null matrix. J_inv = _safe_inv22_vectorized(J) dfdx = J_inv @ _transpose_vectorized(dfdksi) return dfdx def get_function_hessians(self, alpha, J, ecc, dofs): """ Parameters ---------- *alpha* is a (N x 3 x 1) array (array of column-matrices) of barycentric coordinates *J* is a (N x 2 x 2) array of jacobian matrices (jacobian matrix at triangle first apex) *ecc* is a (N x 3 x 1) array (array of column-matrices) of triangle eccentricities *dofs* is a (N x 1 x 9) arrays (arrays of row-matrices) of computed degrees of freedom. Returns ------- Returns the values of interpolated function 2nd-derivatives [d2z/dx2, d2z/dy2, d2z/dxdy] in global coordinates at locations alpha, as a column-matrices of shape (N x 3 x 1). 
""" d2sdksi2 = self.get_d2Sidksij2(alpha, ecc) d2fdksi2 = dofs @ d2sdksi2 H_rot = self.get_Hrot_from_J(J) d2fdx2 = d2fdksi2 @ H_rot return _transpose_vectorized(d2fdx2) def get_d2Sidksij2(self, alpha, ecc): """ Parameters ---------- *alpha* is a (N x 3 x 1) array (array of column-matrices) of barycentric coordinates *ecc* is a (N x 3 x 1) array (array of column-matrices) of triangle eccentricities Returns ------- Returns the arrays d2sdksi2 (N x 3 x 1) Hessian of shape functions expressed in covariant coordinates in first apex basis. """ subtri = np.argmin(alpha, axis=1)[:, 0] ksi = _roll_vectorized(alpha, -subtri, axis=0) E = _roll_vectorized(ecc, -subtri, axis=0) x = ksi[:, 0, 0] y = ksi[:, 1, 0] z = ksi[:, 2, 0] d2V = _to_matrix_vectorized([ [ 6.*x, 6.*x, 6.*x], [ 6.*y, 0., 0.], [ 0., 6.*z, 0.], [ 2.*z, 2.*z-4.*x, 2.*z-2.*x], [2.*y-4.*x, 2.*y, 2.*y-2.*x], [2.*x-4.*y, 0., -2.*y], [ 2.*z, 0., 2.*y], [ 0., 2.*y, 2.*z], [ 0., 2.*x-4.*z, -2.*z], [ -2.*z, -2.*y, x-y-z]]) # Puts back d2V in first apex basis d2V = d2V @ _extract_submatrices( self.rotate_d2V, subtri, block_size=3, axis=0) prod = self.M @ d2V prod += _scalar_vectorized(E[:, 0, 0], self.M0 @ d2V) prod += _scalar_vectorized(E[:, 1, 0], self.M1 @ d2V) prod += _scalar_vectorized(E[:, 2, 0], self.M2 @ d2V) d2sdksi2 = _roll_vectorized(prod, 3*subtri, axis=0) return d2sdksi2 def get_bending_matrices(self, J, ecc): """ Parameters ---------- *J* is a (N x 2 x 2) array of jacobian matrices (jacobian matrix at triangle first apex) *ecc* is a (N x 3 x 1) array (array of column-matrices) of triangle eccentricities Returns ------- Returns the element K matrices for bending energy expressed in GLOBAL nodal coordinates. 
K_ij = integral [ (d2zi/dx2 + d2zi/dy2) * (d2zj/dx2 + d2zj/dy2) dA] tri_J is needed to rotate dofs from local basis to global basis """ n = np.size(ecc, 0) # 1) matrix to rotate dofs in global coordinates J1 = self.J0_to_J1 @ J J2 = self.J0_to_J2 @ J DOF_rot = np.zeros([n, 9, 9], dtype=np.float64) DOF_rot[:, 0, 0] = 1 DOF_rot[:, 3, 3] = 1 DOF_rot[:, 6, 6] = 1 DOF_rot[:, 1:3, 1:3] = J DOF_rot[:, 4:6, 4:6] = J1 DOF_rot[:, 7:9, 7:9] = J2 # 2) matrix to rotate Hessian in global coordinates. H_rot, area = self.get_Hrot_from_J(J, return_area=True) # 3) Computes stiffness matrix # Gauss quadrature. K = np.zeros([n, 9, 9], dtype=np.float64) weights = self.gauss_w pts = self.gauss_pts for igauss in range(self.n_gauss): alpha = np.tile(pts[igauss, :], n).reshape(n, 3) alpha = np.expand_dims(alpha, 2) weight = weights[igauss] d2Skdksi2 = self.get_d2Sidksij2(alpha, ecc) d2Skdx2 = d2Skdksi2 @ H_rot K += weight * (d2Skdx2 @ self.E @ _transpose_vectorized(d2Skdx2)) # 4) With nodal (not elem) dofs K = _transpose_vectorized(DOF_rot) @ K @ DOF_rot # 5) Need the area to compute total element energy return _scalar_vectorized(area, K) def get_Hrot_from_J(self, J, return_area=False): """ Parameters ---------- *J* is a (N x 2 x 2) array of jacobian matrices (jacobian matrix at triangle first apex) Returns ------- Returns H_rot used to rotate Hessian from local basis of first apex, to global coordinates. if *return_area* is True, returns also the triangle area (0.5*det(J)) """ # Here we try to deal with the simplest colinear cases; a null # energy and area is imposed. 
J_inv = _safe_inv22_vectorized(J) Ji00 = J_inv[:, 0, 0] Ji11 = J_inv[:, 1, 1] Ji10 = J_inv[:, 1, 0] Ji01 = J_inv[:, 0, 1] H_rot = _to_matrix_vectorized([ [Ji00*Ji00, Ji10*Ji10, Ji00*Ji10], [Ji01*Ji01, Ji11*Ji11, Ji01*Ji11], [2*Ji00*Ji01, 2*Ji11*Ji10, Ji00*Ji11+Ji10*Ji01]]) if not return_area: return H_rot else: area = 0.5 * (J[:, 0, 0]*J[:, 1, 1] - J[:, 0, 1]*J[:, 1, 0]) return H_rot, area def get_Kff_and_Ff(self, J, ecc, triangles, Uc): """ Build K and F for the following elliptic formulation: minimization of curvature energy with value of function at node imposed and derivatives 'free'. Build the global Kff matrix in cco format. Build the full Ff vec Ff = - Kfc x Uc. Parameters ---------- *J* is a (N x 2 x 2) array of jacobian matrices (jacobian matrix at triangle first apex) *ecc* is a (N x 3 x 1) array (array of column-matrices) of triangle eccentricities *triangles* is a (N x 3) array of nodes indexes. *Uc* is (N x 3) array of imposed displacements at nodes Returns ------- (Kff_rows, Kff_cols, Kff_vals) Kff matrix in COO format - Duplicate (row, col) entries must be summed. Ff: force vector - dim npts * 3 """ ntri = np.size(ecc, 0) vec_range = np.arange(ntri, dtype=np.int32) c_indices = np.full(ntri, -1, dtype=np.int32) # for unused dofs, -1 f_dof = [1, 2, 4, 5, 7, 8] c_dof = [0, 3, 6] # vals, rows and cols indices in global dof numbering f_dof_indices = _to_matrix_vectorized([[ c_indices, triangles[:, 0]*2, triangles[:, 0]*2+1, c_indices, triangles[:, 1]*2, triangles[:, 1]*2+1, c_indices, triangles[:, 2]*2, triangles[:, 2]*2+1]]) expand_indices = np.ones([ntri, 9, 1], dtype=np.int32) f_row_indices = _transpose_vectorized(expand_indices @ f_dof_indices) f_col_indices = expand_indices @ f_dof_indices K_elem = self.get_bending_matrices(J, ecc) # Extracting sub-matrices # Explanation & notations: # * Subscript f denotes 'free' degrees of freedom (i.e. dz/dx, dz/dx) # * Subscript c denotes 'condensated' (imposed) degrees of freedom # (i.e. 
z at all nodes) # * F = [Ff, Fc] is the force vector # * U = [Uf, Uc] is the imposed dof vector # [ Kff Kfc ] # * K = [ ] is the laplacian stiffness matrix # [ Kcf Kff ] # * As F = K x U one gets straightforwardly: Ff = - Kfc x Uc # Computing Kff stiffness matrix in sparse COO format Kff_vals = np.ravel(K_elem[np.ix_(vec_range, f_dof, f_dof)]) Kff_rows = np.ravel(f_row_indices[np.ix_(vec_range, f_dof, f_dof)]) Kff_cols = np.ravel(f_col_indices[np.ix_(vec_range, f_dof, f_dof)]) # Computing Ff force vector in sparse COO format Kfc_elem = K_elem[np.ix_(vec_range, f_dof, c_dof)] Uc_elem = np.expand_dims(Uc, axis=2) Ff_elem = -(Kfc_elem @ Uc_elem)[:, :, 0] Ff_indices = f_dof_indices[np.ix_(vec_range, [0], f_dof)][:, 0, :] # Extracting Ff force vector in dense format # We have to sum duplicate indices - using bincount Ff = np.bincount(np.ravel(Ff_indices), weights=np.ravel(Ff_elem)) return Kff_rows, Kff_cols, Kff_vals, Ff # :class:_DOF_estimator, _DOF_estimator_user, _DOF_estimator_geom, # _DOF_estimator_min_E # Private classes used to compute the degree of freedom of each triangular # element for the TriCubicInterpolator.
_ReducedHCT_Element
python
ansible__ansible
packaging/release.py
{ "start": 9392, "end": 9647 }
class ____: """Details required to create a pull request.""" upstream_user: str upstream_repo: str upstream_branch: str user: str repo: str branch: str title: str body: str @dataclasses.dataclass(frozen=True)
PullRequest
python
ZoranPandovski__al-go-rithms
machine_learning/Neural_Networks/Back Propogation/back_propagation_neural_network.py
{ "start": 3021, "end": 5997 }
class ____: """ Back Propagation Neural Network model """ def __init__(self): self.layers = [] self.train_mse = [] self.fig_loss = plt.figure() self.ax_loss = self.fig_loss.add_subplot(1, 1, 1) def add_layer(self, layer): self.layers.append(layer) def build(self): for i, layer in enumerate(self.layers[:]): if i < 1: layer.is_input_layer = True else: layer.initializer(self.layers[i - 1].units) def summary(self): for i, layer in enumerate(self.layers[:]): print("------- layer %d -------" % i) print("weight.shape ", np.shape(layer.weight)) print("bias.shape ", np.shape(layer.bias)) def train(self, xdata, ydata, train_round, accuracy): self.train_round = train_round self.accuracy = accuracy self.ax_loss.hlines(self.accuracy, 0, self.train_round * 1.1) x_shape = np.shape(xdata) for round_i in range(train_round): all_loss = 0 for row in range(x_shape[0]): _xdata = np.asmatrix(xdata[row, :]).T _ydata = np.asmatrix(ydata[row, :]).T # forward propagation for layer in self.layers: _xdata = layer.forward_propagation(_xdata) loss, gradient = self.cal_loss(_ydata, _xdata) all_loss = all_loss + loss # back propagation: the input_layer does not upgrade for layer in self.layers[:0:-1]: gradient = layer.back_propagation(gradient) mse = all_loss / x_shape[0] self.train_mse.append(mse) self.plot_loss() if mse < self.accuracy: print("----达到精度----") return mse def cal_loss(self, ydata, ydata_): self.loss = np.sum(np.power((ydata - ydata_), 2)) self.loss_gradient = 2 * (ydata_ - ydata) # vector (shape is the same as _ydata.shape) return self.loss, self.loss_gradient def plot_loss(self): if self.ax_loss.lines: self.ax_loss.lines.remove(self.ax_loss.lines[0]) self.ax_loss.plot(self.train_mse, "r-") plt.ion() plt.xlabel("step") plt.ylabel("loss") plt.show() plt.pause(0.1) def example(): x = np.random.randn(10, 10) y = np.asarray( [ [0.8, 0.4], [0.4, 0.3], [0.34, 0.45], [0.67, 0.32], [0.88, 0.67], [0.78, 0.77], [0.55, 0.66], [0.55, 0.43], [0.54, 0.1], [0.1, 0.5], ] ) model = BPNN() for i in 
(10, 20, 30, 2): model.add_layer(DenseLayer(i)) model.build() model.summary() model.train(xdata=x, ydata=y, train_round=100, accuracy=0.01) if __name__ == "__main__": example()
BPNN
python
django__django
tests/multiple_database/tests.py
{ "start": 97533, "end": 97724 }
class ____: """Disallow all relations.""" def allow_relation(self, obj1, obj2, **hints): return False @override_settings(DATABASE_ROUTERS=[NoRelationRouter()])
NoRelationRouter
python
google__jax
jax/_src/pallas/mosaic/interpret/interpret_pallas_call.py
{ "start": 7482, "end": 31506 }
class ____: """A simple counter that is thread-safe.""" def __init__(self, initial_value: int): self.value = initial_value self.lock = threading.Lock() def get_next(self): with self.lock: result = self.value self.value += 1 return result # TODO(jburnim): Do we want to support multiple instances of SharedMemory? # Maybe for running multiple distinct interpreted computations in parallel? _shared_memory: memory.SharedMemory | None = None _shared_memory_init_lock = threading.Lock() races: RaceDetectionState | None = None dma_id_counter: Counter | None = None def reset_tpu_interpret_mode_state(): """Resets all global, shared state used by TPU interpret mode. TPU interpret mode uses global, shared state for simulating memory buffers and semaphores, for race detection, etc., when interpreting a kernel. Normally, this shared state is cleaned up after a kernel is interpreted. But if an exception is thrown while interpreting a kernel, the shared state is not cleaned up, allowing the simulated TPU state to be examined for debugging purposes. In this case, the shared state must be reset before any further kernels are interpreted. """ global _shared_memory, races, dma_id_counter with _shared_memory_init_lock: _shared_memory = None races = None dma_id_counter = None def _get_shared_memory() -> memory.SharedMemory: assert _shared_memory is not None return _shared_memory def _clear_shared_memory(): global _shared_memory with _shared_memory_init_lock: _shared_memory = None def _get_vector_clock_size( num_devices, num_cores_per_device, *, interpret_params ) -> int: """Returns the number of vector clocks to use.`""" num_cores = num_devices * num_cores_per_device if interpret_params.vector_clock_size is not None: if num_cores >= interpret_params.vector_clock_size: raise ValueError( f'Vector clock size ({interpret_params.vector_clock_size}) must be ' f'greater than the total number of cores ({num_cores}).' 
) return interpret_params.vector_clock_size else: # Default the vector clock size to twice the total number of cores. return 2 * num_cores def _initialize_shared_memory( device_id, num_devices, num_cores_per_device, *, interpret_params ): global _shared_memory, races, dma_id_counter del device_id num_devices = int(num_devices) num_cores_per_device = int(num_cores_per_device) num_cores = num_devices * num_cores_per_device with _shared_memory_init_lock: if _shared_memory is None: vector_clock_size = _get_vector_clock_size( num_devices, num_cores_per_device, interpret_params=interpret_params ) races = RaceDetectionState(num_cores=num_cores) dma_id_counter = Counter(100) _shared_memory = memory.SharedMemory( num_devices=num_devices, num_cores_per_device=num_cores_per_device, out_of_bounds_reads=interpret_params.out_of_bounds_reads, dma_execution_mode=interpret_params.dma_execution_mode, uninitialized_memory=interpret_params.uninitialized_memory, detect_races=interpret_params.detect_races, vector_clock_size=vector_clock_size, clocks=[ vc.make_vector_clock(vector_clock_size) for _ in range(num_cores) ], barrier=threading.Barrier( num_devices, action=_update_clocks_for_global_barrier ), clean_up_barrier=threading.Barrier( num_devices, action=_clear_shared_memory ), ) assert _shared_memory.num_cores == num_cores def _update_clocks(low_global_core_id, high_global_core_id): """Synchronizes the vector clocks for the cores with ids in the range between the two arguments.""" shared_memory = _get_shared_memory() # Despite only updating the vector clocks for some cores, we still need to # hold the global lock to ensure that no other devices are concurrently # accessing the same vector clocks. 
with shared_memory.lock: for c in shared_memory.clocks[low_global_core_id + 1 : high_global_core_id]: vc.update_vector_clock(shared_memory.clocks[low_global_core_id], c) for c in shared_memory.clocks[low_global_core_id + 1 : high_global_core_id]: vc.update_vector_clock(c, shared_memory.clocks[low_global_core_id]) def _update_clocks_for_device_barrier(device_id): """Synchronizes the vector clocks for the cores on the given device.""" shared_memory = _get_shared_memory() low_core_id = device_id * shared_memory.num_cores_per_device high_core_id = (device_id + 1) * shared_memory.num_cores_per_device _update_clocks(low_core_id, high_core_id) def _update_clocks_for_global_barrier(): """Synchronizes all vector clocks.""" shared_memory = _get_shared_memory() _update_clocks(0, shared_memory.num_cores) def _barrier(device_id): del device_id shared_memory = _get_shared_memory() if shared_memory.num_devices > 1: shared_memory.barrier.wait() def _clean_up_shared_memory(device_id): del device_id shared_memory = _get_shared_memory() shared_memory.clean_up_barrier.wait() def _check_for_revisiting(device_id, local_core_id, loop_idx, output_blocks): device_id = int(device_id) local_core_id = int(local_core_id) loop_idx = tuple(int(x) for x in loop_idx) try: output_blocks = jax.tree.map(int, output_blocks) except: raise ValueError('Advanced indexers are not supported on TPU') output_ranges = [ _to_range(b) if b is not None else None for b in output_blocks ] shared_memory = _get_shared_memory() past_output_ranges = shared_memory.output_ranges[(device_id, local_core_id)] if not past_output_ranges: past_output_ranges.append((loop_idx, output_ranges)) return for i in range(len(output_ranges)): if output_ranges[i] is None: continue if past_output_ranges[-1][1][i] == output_ranges[i]: continue # TODO(jburnim): Do something constant time instead of linear here. 
past_idxs = [ j for j, ors in enumerate(past_output_ranges) if ors[1][i] == output_ranges[i] ] if past_idxs: raise RuntimeError( f'Revisited block {output_ranges[i]} of output {i} in iteration ' f'{loop_idx}. The block was previously visited in iterations ' f'{past_output_ranges[past_idxs[0]][0]} through ' f'{past_output_ranges[past_idxs[-1]][0]} .' ) past_output_ranges.append((loop_idx, output_ranges)) def _validate(device_id): device_id = int(device_id) shared_memory = _get_shared_memory() semaphores = shared_memory.get_sempahores_with_nonzero_count(device_id) if semaphores: sem, global_core_id = semaphores[0] # TODO(jburnim): Make this raise an error, but in a way that doesn't # cause other devices to hang later in `_clean_up_shared_memory`. print( f'Semaphore {sem.id} has non-zero count for {device_id} (global core' f' {global_core_id}) at kernel exit:' f' {sem.count_by_core[global_core_id]}' ) def _allocate_buffer( device_id: Array, local_core_id: Array | None, memory_space: Array, val: Array, ): """Allocates a memory buffer on the device with id `device_id` and core with id `local_core_id`. Args: device_id: Singleton array holding the device id where the buffer will be allocated. local_core_id: None or singleton array holding the core id where the buffer will be allocated. If None, a buffer will be allocated on each cores on the device. memory_space: Singleton array indicating the memory space to allocate the buffer in. If the corresponding memory space is "any" (i.e. HBM), at most one buffer will be allocated and it will belong to (local) core id 0. val: Array of values to initialize the allocated buffer with. Returns: Integer id for the allocated buffer. 
""" device_id = int(device_id) memory_space_str = TPU_MEMORY_SPACE_NAMES[int(memory_space)] del memory_space val = np.array(val) shared_memory = _get_shared_memory() if local_core_id is None: local_core_id_int = 0 local_core_ids = tuple(range(shared_memory.num_cores_per_device)) else: local_core_id_int = int(local_core_id) local_core_ids = (local_core_id_int,) del local_core_id local_core_id_to_buffer_id: dict[int, int] = {} for lci in local_core_ids: buffer_id = shared_memory.get_next_buffer_id(device_id, lci) if memory_space_str in ['any', 'hbm']: # If allocating in HBM, only actually allocate a buffer once. The first # local core (i.e. thread) that gets here allocates the buffer, but the # buffer is still keyed in the shared memory with core ID 0. However, # since the buffer is shared across all cores, we initialize the buffer's # `ref_count` with the number of cores per device. This ensures that the # buffer is not deallocated until all cores have exited the scope of the # allocation (e.g. have exited the body of a `run_scoped`). key = (memory_space_str, buffer_id, device_id, 0) ref_count = shared_memory.num_cores_per_device else: key = (memory_space_str, buffer_id, device_id, lci) ref_count = 1 if len(local_core_id_to_buffer_id) > 0: # If we are allocating more than one buffer, we must make additional # copies of `val` so that each buffer is a distinct ndarray. val = val.copy() shared_memory.allocate_buffer(key, ref_count=ref_count, value=val) local_core_id_to_buffer_id[lci] = buffer_id # The buffer ids should always be kept in sync across all cores. assert all( buffer_id == local_core_id_to_buffer_id[local_core_id_int] for buffer_id in local_core_id_to_buffer_id.values() ) # TODO(jburnim): Raise an error if buffer_id is too big for int16. 
return np.int16(local_core_id_to_buffer_id[local_core_id_int]) def _local_core_id_or_zero_if_hbm(local_core_id: int, memory_space: str) -> int: if memory_space in ['any', 'hbm']: return 0 return local_core_id def _deallocate_buffer(device_id, local_core_id, memory_space, buffer_id): device_id = int(device_id) local_core_id = int(local_core_id) memory_space = TPU_MEMORY_SPACE_NAMES[int(memory_space)] buffer_id = int(buffer_id) local_core_id = _local_core_id_or_zero_if_hbm(local_core_id, memory_space) shared_memory = _get_shared_memory() key = (memory_space, buffer_id, device_id, local_core_id) shared_memory.deallocate_buffer(key) def _allocate_semaphores( device_id: Array, local_core_id: Array | None, shape: Array ): """Allocates semaphores on the device with id `device_id` and core with id `local_core_id`. The number of semaphores allocated is given by the product of the entries in `shape`. Since for each semaphore id there is really only one global `Semaphore` object, 'allocation' of semaphores per device and core here means that the internal counter of semaphore ids that is held by `SharedMemory` is incremented for each the device and core (or for all cores on the dive if argument `local_core_id` is None, see below). Args: device_id: Singleton array holding the id for the device where the semaphores will be allocated. local_core_id: None or singleton array holding the id for the core where the semaphores will be allocated. If None, semaphores will be allocated on all cores on the device. shape: Shape of the semaphore array to allocate. Returns: Array of semaphore ids. 
""" device_id = int(device_id) shape = tuple(map(int, shape)) num_semaphores = math.prod(shape) shared_memory = _get_shared_memory() if local_core_id is None: local_core_id_int = 0 global_core_ids = shared_memory.get_global_core_ids(device_id) else: local_core_id_int = int(local_core_id) global_core_ids = ( shared_memory.get_global_core_id(device_id, local_core_id_int), ) del local_core_id global_core_id_to_semaphore_id = {} for gci in global_core_ids: semaphore_id = shared_memory.allocate_semaphores(gci, num_semaphores) global_core_id_to_semaphore_id[gci] = semaphore_id global_core_id = shared_memory.get_global_core_id( device_id, local_core_id_int ) # The semaphore ids should always be kept in sync across all cores. assert all( semaphore_id == global_core_id_to_semaphore_id[global_core_id] for semaphore_id in global_core_id_to_semaphore_id.values() ) # NOTE: For now, we use a relatively uncommon datatype (int16) for # semaphore (and buffer) IDs, so these values are more easily identifiable # in kernels. # # TODO(jburnim): Raise an error if any IDs are too big for int16. semaphore_id = global_core_id_to_semaphore_id[global_core_id] return np.arange( semaphore_id, semaphore_id + num_semaphores, dtype=np.int16 ).reshape(shape) TPU_MEMORY_SPACE_IDXS: dict[ mosaic_core.MemorySpace | pallas_core.MemorySpace | None, int ] = {v: i for i, v in enumerate(mosaic_core.MemorySpace)} TPU_MEMORY_SPACE_IDXS[pallas_core.MemorySpace.ANY] = TPU_MEMORY_SPACE_IDXS[ mosaic_core.MemorySpace.ANY ] TPU_MEMORY_SPACE_NAMES = { i: v.value for i, v in enumerate(mosaic_core.MemorySpace) } # Default to VMEM when no memory space is specified. 
TPU_MEMORY_SPACE_IDXS[None] = TPU_MEMORY_SPACE_IDXS[ mosaic_core.MemorySpace.VMEM ] def get_barrier_semaphore(device_id, collective_id): del device_id collective_id = int(collective_id) shared_memory = _get_shared_memory() shared_memory.guarantee_semaphore_with_fixed_id(collective_id) return np.int16(collective_id) def _transform_slice_or_index(slice_or_idx): if isinstance(slice_or_idx, int): return slice_or_idx else: start = int(slice_or_idx.start) size = int(slice_or_idx.size) stride = int(slice_or_idx.stride) return slice(start, start + size * stride, stride) def _compose_slice_or_index(slice_or_idx1, slice_or_idx2): ret = [] i = 0 j = 0 while True: if i == len(slice_or_idx1): ret.extend(slice_or_idx2[j:]) return tuple(ret) elif j == len(slice_or_idx2): ret.extend(slice_or_idx1[i:]) return tuple(ret) elif isinstance(slice_or_idx1[i], int): ret.append(slice_or_idx1[i]) i += 1 elif isinstance(slice_or_idx2[j], int): ret.append( slice_or_idx1[i].start + slice_or_idx2[j] * slice_or_idx1[i].step ) i += 1 j += 1 else: ret.append( slice( slice_or_idx1[i].start + slice_or_idx2[j].start * slice_or_idx1[i].step, slice_or_idx1[i].start + slice_or_idx2[j].stop * slice_or_idx1[i].step, slice_or_idx1[i].step * slice_or_idx2[j].step, ) ) i += 1 j += 1 def _to_range(transforms) -> tuple[slice | int, ...]: ret = () for transform in transforms: # For now, assume only NDIndexer transforms. 
ret = _compose_slice_or_index( ret, tuple(_transform_slice_or_index(i) for i in transform.indices) ) return ret def _to_int(x: int | Array | None) -> int | None: """Converts a value to an integer, or returns None if the value is None.""" if x is None: return None return int(x) def get( device_id, local_core_id, memory_space, buffer_id, transforms, block_indices=None, grid_loop_idx=None, *, src_device_id=None, src_local_core_id=None, clock=None, source_info=None, input_name=None, ) -> np.ndarray: device_id = int(device_id) local_core_id = int(local_core_id) memory_space = TPU_MEMORY_SPACE_NAMES[int(memory_space)] buffer_id = int(buffer_id) try: transforms = jax.tree.map(int, transforms) except: raise ValueError('Advanced indexers are not supported on TPU') src_device_id = _to_int(src_device_id) src_local_core_id = _to_int(src_local_core_id) if input_name is not None: # NOTE: input_name, block_indices, and grid_loop_idx are set only if this # function is being called to read a block from a pallas_call input (at the # start of one iteration of the kernel body). block_indices = tuple(int(x) for x in block_indices) grid_loop_idx = tuple(int(x) for x in tuple(grid_loop_idx)) shared_memory = _get_shared_memory() local_core_id_for_buffer = _local_core_id_or_zero_if_hbm( local_core_id, memory_space ) global_core_id = shared_memory.get_global_core_id(device_id, local_core_id) key = (memory_space, buffer_id, device_id, local_core_id_for_buffer) read_range = _to_range(transforms) ret, (shape, dtype), clock_ = shared_memory.get_buffer_content( key, read_range, global_core_id ) clock = clock if clock is not None else clock_ # Compute the shape of the read value, assuming the read is fully in-bounds. # TODO(jburnim): We already know this shape in the Jaxpr where we insert a # callback to `get`. Should we just pass the shape to `get`? # TODO(jburnim): Move to a helper function? 
full_read_shape = [] assert len(read_range) <= len(shape) for dim_size, idx_or_slice in itertools.zip_longest( shape, read_range, fillvalue=None ): assert isinstance(dim_size, int) if idx_or_slice is None: full_read_shape.append(dim_size) elif isinstance(idx_or_slice, int): continue else: dim_size = (idx_or_slice.stop - idx_or_slice.start) // idx_or_slice.step assert isinstance(dim_size, int) full_read_shape.append(dim_size) full_read_shape = tuple(full_read_shape) if (ret is None) or (full_read_shape != ret.shape): if shared_memory.out_of_bounds_reads == 'raise': if source_info is None: ctx = contextlib.nullcontext() else: ctx = source_info_util.user_context( traceback=source_info.traceback, name_stack=source_info.name_stack ) # type: ignore[assignment] with ctx: if input_name is None: raise IndexError( 'Out-of-bounds read of' f' ({device_id} {local_core_id} {memory_space} {buffer_id}):' f' reading [{read_range}] but buffer has shape {shape}.' ) else: # Different error message when we are reading a block of an input, # to copy it to a buffer before invoking the kernel body. raise IndexError( f'Out-of-bounds block index {block_indices} for' f' input "{input_name}" in iteration {grid_loop_idx}' f' on device {device_id} (core {local_core_id}):' f' reading [{read_range}] but input has shape {shape}.' 
) # out_of_bounds_reads == "uninitialized" uninit_array = np.full( full_read_shape, _uninitialized_value(dtype, shared_memory.uninitialized_memory), dtype=dtype, ) if ret is None: ret = uninit_array else: uninit_array[tuple(slice(s) for s in ret.shape)] = ret ret = uninit_array if shared_memory.detect_races: if src_device_id is None: src_device_id = device_id if src_local_core_id is None: src_local_core_id = local_core_id assert races is not None races.check_read( src_device_id, src_local_core_id, clock, (memory_space, buffer_id, device_id, local_core_id_for_buffer), read_range, source_info=source_info, ) return ret def store( device_id, local_core_id, memory_space, buffer_id, transforms, val, block_indices=None, grid_loop_idx=None, *, src_device_id=None, src_local_core_id=None, clock=None, source_info=None, output_name=None, ): device_id = int(device_id) local_core_id = int(local_core_id) memory_space = TPU_MEMORY_SPACE_NAMES[int(memory_space)] buffer_id = int(buffer_id) try: transforms = jax.tree.map(int, transforms) except: raise ValueError('Advanced indexers are not supported on TPU') val = np.array(val) src_device_id = _to_int(src_device_id) src_local_core_id = _to_int(src_local_core_id) if output_name is not None: # NOTE: output_name, block_indices, and grid_loop_idx are set only if this # function is being called to store a block into a pallas_call output (at # the end of one iteration of the kernel body). 
block_indices = tuple(int(x) for x in block_indices) grid_loop_idx = tuple(int(x) for x in tuple(grid_loop_idx)) shared_memory = _get_shared_memory() local_core_id_for_buffer = _local_core_id_or_zero_if_hbm( local_core_id, memory_space ) global_core_id = shared_memory.get_global_core_id(device_id, local_core_id) key = (memory_space, buffer_id, device_id, local_core_id_for_buffer) write_range = _to_range(transforms) in_bounds, (shape, _), clock_ = shared_memory.store_buffer_content( key, write_range, val, global_core_id ) clock = clock if clock is not None else clock_ if not in_bounds: if output_name is None: raise ValueError( 'Out-of-bounds write of' f' ({device_id} {local_core_id} {memory_space} {buffer_id}):' f' writing [{write_range}] but buffer has shape {shape} .' ) else: # Different error message when we are copying a kernel buffer to a # block of an output (just after a kernel invocation). raise IndexError( f'Out-of-bounds block index {block_indices} for' f' output "{output_name}" in iteration {grid_loop_idx}' f' on device {device_id} (core {local_core_id}):' f' reading [{write_range}] but output has shape {shape}.' 
) if shared_memory.detect_races: if src_device_id is None: src_device_id = device_id if src_local_core_id is None: src_local_core_id = local_core_id assert races is not None races.check_write( src_device_id, src_local_core_id, clock, (memory_space, buffer_id, device_id, local_core_id_for_buffer), write_range, source_info=source_info, ) def swap( device_id, local_core_id, memory_space, buffer_id, transforms, val, mask, *, source_info=None, ): device_id = int(device_id) local_core_id = int(local_core_id) memory_space = TPU_MEMORY_SPACE_NAMES[int(memory_space)] buffer_id = int(buffer_id) try: transforms = jax.tree.map(int, transforms) except: raise ValueError('Advanced indexers are not supported on TPU') val = np.array(val) mask = np.array(mask) if mask is not None else None if mask is not None: assert mask.shape == val.shape shared_memory = _get_shared_memory() local_core_id_for_buffer = _local_core_id_or_zero_if_hbm( local_core_id, memory_space ) global_core_id = shared_memory.get_global_core_id(device_id, local_core_id) key = (memory_space, buffer_id, device_id, local_core_id_for_buffer) read_write_range = _to_range(transforms) ret, (shape, _), clock = shared_memory.swap_buffer_content( key, read_write_range, val, mask, global_core_id ) if ret is None: if mask is None: raise ValueError( 'Out-of-bounds swap of' f' ({device_id} {local_core_id} {memory_space} {buffer_id}):' f' swapping [{read_write_range}] but buffer has shape' f' {shape} .' ) else: # TODO(jburnim): Include indices of out-of-bounds locations where mask # is True. raise ValueError( 'Out-of-bounds masked swap of' f' ({device_id} {local_core_id} {memory_space} {buffer_id}): swapping' f' [{read_write_range}] but buffer has shape {shape} . ' ) if shared_memory.detect_races: assert races is not None races.check_write( device_id, local_core_id, clock, (memory_space, buffer_id, device_id, local_core_id_for_buffer), read_write_range, source_info=source_info, ) return ret
Counter
python
huggingface__transformers
src/transformers/integrations/ggml.py
{ "start": 24000, "end": 24437 }
class ____(GPT2Converter): def __init__(self, tokenizer_dict): self.original_tokenizer = GGUFTokenizerSkeleton(tokenizer_dict) self.additional_kwargs = {} def converted(self) -> Tokenizer: vocab = {word: i for i, word in enumerate(self.original_tokenizer.tokens)} merges = self.original_tokenizer.merges tokenizer = super().converted(vocab, merges) return tokenizer
GGUFGPTConverter
python
scrapy__scrapy
tests/test_downloader_handlers.py
{ "start": 4928, "end": 10584 }
class ____: download_handler_cls: type = S3DownloadHandler # test use same example keys than amazon developer guide # http://s3.amazonaws.com/awsdocs/S3/20060301/s3-dg-20060301.pdf # and the tests described here are the examples from that manual AWS_ACCESS_KEY_ID = "0PN5J17HBGZHT7JJ3X82" AWS_SECRET_ACCESS_KEY = "uV3F3YluFJax1cknvbcGwgjvx4QpvB+leU8dUj2o" def setup_method(self): crawler = get_crawler() s3reqh = build_from_crawler( S3DownloadHandler, crawler, aws_access_key_id=self.AWS_ACCESS_KEY_ID, aws_secret_access_key=self.AWS_SECRET_ACCESS_KEY, httpdownloadhandler=HttpDownloadHandlerMock, ) self.download_request = s3reqh.download_request self.spider = DefaultSpider() @contextlib.contextmanager def _mocked_date(self, date): try: import botocore.auth # noqa: F401,PLC0415 except ImportError: yield else: # We need to mock botocore.auth.formatdate, because otherwise # botocore overrides Date header with current date and time # and Authorization header is different each time with mock.patch("botocore.auth.formatdate") as mock_formatdate: mock_formatdate.return_value = date yield def test_extra_kw(self): crawler = get_crawler() with pytest.raises((TypeError, NotConfigured)): build_from_crawler( S3DownloadHandler, crawler, extra_kw=True, ) def test_request_signing1(self): # gets an object from the johnsmith bucket. date = "Tue, 27 Mar 2007 19:36:42 +0000" req = Request("s3://johnsmith/photos/puppy.jpg", headers={"Date": date}) with self._mocked_date(date): httpreq = self.download_request(req, self.spider) assert ( httpreq.headers["Authorization"] == b"AWS 0PN5J17HBGZHT7JJ3X82:xXjDGYUmKxnwqr5KXNPGldn5LbA=" ) def test_request_signing2(self): # puts an object into the johnsmith bucket. 
date = "Tue, 27 Mar 2007 21:15:45 +0000" req = Request( "s3://johnsmith/photos/puppy.jpg", method="PUT", headers={ "Content-Type": "image/jpeg", "Date": date, "Content-Length": "94328", }, ) with self._mocked_date(date): httpreq = self.download_request(req, self.spider) assert ( httpreq.headers["Authorization"] == b"AWS 0PN5J17HBGZHT7JJ3X82:hcicpDDvL9SsO6AkvxqmIWkmOuQ=" ) def test_request_signing3(self): # lists the content of the johnsmith bucket. date = "Tue, 27 Mar 2007 19:42:41 +0000" req = Request( "s3://johnsmith/?prefix=photos&max-keys=50&marker=puppy", method="GET", headers={ "User-Agent": "Mozilla/5.0", "Date": date, }, ) with self._mocked_date(date): httpreq = self.download_request(req, self.spider) assert ( httpreq.headers["Authorization"] == b"AWS 0PN5J17HBGZHT7JJ3X82:jsRt/rhG+Vtp88HrYL706QhE4w4=" ) def test_request_signing4(self): # fetches the access control policy sub-resource for the 'johnsmith' bucket. date = "Tue, 27 Mar 2007 19:44:46 +0000" req = Request("s3://johnsmith/?acl", method="GET", headers={"Date": date}) with self._mocked_date(date): httpreq = self.download_request(req, self.spider) assert ( httpreq.headers["Authorization"] == b"AWS 0PN5J17HBGZHT7JJ3X82:thdUi9VAkzhkniLj96JIrOPGi0g=" ) def test_request_signing6(self): # uploads an object to a CNAME style virtual hosted bucket with metadata. 
date = "Tue, 27 Mar 2007 21:06:08 +0000" req = Request( "s3://static.johnsmith.net:8080/db-backup.dat.gz", method="PUT", headers={ "User-Agent": "curl/7.15.5", "Host": "static.johnsmith.net:8080", "Date": date, "x-amz-acl": "public-read", "content-type": "application/x-download", "Content-MD5": "4gJE4saaMU4BqNR0kLY+lw==", "X-Amz-Meta-ReviewedBy": "joe@johnsmith.net,jane@johnsmith.net", "X-Amz-Meta-FileChecksum": "0x02661779", "X-Amz-Meta-ChecksumAlgorithm": "crc32", "Content-Disposition": "attachment; filename=database.dat", "Content-Encoding": "gzip", "Content-Length": "5913339", }, ) with self._mocked_date(date): httpreq = self.download_request(req, self.spider) assert ( httpreq.headers["Authorization"] == b"AWS 0PN5J17HBGZHT7JJ3X82:C0FlOtU8Ylb9KDTpZqYkZPX91iI=" ) def test_request_signing7(self): # ensure that spaces are quoted properly before signing date = "Tue, 27 Mar 2007 19:42:41 +0000" req = Request( "s3://johnsmith/photos/my puppy.jpg?response-content-disposition=my puppy.jpg", method="GET", headers={"Date": date}, ) with self._mocked_date(date): httpreq = self.download_request(req, self.spider) assert ( httpreq.headers["Authorization"] == b"AWS 0PN5J17HBGZHT7JJ3X82:+CfvG8EZ3YccOrRVMXNaK2eKZmM=" )
TestS3
python
kubernetes-client__python
kubernetes/client/models/v1_service_backend_port.py
{ "start": 383, "end": 4495 }
class ____(object): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. """ """ Attributes: openapi_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ openapi_types = { 'name': 'str', 'number': 'int' } attribute_map = { 'name': 'name', 'number': 'number' } def __init__(self, name=None, number=None, local_vars_configuration=None): # noqa: E501 """V1ServiceBackendPort - a model defined in OpenAPI""" # noqa: E501 if local_vars_configuration is None: local_vars_configuration = Configuration() self.local_vars_configuration = local_vars_configuration self._name = None self._number = None self.discriminator = None if name is not None: self.name = name if number is not None: self.number = number @property def name(self): """Gets the name of this V1ServiceBackendPort. # noqa: E501 name is the name of the port on the Service. This is a mutually exclusive setting with \"Number\". # noqa: E501 :return: The name of this V1ServiceBackendPort. # noqa: E501 :rtype: str """ return self._name @name.setter def name(self, name): """Sets the name of this V1ServiceBackendPort. name is the name of the port on the Service. This is a mutually exclusive setting with \"Number\". # noqa: E501 :param name: The name of this V1ServiceBackendPort. # noqa: E501 :type: str """ self._name = name @property def number(self): """Gets the number of this V1ServiceBackendPort. # noqa: E501 number is the numerical port number (e.g. 80) on the Service. This is a mutually exclusive setting with \"Name\". # noqa: E501 :return: The number of this V1ServiceBackendPort. # noqa: E501 :rtype: int """ return self._number @number.setter def number(self, number): """Sets the number of this V1ServiceBackendPort. number is the numerical port number (e.g. 80) on the Service. This is a mutually exclusive setting with \"Name\". 
# noqa: E501 :param number: The number of this V1ServiceBackendPort. # noqa: E501 :type: int """ self._number = number def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in six.iteritems(self.openapi_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, V1ServiceBackendPort): return False return self.to_dict() == other.to_dict() def __ne__(self, other): """Returns true if both objects are not equal""" if not isinstance(other, V1ServiceBackendPort): return True return self.to_dict() != other.to_dict()
V1ServiceBackendPort
python
fluentpython__example-code-2e
12-seq-hacking/vector_v2.py
{ "start": 2251, "end": 3486 }
class ____:
    """A multidimensional vector backed by a flat array of doubles.

    Supports iteration, length, indexing/slicing (slices yield a new
    vector), value equality, truthiness via magnitude, and a round-trip
    binary representation (``bytes()`` / ``frombytes``).
    """

    typecode = 'd'

    def __init__(self, components):
        # Keep the coordinates in a compact typed array rather than a list.
        self._components = array(self.typecode, components)

    def __iter__(self):
        return iter(self._components)

    def __repr__(self):
        # reprlib abbreviates long component lists with '...'; keep only the
        # bracketed portion of the array repr for a clean "Vector([...])".
        body = reprlib.repr(self._components)
        body = body[body.find('['):-1]
        return f'Vector({body})'

    def __str__(self):
        return str(tuple(self))

    def __bytes__(self):
        # First byte records the typecode, the rest are the packed components.
        return bytes([ord(self.typecode)]) + bytes(self._components)

    def __eq__(self, other):
        return tuple(self) == tuple(other)

    def __abs__(self):
        return math.hypot(*self)

    def __bool__(self):
        return bool(abs(self))

    def __len__(self):
        return len(self._components)

    def __getitem__(self, key):
        if isinstance(key, slice):
            # Slices produce a new vector of the same concrete type,
            # so subclasses slice to themselves.
            return type(self)(self._components[key])
        # operator.index rejects non-integer keys with a TypeError.
        return self._components[operator.index(key)]

    @classmethod
    def frombytes(cls, octets):
        """Rebuild a vector from the output of ``bytes(v)``."""
        typecode = chr(octets[0])
        memv = memoryview(octets[1:]).cast(typecode)
        return cls(memv)
Vector
python
walkccc__LeetCode
solutions/1983. Widest Pair of Indices With Equal Range Sum/1983.py
{ "start": 0, "end": 311 }
class ____:
  def widestPairOfIndices(self, nums1: list[int], nums2: list[int]) -> int:
    """Return the width of the widest index window (i..j) over which both
    arrays have the same range sum, i.e. sum(nums1[i..j]) == sum(nums2[i..j]).

    Runs in O(n) time and O(n) space using prefix sums of the element-wise
    difference: a repeated running difference brackets a balanced window.
    """
    best = 0
    running = 0
    # Earliest index at which each running difference was seen; the sentinel
    # 0 -> -1 lets a window starting at index 0 count its full width.
    first_seen = {0: -1}

    for idx, (a, b) in enumerate(zip(nums1, nums2)):
      running += a - b
      if running in first_seen:
        best = max(best, idx - first_seen[running])
      else:
        first_seen[running] = idx

    return best
Solution
python
wandb__wandb
wandb/sdk/artifacts/_generated/project_artifact_type.py
{ "start": 253, "end": 343 }
class ____(GQLResult):
    """Typed result wrapper for a project-scoped artifact-type GraphQL query."""

    # NOTE(review): None presumably means the query matched no project —
    # confirm against the GraphQL operation that produces this result.
    project: Optional[ProjectArtifactTypeProject]
ProjectArtifactType
python
automl__auto-sklearn
autosklearn/metalearning/input/aslib_simple.py
{ "start": 183, "end": 5965 }
class ____(object): def __init__(self, directory: str, cs: ConfigurationSpace): self.logger = logging.getLogger(__name__) # Create data structures self.cs = cs self.dir_ = directory self.algorithm_runs = None self.configurations = None self.metafeatures = None self.read_funcs = { # "description.txt": self._read_description, "algorithm_runs.arff": self._read_algorithm_runs, # "feature_costs.arff": self._read_feature_costs, "feature_values.arff": self._read_feature_values, # "feature_runstatus.arff": self._read_feature_runstatus, # "ground_truth.arff": self._read_ground_truth, # "cv.arff": self._read_cv, "configurations.csv": self._read_configurations, } self.found_files = [] # Read ASLib files self._find_files() self._read_files() def _find_files(self): """ find all expected files in self.dir_ fills self.found_files """ expected = [ # "description.txt", "algorithm_runs.arff", "feature_values.arff", # "feature_runstatus.arff", ] optional = [ "ground_truth.arff", "feature_costs.arff", "citation.bib", "cv.arff", "configurations.csv", ] for expected_file in expected: full_path = os.path.join(self.dir_, expected_file) if not os.path.isfile(full_path): self.logger.error("Not found: %s (has to be added)" % (full_path)) else: self.found_files.append(full_path) for expected_file in optional: full_path = os.path.join(self.dir_, expected_file) if not os.path.isfile(full_path): # self.logger.warning( # "Not found: %s (maybe you want to add it)" % (full_path)) pass else: self.found_files.append(full_path) def _read_files(self): """ iterates over all found files (self.found_files) and calls the corresponding function to validate file """ for file_ in self.found_files: read_func = self.read_funcs.get(os.path.basename(file_)) if read_func: read_func(file_) def _read_algorithm_runs(self, filename): with open(filename) as fh: arff_dict = arff.load(fh) if arff_dict["attributes"][0][0].upper() != "INSTANCE_ID": self.logger.error( "instance_id as first attribute is missing in %s" % 
(filename) ) if arff_dict["attributes"][1][0].upper() != "REPETITION": self.logger.error( "repetition as second attribute is missing in %s" % (filename) ) if arff_dict["attributes"][2][0].upper() != "ALGORITHM": self.logger.error( "algorithm as third attribute is missing in %s" % (filename) ) performance_measures = [pm[0] for pm in arff_dict["attributes"][3:-1]] measure_instance_algorithm_triples = defaultdict(lambda: defaultdict(dict)) for data in arff_dict["data"]: inst_name = str(data[0]) # repetition = data[1] algorithm = str(data[2]) perf_list = data[3:-1] status = data[-1] if status != "ok": continue for i, performance_measure in enumerate(performance_measures): measure_instance_algorithm_triples[performance_measure][inst_name][ algorithm ] = perf_list[i] # TODO: this does not support any repetitions! measure_algorithm_matrices = OrderedDict() for pm in performance_measures: measure_algorithm_matrices[pm] = pd.DataFrame( measure_instance_algorithm_triples[pm] ).transpose() self.algorithm_runs = measure_algorithm_matrices def _read_feature_values(self, filename): with open(filename) as fh: arff_dict = arff.load(fh) metafeatures = dict() for data in arff_dict["data"]: inst_name = data[0] # repetition = data[1] features = data[2:] metafeatures[inst_name] = { feature[0]: feature_value for feature, feature_value in zip(arff_dict["attributes"][2:], features) } self.metafeatures = pd.DataFrame(metafeatures).transpose() def _read_configurations(self, filename): with open(filename) as fh: csv_reader = csv.DictReader(fh) configurations = dict() hp_names = self.cs.get_hyperparameter_names() for line in csv_reader: configuration = dict() algorithm_id = line["idx"] for hp_name, value in line.items(): if not value or hp_name == "idx": continue if hp_name not in hp_names: # skip hyperparameter # if it is not existing in the current search space continue try: value = int(value) except Exception: try: value = float(value) except Exception: pass configuration[hp_name] = value 
configurations[algorithm_id] = configuration self.configurations = configurations
AlgorithmSelectionProblem
python
facebook__pyre-check
source/interprocedural_analyses/taint/test/integration/class_interval.py
{ "start": 2087, "end": 2242 }
class ____(B4): def m0(self, x): self.m1(x) def source_two_hops(d: D4): d.m0(_test_source()) """ A5 / \ B5 C5 | D5 """
D4
python
scikit-learn__scikit-learn
sklearn/utils/tests/test_testing.py
{ "start": 8105, "end": 8355 }
class ____:
    """Minimal estimator stub: fit/predict echo their input, score is constant.

    Useful for exercising scorer and pipeline plumbing without any real model.
    """

    def __init__(self):
        """MockEstimator"""

    def fit(self, X, y):
        # Targets are deliberately ignored; hand the data straight back.
        return X

    def predict(self, X):
        return X

    def predict_proba(self, X):
        return X

    def score(self, X):
        # A fixed "perfect" score regardless of input.
        return 1.0
MockEst
python
airbytehq__airbyte
airbyte-integrations/connectors/source-tplcentral/source_tplcentral/streams.py
{ "start": 3051, "end": 3445 }
class ____(TplcentralStream):
    """Stream of customer records from the 3PL Central API."""

    # API reference: https://api.3plcentral.com/rels/customers/customers
    # Dotted path to the primary-key field inside each upstream record.
    upstream_primary_key = "ReadOnly.CustomerId"
    # Response field that holds the list of records.
    collection_field = "ResourceList"
    # Records requested per page — presumably the API's maximum; confirm with the API docs.
    page_size = 100

    def path(
        self, stream_state: Mapping[str, Any] = None, stream_slice: Mapping[str, Any] = None, next_page_token: Mapping[str, Any] = None
    ) -> str:
        """Return the endpoint path for this stream, relative to the API base URL."""
        return "customers"
Customers
python
Lightning-AI__lightning
src/lightning/pytorch/trainer/connectors/data_connector.py
{ "start": 12317, "end": 15268 }
class ____: """Stores the information where the dataloaders come from. The source can be 1. from a ``*_dataloader()`` method on the :class:`~lightning.pytorch.core.LightningModule`, 2. from a ``*_dataloader()`` method on the :class:`~lightning.pytorch.core.datamodule.LightningDataModule`, 3. a direct instance of a :class:`~torch.utils.data.DataLoader` or supported collections thereof. Arguments: instance: A LightningModule, LightningDataModule, or (a collection of) iterable(s). name: A name for this dataloader source. If the instance is a module, the name corresponds to the hook that returns the desired dataloader(s). """ instance: Optional[Union[TRAIN_DATALOADERS, EVAL_DATALOADERS, "pl.LightningModule", "pl.LightningDataModule"]] name: str def dataloader(self) -> Union[TRAIN_DATALOADERS, EVAL_DATALOADERS]: """Returns the dataloader from the source. If the source is a module, the method with the corresponding :attr:`name` gets called. """ if isinstance(self.instance, pl.LightningModule): return call._call_lightning_module_hook(self.instance.trainer, self.name, pl_module=self.instance) if isinstance(self.instance, pl.LightningDataModule): assert self.instance.trainer is not None return call._call_lightning_datamodule_hook(self.instance.trainer, self.name) assert self.instance is not None return self.instance def is_defined(self) -> bool: """Returns whether the source dataloader can be retrieved or not. If the source is a module it checks that the method with given :attr:`name` is overridden. """ return not self.is_module() or is_overridden(self.name, self.instance) def is_module(self) -> bool: """Returns whether the DataLoader source is a LightningModule or a LightningDataModule. It does not check whether ``*_dataloader`` methods are actually overridden. 
""" return isinstance(self.instance, (pl.LightningModule, pl.LightningDataModule)) def _request_dataloader(data_source: _DataLoaderSource) -> Union[TRAIN_DATALOADERS, EVAL_DATALOADERS]: """Requests a dataloader by calling dataloader hooks corresponding to the given stage. Returns: The requested dataloader """ with _replace_dunder_methods(DataLoader, "dataset"), _replace_dunder_methods(BatchSampler): # under this context manager, the arguments passed to `DataLoader.__init__` will be captured and saved as # attributes on the instance in case the dataloader needs to be re-instantiated later by Lightning. # Also, it records all attribute setting and deletion using patched `__setattr__` and `__delattr__` # methods so that the re-instantiated object is as close to the original as possible. return data_source.dataloader() @dataclass
_DataLoaderSource
python
altair-viz__altair
altair/vegalite/v6/schema/core.py
{ "start": 199107, "end": 199281 }
class ____(VegaLiteSchema):
    """Blend schema wrapper.

    Wraps a value to be validated against the ``Blend`` definition of the
    bundled Vega-Lite JSON schema.
    """

    # Pointer into the Vega-Lite JSON schema used for validation.
    _schema = {"$ref": "#/definitions/Blend"}

    def __init__(self, *args):
        super().__init__(*args)
Blend
python
has2k1__plotnine
tests/test_geom_boxplot.py
{ "start": 688, "end": 2865 }
class ____: p = ( ggplot(data, aes("x")) + geom_boxplot(aes(y="y"), size=2) + geom_boxplot(data[: 2 * m], aes(y="y+25", fill="x"), size=2) + geom_boxplot(data[2 * m :], aes(y="y+30", color="x"), size=2) + geom_boxplot(data[2 * m :], aes(y="y+55", linetype="x"), size=2) ) def test_aesthetics(self): assert self.p == "aesthetics" def test_aesthetics_coordatalip(self): assert self.p + coord_flip() == "aesthetics+coord_flip" def test_params(): p = ( ggplot(data, aes("x")) + geom_boxplot(data[:m], aes(y="y"), size=2, notch=True) + geom_boxplot( data[m : 2 * m], aes(y="y"), size=2, notch=True, notchwidth=0.8 ) + # outliers geom_boxplot( data[2 * m : 3 * m], aes(y="y"), size=2, outlier_size=4, outlier_color="green", ) + geom_boxplot( data[2 * m : 3 * m], aes(y="y+25"), size=2, outlier_size=4, outlier_alpha=0.5, ) + geom_boxplot( data[2 * m : 3 * m], aes(y="y+60"), size=2, outlier_size=4, outlier_shape="D", ) # position dodge + geom_boxplot(data[3 * m : 4 * m], aes(y="y", fill="factor(y%2)")) ) assert p == "params" def test_position_nudge(): p = ggplot(data, aes("x", "y")) + geom_boxplot( position=position_nudge(x=-0.1), size=2 ) assert p == "position_nudge" def test_weight(): # The boxes of the two plots should differ slightly due to the # method used to calculate weighted percentiles. There is no # standard method for calculating weighted percentiles. data = pd.DataFrame( { "x": list("a" * 11 + "b" * 5), "y": np.hstack( [[1, 2, 2, 3, 3, 3, 4, 4, 4, 4, 15], [1, 2, 3, 4, 15]] ), "weight": np.hstack([np.ones(11), [1, 2, 3, 4, 1]]), } ) p = ggplot(data, aes(x="x", y="y", weight="weight")) + geom_boxplot() assert p == "weight"
TestAesthetics
python
numpy__numpy
numpy/_core/tests/test_records.py
{ "start": 307, "end": 13344 }
class ____: def test_fromrecords(self): r = np.rec.fromrecords([[456, 'dbe', 1.2], [2, 'de', 1.3]], names='col1,col2,col3') assert_equal(r[0].item(), (456, 'dbe', 1.2)) assert_equal(r['col1'].dtype.kind, 'i') assert_equal(r['col2'].dtype.kind, 'U') assert_equal(r['col2'].dtype.itemsize, 12) assert_equal(r['col3'].dtype.kind, 'f') def test_fromrecords_0len(self): """ Verify fromrecords works with a 0-length input """ dtype = [('a', float), ('b', float)] r = np.rec.fromrecords([], dtype=dtype) assert_equal(r.shape, (0,)) def test_fromrecords_2d(self): data = [ [(1, 2), (3, 4), (5, 6)], [(6, 5), (4, 3), (2, 1)] ] expected_a = [[1, 3, 5], [6, 4, 2]] expected_b = [[2, 4, 6], [5, 3, 1]] # try with dtype r1 = np.rec.fromrecords(data, dtype=[('a', int), ('b', int)]) assert_equal(r1['a'], expected_a) assert_equal(r1['b'], expected_b) # try with names r2 = np.rec.fromrecords(data, names=['a', 'b']) assert_equal(r2['a'], expected_a) assert_equal(r2['b'], expected_b) assert_equal(r1, r2) def test_method_array(self): r = np.rec.array( b'abcdefg' * 100, formats='i2,S3,i4', shape=3, byteorder='big' ) assert_equal(r[1].item(), (25444, b'efg', 1633837924)) def test_method_array2(self): r = np.rec.array( [ (1, 11, 'a'), (2, 22, 'b'), (3, 33, 'c'), (4, 44, 'd'), (5, 55, 'ex'), (6, 66, 'f'), (7, 77, 'g') ], formats='u1,f4,S1' ) assert_equal(r[1].item(), (2, 22.0, b'b')) def test_recarray_slices(self): r = np.rec.array( [ (1, 11, 'a'), (2, 22, 'b'), (3, 33, 'c'), (4, 44, 'd'), (5, 55, 'ex'), (6, 66, 'f'), (7, 77, 'g') ], formats='u1,f4,S1' ) assert_equal(r[1::2][1].item(), (4, 44.0, b'd')) def test_recarray_fromarrays(self): x1 = np.array([1, 2, 3, 4]) x2 = np.array(['a', 'dd', 'xyz', '12']) x3 = np.array([1.1, 2, 3, 4]) r = np.rec.fromarrays([x1, x2, x3], names='a,b,c') assert_equal(r[1].item(), (2, 'dd', 2.0)) x1[1] = 34 assert_equal(r.a, np.array([1, 2, 3, 4])) def test_recarray_fromfile(self): data_dir = path.join(path.dirname(__file__), 'data') filename = path.join(data_dir, 
'recarray_from_file.fits') fd = open(filename, 'rb') fd.seek(2880 * 2) r1 = np.rec.fromfile(fd, formats='f8,i4,S5', shape=3, byteorder='big') fd.seek(2880 * 2) r2 = np.rec.array(fd, formats='f8,i4,S5', shape=3, byteorder='big') fd.seek(2880 * 2) bytes_array = BytesIO() bytes_array.write(fd.read()) bytes_array.seek(0) r3 = np.rec.fromfile( bytes_array, formats='f8,i4,S5', shape=3, byteorder='big' ) fd.close() assert_equal(r1, r2) assert_equal(r2, r3) def test_recarray_from_obj(self): count = 10 a = np.zeros(count, dtype='O') b = np.zeros(count, dtype='f8') c = np.zeros(count, dtype='f8') for i in range(len(a)): a[i] = list(range(1, 10)) mine = np.rec.fromarrays([a, b, c], names='date,data1,data2') for i in range(len(a)): assert_(mine.date[i] == list(range(1, 10))) assert_(mine.data1[i] == 0.0) assert_(mine.data2[i] == 0.0) def test_recarray_repr(self): a = np.array([(1, 0.1), (2, 0.2)], dtype=[('foo', '<i4'), ('bar', '<f8')]) a = np.rec.array(a) assert_equal( repr(a), textwrap.dedent("""\ rec.array([(1, 0.1), (2, 0.2)], dtype=[('foo', '<i4'), ('bar', '<f8')])""") ) # make sure non-structured dtypes also show up as rec.array a = np.array(np.ones(4, dtype='f8')) assert_(repr(np.rec.array(a)).startswith('rec.array')) # check that the 'np.record' part of the dtype isn't shown a = np.rec.array(np.ones(3, dtype='i4,i4')) assert_equal(repr(a).find('numpy.record'), -1) a = np.rec.array(np.ones(3, dtype='i4')) assert_(repr(a).find('dtype=int32') != -1) def test_0d_recarray_repr(self): arr_0d = np.rec.array((1, 2.0, '2003'), dtype='<i4,<f8,<M8[Y]') assert_equal(repr(arr_0d), textwrap.dedent("""\ rec.array((1, 2., '2003'), dtype=[('f0', '<i4'), ('f1', '<f8'), ('f2', '<M8[Y]')])""")) record = arr_0d[()] assert_equal(repr(record), "np.record((1, 2.0, '2003'), " "dtype=[('f0', '<i4'), ('f1', '<f8'), ('f2', '<M8[Y]')])") # 1.13 converted to python scalars before the repr try: np.set_printoptions(legacy='1.13') assert_equal(repr(record), '(1, 2.0, datetime.date(2003, 1, 1))') 
finally: np.set_printoptions(legacy=False) def test_recarray_from_repr(self): a = np.array([(1, 'ABC'), (2, "DEF")], dtype=[('foo', int), ('bar', 'S4')]) recordarr = np.rec.array(a) recarr = a.view(np.recarray) recordview = a.view(np.dtype((np.record, a.dtype))) recordarr_r = eval("np." + repr(recordarr), {'np': np}) recarr_r = eval("np." + repr(recarr), {'np': np}) # Prints the type `numpy.record` as part of the dtype: recordview_r = eval("np." + repr(recordview), {'np': np, 'numpy': np}) assert_equal(type(recordarr_r), np.recarray) assert_equal(recordarr_r.dtype.type, np.record) assert_equal(recordarr, recordarr_r) assert_equal(type(recarr_r), np.recarray) assert_equal(recarr_r.dtype.type, np.record) assert_equal(recarr, recarr_r) assert_equal(type(recordview_r), np.ndarray) assert_equal(recordview.dtype.type, np.record) assert_equal(recordview, recordview_r) def test_recarray_views(self): a = np.array([(1, 'ABC'), (2, "DEF")], dtype=[('foo', int), ('bar', 'S4')]) b = np.array([1, 2, 3, 4, 5], dtype=np.int64) # check that np.rec.array gives right dtypes assert_equal(np.rec.array(a).dtype.type, np.record) assert_equal(type(np.rec.array(a)), np.recarray) assert_equal(np.rec.array(b).dtype.type, np.int64) assert_equal(type(np.rec.array(b)), np.recarray) # check that viewing as recarray does the same assert_equal(a.view(np.recarray).dtype.type, np.record) assert_equal(type(a.view(np.recarray)), np.recarray) assert_equal(b.view(np.recarray).dtype.type, np.int64) assert_equal(type(b.view(np.recarray)), np.recarray) # check that view to non-structured dtype preserves type=np.recarray r = np.rec.array(np.ones(4, dtype="f4,i4")) rv = r.view('f8').view('f4,i4') assert_equal(type(rv), np.recarray) assert_equal(rv.dtype.type, np.record) # check that getitem also preserves np.recarray and np.record r = np.rec.array(np.ones(4, dtype=[('a', 'i4'), ('b', 'i4'), ('c', 'i4,i4')])) assert_equal(r['c'].dtype.type, np.record) assert_equal(type(r['c']), np.recarray) # and that it 
preserves subclasses (gh-6949) class C(np.recarray): pass c = r.view(C) assert_equal(type(c['c']), C) # check that accessing nested structures keep record type, but # not for subarrays, non-void structures, non-structured voids test_dtype = [('a', 'f4,f4'), ('b', 'V8'), ('c', ('f4', 2)), ('d', ('i8', 'i4,i4'))] r = np.rec.array([((1, 1), b'11111111', [1, 1], 1), ((1, 1), b'11111111', [1, 1], 1)], dtype=test_dtype) assert_equal(r.a.dtype.type, np.record) assert_equal(r.b.dtype.type, np.void) assert_equal(r.c.dtype.type, np.float32) assert_equal(r.d.dtype.type, np.int64) # check the same, but for views r = np.rec.array(np.ones(4, dtype='i4,i4')) assert_equal(r.view('f4,f4').dtype.type, np.record) assert_equal(r.view(('i4', 2)).dtype.type, np.int32) assert_equal(r.view('V8').dtype.type, np.void) assert_equal(r.view(('i8', 'i4,i4')).dtype.type, np.int64) # check that we can undo the view arrs = [np.ones(4, dtype='f4,i4'), np.ones(4, dtype='f8')] for arr in arrs: rec = np.rec.array(arr) # recommended way to view as an ndarray: arr2 = rec.view(rec.dtype.fields or rec.dtype, np.ndarray) assert_equal(arr2.dtype.type, arr.dtype.type) assert_equal(type(arr2), type(arr)) def test_recarray_from_names(self): ra = np.rec.array([ (1, 'abc', 3.7000002861022949, 0), (2, 'xy', 6.6999998092651367, 1), (0, ' ', 0.40000000596046448, 0)], names='c1, c2, c3, c4') pa = np.rec.fromrecords([ (1, 'abc', 3.7000002861022949, 0), (2, 'xy', 6.6999998092651367, 1), (0, ' ', 0.40000000596046448, 0)], names='c1, c2, c3, c4') assert_(ra.dtype == pa.dtype) assert_(ra.shape == pa.shape) for k in range(len(ra)): assert_(ra[k].item() == pa[k].item()) def test_recarray_conflict_fields(self): ra = np.rec.array([(1, 'abc', 2.3), (2, 'xyz', 4.2), (3, 'wrs', 1.3)], names='field, shape, mean') ra.mean = [1.1, 2.2, 3.3] assert_array_almost_equal(ra['mean'], [1.1, 2.2, 3.3]) assert_(type(ra.mean) is type(ra.var)) ra.shape = (1, 3) assert_(ra.shape == (1, 3)) ra.shape = ['A', 'B', 'C'] 
assert_array_equal(ra['shape'], [['A', 'B', 'C']]) ra.field = 5 assert_array_equal(ra['field'], [[5, 5, 5]]) assert_(isinstance(ra.field, collections.abc.Callable)) def test_fromrecords_with_explicit_dtype(self): a = np.rec.fromrecords([(1, 'a'), (2, 'bbb')], dtype=[('a', int), ('b', object)]) assert_equal(a.a, [1, 2]) assert_equal(a[0].a, 1) assert_equal(a.b, ['a', 'bbb']) assert_equal(a[-1].b, 'bbb') # ndtype = np.dtype([('a', int), ('b', object)]) a = np.rec.fromrecords([(1, 'a'), (2, 'bbb')], dtype=ndtype) assert_equal(a.a, [1, 2]) assert_equal(a[0].a, 1) assert_equal(a.b, ['a', 'bbb']) assert_equal(a[-1].b, 'bbb') def test_recarray_stringtypes(self): # Issue #3993 a = np.array([('abc ', 1), ('abc', 2)], dtype=[('foo', 'S4'), ('bar', int)]) a = a.view(np.recarray) assert_equal(a.foo[0] == a.foo[1], False) def test_recarray_returntypes(self): qux_fields = {'C': (np.dtype('S5'), 0), 'D': (np.dtype('S5'), 6)} a = np.rec.array([('abc ', (1, 1), 1, ('abcde', 'fgehi')), ('abc', (2, 3), 1, ('abcde', 'jklmn'))], dtype=[('foo', 'S4'), ('bar', [('A', int), ('B', int)]), ('baz', int), ('qux', qux_fields)]) assert_equal(type(a.foo), np.ndarray) assert_equal(type(a['foo']), np.ndarray) assert_equal(type(a.bar), np.recarray) assert_equal(type(a['bar']), np.recarray) assert_equal(a.bar.dtype.type, np.record) assert_equal(type(a['qux']), np.recarray) assert_equal(a.qux.dtype.type, np.record) assert_equal(dict(a.qux.dtype.fields), qux_fields) assert_equal(type(a.baz), np.ndarray) assert_equal(type(a['baz']), np.ndarray) assert_equal(type(a[0].bar), np.record) assert_equal(type(a[0]['bar']), np.record) assert_equal(a[0].bar.A, 1) assert_equal(a[0].bar['A'], 1) assert_equal(a[0]['bar'].A, 1) assert_equal(a[0]['bar']['A'], 1) assert_equal(a[0].qux.D, b'fgehi') assert_equal(a[0].qux['D'], b'fgehi') assert_equal(a[0]['qux'].D, b'fgehi') assert_equal(a[0]['qux']['D'], b'fgehi') def test_zero_width_strings(self): # Test for #6430, based on the test case from #1901 cols = [['test'] * 
3, [''] * 3] rec = np.rec.fromarrays(cols) assert_equal(rec['f0'], ['test', 'test', 'test']) assert_equal(rec['f1'], ['', '', '']) dt = np.dtype([('f0', '|S4'), ('f1', '|S')]) rec = np.rec.fromarrays(cols, dtype=dt) assert_equal(rec.itemsize, 4) assert_equal(rec['f0'], [b'test', b'test', b'test']) assert_equal(rec['f1'], [b'', b'', b''])
TestFromrecords
python
Textualize__textual
src/textual/widgets/_markdown.py
{ "start": 22497, "end": 22573 }
class ____(MarkdownBlock):
    """A table header Markdown block."""

    # NOTE(review): presumably one header cell (<th>) of a Markdown table —
    # confirm against the table renderer that instantiates this block.
MarkdownTH
python
google__jax
tests/tree_util_test.py
{ "start": 4606, "end": 4668 }
class ____(tuple): pass @tree_util.register_static
StaticTuple
python
EpistasisLab__tpot
tpot/builtin_modules/nn.py
{ "start": 2859, "end": 3542 }
class ____(BaseEstimator):
    """Shared scaffolding for the Pytorch-backed estimators used in TPOT.

    Concrete subclasses supply ``fit`` and ``transform``; prediction, the
    fit-then-transform shortcut, and parameter assignment are derived from
    those two.
    """

    @abstractmethod
    def fit(self, X, y):  # pragma: no cover
        pass

    @abstractmethod
    def transform(self, X):  # pragma: no cover
        pass

    def predict(self, X):
        # Prediction simply delegates to the subclass-provided transform.
        return self.transform(X)

    def fit_transform(self, X, y):
        self.fit(X, y)
        return self.transform(X)

    def set_params(self, **parameters):
        # Mirror sklearn's set_params contract: assign every keyword as an
        # attribute and return self so calls can be chained.
        for name, value in parameters.items():
            setattr(self, name, value)
        return self
PytorchEstimator
python
tensorflow__tensorflow
tensorflow/python/kernel_tests/distributions/util_test.py
{ "start": 18991, "end": 19681 }
class ____(test.TestCase):
  """Tests for distribution_util.pick_vector."""

  @test_util.run_deprecated_v1
  def testCorrectlyPicksVector(self):
    with self.cached_session():
      x = np.arange(10, 12)
      y = np.arange(15, 18)
      # A true predicate selects the first vector, a false one the second.
      self.assertAllEqual(
          x, self.evaluate(du.pick_vector(math_ops.less(0, 5), x, y)))
      self.assertAllEqual(
          y, self.evaluate(du.pick_vector(math_ops.less(5, 0), x, y)))
      # With a constant predicate the result is usable without a session
      # run — presumably pick_vector resolves statically; confirm in du.
      self.assertAllEqual(x, du.pick_vector(
          constant_op.constant(True), x, y))  # No eval.
      self.assertAllEqual(y, du.pick_vector(
          constant_op.constant(False), x, y))  # No eval.
PickVectorTest
python
run-llama__llama_index
llama-index-integrations/retrievers/llama-index-retrievers-tldw/llama_index/retrievers/tldw/base.py
{ "start": 405, "end": 594 }
class ____(BaseModel):
    """Represents a fragment of a video scene with metadata."""

    # Unique identifier of the fragment — presumably assigned by the
    # retriever backend; confirm upstream.
    uuid: str
    # Start/end offsets of the fragment within the video, in milliseconds.
    start_ms: float
    end_ms: float
    # Relevance score of the fragment — value range not shown here; confirm upstream.
    similarity: float
    # Human-readable description of the fragment's content.
    description: str
Fragment
python
matplotlib__matplotlib
lib/matplotlib/collections.py
{ "start": 65099, "end": 65285 }
class ____(RegularPolyCollection):
    """Draw a collection of regular asterisks with *numsides* points."""

    # Marker outlines are generated from the unit regular asterisk path.
    _path_generator = mpath.Path.unit_regular_asterisk
AsteriskPolygonCollection
python
pypa__setuptools
setuptools/command/build_ext.py
{ "start": 2879, "end": 18505 }
class ____(_build_ext): distribution: Distribution # override distutils.dist.Distribution with setuptools.dist.Distribution editable_mode = False inplace = False def run(self) -> None: """Build extensions in build directory, then copy if --inplace""" old_inplace, self.inplace = self.inplace, False _build_ext.run(self) self.inplace = old_inplace if old_inplace: self.copy_extensions_to_source() def _get_inplace_equivalent(self, build_py, ext: Extension) -> tuple[str, str]: fullname = self.get_ext_fullname(ext.name) filename = self.get_ext_filename(fullname) modpath = fullname.split('.') package = '.'.join(modpath[:-1]) package_dir = build_py.get_package_dir(package) inplace_file = os.path.join(package_dir, os.path.basename(filename)) regular_file = os.path.join(self.build_lib, filename) return (inplace_file, regular_file) def copy_extensions_to_source(self) -> None: build_py = self.get_finalized_command('build_py') for ext in self.extensions: inplace_file, regular_file = self._get_inplace_equivalent(build_py, ext) # Always copy, even if source is older than destination, to ensure # that the right extensions for the current Python/platform are # used. 
if os.path.exists(regular_file) or not ext.optional: self.copy_file(regular_file, inplace_file, level=self.verbose) if ext._needs_stub: inplace_stub = self._get_equivalent_stub(ext, inplace_file) self._write_stub_file(inplace_stub, ext, compile=True) # Always compile stub and remove the original (leave the cache behind) # (this behaviour was observed in previous iterations of the code) def _get_equivalent_stub(self, ext: Extension, output_file: str) -> str: dir_ = os.path.dirname(output_file) _, _, name = ext.name.rpartition(".") return f"{os.path.join(dir_, name)}.py" def _get_output_mapping(self) -> Iterator[tuple[str, str]]: if not self.inplace: return build_py = self.get_finalized_command('build_py') opt = self.get_finalized_command('install_lib').optimize or "" for ext in self.extensions: inplace_file, regular_file = self._get_inplace_equivalent(build_py, ext) yield (regular_file, inplace_file) if ext._needs_stub: # This version of `build_ext` always builds artifacts in another dir, # when "inplace=True" is given it just copies them back. # This is done in the `copy_extensions_to_source` function, which # always compile stub files via `_compile_and_remove_stub`. # At the end of the process, a `.pyc` stub file is created without the # corresponding `.py`. 
inplace_stub = self._get_equivalent_stub(ext, inplace_file) regular_stub = self._get_equivalent_stub(ext, regular_file) inplace_cache = _compiled_file_name(inplace_stub, optimization=opt) output_cache = _compiled_file_name(regular_stub, optimization=opt) yield (output_cache, inplace_cache) def get_ext_filename(self, fullname: str) -> str: so_ext = os.getenv('SETUPTOOLS_EXT_SUFFIX') if so_ext: filename = os.path.join(*fullname.split('.')) + so_ext else: filename = _build_ext.get_ext_filename(self, fullname) ext_suffix = get_config_var('EXT_SUFFIX') if not isinstance(ext_suffix, str): raise OSError( "Configuration variable EXT_SUFFIX not found for this platform " "and environment variable SETUPTOOLS_EXT_SUFFIX is missing" ) so_ext = ext_suffix if fullname in self.ext_map: ext = self.ext_map[fullname] abi3_suffix = get_abi3_suffix() if ext.py_limited_api and abi3_suffix: # Use abi3 filename = filename[: -len(so_ext)] + abi3_suffix if isinstance(ext, Library): fn, ext = os.path.splitext(filename) return self.shlib_compiler.library_filename(fn, libtype) elif use_stubs and ext._links_to_dynamic: d, fn = os.path.split(filename) return os.path.join(d, 'dl-' + fn) return filename def initialize_options(self): _build_ext.initialize_options(self) self.shlib_compiler = None self.shlibs = [] self.ext_map = {} self.editable_mode = False def finalize_options(self) -> None: _build_ext.finalize_options(self) self.extensions = self.extensions or [] self.check_extensions_list(self.extensions) self.shlibs = [ext for ext in self.extensions if isinstance(ext, Library)] if self.shlibs: self.setup_shlib_compiler() for ext in self.extensions: ext._full_name = self.get_ext_fullname(ext.name) for ext in self.extensions: fullname = ext._full_name self.ext_map[fullname] = ext # distutils 3.1 will also ask for module names # XXX what to do with conflicts? 
self.ext_map[fullname.split('.')[-1]] = ext ltd = self.shlibs and self.links_to_dynamic(ext) or False ns = ltd and use_stubs and not isinstance(ext, Library) ext._links_to_dynamic = ltd ext._needs_stub = ns filename = ext._file_name = self.get_ext_filename(fullname) libdir = os.path.dirname(os.path.join(self.build_lib, filename)) if ltd and libdir not in ext.library_dirs: ext.library_dirs.append(libdir) if ltd and use_stubs and os.curdir not in ext.runtime_library_dirs: ext.runtime_library_dirs.append(os.curdir) if self.editable_mode: self.inplace = True def setup_shlib_compiler(self) -> None: compiler = self.shlib_compiler = new_compiler( compiler=self.compiler, dry_run=self.dry_run, force=self.force ) _customize_compiler_for_shlib(compiler) if self.include_dirs is not None: compiler.set_include_dirs(self.include_dirs) if self.define is not None: # 'define' option is a list of (name,value) tuples for name, value in self.define: compiler.define_macro(name, value) if self.undef is not None: for macro in self.undef: compiler.undefine_macro(macro) if self.libraries is not None: compiler.set_libraries(self.libraries) if self.library_dirs is not None: compiler.set_library_dirs(self.library_dirs) if self.rpath is not None: compiler.set_runtime_library_dirs(self.rpath) if self.link_objects is not None: compiler.set_link_objects(self.link_objects) # hack so distutils' build_extension() builds a library instead compiler.link_shared_object = link_shared_object.__get__(compiler) # type: ignore[method-assign] def get_export_symbols(self, ext): if isinstance(ext, Library): return ext.export_symbols return _build_ext.get_export_symbols(self, ext) def build_extension(self, ext) -> None: ext._convert_pyx_sources_to_lang() _compiler = self.compiler try: if isinstance(ext, Library): self.compiler = self.shlib_compiler _build_ext.build_extension(self, ext) if ext._needs_stub: build_lib = self.get_finalized_command('build_py').build_lib self.write_stub(build_lib, ext) finally: 
self.compiler = _compiler def links_to_dynamic(self, ext): """Return true if 'ext' links to a dynamic lib in the same package""" # XXX this should check to ensure the lib is actually being built # XXX as dynamic, and not just using a locally-found version or a # XXX static-compiled version libnames = dict.fromkeys([lib._full_name for lib in self.shlibs]) pkg = '.'.join(ext._full_name.split('.')[:-1] + ['']) return any(pkg + libname in libnames for libname in ext.libraries) def get_source_files(self) -> list[str]: return [*_build_ext.get_source_files(self), *self._get_internal_depends()] def _get_internal_depends(self) -> Iterator[str]: """Yield ``ext.depends`` that are contained by the project directory""" project_root = Path(self.distribution.src_root or os.curdir).resolve() depends = (dep for ext in self.extensions for dep in ext.depends) def skip(orig_path: str, reason: str) -> None: log.info( "dependency %s won't be automatically " "included in the manifest: the path %s", orig_path, reason, ) for dep in depends: path = Path(dep) if path.is_absolute(): skip(dep, "must be relative") continue if ".." 
in path.parts: skip(dep, "can't have `..` segments") continue try: resolved = (project_root / path).resolve(strict=True) except OSError: skip(dep, "doesn't exist") continue try: resolved.relative_to(project_root) except ValueError: skip(dep, "must be inside the project root") continue yield path.as_posix() def get_outputs(self) -> list[str]: if self.inplace: return list(self.get_output_mapping().keys()) return sorted(_build_ext.get_outputs(self) + self.__get_stubs_outputs()) def get_output_mapping(self) -> dict[str, str]: """See :class:`setuptools.commands.build.SubCommand`""" mapping = self._get_output_mapping() return dict(sorted(mapping, key=operator.itemgetter(0))) def __get_stubs_outputs(self): # assemble the base name for each extension that needs a stub ns_ext_bases = ( os.path.join(self.build_lib, *ext._full_name.split('.')) for ext in self.extensions if ext._needs_stub ) # pair each base with the extension pairs = itertools.product(ns_ext_bases, self.__get_output_extensions()) return list(base + fnext for base, fnext in pairs) def __get_output_extensions(self): yield '.py' yield '.pyc' if self.get_finalized_command('build_py').optimize: yield '.pyo' def write_stub(self, output_dir, ext, compile=False) -> None: stub_file = os.path.join(output_dir, *ext._full_name.split('.')) + '.py' self._write_stub_file(stub_file, ext, compile) def _write_stub_file(self, stub_file: str, ext: Extension, compile=False): log.info("writing stub loader for %s to %s", ext._full_name, stub_file) if compile and os.path.exists(stub_file): raise BaseError(stub_file + " already exists! 
Please delete.") if not self.dry_run: with open(stub_file, 'w', encoding="utf-8") as f: content = ( textwrap.dedent(f""" def __bootstrap__(): global __bootstrap__, __file__, __loader__ import sys, os, importlib.resources as irs, importlib.util #rtld import dl with irs.files(__name__).joinpath( {os.path.basename(ext._file_name)!r}) as __file__: del __bootstrap__ if '__loader__' in globals(): del __loader__ #rtld old_flags = sys.getdlopenflags() old_dir = os.getcwd() try: os.chdir(os.path.dirname(__file__)) #rtld sys.setdlopenflags(dl.RTLD_NOW) spec = importlib.util.spec_from_file_location( __name__, __file__) mod = importlib.util.module_from_spec(spec) spec.loader.exec_module(mod) finally: #rtld sys.setdlopenflags(old_flags) os.chdir(old_dir) __bootstrap__() """) .lstrip() .replace('#rtld', '#rtld' * (not have_rtld)) ) f.write(content) if compile: self._compile_and_remove_stub(stub_file) def _compile_and_remove_stub(self, stub_file: str): from distutils.util import byte_compile byte_compile([stub_file], optimize=0, force=True, dry_run=self.dry_run) optimize = self.get_finalized_command('install_lib').optimize if optimize > 0: byte_compile( [stub_file], optimize=optimize, force=True, dry_run=self.dry_run, ) if os.path.exists(stub_file) and not self.dry_run: os.unlink(stub_file) if use_stubs or os.name == 'nt': # Build shared libraries # def link_shared_object( self, objects, output_libname, output_dir=None, libraries=None, library_dirs=None, runtime_library_dirs=None, export_symbols=None, debug: bool = False, extra_preargs=None, extra_postargs=None, build_temp=None, target_lang=None, ) -> None: self.link( self.SHARED_LIBRARY, objects, output_libname, output_dir, libraries, library_dirs, runtime_library_dirs, export_symbols, debug, extra_preargs, extra_postargs, build_temp, target_lang, ) else: # Build static libraries everywhere else libtype = 'static' def link_shared_object( self, objects, output_libname, output_dir=None, libraries=None, library_dirs=None, 
runtime_library_dirs=None, export_symbols=None, debug: bool = False, extra_preargs=None, extra_postargs=None, build_temp=None, target_lang=None, ) -> None: # XXX we need to either disallow these attrs on Library instances, # or warn/abort here if set, or something... # libraries=None, library_dirs=None, runtime_library_dirs=None, # export_symbols=None, extra_preargs=None, extra_postargs=None, # build_temp=None assert output_dir is None # distutils build_ext doesn't pass this output_dir, filename = os.path.split(output_libname) basename, _ext = os.path.splitext(filename) if self.library_filename("x").startswith('lib'): # strip 'lib' prefix; this is kludgy if some platform uses # a different prefix basename = basename[3:] self.create_static_lib(objects, basename, output_dir, debug, target_lang)
build_ext
python
joke2k__faker
faker/providers/automotive/ar_BH/__init__.py
{ "start": 48, "end": 275 }
class ____(AutomotiveProvider): """Implement automotive provider for ``ar_BH`` locale. Source: - https://en.wikipedia.org/wiki/Vehicle_registration_plates_of_Bahrain """ license_formats = ("######",)
Provider
python
gevent__gevent
src/greentest/3.10/test_signal.py
{ "start": 28080, "end": 40369 }
class ____(unittest.TestCase): """ Test pthread_sigmask(), pthread_kill(), sigpending() and sigwait() functions. """ @unittest.skipUnless(hasattr(signal, 'sigpending'), 'need signal.sigpending()') def test_sigpending_empty(self): self.assertEqual(signal.sigpending(), set()) @unittest.skipUnless(hasattr(signal, 'pthread_sigmask'), 'need signal.pthread_sigmask()') @unittest.skipUnless(hasattr(signal, 'sigpending'), 'need signal.sigpending()') def test_sigpending(self): code = """if 1: import os import signal def handler(signum, frame): 1/0 signum = signal.SIGUSR1 signal.signal(signum, handler) signal.pthread_sigmask(signal.SIG_BLOCK, [signum]) os.kill(os.getpid(), signum) pending = signal.sigpending() for sig in pending: assert isinstance(sig, signal.Signals), repr(pending) if pending != {signum}: raise Exception('%s != {%s}' % (pending, signum)) try: signal.pthread_sigmask(signal.SIG_UNBLOCK, [signum]) except ZeroDivisionError: pass else: raise Exception("ZeroDivisionError not raised") """ assert_python_ok('-c', code) @unittest.skipUnless(hasattr(signal, 'pthread_kill'), 'need signal.pthread_kill()') def test_pthread_kill(self): code = """if 1: import signal import threading import sys signum = signal.SIGUSR1 def handler(signum, frame): 1/0 signal.signal(signum, handler) tid = threading.get_ident() try: signal.pthread_kill(tid, signum) except ZeroDivisionError: pass else: raise Exception("ZeroDivisionError not raised") """ assert_python_ok('-c', code) @unittest.skipUnless(hasattr(signal, 'pthread_sigmask'), 'need signal.pthread_sigmask()') def wait_helper(self, blocked, test): """ test: body of the "def test(signum):" function. 
blocked: number of the blocked signal """ code = '''if 1: import signal import sys from signal import Signals def handler(signum, frame): 1/0 %s blocked = %s signum = signal.SIGALRM # child: block and wait the signal try: signal.signal(signum, handler) signal.pthread_sigmask(signal.SIG_BLOCK, [blocked]) # Do the tests test(signum) # The handler must not be called on unblock try: signal.pthread_sigmask(signal.SIG_UNBLOCK, [blocked]) except ZeroDivisionError: print("the signal handler has been called", file=sys.stderr) sys.exit(1) except BaseException as err: print("error: {}".format(err), file=sys.stderr) sys.stderr.flush() sys.exit(1) ''' % (test.strip(), blocked) # sig*wait* must be called with the signal blocked: since the current # process might have several threads running, use a subprocess to have # a single thread. assert_python_ok('-c', code) @unittest.skipUnless(hasattr(signal, 'sigwait'), 'need signal.sigwait()') def test_sigwait(self): self.wait_helper(signal.SIGALRM, ''' def test(signum): signal.alarm(1) received = signal.sigwait([signum]) assert isinstance(received, signal.Signals), received if received != signum: raise Exception('received %s, not %s' % (received, signum)) ''') @unittest.skipUnless(hasattr(signal, 'sigwaitinfo'), 'need signal.sigwaitinfo()') def test_sigwaitinfo(self): self.wait_helper(signal.SIGALRM, ''' def test(signum): signal.alarm(1) info = signal.sigwaitinfo([signum]) if info.si_signo != signum: raise Exception("info.si_signo != %s" % signum) ''') @unittest.skipUnless(hasattr(signal, 'sigtimedwait'), 'need signal.sigtimedwait()') def test_sigtimedwait(self): self.wait_helper(signal.SIGALRM, ''' def test(signum): signal.alarm(1) info = signal.sigtimedwait([signum], 10.1000) if info.si_signo != signum: raise Exception('info.si_signo != %s' % signum) ''') @unittest.skipUnless(hasattr(signal, 'sigtimedwait'), 'need signal.sigtimedwait()') def test_sigtimedwait_poll(self): # check that polling with sigtimedwait works 
self.wait_helper(signal.SIGALRM, ''' def test(signum): import os os.kill(os.getpid(), signum) info = signal.sigtimedwait([signum], 0) if info.si_signo != signum: raise Exception('info.si_signo != %s' % signum) ''') @unittest.skipUnless(hasattr(signal, 'sigtimedwait'), 'need signal.sigtimedwait()') def test_sigtimedwait_timeout(self): self.wait_helper(signal.SIGALRM, ''' def test(signum): received = signal.sigtimedwait([signum], 1.0) if received is not None: raise Exception("received=%r" % (received,)) ''') @unittest.skipUnless(hasattr(signal, 'sigtimedwait'), 'need signal.sigtimedwait()') def test_sigtimedwait_negative_timeout(self): signum = signal.SIGALRM self.assertRaises(ValueError, signal.sigtimedwait, [signum], -1.0) @unittest.skipUnless(hasattr(signal, 'sigwait'), 'need signal.sigwait()') @unittest.skipUnless(hasattr(signal, 'pthread_sigmask'), 'need signal.pthread_sigmask()') def test_sigwait_thread(self): # Check that calling sigwait() from a thread doesn't suspend the whole # process. A new interpreter is spawned to avoid problems when mixing # threads and fork(): only async-safe functions are allowed between # fork() and exec(). 
assert_python_ok("-c", """if True: import os, threading, sys, time, signal # the default handler terminates the process signum = signal.SIGUSR1 def kill_later(): # wait until the main thread is waiting in sigwait() time.sleep(1) os.kill(os.getpid(), signum) # the signal must be blocked by all the threads signal.pthread_sigmask(signal.SIG_BLOCK, [signum]) killer = threading.Thread(target=kill_later) killer.start() received = signal.sigwait([signum]) if received != signum: print("sigwait() received %s, not %s" % (received, signum), file=sys.stderr) sys.exit(1) killer.join() # unblock the signal, which should have been cleared by sigwait() signal.pthread_sigmask(signal.SIG_UNBLOCK, [signum]) """) @unittest.skipUnless(hasattr(signal, 'pthread_sigmask'), 'need signal.pthread_sigmask()') def test_pthread_sigmask_arguments(self): self.assertRaises(TypeError, signal.pthread_sigmask) self.assertRaises(TypeError, signal.pthread_sigmask, 1) self.assertRaises(TypeError, signal.pthread_sigmask, 1, 2, 3) self.assertRaises(OSError, signal.pthread_sigmask, 1700, []) with self.assertRaises(ValueError): signal.pthread_sigmask(signal.SIG_BLOCK, [signal.NSIG]) with self.assertRaises(ValueError): signal.pthread_sigmask(signal.SIG_BLOCK, [0]) with self.assertRaises(ValueError): signal.pthread_sigmask(signal.SIG_BLOCK, [1<<1000]) @unittest.skipUnless(hasattr(signal, 'pthread_sigmask'), 'need signal.pthread_sigmask()') def test_pthread_sigmask_valid_signals(self): s = signal.pthread_sigmask(signal.SIG_BLOCK, signal.valid_signals()) self.addCleanup(signal.pthread_sigmask, signal.SIG_SETMASK, s) # Get current blocked set s = signal.pthread_sigmask(signal.SIG_UNBLOCK, signal.valid_signals()) self.assertLessEqual(s, signal.valid_signals()) @unittest.skipUnless(hasattr(signal, 'pthread_sigmask'), 'need signal.pthread_sigmask()') def test_pthread_sigmask(self): code = """if 1: import signal import os; import threading def handler(signum, frame): 1/0 def kill(signum): os.kill(os.getpid(), 
signum) def check_mask(mask): for sig in mask: assert isinstance(sig, signal.Signals), repr(sig) def read_sigmask(): sigmask = signal.pthread_sigmask(signal.SIG_BLOCK, []) check_mask(sigmask) return sigmask signum = signal.SIGUSR1 # Install our signal handler old_handler = signal.signal(signum, handler) # Unblock SIGUSR1 (and copy the old mask) to test our signal handler old_mask = signal.pthread_sigmask(signal.SIG_UNBLOCK, [signum]) check_mask(old_mask) try: kill(signum) except ZeroDivisionError: pass else: raise Exception("ZeroDivisionError not raised") # Block and then raise SIGUSR1. The signal is blocked: the signal # handler is not called, and the signal is now pending mask = signal.pthread_sigmask(signal.SIG_BLOCK, [signum]) check_mask(mask) kill(signum) # Check the new mask blocked = read_sigmask() check_mask(blocked) if signum not in blocked: raise Exception("%s not in %s" % (signum, blocked)) if old_mask ^ blocked != {signum}: raise Exception("%s ^ %s != {%s}" % (old_mask, blocked, signum)) # Unblock SIGUSR1 try: # unblock the pending signal calls immediately the signal handler signal.pthread_sigmask(signal.SIG_UNBLOCK, [signum]) except ZeroDivisionError: pass else: raise Exception("ZeroDivisionError not raised") try: kill(signum) except ZeroDivisionError: pass else: raise Exception("ZeroDivisionError not raised") # Check the new mask unblocked = read_sigmask() if signum in unblocked: raise Exception("%s in %s" % (signum, unblocked)) if blocked ^ unblocked != {signum}: raise Exception("%s ^ %s != {%s}" % (blocked, unblocked, signum)) if old_mask != unblocked: raise Exception("%s != %s" % (old_mask, unblocked)) """ assert_python_ok('-c', code) @unittest.skipUnless(hasattr(signal, 'pthread_kill'), 'need signal.pthread_kill()') def test_pthread_kill_main_thread(self): # Test that a signal can be sent to the main thread with pthread_kill() # before any other thread has been created (see issue #12392). 
code = """if True: import threading import signal import sys def handler(signum, frame): sys.exit(3) signal.signal(signal.SIGUSR1, handler) signal.pthread_kill(threading.get_ident(), signal.SIGUSR1) sys.exit(2) """ with spawn_python('-c', code) as process: stdout, stderr = process.communicate() exitcode = process.wait() if exitcode != 3: raise Exception("Child error (exit code %s): %s" % (exitcode, stdout))
PendingSignalsTests
python
getsentry__sentry
src/sentry_plugins/bitbucket/endpoints/webhook.py
{ "start": 1105, "end": 3317 }
class ____(Webhook): # https://confluence.atlassian.com/bitbucket/event-payloads-740262817.html#EventPayloads-Push def __call__(self, organization_id: int, event): authors = {} try: repo = Repository.objects.get( organization_id=organization_id, provider="bitbucket", external_id=str(event["repository"]["uuid"]), ) except Repository.DoesNotExist: raise Http404() if repo.config.get("name") != event["repository"]["full_name"]: repo.config["name"] = event["repository"]["full_name"] repo.save() for change in event["push"]["changes"]: for commit in change.get("commits", []): if RepositoryProvider.should_ignore_commit(commit["message"]): continue author_email = parse_email(commit["author"]["raw"]) # TODO(dcramer): we need to deal with bad values here, but since # its optional, lets just throw it out for now if author_email is None or len(author_email) > 75: author = None elif author_email not in authors: authors[author_email] = author = CommitAuthor.objects.get_or_create( organization_id=organization_id, email=author_email, defaults={"name": commit["author"]["raw"].split("<")[0].strip()}, )[0] else: author = authors[author_email] try: with transaction.atomic(router.db_for_write(Commit)): Commit.objects.create( repository_id=repo.id, organization_id=organization_id, key=commit["hash"], message=commit["message"], author=author, date_added=parse_date(commit["date"]).astimezone(timezone.utc), ) except IntegrityError: pass @region_silo_view
PushEventWebhook
python
streamlit__streamlit
lib/tests/streamlit/runtime/caching/cache_data_api_test.py
{ "start": 20069, "end": 20979 }
class ____(DeltaGeneratorTestCase): """st.cache_data disk persistence tests""" def setUp(self) -> None: super().setUp() mock_runtime = MagicMock(spec=Runtime) mock_runtime.cache_storage_manager = AlwaysFailingTestCacheStorageManager() Runtime._instance = mock_runtime def test_error_logged_and_raised_on_improperly_configured_cache_data(self): with ( pytest.raises(InvalidCacheStorageContextError) as e, self.assertLogs( "streamlit.runtime.caching.cache_data_api", level=logging.ERROR ) as logs, ): @st.cache_data(persist="disk") def foo(): return "data" assert str(e.value) == "This CacheStorageManager always fails" output = "".join(logs.output) assert "This CacheStorageManager always fails" in output
CacheDataValidateParamsTest
python
Pylons__pyramid
src/pyramid/exceptions.py
{ "start": 2361, "end": 2802 }
class ____(UnicodeDecodeError): """ This exception is raised when :app:`Pyramid` cannot successfully decode a URL or a URL path segment. This exception behaves just like the Python builtin :exc:`UnicodeDecodeError`. It is a subclass of the builtin :exc:`UnicodeDecodeError` exception only for identity purposes, mostly so an exception view can be registered when a URL cannot be decoded. """
URLDecodeError
python
sympy__sympy
sympy/printing/theanocode.py
{ "start": 2621, "end": 19094 }
class ____(Printer): """ Code printer which creates Theano symbolic expression graphs. Parameters ========== cache : dict Cache dictionary to use. If None (default) will use the global cache. To create a printer which does not depend on or alter global state pass an empty dictionary. Note: the dictionary is not copied on initialization of the printer and will be updated in-place, so using the same dict object when creating multiple printers or making multiple calls to :func:`.theano_code` or :func:`.theano_function` means the cache is shared between all these applications. Attributes ========== cache : dict A cache of Theano variables which have been created for SymPy symbol-like objects (e.g. :class:`sympy.core.symbol.Symbol` or :class:`sympy.matrices.expressions.MatrixSymbol`). This is used to ensure that all references to a given symbol in an expression (or multiple expressions) are printed as the same Theano variable, which is created only once. Symbols are differentiated only by name and type. The format of the cache's contents should be considered opaque to the user. """ printmethod = "_theano" def __init__(self, *args, **kwargs): self.cache = kwargs.pop('cache', {}) super().__init__(*args, **kwargs) def _get_key(self, s, name=None, dtype=None, broadcastable=None): """ Get the cache key for a SymPy object. Parameters ========== s : sympy.core.basic.Basic SymPy object to get key for. name : str Name of object, if it does not have a ``name`` attribute. """ if name is None: name = s.name return (name, type(s), s.args, dtype, broadcastable) def _get_or_create(self, s, name=None, dtype=None, broadcastable=None): """ Get the Theano variable for a SymPy symbol from the cache, or create it if it does not exist. 
""" # Defaults if name is None: name = s.name if dtype is None: dtype = 'floatX' if broadcastable is None: broadcastable = () key = self._get_key(s, name, dtype=dtype, broadcastable=broadcastable) if key in self.cache: return self.cache[key] value = tt.tensor(name=name, dtype=dtype, broadcastable=broadcastable) self.cache[key] = value return value def _print_Symbol(self, s, **kwargs): dtype = kwargs.get('dtypes', {}).get(s) bc = kwargs.get('broadcastables', {}).get(s) return self._get_or_create(s, dtype=dtype, broadcastable=bc) def _print_AppliedUndef(self, s, **kwargs): name = str(type(s)) + '_' + str(s.args[0]) dtype = kwargs.get('dtypes', {}).get(s) bc = kwargs.get('broadcastables', {}).get(s) return self._get_or_create(s, name=name, dtype=dtype, broadcastable=bc) def _print_Basic(self, expr, **kwargs): op = mapping[type(expr)] children = [self._print(arg, **kwargs) for arg in expr.args] return op(*children) def _print_Number(self, n, **kwargs): # Integers already taken care of below, interpret as float return float(n.evalf()) def _print_MatrixSymbol(self, X, **kwargs): dtype = kwargs.get('dtypes', {}).get(X) return self._get_or_create(X, dtype=dtype, broadcastable=(None, None)) def _print_DenseMatrix(self, X, **kwargs): if not hasattr(tt, 'stacklists'): raise NotImplementedError( "Matrix translation not yet supported in this version of Theano") return tt.stacklists([ [self._print(arg, **kwargs) for arg in L] for L in X.tolist() ]) _print_ImmutableMatrix = _print_ImmutableDenseMatrix = _print_DenseMatrix def _print_MatMul(self, expr, **kwargs): children = [self._print(arg, **kwargs) for arg in expr.args] result = children[0] for child in children[1:]: result = tt.dot(result, child) return result def _print_MatPow(self, expr, **kwargs): children = [self._print(arg, **kwargs) for arg in expr.args] result = 1 if isinstance(children[1], int) and children[1] > 0: for i in range(children[1]): result = tt.dot(result, children[0]) else: raise NotImplementedError('''Only 
non-negative integer powers of matrices can be handled by Theano at the moment''') return result def _print_MatrixSlice(self, expr, **kwargs): parent = self._print(expr.parent, **kwargs) rowslice = self._print(slice(*expr.rowslice), **kwargs) colslice = self._print(slice(*expr.colslice), **kwargs) return parent[rowslice, colslice] def _print_BlockMatrix(self, expr, **kwargs): nrows, ncols = expr.blocks.shape blocks = [[self._print(expr.blocks[r, c], **kwargs) for c in range(ncols)] for r in range(nrows)] return tt.join(0, *[tt.join(1, *row) for row in blocks]) def _print_slice(self, expr, **kwargs): return slice(*[self._print(i, **kwargs) if isinstance(i, sympy.Basic) else i for i in (expr.start, expr.stop, expr.step)]) def _print_Pi(self, expr, **kwargs): return math.pi def _print_Exp1(self, expr, **kwargs): return ts.exp(1) def _print_Piecewise(self, expr, **kwargs): import numpy as np e, cond = expr.args[0].args # First condition and corresponding value # Print conditional expression and value for first condition p_cond = self._print(cond, **kwargs) p_e = self._print(e, **kwargs) # One condition only if len(expr.args) == 1: # Return value if condition else NaN return tt.switch(p_cond, p_e, np.nan) # Return value_1 if condition_1 else evaluate remaining conditions p_remaining = self._print(sympy.Piecewise(*expr.args[1:]), **kwargs) return tt.switch(p_cond, p_e, p_remaining) def _print_Rational(self, expr, **kwargs): return tt.true_div(self._print(expr.p, **kwargs), self._print(expr.q, **kwargs)) def _print_Integer(self, expr, **kwargs): return expr.p def _print_factorial(self, expr, **kwargs): return self._print(sympy.gamma(expr.args[0] + 1), **kwargs) def _print_Derivative(self, deriv, **kwargs): rv = self._print(deriv.expr, **kwargs) for var in deriv.variables: var = self._print(var, **kwargs) rv = tt.Rop(rv, var, tt.ones_like(var)) return rv def emptyPrinter(self, expr): return expr def doprint(self, expr, dtypes=None, broadcastables=None): """ Convert a SymPy 
expression to a Theano graph variable. The ``dtypes`` and ``broadcastables`` arguments are used to specify the data type, dimension, and broadcasting behavior of the Theano variables corresponding to the free symbols in ``expr``. Each is a mapping from SymPy symbols to the value of the corresponding argument to ``theano.tensor.Tensor``. See the corresponding `documentation page`__ for more information on broadcasting in Theano. .. __: http://deeplearning.net/software/theano/tutorial/broadcasting.html Parameters ========== expr : sympy.core.expr.Expr SymPy expression to print. dtypes : dict Mapping from SymPy symbols to Theano datatypes to use when creating new Theano variables for those symbols. Corresponds to the ``dtype`` argument to ``theano.tensor.Tensor``. Defaults to ``'floatX'`` for symbols not included in the mapping. broadcastables : dict Mapping from SymPy symbols to the value of the ``broadcastable`` argument to ``theano.tensor.Tensor`` to use when creating Theano variables for those symbols. Defaults to the empty tuple for symbols not included in the mapping (resulting in a scalar). Returns ======= theano.gof.graph.Variable A variable corresponding to the expression's value in a Theano symbolic expression graph. """ if dtypes is None: dtypes = {} if broadcastables is None: broadcastables = {} return self._print(expr, dtypes=dtypes, broadcastables=broadcastables) global_cache: dict[Any, Any] = {} def theano_code(expr, cache=None, **kwargs): """ Convert a SymPy expression into a Theano graph variable. .. deprecated:: 1.8 ``sympy.printing.theanocode`` is deprecated. Theano has been renamed to Aesara. Use ``sympy.printing.aesaracode`` instead. See :ref:`theanocode-deprecated` for more information. Parameters ========== expr : sympy.core.expr.Expr SymPy expression object to convert. cache : dict Cached Theano variables (see :class:`TheanoPrinter.cache <TheanoPrinter>`). Defaults to the module-level global cache. 
dtypes : dict Passed to :meth:`.TheanoPrinter.doprint`. broadcastables : dict Passed to :meth:`.TheanoPrinter.doprint`. Returns ======= theano.gof.graph.Variable A variable corresponding to the expression's value in a Theano symbolic expression graph. """ sympy_deprecation_warning( """ sympy.printing.theanocode is deprecated. Theano has been renamed to Aesara. Use sympy.printing.aesaracode instead.""", deprecated_since_version="1.8", active_deprecations_target='theanocode-deprecated') if not theano: raise ImportError("theano is required for theano_code") if cache is None: cache = global_cache return TheanoPrinter(cache=cache, settings={}).doprint(expr, **kwargs) def dim_handling(inputs, dim=None, dims=None, broadcastables=None): r""" Get value of ``broadcastables`` argument to :func:`.theano_code` from keyword arguments to :func:`.theano_function`. Included for backwards compatibility. Parameters ========== inputs Sequence of input symbols. dim : int Common number of dimensions for all inputs. Overrides other arguments if given. dims : dict Mapping from input symbols to number of dimensions. Overrides ``broadcastables`` argument if given. broadcastables : dict Explicit value of ``broadcastables`` argument to :meth:`.TheanoPrinter.doprint`. If not None function will return this value unchanged. Returns ======= dict Dictionary mapping elements of ``inputs`` to their "broadcastable" values (tuple of ``bool``\ s). """ if dim is not None: return dict.fromkeys(inputs, (False,) * dim) if dims is not None: maxdim = max(dims.values()) return { s: (False,) * d + (True,) * (maxdim - d) for s, d in dims.items() } if broadcastables is not None: return broadcastables return {} @doctest_depends_on(modules=('theano',)) def theano_function(inputs, outputs, scalar=False, *, dim=None, dims=None, broadcastables=None, **kwargs): """ Create a Theano function from SymPy expressions. .. deprecated:: 1.8 ``sympy.printing.theanocode`` is deprecated. Theano has been renamed to Aesara. 
Use ``sympy.printing.aesaracode`` instead. See :ref:`theanocode-deprecated` for more information. The inputs and outputs are converted to Theano variables using :func:`.theano_code` and then passed to ``theano.function``. Parameters ========== inputs Sequence of symbols which constitute the inputs of the function. outputs Sequence of expressions which constitute the outputs(s) of the function. The free symbols of each expression must be a subset of ``inputs``. scalar : bool Convert 0-dimensional arrays in output to scalars. This will return a Python wrapper function around the Theano function object. cache : dict Cached Theano variables (see :class:`TheanoPrinter.cache <TheanoPrinter>`). Defaults to the module-level global cache. dtypes : dict Passed to :meth:`.TheanoPrinter.doprint`. broadcastables : dict Passed to :meth:`.TheanoPrinter.doprint`. dims : dict Alternative to ``broadcastables`` argument. Mapping from elements of ``inputs`` to integers indicating the dimension of their associated arrays/tensors. Overrides ``broadcastables`` argument if given. dim : int Another alternative to the ``broadcastables`` argument. Common number of dimensions to use for all arrays/tensors. ``theano_function([x, y], [...], dim=2)`` is equivalent to using ``broadcastables={x: (False, False), y: (False, False)}``. Returns ======= callable A callable object which takes values of ``inputs`` as positional arguments and returns an output array for each of the expressions in ``outputs``. If ``outputs`` is a single expression the function will return a Numpy array, if it is a list of multiple expressions the function will return a list of arrays. See description of the ``squeeze`` argument above for the behavior when a single output is passed in a list. The returned object will either be an instance of ``theano.compile.function_module.Function`` or a Python wrapper function around one. 
In both cases, the returned value will have a ``theano_function`` attribute which points to the return value of ``theano.function``. Examples ======== >>> from sympy.abc import x, y, z >>> from sympy.printing.theanocode import theano_function A simple function with one input and one output: >>> f1 = theano_function([x], [x**2 - 1], scalar=True) >>> f1(3) 8.0 A function with multiple inputs and one output: >>> f2 = theano_function([x, y, z], [(x**z + y**z)**(1/z)], scalar=True) >>> f2(3, 4, 2) 5.0 A function with multiple inputs and multiple outputs: >>> f3 = theano_function([x, y], [x**2 + y**2, x**2 - y**2], scalar=True) >>> f3(2, 3) [13.0, -5.0] See also ======== dim_handling """ sympy_deprecation_warning( """ sympy.printing.theanocode is deprecated. Theano has been renamed to Aesara. Use sympy.printing.aesaracode instead""", deprecated_since_version="1.8", active_deprecations_target='theanocode-deprecated') if not theano: raise ImportError("theano is required for theano_function") # Pop off non-theano keyword args cache = kwargs.pop('cache', {}) dtypes = kwargs.pop('dtypes', {}) broadcastables = dim_handling( inputs, dim=dim, dims=dims, broadcastables=broadcastables, ) # Print inputs/outputs code = partial(theano_code, cache=cache, dtypes=dtypes, broadcastables=broadcastables) tinputs = list(map(code, inputs)) toutputs = list(map(code, outputs)) #fix constant expressions as variables toutputs = [output if isinstance(output, theano.Variable) else tt.as_tensor_variable(output) for output in toutputs] if len(toutputs) == 1: toutputs = toutputs[0] # Compile theano func func = theano.function(tinputs, toutputs, **kwargs) is_0d = [len(o.variable.broadcastable) == 0 for o in func.outputs] # No wrapper required if not scalar or not any(is_0d): func.theano_function = func return func # Create wrapper to convert 0-dimensional outputs to scalars def wrapper(*args): out = func(*args) # out can be array(1.0) or [array(1.0), array(2.0)] if is_sequence(out): return [o[()] if 
is_0d[i] else o for i, o in enumerate(out)] else: return out[()] wrapper.__wrapped__ = func wrapper.__doc__ = func.__doc__ wrapper.theano_function = func return wrapper
TheanoPrinter
python
Lightning-AI__lightning
src/lightning/pytorch/_graveyard/tpu.py
{ "start": 2427, "end": 2866 }
class ____(XLAPrecision): """Legacy class. Use :class:`~lightning.pytorch.plugins.precision.xla.XLAPrecision` instead. """ def __init__(self, *args: Any, **kwargs: Any) -> None: rank_zero_deprecation( "The `TPUPrecisionPlugin` class is deprecated. Use `lightning.pytorch.plugins.precision.XLAPrecision`" " instead." ) super().__init__(precision="32-true")
TPUPrecisionPlugin
python
huggingface__transformers
src/transformers/models/kosmos2/modeling_kosmos2.py
{ "start": 7475, "end": 12132 }
class ____(nn.Module): def __init__(self, config: Kosmos2VisionConfig): super().__init__() self.config = config self.embed_dim = config.hidden_size self.image_size = config.image_size self.patch_size = config.patch_size self.class_embedding = nn.Parameter(torch.randn(self.embed_dim)) self.patch_embedding = nn.Conv2d( in_channels=config.num_channels, out_channels=self.embed_dim, kernel_size=self.patch_size, stride=self.patch_size, bias=False, ) self.num_patches = (self.image_size // self.patch_size) ** 2 self.num_positions = self.num_patches + 1 self.position_embedding = nn.Embedding(self.num_positions, self.embed_dim) self.register_buffer("position_ids", torch.arange(self.num_positions).expand((1, -1)), persistent=False) def interpolate_pos_encoding(self, embeddings: torch.Tensor, height: int, width: int) -> torch.Tensor: """ This method allows to interpolate the pre-trained position encodings, to be able to use the model on higher resolution images. This method is also adapted to support torch.jit tracing. 
Adapted from: - https://github.com/facebookresearch/dino/blob/de9ee3df6cf39fac952ab558447af1fa1365362a/vision_transformer.py#L174-L194, and - https://github.com/facebookresearch/dinov2/blob/e1277af2ba9496fbadf7aec6eba56e8d882d1e35/dinov2/models/vision_transformer.py#L179-L211 """ num_patches = embeddings.shape[1] - 1 position_embedding = self.position_embedding.weight.unsqueeze(0) num_positions = position_embedding.shape[1] - 1 # always interpolate when tracing to ensure the exported model works for dynamic input shapes if not torch.jit.is_tracing() and num_patches == num_positions and height == width: return self.position_embedding(self.position_ids) class_pos_embed = position_embedding[:, :1] patch_pos_embed = position_embedding[:, 1:] dim = embeddings.shape[-1] new_height = height // self.patch_size new_width = width // self.patch_size sqrt_num_positions = torch_int(num_positions**0.5) patch_pos_embed = patch_pos_embed.reshape(1, sqrt_num_positions, sqrt_num_positions, dim) patch_pos_embed = patch_pos_embed.permute(0, 3, 1, 2) patch_pos_embed = nn.functional.interpolate( patch_pos_embed, size=(new_height, new_width), mode="bicubic", align_corners=False, ) patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim) return torch.cat((class_pos_embed, patch_pos_embed), dim=1) def forward(self, pixel_values: torch.FloatTensor, interpolate_pos_encoding=False) -> torch.Tensor: batch_size, _, height, width = pixel_values.shape if not interpolate_pos_encoding and (height != self.image_size or width != self.image_size): raise ValueError( f"Input image size ({height}*{width}) doesn't match model ({self.image_size}*{self.image_size})." 
) target_dtype = self.patch_embedding.weight.dtype patch_embeds = self.patch_embedding(pixel_values.to(dtype=target_dtype)) # shape = [*, width, grid, grid] patch_embeds = patch_embeds.flatten(2).transpose(1, 2) class_embeds = self.class_embedding.expand(batch_size, 1, -1) embeddings = torch.cat([class_embeds, patch_embeds], dim=1) if interpolate_pos_encoding: embeddings = embeddings + self.interpolate_pos_encoding(embeddings, height, width) else: embeddings = embeddings + self.position_embedding(self.position_ids) return embeddings # Adapted from transformers.models.siglip.modeling_siglip.eager_attention_forward -> Kosmos2 doesn't cast attn weights to fp32 def eager_attention_forward( module: nn.Module, query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, attention_mask: Optional[torch.Tensor], scaling: float, dropout: float = 0.0, **kwargs, ): attn_weights = torch.matmul(query, key.transpose(-1, -2)) * scaling if attention_mask is not None: attn_weights = attn_weights + attention_mask attn_weights = nn.functional.softmax(attn_weights, dim=-1) attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training) attn_output = torch.matmul(attn_weights, value) attn_output = attn_output.transpose(1, 2).contiguous() return attn_output, attn_weights
Kosmos2VisionEmbeddings
python
facebookresearch__faiss
tests/test_fast_scan.py
{ "start": 21417, "end": 21862 }
class ____(unittest.TestCase): def test_issue_2739(self): ds = datasets.SyntheticDataset(960, 200, 1, 0) M = 32 index = faiss.index_factory(ds.d, f"PQ{M}x4fs") index.train(ds.get_train()) index.add(ds.get_database()) np.testing.assert_array_equal( index.pq.decode(index.pq.compute_codes(ds.get_database()))[0, ::100], index.reconstruct(0)[::100] )
TestBlockDecode
python
wireservice__csvkit
tests/utils.py
{ "start": 2587, "end": 2805 }
class ____: def test_empty(self): with open('examples/empty.csv', 'rb') as f, stdin_as_string(f): utility = self.Utility(getattr(self, 'default_args', [])) utility.run()
EmptyFileTests
python
django__django
django/contrib/contenttypes/admin.py
{ "start": 4996, "end": 5099 }
class ____(GenericInlineModelAdmin): template = "admin/edit_inline/stacked.html"
GenericStackedInline
python
PrefectHQ__prefect
tests/test_flows.py
{ "start": 172837, "end": 178575 }
class ____: def test_load_flow_name_from_entrypoint(self, tmp_path: Path): flow_source = dedent( """ from prefect import flow @flow(name="My custom name") def flow_function(name: str) -> str: return name """ ) tmp_path.joinpath("flow.py").write_text(flow_source) entrypoint = f"{tmp_path.joinpath('flow.py')}:flow_function" result = load_flow_arguments_from_entrypoint(entrypoint) assert result["name"] == "My custom name" def test_load_flow_name_from_entrypoint_no_name(self, tmp_path: Path): flow_source = dedent( """ from prefect import flow @flow def flow_function(name: str) -> str: return name """ ) tmp_path.joinpath("flow.py").write_text(flow_source) entrypoint = f"{tmp_path.joinpath('flow.py')}:flow_function" result = load_flow_arguments_from_entrypoint(entrypoint) assert result["name"] == "flow-function" def test_load_flow_name_from_entrypoint_dynamic_name_fstring(self, tmp_path: Path): flow_source = dedent( """ from prefect import flow version = "1.0" @flow(name=f"flow-function-{version}") def flow_function(name: str) -> str: return name """ ) tmp_path.joinpath("flow.py").write_text(flow_source) entrypoint = f"{tmp_path.joinpath('flow.py')}:flow_function" result = load_flow_arguments_from_entrypoint(entrypoint) assert result["name"] == "flow-function-1.0" def test_load_flow_name_from_entrypoint_dyanmic_name_function(self, tmp_path: Path): flow_source = dedent( """ from prefect import flow def get_name(): return "from-a-function" @flow(name=get_name()) def flow_function(name: str) -> str: return name """ ) tmp_path.joinpath("flow.py").write_text(flow_source) entrypoint = f"{tmp_path.joinpath('flow.py')}:flow_function" result = load_flow_arguments_from_entrypoint(entrypoint) assert result["name"] == "from-a-function" def test_load_flow_name_from_entrypoint_dynamic_name_depends_on_missing_import( self, tmp_path: Path, caplog: pytest.LogCaptureFixture ): flow_source = dedent( """ from prefect import flow from non_existent import get_name @flow(name=get_name()) def 
flow_function(name: str) -> str: return name """ ) tmp_path.joinpath("flow.py").write_text(flow_source) entrypoint = f"{tmp_path.joinpath('flow.py')}:flow_function" result = load_flow_arguments_from_entrypoint(entrypoint) assert result["name"] == "flow-function" assert "Failed to parse @flow argument: `name=get_name()`" in caplog.text def test_load_flow_name_from_entrypoint_dynamic_name_fstring_multiline( self, tmp_path: Path ): flow_source = dedent( """ from prefect import flow flow_base_name = "flow-function" version = "1.0" @flow( name=( f"{flow_base_name}-" f"{version}" ) ) def flow_function(name: str) -> str: return name """ ) tmp_path.joinpath("flow.py").write_text(flow_source) entrypoint = f"{tmp_path.joinpath('flow.py')}:flow_function" result = load_flow_arguments_from_entrypoint(entrypoint) assert result["name"] == "flow-function-1.0" def test_load_async_flow_from_entrypoint_no_name(self, tmp_path: Path): flow_source = dedent( """ from prefect import flow @flow async def flow_function(name: str) -> str: return name """ ) tmp_path.joinpath("flow.py").write_text(flow_source) entrypoint = f"{tmp_path.joinpath('flow.py')}:flow_function" result = load_flow_arguments_from_entrypoint(entrypoint) assert result["name"] == "flow-function" def test_load_flow_description_from_entrypoint(self, tmp_path: Path): flow_source = dedent( """ from prefect import flow @flow(description="My custom description") def flow_function(name: str) -> str: return name """ ) tmp_path.joinpath("flow.py").write_text(flow_source) entrypoint = f"{tmp_path.joinpath('flow.py')}:flow_function" result = load_flow_arguments_from_entrypoint(entrypoint) assert result["description"] == "My custom description" def test_load_flow_description_from_entrypoint_no_description(self, tmp_path: Path): flow_source = dedent( """ from prefect import flow @flow def flow_function(name: str) -> str: return name """ ) tmp_path.joinpath("flow.py").write_text(flow_source) entrypoint = 
f"{tmp_path.joinpath('flow.py')}:flow_function" result = load_flow_arguments_from_entrypoint(entrypoint) assert "description" not in result def test_load_no_flow(self, tmp_path: Path): flow_source = dedent( """ from prefect import flow """ ) tmp_path.joinpath("flow.py").write_text(flow_source) entrypoint = f"{tmp_path.joinpath('flow.py')}:flow_function" with pytest.raises(ValueError, match="Could not find object"): load_flow_arguments_from_entrypoint(entrypoint)
TestLoadFlowArgumentFromEntrypoint
python
sqlalchemy__sqlalchemy
lib/sqlalchemy/util/typing.py
{ "start": 2471, "end": 3030 }
class ____(Protocol[_T]): """protocol for generic types. this since Python.typing _GenericAlias is private """ __args__: Tuple[_AnnotationScanType, ...] __origin__: Type[_T] # Python's builtin _GenericAlias has this method, however builtins like # list, dict, etc. do not, even though they have ``__origin__`` and # ``__args__`` # # def copy_with(self, params: Tuple[_AnnotationScanType, ...]) -> Type[_T]: # ... # copied from TypeShed, required in order to implement # MutableMapping.update()
GenericProtocol
python
encode__starlette
starlette/convertors.py
{ "start": 388, "end": 715 }
class ____(Convertor[str]): regex = "[^/]+" def convert(self, value: str) -> str: return value def to_string(self, value: str) -> str: value = str(value) assert "/" not in value, "May not contain path separators" assert value, "Must not be empty" return value
StringConvertor
python
huggingface__transformers
src/transformers/models/dac/modeling_dac.py
{ "start": 3273, "end": 3544 }
class ____(ModelOutput): r""" audio_values (`torch.FloatTensor` of shape `(batch_size, input_length)`, *optional*): Decoded audio values, obtained using the decoder part of Dac. """ audio_values: Optional[torch.FloatTensor] = None
DacDecoderOutput
python
facelessuser__pymdown-extensions
tests/test_extensions/test_highlight.py
{ "start": 6985, "end": 7654 }
class ____(util.MdCase): """Test no class.""" extension = ['pymdownx.highlight', 'pymdownx.superfences'] extension_configs = { 'pymdownx.highlight': { 'css_class': '', 'use_pygments': False } } def test_no_class_no_pygments(self): """Test with no class and no Pygments.""" self.check_markdown( r''' ```python import test test.test() ``` ''', r''' <pre><code class="language-python">import test test.test()</code></pre> ''', True )
TestNoClassNoPygments
python
microsoft__pyright
packages/pyright-internal/src/tests/samples/genericType28.py
{ "start": 1692, "end": 1818 }
class ____(Class6[T_co, T_co]): ... # This should generate an error because T_contra isn't # compatible with T_co.
Class6_Child2
python
jschneier__django-storages
storages/compress.py
{ "start": 1133, "end": 1295 }
class ____: def _compress_content(self, content): """Gzip a given string content.""" return GzipCompressionWrapper(content)
CompressStorageMixin
python
modin-project__modin
stress_tests/kaggle/kaggle4.py
{ "start": 9412, "end": 10067 }
class ____(BaseEstimator, RegressorMixin, TransformerMixin): def __init__(self, models): self.models = models def fit(self, X, y): self.models_ = [clone(x) for x in self.models] for model in self.models_: model.fit(X, y) return self def predict(self, X): predictions = np.column_stack([model.predict(X) for model in self.models_]) return np.mean(predictions, axis=1) averaged_models = AveragingModels(models=(ENet, GBoost, KRR, lasso)) score = rmsle_cv(averaged_models) print( " Averaged base models score: {:.4f} ({:.4f})\n".format(score.mean(), score.std()) )
AveragingModels
python
pytorch__pytorch
test/dynamo/cpython/3_13/test_contextlib.py
{ "start": 15315, "end": 15574 }
class ____(__TestCase): def test_nullcontext(self): with torch._dynamo.error_on_graph_break(False): class C: pass c = C() with nullcontext(c) as c_in: self.assertIs(c_in, c)
NullcontextTestCase
python
doocs__leetcode
solution/1700-1799/1711.Count Good Meals/Solution2.py
{ "start": 0, "end": 377 }
class ____: def countPairs(self, deliciousness: List[int]) -> int: mod = 10**9 + 7 cnt = Counter(deliciousness) ans = 0 for i in range(22): s = 1 << i for a, m in cnt.items(): if (b := s - a) in cnt: ans += m * (m - 1) if a == b else m * cnt[b] return (ans >> 1) % mod
Solution
python
apache__airflow
providers/standard/src/airflow/providers/standard/sensors/time_delta.py
{ "start": 6183, "end": 7449 }
class ____(BaseSensorOperator): """ A sensor that waits a specified period of time before completing. This differs from TimeDeltaSensor because the time to wait is measured from the start of the task, not the data_interval_end of the DAG run. :param time_to_wait: time length to wait after the task starts before succeeding. :param deferrable: Run sensor in deferrable mode """ def __init__( self, time_to_wait: timedelta | int, deferrable: bool = conf.getboolean("operators", "default_deferrable", fallback=False), **kwargs, ) -> None: super().__init__(**kwargs) self.deferrable = deferrable if isinstance(time_to_wait, int): self.time_to_wait = timedelta(minutes=time_to_wait) else: self.time_to_wait = time_to_wait def execute(self, context: Context) -> None: if self.deferrable: self.defer( trigger=TimeDeltaTrigger(self.time_to_wait, end_from_trigger=True) if AIRFLOW_V_3_0_PLUS else TimeDeltaTrigger(self.time_to_wait), method_name="execute_complete", ) else: sleep(int(self.time_to_wait.total_seconds()))
WaitSensor
python
apache__airflow
providers/amazon/src/airflow/providers/amazon/aws/operators/emr.py
{ "start": 49345, "end": 64274 }
class ____(AwsBaseOperator[EmrServerlessHook]): """ Operator to start EMR Serverless job. .. seealso:: For more information on how to use this operator, take a look at the guide: :ref:`howto/operator:EmrServerlessStartJobOperator` :param application_id: ID of the EMR Serverless application to start. :param execution_role_arn: ARN of role to perform action. :param job_driver: Driver that the job runs on. :param configuration_overrides: Configuration specifications to override existing configurations. :param client_request_token: The client idempotency token of the application to create. Its value must be unique for each request. :param config: Optional dictionary for arbitrary parameters to the boto API start_job_run call. :param wait_for_completion: If true, waits for the job to start before returning. Defaults to True. If set to False, ``waiter_max_attempts`` and ``waiter_delay`` will only be applied when waiting for the application to be in the ``STARTED`` state. :param aws_conn_id: The Airflow connection used for AWS credentials. If this is ``None`` or empty then the default boto3 behaviour is used. If running Airflow in a distributed manner and aws_conn_id is None or empty, then default boto3 configuration would be used (and must be maintained on each worker node). :param region_name: AWS region_name. If not specified then the default boto3 behaviour is used. :param verify: Whether or not to verify SSL certificates. See: https://boto3.amazonaws.com/v1/documentation/api/latest/reference/core/session.html :param name: Name for the EMR Serverless job. If not provided, a default name will be assigned. :param waiter_max_attempts: Number of times the waiter should poll the application to check the state. Defaults to 25 if not set. :param waiter_delay: Number of seconds between polling the state of the job run. Defaults to 60 seconds if not set. :param deferrable: If True, the operator will wait asynchronously for the crawl to complete. 
This implies waiting for completion. This mode requires aiobotocore module to be installed. (default: False, but can be overridden in config file by setting default_deferrable to True) :param enable_application_ui_links: If True, the operator will generate one-time links to EMR Serverless application UIs. The generated links will allow any user with access to the DAG to see the Spark or Tez UI or Spark stdout logs. Defaults to False. """ aws_hook_class = EmrServerlessHook template_fields: Sequence[str] = aws_template_fields( "application_id", "config", "execution_role_arn", "job_driver", "configuration_overrides", "name", ) template_fields_renderers = { "config": "json", "configuration_overrides": "json", } operator_extra_links = ( EmrServerlessS3LogsLink(), EmrServerlessCloudWatchLogsLink(), EmrServerlessDashboardLink(), EmrServerlessLogsLink(), ) def __init__( self, application_id: str, execution_role_arn: str, job_driver: dict, configuration_overrides: dict | None = None, client_request_token: str = "", config: dict | None = None, wait_for_completion: bool = True, name: str | None = None, waiter_max_attempts: int | ArgNotSet = NOTSET, waiter_delay: int | ArgNotSet = NOTSET, deferrable: bool = conf.getboolean("operators", "default_deferrable", fallback=False), enable_application_ui_links: bool = False, **kwargs, ): waiter_delay = 60 if waiter_delay is NOTSET else waiter_delay waiter_max_attempts = 25 if waiter_max_attempts is NOTSET else waiter_max_attempts self.application_id = application_id self.execution_role_arn = execution_role_arn self.job_driver = job_driver self.configuration_overrides = configuration_overrides self.wait_for_completion = wait_for_completion self.config = config or {} self.name = name self.waiter_max_attempts = int(waiter_max_attempts) # type: ignore[arg-type] self.waiter_delay = int(waiter_delay) # type: ignore[arg-type] self.job_id: str | None = None self.deferrable = deferrable self.enable_application_ui_links = 
enable_application_ui_links super().__init__(**kwargs) self.client_request_token = client_request_token or str(uuid4()) def execute(self, context: Context, event: dict[str, Any] | None = None) -> str | None: app_state = self.hook.conn.get_application(applicationId=self.application_id)["application"]["state"] if app_state not in EmrServerlessHook.APPLICATION_SUCCESS_STATES: self.log.info("Application state is %s", app_state) self.log.info("Starting application %s", self.application_id) self.hook.conn.start_application(applicationId=self.application_id) waiter = self.hook.get_waiter("serverless_app_started") if self.deferrable: self.defer( trigger=EmrServerlessStartApplicationTrigger( application_id=self.application_id, waiter_delay=self.waiter_delay, waiter_max_attempts=self.waiter_max_attempts, aws_conn_id=self.aws_conn_id, ), method_name="execute", timeout=timedelta(seconds=self.waiter_max_attempts * self.waiter_delay), ) wait( waiter=waiter, waiter_max_attempts=self.waiter_max_attempts, waiter_delay=self.waiter_delay, args={"applicationId": self.application_id}, failure_message="Serverless Application failed to start", status_message="Serverless Application status is", status_args=["application.state", "application.stateDetails"], ) self.log.info("Starting job on Application: %s", self.application_id) self.name = self.name or self.config.pop("name", f"emr_serverless_job_airflow_{uuid4()}") args = { "clientToken": self.client_request_token, "applicationId": self.application_id, "executionRoleArn": self.execution_role_arn, "jobDriver": self.job_driver, "name": self.name, **self.config, } if self.configuration_overrides is not None: args["configurationOverrides"] = self.configuration_overrides response = self.hook.conn.start_job_run( **args, ) if response["ResponseMetadata"]["HTTPStatusCode"] != 200: raise AirflowException(f"EMR serverless job failed to start: {response}") self.job_id = response["jobRunId"] self.log.info("EMR serverless job started: %s", 
self.job_id) self.persist_links(context) if self.wait_for_completion: if self.deferrable: self.defer( trigger=EmrServerlessStartJobTrigger( application_id=self.application_id, job_id=self.job_id, waiter_delay=self.waiter_delay, waiter_max_attempts=self.waiter_max_attempts, aws_conn_id=self.aws_conn_id, ), method_name="execute_complete", timeout=timedelta(seconds=self.waiter_max_attempts * self.waiter_delay), ) else: waiter = self.hook.get_waiter("serverless_job_completed") wait( waiter=waiter, waiter_max_attempts=self.waiter_max_attempts, waiter_delay=self.waiter_delay, args={"applicationId": self.application_id, "jobRunId": self.job_id}, failure_message="Serverless Job failed", status_message="Serverless Job status is", status_args=["jobRun.state", "jobRun.stateDetails"], ) return self.job_id def execute_complete(self, context: Context, event: dict[str, Any] | None = None) -> None: validated_event = validate_execute_complete_event(event) if validated_event["status"] == "success": self.log.info("Serverless job completed") return validated_event["job_id"] def on_kill(self) -> None: """ Cancel the submitted job run. Note: this method will not run in deferrable mode. """ if self.job_id: self.log.info("Stopping job run with jobId - %s", self.job_id) response = self.hook.conn.cancel_job_run(applicationId=self.application_id, jobRunId=self.job_id) http_status_code = ( response.get("ResponseMetadata", {}).get("HTTPStatusCode") if response else None ) if http_status_code is None or http_status_code != 200: self.log.error("Unable to request query cancel on EMR Serverless. Exiting") return self.log.info( "Polling EMR Serverless for query with id %s to reach final state", self.job_id, ) # This should be replaced with a boto waiter when available. 
waiter( get_state_callable=self.hook.conn.get_job_run, get_state_args={ "applicationId": self.application_id, "jobRunId": self.job_id, }, parse_response=["jobRun", "state"], desired_state=EmrServerlessHook.JOB_TERMINAL_STATES, failure_states=set(), object_type="job", action="cancelled", countdown=self.waiter_delay * self.waiter_max_attempts, check_interval_seconds=self.waiter_delay, ) def is_monitoring_in_job_override(self, config_key: str, job_override: dict | None) -> bool: """ Check if monitoring is enabled for the job. Note: This is not compatible with application defaults: https://docs.aws.amazon.com/emr/latest/EMR-Serverless-UserGuide/default-configs.html This is used to determine what extra links should be shown. """ monitoring_config = (job_override or {}).get("monitoringConfiguration") if monitoring_config is None or config_key not in monitoring_config: return False # CloudWatch can have an "enabled" flag set to False if config_key == "cloudWatchLoggingConfiguration": return monitoring_config.get(config_key).get("enabled") is True return config_key in monitoring_config def persist_links(self, context: Context): """Populate the relevant extra links for the EMR Serverless jobs.""" # Persist the EMR Serverless Dashboard link (Spark/Tez UI) if self.enable_application_ui_links: EmrServerlessDashboardLink.persist( context=context, operator=self, region_name=self.hook.conn_region_name, aws_partition=self.hook.conn_partition, conn_id=self.hook.aws_conn_id, application_id=self.application_id, job_run_id=self.job_id, ) # If this is a Spark job, persist the EMR Serverless logs link (Driver stdout) if self.enable_application_ui_links and "sparkSubmit" in self.job_driver: EmrServerlessLogsLink.persist( context=context, operator=self, region_name=self.hook.conn_region_name, aws_partition=self.hook.conn_partition, conn_id=self.hook.aws_conn_id, application_id=self.application_id, job_run_id=self.job_id, ) # Add S3 and/or CloudWatch links if either is enabled if 
self.is_monitoring_in_job_override("s3MonitoringConfiguration", self.configuration_overrides): log_uri = ( (self.configuration_overrides or {}) .get("monitoringConfiguration", {}) .get("s3MonitoringConfiguration", {}) .get("logUri") ) EmrServerlessS3LogsLink.persist( context=context, operator=self, region_name=self.hook.conn_region_name, aws_partition=self.hook.conn_partition, log_uri=log_uri, application_id=self.application_id, job_run_id=self.job_id, ) emrs_s3_url = EmrServerlessS3LogsLink().format_link( aws_domain=EmrServerlessCloudWatchLogsLink.get_aws_domain(self.hook.conn_partition), region_name=self.hook.conn_region_name, aws_partition=self.hook.conn_partition, log_uri=log_uri, application_id=self.application_id, job_run_id=self.job_id, ) self.log.info("S3 logs available at: %s", emrs_s3_url) if self.is_monitoring_in_job_override("cloudWatchLoggingConfiguration", self.configuration_overrides): cloudwatch_config = ( (self.configuration_overrides or {}) .get("monitoringConfiguration", {}) .get("cloudWatchLoggingConfiguration", {}) ) log_group_name = cloudwatch_config.get("logGroupName", "/aws/emr-serverless") log_stream_prefix = cloudwatch_config.get("logStreamNamePrefix", "") log_stream_prefix = f"{log_stream_prefix}/applications/{self.application_id}/jobs/{self.job_id}" EmrServerlessCloudWatchLogsLink.persist( context=context, operator=self, region_name=self.hook.conn_region_name, aws_partition=self.hook.conn_partition, awslogs_group=log_group_name, stream_prefix=log_stream_prefix, ) emrs_cloudwatch_url = EmrServerlessCloudWatchLogsLink().format_link( aws_domain=EmrServerlessCloudWatchLogsLink.get_aws_domain(self.hook.conn_partition), region_name=self.hook.conn_region_name, aws_partition=self.hook.conn_partition, awslogs_group=log_group_name, stream_prefix=log_stream_prefix, ) self.log.info("CloudWatch logs available at: %s", emrs_cloudwatch_url)
EmrServerlessStartJobOperator
python
pytorch__pytorch
torch/testing/_internal/common_quantization.py
{ "start": 70349, "end": 70885 }
class ____(torch.nn.Module): def __init__(self) -> None: super().__init__() self.fc1 = torch.nn.Linear(5, 5).to(dtype=torch.float) self.relu = torch.nn.ReLU() self.fc2 = torch.nn.Linear(5, 5).to(dtype=torch.float) def forward(self, x): x = self.fc1(x) x = self.relu(x) x = torch.add(x, 5) x = self.fc2(x) self.relu = torch.nn.ReLU() return x def get_example_inputs(self) -> tuple[Any, ...]: return (torch.rand(1, 5),)
LinearReluAddModel
python
apache__airflow
providers/hashicorp/tests/unit/hashicorp/hooks/test_vault.py
{ "start": 58178, "end": 61389 }
class ____: @mock.patch("airflow.providers.hashicorp._internal_client.vault_client.hvac") @conf_vars( { ("secrets", "backend"): "airflow.providers.hashicorp.secrets.vault.VaultBackend", ("secrets", "backend_kwargs"): '{"url": "http://127.0.0.1:8200", "token": "token"}', } ) def test_config_from_secret_backend(self, mock_hvac): """Get Config Value from a Secret Backend""" mock_client = mock.MagicMock() mock_hvac.Client.return_value = mock_client mock_client.secrets.kv.v2.read_secret_version.return_value = { "request_id": "2d48a2ad-6bcb-e5b6-429d-da35fdf31f56", "lease_id": "", "renewable": False, "lease_duration": 0, "data": { "data": {"value": "sqlite:////Users/airflow/airflow/airflow.db"}, "metadata": { "created_time": "2020-03-28T02:10:54.301784Z", "deletion_time": "", "destroyed": False, "version": 1, }, }, "wrap_info": None, "warnings": None, "auth": None, } test_config = """[test] sql_alchemy_conn_secret = sql_alchemy_conn """ test_config_default = """[test] sql_alchemy_conn = airflow """ test_conf = AirflowConfigParser(default_config=test_config_default) test_conf.read_string(test_config) test_conf.sensitive_config_values = test_conf.sensitive_config_values | { ("test", "sql_alchemy_conn"), } assert test_conf.get("test", "sql_alchemy_conn") == "sqlite:////Users/airflow/airflow/airflow.db" @mock.patch("airflow.providers.hashicorp._internal_client.vault_client.hvac") @conf_vars( { ("secrets", "backend"): "airflow.providers.hashicorp.secrets.vault.VaultBackend", ("secrets", "backend_kwargs"): '{"url": "http://127.0.0.1:8200", "token": "token"}', } ) def test_config_raise_exception_from_secret_backend_connection_error(self, mock_hvac): """Get Config Value from a Secret Backend""" mock_client = mock.MagicMock() # mock_client.side_effect = AirflowConfigException mock_hvac.Client.return_value = mock_client mock_client.secrets.kv.v2.read_secret_version.return_value = Exception test_config = """[test] sql_alchemy_conn_secret = sql_alchemy_conn """ test_config_default = 
"""[test] sql_alchemy_conn = airflow """ test_conf = AirflowConfigParser(default_config=test_config_default) test_conf.read_string(test_config) test_conf.sensitive_config_values = test_conf.sensitive_config_values | { ("test", "sql_alchemy_conn"), } with pytest.raises( AirflowConfigException, match=re.escape( "Cannot retrieve config from alternative secrets backend. " "Make sure it is configured properly and that the Backend " "is accessible." ), ): test_conf.get("test", "sql_alchemy_conn")
TestConfigurationFromSecrets
python
ansible__ansible
test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/lookup/mylookup.py
{ "start": 84, "end": 207 }
class ____(LookupBase): def run(self, terms, variables, **kwargs): return ['mylookup_from_user_dir']
LookupModule
python
PrefectHQ__prefect
src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py
{ "start": 9782, "end": 10171 }
class ____(sgqlc.types.Enum): """ See source code for more info. """ __schema__ = graphql_schema __choices__ = ( "COMMUNITY_BRIDGE", "CUSTOM", "GITHUB", "ISSUEHUNT", "KO_FI", "LFX_CROWDFUNDING", "LIBERAPAY", "OPEN_COLLECTIVE", "OTECHIE", "PATREON", "TIDELIFT", )
FundingPlatform
python
matplotlib__matplotlib
lib/matplotlib/category.py
{ "start": 3675, "end": 4196 }
class ____(ticker.Locator): """Tick at every integer mapping of the string data.""" def __init__(self, units_mapping): """ Parameters ---------- units_mapping : dict Mapping of category names (str) to indices (int). """ self._units = units_mapping def __call__(self): # docstring inherited return list(self._units.values()) def tick_values(self, vmin, vmax): # docstring inherited return self()
StrCategoryLocator
python
airbytehq__airbyte
airbyte-integrations/connectors/source-microsoft-sharepoint/source_microsoft_sharepoint/spec.py
{ "start": 1424, "end": 2624 }
class ____(BaseModel): """ ServiceCredentials class for service key authentication. This class is structured similarly to OAuthCredentials but for a different authentication method. """ class Config: title = "Service Key Authentication" # Fields for the Service authentication, similar to OAuthCredentials auth_type: Literal["Service"] = Field("Service", const=True) tenant_id: str = Field(title="Tenant ID", description="Tenant ID of the Microsoft SharePoint user", airbyte_secret=True) user_principal_name: str = Field( title="User Principal Name", description="Special characters such as a period, comma, space, and the at sign (@) are converted to underscores (_). More details: https://learn.microsoft.com/en-us/sharepoint/list-onedrive-urls", airbyte_secret=True, ) client_id: str = Field( title="Client ID", description="Client ID of your Microsoft developer application", airbyte_secret=True, ) client_secret: str = Field( title="Client Secret", description="Client Secret of your Microsoft developer application", airbyte_secret=True, )
ServiceCredentials
python
viewflow__viewflow
viewflow/workflow/migrations/0005_merge.py
{ "start": 108, "end": 288 }
class ____(migrations.Migration): dependencies = [ ("viewflow", "0004_subprocess"), ("viewflow", "0004_extend_fields_length"), ] operations = []
Migration