language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | pyqtgraph__pyqtgraph | pyqtgraph/imageview/ImageView.py | {
"start": 1290,
"end": 1523
} | class ____(ROI):
def __init__(self, size):
ROI.__init__(self, pos=[0,0], size=size) #, scaleSnap=True, translateSnap=True)
self.addScaleHandle([1, 1], [0, 0])
self.addRotateHandle([0, 0], [0.5, 0.5])
| PlotROI |
python | pandas-dev__pandas | asv_bench/benchmarks/frame_ctor.py | {
"start": 3345,
"end": 3817
} | class ____:
def setup(self):
self.nrows = 100_000
def time_frame_from_scalar_ea_float64(self):
DataFrame(
1.0,
index=range(self.nrows),
columns=list("abc"),
dtype=Float64Dtype(),
)
def time_frame_from_scalar_ea_float64_na(self):
DataFrame(
NA,
index=range(self.nrows),
columns=list("abc"),
dtype=Float64Dtype(),
)
| FromScalar |
python | optuna__optuna | optuna/storages/_rdb/alembic/versions/v2.4.0.a.py | {
"start": 2070,
"end": 6407
} | class ____(BaseModel):
__tablename__ = "trial_intermediate_values"
__table_args__: Any = (UniqueConstraint("trial_id", "step"),)
trial_intermediate_value_id = Column(Integer, primary_key=True)
trial_id = Column(Integer, ForeignKey("trials.trial_id"), nullable=False)
step = Column(Integer, nullable=False)
intermediate_value = Column(Float, nullable=False)
def upgrade():
bind = op.get_bind()
inspector = sa.inspect(bind)
tables = inspector.get_table_names()
if "study_directions" not in tables:
op.create_table(
"study_directions",
sa.Column("study_direction_id", sa.Integer(), nullable=False),
sa.Column(
"direction",
sa.Enum("NOT_SET", "MINIMIZE", "MAXIMIZE", name="studydirection"),
nullable=False,
),
sa.Column("study_id", sa.Integer(), nullable=False),
sa.Column("objective", sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(
["study_id"],
["studies.study_id"],
),
sa.PrimaryKeyConstraint("study_direction_id"),
sa.UniqueConstraint("study_id", "objective"),
)
if "trial_intermediate_values" not in tables:
op.create_table(
"trial_intermediate_values",
sa.Column("trial_intermediate_value_id", sa.Integer(), nullable=False),
sa.Column("trial_id", sa.Integer(), nullable=False),
sa.Column("step", sa.Integer(), nullable=False),
sa.Column("intermediate_value", sa.Float(), nullable=False),
sa.ForeignKeyConstraint(
["trial_id"],
["trials.trial_id"],
),
sa.PrimaryKeyConstraint("trial_intermediate_value_id"),
sa.UniqueConstraint("trial_id", "step"),
)
session = orm.Session(bind=bind)
try:
studies_records = session.query(StudyModel).all()
objects = [
StudyDirectionModel(study_id=r.study_id, direction=r.direction, objective=0)
for r in studies_records
]
session.bulk_save_objects(objects)
intermediate_values_records = session.query(
TrialValueModel.trial_id, TrialValueModel.value, TrialValueModel.step
).all()
objects = [
TrialIntermediateValueModel(
trial_id=r.trial_id, intermediate_value=r.value, step=r.step
)
for r in intermediate_values_records
]
session.bulk_save_objects(objects)
session.query(TrialValueModel).delete()
session.commit()
with op.batch_alter_table("trial_values", schema=None) as batch_op:
batch_op.add_column(sa.Column("objective", sa.Integer(), nullable=False))
# The name of this constraint is manually determined.
# In the future, the naming convention may be determined based on
# https://alembic.sqlalchemy.org/en/latest/naming.html
batch_op.create_unique_constraint(
"uq_trial_values_trial_id_objective", ["trial_id", "objective"]
)
trials_records = session.query(TrialModel).all()
objects = [
TrialValueModel(trial_id=r.trial_id, value=r.value, objective=0)
for r in trials_records
]
session.bulk_save_objects(objects)
session.commit()
except SQLAlchemyError as e:
session.rollback()
raise e
finally:
session.close()
with op.batch_alter_table("studies", schema=None) as batch_op:
batch_op.drop_column("direction")
with op.batch_alter_table("trial_values", schema=None) as batch_op:
batch_op.drop_column("step")
with op.batch_alter_table("trials", schema=None) as batch_op:
batch_op.drop_column("value")
for c in inspector.get_unique_constraints("trial_values"):
# MySQL changes the uniq constraint of (trial_id, step) to that of trial_id.
if c["column_names"] == ["trial_id"]:
with op.batch_alter_table("trial_values", schema=None) as batch_op:
batch_op.drop_constraint(c["name"], type_="unique")
break
# TODO(imamura): Implement downgrade
def downgrade():
pass
| TrialIntermediateValueModel |
python | pandas-dev__pandas | pandas/tests/indexes/test_any_index.py | {
"start": 4842,
"end": 5041
} | class ____:
def test_str(self, index):
# test the string repr
index.name = "foo"
assert "'foo'" in str(index)
assert type(index).__name__ in str(index)
| TestRendering |
python | mlflow__mlflow | tests/tracing/test_fluent.py | {
"start": 1362,
"end": 1793
} | class ____:
@mlflow.trace()
def predict(self, x, y):
z = x + y
z = self.add_one(z)
z = mlflow.trace(self.square)(z)
return z # noqa: RET504
@mlflow.trace(span_type=SpanType.LLM, name="add_one_with_custom_name", attributes={"delta": 1})
def add_one(self, z):
return z + 1
def square(self, t):
res = t**2
time.sleep(0.1)
return res
| DefaultTestModel |
python | ray-project__ray | python/ray/tune/examples/cifar10_pytorch.py | {
"start": 1623,
"end": 9332
} | class ____(nn.Module):
def __init__(self, l1=120, l2=84):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(3, 6, 5)
self.pool = nn.MaxPool2d(2, 2)
self.conv2 = nn.Conv2d(6, 16, 5)
self.fc1 = nn.Linear(16 * 5 * 5, l1)
self.fc2 = nn.Linear(l1, l2)
self.fc3 = nn.Linear(l2, 10)
def forward(self, x):
x = self.pool(F.relu(self.conv1(x)))
x = self.pool(F.relu(self.conv2(x)))
x = x.view(-1, 16 * 5 * 5)
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = self.fc3(x)
return x
# __net_end__
# __train_begin__
def train_cifar(config):
net = Net(config["l1"], config["l2"])
device = "cpu"
if torch.cuda.is_available():
device = "cuda:0"
if torch.cuda.device_count() > 1:
net = nn.DataParallel(net)
net.to(device)
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=config["lr"], momentum=0.9)
# Load existing checkpoint through `get_checkpoint()` API.
if tune.get_checkpoint():
loaded_checkpoint = tune.get_checkpoint()
with loaded_checkpoint.as_directory() as loaded_checkpoint_dir:
model_state, optimizer_state = torch.load(
os.path.join(loaded_checkpoint_dir, "checkpoint.pt")
)
net.load_state_dict(model_state)
optimizer.load_state_dict(optimizer_state)
if config["smoke_test"]:
trainset, testset = load_test_data()
else:
trainset, testset = load_data(DATA_DIR)
test_abs = int(len(trainset) * 0.8)
train_subset, val_subset = random_split(
trainset, [test_abs, len(trainset) - test_abs])
trainloader = torch.utils.data.DataLoader(
train_subset,
batch_size=int(config["batch_size"]),
shuffle=True,
num_workers=0 if config["smoke_test"] else 8,
)
valloader = torch.utils.data.DataLoader(
val_subset,
batch_size=int(config["batch_size"]),
shuffle=True,
num_workers=0 if config["smoke_test"] else 8,
)
for epoch in range(10): # loop over the dataset multiple times
running_loss = 0.0
epoch_steps = 0
for i, data in enumerate(trainloader):
# get the inputs; data is a list of [inputs, labels]
inputs, labels = data
inputs, labels = inputs.to(device), labels.to(device)
# zero the parameter gradients
optimizer.zero_grad()
# forward + backward + optimize
outputs = net(inputs)
loss = criterion(outputs, labels)
loss.backward()
optimizer.step()
# print statistics
running_loss += loss.item()
epoch_steps += 1
if i % 2000 == 1999: # print every 2000 mini-batches
print("[%d, %5d] loss: %.3f" % (epoch + 1, i + 1,
running_loss / epoch_steps))
running_loss = 0.0
# Validation loss
val_loss = 0.0
val_steps = 0
total = 0
correct = 0
for i, data in enumerate(valloader, 0):
with torch.no_grad():
inputs, labels = data
inputs, labels = inputs.to(device), labels.to(device)
outputs = net(inputs)
_, predicted = torch.max(outputs.data, 1)
total += labels.size(0)
correct += (predicted == labels).sum().item()
loss = criterion(outputs, labels)
val_loss += loss.cpu().numpy()
val_steps += 1
# Here we save a checkpoint. It is automatically registered with
# Ray Tune and will potentially be accessed through in ``get_checkpoint()``
# in future iterations.
# Note to save a file like checkpoint, you still need to put it under a directory
# to construct a checkpoint.
with tempfile.TemporaryDirectory() as temp_checkpoint_dir:
path = os.path.join(temp_checkpoint_dir, "checkpoint.pt")
torch.save(
(net.state_dict(), optimizer.state_dict()), path
)
checkpoint = Checkpoint.from_directory(temp_checkpoint_dir)
tune.report(
{"loss": (val_loss / val_steps), "accuracy": correct / total},
checkpoint=checkpoint,
)
print("Finished Training")
# __train_end__
# __test_acc_begin__
def test_best_model(config: Dict, checkpoint: "Checkpoint", smoke_test=False):
best_trained_model = Net(config["l1"], config["l2"])
device = "cuda:0" if torch.cuda.is_available() else "cpu"
best_trained_model.to(device)
with checkpoint.as_directory() as checkpoint_dir:
checkpoint_path = os.path.join(checkpoint_dir, "checkpoint.pt")
model_state, optimizer_state = torch.load(checkpoint_path)
best_trained_model.load_state_dict(model_state)
if smoke_test:
_, testset = load_test_data()
else:
_, testset = load_data(DATA_DIR)
testloader = torch.utils.data.DataLoader(
testset, batch_size=4, shuffle=False, num_workers=2)
correct = 0
total = 0
with torch.no_grad():
for data in testloader:
images, labels = data
images, labels = images.to(device), labels.to(device)
outputs = best_trained_model(images)
_, predicted = torch.max(outputs.data, 1)
total += labels.size(0)
correct += (predicted == labels).sum().item()
print("Best trial test set accuracy: {}".format(correct / total))
# __test_acc_end__
# __main_begin__
def main(num_samples=10, max_num_epochs=10, gpus_per_trial=2, smoke_test=False):
config = {
"l1": tune.sample_from(lambda _: 2 ** np.random.randint(2, 9)),
"l2": tune.sample_from(lambda _: 2 ** np.random.randint(2, 9)),
"lr": tune.loguniform(1e-4, 1e-1),
"batch_size": tune.choice([2, 4, 8, 16]),
"smoke_test": smoke_test,
}
scheduler = ASHAScheduler(
max_t=max_num_epochs,
grace_period=1,
reduction_factor=2)
tuner = tune.Tuner(
tune.with_resources(
tune.with_parameters(train_cifar),
resources={"cpu": 2, "gpu": gpus_per_trial},
),
tune_config=tune.TuneConfig(
metric="loss",
mode="min",
num_samples=num_samples,
scheduler=scheduler
),
param_space=config,
)
results = tuner.fit()
best_result = results.get_best_result("loss", "min")
print("Best trial config: {}".format(best_result.config))
print("Best trial final validation loss: {}".format(
best_result.metrics["loss"]))
print("Best trial final validation accuracy: {}".format(
best_result.metrics["accuracy"]))
test_best_model(best_result.config, best_result.checkpoint, smoke_test=smoke_test)
# __main_end__
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument(
"--smoke-test", action="store_true", help="Finish quickly for testing")
parser.add_argument(
"--ray-address",
help="Address of Ray cluster for seamless distributed execution.",
required=False)
args, _ = parser.parse_known_args()
if args.smoke_test:
ray.init(num_cpus=2)
main(num_samples=1, max_num_epochs=1, gpus_per_trial=0, smoke_test=True)
else:
ray.init(args.ray_address)
# Change this to activate training on GPUs
main(num_samples=10, max_num_epochs=10, gpus_per_trial=0)
| Net |
python | wandb__wandb | wandb/vendor/pygments/lexers/pascal.py | {
"start": 666,
"end": 26923
} | class ____(Lexer):
"""
For `Delphi <http://www.borland.com/delphi/>`_ (Borland Object Pascal),
Turbo Pascal and Free Pascal source code.
Additional options accepted:
`turbopascal`
Highlight Turbo Pascal specific keywords (default: ``True``).
`delphi`
Highlight Borland Delphi specific keywords (default: ``True``).
`freepascal`
Highlight Free Pascal specific keywords (default: ``True``).
`units`
A list of units that should be considered builtin, supported are
``System``, ``SysUtils``, ``Classes`` and ``Math``.
Default is to consider all of them builtin.
"""
name = 'Delphi'
aliases = ['delphi', 'pas', 'pascal', 'objectpascal']
filenames = ['*.pas', '*.dpr']
mimetypes = ['text/x-pascal']
TURBO_PASCAL_KEYWORDS = (
'absolute', 'and', 'array', 'asm', 'begin', 'break', 'case',
'const', 'constructor', 'continue', 'destructor', 'div', 'do',
'downto', 'else', 'end', 'file', 'for', 'function', 'goto',
'if', 'implementation', 'in', 'inherited', 'inline', 'interface',
'label', 'mod', 'nil', 'not', 'object', 'of', 'on', 'operator',
'or', 'packed', 'procedure', 'program', 'record', 'reintroduce',
'repeat', 'self', 'set', 'shl', 'shr', 'string', 'then', 'to',
'type', 'unit', 'until', 'uses', 'var', 'while', 'with', 'xor'
)
DELPHI_KEYWORDS = (
'as', 'class', 'except', 'exports', 'finalization', 'finally',
'initialization', 'is', 'library', 'on', 'property', 'raise',
'threadvar', 'try'
)
FREE_PASCAL_KEYWORDS = (
'dispose', 'exit', 'false', 'new', 'true'
)
BLOCK_KEYWORDS = set((
'begin', 'class', 'const', 'constructor', 'destructor', 'end',
'finalization', 'function', 'implementation', 'initialization',
'label', 'library', 'operator', 'procedure', 'program', 'property',
'record', 'threadvar', 'type', 'unit', 'uses', 'var'
))
FUNCTION_MODIFIERS = set((
'alias', 'cdecl', 'export', 'inline', 'interrupt', 'nostackframe',
'pascal', 'register', 'safecall', 'softfloat', 'stdcall',
'varargs', 'name', 'dynamic', 'near', 'virtual', 'external',
'override', 'assembler'
))
# XXX: those aren't global. but currently we know no way for defining
# them just for the type context.
DIRECTIVES = set((
'absolute', 'abstract', 'assembler', 'cppdecl', 'default', 'far',
'far16', 'forward', 'index', 'oldfpccall', 'private', 'protected',
'published', 'public'
))
BUILTIN_TYPES = set((
'ansichar', 'ansistring', 'bool', 'boolean', 'byte', 'bytebool',
'cardinal', 'char', 'comp', 'currency', 'double', 'dword',
'extended', 'int64', 'integer', 'iunknown', 'longbool', 'longint',
'longword', 'pansichar', 'pansistring', 'pbool', 'pboolean',
'pbyte', 'pbytearray', 'pcardinal', 'pchar', 'pcomp', 'pcurrency',
'pdate', 'pdatetime', 'pdouble', 'pdword', 'pextended', 'phandle',
'pint64', 'pinteger', 'plongint', 'plongword', 'pointer',
'ppointer', 'pshortint', 'pshortstring', 'psingle', 'psmallint',
'pstring', 'pvariant', 'pwidechar', 'pwidestring', 'pword',
'pwordarray', 'pwordbool', 'real', 'real48', 'shortint',
'shortstring', 'single', 'smallint', 'string', 'tclass', 'tdate',
'tdatetime', 'textfile', 'thandle', 'tobject', 'ttime', 'variant',
'widechar', 'widestring', 'word', 'wordbool'
))
BUILTIN_UNITS = {
'System': (
'abs', 'acquireexceptionobject', 'addr', 'ansitoutf8',
'append', 'arctan', 'assert', 'assigned', 'assignfile',
'beginthread', 'blockread', 'blockwrite', 'break', 'chdir',
'chr', 'close', 'closefile', 'comptocurrency', 'comptodouble',
'concat', 'continue', 'copy', 'cos', 'dec', 'delete',
'dispose', 'doubletocomp', 'endthread', 'enummodules',
'enumresourcemodules', 'eof', 'eoln', 'erase', 'exceptaddr',
'exceptobject', 'exclude', 'exit', 'exp', 'filepos', 'filesize',
'fillchar', 'finalize', 'findclasshinstance', 'findhinstance',
'findresourcehinstance', 'flush', 'frac', 'freemem',
'get8087cw', 'getdir', 'getlasterror', 'getmem',
'getmemorymanager', 'getmodulefilename', 'getvariantmanager',
'halt', 'hi', 'high', 'inc', 'include', 'initialize', 'insert',
'int', 'ioresult', 'ismemorymanagerset', 'isvariantmanagerset',
'length', 'ln', 'lo', 'low', 'mkdir', 'move', 'new', 'odd',
'olestrtostring', 'olestrtostrvar', 'ord', 'paramcount',
'paramstr', 'pi', 'pos', 'pred', 'ptr', 'pucs4chars', 'random',
'randomize', 'read', 'readln', 'reallocmem',
'releaseexceptionobject', 'rename', 'reset', 'rewrite', 'rmdir',
'round', 'runerror', 'seek', 'seekeof', 'seekeoln',
'set8087cw', 'setlength', 'setlinebreakstyle',
'setmemorymanager', 'setstring', 'settextbuf',
'setvariantmanager', 'sin', 'sizeof', 'slice', 'sqr', 'sqrt',
'str', 'stringofchar', 'stringtoolestr', 'stringtowidechar',
'succ', 'swap', 'trunc', 'truncate', 'typeinfo',
'ucs4stringtowidestring', 'unicodetoutf8', 'uniquestring',
'upcase', 'utf8decode', 'utf8encode', 'utf8toansi',
'utf8tounicode', 'val', 'vararrayredim', 'varclear',
'widecharlentostring', 'widecharlentostrvar',
'widechartostring', 'widechartostrvar',
'widestringtoucs4string', 'write', 'writeln'
),
'SysUtils': (
'abort', 'addexitproc', 'addterminateproc', 'adjustlinebreaks',
'allocmem', 'ansicomparefilename', 'ansicomparestr',
'ansicomparetext', 'ansidequotedstr', 'ansiextractquotedstr',
'ansilastchar', 'ansilowercase', 'ansilowercasefilename',
'ansipos', 'ansiquotedstr', 'ansisamestr', 'ansisametext',
'ansistrcomp', 'ansistricomp', 'ansistrlastchar', 'ansistrlcomp',
'ansistrlicomp', 'ansistrlower', 'ansistrpos', 'ansistrrscan',
'ansistrscan', 'ansistrupper', 'ansiuppercase',
'ansiuppercasefilename', 'appendstr', 'assignstr', 'beep',
'booltostr', 'bytetocharindex', 'bytetocharlen', 'bytetype',
'callterminateprocs', 'changefileext', 'charlength',
'chartobyteindex', 'chartobytelen', 'comparemem', 'comparestr',
'comparetext', 'createdir', 'createguid', 'currentyear',
'currtostr', 'currtostrf', 'date', 'datetimetofiledate',
'datetimetostr', 'datetimetostring', 'datetimetosystemtime',
'datetimetotimestamp', 'datetostr', 'dayofweek', 'decodedate',
'decodedatefully', 'decodetime', 'deletefile', 'directoryexists',
'diskfree', 'disksize', 'disposestr', 'encodedate', 'encodetime',
'exceptionerrormessage', 'excludetrailingbackslash',
'excludetrailingpathdelimiter', 'expandfilename',
'expandfilenamecase', 'expanduncfilename', 'extractfiledir',
'extractfiledrive', 'extractfileext', 'extractfilename',
'extractfilepath', 'extractrelativepath', 'extractshortpathname',
'fileage', 'fileclose', 'filecreate', 'filedatetodatetime',
'fileexists', 'filegetattr', 'filegetdate', 'fileisreadonly',
'fileopen', 'fileread', 'filesearch', 'fileseek', 'filesetattr',
'filesetdate', 'filesetreadonly', 'filewrite', 'finalizepackage',
'findclose', 'findcmdlineswitch', 'findfirst', 'findnext',
'floattocurr', 'floattodatetime', 'floattodecimal', 'floattostr',
'floattostrf', 'floattotext', 'floattotextfmt', 'fmtloadstr',
'fmtstr', 'forcedirectories', 'format', 'formatbuf', 'formatcurr',
'formatdatetime', 'formatfloat', 'freeandnil', 'getcurrentdir',
'getenvironmentvariable', 'getfileversion', 'getformatsettings',
'getlocaleformatsettings', 'getmodulename', 'getpackagedescription',
'getpackageinfo', 'gettime', 'guidtostring', 'incamonth',
'includetrailingbackslash', 'includetrailingpathdelimiter',
'incmonth', 'initializepackage', 'interlockeddecrement',
'interlockedexchange', 'interlockedexchangeadd',
'interlockedincrement', 'inttohex', 'inttostr', 'isdelimiter',
'isequalguid', 'isleapyear', 'ispathdelimiter', 'isvalidident',
'languages', 'lastdelimiter', 'loadpackage', 'loadstr',
'lowercase', 'msecstotimestamp', 'newstr', 'nextcharindex', 'now',
'outofmemoryerror', 'quotedstr', 'raiselastoserror',
'raiselastwin32error', 'removedir', 'renamefile', 'replacedate',
'replacetime', 'safeloadlibrary', 'samefilename', 'sametext',
'setcurrentdir', 'showexception', 'sleep', 'stralloc', 'strbufsize',
'strbytetype', 'strcat', 'strcharlength', 'strcomp', 'strcopy',
'strdispose', 'strecopy', 'strend', 'strfmt', 'stricomp',
'stringreplace', 'stringtoguid', 'strlcat', 'strlcomp', 'strlcopy',
'strlen', 'strlfmt', 'strlicomp', 'strlower', 'strmove', 'strnew',
'strnextchar', 'strpas', 'strpcopy', 'strplcopy', 'strpos',
'strrscan', 'strscan', 'strtobool', 'strtobooldef', 'strtocurr',
'strtocurrdef', 'strtodate', 'strtodatedef', 'strtodatetime',
'strtodatetimedef', 'strtofloat', 'strtofloatdef', 'strtoint',
'strtoint64', 'strtoint64def', 'strtointdef', 'strtotime',
'strtotimedef', 'strupper', 'supports', 'syserrormessage',
'systemtimetodatetime', 'texttofloat', 'time', 'timestamptodatetime',
'timestamptomsecs', 'timetostr', 'trim', 'trimleft', 'trimright',
'tryencodedate', 'tryencodetime', 'tryfloattocurr', 'tryfloattodatetime',
'trystrtobool', 'trystrtocurr', 'trystrtodate', 'trystrtodatetime',
'trystrtofloat', 'trystrtoint', 'trystrtoint64', 'trystrtotime',
'unloadpackage', 'uppercase', 'widecomparestr', 'widecomparetext',
'widefmtstr', 'wideformat', 'wideformatbuf', 'widelowercase',
'widesamestr', 'widesametext', 'wideuppercase', 'win32check',
'wraptext'
),
'Classes': (
'activateclassgroup', 'allocatehwnd', 'bintohex', 'checksynchronize',
'collectionsequal', 'countgenerations', 'deallocatehwnd', 'equalrect',
'extractstrings', 'findclass', 'findglobalcomponent', 'getclass',
'groupdescendantswith', 'hextobin', 'identtoint',
'initinheritedcomponent', 'inttoident', 'invalidpoint',
'isuniqueglobalcomponentname', 'linestart', 'objectbinarytotext',
'objectresourcetotext', 'objecttexttobinary', 'objecttexttoresource',
'pointsequal', 'readcomponentres', 'readcomponentresex',
'readcomponentresfile', 'rect', 'registerclass', 'registerclassalias',
'registerclasses', 'registercomponents', 'registerintegerconsts',
'registernoicon', 'registernonactivex', 'smallpoint', 'startclassgroup',
'teststreamformat', 'unregisterclass', 'unregisterclasses',
'unregisterintegerconsts', 'unregistermoduleclasses',
'writecomponentresfile'
),
'Math': (
'arccos', 'arccosh', 'arccot', 'arccoth', 'arccsc', 'arccsch', 'arcsec',
'arcsech', 'arcsin', 'arcsinh', 'arctan2', 'arctanh', 'ceil',
'comparevalue', 'cosecant', 'cosh', 'cot', 'cotan', 'coth', 'csc',
'csch', 'cycletodeg', 'cycletograd', 'cycletorad', 'degtocycle',
'degtograd', 'degtorad', 'divmod', 'doubledecliningbalance',
'ensurerange', 'floor', 'frexp', 'futurevalue', 'getexceptionmask',
'getprecisionmode', 'getroundmode', 'gradtocycle', 'gradtodeg',
'gradtorad', 'hypot', 'inrange', 'interestpayment', 'interestrate',
'internalrateofreturn', 'intpower', 'isinfinite', 'isnan', 'iszero',
'ldexp', 'lnxp1', 'log10', 'log2', 'logn', 'max', 'maxintvalue',
'maxvalue', 'mean', 'meanandstddev', 'min', 'minintvalue', 'minvalue',
'momentskewkurtosis', 'netpresentvalue', 'norm', 'numberofperiods',
'payment', 'periodpayment', 'poly', 'popnstddev', 'popnvariance',
'power', 'presentvalue', 'radtocycle', 'radtodeg', 'radtograd',
'randg', 'randomrange', 'roundto', 'samevalue', 'sec', 'secant',
'sech', 'setexceptionmask', 'setprecisionmode', 'setroundmode',
'sign', 'simpleroundto', 'sincos', 'sinh', 'slndepreciation', 'stddev',
'sum', 'sumint', 'sumofsquares', 'sumsandsquares', 'syddepreciation',
'tan', 'tanh', 'totalvariance', 'variance'
)
}
ASM_REGISTERS = set((
'ah', 'al', 'ax', 'bh', 'bl', 'bp', 'bx', 'ch', 'cl', 'cr0',
'cr1', 'cr2', 'cr3', 'cr4', 'cs', 'cx', 'dh', 'di', 'dl', 'dr0',
'dr1', 'dr2', 'dr3', 'dr4', 'dr5', 'dr6', 'dr7', 'ds', 'dx',
'eax', 'ebp', 'ebx', 'ecx', 'edi', 'edx', 'es', 'esi', 'esp',
'fs', 'gs', 'mm0', 'mm1', 'mm2', 'mm3', 'mm4', 'mm5', 'mm6',
'mm7', 'si', 'sp', 'ss', 'st0', 'st1', 'st2', 'st3', 'st4', 'st5',
'st6', 'st7', 'xmm0', 'xmm1', 'xmm2', 'xmm3', 'xmm4', 'xmm5',
'xmm6', 'xmm7'
))
ASM_INSTRUCTIONS = set((
'aaa', 'aad', 'aam', 'aas', 'adc', 'add', 'and', 'arpl', 'bound',
'bsf', 'bsr', 'bswap', 'bt', 'btc', 'btr', 'bts', 'call', 'cbw',
'cdq', 'clc', 'cld', 'cli', 'clts', 'cmc', 'cmova', 'cmovae',
'cmovb', 'cmovbe', 'cmovc', 'cmovcxz', 'cmove', 'cmovg',
'cmovge', 'cmovl', 'cmovle', 'cmovna', 'cmovnae', 'cmovnb',
'cmovnbe', 'cmovnc', 'cmovne', 'cmovng', 'cmovnge', 'cmovnl',
'cmovnle', 'cmovno', 'cmovnp', 'cmovns', 'cmovnz', 'cmovo',
'cmovp', 'cmovpe', 'cmovpo', 'cmovs', 'cmovz', 'cmp', 'cmpsb',
'cmpsd', 'cmpsw', 'cmpxchg', 'cmpxchg486', 'cmpxchg8b', 'cpuid',
'cwd', 'cwde', 'daa', 'das', 'dec', 'div', 'emms', 'enter', 'hlt',
'ibts', 'icebp', 'idiv', 'imul', 'in', 'inc', 'insb', 'insd',
'insw', 'int', 'int01', 'int03', 'int1', 'int3', 'into', 'invd',
'invlpg', 'iret', 'iretd', 'iretw', 'ja', 'jae', 'jb', 'jbe',
'jc', 'jcxz', 'jcxz', 'je', 'jecxz', 'jg', 'jge', 'jl', 'jle',
'jmp', 'jna', 'jnae', 'jnb', 'jnbe', 'jnc', 'jne', 'jng', 'jnge',
'jnl', 'jnle', 'jno', 'jnp', 'jns', 'jnz', 'jo', 'jp', 'jpe',
'jpo', 'js', 'jz', 'lahf', 'lar', 'lcall', 'lds', 'lea', 'leave',
'les', 'lfs', 'lgdt', 'lgs', 'lidt', 'ljmp', 'lldt', 'lmsw',
'loadall', 'loadall286', 'lock', 'lodsb', 'lodsd', 'lodsw',
'loop', 'loope', 'loopne', 'loopnz', 'loopz', 'lsl', 'lss', 'ltr',
'mov', 'movd', 'movq', 'movsb', 'movsd', 'movsw', 'movsx',
'movzx', 'mul', 'neg', 'nop', 'not', 'or', 'out', 'outsb', 'outsd',
'outsw', 'pop', 'popa', 'popad', 'popaw', 'popf', 'popfd', 'popfw',
'push', 'pusha', 'pushad', 'pushaw', 'pushf', 'pushfd', 'pushfw',
'rcl', 'rcr', 'rdmsr', 'rdpmc', 'rdshr', 'rdtsc', 'rep', 'repe',
'repne', 'repnz', 'repz', 'ret', 'retf', 'retn', 'rol', 'ror',
'rsdc', 'rsldt', 'rsm', 'sahf', 'sal', 'salc', 'sar', 'sbb',
'scasb', 'scasd', 'scasw', 'seta', 'setae', 'setb', 'setbe',
'setc', 'setcxz', 'sete', 'setg', 'setge', 'setl', 'setle',
'setna', 'setnae', 'setnb', 'setnbe', 'setnc', 'setne', 'setng',
'setnge', 'setnl', 'setnle', 'setno', 'setnp', 'setns', 'setnz',
'seto', 'setp', 'setpe', 'setpo', 'sets', 'setz', 'sgdt', 'shl',
'shld', 'shr', 'shrd', 'sidt', 'sldt', 'smi', 'smint', 'smintold',
'smsw', 'stc', 'std', 'sti', 'stosb', 'stosd', 'stosw', 'str',
'sub', 'svdc', 'svldt', 'svts', 'syscall', 'sysenter', 'sysexit',
'sysret', 'test', 'ud1', 'ud2', 'umov', 'verr', 'verw', 'wait',
'wbinvd', 'wrmsr', 'wrshr', 'xadd', 'xbts', 'xchg', 'xlat',
'xlatb', 'xor'
))
def __init__(self, **options):
Lexer.__init__(self, **options)
self.keywords = set()
if get_bool_opt(options, 'turbopascal', True):
self.keywords.update(self.TURBO_PASCAL_KEYWORDS)
if get_bool_opt(options, 'delphi', True):
self.keywords.update(self.DELPHI_KEYWORDS)
if get_bool_opt(options, 'freepascal', True):
self.keywords.update(self.FREE_PASCAL_KEYWORDS)
self.builtins = set()
for unit in get_list_opt(options, 'units', list(self.BUILTIN_UNITS)):
self.builtins.update(self.BUILTIN_UNITS[unit])
def get_tokens_unprocessed(self, text):
scanner = Scanner(text, re.DOTALL | re.MULTILINE | re.IGNORECASE)
stack = ['initial']
in_function_block = False
in_property_block = False
was_dot = False
next_token_is_function = False
next_token_is_property = False
collect_labels = False
block_labels = set()
brace_balance = [0, 0]
while not scanner.eos:
token = Error
if stack[-1] == 'initial':
if scanner.scan(r'\s+'):
token = Text
elif scanner.scan(r'\{.*?\}|\(\*.*?\*\)'):
if scanner.match.startswith('$'):
token = Comment.Preproc
else:
token = Comment.Multiline
elif scanner.scan(r'//.*?$'):
token = Comment.Single
elif scanner.scan(r'[-+*\/=<>:;,.@\^]'):
token = Operator
# stop label highlighting on next ";"
if collect_labels and scanner.match == ';':
collect_labels = False
elif scanner.scan(r'[\(\)\[\]]+'):
token = Punctuation
# abort function naming ``foo = Function(...)``
next_token_is_function = False
# if we are in a function block we count the open
# braces because ootherwise it's impossible to
# determine the end of the modifier context
if in_function_block or in_property_block:
if scanner.match == '(':
brace_balance[0] += 1
elif scanner.match == ')':
brace_balance[0] -= 1
elif scanner.match == '[':
brace_balance[1] += 1
elif scanner.match == ']':
brace_balance[1] -= 1
elif scanner.scan(r'[A-Za-z_][A-Za-z_0-9]*'):
lowercase_name = scanner.match.lower()
if lowercase_name == 'result':
token = Name.Builtin.Pseudo
elif lowercase_name in self.keywords:
token = Keyword
# if we are in a special block and a
# block ending keyword occours (and the parenthesis
# is balanced) we end the current block context
if (in_function_block or in_property_block) and \
lowercase_name in self.BLOCK_KEYWORDS and \
brace_balance[0] <= 0 and \
brace_balance[1] <= 0:
in_function_block = False
in_property_block = False
brace_balance = [0, 0]
block_labels = set()
if lowercase_name in ('label', 'goto'):
collect_labels = True
elif lowercase_name == 'asm':
stack.append('asm')
elif lowercase_name == 'property':
in_property_block = True
next_token_is_property = True
elif lowercase_name in ('procedure', 'operator',
'function', 'constructor',
'destructor'):
in_function_block = True
next_token_is_function = True
# we are in a function block and the current name
# is in the set of registered modifiers. highlight
# it as pseudo keyword
elif in_function_block and \
lowercase_name in self.FUNCTION_MODIFIERS:
token = Keyword.Pseudo
# if we are in a property highlight some more
# modifiers
elif in_property_block and \
lowercase_name in ('read', 'write'):
token = Keyword.Pseudo
next_token_is_function = True
# if the last iteration set next_token_is_function
# to true we now want this name highlighted as
# function. so do that and reset the state
elif next_token_is_function:
# Look if the next token is a dot. If yes it's
# not a function, but a class name and the
# part after the dot a function name
if scanner.test(r'\s*\.\s*'):
token = Name.Class
# it's not a dot, our job is done
else:
token = Name.Function
next_token_is_function = False
# same for properties
elif next_token_is_property:
token = Name.Property
next_token_is_property = False
# Highlight this token as label and add it
# to the list of known labels
elif collect_labels:
token = Name.Label
block_labels.add(scanner.match.lower())
# name is in list of known labels
elif lowercase_name in block_labels:
token = Name.Label
elif lowercase_name in self.BUILTIN_TYPES:
token = Keyword.Type
elif lowercase_name in self.DIRECTIVES:
token = Keyword.Pseudo
# builtins are just builtins if the token
# before isn't a dot
elif not was_dot and lowercase_name in self.builtins:
token = Name.Builtin
else:
token = Name
elif scanner.scan(r"'"):
token = String
stack.append('string')
elif scanner.scan(r'\#(\d+|\$[0-9A-Fa-f]+)'):
token = String.Char
elif scanner.scan(r'\$[0-9A-Fa-f]+'):
token = Number.Hex
elif scanner.scan(r'\d+(?![eE]|\.[^.])'):
token = Number.Integer
elif scanner.scan(r'\d+(\.\d+([eE][+-]?\d+)?|[eE][+-]?\d+)'):
token = Number.Float
else:
# if the stack depth is deeper than once, pop
if len(stack) > 1:
stack.pop()
scanner.get_char()
elif stack[-1] == 'string':
if scanner.scan(r"''"):
token = String.Escape
elif scanner.scan(r"'"):
token = String
stack.pop()
elif scanner.scan(r"[^']*"):
token = String
else:
scanner.get_char()
stack.pop()
elif stack[-1] == 'asm':
if scanner.scan(r'\s+'):
token = Text
elif scanner.scan(r'end'):
token = Keyword
stack.pop()
elif scanner.scan(r'\{.*?\}|\(\*.*?\*\)'):
if scanner.match.startswith('$'):
token = Comment.Preproc
else:
token = Comment.Multiline
elif scanner.scan(r'//.*?$'):
token = Comment.Single
elif scanner.scan(r"'"):
token = String
stack.append('string')
elif scanner.scan(r'@@[A-Za-z_][A-Za-z_0-9]*'):
token = Name.Label
elif scanner.scan(r'[A-Za-z_][A-Za-z_0-9]*'):
lowercase_name = scanner.match.lower()
if lowercase_name in self.ASM_INSTRUCTIONS:
token = Keyword
elif lowercase_name in self.ASM_REGISTERS:
token = Name.Builtin
else:
token = Name
elif scanner.scan(r'[-+*\/=<>:;,.@\^]+'):
token = Operator
elif scanner.scan(r'[\(\)\[\]]+'):
token = Punctuation
elif scanner.scan(r'\$[0-9A-Fa-f]+'):
token = Number.Hex
elif scanner.scan(r'\d+(?![eE]|\.[^.])'):
token = Number.Integer
elif scanner.scan(r'\d+(\.\d+([eE][+-]?\d+)?|[eE][+-]?\d+)'):
token = Number.Float
else:
scanner.get_char()
stack.pop()
# save the dot!!!11
if scanner.match.strip():
was_dot = scanner.match == '.'
yield scanner.start_pos, token, scanner.match or ''
| DelphiLexer |
python | numpy__numpy | numpy/testing/tests/test_utils.py | {
"start": 1956,
"end": 11302
} | class ____(_GenericTest):
def _assert_func(self, *args, **kwargs):
assert_array_equal(*args, **kwargs)
def test_generic_rank1(self):
"""Test rank 1 array for all dtypes."""
def foo(t):
a = np.empty(2, t)
a.fill(1)
b = a.copy()
c = a.copy()
c.fill(0)
self._test_equal(a, b)
self._test_not_equal(c, b)
# Test numeric types and object
for t in '?bhilqpBHILQPfdgFDG':
foo(t)
# Test strings
for t in ['S1', 'U1']:
foo(t)
def test_0_ndim_array(self):
x = np.array(473963742225900817127911193656584771)
y = np.array(18535119325151578301457182298393896)
with pytest.raises(AssertionError) as exc_info:
self._assert_func(x, y)
msg = str(exc_info.value)
assert_('Mismatched elements: 1 / 1 (100%)\n'
in msg)
y = x
self._assert_func(x, y)
x = np.array(4395065348745.5643764887869876)
y = np.array(0)
expected_msg = ('Mismatched elements: 1 / 1 (100%)\n'
'Max absolute difference among violations: '
'4.39506535e+12\n'
'Max relative difference among violations: inf\n')
with pytest.raises(AssertionError, match=re.escape(expected_msg)):
self._assert_func(x, y)
x = y
self._assert_func(x, y)
def test_generic_rank3(self):
"""Test rank 3 array for all dtypes."""
def foo(t):
a = np.empty((4, 2, 3), t)
a.fill(1)
b = a.copy()
c = a.copy()
c.fill(0)
self._test_equal(a, b)
self._test_not_equal(c, b)
# Test numeric types and object
for t in '?bhilqpBHILQPfdgFDG':
foo(t)
# Test strings
for t in ['S1', 'U1']:
foo(t)
def test_nan_array(self):
"""Test arrays with nan values in them."""
a = np.array([1, 2, np.nan])
b = np.array([1, 2, np.nan])
self._test_equal(a, b)
c = np.array([1, 2, 3])
self._test_not_equal(c, b)
def test_string_arrays(self):
"""Test two arrays with different shapes are found not equal."""
a = np.array(['floupi', 'floupa'])
b = np.array(['floupi', 'floupa'])
self._test_equal(a, b)
c = np.array(['floupipi', 'floupa'])
self._test_not_equal(c, b)
def test_recarrays(self):
"""Test record arrays."""
a = np.empty(2, [('floupi', float), ('floupa', float)])
a['floupi'] = [1, 2]
a['floupa'] = [1, 2]
b = a.copy()
self._test_equal(a, b)
c = np.empty(2, [('floupipi', float),
('floupi', float), ('floupa', float)])
c['floupipi'] = a['floupi'].copy()
c['floupa'] = a['floupa'].copy()
with pytest.raises(TypeError):
self._test_not_equal(c, b)
def test_masked_nan_inf(self):
# Regression test for gh-11121
a = np.ma.MaskedArray([3., 4., 6.5], mask=[False, True, False])
b = np.array([3., np.nan, 6.5])
self._test_equal(a, b)
self._test_equal(b, a)
a = np.ma.MaskedArray([3., 4., 6.5], mask=[True, False, False])
b = np.array([np.inf, 4., 6.5])
self._test_equal(a, b)
self._test_equal(b, a)
# Also provides test cases for gh-11121
def test_masked_scalar(self):
# Test masked scalar vs. plain/masked scalar
for a_val, b_val, b_masked in itertools.product(
[3., np.nan, np.inf],
[3., 4., np.nan, np.inf, -np.inf],
[False, True],
):
a = np.ma.MaskedArray(a_val, mask=True)
b = np.ma.MaskedArray(b_val, mask=True) if b_masked else np.array(b_val)
self._test_equal(a, b)
self._test_equal(b, a)
# Test masked scalar vs. plain array
for a_val, b_val in itertools.product(
[3., np.nan, -np.inf],
itertools.product([3., 4., np.nan, np.inf, -np.inf], repeat=2),
):
a = np.ma.MaskedArray(a_val, mask=True)
b = np.array(b_val)
self._test_equal(a, b)
self._test_equal(b, a)
# Test masked scalar vs. masked array
for a_val, b_val, b_mask in itertools.product(
[3., np.nan, np.inf],
itertools.product([3., 4., np.nan, np.inf, -np.inf], repeat=2),
itertools.product([False, True], repeat=2),
):
a = np.ma.MaskedArray(a_val, mask=True)
b = np.ma.MaskedArray(b_val, mask=b_mask)
self._test_equal(a, b)
self._test_equal(b, a)
def test_subclass_that_overrides_eq(self):
# While we cannot guarantee testing functions will always work for
# subclasses, the tests should ideally rely only on subclasses having
# comparison operators, not on them being able to store booleans
# (which, e.g., astropy Quantity cannot usefully do). See gh-8452.
class MyArray(np.ndarray):
def __eq__(self, other):
return bool(np.equal(self, other).all())
def __ne__(self, other):
return not self == other
a = np.array([1., 2.]).view(MyArray)
b = np.array([2., 3.]).view(MyArray)
assert_(type(a == a), bool)
assert_(a == a)
assert_(a != b)
self._test_equal(a, a)
self._test_not_equal(a, b)
self._test_not_equal(b, a)
expected_msg = ('Mismatched elements: 1 / 2 (50%)\n'
'Max absolute difference among violations: 1.\n'
'Max relative difference among violations: 0.5')
with pytest.raises(AssertionError, match=re.escape(expected_msg)):
self._test_equal(a, b)
c = np.array([0., 2.9]).view(MyArray)
expected_msg = ('Mismatched elements: 1 / 2 (50%)\n'
'Max absolute difference among violations: 2.\n'
'Max relative difference among violations: inf')
with pytest.raises(AssertionError, match=re.escape(expected_msg)):
self._test_equal(b, c)
def test_subclass_that_does_not_implement_npall(self):
class MyArray(np.ndarray):
def __array_function__(self, *args, **kwargs):
return NotImplemented
a = np.array([1., 2.]).view(MyArray)
b = np.array([2., 3.]).view(MyArray)
with assert_raises(TypeError):
np.all(a)
self._test_equal(a, a)
self._test_not_equal(a, b)
self._test_not_equal(b, a)
def test_suppress_overflow_warnings(self):
# Based on issue #18992
with pytest.raises(AssertionError):
with np.errstate(all="raise"):
np.testing.assert_array_equal(
np.array([1, 2, 3], np.float32),
np.array([1, 1e-40, 3], np.float32))
def test_array_vs_scalar_is_equal(self):
"""Test comparing an array with a scalar when all values are equal."""
a = np.array([1., 1., 1.])
b = 1.
self._test_equal(a, b)
def test_array_vs_array_not_equal(self):
"""Test comparing an array with a scalar when not all values equal."""
a = np.array([34986, 545676, 439655, 563766])
b = np.array([34986, 545676, 439655, 0])
expected_msg = ('Mismatched elements: 1 / 4 (25%)\n'
'Mismatch at index:\n'
' [3]: 563766 (ACTUAL), 0 (DESIRED)\n'
'Max absolute difference among violations: 563766\n'
'Max relative difference among violations: inf')
with pytest.raises(AssertionError, match=re.escape(expected_msg)):
self._assert_func(a, b)
a = np.array([34986, 545676, 439655.2, 563766])
expected_msg = ('Mismatched elements: 2 / 4 (50%)\n'
'Mismatch at indices:\n'
' [2]: 439655.2 (ACTUAL), 439655 (DESIRED)\n'
' [3]: 563766.0 (ACTUAL), 0 (DESIRED)\n'
'Max absolute difference among violations: '
'563766.\n'
'Max relative difference among violations: '
'4.54902139e-07')
with pytest.raises(AssertionError, match=re.escape(expected_msg)):
self._assert_func(a, b)
def test_array_vs_scalar_strict(self):
"""Test comparing an array with a scalar with strict option."""
a = np.array([1., 1., 1.])
b = 1.
with pytest.raises(AssertionError):
self._assert_func(a, b, strict=True)
def test_array_vs_array_strict(self):
"""Test comparing two arrays with strict option."""
a = np.array([1., 1., 1.])
b = np.array([1., 1., 1.])
self._assert_func(a, b, strict=True)
def test_array_vs_float_array_strict(self):
"""Test comparing two arrays with strict option."""
a = np.array([1, 1, 1])
b = np.array([1., 1., 1.])
with pytest.raises(AssertionError):
self._assert_func(a, b, strict=True)
| TestArrayEqual |
python | huggingface__transformers | src/transformers/models/pvt/modeling_pvt.py | {
"start": 13963,
"end": 17787
} | class ____(nn.Module):
def __init__(self, config: PvtConfig):
super().__init__()
self.config = config
# stochastic depth decay rule
drop_path_decays = torch.linspace(0, config.drop_path_rate, sum(config.depths), device="cpu").tolist()
# patch embeddings
embeddings = []
for i in range(config.num_encoder_blocks):
embeddings.append(
PvtPatchEmbeddings(
config=config,
image_size=config.image_size if i == 0 else self.config.image_size // (2 ** (i + 1)),
patch_size=config.patch_sizes[i],
stride=config.strides[i],
num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1],
hidden_size=config.hidden_sizes[i],
cls_token=i == config.num_encoder_blocks - 1,
)
)
self.patch_embeddings = nn.ModuleList(embeddings)
# Transformer blocks
blocks = []
cur = 0
for i in range(config.num_encoder_blocks):
# each block consists of layers
layers = []
if i != 0:
cur += config.depths[i - 1]
for j in range(config.depths[i]):
layers.append(
PvtLayer(
config=config,
hidden_size=config.hidden_sizes[i],
num_attention_heads=config.num_attention_heads[i],
drop_path=drop_path_decays[cur + j],
sequences_reduction_ratio=config.sequence_reduction_ratios[i],
mlp_ratio=config.mlp_ratios[i],
)
)
blocks.append(nn.ModuleList(layers))
self.block = nn.ModuleList(blocks)
# Layer norms
self.layer_norm = nn.LayerNorm(config.hidden_sizes[-1], eps=config.layer_norm_eps)
def forward(
self,
pixel_values: torch.FloatTensor,
output_attentions: Optional[bool] = False,
output_hidden_states: Optional[bool] = False,
return_dict: Optional[bool] = True,
) -> Union[tuple, BaseModelOutput]:
all_hidden_states = () if output_hidden_states else None
all_self_attentions = () if output_attentions else None
batch_size = pixel_values.shape[0]
num_blocks = len(self.block)
hidden_states = pixel_values
for idx, (embedding_layer, block_layer) in enumerate(zip(self.patch_embeddings, self.block)):
# first, obtain patch embeddings
hidden_states, height, width = embedding_layer(hidden_states)
# second, send embeddings through blocks
for block in block_layer:
layer_outputs = block(hidden_states, height, width, output_attentions)
hidden_states = layer_outputs[0]
if output_attentions:
all_self_attentions = all_self_attentions + (layer_outputs[1],)
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if idx != num_blocks - 1:
hidden_states = hidden_states.reshape(batch_size, height, width, -1).permute(0, 3, 1, 2).contiguous()
hidden_states = self.layer_norm(hidden_states)
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)
return BaseModelOutput(
last_hidden_state=hidden_states,
hidden_states=all_hidden_states,
attentions=all_self_attentions,
)
@auto_docstring
| PvtEncoder |
python | scipy__scipy | benchmarks/benchmarks/integrate.py | {
"start": 3504,
"end": 3878
} | class ____(Benchmark):
def setup(self) -> None:
x, self.dx = np.linspace(0, 5, 1000, retstep=True)
self.y = np.sin(2*np.pi*x)
self.y2 = np.tile(self.y, (100, 100, 1))
def time_1d(self) -> None:
cumulative_simpson(self.y, dx=self.dx)
def time_multid(self) -> None:
cumulative_simpson(self.y2, dx=self.dx)
| CumulativeSimpson |
python | doocs__leetcode | solution/1000-1099/1072.Flip Columns For Maximum Number of Equal Rows/Solution.py | {
"start": 0,
"end": 267
} | class ____:
def maxEqualRowsAfterFlips(self, matrix: List[List[int]]) -> int:
cnt = Counter()
for row in matrix:
t = tuple(row) if row[0] == 0 else tuple(x ^ 1 for x in row)
cnt[t] += 1
return max(cnt.values())
| Solution |
python | catalyst-team__catalyst | catalyst/metrics/_segmentation.py | {
"start": 18063,
"end": 23214
} | class ____(RegionBasedMetric):
"""
Trevsky Metric,
trevsky score = tp / (tp + fp * beta + fn * alpha)
Args:
alpha: false negative coefficient, bigger alpha bigger penalty for
false negative. if beta is None, alpha must be in (0, 1)
beta: false positive coefficient, bigger alpha bigger penalty for false
positive. Must be in (0, 1), if None beta = (1 - alpha)
class_dim: indicates class dimension (K) for ``outputs`` and
``targets`` tensors (default = 1)
weights: class weights
class_names: class names
threshold: threshold for outputs binarization
eps: epsilon to avoid zero division
compute_on_call: Computes and returns metric value during metric call.
Used for per-batch logging. default: True
compute_per_class_metrics: boolean flag to compute per-class metrics
(default: SETTINGS.compute_per_class_metrics or False).
prefix: metric prefix
suffix: metric suffix
Examples:
.. code-block:: python
import torch
from catalyst import metrics
outputs = torch.tensor([[[[0.8, 0.1, 0], [0, 0.4, 0.3], [0, 0, 1]]]])
targets = torch.tensor([[[[1.0, 0, 0], [0, 1, 0], [1, 1, 0]]]])
metric = metrics.TrevskyMetric(alpha=0.2)
metric.reset()
metric.compute()
# per_class, micro, macro, weighted
# ([tensor(0.4167)], tensor(0.4167), tensor(0.4167), None)
metric.update_key_value(outputs, targets)
metric.compute_key_value()
# {
# 'trevsky': tensor(0.4167),
# 'trevsky/_macro': tensor(0.4167)
# 'trevsky/_micro': tensor(0.4167),
# 'trevsky/class_00': tensor(0.4167),
# }
.. code-block:: python
import os
import torch
from torch import nn
from torch.utils.data import DataLoader
from catalyst import dl
from catalyst.contrib import IoULoss, MNIST
model = nn.Sequential(
nn.Conv2d(1, 1, 3, 1, 1), nn.ReLU(),
nn.Conv2d(1, 1, 3, 1, 1), nn.Sigmoid(),
)
criterion = IoULoss()
optimizer = torch.optim.Adam(model.parameters(), lr=0.02)
loaders = {
"train": DataLoader(
MNIST(os.getcwd(), train=True),
batch_size=32
),
"valid": DataLoader(
MNIST(os.getcwd(), train=False),
batch_size=32
),
}
class CustomRunner(dl.SupervisedRunner):
def handle_batch(self, batch):
x = batch[self._input_key]
x_noise = (x + torch.rand_like(x)).clamp_(0, 1)
x_ = self.model(x_noise)
self.batch = {
self._input_key: x, self._output_key: x_, self._target_key: x
}
runner = CustomRunner(
input_key="features",
output_key="scores",
target_key="targets",
loss_key="loss"
)
# model training
runner.train(
model=model,
criterion=criterion,
optimizer=optimizer,
loaders=loaders,
num_epochs=1,
callbacks=[
dl.IOUCallback(input_key="scores", target_key="targets"),
dl.DiceCallback(input_key="scores", target_key="targets"),
dl.TrevskyCallback(input_key="scores", target_key="targets", alpha=0.2),
],
logdir="./logdir",
valid_loader="valid",
valid_metric="loss",
minimize_valid_metric=True,
verbose=True,
)
.. note::
Please follow the `minimal examples`_ sections for more use cases.
.. _`minimal examples`: https://github.com/catalyst-team/catalyst#minimal-examples # noqa: E501, W505
"""
def __init__(
self,
alpha: float,
beta: Optional[float] = None,
class_dim: int = 1,
weights: Optional[List[float]] = None,
class_names: Optional[List[str]] = None,
threshold: Optional[float] = None,
eps: float = 1e-7,
compute_on_call: bool = True,
compute_per_class_metrics: bool = SETTINGS.compute_per_class_metrics,
prefix: Optional[str] = None,
suffix: Optional[str] = None,
):
"""Init."""
if beta is None:
assert 0 < alpha < 1, "if beta=None, alpha must be in (0, 1)"
beta = 1 - alpha
metric_fn = partial(_trevsky, alpha=alpha, beta=beta, eps=eps)
super().__init__(
metric_fn=metric_fn,
metric_name="trevsky",
compute_on_call=compute_on_call,
compute_per_class_metrics=compute_per_class_metrics,
prefix=prefix,
suffix=suffix,
class_dim=class_dim,
weights=weights,
class_names=class_names,
threshold=threshold,
)
__all__ = [
"RegionBasedMetric",
"IOUMetric",
"DiceMetric",
"TrevskyMetric",
]
| TrevskyMetric |
python | kamyu104__LeetCode-Solutions | Python/maximum-69-number.py | {
"start": 32,
"end": 387
} | class ____(object):
def maximum69Number (self, num):
"""
:type num: int
:rtype: int
"""
curr, base, change = num, 3, 0
while curr:
if curr%10 == 6:
change = base
base *= 10
curr //= 10
return num+change
# Time: O(logn)
# Space: O(logn)
| Solution |
python | HypothesisWorks__hypothesis | hypothesis-python/tests/cover/test_lookup.py | {
"start": 4348,
"end": 5770
} | class ____:
pass
@pytest.mark.parametrize(
"typ,coll_type",
[
(_Set[Elem], set),
(_FrozenSet[Elem], frozenset),
(_Dict[Elem, None], dict),
(set[Elem], set),
(frozenset[Elem], frozenset),
# (dict[Elem, None], dict), # FIXME this should work
(typing.DefaultDict[Elem, None], collections.defaultdict),
(typing.KeysView[Elem], type({}.keys())),
(typing.ValuesView[Elem], type({}.values())),
(_List[Elem], list),
(_Tuple[Elem], tuple),
(_Tuple[Elem, ...], tuple),
(list[Elem], list),
(tuple[Elem], tuple),
(tuple[Elem, ...], tuple),
(typing.Iterator[Elem], typing.Iterator),
(typing.Sequence[Elem], typing.Sequence),
(typing.Iterable[Elem], typing.Iterable),
(typing.Mapping[Elem, None], typing.Mapping),
(typing.Container[Elem], typing.Container),
(typing.NamedTuple("A_NamedTuple", (("elem", Elem),)), tuple),
(typing.Counter[Elem], typing.Counter),
(typing.Deque[Elem], typing.Deque),
],
ids=repr,
)
@given(data=st.data())
def test_specialised_collection_types(data, typ, coll_type):
ex = data.draw(from_type(typ))
assert isinstance(ex, coll_type)
instances = [isinstance(elem, Elem) for elem in ex]
assert all(instances)
assume(instances) # non-empty collections without calling len(iterator)
| Elem |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 205544,
"end": 205899
} | class ____(sgqlc.types.Type):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("client_mutation_id", "repository")
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
repository = sgqlc.types.Field("Repository", graphql_name="repository")
| ArchiveRepositoryPayload |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/protocol53.py | {
"start": 3809,
"end": 3899
} | class ____(Proto_ContraSelf):
def m(self, x: Self) -> None: ...
| Impl_ContraSelfExplicit2 |
python | rapidsai__cudf | python/cudf_polars/cudf_polars/dsl/expressions/selection.py | {
"start": 1941,
"end": 2674
} | class ____(Expr):
__slots__ = ()
_non_child = ("dtype",)
def __init__(self, dtype: DataType, values: Expr, indices: Expr):
self.dtype = dtype
self.children = (values, indices)
self.is_pointwise = False
def do_evaluate(
self, df: DataFrame, *, context: ExecutionContext = ExecutionContext.FRAME
) -> Column:
"""Evaluate this expression given a dataframe for context."""
values, mask = (child.evaluate(df, context=context) for child in self.children)
table = plc.stream_compaction.apply_boolean_mask(
plc.Table([values.obj]), mask.obj, stream=df.stream
)
return Column(table.columns()[0], dtype=self.dtype).sorted_like(values)
| Filter |
python | RaRe-Technologies__gensim | gensim/test/test_text_analysis.py | {
"start": 4952,
"end": 5075
} | class ____(BaseTestCases.TextAnalyzerTestBase):
accumulator_cls = WordOccurrenceAccumulator
| TestWordOccurrenceAccumulator |
python | PyCQA__flake8 | src/flake8/exceptions.py | {
"start": 345,
"end": 980
} | class ____(Flake8Exception):
"""Exception raised when a plugin fails to load."""
FORMAT = 'Flake8 failed to load plugin "%(name)s" due to %(exc)s.'
def __init__(self, plugin_name: str, exception: Exception) -> None:
"""Initialize our FailedToLoadPlugin exception."""
self.plugin_name = plugin_name
self.original_exception = exception
super().__init__(plugin_name, exception)
def __str__(self) -> str:
"""Format our exception message."""
return self.FORMAT % {
"name": self.plugin_name,
"exc": self.original_exception,
}
| FailedToLoadPlugin |
python | qdrant__qdrant-client | qdrant_client/http/models/models.py | {
"start": 21844,
"end": 21962
} | class ____(BaseModel, extra="forbid"):
create_alias: "CreateAlias" = Field(..., description="")
| CreateAliasOperation |
python | gevent__gevent | src/gevent/tests/test__order.py | {
"start": 704,
"end": 742
} | class ____(Test):
count = 1000
| TestM |
python | pytorch__pytorch | test/inductor/test_fxir_backend.py | {
"start": 27649,
"end": 40917
} | class ____(InductorTestCase):
device = GPU_TYPE
def check(
self, model, inp, dynamic_shapes=None, strict=False
) -> torch.fx.GraphModule:
with torch.no_grad():
ep = torch.export.export(
model, inp, dynamic_shapes=dynamic_shapes, strict=strict
)
gm = torch._inductor.aot_compile(
ep.module(), inp, options={"fx_wrapper": True, **test_config}
)
# Flatten args for fx_wrapper gm
flat_args, _ = pytree.tree_flatten(inp)
self.assertTrue(same(model(*inp), gm(*flat_args)))
for node in gm.graph.nodes:
if (
node.op == "call_function"
and node.target != triton_kernel_wrapper_mutation
):
self.assertTrue(node.meta.get("val", None) is not None)
return gm
def test_aoti_fx_add(self):
class M(torch.nn.Module):
def forward(self, x, y):
return x + y
inp = (torch.ones(3, device=self.device), torch.ones(3, device=self.device))
self.check(M(), inp)
def test_aoti_fx_const(self):
class M(torch.nn.Module):
def __init__(self, device):
super().__init__()
self.device = device
self.a = torch.nn.Parameter(torch.ones(3, device=self.device))
self.b = torch.ones(3, device=self.device)
def forward(self, x, y):
return x + y + self.a + self.b + torch.tensor(3, device=self.device)
inp = (torch.ones(3, device=self.device), torch.ones(3, device=self.device))
self.check(M(self.device), inp)
def test_aoti_fx_linear(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.linear = torch.nn.Linear(3, 3)
def forward(self, x):
return self.linear(x)
inp = (torch.ones(3, 3, device=self.device),)
self.check(M().to(self.device), inp)
def test_aoti_fx_dynamic(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, x, y):
return x + y
inp = (torch.ones(3, device=self.device), torch.ones(3, device=self.device))
self.check(
M().to(device=self.device),
inp,
dynamic_shapes=({0: Dim.DYNAMIC}, {0: Dim.DYNAMIC}),
)
def test_custom_triton_autotune_dynamic(self):
class Model(torch.nn.Module):
def forward(self, x, y):
output = torch.zeros_like(x)
x_elements = output.size()[0]
y_elements = output.size()[1]
def grid(meta):
return (
triton.cdiv(x_elements, meta["BLOCK_SIZE_X"]),
triton.cdiv(y_elements, meta["BLOCK_SIZE_Y"]),
)
add_kernel_2d_autotuned[grid](x, y, output, x_elements, y_elements)
return output
num_dims = 2
dims = [10] * num_dims
x = torch.randn(*dims, device=self.device)
y = torch.randn(*dims, device=self.device)
dim0_x = Dim("dim0_x", min=1, max=10)
dim0_y = Dim("dim0_y", min=1, max=10)
dynamic_shapes = {"x": {0: dim0_x}, "y": {0: dim0_y}}
self.check(
Model().to(device=self.device),
(x, y),
dynamic_shapes=dynamic_shapes,
strict=True,
)
def test_custom_backend(self):
"""
Test registering a custom FX backend.
"""
called = False
class CustomWrapperCodegen(WrapperFxCodegen):
def compile_graph(self, gm):
"""
Simply records whether this override was called.
"""
nonlocal called
called = True
return super().compile_graph(gm)
class M(torch.nn.Module):
def forward(self, x):
return x + 1
# Register a custom FX backend.
custom_backend = common.DeviceCodegen(
TritonScheduling,
PythonWrapperCodegen,
fx_wrapper_codegen=CustomWrapperCodegen,
)
with unittest.mock.patch.dict(
common.device_codegens, {self.device: custom_backend}
):
# The backend should not have been called yet.
self.assertFalse(called)
inp = (torch.randn(8, device=self.device),)
self.check(M().to(self.device), inp)
# Now the backend should have been called.
self.assertTrue(called)
@parametrize(
"expr",
[
(2 * Dim("x") + 1),
(Dim("x", min=3) - 3),
],
)
def test_dynamic_input_expr(self, expr: sympy.Expr):
"""
Test dynamic shapes with a nontrivial input expression.
"""
class M(torch.nn.Module):
def forward(self, x):
return x.reshape(x.shape[0] * x.shape[1]) + x.shape[1]
dynamic_shapes = {"x": {0: expr}}
inp = (torch.randn((5, 4), device=self.device),)
gm = self.check(M().to(self.device), inp, dynamic_shapes=dynamic_shapes)
# Check for dynamic size ops.
self.assertEqual(
len(
gm.graph.find_nodes(
op="call_function", target=torch.ops.aten.sym_size.int
)
),
1,
)
@parametrize("pred", (False, True))
def test_cond_multi_inputs_and_outputs(self, pred):
"""
Test torch.cond and check the output graphs.
"""
class M(torch.nn.Module):
def forward(self, pred, x, y):
def true_fn(x, y):
return torch.tanh(x), torch.relu(y)
def false_fn(x, y):
return tuple(t / 2 for t in true_fn(x, y))
return torch.cond(pred, true_fn, false_fn, (x, y))
pred = torch.tensor([True], device=self.device)
(x, y) = [torch.randn(8, device=self.device) for _ in range(2)]
gm = self.check(M(), (pred, x, y))
# Check the graph.
self.assertExpectedInline(
gm.code.strip(),
"""\
def forward(self, arg0_1, arg1_1, arg2_1):
true_graph_0 = self.true_graph_0
false_graph_0 = self.false_graph_0
cond = torch.ops.higher_order.cond(arg0_1, true_graph_0, false_graph_0, (arg1_1, arg2_1)); arg0_1 = true_graph_0 = false_graph_0 = arg1_1 = arg2_1 = None
buf1 = cond[0]
buf2 = cond[1]; cond = None
return [buf1, buf2]""", # noqa: B950
)
def test_dims_dynamic_outer_static_padded_inner(self):
"""
Test padding on inner dimensions, with dynamic outer dimensions.
"""
class M(torch.nn.Module):
def forward(self, x, y):
return x + y
def get_input_padded_inner(shape):
full_shape = shape[:-1] + (shape[-1] * 2,)
full = torch.randn(full_shape, dtype=torch.float32, device=self.device)
view = torch.as_strided(full, shape, full.stride())
return view
shape = (4, 4, 4)
args = tuple(get_input_padded_inner(shape) for _ in range(2))
self.check(
M(),
args,
dynamic_shapes=({0: Dim.DYNAMIC, 1: Dim.DYNAMIC, 2: Dim.STATIC},) * 2,
)
@parametrize("length", (4, 8))
def test_cond_dynamic_shape_pred_scalar_closure(self, length: int):
"""
Test cond using a predicate computed from dynamic shapes.
Also test a dynamic scalar computed outside the branches.
"""
class M(torch.nn.Module):
def forward(self, x, y):
z = x.reshape(-1)
a = y.shape[0]
def true_fn(x):
return x + a
def false_fn(x):
return true_fn(x) / 2
return torch.cond(x.shape[0] > 5, true_fn, false_fn, (z,))
(x, y) = [
torch.randn(shape, device=self.device)
for shape in [(length // 2,) * 2, (length,)]
]
dynamic_shapes = {
"x": {0: Dim.DYNAMIC},
"y": {0: Dim.DYNAMIC},
}
self.check(M(), (x, y), dynamic_shapes=dynamic_shapes)
def test_dynamic_scalar_output(self):
"""
Test an output scalar from dynamic shapes.
"""
class M(torch.nn.Module):
def forward(self, x):
return x.shape[0] * 3
x = torch.randn(7, device=self.device)
self.check(M(), (x,), dynamic_shapes=({0: Dim.DYNAMIC},))
@parametrize("dynamic", (False, True))
@parametrize("input_", (1.5, 2, False))
def test_item(self, input_, dynamic: bool):
"""
Test calling Tensor.item.
"""
class M(torch.nn.Module):
def forward(self, x):
return x[1].item()
x = torch.tensor((input_,) * 10)
d = Dim("s0", min=1)
dynamic_shapes = ({0: 2 * d},) if dynamic else None
self.check(M(), (x,), dynamic_shapes=dynamic_shapes)
@parametrize("pred", (False, True))
def test_mismatched_branch_dynamic(self, pred: bool):
"""
Test cond branches with mismatched dynamic shapes.
"""
# Apply an offset to guarantee the truith of the predicate.
pred_offset = 1 if pred else -1
inputs = [
torch.tensor([pred], device=self.device),
] + [torch.randn(10, 20, device=self.device) + pred_offset for _ in range(3)]
dim0_a = Dim("s0", min=4, max=1024)
dim0_b = Dim("s1", min=4, max=1024)
dynamic_shapes = {
"p": {},
"x": {0: dim0_a, 1: None},
"y": {0: dim0_b, 1: None},
"z": {0: dim0_a, 1: None},
}
self.check(
CondModels.MismatchedOutputSize(),
tuple(inputs),
dynamic_shapes=dynamic_shapes,
)
def test_const_folded_subgraph(self):
"""
If a graph only contains a call_module node to a subgraph,
where the subgraph can be const-folded away,
validate the fake mode used in FXConverter generation is not None.
"""
device = self.device
shape = (5, 10)
class Submodule(torch.nn.Module):
def forward(self):
return torch.randn(*shape, device=device) + 1
# Create a parent graph with this module as a subgraph and output
ep = torch.export.export(Submodule(), ())
parent_graph = torch.fx.Graph()
call_mod = parent_graph.call_module("sub", args=())
get_item = parent_graph.call_function(
operator.getitem, args=(call_mod, slice(None))
)
parent_graph.output((get_item,))
parent = torch.fx.GraphModule({"sub": ep.module()}, parent_graph)
# Verify FXConverter.generate uses non-null fake mode
# Intercept _set_node_metadata_hook to ensure fake_mode is not None
orig_set_hook = torch._inductor.codegen.wrapper_fxir._set_node_metadata_hook
called = False
def mock_set_hook(gm: torch.fx.GraphModule, fn):
nonlocal called
called = True
# Please update this check if `fake_mode` is
# no longer used in FXConverter call to _node_metadata_hook
self.assertTrue("fake_mode" in fn.keywords)
self.assertIsNotNone(fn.keywords["fake_mode"])
return orig_set_hook(gm, fn)
self.assertFalse(called)
with unittest.mock.patch.object(
torch._inductor.codegen.wrapper_fxir,
"_set_node_metadata_hook",
mock_set_hook,
):
args = ()
compiled = torch._inductor.aot_compile(
parent, args, options={"fx_wrapper": True}
)
self.assertTrue(called)
compiled_out = compiled(*args)
self.assertEqual(compiled_out.shape, shape)
def test_reshape_dynamic_ph(self):
"""
Test dynamic scalars using SymInts placeholder
"""
class TestModule(torch.nn.Module):
def forward(self, x, shape):
return torch.reshape(x, shape) + 2
ds = {
"x": (torch.export.Dim.AUTO, torch.export.Dim.AUTO),
"shape": [torch.export.Dim.AUTO, torch.export.Dim.AUTO],
}
args = (torch.randn((12, 14), device=self.device), [6, 28])
self.check(TestModule(), args, ds)
def test_reshape_dynamic_tmd(self):
"""
Test dynamic reshape using shape dependent information
"""
class TestModule(torch.nn.Module):
def forward(self, x):
new_shape = [x.shape[0] // 2, x.shape[1] * 2]
return torch.reshape(x, new_shape) + 2
ds = {
"x": (torch.export.Dim.AUTO, torch.export.Dim.AUTO),
}
args = (torch.randn((12, 14), device=self.device),)
self.check(TestModule(), args, ds)
| AOTFxirTestCase |
python | dagster-io__dagster | examples/docs_snippets/docs_snippets/concepts/ops_jobs_graphs/graph_provides_config.py | {
"start": 23,
"end": 144
} | class ____(dg.Config):
n: float
@dg.op
def add_n(config: AddNConfig, number):
return number + config.n
| AddNConfig |
python | gevent__gevent | src/gevent/tests/test__signal.py | {
"start": 285,
"end": 3813
} | class ____(greentest.TestCase):
error_fatal = False
__timeout__ = greentest.LARGE_TIMEOUT
def test_handler(self):
with self.assertRaises(TypeError):
gevent.signal_handler(signal.SIGALRM, 1)
def test_alarm(self):
sig = gevent.signal_handler(signal.SIGALRM, raise_Expected)
self.assertFalse(sig.ref)
sig.ref = True
self.assertTrue(sig.ref)
sig.ref = False
def test():
signal.alarm(1)
with self.assertRaises(Expected) as exc:
gevent.sleep(2)
ex = exc.exception
self.assertEqual(str(ex), 'TestSignal')
try:
test()
# also let's check that the handler stays installed.
test()
finally:
sig.cancel()
@greentest.ignores_leakcheck
def test_reload(self):
# The site module tries to set attributes
# on all the modules that are loaded (specifically, __file__).
# If gevent.signal is loaded, and is our compatibility shim,
# this used to fail on Python 2: sys.modules['gevent.signal'] has no
# __loader__ attribute, so site.py's main() function tries to do
# gevent.signal.__file__ = os.path.abspath(gevent.signal.__file__), which
# used to not be allowed. (Under Python 3, __loader__ is present so this
# doesn't happen). See
# https://github.com/gevent/gevent/issues/805
# This fails on Python 3.5 under linux (travis CI) but not
# locally on macOS with (for both libuv and libev cffi); sometimes it
# failed with libuv on Python 3.6 too, but not always:
# AttributeError: cffi library 'gevent.libuv._corecffi' has no function,
# constant or global variable named '__loader__'
# which in turn leads to:
# SystemError: <built-in function getattr> returned a result with an error set
# It's not safe to continue after a SystemError, so we just skip the test there.
# As of Jan 2018 with CFFI 1.11.2 this happens reliably on macOS 3.6 and 3.7
# as well.
# See https://bitbucket.org/cffi/cffi/issues/352/systemerror-returned-a-result-with-an
# This is fixed in 1.11.3
import gevent.signal # make sure it's in sys.modules pylint:disable=redefined-outer-name
assert gevent.signal
import site
if greentest.PY3:
from importlib import reload as reload_module
else:
# builtin on py2
reload_module = reload # pylint:disable=undefined-variable
try:
reload_module(site)
except TypeError:
# Non-CFFI on Travis triggers this, for some reason,
# but only on 3.6, not 3.4 or 3.5, and not yet on 3.7.
# The only module seen to trigger this is __main__, i.e., this module.
# This is hard to trigger in a virtualenv since it appears they
# install their own site.py, different from the one that ships with
# Python 3.6., and at least the version I have doesn't mess with
# __cached__
assert greentest.PY36
import sys
for m in set(sys.modules.values()):
try:
if m.__cached__ is None:
print("Module has None __cached__", m, file=sys.stderr)
except AttributeError:
continue
if __name__ == '__main__':
greentest.main()
| TestSignal |
python | bokeh__bokeh | src/bokeh/models/widgets/inputs.py | {
"start": 9630,
"end": 10038
} | class ____(Widget):
""" Base class for toggleable (boolean) input widgets. """
# explicit __init__ to support Init signatures
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
active = Bool(default=False, help="""
The state of the widget.
""")
label = String(default="", help="""
The label next to the input.
""")
| ToggleInput |
python | zarr-developers__zarr-python | src/zarr/core/dtype/npy/string.py | {
"start": 986,
"end": 1259
} | class ____(TypedDict):
"""
Configuration for a fixed-length string data type in Zarr V3.
Attributes
----------
length_bytes : int
The length in bytes of the data associated with this configuration.
"""
length_bytes: int
| LengthBytesConfig |
python | doocs__leetcode | lcof2/剑指 Offer II 115. 重建序列/Solution.py | {
"start": 0,
"end": 633
} | class ____:
def sequenceReconstruction(
self, nums: List[int], sequences: List[List[int]]
) -> bool:
n = len(nums)
g = [[] for _ in range(n)]
indeg = [0] * n
for seq in sequences:
for a, b in pairwise(seq):
a, b = a - 1, b - 1
g[a].append(b)
indeg[b] += 1
q = deque(i for i, x in enumerate(indeg) if x == 0)
while len(q) == 1:
i = q.popleft()
for j in g[i]:
indeg[j] -= 1
if indeg[j] == 0:
q.append(j)
return len(q) == 0
| Solution |
python | getsentry__sentry | src/sentry/sentry_metrics/consumers/indexer/parallel.py | {
"start": 1297,
"end": 2751
} | class ____(ProcessingStep[Union[FilteredPayload, IndexerOutputMessageBatch]]):
def __init__(
self,
next_step: ProcessingStep[KafkaPayload | RoutingPayload | InvalidMessage | FilteredPayload],
) -> None:
self.__next_step = next_step
self.__closed = False
self.__messages: Deque[Message[KafkaPayload | RoutingPayload | InvalidMessage]] = deque()
def poll(self) -> None:
self.__next_step.poll()
while self.__messages:
msg = self.__messages.popleft()
if isinstance(msg.payload, InvalidMessage):
raise msg.payload
self.__next_step.submit(msg)
def submit(self, message: Message[FilteredPayload | IndexerOutputMessageBatch]) -> None:
assert not self.__closed
if self.__messages:
raise MessageRejected()
if isinstance(message.payload, FilteredPayload):
self.__next_step.submit(cast(Message[KafkaPayload], message))
return
self.__messages.extend(message.payload.data)
_ = message.payload.cogs_data
def close(self) -> None:
self.__closed = True
def terminate(self) -> None:
self.__closed = True
logger.debug("Terminating %r...", self.__next_step)
self.__next_step.terminate()
def join(self, timeout: float | None = None) -> None:
self.__next_step.close()
self.__next_step.join(timeout)
| Unbatcher |
python | Netflix__metaflow | metaflow/_vendor/click/exceptions.py | {
"start": 284,
"end": 1076
} | class ____(Exception):
"""An exception that Click can handle and show to the user."""
#: The exit code for this exception
exit_code = 1
def __init__(self, message):
ctor_msg = message
if PY2:
if ctor_msg is not None:
ctor_msg = ctor_msg.encode("utf-8")
Exception.__init__(self, ctor_msg)
self.message = message
def format_message(self):
return self.message
def __str__(self):
return self.message
if PY2:
__unicode__ = __str__
def __str__(self):
return self.message.encode("utf-8")
def show(self, file=None):
if file is None:
file = get_text_stderr()
echo("Error: {}".format(self.format_message()), file=file)
| ClickException |
python | tensorflow__tensorflow | tensorflow/python/distribute/collective_util.py | {
"start": 1071,
"end": 1866
} | class ____(enum.Enum):
"""Cross device communication implementation.
Warning: The alias `tf.distribute.experimental.CollectiveCommunication` is
deprecated and will be removed in a future version. Use
`tf.distribute.experimental.CommunicationImplementation` instead.
* `AUTO`: Automatically chosen by Tensorflow.
* `RING`: TensorFlow's ring algorithms for all-reduce and
all-gather.
* `NCCL`: NVIDIA®'s NCCL library. This is now only used for all-reduce on
GPUs; all-reduce on CPU, all-gather and broadcast fallbacks to RING.
"""
AUTO = "AUTO"
RING = "RING"
NCCL = "NCCL"
# TODO(ayushd): add ncclAllGather implementation.
CollectiveCommunication = CommunicationImplementation
@tf_export("distribute.experimental.CommunicationOptions")
| CommunicationImplementation |
python | django__django | tests/composite_pk/test_order_by.py | {
"start": 109,
"end": 2372
} | class ____(TestCase):
maxDiff = None
@classmethod
def setUpTestData(cls):
cls.tenant_1 = Tenant.objects.create()
cls.tenant_2 = Tenant.objects.create()
cls.tenant_3 = Tenant.objects.create()
cls.user_1 = User.objects.create(
tenant=cls.tenant_1,
id=1,
email="user0001@example.com",
)
cls.user_2 = User.objects.create(
tenant=cls.tenant_1,
id=2,
email="user0002@example.com",
)
cls.user_3 = User.objects.create(
tenant=cls.tenant_2,
id=3,
email="user0003@example.com",
)
cls.comment_1 = Comment.objects.create(id=1, user=cls.user_1)
cls.comment_2 = Comment.objects.create(id=2, user=cls.user_1)
cls.comment_3 = Comment.objects.create(id=3, user=cls.user_2)
cls.comment_4 = Comment.objects.create(id=4, user=cls.user_3)
cls.comment_5 = Comment.objects.create(id=5, user=cls.user_1)
def test_order_comments_by_pk_asc(self):
self.assertSequenceEqual(
Comment.objects.order_by("pk"),
(
self.comment_1, # (1, 1)
self.comment_2, # (1, 2)
self.comment_3, # (1, 3)
self.comment_5, # (1, 5)
self.comment_4, # (2, 4)
),
)
def test_order_comments_by_pk_desc(self):
self.assertSequenceEqual(
Comment.objects.order_by("-pk"),
(
self.comment_4, # (2, 4)
self.comment_5, # (1, 5)
self.comment_3, # (1, 3)
self.comment_2, # (1, 2)
self.comment_1, # (1, 1)
),
)
def test_order_comments_by_pk_expr(self):
self.assertQuerySetEqual(
Comment.objects.order_by("pk"),
Comment.objects.order_by(F("pk")),
)
self.assertQuerySetEqual(
Comment.objects.order_by("-pk"),
Comment.objects.order_by(F("pk").desc()),
)
self.assertQuerySetEqual(
Comment.objects.order_by("-pk"),
Comment.objects.order_by(F("pk").desc(nulls_last=True)),
)
| CompositePKOrderByTests |
python | doocs__leetcode | solution/1500-1599/1547.Minimum Cost to Cut a Stick/Solution2.py | {
"start": 0,
"end": 436
} | class ____:
def minCost(self, n: int, cuts: List[int]) -> int:
cuts.extend([0, n])
cuts.sort()
m = len(cuts)
f = [[0] * m for _ in range(m)]
for i in range(m - 1, -1, -1):
for j in range(i + 2, m):
f[i][j] = inf
for k in range(i + 1, j):
f[i][j] = min(f[i][j], f[i][k] + f[k][j] + cuts[j] - cuts[i])
return f[0][-1]
| Solution |
python | openai__openai-python | src/openai/resources/responses/responses.py | {
"start": 158207,
"end": 160326
} | class ____:
def __init__(self, responses: AsyncResponses) -> None:
self._responses = responses
self.create = async_to_streamed_response_wrapper(
responses.create,
)
self.retrieve = async_to_streamed_response_wrapper(
responses.retrieve,
)
self.delete = async_to_streamed_response_wrapper(
responses.delete,
)
self.cancel = async_to_streamed_response_wrapper(
responses.cancel,
)
@cached_property
def input_items(self) -> AsyncInputItemsWithStreamingResponse:
return AsyncInputItemsWithStreamingResponse(self._responses.input_items)
@cached_property
def input_tokens(self) -> AsyncInputTokensWithStreamingResponse:
return AsyncInputTokensWithStreamingResponse(self._responses.input_tokens)
def _make_tools(tools: Iterable[ParseableToolParam] | Omit) -> List[ToolParam] | Omit:
if not is_given(tools):
return omit
converted_tools: List[ToolParam] = []
for tool in tools:
if tool["type"] != "function":
converted_tools.append(tool)
continue
if "function" not in tool:
# standard Responses API case
converted_tools.append(tool)
continue
function = cast(Any, tool)["function"] # pyright: ignore[reportUnnecessaryCast]
if not isinstance(function, PydanticFunctionTool):
raise Exception(
"Expected Chat Completions function tool shape to be created using `openai.pydantic_function_tool()`"
)
assert "parameters" in function
new_tool = ResponsesPydanticFunctionTool(
{
"type": "function",
"name": function["name"],
"description": function.get("description"),
"parameters": function["parameters"],
"strict": function.get("strict") or False,
},
function.model,
)
converted_tools.append(new_tool.cast())
return converted_tools
| AsyncResponsesWithStreamingResponse |
python | ansible__ansible | test/lib/ansible_test/_internal/completion.py | {
"start": 3117,
"end": 4105
} | class ____(CompletionConfig):
"""Base class for completion configuration of remote environments provisioned through Ansible Core CI."""
provider: t.Optional[str] = None
arch: t.Optional[str] = None
@property
def platform(self) -> str:
"""The name of the platform."""
return self.name.partition('/')[0]
@property
def version(self) -> str:
"""The version of the platform."""
return self.name.partition('/')[2]
@property
def is_default(self) -> bool:
"""True if the completion entry is only used for defaults, otherwise False."""
return not self.version
def __post_init__(self):
if not self.provider:
raise Exception(f'Remote completion entry "{self.name}" must provide a "provider" setting.')
if not self.arch:
raise Exception(f'Remote completion entry "{self.name}" must provide a "arch" setting.')
@dataclasses.dataclass(frozen=True)
| RemoteCompletionConfig |
python | sphinx-doc__sphinx | tests/roots/test-ext-coverage/grog/coverage_missing.py | {
"start": 53,
"end": 165
} | class ____:
"""An undocumented class."""
def missing_a(self):
"""An undocumented method."""
| Missing |
python | numba__numba | numba/cuda/tests/cudapy/test_frexp_ldexp.py | {
"start": 315,
"end": 2024
} | class ____(CUDATestCase):
def template_test_frexp(self, nptype, nbtype):
compiled = cuda.jit(void(nbtype[:], int32[:], nbtype))(simple_frexp)
arg = 3.1415
aryx = np.zeros(1, dtype=nptype)
aryexp = np.zeros(1, dtype=np.int32)
compiled[1, 1](aryx, aryexp, arg)
np.testing.assert_array_equal(aryx, nptype(0.785375))
self.assertEqual(aryexp, 2)
arg = np.inf
compiled[1, 1](aryx, aryexp, arg)
np.testing.assert_array_equal(aryx, nptype(np.inf))
self.assertEqual(aryexp, 0) # np.frexp gives -1
arg = np.nan
compiled[1, 1](aryx, aryexp, arg)
np.testing.assert_array_equal(aryx, nptype(np.nan))
self.assertEqual(aryexp, 0) # np.frexp gives -1
def template_test_ldexp(self, nptype, nbtype):
compiled = cuda.jit(void(nbtype[:], nbtype, int32))(simple_ldexp)
arg = 0.785375
exp = 2
aryx = np.zeros(1, dtype=nptype)
compiled[1, 1](aryx, arg, exp)
np.testing.assert_array_equal(aryx, nptype(3.1415))
arg = np.inf
compiled[1, 1](aryx, arg, exp)
np.testing.assert_array_equal(aryx, nptype(np.inf))
arg = np.nan
compiled[1, 1](aryx, arg, exp)
np.testing.assert_array_equal(aryx, nptype(np.nan))
def test_frexp_f4(self):
self.template_test_frexp(np.float32, float32)
def test_ldexp_f4(self):
self.template_test_ldexp(np.float32, float32)
def test_frexp_f8(self):
self.template_test_frexp(np.float64, float64)
def test_ldexp_f8(self):
self.template_test_ldexp(np.float64, float64)
if __name__ == '__main__':
unittest.main()
| TestCudaFrexpLdexp |
python | huggingface__transformers | src/transformers/models/markuplm/modeling_markuplm.py | {
"start": 10214,
"end": 10881
} | class ____(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.activation = nn.Tanh()
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
# We "pool" the model by simply taking the hidden state corresponding
# to the first token.
first_token_tensor = hidden_states[:, 0]
pooled_output = self.dense(first_token_tensor)
pooled_output = self.activation(pooled_output)
return pooled_output
# Copied from transformers.models.bert.modeling_bert.BertPredictionHeadTransform with Bert->MarkupLM
| MarkupLMPooler |
python | apache__airflow | providers/amazon/tests/unit/amazon/aws/waiters/test_bedrock.py | {
"start": 6185,
"end": 7583
} | class ____(TestBedrockCustomWaitersBase):
WAITER_NAME = "batch_inference_scheduled"
SENSOR = BedrockBatchInferenceSensor(
task_id="task_id",
job_arn="job_arn",
success_state=BedrockBatchInferenceSensor.SuccessState.SCHEDULED,
)
@pytest.fixture
def mock_get_job(self):
with mock.patch.object(self.client, "get_model_invocation_job") as mock_getter:
yield mock_getter
@pytest.mark.parametrize("state", SENSOR.SUCCESS_STATES)
def test_batch_inference_complete(self, state, mock_get_job):
mock_get_job.return_value = {"status": state}
BedrockHook().get_waiter(self.WAITER_NAME).wait(jobIdentifier="job_arn")
@pytest.mark.parametrize("state", SENSOR.FAILURE_STATES)
def test_batch_inference_failed(self, state, mock_get_job):
mock_get_job.return_value = {"status": state}
with pytest.raises(botocore.exceptions.WaiterError):
BedrockHook().get_waiter(self.WAITER_NAME).wait(jobIdentifier="job_arn")
def test_batch_inference_wait(self, mock_get_job):
wait = {"status": "InProgress"}
success = {"status": "Completed"}
mock_get_job.side_effect = [wait, wait, success]
BedrockHook().get_waiter(self.WAITER_NAME).wait(
jobIdentifier="job_arn", WaiterConfig={"Delay": 0.01, "MaxAttempts": 3}
)
| TestBatchInferenceScheduledWaiter |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/event/base.py | {
"start": 12187,
"end": 13914
} | class ____(_HasEventsDispatch[_ET]):
"""Define event listening functions for a particular target type."""
@classmethod
def _accept_with(
cls, target: Union[_ET, Type[_ET]], identifier: str
) -> Optional[Union[_ET, Type[_ET]]]:
def dispatch_is(*types: Type[Any]) -> bool:
return all(isinstance(target.dispatch, t) for t in types)
def dispatch_parent_is(t: Type[Any]) -> bool:
parent = cast("_JoinedDispatcher[_ET]", target.dispatch).parent
while isinstance(parent, _JoinedDispatcher):
parent = cast("_JoinedDispatcher[_ET]", parent).parent
return isinstance(parent, t)
# Mapper, ClassManager, Session override this to
# also accept classes, scoped_sessions, sessionmakers, etc.
if hasattr(target, "dispatch"):
if (
dispatch_is(cls.dispatch.__class__)
or dispatch_is(type, cls.dispatch.__class__)
or (
dispatch_is(_JoinedDispatcher)
and dispatch_parent_is(cls.dispatch.__class__)
)
):
return target
return None
@classmethod
def _listen(
cls,
event_key: _EventKey[_ET],
*,
propagate: bool = False,
insert: bool = False,
named: bool = False,
asyncio: bool = False,
) -> None:
event_key.base_listen(
propagate=propagate, insert=insert, named=named, asyncio=asyncio
)
@classmethod
def _remove(cls, event_key: _EventKey[_ET]) -> None:
event_key.remove()
@classmethod
def _clear(cls) -> None:
cls.dispatch._clear()
| Events |
python | apache__airflow | providers/ftp/src/airflow/providers/ftp/hooks/ftp.py | {
"start": 1047,
"end": 9857
} | class ____(BaseHook):
"""
Interact with FTP.
Errors that may occur throughout but should be handled downstream.
You can specify mode for data transfers in the extra field of your
connection as ``{"passive": "true"}``.
You can also specify encoding for the FTP connection as ``{"encoding": "cp1251"}``.
:param ftp_conn_id: The :ref:`ftp connection id <howto/connection:ftp>`
reference.
"""
conn_name_attr = "ftp_conn_id"
default_conn_name = "ftp_default"
conn_type = "ftp"
hook_name = "FTP"
def __init__(self, ftp_conn_id: str = default_conn_name) -> None:
super().__init__()
self.ftp_conn_id = ftp_conn_id
self.conn: ftplib.FTP | None = None
self.encoding: str | None = None
def __enter__(self):
return self
def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None:
if self.conn is not None:
self.close_conn()
def get_conn(self) -> ftplib.FTP:
"""Return an FTP connection object."""
if self.conn is None:
params = self.get_connection(self.ftp_conn_id)
pasv = params.extra_dejson.get("passive", True)
encoding = params.extra_dejson.get("encoding")
self.encoding = encoding
if encoding:
self.conn = ftplib.FTP(encoding=encoding) # nosec: B321
else:
self.conn = ftplib.FTP() # nosec: B321
if params.host:
port: int = int(ftplib.FTP_PORT)
if params.port is not None:
port = params.port
logger.info("Connecting via FTP to %s:%d", params.host, port)
self.conn.connect(params.host, port)
if params.login:
params.password = cast("str", params.password)
self.conn.login(params.login, params.password)
self.conn.set_pasv(pasv)
return self.conn
def close_conn(self):
"""Close the connection; an error will occur if the connection was never opened."""
conn = self.conn
conn.quit()
self.conn = None
def describe_directory(self, path: str) -> dict:
"""
Return a dictionary of {filename: {attributes}} for all files on a remote system which supports MLSD.
:param path: full path to the remote directory
"""
conn = self.get_conn()
return dict(conn.mlsd(path))
def list_directory(self, path: str) -> list[str]:
"""
Return a list of files on the remote system.
:param path: full path to the remote directory to list
"""
conn = self.get_conn()
return conn.nlst(path)
def create_directory(self, path: str) -> None:
"""
Create a directory on the remote system.
:param path: full path to the remote directory to create
"""
conn = self.get_conn()
conn.mkd(path)
def delete_directory(self, path: str) -> None:
"""
Delete a directory on the remote system.
:param path: full path to the remote directory to delete
"""
conn = self.get_conn()
conn.rmd(path)
def retrieve_file(
self,
remote_full_path: str,
local_full_path_or_buffer: Any,
callback: Callable | None = None,
block_size: int = 8192,
) -> None:
"""
Transfer the remote file to a local location.
If local_full_path_or_buffer is a string path, the file will be put
at that location; if it is a file-like buffer, the file will
be written to the buffer but not closed.
:param remote_full_path: full path to the remote file
:param local_full_path_or_buffer: full path to the local file or a
file-like buffer
:param callback: callback which is called each time a block of data
is read. if you do not use a callback, these blocks will be written
to the file or buffer passed in. if you do pass in a callback, note
that writing to a file or buffer will need to be handled inside the
callback.
[default: output_handle.write()]
:param block_size: file is transferred in chunks of default size 8192
or as set by user
.. code-block:: python
hook = FTPHook(ftp_conn_id="my_conn")
remote_path = "/path/to/remote/file"
local_path = "/path/to/local/file"
# with a custom callback (in this case displaying progress on each read)
def print_progress(percent_progress):
self.log.info("Percent Downloaded: %s%%" % percent_progress)
total_downloaded = 0
total_file_size = hook.get_size(remote_path)
output_handle = open(local_path, "wb")
def write_to_file_with_progress(data):
total_downloaded += len(data)
output_handle.write(data)
percent_progress = (total_downloaded / total_file_size) * 100
print_progress(percent_progress)
hook.retrieve_file(remote_path, None, callback=write_to_file_with_progress)
# without a custom callback data is written to the local_path
hook.retrieve_file(remote_path, local_path)
"""
conn = self.get_conn()
is_path = isinstance(local_full_path_or_buffer, str)
# without a callback, default to writing to a user-provided file or
# file-like buffer
if not callback:
if is_path:
output_handle = open(local_full_path_or_buffer, "wb")
else:
output_handle = local_full_path_or_buffer
callback = output_handle.write
self.log.info("Retrieving file from FTP: %s", remote_full_path)
conn.retrbinary(f"RETR {remote_full_path}", callback, block_size)
self.log.info("Finished retrieving file from FTP: %s", remote_full_path)
if is_path and output_handle:
output_handle.close()
def store_file(
self, remote_full_path: str, local_full_path_or_buffer: Any, block_size: int = 8192
) -> None:
"""
Transfers a local file to the remote location.
If local_full_path_or_buffer is a string path, the file will be read
from that location; if it is a file-like buffer, the file will
be read from the buffer but not closed.
:param remote_full_path: full path to the remote file
:param local_full_path_or_buffer: full path to the local file or a
file-like buffer
:param block_size: file is transferred in chunks of default size 8192
or as set by user
"""
conn = self.get_conn()
is_path = isinstance(local_full_path_or_buffer, str)
if is_path:
input_handle = open(local_full_path_or_buffer, "rb")
else:
input_handle = local_full_path_or_buffer
conn.storbinary(f"STOR {remote_full_path}", input_handle, block_size)
if is_path:
input_handle.close()
def delete_file(self, path: str) -> None:
"""
Remove a file on the FTP Server.
:param path: full path to the remote file
"""
conn = self.get_conn()
conn.delete(path)
def rename(self, from_name: str, to_name: str) -> str:
"""
Rename a file.
:param from_name: rename file from name
:param to_name: rename file to name
"""
conn = self.get_conn()
return conn.rename(from_name, to_name)
def get_mod_time(self, path: str) -> datetime.datetime:
"""
Return a datetime object representing the last time the file was modified.
:param path: remote file path
"""
conn = self.get_conn()
ftp_mdtm = conn.sendcmd("MDTM " + path)
time_val = ftp_mdtm[4:]
# time_val optionally has microseconds
try:
return datetime.datetime.strptime(time_val, "%Y%m%d%H%M%S.%f")
except ValueError:
return datetime.datetime.strptime(time_val, "%Y%m%d%H%M%S")
def get_size(self, path: str) -> int | None:
"""
Return the size of a file (in bytes).
:param path: remote file path
"""
conn = self.get_conn()
size = conn.size(path)
return int(size) if size else None
def test_connection(self) -> tuple[bool, str]:
"""Test the FTP connection by calling path with directory."""
try:
conn = self.get_conn()
conn.pwd
return True, "Connection successfully tested"
except Exception as e:
return False, str(e)
| FTPHook |
python | huggingface__transformers | src/transformers/models/bert_generation/modeling_bert_generation.py | {
"start": 12542,
"end": 13252
} | class ____(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
# Copied from transformers.models.bert.modeling_bert.BertLayer with Bert->BertGeneration
| BertGenerationOutput |
python | matplotlib__matplotlib | lib/matplotlib/colors.py | {
"start": 80071,
"end": 82001
} | class ____(BivarColormap):
"""
BivarColormap object generated by supersampling a regular grid.
Parameters
----------
patch : np.array
Patch is required to have a shape (k, l, 3), and will get supersampled
to a lut of shape (N, N, 4).
N : int
The number of RGB quantization levels along each axis.
shape : {'square', 'circle', 'ignore', 'circleignore'}
- If 'square' each variate is clipped to [0,1] independently
- If 'circle' the variates are clipped radially to the center
of the colormap, and a circular mask is applied when the colormap
is displayed
- If 'ignore' the variates are not clipped, but instead assigned the
'outside' color
- If 'circleignore' a circular mask is applied, but the data is not clipped
origin : (float, float)
The relative origin of the colormap. Typically (0, 0), for colormaps
that are linear on both axis, and (.5, .5) for circular colormaps.
Used when getting 1D colormaps from 2D colormaps.
name : str, optional
The name of the colormap.
"""
def __init__(self, patch, N=256, shape='square', origin=(0, 0),
name='segmented bivariate colormap'):
_api.check_shape((None, None, 3), patch=patch)
self.patch = patch
super().__init__(N, N, shape, origin, name=name)
def _init(self):
s = self.patch.shape
_patch = np.empty((s[0], s[1], 4))
_patch[:, :, :3] = self.patch
_patch[:, :, 3] = 1
transform = mpl.transforms.Affine2D().translate(-0.5, -0.5)\
.scale(self.N / (s[1] - 1), self.N / (s[0] - 1))
self._lut = np.empty((self.N, self.N, 4))
_image.resample(_patch, self._lut, transform, _image.BILINEAR,
resample=False, alpha=1)
self._isinit = True
| SegmentedBivarColormap |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/typedDictClosed8.py | {
"start": 273,
"end": 773
} | class ____(TypedDict, extra_items="str | int | Typed | Named"):
name: str
td2_1: Named = {
"name": "Fred",
"birth": {
"type": "date",
"year": 2000,
"month": 12,
"day": 31,
},
}
td2_2: Named = {
"name": "Fred",
"extra": {
"name": "test",
"value": "",
},
}
td2_3: Named = {
"name": "Fred",
}
td2_4: Named = {
"name": "Fred",
"test1": 1,
"test2": {"name": "Barb", "value": {"type": "date", "day": 31}},
}
| Named |
python | pypa__warehouse | tests/conftest.py | {
"start": 21830,
"end": 23598
} | class ____(_webtest.TestApp):
def xmlrpc(self, path, method, *args):
body = xmlrpc.client.dumps(args, methodname=method)
resp = self.post(path, body, headers={"Content-Type": "text/xml"})
return xmlrpc.client.loads(resp.body)
@pytest.fixture
def tm():
# Create a new transaction manager for dependant test cases
tm = transaction.TransactionManager(explicit=True)
tm.begin()
yield tm
# Abort the transaction, leaving database in previous state
tm.abort()
@pytest.fixture
def webtest(app_config_dbsession_from_env, tm):
"""
This fixture yields a test app with an alternative Pyramid configuration,
injecting the database session and transaction manager into the app.
This is because the Warehouse app normally manages its own database session.
After the fixture has yielded the app, the transaction is rolled back and
the database is left in its previous state.
"""
# We want to disable anything that relies on TLS here.
app_config_dbsession_from_env.add_settings(enforce_https=False)
app = app_config_dbsession_from_env.make_wsgi_app()
with get_db_session_for_app_config(app_config_dbsession_from_env) as _db_session:
# Register the app with the external test environment, telling
# request.db to use this db_session and use the Transaction manager.
testapp = _TestApp(
app,
extra_environ={
"warehouse.db_session": _db_session,
"tm.active": True, # disable pyramid_tm
"tm.manager": tm, # pass in our own tm for the app to use
"REMOTE_ADDR": REMOTE_ADDR, # set the same address for all requests
},
)
yield testapp
| _TestApp |
python | getsentry__sentry | tests/sentry/integrations/bitbucket/test_repository.py | {
"start": 564,
"end": 5865
} | class ____(TestCase):
def setUp(self) -> None:
super().setUp()
self.base_url = "https://api.bitbucket.org"
self.shared_secret = "234567890"
self.subject = "connect:1234567"
self.integration, _ = self.create_provider_integration_for(
self.organization,
self.user,
provider="bitbucket",
external_id=self.subject,
name="MyBitBucket",
metadata={
"base_url": self.base_url,
"shared_secret": self.shared_secret,
"subject": self.subject,
},
)
self.repo = Repository.objects.create(
provider="bitbucket",
name="sentryuser/newsdiffs",
organization_id=self.organization.id,
config={"name": "sentryuser/newsdiffs"},
integration_id=self.integration.id,
)
@cached_property
def provider(self):
return BitbucketRepositoryProvider("bitbucket")
def test_get_client(self) -> None:
installation = self.integration.get_installation(self.repo.organization_id)
client = installation.get_client()
assert client.base_url == self.base_url
assert client.shared_secret == self.shared_secret
assert client.subject == self.subject
@responses.activate
def test_compare_commits(self) -> None:
responses.add(
responses.GET,
"https://api.bitbucket.org/2.0/repositories/sentryuser/newsdiffs/commits/e18e4e72de0d824edfbe0d73efe34cbd0d01d301",
body=COMPARE_COMMITS_EXAMPLE,
)
responses.add(
responses.GET,
"https://api.bitbucket.org/2.0/repositories/sentryuser/newsdiffs/diff/e18e4e72de0d824edfbe0d73efe34cbd0d01d301",
body=COMMIT_DIFF_PATCH,
)
res = self.provider.compare_commits(
self.repo, None, "e18e4e72de0d824edfbe0d73efe34cbd0d01d301"
)
assert res == [
{
"author_email": "sentryuser@getsentry.com",
"author_name": "Sentry User",
"message": "README.md edited online with Bitbucket",
"id": "e18e4e72de0d824edfbe0d73efe34cbd0d01d301",
"repository": "sentryuser/newsdiffs",
"patch_set": [{"path": "README.md", "type": "M"}],
"timestamp": datetime.datetime(2017, 5, 16, 23, 21, 40, tzinfo=timezone.utc),
}
]
@responses.activate
def test_build_repository_config(self) -> None:
full_repo_name = "laurynsentry/helloworld"
webhook_id = "web-hook-id"
organization = self.create_organization()
responses.add(
responses.GET,
"https://api.bitbucket.org/2.0/repositories/%s" % full_repo_name,
json=REPO,
)
expected_post_payload = {
"active": True,
"description": "sentry-bitbucket-repo-hook",
"events": ["repo:push", "pullrequest:fulfilled"],
"secret": "supersecret",
"url": f"http://testserver/extensions/bitbucket/organizations/{organization.id}/webhook/",
}
responses.add(
responses.POST,
"https://api.bitbucket.org/2.0/repositories/%s/hooks" % full_repo_name,
json={"uuid": webhook_id},
status=201,
match=[responses.matchers.json_params_matcher(expected_post_payload)],
)
with assume_test_silo_mode(SiloMode.CONTROL):
integration = self.create_provider_integration(
provider="bitbucket",
external_id="bitbucket_external_id",
name="Hello world",
metadata={
"base_url": "https://api.bitbucket.org",
"shared_secret": "23456789",
"webhook_secret": "supersecret",
},
)
integration.add_organization(organization)
data = {
"provider": "integrations:bitbucket",
"identifier": full_repo_name,
"installation": integration.id,
}
data = self.provider.get_repository_data(organization, data)
assert data == {
"provider": "integrations:bitbucket",
"identifier": full_repo_name,
"installation": integration.id,
"external_id": REPO["uuid"],
"name": full_repo_name,
}
data = self.provider.build_repository_config(organization, data)
assert data == {
"name": full_repo_name,
"external_id": REPO["uuid"],
"url": "https://bitbucket.org/laurynsentry/helloworld",
"integration_id": integration.id,
"config": {"name": full_repo_name, "webhook_id": webhook_id},
}
def test_repository_external_slug(self) -> None:
result = self.provider.repository_external_slug(self.repo)
assert result == self.repo.name
def test_get_repository_data_no_installation_id(self) -> None:
with pytest.raises(IntegrationError) as e:
self.provider.get_repository_data(self.organization, {})
assert "requires an integration id" in str(e.value)
| BitbucketRepositoryProviderTest |
python | donnemartin__interactive-coding-challenges | graphs_trees/graph_shortest_path/test_shortest_path.py | {
"start": 18,
"end": 1321
} | class ____(unittest.TestCase):
def test_shortest_path(self):
graph = Graph()
graph.add_edge('a', 'b', weight=5)
graph.add_edge('a', 'c', weight=3)
graph.add_edge('a', 'e', weight=2)
graph.add_edge('b', 'd', weight=2)
graph.add_edge('c', 'b', weight=1)
graph.add_edge('c', 'd', weight=1)
graph.add_edge('d', 'a', weight=1)
graph.add_edge('d', 'g', weight=2)
graph.add_edge('d', 'h', weight=1)
graph.add_edge('e', 'a', weight=1)
graph.add_edge('e', 'h', weight=4)
graph.add_edge('e', 'i', weight=7)
graph.add_edge('f', 'b', weight=3)
graph.add_edge('f', 'g', weight=1)
graph.add_edge('g', 'c', weight=3)
graph.add_edge('g', 'i', weight=2)
graph.add_edge('h', 'c', weight=2)
graph.add_edge('h', 'f', weight=2)
graph.add_edge('h', 'g', weight=2)
shortest_path = ShortestPath(graph)
result = shortest_path.find_shortest_path('a', 'i')
self.assertEqual(result, ['a', 'c', 'd', 'g', 'i'])
self.assertEqual(shortest_path.path_weight['i'], 8)
print('Success: test_shortest_path')
def main():
test = TestShortestPath()
test.test_shortest_path()
if __name__ == '__main__':
main()
| TestShortestPath |
python | pytorch__pytorch | torch/testing/_internal/common_quantization.py | {
"start": 70885,
"end": 71396
} | class ____(torch.nn.Module):
def __init__(self, with_bn=True):
super().__init__()
self.linear = nn.Linear(5, 5)
self.bn1d = nn.BatchNorm1d(5)
self.leaky_relu = nn.LeakyReLU(0.01)
self.with_bn = with_bn
def forward(self, x):
x = self.linear(x)
if self.with_bn:
x = self.bn1d(x)
x = self.leaky_relu(x)
return x
def get_example_inputs(self) -> tuple[Any, ...]:
return (torch.rand(1, 5),)
| LinearBnLeakyReluModel |
python | Pylons__pyramid | src/pyramid/httpexceptions.py | {
"start": 17275,
"end": 17631
} | class ____(_HTTPMove):
"""
subclass of :class:`~_HTTPMove`
This indicates that the requested resource has been assigned a new
permanent URI and any future references to this resource SHOULD use
one of the returned URIs.
code: 301, title: Moved Permanently
"""
code = 301
title = 'Moved Permanently'
| HTTPMovedPermanently |
python | apache__airflow | providers/google/tests/unit/google/cloud/operators/test_datacatalog.py | {
"start": 23819,
"end": 25182
} | class ____:
@mock.patch(
"airflow.providers.google.cloud.operators.datacatalog.CloudDataCatalogHook",
**{"return_value.get_entry.return_value": TEST_ENTRY},
)
def test_assert_valid_hook_call(self, mock_hook) -> None:
with pytest.warns(AirflowProviderDeprecationWarning):
task = CloudDataCatalogGetEntryOperator(
task_id="task_id",
location=TEST_LOCATION,
entry_group=TEST_ENTRY_GROUP_ID,
entry=TEST_ENTRY_ID,
project_id=TEST_PROJECT_ID,
retry=TEST_RETRY,
timeout=TEST_TIMEOUT,
metadata=TEST_METADATA,
gcp_conn_id=TEST_GCP_CONN_ID,
impersonation_chain=TEST_IMPERSONATION_CHAIN,
)
task.execute(context=mock.MagicMock())
mock_hook.assert_called_once_with(
gcp_conn_id=TEST_GCP_CONN_ID,
impersonation_chain=TEST_IMPERSONATION_CHAIN,
)
mock_hook.return_value.get_entry.assert_called_once_with(
location=TEST_LOCATION,
entry_group=TEST_ENTRY_GROUP_ID,
entry=TEST_ENTRY_ID,
project_id=TEST_PROJECT_ID,
retry=TEST_RETRY,
timeout=TEST_TIMEOUT,
metadata=TEST_METADATA,
)
| TestCloudDataCatalogGetEntryOperator |
python | huggingface__transformers | tests/models/llava_next_video/test_processing_llava_next_video.py | {
"start": 958,
"end": 5127
} | class ____(ProcessorTesterMixin, unittest.TestCase):
processor_class = LlavaNextVideoProcessor
@classmethod
def _setup_tokenizer(cls):
tokenizer_class = cls._get_component_class_from_processor("tokenizer")
tokenizer = tokenizer_class.from_pretrained("llava-hf/LLaVA-NeXT-Video-7B-hf")
tokenizer.add_special_tokens({"additional_special_tokens": ["<image>", "<video>"]})
return tokenizer
@classmethod
def _setup_test_attributes(cls, processor):
cls.image_token = processor.image_token
cls.video_token = processor.video_token
@classmethod
def prepare_processor_dict(cls):
return {
"chat_template": "{% for message in messages %}{{'<|im_start|>' + message['role'] + ' '}}{# Render all images first #}{% for content in message['content'] | selectattr('type', 'equalto', 'image') %}{{ '<image>' }}{% endfor %}{# Render all video then #}{% for content in message['content'] | selectattr('type', 'equalto', 'video') %}{{ '<video>' }}{% endfor %}{# Render all text next #}{% if message['role'] != 'assistant' %}{% for content in message['content'] | selectattr('type', 'equalto', 'text') %}{{ '\n' + content['text'] }}{% endfor %}{% else %}{% for content in message['content'] | selectattr('type', 'equalto', 'text') %}{% generation %}{{ '\n' + content['text'] }}{% endgeneration %}{% endfor %}{% endif %}{{'<|im_end|>'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}",
"num_additional_image_tokens": 0,
"patch_size": 128,
"vision_feature_select_strategy": "default",
}
# Copied from tests.models.llava.test_processing_llava.LlavaProcessorTest.test_get_num_vision_tokens
def test_get_num_vision_tokens(self):
"Tests general functionality of the helper used internally in vLLM"
processor = self.get_processor()
output = processor._get_num_multimodal_tokens(image_sizes=[(100, 100), (300, 100), (500, 30)])
self.assertTrue("num_image_tokens" in output)
self.assertEqual(len(output["num_image_tokens"]), 3)
self.assertTrue("num_image_patches" in output)
self.assertEqual(len(output["num_image_patches"]), 3)
# Copied from tests.models.llava.test_processing_llava.LlavaProcessorTest.test_chat_template_is_saved
def test_chat_template_is_saved(self):
processor_loaded = self.processor_class.from_pretrained(self.tmpdirname)
processor_dict_loaded = json.loads(processor_loaded.to_json_string())
# chat templates aren't serialized to json in processors
self.assertFalse("chat_template" in processor_dict_loaded)
# they have to be saved as separate file and loaded back from that file
# so we check if the same template is loaded
processor_dict = self.prepare_processor_dict()
self.assertTrue(processor_loaded.chat_template == processor_dict.get("chat_template", None))
def test_image_token_filling(self):
processor = self.processor_class.from_pretrained(self.tmpdirname)
processor.patch_size = 14
processor.vision_feature_select_strategy = "default"
processor.image_processor.crop_size = {"height": 336, "width": 336}
processor.image_processor.size = {"shortest_edge": 336}
processor.image_processor.image_grid_pinpoints = [[672, 336]]
# Important to check with non square image
image = torch.randint(0, 2, (3, 503, 316))
expected_image_tokens = 1525
image_token_index = processor.image_token_id
messages = [
{
"role": "user",
"content": [
{"type": "image"},
{"type": "text", "text": "What is shown in this image?"},
],
},
]
inputs = processor(
text=[processor.apply_chat_template(messages)],
images=[image],
return_tensors="pt",
)
image_tokens = (inputs["input_ids"] == image_token_index).sum().item()
self.assertEqual(expected_image_tokens, image_tokens)
| LlavaNextVideoProcessorTest |
python | tensorflow__tensorflow | tensorflow/python/keras/engine/keras_tensor.py | {
"start": 18032,
"end": 20845
} | class ____(KerasTensor):
"""A specialized KerasTensor representation for `tf.RaggedTensor`s.
Specifically, it:
1. Specializes the conversion to a placeholder in order
to maintain shape information for non-ragged dimensions.
2. Overloads the KerasTensor's operators with the RaggedTensor versions
when they don't match the `tf.Tensor` versions
3. Exposes some of the instance method/attribute that are unique to
the RaggedTensor API (such as ragged_rank).
"""
def _to_placeholder(self):
ragged_spec = self.type_spec
if ragged_spec.ragged_rank == 0 or ragged_spec.shape.rank is None:
return super(RaggedKerasTensor, self)._to_placeholder()
flat_shape = ragged_spec.shape[ragged_spec.ragged_rank:]
result = array_ops.placeholder(ragged_spec.dtype, flat_shape)
known_num_splits = []
prod = 1
for axis_size in ragged_spec.shape:
if prod is not None:
if axis_size is None or (
getattr(axis_size, 'value', True) is None):
prod = None
else:
prod = prod * axis_size
known_num_splits.append(prod)
for axis in range(ragged_spec.ragged_rank, 0, -1):
axis_size = ragged_spec.shape[axis]
if axis_size is None or (getattr(axis_size, 'value', True) is None):
num_splits = known_num_splits[axis-1]
if num_splits is not None:
num_splits = num_splits + 1
splits = array_ops.placeholder(
ragged_spec.row_splits_dtype, [num_splits])
result = ragged_tensor.RaggedTensor.from_row_splits(
result, splits, validate=False)
else:
rowlen = constant_op.constant(axis_size, ragged_spec.row_splits_dtype)
result = ragged_tensor.RaggedTensor.from_uniform_row_length(
result, rowlen, validate=False)
return result
@property
def ragged_rank(self):
return self.type_spec.ragged_rank
# Overload slicing
RaggedKerasTensor._overload_operator(ragged_tensor.RaggedTensor, '__getitem__') # pylint: disable=protected-access
# Overload math ops
RaggedKerasTensor._overload_operator(ragged_tensor.RaggedTensor, '__add__') # pylint: disable=protected-access
RaggedKerasTensor._overload_operator(ragged_tensor.RaggedTensor, '__radd__') # pylint: disable=protected-access
RaggedKerasTensor._overload_operator(ragged_tensor.RaggedTensor, '__mul__') # pylint: disable=protected-access
RaggedKerasTensor._overload_operator(ragged_tensor.RaggedTensor, '__rmul__') # pylint: disable=protected-access
# TODO(b/161487382):
# Special-case user-registered symbolic objects (registered by the
# private `register_symbolic_tensor_type` method) by passing them between
# scratch graphs directly.
# This is needed to not break Tensorflow probability
# while they finish migrating to composite tensors.
| RaggedKerasTensor |
python | PrefectHQ__prefect | tests/server/models/test_filters.py | {
"start": 22211,
"end": 27808
} | class ____:
params = [
[{}, 10],
[dict(flow_filter=filters.FlowFilter(name=dict(any_=["f-1", "f-2"]))), 6],
[dict(flow_filter=filters.FlowFilter(name=dict(any_=["f-1", "f-100"]))), 3],
[dict(flow_filter=filters.FlowFilter(name=dict(any_=["f-1"]))), 3],
[dict(flow_filter=filters.FlowFilter(name=dict(like_="f-"))), 10],
[dict(task_run_filter=filters.TaskRunFilter(name=dict(like_="task-run"))), 3],
[dict(task_run_filter=filters.TaskRunFilter(name=dict(like_="run-2"))), 2],
[dict(task_run_filter=filters.TaskRunFilter(name=dict(like_="2a"))), 1],
[dict(flow_filter=filters.FlowFilter(tags=dict(all_=["db"]))), 6],
[dict(flow_filter=filters.FlowFilter(tags=dict(all_=["db", "blue"]))), 3],
[dict(flow_filter=filters.FlowFilter(tags=dict(all_=["db", "red"]))), 0],
[dict(flow_run_filter=filters.FlowRunFilter(tags=dict(all_=["db", "red"]))), 0],
[
dict(flow_run_filter=filters.FlowRunFilter(tags=dict(all_=["db", "blue"]))),
3,
],
[
dict(flow_run_filter=filters.FlowRunFilter(name=dict(like_="test-happy"))),
3,
],
[
dict(
flow_run_filter=filters.FlowRunFilter(
name=dict(like_="test-happy-mallard")
)
),
0,
],
[dict(deployment_filter=filters.DeploymentFilter(id=dict(any_=[d_1_1_id]))), 3],
[dict(deployment_filter=filters.DeploymentFilter(name=dict(like_="d_1"))), 3],
[dict(flow_run_filter=filters.FlowRunFilter(tags=dict(is_null_=True))), 4],
[
dict(
flow_run_filter=filters.FlowRunFilter(
state=dict(type=dict(any_=["COMPLETED"]))
)
),
4,
],
# search for completed states with "NOT-COMPLETED" as the name, should return nothing
[
dict(
flow_run_filter=filters.FlowRunFilter(
state=dict(
type=dict(any_=["COMPLETED"]), name=dict(any_=["NOT-COMPLETED"])
)
)
),
0,
],
[
dict(
flow_run_filter=filters.FlowRunFilter(
state=dict(name=dict(any_=["Completed"]))
)
),
4,
],
[
dict(
flow_run_filter=filters.FlowRunFilter(
state=dict(type=dict(any_=["COMPLETED"]))
),
flow_filter=filters.FlowFilter(tags=dict(all_=["xyz"])),
),
0,
],
[
dict(
flow_run_filter=filters.FlowRunFilter(
deployment_id=dict(any_=[d_1_1_id, d_1_2_id])
)
),
3,
],
[
dict(
flow_run_filter=filters.FlowRunFilter(
deployment_id=dict(any_=[d_1_1_id, d_3_1_id])
)
),
4,
],
# task runs with subflow children
[
dict(
task_run_filter=filters.TaskRunFilter(subflow_runs=dict(exists_=True))
),
1,
],
# task runs without subflow children
[
dict(
task_run_filter=filters.TaskRunFilter(subflow_runs=dict(exists_=False))
),
9,
],
# task runs with subflow children and the tag 'subflow'
[
dict(
task_run_filter=filters.TaskRunFilter(
subflow_runs=dict(exists_=True), tags=dict(all_=["subflow"])
)
),
0,
],
# empty filter
[dict(flow_filter=filters.FlowFilter()), 10],
# multiple empty filters
[
dict(
flow_filter=filters.FlowFilter(),
flow_run_filter=filters.FlowRunFilter(),
),
10,
],
]
@pytest.mark.parametrize("kwargs,expected", params)
async def test_python_client_filter(self, kwargs, expected):
async with get_client() as client:
task_runs = await client.read_task_runs(**kwargs)
assert len(task_runs) == expected
@pytest.mark.parametrize("kwargs,expected", params)
async def test_models_count(self, session, kwargs, expected):
count = await models.task_runs.count_task_runs(session=session, **kwargs)
assert count == expected
@pytest.mark.parametrize("kwargs,expected", params)
async def test_models_read(self, session, kwargs, expected):
read = await models.task_runs.read_task_runs(session=session, **kwargs)
assert len({r.id for r in read}) == expected
@pytest.mark.parametrize("kwargs,expected", params)
async def test_api_count(self, client, kwargs, expected):
adjusted_kwargs = adjust_kwargs_for_client(kwargs)
response = await client.post(
"/task_runs/count",
json=adjusted_kwargs,
)
assert response.json() == expected
@pytest.mark.parametrize("kwargs,expected", params)
async def test_api_read(self, client, kwargs, expected):
adjusted_kwargs = adjust_kwargs_for_client(kwargs)
response = await client.post(
"/task_runs/filter",
json=adjusted_kwargs,
)
assert len({r["id"] for r in response.json()}) == expected
| TestCountTaskRunsModels |
python | ipython__ipython | IPython/lib/pretty.py | {
"start": 14965,
"end": 15057
} | class ____:
def output(self, stream, output_width):
return output_width
| Printable |
python | ray-project__ray | doc/external/test_hashes.py | {
"start": 152,
"end": 1304
} | class ____(TypedDict):
file: str
digest: str
ref: str
# Files here are referenced on external pages as examples, and are tested
# to make sure exteranl referenced Ray examples are working with latest version
# of Ray. If you need to make changes, make sure to update the external examples
# too, and then update the digests here as a confirmation.
docs = [
ExternalDoc(
file="pytorch_tutorials_hyperparameter_tuning_tutorial.py",
digest="04f8bab9fda98bceaf541984482faacab7bd8d35d6e5850ae610bfea08709743",
ref="https://pytorch.org/tutorials/beginner/hyperparameter_tuning_tutorial.html"
),
]
def test_hashes():
for doc in docs:
path = os.path.join(_REPO_NAME, "doc", "external", doc["file"])
runfile = _runfiles.Rlocation(path)
with open(runfile, "rb") as f:
content = f.read()
want = doc["digest"]
got = hashlib.sha256(content).hexdigest()
name = doc["file"]
ref = doc["ref"]
assert got == want, f"{name} ({ref}) has sha256 {got}, want {want}"
if __name__ == "__main__":
sys.exit(pytest.main(["-v", __file__]))
| ExternalDoc |
python | getsentry__sentry | src/sentry/api/serializers/models/event.py | {
"start": 21034,
"end": 23552
} | class ____(EventSerializer):
"""
Simple event serializer that renders a basic outline of an event without
most interfaces/breadcrumbs. This can be used for basic event list queries
where we don't need the full detail. The side effect is that, if the
serialized events are actually SnubaEvents, we can render them without
needing to fetch the event bodies from nodestore.
NB it would be super easy to inadvertently add a property accessor here
that would require a nodestore lookup for a SnubaEvent serialized using
this serializer. You will only really notice you've done this when the
organization event search API gets real slow.
"""
def get_attrs(self, item_list, user, **kwargs):
crash_files = get_crash_files(item_list)
serialized_files = {
file.event_id: serialized
for file, serialized in zip(crash_files, serialize(crash_files, user=user))
}
return {event: {"crash_file": serialized_files.get(event.event_id)} for event in item_list}
def serialize( # type: ignore[override] # intentionally different shape
self, obj: Event | GroupEvent, attrs, user, **kwargs
) -> SimpleEventSerializerResponse:
tags: list[EventTag] = [
{"key": key.split("sentry:", 1)[-1], "value": value} for key, value in obj.tags
]
for tag in tags:
query = convert_user_tag_to_query(tag["key"], tag["value"])
if query:
tag["query"] = query
map_device_class_tags(tags)
event_user = obj.get_minimal_user()
return {
"id": str(obj.event_id),
"event.type": str(obj.get_event_type()),
"groupID": str(obj.group_id) if obj.group_id else None,
"eventID": str(obj.event_id),
"projectID": str(obj.project_id),
# XXX for 'message' this doesn't do the proper resolution of logentry
# etc. that _get_legacy_message_with_meta does.
"message": obj.message,
"title": obj.title,
"location": obj.location,
"culprit": obj.culprit,
"user": event_user.get_api_context() if event_user is not None else None,
"tags": tags,
"platform": obj.platform,
"dateCreated": obj.datetime,
# Needed to generate minidump links in UI
"crashFile": attrs["crash_file"],
"metadata": obj.get_event_metadata(),
}
| SimpleEventSerializer |
python | pyca__cryptography | tests/hazmat/primitives/test_hkdf.py | {
"start": 527,
"end": 5834
} | class ____:
def test_overflow_protection_enormous_digest_size(self, backend):
enormous_digest_size = sys.maxsize >> 3
dummy_hash = DummyHashAlgorithm(enormous_digest_size)
with pytest.raises(
ValueError, match="Digest size too large, would cause overflow"
):
HKDF(dummy_hash, 32, salt=None, info=None)
def test_length_limit(self, backend):
big_length = 255 * hashes.SHA256().digest_size + 1
with pytest.raises(ValueError):
HKDF(
hashes.SHA256(),
big_length,
salt=None,
info=None,
backend=backend,
)
def test_already_finalized(self, backend):
hkdf = HKDF(hashes.SHA256(), 16, salt=None, info=None, backend=backend)
hkdf.derive(b"\x01" * 16)
with pytest.raises(AlreadyFinalized):
hkdf.derive(b"\x02" * 16)
hkdf = HKDF(hashes.SHA256(), 16, salt=None, info=None, backend=backend)
hkdf.verify(b"\x01" * 16, b"gJ\xfb{\xb1Oi\xc5sMC\xb7\xe4@\xf7u")
with pytest.raises(AlreadyFinalized):
hkdf.verify(b"\x02" * 16, b"gJ\xfb{\xb1Oi\xc5sMC\xb7\xe4@\xf7u")
hkdf = HKDF(hashes.SHA256(), 16, salt=None, info=None, backend=backend)
def test_verify(self, backend):
hkdf = HKDF(hashes.SHA256(), 16, salt=None, info=None, backend=backend)
hkdf.verify(b"\x01" * 16, b"gJ\xfb{\xb1Oi\xc5sMC\xb7\xe4@\xf7u")
def test_verify_invalid(self, backend):
hkdf = HKDF(hashes.SHA256(), 16, salt=None, info=None, backend=backend)
with pytest.raises(InvalidKey):
hkdf.verify(b"\x02" * 16, b"gJ\xfb{\xb1Oi\xc5sMC\xb7\xe4@\xf7u")
def test_unicode_typeerror(self, backend):
with pytest.raises(TypeError):
HKDF(
hashes.SHA256(),
16,
salt="foo", # type: ignore[arg-type]
info=None,
backend=backend,
)
with pytest.raises(TypeError):
HKDF(
hashes.SHA256(),
16,
salt=None,
info="foo", # type: ignore[arg-type]
backend=backend,
)
with pytest.raises(TypeError):
hkdf = HKDF(
hashes.SHA256(), 16, salt=None, info=None, backend=backend
)
hkdf.derive("foo") # type: ignore[arg-type]
with pytest.raises(TypeError):
hkdf = HKDF(
hashes.SHA256(), 16, salt=None, info=None, backend=backend
)
hkdf.verify("foo", b"bar") # type: ignore[arg-type]
with pytest.raises(TypeError):
hkdf = HKDF(
hashes.SHA256(), 16, salt=None, info=None, backend=backend
)
hkdf.verify(b"foo", "bar") # type: ignore[arg-type]
def test_derive_short_output(self, backend):
hkdf = HKDF(hashes.SHA256(), 4, salt=None, info=None, backend=backend)
assert hkdf.derive(b"\x01" * 16) == b"gJ\xfb{"
def test_derive_long_output(self, backend):
vector = load_vectors_from_file(
os.path.join("KDF", "hkdf-generated.txt"), load_nist_vectors
)[0]
hkdf = HKDF(
hashes.SHA256(),
int(vector["l"]),
salt=vector["salt"],
info=vector["info"],
backend=backend,
)
ikm = binascii.unhexlify(vector["ikm"])
assert hkdf.derive(ikm) == binascii.unhexlify(vector["okm"])
def test_private_extract_exists(self):
# This was DeprecatedIn47 but we can't raise a warning
# because the scapy tests are fragile butterflies
hkdf = HKDF(hashes.SHA256(), 32, salt=b"0", info=None)
prk = hkdf._extract(b"0") # type:ignore[attr-defined]
assert len(prk) == 32
def test_buffer_protocol(self, backend):
vector = load_vectors_from_file(
os.path.join("KDF", "hkdf-generated.txt"), load_nist_vectors
)[0]
hkdf = HKDF(
hashes.SHA256(),
int(vector["l"]),
salt=vector["salt"],
info=vector["info"],
backend=backend,
)
ikm = bytearray(binascii.unhexlify(vector["ikm"]))
assert hkdf.derive(ikm) == binascii.unhexlify(vector["okm"])
def test_derive_into(self):
hkdf = HKDF(hashes.SHA256(), 16, salt=None, info=None)
buf = bytearray(16)
n = hkdf.derive_into(b"\x01" * 16, buf)
assert n == 16
assert buf == b"gJ\xfb{\xb1Oi\xc5sMC\xb7\xe4@\xf7u"
@pytest.mark.parametrize(
("buflen", "outlen"), [(15, 16), (17, 16), (22, 23), (24, 23)]
)
def test_derive_into_buffer_incorrect_size(self, buflen, outlen):
hkdf = HKDF(hashes.SHA256(), outlen, salt=None, info=None)
buf = bytearray(buflen)
with pytest.raises(ValueError, match="buffer must be"):
hkdf.derive_into(b"\x01" * 16, buf)
def test_derive_into_already_finalized(self):
hkdf = HKDF(hashes.SHA256(), 16, salt=None, info=None)
buf = bytearray(16)
hkdf.derive_into(b"\x01" * 16, buf)
with pytest.raises(AlreadyFinalized):
hkdf.derive_into(b"\x02" * 16, buf)
| TestHKDF |
python | python-openxml__python-docx | tests/text/test_paragraph.py | {
"start": 593,
"end": 14793
} | class ____:
"""Unit-test suite for `docx.text.run.Paragraph`."""
@pytest.mark.parametrize(
("p_cxml", "expected_value"),
[
("w:p/w:r", False),
('w:p/w:r/w:t"foobar"', False),
('w:p/w:hyperlink/w:r/(w:t"abc",w:lastRenderedPageBreak,w:t"def")', True),
("w:p/w:r/(w:lastRenderedPageBreak, w:lastRenderedPageBreak)", True),
],
)
def it_knows_whether_it_contains_a_page_break(
self, p_cxml: str, expected_value: bool, fake_parent: t.ProvidesStoryPart
):
p = cast(CT_P, element(p_cxml))
paragraph = Paragraph(p, fake_parent)
assert paragraph.contains_page_break == expected_value
@pytest.mark.parametrize(
("p_cxml", "count"),
[
("w:p", 0),
("w:p/w:r", 0),
("w:p/w:hyperlink", 1),
("w:p/(w:r,w:hyperlink,w:r)", 1),
("w:p/(w:r,w:hyperlink,w:r,w:hyperlink)", 2),
("w:p/(w:hyperlink,w:r,w:hyperlink,w:r)", 2),
],
)
def it_provides_access_to_the_hyperlinks_it_contains(
self, p_cxml: str, count: int, fake_parent: t.ProvidesStoryPart
):
p = cast(CT_P, element(p_cxml))
paragraph = Paragraph(p, fake_parent)
hyperlinks = paragraph.hyperlinks
actual = [type(item).__name__ for item in hyperlinks]
expected = ["Hyperlink" for _ in range(count)]
assert actual == expected, f"expected: {expected}, got: {actual}"
@pytest.mark.parametrize(
("p_cxml", "expected"),
[
("w:p", []),
("w:p/w:r", ["Run"]),
("w:p/w:hyperlink", ["Hyperlink"]),
("w:p/(w:r,w:hyperlink,w:r)", ["Run", "Hyperlink", "Run"]),
("w:p/(w:hyperlink,w:r,w:hyperlink)", ["Hyperlink", "Run", "Hyperlink"]),
],
)
def it_can_iterate_its_inner_content_items(
self, p_cxml: str, expected: List[str], fake_parent: t.ProvidesStoryPart
):
p = cast(CT_P, element(p_cxml))
paragraph = Paragraph(p, fake_parent)
inner_content = paragraph.iter_inner_content()
actual = [type(item).__name__ for item in inner_content]
assert actual == expected, f"expected: {expected}, got: {actual}"
def it_knows_its_paragraph_style(self, style_get_fixture):
paragraph, style_id_, style_ = style_get_fixture
style = paragraph.style
paragraph.part.get_style.assert_called_once_with(style_id_, WD_STYLE_TYPE.PARAGRAPH)
assert style is style_
def it_can_change_its_paragraph_style(self, style_set_fixture):
paragraph, value, expected_xml = style_set_fixture
paragraph.style = value
paragraph.part.get_style_id.assert_called_once_with(value, WD_STYLE_TYPE.PARAGRAPH)
assert paragraph._p.xml == expected_xml
@pytest.mark.parametrize(
("p_cxml", "count"),
[
("w:p", 0),
("w:p/w:r", 0),
("w:p/w:r/w:lastRenderedPageBreak", 1),
("w:p/w:hyperlink/w:r/w:lastRenderedPageBreak", 1),
(
"w:p/(w:r/w:lastRenderedPageBreak,w:hyperlink/w:r/w:lastRenderedPageBreak)",
2,
),
(
"w:p/(w:hyperlink/w:r/w:lastRenderedPageBreak,w:r,"
"w:r/w:lastRenderedPageBreak,w:r,w:hyperlink)",
2,
),
],
)
def it_provides_access_to_the_rendered_page_breaks_it_contains(
self, p_cxml: str, count: int, fake_parent: t.ProvidesStoryPart
):
p = cast(CT_P, element(p_cxml))
paragraph = Paragraph(p, fake_parent)
rendered_page_breaks = paragraph.rendered_page_breaks
actual = [type(item).__name__ for item in rendered_page_breaks]
expected = ["RenderedPageBreak" for _ in range(count)]
assert actual == expected, f"expected: {expected}, got: {actual}"
@pytest.mark.parametrize(
("p_cxml", "expected_value"),
[
("w:p", ""),
("w:p/w:r", ""),
("w:p/w:r/w:t", ""),
('w:p/w:r/w:t"foo"', "foo"),
('w:p/w:r/(w:t"foo", w:t"bar")', "foobar"),
('w:p/w:r/(w:t"fo ", w:t"bar")', "fo bar"),
('w:p/w:r/(w:t"foo", w:tab, w:t"bar")', "foo\tbar"),
('w:p/w:r/(w:t"foo", w:br, w:t"bar")', "foo\nbar"),
('w:p/w:r/(w:t"foo", w:cr, w:t"bar")', "foo\nbar"),
(
'w:p/(w:r/w:t"click ",w:hyperlink{r:id=rId6}/w:r/w:t"here",w:r/w:t" for more")',
"click here for more",
),
],
)
def it_knows_the_text_it_contains(self, p_cxml: str, expected_value: str):
"""Including the text of embedded hyperlinks."""
paragraph = Paragraph(element(p_cxml), None)
assert paragraph.text == expected_value
def it_can_replace_the_text_it_contains(self, text_set_fixture):
paragraph, text, expected_text = text_set_fixture
paragraph.text = text
assert paragraph.text == expected_text
def it_knows_its_alignment_value(self, alignment_get_fixture):
paragraph, expected_value = alignment_get_fixture
assert paragraph.alignment == expected_value
def it_can_change_its_alignment_value(self, alignment_set_fixture):
paragraph, value, expected_xml = alignment_set_fixture
paragraph.alignment = value
assert paragraph._p.xml == expected_xml
def it_provides_access_to_its_paragraph_format(self, parfmt_fixture):
paragraph, ParagraphFormat_, paragraph_format_ = parfmt_fixture
paragraph_format = paragraph.paragraph_format
ParagraphFormat_.assert_called_once_with(paragraph._element)
assert paragraph_format is paragraph_format_
def it_provides_access_to_the_runs_it_contains(self, runs_fixture):
paragraph, Run_, r_, r_2_, run_, run_2_ = runs_fixture
runs = paragraph.runs
assert Run_.mock_calls == [call(r_, paragraph), call(r_2_, paragraph)]
assert runs == [run_, run_2_]
def it_can_add_a_run_to_itself(self, add_run_fixture):
paragraph, text, style, style_prop_, expected_xml = add_run_fixture
run = paragraph.add_run(text, style)
assert paragraph._p.xml == expected_xml
assert isinstance(run, Run)
assert run._r is paragraph._p.r_lst[0]
if style:
style_prop_.assert_called_once_with(style)
def it_can_insert_a_paragraph_before_itself(self, insert_before_fixture):
text, style, paragraph_, add_run_calls = insert_before_fixture
paragraph = Paragraph(None, None)
new_paragraph = paragraph.insert_paragraph_before(text, style)
paragraph._insert_paragraph_before.assert_called_once_with(paragraph)
assert new_paragraph.add_run.call_args_list == add_run_calls
assert new_paragraph.style == style
assert new_paragraph is paragraph_
def it_can_remove_its_content_while_preserving_formatting(self, clear_fixture):
paragraph, expected_xml = clear_fixture
_paragraph = paragraph.clear()
assert paragraph._p.xml == expected_xml
assert _paragraph is paragraph
def it_inserts_a_paragraph_before_to_help(self, _insert_before_fixture):
paragraph, body, expected_xml = _insert_before_fixture
new_paragraph = paragraph._insert_paragraph_before()
assert isinstance(new_paragraph, Paragraph)
assert body.xml == expected_xml
# fixtures -------------------------------------------------------
@pytest.fixture(
params=[
("w:p", None, None, "w:p/w:r"),
("w:p", "foobar", None, 'w:p/w:r/w:t"foobar"'),
("w:p", None, "Strong", "w:p/w:r"),
("w:p", "foobar", "Strong", 'w:p/w:r/w:t"foobar"'),
]
)
def add_run_fixture(self, request, run_style_prop_):
before_cxml, text, style, after_cxml = request.param
paragraph = Paragraph(element(before_cxml), None)
expected_xml = xml(after_cxml)
return paragraph, text, style, run_style_prop_, expected_xml
@pytest.fixture(
params=[
("w:p/w:pPr/w:jc{w:val=center}", WD_ALIGN_PARAGRAPH.CENTER),
("w:p", None),
]
)
def alignment_get_fixture(self, request):
cxml, expected_alignment_value = request.param
paragraph = Paragraph(element(cxml), None)
return paragraph, expected_alignment_value
@pytest.fixture(
params=[
("w:p", WD_ALIGN_PARAGRAPH.LEFT, "w:p/w:pPr/w:jc{w:val=left}"),
(
"w:p/w:pPr/w:jc{w:val=left}",
WD_ALIGN_PARAGRAPH.CENTER,
"w:p/w:pPr/w:jc{w:val=center}",
),
("w:p/w:pPr/w:jc{w:val=left}", None, "w:p/w:pPr"),
("w:p", None, "w:p/w:pPr"),
]
)
def alignment_set_fixture(self, request):
initial_cxml, new_alignment_value, expected_cxml = request.param
paragraph = Paragraph(element(initial_cxml), None)
expected_xml = xml(expected_cxml)
return paragraph, new_alignment_value, expected_xml
@pytest.fixture(
params=[
("w:p", "w:p"),
("w:p/w:pPr", "w:p/w:pPr"),
('w:p/w:r/w:t"foobar"', "w:p"),
('w:p/(w:pPr, w:r/w:t"foobar")', "w:p/w:pPr"),
]
)
def clear_fixture(self, request):
initial_cxml, expected_cxml = request.param
paragraph = Paragraph(element(initial_cxml), None)
expected_xml = xml(expected_cxml)
return paragraph, expected_xml
@pytest.fixture(
params=[
(None, None),
("Foo", None),
(None, "Bar"),
("Foo", "Bar"),
]
)
def insert_before_fixture(self, request, _insert_paragraph_before_, add_run_):
text, style = request.param
paragraph_ = _insert_paragraph_before_.return_value
add_run_calls = [] if text is None else [call(text)]
paragraph_.style = None
return text, style, paragraph_, add_run_calls
@pytest.fixture(params=[("w:body/w:p{id=42}", "w:body/(w:p,w:p{id=42})")])
def _insert_before_fixture(self, request):
body_cxml, expected_cxml = request.param
body = element(body_cxml)
paragraph = Paragraph(body[0], None)
expected_xml = xml(expected_cxml)
return paragraph, body, expected_xml
@pytest.fixture
def parfmt_fixture(self, ParagraphFormat_, paragraph_format_):
paragraph = Paragraph(element("w:p"), None)
return paragraph, ParagraphFormat_, paragraph_format_
@pytest.fixture
def runs_fixture(self, p_, Run_, r_, r_2_, runs_):
paragraph = Paragraph(p_, None)
run_, run_2_ = runs_
return paragraph, Run_, r_, r_2_, run_, run_2_
@pytest.fixture
def style_get_fixture(self, part_prop_):
style_id = "Foobar"
p_cxml = "w:p/w:pPr/w:pStyle{w:val=%s}" % style_id
paragraph = Paragraph(element(p_cxml), None)
style_ = part_prop_.return_value.get_style.return_value
return paragraph, style_id, style_
@pytest.fixture(
params=[
("w:p", "Heading 1", "Heading1", "w:p/w:pPr/w:pStyle{w:val=Heading1}"),
(
"w:p/w:pPr",
"Heading 1",
"Heading1",
"w:p/w:pPr/w:pStyle{w:val=Heading1}",
),
(
"w:p/w:pPr/w:pStyle{w:val=Heading1}",
"Heading 2",
"Heading2",
"w:p/w:pPr/w:pStyle{w:val=Heading2}",
),
("w:p/w:pPr/w:pStyle{w:val=Heading1}", "Normal", None, "w:p/w:pPr"),
("w:p", None, None, "w:p/w:pPr"),
]
)
def style_set_fixture(self, request, part_prop_):
p_cxml, value, style_id, expected_cxml = request.param
paragraph = Paragraph(element(p_cxml), None)
part_prop_.return_value.get_style_id.return_value = style_id
expected_xml = xml(expected_cxml)
return paragraph, value, expected_xml
@pytest.fixture
def text_set_fixture(self):
paragraph = Paragraph(element("w:p"), None)
paragraph.add_run("must not appear in result")
new_text_value = "foo\tbar\rbaz\n"
expected_text_value = "foo\tbar\nbaz\n"
return paragraph, new_text_value, expected_text_value
# fixture components ---------------------------------------------
@pytest.fixture
def add_run_(self, request):
return method_mock(request, Paragraph, "add_run")
@pytest.fixture
def document_part_(self, request):
return instance_mock(request, DocumentPart)
@pytest.fixture
def _insert_paragraph_before_(self, request):
return method_mock(request, Paragraph, "_insert_paragraph_before")
@pytest.fixture
def p_(self, request, r_, r_2_):
return instance_mock(request, CT_P, r_lst=(r_, r_2_))
@pytest.fixture
def ParagraphFormat_(self, request, paragraph_format_):
return class_mock(
request,
"docx.text.paragraph.ParagraphFormat",
return_value=paragraph_format_,
)
@pytest.fixture
def paragraph_format_(self, request):
return instance_mock(request, ParagraphFormat)
@pytest.fixture
def part_prop_(self, request, document_part_):
return property_mock(request, Paragraph, "part", return_value=document_part_)
@pytest.fixture
def Run_(self, request, runs_):
run_, run_2_ = runs_
return class_mock(request, "docx.text.paragraph.Run", side_effect=[run_, run_2_])
@pytest.fixture
def r_(self, request):
return instance_mock(request, CT_R)
@pytest.fixture
def r_2_(self, request):
return instance_mock(request, CT_R)
@pytest.fixture
def run_style_prop_(self, request):
return property_mock(request, Run, "style")
@pytest.fixture
def runs_(self, request):
run_ = instance_mock(request, Run, name="run_")
run_2_ = instance_mock(request, Run, name="run_2_")
return run_, run_2_
| DescribeParagraph |
python | skorch-dev__skorch | skorch/hf.py | {
"start": 22855,
"end": 29739
} | class ____(_HuggingfaceTokenizerBase):
"""Wraps a pretrained Huggingface tokenizer to work as an sklearn
transformer
From the `tokenizers docs
<https://huggingface.co/docs/tokenizers/python/latest/index.html>`_:
::
🤗 Tokenizers provides an implementation of today’s most used
tokenizers, with a focus on performance and versatility.
Use pretrained Hugging Face tokenizers in an sklearn compatible transformer.
Examples
--------
>>> from skorch.hf import HuggingfacePretrainedTokenizer
>>> # pass the model name to be downloaded
>>> hf_tokenizer = HuggingfacePretrainedTokenizer('bert-base-uncased')
>>> data = ['hello there', 'this is a text']
>>> hf_tokenizer.fit(data) # only loads the model
>>> hf_tokenizer.transform(data)
>>> # pass pretrained tokenizer as object
>>> my_tokenizer = ...
>>> hf_tokenizer = HuggingfacePretrainedTokenizer(my_tokenizer)
>>> hf_tokenizer.fit(data)
>>> hf_tokenizer.transform(data)
>>> # use hyper params from pretrained tokenizer to fit on own data
>>> hf_tokenizer = HuggingfacePretrainedTokenizer(
... 'bert-base-uncased', train=True, vocab_size=12345)
>>> data = ...
>>> hf_tokenizer.fit(data) # fits new tokenizer on data
>>> hf_tokenizer.transform(data)
Parameters
----------
tokenizer : str or os.PathLike or transformers.PreTrainedTokenizerFast
If a string, the model id of a predefined tokenizer hosted inside a model
repo on huggingface.co. Valid model ids can be located at the root-level,
like bert-base-uncased, or namespaced under a user or organization name,
like dbmdz/bert-base-german-cased. If a path, A path to a directory
containing vocabulary files required by the tokenizer, e.g.,
./my_model_directory/. Else, should be an instantiated
``PreTrainedTokenizerFast``.
train : bool (default=False)
Whether to use the pre-trained tokenizer directly as is or to retrain it
on your data. If you just want to use the pre-trained tokenizer without
further modification, leave this parameter as False. However, if you want
to fit the tokenizer on your own data (completely from scratch, forgetting
what it has learned previously), set this argument to True. The latter
option is useful if you want to use the same hyper-parameters as the
pre-trained tokenizer but want the vocabulary to be fitted to your
dataset. The vocabulary size of this new tokenizer can be set explicitly
by passing the ``vocab_size`` argument.
max_length : int (default=256)
Maximum number of tokens used per sequence.
return_tensors : one of None, str, 'pt', 'np', 'tf' (default='pt')
What type of result values to return. By default, return a padded and
truncated (to ``max_length``) PyTorch Tensor. Similarly, 'np' results in a
padded and truncated numpy array. Tensorflow tensors are not supported
officially supported but should also work. If None or str, return a list
of lists instead. These lists are not padded or truncated, thus each row
may have different numbers of elements.
return_attention_mask : bool (default=True)
Whether to return the attention mask.
return_token_type_ids : bool (default=False)
Whether to return the token type ids.
return_length : bool (default=False)
Whether to return the length of the encoded inputs.
pad_token : str (default='[PAD]')
A special token used to make arrays of tokens the same size for batching
purpose. Will then be ignored by attention mechanisms.
vocab_size : int or None (default=None)
Change this parameter only if you use ``train=True``. In that case, this
parameter will determine the vocabulary size of the newly trained
tokenizer. If you set ``train=True`` but leave this parameter as None, the
same vocabulary size as the one from the initial toknizer will be used.
verbose : int (default=0)
Whether the tokenizer should print more information and warnings.
Attributes
----------
vocabulary_ : dict
A mapping of terms to feature indices.
fast_tokenizer_ : transformers.PreTrainedTokenizerFast
If you want to extract the Hugging Face tokenizer to use it without skorch,
use this attribute.
.. _tokenizers: https://huggingface.co/docs/tokenizers/python/latest/index.html
"""
def __init__(
self,
tokenizer,
train=False,
max_length=256,
return_tensors='pt',
return_attention_mask=True,
return_token_type_ids=False,
return_length=False,
verbose=0,
vocab_size=None,
):
self.tokenizer = tokenizer
self.train = train
self.max_length = max_length
self.return_tensors = return_tensors
self.return_attention_mask = return_attention_mask
self.return_token_type_ids = return_token_type_ids
self.return_length = return_length
self.vocab_size = vocab_size
self.verbose = verbose
def fit(self, X, y=None, **fit_params):
"""Load the pretrained tokenizer
Parameters
----------
X : iterable of str
This parameter is ignored.
y : None
This parameter is ignored.
fit_params : dict
This parameter is ignored.
Returns
-------
self : HuggingfacePretrainedTokenizer
The fitted instance of the tokenizer.
"""
from transformers import AutoTokenizer
# from sklearn, triggers a parameter validation
# even though X is not used, we leave this check in for consistency
if isinstance(X, str):
raise ValueError(
"Iterable over raw text documents expected, string object received."
)
if not self.train and (self.vocab_size is not None):
raise ValueError("Setting vocab_size has no effect if train=False")
if isinstance(self.tokenizer, (str, os.PathLike)):
self.fast_tokenizer_ = AutoTokenizer.from_pretrained(
self.tokenizer
)
else:
self.fast_tokenizer_ = self.tokenizer
if not self.train:
self.fixed_vocabulary_ = True
else:
X = list(X) # transformers tokenizer does not accept arrays
vocab_size = (
self.fast_tokenizer_.vocab_size if self.vocab_size is None
else self.vocab_size
)
self.fast_tokenizer_ = self.fast_tokenizer_.train_new_from_iterator(
X, vocab_size=vocab_size
)
self.fixed_vocabulary_ = False
return self
| HuggingfacePretrainedTokenizer |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/typedDictReadOnly1.py | {
"start": 637,
"end": 746
} | class ____(TypedDict):
a: Required[int]
b: ReadOnly[NotRequired[int]]
c: ReadOnly[Required[int]]
| F1 |
python | scipy__scipy | scipy/special/tests/test_basic.py | {
"start": 34365,
"end": 39193
} | class ____:
def test_airy(self):
# This tests the airy function to ensure 8 place accuracy in computation
x = special.airy(.99)
assert_allclose(x, array([0.13689066, -0.16050153, 1.19815925, 0.92046818]),
atol=1.5e-8, rtol=0)
x = special.airy(.41)
assert_allclose(x, array([0.25238916, -.23480512, 0.80686202, 0.51053919]),
atol=1.5e-8, rtol=0)
x = special.airy(-.36)
assert_allclose(x, array([0.44508477,-0.23186773,0.44939534,0.48105354]),
atol=1.5e-8, rtol=0)
def test_airye(self):
a = special.airye(0.01)
b = special.airy(0.01)
b1 = [None]*4
for n in range(2):
b1[n] = b[n]*exp(2.0/3.0*0.01*sqrt(0.01))
for n in range(2,4):
b1[n] = b[n]*exp(-abs(real(2.0/3.0*0.01*sqrt(0.01))))
assert_allclose(a, b1, atol=1.5e-6, rtol=0)
def test_bi_zeros(self):
bi = special.bi_zeros(2)
bia = (array([-1.17371322, -3.2710930]),
array([-2.29443968, -4.07315509]),
array([-0.45494438, 0.39652284]),
array([0.60195789, -0.76031014]))
assert_allclose(bi, bia, atol=1.5e-4, rtol=0)
bi = special.bi_zeros(5)
assert_allclose(bi[0], array([-1.173713222709127,
-3.271093302836352,
-4.830737841662016,
-6.169852128310251,
-7.376762079367764]),
atol=1.5e-11, rtol=0)
assert_allclose(bi[1], array([-2.294439682614122,
-4.073155089071828,
-5.512395729663599,
-6.781294445990305,
-7.940178689168587]),
atol=1.5e-10, rtol=0)
assert_allclose(bi[2], array([-0.454944383639657,
0.396522836094465,
-0.367969161486959,
0.349499116831805,
-0.336026240133662]),
atol=1.5e-11, rtol=0)
assert_allclose(bi[3], array([0.601957887976239,
-0.760310141492801,
0.836991012619261,
-0.88947990142654,
0.929983638568022]),
atol=1.5e-10, rtol=0)
def test_ai_zeros(self):
ai = special.ai_zeros(1)
assert_allclose(ai, (array([-2.33810741]),
array([-1.01879297]),
array([0.5357]),
array([0.7012])),
atol=1.5e-4, rtol=0)
@pytest.mark.fail_slow(5)
def test_ai_zeros_big(self):
z, zp, ai_zpx, aip_zx = special.ai_zeros(50000)
ai_z, aip_z, _, _ = special.airy(z)
ai_zp, aip_zp, _, _ = special.airy(zp)
ai_envelope = 1/abs(z)**(1./4)
aip_envelope = abs(zp)**(1./4)
# Check values
assert_allclose(ai_zpx, ai_zp, rtol=1e-10)
assert_allclose(aip_zx, aip_z, rtol=1e-10)
# Check they are zeros
assert_allclose(ai_z/ai_envelope, 0, atol=1e-10, rtol=0)
assert_allclose(aip_zp/aip_envelope, 0, atol=1e-10, rtol=0)
# Check first zeros, DLMF 9.9.1
assert_allclose(z[:6],
[-2.3381074105, -4.0879494441, -5.5205598281,
-6.7867080901, -7.9441335871, -9.0226508533], rtol=1e-10)
assert_allclose(zp[:6],
[-1.0187929716, -3.2481975822, -4.8200992112,
-6.1633073556, -7.3721772550, -8.4884867340], rtol=1e-10)
@pytest.mark.fail_slow(5)
def test_bi_zeros_big(self):
z, zp, bi_zpx, bip_zx = special.bi_zeros(50000)
_, _, bi_z, bip_z = special.airy(z)
_, _, bi_zp, bip_zp = special.airy(zp)
bi_envelope = 1/abs(z)**(1./4)
bip_envelope = abs(zp)**(1./4)
# Check values
assert_allclose(bi_zpx, bi_zp, rtol=1e-10)
assert_allclose(bip_zx, bip_z, rtol=1e-10)
# Check they are zeros
assert_allclose(bi_z/bi_envelope, 0, atol=1e-10, rtol=0)
assert_allclose(bip_zp/bip_envelope, 0, atol=1e-10, rtol=0)
# Check first zeros, DLMF 9.9.2
assert_allclose(z[:6],
[-1.1737132227, -3.2710933028, -4.8307378417,
-6.1698521283, -7.3767620794, -8.4919488465], rtol=1e-10)
assert_allclose(zp[:6],
[-2.2944396826, -4.0731550891, -5.5123957297,
-6.7812944460, -7.9401786892, -9.0195833588], rtol=1e-10)
| TestAiry |
python | pytorch__pytorch | test/test_mps.py | {
"start": 24038,
"end": 26432
} | class ____(TestCaseMPS):
def _npRelu(self, np_features):
return np.maximum(np_features, np.zeros(np_features.shape)).astype(np_features.dtype)
def testNpRelu(self):
torch.testing.assert_close(
np.array([[0., 0.7, 0.0, 0.3, 0.0], [0.1, 0.0, 0.5, 0.0, 0.9]]),
self._npRelu(
np.array([[-0.9, 0.7, -0.5, 0.3, -0.1], [0.1, -0.3, 0.5, -0.7,
0.9]])))
def _testRelu(self, np_features, device):
np_relu = self._npRelu(np_features)
# Convert the numpy array to a PyTorch Tensor,
# and move the Tensor to the CPU/GPU based on the "device" parameter
py_tensor = torch.from_numpy(np_features).to(device)
py_relu = torch.nn.ReLU(inplace=False)(py_tensor)
py_relu_cpu = py_relu.to("cpu")
self.assertEqual(np_relu, py_relu_cpu)
def _testReluInPlace(self, np_features, device):
np_relu = self._npRelu(np_features)
# Convert the numpy array to a PyTorch Tensor,
# and move the Tensor to the CPU/GPU based on the "device" parameter
py_tensor = torch.from_numpy(np_features).to(device)
py_relu = torch.nn.ReLU(inplace=True)(py_tensor)
py_relu_cpu = py_relu.to("cpu")
self.assertEqual(np_relu, py_relu_cpu)
# Inplace Relu modifies the initial input and it should match the output of Relu
self.assertEqual(np_relu, py_tensor.to("cpu"))
def testNumbersCPU(self):
for t in [np.int32]:
# Force execution on CPU even if a GPU kernel is available for the type.
self._testRelu(
np.array([[-9, 7, -5, 3, -1], [1, -3, 5, -7, 9]]).astype(t),
device="cpu")
self._testReluInPlace(
np.array([[-9, 7, -5, 3, -1], [1, -3, 5, -7, 9]]).astype(t),
device="cpu")
def testNumbersGPU(self):
for t in [np.float16, np.float32]:
self._testRelu(
np.array([[-9, 7, -5, 3, -1], [1, -3, 5, -7, 9]]).astype(t),
device="mps")
self._testReluInPlace(
np.array([[-9, 7, -5, 3, -1], [1, -3, 5, -7, 9]]).astype(t),
device="mps")
self._testRelu(np.array([]).astype(t), device="mps")
self._testReluInPlace(np.array([]).astype(t), device="mps")
| MPSReluTest |
python | falconry__falcon | examples/things_advanced.py | {
"start": 2134,
"end": 2749
} | class ____:
def process_request(self, req, resp):
if not req.client_accepts_json:
raise falcon.HTTPNotAcceptable(
description='This API only supports responses encoded as JSON.',
href='http://docs.examples.com/api/json',
)
if req.method in ('POST', 'PUT'):
if 'application/json' not in req.content_type:
raise falcon.HTTPUnsupportedMediaType(
title='This API only supports requests encoded as JSON.',
href='http://docs.examples.com/api/json',
)
| RequireJSON |
python | pypa__pipenv | pipenv/patched/pip/_internal/utils/temp_dir.py | {
"start": 998,
"end": 2086
} | class ____:
"""Manages temp directory behavior"""
def __init__(self) -> None:
self._should_delete: Dict[str, bool] = {}
def set_delete(self, kind: str, value: bool) -> None:
"""Indicate whether a TempDirectory of the given kind should be
auto-deleted.
"""
self._should_delete[kind] = value
def get_delete(self, kind: str) -> bool:
"""Get configured auto-delete flag for a given TempDirectory type,
default True.
"""
return self._should_delete.get(kind, True)
_tempdir_registry: Optional[TempDirectoryTypeRegistry] = None
@contextmanager
def tempdir_registry() -> Generator[TempDirectoryTypeRegistry, None, None]:
"""Provides a scoped global tempdir registry that can be used to dictate
whether directories should be deleted.
"""
global _tempdir_registry
old_tempdir_registry = _tempdir_registry
_tempdir_registry = TempDirectoryTypeRegistry()
try:
yield _tempdir_registry
finally:
_tempdir_registry = old_tempdir_registry
| TempDirectoryTypeRegistry |
python | pydata__xarray | asv_bench/benchmarks/rolling.py | {
"start": 3486,
"end": 4352
} | class ____(RollingMemory):
@parameterized(["func", "use_bottleneck"], (["sum", "max", "mean"], [True, False]))
def peakmem_ndrolling_reduce(self, func, use_bottleneck):
with xr.set_options(use_bottleneck=use_bottleneck):
roll = self.ds.var1.rolling(x=10, y=4)
getattr(roll, func)()
@parameterized(["func", "use_bottleneck"], (["sum", "max", "mean"], [True, False]))
def peakmem_1drolling_reduce(self, func, use_bottleneck):
with xr.set_options(use_bottleneck=use_bottleneck):
roll = self.ds.var3.rolling(t=100)
getattr(roll, func)()
@parameterized(["stride"], ([None, 5, 50]))
def peakmem_1drolling_construct(self, stride):
self.ds.var2.rolling(t=100).construct("w", stride=stride)
self.ds.var3.rolling(t=100).construct("w", stride=stride)
| DataArrayRollingMemory |
python | great-expectations__great_expectations | tests/integration/data_sources_and_expectations/expectations/test_expect_column_values_to_be_between.py | {
"start": 6880,
"end": 13735
} | class ____:
# expect a standard error message, but exclude the column type string, which is backend specific
EXPECTED_ERROR = "ColumnValuesBetween metrics cannot be computed on column of type"
@parameterize_batch_for_data_sources(
data_source_configs=SQL_DATA_SOURCES,
data=DATA,
)
def test_fails_when_run_against_invalid_column_type(self, batch_for_datasource: Batch) -> None:
expect = gxe.ExpectColumnValuesToBeBetween(
column=STRING_COLUMN,
min_value=0,
max_value=1,
)
result = batch_for_datasource.validate(expect=expect)
exception_info = list(result.exception_info.values())
assert len(exception_info) == 1
assert self.EXPECTED_ERROR in exception_info[0]["exception_message"]
@parameterize_batch_for_data_sources(
data_source_configs=SQL_DATA_SOURCES,
data=DATA,
)
def test_other_expectations_pass_on_failure(self, batch_for_datasource: Batch) -> None:
"""Prior to GX v1.3.8, if ExpectColumnValuesToBeBetween ran against a string column in a
SQL context, every other expectation would fail with an exception, including metrics for
unrelated columns. This test ensures that when a user tries to run that expectation
against an invalid column type, other expectations continue to work as expected.
"""
expect = ExpectationSuite(
name="test_suite",
expectations=[
gxe.ExpectColumnValuesToBeBetween(
column=STRING_COLUMN,
min_value=0,
max_value=1,
),
# this expectation used to fail because of shared metrics
gxe.ExpectColumnValuesToNotBeNull(column=STRING_COLUMN),
# this expectation also used to fail despite not sharing metrics
gxe.ExpectColumnValuesToNotBeNull(
column=NUMERIC_COLUMN,
),
],
)
result = batch_for_datasource.validate(expect=expect)
# expect only one ExpectationResult to have an error
results_with_errors = [
result
for result in result.results
if result.exception_info.get("raised_exception") is not False
]
assert len(results_with_errors) == 1
exception_info = list(results_with_errors[0].exception_info.values())
assert len(exception_info) == 1
assert self.EXPECTED_ERROR in exception_info[0]["exception_message"]
@pytest.mark.parametrize(
"suite_param_value,expected_result",
[
pytest.param(True, True, id="success"),
],
)
@parameterize_batch_for_data_sources(data_source_configs=JUST_PANDAS_DATA_SOURCES, data=DATA)
def test_success_with_suite_param_strict_min_(
batch_for_datasource: Batch, suite_param_value: bool, expected_result: bool
) -> None:
suite_param_key = "test_expect_column_values_to_be_between"
expectation = gxe.ExpectColumnValuesToBeBetween(
column=NUMERIC_COLUMN,
min_value=0,
max_value=6,
strict_min={"$PARAMETER": suite_param_key},
result_format=ResultFormat.SUMMARY,
)
result = batch_for_datasource.validate(
expectation, expectation_parameters={suite_param_key: suite_param_value}
)
assert result.success == expected_result
@pytest.mark.parametrize(
"suite_param_value,expected_result",
[
pytest.param(True, True, id="success"),
],
)
@parameterize_batch_for_data_sources(data_source_configs=JUST_PANDAS_DATA_SOURCES, data=DATA)
def test_success_with_suite_param_strict_max_(
batch_for_datasource: Batch, suite_param_value: bool, expected_result: bool
) -> None:
suite_param_key = "test_expect_column_values_to_be_between"
expectation = gxe.ExpectColumnValuesToBeBetween(
column=NUMERIC_COLUMN,
min_value=0,
max_value=6,
strict_max={"$PARAMETER": suite_param_key},
result_format=ResultFormat.SUMMARY,
)
result = batch_for_datasource.validate(
expectation, expectation_parameters={suite_param_key: suite_param_value}
)
assert result.success == expected_result
@parameterize_batch_for_data_sources(data_source_configs=JUST_PANDAS_DATA_SOURCES, data=DATA)
def test_include_unexpected_rows_pandas(batch_for_datasource: Batch) -> None:
"""Test include_unexpected_rows for ExpectColumnValuesToBeBetween with pandas data sources."""
expectation = gxe.ExpectColumnValuesToBeBetween(column=NUMERIC_COLUMN, min_value=2, max_value=4)
result = batch_for_datasource.validate(
expectation, result_format={"result_format": "BASIC", "include_unexpected_rows": True}
)
assert not result.success
result_dict = result["result"]
# Verify that unexpected_rows is present and contains the expected data
assert "unexpected_rows" in result_dict
assert result_dict["unexpected_rows"] is not None
# For pandas data sources, unexpected_rows should be directly usable
unexpected_rows_data = result_dict["unexpected_rows"]
assert isinstance(unexpected_rows_data, pd.DataFrame)
# Convert directly to DataFrame for pandas data sources
unexpected_rows_df = unexpected_rows_data
# Should contain 2 rows where NUMERIC_COLUMN is outside range [2,4] (values 1 and 5)
assert len(unexpected_rows_df) == 2
# The unexpected rows should have values 1 and 5 in NUMERIC_COLUMN
unexpected_values = sorted(unexpected_rows_df[NUMERIC_COLUMN].tolist())
assert unexpected_values == [1, 5]
@parameterize_batch_for_data_sources(
data_source_configs=[PostgreSQLDatasourceTestConfig()], data=DATA
)
def test_include_unexpected_rows_sql(batch_for_datasource: Batch) -> None:
"""Test include_unexpected_rows for ExpectColumnValuesToBeBetween with SQL data sources."""
expectation = gxe.ExpectColumnValuesToBeBetween(column=NUMERIC_COLUMN, min_value=2, max_value=4)
result = batch_for_datasource.validate(
expectation, result_format={"result_format": "BASIC", "include_unexpected_rows": True}
)
assert not result.success
result_dict = result["result"]
# Verify that unexpected_rows is present and contains the expected data
assert "unexpected_rows" in result_dict
assert result_dict["unexpected_rows"] is not None
unexpected_rows_data = result_dict["unexpected_rows"]
assert isinstance(unexpected_rows_data, list)
# Should contain 2 rows where NUMERIC_COLUMN is outside range [2,4] (values 1 and 5)
assert len(unexpected_rows_data) == 2
# Check that values 1 and 5 appear in the unexpected rows data
unexpected_rows_str = str(unexpected_rows_data)
assert "1" in unexpected_rows_str
assert "5" in unexpected_rows_str
| TestColumnValuesBetweenAgainstInvalidColumn |
python | sphinx-doc__sphinx | sphinx/builders/_epub_base.py | {
"start": 2322,
"end": 2391
} | class ____(NamedTuple):
type: str
title: str
uri: str
| Guide |
python | python__mypy | mypy/test/teststubgen.py | {
"start": 33072,
"end": 57376
} | class ____(unittest.TestCase):
"""Unit tests for stub generation from C modules using introspection.
Note that these don't cover a lot!
"""
def test_infer_hash_sig(self) -> None:
assert_equal(infer_c_method_args("__hash__"), [self_arg])
assert_equal(infer_method_ret_type("__hash__"), "int")
def test_infer_getitem_sig(self) -> None:
assert_equal(infer_c_method_args("__getitem__"), [self_arg, ArgSig(name="index")])
def test_infer_setitem_sig(self) -> None:
assert_equal(
infer_c_method_args("__setitem__"),
[self_arg, ArgSig(name="index"), ArgSig(name="object")],
)
assert_equal(infer_method_ret_type("__setitem__"), "None")
def test_infer_eq_op_sig(self) -> None:
for op in ("eq", "ne", "lt", "le", "gt", "ge"):
assert_equal(
infer_c_method_args(f"__{op}__"), [self_arg, ArgSig(name="other", type="object")]
)
def test_infer_binary_op_sig(self) -> None:
for op in ("add", "radd", "sub", "rsub", "mul", "rmul"):
assert_equal(infer_c_method_args(f"__{op}__"), [self_arg, ArgSig(name="other")])
def test_infer_equality_op_sig(self) -> None:
for op in ("eq", "ne", "lt", "le", "gt", "ge", "contains"):
assert_equal(infer_method_ret_type(f"__{op}__"), "bool")
def test_infer_unary_op_sig(self) -> None:
for op in ("neg", "pos"):
assert_equal(infer_c_method_args(f"__{op}__"), [self_arg])
def test_infer_cast_sig(self) -> None:
for op in ("float", "bool", "bytes", "int"):
assert_equal(infer_method_ret_type(f"__{op}__"), op)
def test_generate_class_stub_no_crash_for_object(self) -> None:
output: list[str] = []
mod = ModuleType("module", "") # any module is fine
gen = InspectionStubGenerator(mod.__name__, known_modules=[mod.__name__], module=mod)
gen.generate_class_stub("alias", object, output)
assert_equal(gen.get_imports().splitlines(), [])
assert_equal(output[0], "class alias:")
def test_generate_class_stub_variable_type_annotation(self) -> None:
# This class mimics the stubgen unit test 'testClassVariable'
class TestClassVariableCls:
x = 1
output: list[str] = []
mod = ModuleType("module", "") # any module is fine
gen = InspectionStubGenerator(mod.__name__, known_modules=[mod.__name__], module=mod)
gen.generate_class_stub("C", TestClassVariableCls, output)
assert_equal(gen.get_imports().splitlines(), ["from typing import ClassVar"])
assert_equal(output, ["class C:", " x: ClassVar[int] = ..."])
def test_generate_c_type_none_default(self) -> None:
class TestClass:
def test(self, arg0=1, arg1=None) -> None: # type: ignore[no-untyped-def]
pass
output: list[str] = []
mod = ModuleType(TestClass.__module__, "")
gen = InspectionStubGenerator(mod.__name__, known_modules=[mod.__name__], module=mod)
gen.is_c_module = False
gen.generate_function_stub(
"test",
TestClass.test,
output=output,
class_info=ClassInfo(
self_var="self",
cls=TestClass,
name="TestClass",
docstring=getattr(TestClass, "__doc__", None),
),
)
assert_equal(
output, ["def test(self, arg0: int = ..., arg1: Incomplete | None = ...) -> None: ..."]
)
def test_non_c_generate_signature_with_kw_only_args(self) -> None:
class TestClass:
def test(
self, arg0: str, *, keyword_only: str, keyword_only_with_default: int = 7
) -> None:
pass
output: list[str] = []
mod = ModuleType(TestClass.__module__, "")
gen = InspectionStubGenerator(mod.__name__, known_modules=[mod.__name__], module=mod)
gen.is_c_module = False
gen.generate_function_stub(
"test",
TestClass.test,
output=output,
class_info=ClassInfo(
self_var="self",
cls=TestClass,
name="TestClass",
docstring=getattr(TestClass, "__doc__", None),
),
)
assert_equal(
output,
[
"def test(self, arg0: str, *, keyword_only: str, keyword_only_with_default: int = ...) -> None: ..."
],
)
def test_generate_c_type_inheritance(self) -> None:
class TestClass(KeyError):
pass
output: list[str] = []
mod = ModuleType("module, ")
gen = InspectionStubGenerator(mod.__name__, known_modules=[mod.__name__], module=mod)
gen.generate_class_stub("C", TestClass, output)
assert_equal(output, ["class C(KeyError): ..."])
assert_equal(gen.get_imports().splitlines(), [])
def test_generate_c_type_inheritance_same_module(self) -> None:
output: list[str] = []
mod = ModuleType(TestBaseClass.__module__, "")
gen = InspectionStubGenerator(mod.__name__, known_modules=[mod.__name__], module=mod)
gen.generate_class_stub("C", TestClass, output)
assert_equal(output, ["class C(TestBaseClass): ..."])
assert_equal(gen.get_imports().splitlines(), [])
def test_generate_c_type_inheritance_other_module(self) -> None:
import argparse
class TestClass(argparse.Action):
pass
output: list[str] = []
mod = ModuleType("module", "")
gen = InspectionStubGenerator(mod.__name__, known_modules=[mod.__name__], module=mod)
gen.generate_class_stub("C", TestClass, output)
assert_equal(output, ["class C(argparse.Action): ..."])
assert_equal(gen.get_imports().splitlines(), ["import argparse"])
def test_generate_c_type_inheritance_builtin_type(self) -> None:
class TestClass(type):
pass
output: list[str] = []
mod = ModuleType("module", "")
gen = InspectionStubGenerator(mod.__name__, known_modules=[mod.__name__], module=mod)
gen.generate_class_stub("C", TestClass, output)
assert_equal(output, ["class C(type): ..."])
assert_equal(gen.get_imports().splitlines(), [])
def test_generate_c_type_with_docstring(self) -> None:
class TestClass:
def test(self, arg0: str) -> None:
"""
test(self: TestClass, arg0: int)
"""
output: list[str] = []
mod = ModuleType(TestClass.__module__, "")
gen = InspectionStubGenerator(mod.__name__, known_modules=[mod.__name__], module=mod)
gen.generate_function_stub(
"test",
TestClass.test,
output=output,
class_info=ClassInfo(self_var="self", cls=TestClass, name="TestClass"),
)
assert_equal(output, ["def test(self, arg0: int) -> Any: ..."])
assert_equal(gen.get_imports().splitlines(), [])
def test_generate_c_type_with_docstring_no_self_arg(self) -> None:
class TestClass:
def test(self, arg0: str) -> None:
"""
test(arg0: int)
"""
output: list[str] = []
mod = ModuleType(TestClass.__module__, "")
gen = InspectionStubGenerator(mod.__name__, known_modules=[mod.__name__], module=mod)
gen.generate_function_stub(
"test",
TestClass.test,
output=output,
class_info=ClassInfo(self_var="self", cls=TestClass, name="TestClass"),
)
assert_equal(output, ["def test(self, arg0: int) -> Any: ..."])
assert_equal(gen.get_imports().splitlines(), [])
def test_generate_c_type_classmethod(self) -> None:
class TestClass:
@classmethod
def test(cls, arg0: str) -> None:
pass
output: list[str] = []
mod = ModuleType(TestClass.__module__, "")
gen = InspectionStubGenerator(mod.__name__, known_modules=[mod.__name__], module=mod)
gen.generate_function_stub(
"test",
TestClass.test,
output=output,
class_info=ClassInfo(self_var="cls", cls=TestClass, name="TestClass"),
)
assert_equal(output, ["@classmethod", "def test(cls, *args, **kwargs): ..."])
assert_equal(gen.get_imports().splitlines(), [])
def test_generate_c_type_classmethod_with_overloads(self) -> None:
class TestClass:
@classmethod
def test(cls, arg0: str) -> None:
"""
test(cls, arg0: str)
test(cls, arg0: int)
"""
pass
output: list[str] = []
mod = ModuleType(TestClass.__module__, "")
gen = InspectionStubGenerator(mod.__name__, known_modules=[mod.__name__], module=mod)
gen.generate_function_stub(
"test",
TestClass.test,
output=output,
class_info=ClassInfo(self_var="cls", cls=TestClass, name="TestClass"),
)
assert_equal(
output,
[
"@overload",
"@classmethod",
"def test(cls, arg0: str) -> Any: ...",
"@overload",
"@classmethod",
"def test(cls, arg0: int) -> Any: ...",
],
)
assert_equal(gen.get_imports().splitlines(), ["from typing import overload"])
def test_generate_c_type_with_docstring_empty_default(self) -> None:
class TestClass:
def test(self, arg0: str = "") -> None:
"""
test(self: TestClass, arg0: str = "")
"""
output: list[str] = []
mod = ModuleType(TestClass.__module__, "")
gen = InspectionStubGenerator(mod.__name__, known_modules=[mod.__name__], module=mod)
gen.generate_function_stub(
"test",
TestClass.test,
output=output,
class_info=ClassInfo(self_var="self", cls=TestClass, name="TestClass"),
)
assert_equal(output, ["def test(self, arg0: str = ...) -> Any: ..."])
assert_equal(gen.get_imports().splitlines(), [])
def test_generate_c_function_other_module_arg(self) -> None:
"""Test that if argument references type from other module, module will be imported."""
# Provide different type in python spec than in docstring to make sure, that docstring
# information is used.
def test(arg0: str) -> None:
"""
test(arg0: argparse.Action)
"""
output: list[str] = []
mod = ModuleType(self.__module__, "")
gen = InspectionStubGenerator(mod.__name__, known_modules=[mod.__name__], module=mod)
gen.generate_function_stub("test", test, output=output)
assert_equal(output, ["def test(arg0: argparse.Action) -> Any: ..."])
assert_equal(gen.get_imports().splitlines(), ["import argparse"])
def test_generate_c_function_same_module(self) -> None:
"""Test that if annotation references type from same module but using full path, no module
will be imported, and type specification will be striped to local reference.
"""
# Provide different type in python spec than in docstring to make sure, that docstring
# information is used.
def test(arg0: str) -> None:
"""
test(arg0: argparse.Action) -> argparse.Action
"""
output: list[str] = []
mod = ModuleType("argparse", "")
gen = InspectionStubGenerator(mod.__name__, known_modules=[mod.__name__], module=mod)
gen.generate_function_stub("test", test, output=output)
assert_equal(output, ["def test(arg0: Action) -> Action: ..."])
assert_equal(gen.get_imports().splitlines(), [])
def test_generate_c_function_other_module(self) -> None:
"""Test that if annotation references type from other module, module will be imported."""
def test(arg0: str) -> None:
"""
test(arg0: argparse.Action) -> argparse.Action
"""
output: list[str] = []
mod = ModuleType(self.__module__, "")
gen = InspectionStubGenerator(mod.__name__, known_modules=[mod.__name__], module=mod)
gen.generate_function_stub("test", test, output=output)
assert_equal(output, ["def test(arg0: argparse.Action) -> argparse.Action: ..."])
assert_equal(gen.get_imports().splitlines(), ["import argparse"])
def test_generate_c_function_same_module_nested(self) -> None:
"""Test that if annotation references type from same module but using full path, no module
will be imported, and type specification will be stripped to local reference.
"""
# Provide different type in python spec than in docstring to make sure, that docstring
# information is used.
def test(arg0: str) -> None:
"""
test(arg0: list[argparse.Action]) -> list[argparse.Action]
"""
output: list[str] = []
mod = ModuleType("argparse", "")
gen = InspectionStubGenerator(mod.__name__, known_modules=[mod.__name__], module=mod)
gen.generate_function_stub("test", test, output=output)
assert_equal(output, ["def test(arg0: list[Action]) -> list[Action]: ..."])
assert_equal(gen.get_imports().splitlines(), [])
def test_generate_c_function_same_module_compound(self) -> None:
"""Test that if annotation references type from same module but using full path, no module
will be imported, and type specification will be stripped to local reference.
"""
# Provide different type in python spec than in docstring to make sure, that docstring
# information is used.
def test(arg0: str) -> None:
"""
test(arg0: Union[argparse.Action, NoneType]) -> Tuple[argparse.Action, NoneType]
"""
output: list[str] = []
mod = ModuleType("argparse", "")
gen = InspectionStubGenerator(mod.__name__, known_modules=[mod.__name__], module=mod)
gen.generate_function_stub("test", test, output=output)
assert_equal(output, ["def test(arg0: Union[Action, None]) -> Tuple[Action, None]: ..."])
assert_equal(gen.get_imports().splitlines(), [])
def test_generate_c_function_other_module_nested(self) -> None:
"""Test that if annotation references type from other module, module will be imported,
and the import will be restricted to one of the known modules."""
def test(arg0: str) -> None:
"""
test(arg0: foo.bar.Action) -> other.Thing
"""
output: list[str] = []
mod = ModuleType(self.__module__, "")
gen = InspectionStubGenerator(
mod.__name__, known_modules=["foo", "foo.spangle", "bar"], module=mod
)
gen.generate_function_stub("test", test, output=output)
assert_equal(output, ["def test(arg0: foo.bar.Action) -> other.Thing: ..."])
assert_equal(gen.get_imports().splitlines(), ["import foo", "import other"])
def test_generate_c_function_no_crash_for_non_str_docstring(self) -> None:
def test(arg0: str) -> None: ...
test.__doc__ = property(lambda self: "test(arg0: str) -> None") # type: ignore[assignment]
output: list[str] = []
mod = ModuleType(self.__module__, "")
gen = InspectionStubGenerator(mod.__name__, known_modules=[mod.__name__], module=mod)
gen.generate_function_stub("test", test, output=output)
assert_equal(output, ["def test(*args, **kwargs): ..."])
assert_equal(gen.get_imports().splitlines(), [])
def test_generate_c_property_with_pybind11(self) -> None:
"""Signatures included by PyBind11 inside property.fget are read."""
class TestClass:
def get_attribute(self) -> None:
"""
(self: TestClass) -> str
"""
attribute = property(get_attribute, doc="")
readwrite_properties: list[str] = []
readonly_properties: list[str] = []
mod = ModuleType("module", "") # any module is fine
gen = InspectionStubGenerator(mod.__name__, known_modules=[mod.__name__], module=mod)
gen.generate_property_stub(
"attribute",
TestClass.__dict__["attribute"],
TestClass.attribute,
[],
readwrite_properties,
readonly_properties,
)
assert_equal(readwrite_properties, [])
assert_equal(readonly_properties, ["@property", "def attribute(self) -> str: ..."])
def test_generate_c_property_with_rw_property(self) -> None:
class TestClass:
def __init__(self) -> None:
self._attribute = 0
@property
def attribute(self) -> int:
return self._attribute
@attribute.setter
def attribute(self, value: int) -> None:
self._attribute = value
readwrite_properties: list[str] = []
readonly_properties: list[str] = []
mod = ModuleType("module", "") # any module is fine
gen = InspectionStubGenerator(mod.__name__, known_modules=[mod.__name__], module=mod)
gen.generate_property_stub(
"attribute",
TestClass.__dict__["attribute"],
TestClass.attribute,
[],
readwrite_properties,
readonly_properties,
)
assert_equal(readwrite_properties, ["attribute: Incomplete"])
assert_equal(readonly_properties, [])
def test_generate_c_type_with_single_arg_generic(self) -> None:
class TestClass:
def test(self, arg0: str) -> None:
"""
test(self: TestClass, arg0: List[int])
"""
output: list[str] = []
mod = ModuleType(TestClass.__module__, "")
gen = InspectionStubGenerator(mod.__name__, known_modules=[mod.__name__], module=mod)
gen.generate_function_stub(
"test",
TestClass.test,
output=output,
class_info=ClassInfo(self_var="self", cls=TestClass, name="TestClass"),
)
assert_equal(output, ["def test(self, arg0: List[int]) -> Any: ..."])
assert_equal(gen.get_imports().splitlines(), [])
def test_generate_c_type_with_double_arg_generic(self) -> None:
class TestClass:
def test(self, arg0: str) -> None:
"""
test(self: TestClass, arg0: Dict[str, int])
"""
output: list[str] = []
mod = ModuleType(TestClass.__module__, "")
gen = InspectionStubGenerator(mod.__name__, known_modules=[mod.__name__], module=mod)
gen.generate_function_stub(
"test",
TestClass.test,
output=output,
class_info=ClassInfo(self_var="self", cls=TestClass, name="TestClass"),
)
assert_equal(output, ["def test(self, arg0: Dict[str, int]) -> Any: ..."])
assert_equal(gen.get_imports().splitlines(), [])
def test_generate_c_type_with_nested_generic(self) -> None:
class TestClass:
def test(self, arg0: str) -> None:
"""
test(self: TestClass, arg0: Dict[str, List[int]])
"""
output: list[str] = []
mod = ModuleType(TestClass.__module__, "")
gen = InspectionStubGenerator(mod.__name__, known_modules=[mod.__name__], module=mod)
gen.generate_function_stub(
"test",
TestClass.test,
output=output,
class_info=ClassInfo(self_var="self", cls=TestClass, name="TestClass"),
)
assert_equal(output, ["def test(self, arg0: Dict[str, List[int]]) -> Any: ..."])
assert_equal(gen.get_imports().splitlines(), [])
def test_generate_c_type_with_generic_using_other_module_first(self) -> None:
class TestClass:
def test(self, arg0: str) -> None:
"""
test(self: TestClass, arg0: Dict[argparse.Action, int])
"""
output: list[str] = []
mod = ModuleType(TestClass.__module__, "")
gen = InspectionStubGenerator(mod.__name__, known_modules=[mod.__name__], module=mod)
gen.generate_function_stub(
"test",
TestClass.test,
output=output,
class_info=ClassInfo(self_var="self", cls=TestClass, name="TestClass"),
)
assert_equal(output, ["def test(self, arg0: Dict[argparse.Action, int]) -> Any: ..."])
assert_equal(gen.get_imports().splitlines(), ["import argparse"])
def test_generate_c_type_with_generic_using_other_module_last(self) -> None:
class TestClass:
def test(self, arg0: str) -> None:
"""
test(self: TestClass, arg0: Dict[str, argparse.Action])
"""
output: list[str] = []
mod = ModuleType(TestClass.__module__, "")
gen = InspectionStubGenerator(mod.__name__, known_modules=[mod.__name__], module=mod)
gen.generate_function_stub(
"test",
TestClass.test,
output=output,
class_info=ClassInfo(self_var="self", cls=TestClass, name="TestClass"),
)
assert_equal(output, ["def test(self, arg0: Dict[str, argparse.Action]) -> Any: ..."])
assert_equal(gen.get_imports().splitlines(), ["import argparse"])
def test_generate_c_type_with_overload_pybind11(self) -> None:
class TestClass:
def __init__(self, arg0: str) -> None:
"""
__init__(*args, **kwargs)
Overloaded function.
1. __init__(self: TestClass, arg0: str) -> None
2. __init__(self: TestClass, arg0: str, arg1: str) -> None
"""
output: list[str] = []
mod = ModuleType(TestClass.__module__, "")
gen = InspectionStubGenerator(mod.__name__, known_modules=[mod.__name__], module=mod)
gen.generate_function_stub(
"__init__",
TestClass.__init__,
output=output,
class_info=ClassInfo(self_var="self", cls=TestClass, name="TestClass"),
)
assert_equal(
output,
[
"@overload",
"def __init__(self, arg0: str) -> None: ...",
"@overload",
"def __init__(self, arg0: str, arg1: str) -> None: ...",
"@overload",
"def __init__(self, *args, **kwargs) -> Any: ...",
],
)
assert_equal(gen.get_imports().splitlines(), ["from typing import overload"])
def test_generate_c_type_with_overload_shiboken(self) -> None:
class TestClass:
"""
TestClass(self: TestClass, arg0: str) -> None
TestClass(self: TestClass, arg0: str, arg1: str) -> None
"""
def __init__(self, arg0: str) -> None:
pass
output: list[str] = []
mod = ModuleType(TestClass.__module__, "")
gen = InspectionStubGenerator(mod.__name__, known_modules=[mod.__name__], module=mod)
gen.generate_function_stub(
"__init__",
TestClass.__init__,
output=output,
class_info=ClassInfo(
self_var="self",
cls=TestClass,
name="TestClass",
docstring=getattr(TestClass, "__doc__", None),
),
)
assert_equal(
output,
[
"@overload",
"def __init__(self, arg0: str) -> None: ...",
"@overload",
"def __init__(self, arg0: str, arg1: str) -> None: ...",
],
)
assert_equal(gen.get_imports().splitlines(), ["from typing import overload"])
| StubgencSuite |
python | jazzband__django-simple-history | simple_history/tests/models.py | {
"start": 12705,
"end": 12775
} | class ____(FirstLevelInheritedModel):
pass
| SecondLevelInheritedModel |
python | scipy__scipy | scipy/sparse/tests/test_base.py | {
"start": 204426,
"end": 216169
} | class ____(sparse_test_class(getset=False,
slicing=False, slicing_assign=False,
fancy_indexing=False, fancy_assign=False,
nnz_axis=False)):
spcreator = bsr_array
math_dtypes = [np.int_, np.float64, np.complex128]
def test_constructor1(self):
# check native BSR format constructor
indptr = array([0,2,2,4])
indices = array([0,2,2,3])
data = zeros((4,2,3))
data[0] = array([[0, 1, 2],
[3, 0, 5]])
data[1] = array([[0, 2, 4],
[6, 0, 10]])
data[2] = array([[0, 4, 8],
[12, 0, 20]])
data[3] = array([[0, 5, 10],
[15, 0, 25]])
A = kron([[1,0,2,0],[0,0,0,0],[0,0,4,5]], [[0,1,2],[3,0,5]])
Asp = self.bsr_container((data,indices,indptr),shape=(6,12))
assert_equal(Asp.toarray(), A)
# infer shape from arrays
Asp = self.bsr_container((data,indices,indptr))
assert_equal(Asp.toarray(), A)
def test_constructor2(self):
# construct from dense
# test zero mats
for shape in [(1,1), (5,1), (1,10), (10,4), (3,7), (2,1)]:
A = zeros(shape)
assert_equal(self.bsr_container(A).toarray(), A)
A = zeros((4,6))
assert_equal(self.bsr_container(A, blocksize=(2, 2)).toarray(), A)
assert_equal(self.bsr_container(A, blocksize=(2, 3)).toarray(), A)
A = kron([[1,0,2,0],[0,0,0,0],[0,0,4,5]], [[0,1,2],[3,0,5]])
assert_equal(self.bsr_container(A).toarray(), A)
assert_equal(self.bsr_container(A, shape=(6, 12)).toarray(), A)
assert_equal(self.bsr_container(A, blocksize=(1, 1)).toarray(), A)
assert_equal(self.bsr_container(A, blocksize=(2, 3)).toarray(), A)
assert_equal(self.bsr_container(A, blocksize=(2, 6)).toarray(), A)
assert_equal(self.bsr_container(A, blocksize=(2, 12)).toarray(), A)
assert_equal(self.bsr_container(A, blocksize=(3, 12)).toarray(), A)
assert_equal(self.bsr_container(A, blocksize=(6, 12)).toarray(), A)
A = kron([[1,0,2,0],[0,1,0,0],[0,0,0,0]], [[0,1,2],[3,0,5]])
assert_equal(self.bsr_container(A, blocksize=(2, 3)).toarray(), A)
def test_constructor3(self):
# construct from coo-like (data,(row,col)) format
arg = ([1,2,3], ([0,1,1], [0,0,1]))
A = array([[1,0],[2,3]])
assert_equal(self.bsr_container(arg, blocksize=(2, 2)).toarray(), A)
def test_constructor4(self):
# regression test for gh-6292: self.bsr_matrix((data, indices, indptr)) was
# trying to compare an int to a None
n = 8
data = np.ones((n, n, 1), dtype=np.int8)
indptr = np.array([0, n], dtype=np.int32)
indices = np.arange(n, dtype=np.int32)
self.bsr_container((data, indices, indptr), blocksize=(n, 1), copy=False)
def test_constructor5(self):
# check for validations introduced in gh-13400
n = 8
data_1dim = np.ones(n)
data = np.ones((n, n, n))
indptr = np.array([0, n])
indices = np.arange(n)
with assert_raises(ValueError):
# data ndim check
self.bsr_container((data_1dim, indices, indptr))
with assert_raises(ValueError):
# invalid blocksize
self.bsr_container((data, indices, indptr), blocksize=(1, 1, 1))
with assert_raises(ValueError):
# mismatching blocksize
self.bsr_container((data, indices, indptr), blocksize=(1, 1))
def test_default_dtype(self):
# As a numpy array, `values` has shape (2, 2, 1).
values = [[[1], [1]], [[1], [1]]]
indptr = np.array([0, 2], dtype=np.int32)
indices = np.array([0, 1], dtype=np.int32)
b = self.bsr_container((values, indices, indptr), blocksize=(2, 1))
assert b.dtype == np.array(values).dtype
def test_bsr_tocsr(self):
# check native conversion from BSR to CSR
indptr = array([0, 2, 2, 4])
indices = array([0, 2, 2, 3])
data = zeros((4, 2, 3))
data[0] = array([[0, 1, 2],
[3, 0, 5]])
data[1] = array([[0, 2, 4],
[6, 0, 10]])
data[2] = array([[0, 4, 8],
[12, 0, 20]])
data[3] = array([[0, 5, 10],
[15, 0, 25]])
A = kron([[1, 0, 2, 0], [0, 0, 0, 0], [0, 0, 4, 5]],
[[0, 1, 2], [3, 0, 5]])
Absr = self.bsr_container((data, indices, indptr), shape=(6, 12))
Acsr = Absr.tocsr()
Acsr_via_coo = Absr.tocoo().tocsr()
assert_equal(Acsr.toarray(), A)
assert_equal(Acsr.toarray(), Acsr_via_coo.toarray())
def test_eliminate_zeros(self):
data = kron([1, 0, 0, 0, 2, 0, 3, 0], [[1,1],[1,1]]).T
data = data.reshape(-1,2,2)
indices = array([1, 2, 3, 4, 5, 6, 7, 8])
indptr = array([0, 3, 8])
asp = self.bsr_container((data, indices, indptr), shape=(4,20))
bsp = asp.copy()
asp.eliminate_zeros()
assert_array_equal(asp.nnz, 3*4)
assert_array_equal(asp.toarray(), bsp.toarray())
# GitHub issue #9687
def test_eliminate_zeros_all_zero(self):
np.random.seed(0)
m = self.bsr_container(np.random.random((12, 12)), blocksize=(2, 3))
# eliminate some blocks, but not all
m.data[m.data <= 0.9] = 0
m.eliminate_zeros()
assert_equal(m.nnz, 66)
assert_array_equal(m.data.shape, (11, 2, 3))
# eliminate all remaining blocks
m.data[m.data <= 1.0] = 0
m.eliminate_zeros()
assert_equal(m.nnz, 0)
assert_array_equal(m.data.shape, (0, 2, 3))
assert_array_equal(m.toarray(), np.zeros((12, 12)))
# test fast path
m.eliminate_zeros()
assert_equal(m.nnz, 0)
assert_array_equal(m.data.shape, (0, 2, 3))
assert_array_equal(m.toarray(), np.zeros((12, 12)))
def test_has_canonical_format(self):
"Ensure has_canonical_format memoizes state for sum_duplicates"
A = np.array([[2, 3, 2], [0, 2, 1], [-4, 0, 2]])
M = self.bsr_container(A)
assert_equal(True, M.has_canonical_format)
indices = np.array([0, 0]) # contains duplicate
data = np.array([A, A*0])
indptr = np.array([0, 2])
M = self.bsr_container((data, indices, indptr)).copy()
assert_equal(False, M.has_canonical_format)
assert isinstance(M.has_canonical_format, bool)
# set flag by deduplicating
M.sum_duplicates()
assert_equal(True, M.has_canonical_format)
assert_equal(1, len(M.indices))
# manually set flag True (although underlyingly duplicated)
M = self.bsr_container((data, indices, indptr)).copy()
M.has_canonical_format = True
assert_equal(True, M.has_canonical_format)
assert_equal(2, len(M.indices)) # unaffected content
# ensure deduplication bypassed when has_canonical_format == True
M.sum_duplicates()
assert_equal(2, len(M.indices)) # still has duplicates!!!!
# ensure deduplication reenabled when has_canonical_format == False
M.has_canonical_format = False
M.sum_duplicates()
assert_equal(1, len(M.indices))
assert_equal(True, M.has_canonical_format)
# manually set flag False (although underlyingly canonical)
M = self.bsr_container(A)
M.has_canonical_format = False
assert_equal(False, M.has_canonical_format)
assert_equal(1, len(M.indices))
# sum_duplicates does not complain when no work to do
M.sum_duplicates()
assert_equal(True, M.has_canonical_format)
# manually reset index arrays before accessing M.has_canonical_format is OK
M = self.bsr_container(A)
M.data, M.indices, M.indptr = data, indices, indptr
assert_equal(False, M.has_canonical_format)
assert_equal(2, len(M.indices)) # dups and has_canonical_format is False
# but reset after accessing M.has_canonical_format can break flag
M = self.bsr_container(A)
M.has_canonical_format # underlying attr is set here
M.data, M.indices, M.indptr = data, indices, indptr
assert_equal(True, M.has_canonical_format)
assert_equal(2, len(M.indices)) # dups but has_canonical_format is True
M.sum_duplicates()
assert_equal(2, len(M.indices)) # still has duplicates!!!!
def test_bsr_matvec(self):
A = self.bsr_container(arange(2*3*4*5).reshape(2*4,3*5), blocksize=(4,5))
x = arange(A.shape[1]).reshape(-1,1)
assert_equal(A @ x, A.toarray() @ x)
def test_bsr_matvecs(self):
A = self.bsr_container(arange(2*3*4*5).reshape(2*4,3*5), blocksize=(4,5))
x = arange(A.shape[1]*6).reshape(-1,6)
assert_equal(A @ x, A.toarray() @ x)
@pytest.mark.xfail(run=False, reason='BSR does not have a __getitem__')
def test_iterator(self):
pass
@pytest.mark.xfail(run=False, reason='BSR does not have a __setitem__')
def test_setdiag(self):
pass
def test_resize_blocked(self):
# test resize() with non-(1,1) blocksize
D = np.array([[1, 0, 3, 4],
[2, 0, 0, 0],
[3, 0, 0, 0]])
S = self.spcreator(D, blocksize=(1, 2))
assert_(S.resize((3, 2)) is None)
assert_array_equal(S.toarray(), [[1, 0],
[2, 0],
[3, 0]])
S.resize((2, 2))
assert_array_equal(S.toarray(), [[1, 0],
[2, 0]])
S.resize((3, 2))
assert_array_equal(S.toarray(), [[1, 0],
[2, 0],
[0, 0]])
S.resize((3, 4))
assert_array_equal(S.toarray(), [[1, 0, 0, 0],
[2, 0, 0, 0],
[0, 0, 0, 0]])
assert_raises(ValueError, S.resize, (2, 3))
@pytest.mark.xfail(run=False, reason='BSR does not have a __setitem__')
def test_setdiag_comprehensive(self):
pass
@pytest.mark.skipif(IS_COLAB, reason="exceeds memory limit")
def test_scalar_idx_dtype(self):
# Check that index dtype takes into account all parameters
# passed to sparsetools, including the scalar ones
indptr = np.zeros(2, dtype=np.int32)
indices = np.zeros(0, dtype=np.int32)
vals = np.zeros((0, 1, 1))
a = self.bsr_container((vals, indices, indptr), shape=(1, 2**31-1))
b = self.bsr_container((vals, indices, indptr), shape=(1, 2**31))
c = self.bsr_container((1, 2**31-1))
d = self.bsr_container((1, 2**31))
assert_equal(a.indptr.dtype, np.int32)
assert_equal(b.indptr.dtype, np.int64)
assert_equal(c.indptr.dtype, np.int32)
assert_equal(d.indptr.dtype, np.int64)
try:
vals2 = np.zeros((0, 1, 2**31-1))
vals3 = np.zeros((0, 1, 2**31))
e = self.bsr_container((vals2, indices, indptr), shape=(1, 2**31-1))
f = self.bsr_container((vals3, indices, indptr), shape=(1, 2**31))
assert_equal(e.indptr.dtype, np.int32)
assert_equal(f.indptr.dtype, np.int64)
except (MemoryError, ValueError):
# May fail on 32-bit Python
e = 0
f = 0
# These shouldn't fail
for x in [a, b, c, d, e, f]:
x + x
| TestBSR |
python | ansible__ansible | lib/ansible/module_utils/facts/timeout.py | {
"start": 887,
"end": 2453
} | class ____(Exception):
pass
def timeout(seconds=None, error_message="Timer expired"):
"""
Timeout decorator to expire after a set number of seconds. This raises an
ansible.module_utils.facts.TimeoutError if the timeout is hit before the
function completes.
"""
def decorator(func):
def wrapper(*args, **kwargs):
timeout_value = seconds
if timeout_value is None:
timeout_value = globals().get('GATHER_TIMEOUT') or DEFAULT_GATHER_TIMEOUT
pool = mp.ThreadPool(processes=1)
res = pool.apply_async(func, args, kwargs)
pool.close()
try:
return res.get(timeout_value)
except multiprocessing.TimeoutError:
# This is an ansible.module_utils.common.facts.timeout.TimeoutError
raise TimeoutError(f'{error_message} after {timeout_value} seconds')
finally:
pool.terminate()
return wrapper
# If we were called as @timeout, then the first parameter will be the
# function we are to wrap instead of the number of seconds. Detect this
# and correct it by setting seconds to our default value and return the
# inner decorator function manually wrapped around the function
if callable(seconds):
func = seconds
seconds = None
return decorator(func)
# If we were called as @timeout([...]) then python itself will take
# care of wrapping the inner decorator around the function
return decorator
| TimeoutError |
python | run-llama__llama_index | llama-index-integrations/readers/llama-index-readers-github/llama_index/readers/github/repository/github_client.py | {
"start": 3875,
"end": 4735
} | class ____(Protocol):
def get_all_endpoints(self) -> Dict[str, str]: ...
async def request(
self,
endpoint: str,
method: str,
headers: Dict[str, Any] = {},
**kwargs: Any,
) -> Any: ...
async def get_tree(
self,
owner: str,
repo: str,
tree_sha: str,
) -> GitTreeResponseModel: ...
async def get_blob(
self,
owner: str,
repo: str,
file_sha: str,
) -> Optional[GitBlobResponseModel]: ...
async def get_commit(
self,
owner: str,
repo: str,
commit_sha: str,
) -> GitCommitResponseModel: ...
async def get_branch(
self,
owner: str,
repo: str,
branch: Optional[str],
branch_name: Optional[str],
) -> GitBranchResponseModel: ...
| BaseGithubClient |
python | PyCQA__pylint | tests/functional/e/enum_subclasses.py | {
"start": 773,
"end": 827
} | class ____(OrderedEnum):
red = 0
green = 1
| Color |
python | streamlit__streamlit | lib/tests/streamlit/elements/echo_test.py | {
"start": 2826,
"end": 4288
} | class ____:
def do_x(self):
pass
def do_y(self):
pass"""
element = self.get_delta_from_queue(echo_index).new_element
assert echo_str == element.code.code_text
element = self.get_delta_from_queue(output_index).new_element
assert element.markdown.body == "Hello"
self.clear_queue()
def test_if_elif_else(self):
page = "Dual"
if page == "Single":
with st.echo():
st.write("Single")
elif page == "Dual":
with st.echo():
st.write("Dual")
else:
with st.echo():
st.write("ELSE")
echo_str = 'st.write("Dual")'
element = self.get_delta_from_queue(0).new_element
assert echo_str == element.code.code_text
element = self.get_delta_from_queue(1).new_element
assert element.markdown.body == "Dual"
self.clear_queue()
def test_root_level_echo(self):
import tests.streamlit.echo_test_data.root_level_echo # noqa: F401
echo_str = "a = 123"
element = self.get_delta_from_queue(0).new_element
assert echo_str == element.code.code_text
def test_echo_multiline_param(self):
import tests.streamlit.echo_test_data.multiline_param_echo # noqa: F401
echo_str = "a = 123"
element = self.get_delta_from_queue(0).new_element
assert echo_str == element.code.code_text
| MyClass |
python | pytorch__pytorch | test/inductor/test_minifier_isolate.py | {
"start": 524,
"end": 1998
} | class ____(MinifierTestBase):
def _test_after_aot_runtime_error(self, device, expected_error):
run_code = f"""\
@torch.compile()
def inner(x):
x = torch.relu(x)
x = torch.cos(x)
return x
inner(torch.randn(2, 2).to("{device}"))
"""
# These must isolate because they crash the process
self._run_full_test(run_code, "aot", expected_error, isolate=True)
@unittest.skipIf(IS_JETSON, "Fails on Jetson")
@inductor_config.patch("cpp.inject_relu_bug_TESTING_ONLY", "runtime_error")
@skipIfWindows(
msg="Build Failed: fatal error C1083: Cannot open include file: 'Python.h': No such file or directory"
)
def test_after_aot_cpu_runtime_error(self):
self._test_after_aot_runtime_error("cpu", "")
@skipIfRocm
@skipIfXpu
@requires_gpu
@inductor_config.patch("triton.inject_relu_bug_TESTING_ONLY", "runtime_error")
def test_after_aot_gpu_runtime_error(self):
self._test_after_aot_runtime_error(GPU_TYPE, "device-side assert")
if __name__ == "__main__":
import sys
from torch._dynamo.test_case import run_tests
# Skip CI tests on mac since CPU inductor does not seem to work due to C++ compile errors,
# also skip on ASAN due to https://github.com/pytorch/pytorch/issues/98262
# also skip on Py 3.11+ since unhandled exceptions can cause segfaults
if not IS_MACOS and not TEST_WITH_ASAN and sys.version_info < (3, 11):
run_tests()
| MinifierIsolateTests |
python | py-pdf__pypdf | pypdf/generic/_appearance_stream.py | {
"start": 584,
"end": 749
} | class ____(IntEnum):
"""Defines the alignment options for text within a form field's appearance stream."""
LEFT = 0
CENTER = 1
RIGHT = 2
| TextAlignment |
python | django-compressor__django-compressor | compressor/filters/template.py | {
"start": 124,
"end": 370
} | class ____(FilterBase):
def input(self, filename=None, basename=None, **kwargs):
template = Template(self.content)
context = Context(settings.COMPRESS_TEMPLATE_FILTER_CONTEXT)
return template.render(context)
| TemplateFilter |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 1018407,
"end": 1018945
} | class ____(sgqlc.types.Type, Node):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("actor", "created_at", "from_repository", "issue")
actor = sgqlc.types.Field(Actor, graphql_name="actor")
created_at = sgqlc.types.Field(
sgqlc.types.non_null(DateTime), graphql_name="createdAt"
)
from_repository = sgqlc.types.Field(Repository, graphql_name="fromRepository")
issue = sgqlc.types.Field(sgqlc.types.non_null(Issue), graphql_name="issue")
| TransferredEvent |
python | ray-project__ray | python/ray/tune/utils/util.py | {
"start": 903,
"end": 4352
} | class ____(Thread):
"""Class for system usage utilization monitoring.
It keeps track of CPU, RAM, GPU, VRAM usage (each gpu separately) by
pinging for information every x seconds in a separate thread.
Requires psutil and GPUtil to be installed. Can be enabled with
Tuner(param_space={"log_sys_usage": True}).
"""
def __init__(self, start=True, delay=0.7):
self.stopped = True
GPUtil = _import_gputil()
self.GPUtil = GPUtil
if GPUtil is None and start:
logger.warning("Install gputil for GPU system monitoring.")
if psutil is None and start:
logger.warning("Install psutil to monitor system performance.")
if GPUtil is None and psutil is None:
return
super(UtilMonitor, self).__init__()
self.delay = delay # Time between calls to GPUtil
self.values = defaultdict(list)
self.lock = threading.Lock()
self.daemon = True
if start:
self.start()
def _read_utilization(self):
with self.lock:
if psutil is not None:
self.values["cpu_util_percent"].append(
float(psutil.cpu_percent(interval=None))
)
self.values["ram_util_percent"].append(
float(psutil.virtual_memory().percent)
)
if self.GPUtil is not None:
gpu_list = []
try:
gpu_list = self.GPUtil.getGPUs()
except Exception:
logger.debug("GPUtil failed to retrieve GPUs.")
for gpu in gpu_list:
self.values["gpu_util_percent" + str(gpu.id)].append(
float(gpu.load)
)
self.values["vram_util_percent" + str(gpu.id)].append(
float(gpu.memoryUtil)
)
def get_data(self):
if self.stopped:
return {}
with self.lock:
ret_values = copy.deepcopy(self.values)
for key, val in self.values.items():
del val[:]
return {"perf": {k: np.mean(v) for k, v in ret_values.items() if len(v) > 0}}
def run(self):
self.stopped = False
while not self.stopped:
self._read_utilization()
time.sleep(self.delay)
def stop(self):
self.stopped = True
@DeveloperAPI
def retry_fn(
fn: Callable[[], Any],
exception_type: Union[Type[Exception], Sequence[Type[Exception]]] = Exception,
num_retries: int = 3,
sleep_time: int = 1,
timeout: Optional[Number] = None,
) -> bool:
errored = threading.Event()
def _try_fn():
try:
fn()
except exception_type as e:
logger.warning(e)
errored.set()
for i in range(num_retries):
errored.clear()
proc = threading.Thread(target=_try_fn)
proc.daemon = True
proc.start()
proc.join(timeout=timeout)
if proc.is_alive():
logger.debug(
f"Process timed out (try {i+1}/{num_retries}): "
f"{getattr(fn, '__name__', None)}"
)
elif not errored.is_set():
return True
# Timed out, sleep and try again
time.sleep(sleep_time)
# Timed out, so return False
return False
@DeveloperAPI
| UtilMonitor |
python | kamyu104__LeetCode-Solutions | Python/right-triangles.py | {
"start": 689,
"end": 1170
} | class ____(object):
def numberOfRightTriangles(self, grid):
"""
:type grid: List[List[int]]
:rtype: int
"""
n, m = len(grid), len(grid[0])
cnt1 = [sum(grid[i][j] for j in xrange(m)) for i in xrange(n)]
cnt2 = [sum(grid[i][j] for i in xrange(n)) for j in xrange(m)]
return sum((cnt1[i]-1)*(cnt2[j]-1) for i in xrange(n) for j in xrange(m) if grid[i][j])
# Time: O(n * m)
# Space: O(min(n, m))
# freq table
| Solution2 |
python | pydantic__pydantic | pydantic/experimental/pipeline.py | {
"start": 2245,
"end": 2483
} | class ____:
constraint: _ConstraintAnnotation
_Step = Union[_ValidateAs, _ValidateAsDefer, _Transform, _PipelineOr, _PipelineAnd, _Constraint]
_InT = TypeVar('_InT')
_OutT = TypeVar('_OutT')
_NewOutT = TypeVar('_NewOutT')
| _Constraint |
python | milvus-io__pymilvus | pymilvus/settings.py | {
"start": 1104,
"end": 1341
} | class ____:
def format_col(self, message_str: str, level_name: str):
if level_name in COLORS:
message_str = COLORS.get(level_name) + message_str + COLORS.get("ENDC")
return message_str
| ColorFulFormatColMixin |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/pyflakes/F401_14.py | {
"start": 136,
"end": 193
} | class ____:
datetime: "Optional[datetime.datetime]"
| Class |
python | pyqtgraph__pyqtgraph | pyqtgraph/opengl/shaders.py | {
"start": 10848,
"end": 10972
} | class ____(Shader):
def __init__(self, code):
Shader.__init__(self, GL.GL_VERTEX_SHADER, code)
| VertexShader |
python | celery__celery | t/unit/backends/test_redis.py | {
"start": 12661,
"end": 12720
} | class ____(CredentialProvider):
pass
| MyCredentialProvider |
python | getsentry__sentry | src/bitfield/models.py | {
"start": 2251,
"end": 5323
} | class ____(BigIntegerField):
def contribute_to_class(self, cls: type[Model], name: str, private_only: bool = False) -> None:
super().contribute_to_class(cls, name, private_only=private_only)
setattr(cls, self.name, BitFieldCreator(self))
def __init__(self, flags, default=None, *args, **kwargs):
if isinstance(flags, dict):
# Get only integer keys in correct range
valid_keys = (
k for k in flags.keys() if isinstance(k, int) and (0 <= k < MAX_FLAG_COUNT)
)
if not valid_keys:
raise ValueError("Wrong keys or empty dictionary")
# Fill list with values from dict or with empty values
flags = [flags.get(i, "") for i in range(max(valid_keys) + 1)]
if len(flags) > MAX_FLAG_COUNT:
raise ValueError("Too many flags")
self._arg_flags = flags
flags = list(flags)
labels = []
for num, flag in enumerate(flags):
if isinstance(flag, (tuple, list)):
flags[num] = flag[0]
labels.append(flag[1])
else:
labels.append(flag)
if isinstance(default, (list, tuple, set, frozenset)):
new_value = 0
for flag in default:
new_value |= Bit(flags.index(flag))
default = new_value
kwargs["default"] = default
BigIntegerField.__init__(self, *args, **kwargs)
self.flags = flags
self.labels = labels
def pre_save(self, instance, add):
value = getattr(instance, self.attname)
return value
def get_prep_value(self, value):
if value is None:
return None
if isinstance(value, (BitHandler, Bit)):
value = value.mask
return int(value)
def to_python(self, value):
if isinstance(value, Bit):
value = value.mask
if not isinstance(value, BitHandler):
# Regression for #1425: fix bad data that was created resulting
# in negative values for flags. Compute the value that would
# have been visible ot the application to preserve compatibility.
if isinstance(value, int) and value < 0:
new_value = 0
for bit_number, _ in enumerate(self.flags):
new_value |= value & (2**bit_number)
value = new_value
value = BitHandler(value, self.flags, self.labels)
else:
# Ensure flags are consistent for unpickling
value._keys = self.flags
return value
def deconstruct(self):
name, path, args, kwargs = super().deconstruct()
args = [self._arg_flags, *args]
return name, path, args, kwargs
def flags_from_annotations(annotations: Mapping[str, type]) -> Sequence[str]:
flags = []
for attr, ty in annotations.items():
assert ty in ("bool", bool), f"bitfields can only hold bools, {attr} is {ty!r}"
flags.append(attr)
return flags
| BitField |
python | numba__numba | numba/core/typing/builtins.py | {
"start": 30147,
"end": 31128
} | class ____(AbstractTemplate):
def generic(self, args, kws):
assert not kws
if len(args) == 1:
[arg] = args
if arg not in types.number_domain:
raise errors.NumbaTypeError("complex() only support for numbers")
if arg == types.float32:
return signature(types.complex64, arg)
else:
return signature(types.complex128, arg)
elif len(args) == 2:
[real, imag] = args
if (real not in types.number_domain or
imag not in types.number_domain):
raise errors.NumbaTypeError("complex() only support for numbers")
if real == imag == types.float32:
return signature(types.complex64, real, imag)
else:
return signature(types.complex128, real, imag)
#------------------------------------------------------------------------------
@infer_global(enumerate)
| Complex |
python | ray-project__ray | python/ray/tests/unit/test_runtime_env_validation.py | {
"start": 16956,
"end": 19930
} | class ____:
def test_validate_pip_invalid_types(self):
with pytest.raises(TypeError):
validation.parse_and_validate_pip(1)
with pytest.raises(TypeError):
validation.parse_and_validate_pip(True)
def test_validate_pip_invalid_path(self):
with pytest.raises(ValueError):
validation.parse_and_validate_pip("../bad_path.txt")
@pytest.mark.parametrize("absolute_path", [True, False])
def test_validate_pip_valid_file(self, test_directory, absolute_path):
_, requirements_file, _, _ = test_directory
if absolute_path:
requirements_file = requirements_file.resolve()
result = validation.parse_and_validate_pip(str(requirements_file))
assert result["packages"] == _PIP_LIST
assert not result["pip_check"]
assert "pip_version" not in result
def test_validate_pip_valid_list(self):
result = validation.parse_and_validate_pip(_PIP_LIST)
assert result["packages"] == _PIP_LIST
assert not result["pip_check"]
assert "pip_version" not in result
def test_validate_ray(self):
result = validation.parse_and_validate_pip(["pkg1", "ray", "pkg2"])
assert result["packages"] == ["pkg1", "ray", "pkg2"]
assert not result["pip_check"]
assert "pip_version" not in result
def test_validate_pip_install_options(self):
# Happy path for non-empty pip_install_options
opts = ["--no-cache-dir", "--no-build-isolation", "--disable-pip-version-check"]
result = validation.parse_and_validate_pip(
{
"packages": ["pkg1", "ray", "pkg2"],
"pip_install_options": list(opts),
}
)
assert result["packages"] == ["pkg1", "ray", "pkg2"]
assert not result["pip_check"]
assert "pip_version" not in result
assert result["pip_install_options"] == opts
# Happy path for missing pip_install_options. No default value for field
# to maintain backwards compatibility with ray==2.0.1
result = validation.parse_and_validate_pip(
{
"packages": ["pkg1", "ray", "pkg2"],
}
)
assert "pip_install_options" not in result
with pytest.raises(TypeError) as e:
validation.parse_and_validate_pip(
{
"packages": ["pkg1", "ray", "pkg2"],
"pip_install_options": [False],
}
)
assert "pip_install_options" in str(e) and "must be of type list[str]" in str(e)
with pytest.raises(TypeError) as e:
validation.parse_and_validate_pip(
{
"packages": ["pkg1", "ray", "pkg2"],
"pip_install_options": None,
}
)
assert "pip_install_options" in str(e) and "must be of type list[str]" in str(e)
| TestValidatePip |
python | ipython__ipython | IPython/core/magics/code.py | {
"start": 1573,
"end": 5112
} | class ____(ValueError): pass
ipython_input_pat = re.compile(r"<ipython\-input\-(\d+)-[a-z\d]+>$")
# To match, e.g. 8-10 1:5 :10 3-
range_re = re.compile(r"""
(?P<start>\d+)?
((?P<sep>[\-:])
(?P<end>\d+)?)?
$""", re.VERBOSE)
def extract_code_ranges(ranges_str):
"""Turn a string of range for %%load into 2-tuples of (start, stop)
ready to use as a slice of the content split by lines.
Examples
--------
list(extract_input_ranges("5-10 2"))
[(4, 10), (1, 2)]
"""
for range_str in ranges_str.split():
rmatch = range_re.match(range_str)
if not rmatch:
continue
sep = rmatch.group("sep")
start = rmatch.group("start")
end = rmatch.group("end")
if sep == '-':
start = int(start) - 1 if start else None
end = int(end) if end else None
elif sep == ':':
start = int(start) - 1 if start else None
end = int(end) - 1 if end else None
else:
end = int(start)
start = int(start) - 1
yield (start, end)
def extract_symbols(code, symbols):
"""
Return a tuple (blocks, not_found)
where ``blocks`` is a list of code fragments
for each symbol parsed from code, and ``not_found`` are
symbols not found in the code.
For example::
In [1]: code = '''a = 10
...: def b(): return 42
...: class A: pass'''
In [2]: extract_symbols(code, 'A,b,z')
Out[2]: (['class A: pass\\n', 'def b(): return 42\\n'], ['z'])
"""
symbols = symbols.split(',')
# this will raise SyntaxError if code isn't valid Python
py_code = ast.parse(code)
marks = [(getattr(s, 'name', None), s.lineno) for s in py_code.body]
code = code.split('\n')
symbols_lines = {}
# we already know the start_lineno of each symbol (marks).
# To find each end_lineno, we traverse in reverse order until each
# non-blank line
end = len(code)
for name, start in reversed(marks):
while not code[end - 1].strip():
end -= 1
if name:
symbols_lines[name] = (start - 1, end)
end = start - 1
# Now symbols_lines is a map
# {'symbol_name': (start_lineno, end_lineno), ...}
# fill a list with chunks of codes for each requested symbol
blocks = []
not_found = []
for symbol in symbols:
if symbol in symbols_lines:
start, end = symbols_lines[symbol]
blocks.append('\n'.join(code[start:end]) + '\n')
else:
not_found.append(symbol)
return blocks, not_found
def strip_initial_indent(lines):
"""For %load, strip indent from lines until finding an unindented line.
https://github.com/ipython/ipython/issues/9775
"""
indent_re = re.compile(r'\s+')
it = iter(lines)
first_line = next(it)
indent_match = indent_re.match(first_line)
if indent_match:
# First line was indented
indent = indent_match.group()
yield first_line[len(indent):]
for line in it:
if line.startswith(indent):
yield line[len(indent) :]
elif line in ("\n", "\r\n") or len(line) == 0:
yield line
else:
# Less indented than the first line - stop dedenting
yield line
break
else:
yield first_line
# Pass the remaining lines through without dedenting
for line in it:
yield line
| MacroToEdit |
python | langchain-ai__langchain | libs/langchain_v1/tests/unit_tests/agents/test_response_format.py | {
"start": 1165,
"end": 1262
} | class ____:
"""Weather response."""
temperature: float
condition: str
| WeatherDataclass |
python | instagram__MonkeyType | monkeytype/tracing.py | {
"start": 627,
"end": 2510
} | class ____:
"""CallTrace contains the types observed during a single invocation of a function"""
def __init__(
self,
func: Callable[..., Any],
arg_types: Dict[str, type],
return_type: Optional[type] = None,
yield_type: Optional[type] = None,
) -> None:
"""
Args:
func: The function where the trace occurred
arg_types: The collected argument types
return_type: The collected return type. This will be None if the called function returns
due to an unhandled exception. It will be NoneType if the function returns the value None.
yield_type: The collected yield type. This will be None if the called function never
yields. It will be NoneType if the function yields the value None.
"""
self.func = func
self.arg_types = arg_types
self.return_type = return_type
self.yield_type = yield_type
def __eq__(self, other: object) -> bool:
if isinstance(other, self.__class__):
return self.__dict__ == other.__dict__
return NotImplemented
def __repr__(self) -> str:
return "CallTrace(%s, %s, %s, %s)" % (
self.func,
self.arg_types,
self.return_type,
self.yield_type,
)
def __hash__(self) -> int:
return hash(
(
self.func,
frozenset(self.arg_types.items()),
self.return_type,
self.yield_type,
)
)
def add_yield_type(self, typ: type) -> None:
if self.yield_type is None:
self.yield_type = typ
else:
self.yield_type = cast(type, Union[self.yield_type, typ])
@property
def funcname(self) -> str:
return get_func_fqname(self.func)
| CallTrace |
python | huggingface__transformers | tests/models/bert_japanese/test_tokenization_bert_japanese.py | {
"start": 14323,
"end": 17885
} | class ____(TokenizerTesterMixin, unittest.TestCase):
from_pretrained_id = "cl-tohoku/bert-base-japanese"
tokenizer_class = BertJapaneseTokenizer
test_rust_tokenizer = False
@classmethod
def setUpClass(cls):
super().setUpClass()
# Create a separate temp directory for the vocab file to avoid conflicts
# with files saved by the base class setUpClass (e.g., tokenizer_config.json, added_tokens.json)
cls.vocab_tmpdirname = tempfile.mkdtemp()
vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "こ", "ん", "に", "ち", "は", "ば", "世", "界", "、", "。"]
cls.vocab_file = os.path.join(cls.vocab_tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
with open(cls.vocab_file, "w", encoding="utf-8") as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
@classmethod
def tearDownClass(cls):
super().tearDownClass()
if hasattr(cls, "vocab_tmpdirname"):
shutil.rmtree(cls.vocab_tmpdirname, ignore_errors=True)
@classmethod
@classmethod
def get_tokenizer(cls, pretrained_name=None, **kwargs):
"""Override to use vocab_tmpdirname instead of tmpdirname to avoid conflicts with saved tokenizer files."""
pretrained_name = pretrained_name or cls.vocab_tmpdirname
return BertJapaneseTokenizer.from_pretrained(pretrained_name, subword_tokenizer_type="character", **kwargs)
def get_input_output_texts(self, tokenizer):
input_text = "こんにちは、世界。 \nこんばんは、世界。"
output_text = "こ ん に ち は 、 世 界 。 こ ん ば ん は 、 世 界 。"
return input_text, output_text
def test_pretokenized_inputs(self):
pass # TODO add if relevant
def test_maximum_encoding_length_pair_input(self):
pass # TODO add if relevant
def test_maximum_encoding_length_single_input(self):
pass # TODO add if relevant
def test_full_tokenizer(self):
tokenizer = self.tokenizer_class(self.vocab_file, subword_tokenizer_type="character")
tokens = tokenizer.tokenize("こんにちは、世界。 \nこんばんは、世界。")
self.assertListEqual(tokens, ["こ", "ん", "に", "ち", "は", "、", "世", "界", "。", "こ", "ん", "ば", "ん", "は", "、", "世", "界", "。"]) # fmt: skip
self.assertListEqual(
tokenizer.convert_tokens_to_ids(tokens), [3, 4, 5, 6, 7, 11, 9, 10, 12, 3, 4, 8, 4, 7, 11, 9, 10, 12]
)
def test_character_tokenizer(self):
vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "こ", "ん", "に", "ち", "は", "ば", "世", "界", "、", "。"]
vocab = {}
for i, token in enumerate(vocab_tokens):
vocab[token] = i
tokenizer = CharacterTokenizer(vocab=vocab, unk_token="[UNK]")
self.assertListEqual(tokenizer.tokenize(""), [])
self.assertListEqual(tokenizer.tokenize("こんにちは"), ["こ", "ん", "に", "ち", "は"])
self.assertListEqual(tokenizer.tokenize("こんにちほ"), ["こ", "ん", "に", "ち", "[UNK]"])
def test_sequence_builders(self):
tokenizer = self.tokenizer_class.from_pretrained("cl-tohoku/bert-base-japanese-char")
text = tokenizer.encode("ありがとう。", add_special_tokens=False)
text_2 = tokenizer.encode("どういたしまして。", add_special_tokens=False)
encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_2 + [3]
@custom_tokenizers
| BertJapaneseCharacterTokenizationTest |
python | Pylons__pyramid | src/pyramid/url.py | {
"start": 1516,
"end": 36622
} | class ____:
"""Request methods mixin for BaseRequest having to do with URL
generation"""
def _partial_application_url(self, scheme=None, host=None, port=None):
"""
Construct the URL defined by request.application_url, replacing any
of the default scheme, host, or port portions with user-supplied
variants.
If ``scheme`` is passed as ``https``, and the ``port`` is *not*
passed, the ``port`` value is assumed to ``443``. Likewise, if
``scheme`` is passed as ``http`` and ``port`` is not passed, the
``port`` value is assumed to be ``80``.
"""
e = self.environ
if scheme is None:
scheme = e['wsgi.url_scheme']
else:
if scheme == 'https':
if port is None:
port = '443'
if scheme == 'http':
if port is None:
port = '80'
if host is None:
host = e.get('HTTP_HOST')
if host is None:
host = e['SERVER_NAME']
if port is None:
if ':' in host:
host, port = host.split(':', 1)
else:
port = e['SERVER_PORT']
else:
port = str(port)
if ':' in host:
host, _ = host.split(':', 1)
if scheme == 'https':
if port == '443':
port = None
elif scheme == 'http':
if port == '80':
port = None
url = scheme + '://' + host
if port:
url += ':%s' % port
url_encoding = getattr(self, 'url_encoding', 'utf-8') # webob 1.2b3+
bscript_name = bytes_(self.script_name, url_encoding)
return url + url_quote(bscript_name, PATH_SAFE)
def route_url(self, route_name, *elements, **kw):
"""Generates a fully qualified URL for a named :app:`Pyramid`
:term:`route configuration`.
Use the route's ``name`` as the first positional argument.
Additional positional arguments (``*elements``) are appended to the
URL as path segments after it is generated.
Use keyword arguments to supply values which match any dynamic
path elements in the route definition. Raises a :exc:`KeyError`
exception if the URL cannot be generated for any reason (not
enough arguments, for example).
For example, if you've defined a route named "foobar" with the path
``{foo}/{bar}/*traverse``::
request.route_url('foobar',
foo='1') => <KeyError exception>
request.route_url('foobar',
foo='1',
bar='2') => <KeyError exception>
request.route_url('foobar',
foo='1',
bar='2',
traverse=('a','b')) => http://e.com/1/2/a/b
request.route_url('foobar',
foo='1',
bar='2',
traverse='/a/b') => http://e.com/1/2/a/b
Values replacing ``:segment`` arguments can be passed as strings
or Unicode objects. They will be encoded to UTF-8 and URL-quoted
before being placed into the generated URL.
Values replacing ``*remainder`` arguments can be passed as strings
*or* tuples of Unicode/string values. If a tuple is passed as a
``*remainder`` replacement value, its values are URL-quoted and
encoded to UTF-8. The resulting strings are joined with slashes
and rendered into the URL. If a string is passed as a
``*remainder`` replacement value, it is tacked on to the URL
after being URL-quoted-except-for-embedded-slashes.
If ``_query`` is provided, it will be used to compose a query string
that will be tacked on to the end of the URL. The value of ``_query``
may be a sequence of two-tuples *or* a data structure with an
``.items()`` method that returns a sequence of two-tuples
(presumably a dictionary). This data structure will be turned into
a query string per the documentation of the
:func:`pyramid.url.urlencode` function. This will produce a query
string in the ``x-www-form-urlencoded`` format. A
non-``x-www-form-urlencoded`` query string may be used by passing a
*string* value as ``_query`` in which case it will be URL-quoted
(e.g. query="foo bar" will become "foo%20bar"). However, the result
will not need to be in ``k=v`` form as required by
``x-www-form-urlencoded``. After the query data is turned into a query
string, a leading ``?`` is prepended, and the resulting string is
appended to the generated URL.
.. note::
Python data structures that are passed as ``_query`` which are
sequences or dictionaries are turned into a string under the same
rules as when run through :func:`urllib.urlencode` with the
``doseq`` argument equal to ``True``. This means that sequences can
be passed as values, and a k=v pair will be placed into the query
string for each value.
If a keyword argument ``_anchor`` is present, its string
representation will be quoted per :rfc:`3986#section-3.5` and used as
a named anchor in the generated URL
(e.g. if ``_anchor`` is passed as ``foo`` and the route URL is
``http://example.com/route/url``, the resulting generated URL will
be ``http://example.com/route/url#foo``).
.. note::
If ``_anchor`` is passed as a string, it should be UTF-8 encoded. If
``_anchor`` is passed as a Unicode object, it will be converted to
UTF-8 before being appended to the URL.
If both ``_anchor`` and ``_query`` are specified, the anchor
element will always follow the query element,
e.g. ``http://example.com?foo=1#bar``.
If any of the keyword arguments ``_scheme``, ``_host``, or ``_port``
is passed and is non-``None``, the provided value will replace the
named portion in the generated URL. For example, if you pass
``_host='foo.com'``, and the URL that would have been generated
without the host replacement is ``http://example.com/a``, the result
will be ``http://foo.com/a``.
Note that if ``_scheme`` is passed as ``https``, and ``_port`` is not
passed, the ``_port`` value is assumed to have been passed as
``443``. Likewise, if ``_scheme`` is passed as ``http`` and
``_port`` is not passed, the ``_port`` value is assumed to have been
passed as ``80``. To avoid this behavior, always explicitly pass
``_port`` whenever you pass ``_scheme``.
If a keyword ``_app_url`` is present, it will be used as the
protocol/hostname/port/leading path prefix of the generated URL.
For example, using an ``_app_url`` of
``http://example.com:8080/foo`` would cause the URL
``http://example.com:8080/foo/fleeb/flub`` to be returned from
this function if the expansion of the route pattern associated
with the ``route_name`` expanded to ``/fleeb/flub``. If
``_app_url`` is not specified, the result of
``request.application_url`` will be used as the prefix (the
default).
If both ``_app_url`` and any of ``_scheme``, ``_host``, or ``_port``
are passed, ``_app_url`` takes precedence and any values passed for
``_scheme``, ``_host``, and ``_port`` will be ignored.
This function raises a :exc:`KeyError` if the URL cannot be
generated due to missing replacement names. Extra replacement
names are ignored.
If the route object which matches the ``route_name`` argument has
a :term:`pregenerator`, the ``*elements`` and ``**kw``
arguments passed to this function might be augmented or changed.
.. versionchanged:: 1.5
Allow the ``_query`` option to be a string to enable alternative
encodings.
The ``_anchor`` option will be escaped instead of using
its raw string representation.
.. versionchanged:: 1.9
If ``_query`` or ``_anchor`` are falsey (such as ``None`` or an
empty string) they will not be included in the generated url.
"""
try:
reg = self.registry
except AttributeError:
reg = get_current_registry() # b/c
mapper = reg.getUtility(IRoutesMapper)
route = mapper.get_route(route_name)
if route is None:
raise KeyError('No such route named %s' % route_name)
if route.pregenerator is not None:
elements, kw = route.pregenerator(self, elements, kw)
app_url, qs, anchor = parse_url_overrides(self, kw)
path = route.generate(kw) # raises KeyError if generate fails
if elements:
suffix = _join_elements(elements)
if not path.endswith('/'):
suffix = '/' + suffix
else:
suffix = ''
return app_url + path + suffix + qs + anchor
def route_path(self, route_name, *elements, **kw):
"""
Generates a path (aka a 'relative URL', a URL minus the host, scheme,
and port) for a named :app:`Pyramid` :term:`route configuration`.
This function accepts the same argument as
:meth:`pyramid.request.Request.route_url` and performs the same duty.
It just omits the host, port, and scheme information in the return
value; only the script_name, path, query parameters, and anchor data
are present in the returned string.
For example, if you've defined a route named 'foobar' with the path
``/{foo}/{bar}``, this call to ``route_path``::
request.route_path('foobar', foo='1', bar='2')
Will return the string ``/1/2``.
.. note::
Calling ``request.route_path('route')`` is the same as calling
``request.route_url('route', _app_url=request.script_name)``.
:meth:`pyramid.request.Request.route_path` is, in fact,
implemented in terms of :meth:`pyramid.request.Request.route_url`
in just this way. As a result, any ``_app_url`` passed within the
``**kw`` values to ``route_path`` will be ignored.
"""
kw['_app_url'] = self.script_name
return self.route_url(route_name, *elements, **kw)
def resource_url(self, resource, *elements, **kw):
"""
Generate a string representing the absolute URL of the
:term:`resource` object based on the ``wsgi.url_scheme``,
``HTTP_HOST`` or ``SERVER_NAME`` in the request, plus any
``SCRIPT_NAME``. The overall result of this method is always a
UTF-8 encoded string.
Examples::
request.resource_url(resource) =>
http://example.com/
request.resource_url(resource, 'a.html') =>
http://example.com/a.html
request.resource_url(resource, 'a.html', query={'q':'1'}) =>
http://example.com/a.html?q=1
request.resource_url(resource, 'a.html', anchor='abc') =>
http://example.com/a.html#abc
request.resource_url(resource, app_url='') =>
/
Any positional arguments passed in as ``elements`` must be strings
Unicode objects, or integer objects. These will be joined by slashes
and appended to the generated resource URL. Each of the elements
passed in is URL-quoted before being appended; if any element is
Unicode, it will converted to a UTF-8 bytestring before being
URL-quoted. If any element is an integer, it will be converted to its
string representation before being URL-quoted.
.. warning:: if no ``elements`` arguments are specified, the resource
URL will end with a trailing slash. If any
``elements`` are used, the generated URL will *not*
end in a trailing slash.
If ``query`` is provided, it will be used to compose a query string
that will be tacked on to the end of the URL. The value of ``query``
may be a sequence of two-tuples *or* a data structure with an
``.items()`` method that returns a sequence of two-tuples
(presumably a dictionary). This data structure will be turned into
a query string per the documentation of the
:func:`pyramid.url.urlencode` function. This will produce a query
string in the ``x-www-form-urlencoded`` format. A
non-``x-www-form-urlencoded`` query string may be used by passing a
*string* value as ``query`` in which case it will be URL-quoted
(e.g. query="foo bar" will become "foo%20bar"). However, the result
will not need to be in ``k=v`` form as required by
``x-www-form-urlencoded``. After the query data is turned into a query
string, a leading ``?`` is prepended, and the resulting string is
appended to the generated URL.
.. note::
Python data structures that are passed as ``query`` which are
sequences or dictionaries are turned into a string under the same
rules as when run through :func:`urllib.urlencode` with the
``doseq`` argument equal to ``True``. This means that sequences can
be passed as values, and a k=v pair will be placed into the query
string for each value.
If a keyword argument ``anchor`` is present, its string
representation will be used as a named anchor in the generated URL
(e.g. if ``anchor`` is passed as ``foo`` and the resource URL is
``http://example.com/resource/url``, the resulting generated URL will
be ``http://example.com/resource/url#foo``).
.. note::
If ``anchor`` is passed as a string, it should be UTF-8 encoded. If
``anchor`` is passed as a Unicode object, it will be converted to
UTF-8 before being appended to the URL.
If both ``anchor`` and ``query`` are specified, the anchor element
will always follow the query element,
e.g. ``http://example.com?foo=1#bar``.
If any of the keyword arguments ``scheme``, ``host``, or ``port`` is
passed and is non-``None``, the provided value will replace the named
portion in the generated URL. For example, if you pass
``host='foo.com'``, and the URL that would have been generated
without the host replacement is ``http://example.com/a``, the result
will be ``http://foo.com/a``.
If ``scheme`` is passed as ``https``, and an explicit ``port`` is not
passed, the ``port`` value is assumed to have been passed as ``443``.
Likewise, if ``scheme`` is passed as ``http`` and ``port`` is not
passed, the ``port`` value is assumed to have been passed as
``80``. To avoid this behavior, always explicitly pass ``port``
whenever you pass ``scheme``.
If a keyword argument ``app_url`` is passed and is not ``None``, it
should be a string that will be used as the port/hostname/initial
path portion of the generated URL instead of the default request
application URL. For example, if ``app_url='http://foo'``, then the
resulting url of a resource that has a path of ``/baz/bar`` will be
``http://foo/baz/bar``. If you want to generate completely relative
URLs with no leading scheme, host, port, or initial path, you can
pass ``app_url=''``. Passing ``app_url=''`` when the resource path is
``/baz/bar`` will return ``/baz/bar``.
If ``app_url`` is passed and any of ``scheme``, ``port``, or ``host``
are also passed, ``app_url`` will take precedence and the values
passed for ``scheme``, ``host``, and/or ``port`` will be ignored.
If the ``resource`` passed in has a ``__resource_url__`` method, it
will be used to generate the URL (scheme, host, port, path) for the
base resource which is operated upon by this function.
.. seealso::
See also :ref:`overriding_resource_url_generation`.
If ``route_name`` is passed, this function will delegate its URL
production to the ``route_url`` function. Calling
``resource_url(someresource, 'element1', 'element2', query={'a':1},
route_name='blogentry')`` is roughly equivalent to doing::
traversal_path = request.resource_path(someobject)
url = request.route_url(
'blogentry',
'element1',
'element2',
_query={'a':'1'},
traverse=traversal_path,
)
It is only sensible to pass ``route_name`` if the route being named has
a ``*remainder`` stararg value such as ``*traverse``. The remainder
value will be ignored in the output otherwise.
By default, the resource path value will be passed as the name
``traverse`` when ``route_url`` is called. You can influence this by
passing a different ``route_remainder_name`` value if the route has a
different ``*stararg`` value at its end. For example if the route
pattern you want to replace has a ``*subpath`` stararg ala
``/foo*subpath``::
request.resource_url(
resource,
route_name='myroute',
route_remainder_name='subpath'
)
If ``route_name`` is passed, it is also permissible to pass
``route_kw``, which will passed as additional keyword arguments to
``route_url``. Saying ``resource_url(someresource, 'element1',
'element2', route_name='blogentry', route_kw={'id':'4'},
_query={'a':'1'})`` is roughly equivalent to::
traversal_path = request.resource_path_tuple(someobject)
kw = {'id':'4', '_query':{'a':'1'}, 'traverse':traversal_path}
url = request.route_url(
'blogentry',
'element1',
'element2',
**kw,
)
If ``route_kw`` or ``route_remainder_name`` is passed, but
``route_name`` is not passed, both ``route_kw`` and
``route_remainder_name`` will be ignored. If ``route_name``
is passed, the ``__resource_url__`` method of the resource passed is
ignored unconditionally. This feature is incompatible with
resources which generate their own URLs.
.. note::
If the :term:`resource` used is the result of a :term:`traversal`,
it must be :term:`location`-aware. The resource can also be the
context of a :term:`URL dispatch`; contexts found this way do not
need to be location-aware.
.. note::
If a 'virtual root path' is present in the request environment (the
value of the WSGI environ key ``HTTP_X_VHM_ROOT``), and the resource
was obtained via :term:`traversal`, the URL path will not include
the virtual root prefix (it will be stripped off the left hand side
of the generated URL).
.. note::
For backwards compatibility purposes, this method is also
aliased as the ``model_url`` method of request.
.. versionchanged:: 1.3
Added the ``app_url`` keyword argument.
.. versionchanged:: 1.5
Allow the ``query`` option to be a string to enable alternative
encodings.
The ``anchor`` option will be escaped instead of using
its raw string representation.
Added the ``route_name``, ``route_kw``, and
``route_remainder_name`` keyword arguments.
.. versionchanged:: 1.9
If ``query`` or ``anchor`` are falsey (such as ``None`` or an
empty string) they will not be included in the generated url.
"""
try:
reg = self.registry
except AttributeError:
reg = get_current_registry() # b/c
url_adapter = reg.queryMultiAdapter((resource, self), IResourceURL)
if url_adapter is None:
url_adapter = ResourceURL(resource, self)
virtual_path = getattr(url_adapter, 'virtual_path', None)
urlkw = {}
for name in ('app_url', 'scheme', 'host', 'port', 'query', 'anchor'):
val = kw.get(name, None)
if val is not None:
urlkw['_' + name] = val
if 'route_name' in kw:
route_name = kw['route_name']
remainder = getattr(url_adapter, 'virtual_path_tuple', None)
if remainder is None:
# older user-supplied IResourceURL adapter without 1.5
# virtual_path_tuple
remainder = tuple(url_adapter.virtual_path.split('/'))
remainder_name = kw.get('route_remainder_name', 'traverse')
urlkw[remainder_name] = remainder
if 'route_kw' in kw:
route_kw = kw.get('route_kw')
if route_kw is not None:
urlkw.update(route_kw)
return self.route_url(route_name, *elements, **urlkw)
app_url, qs, anchor = parse_url_overrides(self, urlkw)
resource_url = None
local_url = getattr(resource, '__resource_url__', None)
if local_url is not None:
# the resource handles its own url generation
d = dict(
virtual_path=virtual_path,
physical_path=url_adapter.physical_path,
app_url=app_url,
)
# allow __resource_url__ to punt by returning None
resource_url = local_url(self, d)
if resource_url is None:
# the resource did not handle its own url generation or the
# __resource_url__ function returned None
resource_url = app_url + virtual_path
if elements:
suffix = _join_elements(elements)
else:
suffix = ''
return resource_url + suffix + qs + anchor
model_url = resource_url # b/w compat forever
def resource_path(self, resource, *elements, **kw):
"""
Generates a path (aka a 'relative URL', a URL minus the host, scheme,
and port) for a :term:`resource`.
This function accepts the same argument as
:meth:`pyramid.request.Request.resource_url` and performs the same
duty. It just omits the host, port, and scheme information in the
return value; only the script_name, path, query parameters, and
anchor data are present in the returned string.
.. note::
Calling ``request.resource_path(resource)`` is the same as calling
``request.resource_path(resource, app_url=request.script_name)``.
:meth:`pyramid.request.Request.resource_path` is, in fact,
implemented in terms of
:meth:`pyramid.request.Request.resource_url` in just this way. As
a result, any ``app_url`` passed within the ``**kw`` values to
``route_path`` will be ignored. ``scheme``, ``host``, and
``port`` are also ignored.
"""
kw['app_url'] = self.script_name
return self.resource_url(resource, *elements, **kw)
def static_url(self, path, **kw):
"""
Generates a fully qualified URL for a static :term:`asset`.
The asset must live within a location defined via the
:meth:`pyramid.config.Configurator.add_static_view`
:term:`configuration declaration` (see :ref:`static_assets_section`).
Example::
request.static_url('mypackage:static/foo.css') =>
http://example.com/static/foo.css
The ``path`` argument points at a file or directory on disk which
a URL should be generated for. The ``path`` may be either a
relative path (e.g. ``static/foo.css``) or an absolute path (e.g.
``/abspath/to/static/foo.css``) or a :term:`asset specification`
(e.g. ``mypackage:static/foo.css``).
The purpose of the ``**kw`` argument is the same as the purpose of
the :meth:`pyramid.request.Request.route_url` ``**kw`` argument. See
the documentation for that function to understand the arguments which
you can provide to it. However, typically, you don't need to pass
anything as ``*kw`` when generating a static asset URL.
This function raises a :exc:`ValueError` if a static view
definition cannot be found which matches the path specification.
"""
if not os.path.isabs(path):
if ':' not in path:
# if it's not a package:relative/name and it's not an
# /absolute/path it's a relative/path; this means its relative
# to the package in which the caller's module is defined.
package = caller_package()
path = f'{package.__name__}:{path}'
try:
reg = self.registry
except AttributeError:
reg = get_current_registry() # b/c
info = reg.queryUtility(IStaticURLInfo)
if info is None:
raise ValueError('No static URL definition matching %s' % path)
return info.generate(path, self, **kw)
def static_path(self, path, **kw):
"""
Generates a path (aka a 'relative URL', a URL minus the host, scheme,
and port) for a static resource.
This function accepts the same argument as
:meth:`pyramid.request.Request.static_url` and performs the
same duty. It just omits the host, port, and scheme information in
the return value; only the script_name, path, query parameters, and
anchor data are present in the returned string.
Example::
request.static_path('mypackage:static/foo.css') =>
/static/foo.css
.. note::
Calling ``request.static_path(apath)`` is the same as calling
``request.static_url(apath, _app_url=request.script_name)``.
:meth:`pyramid.request.Request.static_path` is, in fact, implemented
in terms of :meth:`pyramid.request.Request.static_url` in just this
way. As a result, any ``_app_url`` passed within the ``**kw`` values
to ``static_path`` will be ignored.
"""
if not os.path.isabs(path):
if ':' not in path:
# if it's not a package:relative/name and it's not an
# /absolute/path it's a relative/path; this means its relative
# to the package in which the caller's module is defined.
package = caller_package()
path = f'{package.__name__}:{path}'
kw['_app_url'] = self.script_name
return self.static_url(path, **kw)
def current_route_url(self, *elements, **kw):
    """
    Generate a fully qualified URL for a named :app:`Pyramid`
    :term:`route configuration` based on the 'current route' (the route
    that matched when the request was generated).

    The arguments have the same meaning as those passed to
    :meth:`pyramid.request.Request.route_url`.  One extra keyword
    argument, ``_route_name``, is also understood: when supplied it
    names the route to generate a URL for; otherwise the name of the
    route currently associated with the request is used.

    Keys and values from the current request :term:`matchdict` are
    combined with the remaining ``kw`` arguments (``kw`` wins on
    conflict), and ``request.route_url(route_name, *elements,
    **combined)`` is called to produce the URL.

    Examples: with a current route pattern ``/foo/{page}`` and url path
    ``/foo/1`` (matchdict ``{'page':'1'}``),
    ``request.current_route_url()`` returns ``/foo/1`` while
    ``request.current_route_url(page='2')`` returns ``/foo/2``.  Given
    routes ``/foo/{action}`` named ``foo`` and ``/foo/{action}/{page}``
    named ``fooaction``, with current url ``/foo/view``,
    ``request.current_route_url(_route_name='fooaction', page='5')``
    returns a string like ``/foo/view/5``.
    """
    _missing = object()  # sentinel: distinguishes "absent" from an explicit None
    route_name = kw.pop('_route_name', _missing)
    if route_name is _missing:
        matched = getattr(self, 'matched_route', None)
        route_name = getattr(matched, 'name', None)
        if route_name is None:
            raise ValueError('Current request matches no route')

    if '_query' not in kw:
        # Preserve the current request's query string by default.
        kw['_query'] = self.GET

    combined = {}
    combined.update(self.matchdict)
    combined.update(kw)
    return self.route_url(route_name, *elements, **combined)
def current_route_path(self, *elements, **kw):
    """
    Generate a host-relative path (no scheme, host, or port) for the
    :app:`Pyramid` :term:`route configuration` matched by the current
    request.

    Accepts the same arguments as
    :meth:`pyramid.request.Request.current_route_url` and performs the
    same duty; only the script_name, path, query parameters, and anchor
    data appear in the returned string.

    For example, if the route matched by the current request has the
    pattern ``/{foo}/{bar}``::

        request.current_route_path(foo='1', bar='2')

    returns the string ``/1/2``.

    .. note::

       ``request.current_route_path('route')`` is equivalent to
       ``request.current_route_url('route',
       _app_url=request.script_name)`` and is implemented in exactly
       those terms; consequently any ``_app_url`` supplied in ``**kw``
       is overwritten.
    """
    kw['_app_url'] = self.script_name
    return self.current_route_url(*elements, **kw)
def route_url(route_name, request, *elements, **kw):
    """
    Backwards-compatibility shim.  Equivalent to calling::

        request.route_url(route_name, *elements, **kw)

    See :meth:`pyramid.request.Request.route_url` for details.
    """
    # Delegate straight to the request method; kept only for old callers.
    return request.route_url(route_name, *elements, **kw)
def route_path(route_name, request, *elements, **kw):
    """
    Backwards-compatibility shim.  Equivalent to calling::

        request.route_path(route_name, *elements, **kw)

    See :meth:`pyramid.request.Request.route_path` for details.
    """
    # Delegate straight to the request method; kept only for old callers.
    return request.route_path(route_name, *elements, **kw)
def resource_url(resource, request, *elements, **kw):
    """
    Backwards-compatibility shim.  Equivalent to calling::

        request.resource_url(resource, *elements, **kw)

    See :meth:`pyramid.request.Request.resource_url` for details.
    """
    # Delegate straight to the request method; kept only for old callers.
    return request.resource_url(resource, *elements, **kw)

model_url = resource_url  # b/w compat (forever)
def static_url(path, request, **kw):
    """
    Backwards-compatibility shim.  Equivalent to calling::

        request.static_url(path, **kw)

    See :meth:`pyramid.request.Request.static_url` for details.
    """
    if not os.path.isabs(path) and ':' not in path:
        # Neither package:relative nor /absolute: treat as relative to
        # the package in which the caller's module is defined.
        path = f'{caller_package().__name__}:{path}'
    return request.static_url(path, **kw)
def static_path(path, request, **kw):
    """
    Backwards-compatibility shim.  Equivalent to calling::

        request.static_path(path, **kw)

    See :meth:`pyramid.request.Request.static_path` for details.
    """
    if not os.path.isabs(path) and ':' not in path:
        # Neither package:relative nor /absolute: treat as relative to
        # the package in which the caller's module is defined.
        path = f'{caller_package().__name__}:{path}'
    return request.static_path(path, **kw)
def current_route_url(request, *elements, **kw):
    """
    Backwards-compatibility shim.  Equivalent to calling::

        request.current_route_url(*elements, **kw)

    See :meth:`pyramid.request.Request.current_route_url` for details.
    """
    # Delegate straight to the request method; kept only for old callers.
    return request.current_route_url(*elements, **kw)
def current_route_path(request, *elements, **kw):
    """
    Backwards-compatibility shim.  Equivalent to calling::

        request.current_route_path(*elements, **kw)

    See :meth:`pyramid.request.Request.current_route_path` for details.
    """
    # Delegate straight to the request method; kept only for old callers.
    return request.current_route_path(*elements, **kw)
@lru_cache(1000)
def _join_elements(elements):
    # Quote each path segment and glue them with '/'.  Memoized (the
    # argument is a hashable tuple) because the same element tuples
    # recur frequently during URL generation.
    quoted = (quote_path_segment(s, safe=PATH_SEGMENT_SAFE) for s in elements)
    return '/'.join(quoted)
| URLMethodsMixin |
python | PrefectHQ__prefect | src/integrations/prefect-databricks/prefect_databricks/models/jobs.py | {
"start": 8707,
"end": 11611
} | class ____(str, Enum):
"""
* `CREATING`: Indicates that the cluster is being created.
* `DID_NOT_EXPAND_DISK`: Indicates that a disk is low on space, but adding disks would put it over the max capacity.
* `EXPANDED_DISK`: Indicates that a disk was low on space and the disks were expanded.
* `FAILED_TO_EXPAND_DISK`: Indicates that a disk was low on space and disk space could not be expanded.
* `INIT_SCRIPTS_STARTING`: Indicates that the cluster scoped init script has started.
* `INIT_SCRIPTS_FINISHED`: Indicates that the cluster scoped init script has finished.
* `STARTING`: Indicates that the cluster is being started.
* `RESTARTING`: Indicates that the cluster is being started.
* `TERMINATING`: Indicates that the cluster is being terminated.
* `EDITED`: Indicates that the cluster has been edited.
* `RUNNING`: Indicates the cluster has finished being created. Includes the number of nodes in the cluster and a failure reason if some nodes could not be acquired.
* `RESIZING`: Indicates a change in the target size of the cluster (upsize or downsize).
* `UPSIZE_COMPLETED`: Indicates that nodes finished being added to the cluster. Includes the number of nodes in the cluster and a failure reason if some nodes could not be acquired.
* `NODES_LOST`: Indicates that some nodes were lost from the cluster.
* `DRIVER_HEALTHY`: Indicates that the driver is healthy and the cluster is ready for use.
* `DRIVER_UNAVAILABLE`: Indicates that the driver is unavailable.
* `SPARK_EXCEPTION`: Indicates that a Spark exception was thrown from the driver.
* `DRIVER_NOT_RESPONDING`: Indicates that the driver is up but is not responsive, likely due to GC.
* `DBFS_DOWN`: Indicates that the driver is up but DBFS is down.
* `METASTORE_DOWN`: Indicates that the driver is up but the metastore is down.
* `NODE_BLACKLISTED`: Indicates that a node is not allowed by Spark.
* `PINNED`: Indicates that the cluster was pinned.
* `UNPINNED`: Indicates that the cluster was unpinned.
"""
creating = "CREATING"
didnotexpanddisk = "DID_NOT_EXPAND_DISK"
expandeddisk = "EXPANDED_DISK"
failedtoexpanddisk = "FAILED_TO_EXPAND_DISK"
initscriptsstarting = "INIT_SCRIPTS_STARTING"
initscriptsfinished = "INIT_SCRIPTS_FINISHED"
starting = "STARTING"
restarting = "RESTARTING"
terminating = "TERMINATING"
edited = "EDITED"
running = "RUNNING"
resizing = "RESIZING"
upsizecompleted = "UPSIZE_COMPLETED"
nodeslost = "NODES_LOST"
driverhealthy = "DRIVER_HEALTHY"
driverunavailable = "DRIVER_UNAVAILABLE"
sparkexception = "SPARK_EXCEPTION"
drivernotresponding = "DRIVER_NOT_RESPONDING"
dbfsdown = "DBFS_DOWN"
metastoredown = "METASTORE_DOWN"
nodeblacklisted = "NODE_BLACKLISTED"
pinned = "PINNED"
unpinned = "UNPINNED"
| ClusterEventType |
python | ray-project__ray | release/llm_tests/serve/test_llm_serve_integration.py | {
"start": 6623,
"end": 8791
} | class ____:
"""Tests for remote code model loading behavior."""
@pytest.mark.parametrize("remote_model_app", [False], indirect=True)
def test_remote_code_failure(self, remote_model_app):
"""
Tests that a remote code model fails to load when trust_remote_code=False.
If it loads successfully without remote code, the fixture should be changed to one that does require remote code.
"""
app = remote_model_app
with pytest.raises(RuntimeError, match="Deploying application default failed"):
serve.run(app, blocking=False)
def check_for_failed_deployment():
"""Check if the application deployment has failed."""
try:
default_app = serve.status().applications[SERVE_DEFAULT_APP_NAME]
return default_app.status == ApplicationStatus.DEPLOY_FAILED
except (KeyError, AttributeError):
return False
# Wait for either failure or success (timeout after 2 minutes)
try:
wait_for_condition(check_for_failed_deployment, timeout=120)
except TimeoutError:
# If deployment didn't fail, check if it succeeded
if is_default_app_running():
pytest.fail(
"App deployed successfully without trust_remote_code=True. "
"This model may not actually require remote code. "
"Consider using a different model that requires remote code."
)
else:
pytest.fail("Deployment did not fail or succeed within timeout period.")
@pytest.mark.parametrize("remote_model_app", [True], indirect=True)
def test_remote_code_success(self, remote_model_app):
"""
Tests that a remote code model succeeds to load when trust_remote_code=True.
"""
app = remote_model_app
serve.run(app, blocking=False)
# Wait for the application to be running (timeout after 5 minutes)
wait_for_condition(is_default_app_running, timeout=300)
if __name__ == "__main__":
sys.exit(pytest.main(["-v", __file__]))
| TestRemoteCode |
python | apache__avro | lang/py/avro/errors.py | {
"start": 3874,
"end": 3996
} | class ____(NotImplementedError, AvroException):
"""Raised when the compression named cannot be used."""
| UnsupportedCodec |
python | google__python-fire | fire/test_components.py | {
"start": 6588,
"end": 6752
} | class ____(NamedTuplePoint):
"""Used for verifying subclasses of namedtuples behave as intended."""
def coordinate_sum(self):
return self.x + self.y
| SubPoint |
python | spyder-ide__spyder | spyder/plugins/help/plugin.py | {
"start": 744,
"end": 849
} | class ____:
# Documentation related
ShowSpyderTutorialAction = "spyder_tutorial_action"
| HelpActions |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-facebook-marketing/unit_tests/integration/test_videos.py | {
"start": 2588,
"end": 8279
} | class ____(TestCase):
@staticmethod
def _read(config_: ConfigBuilder, expecting_exception: bool = False, json_schema: Optional[Dict[str, any]] = None) -> EntrypointOutput:
return read_output(
config_builder=config_,
stream_name=_STREAM_NAME,
sync_mode=SyncMode.full_refresh,
expecting_exception=expecting_exception,
json_schema=json_schema,
)
@HttpMocker()
def test_given_one_page_when_read_then_return_records(self, http_mocker: HttpMocker) -> None:
client_side_account_id = ACCOUNT_ID
server_side_account_id = ACCOUNT_ID
http_mocker.get(
get_account_request(account_id=client_side_account_id).build(),
get_account_response(account_id=server_side_account_id),
)
http_mocker.get(
_get_videos_request(account_id=server_side_account_id).build(),
_get_videos_response().with_record(_video_record()).build(),
)
output = self._read(config().with_account_ids([client_side_account_id]))
assert len(output.records) == 1
@HttpMocker()
def test_request_fields_from_json_schema_in_configured_catalog(self, http_mocker: HttpMocker) -> None:
"""
The purpose of this test is to check that the request fields are the same provided in json_request_schema inside configured catalog
"""
configured_json_schema = find_template(f"{_STREAM_NAME}_reduced_configured_schema_fields", __file__)
params_fields = [field for field in configured_json_schema["properties"]]
http_mocker.get(
get_account_request().build(),
get_account_response(),
)
http_mocker.get(
_get_videos_request(fields=params_fields).build(),
_get_videos_response().with_record(_video_record()).build(),
)
output = self._read(config(), json_schema=configured_json_schema)
assert len(output.records) == 1
@HttpMocker()
def test_given_multiple_pages_when_read_then_return_records(self, http_mocker: HttpMocker) -> None:
http_mocker.get(get_account_request().build(), get_account_response())
http_mocker.get(
_get_videos_request().build(),
_get_videos_response().with_pagination().with_record(_video_record()).build(),
)
http_mocker.get(
_get_videos_request().with_next_page_token(NEXT_PAGE_TOKEN).build(),
_get_videos_response().with_record(_video_record()).with_record(_video_record()).build(),
)
output = self._read(config())
assert len(output.records) == 3
@HttpMocker()
def test_given_multiple_account_ids_when_read_then_return_records_from_all_accounts(self, http_mocker: HttpMocker) -> None:
account_id_1 = "123123123"
account_id_2 = "321321321"
http_mocker.get(get_account_request().with_account_id(account_id_1).build(), get_account_response(account_id=account_id_1))
http_mocker.get(
_get_videos_request().with_account_id(account_id_1).build(),
_get_videos_response().with_record(_video_record()).build(),
)
http_mocker.get(get_account_request().with_account_id(account_id_2).build(), get_account_response(account_id=account_id_2))
http_mocker.get(
_get_videos_request().with_account_id(account_id_2).build(),
_get_videos_response().with_record(_video_record()).build(),
)
output = self._read(config().with_account_ids([account_id_1, account_id_2]))
assert len(output.records) == 2
@HttpMocker()
def test_when_read_then_add_account_id_field(self, http_mocker: HttpMocker) -> None:
account_id = "123123123"
http_mocker.get(get_account_request().with_account_id(account_id).build(), get_account_response(account_id=account_id))
http_mocker.get(
_get_videos_request().with_account_id(account_id).build(),
_get_videos_response().with_record(_video_record()).build(),
)
output = self._read(config().with_account_ids([account_id]))
assert output.records[0].record.data["account_id"] == account_id
@HttpMocker()
def test_when_read_then_datetime_fields_transformed(self, http_mocker: HttpMocker) -> None:
created_time_field = "created_time"
input_datetime_value = "2024-01-01t00:00:00 0000"
expected_datetime_value = "2024-01-01T00:00:00+0000"
http_mocker.get(get_account_request().build(), get_account_response())
http_mocker.get(
_get_videos_request().with_fields(_FIELDS).with_summary().build(),
_get_videos_response().with_record(_video_record().with_field(FieldPath(created_time_field), input_datetime_value)).build(),
)
output = self._read(config())
assert output.records[0].record.data[created_time_field] == expected_datetime_value
@HttpMocker()
def test_given_status_500_reduce_amount_of_data_when_read_then_limit_reduced(self, http_mocker: HttpMocker) -> None:
limit = 100
http_mocker.get(get_account_request().build(), get_account_response())
http_mocker.get(
_get_videos_request().with_limit(limit).with_fields(_FIELDS).with_summary().build(),
error_reduce_amount_of_data_response(),
)
http_mocker.get(
_get_videos_request().with_limit(int(limit / 2)).with_fields(_FIELDS).with_summary().build(),
_get_videos_response().with_record(_video_record()).build(),
)
self._read(config())
@freezegun.freeze_time(NOW.isoformat())
| TestFullRefresh |
python | apache__airflow | scripts/in_container/verify_providers.py | {
"start": 1948,
"end": 2082
} | class ____(NamedTuple):
entities: list[str]
new_entities_table: str
wrong_entities: list[tuple[type, str]]
| EntityTypeSummary |
python | google__pytype | pytype/blocks/blocks_test.py | {
"start": 9126,
"end": 14666
} | class ____(BaseBlocksTest):
"""Test the add_pop_block_targets function."""
def assertTargets(self, code, targets):
co = self.make_code(code)
bytecode = opcodes.dis(co)
blocks.add_pop_block_targets(bytecode)
for i in range(len(bytecode)):
op = bytecode[i]
actual_target = op.target
actual_block_target = op.block_target
target_id, block_id = targets.get(i, (None, None))
expected_target = None if target_id is None else bytecode[target_id]
expected_block_target = None if block_id is None else bytecode[block_id]
self.assertEqual(
actual_target,
expected_target,
msg=(
f"Block {i} ({op!r}) has target {actual_target!r}, "
f"expected target {expected_target!r}"
),
)
self.assertEqual(
actual_block_target,
expected_block_target,
msg=(
f"Block {i} ({op!r}) has block target {actual_block_target!r}, "
f"expected block target {expected_block_target!r}"
),
)
def test_finally(self):
# Disassembled from:
# | try:
# | pass
# | finally:
# | pass
self.assertTargets(
[
(o.SETUP_FINALLY, 3),
(o.POP_BLOCK, 0),
(o.LOAD_CONST, 0),
(o.RETURN_VALUE, 0),
(o.RERAISE, 0),
],
{
# SETUP_FINALLY.target == RERAISE
0: (4, None),
# POP_BLOCK.block_target == RERAISE
1: (None, 4),
},
)
def test_except(self):
# Disassembled from:
# | try:
# | pass
# | except:
# | pass
self.assertTargets(
[
(o.SETUP_FINALLY, 3),
(o.POP_BLOCK, 0),
(o.LOAD_CONST, 0),
(o.RETURN_VALUE, 0),
(o.POP_TOP, 0),
(o.POP_TOP, 0),
(o.POP_TOP, 0),
(o.POP_EXCEPT, 0),
(o.LOAD_CONST, 0),
(o.RETURN_VALUE, 0),
],
{
# SETUP_FINALLY.target == POP_TOP
0: (4, None),
# POP_BLOCK.block_target == POP_TOP
1: (None, 4),
},
)
def test_with(self):
# Disassembled from:
# | with None:
# | pass
self.assertTargets(
[
(o.LOAD_CONST, 0),
(o.SETUP_WITH, 9),
(o.POP_TOP, 0),
(o.POP_BLOCK, 0),
(o.LOAD_CONST, 0),
(o.DUP_TOP, 0),
(o.DUP_TOP, 0),
(o.CALL_FUNCTION, 3),
(o.POP_TOP, 0),
(o.LOAD_CONST, 0),
(o.RETURN_VALUE, 0),
(o.WITH_EXCEPT_START, 0),
(o.POP_JUMP_IF_TRUE, 14),
(o.RERAISE, 1),
(o.POP_TOP, 0),
(o.POP_TOP, 0),
],
{
# SETUP_WITH.target == WITH_EXCEPT_START
1: (11, None),
# POP_BLOCK.block_target == WITH_EXCEPT_START
3: (None, 11),
# POP_JUMP_IF_TRUE.target == POP_TOP
12: (14, None),
},
)
def test_loop(self):
# Disassembled from:
# | while []:
# | break
self.assertTargets(
[
(o.BUILD_LIST, 0),
(o.POP_JUMP_IF_FALSE, 4),
(o.LOAD_CONST, 0),
(o.RETURN_VALUE, 0),
(o.LOAD_CONST, 0),
(o.RETURN_VALUE, 0),
],
{
# POP_JUMP_IF_FALSE.target == LOAD_CONST
1: (4, None),
},
)
def test_break(self):
# Disassembled from:
# | while True:
# | if []:
# | break
self.assertTargets(
[
(o.NOP, 0),
(o.BUILD_LIST, 0),
(o.POP_JUMP_IF_FALSE, 5),
(o.LOAD_CONST, 1),
(o.RETURN_VALUE, 0),
(o.JUMP_ABSOLUTE, 1),
],
{
# POP_JUMP_IF_FALSE.target == JUMP_ABSOLUTE
2: (5, None),
# JUMP_ABSOLUTE.target == BUILD_LIST
5: (1, None),
},
)
def test_continue(self):
# Disassembled from:
# | while True:
# | try:
# | continue
# | except:
# | pass
self.assertTargets(
[
(o.NOP, 0),
(o.SETUP_FINALLY, 2),
(o.POP_BLOCK, 0),
(o.JUMP_ABSOLUTE, 0),
(o.POP_TOP, 0),
(o.POP_TOP, 0),
(o.POP_TOP, 0),
(o.POP_EXCEPT, 0),
(o.JUMP_ABSOLUTE, 1),
],
{
# SETUP_FINALLY.target == POP_TOP
1: (4, None),
# POP_BLOCK.block_target == POP_TOP
2: (None, 4),
# JUMP_ABSOLUTE.target == NOP
3: (0, None),
# JUMP_ABSOLUTE.target == SETUP_FINALLY
8: (1, None),
},
)
def test_apply_typecomments(self):
# Disassembly + type comment map from
# a = 1; b = 2 # type: float
# The type comment should only apply to b.
co = self.make_code([
(o.LOAD_CONST, 1),
(o.STORE_FAST, 0),
(o.LOAD_CONST, 2),
(o.STORE_FAST, 1),
(o.LOAD_CONST, 0),
(o.RETURN_VALUE, 0),
])
code, _ = blocks.process_code(co)
ordered_code = process_blocks.merge_annotations(
code, {1: annotations.VariableAnnotation(None, "float")}, {}
)
bytecode = ordered_code.order[0].code
self.assertIsNone(bytecode[1].annotation)
self.assertEqual(bytecode[3].annotation, "float")
if __name__ == "__main__":
unittest.main()
| BlockStackTest |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.