| code (string, lengths 3 to 6.57k) |
|---|
dict(size=10) |
plt.gca() |
axis('off') |
plt.axis('off') |
plt.tight_layout() |
plt.show() |
gensim.models.wrappers.ldamallet.malletmodel2ldamodel(lda_full) |
pyLDAvis.gensim.prepare(mallet2lda_full, bow_corpus_full, dictionary_full) |
pyLDAvis.show(visualizeLDA_full) |
gensim.models.wrappers.ldamallet.malletmodel2ldamodel(lda_pos) |
pyLDAvis.gensim.prepare(mallet2lda_pos, bow_corpus_pos, dictionary_pos) |
pyLDAvis.show(visualizeLDA_pos) |
gensim.models.wrappers.ldamallet.malletmodel2ldamodel(lda_neg) |
pyLDAvis.gensim.prepare(mallet2lda_neg, bow_corpus_neg, dictionary_neg) |
pyLDAvis.show(visualizeLDA_neg) |
upgrade() |
sa.Boolean() |
sa.false() |
sa.Boolean() |
sa.false() |
op.execute("UPDATE provider_details SET supports_international=True WHERE identifier='mmg'") |
op.execute("UPDATE provider_details_history SET supports_international=True WHERE identifier='mmg'") |
downgrade() |
op.drop_column("provider_details_history", "supports_international") |
op.drop_column("provider_details", "supports_international") |
logging.getLogger(__name__) |
log_start_end(log=logger) |
check_api_key(["API_WHALE_ALERT_KEY"]) |
whale_alert_model.get_whales_transactions(min_value) |
console.print("Failed to retrieve data.") |
df.copy() |
df.sort_values(by=sortby, ascending=descend) |
df.drop(["from_address", "to_address"], axis=1) |
df.drop(["from", "to", "blockchain"], axis=1) |
apply(lambda x: lambda_long_number_format(x)) |
df.head(top) |
list(df.columns) |
os.path.dirname(os.path.abspath(__file__)) |
split_half_float_double_csr(tensors) |
CSRTensor.type() |
enumerate(dtypes) |
t.type() |
buckets.append((dtype, bucket)) |
_initialize_parameter_parallel_groups(parameter_parallel_size=None) |
int(dist.get_world_size()) |
int(data_parallel_size) |
dist.get_rank() |
range(dist.get_world_size() // parameter_parallel_size) |
range(i * parameter_parallel_size, (i + 1) * parameter_parallel_size) |
torch.distributed.new_group(ranks) |
print_configuration(args, name) |
logger.info('{}:'.format(name)) |
sorted(vars(args)) |
len(arg) |
logger.info(' {} {} {}'.format(arg, dots, getattr(args, arg))) |
DeepSpeedEngine(Module) |
super(DeepSpeedEngine, self) |
__init__() |
dist.is_initialized() |
dist.is_initialized() |
deepspeed.initialize() |
init_distributed(dist_backend=self.dist_backend) |
see_memory_usage(f"DeepSpeed Engine: Before args sanity test") |
self._do_args_sanity_check(args) |
self._configure_with_arguments(args, mpu) |
self._do_sanity_check() |
self.elasticity_enabled() |
self._set_distributed_vars() |
self.tensorboard_enabled() |
self.get_summary_writer() |
see_memory_usage(f"DeepSpeed Engine: Before configure distributed model") |
self._configure_distributed_model(model) |
see_memory_usage(f"DeepSpeed Engine: After configure distributed model") |
SynchronizedWallClockTimer() |
self.train_micro_batch_size_per_gpu() |
self.steps_per_print() |
self.deepspeed_io(training_data) |
self._configure_optimizer(optimizer, model_parameters) |
self._configure_lr_scheduler(lr_scheduler) |
self._report_progress(0) |
set() |
self.sparse_gradients_enabled() |
self.module.named_modules() |
isinstance(module, torch.nn.Embedding) |
self.csr_tensor_module_names.add(name + ".weight") |
logger.info("Will convert {} to sparse (csr) |
format(name) |
self._configure_checkpointing(dist_init_required) |
self.pld_enabled() |
self._configure_progressive_layer_drop() |
self._config.print('DeepSpeedEngine configuration') |
self.dump_state() |
print_configuration(self, 'DeepSpeedEngine') |
compile (un) |
UtilsBuilder() |
load() |
get_batch_info(self) |
train_batch_size (int) |
train_micro_batch_size_per_gpu (int) |
step (without gradient accumulation) |
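Several rows above (the malletmodel2ldamodel, pyLDAvis.gensim.prepare and pyLDAvis.show fragments) belong to a gensim-plus-pyLDAvis topic-model visualization workflow. Below is a minimal sketch of how those calls typically fit together; the model, corpus and dictionary arguments are assumed to already exist, and the snippet assumes gensim < 4.0 (which still ships models.wrappers) together with a pyLDAvis release that still exposes the pyLDAvis.gensim module.

```python
import gensim
import pyLDAvis
import pyLDAvis.gensim  # newer pyLDAvis versions expose pyLDAvis.gensim_models instead


def show_mallet_topics(lda_mallet, bow_corpus, dictionary):
    """Convert a trained LdaMallet model to a regular LdaModel and open the
    interactive pyLDAvis view in a browser."""
    lda = gensim.models.wrappers.ldamallet.malletmodel2ldamodel(lda_mallet)
    vis = pyLDAvis.gensim.prepare(lda, bow_corpus, dictionary)
    pyLDAvis.show(vis)


# e.g. show_mallet_topics(lda_full, bow_corpus_full, dictionary_full)
```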
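The upgrade/downgrade, sa.Boolean, sa.false, op.execute and op.drop_column rows appear to come from an Alembic migration that adds a supports_international flag to provider_details and its history table. A sketch of such a migration, with placeholder revision identifiers, might look like this:

```python
import sqlalchemy as sa
from alembic import op

# Placeholder revision identifiers; real migrations get these from Alembic.
revision = "xxxx"
down_revision = "yyyy"


def upgrade():
    # Add the flag as NOT NULL with a server-side default of false, then
    # enable it for the 'mmg' provider in both tables.
    op.add_column(
        "provider_details",
        sa.Column("supports_international", sa.Boolean(), nullable=False, server_default=sa.false()),
    )
    op.add_column(
        "provider_details_history",
        sa.Column("supports_international", sa.Boolean(), nullable=False, server_default=sa.false()),
    )
    op.execute("UPDATE provider_details SET supports_international=True WHERE identifier='mmg'")
    op.execute("UPDATE provider_details_history SET supports_international=True WHERE identifier='mmg'")


def downgrade():
    op.drop_column("provider_details_history", "supports_international")
    op.drop_column("provider_details", "supports_international")
```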
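The _initialize_parameter_parallel_groups, dist.get_world_size, dist.get_rank and torch.distributed.new_group rows sketch the common pattern of carving the distributed world into consecutive blocks of ranks and keeping the group that contains the current rank. A self-contained version of that pattern, assuming torch.distributed has already been initialized (for example by deepspeed.initialize or dist.init_process_group), could be:

```python
import torch
import torch.distributed as dist


def initialize_parameter_parallel_groups(parameter_parallel_size=None):
    """Split the world into blocks of `parameter_parallel_size` consecutive
    ranks and return the process group containing the calling rank."""
    data_parallel_size = int(dist.get_world_size())
    parameter_parallel_size = parameter_parallel_size or data_parallel_size
    assert data_parallel_size % parameter_parallel_size == 0, \
        "world size should be divisible by parameter parallel size"

    rank = dist.get_rank()
    my_group = None
    for i in range(dist.get_world_size() // parameter_parallel_size):
        ranks = range(i * parameter_parallel_size, (i + 1) * parameter_parallel_size)
        # Every process must call new_group for every group, including the
        # ones it does not belong to.
        group = torch.distributed.new_group(list(ranks))
        if rank in ranks:
            my_group = group
    return my_group
```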