| function (string, lengths 11–56k) | repo_name (string, lengths 5–60) | features (list) |
|---|---|---|
def get_create_serializer(self):
return serializers.Resource(MaxCount=1, MinCount=1) | yaybu/touchdown | [
11,
4,
11,
17,
1410353271
] |
def get_destroy_serializer(self):
return serializers.Dict(
InstanceIds=serializers.ListOfOne(serializers.Property("InstanceId"))
) | yaybu/touchdown | [
11,
4,
11,
17,
1410353271
] |
def get_network_id(self, runner):
# FIXME: We can save on some steps if we only do this once
obj = runner.get_plan(self.adapts).describe_object()
return obj.get("VpcId", None) | yaybu/touchdown | [
11,
4,
11,
17,
1410353271
] |
def _is_printer_printing(printer: OctoprintPrinterInfo) -> bool:
return (
printer
and printer.state
and printer.state.flags
and printer.state.flags.printing
) | home-assistant/home-assistant | [
58698,
22318,
58698,
2794,
1379402988
] |
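Each `and` link in `_is_printer_printing` guards the attribute access that follows, so a missing printer, state, or flags object yields a falsy result instead of an `AttributeError`. A minimal sketch of that short-circuiting with `SimpleNamespace` stand-ins (hypothetical objects, not the real `OctoprintPrinterInfo` type):

```python
from types import SimpleNamespace

# Stand-in objects mimicking printer.state.flags.printing.
flags = SimpleNamespace(printing=True)
printer = SimpleNamespace(state=SimpleNamespace(flags=flags))

print(bool(_is_printer_printing(printer)))                      # True
print(bool(_is_printer_printing(None)))                         # False: stops at `printer`
print(bool(_is_printer_printing(SimpleNamespace(state=None))))  # False: stops at `printer.state`
```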
def __init__(
self,
coordinator: OctoprintDataUpdateCoordinator,
sensor_type: str,
device_id: str, | home-assistant/home-assistant | [
58698,
22318,
58698,
2794,
1379402988
] |
def device_info(self):
"""Device info."""
return self.coordinator.device_info | home-assistant/home-assistant | [
58698,
22318,
58698,
2794,
1379402988
] |
def __init__(
self, coordinator: OctoprintDataUpdateCoordinator, device_id: str | home-assistant/home-assistant | [
58698,
22318,
58698,
2794,
1379402988
] |
def native_value(self):
"""Return sensor state."""
printer: OctoprintPrinterInfo = self.coordinator.data["printer"]
if not printer:
return None
return printer.state.text | home-assistant/home-assistant | [
58698,
22318,
58698,
2794,
1379402988
] |
def available(self) -> bool:
"""Return if entity is available."""
        return self.coordinator.last_update_success and self.coordinator.data["printer"] is not None | home-assistant/home-assistant | [
58698,
22318,
58698,
2794,
1379402988
] |
def __init__(
self, coordinator: OctoprintDataUpdateCoordinator, device_id: str | home-assistant/home-assistant | [
58698,
22318,
58698,
2794,
1379402988
] |
def native_value(self):
"""Return sensor state."""
job: OctoprintJobInfo = self.coordinator.data["job"]
if not job:
return None
if not (state := job.progress.completion):
return 0
return round(state, 2) | home-assistant/home-assistant | [
58698,
22318,
58698,
2794,
1379402988
] |
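The walrus operator here collapses fetch, test, and use into a single guard; note that a completion of exactly `0.0` takes the same `return 0` path as a missing value. A stripped-down illustration of the pattern (hypothetical `completion_pct` helper, without the coordinator plumbing):

```python
def completion_pct(completion):
    if not (state := completion):  # None and 0.0 both land here
        return 0
    return round(state, 2)

print(completion_pct(None), completion_pct(0.0), completion_pct(87.3456))  # 0 0 87.35
```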
def __init__(
self, coordinator: OctoprintDataUpdateCoordinator, device_id: str | home-assistant/home-assistant | [
58698,
22318,
58698,
2794,
1379402988
] |
def native_value(self) -> datetime | None:
"""Return sensor state."""
job: OctoprintJobInfo = self.coordinator.data["job"]
if (
not job
or not job.progress.print_time_left
or not _is_printer_printing(self.coordinator.data["printer"])
):
ret... | home-assistant/home-assistant | [
58698,
22318,
58698,
2794,
1379402988
] |
def __init__(
self, coordinator: OctoprintDataUpdateCoordinator, device_id: str | home-assistant/home-assistant | [
58698,
22318,
58698,
2794,
1379402988
] |
def native_value(self) -> datetime | None:
"""Return sensor state."""
job: OctoprintJobInfo = self.coordinator.data["job"]
if (
not job
or not job.progress.print_time
or not _is_printer_printing(self.coordinator.data["printer"])
):
return ... | home-assistant/home-assistant | [
58698,
22318,
58698,
2794,
1379402988
] |
def __init__(
self,
coordinator: OctoprintDataUpdateCoordinator,
tool: str,
temp_type: str,
device_id: str, | home-assistant/home-assistant | [
58698,
22318,
58698,
2794,
1379402988
] |
def native_value(self):
"""Return sensor state."""
printer: OctoprintPrinterInfo = self.coordinator.data["printer"]
if not printer:
return None
for temp in printer.temperatures:
if temp.name == self._api_tool:
val = (
temp.actu... | home-assistant/home-assistant | [
58698,
22318,
58698,
2794,
1379402988
] |
def test_error_on_wrong_value_for_consumed_capacity():
resource = boto3.resource("dynamodb", region_name="ap-northeast-3")
client = boto3.client("dynamodb", region_name="ap-northeast-3")
client.create_table(
TableName="jobs",
KeySchema=[{"AttributeName": "job_id", "KeyType": "HASH"}],
... | spulec/moto | [
6700,
1808,
6700,
82,
1361221859
] |
def test_consumed_capacity_get_unknown_item():
conn = boto3.client("dynamodb", region_name="us-east-1")
conn.create_table(
TableName="test_table",
KeySchema=[{"AttributeName": "u", "KeyType": "HASH"}],
AttributeDefinitions=[{"AttributeName": "u", "AttributeType": "S"}],
BillingMo... | spulec/moto | [
6700,
1808,
6700,
82,
1361221859
] |
def test_only_return_consumed_capacity_when_required(
capacity, should_have_capacity, should_have_table | spulec/moto | [
6700,
1808,
6700,
82,
1361221859
] |
def validate_response(
response, should_have_capacity, should_have_table, is_index=False, value=1.0 | spulec/moto | [
6700,
1808,
6700,
82,
1361221859
] |
def index(self, request):
queryset = Condition.objects.select_related('source', 'target_option')
serializer = ConditionIndexSerializer(queryset, many=True)
return Response(serializer.data) | rdmorganiser/rdmo | [
80,
41,
80,
122,
1438336991
] |
def export(self, request):
serializer = ConditionExportSerializer(self.get_queryset(), many=True)
xml = ConditionRenderer().render(serializer.data)
return XMLResponse(xml, name='conditions') | rdmorganiser/rdmo | [
80,
41,
80,
122,
1438336991
] |
def detail_export(self, request, pk=None):
serializer = ConditionExportSerializer(self.get_object())
xml = ConditionRenderer().render([serializer.data])
return XMLResponse(xml, name=self.get_object().key) | rdmorganiser/rdmo | [
80,
41,
80,
122,
1438336991
] |
def roll_die(size):
first_die = choice(range(1, size + 1))
second_die = choice(range(1, size + 1))
return (first_die + second_die, (first_die == second_die)) | dhermes/project-euler | [
11,
3,
11,
1,
1299471955
] |
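`roll_die` returns the two-dice total plus a doubles flag; on a fair `size`-sided die, doubles come up with probability `1/size`. A quick sanity check under that assumption (presumes `from random import choice` at module top, as the snippet implies):

```python
# Monte Carlo check: doubles frequency on a d4 should approach 1/4.
rolls = [roll_die(4) for _ in range(100000)]
doubles_rate = sum(double for _, double in rolls) / len(rolls)
print(round(doubles_rate, 2))  # ~0.25
```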
def next_specific(square, next_type):
if next_type not in ["R", "U"]:
        raise ValueError("next_specific only intended for R and U")
# R1=5, R2=15, R3=25, R4=35
index = SQUARES.index(square)
if next_type == "R":
if 0 <= index < 5 or 35 < index:
return "R1"
elif 5 < index... | dhermes/project-euler | [
11,
3,
11,
1,
1299471955
] |
def main(verbose=False):
GAME_PLAY = 10 ** 6
dice_size = 4
visited = {"GO": 1}
current = "GO"
chance_card = 0
chest_card = 0
doubles = 0
for place in xrange(GAME_PLAY):
total, double = roll_die(dice_size)
if double:
doubles += 1
else:
doubl... | dhermes/project-euler | [
11,
3,
11,
1,
1299471955
] |
def matplotlib_pyplot():
import matplotlib # pylint: disable=g-import-not-at-top
matplotlib.use("agg")
import matplotlib.pyplot as plt # pylint: disable=g-import-not-at-top
return plt | tensorflow/tensor2tensor | [
13097,
3192,
13097,
587,
1497545859
] |
def convert_predictions_to_image_summaries(hook_args):
"""Optionally converts images from hooks_args to image summaries.
Args:
hook_args: DecodeHookArgs namedtuple
Returns:
summaries: list of tf.Summary values if hook_args.decode_hpara
"""
decode_hparams = hook_args.decode_hparams
if not decode_hpa... | tensorflow/tensor2tensor | [
13097,
3192,
13097,
587,
1497545859
] |
def make_multiscale(image, resolutions,
resize_method=tf.image.ResizeMethod.BICUBIC,
num_channels=3):
"""Returns list of scaled images, one for each resolution.
Args:
image: Tensor of shape [height, height, num_channels].
resolutions: List of heights that image's hei... | tensorflow/tensor2tensor | [
13097,
3192,
13097,
587,
1497545859
] |
def num_channels(self):
"""Number of color channels."""
return 3 | tensorflow/tensor2tensor | [
13097,
3192,
13097,
587,
1497545859
] |
def vocab_size(self):
"""Number of pixel values."""
return 256 | tensorflow/tensor2tensor | [
13097,
3192,
13097,
587,
1497545859
] |
def preprocess_example(self, example, mode, hparams):
if not self._was_reversed:
example["inputs"] = tf.image.per_image_standardization(example["inputs"])
return example | tensorflow/tensor2tensor | [
13097,
3192,
13097,
587,
1497545859
] |
def decode_hooks(self):
return [convert_predictions_to_image_summaries] | tensorflow/tensor2tensor | [
13097,
3192,
13097,
587,
1497545859
] |
def is_small(self):
raise NotImplementedError() | tensorflow/tensor2tensor | [
13097,
3192,
13097,
587,
1497545859
] |
def num_classes(self):
raise NotImplementedError() | tensorflow/tensor2tensor | [
13097,
3192,
13097,
587,
1497545859
] |
def train_shards(self):
raise NotImplementedError() | tensorflow/tensor2tensor | [
13097,
3192,
13097,
587,
1497545859
] |
def dev_shards(self):
return 1 | tensorflow/tensor2tensor | [
13097,
3192,
13097,
587,
1497545859
] |
def class_labels(self):
return ["ID_%d" % i for i in range(self.num_classes)] | tensorflow/tensor2tensor | [
13097,
3192,
13097,
587,
1497545859
] |
def generator(self, data_dir, tmp_dir, is_training):
raise NotImplementedError() | tensorflow/tensor2tensor | [
13097,
3192,
13097,
587,
1497545859
] |
def hparams(self, defaults, unused_model_hparams):
p = defaults
p.modality = {"inputs": modalities.ModalityType.IMAGE,
"targets": modalities.ModalityType.CLASS_LABEL}
p.vocab_size = {"inputs": 256,
"targets": self.num_classes}
p.batch_size_multiplier = 4 if self.is_... | tensorflow/tensor2tensor | [
13097,
3192,
13097,
587,
1497545859
] |
def encode_images_as_png(images):
"""Yield images encoded as pngs."""
if tf.executing_eagerly():
for image in images:
yield tf.image.encode_png(image).numpy()
else:
(height, width, channels) = images[0].shape
with tf.Graph().as_default():
image_t = tf.placeholder(dtype=tf.uint8, shape=(hei... | tensorflow/tensor2tensor | [
13097,
3192,
13097,
587,
1497545859
] |
def is_character_level(self):
raise NotImplementedError() | tensorflow/tensor2tensor | [
13097,
3192,
13097,
587,
1497545859
] |
def vocab_problem(self):
raise NotImplementedError() # Not needed if self.is_character_level. | tensorflow/tensor2tensor | [
13097,
3192,
13097,
587,
1497545859
] |
def target_space_id(self):
raise NotImplementedError() | tensorflow/tensor2tensor | [
13097,
3192,
13097,
587,
1497545859
] |
def train_shards(self):
raise NotImplementedError() | tensorflow/tensor2tensor | [
13097,
3192,
13097,
587,
1497545859
] |
def dev_shards(self):
raise NotImplementedError() | tensorflow/tensor2tensor | [
13097,
3192,
13097,
587,
1497545859
] |
def example_reading_spec(self):
label_key = "image/class/label"
data_fields, data_items_to_decoders = (
super(Image2TextProblem, self).example_reading_spec())
data_fields[label_key] = tf.VarLenFeature(tf.int64)
data_items_to_decoders["targets"] = contrib.slim().tfexample_decoder.Tensor(
... | tensorflow/tensor2tensor | [
13097,
3192,
13097,
587,
1497545859
] |
def hparams(self, defaults, unused_model_hparams):
p = defaults
p.modality = {"inputs": modalities.ModalityType.IMAGE,
"targets": modalities.ModalityType.SYMBOL}
p.vocab_size = {"inputs": 256,
"targets": self._encoders["targets"].vocab_size}
p.batch_size_multiplier ... | tensorflow/tensor2tensor | [
13097,
3192,
13097,
587,
1497545859
] |
def image_augmentation(images, do_colors=False, crop_size=None):
"""Image augmentation: cropping, flipping, and color transforms."""
if crop_size is None:
crop_size = [299, 299]
images = tf.random_crop(images, crop_size + [3])
images = tf.image.random_flip_left_right(images)
if do_colors: # More augmenta... | tensorflow/tensor2tensor | [
13097,
3192,
13097,
587,
1497545859
] |
def generate_dataset(I,J,K,lambdaU,lambdaV,tau):
# Generate U, V
U = numpy.zeros((I,K))
for i,k in itertools.product(xrange(0,I),xrange(0,K)):
U[i,k] = exponential_draw(lambdaU[i,k])
V = numpy.zeros((J,K))
for j,k in itertools.product(xrange(0,J),xrange(0,K)):
V[j,k] = exponential_dr... | ThomasBrouwer/BNMTF | [
18,
6,
18,
1,
1438595414
] |
def add_noise(true_R,tau):
if numpy.isinf(tau):
return numpy.copy(true_R) | ThomasBrouwer/BNMTF | [
18,
6,
18,
1,
1438595414
] |
def try_generate_M(I,J,fraction_unknown,attempts):
for attempt in range(1,attempts+1):
try:
M = generate_M(I,J,fraction_unknown)
sums_columns = M.sum(axis=0)
sums_rows = M.sum(axis=1)
for i,c in enumerate(sums_rows):
assert c != 0, "Fully unobs... | ThomasBrouwer/BNMTF | [
18,
6,
18,
1,
1438595414
] |
def create_channel(
cls,
host: str = "analyticsdata.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
**kwargs, | googleapis/python-analytics-data | [
114,
29,
114,
9,
1600119359
] |
def __init__(
self,
*,
host: str = "analyticsdata.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
channel: aio.Channel = None,
api_mtls_endpoint: str = None,
... | googleapis/python-analytics-data | [
114,
29,
114,
9,
1600119359
] |
def grpc_channel(self) -> aio.Channel:
"""Create the channel designed to connect to this service.
This property caches on the instance; repeated calls return
the same channel.
"""
# Return the channel from cache.
return self._grpc_channel | googleapis/python-analytics-data | [
114,
29,
114,
9,
1600119359
] |
def run_report(
self, | googleapis/python-analytics-data | [
114,
29,
114,
9,
1600119359
] |
def run_pivot_report(
self, | googleapis/python-analytics-data | [
114,
29,
114,
9,
1600119359
] |
def batch_run_reports(
self, | googleapis/python-analytics-data | [
114,
29,
114,
9,
1600119359
] |
def batch_run_pivot_reports(
self, | googleapis/python-analytics-data | [
114,
29,
114,
9,
1600119359
] |
def get_metadata(
self, | googleapis/python-analytics-data | [
114,
29,
114,
9,
1600119359
] |
def run_realtime_report(
self, | googleapis/python-analytics-data | [
114,
29,
114,
9,
1600119359
] |
def __init__(self, subtype, msg=None):
if msg is None:
            msg = "An error occurred for subtype {}".format(subtype)
super(PhylotyperError, self).__init__(msg)
self.subtype = subtype | superphy/backend | [
4,
2,
4,
35,
1484074797
] |
def __init__(self, subtype, msg=None):
super(PhylotyperError, self).__init__(
subtype, msg="Unrecognized subtype {}".format(subtype)) | superphy/backend | [
4,
2,
4,
35,
1484074797
] |
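Taken together, the two `__init__` methods suggest a `PhylotyperError` base carrying the `subtype`, plus an unnamed subclass (its `class` line was truncated in extraction) that pins the message. Assuming that structure, usage of the base class looks like:

```python
# Hedged sketch: raise/catch with the subtype preserved on the exception.
try:
    raise PhylotyperError("stx2")
except PhylotyperError as err:
    print(err.subtype)  # "stx2"
    print(err)          # "An error occurred for subtype stx2"
```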
def setUp(self):
self._mock_multiplexer = mock.create_autospec(
plugin_event_multiplexer.EventMultiplexer
)
self._mock_tb_context = base_plugin.TBContext(
multiplexer=self._mock_multiplexer
) | tensorflow/tensorboard | [
6136,
1581,
6136,
616,
1494878887
] |
def test_csv(self):
body, mime_type = self._run_handler(
EXPERIMENT, SESSION_GROUPS, download_data.OutputFormat.CSV
)
self.assertEqual("text/csv", mime_type)
self.assertEqual(EXPECTED_CSV, body) | tensorflow/tensorboard | [
6136,
1581,
6136,
616,
1494878887
] |
def test_json(self):
body, mime_type = self._run_handler(
EXPERIMENT, SESSION_GROUPS, download_data.OutputFormat.JSON
)
self.assertEqual("application/json", mime_type)
expected_result = {
"header": [
"initial_temp",
"final_temp",
... | tensorflow/tensorboard | [
6136,
1581,
6136,
616,
1494878887
] |
def friendly_time(msecs):
secs, msecs = divmod(msecs, 1000)
mins, secs = divmod(secs, 60)
hours, mins = divmod(mins, 60)
if hours:
return '%dh%dm%ds' % (hours, mins, secs)
elif mins:
return '%dm%ds' % (mins, secs)
elif secs:
return '%ds%dms' % (secs, msecs)
else:
        return '%dms' % msecs | piglei/uwsgi-sloth | [
207,
17,
207,
1,
1402890563
] |
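A few spot checks of `friendly_time`, assuming the restored final branch returning `'%dms' % msecs`:

```python
print(friendly_time(3723000))  # '1h2m3s'
print(friendly_time(125000))   # '2m5s'
print(friendly_time(2500))     # '2s500ms'
print(friendly_time(42))       # '42ms'
```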
def forwards(self, orm): | mollyproject/mollyproject | [
75,
20,
75,
4,
1288876632
] |
def backwards(self, orm): | mollyproject/mollyproject | [
75,
20,
75,
4,
1288876632
] |
def dem_threshold(domain, b):
'''Just use a height threshold on the DEM!'''
heightLevel = float(domain.algorithm_params['dem_threshold'])
dem = domain.get_dem().image
return dem.lt(heightLevel).select(['elevation'], ['b1']) | nasa/CrisisMappingToolkit | [
183,
75,
183,
5,
1417805519
] |
def evi(domain, b):
'''Simple EVI based classifier'''
#no_clouds = b['b3'].lte(2100).select(['sur_refl_b03'], ['b1'])
criteria1 = b['EVI'].lte(0.3).And(b['LSWI'].subtract(b['EVI']).gte(0.05)).select(['sur_refl_b02'], ['b1'])
criteria2 = b['EVI'].lte(0.05).And(b['LSWI'].lte(0.0)).select(['sur_refl_b02'],... | nasa/CrisisMappingToolkit | [
183,
75,
183,
5,
1417805519
] |
def get_diff(b):
'''Just the internals of the difference method'''
return b['b2'].subtract(b['b1']).select(['sur_refl_b02'], ['b1']) | nasa/CrisisMappingToolkit | [
183,
75,
183,
5,
1417805519
] |
def modis_diff(domain, b, threshold=None):
'''Compute (b2-b1) < threshold, a simple water detection index. | nasa/CrisisMappingToolkit | [
183,
75,
183,
5,
1417805519
] |
def get_dartmouth(b):
A = 500
B = 2500
return b['b2'].add(A).divide(b['b1'].add(B)).select(['sur_refl_b02'], ['b1']) | nasa/CrisisMappingToolkit | [
183,
75,
183,
5,
1417805519
] |
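The Dartmouth index is simply `(b2 + A) / (b1 + B)` with `A = 500` and `B = 2500`; the Earth Engine calls above compute it per pixel. The same arithmetic in plain NumPy, with hypothetical reflectance samples (not the real `ee.Image` API):

```python
import numpy as np

A, B = 500.0, 2500.0
b1 = np.array([300.0, 1200.0, 2400.0])  # hypothetical sur_refl_b01 values
b2 = np.array([150.0,  900.0, 2600.0])  # hypothetical sur_refl_b02 values

index = (b2 + A) / (b1 + B)
# The truncated dartmouth() presumably compares this against a threshold
# from domain.algorithm_params, as mod_ndwi does further down.
print(index)
```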
def dartmouth(domain, b, threshold=None):
'''A flood detection method from the Dartmouth Flood Observatory. | nasa/CrisisMappingToolkit | [
183,
75,
183,
5,
1417805519
] |
def get_mod_ndwi(b):
return b['b6'].subtract(b['b4']).divide(b['b4'].add(b['b6'])).select(['sur_refl_b06'], ['b1']) | nasa/CrisisMappingToolkit | [
183,
75,
183,
5,
1417805519
] |
def mod_ndwi(domain, b, threshold=None):
    if threshold is None:
threshold = float(domain.algorithm_params['mod_ndwi_threshold'])
return get_mod_ndwi(b).lte(threshold) | nasa/CrisisMappingToolkit | [
183,
75,
183,
5,
1417805519
] |
def get_fai(b):
'''Just the internals of the FAI method'''
return b['b2'].subtract(b['b1'].add(b['b5'].subtract(b['b1']).multiply((859.0 - 645) / (1240 - 645)))).select(['sur_refl_b02'], ['b1']) | nasa/CrisisMappingToolkit | [
183,
75,
183,
5,
1417805519
] |
def get_spec(field, limit=10, query='', query_dsl=''):
"""Returns aggregation specs for a term of filtered events.
The aggregation spec will summarize values of an attribute
whose events fall under a filter.
Args:
field (str): this denotes the event attribute that is used
for aggre... | google/timesketch | [
2113,
486,
2113,
278,
1403200185
] |
def chart_title(self):
"""Returns a title for the chart."""
if self.field:
return 'Top filtered results for "{0:s}"'.format(self.field)
return 'Top results for an unknown field after filtering' | google/timesketch | [
2113,
486,
2113,
278,
1403200185
] |
def run(
self, field, query_string='', query_dsl='',
supported_charts='table', start_time='', end_time='', limit=10):
"""Run the aggregation.
Args:
field (str): this denotes the event attribute that is used
for aggregation.
query_string (s... | google/timesketch | [
2113,
486,
2113,
278,
1403200185
] |
def create_colors_list():
colors_list = []
for color in plt.cm.tab10(np.linspace(0, 1, 10))[:-1]:
colors_list.append(tuple(color))
colors_list.append("black")
for color in plt.cm.Set2(np.linspace(0, 1, 8)):
colors_list.append(tuple(color))
for color in plt.cm.Set3(np.linspace(0, 1, 1... | CAMI-challenge/AMBER | [
18,
7,
18,
3,
1502787274
] |
def plot_precision_vs_bin_size(pd_bins, output_dir):
pd_plot = pd_bins[pd_bins[utils_labels.TOOL] != utils_labels.GS]
for tool_label, pd_tool in pd_plot.groupby(utils_labels.TOOL):
fig, axs = plt.subplots(figsize=(5, 4.5))
axs.scatter(np.log(pd_tool['total_length']), pd_tool['precision_bp'], ma... | CAMI-challenge/AMBER | [
18,
7,
18,
3,
1502787274
] |
def get_pd_genomes_recall(sample_id_to_queries_list):
pd_genomes_recall = pd.DataFrame()
for sample_id in sample_id_to_queries_list:
for query in sample_id_to_queries_list[sample_id]:
if not isinstance(query, binning_classes.GenomeQuery):
continue
recall_df = quer... | CAMI-challenge/AMBER | [
18,
7,
18,
3,
1502787274
] |
def plot_heatmap(df_confusion, sample_id, output_dir, label, separate_bar=False, log_scale=False):
if log_scale:
        df_confusion = df_confusion.apply(np.log10).replace(-np.inf, 0)
fig, axs = plt.subplots(figsize=(10, 8))
fontsize = 20
# replace columns and rows labels by numbers
... | CAMI-challenge/AMBER | [
18,
7,
18,
3,
1502787274
] |
def plot_summary(color_indices, df_results, labels, output_dir, rank, plot_type, file_name, xlabel, ylabel):
available_tools = df_results[utils_labels.TOOL].unique()
tools = [tool for tool in labels if tool in available_tools]
colors_list = create_colors_list()
if color_indices:
colors_list = [... | CAMI-challenge/AMBER | [
18,
7,
18,
3,
1502787274
] |
def plot_precision_recall(colors, summary_per_query, labels, output_dir, rank=None):
plot_summary(colors,
summary_per_query,
labels,
output_dir,
rank,
'w',
'purity_recall_bp',
'Purity for sample (%... | CAMI-challenge/AMBER | [
18,
7,
18,
3,
1502787274
] |
def plot_taxonomic_results(df_summary_t, metrics_list, errors_list, file_name, output_dir):
colors_list = ["#006cba", "#008000", "#ba9e00", "red"]
for tool, pd_results in df_summary_t.groupby(utils_labels.TOOL):
dict_metric_list = []
for metric in metrics_list:
rank_to_metric = Orde... | CAMI-challenge/AMBER | [
18,
7,
18,
3,
1502787274
] |
def create_completeness_minus_contamination_column(pd_tool_bins):
pd_tool_bins['newcolumn'] = pd_tool_bins['recall_bp'] + pd_tool_bins['precision_bp'] - 1 | CAMI-challenge/AMBER | [
18,
7,
18,
3,
1502787274
] |
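`recall_bp + precision_bp - 1` is completeness minus contamination, since contamination is one minus purity. A tiny pandas check of the column arithmetic (toy values):

```python
import pandas as pd

pd_tool_bins = pd.DataFrame({
    "recall_bp":    [0.90, 0.60],  # completeness
    "precision_bp": [0.95, 0.70],  # purity
})
create_completeness_minus_contamination_column(pd_tool_bins)
print(pd_tool_bins["newcolumn"].tolist())  # ≈ [0.85, 0.30]
```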
def get_number_of_hq_bins(tools, pd_bins):
pd_counts = pd.DataFrame()
pd_bins_copy = pd_bins[[utils_labels.TOOL, 'precision_bp', 'recall_bp']].copy().dropna(subset=['precision_bp'])
for tool in tools:
pd_tool_bins = pd_bins_copy[pd_bins_copy[utils_labels.TOOL] == tool]
x50 = pd_tool_bins[(pd... | CAMI-challenge/AMBER | [
18,
7,
18,
3,
1502787274
] |
def print_banner(bootstrap, no_shell_file):
"""Print the Pigweed or project-specific banner"""
enable_colors()
print(Color.green('\n WELCOME TO...'))
print(Color.magenta(_PIGWEED_BANNER))
if bootstrap:
print(
Color.green('\n BOOTSTRAP! Bootstrap may take a few minutes; '
... | google/pigweed | [
161,
44,
161,
1,
1615327645
] |
def main():
"""Script entry point."""
if os.name != 'nt':
return 1
return print_banner(**vars(parse())) | google/pigweed | [
161,
44,
161,
1,
1615327645
] |
def main(unused_argv):
tpu_cluster_resolver = contrib_cluster_resolver.TPUClusterResolver(
FLAGS.tpu, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project)
config = contrib_tpu.RunConfig(
cluster=tpu_cluster_resolver,
model_dir=FLAGS.model_dir,
save_checkpoints_steps=FLAGS.iterations_per_loop,
... | tensorflow/tpu | [
5035,
1773,
5035,
290,
1499817279
] |
def test_engine_module_name():
engine = salt.engines.Engine({}, "foobar.start", {}, {}, {}, {}, name="foobar")
assert engine.name == "foobar" | saltstack/salt | [
13089,
5388,
13089,
3074,
1298233016
] |
def __init__(self, vocab, clusters):
self.vocab = vocab
self.clusters = clusters | danielfrg/word2vec | [
2483,
628,
2483,
5,
1377467108
] |
def __getitem__(self, word):
return self.get_cluster(word) | danielfrg/word2vec | [
2483,
628,
2483,
5,
1377467108
] |
def get_words_on_cluster(self, cluster):
return self.vocab[self.clusters == cluster] | danielfrg/word2vec | [
2483,
628,
2483,
5,
1377467108
] |
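The three `Clusters` methods above imply a vocab array with a parallel array of cluster ids: `__getitem__` delegates to a lookup, and boolean masking collects every word in a cluster. A minimal self-contained version under that assumption (`get_cluster` is not among the extracted rows, so its body here is a guess):

```python
import numpy as np

class Clusters:
    def __init__(self, vocab, clusters):
        self.vocab = vocab        # array of words
        self.clusters = clusters  # parallel array of cluster ids

    def get_cluster(self, word):
        # Assumed lookup; the real method is not shown above.
        return int(self.clusters[self.vocab == word][0])

    def __getitem__(self, word):
        return self.get_cluster(word)

    def get_words_on_cluster(self, cluster):
        return self.vocab[self.clusters == cluster]

c = Clusters(np.array(["king", "queen", "car"]), np.array([7, 7, 3]))
print(c["queen"])                 # 7
print(c.get_words_on_cluster(7))  # ['king' 'queen']
```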
def setUp(self):
super(TestCatalog, self).setUp(capture_output=True) | prestodb/presto-admin | [
170,
102,
170,
63,
1432266042
] |
def test_add_not_exist(self, isfile_mock):
isfile_mock.return_value = False
self.assertRaisesRegexp(ConfigurationError,
'Configuration for catalog dummy not found',
catalog.add, 'dummy') | prestodb/presto-admin | [
170,
102,
170,
63,
1432266042
] |