**Schema** (113 columns, reconstructed from the flattened header; each column is paired with the dtype that followed it):

| column | dtype |
|---|---|
| hexsha | string |
| size | int64 |
| ext | string |
| lang | string |
| max_stars_repo_path | string |
| max_stars_repo_name | string |
| max_stars_repo_head_hexsha | string |
| max_stars_repo_licenses | list |
| max_stars_count | int64 |
| max_stars_repo_stars_event_min_datetime | string |
| max_stars_repo_stars_event_max_datetime | string |
| max_issues_repo_path | string |
| max_issues_repo_name | string |
| max_issues_repo_head_hexsha | string |
| max_issues_repo_licenses | list |
| max_issues_count | int64 |
| max_issues_repo_issues_event_min_datetime | string |
| max_issues_repo_issues_event_max_datetime | string |
| max_forks_repo_path | string |
| max_forks_repo_name | string |
| max_forks_repo_head_hexsha | string |
| max_forks_repo_licenses | list |
| max_forks_count | int64 |
| max_forks_repo_forks_event_min_datetime | string |
| max_forks_repo_forks_event_max_datetime | string |
| content | string |
| avg_line_length | float64 |
| max_line_length | int64 |
| alphanum_fraction | float64 |
| qsc_code_num_words_quality_signal | int64 |
| qsc_code_num_chars_quality_signal | float64 |
| qsc_code_mean_word_length_quality_signal | float64 |
| qsc_code_frac_words_unique_quality_signal | float64 |
| qsc_code_frac_chars_top_2grams_quality_signal | float64 |
| qsc_code_frac_chars_top_3grams_quality_signal | float64 |
| qsc_code_frac_chars_top_4grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_5grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_6grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_7grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_8grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_9grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_10grams_quality_signal | float64 |
| qsc_code_frac_chars_replacement_symbols_quality_signal | float64 |
| qsc_code_frac_chars_digital_quality_signal | float64 |
| qsc_code_frac_chars_whitespace_quality_signal | float64 |
| qsc_code_size_file_byte_quality_signal | float64 |
| qsc_code_num_lines_quality_signal | float64 |
| qsc_code_num_chars_line_max_quality_signal | float64 |
| qsc_code_num_chars_line_mean_quality_signal | float64 |
| qsc_code_frac_chars_alphabet_quality_signal | float64 |
| qsc_code_frac_chars_comments_quality_signal | float64 |
| qsc_code_cate_xml_start_quality_signal | float64 |
| qsc_code_frac_lines_dupe_lines_quality_signal | float64 |
| qsc_code_cate_autogen_quality_signal | float64 |
| qsc_code_frac_lines_long_string_quality_signal | float64 |
| qsc_code_frac_chars_string_length_quality_signal | float64 |
| qsc_code_frac_chars_long_word_length_quality_signal | float64 |
| qsc_code_frac_lines_string_concat_quality_signal | float64 |
| qsc_code_cate_encoded_data_quality_signal | float64 |
| qsc_code_frac_chars_hex_words_quality_signal | float64 |
| qsc_code_frac_lines_prompt_comments_quality_signal | float64 |
| qsc_code_frac_lines_assert_quality_signal | float64 |
| qsc_codepython_cate_ast_quality_signal | float64 |
| qsc_codepython_frac_lines_func_ratio_quality_signal | float64 |
| qsc_codepython_cate_var_zero_quality_signal | bool |
| qsc_codepython_frac_lines_pass_quality_signal | float64 |
| qsc_codepython_frac_lines_import_quality_signal | float64 |
| qsc_codepython_frac_lines_simplefunc_quality_signal | float64 |
| qsc_codepython_score_lines_no_logic_quality_signal | float64 |
| qsc_codepython_frac_lines_print_quality_signal | float64 |
| qsc_code_num_words | int64 |
| qsc_code_num_chars | int64 |
| qsc_code_mean_word_length | int64 |
| qsc_code_frac_words_unique | null |
| qsc_code_frac_chars_top_2grams | int64 |
| qsc_code_frac_chars_top_3grams | int64 |
| qsc_code_frac_chars_top_4grams | int64 |
| qsc_code_frac_chars_dupe_5grams | int64 |
| qsc_code_frac_chars_dupe_6grams | int64 |
| qsc_code_frac_chars_dupe_7grams | int64 |
| qsc_code_frac_chars_dupe_8grams | int64 |
| qsc_code_frac_chars_dupe_9grams | int64 |
| qsc_code_frac_chars_dupe_10grams | int64 |
| qsc_code_frac_chars_replacement_symbols | int64 |
| qsc_code_frac_chars_digital | int64 |
| qsc_code_frac_chars_whitespace | int64 |
| qsc_code_size_file_byte | int64 |
| qsc_code_num_lines | int64 |
| qsc_code_num_chars_line_max | int64 |
| qsc_code_num_chars_line_mean | int64 |
| qsc_code_frac_chars_alphabet | int64 |
| qsc_code_frac_chars_comments | int64 |
| qsc_code_cate_xml_start | int64 |
| qsc_code_frac_lines_dupe_lines | int64 |
| qsc_code_cate_autogen | int64 |
| qsc_code_frac_lines_long_string | int64 |
| qsc_code_frac_chars_string_length | int64 |
| qsc_code_frac_chars_long_word_length | int64 |
| qsc_code_frac_lines_string_concat | null |
| qsc_code_cate_encoded_data | int64 |
| qsc_code_frac_chars_hex_words | int64 |
| qsc_code_frac_lines_prompt_comments | int64 |
| qsc_code_frac_lines_assert | int64 |
| qsc_codepython_cate_ast | int64 |
| qsc_codepython_frac_lines_func_ratio | int64 |
| qsc_codepython_cate_var_zero | int64 |
| qsc_codepython_frac_lines_pass | int64 |
| qsc_codepython_frac_lines_import | int64 |
| qsc_codepython_frac_lines_simplefunc | int64 |
| qsc_codepython_score_lines_no_logic | int64 |
| qsc_codepython_frac_lines_print | int64 |
| effective | string |
| hits | int64 |
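This schema is the only structure the dump preserves, so a small, hedged sketch of loading and filtering rows with it may help. This is a minimal sketch assuming the rows are available as a local Parquet shard; the file name `shard.parquet` and the filter thresholds are illustrative assumptions, not part of the source.

```python
# Minimal sketch, assuming the rows below are available as a local Parquet
# shard. "shard.parquet" and the thresholds are illustrative assumptions --
# this dump does not say where the dataset lives.
import pandas as pd

df = pd.read_parquet("shard.parquet")

# Columns ending in "_quality_signal" hold measured statistics; the
# like-named columns without the suffix hold 0/1 (or null) values per row.
signal_cols = [c for c in df.columns if c.endswith("_quality_signal")]
print(len(signal_cols))  # 41 in the schema above

# Example filter: keep Python files that parse (cate_ast == 1) and are not
# dominated by duplicated 5-grams.
mask = (df["qsc_codepython_cate_ast_quality_signal"] == 1) & (
    df["qsc_code_frac_chars_dupe_5grams_quality_signal"] < 0.5
)
print(df.loc[mask, ["max_stars_repo_name", "size", "hits"]].head())
```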
The rows follow, one block per record. In each block, "quality signals" lists the 41 `qsc_*_quality_signal` values in schema order, and "signal flags" lists the 41 unsuffixed `qsc_*` values in the same order.

---
**Row 1** · hexsha `080d591dcd18d5349f4e389d01e598d959bebf78` · **size** 232 · **ext** py · **lang** Python

- **max_stars**: `tests/unit/executors/encoders/test_frameworks.py` in `NickCwh/jina` @ `d3dec7b82d6301bce72a82ad6a41dddc944ead0b`, licenses ["Apache-2.0"], count null, events null → null
- **max_issues**: same path, repo, head, and licenses; count null, events null → null
- **max_forks**: same path, repo, head, and licenses; count null, events null → null

**content**

```python
import pytest
from jina.executors.encoders.frameworks import BaseOnnxEncoder
from jina.excepts import ModelCheckpointNotExist
def test_raised_exception():
    with pytest.raises(ModelCheckpointNotExist):
        BaseOnnxEncoder()
```

**avg_line_length** 25.777778 · **max_line_length** 62 · **alphanum_fraction** 0.818966
**quality signals**: 23, 232, 8.173913, 0.695652, 0.085106, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.125, 232, 8, 63, 29, 0.926108, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0.166667, true, 0, 0.5, 0, 0.666667, 0
**signal flags**: 1, 0, 0, null, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, null, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0
**effective** 0 · **hits** 5
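In this first row exactly five of the unsuffixed `qsc_*` columns are 1 and `hits` is 5, which suggests `hits` counts triggered signals; the same holds for every complete row below. A hedged check of that reading, continuing the sketch above:

```python
# Continuing the earlier sketch: test the (assumed, not documented)
# relationship that "hits" equals the number of unsuffixed qsc_* columns
# set to 1 in each row.
flag_cols = [
    c for c in df.columns
    if c.startswith("qsc_") and not c.endswith("_quality_signal")
]
computed_hits = df[flag_cols].fillna(0).sum(axis=1)
print((computed_hits == df["hits"]).all())
```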
---
**Row 2** · hexsha `f290df82121ee9c9600a7a71cc1add7aa646a9e9` · **size** 188 · **ext** py · **lang** Python

- **max_stars**: `SelectAllElementsInView.py` in `gnomesoup/pyDynamo` @ `dea046e96f7973fcb6c28a274a3092b246457551`, licenses ["Unlicense", "MIT"], count null, events null → null
- **max_issues**: same path, repo, head, and licenses; count null, events null → null
- **max_forks**: same path, repo, head, and licenses; count null, events null → null

**content**

```python
import clr
import os
clr.AddReference('RevitAPI')
from Autodesk.Revit.DB import *
clr.AddReference('RevitServices')
import RevitServices
from RevitServices.Persistence import DocumentMa
```

*(content cut off mid-identifier in the source)*

**avg_line_length** 18.8 · **max_line_length** 48 · **alphanum_fraction** 0.829787
**quality signals**: 22, 188, 7.090909, 0.545455, 0.115385, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.095745, 188, 9, 49, 20.888889, 0.917647, 0, 0, 0, 0, 0, 0.111702, 0, 0, 0, 0, 0, 0, 1, 0, true, 0, 0.714286, 0, 0.714286, 0
**signal flags**: 1, 0, 0, null, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, null, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0
**effective** 0 · **hits** 5
---
**Row 3** · hexsha `f2ae50797201246cdba1129302be063bb4e236e9` · **size** 135 · **ext** py · **lang** Python

- **max_stars**: `torch_chemistry/nn/conv/__init__.py` in `0h-n0/pytorch_chemistry` @ `14ca01ab2a30728016ce6c6793f119438a09ade5`, licenses ["MIT"], count 7, events 2019-12-21T12:36:20.000Z → 2022-01-15T11:05:25.000Z
- **max_issues**: same path and head, repo `0h-n0/pytorch-chemistry`, licenses ["MIT"], count null, events null → null
- **max_forks**: same path and head, repo `0h-n0/pytorch-chemistry`, licenses ["MIT"], count 1, events 2020-11-05T09:33:18.000Z → 2020-11-05T09:33:18.000Z

**content**

```python
import torch
import torch.nn as nn
from .base_conv import GNNConv
from .gcn_conv import GCNConv
from .censnet_conv import CensNetConv
```

**avg_line_length** 19.285714 · **max_line_length** 37 · **alphanum_fraction** 0.82963
**quality signals**: 22, 135, 4.954545, 0.545455, 0.275229, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.140741, 135, 6, 38, 22.5, 0.939655, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, true, 0, 1, 0, 1, 0
**signal flags**: 1, 0, 0, null, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, null, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0
**effective** 0 · **hits** 5
---
**Row 4** · hexsha `4b3ed8c47591ea97e0c3d53ffcf61f6d32d5a56a` · **size** 128 · **ext** py · **lang** Python

- **max_stars**: `server/externalrelations/webhookserver/admin.py` in `podyssea/Chatbot` @ `c5e54da3493269e63bf486acc1da525fce4fa170`, licenses ["MIT"], count 2, events 2019-03-31T15:28:39.000Z → 2021-07-09T10:57:13.000Z
- **max_issues**: same path, repo `modelorona/External-Relations-Chatbot` @ `23dbfc99f4bb14b6cb6483cceb6b245cff963f1f`, licenses ["MIT"], count 3, events 2021-03-09T13:58:14.000Z → 2022-02-26T16:07:45.000Z
- **max_forks**: same path, repo, head, and licenses as max_stars; count null, events null → null

**content**

```python
from django.contrib import admin
from .models import ShortCourse
# Register your models here.
admin.site.register(ShortCourse)
```

**avg_line_length** 21.333333 · **max_line_length** 32 · **alphanum_fraction** 0.820313
**quality signals**: 17, 128, 6.176471, 0.647059, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.117188, 128, 5, 33, 25.6, 0.929204, 0.203125, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, true, 0, 0.666667, 0, 0.666667, 0
**signal flags**: 1, 0, 0, null, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, null, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0
**effective** 0 · **hits** 5
---
**Row 5** · hexsha `4b46e9560b9adf35b83b33e82791b1a1edce0cd4` · **size** 95 · **ext** py · **lang** Python

- **max_stars**: `tictactoe_project/setup.py` in `agryman/sean` @ `11baf69c6eb9308266126bf9c8b1c67c6fd33afc`, licenses ["MIT"], count 1, events 2020-03-28T18:17:52.000Z → 2020-03-28T18:17:52.000Z
- **max_issues**: same path, repo, head, and licenses; count 1, events 2022-01-21T21:33:00.000Z → 2022-01-21T21:33:00.000Z
- **max_forks**: same path, repo, head, and licenses; count null, events null → null

**content**

```python
from setuptools import setup, find_packages
setup(name='tictactoe', packages=find_packages())
```

**avg_line_length** 23.75 · **max_line_length** 49 · **alphanum_fraction** 0.810526
**quality signals**: 12, 95, 6.25, 0.666667, 0.32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.084211, 95, 3, 50, 31.666667, 0.862069, 0, 0, 0, 0, 0, 0.094737, 0, 0, 0, 0, 0, 0, 1, 0, true, 0, 0.5, 0, 0.5, 0
**signal flags**: 1, 0, 0, null, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, null, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0
**effective** 0 · **hits** 5
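Several of the leading statistics can be recomputed directly from `content`. A sketch for the `setup.py` row above, under the assumption (not documented in this dump) that the denominators are raw character counts; note the dump has dropped blank lines, so byte counts from the reconstructed content can come out slightly short:

```python
# Sketch: recompute surface statistics for the setup.py row above. The
# dataset's exact tokenization and newline handling are assumptions here.
content = (
    "from setuptools import setup, find_packages\n"
    "setup(name='tictactoe', packages=find_packages())"
)
size = len(content)  # cf. the stored size of 95 (blank lines were lost in this dump)
alnum_frac = sum(ch.isalnum() for ch in content) / size  # cf. the stored 0.810526
max_line = max(len(line) for line in content.splitlines())  # 49, matching max_line_length
print(size, round(alnum_frac, 6), max_line)
```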
---
**Row 6** · hexsha `29aebb7295b0760b9dbc72efea5fc618bae33f99` · **size** 84 · **ext** py · **lang** Python

- **max_stars**: `nestedtensor/nn/__init__.py` in `seemethere/nestedtensor` @ `b4190efc91f3cd4891ae370502b656cbb63e7def`, licenses ["BSD-3-Clause"], count 1, events 2021-07-16T16:09:51.000Z → 2021-07-16T16:09:51.000Z
- **max_issues**: same path, repo, head, and licenses; count 1, events 2021-04-17T11:15:19.000Z → 2021-04-17T11:15:19.000Z
- **max_forks**: same path, repo, head, and licenses; count null, events null → null

**content**

```python
from .mha import MultiheadAttention
from .parameter import Parameter as NTParameter
```

**avg_line_length** 28 · **max_line_length** 47 · **alphanum_fraction** 0.857143
**quality signals**: 10, 84, 7.2, 0.7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.119048, 84, 2, 48, 42, 0.972973, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, true, 0, 1, 0, 1, 0
**signal flags**: 1, 0, 0, null, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, null, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0
**effective** 0 · **hits** 5
---
**Row 7** · hexsha `4b07a76c49e74fa826fad4da87066fc4d3d4f1e6` · **size** 167 · **ext** py · **lang** Python

- **max_stars**: `setup.py` in `aaronbacher/RandomTransformationLayer` @ `0174fe392aaefbc092cb26aa3b7c2f618562b1a9`, licenses ["BSD-3-Clause"], count null, events null → null
- **max_issues**: same path, repo, head, and licenses; count null, events null → null
- **max_forks**: same path, repo, head, and licenses; count null, events null → null

**content**

```python
from distutils.core import setup
from Cython.Build import cythonize
import numpy
setup(ext_modules=cythonize('transform_cy.pyx'), include_dirs=[numpy.get_include()])
```

**avg_line_length** 27.833333 · **max_line_length** 84 · **alphanum_fraction** 0.820359
**quality signals**: 24, 167, 5.541667, 0.708333, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.077844, 167, 5, 85, 33.4, 0.863636, 0, 0, 0, 0, 0, 0.095808, 0, 0, 0, 0, 0, 0, 1, 0, true, 0, 0.75, 0, 0.75, 0
**signal flags**: 1, 0, 0, null, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, null, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0
**effective** 0 · **hits** 5
---
**Row 8** · hexsha `d9eda1953d94cb78fc0096b7481d4ec40d23c209` · **size** 5137 · **ext** py · **lang** Python

- **max_stars**: `test/wecall_acceptance/genotyping/test_input_file.py` in `dylex/wecall` @ `35d24cefa4fba549e737cd99329ae1b17dd0156b`, licenses ["MIT"], count 8, events 2018-10-08T15:47:21.000Z → 2021-11-09T07:13:05.000Z
- **max_issues**: same path, repo, head, and licenses; count 4, events 2018-11-05T09:16:27.000Z → 2020-04-09T12:32:56.000Z
- **max_forks**: same path, repo, head, and licenses; count 4, events 2019-09-03T15:46:39.000Z → 2021-06-04T07:28:33.000Z

**content** (indentation restored from Python syntax; blank lines were lost in the dump)

```python
# All content Copyright (C) 2018 Genomics plc
from os.path import join
from unittest import expectedFailure
from wecall.genomics.variant import Variant
from wecall.vcfutils.info_data import InfoData
from wecall.vcfutils.schema import Schema
from wecall.vcfutils.vcf_builder import VCFBuilder
from wecall_test_drivers.base_test import BaseTest
from wecall_test_drivers.svc_driver import SVCDriver
class TestInputSpecification(BaseTest):
    def test_doesnt_give_a_flying_damn_about_spurious_filter_header(self):
        chrom = "22"
        variant = Variant(chrom, 11, "A", "C")
        schema = Schema()
        complex_filter_name = '.+-*\\/~@?!%^&><=\"\'(){}[]_|'
        schema.set_filter(complex_filter_name, 'unusual characters')
        gv_builder = VCFBuilder(join(self.work_dir, "genotype.vcf"), schema=schema)
        gv_builder.with_record_from_variant(variant, filters={complex_filter_name})
        gv_builder.build().index()
        driver = SVCDriver(self)
        dodgy_sample = "bobs_your_uncle"
        driver.with_ref_sequence(
            "ACGCCCCCTGCAAAAAAAAAA", chrom=chrom, pos_from=0
        ).with_read(
            "...........C.........", n_fwd=5, n_rev=5, chrom=chrom, sample_name=dodgy_sample
        ).with_genotype_alleles(
            gv_builder.compressed_filename
        )
        expect = driver.call(expected_success=True)
        expect.with_output_vcf()\
            .has_record_for_variant(variant)\
            .with_sample(dodgy_sample)\
            .has_genotype("1/1")
    def test_doesnt_give_a_flying_damn_about_spurious_filters(self):
        chrom = "22"
        variant = Variant(chrom, 11, "A", "C")
        gv_builder = VCFBuilder(join(self.work_dir, "genotype.vcf"))
        gv_builder.with_record_from_variant(
            variant, filters={"#$.:@$%$%^&**()7!"})
        gv_builder.build().index()
        driver = SVCDriver(self)
        dodgy_sample = "bobs_your_uncle"
        driver.with_ref_sequence(
            "ACGCCCCCTGCAAAAAAAAAA", chrom=chrom, pos_from=0
        ).with_read(
            "...........C.........", n_fwd=5, n_rev=5, chrom=chrom, sample_name=dodgy_sample
        ).with_genotype_alleles(
            gv_builder.compressed_filename
        )
        expect = driver.call(expected_success=True)
        expect.with_output_vcf()\
            .has_record_for_variant(variant)\
            .with_sample(dodgy_sample)\
            .has_genotype("1/1")
    def test_should_handle_complex_variant_input(self):
        chrom = "22"
        variant = Variant(chrom, 10, "CAA", "CA")
        gv_builder = VCFBuilder(join(self.work_dir, "genotype.vcf"))
        gv_builder.with_record_from_variant(variant)
        gv_builder.build().index()
        driver = SVCDriver(self)
        dodgy_sample = "bobs_your_uncle"
        driver.with_ref_sequence(
            "ACGCCCCCTGCAAAAAAAAAA", chrom=chrom, pos_from=0
        ).with_read(
            "...........C.........", n_fwd=5, n_rev=5, chrom=chrom, sample_name=dodgy_sample
        ).with_genotype_alleles(
            gv_builder.compressed_filename
        )
        expect = driver.call()
        expect.with_log()\
            .input_variant_trimmed_warning(variant, Variant(chrom, 11, "A", ""))
        expect.with_output_vcf()\
            .record_count(1)
    @expectedFailure  # "Unskip test if parameter made public"
    def test_should_raise_if_output_ref_calls_is_switched_on(self):
        chrom = "22"
        variant = Variant(chrom, 10, "CAA", "CA")
        gv_builder = VCFBuilder(join(self.work_dir, "genotype.vcf"))
        gv_builder.with_record_from_variant(variant)
        gv_builder.build().index()
        driver = SVCDriver(self)
        dodgy_sample = "bobs_your_uncle"
        driver.with_ref_sequence(
            "ACGCCCCCTGCAAAAAAAAAA", chrom=chrom, pos_from=0
        ).with_read(
            "...........C.........", n_fwd=5, n_rev=5, chrom=chrom, sample_name=dodgy_sample
        ).with_genotype_alleles(
            gv_builder.compressed_filename
        ).with_output_ref_calls(True)
        driver.call(False).genotyping_is_incompatible_with_outputting_reference_calls_error()
    def test_doesnt_give_a_flying_damn_about_spurious_info(self):
        chrom = "22"
        variant = Variant(chrom, 11, "A", "C")
        gv_builder = VCFBuilder(join(self.work_dir, "genotype.vcf"))
        gv_builder.with_record_from_variant(variant,
                                            info=InfoData(None, {"#f$@$e%$%^&k**()7!": ["#o$@$f%$%f^&**()7!"]}))
        gv_builder.build().index()
        driver = SVCDriver(self)
        dodgy_sample = "bobs_your_uncle"
        driver.with_ref_sequence(
            "ACGCCCCCTGCAAAAAAAAAA", chrom=chrom, pos_from=0
        ).with_read(
            "...........C.........", n_fwd=5, n_rev=5, chrom=chrom, sample_name=dodgy_sample
        ).with_genotype_alleles(
            gv_builder.compressed_filename
        )
        expect = driver.call(expected_success=True)
        expect.with_output_vcf() \
            .has_record_for_variant(variant)\
            .with_sample(dodgy_sample)\
            .has_genotype("1/1")
```

**avg_line_length** 36.692857 · **max_line_length** 112 · **alphanum_fraction** 0.626825
**quality signals**: 601, 5137, 5.009983, 0.199667, 0.059781, 0.037861, 0.02989, 0.731319, 0.724012, 0.724012, 0.724012, 0.707074, 0.642312, 0, 0.013104, 0.242359, 5137, 139, 113, 36.956835, 0.760534, 0.015963, 0, 0.693694, 0, 0, 0.093428, 0.041568, 0, 0, 0, 0, 0, 1, 0.045045, false, 0, 0.072072, 0, 0.126126, 0
**signal flags**: 0, 0, 0, null, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, null, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
**effective** 0 · **hits** 5
---
**Row 9** · hexsha `8a11e4b7e2af3f74598584d5f6845be159074ae7` · **size** 163 · **ext** py · **lang** Python

- **max_stars**: `PyMOTW/source/importlib/example/__init__.py` in `axetang/AxePython` @ `3b517fa3123ce2e939680ad1ae14f7e602d446a6`, licenses ["Apache-2.0"], count 1, events 2019-01-04T05:47:50.000Z → 2019-01-04T05:47:50.000Z
- **max_issues**: same path, repo, head, and licenses; count 1, events 2020-07-18T03:52:03.000Z → 2020-07-18T04:18:01.000Z
- **max_forks**: same path, repo, head, and licenses; count 2, events 2021-03-06T04:28:32.000Z → 2021-03-06T04:59:17.000Z

**content**

```python
#!/usr/bin/env python
# encoding: utf-8
#
# Copyright (c) 2008 Doug Hellmann All rights reserved.
#
"""
"""
#end_pymotw_header
print('Importing example package')
```

**avg_line_length** 14.818182 · **max_line_length** 55 · **alphanum_fraction** 0.705521
**quality signals**: 22, 163, 5.136364, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.035714, 0.141104, 163, 10, 56, 16.3, 0.771429, 0.656442, 0, 0, 0, 0, 0.581395, 0, 0, 0, 0, 0, 0, 1, 0, true, 0, 1, 0, 1, 1
**signal flags**: 1, 0, 0, null, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, null, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 1
**effective** 0 · **hits** 5
---
**Row 10** · hexsha `8a44e2f2c3888e8b4328df8380d22c69b3844951` · **size** 147 · **ext** py · **lang** Python

- **max_stars**: `src/hebphonics/controllers/__init__.py` in `ohizkiya/hebphonics` @ `60f46f2fbec6704c4598dfccaa4326b1e17b133a`, licenses ["MIT"], count null, events null → null
- **max_issues**: same path, repo, head, and licenses; count 11, events 2020-11-20T20:23:00.000Z → 2021-01-28T14:23:19.000Z
- **max_forks**: same path, repo, head, and licenses; count 1, events 2021-01-01T20:06:01.000Z → 2021-01-01T20:06:01.000Z

**content**

```python
#!/usr/bin/env python
# coding: utf-8
"""Flask controllers."""
__all__ = ["jsend", "login", "hebphonics"]
from . import jsend, login, hebphonics
```

**avg_line_length** 18.375 · **max_line_length** 42 · **alphanum_fraction** 0.666667
**quality signals**: 18, 147, 5.222222, 0.833333, 0.212766, 0.425532, 0, 0, 0, 0, 0, 0, 0, 0, 0.007874, 0.136054, 147, 7, 43, 21, 0.732283, 0.360544, 0, 0, 0, 0, 0.229885, 0, 0, 0, 0, 0, 0, 1, 0, false, 0, 0.5, 0, 0.5, 0
**signal flags**: 1, 0, 0, null, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, null, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0
**effective** 0 · **hits** 5
---
**Row 11** · hexsha `8a48d4ef8babf6ad05d45a0da90e9dc110dc5246` · **size** 75 · **ext** py · **lang** Python

- **max_stars**: `laspy/vlrs/__init__.py` in `CCInc/laspy` @ `999306c92162fe6e4376960ac5b4df4368d5c3c7`, licenses ["BSD-2-Clause"], count 240, events 2016-11-29T15:11:38.000Z → 2022-03-30T20:22:42.000Z
- **max_issues**: same path, repo, head, and licenses; count 136, events 2016-11-28T16:38:05.000Z → 2022-03-28T16:49:42.000Z
- **max_forks**: same path, repo, head, and licenses; count 76, events 2016-12-08T14:07:35.000Z → 2022-03-16T00:41:17.000Z

**content**

```python
from . import geotiff
from .known import BaseKnownVLR
from .vlr import VLR
```

**avg_line_length** 18.75 · **max_line_length** 31 · **alphanum_fraction** 0.8
**quality signals**: 11, 75, 5.454545, 0.545455, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.16, 75, 3, 32, 25, 0.952381, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, true, 0, 1, 0, 1, 0
**signal flags**: 1, 0, 0, null, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, null, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0
**effective** 0 · **hits** 5
---
**Row 12** · hexsha `8a65d5e0780a9d154e594e3249f408e2d418272c` · **size** 36 · **ext** py · **lang** Python

- **max_stars**: `run_train.py` in `thiagozampieri/gaia-loginface` @ `ed1b9e6a4323718d070a659c59a9cf316a54fda2`, licenses ["MIT"], count null, events null → null
- **max_issues**: same path, repo, head, and licenses; count null, events null → null
- **max_forks**: same path, repo, head, and licenses; count null, events null → null

**content**

```python
import gaia.train as run
run.train()
```

**avg_line_length** 18 · **max_line_length** 24 · **alphanum_fraction** 0.777778
**quality signals**: 7, 36, 4, 0.714286, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.111111, 36, 2, 25, 18, 0.875, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, true, 0, 0.5, 0, 0.5, 0
**signal flags**: 1, 1, 0, null, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, null, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0
**effective** 0 · **hits** 5
---
**Row 13** · hexsha `8ad330d40365e2f54693c5adfdcabdb0c6463646` · **size** 145 · **ext** py · **lang** Python

- **max_stars**: `spherov2/toy/sprk2.py` in `superfashi/spherov2.py` @ `2f11b4e68ab4829972ba4e84bf23844c141f2266`, licenses ["MIT"], count 1, events 2020-06-02T15:51:31.000Z → 2020-06-02T15:51:31.000Z
- **max_issues**: same path, repo, head, and licenses; count null, events null → null
- **max_forks**: same path, repo, head, and licenses; count null, events null → null

**content** (indentation restored)

```python
from spherov2.toy.bb8 import BB8
from spherov2.types import ToyType
class Sprk2(BB8):
    toy_type = ToyType('Sphero SPRK+', 'SK-', 'SK', .06)
```

**avg_line_length** 20.714286 · **max_line_length** 56 · **alphanum_fraction** 0.696552
**quality signals**: 22, 145, 4.545455, 0.636364, 0.24, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.065574, 0.158621, 145, 6, 57, 24.166667, 0.754098, 0, 0, 0, 0, 0, 0.117241, 0, 0, 0, 0, 0, 0, 1, 0, false, 0, 0.5, 0, 1, 0
**signal flags**: 1, 0, 0, null, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, null, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0
**effective** 0 · **hits** 5
---
**Row 14** · hexsha `76d5c5ff0da8a95cad203a56ad96389c8938d32e` · **size** 5118 · **ext** py · **lang** Python

- **max_stars**: `huaweicloud-sdk-dms/huaweicloudsdkdms/v2/__init__.py` in `githubmilesma/huaweicloud-sdk-python-v3` @ `9d9449ed68a609ca65f0aa50b5b2a1c28445bf03`, licenses ["Apache-2.0"], count 1, events 2021-04-16T07:59:28.000Z → 2021-04-16T07:59:28.000Z
- **max_issues**: same path, repo `Lencof/huaweicloud-sdk-python-v3` @ `d13dc4e2830a83e295be6e4de021999b3376e34e`, licenses ["Apache-2.0"], count null, events null → null
- **max_forks**: same path, repo, and head as max_issues, licenses ["Apache-2.0"], count 1, events 2022-01-17T02:24:18.000Z → 2022-01-17T02:24:18.000Z

**content**

```python
# coding: utf-8
from __future__ import absolute_import
# import DmsClient
from huaweicloudsdkdms.v2.dms_client import DmsClient
from huaweicloudsdkdms.v2.dms_async_client import DmsAsyncClient
# import models into sdk package
from huaweicloudsdkdms.v2.model.batch_create_or_delete_queue_tag_request import BatchCreateOrDeleteQueueTagRequest
from huaweicloudsdkdms.v2.model.batch_create_or_delete_queue_tag_response import BatchCreateOrDeleteQueueTagResponse
from huaweicloudsdkdms.v2.model.batch_create_or_delete_tag_req import BatchCreateOrDeleteTagReq
from huaweicloudsdkdms.v2.model.batch_create_or_delete_tag_req_tags import BatchCreateOrDeleteTagReqTags
from huaweicloudsdkdms.v2.model.confirm_consumption_messages_req import ConfirmConsumptionMessagesReq
from huaweicloudsdkdms.v2.model.confirm_consumption_messages_request import ConfirmConsumptionMessagesRequest
from huaweicloudsdkdms.v2.model.confirm_consumption_messages_response import ConfirmConsumptionMessagesResponse
from huaweicloudsdkdms.v2.model.confirm_dead_letters_messages_req import ConfirmDeadLettersMessagesReq
from huaweicloudsdkdms.v2.model.confirm_dead_letters_messages_req_message import ConfirmDeadLettersMessagesReqMessage
from huaweicloudsdkdms.v2.model.confirm_dead_letters_messages_request import ConfirmDeadLettersMessagesRequest
from huaweicloudsdkdms.v2.model.confirm_dead_letters_messages_response import ConfirmDeadLettersMessagesResponse
from huaweicloudsdkdms.v2.model.consume_deadletters_message import ConsumeDeadlettersMessage
from huaweicloudsdkdms.v2.model.consume_deadletters_message_message import ConsumeDeadlettersMessageMessage
from huaweicloudsdkdms.v2.model.consume_deadletters_message_request import ConsumeDeadlettersMessageRequest
from huaweicloudsdkdms.v2.model.consume_deadletters_message_response import ConsumeDeadlettersMessageResponse
from huaweicloudsdkdms.v2.model.consume_message import ConsumeMessage
from huaweicloudsdkdms.v2.model.consume_message_message import ConsumeMessageMessage
from huaweicloudsdkdms.v2.model.consume_messages_request import ConsumeMessagesRequest
from huaweicloudsdkdms.v2.model.consume_messages_response import ConsumeMessagesResponse
from huaweicloudsdkdms.v2.model.create_consumer_group_req import CreateConsumerGroupReq
from huaweicloudsdkdms.v2.model.create_consumer_group_request import CreateConsumerGroupRequest
from huaweicloudsdkdms.v2.model.create_consumer_group_resp_groups import CreateConsumerGroupRespGroups
from huaweicloudsdkdms.v2.model.create_consumer_group_response import CreateConsumerGroupResponse
from huaweicloudsdkdms.v2.model.create_queue_req import CreateQueueReq
from huaweicloudsdkdms.v2.model.create_queue_request import CreateQueueRequest
from huaweicloudsdkdms.v2.model.create_queue_response import CreateQueueResponse
from huaweicloudsdkdms.v2.model.delete_queue_request import DeleteQueueRequest
from huaweicloudsdkdms.v2.model.delete_queue_response import DeleteQueueResponse
from huaweicloudsdkdms.v2.model.delete_specified_consumer_group_request import DeleteSpecifiedConsumerGroupRequest
from huaweicloudsdkdms.v2.model.delete_specified_consumer_group_response import DeleteSpecifiedConsumerGroupResponse
from huaweicloudsdkdms.v2.model.group_entity import GroupEntity
from huaweicloudsdkdms.v2.model.list_consumer_groups_request import ListConsumerGroupsRequest
from huaweicloudsdkdms.v2.model.list_consumer_groups_response import ListConsumerGroupsResponse
from huaweicloudsdkdms.v2.model.list_queue_groups_resp_groups import ListQueueGroupsRespGroups
from huaweicloudsdkdms.v2.model.list_queues_request import ListQueuesRequest
from huaweicloudsdkdms.v2.model.list_queues_resp_queues import ListQueuesRespQueues
from huaweicloudsdkdms.v2.model.list_queues_response import ListQueuesResponse
from huaweicloudsdkdms.v2.model.send_message_entity import SendMessageEntity
from huaweicloudsdkdms.v2.model.send_messages_req import SendMessagesReq
from huaweicloudsdkdms.v2.model.send_messages_request import SendMessagesRequest
from huaweicloudsdkdms.v2.model.send_messages_resp_messages import SendMessagesRespMessages
from huaweicloudsdkdms.v2.model.send_messages_response import SendMessagesResponse
from huaweicloudsdkdms.v2.model.show_project_tags_request import ShowProjectTagsRequest
from huaweicloudsdkdms.v2.model.show_project_tags_resp_tags import ShowProjectTagsRespTags
from huaweicloudsdkdms.v2.model.show_project_tags_response import ShowProjectTagsResponse
from huaweicloudsdkdms.v2.model.show_queue_request import ShowQueueRequest
from huaweicloudsdkdms.v2.model.show_queue_response import ShowQueueResponse
from huaweicloudsdkdms.v2.model.show_queue_tags_request import ShowQueueTagsRequest
from huaweicloudsdkdms.v2.model.show_queue_tags_response import ShowQueueTagsResponse
from huaweicloudsdkdms.v2.model.show_quotas_request import ShowQuotasRequest
from huaweicloudsdkdms.v2.model.show_quotas_resp_quotas import ShowQuotasRespQuotas
from huaweicloudsdkdms.v2.model.show_quotas_resp_quotas_resources import ShowQuotasRespQuotasResources
from huaweicloudsdkdms.v2.model.show_quotas_response import ShowQuotasResponse
```

**avg_line_length** 81.238095 · **max_line_length** 117 · **alphanum_fraction** 0.920477
**quality signals**: 559, 5118, 8.13059, 0.184258, 0.254125, 0.278328, 0.326513, 0.538174, 0.524092, 0.332453, 0.142134, 0.072607, 0.047525, 0, 0.011478, 0.046698, 5118, 62, 118, 82.548387, 0.920066, 0.011919, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, true, 0, 1, 0, 1, 0
**signal flags**: 0, 0, 0, null, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, null, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0
**effective** 0 · **hits** 5
---
**Row 15** · hexsha `76ea8b3aeacc180f09c1dd804ba1aee065c74697` · **size** 227 · **ext** py · **lang** Python

- **max_stars**: `graphgallery/datasets/__init__.py` in `kisekizzz/GraphGallery` @ `fd4a1f474c244f774397460ae95935638ef48f5b`, licenses ["MIT"], count null, events null → null
- **max_issues**: same path, repo, head, and licenses; count null, events null → null
- **max_forks**: same path, repo, head, and licenses; count null, events null → null

**content**

```python
from .dataset import Dataset
from .in_memory_dataset import InMemoryDataset
from .planetoid import Planetoid
from .npz_dataset import NPZDataset
from .ppi import PPI
from .reddit import Reddit
from .tu_dataset import TUDataset
```

**avg_line_length** 28.375 · **max_line_length** 46 · **alphanum_fraction** 0.845815
**quality signals**: 32, 227, 5.875, 0.40625, 0.276596, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.123348, 227, 7, 47, 32.428571, 0.944724, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, true, 0, 1, 0, 1, 0
**signal flags**: 0, 0, 0, null, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, null, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0
**effective** 0 · **hits** 5
---
**Row 16** · hexsha `0a08e6d49c00b5c3e0b57a9469f13ef038a2d0a6` · **size** 49 · **ext** py · **lang** Python

- **max_stars**: `data/indicator/ME1/__init__.py` in `simonzabrocki/Anticipe` @ `ad0e0aa39217a7e38ed10e1b3eb5be8e47d0e965`, licenses ["MIT"], count null, events null → null
- **max_issues**: same path, repo, head, and licenses; count 1, events 2022-01-27T07:44:44.000Z → 2022-01-27T07:44:44.000Z
- **max_forks**: same path, repo, head, and licenses; count null, events null → null

**content**

```python
from data.indicator.ME1.preprocess import config
```

**avg_line_length** 24.5 · **max_line_length** 48 · **alphanum_fraction** 0.857143
**quality signals**: 7, 49, 6, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.022222, 0.081633, 49, 1, 49, 49, 0.911111, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, true, 0, 1, 0, 1, 0
**signal flags**: 1, 1, 0, null, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, null, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0
**effective** 0 · **hits** 5
---
**Row 17** · hexsha `0a0ed16fc973b3a217def6a264f87a4655c8119c` · **size** 22 · **ext** py · **lang** Python

- **max_stars**: `tests/__init__.py` in `jbool24/CashWEB` @ `da0f71956e95b70863bd8743372629609376f30b`, licenses ["MIT"], count null, events null → null
- **max_issues**: same path, repo, head, and licenses; count null, events null → null
- **max_forks**: same path, repo, head, and licenses; count null, events null → null

**content**

```python
# ./tests/__init__.py
```

**avg_line_length** 11 · **max_line_length** 21 · **alphanum_fraction** 0.681818
**quality signals**: 3, 22, 3.666667, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.090909, 22, 1, 22, 22, 0.55, 0.863636, 0, null, 0, null, 0, 0, null, 0, 0, 0, null, 1, null, true, 0, 0, null, null, null
**signal flags**: 1, 1, 0, null, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, null, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0
**effective** 0 · **hits** 5
---
**Row 18** · hexsha `0a16aefe0222857f03fe6a72857fd0b7db5ff5ae` · **size** 375 · **ext** py · **lang** Python

- **max_stars**: `seq_graph_retro/layers/__init__.py` in `vsomnath/graphretro` @ `2a88730da980be59541780e790cdb0fbdc581828`, licenses ["MIT"], count 5, events 2022-02-21T08:26:15.000Z → 2022-03-31T12:05:47.000Z
- **max_issues**: same path, repo, head, and licenses; count null, events null → null
- **max_forks**: same path, repo, head, and licenses; count 2, events 2022-03-11T15:33:33.000Z → 2022-03-23T06:28:07.000Z

**content**

```python
from seq_graph_retro.layers.reaction import AtomAttention, PairFeat
from seq_graph_retro.layers.rnn import GRU, LSTM, MPNLayer
from seq_graph_retro.layers.graph_transformer import (SublayerConnection, MultiHeadBlock,
                                                      MultiHeadAttention, PositionwiseFeedForward)
from seq_graph_retro.layers.encoder import GraphFeatEncoder, WLNEncoder, LogitEncoder, GTransEncoder
```

**avg_line_length** 62.5 · **max_line_length** 100 · **alphanum_fraction** 0.850667
**quality signals**: 42, 375, 7.380952, 0.547619, 0.090323, 0.154839, 0.219355, 0.296774, 0, 0, 0, 0, 0, 0, 0, 0.098667, 375, 5, 101, 75, 0.91716, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, true, 0, 0.8, 0, 0.8, 0
**signal flags**: 0, 0, 0, null, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, null, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0
**effective** 0 · **hits** 5
---
**Row 19** · hexsha `0a17360497754b8b07462e61d7a4f08e8cf6fa1a` · **size** 265 · **ext** py · **lang** Python

- **max_stars**: `Chapter 05/Chap05_Example5.6.py` in `bpbpublications/Programming-Techniques-using-Python` @ `49b785f37e95a3aad1d36cef51e219ac56e5e9f0`, licenses ["MIT"], count null, events null → null
- **max_issues**: same path, repo, head, and licenses; count null, events null → null
- **max_forks**: same path, repo, head, and licenses; count null, events null → null

**content**

```python
import time
from imp import reload
import safetyprecautions
print("I am inside testmodule")
print("I am sleeping for 40 secs")
print("--------------------------")
time.sleep(40)
reload(safetyprecautions)
print("This is displayed after updation of module")
```

**avg_line_length** 26.5 · **max_line_length** 52 · **alphanum_fraction** 0.683019
**quality signals**: 34, 265, 5.323529, 0.676471, 0.243094, 0.088398, 0, 0, 0, 0, 0, 0, 0, 0, 0.017544, 0.139623, 265, 9, 53, 29.444444, 0.776316, 0, 0, 0, 0, 0, 0.449219, 0.101563, 0, 0, 0, 0, 0, 1, 0, true, 0, 0.333333, 0, 0.333333, 0.444444
**signal flags**: 0, 0, 0, null, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, null, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1
**effective** 0 · **hits** 5
---
**Row 20** · hexsha `0a1bbd032b6538f787eda58f6052875670170207` · **size** 39 · **ext** py · **lang** Python

- **max_stars**: `wacy/apps/__init__.py` in `thewaverguy/wacy` @ `4a2ad247fad3b4af281392053d4ac50a67550a01`, licenses ["Apache-2.0"], count 5, events 2021-03-11T17:41:10.000Z → 2021-03-23T09:36:27.000Z
- **max_issues**: same path, repo, head, and licenses; count null, events null → null
- **max_forks**: same path, repo, head, and licenses; count null, events null → null

**content**

```python
from wacy.apps.base.app import BaseApp
```

**avg_line_length** 19.5 · **max_line_length** 38 · **alphanum_fraction** 0.820513
**quality signals**: 7, 39, 4.571429, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.102564, 39, 1, 39, 39, 0.914286, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, true, 0, 1, 0, 1, 0
**signal flags**: 1, 1, 0, null, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, null, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0
**effective** 0 · **hits** 5
---
**Row 21** · hexsha `6a71722bbf8844642b0863038e7859984a07f1cb` · **size** 175 · **ext** py · **lang** Python

- **max_stars**: `media_management_api/media_service/apps.py` in `Harvard-ATG/media_management_api` @ `6ccc53c53def64f1976f6b21fd95abd68332a5b7`, licenses ["BSD-3-Clause"], count 1, events 2017-09-25T19:55:49.000Z → 2017-09-25T19:55:49.000Z
- **max_issues**: same path, repo, head, and licenses; count 32, events 2015-12-09T20:31:19.000Z → 2022-03-11T23:33:50.000Z
- **max_forks**: same path, repo, head, and licenses; count 1, events 2020-12-10T16:52:56.000Z → 2020-12-10T16:52:56.000Z

**content** (indentation restored)

```python
from django.apps import AppConfig
class MediaServiceConfig(AppConfig):
    name = 'media_management_api.media_service'
    verbose_name = 'media_management_api.media_service'
```

**avg_line_length** 35 · **max_line_length** 55 · **alphanum_fraction** 0.817143
**quality signals**: 21, 175, 6.47619, 0.619048, 0.132353, 0.279412, 0.323529, 0.5, 0.5, 0, 0, 0, 0, 0, 0, 0.114286, 175, 5, 55, 35, 0.877419, 0, 0, 0, 0, 0, 0.386364, 0.386364, 0, 0, 0, 0, 0, 1, 0, false, 0, 0.25, 0, 1, 0
**signal flags**: 1, 0, 0, null, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, null, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0
**effective** 0 · **hits** 5
---
**Row 22** · hexsha `6ad89cbc01cd9317285f504f55adb9e0c2279392` · **size** 357 · **ext** py · **lang** Python

- **max_stars**: `nnet/activation_func/_base.py` in `zhaoyan1117/NeuralNet` @ `a0343dd469e981bf9b4f18db0209ca9bfaf58c4f`, licenses ["BSD-2-Clause"], count null, events null → null
- **max_issues**: same path, repo, head, and licenses; count null, events null → null
- **max_forks**: same path, repo, head, and licenses; count null, events null → null

**content** (indentation restored; blank lines were lost in the dump)

```python
from __future__ import absolute_import
from abc import ABCMeta, abstractmethod
class ActivationFuncBase(object):
    __metaclass__ = ABCMeta
    @abstractmethod
    def apply(self, z):
        pass
    @abstractmethod
    def apply_scalar(self, s):
        pass
    @abstractmethod
    def mult_with_derivative(self, target, activated_z):
        pass
```

**avg_line_length** 18.789474 · **max_line_length** 56 · **alphanum_fraction** 0.697479
**quality signals**: 38, 357, 6.210526, 0.605263, 0.216102, 0.186441, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.243697, 357, 18, 57, 19.833333, 0.874074, 0, 0, 0.461538, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0.230769, false, 0.230769, 0.153846, 0, 0.538462, 0
**signal flags**: 0, 0, 0, null, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, null, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0
**effective** 0 · **hits** 5
---
**Row 23** · hexsha `6ae2bfe9690305fe8239169ce4e1cac0913037a7` · **size** 3215 · **ext** py · **lang** Python

- **max_stars**: `testaskjunoace.py` in `jpvelsamy/hotdog` @ `df45cdc0b9e6abfecd16a43f75f1671e51cbc47c`, licenses ["Apache-2.0"], count null, events null → null
- **max_issues**: same path, repo, head, and licenses; count null, events null → null
- **max_forks**: same path, repo, head, and licenses; count null, events null → null

**content** (indentation restored from Python syntax; blank lines were lost in the dump)

```python
import logging
import numpy as np
import pandas as pd
import sklearn
from sklearn.model_selection import train_test_split
from tensorflow import keras
from tensorflow.keras import layers
logger = logging.getLogger("ACE")
class TestAskJunoACE:
    def __init__(self):
        self.k_fold_count = 4
        self.num_epochs = 500
        self.all_mae_histories = []
    def fit_1(self, file_name):
        names = ["reach", "impressions", "results", "amount", "frequency", "clicks", "cpc", "ctr", "cpreach", "cpm",
                 "engagement", "cpr"]
        data = pd.read_csv(file_name, engine='c', dtype='float64', names=names, header=0, skiprows=0)
        mean = data.mean(axis=0)
        data -= mean
        std = data.std(axis=0)
        data /= std
        x = data.iloc[:, 0:10]
        y = data.iloc[:, -1]
        x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.25)
        model = keras.Sequential([
            layers.Dense(64, activation="relu", input_shape=(x_train.shape[1],)),
            layers.Dense(64, activation="relu"),
            layers.Dense(1)
        ])
        model.compile(optimizer="rmsprop", loss="mse", metrics=["mae"])
        model.fit(x_train, y_train, epochs=130, batch_size=16, verbose=0)
        test_mse_score, test_mae_score = model.evaluate(x_test, y_test)
        logger.info(f'mse score #{test_mse_score}, mae score #{test_mae_score}')
        #https://stackoverflow.com/questions/40729162/merging-results-from-model-predict-with-original-pandas-dataframe
        y_hats = model.predict(x_test)
        y_test['preds'] = y_hats
        df_out = pd.merge(data, y_test[['preds']], how='left', left_index=True, right_index=True)
        df_out.to_csv('/home/jpvel/Desktop/outcome.csv', float_format='%.2f')
    def fit_2(self, file_name):
        names = ["reach", "impressions", "results", "amount", "frequency", "clicks", "cpc", "ctr", "cpreach", "cpm",
                 "engagement", "cpr"]
        data = pd.read_csv(file_name, engine='c', dtype='float64', names=names, header=0, skiprows=0)
        mean = data.mean(axis=0)
        data -= mean
        std = data.std(axis=0)
        data /= std
        x = data.iloc[:, 0:10]
        y = data.iloc[:, -1]
        x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.25)
        model = keras.Sequential([
            layers.Dense(64, activation="relu", input_shape=(x_train.shape[1],)),
            layers.Dense(64, activation="relu"),
            layers.Dense(1)
        ])
        model.compile(optimizer="rmsprop", loss="mse", metrics=["mae"])
        model.fit(x_train, y_train, epochs=130, batch_size=16, verbose=0)
        test_mse_score, test_mae_score = model.evaluate(x_test, y_test)
        logger.info(f'mse score #{test_mse_score}, mae score #{test_mae_score}')
        #https://stackoverflow.com/questions/40729162/merging-results-from-model-predict-with-original-pandas-dataframe
        outcome = model.predict(x_test)
        y_test['preds'] = outcome
        df_out = pd.merge(data, y_test, how='left', left_index=True, right_index=True)
        logger.info(df_out.head(10))
        df_out.to_csv('/home/jpvel/Desktop/outcome2.csv', float_format='%.2f')
```

**avg_line_length** 44.652778 · **max_line_length** 119 · **alphanum_fraction** 0.625505
**quality signals**: 446, 3215, 4.318386, 0.275785, 0.025961, 0.018692, 0.047767, 0.805815, 0.805815, 0.805815, 0.728972, 0.693666, 0.693666, 0, 0.030788, 0.222084, 3215, 72, 120, 44.652778, 0.739304, 0.068429, 0, 0.612903, 0, 0, 0.137943, 0.021042, 0, 0, 0, 0, 0, 1, 0.048387, false, 0, 0.112903, 0, 0.177419, 0
**signal flags**: 0, 0, 0, null, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, null, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
**effective** 0 · **hits** 5
---
**Row 24** · hexsha `6af068ae69bc92b753b8d82591ccaed2c902f428` · **size** 5782 · **ext** py · **lang** Python

- **max_stars**: `tools/perf/page_sets/media_cns_cases.py` in `google-ar/chromium` @ `2441c86a5fd975f09a6c30cddb57dfb7fc239699`, licenses ["Apache-2.0", "BSD-3-Clause-No-Nuclear-License-2014", "BSD-3-Clause"], count 777, events 2017-08-29T15:15:32.000Z → 2022-03-21T05:29:41.000Z
- **max_issues**: same path, head, and licenses, repo `harrymarkovskiy/WebARonARCore`; count 66, events 2017-08-30T18:31:18.000Z → 2021-08-02T10:59:35.000Z
- **max_forks**: same path, head, and licenses, repo `harrymarkovskiy/WebARonARCore`; count 123, events 2017-08-30T01:19:34.000Z → 2022-03-17T22:55:31.000Z

**content** (2-space Chromium-style indentation restored from Python syntax)

```python
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry.page import page as page_module
from telemetry import story
class BasicPlayPage(page_module.Page):
  def __init__(self, url, page_set, name=''):
    super(BasicPlayPage, self).__init__(url=url, page_set=page_set, name=name)
    self.add_browser_metrics = True
  def PlayAction(self, action_runner):
    action_runner.PlayMedia(playing_event_timeout_in_seconds=60,
                            ended_event_timeout_in_seconds=60)
  def RunPageInteractions(self, action_runner):
    self.PlayAction(action_runner)
  def SeekBeforeAndAfterPlayhead(self, action_runner):
    action_runner.PlayMedia(playing_event_timeout_in_seconds=60)
    # Wait for 1 second so that we know the play-head is at ~1s.
    action_runner.Wait(1)
    # Seek to before the play-head location.
    action_runner.SeekMedia(seconds=0.5, timeout_in_seconds=60,
                            label='seek_warm')
    # Seek to after the play-head location.
    action_runner.SeekMedia(seconds=15, timeout_in_seconds=60,
                            label='seek_cold')
class SeekBeforeAndAfterPlayheadPage(BasicPlayPage):
  def __init__(self, url, page_set, name):
    super(SeekBeforeAndAfterPlayheadPage, self).__init__(url=url,
                                                         page_set=page_set,
                                                         name=name)
    self.add_browser_metrics = False
  def RunPageInteractions(self, action_runner):
    self.SeekBeforeAndAfterPlayhead(action_runner)
class MediaCnsCasesPageSet(story.StorySet):
  """ Media benchmark on network constrained conditions. """
  def __init__(self):
    super(MediaCnsCasesPageSet, self).__init__()
    urls_list = [
        # pylint: disable=line-too-long
        'file://tough_video_cases/video.html?id=no_constraints_webm&src=tulip2.webm&net=none',
        # pylint: disable=line-too-long
        'file://tough_video_cases/video.html?id=cable_webm&src=tulip2.webm&net=cable',
        # pylint: disable=line-too-long
        'file://tough_video_cases/video.html?id=wifi_webm&src=tulip2.webm&net=wifi',
        # pylint: disable=line-too-long
        'file://tough_video_cases/video.html?id=no_constraints_ogv&src=tulip2.ogv&net=none',
        # pylint: disable=line-too-long
        'file://tough_video_cases/video.html?id=cable_ogv&src=tulip2.ogv&net=cable',
        # pylint: disable=line-too-long
        'file://tough_video_cases/video.html?id=wifi_ogv&src=tulip2.ogv&net=wifi',
        # pylint: disable=line-too-long
        'file://tough_video_cases/video.html?id=no_constraints_mp4&src=tulip2.mp4&net=none',
        # pylint: disable=line-too-long
        'file://tough_video_cases/video.html?id=cable_mp4&src=tulip2.mp4&net=cable',
        # pylint: disable=line-too-long
        'file://tough_video_cases/video.html?id=wifi_mp4&src=tulip2.mp4&net=wifi',
        # pylint: disable=line-too-long
        'file://tough_video_cases/video.html?id=no_constraints_wav&src=tulip2.wav&type=audio&net=none',
        # pylint: disable=line-too-long
        'file://tough_video_cases/video.html?id=cable_wav&src=tulip2.wav&type=audio&net=cable',
        # pylint: disable=line-too-long
        'file://tough_video_cases/video.html?id=wifi_wav&src=tulip2.wav&type=audio&net=wifi',
        # pylint: disable=line-too-long
        'file://tough_video_cases/video.html?id=no_constraints_ogg&src=tulip2.ogg&type=audio&net=none',
        # pylint: disable=line-too-long
        'file://tough_video_cases/video.html?id=cable_ogg&src=tulip2.ogg&type=audio&net=cable',
        # pylint: disable=line-too-long
        'file://tough_video_cases/video.html?id=wifi_ogg&src=tulip2.ogg&type=audio&net=wifi',
        # pylint: disable=line-too-long
        'file://tough_video_cases/video.html?id=no_constraints_mp3&src=tulip2.mp3&type=audio&net=none',
        # pylint: disable=line-too-long
        'file://tough_video_cases/video.html?id=cable_mp3&src=tulip2.mp3&type=audio&net=cable',
        # pylint: disable=line-too-long
        'file://tough_video_cases/video.html?id=wifi_mp3&src=tulip2.mp3&type=audio&net=wifi',
        # pylint: disable=line-too-long
        'file://tough_video_cases/video.html?id=no_constraints_m4a&src=tulip2.m4a&type=audio&net=none',
        # pylint: disable=line-too-long
        'file://tough_video_cases/video.html?id=cable_m4a&src=tulip2.m4a&type=audio&net=cable',
        # pylint: disable=line-too-long
        'file://tough_video_cases/video.html?id=wifi_m4a&src=tulip2.m4a&type=audio&net=wifi'
    ]
    for url in urls_list:
      self.AddStory(BasicPlayPage(url, self))
    urls_list2 = [
        # pylint: disable=line-too-long
        'file://tough_video_cases/video.html?id=wifi_mp3&src=tulip2.mp3&type=audio&net=wifi',
        # pylint: disable=line-too-long
        'file://tough_video_cases/video.html?id=wifi_m4a&src=tulip2.m4a&type=audio&net=wifi',
        # pylint: disable=line-too-long
        'file://tough_video_cases/video.html?id=wifi_ogg&src=tulip2.ogg&type=audio&net=wifi',
        # pylint: disable=line-too-long
        'file://tough_video_cases/video.html?id=wifi_wav&src=tulip2.wav&type=audio&net=wifi',
        # pylint: disable=line-too-long
        'file://tough_video_cases/video.html?id=wifi_mp4&src=tulip2.mp4&type=audio&net=wifi',
        # pylint: disable=line-too-long
        'file://tough_video_cases/video.html?id=wifi_ogv&src=tulip2.ogv&type=audio&net=wifi',
        # pylint: disable=line-too-long
        'file://tough_video_cases/video.html?id=wifi_webm&src=tulip2.webm&type=audio&net=wifi'
    ]
    for url in urls_list2:
      if url in urls_list:
        name = 'seek_' + url
      else:
        name = ''
      self.AddStory(SeekBeforeAndAfterPlayheadPage(url, self, name=name))
```

**avg_line_length** 47.393443 · **max_line_length** 101 · **alphanum_fraction** 0.699066
**quality signals**: 835, 5782, 4.646707, 0.148503, 0.093814, 0.12268, 0.14433, 0.794588, 0.764433, 0.728866, 0.697938, 0.647938, 0.647938, 0, 0.015589, 0.167935, 5782, 121, 102, 47.785124, 0.790896, 0.204773, 0, 0.115942, 0, 0.405797, 0.50724, 0.502194, 0, 0, 0, 0, 0, 1, 0.101449, false, 0, 0.028986, 0, 0.173913, 0
**signal flags**: 0, 0, 0, null, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, null, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
**effective** 0 · **hits** 5
---
**Row 25** · hexsha `0a81eb32e4c38257c06b81ad370744bfa1131769` · **size** 29 · **ext** py · **lang** Python

- **max_stars**: `k8s-deploy-external-dns/provider/__init__.py` in `Filippo125/k8s-deploy-external-dns` @ `b83c74c7e00c9566088a232ce0c5da98b23f8198`, licenses ["MIT"], count null, events null → null
- **max_issues**: same path, repo, head, and licenses; count null, events null → null
- **max_forks**: same path, repo, head, and licenses; count null, events null → null

**content**

```python
from .povh import OVHProvider
```

**avg_line_length** 29 · **max_line_length** 29 · **alphanum_fraction** 0.862069
**quality signals**: 4, 29, 6.25, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.103448, 29, 1, 29, 29, 0.961538, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, true, 0, 1, 0, 1, 0
**signal flags**: 1, 1, 0, null, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, null, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0
**effective** 0 · **hits** 5
---
**Row 26** · hexsha `0ab464c6834affd58b59373c6b0f9bc1ca82876d` · **size** 95 · **ext** py · **lang** Python

- **max_stars**: `test29.py` in `cherytony/test1` @ `506ce4cab6f641beff817c81d7a616db29a7131d`, licenses ["Apache-2.0"], count null, events null → null
- **max_issues**: same path, repo, head, and licenses; count null, events null → null
- **max_forks**: same path, repo, head, and licenses; count null, events null → null

**content**

```python
print("Hello")
import sys
print(sys.version)
import tensorflow as tf
print(tf.__version__)
```

**avg_line_length** 9.5 · **max_line_length** 23 · **alphanum_fraction** 0.757895
**quality signals**: 14, 95, 4.857143, 0.571429, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.136842, 95, 9, 24, 10.555556, 0.829268, 0, 0, 0, 0, 0, 0.052632, 0, 0, 0, 0, 0, 0, 1, 0, true, 0, 0.4, 0, 0.4, 0.6
**signal flags**: 1, 0, 0, null, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, null, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1
**effective** 0 · **hits** 5
---
**Row 27** · hexsha `0adc929657b93d6b766c909d3c2ba6f9e165ded8` · **size** 39 · **ext** py · **lang** Python

- **max_stars**: `Src/Hosts/Silverlight/Tests/tests/manual/test_s_clock_rb/verification.py` in `jdhardy/dlr` @ `dca078fbf9d103fad4dcabda76795a23d82106bc`, licenses ["Apache-2.0"], count null, events null → null
- **max_issues**: same path, repo, head, and licenses; count null, events null → null
- **max_forks**: same path, repo, head, and licenses; count null, events null → null

**content**

```python
from SL_util import *
PositiveTest(0)
```

**avg_line_length** 9.75 · **max_line_length** 21 · **alphanum_fraction** 0.769231
**quality signals**: 6, 39, 4.833333, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.030303, 0.153846, 39, 3, 22, 13, 0.848485, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, true, 0, 0.5, 0, 0.5, 0
**signal flags**: 1, 1, 0, null, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, null, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0
**effective** 0 · **hits** 5
---
**Row 28** · hexsha `0af4867553aa1f35b075b7edaaabaf94da3023a3` · **size** 129 · **ext** py · **lang** Python

- **max_stars**: `gdeltdoc/__init__.py` in `Man-who-sold-the-world/gdelt-doc-api` @ `9e2a5922aba7a56718fc6886e926e351e73597b4`, licenses ["MIT"], count null, events null → null
- **max_issues**: same path, repo, head, and licenses; count null, events null → null
- **max_forks**: same path, repo, head, and licenses; count null, events null → null

**content**

```python
from gdeltdoc.api_client import GdeltDoc
from gdeltdoc.filters import Filters, near, repeat, multi_repeat
__version__ = "1.3.0"
```

**avg_line_length** 25.8 · **max_line_length** 64 · **alphanum_fraction** 0.806202
**quality signals**: 19, 129, 5.157895, 0.684211, 0.244898, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.026316, 0.116279, 129, 4, 65, 32.25, 0.833333, 0, 0, 0, 0, 0, 0.03876, 0, 0, 0, 0, 0, 0, 1, 0, false, 0, 0.666667, 0, 0.666667, 0
**signal flags**: 1, 0, 0, null, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, null, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0
**effective** 0 · **hits** 5
---
**Row 29** · hexsha `0afd7d04feca8eba88caab05f3b761663f018969` · **size** 72 · **ext** py · **lang** Python

- **max_stars**: `server/jbei/rest/clients/ice/__init__.py` in `zhwycsz/edd` @ `bdc1d2f8b5e375d3a1254829b9d2b460dd09ca12`, licenses ["BSD-3-Clause-LBNL"], count 1, events 2020-04-07T03:14:52.000Z → 2020-04-07T03:14:52.000Z
- **max_issues**: same path, repo, head, and licenses; count null, events null → null
- **max_forks**: same path, repo, head, and licenses; count null, events null → null

**content**

```python
# coding: utf-8
from .api import IceApi, IceApiException  # noqa: F401
```

**avg_line_length** 18 · **max_line_length** 54 · **alphanum_fraction** 0.722222
**quality signals**: 10, 72, 5.2, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.067797, 0.180556, 72, 3, 55, 24, 0.813559, 0.333333, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, true, 0, 1, 0, 1, 0
**signal flags**: 1, 0, 0, null, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, null, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0
**effective** 0 · **hits** 5
---
**Row 30** · hexsha `7c8f76616e97f4b8db1394308640f991177e14b2` · **size** 80 · **ext** py · **lang** Python

- **max_stars**: `PythonCurso01/aula121_docstrings/exemplo01/uma_linha.py` in `AlissonAnjos21/Aprendendo` @ `9454d9e53ef9fb8bc61bf481b6592164f5bf8695`, licenses ["MIT"], count null, events null → null
- **max_issues**: same path, repo, head, and licenses; count null, events null → null
- **max_forks**: same path, repo, head, and licenses; count null, events null → null

**content** (indentation restored)

```python
"""Uma linha de documentação"""
variavel = 'valor'
def funcao():
    return 1
```

**avg_line_length** 11.428571 · **max_line_length** 31 · **alphanum_fraction** 0.6375
**quality signals**: 10, 80, 5.1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.015873, 0.2125, 80, 6, 32, 13.333333, 0.793651, 0.3125, 0, 0, 0, 0, 0.102041, 0, 0, 0, 0, 0, 0, 1, 0.333333, false, 0, 0, 0.333333, 0.666667, 0
**signal flags**: 1, 0, 0, null, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, null, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0
**effective** 0 · **hits** 5
---
**Row 31** · hexsha `7c97aa6049b7ef8ab182c4473c5de87e84395ffa` · **size** 126 · **ext** py · **lang** Python

- **max_stars**: `Own/Python/Tutorials/Animals/Cat.py` in `cychitivav/programming_exercises` @ `e8e7ddb4ec4eea52ee0d3826a144c7dc97195e78`, licenses ["MIT"], count null, events null → null
- **max_issues**: same path, repo, head, and licenses; count null, events null → null
- **max_forks**: same path, repo, head, and licenses; count null, events null → null

**content** (indentation restored)

```python
#Cristian Chitiva
#cychitvav@unal.educo
#16/Sept/2018
class Cat:
    def __init__(self, name):
        self.name = name
```

**avg_line_length** 18 · **max_line_length** 30 · **alphanum_fraction** 0.650794
**quality signals**: 17, 126, 4.588235, 0.823529, 0.205128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.0625, 0.238095, 126, 7, 31, 18, 0.75, 0.380952, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0.333333, false, 0, 0, 0, 0.666667, 0
**signal flags**: 1, 0, 0, null, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, null, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0
**effective** 0 · **hits** 5
---
**Row 32** · hexsha `7caa8f1c5bbe37f5b9b87c8b7b87e1dbfc9fc3f1` · **size** 10018 · **ext** py · **lang** Python

- **max_stars**: `opensfm/test/test_stats.py` in `ricklentz/OpenSfM` @ `b44b5f2b533b6fce8055b3a5a98a59bc22ae2cf6`, licenses ["BSD-2-Clause"], count 2535, events 2015-01-04T17:59:20.000Z → 2022-03-31T06:12:43.000Z
- **max_issues**: same path, repo, head, and licenses; count 752, events 2015-01-11T22:15:20.000Z → 2022-03-31T15:23:47.000Z
- **max_forks**: same path, repo, head, and licenses; count 780, events 2015-01-15T15:06:00.000Z → 2022-03-26T20:47:26.000Z

**content** (indentation restored from Python syntax; blank lines were lost in the dump)

```python
from opensfm import stats, types
from opensfm.synthetic_data import synthetic_dataset, synthetic_scene
def test_processing_statistics_normal(
    scene_synthetic: synthetic_scene.SyntheticInputData,
):
    reference = scene_synthetic.reconstruction
    dataset = synthetic_dataset.SyntheticDataSet(
        reference,
        scene_synthetic.exifs,
        scene_synthetic.features,
        scene_synthetic.tracks_manager,
    )
    processing_statistics = stats.processing_statistics(dataset, [reference])
    assert list(processing_statistics.keys()) == ["steps_times", "date", "area"]
    assert processing_statistics["steps_times"] == {
        "Feature Extraction": -1,
        "Features Matching": -1,
        "Tracks Merging": -1,
        "Reconstruction": -1,
        "Total Time": 0,
    }
    assert processing_statistics["date"] == "unknown"
    assert 3500 < processing_statistics["area"] < 3600
def test_processing_statistics_null(
    scene_synthetic: synthetic_scene.SyntheticInputData,
    null_scene: types.Reconstruction,
):
    dataset = synthetic_dataset.SyntheticDataSet(
        null_scene,
        scene_synthetic.exifs,
        scene_synthetic.features,
        scene_synthetic.tracks_manager,
    )
    processing_statistics = stats.processing_statistics(dataset, [null_scene])
    assert list(processing_statistics.keys()) == ["steps_times", "date", "area"]
    assert processing_statistics["steps_times"] == {
        "Feature Extraction": -1,
        "Features Matching": -1,
        "Tracks Merging": -1,
        "Reconstruction": -1,
        "Total Time": 0,
    }
    assert processing_statistics["date"] == "unknown"
    assert processing_statistics["area"] == -1
def test_features_statistics_normal(
    scene_synthetic: synthetic_scene.SyntheticInputData,
):
    reference = scene_synthetic.reconstruction
    dataset = synthetic_dataset.SyntheticDataSet(
        reference,
        scene_synthetic.exifs,
        scene_synthetic.features,
        scene_synthetic.tracks_manager,
    )
    features_statistics = stats.features_statistics(
        dataset, scene_synthetic.tracks_manager, [reference]
    )
    assert list(features_statistics.keys()) == [
        "detected_features",
        "reconstructed_features",
    ]
    assert (
        features_statistics["detected_features"]
        == features_statistics["reconstructed_features"]
    )
    assert features_statistics["reconstructed_features"] == {
        "min": 303,
        "max": 1065,
        "mean": 841,
        "median": 884,
    }
def test_features_statistics_null(
    scene_synthetic: synthetic_scene.SyntheticInputData,
    null_scene: types.Reconstruction,
):
    dataset = synthetic_dataset.SyntheticDataSet(
        null_scene,
        scene_synthetic.exifs,
        scene_synthetic.features,
        scene_synthetic.tracks_manager,
    )
    features_statistics = stats.features_statistics(
        dataset, scene_synthetic.tracks_manager, [null_scene]
    )
    assert list(features_statistics.keys()) == [
        "detected_features",
        "reconstructed_features",
    ]
    assert (
        features_statistics["detected_features"]
        == features_statistics["reconstructed_features"]
    )
    assert features_statistics["reconstructed_features"] == {
        "min": -1,
        "max": -1,
        "mean": -1,
        "median": -1,
    }
def test_reconstruction_statistics_normal(
    scene_synthetic: synthetic_scene.SyntheticInputData,
):
    reference = scene_synthetic.reconstruction
    dataset = synthetic_dataset.SyntheticDataSet(
        reference,
        scene_synthetic.exifs,
        scene_synthetic.features,
        scene_synthetic.tracks_manager,
    )
    reconstruction_statistics = stats.reconstruction_statistics(
        dataset, scene_synthetic.tracks_manager, [reference]
    )
    assert reconstruction_statistics["components"] == 1
    assert not reconstruction_statistics["has_gps"]
    assert not reconstruction_statistics["has_gcp"]
    assert 4900 < reconstruction_statistics["initial_points_count"] < 5000
    assert reconstruction_statistics["initial_shots_count"] == 20
    assert 4900 < reconstruction_statistics["reconstructed_points_count"] < 5000
    assert reconstruction_statistics["reconstructed_shots_count"] == 20
    assert 16800 < reconstruction_statistics["observations_count"] < 16900
    assert 3.3 < reconstruction_statistics["average_track_length"] < 3.4
    assert 3.4 < reconstruction_statistics["average_track_length_over_two"] < 3.5
    assert len(reconstruction_statistics["histogram_track_length"]) == 5
```

*(the source is cut off at this point; the remaining fields of this row, including its quality signals, are missing)*
assert 0.15 < reconstruction_statistics["reprojection_error_normalized"] < 0.16
assert 1.25 < reconstruction_statistics["reprojection_error_pixels"] < 1.28
assert len(reconstruction_statistics["reprojection_histogram_normalized"][0]) == 30
assert len(reconstruction_statistics["reprojection_histogram_normalized"][1]) == 31
assert len(reconstruction_statistics["reprojection_histogram_pixels"][0]) == 30
assert len(reconstruction_statistics["reprojection_histogram_pixels"][1]) == 31
def test_reconstruction_statistics_null(
scene_synthetic: synthetic_scene.SyntheticInputData,
null_scene: types.Reconstruction,
):
dataset = synthetic_dataset.SyntheticDataSet(
null_scene,
scene_synthetic.exifs,
scene_synthetic.features,
scene_synthetic.tracks_manager,
)
reconstruction_statistics = stats.reconstruction_statistics(
dataset, scene_synthetic.tracks_manager, [null_scene]
)
assert reconstruction_statistics["components"] == 1
assert not reconstruction_statistics["has_gps"]
assert not reconstruction_statistics["has_gcp"]
assert 4900 < reconstruction_statistics["initial_points_count"] < 5000
assert reconstruction_statistics["initial_shots_count"] == 0
assert reconstruction_statistics["reconstructed_points_count"] == 0
assert reconstruction_statistics["reconstructed_shots_count"] == 0
assert reconstruction_statistics["observations_count"] == 0
assert reconstruction_statistics["average_track_length"] == -1
assert reconstruction_statistics["average_track_length_over_two"] == -1
assert len(reconstruction_statistics["histogram_track_length"]) == 0
assert reconstruction_statistics["reprojection_error_normalized"] == -1.0
assert reconstruction_statistics["reprojection_error_pixels"] == -1.0
assert len(reconstruction_statistics["reprojection_histogram_normalized"][0]) == 0
assert len(reconstruction_statistics["reprojection_histogram_normalized"][1]) == 0
assert len(reconstruction_statistics["reprojection_histogram_pixels"][0]) == 0
assert len(reconstruction_statistics["reprojection_histogram_pixels"][1]) == 0
def test_cameras_statistics_normal(
scene_synthetic: synthetic_scene.SyntheticInputData,
):
reference = scene_synthetic.reconstruction
dataset = synthetic_dataset.SyntheticDataSet(
reference,
scene_synthetic.exifs,
scene_synthetic.features,
scene_synthetic.tracks_manager,
)
cameras_statistics = stats.cameras_statistics(dataset, [reference])
assert cameras_statistics == {
"1": {
"initial_values": {"k1": -0.1, "k2": 0.01, "focal": 0.7},
"optimized_values": {"k1": -0.1, "k2": 0.01, "focal": 0.7},
"bias": {
"rotation": [-0.0, -0.0, -0.0],
"scale": 1.0,
"translation": [0.0, 0.0, 0.0],
},
}
}
def test_cameras_statistics_null(
scene_synthetic: synthetic_scene.SyntheticInputData,
null_scene: types.Reconstruction,
):
dataset = synthetic_dataset.SyntheticDataSet(
null_scene,
scene_synthetic.exifs,
scene_synthetic.features,
scene_synthetic.tracks_manager,
)
cameras_statistics = stats.cameras_statistics(dataset, [null_scene])
assert cameras_statistics == {}
def test_rig_statistics_normal(
scene_synthetic: synthetic_scene.SyntheticInputData,
):
reference = scene_synthetic.reconstruction
dataset = synthetic_dataset.SyntheticDataSet(
reference,
scene_synthetic.exifs,
scene_synthetic.features,
scene_synthetic.tracks_manager,
)
rig_statistics = stats.rig_statistics(dataset, [reference])
assert rig_statistics == {}
def test_rig_statistics_null(
scene_synthetic: synthetic_scene.SyntheticInputData,
null_scene: types.Reconstruction,
):
dataset = synthetic_dataset.SyntheticDataSet(
null_scene,
scene_synthetic.exifs,
scene_synthetic.features,
scene_synthetic.tracks_manager,
)
cameras_statistics = stats.rig_statistics(dataset, [null_scene])
assert cameras_statistics == {}
def test_gps_errors_normal(
scene_synthetic: synthetic_scene.SyntheticInputData,
):
reference = scene_synthetic.reconstruction
gps_errors = stats.gps_errors([reference])
assert gps_errors == {}
def test_gps_errors_null(
scene_synthetic: synthetic_scene.SyntheticInputData,
null_scene: types.Reconstruction,
):
gps_errors = stats.gps_errors([null_scene])
assert gps_errors == {}
def test_gcp_errors_normal(
scene_synthetic: synthetic_scene.SyntheticInputData,
):
reference = scene_synthetic.reconstruction
dataset = synthetic_dataset.SyntheticDataSet(
reference,
scene_synthetic.exifs,
scene_synthetic.features,
scene_synthetic.tracks_manager,
)
gcp_errors = stats.gcp_errors(dataset, [reference])
assert gcp_errors == {}
def test_gcp_errors_null(
scene_synthetic: synthetic_scene.SyntheticInputData,
null_scene: types.Reconstruction,
):
dataset = synthetic_dataset.SyntheticDataSet(
null_scene,
scene_synthetic.exifs,
scene_synthetic.features,
scene_synthetic.tracks_manager,
)
gcp_errors = stats.gcp_errors(dataset, [null_scene])
assert gcp_errors == {}
| 33.61745
| 87
| 0.712418
| 987
| 10,018
| 6.89463
| 0.101317
| 0.125496
| 0.047024
| 0.063483
| 0.911536
| 0.865393
| 0.797208
| 0.76723
| 0.689199
| 0.672594
| 0
| 0.019904
| 0.192553
| 10,018
| 297
| 88
| 33.73064
| 0.821362
| 0
| 0
| 0.611111
| 0
| 0
| 0.130465
| 0.069076
| 0
| 0
| 0
| 0
| 0.222222
| 1
| 0.055556
| false
| 0
| 0.007937
| 0
| 0.063492
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
7cbaa967a7579192571a1e2bd188aed5f9e90329
| 1,754
|
py
|
Python
|
src/wheezy/http/tests/test_method.py
|
akornatskyy/wheezy.http
|
85f89fc5492e09f2049b8d7a25b0d2a09387060f
|
[
"MIT"
] | null | null | null |
src/wheezy/http/tests/test_method.py
|
akornatskyy/wheezy.http
|
85f89fc5492e09f2049b8d7a25b0d2a09387060f
|
[
"MIT"
] | 29
|
2020-07-18T04:32:17.000Z
|
2021-07-06T09:42:16.000Z
|
src/wheezy/http/tests/test_method.py
|
akornatskyy/wheezy.http
|
85f89fc5492e09f2049b8d7a25b0d2a09387060f
|
[
"MIT"
] | null | null | null |
""" Unit tests for ``wheezy.http.method``.
"""
import unittest
from unittest.mock import Mock
from wheezy.http.method import accept_method
from wheezy.http.response import HTTPResponse
class AcceptMethodTestCase(unittest.TestCase):
"""Test the ``accept_method`` decorator."""
def test_exact_strategy(self):
"""A single HTTP method constraint check."""
mock_request = Mock()
mock_handler = Mock(return_value=HTTPResponse())
for method in ["GET", "HEAD", "POST", "PUT"]:
mock_request.reset_mock()
mock_request.method = method
handler = accept_method(method)(mock_handler)
response = handler(mock_request)
assert 200 == response.status_code
for method in ["HEAD", "POST", "PUT"]:
mock_request.reset_mock()
mock_request.method = method
handler = accept_method("GET")(mock_handler)
response = handler(mock_request)
assert 405 == response.status_code
def test_one_of_strategy(self):
"""Multiple HTTP methods constraint check."""
mock_request = Mock()
mock_handler = Mock(return_value=HTTPResponse())
for method in ["GET", "HEAD"]:
mock_request.reset_mock()
mock_request.method = method
handler = accept_method(("GET", "HEAD"))(mock_handler)
response = handler(mock_request)
assert 200 == response.status_code
for method in ["POST", "PUT"]:
mock_request.reset_mock()
mock_request.method = method
handler = accept_method(("GET", "HEAD"))(mock_handler)
response = handler(mock_request)
assert 405 == response.status_code
| 34.392157
| 66
| 0.619156
| 192
| 1,754
| 5.442708
| 0.239583
| 0.147368
| 0.042105
| 0.076555
| 0.708134
| 0.708134
| 0.708134
| 0.708134
| 0.708134
| 0.708134
| 0
| 0.00939
| 0.27138
| 1,754
| 50
| 67
| 35.08
| 0.808294
| 0.088369
| 0
| 0.628571
| 0
| 0
| 0.035533
| 0
| 0
| 0
| 0
| 0
| 0.114286
| 1
| 0.057143
| false
| 0
| 0.114286
| 0
| 0.2
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
6b138e8ad017f0979c52c6c5e1b7da79cf726a9b
| 56
|
py
|
Python
|
api/lastfm.py
|
imsuwj/noambox
|
99404b773c16ee30fc3593b64cd91a8ff9aeaedf
|
[
"MIT"
] | 3
|
2015-10-20T08:41:01.000Z
|
2017-08-08T17:45:59.000Z
|
api/lastfm.py
|
imsuwj/noambox
|
99404b773c16ee30fc3593b64cd91a8ff9aeaedf
|
[
"MIT"
] | null | null | null |
api/lastfm.py
|
imsuwj/noambox
|
99404b773c16ee30fc3593b64cd91a8ff9aeaedf
|
[
"MIT"
] | null | null | null |
import requests
from handler.config import Config, data
| 18.666667
| 39
| 0.839286
| 8
| 56
| 5.875
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.125
| 56
| 2
| 40
| 28
| 0.959184
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
860b4025d89e0eb085eea3f0f81608b2dd2fd0cd
| 5,601
|
py
|
Python
|
python/paddle/sparse/functional/unary.py
|
Lieberk/Paddle
|
2eacef496854b6e8e3b06daaf1c83478c575fbb3
|
[
"Apache-2.0"
] | null | null | null |
python/paddle/sparse/functional/unary.py
|
Lieberk/Paddle
|
2eacef496854b6e8e3b06daaf1c83478c575fbb3
|
[
"Apache-2.0"
] | null | null | null |
python/paddle/sparse/functional/unary.py
|
Lieberk/Paddle
|
2eacef496854b6e8e3b06daaf1c83478c575fbb3
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__all__ = []
from paddle import _C_ops, in_dynamic_mode
def relu(x, name=None):
"""
sparse relu activation, requiring x to be a sparse coo or sparse csr tensor.
.. math::
out = max(x, 0)
Parameters:
x (Tensor): The input Sparse Tensor with data type float32, float64.
name (str, optional): Name for the operation (optional, default is None).
For more information, please refer to :ref:`api_guide_Name`.
Returns:
A Sparse Tensor with the same data type and shape as ``x`` .
Examples:
.. code-block:: python
import paddle
from paddle.fluid.framework import _test_eager_guard
with _test_eager_guard():
dense_x = paddle.to_tensor([-2, 0, 1], dtype='float32')
sparse_x = dense_x.to_sparse_coo(1)
out = paddle.sparse.functional.relu(sparse_x)
"""
    assert in_dynamic_mode(), "Currently, Sparse API only supports dynamic mode"
if x.is_sparse_coo():
return _C_ops.final_state_sparse_coo_relu(x)
elif x.is_sparse_csr():
return _C_ops.final_state_sparse_csr_relu(x)
else:
raise ValueError(
"Currently, sparse.relu only support the input of SparseCooTensor or SparseCsrTensor"
)
def tanh(x, name=None):
"""
sparse tanh activation, requiring x to be a sparse coo or sparse csr tensor.
.. math::
out = tanh(x)
Parameters:
x (Tensor): The input Sparse Tensor with data type float32, float64.
name (str, optional): Name for the operation (optional, default is None).
For more information, please refer to :ref:`api_guide_Name`.
Returns:
A Sparse Tensor with the same data type and shape as ``x`` .
Examples:
.. code-block:: python
import paddle
from paddle.fluid.framework import _test_eager_guard
with _test_eager_guard():
dense_x = paddle.to_tensor([-2, 0, 1], dtype='float32')
sparse_x = dense_x.to_sparse_coo(1)
out = paddle.sparse.tanh(sparse_x)
"""
    assert in_dynamic_mode(), "Currently, Sparse API only supports dynamic mode"
if x.is_sparse_coo():
return _C_ops.final_state_sparse_coo_tanh(x)
elif x.is_sparse_csr():
return _C_ops.final_state_sparse_csr_tanh(x)
else:
raise ValueError(
"Currently, sparse.tanh only support the input of SparseCooTensor or SparseCsrTensor"
)
def sqrt(x, name=None):
"""
Calculate square root of x, requiring x to be a sparse coo or sparse csr tensor.
.. math::
out = sqrt(x)
Parameters:
x (Tensor): The input Sparse Tensor with data type float32, float64.
name (str, optional): Name for the operation (optional, default is None).
For more information, please refer to :ref:`api_guide_Name`.
Returns:
A Sparse Tensor with the same data type and shape as ``x`` .
Examples:
.. code-block:: python
import paddle
from paddle.fluid.framework import _test_eager_guard
with _test_eager_guard():
dense_x = paddle.to_tensor([4, 0, 1], dtype='float32')
sparse_x = dense_x.to_sparse_coo(1)
out = paddle.sparse.sqrt(sparse_x)
"""
    assert in_dynamic_mode(), "Currently, Sparse API only supports dynamic mode"
if x.is_sparse_coo():
return _C_ops.final_state_sparse_coo_sqrt(x)
elif x.is_sparse_csr():
return _C_ops.final_state_sparse_csr_sqrt(x)
else:
raise ValueError(
"Currently, sparse.sqrt only support the input of SparseCooTensor or SparseCsrTensor"
)
def sin(x, name=None):
"""
Calculate sin of x, requiring x to be a sparse coo or sparse csr tensor.
.. math::
out = sin(x)
Parameters:
x (Tensor): The input Sparse Tensor with data type float32, float64.
name (str, optional): Name for the operation (optional, default is None).
For more information, please refer to :ref:`api_guide_Name`.
Returns:
A Sparse Tensor with the same data type and shape as ``x`` .
Examples:
.. code-block:: python
import paddle
from paddle.fluid.framework import _test_eager_guard
with _test_eager_guard():
dense_x = paddle.to_tensor([-2, 0, 3], dtype='float32')
sparse_x = dense_x.to_sparse_coo(1)
out = paddle.sparse.sin(sparse_x)
"""
    assert in_dynamic_mode(), "Currently, Sparse API only supports dynamic mode"
if x.is_sparse_coo():
return _C_ops.final_state_sparse_coo_sin(x)
elif x.is_sparse_csr():
return _C_ops.final_state_sparse_csr_sin(x)
else:
raise ValueError(
"Currently, sparse.sin only support the input of SparseCooTensor or SparseCsrTensor"
)
| 31.466292
| 97
| 0.638993
| 764
| 5,601
| 4.501309
| 0.187173
| 0.041873
| 0.03722
| 0.034894
| 0.797906
| 0.797906
| 0.757197
| 0.757197
| 0.741785
| 0.692934
| 0
| 0.012102
| 0.277093
| 5,601
| 177
| 98
| 31.644068
| 0.837244
| 0.631494
| 0
| 0.47619
| 0
| 0
| 0.299135
| 0
| 0
| 0
| 0
| 0
| 0.095238
| 1
| 0.095238
| false
| 0
| 0.02381
| 0
| 0.309524
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
86664b0dd9dac91d6fa00cf899f41b6b1858e394
| 3,930
|
py
|
Python
|
tools/perf/core/results_dashboard_unittest.py
|
sarang-apps/darshan_browser
|
173649bb8a7c656dc60784d19e7bb73e07c20daa
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 575
|
2015-06-18T23:58:20.000Z
|
2022-03-23T09:32:39.000Z
|
tools/perf/core/results_dashboard_unittest.py
|
sarang-apps/darshan_browser
|
173649bb8a7c656dc60784d19e7bb73e07c20daa
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 113
|
2015-05-04T09:58:14.000Z
|
2022-01-31T19:35:03.000Z
|
tools/perf/core/results_dashboard_unittest.py
|
sarang-apps/darshan_browser
|
173649bb8a7c656dc60784d19e7bb73e07c20daa
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 52
|
2015-07-14T10:40:50.000Z
|
2022-03-15T01:11:49.000Z
|
# Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
import mock
from mock import call
from core import results_dashboard
class ResultsDashboardTest(unittest.TestCase):
def setUp(self):
self.dummy_token_generator = lambda service_file, timeout: 'Arthur-Merlin'
self.perf_data = {'foo': 1, 'bar': 2}
self.dashboard_url = 'https://chromeperf.appspot.com'
def testRetryForSendResultRetryException(self):
def raise_retry_exception(
url, histogramset_json, token_generator_callback):
del url, histogramset_json # unused
del token_generator_callback # unused
raise results_dashboard.SendResultsRetryException('Should retry')
with mock.patch('core.results_dashboard.time.sleep') as sleep_mock:
with mock.patch('core.results_dashboard._SendHistogramJson',
side_effect=raise_retry_exception) as m:
upload_result = results_dashboard.SendResults(
self.perf_data, 'dummy_benchmark',
self.dashboard_url, send_as_histograms=True,
token_generator_callback=self.dummy_token_generator, num_retries=5)
self.assertFalse(upload_result)
self.assertEqual(m.call_count, 5)
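      # The retry sleeps back off exponentially: 15 s doubling up to 240 s.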
self.assertEqual(
sleep_mock.mock_calls,
[call(15), call(30), call(60), call(120), call(240)])
def testNoRetryForSendResultFatalException(self):
def raise_retry_exception(
url, histogramset_json, token_generator_callback):
del url, histogramset_json # unused
del token_generator_callback # unused
raise results_dashboard.SendResultsFatalException('Do not retry')
with mock.patch('core.results_dashboard.time.sleep') as sleep_mock:
with mock.patch('core.results_dashboard._SendHistogramJson',
side_effect=raise_retry_exception) as m:
upload_result = results_dashboard.SendResults(
self.perf_data, 'dummy_benchmark',
self.dashboard_url, send_as_histograms=True,
token_generator_callback=self.dummy_token_generator,
num_retries=5)
self.assertFalse(upload_result)
self.assertEqual(m.call_count, 1)
self.assertFalse(sleep_mock.mock_calls)
def testNoRetryForSuccessfulSendResult(self):
with mock.patch('core.results_dashboard.time.sleep') as sleep_mock:
with mock.patch('core.results_dashboard._SendHistogramJson') as m:
upload_result = results_dashboard.SendResults(
self.perf_data, 'dummy_benchmark',
self.dashboard_url, send_as_histograms=True,
token_generator_callback=self.dummy_token_generator,
num_retries=5)
self.assertTrue(upload_result)
self.assertEqual(m.call_count, 1)
self.assertFalse(sleep_mock.mock_calls)
  def testNoRetryAfterSuccessfulSendResult(self):
counter = [0]
def raise_retry_exception_first_two_times(
url, histogramset_json, token_generator_callback):
del url, histogramset_json # unused
del token_generator_callback # unused
counter[0] += 1
if counter[0] <= 2:
raise results_dashboard.SendResultsRetryException('Please retry')
with mock.patch('core.results_dashboard.time.sleep') as sleep_mock:
with mock.patch('core.results_dashboard._SendHistogramJson',
side_effect=raise_retry_exception_first_two_times) as m:
upload_result = results_dashboard.SendResults(
self.perf_data, 'dummy_benchmark',
self.dashboard_url, send_as_histograms=True,
token_generator_callback=self.dummy_token_generator,
num_retries=5)
self.assertTrue(upload_result)
self.assertEqual(m.call_count, 3)
self.assertEqual(
sleep_mock.mock_calls, [call(15), call(30)])
| 42.258065
| 79
| 0.708397
| 459
| 3,930
| 5.786492
| 0.246187
| 0.096386
| 0.082831
| 0.051205
| 0.739081
| 0.739081
| 0.722139
| 0.722139
| 0.722139
| 0.722139
| 0
| 0.011279
| 0.210433
| 3,930
| 92
| 80
| 42.717391
| 0.844666
| 0.050127
| 0
| 0.631579
| 0
| 0
| 0.118453
| 0.079506
| 0
| 0
| 0
| 0
| 0.157895
| 1
| 0.105263
| false
| 0
| 0.052632
| 0
| 0.171053
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
86a11146e13501c9c6a54abae9650546fb715a24
| 3,217
|
py
|
Python
|
projects/models.py
|
robertruhiu/bac
|
2ab6d6b3fae806f96b1322f1d092ec8ff9860b1b
|
[
"MIT"
] | null | null | null |
projects/models.py
|
robertruhiu/bac
|
2ab6d6b3fae806f96b1322f1d092ec8ff9860b1b
|
[
"MIT"
] | 4
|
2020-06-05T19:32:33.000Z
|
2021-06-10T21:01:56.000Z
|
projects/models.py
|
robertruhiu/bac
|
2ab6d6b3fae806f96b1322f1d092ec8ff9860b1b
|
[
"MIT"
] | null | null | null |
from django.contrib.auth.models import User
from django.db import models
# Create your models here.
# TODO: add model for category to classify all projects using project category, can be multiple ie frontend, backend
# TODO: categorise language into frontend, backend etc
class Language(models.Model):
name = models.CharField(max_length=140)
def __str__(self):
return self.name
class Framework(models.Model):
name = models.CharField(max_length=140)
language = models.ForeignKey(Language, on_delete=models.DO_NOTHING)
def __str__(self):
return self.name
class level(models.Model):
name = models.CharField(max_length=140)
language = models.ForeignKey(Language, on_delete=models.DO_NOTHING)
def __str__(self):
return self.name
class Devtype(models.Model):
name = models.CharField(max_length=140)
def __str__(self):
return self.name
class Projecttype(models.Model):
name = models.CharField(max_length=140)
def __str__(self):
return self.name
class Project(models.Model):
name = models.CharField(max_length=140)
owner = models.ForeignKey(User, on_delete=models.CASCADE)
description = models.CharField(max_length=500, blank=True, null=True, )
level = models.CharField(max_length=200, blank=True, null=True, )
concept = models.CharField(max_length=200, blank=True, null=True, )
projectimage1 = models.CharField(max_length=200, blank=True, null=True, )
projectimage2 = models.CharField(max_length=200, blank=True, null=True, )
projectimage3 = models.CharField(max_length=200, blank=True, null=True, )
projectimage4 = models.CharField(max_length=200, blank=True, null=True, )
projectimage5 = models.CharField(max_length=200, blank=True, null=True, )
projectimage6 = models.CharField(max_length=200, blank=True, null=True, )
projectimage7 = models.CharField(max_length=200, blank=True, null=True, )
projectimage8 = models.CharField(max_length=200, blank=True, null=True, )
projectimage9 = models.CharField(max_length=200, blank=True, null=True, )
projectimage10 = models.CharField(max_length=200, blank=True, null=True, )
requirement1 = models.CharField(max_length=200, blank=True, null=True, )
requirement2 = models.CharField(max_length=200, blank=True, null=True, )
requirement3 = models.CharField(max_length=200, blank=True, null=True, )
requirement4 = models.CharField(max_length=200, blank=True, null=True, )
requirement5 = models.CharField(max_length=200, blank=True, null=True, )
requirement6 = models.CharField(max_length=200, blank=True, null=True, )
requirement7 = models.CharField(max_length=200, blank=True, null=True, )
requirement8 = models.CharField(max_length=200, blank=True, null=True, )
requirement9 = models.CharField(max_length=200, blank=True, null=True, )
requirement10 = models.CharField(max_length=200, blank=True, null=True, )
    framework = models.ForeignKey(Framework, on_delete=models.DO_NOTHING, null=True)
    devtype = models.ForeignKey(Devtype, on_delete=models.DO_NOTHING, null=True)
    projecttype = models.ForeignKey(Projecttype, on_delete=models.DO_NOTHING, null=True)
def __str__(self):
return self.name
| 41.779221
| 116
| 0.733914
| 420
| 3,217
| 5.47619
| 0.192857
| 0.18913
| 0.226957
| 0.302609
| 0.67913
| 0.651739
| 0.641304
| 0.641304
| 0.623043
| 0.202174
| 0
| 0.039971
| 0.152316
| 3,217
| 76
| 117
| 42.328947
| 0.803447
| 0.059683
| 0
| 0.363636
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.013158
| 0
| 1
| 0.109091
| false
| 0
| 0.036364
| 0.109091
| 1
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
86cfe20fc2558f24232c36ffbb64f15bed7ebcf2
| 120
|
py
|
Python
|
loonflow/__init__.py
|
youjiajia/loonflow
|
0542e543ffea49b2eda864397b9875b6bf107dd5
|
[
"MIT"
] | null | null | null |
loonflow/__init__.py
|
youjiajia/loonflow
|
0542e543ffea49b2eda864397b9875b6bf107dd5
|
[
"MIT"
] | null | null | null |
loonflow/__init__.py
|
youjiajia/loonflow
|
0542e543ffea49b2eda864397b9875b6bf107dd5
|
[
"MIT"
] | null | null | null |
from __future__ import absolute_import, unicode_literals
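# Standard Celery bootstrap: import the app when the package loads so its tasks are registered.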
from tasks import app as celery_app
__all__ = ['celery_app']
| 20
| 56
| 0.816667
| 17
| 120
| 5.058824
| 0.647059
| 0.209302
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.133333
| 120
| 5
| 57
| 24
| 0.826923
| 0
| 0
| 0
| 0
| 0
| 0.083333
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
86ef99952a5376efe78273dc70b91ad2a7ddfd5c
| 46
|
py
|
Python
|
__init__.py
|
floort/buienbadge
|
178a090970e69891b3381a8d158b06d1b611c147
|
[
"Unlicense"
] | null | null | null |
__init__.py
|
floort/buienbadge
|
178a090970e69891b3381a8d158b06d1b611c147
|
[
"Unlicense"
] | null | null | null |
__init__.py
|
floort/buienbadge
|
178a090970e69891b3381a8d158b06d1b611c147
|
[
"Unlicense"
] | null | null | null |
from buienbadge import service
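# Importing this package starts the service loop immediately.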
service.loop()
| 15.333333
| 30
| 0.826087
| 6
| 46
| 6.333333
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.108696
| 46
| 2
| 31
| 23
| 0.926829
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
8118d32aa05d295c6dc173836bc206609862c9aa
| 271
|
py
|
Python
|
app/utils/open_api/__init__.py
|
maxzhenzhera/my_vocab_backend
|
2e9f968374e0bc2fcc0ae40830ca40f3cf5754d1
|
[
"MIT"
] | null | null | null |
app/utils/open_api/__init__.py
|
maxzhenzhera/my_vocab_backend
|
2e9f968374e0bc2fcc0ae40830ca40f3cf5754d1
|
[
"MIT"
] | null | null | null |
app/utils/open_api/__init__.py
|
maxzhenzhera/my_vocab_backend
|
2e9f968374e0bc2fcc0ae40830ca40f3cf5754d1
|
[
"MIT"
] | null | null | null |
"""
headers:
Set-Cookie:
description: Session cookie
schema:
type: string
example: SESSIONID=abcde12345; Path=/
"\0Set-Cookie":
description: CSRF token
schema:
type: string
example: CSRFTOKEN=fghijk678910; Path=/; HttpOnly
"""
| 19.357143
| 55
| 0.634686
| 26
| 271
| 6.615385
| 0.692308
| 0.197674
| 0.186047
| 0.267442
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.059113
| 0.250923
| 271
| 13
| 56
| 20.846154
| 0.788177
| 0.9631
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
d493243ba40b13e8763016daae5929b87739c50c
| 126
|
py
|
Python
|
languages/python/design_stackinspection.py
|
Andilyn/learntosolveit
|
fd15345c74ef543e4e26f4691bf91cb6dac568a4
|
[
"BSD-3-Clause"
] | 1
|
2021-04-09T04:15:24.000Z
|
2021-04-09T04:15:24.000Z
|
languages/python/design_stackinspection.py
|
Andilyn/learntosolveit
|
fd15345c74ef543e4e26f4691bf91cb6dac568a4
|
[
"BSD-3-Clause"
] | null | null | null |
languages/python/design_stackinspection.py
|
Andilyn/learntosolveit
|
fd15345c74ef543e4e26f4691bf91cb6dac568a4
|
[
"BSD-3-Clause"
] | 1
|
2021-07-31T02:45:29.000Z
|
2021-07-31T02:45:29.000Z
|
import sys
def foo():
"""blah"""
    print(sys._getframe().f_back.f_locals[sys._getframe().f_code.co_name].__doc__)
foo()
| 18
| 81
| 0.674603
| 20
| 126
| 3.75
| 0.7
| 0.293333
| 0.32
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.134921
| 126
| 6
| 82
| 21
| 0.688073
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.25
| null | null | 0.25
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
d4a504e8d7d216e49c2588b144a44a382e28a483
| 132
|
py
|
Python
|
python-arithmetic-operators.py
|
nabin-info/hackerrank.com
|
da66a470d2e97a093821bfe41eb233d51784b9cc
|
[
"MIT"
] | null | null | null |
python-arithmetic-operators.py
|
nabin-info/hackerrank.com
|
da66a470d2e97a093821bfe41eb233d51784b9cc
|
[
"MIT"
] | null | null | null |
python-arithmetic-operators.py
|
nabin-info/hackerrank.com
|
da66a470d2e97a093821bfe41eb233d51784b9cc
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
a = int(input().strip())
b = int(input().strip())
print(a + b)
print(a - b)
print(a * b)
| 12
| 28
| 0.590909
| 24
| 132
| 3.166667
| 0.5
| 0.236842
| 0.276316
| 0.421053
| 0.276316
| 0.276316
| 0
| 0
| 0
| 0
| 0
| 0
| 0.181818
| 132
| 10
| 29
| 13.2
| 0.703704
| 0.121212
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.166667
| 0
| 0.166667
| 0.5
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
d4ea50fda756725eb38446c5005404d8ecdcc6eb
| 362
|
py
|
Python
|
titration/utils/devices/serial_mock.py
|
kieransukachevin/AlkalinityTitrator
|
09b642ee1368278b7b4fc180bed50ff538a0938a
|
[
"MIT"
] | null | null | null |
titration/utils/devices/serial_mock.py
|
kieransukachevin/AlkalinityTitrator
|
09b642ee1368278b7b4fc180bed50ff538a0938a
|
[
"MIT"
] | 31
|
2021-06-29T17:53:56.000Z
|
2021-08-19T21:59:03.000Z
|
titration/utils/devices/serial_mock.py
|
kieransukachevin/AlkalinityTitrator
|
09b642ee1368278b7b4fc180bed50ff538a0938a
|
[
"MIT"
] | 4
|
2021-02-12T23:21:17.000Z
|
2021-11-15T16:55:38.000Z
|
class Serial:
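    """Test double for serial.Serial: buffer resets, write, and flush are
    no-ops, and readline always reports completion (b"DONE\r\n")."""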
def __init__(self, port=None, baudrate=None, timeout=None):
pass
def reset_output_buffer(self):
pass
def reset_input_buffer(self):
pass
def writable(self):
return True
def write(self, bytes):
pass
def flush(self):
pass
def readline(self):
return b"DONE\r\n"
| 16.454545
| 63
| 0.58011
| 46
| 362
| 4.391304
| 0.543478
| 0.173267
| 0.163366
| 0.168317
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.328729
| 362
| 21
| 64
| 17.238095
| 0.831276
| 0
| 0
| 0.333333
| 0
| 0
| 0.022099
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.466667
| false
| 0.333333
| 0
| 0.133333
| 0.666667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
|
0
| 5
|
d4f43313db29aad0f1d413cf5527559585790e82
| 91
|
py
|
Python
|
Funcoes/exceptions.py
|
felipezago/ControleEstoque
|
229659c4f9888fd01df34375ec92af7a1f734d10
|
[
"MIT"
] | null | null | null |
Funcoes/exceptions.py
|
felipezago/ControleEstoque
|
229659c4f9888fd01df34375ec92af7a1f734d10
|
[
"MIT"
] | null | null | null |
Funcoes/exceptions.py
|
felipezago/ControleEstoque
|
229659c4f9888fd01df34375ec92af7a1f734d10
|
[
"MIT"
] | null | null | null |
from psycopg2 import OperationalError
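# Subclass psycopg2's OperationalError under the same name so callers can
# import it from this module instead of depending on psycopg2 directly.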
class OperationalError(OperationalError):
pass
| 15.166667
| 41
| 0.824176
| 8
| 91
| 9.375
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.012821
| 0.142857
| 91
| 5
| 42
| 18.2
| 0.948718
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
|
0
| 5
|
be2397b5fc6d613fe0be03080ccf42f5b3d01be6
| 47
|
py
|
Python
|
Python/Topics/Creating bytes/To_bytes()/main.py
|
drtierney/hyperskill-problems
|
b74da993f0ac7bcff1cbd5d89a3a1b06b05f33e0
|
[
"MIT"
] | 5
|
2020-08-29T15:15:31.000Z
|
2022-03-01T18:22:34.000Z
|
Python/Topics/Creating bytes/To_bytes()/main.py
|
drtierney/hyperskill-problems
|
b74da993f0ac7bcff1cbd5d89a3a1b06b05f33e0
|
[
"MIT"
] | null | null | null |
Python/Topics/Creating bytes/To_bytes()/main.py
|
drtierney/hyperskill-problems
|
b74da993f0ac7bcff1cbd5d89a3a1b06b05f33e0
|
[
"MIT"
] | 1
|
2020-12-02T11:13:14.000Z
|
2020-12-02T11:13:14.000Z
|
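# Read an integer, convert it to two little-endian bytes, and print the sum of those byte values.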
print(sum(int(input()).to_bytes(2, 'little')))
| 23.5
| 46
| 0.659574
| 8
| 47
| 3.75
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.022222
| 0.042553
| 47
| 1
| 47
| 47
| 0.644444
| 0
| 0
| 0
| 0
| 0
| 0.12766
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
076c58ad90deb0d4925ec6e95008e65878b03f89
| 242
|
py
|
Python
|
getting_started/any.py
|
AoEiuV020/LearningPython
|
aac0f3f99cfd3d03a96a3c0e41da8f82ea0b8c70
|
[
"MIT"
] | null | null | null |
getting_started/any.py
|
AoEiuV020/LearningPython
|
aac0f3f99cfd3d03a96a3c0e41da8f82ea0b8c70
|
[
"MIT"
] | null | null | null |
getting_started/any.py
|
AoEiuV020/LearningPython
|
aac0f3f99cfd3d03a96a3c0e41da8f82ea0b8c70
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
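# any() accepts either a materialized list or a lazy map iterator; the result is the same.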
assert any(list(map(lambda x: x > 4, range(8))))
assert any(map(lambda x: x > 4, range(8)))
assert not any(list(map(lambda x: x < 0, range(8))))
assert not any((map(lambda x: x < 0, range(8))))
| 30.25
| 52
| 0.61157
| 47
| 242
| 3.148936
| 0.361702
| 0.189189
| 0.27027
| 0.297297
| 0.783784
| 0.662162
| 0.567568
| 0.324324
| 0
| 0
| 0
| 0.04902
| 0.157025
| 242
| 7
| 53
| 34.571429
| 0.676471
| 0.177686
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
078a293fd5203a7a1c134fcd3b7753a539c60a7a
| 9,912
|
py
|
Python
|
test/test_blf_solver.py
|
Hasenpfote/image_packer
|
826f7899cd90372638c5d241314423df7991ceb3
|
[
"MIT"
] | 3
|
2020-11-16T17:03:05.000Z
|
2021-08-14T12:16:29.000Z
|
test/test_blf_solver.py
|
Hasenpfote/image_packer
|
826f7899cd90372638c5d241314423df7991ceb3
|
[
"MIT"
] | null | null | null |
test/test_blf_solver.py
|
Hasenpfote/image_packer
|
826f7899cd90372638c5d241314423df7991ceb3
|
[
"MIT"
] | 1
|
2021-02-17T02:48:20.000Z
|
2021-02-17T02:48:20.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import logging
import math
import random
import uuid
from unittest import TestCase
import sys
sys.path.append('../')
from image_packer import blf
from image_packer import blf_solver
class TestBlfSolver(TestCase):
@classmethod
def setUpClass(cls):
logging.disable(logging.CRITICAL)
@classmethod
def tearDownClass(cls):
logging.disable(logging.NOTSET)
@staticmethod
def next_power_of_2(x):
return 2.0 ** math.ceil(math.log2(x))
@staticmethod
def is_power_of_2(x):
p = math.log2(x)
return math.ceil(p) == math.floor(p)
@staticmethod
def make_random_pieces(width, height, num_pieces):
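        # width and height may each be a fixed int or an inclusive (min, max) range.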
if isinstance(width, tuple):
min_width, max_width = width
else:
min_width, max_width = width, width
if isinstance(height, tuple):
min_height, max_height = height
else:
min_height, max_height = height, height
pieces = list()
for _ in range(num_pieces):
w = random.randint(min_width, max_width)
h = random.randint(min_height, max_height)
pieces.append(blf.Piece(uid=uuid.uuid4(), size=blf.Size(w, h)))
return pieces
def test_calc_minimum_container_size(self):
margin = blf.Thickness(top=1, right=1, bottom=1, left=1)
regions = list()
region1 = blf.Region(
uid=uuid.uuid4(),
top=margin.bottom + 10,
right=margin.left + 10,
bottom=margin.bottom,
left=margin.left
)
regions.append(region1)
region2 = blf.Region(
uid=uuid.uuid4(),
top=margin.bottom + 5,
right=region1.right + margin.right + margin.left + 5,
bottom=margin.bottom,
left=region1.right + margin.right + margin.left
)
regions.append(region2)
size = blf_solver.calc_minimum_container_size(regions, margin)
self.assertEqual(size.width, region2.right + margin.right)
self.assertEqual(size.height, region1.top + margin.top)
def test_calc_container_size(self):
margin = blf.Thickness(top=1, right=1, bottom=1, left=1)
regions = list()
region1 = blf.Region(
uid=uuid.uuid4(),
top=margin.bottom + 10,
right=margin.left + 10,
bottom=margin.bottom,
left=margin.left
)
regions.append(region1)
region2 = blf.Region(
uid=uuid.uuid4(),
top=margin.bottom + 5,
right=region1.right + margin.right + margin.left + 5,
bottom=margin.bottom,
left=region1.right + margin.right + margin.left
)
regions.append(region2)
container_width = 100
#
size = blf_solver.calc_container_size(container_width, regions, margin, False, False)
self.assertEqual(size.width, container_width)
self.assertEqual(size.height, region1.top + margin.top)
#
size = blf_solver.calc_container_size(container_width, regions, margin, True, False)
self.assertEqual(size.width, region2.right + margin.right)
self.assertEqual(size.height, region1.top + margin.top)
#
size = blf_solver.calc_container_size(container_width, regions, margin, False, True)
self.assertEqual(size.width, self.next_power_of_2(container_width))
self.assertEqual(size.height, self.next_power_of_2(region1.top + margin.top))
#
size = blf_solver.calc_container_size(container_width, regions, margin, True, True)
self.assertEqual(size.width, self.next_power_of_2(region2.right + margin.right))
self.assertEqual(size.height, self.next_power_of_2(region1.top + margin.top))
def test_calc_filling_rate(self):
margin = blf.Thickness(top=1, right=1, bottom=1, left=1)
regions = list()
region1 = blf.Region(
uid=uuid.uuid4(),
top=margin.bottom + 10,
right=margin.left + 10,
bottom=margin.bottom,
left=margin.left
)
regions.append(region1)
region2 = blf.Region(
uid=uuid.uuid4(),
top=margin.bottom + 5,
right=region1.right + margin.right + margin.left + 5,
bottom=margin.bottom,
left=region1.right + margin.right + margin.left
)
regions.append(region2)
container_size = blf.Size(region2.right + margin.right, region1.top + margin.top)
expected_area = sum(region.area for region in regions) / container_size.area
area = blf_solver.calc_filling_rate(container_size, regions)
self.assertAlmostEqual(area, expected_area)
def test_default(self):
pieces = self.make_random_pieces(width=(1, 64), height=(1, 64), num_pieces=10)
result = blf_solver.solve(pieces=pieces, container_width=1)
self.assertTrue(isinstance(result, tuple))
self.assertTrue(isinstance(result[0], int))
self.assertTrue(isinstance(result[1], int))
self.assertTrue(isinstance(result[2], list))
self.assertEqual(len(pieces), len(result[2]))
def test_margin(self):
pieces = self.make_random_pieces(width=(1, 64), height=(1, 64), num_pieces=10)
options = {
'margin': blf.Thickness(top=1, right=1, bottom=1, left=1),
}
result = blf_solver.solve(pieces=pieces, container_width=1, options=options)
self.assertTrue(isinstance(result, tuple))
self.assertTrue(isinstance(result[0], int))
self.assertTrue(isinstance(result[1], int))
self.assertTrue(isinstance(result[2], list))
self.assertEqual(len(pieces), len(result[2]))
def test_collapse_margin(self):
pieces = self.make_random_pieces(width=(1, 64), height=(1, 64), num_pieces=10)
options = {
'margin': blf.Thickness(top=1, right=1, bottom=1, left=1),
'collapse_margin': True
}
result = blf_solver.solve(pieces=pieces, container_width=1, options=options)
self.assertTrue(isinstance(result, tuple))
self.assertTrue(isinstance(result[0], int))
self.assertTrue(isinstance(result[1], int))
self.assertTrue(isinstance(result[2], list))
self.assertEqual(len(pieces), len(result[2]))
def test_disable_auto_size(self):
pieces = self.make_random_pieces(width=64, height=64, num_pieces=10)
options = {
'margin': blf.Thickness(top=1, right=1, bottom=1, left=1),
'enable_auto_size': False
}
result = blf_solver.solve(pieces=pieces, container_width=66, options=options)
self.assertTrue(isinstance(result, tuple))
self.assertTrue(isinstance(result[0], int))
self.assertTrue(isinstance(result[1], int))
self.assertTrue(isinstance(result[2], list))
self.assertEqual(len(pieces), len(result[2]))
#
with self.assertRaises(blf.LocationNotFoundError):
blf_solver.solve(pieces=pieces, container_width=64, options=options)
def test_force_pow2(self):
pieces = self.make_random_pieces(width=(1, 64), height=(1, 64), num_pieces=10)
options = {
'margin': blf.Thickness(top=1, right=1, bottom=1, left=1),
'force_pow2': True
}
result = blf_solver.solve(pieces=pieces, container_width=1, options=options)
self.assertTrue(isinstance(result, tuple))
self.assertTrue(isinstance(result[0], int))
self.assertTrue(isinstance(result[1], int))
self.assertTrue(isinstance(result[2], list))
self.assertTrue(self.is_power_of_2(result[0]))
self.assertTrue(self.is_power_of_2(result[1]))
self.assertEqual(len(pieces), len(result[2]))
def test_combination(self):
keys = ('collapse_margin', 'enable_auto_size', 'force_pow2')
patterns = (
(True, True, True),
(True, False, False),
(True, True, False),
(True, False, True),
(False, True, False),
(False, True, True),
(False, False, True),
(False, False, False),
)
pieces = self.make_random_pieces(width=(1, 64), height=(1, 64), num_pieces=10)
margin = blf.Thickness(top=1, right=1, bottom=1, left=1)
container_width = 66
for pattern in patterns:
options = {k: v for k, v in zip(keys, pattern)}
options['margin'] = margin
result = blf_solver.solve(pieces=pieces, container_width=container_width, options=options)
self.assertTrue(isinstance(result, tuple))
self.assertTrue(isinstance(result[0], int))
self.assertTrue(isinstance(result[1], int))
self.assertTrue(isinstance(result[2], list))
if options['force_pow2']:
self.assertTrue(self.is_power_of_2(result[0]))
self.assertTrue(self.is_power_of_2(result[1]))
self.assertEqual(len(pieces), len(result[2]))
def test_concurrent_processing(self):
pieces = self.make_random_pieces(width=64, height=64, num_pieces=100)
options = {
'margin': blf.Thickness(top=1, right=1, bottom=1, left=1),
'enable_auto_size': False
}
result = blf_solver.solve(pieces=pieces, container_width=66, options=options)
self.assertTrue(isinstance(result, tuple))
self.assertTrue(isinstance(result[0], int))
self.assertTrue(isinstance(result[1], int))
self.assertTrue(isinstance(result[2], list))
self.assertEqual(len(pieces), len(result[2]))
#
with self.assertRaises(blf.LocationNotFoundError):
blf_solver.solve(pieces=pieces, container_width=64, options=options)
| 38.27027
| 102
| 0.619552
| 1,198
| 9,912
| 4.996661
| 0.101002
| 0.074841
| 0.112262
| 0.140327
| 0.794688
| 0.763782
| 0.747077
| 0.743401
| 0.731874
| 0.724023
| 0
| 0.028723
| 0.258878
| 9,912
| 258
| 103
| 38.418605
| 0.786142
| 0.004338
| 0
| 0.592593
| 0
| 0
| 0.014907
| 0
| 0
| 0
| 0
| 0
| 0.240741
| 1
| 0.069444
| false
| 0
| 0.037037
| 0.00463
| 0.125
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
079927797c3c3f572fc52109a1bd4bce7f2e61da
| 174
|
py
|
Python
|
feature/vectors/feature_vectors.py
|
hhk998402/NaiveBayesClassifier
|
ac7e7b8b67505e526376a1a8e96f25f5a1ac5705
|
[
"MIT"
] | 27
|
2018-09-13T21:13:34.000Z
|
2022-02-05T21:48:54.000Z
|
feature/vectors/feature_vectors.py
|
hhk998402/NaiveBayesClassifier
|
ac7e7b8b67505e526376a1a8e96f25f5a1ac5705
|
[
"MIT"
] | null | null | null |
feature/vectors/feature_vectors.py
|
hhk998402/NaiveBayesClassifier
|
ac7e7b8b67505e526376a1a8e96f25f5a1ac5705
|
[
"MIT"
] | 28
|
2018-12-19T18:59:43.000Z
|
2022-03-05T20:00:11.000Z
|
from abc import ABC, abstractmethod
class FeatureVectors(ABC):
@abstractmethod
def add(self, label, index, feature):
"""Add a feature to the container."""
| 19.333333
| 45
| 0.678161
| 21
| 174
| 5.619048
| 0.761905
| 0.288136
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.218391
| 174
| 8
| 46
| 21.75
| 0.867647
| 0.178161
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.25
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 5
|
07b385b14e6ebc1f4a04cfdc00bc8c54de87728b
| 85
|
py
|
Python
|
utils/__init__.py
|
KabirSingh114/DeepFake_Face_Detection
|
0cf1ce3e69a7e4a18adff889dab4f6db029a0f25
|
[
"MIT"
] | null | null | null |
utils/__init__.py
|
KabirSingh114/DeepFake_Face_Detection
|
0cf1ce3e69a7e4a18adff889dab4f6db029a0f25
|
[
"MIT"
] | null | null | null |
utils/__init__.py
|
KabirSingh114/DeepFake_Face_Detection
|
0cf1ce3e69a7e4a18adff889dab4f6db029a0f25
|
[
"MIT"
] | null | null | null |
from .aug import cutout
from .layers import ReLU6, HardSigmoid, HardSwish, Attention
| 28.333333
| 60
| 0.811765
| 11
| 85
| 6.272727
| 0.818182
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.013514
| 0.129412
| 85
| 2
| 61
| 42.5
| 0.918919
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
ed265dfb8956ab67677bc496a0399ae23d6829fd
| 298
|
py
|
Python
|
duckdb_engine/tests/conftest.py
|
marcua/duckdb_engine
|
31d91a1606164cef529332133a08b1cb8b246706
|
[
"MIT"
] | 34
|
2020-10-02T10:49:04.000Z
|
2022-03-27T09:20:57.000Z
|
duckdb_engine/tests/conftest.py
|
marcua/duckdb_engine
|
31d91a1606164cef529332133a08b1cb8b246706
|
[
"MIT"
] | 100
|
2020-10-24T06:26:02.000Z
|
2022-03-24T22:10:35.000Z
|
duckdb_engine/tests/conftest.py
|
marcua/duckdb_engine
|
31d91a1606164cef529332133a08b1cb8b246706
|
[
"MIT"
] | 6
|
2021-04-30T13:36:11.000Z
|
2022-02-06T20:18:33.000Z
|
from pytest import fixture
from sqlalchemy import create_engine
from sqlalchemy.engine import Engine
from sqlalchemy.engine.url import registry
@fixture
def engine() -> Engine:
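    # Register the duckdb dialect with SQLAlchemy before building an in-memory engine.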
registry.register("duckdb", "duckdb_engine", "Dialect")
return create_engine("duckdb:///:memory:")
| 24.833333
| 60
| 0.738255
| 35
| 298
| 6.2
| 0.428571
| 0.193548
| 0.184332
| 0.239631
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.161074
| 298
| 11
| 61
| 27.090909
| 0.868
| 0
| 0
| 0
| 0
| 0
| 0.15331
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.125
| true
| 0
| 0.5
| 0
| 0.75
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
ed3a13bd5ee01563b1aef32009178ba1087a0788
| 115
|
py
|
Python
|
entropy/__init__.py
|
talhaanwarch/entropy
|
6686567c613474d6b46e080c453f4ae69f02bcbe
|
[
"BSD-3-Clause"
] | 2
|
2020-04-28T12:50:26.000Z
|
2020-05-13T08:52:42.000Z
|
entropy/entropy/__init__.py
|
MahdadJafarzadeh/Zzzscoring
|
f5d22cb7a457412fcc575c5cc6d331286f117dbf
|
[
"MIT"
] | null | null | null |
entropy/entropy/__init__.py
|
MahdadJafarzadeh/Zzzscoring
|
f5d22cb7a457412fcc575c5cc6d331286f117dbf
|
[
"MIT"
] | 1
|
2020-07-14T13:48:56.000Z
|
2020-07-14T13:48:56.000Z
|
# Import EntroPy objects
from .utils import *
from .entropy import *
from .fractal import *
__version__ = "0.1.1"
| 16.428571
| 24
| 0.721739
| 16
| 115
| 4.9375
| 0.5625
| 0.253165
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.031579
| 0.173913
| 115
| 6
| 25
| 19.166667
| 0.8
| 0.191304
| 0
| 0
| 0
| 0
| 0.054945
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.75
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
ed597d9c8847dd075928856b27a43ee5d04fb1d3
| 45
|
py
|
Python
|
shared_utils/__init__.py
|
Chotom/rl-db-indexing
|
16eaf0a3e3aef83b3fd077111e922dea6dd6a1f3
|
[
"MIT"
] | null | null | null |
shared_utils/__init__.py
|
Chotom/rl-db-indexing
|
16eaf0a3e3aef83b3fd077111e922dea6dd6a1f3
|
[
"MIT"
] | null | null | null |
shared_utils/__init__.py
|
Chotom/rl-db-indexing
|
16eaf0a3e3aef83b3fd077111e922dea6dd6a1f3
|
[
"MIT"
] | null | null | null |
"""Utility functions for benchmark module
"""
| 22.5
| 41
| 0.755556
| 5
| 45
| 6.8
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.111111
| 45
| 2
| 42
| 22.5
| 0.85
| 0.844444
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
ed617d710b18f624aa219e59e193edf082866eb2
| 15,969
|
py
|
Python
|
sdk/search/azure-search-documents/tests/async_tests/test_index_live_async.py
|
malthe/azure-sdk-for-python
|
0394ca66256f18fd45975b75ceea0e2527208abf
|
[
"MIT"
] | null | null | null |
sdk/search/azure-search-documents/tests/async_tests/test_index_live_async.py
|
malthe/azure-sdk-for-python
|
0394ca66256f18fd45975b75ceea0e2527208abf
|
[
"MIT"
] | null | null | null |
sdk/search/azure-search-documents/tests/async_tests/test_index_live_async.py
|
malthe/azure-sdk-for-python
|
0394ca66256f18fd45975b75ceea0e2527208abf
|
[
"MIT"
] | null | null | null |
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
import asyncio
import functools
import json
from os.path import dirname, join, realpath
import time
import pytest
from devtools_testutils import AzureMgmtTestCase, ResourceGroupPreparer
from search_service_preparer import SearchServicePreparer
from azure_devtools.scenario_tests.utilities import trim_kwargs_from_test_function
CWD = dirname(realpath(__file__))
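# Load the index schema and the small batch of hotel documents that live one
# directory above these async tests.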
SCHEMA = open(join(CWD, "..", "hotel_schema.json")).read()
BATCH = json.load(open(join(CWD, "..", "hotel_small.json")))
from azure.core.exceptions import HttpResponseError
from azure.core.credentials import AzureKeyCredential
from azure.search.documents import (
AutocompleteQuery,
SearchQuery,
SuggestQuery,
)
from azure.search.documents.aio import SearchIndexClient
def await_prepared_test(test_fn):
"""Synchronous wrapper for async test methods. Used to avoid making changes
upstream to AbstractPreparer (which doesn't await the functions it wraps)
"""
@functools.wraps(test_fn)
def run(test_class_instance, *args, **kwargs):
trim_kwargs_from_test_function(test_fn, kwargs)
loop = asyncio.get_event_loop()
return loop.run_until_complete(test_fn(test_class_instance, **kwargs))
return run
class SearchIndexClientTestAsync(AzureMgmtTestCase):
@ResourceGroupPreparer(random_name_enabled=True)
@SearchServicePreparer(schema=SCHEMA, index_batch=BATCH)
@await_prepared_test
async def test_async_get_document_count(
self, api_key, endpoint, index_name, **kwargs
):
client = SearchIndexClient(
endpoint, index_name, AzureKeyCredential(api_key)
)
async with client:
assert await client.get_document_count() == 10
@ResourceGroupPreparer(random_name_enabled=True)
@SearchServicePreparer(schema=SCHEMA, index_batch=BATCH)
@await_prepared_test
async def test_get_document(self, api_key, endpoint, index_name, **kwargs):
client = SearchIndexClient(
endpoint, index_name, AzureKeyCredential(api_key)
)
async with client:
for hotel_id in range(1, 11):
result = await client.get_document(key=str(hotel_id))
expected = BATCH["value"][hotel_id - 1]
assert result.get("hotelId") == expected.get("hotelId")
assert result.get("hotelName") == expected.get("hotelName")
assert result.get("description") == expected.get("description")
@ResourceGroupPreparer(random_name_enabled=True)
@SearchServicePreparer(schema=SCHEMA, index_batch=BATCH)
@await_prepared_test
async def test_get_document_missing(self, api_key, endpoint, index_name, **kwargs):
client = SearchIndexClient(
endpoint, index_name, AzureKeyCredential(api_key)
)
async with client:
with pytest.raises(HttpResponseError):
await client.get_document(key="1000")
@ResourceGroupPreparer(random_name_enabled=True)
@SearchServicePreparer(schema=SCHEMA, index_batch=BATCH)
@await_prepared_test
async def test_get_search_simple(self, api_key, endpoint, index_name, **kwargs):
client = SearchIndexClient(
endpoint, index_name, AzureKeyCredential(api_key)
)
async with client:
results = []
async for x in await client.search(query="hotel"):
results.append(x)
assert len(results) == 7
results = []
async for x in await client.search(query="motel"):
results.append(x)
assert len(results) == 2
@ResourceGroupPreparer(random_name_enabled=True)
@SearchServicePreparer(schema=SCHEMA, index_batch=BATCH)
@await_prepared_test
async def test_get_search_filter(self, api_key, endpoint, index_name, **kwargs):
client = SearchIndexClient(
endpoint, index_name, AzureKeyCredential(api_key)
)
query = SearchQuery(search_text="WiFi")
query.filter("category eq 'Budget'")
query.select("hotelName", "category", "description")
query.order_by("hotelName desc")
async with client:
results = []
async for x in await client.search(query=query):
results.append(x)
assert [x["hotelName"] for x in results] == sorted(
[x["hotelName"] for x in results], reverse=True
)
expected = {
"category",
"hotelName",
"description",
"@search.score",
"@search.highlights",
}
assert all(set(x) == expected for x in results)
assert all(x["category"] == "Budget" for x in results)
@ResourceGroupPreparer(random_name_enabled=True)
@SearchServicePreparer(schema=SCHEMA, index_batch=BATCH)
@await_prepared_test
async def test_get_search_counts(self, api_key, endpoint, index_name, **kwargs):
client = SearchIndexClient(
endpoint, index_name, AzureKeyCredential(api_key)
)
query = SearchQuery(search_text="hotel")
results = await client.search(query=query)
assert await results.get_count() is None
query = SearchQuery(search_text="hotel", include_total_result_count=True)
results = await client.search(query=query)
assert await results.get_count() == 7
@ResourceGroupPreparer(random_name_enabled=True)
@SearchServicePreparer(schema=SCHEMA, index_batch=BATCH)
@await_prepared_test
async def test_get_search_coverage(self, api_key, endpoint, index_name, **kwargs):
client = SearchIndexClient(
endpoint, index_name, AzureKeyCredential(api_key)
)
query = SearchQuery(search_text="hotel")
results = await client.search(query=query)
assert await results.get_coverage() is None
query = SearchQuery(search_text="hotel", minimum_coverage=50.0)
results = await client.search(query=query)
cov = await results.get_coverage()
assert isinstance(cov, float)
assert cov >= 50.0
@ResourceGroupPreparer(random_name_enabled=True)
@SearchServicePreparer(schema=SCHEMA, index_batch=BATCH)
@await_prepared_test
async def test_get_search_facets_none(
self, api_key, endpoint, index_name, **kwargs
):
client = SearchIndexClient(
endpoint, index_name, AzureKeyCredential(api_key)
)
query = SearchQuery(search_text="WiFi")
query.select("hotelName", "category", "description")
async with client:
results = await client.search(query=query)
assert await results.get_facets() is None
@ResourceGroupPreparer(random_name_enabled=True)
@SearchServicePreparer(schema=SCHEMA, index_batch=BATCH)
@await_prepared_test
async def test_get_search_facets_result(
self, api_key, endpoint, index_name, **kwargs
):
client = SearchIndexClient(
endpoint, index_name, AzureKeyCredential(api_key)
)
query = SearchQuery(search_text="WiFi", facets=["category"])
query.select("hotelName", "category", "description")
async with client:
results = await client.search(query=query)
assert await results.get_facets() == {
"category": [
{"value": "Budget", "count": 4},
{"value": "Luxury", "count": 1},
]
}
@ResourceGroupPreparer(random_name_enabled=True)
@SearchServicePreparer(schema=SCHEMA, index_batch=BATCH)
@await_prepared_test
async def test_autocomplete(self, api_key, endpoint, index_name, **kwargs):
client = SearchIndexClient(
endpoint, index_name, AzureKeyCredential(api_key)
)
async with client:
query = AutocompleteQuery(search_text="mot", suggester_name="sg")
results = await client.autocomplete(query=query)
assert results == [{"text": "motel", "query_plus_text": "motel"}]
@ResourceGroupPreparer(random_name_enabled=True)
@SearchServicePreparer(schema=SCHEMA, index_batch=BATCH)
@await_prepared_test
async def test_suggest(self, api_key, endpoint, index_name, **kwargs):
client = SearchIndexClient(
endpoint, index_name, AzureKeyCredential(api_key)
)
async with client:
query = SuggestQuery(search_text="mot", suggester_name="sg")
results = await client.suggest(query=query)
assert results == [
{"hotelId": "2", "text": "Cheapest hotel in town. Infact, a motel."},
{"hotelId": "9", "text": "Secret Point Motel"},
]
@ResourceGroupPreparer(random_name_enabled=True)
@SearchServicePreparer(schema=SCHEMA, index_batch=BATCH)
@await_prepared_test
async def test_upload_documents_new(self, api_key, endpoint, index_name, **kwargs):
client = SearchIndexClient(
endpoint, index_name, AzureKeyCredential(api_key)
)
DOCUMENTS = [
{"hotelId": "1000", "rating": 5, "rooms": [], "hotelName": "Azure Inn"},
{"hotelId": "1001", "rating": 4, "rooms": [], "hotelName": "Redmond Hotel"},
]
async with client:
results = await client.upload_documents(DOCUMENTS)
assert len(results) == 2
assert set(x.status_code for x in results) == {201}
            # There can be some lag before a document is searchable; note that
            # time.sleep blocks the event loop, so asyncio.sleep would be the
            # non-blocking choice inside an async test.
            time.sleep(3)
assert await client.get_document_count() == 12
for doc in DOCUMENTS:
result = await client.get_document(key=doc["hotelId"])
assert result["hotelId"] == doc["hotelId"]
assert result["hotelName"] == doc["hotelName"]
assert result["rating"] == doc["rating"]
assert result["rooms"] == doc["rooms"]
@ResourceGroupPreparer(random_name_enabled=True)
@SearchServicePreparer(schema=SCHEMA, index_batch=BATCH)
@await_prepared_test
async def test_upload_documents_existing(
self, api_key, endpoint, index_name, **kwargs
):
client = SearchIndexClient(
endpoint, index_name, AzureKeyCredential(api_key)
)
DOCUMENTS = [
{"hotelId": "1000", "rating": 5, "rooms": [], "hotelName": "Azure Inn"},
{"hotelId": "3", "rating": 4, "rooms": [], "hotelName": "Redmond Hotel"},
]
async with client:
results = await client.upload_documents(DOCUMENTS)
assert len(results) == 2
assert set(x.status_code for x in results) == {200, 201}
@ResourceGroupPreparer(random_name_enabled=True)
@SearchServicePreparer(schema=SCHEMA, index_batch=BATCH)
@await_prepared_test
async def test_delete_documents_existing(
self, api_key, endpoint, index_name, **kwargs
):
client = SearchIndexClient(
endpoint, index_name, AzureKeyCredential(api_key)
)
async with client:
results = await client.delete_documents(
[{"hotelId": "3"}, {"hotelId": "4"}]
)
assert len(results) == 2
assert set(x.status_code for x in results) == {200}
# There can be some lag before a document is searchable
time.sleep(3)
assert await client.get_document_count() == 8
with pytest.raises(HttpResponseError):
await client.get_document(key="3")
with pytest.raises(HttpResponseError):
await client.get_document(key="4")
@ResourceGroupPreparer(random_name_enabled=True)
@SearchServicePreparer(schema=SCHEMA, index_batch=BATCH)
@await_prepared_test
async def test_delete_documents_missing(
self, api_key, endpoint, index_name, **kwargs
):
client = SearchIndexClient(
endpoint, index_name, AzureKeyCredential(api_key)
)
async with client:
results = await client.delete_documents(
[{"hotelId": "1000"}, {"hotelId": "4"}]
)
assert len(results) == 2
assert set(x.status_code for x in results) == {200}
# There can be some lag before a document is searchable
time.sleep(3)
assert await client.get_document_count() == 9
with pytest.raises(HttpResponseError):
await client.get_document(key="1000")
with pytest.raises(HttpResponseError):
await client.get_document(key="4")
@ResourceGroupPreparer(random_name_enabled=True)
@SearchServicePreparer(schema=SCHEMA, index_batch=BATCH)
@await_prepared_test
async def test_merge_documents_existing(
self, api_key, endpoint, index_name, **kwargs
):
client = SearchIndexClient(
endpoint, index_name, AzureKeyCredential(api_key)
)
async with client:
results = await client.merge_documents(
[{"hotelId": "3", "rating": 1}, {"hotelId": "4", "rating": 2}]
)
assert len(results) == 2
assert set(x.status_code for x in results) == {200}
# There can be some lag before a document is searchable
time.sleep(3)
assert await client.get_document_count() == 10
result = await client.get_document(key="3")
assert result["rating"] == 1
result = await client.get_document(key="4")
assert result["rating"] == 2
@ResourceGroupPreparer(random_name_enabled=True)
@SearchServicePreparer(schema=SCHEMA, index_batch=BATCH)
@await_prepared_test
async def test_merge_documents_missing(
self, api_key, endpoint, index_name, **kwargs
):
client = SearchIndexClient(
endpoint, index_name, AzureKeyCredential(api_key)
)
async with client:
results = await client.merge_documents(
[{"hotelId": "1000", "rating": 1}, {"hotelId": "4", "rating": 2}]
)
assert len(results) == 2
assert set(x.status_code for x in results) == {200, 404}
# There can be some lag before a document is searchable
time.sleep(3)
assert await client.get_document_count() == 10
with pytest.raises(HttpResponseError):
await client.get_document(key="1000")
result = await client.get_document(key="4")
assert result["rating"] == 2
@ResourceGroupPreparer(random_name_enabled=True)
@SearchServicePreparer(schema=SCHEMA, index_batch=BATCH)
@await_prepared_test
async def test_merge_or_upload_documents(
self, api_key, endpoint, index_name, **kwargs
):
client = SearchIndexClient(
endpoint, index_name, AzureKeyCredential(api_key)
)
async with client:
results = await client.merge_or_upload_documents(
[{"hotelId": "1000", "rating": 1}, {"hotelId": "4", "rating": 2}]
)
assert len(results) == 2
assert set(x.status_code for x in results) == {200, 201}
# There can be some lag before a document is searchable
time.sleep(3)
assert await client.get_document_count() == 11
result = await client.get_document(key="1000")
assert result["rating"] == 1
result = await client.get_document(key="4")
assert result["rating"] == 2
[quality-signal metrics for the preceding file omitted]
ed6c9364ee5ca3b0c33fabcd7b2dfc63e73df6c3 | 34 bytes | py | Python | tasks/UDEMY/100_days/L025/main.py | AleksNeStu/projects @ 1a4c68dfbdcb77228f0f3617e58fd18fcb1f5dbb | ["Apache-2.0"] | stars: 2 (2022-01-19 to 2022-02-06) | issues: n/a | forks: n/a
# weather_data_from_csv =
#
# with
[quality-signal metrics for the preceding file omitted]
ed87e69c4e90baecffcf0005e7a0db42d3d1980e | 45 bytes | py | Python | exercises/twelve-days/twelve_days.py | kishankj/python @ 82042de746128127502e109111e6c4e8ab002af6 | ["MIT"] | stars: 1,177 (2017-06-21 to 2022-03-29) | issues: 1,890 (2017-06-18 to 2022-03-31) | forks: 1,095 (2017-06-26 to 2022-03-29)
def recite(start_verse, end_verse):
    # Exercism "Twelve Days" exercise stub; the solution body was never filled in.
    pass
[quality-signal metrics for the preceding file omitted]
71f486e66f0f2d778e3b2e6856334ab501aa84ee | 136 bytes | py | Python | python/ray/_private/runtime_env/constants.py | mgelbart/ray @ 4cec2286572e368a4bd64aae467751a384eff62d | ["Apache-2.0"] | stars: 33 (2020-05-27 to 2022-03-22) | issues: 227 (2021-10-01 to 2021-12-28) | forks: 10 (2018-04-27 to 2020-02-24)
# Env var set by job manager to pass runtime env and metadata to subprocess
RAY_JOB_CONFIG_JSON_ENV_VAR = "RAY_JOB_CONFIG_JSON_ENV_VAR"
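Illustrative consumer side of this constant (a sketch, not taken from the Ray sources): a subprocess launched by the job manager could recover the serialized config like so.
import json
import os

raw = os.environ.get(RAY_JOB_CONFIG_JSON_ENV_VAR)  # set by the job manager, per the comment above
job_config = json.loads(raw) if raw is not None else None  # parsed runtime env + metadata, or None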
[quality-signal metrics for the preceding file omitted]
71f5abccabfda5ef012493efb78e2b2db9dee28e | 96 bytes | py | Python | mysite/converters.py | wonjoonSeol/ScienceScape @ 8d8a3cb76193b6f85b7a2a6c7219e249237d64c8 | ["BSD-3-Clause"] | stars: 5 (2018-02-14 to 2020-02-23) | issues: 106 (2018-02-09 to 2018-03-29) | forks: 6 (2018-02-23 to 2020-05-14)
class FilePath:
    # Django URL path converter: matches a four-digit segment and yields an int.
    regex = '[0-9]{4}'
    def to_python(self, value):
        return int(value)
    def to_url(self, value):
        return '%04d' % value  # inverse mapping, required by Django's converter API
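Registering and routing with this converter might look as follows; the 'yyyy' type name and views.archive are hypothetical, not part of this file.
from django.urls import path, register_converter
from . import views  # hypothetical views module

register_converter(FilePath, 'yyyy')

urlpatterns = [
    path('archive/<yyyy:year>/', views.archive, name='archive'),  # `year` arrives as an int
]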
[quality-signal metrics for the preceding file omitted]
9c3d46a6d2ba07e68f5836e03d8d4a8a4530a02c | 71 bytes | py | Python | 8 kyu/L1: Set Alarm/L1: Set Alarm.py | anthonyjatoba/codewars @ 76b0d66dd1ba76a4d136b658920cdf85fd5c4b06 | ["MIT"] | stars: n/a | issues: 2 (2019-01-22 to 2019-01-31) | forks: 13 (2019-01-22 to 2019-01-25)
def set_alarm(employed, vacation):
return employed and not vacation
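A quick sanity check of the truth table this one-liner encodes (illustrative; not part of the kata file):
assert set_alarm(True, False) is True    # employed and not on vacation: alarm on
assert set_alarm(True, True) is False    # employed but on vacation: alarm off
assert set_alarm(False, False) is False  # not employed: alarm off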
[quality-signal metrics for the preceding file omitted]
9c5267b4dc63a15ac4389a0f16d9d49b96ed03d9 | 110 bytes | py | Python | pyf/_map_int.py | snoopyjc/pythonizer @ 6b3683084f41f0aa06b1b4e652a0f00b19cceac1 | ["Artistic-2.0"] | stars: 1 (2022-03-13) | issues: 21 (2022-03-17 to 2022-03-31) | forks: n/a
def _map_int(*args):
    """Convert each element of the (possibly nested) arguments to an int."""
    # `_int` and `_flatten` are not defined in this file; they are presumably
    # helpers supplied by sibling modules of the pythonizer runtime package.
    return list(map(_int, _flatten(args)))
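For context, a self-contained sketch of how those helpers might plausibly behave; these reimplementations are assumptions for illustration, not the pythonizer originals.
def _flatten(items):
    # Recursively yield scalars out of arbitrarily nested lists/tuples.
    for item in items:
        if isinstance(item, (list, tuple)):
            for sub in _flatten(item):
                yield sub
        else:
            yield item

def _int(value):
    # Coerce a scalar to int, treating non-numeric values as 0 (assumed semantics).
    try:
        return int(float(value))
    except (TypeError, ValueError):
        return 0

assert _map_int(1, [2.5, ['3']], None) == [1, 2, 3, 0]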
[quality-signal metrics for the preceding file omitted]
9c551f76d76a7c031ac7ea50a2d58a2e51b6c2d4 | 324 bytes | py | Python | toontown/cogdominium/DistCogdoLevelGameAI.py | TrueBlueDogemon/Toontown @ ebed7fc3f2ef06a529cf02eda7ab46361aceef9d | ["MIT"] | stars: 1 (2021-02-25) | issues: n/a | forks: 2 (2020-11-08 to 2021-09-02)
from direct.directnotify import DirectNotifyGlobal
from toontown.cogdominium.DistCogdoGameAI import DistCogdoGameAI
from otp.level.DistributedLevelAI import DistributedLevelAI
class DistCogdoLevelGameAI(DistCogdoGameAI, DistributedLevelAI):
notify = DirectNotifyGlobal.directNotify.newCategory("DistCogdoLevelGameAI")
[quality-signal metrics for the preceding file omitted]
9c5d023bdce2ad6f340d7808605056f6530170c6 | 131 bytes | py | Python | app.py | leeroywking/potential-waddle @ 07b4055989c9851a23a87f78f74452da67942a94 | ["MIT"] | stars: n/a | issues: n/a | forks: n/a
from flask import Flask, request  # `request` is imported but unused in this snippet
import requests as rq  # `rq` is likewise unused here
app = Flask(__name__)
@app.route("/")
def index():
return "hello world"
[quality-signal metrics for the preceding file omitted]
130b2aa2f75551d8af7e2a79b326f14112a7145a | 11,071 bytes | py | Python | tests/test_grpc_functions.py | spring-operator/python2-function-invoker @ 60c6b7aeb344f971090774fc5e0c3e0c024d4a65 | ["Apache-2.0"] | stars: n/a | issues: n/a | forks: n/a
__copyright__ = '''
Copyright 2018 the original author or authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
__author__ = 'David Turanski'
import sys
if sys.version_info[0] != 2:
raise RuntimeError("Requires Python 2")
import grpc
import unittest
import subprocess
import os
import uuid
import time
from invoker import function_pb2_grpc as function
from invoker import function_pb2 as message
PYTHON = sys.executable
class GrpcFunctionTest(unittest.TestCase):
"""
Assumes os.getcwd() is the project base directory
"""
@classmethod
def setUpClass(cls):
cls.workingdir = os.path.abspath("./invoker")
cls.command = "%s function_invoker.py" % PYTHON
def setUp(self):
pass
def tearDown(self):
self.process.kill()
def test_upper(self):
port = find_free_port()
env = {
'PYTHONPATH': '%s/tests/functions:$PYTHONPATH' % os.getcwd(),
'GRPC_PORT': str(port),
'FUNCTION_URI': 'file://%s/tests/functions/upper.py?handler=handle' % os.getcwd()
}
self.process = subprocess.Popen(self.command,
cwd=self.workingdir,
shell=True,
env=env,
preexec_fn=os.setsid,
)
channel = grpc.insecure_channel('localhost:%s' % port)
wait_until_channel_ready(channel)
self.stub = function.MessageFunctionStub(channel)
def generate_messages():
headers = {
'Content-Type': message.Message.HeaderValue(values=['text/plain']),
'correlationId': message.Message.HeaderValue(values=[str(uuid.uuid4())])
}
messages = [
message.Message(payload="hello", headers=headers),
message.Message(payload="world", headers=headers),
message.Message(payload="foo", headers=headers),
message.Message(payload="bar", headers=headers),
]
for msg in messages:
yield msg
responses = self.stub.Call(generate_messages())
expected = ['HELLO', 'WORLD', 'FOO', 'BAR']
for response in responses:
self.assertTrue(response.payload in expected)
expected.remove(response.payload)
self.assertEquals(0, len(expected))
def test_upper_no_correlation(self):
port = find_free_port()
env = {
'PYTHONPATH': '%s/tests/functions:$PYTHONPATH' % os.getcwd(),
'GRPC_PORT': str(port),
'FUNCTION_URI': 'file://%s/tests/functions/upper.py?handler=handle' % os.getcwd()
}
self.process = subprocess.Popen(self.command,
cwd=self.workingdir,
shell=True,
env=env,
preexec_fn=os.setsid,
)
channel = grpc.insecure_channel('localhost:%s' % port)
wait_until_channel_ready(channel)
self.stub = function.MessageFunctionStub(channel)
def generate_messages():
headers = {}
messages = [
message.Message(payload="hello", headers=headers),
message.Message(payload="world", headers=headers),
message.Message(payload="foo", headers=headers),
message.Message(payload="bar", headers=headers),
]
for msg in messages:
yield msg
responses = self.stub.Call(generate_messages())
expected = ['HELLO', 'WORLD', 'FOO', 'BAR']
for response in responses:
self.assertTrue(response.payload in expected)
self.assertEquals([], response.headers['correlationId'].values)
expected.remove(response.payload)
self.assertEquals(0, len(expected))
def test_concat(self):
port = find_free_port()
env = {
'PYTHONPATH': '%s/tests/functions:$PYTHONPATH' % os.getcwd(),
'GRPC_PORT': str(port),
'FUNCTION_URI': 'file://%s/tests/functions/concat.py?handler=concat' % os.getcwd()
}
self.process = subprocess.Popen(self.command,
cwd=self.workingdir,
shell=True,
env=env,
preexec_fn=os.setsid,
)
channel = grpc.insecure_channel('localhost:%s' % port)
wait_until_channel_ready(channel)
self.stub = function.MessageFunctionStub(channel)
def generate_messages():
headers = {
'Content-Type': message.Message.HeaderValue(values=['application/json']),
'correlationId': message.Message.HeaderValue(values=[str(uuid.uuid4())])
}
messages = [
message.Message(payload='{"foo":"bar","hello":"world"}', headers=headers),
]
for msg in messages:
yield msg
responses = self.stub.Call(generate_messages())
for response in responses:
self.assertEquals('{"result": "foobarhelloworld"}', response.payload)
def test_accepts_application_json(self):
port = find_free_port()
env = {
'PYTHONPATH': '%s/tests/functions:$PYTHONPATH' % os.getcwd(),
'GRPC_PORT': str(port),
'FUNCTION_URI': 'file://%s/tests/functions/concat.py?handler=concat' % os.getcwd()
}
self.process = subprocess.Popen(self.command,
cwd=self.workingdir,
shell=True,
env=env,
preexec_fn=os.setsid,
)
channel = grpc.insecure_channel('localhost:%d' % port)
wait_until_channel_ready(channel)
self.stub = function.MessageFunctionStub(channel)
def generate_messages():
headers = {
'Content-Type': message.Message.HeaderValue(values=['application/json']),
'Accept': message.Message.HeaderValue(values=['application/json']),
'correlationId': message.Message.HeaderValue(values=[str(uuid.uuid4())])
}
messages = [
message.Message(payload='{"foo":"bar","hello":"world"}', headers=headers),
]
for msg in messages:
yield msg
responses = self.stub.Call(generate_messages())
for response in responses:
self.assertEquals('{"result": "foobarhelloworld"}', response.payload)
def test_accepts_text_plain(self):
port = find_free_port()
env = {
'PYTHONPATH': '%s/tests/functions:$PYTHONPATH' % os.getcwd(),
'GRPC_PORT': str(port),
'FUNCTION_URI': 'file://%s/tests/functions/concat.py?handler=concat' % os.getcwd()
}
self.process = subprocess.Popen(self.command,
cwd=self.workingdir,
shell=True,
env=env,
preexec_fn=os.setsid,
)
channel = grpc.insecure_channel('localhost:%d' % port)
wait_until_channel_ready(channel)
self.stub = function.MessageFunctionStub(channel)
def generate_messages():
headers = {
'Content-Type': message.Message.HeaderValue(values=['application/json']),
'Accept': message.Message.HeaderValue(values=['text/plain']),
'correlationId': message.Message.HeaderValue(values=[str(uuid.uuid4())])
}
messages = [
message.Message(payload='{"foo":"bar","hello":"world"}', headers=headers),
]
for msg in messages:
yield msg
responses = self.stub.Call(generate_messages())
for response in responses:
self.assertEquals('{"result": "foobarhelloworld"}', response.payload)
def test_accepts_not_supported(self):
port = find_free_port()
env = {
'PYTHONPATH': '%s/tests/functions:$PYTHONPATH' % os.getcwd(),
'GRPC_PORT': str(port),
'FUNCTION_URI': 'file://%s/tests/functions/concat.py?handler=concat' % os.getcwd()
}
self.process = subprocess.Popen(self.command,
cwd=self.workingdir,
shell=True,
env=env,
preexec_fn=os.setsid,
)
channel = grpc.insecure_channel('localhost:%s' % port)
wait_until_channel_ready(channel)
self.stub = function.MessageFunctionStub(channel)
def generate_messages():
headers = {
'Content-Type': message.Message.HeaderValue(values=['application/json']),
'Accept': message.Message.HeaderValue(values=['application/xml']),
'correlationId': message.Message.HeaderValue(values=[str(uuid.uuid4())])
}
messages = [
message.Message(payload='{"foo":"bar","hello":"world"}', headers=headers),
]
for msg in messages:
yield msg
try:
responses = self.stub.Call(generate_messages())
self.assertEquals(grpc._channel._Rendezvous, type(responses))
# TODO: Investigate error handling
# https://github.com/projectriff/python2-function-invoker/issues/5
except RuntimeError:
pass
import socket
from contextlib import closing
def find_free_port():
with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as s:
s.bind(('', 0))
return s.getsockname()[1]
def wait_until_channel_ready(channel):
max_tries = 100
ready = grpc.channel_ready_future(channel)
tries = 0
while not ready.done():
time.sleep(0.1)
tries = tries + 1
if tries == max_tries:
raise RuntimeError("cannot connect to gRPC server")
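The bottom-of-file find_free_port helper (note the imports placed just above it rather than at the top of the module) uses a standard trick: bind to port 0 and let the OS assign an unused ephemeral port. A standalone demo of the same pattern, independent of the test module:
import socket
from contextlib import closing

with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as s:
    s.bind(('', 0))            # port 0 asks the OS for any free port
    print(s.getsockname()[1])  # the port that was actually assigned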
[quality-signal metrics for the preceding file omitted]
132128145d049bb2b5ef02ce4715c274426c1980 | 75 bytes | py | Python | wot/__init__.py | faro1219/wot @ fbe5b63e64a54857d3aebd55d6313d06b39f74de | ["BSD-3-Clause"] | stars: n/a | issues: n/a | forks: n/a
# -*- coding: utf-8 -*-
from .dataset import *
from .dataset_util import *
[quality-signal metrics for the preceding file omitted]
133bc8c41c3079f100ec28e9bfa319a30702a14c | 7,554 bytes | py | Python | model-optimizer/extensions/middle/MulQuantizeFuse_test.py | giulio1979/dldt @ e7061922066ccefc54c8dae6e3215308ce9559e1 | ["Apache-2.0"] | stars: 1 (2021-07-30) | issues: n/a and forks: n/a (tracked under Dipet/dldt @ b2140c083a068a63591e8c2e9b5f6b240790519d)
"""
Copyright (C) 2018-2020 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
import numpy as np
from extensions.middle.MulFakeQuantizeFuse import MulFakeQuantizeFuse
from mo.middle.passes.eliminate_test import build_graph
from mo.utils.ir_engine.compare_graphs import compare_graphs
# The dictionary with nodes attributes used to build various graphs. A key is the name of the node and the value is the
# dictionary with node attributes.
nodes = {
'x': {'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'},
'x_data': {'value': None, 'shape': np.array([1, 64, 56, 56]), 'kind': 'data'},
'mul_const': {'op': 'Const', 'type': 'Const', 'kind': 'op', 'value': None, 'shape': None},
'mul_const_data': {'value': np.array([]), 'shape': np.array([]), 'kind': 'data'},
'mul': {'op': 'Mul', 'kind': 'op'},
'mul_data': {'value': np.array([]), 'shape': np.array([]), 'kind': 'data'},
'mi_i': {'op': 'Const', 'type': 'Const', 'kind': 'op', 'value': None, 'shape': None},
'mi_i_data': {'value': np.array([-10]), 'shape': np.array([]), 'kind': 'data'},
'ma_i': {'op': 'Const', 'type': 'Const', 'kind': 'op', 'value': None, 'shape': None},
'ma_i_data': {'value': np.array([10]), 'shape': np.array([]), 'kind': 'data'},
'mi_o': {'op': 'Const', 'type': 'Const', 'kind': 'op', 'value': None, 'shape': None},
'mi_o_data': {'value': np.array([]), 'shape': np.array([]), 'kind': 'data'},
'ma_o': {'op': 'Const', 'type': 'Const', 'kind': 'op', 'value': None, 'shape': None},
'ma_o_data': {'value': np.array([]), 'shape': np.array([]), 'kind': 'data'},
'quantize': {'type': 'FakeQuantize', 'kind': 'op', 'op': 'FakeQuantize', 'levels': 2, 'keep_in_IR': True},
'quantize_data': {'value': None, 'shape': np.array([1, 64, 56, 56]), 'kind': 'data'},
'output': {'op': 'Result', 'kind': 'op'},
}
edges = [
('x', 'x_data'),
('mul_const', 'mul_const_data'),
('mul', 'mul_data'),
('mi_i', 'mi_i_data'),
('ma_i', 'ma_i_data'),
('mi_o', 'mi_o_data'),
('ma_o', 'ma_o_data'),
('quantize', 'quantize_data'),
('quantize_data', 'output'),
('x_data', 'mul', {'in': 0}),
('mul_const_data', 'mul', {'in': 1}),
('mul_data', 'quantize', {'in': 0}),
('mi_i_data', 'quantize', {'in': 1}),
('ma_i_data', 'quantize', {'in': 2}),
('mi_o_data', 'quantize', {'in': 3}),
('ma_o_data', 'quantize', {'in': 4}),
]
edges_ref = [
('x', 'x_data'),
('mul_const', 'mul_const_data'),
('mul', 'mul_data'),
('mi_i', 'mi_i_data'),
('ma_i', 'ma_i_data'),
('mi_o', 'mi_o_data'),
('ma_o', 'ma_o_data'),
('quantize', 'quantize_data'),
('quantize_data', 'output'),
('x_data', 'quantize', {'in': 0}),
('mi_i_data', 'quantize', {'in': 1}),
('ma_i_data', 'quantize', {'in': 2}),
('mi_o_data', 'quantize', {'in': 3}),
('ma_o_data', 'quantize', {'in': 4}),
('x_data', 'mul', {'in': 0}),
('mul_const_data', 'mul', {'in': 1}),
]
class MulQuantizeFuseTest(unittest.TestCase):
def test_1(self):
graph = build_graph(nodes, edges, {
'mul_const_data': {'shape': np.array([3, 1, 1]), 'value': np.broadcast_to(np.array([1]), (3, 1, 1))},
'quantize_data': {'shape': np.array([2, 3, 4, 4])},
'mi_o_data': {'shape': np.array([1, 1, 1, 1]), 'value': np.broadcast_to(np.array([0]), (1, 1, 1, 1))},
'ma_o_data': {'shape': np.array([1, 3, 1, 1]), 'value': np.broadcast_to(np.array([1]), (1, 3, 1, 1))},
}, nodes_with_edges_only=True)
graph.stage = 'middle'
graph_ref = build_graph(nodes, edges_ref, {
'quantize_data': {'shape': np.array([2, 3, 4, 4])},
'mul_const_data': {'shape': np.array([3, 1, 1]), 'value': np.broadcast_to(np.array([1]), (3, 1, 1))},
'mi_o_data': {'shape': np.array([1, 1, 1, 1]), 'value': np.broadcast_to(np.array([0]), (1, 1, 1, 1))},
'ma_o_data': {'shape': np.array([1, 3, 1, 1]), 'value': np.broadcast_to(np.array([1]), (1, 3, 1, 1))},
'mi_i_data': {'shape': np.array([3, 1, 1]), 'value': np.broadcast_to(np.array([-10]), (3, 1, 1))},
'ma_i_data': {'shape': np.array([3, 1, 1]), 'value': np.broadcast_to(np.array([10]), (3, 1, 1))},
}, nodes_with_edges_only=True)
MulFakeQuantizeFuse().find_and_replace_pattern(graph)
(flag, resp) = compare_graphs(graph, graph_ref, 'output', check_op_attrs=True)
self.assertTrue(flag, resp)
def test_2(self):
graph = build_graph(nodes, edges, {
'mul_const_data': {'shape': np.array([1]), 'value': np.array([-1])},
'quantize_data': {'shape': np.array([2, 3, 4, 4])},
'mi_o_data': {'shape': np.array([1]), 'value': np.array([0])},
'ma_o_data': {'shape': np.array([1]), 'value': np.array([1])},
}, nodes_with_edges_only=True)
graph.stage = 'middle'
graph_ref = build_graph(nodes, edges_ref, {
'quantize_data': {'shape': np.array([2, 3, 4, 4])},
'mul_const_data': {'shape': np.array([1]), 'value': np.array([-1])},
'mi_o_data': {'shape': np.array([1]), 'value': np.array([1])},
'ma_o_data': {'shape': np.array([1]), 'value': np.array([0])},
'mi_i_data': {'shape': np.array([1]), 'value': np.array([10])},
'ma_i_data': {'shape': np.array([1]), 'value': np.array([-10])},
}, nodes_with_edges_only=True)
MulFakeQuantizeFuse().find_and_replace_pattern(graph)
(flag, resp) = compare_graphs(graph, graph_ref, 'output', check_op_attrs=True)
self.assertTrue(flag, resp)
def test_3(self):
graph = build_graph(nodes, edges, {
'mul_const_data': {'shape': np.array([3, 1, 1]), 'value': np.array([[[-1]], [[1]], [[-1]]])},
'quantize_data': {'shape': np.array([2, 3, 4, 4])},
'mi_o_data': {'shape': np.array([1, 1, 1, 1]), 'value': np.broadcast_to(np.array([0]), (1, 1, 1, 1))},
'ma_o_data': {'shape': np.array([1, 1, 1, 1]), 'value': np.broadcast_to(np.array([1]), (1, 1, 1, 1))},
}, nodes_with_edges_only=True)
graph.stage = 'middle'
graph_ref = build_graph(nodes, edges_ref, {
'quantize_data': {'shape': np.array([2, 3, 4, 4])},
'mul_const_data': {'shape': np.array([3, 1, 1]), 'value': np.array([[[-1]], [[1]], [[-1]]])},
'mi_o_data': {'shape': np.array([1, 3, 1, 1]), 'value': np.array([[[1]], [[0]], [[1]]])},
'ma_o_data': {'shape': np.array([1, 3, 1, 1]), 'value': np.array([[[0]], [[1]], [[0]]])},
'mi_i_data': {'shape': np.array([1, 3, 1, 1]), 'value': np.array([[[10]], [[-10]], [[10]]])},
'ma_i_data': {'shape': np.array([1, 3, 1, 1]), 'value': np.array([[[-10]], [[10]], [[-10]]])},
}, nodes_with_edges_only=True)
MulFakeQuantizeFuse().find_and_replace_pattern(graph)
(flag, resp) = compare_graphs(graph, graph_ref, 'output', check_op_attrs=True)
self.assertTrue(flag, resp)
[quality-signal metrics for the preceding file omitted]
13678837c4e767e0ef912669c109f4e31498dc5e | 103 bytes | py | Python | firestore_collections/client.py | LasseRegin/firestore-collections @ 9a93f171f665c74078e36363e8fa3493e31e8f88 | ["MIT"] | stars: 2 (2020-12-02 to 2020-12-03) | issues: n/a | forks: n/a
import os
from google.cloud.firestore import Client
client = Client(project=os.getenv('PROJECT_ID'))
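A hedged usage sketch for the module-level client above; the collection, document, and field names are hypothetical, not from this repo:
snapshot = client.collection('users').document('alice').get()
if snapshot.exists:
    print(snapshot.to_dict())  # the document's fields as a plain dict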
[quality-signal metrics for the preceding file omitted]
136f2fb3a15e5fcbaa9071150864ead93711f3e2 | 148 bytes | py | Python | my_education/urls.py | Williano/CV @ b2954ac1753d7f31461c59cd7fe24ea13405cddf | ["MIT"] | stars: n/a | issues: 3 (2020-02-11 to 2021-06-10) | forks: 1 (2018-08-06)
from django.urls import path
from . import views
app_name = 'my_education'
urlpatterns = [
path('', views.my_education, name='my_education')
]
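An app urlconf like this is typically mounted from the project's root urls.py roughly as follows (illustrative; the project-level file is not part of this record):
from django.urls import include, path

urlpatterns = [
    path('education/', include('my_education.urls')),  # app_name above supplies the namespace
]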
[quality-signal metrics for the preceding file omitted]
1380a42abeaa93815e95399ed518e4ec43793750 | 181 bytes | py | Python | tests/test_paper_view.py | refraction-ray/myarxiv-app @ c5850ea143915747cfd0b86ea08d77ac85cca943 | ["MIT"] | stars: 2 (2021-07-21 to 2021-08-07) | issues: n/a | forks: n/a
def test_index(client):
r = client.get("/")
assert r.status_code == 200
def test_paper_page(client):
r = client.get("/paper/1812.35598")
assert r.status_code == 200
[quality-signal metrics for the preceding file omitted]
13a404deddefde695997acff7817a6ac88b9e8b3 | 51 bytes | py | Python | terrascript/cloudscale/d.py | amlodzianowski/python-terrascript @ 1111affe6cd30d9b8b7bc74ae4e27590f7d4dc49 | ["BSD-2-Clause"] | stars: n/a | issues: n/a | forks: n/a
# terrascript/cloudscale/d.py
import terrascript
[quality-signal metrics for the preceding file omitted]
13b774d8773441547bf6571130cebcd2854b6dc9 | 44 bytes | py | Python | notebooks/_solutions/pandas_04_time_series_data9.py | jonasvdd/DS-python-data-analysis @ 835226f562ee0b0631d70e48a17c4526ff58a538 | ["BSD-3-Clause"] | stars: 65 (2017-03-21 to 2022-02-01) | issues: 100 (2016-12-15 to 2022-03-07) | forks: 52 (2016-12-19 to 2022-02-19, via jorisvandenbossche/ICES-python-data @ 63864947657f37cb26cb4e2dcd67ff106dffe9cd)
data['2013':'2013'].mean().plot(kind='barh')
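The solution line slices one calendar year via partial string indexing on the DatetimeIndex, averages each column, and draws horizontal bars. A self-contained version on synthetic data (the course's `data` frame and its real column names are not shown here):
import numpy as np
import pandas as pd

idx = pd.date_range("2012-01-01", "2014-12-31", freq="D")
data = pd.DataFrame(np.random.rand(len(idx), 3), index=idx, columns=["A", "B", "C"])
data['2013':'2013'].mean().plot(kind='barh')  # per-column 2013 means; plotting requires matplotlib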
[quality-signal metrics for the preceding file omitted]
13df6e33d1e722587140b9671351f9297ebbbafd | 162 bytes | py | Python | flask_oauthlib/contrib/client/signals.py | PCMan/flask-oauthlib @ 3735210211ac0e50c4d32b887bbd61722dd175c7 | ["BSD-3-Clause"] | stars: 1,292 (2015-01-04 to 2022-03-23) | issues: 217 (2015-01-05 to 2020-09-05) | forks: 496 (2015-01-04 to 2022-03-19)
from flask.signals import Namespace
__all__ = ['request_token_fetched']
_signals = Namespace()
request_token_fetched = _signals.signal('request-token-fetched')
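Because flask.signals.Namespace is backed by blinker, consumers subscribe with .connect(); the keyword arguments this particular signal is sent with are an assumption here.
@request_token_fetched.connect
def on_request_token(sender, **extra):
    # `extra` presumably carries the fetched token payload.
    print('request token fetched by', sender, extra)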
[quality-signal metrics for the preceding file omitted]
13e63fa492e9906b2dcaebabfe9efc2f8fac1942 | 229 bytes | py | Python | example/sample/forms.py | ebspeter/django-editorjs-field @ 87016b6b14dc960b5e89bfce6fb3219e1e955e68 | ["Apache-2.0"] | stars: 10 (2019-06-08 to 2021-02-22) | issues: 8 (2019-12-04 to 2022-02-10) | forks: 7 (2020-02-08 to 2020-07-31)
from django import forms
from editorjs_field.widgets import EditorJsWidget
class ArticleEditorForm(forms.Form):
title = forms.CharField(label='Title')
document = forms.CharField(label='Document', widget=EditorJsWidget)
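A hypothetical view wiring for the form above (a sketch; the repo's actual views are not part of this record):
from django.shortcuts import render

def article_editor(request):
    form = ArticleEditorForm(request.POST or None)
    if request.method == 'POST' and form.is_valid():
        pass  # e.g. persist form.cleaned_data['document'] somewhere
    return render(request, 'sample/editor.html', {'form': form})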
[quality-signal metrics for the preceding file omitted]
b9373210f973ac710bf99d9222271c161d3c16f4 | 99,268 bytes | py | Python | tests/compute/test_transform.py | ayasar70/dgl @ 2f45f4d2a94fad094c4dd388507b0e787c77062f | ["Apache-2.0"] | stars: n/a | issues: n/a | forks: n/a
##
# Copyright 2019-2021 Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from scipy import sparse as spsp
import networkx as nx
import numpy as np
import os
import dgl
import dgl.function as fn
import dgl.partition
import backend as F
import unittest
import math
from utils import parametrize_dtype
from test_heterograph import create_test_heterograph3, create_test_heterograph4, create_test_heterograph5
D = 5
# line graph related
def test_line_graph1():
N = 5
G = dgl.DGLGraph(nx.star_graph(N)).to(F.ctx())
G.edata['h'] = F.randn((2 * N, D))
L = G.line_graph(shared=True)
assert L.number_of_nodes() == 2 * N
assert F.allclose(L.ndata['h'], G.edata['h'])
assert G.device == F.ctx()
@parametrize_dtype
def test_line_graph2(idtype):
g = dgl.heterograph({
('user', 'follows', 'user'): ([0, 1, 1, 2, 2],[2, 0, 2, 0, 1])
}, idtype=idtype)
lg = dgl.line_graph(g)
assert lg.number_of_nodes() == 5
assert lg.number_of_edges() == 8
row, col = lg.edges()
assert np.array_equal(F.asnumpy(row),
np.array([0, 0, 1, 2, 2, 3, 4, 4]))
assert np.array_equal(F.asnumpy(col),
np.array([3, 4, 0, 3, 4, 0, 1, 2]))
lg = dgl.line_graph(g, backtracking=False)
assert lg.number_of_nodes() == 5
assert lg.number_of_edges() == 4
row, col = lg.edges()
assert np.array_equal(F.asnumpy(row),
np.array([0, 1, 2, 4]))
assert np.array_equal(F.asnumpy(col),
np.array([4, 0, 3, 1]))
g = dgl.heterograph({
('user', 'follows', 'user'): ([0, 1, 1, 2, 2],[2, 0, 2, 0, 1])
}, idtype=idtype).formats('csr')
lg = dgl.line_graph(g)
assert lg.number_of_nodes() == 5
assert lg.number_of_edges() == 8
row, col = lg.edges()
assert np.array_equal(F.asnumpy(row),
np.array([0, 0, 1, 2, 2, 3, 4, 4]))
assert np.array_equal(F.asnumpy(col),
np.array([3, 4, 0, 3, 4, 0, 1, 2]))
g = dgl.heterograph({
('user', 'follows', 'user'): ([0, 1, 1, 2, 2],[2, 0, 2, 0, 1])
}, idtype=idtype).formats('csc')
lg = dgl.line_graph(g)
assert lg.number_of_nodes() == 5
assert lg.number_of_edges() == 8
row, col, eid = lg.edges('all')
row = F.asnumpy(row)
col = F.asnumpy(col)
eid = F.asnumpy(eid).astype(int)
order = np.argsort(eid)
assert np.array_equal(row[order],
np.array([0, 0, 1, 2, 2, 3, 4, 4]))
assert np.array_equal(col[order],
np.array([3, 4, 0, 3, 4, 0, 1, 2]))
def test_no_backtracking():
N = 5
G = dgl.DGLGraph(nx.star_graph(N))
L = G.line_graph(backtracking=False)
assert L.number_of_nodes() == 2 * N
for i in range(1, N):
e1 = G.edge_id(0, i)
e2 = G.edge_id(i, 0)
assert not L.has_edge_between(e1, e2)
assert not L.has_edge_between(e2, e1)
# reverse graph related
@parametrize_dtype
def test_reverse(idtype):
g = dgl.DGLGraph()
g = g.astype(idtype).to(F.ctx())
g.add_nodes(5)
    # The graph need not be completely connected.
g.add_edges([0, 1, 2], [1, 2, 1])
g.ndata['h'] = F.tensor([[0.], [1.], [2.], [3.], [4.]])
g.edata['h'] = F.tensor([[5.], [6.], [7.]])
rg = g.reverse()
assert g.is_multigraph == rg.is_multigraph
assert g.number_of_nodes() == rg.number_of_nodes()
assert g.number_of_edges() == rg.number_of_edges()
assert F.allclose(F.astype(rg.has_edges_between(
[1, 2, 1], [0, 1, 2]), F.float32), F.ones((3,)))
assert g.edge_id(0, 1) == rg.edge_id(1, 0)
assert g.edge_id(1, 2) == rg.edge_id(2, 1)
assert g.edge_id(2, 1) == rg.edge_id(1, 2)
# test dgl.reverse
# test homogeneous graph
g = dgl.graph((F.tensor([0, 1, 2]), F.tensor([1, 2, 0])))
g.ndata['h'] = F.tensor([[0.], [1.], [2.]])
g.edata['h'] = F.tensor([[3.], [4.], [5.]])
g_r = dgl.reverse(g)
assert g.number_of_nodes() == g_r.number_of_nodes()
assert g.number_of_edges() == g_r.number_of_edges()
u_g, v_g, eids_g = g.all_edges(form='all')
u_rg, v_rg, eids_rg = g_r.all_edges(form='all')
assert F.array_equal(u_g, v_rg)
assert F.array_equal(v_g, u_rg)
assert F.array_equal(eids_g, eids_rg)
assert F.array_equal(g.ndata['h'], g_r.ndata['h'])
assert len(g_r.edata) == 0
    # without shared ndata
g_r = dgl.reverse(g, copy_ndata=False)
assert g.number_of_nodes() == g_r.number_of_nodes()
assert g.number_of_edges() == g_r.number_of_edges()
assert len(g_r.ndata) == 0
assert len(g_r.edata) == 0
# with share ndata and edata
g_r = dgl.reverse(g, copy_ndata=True, copy_edata=True)
assert g.number_of_nodes() == g_r.number_of_nodes()
assert g.number_of_edges() == g_r.number_of_edges()
assert F.array_equal(g.ndata['h'], g_r.ndata['h'])
assert F.array_equal(g.edata['h'], g_r.edata['h'])
# add new node feature to g_r
g_r.ndata['hh'] = F.tensor([0, 1, 2])
assert ('hh' in g.ndata) is False
assert ('hh' in g_r.ndata) is True
# add new edge feature to g_r
g_r.edata['hh'] = F.tensor([0, 1, 2])
assert ('hh' in g.edata) is False
assert ('hh' in g_r.edata) is True
# test heterogeneous graph
g = dgl.heterograph({
('user', 'follows', 'user'): ([0, 1, 2, 4, 3 ,1, 3], [1, 2, 3, 2, 0, 0, 1]),
('user', 'plays', 'game'): ([0, 0, 2, 3, 3, 4, 1], [1, 0, 1, 0, 1, 0, 0]),
('developer', 'develops', 'game'): ([0, 1, 1, 2], [0, 0, 1, 1])},
idtype=idtype, device=F.ctx())
g.nodes['user'].data['h'] = F.tensor([0, 1, 2, 3, 4])
g.nodes['user'].data['hh'] = F.tensor([1, 1, 1, 1, 1])
g.nodes['game'].data['h'] = F.tensor([0, 1])
g.edges['follows'].data['h'] = F.tensor([0, 1, 2, 4, 3 ,1, 3])
g.edges['follows'].data['hh'] = F.tensor([1, 2, 3, 2, 0, 0, 1])
g_r = dgl.reverse(g)
for etype_g, etype_gr in zip(g.canonical_etypes, g_r.canonical_etypes):
assert etype_g[0] == etype_gr[2]
assert etype_g[1] == etype_gr[1]
assert etype_g[2] == etype_gr[0]
assert g.number_of_edges(etype_g) == g_r.number_of_edges(etype_gr)
for ntype in g.ntypes:
assert g.number_of_nodes(ntype) == g_r.number_of_nodes(ntype)
assert F.array_equal(g.nodes['user'].data['h'], g_r.nodes['user'].data['h'])
assert F.array_equal(g.nodes['user'].data['hh'], g_r.nodes['user'].data['hh'])
assert F.array_equal(g.nodes['game'].data['h'], g_r.nodes['game'].data['h'])
assert len(g_r.edges['follows'].data) == 0
u_g, v_g, eids_g = g.all_edges(form='all', etype=('user', 'follows', 'user'))
u_rg, v_rg, eids_rg = g_r.all_edges(form='all', etype=('user', 'follows', 'user'))
assert F.array_equal(u_g, v_rg)
assert F.array_equal(v_g, u_rg)
assert F.array_equal(eids_g, eids_rg)
u_g, v_g, eids_g = g.all_edges(form='all', etype=('user', 'plays', 'game'))
u_rg, v_rg, eids_rg = g_r.all_edges(form='all', etype=('game', 'plays', 'user'))
assert F.array_equal(u_g, v_rg)
assert F.array_equal(v_g, u_rg)
assert F.array_equal(eids_g, eids_rg)
u_g, v_g, eids_g = g.all_edges(form='all', etype=('developer', 'develops', 'game'))
u_rg, v_rg, eids_rg = g_r.all_edges(form='all', etype=('game', 'develops', 'developer'))
assert F.array_equal(u_g, v_rg)
assert F.array_equal(v_g, u_rg)
assert F.array_equal(eids_g, eids_rg)
    # without shared ndata
g_r = dgl.reverse(g, copy_ndata=False)
for etype_g, etype_gr in zip(g.canonical_etypes, g_r.canonical_etypes):
assert etype_g[0] == etype_gr[2]
assert etype_g[1] == etype_gr[1]
assert etype_g[2] == etype_gr[0]
assert g.number_of_edges(etype_g) == g_r.number_of_edges(etype_gr)
for ntype in g.ntypes:
assert g.number_of_nodes(ntype) == g_r.number_of_nodes(ntype)
assert len(g_r.nodes['user'].data) == 0
assert len(g_r.nodes['game'].data) == 0
g_r = dgl.reverse(g, copy_ndata=True, copy_edata=True)
print(g_r)
for etype_g, etype_gr in zip(g.canonical_etypes, g_r.canonical_etypes):
assert etype_g[0] == etype_gr[2]
assert etype_g[1] == etype_gr[1]
assert etype_g[2] == etype_gr[0]
assert g.number_of_edges(etype_g) == g_r.number_of_edges(etype_gr)
assert F.array_equal(g.edges['follows'].data['h'], g_r.edges['follows'].data['h'])
assert F.array_equal(g.edges['follows'].data['hh'], g_r.edges['follows'].data['hh'])
# add new node feature to g_r
g_r.nodes['user'].data['hhh'] = F.tensor([0, 1, 2, 3, 4])
assert ('hhh' in g.nodes['user'].data) is False
assert ('hhh' in g_r.nodes['user'].data) is True
# add new edge feature to g_r
g_r.edges['follows'].data['hhh'] = F.tensor([1, 2, 3, 2, 0, 0, 1])
assert ('hhh' in g.edges['follows'].data) is False
assert ('hhh' in g_r.edges['follows'].data) is True
@parametrize_dtype
def test_reverse_shared_frames(idtype):
g = dgl.DGLGraph()
g = g.astype(idtype).to(F.ctx())
g.add_nodes(3)
g.add_edges([0, 1, 2], [1, 2, 1])
g.ndata['h'] = F.tensor([[0.], [1.], [2.]])
g.edata['h'] = F.tensor([[3.], [4.], [5.]])
rg = g.reverse(share_ndata=True, share_edata=True)
assert F.allclose(g.ndata['h'], rg.ndata['h'])
assert F.allclose(g.edata['h'], rg.edata['h'])
assert F.allclose(g.edges[[0, 2], [1, 1]].data['h'],
rg.edges[[1, 1], [0, 2]].data['h'])
@unittest.skipIf(F._default_context_str == 'gpu', reason="GPU not implemented")
def test_to_bidirected():
# homogeneous graph
elist = [(0, 0), (0, 1), (1, 0),
(1, 1), (2, 1), (2, 2)]
num_edges = 7
g = dgl.graph(tuple(zip(*elist)))
elist.append((1, 2))
elist = set(elist)
big = dgl.to_bidirected(g)
assert big.number_of_edges() == num_edges
src, dst = big.edges()
eset = set(zip(list(F.asnumpy(src)), list(F.asnumpy(dst))))
assert eset == set(elist)
# heterogeneous graph
elist1 = [(0, 0), (0, 1), (1, 0),
(1, 1), (2, 1), (2, 2)]
elist2 = [(0, 0), (0, 1)]
g = dgl.heterograph({
('user', 'wins', 'user'): tuple(zip(*elist1)),
('user', 'follows', 'user'): tuple(zip(*elist2))
})
g.nodes['user'].data['h'] = F.ones((3, 1))
elist1.append((1, 2))
elist1 = set(elist1)
elist2.append((1, 0))
elist2 = set(elist2)
big = dgl.to_bidirected(g)
assert big.number_of_edges('wins') == 7
assert big.number_of_edges('follows') == 3
src, dst = big.edges(etype='wins')
eset = set(zip(list(F.asnumpy(src)), list(F.asnumpy(dst))))
assert eset == set(elist1)
src, dst = big.edges(etype='follows')
eset = set(zip(list(F.asnumpy(src)), list(F.asnumpy(dst))))
assert eset == set(elist2)
big = dgl.to_bidirected(g, copy_ndata=True)
assert F.array_equal(g.nodes['user'].data['h'], big.nodes['user'].data['h'])
def test_add_reverse_edges():
# homogeneous graph
g = dgl.graph((F.tensor([0, 1, 3, 1]), F.tensor([1, 2, 0, 2])))
g.ndata['h'] = F.tensor([[0.], [1.], [2.], [1.]])
g.edata['h'] = F.tensor([[3.], [4.], [5.], [6.]])
bg = dgl.add_reverse_edges(g, copy_ndata=True, copy_edata=True)
u, v = g.edges()
ub, vb = bg.edges()
assert F.array_equal(F.cat([u, v], dim=0), ub)
assert F.array_equal(F.cat([v, u], dim=0), vb)
assert F.array_equal(g.ndata['h'], bg.ndata['h'])
assert F.array_equal(F.cat([g.edata['h'], g.edata['h']], dim=0), bg.edata['h'])
bg.ndata['hh'] = F.tensor([[0.], [1.], [2.], [1.]])
assert ('hh' in g.ndata) is False
bg.edata['hh'] = F.tensor([[0.], [1.], [2.], [1.], [0.], [1.], [2.], [1.]])
assert ('hh' in g.edata) is False
    # do not share ndata and edata
bg = dgl.add_reverse_edges(g, copy_ndata=False, copy_edata=False)
ub, vb = bg.edges()
assert F.array_equal(F.cat([u, v], dim=0), ub)
assert F.array_equal(F.cat([v, u], dim=0), vb)
assert ('h' in bg.ndata) is False
assert ('h' in bg.edata) is False
# zero edge graph
g = dgl.graph(([], []))
bg = dgl.add_reverse_edges(g, copy_ndata=True, copy_edata=True, exclude_self=False)
# heterogeneous graph
g = dgl.heterograph({
('user', 'wins', 'user'): (F.tensor([0, 2, 0, 2, 2]), F.tensor([1, 1, 2, 1, 0])),
('user', 'plays', 'game'): (F.tensor([1, 2, 1]), F.tensor([2, 1, 1])),
('user', 'follows', 'user'): (F.tensor([1, 2, 1]), F.tensor([0, 0, 0]))
})
g.nodes['game'].data['hv'] = F.ones((3, 1))
g.nodes['user'].data['hv'] = F.ones((3, 1))
g.edges['wins'].data['h'] = F.tensor([0, 1, 2, 3, 4])
bg = dgl.add_reverse_edges(g, copy_ndata=True, copy_edata=True, ignore_bipartite=True)
assert F.array_equal(g.nodes['game'].data['hv'], bg.nodes['game'].data['hv'])
assert F.array_equal(g.nodes['user'].data['hv'], bg.nodes['user'].data['hv'])
u, v = g.all_edges(order='eid', etype=('user', 'wins', 'user'))
ub, vb = bg.all_edges(order='eid', etype=('user', 'wins', 'user'))
assert F.array_equal(F.cat([u, v], dim=0), ub)
assert F.array_equal(F.cat([v, u], dim=0), vb)
assert F.array_equal(F.cat([g.edges['wins'].data['h'], g.edges['wins'].data['h']], dim=0),
bg.edges['wins'].data['h'])
u, v = g.all_edges(order='eid', etype=('user', 'follows', 'user'))
ub, vb = bg.all_edges(order='eid', etype=('user', 'follows', 'user'))
assert F.array_equal(F.cat([u, v], dim=0), ub)
assert F.array_equal(F.cat([v, u], dim=0), vb)
u, v = g.all_edges(order='eid', etype=('user', 'plays', 'game'))
ub, vb = bg.all_edges(order='eid', etype=('user', 'plays', 'game'))
assert F.array_equal(u, ub)
assert F.array_equal(v, vb)
assert set(bg.edges['plays'].data.keys()) == {dgl.EID}
assert set(bg.edges['follows'].data.keys()) == {dgl.EID}
    # do not share ndata and edata
bg = dgl.add_reverse_edges(g, copy_ndata=False, copy_edata=False, ignore_bipartite=True)
assert len(bg.edges['wins'].data) == 0
assert len(bg.edges['plays'].data) == 0
assert len(bg.edges['follows'].data) == 0
assert len(bg.nodes['game'].data) == 0
assert len(bg.nodes['user'].data) == 0
u, v = g.all_edges(order='eid', etype=('user', 'wins', 'user'))
ub, vb = bg.all_edges(order='eid', etype=('user', 'wins', 'user'))
assert F.array_equal(F.cat([u, v], dim=0), ub)
assert F.array_equal(F.cat([v, u], dim=0), vb)
u, v = g.all_edges(order='eid', etype=('user', 'follows', 'user'))
ub, vb = bg.all_edges(order='eid', etype=('user', 'follows', 'user'))
assert F.array_equal(F.cat([u, v], dim=0), ub)
assert F.array_equal(F.cat([v, u], dim=0), vb)
u, v = g.all_edges(order='eid', etype=('user', 'plays', 'game'))
ub, vb = bg.all_edges(order='eid', etype=('user', 'plays', 'game'))
assert F.array_equal(u, ub)
assert F.array_equal(v, vb)
# test the case when some nodes have zero degree
# homogeneous graph
g = dgl.graph((F.tensor([0, 1, 3, 1]), F.tensor([1, 2, 0, 2])), num_nodes=6)
g.ndata['h'] = F.tensor([[0.], [1.], [2.], [1.], [1.], [1.]])
g.edata['h'] = F.tensor([[3.], [4.], [5.], [6.]])
bg = dgl.add_reverse_edges(g, copy_ndata=True, copy_edata=True)
assert g.number_of_nodes() == bg.number_of_nodes()
assert F.array_equal(g.ndata['h'], bg.ndata['h'])
assert F.array_equal(F.cat([g.edata['h'], g.edata['h']], dim=0), bg.edata['h'])
# heterogeneous graph
g = dgl.heterograph({
('user', 'wins', 'user'): (F.tensor([0, 2, 0, 2, 2]), F.tensor([1, 1, 2, 1, 0])),
('user', 'plays', 'game'): (F.tensor([1, 2, 1]), F.tensor([2, 1, 1])),
('user', 'follows', 'user'): (F.tensor([1, 2, 1]), F.tensor([0, 0, 0]))},
num_nodes_dict={
'user': 5,
'game': 3
})
g.nodes['game'].data['hv'] = F.ones((3, 1))
g.nodes['user'].data['hv'] = F.ones((5, 1))
g.edges['wins'].data['h'] = F.tensor([0, 1, 2, 3, 4])
bg = dgl.add_reverse_edges(g, copy_ndata=True, copy_edata=True, ignore_bipartite=True)
assert g.number_of_nodes('user') == bg.number_of_nodes('user')
assert g.number_of_nodes('game') == bg.number_of_nodes('game')
assert F.array_equal(g.nodes['game'].data['hv'], bg.nodes['game'].data['hv'])
assert F.array_equal(g.nodes['user'].data['hv'], bg.nodes['user'].data['hv'])
assert F.array_equal(F.cat([g.edges['wins'].data['h'], g.edges['wins'].data['h']], dim=0),
bg.edges['wins'].data['h'])
# test exclude_self
g = dgl.heterograph({
('A', 'r1', 'A'): (F.tensor([0, 0, 1, 1]), F.tensor([0, 1, 1, 2])),
('A', 'r2', 'A'): (F.tensor([0, 1]), F.tensor([1, 2]))
})
g.edges['r1'].data['h'] = F.tensor([0, 1, 2, 3])
rg = dgl.add_reverse_edges(g, copy_edata=True, exclude_self=True)
assert rg.num_edges('r1') == 6
assert rg.num_edges('r2') == 4
assert F.array_equal(rg.edges['r1'].data['h'], F.tensor([0, 1, 2, 3, 1, 3]))
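def _sketch_add_reverse_edges():
    # Illustrative sketch, not invoked by the test suite: a minimal view of
    # dgl.add_reverse_edges, assuming only the behavior asserted above. The
    # result keeps the original edges first (same edge IDs), followed by one
    # reversed copy of each edge.
    g = dgl.graph((F.tensor([0, 1]), F.tensor([1, 2])))
    bg = dgl.add_reverse_edges(g)
    ub, vb = bg.all_edges(order='eid')
    assert F.array_equal(ub, F.tensor([0, 1, 1, 2]))
    assert F.array_equal(vb, F.tensor([1, 2, 0, 1]))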
@unittest.skipIf(F._default_context_str == 'gpu', reason="GPU not implemented")
def test_simple_graph():
elist = [(0, 1), (0, 2), (1, 2), (0, 1)]
g = dgl.DGLGraph(elist, readonly=True)
assert g.is_multigraph
sg = dgl.to_simple_graph(g)
assert not sg.is_multigraph
assert sg.number_of_edges() == 3
src, dst = sg.edges()
eset = set(zip(list(F.asnumpy(src)), list(F.asnumpy(dst))))
assert eset == set(elist)
@unittest.skipIf(F._default_context_str == 'gpu', reason="GPU not implemented")
def _test_bidirected_graph():
def _test(in_readonly, out_readonly):
elist = [(0, 0), (0, 1), (1, 0),
(1, 1), (2, 1), (2, 2)]
num_edges = 7
g = dgl.DGLGraph(elist, readonly=in_readonly)
elist.append((1, 2))
elist = set(elist)
big = dgl.to_bidirected_stale(g, out_readonly)
assert big.number_of_edges() == num_edges
src, dst = big.edges()
eset = set(zip(list(F.asnumpy(src)), list(F.asnumpy(dst))))
assert eset == set(elist)
_test(True, True)
_test(True, False)
_test(False, True)
_test(False, False)
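def _sketch_to_bidirected():
    # Illustrative sketch, not invoked by the test suite: the public
    # dgl.to_bidirected returns a simple graph that contains edge (v, u)
    # for every edge (u, v) of the input.
    g = dgl.graph((F.tensor([0, 1]), F.tensor([1, 2])))
    bg = dgl.to_bidirected(g)
    src, dst = bg.edges()
    eset = set(zip(list(F.asnumpy(src)), list(F.asnumpy(dst))))
    assert eset == {(0, 1), (1, 0), (1, 2), (2, 1)}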
@unittest.skipIf(F._default_context_str == 'gpu', reason="GPU not implemented")
def test_khop_graph():
N = 20
feat = F.randn((N, 5))
def _test(g):
for k in range(4):
g_k = dgl.khop_graph(g, k)
            # use the original graph to do message passing k times.
g.ndata['h'] = feat
for _ in range(k):
g.update_all(fn.copy_u('h', 'm'), fn.sum('m', 'h'))
h_0 = g.ndata.pop('h')
            # use the k-hop graph to do message passing once.
g_k.ndata['h'] = feat
g_k.update_all(fn.copy_u('h', 'm'), fn.sum('m', 'h'))
h_1 = g_k.ndata.pop('h')
assert F.allclose(h_0, h_1, rtol=1e-3, atol=1e-3)
# Test for random undirected graphs
g = dgl.DGLGraph(nx.erdos_renyi_graph(N, 0.3))
_test(g)
# Test for random directed graphs
g = dgl.DGLGraph(nx.erdos_renyi_graph(N, 0.3, directed=True))
_test(g)
@unittest.skipIf(F._default_context_str == 'gpu', reason="GPU not implemented")
def test_khop_adj():
N = 20
feat = F.randn((N, 5))
g = dgl.DGLGraph(nx.erdos_renyi_graph(N, 0.3))
for k in range(3):
adj = F.tensor(F.swapaxes(dgl.khop_adj(g, k), 0, 1))
        # use the original graph to do message passing k times.
g.ndata['h'] = feat
for _ in range(k):
g.update_all(fn.copy_u('h', 'm'), fn.sum('m', 'h'))
h_0 = g.ndata.pop('h')
        # use the k-hop adjacency matrix to do message passing once.
h_1 = F.matmul(adj, feat)
assert F.allclose(h_0, h_1, rtol=1e-3, atol=1e-3)
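def _sketch_khop_adj_matrix_power():
    # Illustrative sketch, not invoked by the test suite: assuming
    # dgl.khop_adj returns the dense k-hop adjacency as exercised above,
    # khop_adj(g, k) should match the k-th matrix power of the 1-hop
    # adjacency. Checked here on a directed 3-cycle.
    g = dgl.graph((F.tensor([0, 1, 2]), F.tensor([1, 2, 0])))
    A1 = F.asnumpy(dgl.khop_adj(g, 1))
    A2 = F.asnumpy(dgl.khop_adj(g, 2))
    assert np.allclose(A2, A1 @ A1)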
@unittest.skipIf(F._default_context_str == 'gpu', reason="GPU not implemented")
def test_laplacian_lambda_max():
N = 20
eps = 1e-6
# test DGLGraph
g = dgl.DGLGraph(nx.erdos_renyi_graph(N, 0.3))
l_max = dgl.laplacian_lambda_max(g)
assert (l_max[0] < 2 + eps)
# test batched DGLGraph
'''
N_arr = [20, 30, 10, 12]
bg = dgl.batch([
dgl.DGLGraph(nx.erdos_renyi_graph(N, 0.3))
for N in N_arr
])
l_max_arr = dgl.laplacian_lambda_max(bg)
assert len(l_max_arr) == len(N_arr)
for l_max in l_max_arr:
assert l_max < 2 + eps
'''
def create_large_graph(num_nodes, idtype=F.int64):
row = np.random.choice(num_nodes, num_nodes * 10)
col = np.random.choice(num_nodes, num_nodes * 10)
spm = spsp.coo_matrix((np.ones(len(row)), (row, col)))
spm.sum_duplicates()
return dgl.from_scipy(spm, idtype=idtype)
def get_nodeflow(g, node_ids, num_layers):
batch_size = len(node_ids)
expand_factor = g.number_of_nodes()
sampler = dgl.contrib.sampling.NeighborSampler(g, batch_size,
expand_factor=expand_factor, num_hops=num_layers,
seed_nodes=node_ids)
return next(iter(sampler))
# Disabled since everything will be on heterogeneous graphs
@unittest.skipIf(F._default_context_str == 'gpu', reason="GPU not implemented")
def test_partition_with_halo():
g = create_large_graph(1000)
node_part = np.random.choice(4, g.number_of_nodes())
subgs, _, _ = dgl.transforms.partition_graph_with_halo(g, node_part, 2, reshuffle=True)
for part_id, subg in subgs.items():
node_ids = np.nonzero(node_part == part_id)[0]
lnode_ids = np.nonzero(F.asnumpy(subg.ndata['inner_node']))[0]
orig_nids = F.asnumpy(subg.ndata['orig_id'])[lnode_ids]
assert np.all(np.sort(orig_nids) == node_ids)
assert np.all(F.asnumpy(subg.in_degrees(lnode_ids)) == F.asnumpy(g.in_degrees(orig_nids)))
assert np.all(F.asnumpy(subg.out_degrees(lnode_ids)) == F.asnumpy(g.out_degrees(orig_nids)))
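def _sketch_partition_with_halo():
    # Illustrative sketch, not invoked by the test suite: assuming the call
    # signature used above, partition_graph_with_halo splits a graph by a
    # user-supplied node assignment and adds a halo of neighbor nodes to
    # each part; 'inner_node' marks the nodes that truly belong to the part.
    g = create_large_graph(100)
    node_part = np.random.choice(2, g.number_of_nodes())
    subgs, _, _ = dgl.transforms.partition_graph_with_halo(g, node_part, 1, reshuffle=True)
    for part_id, subg in subgs.items():
        num_inner = F.asnumpy(subg.ndata['inner_node']).sum()
        assert num_inner == (node_part == part_id).sum()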
@unittest.skipIf(os.name == 'nt', reason='Do not support windows yet')
@unittest.skipIf(F._default_context_str == 'gpu', reason="METIS doesn't support GPU")
@parametrize_dtype
def test_metis_partition(idtype):
# TODO(zhengda) Metis fails to partition a small graph.
g = create_large_graph(1000, idtype=idtype)
if idtype == F.int64:
check_metis_partition(g, 0)
check_metis_partition(g, 1)
check_metis_partition(g, 2)
check_metis_partition_with_constraint(g)
else:
assert_fail = False
try:
check_metis_partition(g, 1)
except:
assert_fail = True
assert assert_fail
def check_metis_partition_with_constraint(g):
ntypes = np.zeros((g.number_of_nodes(),), dtype=np.int32)
ntypes[0:int(g.number_of_nodes()/4)] = 1
ntypes[int(g.number_of_nodes()*3/4):] = 2
subgs = dgl.transforms.metis_partition(g, 4, extra_cached_hops=1, balance_ntypes=ntypes)
if subgs is not None:
for i in subgs:
subg = subgs[i]
parent_nids = F.asnumpy(subg.ndata[dgl.NID])
sub_ntypes = ntypes[parent_nids]
print('type0:', np.sum(sub_ntypes == 0))
print('type1:', np.sum(sub_ntypes == 1))
print('type2:', np.sum(sub_ntypes == 2))
subgs = dgl.transforms.metis_partition(g, 4, extra_cached_hops=1,
balance_ntypes=ntypes, balance_edges=True)
if subgs is not None:
for i in subgs:
subg = subgs[i]
parent_nids = F.asnumpy(subg.ndata[dgl.NID])
sub_ntypes = ntypes[parent_nids]
print('type0:', np.sum(sub_ntypes == 0))
print('type1:', np.sum(sub_ntypes == 1))
print('type2:', np.sum(sub_ntypes == 2))
def check_metis_partition(g, extra_hops):
subgs = dgl.transforms.metis_partition(g, 4, extra_cached_hops=extra_hops)
num_inner_nodes = 0
num_inner_edges = 0
if subgs is not None:
for part_id, subg in subgs.items():
lnode_ids = np.nonzero(F.asnumpy(subg.ndata['inner_node']))[0]
ledge_ids = np.nonzero(F.asnumpy(subg.edata['inner_edge']))[0]
num_inner_nodes += len(lnode_ids)
num_inner_edges += len(ledge_ids)
assert np.sum(F.asnumpy(subg.ndata['part_id']) == part_id) == len(lnode_ids)
assert num_inner_nodes == g.number_of_nodes()
print(g.number_of_edges() - num_inner_edges)
if extra_hops == 0:
return
# partitions with node reshuffling
subgs = dgl.transforms.metis_partition(g, 4, extra_cached_hops=extra_hops, reshuffle=True)
num_inner_nodes = 0
num_inner_edges = 0
edge_cnts = np.zeros((g.number_of_edges(),))
if subgs is not None:
for part_id, subg in subgs.items():
lnode_ids = np.nonzero(F.asnumpy(subg.ndata['inner_node']))[0]
ledge_ids = np.nonzero(F.asnumpy(subg.edata['inner_edge']))[0]
num_inner_nodes += len(lnode_ids)
num_inner_edges += len(ledge_ids)
assert np.sum(F.asnumpy(subg.ndata['part_id']) == part_id) == len(lnode_ids)
nids = F.asnumpy(subg.ndata[dgl.NID])
            # ensure the local node IDs are contiguous.
parent_ids = F.asnumpy(subg.ndata[dgl.NID])
parent_ids = parent_ids[:len(lnode_ids)]
assert np.all(parent_ids == np.arange(parent_ids[0], parent_ids[-1] + 1))
# count the local edges.
parent_ids = F.asnumpy(subg.edata[dgl.EID])[ledge_ids]
edge_cnts[parent_ids] += 1
orig_ids = subg.ndata['orig_id']
inner_node = F.asnumpy(subg.ndata['inner_node'])
for nid in range(subg.number_of_nodes()):
neighs = subg.predecessors(nid)
old_neighs1 = F.gather_row(orig_ids, neighs)
old_nid = F.asnumpy(orig_ids[nid])
old_neighs2 = g.predecessors(old_nid)
# If this is an inner node, it should have the full neighborhood.
if inner_node[nid]:
assert np.all(np.sort(F.asnumpy(old_neighs1)) == np.sort(F.asnumpy(old_neighs2)))
# Normally, local edges are only counted once.
assert np.all(edge_cnts == 1)
assert num_inner_nodes == g.number_of_nodes()
print(g.number_of_edges() - num_inner_edges)
@unittest.skipIf(F._default_context_str == 'gpu', reason="It doesn't support GPU")
def test_reorder_nodes():
g = create_large_graph(1000)
new_nids = np.random.permutation(g.number_of_nodes())
# TODO(zhengda) we need to test both CSR and COO.
new_g = dgl.partition.reorder_nodes(g, new_nids)
new_in_deg = new_g.in_degrees()
new_out_deg = new_g.out_degrees()
in_deg = g.in_degrees()
out_deg = g.out_degrees()
new_in_deg1 = F.scatter_row(in_deg, F.tensor(new_nids), in_deg)
new_out_deg1 = F.scatter_row(out_deg, F.tensor(new_nids), out_deg)
assert np.all(F.asnumpy(new_in_deg == new_in_deg1))
assert np.all(F.asnumpy(new_out_deg == new_out_deg1))
orig_ids = F.asnumpy(new_g.ndata['orig_id'])
for nid in range(g.number_of_nodes()):
neighs = F.asnumpy(g.successors(nid))
new_neighs1 = new_nids[neighs]
new_nid = new_nids[nid]
new_neighs2 = new_g.successors(new_nid)
assert np.all(np.sort(new_neighs1) == np.sort(F.asnumpy(new_neighs2)))
for nid in range(new_g.number_of_nodes()):
neighs = F.asnumpy(new_g.successors(nid))
old_neighs1 = orig_ids[neighs]
old_nid = orig_ids[nid]
old_neighs2 = g.successors(old_nid)
assert np.all(np.sort(old_neighs1) == np.sort(F.asnumpy(old_neighs2)))
neighs = F.asnumpy(new_g.predecessors(nid))
old_neighs1 = orig_ids[neighs]
old_nid = orig_ids[nid]
old_neighs2 = g.predecessors(old_nid)
assert np.all(np.sort(old_neighs1) == np.sort(F.asnumpy(old_neighs2)))
@parametrize_dtype
def test_compact(idtype):
g1 = dgl.heterograph({
('user', 'follow', 'user'): ([1, 3], [3, 5]),
('user', 'plays', 'game'): ([2, 3, 2], [4, 4, 5]),
('game', 'wished-by', 'user'): ([6, 5], [7, 7])},
{'user': 20, 'game': 10}, idtype=idtype, device=F.ctx())
g2 = dgl.heterograph({
('game', 'clicked-by', 'user'): ([3], [1]),
('user', 'likes', 'user'): ([1, 8], [8, 9])},
{'user': 20, 'game': 10}, idtype=idtype, device=F.ctx())
g3 = dgl.heterograph({('user', '_E', 'user'): ((0, 1), (1, 2))},
{'user': 10}, idtype=idtype, device=F.ctx())
g4 = dgl.heterograph({('user', '_E', 'user'): ((1, 3), (3, 5))},
{'user': 10}, idtype=idtype, device=F.ctx())
def _check(g, new_g, induced_nodes):
assert g.ntypes == new_g.ntypes
assert g.canonical_etypes == new_g.canonical_etypes
for ntype in g.ntypes:
assert -1 not in induced_nodes[ntype]
for etype in g.canonical_etypes:
g_src, g_dst = g.all_edges(order='eid', etype=etype)
g_src = F.asnumpy(g_src)
g_dst = F.asnumpy(g_dst)
new_g_src, new_g_dst = new_g.all_edges(order='eid', etype=etype)
new_g_src_mapped = induced_nodes[etype[0]][F.asnumpy(new_g_src)]
new_g_dst_mapped = induced_nodes[etype[2]][F.asnumpy(new_g_dst)]
assert (g_src == new_g_src_mapped).all()
assert (g_dst == new_g_dst_mapped).all()
# Test default
new_g1 = dgl.compact_graphs(g1)
induced_nodes = {ntype: new_g1.nodes[ntype].data[dgl.NID] for ntype in new_g1.ntypes}
induced_nodes = {k: F.asnumpy(v) for k, v in induced_nodes.items()}
assert new_g1.idtype == idtype
assert set(induced_nodes['user']) == set([1, 3, 5, 2, 7])
assert set(induced_nodes['game']) == set([4, 5, 6])
_check(g1, new_g1, induced_nodes)
# Test with always_preserve given a dict
new_g1 = dgl.compact_graphs(
g1, always_preserve={'game': F.tensor([4, 7], idtype)})
assert new_g1.idtype == idtype
induced_nodes = {ntype: new_g1.nodes[ntype].data[dgl.NID] for ntype in new_g1.ntypes}
induced_nodes = {k: F.asnumpy(v) for k, v in induced_nodes.items()}
assert set(induced_nodes['user']) == set([1, 3, 5, 2, 7])
assert set(induced_nodes['game']) == set([4, 5, 6, 7])
_check(g1, new_g1, induced_nodes)
# Test with always_preserve given a tensor
new_g3 = dgl.compact_graphs(
g3, always_preserve=F.tensor([1, 7], idtype))
induced_nodes = {ntype: new_g3.nodes[ntype].data[dgl.NID] for ntype in new_g3.ntypes}
induced_nodes = {k: F.asnumpy(v) for k, v in induced_nodes.items()}
assert new_g3.idtype == idtype
assert set(induced_nodes['user']) == set([0, 1, 2, 7])
_check(g3, new_g3, induced_nodes)
# Test multiple graphs
new_g1, new_g2 = dgl.compact_graphs([g1, g2])
induced_nodes = {ntype: new_g1.nodes[ntype].data[dgl.NID] for ntype in new_g1.ntypes}
induced_nodes = {k: F.asnumpy(v) for k, v in induced_nodes.items()}
assert new_g1.idtype == idtype
assert new_g2.idtype == idtype
assert set(induced_nodes['user']) == set([1, 3, 5, 2, 7, 8, 9])
assert set(induced_nodes['game']) == set([3, 4, 5, 6])
_check(g1, new_g1, induced_nodes)
_check(g2, new_g2, induced_nodes)
# Test multiple graphs with always_preserve given a dict
new_g1, new_g2 = dgl.compact_graphs(
[g1, g2], always_preserve={'game': F.tensor([4, 7], dtype=idtype)})
induced_nodes = {ntype: new_g1.nodes[ntype].data[dgl.NID] for ntype in new_g1.ntypes}
induced_nodes = {k: F.asnumpy(v) for k, v in induced_nodes.items()}
assert new_g1.idtype == idtype
assert new_g2.idtype == idtype
assert set(induced_nodes['user']) == set([1, 3, 5, 2, 7, 8, 9])
assert set(induced_nodes['game']) == set([3, 4, 5, 6, 7])
_check(g1, new_g1, induced_nodes)
_check(g2, new_g2, induced_nodes)
# Test multiple graphs with always_preserve given a tensor
new_g3, new_g4 = dgl.compact_graphs(
[g3, g4], always_preserve=F.tensor([1, 7], dtype=idtype))
induced_nodes = {ntype: new_g3.nodes[ntype].data[dgl.NID] for ntype in new_g3.ntypes}
induced_nodes = {k: F.asnumpy(v) for k, v in induced_nodes.items()}
assert new_g3.idtype == idtype
assert new_g4.idtype == idtype
assert set(induced_nodes['user']) == set([0, 1, 2, 3, 5, 7])
_check(g3, new_g3, induced_nodes)
_check(g4, new_g4, induced_nodes)
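def _sketch_compact_graphs():
    # Illustrative sketch, not invoked by the test suite: dgl.compact_graphs
    # drops nodes without incident edges and relabels the rest; the original
    # IDs survive in ndata[dgl.NID], as the assertions above rely on.
    g = dgl.heterograph({('user', '_E', 'user'): (F.tensor([1, 5]), F.tensor([5, 9]))},
                        {'user': 20})
    cg = dgl.compact_graphs(g)
    assert cg.number_of_nodes('user') == 3
    assert set(F.asnumpy(cg.nodes['user'].data[dgl.NID]).tolist()) == {1, 5, 9}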
@unittest.skipIf(F._default_context_str == 'gpu', reason="GPU to simple not implemented")
@parametrize_dtype
def test_to_simple(idtype):
# homogeneous graph
g = dgl.graph((F.tensor([0, 1, 2, 1]), F.tensor([1, 2, 0, 2])))
g.ndata['h'] = F.tensor([[0.], [1.], [2.]])
g.edata['h'] = F.tensor([[3.], [4.], [5.], [6.]])
sg, wb = dgl.to_simple(g, writeback_mapping=True)
u, v = g.all_edges(form='uv', order='eid')
u = F.asnumpy(u).tolist()
v = F.asnumpy(v).tolist()
uv = list(zip(u, v))
eid_map = F.asnumpy(wb)
su, sv = sg.all_edges(form='uv', order='eid')
su = F.asnumpy(su).tolist()
sv = F.asnumpy(sv).tolist()
suv = list(zip(su, sv))
sc = F.asnumpy(sg.edata['count'])
assert set(uv) == set(suv)
for i, e in enumerate(suv):
assert sc[i] == sum(e == _e for _e in uv)
for i, e in enumerate(uv):
assert eid_map[i] == suv.index(e)
# shared ndata
assert F.array_equal(sg.ndata['h'], g.ndata['h'])
assert 'h' not in sg.edata
# new ndata to sg
sg.ndata['hh'] = F.tensor([[0.], [1.], [2.]])
assert 'hh' not in g.ndata
sg = dgl.to_simple(g, writeback_mapping=False, copy_ndata=False)
assert 'h' not in sg.ndata
assert 'h' not in sg.edata
# test coalesce edge feature
sg = dgl.to_simple(g, copy_edata=True, aggregator='arbitrary')
assert F.allclose(sg.edata['h'][1], F.tensor([4.]))
sg = dgl.to_simple(g, copy_edata=True, aggregator='sum')
assert F.allclose(sg.edata['h'][1], F.tensor([10.]))
sg = dgl.to_simple(g, copy_edata=True, aggregator='mean')
assert F.allclose(sg.edata['h'][1], F.tensor([5.]))
# heterogeneous graph
g = dgl.heterograph({
('user', 'follow', 'user'): ([0, 1, 2, 1, 1, 1],
[1, 3, 2, 3, 4, 4]),
('user', 'plays', 'game'): ([3, 2, 1, 1, 3, 2, 2], [5, 3, 4, 4, 5, 3, 3])},
idtype=idtype, device=F.ctx())
g.nodes['user'].data['h'] = F.tensor([0, 1, 2, 3, 4])
g.nodes['user'].data['hh'] = F.tensor([0, 1, 2, 3, 4])
g.edges['follow'].data['h'] = F.tensor([0, 1, 2, 3, 4, 5])
sg, wb = dgl.to_simple(g, return_counts='weights', writeback_mapping=True, copy_edata=True)
g.nodes['game'].data['h'] = F.tensor([0, 1, 2, 3, 4, 5])
for etype in g.canonical_etypes:
u, v = g.all_edges(form='uv', order='eid', etype=etype)
u = F.asnumpy(u).tolist()
v = F.asnumpy(v).tolist()
uv = list(zip(u, v))
eid_map = F.asnumpy(wb[etype])
su, sv = sg.all_edges(form='uv', order='eid', etype=etype)
su = F.asnumpy(su).tolist()
sv = F.asnumpy(sv).tolist()
suv = list(zip(su, sv))
sw = F.asnumpy(sg.edges[etype].data['weights'])
assert set(uv) == set(suv)
for i, e in enumerate(suv):
assert sw[i] == sum(e == _e for _e in uv)
for i, e in enumerate(uv):
assert eid_map[i] == suv.index(e)
# shared ndata
assert F.array_equal(sg.nodes['user'].data['h'], g.nodes['user'].data['h'])
assert F.array_equal(sg.nodes['user'].data['hh'], g.nodes['user'].data['hh'])
assert 'h' not in sg.nodes['game'].data
# new ndata to sg
sg.nodes['user'].data['hhh'] = F.tensor([0, 1, 2, 3, 4])
assert 'hhh' not in g.nodes['user'].data
# share edata
feat_idx = F.asnumpy(wb[('user', 'follow', 'user')])
_, indices = np.unique(feat_idx, return_index=True)
assert np.array_equal(F.asnumpy(sg.edges['follow'].data['h']),
F.asnumpy(g.edges['follow'].data['h'])[indices])
sg = dgl.to_simple(g, writeback_mapping=False, copy_ndata=False)
for ntype in g.ntypes:
assert g.number_of_nodes(ntype) == sg.number_of_nodes(ntype)
assert 'h' not in sg.nodes['user'].data
assert 'hh' not in sg.nodes['user'].data
# verify DGLGraph.edge_ids() after dgl.to_simple()
# in case ids are not initialized in underlying coo2csr()
u = F.tensor([0, 1, 2])
v = F.tensor([1, 2, 3])
eids = F.tensor([0, 1, 2])
g = dgl.graph((u, v))
assert F.array_equal(g.edge_ids(u, v), eids)
sg = dgl.to_simple(g)
assert F.array_equal(sg.edge_ids(u, v), eids)
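def _sketch_to_simple_sum():
    # Illustrative sketch, not invoked by the test suite: with
    # aggregator='sum', dgl.to_simple folds the features of parallel edges
    # into the single surviving edge, assuming (as the assertions above do)
    # that the simple graph keeps first-occurrence edge order.
    g = dgl.graph((F.tensor([0, 0, 1]), F.tensor([1, 1, 2])))
    g.edata['w'] = F.tensor([[1.], [2.], [5.]])
    sg = dgl.to_simple(g, copy_edata=True, aggregator='sum')
    assert sg.number_of_edges() == 2
    # the two parallel 0->1 edges collapse into one edge with weight 1 + 2
    assert F.allclose(sg.edata['w'][0], F.tensor([3.]))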
@parametrize_dtype
def test_to_block(idtype):
def check(g, bg, ntype, etype, dst_nodes, include_dst_in_src=True):
if dst_nodes is not None:
assert F.array_equal(bg.dstnodes[ntype].data[dgl.NID], dst_nodes)
n_dst_nodes = bg.number_of_nodes('DST/' + ntype)
if include_dst_in_src:
assert F.array_equal(
bg.srcnodes[ntype].data[dgl.NID][:n_dst_nodes],
bg.dstnodes[ntype].data[dgl.NID])
g = g[etype]
bg = bg[etype]
induced_src = bg.srcdata[dgl.NID]
induced_dst = bg.dstdata[dgl.NID]
induced_eid = bg.edata[dgl.EID]
bg_src, bg_dst = bg.all_edges(order='eid')
src_ans, dst_ans = g.all_edges(order='eid')
induced_src_bg = F.gather_row(induced_src, bg_src)
induced_dst_bg = F.gather_row(induced_dst, bg_dst)
induced_src_ans = F.gather_row(src_ans, induced_eid)
induced_dst_ans = F.gather_row(dst_ans, induced_eid)
assert F.array_equal(induced_src_bg, induced_src_ans)
assert F.array_equal(induced_dst_bg, induced_dst_ans)
def checkall(g, bg, dst_nodes, include_dst_in_src=True):
for etype in g.etypes:
ntype = g.to_canonical_etype(etype)[2]
if dst_nodes is not None and ntype in dst_nodes:
check(g, bg, ntype, etype, dst_nodes[ntype], include_dst_in_src)
else:
check(g, bg, ntype, etype, None, include_dst_in_src)
g = dgl.heterograph({
('A', 'AA', 'A'): ([0, 2, 1, 3], [1, 3, 2, 4]),
('A', 'AB', 'B'): ([0, 1, 3, 1], [1, 3, 5, 6]),
('B', 'BA', 'A'): ([2, 3], [3, 2])}, idtype=idtype, device=F.ctx())
g.nodes['A'].data['x'] = F.randn((5, 10))
g.nodes['B'].data['x'] = F.randn((7, 5))
g.edges['AA'].data['x'] = F.randn((4, 3))
g.edges['AB'].data['x'] = F.randn((4, 3))
g.edges['BA'].data['x'] = F.randn((2, 3))
g_a = g['AA']
def check_features(g, bg):
for ntype in bg.srctypes:
for key in g.nodes[ntype].data:
assert F.array_equal(
bg.srcnodes[ntype].data[key],
F.gather_row(g.nodes[ntype].data[key], bg.srcnodes[ntype].data[dgl.NID]))
for ntype in bg.dsttypes:
for key in g.nodes[ntype].data:
assert F.array_equal(
bg.dstnodes[ntype].data[key],
F.gather_row(g.nodes[ntype].data[key], bg.dstnodes[ntype].data[dgl.NID]))
for etype in bg.canonical_etypes:
for key in g.edges[etype].data:
assert F.array_equal(
bg.edges[etype].data[key],
F.gather_row(g.edges[etype].data[key], bg.edges[etype].data[dgl.EID]))
bg = dgl.to_block(g_a)
check(g_a, bg, 'A', 'AA', None)
check_features(g_a, bg)
assert bg.number_of_src_nodes() == 5
assert bg.number_of_dst_nodes() == 4
bg = dgl.to_block(g_a, include_dst_in_src=False)
check(g_a, bg, 'A', 'AA', None, False)
check_features(g_a, bg)
assert bg.number_of_src_nodes() == 4
assert bg.number_of_dst_nodes() == 4
dst_nodes = F.tensor([4, 3, 2, 1], dtype=idtype)
bg = dgl.to_block(g_a, dst_nodes)
check(g_a, bg, 'A', 'AA', dst_nodes)
check_features(g_a, bg)
g_ab = g['AB']
bg = dgl.to_block(g_ab)
assert bg.idtype == idtype
assert bg.number_of_nodes('SRC/B') == 4
assert F.array_equal(bg.srcnodes['B'].data[dgl.NID], bg.dstnodes['B'].data[dgl.NID])
assert bg.number_of_nodes('DST/A') == 0
checkall(g_ab, bg, None)
check_features(g_ab, bg)
dst_nodes = {'B': F.tensor([5, 6, 3, 1], dtype=idtype)}
bg = dgl.to_block(g, dst_nodes)
assert bg.number_of_nodes('SRC/B') == 4
assert F.array_equal(bg.srcnodes['B'].data[dgl.NID], bg.dstnodes['B'].data[dgl.NID])
assert bg.number_of_nodes('DST/A') == 0
checkall(g, bg, dst_nodes)
check_features(g, bg)
dst_nodes = {'A': F.tensor([4, 3, 2, 1], dtype=idtype), 'B': F.tensor([3, 5, 6, 1], dtype=idtype)}
bg = dgl.to_block(g, dst_nodes=dst_nodes)
checkall(g, bg, dst_nodes)
check_features(g, bg)
    # test specifying src_nodes with include_dst_in_src
src_nodes = {}
for ntype in dst_nodes.keys():
# use the previous run to get the list of source nodes
src_nodes[ntype] = bg.srcnodes[ntype].data[dgl.NID]
bg = dgl.to_block(g, dst_nodes=dst_nodes, src_nodes=src_nodes)
checkall(g, bg, dst_nodes)
check_features(g, bg)
    # test with include_dst_in_src=False
dst_nodes = {'A': F.tensor([4, 3, 2, 1], dtype=idtype), 'B': F.tensor([3, 5, 6, 1], dtype=idtype)}
bg = dgl.to_block(g, dst_nodes=dst_nodes, include_dst_in_src=False)
checkall(g, bg, dst_nodes, False)
check_features(g, bg)
    # test specifying src_nodes with include_dst_in_src=False
src_nodes = {}
for ntype in dst_nodes.keys():
# use the previous run to get the list of source nodes
src_nodes[ntype] = bg.srcnodes[ntype].data[dgl.NID]
bg = dgl.to_block(g, dst_nodes=dst_nodes, include_dst_in_src=False,
src_nodes=src_nodes)
checkall(g, bg, dst_nodes, False)
check_features(g, bg)
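def _sketch_to_block():
    # Illustrative sketch, not invoked by the test suite: a block is a
    # bipartite-structured graph from source nodes to destination nodes;
    # with the default include_dst_in_src=True the destination nodes also
    # appear first among the source nodes.
    g = dgl.heterograph({('A', 'AA', 'A'): (F.tensor([0, 2]), F.tensor([1, 1]))})
    bg = dgl.to_block(g, dst_nodes=F.tensor([1]))
    assert bg.number_of_dst_nodes() == 1
    assert bg.number_of_src_nodes() == 3
    # the first source node is the destination node itself
    assert F.asnumpy(bg.srcdata[dgl.NID])[0] == 1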
@unittest.skipIf(F._default_context_str == 'gpu', reason="GPU not implemented")
@parametrize_dtype
def test_remove_edges(idtype):
def check(g1, etype, g, edges_removed):
src, dst, eid = g.edges(etype=etype, form='all')
src1, dst1 = g1.edges(etype=etype, order='eid')
if etype is not None:
eid1 = g1.edges[etype].data[dgl.EID]
else:
eid1 = g1.edata[dgl.EID]
src1 = F.asnumpy(src1)
dst1 = F.asnumpy(dst1)
eid1 = F.asnumpy(eid1)
src = F.asnumpy(src)
dst = F.asnumpy(dst)
eid = F.asnumpy(eid)
sde_set = set(zip(src, dst, eid))
for s, d, e in zip(src1, dst1, eid1):
assert (s, d, e) in sde_set
assert not np.isin(edges_removed, eid1).any()
assert g1.idtype == g.idtype
for fmt in ['coo', 'csr', 'csc']:
for edges_to_remove in [[2], [2, 2], [3, 2], [1, 3, 1, 2]]:
g = dgl.graph(([0, 2, 1, 3], [1, 3, 2, 4]), idtype=idtype).formats(fmt)
g1 = dgl.remove_edges(g, F.tensor(edges_to_remove, idtype))
check(g1, None, g, edges_to_remove)
g = dgl.from_scipy(
spsp.csr_matrix(([1, 1, 1, 1], ([0, 2, 1, 3], [1, 3, 2, 4])), shape=(5, 5)),
idtype=idtype).formats(fmt)
g1 = dgl.remove_edges(g, F.tensor(edges_to_remove, idtype))
check(g1, None, g, edges_to_remove)
g = dgl.heterograph({
('A', 'AA', 'A'): ([0, 2, 1, 3], [1, 3, 2, 4]),
('A', 'AB', 'B'): ([0, 1, 3, 1], [1, 3, 5, 6]),
('B', 'BA', 'A'): ([2, 3], [3, 2])}, idtype=idtype)
g2 = dgl.remove_edges(g, {'AA': F.tensor([2], idtype), 'AB': F.tensor([3], idtype), 'BA': F.tensor([1], idtype)})
check(g2, 'AA', g, [2])
check(g2, 'AB', g, [3])
check(g2, 'BA', g, [1])
g3 = dgl.remove_edges(g, {'AA': F.tensor([], idtype), 'AB': F.tensor([3], idtype), 'BA': F.tensor([1], idtype)})
check(g3, 'AA', g, [])
check(g3, 'AB', g, [3])
check(g3, 'BA', g, [1])
g4 = dgl.remove_edges(g, {'AB': F.tensor([3, 1, 2, 0], idtype)})
check(g4, 'AA', g, [])
check(g4, 'AB', g, [3, 1, 2, 0])
check(g4, 'BA', g, [])
@parametrize_dtype
def test_add_edges(idtype):
# homogeneous graph
g = dgl.graph(([0, 1], [1, 2]), idtype=idtype, device=F.ctx())
u = 0
v = 1
g = dgl.add_edges(g, u, v)
assert g.device == F.ctx()
assert g.number_of_nodes() == 3
assert g.number_of_edges() == 3
u = [0]
v = [1]
g = dgl.add_edges(g, u, v)
assert g.device == F.ctx()
assert g.number_of_nodes() == 3
assert g.number_of_edges() == 4
u = F.tensor(u, dtype=idtype)
v = F.tensor(v, dtype=idtype)
g = dgl.add_edges(g, u, v)
assert g.device == F.ctx()
assert g.number_of_nodes() == 3
assert g.number_of_edges() == 5
u, v = g.edges(form='uv', order='eid')
assert F.array_equal(u, F.tensor([0, 1, 0, 0, 0], dtype=idtype))
assert F.array_equal(v, F.tensor([1, 2, 1, 1, 1], dtype=idtype))
g = dgl.add_edges(g, [], [])
g = dgl.add_edges(g, 0, [])
g = dgl.add_edges(g, [], 0)
assert g.device == F.ctx()
assert g.number_of_nodes() == 3
assert g.number_of_edges() == 5
u, v = g.edges(form='uv', order='eid')
assert F.array_equal(u, F.tensor([0, 1, 0, 0, 0], dtype=idtype))
assert F.array_equal(v, F.tensor([1, 2, 1, 1, 1], dtype=idtype))
# node id larger than current max node id
g = dgl.graph(([0, 1], [1, 2]), idtype=idtype, device=F.ctx())
u = F.tensor([0, 1], dtype=idtype)
v = F.tensor([2, 3], dtype=idtype)
g = dgl.add_edges(g, u, v)
assert g.number_of_nodes() == 4
assert g.number_of_edges() == 4
u, v = g.edges(form='uv', order='eid')
assert F.array_equal(u, F.tensor([0, 1, 0, 1], dtype=idtype))
assert F.array_equal(v, F.tensor([1, 2, 2, 3], dtype=idtype))
# has data
g = dgl.graph(([0, 1], [1, 2]), idtype=idtype, device=F.ctx())
g.ndata['h'] = F.copy_to(F.tensor([1, 1, 1], dtype=idtype), ctx=F.ctx())
g.edata['h'] = F.copy_to(F.tensor([1, 1], dtype=idtype), ctx=F.ctx())
u = F.tensor([0, 1], dtype=idtype)
v = F.tensor([2, 3], dtype=idtype)
e_feat = {'h' : F.copy_to(F.tensor([2, 2], dtype=idtype), ctx=F.ctx()),
'hh' : F.copy_to(F.tensor([2, 2], dtype=idtype), ctx=F.ctx())}
g = dgl.add_edges(g, u, v, e_feat)
assert g.number_of_nodes() == 4
assert g.number_of_edges() == 4
u, v = g.edges(form='uv', order='eid')
assert F.array_equal(u, F.tensor([0, 1, 0, 1], dtype=idtype))
assert F.array_equal(v, F.tensor([1, 2, 2, 3], dtype=idtype))
assert F.array_equal(g.ndata['h'], F.tensor([1, 1, 1, 0], dtype=idtype))
assert F.array_equal(g.edata['h'], F.tensor([1, 1, 2, 2], dtype=idtype))
assert F.array_equal(g.edata['hh'], F.tensor([0, 0, 2, 2], dtype=idtype))
# zero data graph
g = dgl.graph(([], []), num_nodes=0, idtype=idtype, device=F.ctx())
u = F.tensor([0, 1], dtype=idtype)
v = F.tensor([2, 2], dtype=idtype)
e_feat = {'h' : F.copy_to(F.tensor([2, 2], dtype=idtype), ctx=F.ctx()),
'hh' : F.copy_to(F.tensor([2, 2], dtype=idtype), ctx=F.ctx())}
g = dgl.add_edges(g, u, v, e_feat)
assert g.number_of_nodes() == 3
assert g.number_of_edges() == 2
u, v = g.edges(form='uv', order='eid')
assert F.array_equal(u, F.tensor([0, 1], dtype=idtype))
assert F.array_equal(v, F.tensor([2, 2], dtype=idtype))
assert F.array_equal(g.edata['h'], F.tensor([2, 2], dtype=idtype))
assert F.array_equal(g.edata['hh'], F.tensor([2, 2], dtype=idtype))
# bipartite graph
g = dgl.heterograph(
{('user', 'plays', 'game'): ([0, 1], [1, 2])}, idtype=idtype, device=F.ctx())
u = 0
v = 1
g = dgl.add_edges(g, u, v)
assert g.device == F.ctx()
assert g.number_of_nodes('user') == 2
assert g.number_of_nodes('game') == 3
assert g.number_of_edges() == 3
u = [0]
v = [1]
g = dgl.add_edges(g, u, v)
assert g.device == F.ctx()
assert g.number_of_nodes('user') == 2
assert g.number_of_nodes('game') == 3
assert g.number_of_edges() == 4
u = F.tensor(u, dtype=idtype)
v = F.tensor(v, dtype=idtype)
g = dgl.add_edges(g, u, v)
assert g.device == F.ctx()
assert g.number_of_nodes('user') == 2
assert g.number_of_nodes('game') == 3
assert g.number_of_edges() == 5
u, v = g.edges(form='uv')
assert F.array_equal(u, F.tensor([0, 1, 0, 0, 0], dtype=idtype))
assert F.array_equal(v, F.tensor([1, 2, 1, 1, 1], dtype=idtype))
# node id larger than current max node id
g = dgl.heterograph(
{('user', 'plays', 'game'): ([0, 1], [1, 2])}, idtype=idtype, device=F.ctx())
u = F.tensor([0, 2], dtype=idtype)
v = F.tensor([2, 3], dtype=idtype)
g = dgl.add_edges(g, u, v)
assert g.device == F.ctx()
assert g.number_of_nodes('user') == 3
assert g.number_of_nodes('game') == 4
assert g.number_of_edges() == 4
u, v = g.edges(form='uv', order='eid')
assert F.array_equal(u, F.tensor([0, 1, 0, 2], dtype=idtype))
assert F.array_equal(v, F.tensor([1, 2, 2, 3], dtype=idtype))
# has data
g = dgl.heterograph(
{('user', 'plays', 'game'): ([0, 1], [1, 2])}, idtype=idtype, device=F.ctx())
g.nodes['user'].data['h'] = F.copy_to(F.tensor([1, 1], dtype=idtype), ctx=F.ctx())
g.nodes['game'].data['h'] = F.copy_to(F.tensor([2, 2, 2], dtype=idtype), ctx=F.ctx())
g.edata['h'] = F.copy_to(F.tensor([1, 1], dtype=idtype), ctx=F.ctx())
u = F.tensor([0, 2], dtype=idtype)
v = F.tensor([2, 3], dtype=idtype)
e_feat = {'h' : F.copy_to(F.tensor([2, 2], dtype=idtype), ctx=F.ctx()),
'hh' : F.copy_to(F.tensor([2, 2], dtype=idtype), ctx=F.ctx())}
g = dgl.add_edges(g, u, v, e_feat)
assert g.number_of_nodes('user') == 3
assert g.number_of_nodes('game') == 4
assert g.number_of_edges() == 4
u, v = g.edges(form='uv', order='eid')
assert F.array_equal(u, F.tensor([0, 1, 0, 2], dtype=idtype))
assert F.array_equal(v, F.tensor([1, 2, 2, 3], dtype=idtype))
assert F.array_equal(g.nodes['user'].data['h'], F.tensor([1, 1, 0], dtype=idtype))
assert F.array_equal(g.nodes['game'].data['h'], F.tensor([2, 2, 2, 0], dtype=idtype))
assert F.array_equal(g.edata['h'], F.tensor([1, 1, 2, 2], dtype=idtype))
assert F.array_equal(g.edata['hh'], F.tensor([0, 0, 2, 2], dtype=idtype))
# heterogeneous graph
g = create_test_heterograph3(idtype)
u = F.tensor([0, 2], dtype=idtype)
v = F.tensor([2, 3], dtype=idtype)
g = dgl.add_edges(g, u, v, etype='plays')
assert g.number_of_nodes('user') == 3
assert g.number_of_nodes('game') == 4
assert g.number_of_nodes('developer') == 2
assert g.number_of_edges('plays') == 6
assert g.number_of_edges('develops') == 2
u, v = g.edges(form='uv', order='eid', etype='plays')
assert F.array_equal(u, F.tensor([0, 1, 1, 2, 0, 2], dtype=idtype))
assert F.array_equal(v, F.tensor([0, 0, 1, 1, 2, 3], dtype=idtype))
assert F.array_equal(g.nodes['user'].data['h'], F.tensor([1, 1, 1], dtype=idtype))
assert F.array_equal(g.nodes['game'].data['h'], F.tensor([2, 2, 0, 0], dtype=idtype))
assert F.array_equal(g.edges['plays'].data['h'], F.tensor([1, 1, 1, 1, 0, 0], dtype=idtype))
# add with feature
e_feat = {'h': F.copy_to(F.tensor([2, 2], dtype=idtype), ctx=F.ctx())}
u = F.tensor([0, 2], dtype=idtype)
v = F.tensor([2, 3], dtype=idtype)
g.nodes['game'].data['h'] = F.copy_to(F.tensor([2, 2, 1, 1], dtype=idtype), ctx=F.ctx())
g = dgl.add_edges(g, u, v, data=e_feat, etype='develops')
assert g.number_of_nodes('user') == 3
assert g.number_of_nodes('game') == 4
assert g.number_of_nodes('developer') == 3
assert g.number_of_edges('plays') == 6
assert g.number_of_edges('develops') == 4
u, v = g.edges(form='uv', order='eid', etype='develops')
assert F.array_equal(u, F.tensor([0, 1, 0, 2], dtype=idtype))
assert F.array_equal(v, F.tensor([0, 1, 2, 3], dtype=idtype))
assert F.array_equal(g.nodes['developer'].data['h'], F.tensor([3, 3, 0], dtype=idtype))
assert F.array_equal(g.nodes['game'].data['h'], F.tensor([2, 2, 1, 1], dtype=idtype))
assert F.array_equal(g.edges['develops'].data['h'], F.tensor([0, 0, 2, 2], dtype=idtype))
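def _sketch_add_edges_zero_padding():
    # Illustrative sketch, not invoked by the test suite: the zero-padding
    # rule asserted above. When no data is supplied for new edges,
    # dgl.add_edges zero-fills the new rows of every existing edge feature;
    # implicitly created nodes are padded the same way.
    g = dgl.graph((F.tensor([0]), F.tensor([1])))
    g.edata['w'] = F.tensor([[1.]])
    g = dgl.add_edges(g, F.tensor([1]), F.tensor([2]))
    assert g.number_of_nodes() == 3
    assert F.allclose(g.edata['w'], F.tensor([[1.], [0.]]))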
@parametrize_dtype
def test_add_nodes(idtype):
    # homogeneous graph
g = dgl.graph(([0, 1], [1, 2]), idtype=idtype, device=F.ctx())
g.ndata['h'] = F.copy_to(F.tensor([1,1,1], dtype=idtype), ctx=F.ctx())
new_g = dgl.add_nodes(g, 1)
assert g.number_of_nodes() == 3
assert new_g.number_of_nodes() == 4
assert F.array_equal(new_g.ndata['h'], F.tensor([1, 1, 1, 0], dtype=idtype))
# zero node graph
g = dgl.graph(([], []), num_nodes=3, idtype=idtype, device=F.ctx())
g.ndata['h'] = F.copy_to(F.tensor([1,1,1], dtype=idtype), ctx=F.ctx())
g = dgl.add_nodes(g, 1, data={'h' : F.copy_to(F.tensor([2], dtype=idtype), ctx=F.ctx())})
assert g.number_of_nodes() == 4
assert F.array_equal(g.ndata['h'], F.tensor([1, 1, 1, 2], dtype=idtype))
# bipartite graph
g = dgl.heterograph(
{('user', 'plays', 'game'): ([0, 1], [1, 2])}, idtype=idtype, device=F.ctx())
g = dgl.add_nodes(g, 2, data={'h' : F.copy_to(F.tensor([2, 2], dtype=idtype), ctx=F.ctx())}, ntype='user')
assert g.number_of_nodes('user') == 4
assert g.number_of_nodes('game') == 3
assert F.array_equal(g.nodes['user'].data['h'], F.tensor([0, 0, 2, 2], dtype=idtype))
g = dgl.add_nodes(g, 2, ntype='game')
assert g.number_of_nodes('user') == 4
assert g.number_of_nodes('game') == 5
# heterogeneous graph
g = create_test_heterograph3(idtype)
g = dgl.add_nodes(g, 1, ntype='user')
g = dgl.add_nodes(g, 2, data={'h' : F.copy_to(F.tensor([2, 2], dtype=idtype), ctx=F.ctx())}, ntype='game')
assert g.number_of_nodes('user') == 4
assert g.number_of_nodes('game') == 4
assert g.number_of_nodes('developer') == 2
assert F.array_equal(g.nodes['user'].data['h'], F.tensor([1, 1, 1, 0], dtype=idtype))
assert F.array_equal(g.nodes['game'].data['h'], F.tensor([2, 2, 2, 2], dtype=idtype))
@parametrize_dtype
def test_remove_edges(idtype):
    # homogeneous graph
g = dgl.graph(([0, 1], [1, 2]), idtype=idtype, device=F.ctx())
e = 0
g = dgl.remove_edges(g, e)
assert g.number_of_edges() == 1
u, v = g.edges(form='uv', order='eid')
assert F.array_equal(u, F.tensor([1], dtype=idtype))
assert F.array_equal(v, F.tensor([2], dtype=idtype))
g = dgl.graph(([0, 1], [1, 2]), idtype=idtype, device=F.ctx())
e = [0]
g = dgl.remove_edges(g, e)
assert g.number_of_edges() == 1
u, v = g.edges(form='uv', order='eid')
assert F.array_equal(u, F.tensor([1], dtype=idtype))
assert F.array_equal(v, F.tensor([2], dtype=idtype))
e = F.tensor([0], dtype=idtype)
g = dgl.remove_edges(g, e)
assert g.number_of_edges() == 0
# has node data
g = dgl.graph(([0, 1], [1, 2]), idtype=idtype, device=F.ctx())
g.ndata['h'] = F.copy_to(F.tensor([1, 2, 3], dtype=idtype), ctx=F.ctx())
g = dgl.remove_edges(g, 1)
assert g.number_of_edges() == 1
assert F.array_equal(g.ndata['h'], F.tensor([1, 2, 3], dtype=idtype))
# has edge data
g = dgl.graph(([0, 1], [1, 2]), idtype=idtype, device=F.ctx())
g.edata['h'] = F.copy_to(F.tensor([1, 2], dtype=idtype), ctx=F.ctx())
g = dgl.remove_edges(g, 0)
assert g.number_of_edges() == 1
assert F.array_equal(g.edata['h'], F.tensor([2], dtype=idtype))
# invalid eid
assert_fail = False
try:
g = dgl.remove_edges(g, 1)
except:
assert_fail = True
assert assert_fail
# bipartite graph
g = dgl.heterograph(
{('user', 'plays', 'game'): ([0, 1], [1, 2])}, idtype=idtype, device=F.ctx())
e = 0
g = dgl.remove_edges(g, e)
assert g.number_of_edges() == 1
u, v = g.edges(form='uv', order='eid')
assert F.array_equal(u, F.tensor([1], dtype=idtype))
assert F.array_equal(v, F.tensor([2], dtype=idtype))
g = dgl.heterograph(
{('user', 'plays', 'game'): ([0, 1], [1, 2])}, idtype=idtype, device=F.ctx())
e = [0]
g = dgl.remove_edges(g, e)
assert g.number_of_edges() == 1
u, v = g.edges(form='uv', order='eid')
assert F.array_equal(u, F.tensor([1], dtype=idtype))
assert F.array_equal(v, F.tensor([2], dtype=idtype))
e = F.tensor([0], dtype=idtype)
g = dgl.remove_edges(g, e)
assert g.number_of_edges() == 0
# has data
g = dgl.heterograph(
{('user', 'plays', 'game'): ([0, 1], [1, 2])}, idtype=idtype, device=F.ctx())
g.nodes['user'].data['h'] = F.copy_to(F.tensor([1, 1], dtype=idtype), ctx=F.ctx())
g.nodes['game'].data['h'] = F.copy_to(F.tensor([2, 2, 2], dtype=idtype), ctx=F.ctx())
g.edata['h'] = F.copy_to(F.tensor([1, 2], dtype=idtype), ctx=F.ctx())
g = dgl.remove_edges(g, 1)
assert g.number_of_edges() == 1
assert F.array_equal(g.nodes['user'].data['h'], F.tensor([1, 1], dtype=idtype))
assert F.array_equal(g.nodes['game'].data['h'], F.tensor([2, 2, 2], dtype=idtype))
assert F.array_equal(g.edata['h'], F.tensor([1], dtype=idtype))
# heterogeneous graph
g = create_test_heterograph3(idtype)
g.edges['plays'].data['h'] = F.copy_to(F.tensor([1, 2, 3, 4], dtype=idtype), ctx=F.ctx())
g = dgl.remove_edges(g, 1, etype='plays')
assert g.number_of_edges('plays') == 3
u, v = g.edges(form='uv', order='eid', etype='plays')
assert F.array_equal(u, F.tensor([0, 1, 2], dtype=idtype))
assert F.array_equal(v, F.tensor([0, 1, 1], dtype=idtype))
assert F.array_equal(g.edges['plays'].data['h'], F.tensor([1, 3, 4], dtype=idtype))
# remove all edges of 'develops'
g = dgl.remove_edges(g, [0, 1], etype='develops')
assert g.number_of_edges('develops') == 0
assert F.array_equal(g.nodes['user'].data['h'], F.tensor([1, 1, 1], dtype=idtype))
assert F.array_equal(g.nodes['game'].data['h'], F.tensor([2, 2], dtype=idtype))
assert F.array_equal(g.nodes['developer'].data['h'], F.tensor([3, 3], dtype=idtype))
# batched graph
ctx = F.ctx()
g1 = dgl.graph(([0, 1], [1, 2]), num_nodes=5, idtype=idtype, device=ctx)
g2 = dgl.graph(([], []), idtype=idtype, device=ctx)
g3 = dgl.graph(([2, 3, 4], [3, 2, 1]), idtype=idtype, device=ctx)
bg = dgl.batch([g1, g2, g3])
bg_r = dgl.remove_edges(bg, 2)
assert bg.batch_size == bg_r.batch_size
assert F.array_equal(bg.batch_num_nodes(), bg_r.batch_num_nodes())
assert F.array_equal(bg_r.batch_num_edges(), F.tensor([2, 0, 2], dtype=F.int64))
bg_r = dgl.remove_edges(bg, [0, 2])
assert bg.batch_size == bg_r.batch_size
assert F.array_equal(bg.batch_num_nodes(), bg_r.batch_num_nodes())
assert F.array_equal(bg_r.batch_num_edges(), F.tensor([1, 0, 2], dtype=F.int64))
bg_r = dgl.remove_edges(bg, F.tensor([0, 2], dtype=idtype))
assert bg.batch_size == bg_r.batch_size
assert F.array_equal(bg.batch_num_nodes(), bg_r.batch_num_nodes())
assert F.array_equal(bg_r.batch_num_edges(), F.tensor([1, 0, 2], dtype=F.int64))
# batched heterogeneous graph
g1 = dgl.heterograph({
('user', 'follows', 'user'): ([0, 1], [1, 2]),
('user', 'plays', 'game'): ([1, 3], [0, 1])
}, num_nodes_dict={'user': 4, 'game': 3}, idtype=idtype, device=ctx)
g2 = dgl.heterograph({
('user', 'follows', 'user'): ([0, 2], [3, 4]),
('user', 'plays', 'game'): ([], [])
}, num_nodes_dict={'user': 6, 'game': 2}, idtype=idtype, device=ctx)
g3 = dgl.heterograph({
('user', 'follows', 'user'): ([], []),
('user', 'plays', 'game'): ([1, 2], [1, 2])
}, idtype=idtype, device=ctx)
bg = dgl.batch([g1, g2, g3])
bg_r = dgl.remove_edges(bg, 1, etype='follows')
assert bg.batch_size == bg_r.batch_size
ntypes = bg.ntypes
for nty in ntypes:
assert F.array_equal(bg.batch_num_nodes(nty), bg_r.batch_num_nodes(nty))
assert F.array_equal(bg_r.batch_num_edges('follows'), F.tensor([1, 2, 0], dtype=F.int64))
assert F.array_equal(bg_r.batch_num_edges('plays'), bg.batch_num_edges('plays'))
bg_r = dgl.remove_edges(bg, 2, etype='plays')
assert bg.batch_size == bg_r.batch_size
for nty in ntypes:
assert F.array_equal(bg.batch_num_nodes(nty), bg_r.batch_num_nodes(nty))
assert F.array_equal(bg.batch_num_edges('follows'), bg_r.batch_num_edges('follows'))
assert F.array_equal(bg_r.batch_num_edges('plays'), F.tensor([2, 0, 1], dtype=F.int64))
bg_r = dgl.remove_edges(bg, [0, 1, 3], etype='follows')
assert bg.batch_size == bg_r.batch_size
for nty in ntypes:
assert F.array_equal(bg.batch_num_nodes(nty), bg_r.batch_num_nodes(nty))
assert F.array_equal(bg_r.batch_num_edges('follows'), F.tensor([0, 1, 0], dtype=F.int64))
assert F.array_equal(bg.batch_num_edges('plays'), bg_r.batch_num_edges('plays'))
bg_r = dgl.remove_edges(bg, [1, 2], etype='plays')
assert bg.batch_size == bg_r.batch_size
for nty in ntypes:
assert F.array_equal(bg.batch_num_nodes(nty), bg_r.batch_num_nodes(nty))
assert F.array_equal(bg.batch_num_edges('follows'), bg_r.batch_num_edges('follows'))
assert F.array_equal(bg_r.batch_num_edges('plays'), F.tensor([1, 0, 1], dtype=F.int64))
bg_r = dgl.remove_edges(bg, F.tensor([0, 1, 3], dtype=idtype), etype='follows')
assert bg.batch_size == bg_r.batch_size
for nty in ntypes:
assert F.array_equal(bg.batch_num_nodes(nty), bg_r.batch_num_nodes(nty))
assert F.array_equal(bg_r.batch_num_edges('follows'), F.tensor([0, 1, 0], dtype=F.int64))
assert F.array_equal(bg.batch_num_edges('plays'), bg_r.batch_num_edges('plays'))
bg_r = dgl.remove_edges(bg, F.tensor([1, 2], dtype=idtype), etype='plays')
assert bg.batch_size == bg_r.batch_size
for nty in ntypes:
assert F.array_equal(bg.batch_num_nodes(nty), bg_r.batch_num_nodes(nty))
assert F.array_equal(bg.batch_num_edges('follows'), bg_r.batch_num_edges('follows'))
assert F.array_equal(bg_r.batch_num_edges('plays'), F.tensor([1, 0, 1], dtype=F.int64))
@parametrize_dtype
def test_remove_nodes(idtype):
    # homogeneous graph
g = dgl.graph(([0, 1], [1, 2]), idtype=idtype, device=F.ctx())
n = 0
g = dgl.remove_nodes(g, n)
assert g.number_of_nodes() == 2
assert g.number_of_edges() == 1
u, v = g.edges(form='uv', order='eid')
assert F.array_equal(u, F.tensor([0], dtype=idtype))
assert F.array_equal(v, F.tensor([1], dtype=idtype))
g = dgl.graph(([0, 1], [1, 2]), idtype=idtype, device=F.ctx())
n = [1]
g = dgl.remove_nodes(g, n)
assert g.number_of_nodes() == 2
assert g.number_of_edges() == 0
g = dgl.graph(([0, 1], [1, 2]), idtype=idtype, device=F.ctx())
n = F.tensor([2], dtype=idtype)
g = dgl.remove_nodes(g, n)
assert g.number_of_nodes() == 2
assert g.number_of_edges() == 1
u, v = g.edges(form='uv', order='eid')
assert F.array_equal(u, F.tensor([0], dtype=idtype))
assert F.array_equal(v, F.tensor([1], dtype=idtype))
# invalid nid
assert_fail = False
try:
g.remove_nodes(3)
except:
assert_fail = True
assert assert_fail
# has node and edge data
g = dgl.graph(([0, 0, 2], [0, 1, 2]), idtype=idtype, device=F.ctx())
g.ndata['hv'] = F.copy_to(F.tensor([1, 2, 3], dtype=idtype), ctx=F.ctx())
g.edata['he'] = F.copy_to(F.tensor([1, 2, 3], dtype=idtype), ctx=F.ctx())
g = dgl.remove_nodes(g, F.tensor([0], dtype=idtype))
assert g.number_of_nodes() == 2
assert g.number_of_edges() == 1
u, v = g.edges(form='uv', order='eid')
assert F.array_equal(u, F.tensor([1], dtype=idtype))
assert F.array_equal(v, F.tensor([1], dtype=idtype))
assert F.array_equal(g.ndata['hv'], F.tensor([2, 3], dtype=idtype))
assert F.array_equal(g.edata['he'], F.tensor([3], dtype=idtype))
    # bipartite graph
g = dgl.heterograph(
{('user', 'plays', 'game'): ([0, 1], [1, 2])}, idtype=idtype, device=F.ctx())
n = 0
g = dgl.remove_nodes(g, n, ntype='user')
assert g.number_of_nodes('user') == 1
assert g.number_of_nodes('game') == 3
assert g.number_of_edges() == 1
u, v = g.edges(form='uv', order='eid')
assert F.array_equal(u, F.tensor([0], dtype=idtype))
assert F.array_equal(v, F.tensor([2], dtype=idtype))
g = dgl.heterograph(
{('user', 'plays', 'game'): ([0, 1], [1, 2])}, idtype=idtype, device=F.ctx())
n = [1]
g = dgl.remove_nodes(g, n, ntype='user')
assert g.number_of_nodes('user') == 1
assert g.number_of_nodes('game') == 3
assert g.number_of_edges() == 1
u, v = g.edges(form='uv', order='eid')
assert F.array_equal(u, F.tensor([0], dtype=idtype))
assert F.array_equal(v, F.tensor([1], dtype=idtype))
g = dgl.heterograph(
{('user', 'plays', 'game'): ([0, 1], [1, 2])}, idtype=idtype, device=F.ctx())
n = F.tensor([0], dtype=idtype)
g = dgl.remove_nodes(g, n, ntype='game')
assert g.number_of_nodes('user') == 2
assert g.number_of_nodes('game') == 2
assert g.number_of_edges() == 2
u, v = g.edges(form='uv', order='eid')
assert F.array_equal(u, F.tensor([0, 1], dtype=idtype))
    assert F.array_equal(v, F.tensor([0, 1], dtype=idtype))
# heterogeneous graph
g = create_test_heterograph3(idtype)
g.edges['plays'].data['h'] = F.copy_to(F.tensor([1, 2, 3, 4], dtype=idtype), ctx=F.ctx())
g = dgl.remove_nodes(g, 0, ntype='game')
assert g.number_of_nodes('user') == 3
assert g.number_of_nodes('game') == 1
assert g.number_of_nodes('developer') == 2
assert g.number_of_edges('plays') == 2
assert g.number_of_edges('develops') == 1
assert F.array_equal(g.nodes['user'].data['h'], F.tensor([1, 1, 1], dtype=idtype))
assert F.array_equal(g.nodes['game'].data['h'], F.tensor([2], dtype=idtype))
assert F.array_equal(g.nodes['developer'].data['h'], F.tensor([3, 3], dtype=idtype))
u, v = g.edges(form='uv', order='eid', etype='plays')
assert F.array_equal(u, F.tensor([1, 2], dtype=idtype))
assert F.array_equal(v, F.tensor([0, 0], dtype=idtype))
assert F.array_equal(g.edges['plays'].data['h'], F.tensor([3, 4], dtype=idtype))
u, v = g.edges(form='uv', order='eid', etype='develops')
assert F.array_equal(u, F.tensor([1], dtype=idtype))
assert F.array_equal(v, F.tensor([0], dtype=idtype))
# batched graph
ctx = F.ctx()
g1 = dgl.graph(([0, 1], [1, 2]), num_nodes=5, idtype=idtype, device=ctx)
g2 = dgl.graph(([], []), idtype=idtype, device=ctx)
g3 = dgl.graph(([2, 3, 4], [3, 2, 1]), idtype=idtype, device=ctx)
bg = dgl.batch([g1, g2, g3])
bg_r = dgl.remove_nodes(bg, 1)
assert bg_r.batch_size == bg.batch_size
assert F.array_equal(bg_r.batch_num_nodes(), F.tensor([4, 0, 5], dtype=F.int64))
assert F.array_equal(bg_r.batch_num_edges(), F.tensor([0, 0, 3], dtype=F.int64))
bg_r = dgl.remove_nodes(bg, [1, 7])
assert bg_r.batch_size == bg.batch_size
assert F.array_equal(bg_r.batch_num_nodes(), F.tensor([4, 0, 4], dtype=F.int64))
assert F.array_equal(bg_r.batch_num_edges(), F.tensor([0, 0, 1], dtype=F.int64))
bg_r = dgl.remove_nodes(bg, F.tensor([1, 7], dtype=idtype))
assert bg_r.batch_size == bg.batch_size
assert F.array_equal(bg_r.batch_num_nodes(), F.tensor([4, 0, 4], dtype=F.int64))
assert F.array_equal(bg_r.batch_num_edges(), F.tensor([0, 0, 1], dtype=F.int64))
# batched heterogeneous graph
g1 = dgl.heterograph({
('user', 'follows', 'user'): ([0, 1], [1, 2]),
('user', 'plays', 'game'): ([1, 3], [0, 1])
}, num_nodes_dict={'user': 4, 'game': 3}, idtype=idtype, device=ctx)
g2 = dgl.heterograph({
('user', 'follows', 'user'): ([0, 2], [3, 4]),
('user', 'plays', 'game'): ([], [])
}, num_nodes_dict={'user': 6, 'game': 2}, idtype=idtype, device=ctx)
g3 = dgl.heterograph({
('user', 'follows', 'user'): ([], []),
('user', 'plays', 'game'): ([1, 2], [1, 2])
}, idtype=idtype, device=ctx)
bg = dgl.batch([g1, g2, g3])
bg_r = dgl.remove_nodes(bg, 1, ntype='user')
assert bg_r.batch_size == bg.batch_size
assert F.array_equal(bg_r.batch_num_nodes('user'), F.tensor([3, 6, 3], dtype=F.int64))
assert F.array_equal(bg.batch_num_nodes('game'), bg_r.batch_num_nodes('game'))
assert F.array_equal(bg_r.batch_num_edges('follows'), F.tensor([0, 2, 0], dtype=F.int64))
assert F.array_equal(bg_r.batch_num_edges('plays'), F.tensor([1, 0, 2], dtype=F.int64))
bg_r = dgl.remove_nodes(bg, 6, ntype='game')
assert bg_r.batch_size == bg.batch_size
assert F.array_equal(bg.batch_num_nodes('user'), bg_r.batch_num_nodes('user'))
assert F.array_equal(bg_r.batch_num_nodes('game'), F.tensor([3, 2, 2], dtype=F.int64))
assert F.array_equal(bg.batch_num_edges('follows'), bg_r.batch_num_edges('follows'))
assert F.array_equal(bg_r.batch_num_edges('plays'), F.tensor([2, 0, 1], dtype=F.int64))
bg_r = dgl.remove_nodes(bg, [1, 5, 6, 11], ntype='user')
assert bg_r.batch_size == bg.batch_size
assert F.array_equal(bg_r.batch_num_nodes('user'), F.tensor([3, 4, 2], dtype=F.int64))
assert F.array_equal(bg.batch_num_nodes('game'), bg_r.batch_num_nodes('game'))
assert F.array_equal(bg_r.batch_num_edges('follows'), F.tensor([0, 1, 0], dtype=F.int64))
assert F.array_equal(bg_r.batch_num_edges('plays'), F.tensor([1, 0, 1], dtype=F.int64))
bg_r = dgl.remove_nodes(bg, [0, 3, 4, 7], ntype='game')
assert bg_r.batch_size == bg.batch_size
assert F.array_equal(bg.batch_num_nodes('user'), bg_r.batch_num_nodes('user'))
assert F.array_equal(bg_r.batch_num_nodes('game'), F.tensor([2, 0, 2], dtype=F.int64))
assert F.array_equal(bg.batch_num_edges('follows'), bg_r.batch_num_edges('follows'))
assert F.array_equal(bg_r.batch_num_edges('plays'), F.tensor([1, 0, 1], dtype=F.int64))
bg_r = dgl.remove_nodes(bg, F.tensor([1, 5, 6, 11], dtype=idtype), ntype='user')
assert bg_r.batch_size == bg.batch_size
assert F.array_equal(bg_r.batch_num_nodes('user'), F.tensor([3, 4, 2], dtype=F.int64))
assert F.array_equal(bg.batch_num_nodes('game'), bg_r.batch_num_nodes('game'))
assert F.array_equal(bg_r.batch_num_edges('follows'), F.tensor([0, 1, 0], dtype=F.int64))
assert F.array_equal(bg_r.batch_num_edges('plays'), F.tensor([1, 0, 1], dtype=F.int64))
bg_r = dgl.remove_nodes(bg, F.tensor([0, 3, 4, 7], dtype=idtype), ntype='game')
assert bg_r.batch_size == bg.batch_size
assert F.array_equal(bg.batch_num_nodes('user'), bg_r.batch_num_nodes('user'))
assert F.array_equal(bg_r.batch_num_nodes('game'), F.tensor([2, 0, 2], dtype=F.int64))
assert F.array_equal(bg.batch_num_edges('follows'), bg_r.batch_num_edges('follows'))
assert F.array_equal(bg_r.batch_num_edges('plays'), F.tensor([1, 0, 1], dtype=F.int64))
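def _sketch_remove_nodes_relabel():
    # Illustrative sketch, not invoked by the test suite: dgl.remove_nodes
    # deletes the nodes together with all incident edges and relabels the
    # remaining node IDs to stay consecutive.
    g = dgl.graph((F.tensor([0, 1, 2]), F.tensor([1, 2, 0])))
    g = dgl.remove_nodes(g, F.tensor([0]))
    # old nodes 1 and 2 become 0 and 1; only the edge 1->2 survives as 0->1
    u, v = g.edges(form='uv', order='eid')
    assert F.array_equal(u, F.tensor([0]))
    assert F.array_equal(v, F.tensor([1]))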
@parametrize_dtype
def test_add_selfloop(idtype):
# homogeneous graph
g = dgl.graph(([0, 0, 2], [2, 1, 0]), idtype=idtype, device=F.ctx())
g.edata['he'] = F.copy_to(F.tensor([1, 2, 3], dtype=idtype), ctx=F.ctx())
g.ndata['hn'] = F.copy_to(F.tensor([1, 2, 3], dtype=idtype), ctx=F.ctx())
g = dgl.add_self_loop(g)
assert g.number_of_nodes() == 3
assert g.number_of_edges() == 6
u, v = g.edges(form='uv', order='eid')
assert F.array_equal(u, F.tensor([0, 0, 2, 0, 1, 2], dtype=idtype))
assert F.array_equal(v, F.tensor([2, 1, 0, 0, 1, 2], dtype=idtype))
assert F.array_equal(g.edata['he'], F.tensor([1, 2, 3, 0, 0, 0], dtype=idtype))
# bipartite graph
g = dgl.heterograph(
{('user', 'plays', 'game'): ([0, 1, 2], [1, 2, 2])}, idtype=idtype, device=F.ctx())
    # adding self-loops to a bipartite etype is invalid and should raise an error
raise_error = False
try:
g = dgl.add_self_loop(g)
except:
raise_error = True
assert raise_error
g = create_test_heterograph5(idtype)
g = dgl.add_self_loop(g, etype='follows')
assert g.number_of_nodes('user') == 3
assert g.number_of_nodes('game') == 2
assert g.number_of_edges('follows') == 5
assert g.number_of_edges('plays') == 2
u, v = g.edges(form='uv', order='eid', etype='follows')
assert F.array_equal(u, F.tensor([1, 2, 0, 1, 2], dtype=idtype))
assert F.array_equal(v, F.tensor([0, 1, 0, 1, 2], dtype=idtype))
assert F.array_equal(g.edges['follows'].data['h'], F.tensor([1, 2, 0, 0, 0], dtype=idtype))
assert F.array_equal(g.edges['plays'].data['h'], F.tensor([1, 2], dtype=idtype))
raise_error = False
try:
g = dgl.add_self_loop(g, etype='plays')
except:
raise_error = True
assert raise_error
@parametrize_dtype
def test_remove_selfloop(idtype):
# homogeneous graph
g = dgl.graph(([0, 0, 0, 1], [1, 0, 0, 2]), idtype=idtype, device=F.ctx())
g.edata['he'] = F.copy_to(F.tensor([1, 2, 3, 4], dtype=idtype), ctx=F.ctx())
g = dgl.remove_self_loop(g)
assert g.number_of_nodes() == 3
assert g.number_of_edges() == 2
assert F.array_equal(g.edata['he'], F.tensor([1, 4], dtype=idtype))
# bipartite graph
g = dgl.heterograph(
{('user', 'plays', 'game'): ([0, 1, 2], [1, 2, 2])}, idtype=idtype, device=F.ctx())
    # removing self-loops from a bipartite etype is invalid and should raise an error
raise_error = False
try:
g = dgl.remove_self_loop(g, etype='plays')
except:
raise_error = True
assert raise_error
g = create_test_heterograph4(idtype)
g = dgl.remove_self_loop(g, etype='follows')
assert g.number_of_nodes('user') == 3
assert g.number_of_nodes('game') == 2
assert g.number_of_edges('follows') == 2
assert g.number_of_edges('plays') == 2
u, v = g.edges(form='uv', order='eid', etype='follows')
assert F.array_equal(u, F.tensor([1, 2], dtype=idtype))
assert F.array_equal(v, F.tensor([0, 1], dtype=idtype))
assert F.array_equal(g.edges['follows'].data['h'], F.tensor([2, 4], dtype=idtype))
assert F.array_equal(g.edges['plays'].data['h'], F.tensor([1, 2], dtype=idtype))
raise_error = False
try:
g = dgl.remove_self_loop(g, etype='plays')
except:
raise_error = True
assert raise_error
@parametrize_dtype
def test_reorder_graph(idtype):
g = dgl.graph(([0, 1, 2, 3, 4], [2, 2, 3, 2, 3]),
idtype=idtype, device=F.ctx())
g.ndata['h'] = F.copy_to(F.randn((g.num_nodes(), 3)), ctx=F.ctx())
g.edata['w'] = F.copy_to(F.randn((g.num_edges(), 2)), ctx=F.ctx())
# call with default: node_permute_algo=None, edge_permute_algo='src'
rg = dgl.reorder_graph(g)
assert dgl.EID in rg.edata.keys()
src = F.asnumpy(rg.edges()[0])
assert np.array_equal(src, np.sort(src))
# call with 'rcmk' node_permute_algo
rg = dgl.reorder_graph(g, node_permute_algo='rcmk')
assert dgl.NID in rg.ndata.keys()
assert dgl.EID in rg.edata.keys()
src = F.asnumpy(rg.edges()[0])
assert np.array_equal(src, np.sort(src))
# call with 'dst' edge_permute_algo
rg = dgl.reorder_graph(g, edge_permute_algo='dst')
dst = F.asnumpy(rg.edges()[1])
assert np.array_equal(dst, np.sort(dst))
# call with unknown edge_permute_algo
raise_error = False
try:
dgl.reorder_graph(g, edge_permute_algo='none')
except:
raise_error = True
assert raise_error
# reorder back to original according to stored ids
rg = dgl.reorder_graph(g, node_permute_algo='rcmk')
rg2 = dgl.reorder_graph(rg, 'custom', permute_config={
'nodes_perm': np.argsort(F.asnumpy(rg.ndata[dgl.NID]))})
assert F.array_equal(g.ndata['h'], rg2.ndata['h'])
assert F.array_equal(g.edata['w'], rg2.edata['w'])
# do not store ids
rg = dgl.reorder_graph(g, store_ids=False)
assert not dgl.NID in rg.ndata.keys()
assert not dgl.EID in rg.edata.keys()
# metis does not work on windows.
if os.name == 'nt':
pass
else:
        # metis_partition may fail for a small graph.
mg = create_large_graph(1000).to(F.ctx())
# call with metis strategy, but k is not specified
raise_error = False
try:
dgl.reorder_graph(mg, node_permute_algo='metis')
except:
raise_error = True
assert raise_error
# call with metis strategy, k is specified
raise_error = False
try:
dgl.reorder_graph(mg,
node_permute_algo='metis', permute_config={'k': 2})
except:
raise_error = True
assert not raise_error
# call with qualified nodes_perm specified
nodes_perm = np.random.permutation(g.num_nodes())
raise_error = False
try:
dgl.reorder_graph(g, node_permute_algo='custom', permute_config={
'nodes_perm': nodes_perm})
except:
raise_error = True
assert not raise_error
# call with unqualified nodes_perm specified
raise_error = False
try:
dgl.reorder_graph(g, node_permute_algo='custom', permute_config={
'nodes_perm': nodes_perm[:g.num_nodes() - 1]})
except:
raise_error = True
assert raise_error
# call with unsupported strategy
raise_error = False
try:
dgl.reorder_graph(g, node_permute_algo='cmk')
except:
raise_error = True
assert raise_error
# heterograph: not supported
raise_error = False
try:
        hg = dgl.heterograph({('user', 'follow', 'user'): (
[0, 1], [1, 2])}, idtype=idtype, device=F.ctx())
dgl.reorder_graph(hg)
except:
raise_error = True
assert raise_error
# TODO: shall we fix them?
# add 'csc' format if needed
#fg = g.formats('csr')
#assert 'csc' not in sum(fg.formats().values(), [])
#rfg = dgl.reorder_graph(fg)
#assert 'csc' in sum(rfg.formats().values(), [])
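def _sketch_reorder_graph_src_order():
    # Illustrative sketch, not invoked by the test suite: with the default
    # edge_permute_algo='src' checked above, the edges of the reordered
    # graph are sorted by source node ID.
    g = dgl.graph((F.tensor([2, 0, 1]), F.tensor([0, 1, 2])))
    rg = dgl.reorder_graph(g)
    src = F.asnumpy(rg.edges()[0])
    assert np.array_equal(src, np.sort(src))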
@unittest.skipIf(dgl.backend.backend_name == "tensorflow", reason="TF doesn't support a slicing operation")
@parametrize_dtype
def test_norm_by_dst(idtype):
# Case1: A homogeneous graph
g = dgl.graph(([0, 1, 1], [1, 1, 2]), idtype=idtype, device=F.ctx())
eweight = dgl.norm_by_dst(g)
assert F.allclose(eweight, F.tensor([0.5, 0.5, 1.0]))
# Case2: A heterogeneous graph
g = dgl.heterograph({
('user', 'follows', 'user'): ([0, 1], [1, 2]),
('user', 'plays', 'game'): ([0, 1, 1], [1, 1, 2])
}, idtype=idtype, device=F.ctx())
eweight = dgl.norm_by_dst(g, etype=('user', 'plays', 'game'))
assert F.allclose(eweight, F.tensor([0.5, 0.5, 1.0]))
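def _sketch_norm_by_dst_manual():
    # Illustrative sketch, not invoked by the test suite: reproducing
    # dgl.norm_by_dst by hand under the semantics asserted above, i.e. each
    # edge is weighted by 1 / in-degree of its destination node.
    g = dgl.graph((F.tensor([0, 1, 1]), F.tensor([1, 1, 2])))
    eweight = dgl.norm_by_dst(g)
    _, dst = g.edges()
    deg = F.asnumpy(g.in_degrees(dst)).astype(np.float64)
    assert np.allclose(F.asnumpy(eweight), 1.0 / deg)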
@parametrize_dtype
def test_module_add_self_loop(idtype):
g = dgl.graph(([1, 1], [1, 2]), idtype=idtype, device=F.ctx())
g.ndata['h'] = F.randn((g.num_nodes(), 2))
g.edata['w'] = F.randn((g.num_edges(), 3))
# Case1: add self-loops with the default setting
transform = dgl.AddSelfLoop()
new_g = transform(g)
assert new_g.device == g.device
assert new_g.idtype == g.idtype
assert new_g.num_nodes() == g.num_nodes()
assert new_g.num_edges() == 4
src, dst = new_g.edges()
eset = set(zip(list(F.asnumpy(src)), list(F.asnumpy(dst))))
assert eset == {(0, 0), (1, 1), (1, 2), (2, 2)}
assert 'h' in new_g.ndata
assert 'w' in new_g.edata
    # Case2: allow duplicate self-loops
transform = dgl.AddSelfLoop(allow_duplicate=True)
new_g = transform(g)
assert new_g.device == g.device
assert new_g.idtype == g.idtype
assert new_g.num_nodes() == g.num_nodes()
assert new_g.num_edges() == 5
src, dst = new_g.edges()
eset = set(zip(list(F.asnumpy(src)), list(F.asnumpy(dst))))
assert eset == {(0, 0), (1, 1), (1, 2), (2, 2)}
assert 'h' in new_g.ndata
assert 'w' in new_g.edata
# Create a heterogeneous graph
g = dgl.heterograph({
('user', 'plays', 'game'): ([0], [1]),
('user', 'follows', 'user'): ([1], [3])
}, idtype=idtype, device=F.ctx())
g.nodes['user'].data['h1'] = F.randn((4, 2))
g.edges['plays'].data['w1'] = F.randn((1, 3))
g.nodes['game'].data['h2'] = F.randn((2, 4))
g.edges['follows'].data['w2'] = F.randn((1, 5))
# Case3: add self-loops for a heterogeneous graph
new_g = transform(g)
assert new_g.device == g.device
assert new_g.idtype == g.idtype
assert new_g.ntypes == g.ntypes
assert new_g.canonical_etypes == g.canonical_etypes
for nty in new_g.ntypes:
assert new_g.num_nodes(nty) == g.num_nodes(nty)
assert new_g.num_edges('plays') == 1
assert new_g.num_edges('follows') == 5
assert 'h1' in new_g.nodes['user'].data
assert 'h2' in new_g.nodes['game'].data
assert 'w1' in new_g.edges['plays'].data
assert 'w2' in new_g.edges['follows'].data
# Case4: add self-etypes for a heterogeneous graph
transform = dgl.AddSelfLoop(new_etypes=True)
new_g = transform(g)
assert new_g.device == g.device
assert new_g.idtype == g.idtype
assert new_g.ntypes == g.ntypes
assert set(new_g.canonical_etypes) == {
('user', 'plays', 'game'), ('user', 'follows', 'user'),
('user', 'self', 'user'), ('game', 'self', 'game')
}
for nty in new_g.ntypes:
assert new_g.num_nodes(nty) == g.num_nodes(nty)
assert new_g.num_edges('plays') == 1
assert new_g.num_edges('follows') == 5
assert new_g.num_edges(('user', 'self', 'user')) == 4
assert new_g.num_edges(('game', 'self', 'game')) == 2
assert 'h1' in new_g.nodes['user'].data
assert 'h2' in new_g.nodes['game'].data
assert 'w1' in new_g.edges['plays'].data
assert 'w2' in new_g.edges['follows'].data
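# dgl.RemoveSelfLoop drops every u -> u edge (per edge type in heterographs)
# while keeping node features and the features of the surviving edges.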
@parametrize_dtype
def test_module_remove_self_loop(idtype):
transform = dgl.RemoveSelfLoop()
# Case1: homogeneous graph
g = dgl.graph(([1, 1], [1, 2]), idtype=idtype, device=F.ctx())
g.ndata['h'] = F.randn((g.num_nodes(), 2))
g.edata['w'] = F.randn((g.num_edges(), 3))
new_g = transform(g)
assert new_g.device == g.device
assert new_g.idtype == g.idtype
assert new_g.num_nodes() == g.num_nodes()
assert new_g.num_edges() == 1
src, dst = new_g.edges()
eset = set(zip(list(F.asnumpy(src)), list(F.asnumpy(dst))))
assert eset == {(1, 2)}
assert 'h' in new_g.ndata
assert 'w' in new_g.edata
# Case2: heterogeneous graph
g = dgl.heterograph({
('user', 'plays', 'game'): ([0, 1], [1, 1]),
('user', 'follows', 'user'): ([1, 2], [2, 2])
}, idtype=idtype, device=F.ctx())
g.nodes['user'].data['h1'] = F.randn((3, 2))
g.edges['plays'].data['w1'] = F.randn((2, 3))
g.nodes['game'].data['h2'] = F.randn((2, 4))
g.edges['follows'].data['w2'] = F.randn((2, 5))
new_g = transform(g)
assert new_g.device == g.device
assert new_g.idtype == g.idtype
assert new_g.ntypes == g.ntypes
assert new_g.canonical_etypes == g.canonical_etypes
for nty in new_g.ntypes:
assert new_g.num_nodes(nty) == g.num_nodes(nty)
assert new_g.num_edges('plays') == 2
assert new_g.num_edges('follows') == 1
assert 'h1' in new_g.nodes['user'].data
assert 'h2' in new_g.nodes['game'].data
assert 'w1' in new_g.edges['plays'].data
assert 'w2' in new_g.edges['follows'].data
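# dgl.AddReverse adds a reversed counterpart for every edge. Reverse edges get
# zero-valued features unless copy_edata=True. In heterographs, reversed edges
# of a relation with distinct endpoint types go into a new rev_* relation,
# while symmetric relations like 'follows' get them in place unless
# sym_new_etype=True (Case4 below).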
@parametrize_dtype
def test_module_add_reverse(idtype):
transform = dgl.AddReverse()
# Case1: Add reverse edges for a homogeneous graph
g = dgl.graph(([0], [1]), idtype=idtype, device=F.ctx())
g.ndata['h'] = F.randn((g.num_nodes(), 3))
g.edata['w'] = F.randn((g.num_edges(), 2))
new_g = transform(g)
assert new_g.device == g.device
assert new_g.idtype == g.idtype
assert g.num_nodes() == new_g.num_nodes()
src, dst = new_g.edges()
eset = set(zip(list(F.asnumpy(src)), list(F.asnumpy(dst))))
assert eset == {(0, 1), (1, 0)}
assert F.allclose(g.ndata['h'], new_g.ndata['h'])
assert F.allclose(g.edata['w'], F.narrow_row(new_g.edata['w'], 0, 1))
assert F.allclose(F.narrow_row(new_g.edata['w'], 1, 2), F.zeros((1, 2), F.float32, F.ctx()))
# Case2: Add reverse edges for a homogeneous graph and copy edata
transform = dgl.AddReverse(copy_edata=True)
new_g = transform(g)
assert new_g.device == g.device
assert new_g.idtype == g.idtype
assert g.num_nodes() == new_g.num_nodes()
src, dst = new_g.edges()
eset = set(zip(list(F.asnumpy(src)), list(F.asnumpy(dst))))
assert eset == {(0, 1), (1, 0)}
assert F.allclose(g.ndata['h'], new_g.ndata['h'])
assert F.allclose(g.edata['w'], F.narrow_row(new_g.edata['w'], 0, 1))
assert F.allclose(g.edata['w'], F.narrow_row(new_g.edata['w'], 1, 2))
# Case3: Add reverse edges for a heterogeneous graph
g = dgl.heterograph({
('user', 'plays', 'game'): ([0, 1], [1, 1]),
('user', 'follows', 'user'): ([1, 2], [2, 2])
}, device=F.ctx())
new_g = transform(g)
assert new_g.device == g.device
assert new_g.idtype == g.idtype
assert g.ntypes == new_g.ntypes
assert set(new_g.canonical_etypes) == {
('user', 'plays', 'game'), ('user', 'follows', 'user'), ('game', 'rev_plays', 'user')}
for nty in g.ntypes:
assert g.num_nodes(nty) == new_g.num_nodes(nty)
src, dst = new_g.edges(etype='plays')
eset = set(zip(list(F.asnumpy(src)), list(F.asnumpy(dst))))
assert eset == {(0, 1), (1, 1)}
src, dst = new_g.edges(etype='follows')
eset = set(zip(list(F.asnumpy(src)), list(F.asnumpy(dst))))
assert eset == {(1, 2), (2, 2), (2, 1)}
src, dst = new_g.edges(etype='rev_plays')
eset = set(zip(list(F.asnumpy(src)), list(F.asnumpy(dst))))
assert eset == {(1, 1), (1, 0)}
# Case4: Enforce reverse edge types for symmetric canonical edge types
transform = dgl.AddReverse(sym_new_etype=True)
new_g = transform(g)
assert new_g.device == g.device
assert new_g.idtype == g.idtype
assert g.ntypes == new_g.ntypes
assert set(new_g.canonical_etypes) == {
('user', 'plays', 'game'), ('user', 'follows', 'user'),
('game', 'rev_plays', 'user'), ('user', 'rev_follows', 'user')}
for nty in g.ntypes:
assert g.num_nodes(nty) == new_g.num_nodes(nty)
src, dst = new_g.edges(etype='plays')
eset = set(zip(list(F.asnumpy(src)), list(F.asnumpy(dst))))
assert eset == {(0, 1), (1, 1)}
src, dst = new_g.edges(etype='follows')
eset = set(zip(list(F.asnumpy(src)), list(F.asnumpy(dst))))
assert eset == {(1, 2), (2, 2)}
src, dst = new_g.edges(etype='rev_plays')
eset = set(zip(list(F.asnumpy(src)), list(F.asnumpy(dst))))
assert eset == {(1, 1), (1, 0)}
src, dst = new_g.edges(etype='rev_follows')
eset = set(zip(list(F.asnumpy(src)), list(F.asnumpy(dst))))
assert eset == {(2, 1), (2, 2)}
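# dgl.ToSimple merges parallel edges and stores each kept edge's multiplicity
# in edata['count'] (the duplicated (1, 2) edge below yields count 2).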
@unittest.skipIf(F._default_context_str == 'gpu', reason="GPU not supported for to_simple")
@parametrize_dtype
def test_module_to_simple(idtype):
transform = dgl.ToSimple()
g = dgl.graph(([0, 1, 1], [1, 2, 2]), idtype=idtype, device=F.ctx())
g.ndata['h'] = F.randn((g.num_nodes(), 2))
g.edata['w'] = F.tensor([[0.1], [0.2], [0.3]])
sg = transform(g)
assert sg.device == g.device
assert sg.idtype == g.idtype
assert sg.num_nodes() == g.num_nodes()
assert sg.num_edges() == 2
src, dst = sg.edges()
eset = set(zip(list(F.asnumpy(src)), list(F.asnumpy(dst))))
assert eset == {(0, 1), (1, 2)}
assert F.allclose(sg.edata['count'], F.tensor([1, 2]))
assert F.allclose(sg.ndata['h'], g.ndata['h'])
g = dgl.heterograph({
('user', 'follows', 'user'): ([0, 1, 1], [1, 2, 2]),
('user', 'plays', 'game'): ([0, 1, 0], [1, 1, 1])
})
sg = transform(g)
assert sg.device == g.device
assert sg.idtype == g.idtype
assert sg.ntypes == g.ntypes
assert sg.canonical_etypes == g.canonical_etypes
for nty in sg.ntypes:
assert sg.num_nodes(nty) == g.num_nodes(nty)
for ety in sg.canonical_etypes:
assert sg.num_edges(ety) == 2
src, dst = sg.edges(etype='follows')
eset = set(zip(list(F.asnumpy(src)), list(F.asnumpy(dst))))
assert eset == {(0, 1), (1, 2)}
src, dst = sg.edges(etype='plays')
eset = set(zip(list(F.asnumpy(src)), list(F.asnumpy(dst))))
assert eset == {(0, 1), (1, 1)}
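# dgl.LineGraph turns each edge into a node and links edge u->v to edge v->w.
# backtracking=False drops immediate reversals (u->v followed by v->u),
# leaving only the edge pair (0, 2) below.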
@parametrize_dtype
def test_module_line_graph(idtype):
transform = dgl.LineGraph()
g = dgl.graph(([0, 1, 1], [1, 0, 2]), idtype=idtype, device=F.ctx())
g.ndata['h'] = F.tensor([[0.], [1.], [2.]])
g.edata['w'] = F.tensor([[0.], [0.1], [0.2]])
new_g = transform(g)
assert new_g.device == g.device
assert new_g.idtype == g.idtype
assert new_g.num_nodes() == g.num_edges()
src, dst = new_g.edges()
eset = set(zip(list(F.asnumpy(src)), list(F.asnumpy(dst))))
assert eset == {(0, 1), (0, 2), (1, 0)}
transform = dgl.LineGraph(backtracking=False)
new_g = transform(g)
assert new_g.device == g.device
assert new_g.idtype == g.idtype
assert new_g.num_nodes() == g.num_edges()
src, dst = new_g.edges()
eset = set(zip(list(F.asnumpy(src)), list(F.asnumpy(dst))))
assert eset == {(0, 2)}
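# dgl.KHopGraph(k) connects u to v iff v is reachable from u in exactly k
# hops; the 2-hop graph of the path 0 -> 1 -> 2 has the single edge (0, 2).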
@parametrize_dtype
def test_module_khop_graph(idtype):
transform = dgl.KHopGraph(2)
g = dgl.graph(([0, 1], [1, 2]), idtype=idtype, device=F.ctx())
g.ndata['h'] = F.randn((g.num_nodes(), 2))
new_g = transform(g)
assert new_g.device == g.device
assert new_g.idtype == g.idtype
assert new_g.num_nodes() == g.num_nodes()
assert F.allclose(g.ndata['h'], new_g.ndata['h'])
src, dst = new_g.edges()
eset = set(zip(list(F.asnumpy(src)), list(F.asnumpy(dst))))
assert eset == {(0, 2)}
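# dgl.AddMetaPaths adds, for every named metapath, an edge from the start to
# the end node of each metapath instance (e.g. person 0 authored paper 1,
# which was accepted at venue 0). keep_orig_edges=False keeps only the new
# relations.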
@parametrize_dtype
def test_module_add_metapaths(idtype):
g = dgl.heterograph({
('person', 'author', 'paper'): ([0, 0, 1], [1, 2, 2]),
('paper', 'accepted', 'venue'): ([1], [0]),
('paper', 'rejected', 'venue'): ([2], [1])
}, idtype=idtype, device=F.ctx())
g.nodes['venue'].data['h'] = F.randn((g.num_nodes('venue'), 2))
g.edges['author'].data['h'] = F.randn((g.num_edges('author'), 3))
# Case1: keep_orig_edges is True
metapaths = {
'accepted': [('person', 'author', 'paper'), ('paper', 'accepted', 'venue')],
'rejected': [('person', 'author', 'paper'), ('paper', 'rejected', 'venue')]
}
transform = dgl.AddMetaPaths(metapaths)
new_g = transform(g)
assert new_g.device == g.device
assert new_g.idtype == g.idtype
assert new_g.ntypes == g.ntypes
assert set(new_g.canonical_etypes) == {
('person', 'author', 'paper'), ('paper', 'accepted', 'venue'),
('paper', 'rejected', 'venue'), ('person', 'accepted', 'venue'),
('person', 'rejected', 'venue')
}
for nty in new_g.ntypes:
assert new_g.num_nodes(nty) == g.num_nodes(nty)
for ety in g.canonical_etypes:
assert new_g.num_edges(ety) == g.num_edges(ety)
assert F.allclose(g.nodes['venue'].data['h'], new_g.nodes['venue'].data['h'])
assert F.allclose(g.edges['author'].data['h'], new_g.edges['author'].data['h'])
src, dst = new_g.edges(etype=('person', 'accepted', 'venue'))
eset = set(zip(list(F.asnumpy(src)), list(F.asnumpy(dst))))
assert eset == {(0, 0)}
src, dst = new_g.edges(etype=('person', 'rejected', 'venue'))
eset = set(zip(list(F.asnumpy(src)), list(F.asnumpy(dst))))
assert eset == {(0, 1), (1, 1)}
# Case2: keep_orig_edges is False
transform = dgl.AddMetaPaths(metapaths, keep_orig_edges=False)
new_g = transform(g)
assert new_g.device == g.device
assert new_g.idtype == g.idtype
assert new_g.ntypes == g.ntypes
assert len(new_g.canonical_etypes) == 2
for nty in new_g.ntypes:
assert new_g.num_nodes(nty) == g.num_nodes(nty)
assert F.allclose(g.nodes['venue'].data['h'], new_g.nodes['venue'].data['h'])
src, dst = new_g.edges(etype=('person', 'accepted', 'venue'))
eset = set(zip(list(F.asnumpy(src)), list(F.asnumpy(dst))))
assert eset == {(0, 0)}
src, dst = new_g.edges(etype=('person', 'rejected', 'venue'))
eset = set(zip(list(F.asnumpy(src)), list(F.asnumpy(dst))))
assert eset == {(0, 1), (1, 1)}
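# dgl.Compose applies the transforms in order: AddReverse turns 2 edges into
# 4, then AddSelfLoop adds one loop per node, for 7 edges in total.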
@parametrize_dtype
def test_module_compose(idtype):
g = dgl.graph(([0, 1], [1, 2]), idtype=idtype, device=F.ctx())
transform = dgl.Compose([dgl.AddReverse(), dgl.AddSelfLoop()])
new_g = transform(g)
assert new_g.device == g.device
assert new_g.idtype == g.idtype
assert new_g.num_edges() == 7
src, dst = new_g.edges()
eset = set(zip(list(F.asnumpy(src)), list(F.asnumpy(dst))))
assert eset == {(0, 1), (1, 2), (1, 0), (2, 1), (0, 0), (1, 1), (2, 2)}
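# dgl.GCNNorm writes symmetric GCN normalization weights into edata['w'];
# the expected values below are consistent with w_uv = e_uv / sqrt(d(u) * d(v))
# over weighted in-degrees, with 0 wherever a degree is 0. Relations whose
# source and destination node types differ ('r2' here) are skipped.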
@parametrize_dtype
def test_module_gcnnorm(idtype):
g = dgl.heterograph({
('A', 'r1', 'A'): ([0, 1, 2], [0, 0, 1]),
('A', 'r2', 'B'): ([0, 0], [1, 1]),
('B', 'r3', 'B'): ([0, 1, 2], [0, 0, 1])
}, idtype=idtype, device=F.ctx())
g.edges['r3'].data['w'] = F.tensor([0.1, 0.2, 0.3])
transform = dgl.GCNNorm()
new_g = transform(g)
assert 'w' not in new_g.edges[('A', 'r2', 'B')].data
assert F.allclose(new_g.edges[('A', 'r1', 'A')].data['w'],
F.tensor([1./2, 1./math.sqrt(2), 0.]))
assert F.allclose(new_g.edges[('B', 'r3', 'B')].data['w'], F.tensor([1./3, 2./3, 0.]))
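# dgl.PPR rewires the graph via personalized PageRank diffusion, then
# sparsifies the dense result; avg_degree controls how many entries survive.
# Existing edata['w'] values act as prior edge weights (second half below).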
@unittest.skipIf(dgl.backend.backend_name != 'pytorch', reason='Only support PyTorch for now')
@parametrize_dtype
def test_module_ppr(idtype):
g = dgl.graph(([0, 1, 2, 3, 4], [2, 3, 4, 5, 3]), idtype=idtype, device=F.ctx())
g.ndata['h'] = F.randn((6, 2))
transform = dgl.PPR(avg_degree=2)
new_g = transform(g)
assert new_g.idtype == g.idtype
assert new_g.device == g.device
assert new_g.num_nodes() == g.num_nodes()
src, dst = new_g.edges()
eset = set(zip(list(F.asnumpy(src)), list(F.asnumpy(dst))))
assert eset == {(0, 0), (0, 2), (0, 4), (1, 1), (1, 3), (1, 5), (2, 2),
(2, 3), (2, 4), (3, 3), (3, 5), (4, 3), (4, 4), (4, 5), (5, 5)}
assert F.allclose(g.ndata['h'], new_g.ndata['h'])
assert 'w' in new_g.edata
# Prior edge weights
g.edata['w'] = F.tensor([0.1, 0.2, 0.3, 0.4, 0.5])
new_g = transform(g)
src, dst = new_g.edges()
eset = set(zip(list(F.asnumpy(src)), list(F.asnumpy(dst))))
assert eset == {(0, 0), (1, 1), (1, 3), (2, 2), (2, 3), (2, 4),
(3, 3), (3, 5), (4, 3), (4, 4), (4, 5), (5, 5)}
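# dgl.HeatKernel is the heat-kernel counterpart of PPR: diffusion through a
# matrix exponential of the transition matrix, again sparsified via
# avg_degree.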
@unittest.skipIf(dgl.backend.backend_name != 'pytorch', reason='Only support PyTorch for now')
@parametrize_dtype
def test_module_heat_kernel(idtype):
# Case1: directed graph
g = dgl.graph(([0, 1, 2, 3, 4], [2, 3, 4, 5, 3]), idtype=idtype, device=F.ctx())
g.ndata['h'] = F.randn((6, 2))
transform = dgl.HeatKernel(avg_degree=1)
new_g = transform(g)
assert new_g.idtype == g.idtype
assert new_g.device == g.device
assert new_g.num_nodes() == g.num_nodes()
assert F.allclose(g.ndata['h'], new_g.ndata['h'])
assert 'w' in new_g.edata
# Case2: weighted undirected graph
g = dgl.graph(([0, 1, 2, 3], [1, 0, 3, 2]), idtype=idtype, device=F.ctx())
g.edata['w'] = F.tensor([0.1, 0.2, 0.3, 0.4])
new_g = transform(g)
src, dst = new_g.edges()
eset = set(zip(list(F.asnumpy(src)), list(F.asnumpy(dst))))
assert eset == {(0, 0), (1, 1), (2, 2), (3, 3)}
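# dgl.GDC runs generalized graph diffusion: the list [0.1, 0.2, 0.1] supplies
# the coefficients of successive powers of the transition matrix in the
# diffusion sum, and avg_degree again controls sparsification.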
@unittest.skipIf(dgl.backend.backend_name != 'pytorch', reason='Only support PyTorch for now')
@parametrize_dtype
def test_module_gdc(idtype):
transform = dgl.GDC([0.1, 0.2, 0.1], avg_degree=1)
g = dgl.graph(([0, 1, 2, 3, 4], [2, 3, 4, 5, 3]), idtype=idtype, device=F.ctx())
g.ndata['h'] = F.randn((6, 2))
new_g = transform(g)
assert new_g.idtype == g.idtype
assert new_g.device == g.device
assert new_g.num_nodes() == g.num_nodes()
src, dst = new_g.edges()
eset = set(zip(list(F.asnumpy(src)), list(F.asnumpy(dst))))
assert eset == {(0, 0), (0, 2), (0, 4), (1, 1), (1, 3), (1, 5), (2, 2), (2, 3),
(2, 4), (3, 3), (3, 5), (4, 3), (4, 4), (4, 5), (5, 5)}
assert F.allclose(g.ndata['h'], new_g.ndata['h'])
assert 'w' in new_g.edata
# Prior edge weights
g.edata['w'] = F.tensor([0.1, 0.2, 0.3, 0.4, 0.5])
new_g = transform(g)
src, dst = new_g.edges()
eset = set(zip(list(F.asnumpy(src)), list(F.asnumpy(dst))))
assert eset == {(0, 0), (1, 1), (2, 2), (3, 3), (4, 3), (4, 4), (5, 5)}
@parametrize_dtype
def test_module_node_shuffle(idtype):
transform = dgl.NodeShuffle()
g = dgl.heterograph({
('A', 'r', 'B'): ([0, 1], [1, 2]),
}, idtype=idtype, device=F.ctx())
new_g = transform(g)
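# dgl.NodeShuffle randomly permutes the IDs of each node type. A minimal
# sanity check, assuming shuffling preserves graph metadata as the other
# transforms here do:
assert new_g.idtype == g.idtype
assert new_g.device == g.device
assert new_g.ntypes == g.ntypes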
@unittest.skipIf(dgl.backend.backend_name != 'pytorch', reason='Only support PyTorch for now')
@parametrize_dtype
def test_module_drop_node(idtype):
transform = dgl.DropNode()
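# dgl.DropNode removes each node independently at random (p defaults to 0.5),
# along with its incident edges, so only metadata invariants are asserted.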
g = dgl.heterograph({
('A', 'r', 'B'): ([0, 1], [1, 2]),
}, idtype=idtype, device=F.ctx())
new_g = transform(g)
assert new_g.idtype == g.idtype
assert new_g.device == g.device
assert new_g.ntypes == g.ntypes
assert new_g.canonical_etypes == g.canonical_etypes
@unittest.skipIf(dgl.backend.backend_name != 'pytorch', reason='Only support PyTorch for now')
@parametrize_dtype
def test_module_drop_edge(idtype):
transform = dgl.DropEdge()
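# dgl.DropEdge removes each edge independently at random (p defaults to 0.5),
# so, as with DropNode, only metadata invariants can be asserted.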
g = dgl.heterograph({
('A', 'r1', 'B'): ([0, 1], [1, 2]),
('C', 'r2', 'C'): ([3, 4, 5], [6, 7, 8])
}, idtype=idtype, device=F.ctx())
new_g = transform(g)
assert new_g.idtype == g.idtype
assert new_g.device == g.device
assert new_g.ntypes == g.ntypes
assert new_g.canonical_etypes == g.canonical_etypes
@parametrize_dtype
def test_module_add_edge(idtype):
transform = dgl.AddEdge()
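# dgl.AddEdge inserts ratio * |E| uniformly sampled edges per edge type; with
# the default ratio of 0.2, each 5-edge relation below gains one edge.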
g = dgl.heterograph({
('A', 'r1', 'B'): ([0, 1, 2, 3, 4], [1, 2, 3, 4, 5]),
('C', 'r2', 'C'): ([0, 1, 2, 3, 4], [1, 2, 3, 4, 5])
}, idtype=idtype, device=F.ctx())
new_g = transform(g)
assert new_g.num_edges(('A', 'r1', 'B')) == 6
assert new_g.num_edges(('C', 'r2', 'C')) == 6
assert new_g.idtype == g.idtype
assert new_g.device == g.device
assert new_g.ntypes == g.ntypes
assert new_g.canonical_etypes == g.canonical_etypes
@parametrize_dtype
def test_module_random_walk_pe(idtype):
transform = dgl.RandomWalkPE(2, 'rwpe')
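# RandomWalkPE(k, name) stores, per node i, the probability that a j-step
# random walk from i returns to i, for j = 1..k. E.g. node 1 below returns in
# one step with probability 0.5 (its self-loop is one of two out-edges).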
g = dgl.graph(([0, 1, 1], [1, 1, 0]), idtype=idtype, device=F.ctx())
new_g = transform(g)
tgt = F.copy_to(F.tensor([[0., 0.5],[0.5, 0.75]]), g.device)
assert F.allclose(new_g.ndata['rwpe'], tgt)
@parametrize_dtype
def test_module_laplacian_pe(idtype):
transform = dgl.LaplacianPE(2, 'lappe')
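# LaplacianPE(k, name) uses the k smallest non-trivial Laplacian eigenvectors
# as positional encodings. Eigenvector signs are arbitrary, hence the abs()
# comparison below.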
g = dgl.graph(([2, 1, 0, 3, 1, 1],[3, 0, 1, 3, 3, 1]), idtype=idtype, device=F.ctx())
new_g = transform(g)
tgt = F.copy_to(F.tensor([[ 0.24971116, 0.],
[ 0.11771496, 0.],
[ 0.83237050, 1.],
[ 0.48056933, 0.]]), g.device)
# tensorflow tensors have no .abs() method, so call __abs__ directly
if dgl.backend.backend_name == 'tensorflow':
assert F.allclose(new_g.ndata['lappe'].__abs__(), tgt)
# pytorch & mxnet
else:
assert F.allclose(new_g.ndata['lappe'].abs(), tgt)
if __name__ == '__main__':
test_partition_with_halo()
test_module_heat_kernel(F.int32)
| 42.134126
| 117
| 0.598813
| 16,459
| 99,268
| 3.45829
| 0.035664
| 0.034557
| 0.044483
| 0.063018
| 0.824649
| 0.784751
| 0.743271
| 0.716058
| 0.690794
| 0.663352
| 0
| 0.034104
| 0.207499
| 99,268
| 2,355
| 118
| 42.152017
| 0.689424
| 0.04785
| 0
| 0.607873
| 0
| 0
| 0.051161
| 0
| 0
| 0
| 0
| 0.000425
| 0.360429
| 1
| 0.02863
| false
| 0.000511
| 0.006135
| 0
| 0.036299
| 0.004601
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
b9570191d59a13f284d3694122d24aaa3eaa78dc
| 161
|
py
|
Python
|
mongoengine_todict/__init__.py
|
minazukie/mongoengine-todict
|
a2308845d0bf7a43969afefc12b9171fb754c160
|
[
"MIT"
] | 2
|
2020-07-14T01:51:04.000Z
|
2020-07-14T02:29:38.000Z
|
mongoengine_todict/__init__.py
|
minazukie/mongoengine-todict
|
a2308845d0bf7a43969afefc12b9171fb754c160
|
[
"MIT"
] | null | null | null |
mongoengine_todict/__init__.py
|
minazukie/mongoengine-todict
|
a2308845d0bf7a43969afefc12b9171fb754c160
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
__version__ = "0.1.2"
from mongoengine_todict.mixin import DocumentMixin, register_field
__all__ = ("DocumentMixin", "register_field")
| 23
| 66
| 0.732919
| 19
| 161
| 5.631579
| 0.842105
| 0.392523
| 0.485981
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.028169
| 0.118012
| 161
| 6
| 67
| 26.833333
| 0.725352
| 0.130435
| 0
| 0
| 0
| 0
| 0.231884
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
b95ee9d243df941087a901e985e586e37e233cfc
| 84
|
py
|
Python
|
utils/worker/__init__.py
|
forcast-open/federated-api
|
cbcbeacf00a7cb53a22b26c8170bf146f476ac1a
|
[
"MIT"
] | null | null | null |
utils/worker/__init__.py
|
forcast-open/federated-api
|
cbcbeacf00a7cb53a22b26c8170bf146f476ac1a
|
[
"MIT"
] | null | null | null |
utils/worker/__init__.py
|
forcast-open/federated-api
|
cbcbeacf00a7cb53a22b26c8170bf146f476ac1a
|
[
"MIT"
] | null | null | null |
#### Import sub-modules of the library ####
from .worker import app, api, celery, db
| 42
| 43
| 0.690476
| 13
| 84
| 4.461538
| 0.923077
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.154762
| 84
| 2
| 44
| 42
| 0.816901
| 0.392857
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
b9715f15290cc81fe60c28a4b901fdb5f8035c4a
| 47
|
py
|
Python
|
qiubai/manage.py
|
shredstar/webcrawler
|
38284e4c568565a30e2bd06049d8fda84b055bed
|
[
"Apache-2.0"
] | null | null | null |
qiubai/manage.py
|
shredstar/webcrawler
|
38284e4c568565a30e2bd06049d8fda84b055bed
|
[
"Apache-2.0"
] | null | null | null |
qiubai/manage.py
|
shredstar/webcrawler
|
38284e4c568565a30e2bd06049d8fda84b055bed
|
[
"Apache-2.0"
] | null | null | null |
from scrapy.cmdline import execute
execute()
| 9.4
| 34
| 0.787234
| 6
| 47
| 6.166667
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.148936
| 47
| 4
| 35
| 11.75
| 0.925
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
b988a1f1a5b293e2e8652b83c7f7b59bd480c5bd
| 19
|
py
|
Python
|
t2.py
|
soundaryathiagarajan/pythonprojects
|
174bd5aed96100e48e91d6a9552aa5b8ef1ce99a
|
[
"Apache-2.0"
] | null | null | null |
t2.py
|
soundaryathiagarajan/pythonprojects
|
174bd5aed96100e48e91d6a9552aa5b8ef1ce99a
|
[
"Apache-2.0"
] | null | null | null |
t2.py
|
soundaryathiagarajan/pythonprojects
|
174bd5aed96100e48e91d6a9552aa5b8ef1ce99a
|
[
"Apache-2.0"
] | null | null | null |
print 'hi how r u'
| 9.5
| 18
| 0.631579
| 5
| 19
| 2.4
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.263158
| 19
| 1
| 19
| 19
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0.526316
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 1
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
b9a912b742f153c7edd214df0016201563ea3caa
| 153
|
py
|
Python
|
tests/web_platform/CSS2/positioning/test_positioning_float.py
|
jonboland/colosseum
|
cbf974be54fd7f6fddbe7285704cfaf7a866c5c5
|
[
"BSD-3-Clause"
] | 71
|
2015-04-13T09:44:14.000Z
|
2019-03-24T01:03:02.000Z
|
tests/web_platform/CSS2/positioning/test_positioning_float.py
|
jonboland/colosseum
|
cbf974be54fd7f6fddbe7285704cfaf7a866c5c5
|
[
"BSD-3-Clause"
] | 35
|
2019-05-06T15:26:09.000Z
|
2022-03-28T06:30:33.000Z
|
tests/web_platform/CSS2/positioning/test_positioning_float.py
|
jonboland/colosseum
|
cbf974be54fd7f6fddbe7285704cfaf7a866c5c5
|
[
"BSD-3-Clause"
] | 139
|
2015-05-30T18:37:43.000Z
|
2019-03-27T17:14:05.000Z
|
from tests.utils import W3CTestCase
class TestPositioningFloat(W3CTestCase):
vars().update(W3CTestCase.find_tests(__file__, 'positioning-float-'))
| 25.5
| 73
| 0.797386
| 16
| 153
| 7.3125
| 0.8125
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.021583
| 0.091503
| 153
| 5
| 74
| 30.6
| 0.820144
| 0
| 0
| 0
| 0
| 0
| 0.117647
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
b9d4ac5ebde687c3a3f56410341cfe56155719b1
| 141
|
py
|
Python
|
AiXF/__init__.py
|
wzy916/wzy
|
5e491cc45c896fb1da79c63bae0e3fc3414a916e
|
[
"Apache-2.0"
] | null | null | null |
AiXF/__init__.py
|
wzy916/wzy
|
5e491cc45c896fb1da79c63bae0e3fc3414a916e
|
[
"Apache-2.0"
] | null | null | null |
AiXF/__init__.py
|
wzy916/wzy
|
5e491cc45c896fb1da79c63bae0e3fc3414a916e
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import absolute_import, unicode_literals
from .celery import app as celery_app
import pymysql
pymysql.install_as_MySQLdb()
| 35.25
| 57
| 0.851064
| 20
| 141
| 5.55
| 0.6
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.113475
| 141
| 4
| 58
| 35.25
| 0.888
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.75
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
b9ec9ad6d7eb78ba0aa300929a6f9c4cab430aeb
| 88
|
py
|
Python
|
neptune/internal/cli/processes/__init__.py
|
jiji-online/neptune-cli
|
50cf680a80d141497f9331ab7cdaee49fcb90b0c
|
[
"Apache-2.0"
] | null | null | null |
neptune/internal/cli/processes/__init__.py
|
jiji-online/neptune-cli
|
50cf680a80d141497f9331ab7cdaee49fcb90b0c
|
[
"Apache-2.0"
] | null | null | null |
neptune/internal/cli/processes/__init__.py
|
jiji-online/neptune-cli
|
50cf680a80d141497f9331ab7cdaee49fcb90b0c
|
[
"Apache-2.0"
] | null | null | null |
from .utils import build_process_command, recognize_execution_command, ExecutionCommand
| 44
| 87
| 0.897727
| 10
| 88
| 7.5
| 0.9
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.068182
| 88
| 1
| 88
| 88
| 0.914634
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
6a093b3b166bceee006b93349831064fbc3ae897
| 3,706
|
py
|
Python
|
pyaz/network/virtual_appliance/site/__init__.py
|
py-az-cli/py-az-cli
|
9a7dc44e360c096a5a2f15595353e9dad88a9792
|
[
"MIT"
] | null | null | null |
pyaz/network/virtual_appliance/site/__init__.py
|
py-az-cli/py-az-cli
|
9a7dc44e360c096a5a2f15595353e9dad88a9792
|
[
"MIT"
] | null | null | null |
pyaz/network/virtual_appliance/site/__init__.py
|
py-az-cli/py-az-cli
|
9a7dc44e360c096a5a2f15595353e9dad88a9792
|
[
"MIT"
] | 1
|
2022-02-03T09:12:01.000Z
|
2022-02-03T09:12:01.000Z
|
from .... pyaz_utils import _call_az
def create(address_prefix, appliance_name, name, resource_group, allow=None, default=None, optimize=None):
'''
Create an Azure network virtual appliance site.
Required Parameters:
- address_prefix -- Address Prefix of Network Virtual Appliance Site
- appliance_name -- The name of Network Virtual Appliance
- name -- The name of Network Virtual Appliance Site
- resource_group -- Name of resource group. You can configure the default group using `az configure --defaults group=<name>`
Optional Parameters:
- allow -- Flag to control breakout of o365 allow category.
- default -- Flag to control breakout of o365 default category.
- optimize -- Flag to control breakout of o365 optimize category.
'''
return _call_az("az network virtual-appliance site create", locals())
def update(address_prefix, appliance_name, name, resource_group, add=None, allow=None, default=None, force_string=None, optimize=None, remove=None, set=None):
'''
Update an Azure network virtual appliance site.
Required Parameters:
- address_prefix -- Address Prefix of Network Virtual Appliance Site
- appliance_name -- The name of Network Virtual Appliance
- name -- The name of Network Virtual Appliance Site
- resource_group -- Name of resource group. You can configure the default group using `az configure --defaults group=<name>`
Optional Parameters:
- add -- Add an object to a list of objects by specifying a path and key value pairs. Example: --add property.listProperty <key=value, string or JSON string>
- allow -- Flag to control breakout of o365 allow category.
- default -- Flag to control breakout of o365 default category.
- force_string -- When using 'set' or 'add', preserve string literals instead of attempting to convert to JSON.
- optimize -- Flag to control breakout of o365 optimize category.
- remove -- Remove a property or an element from a list. Example: --remove property.list <indexToRemove> OR --remove propertyToRemove
- set -- Update an object by specifying a property path and value to set. Example: --set property1.property2=<value>
'''
return _call_az("az network virtual-appliance site update", locals())
def show(appliance_name, name, resource_group):
'''
Show the detail of an Azure network virtual appliance site.
Required Parameters:
- appliance_name -- The name of Network Virtual Appliance
- name -- The name of Network Virtual Appliance Site
- resource_group -- Name of resource group. You can configure the default group using `az configure --defaults group=<name>`
'''
return _call_az("az network virtual-appliance site show", locals())
def delete(appliance_name, name, resource_group, yes=None):
'''
Delete an Azure network virtual appliance site.
Required Parameters:
- appliance_name -- The name of Network Virtual Appliance
- name -- The name of Network Virtual Appliance Site
- resource_group -- Name of resource group. You can configure the default group using `az configure --defaults group=<name>`
Optional Parameters:
- yes -- Do not prompt for confirmation.
'''
return _call_az("az network virtual-appliance site delete", locals())
def list(appliance_name, resource_group):
'''
List all Azure network virtual appliance site.
Required Parameters:
- appliance_name -- The name of Network Virtual Appliance
- resource_group -- Name of resource group. You can configure the default group using `az configure --defaults group=<name>`
'''
return _call_az("az network virtual-appliance site list", locals())
| 46.325
| 162
| 0.723961
| 491
| 3,706
| 5.384929
| 0.171079
| 0.111195
| 0.182678
| 0.163389
| 0.727307
| 0.704614
| 0.704614
| 0.672088
| 0.625567
| 0.586989
| 0
| 0.006716
| 0.196438
| 3,706
| 79
| 163
| 46.911392
| 0.881128
| 0.711279
| 0
| 0
| 0
| 0
| 0.229777
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.454545
| false
| 0
| 0.090909
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
6a0a1d9f756db55856ff45c2108ec718cf3ecc59
| 81
|
py
|
Python
|
ascii_letter_classifier/__init__.py
|
mlpipes/ascii-letter-classifier
|
ed46275b978911e0a03d95e6d1383110e227f7cd
|
[
"MIT"
] | 2
|
2019-03-26T18:33:25.000Z
|
2019-03-26T18:33:28.000Z
|
ascii_letter_classifier/__init__.py
|
mlpipes/ascii-letter-classifier
|
ed46275b978911e0a03d95e6d1383110e227f7cd
|
[
"MIT"
] | null | null | null |
ascii_letter_classifier/__init__.py
|
mlpipes/ascii-letter-classifier
|
ed46275b978911e0a03d95e6d1383110e227f7cd
|
[
"MIT"
] | null | null | null |
from .config import AsciiLetterConfig
from .model import ascii_letter_classifier
| 27
| 42
| 0.876543
| 10
| 81
| 6.9
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.098765
| 81
| 2
| 43
| 40.5
| 0.945205
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
6a0b729e1c39745c9310a4456f9616358fe9e221
| 47
|
py
|
Python
|
atest/testdata/standard_libraries/operating_system/files/result.py
|
gdw2/robot-framework
|
f25068edf1502e76ba8664d4b5ed1aebe0ee2434
|
[
"ECL-2.0",
"Apache-2.0"
] | 4
|
2016-02-29T17:00:24.000Z
|
2019-06-27T08:49:13.000Z
|
atest/testdata/standard_libraries/operating_system/files/result.py
|
gdw2/robot-framework
|
f25068edf1502e76ba8664d4b5ed1aebe0ee2434
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
atest/testdata/standard_libraries/operating_system/files/result.py
|
gdw2/robot-framework
|
f25068edf1502e76ba8664d4b5ed1aebe0ee2434
|
[
"ECL-2.0",
"Apache-2.0"
] | 2
|
2017-10-30T06:34:47.000Z
|
2019-03-12T07:23:08.000Z
|
result = u'Hyv\u00E4\u00E4 \u00FC\u00F6t\u00E4'
| 47
| 47
| 0.765957
| 8
| 47
| 4.5
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.318182
| 0.06383
| 47
| 1
| 47
| 47
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0.729167
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
6a1f9d162710bb050df4dea2ca13d2475cc9ec34
| 168
|
py
|
Python
|
scripts/fips/docs.py
|
hyperknot/country-levels
|
3f200da4c806382273dce542ab79b65705be436b
|
[
"MIT"
] | 28
|
2020-03-23T19:49:05.000Z
|
2022-03-19T14:31:56.000Z
|
scripts/fips/docs.py
|
hyperknot/country-level-id
|
3f200da4c806382273dce542ab79b65705be436b
|
[
"MIT"
] | 16
|
2020-03-25T22:15:25.000Z
|
2020-06-11T19:00:14.000Z
|
scripts/fips/docs.py
|
hyperknot/country-level-id
|
3f200da4c806382273dce542ab79b65705be436b
|
[
"MIT"
] | 5
|
2020-03-30T12:36:57.000Z
|
2021-10-08T22:42:03.000Z
|
#!/usr/bin/env python3
from country_levels_lib.fips.fips_docs import generate_fips_list
def main():
generate_fips_list()
if __name__ == "__main__":
main()
| 14
| 64
| 0.72619
| 24
| 168
| 4.458333
| 0.708333
| 0.224299
| 0.299065
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.007092
| 0.160714
| 168
| 11
| 65
| 15.272727
| 0.751773
| 0.125
| 0
| 0
| 1
| 0
| 0.054795
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| true
| 0
| 0.2
| 0
| 0.4
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
dbefee0e3a84e5496ee66489191fddf406965e13
| 11,258
|
py
|
Python
|
plots.py
|
berkankadioglu/Bandits-Get-GANs
|
8cd5be8d12fc53ba96fe51b737b665ca9baaedef
|
[
"MIT"
] | null | null | null |
plots.py
|
berkankadioglu/Bandits-Get-GANs
|
8cd5be8d12fc53ba96fe51b737b665ca9baaedef
|
[
"MIT"
] | null | null | null |
plots.py
|
berkankadioglu/Bandits-Get-GANs
|
8cd5be8d12fc53ba96fe51b737b665ca9baaedef
|
[
"MIT"
] | null | null | null |
import pickle
import numpy as np
import argparse
import os
import random
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import matplotlib as mpl
from IPython.display import HTML
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim as optim
import torch.utils.data
import torchvision.datasets as dset
import torchvision.transforms as transforms
import torchvision.utils as vutils
# Load and standardize data
# Original GAN
with open('G_loss_orig', 'rb') as f:
G_losses_orig = pickle.load(f)
f.close()
with open('D_loss_orig', 'rb') as f:
D_losses_orig = pickle.load(f)
f.close()
with open('test_imgs_orig', 'rb') as f:
img_list_orig = pickle.load(f)
f.close()
G_orig_mean = np.mean(G_losses_orig, axis=0)
G_orig_std = np.std(G_losses_orig - G_orig_mean, axis=0)
D_orig_mean = np.mean(D_losses_orig, axis=0)
D_orig_std = np.std(D_losses_orig - D_orig_mean, axis=0)
####################################################
# MAB GAN
stat_reward = False
conf_bound = False
with open('G_loss_MAB'+'_stat_'+str(stat_reward)+'_ucb_'+str(conf_bound), 'rb') as f:
G_losses_MAB_stat_false_conf_false = pickle.load(f)
f.close()
with open('D_loss_MAB'+'_stat_'+str(stat_reward)+'_ucb_'+str(conf_bound), 'rb') as f:
D_losses_MAB_stat_false_conf_false = pickle.load(f)
f.close()
with open('test_imgs_MAB'+'_stat_'+str(stat_reward)+'_ucb_'+str(conf_bound), 'rb') as f:
img_list_MAB_stat_false_conf_false = pickle.load(f)
f.close()
with open('k_list_MAB' + '_stat_' + str(stat_reward) + '_ucb_' + str(conf_bound), 'rb') as f:
k_list_MAB_stat_false_conf_false = pickle.load(f)
f.close()
G_losses_MAB_stat_false_conf_false_mean = np.mean(G_losses_MAB_stat_false_conf_false, axis=0)
G_losses_MAB_stat_false_conf_false_std = np.std(G_losses_MAB_stat_false_conf_false - G_losses_MAB_stat_false_conf_false_mean, axis=0)
D_losses_MAB_stat_false_conf_false_mean = np.mean(D_losses_MAB_stat_false_conf_false, axis=0)
D_losses_MAB_stat_false_conf_false_std = np.std(D_losses_MAB_stat_false_conf_false - D_losses_MAB_stat_false_conf_false_mean, axis=0)
####################################################
stat_reward = True
conf_bound = False
with open('G_loss_MAB'+'_stat_'+str(stat_reward)+'_ucb_'+str(conf_bound), 'rb') as f:
G_losses_MAB_stat_true_conf_false = pickle.load(f)
with open('D_loss_MAB'+'_stat_'+str(stat_reward)+'_ucb_'+str(conf_bound), 'rb') as f:
D_losses_MAB_stat_true_conf_false = pickle.load(f)
with open('test_imgs_MAB'+'_stat_'+str(stat_reward)+'_ucb_'+str(conf_bound), 'rb') as f:
img_list_MAB_stat_true_conf_false = pickle.load(f)
# with open('k_list_MAB' + '_stat_' + str(stat_reward) + '_ucb_' + str(conf_bound), 'rb') as f:
# k_list_MAB_stat_true_conf_false = pickle.load(f)
G_losses_MAB_stat_true_conf_false_mean = np.mean(G_losses_MAB_stat_true_conf_false, axis=0)
G_losses_MAB_stat_true_conf_false_std = np.std(G_losses_MAB_stat_true_conf_false - G_losses_MAB_stat_true_conf_false_mean, axis=0)
D_losses_MAB_stat_true_conf_false_mean = np.mean(D_losses_MAB_stat_true_conf_false, axis=0)
D_losses_MAB_stat_true_conf_false_std = np.std(D_losses_MAB_stat_true_conf_false - D_losses_MAB_stat_true_conf_false_mean, axis=0)
####################################################
stat_reward = False
conf_bound = True
with open('G_loss_MAB'+'_stat_'+str(stat_reward)+'_ucb_'+str(conf_bound), 'rb') as f:
G_losses_MAB_stat_false_conf_true = pickle.load(f)
f.close()
with open('D_loss_MAB'+'_stat_'+str(stat_reward)+'_ucb_'+str(conf_bound), 'rb') as f:
D_losses_MAB_stat_false_conf_true = pickle.load(f)
f.close()
with open('test_imgs_MAB'+'_stat_'+str(stat_reward)+'_ucb_'+str(conf_bound), 'rb') as f:
img_list_MAB_stat_false_conf_true = pickle.load(f)
f.close()
with open('k_list_MAB' + '_stat_' + str(stat_reward) + '_ucb_' + str(conf_bound), 'rb') as f:
k_list_MAB_stat_false_conf_true = pickle.load(f)
f.close()
G_losses_MAB_stat_false_conf_true_mean = np.mean(G_losses_MAB_stat_false_conf_true, axis=0)
G_losses_MAB_stat_false_conf_true_std = np.std(G_losses_MAB_stat_false_conf_true - G_losses_MAB_stat_false_conf_true_mean, axis=0)
D_losses_MAB_stat_false_conf_true_mean = np.mean(D_losses_MAB_stat_false_conf_true, axis=0)
D_losses_MAB_stat_false_conf_true_std = np.std(D_losses_MAB_stat_false_conf_true - D_losses_MAB_stat_false_conf_true_mean, axis=0)
####################################################
stat_reward = True
conf_bound = True
with open('G_loss_MAB'+'_stat_'+str(stat_reward)+'_ucb_'+str(conf_bound), 'rb') as f:
G_losses_MAB_stat_true_conf_true = pickle.load(f)
f.close()
with open('D_loss_MAB'+'_stat_'+str(stat_reward)+'_ucb_'+str(conf_bound), 'rb') as f:
D_losses_MAB_stat_true_conf_true = pickle.load(f)
f.close()
with open('test_imgs_MAB'+'_stat_'+str(stat_reward)+'_ucb_'+str(conf_bound), 'rb') as f:
img_list_MAB_stat_true_conf_true = pickle.load(f)
f.close()
with open('k_list_MAB' + '_stat_' + str(stat_reward) + '_ucb_' + str(conf_bound), 'rb') as f:
k_list_MAB_stat_true_conf_true = pickle.load(f)
f.close()
G_losses_MAB_stat_true_conf_true_mean = np.mean(G_losses_MAB_stat_true_conf_true, axis=0)
G_losses_MAB_stat_true_conf_true_std = np.std(G_losses_MAB_stat_true_conf_true - G_losses_MAB_stat_true_conf_true_mean, axis=0)
D_losses_MAB_stat_true_conf_true_mean = np.mean(D_losses_MAB_stat_true_conf_true, axis=0)
D_losses_MAB_stat_true_conf_true_std = np.std(D_losses_MAB_stat_true_conf_true - D_losses_MAB_stat_true_conf_true_mean, axis=0)
####################################################
mpl.rcParams['lines.linewidth'] = .4
# Generator Plots
plt.figure(figsize=(10,5))
plt.title("Generator Loss During Training")
plt.plot(range(len(G_orig_mean)), G_orig_mean, label="Standard GAN")
#plt.fill_between(range(len(G_orig_mean)), y1=G_orig_mean-G_orig_std, y2=G_orig_mean+G_orig_std)
plt.plot(range(len(G_losses_MAB_stat_true_conf_true_mean)), G_losses_MAB_stat_true_conf_true_mean, label="MAB GAN stat. and UCB")
#plt.fill_between(range(len(G_losses_MAB_stat_true_conf_true_mean)),
# y1=G_losses_MAB_stat_true_conf_true_mean-G_losses_MAB_stat_true_conf_true_std,
# y2=G_losses_MAB_stat_true_conf_true_mean+G_losses_MAB_stat_true_conf_true_std)
plt.plot(range(len(G_losses_MAB_stat_false_conf_true_mean)), G_losses_MAB_stat_false_conf_true_mean, label="MAB GAN non-stat. and UCB")
#plt.fill_between(range(len(G_losses_MAB_stat_false_conf_true_mean)),
# y1=G_losses_MAB_stat_false_conf_true_mean-G_losses_MAB_stat_false_conf_true_std,
# y2=G_losses_MAB_stat_false_conf_true_mean+G_losses_MAB_stat_false_conf_true_std)
plt.plot(range(len(G_losses_MAB_stat_true_conf_false_mean)), G_losses_MAB_stat_true_conf_false_mean, label="MAB GAN stat. and no UCB")
#plt.fill_between(range(len(G_losses_MAB_stat_true_conf_false_mean)),
# y1=G_losses_MAB_stat_true_conf_false_mean-G_losses_MAB_stat_true_conf_false_std,
# y2=G_losses_MAB_stat_true_conf_false_mean+G_losses_MAB_stat_true_conf_false_std)
plt.plot(range(len(G_losses_MAB_stat_false_conf_false_mean)), G_losses_MAB_stat_false_conf_false_mean, label="MAB GAN non-stat. and no UCB")
#plt.fill_between(range(len(G_losses_MAB_stat_false_conf_false_mean)),
# y1=G_losses_MAB_stat_false_conf_false_mean-G_losses_MAB_stat_false_conf_false_std,
# y2=G_losses_MAB_stat_false_conf_false_mean+G_losses_MAB_stat_false_conf_false_std)
plt.xlabel("iterations")
plt.ylabel("Generator Loss")
plt.legend()
plt.show()
####################################################
# Discriminator Plots
plt.figure(figsize=(10,5))
plt.title("Discriminator Loss During Training")
plt.plot(range(len(D_orig_mean)), D_orig_mean, label="Standard GAN")
#plt.fill_between(range(len(D_orig_mean)), y1=D_orig_mean-D_orig_std, y2=D_orig_mean+D_orig_std)
plt.plot(range(len(D_losses_MAB_stat_true_conf_true_mean)), D_losses_MAB_stat_true_conf_true_mean, label="MAB GAN stat. and UCB")
#plt.fill_between(range(len(D_losses_MAB_stat_true_conf_true_mean)),
# y1=D_losses_MAB_stat_true_conf_true_mean-D_losses_MAB_stat_true_conf_true_std,
# y2=D_losses_MAB_stat_true_conf_true_mean+D_losses_MAB_stat_true_conf_true_std)
plt.plot(range(len(D_losses_MAB_stat_false_conf_true_mean)), D_losses_MAB_stat_false_conf_true_mean, label="MAB GAN non-stat. and UCB")
#plt.fill_between(range(len(D_losses_MAB_stat_false_conf_true_mean)),
# y1=D_losses_MAB_stat_false_conf_true_mean-D_losses_MAB_stat_false_conf_true_std,
# y2=D_losses_MAB_stat_false_conf_true_mean+D_losses_MAB_stat_false_conf_true_std)
plt.plot(range(len(D_losses_MAB_stat_true_conf_false_mean)), D_losses_MAB_stat_true_conf_false_mean, label="MAB GAN stat. and no UCB")
#plt.fill_between(range(len(D_losses_MAB_stat_true_conf_false_mean)),
# y1=D_losses_MAB_stat_true_conf_false_mean-D_losses_MAB_stat_true_conf_false_std,
# y2=D_losses_MAB_stat_true_conf_false_mean+D_losses_MAB_stat_true_conf_false_std)
plt.plot(range(len(D_losses_MAB_stat_false_conf_false_mean)), D_losses_MAB_stat_false_conf_false_mean, label="MAB GAN non-stat. and no UCB")
#plt.fill_between(range(len(D_losses_MAB_stat_false_conf_false_mean)),
# y1=D_losses_MAB_stat_false_conf_false_mean-D_losses_MAB_stat_false_conf_false_std,
# y2=D_losses_MAB_stat_false_conf_false_mean+D_losses_MAB_stat_false_conf_false_std)
plt.xlabel("iterations")
plt.ylabel("Discriminator Loss")
plt.legend()
plt.show()
####################################################
# Plot the fake images from the last epoch
plt.subplot(1,2,1)
plt.axis("off")
plt.title("Fake Images for Standard GAN")
plt.imshow(np.transpose(img_list_orig[-1],(1,2,0)))
#plt.show()
# Plot the fake images from the last epoch
plt.subplot(1,2,2)
plt.axis("off")
plt.title("Fake Images for MAB GAN")
plt.imshow(np.transpose(img_list_MAB_stat_true_conf_true[-1],(1,2,0)))
plt.show()
print()
#######################################################
mpl.rcParams['lines.linewidth'] = 1
# Plot k
plt.figure(figsize=(10,5))
plt.title("Number of Discriminator Updates per Iteration")
plt.plot(np.ones((len(k_list_MAB_stat_true_conf_true[0]),)), label="Standard GAN")
plt.plot(k_list_MAB_stat_true_conf_true[0], label="MAB GAN")
plt.xlabel("iterations")
plt.legend()
plt.show()
#######################################################
# Plot some training images
# real_batch = next(iter(dataloader))
# plt.figure(figsize=(8,8))
# plt.axis("off")
# plt.title("Training Images")
# plt.imshow(np.transpose(vutils.make_grid(real_batch[0].to(device)[:64], padding=2, normalize=True).cpu(),(1,2,0)))
# **Visualization of G’s progression**
#
# Remember how we saved the generator’s output on the fixed_noise batch
# after every epoch of training. Now, we can visualize the training
# progression of G with an animation. Press the play button to start the
# animation.
#
#%%capture
#fig = plt.figure(figsize=(8,8))
#plt.axis("off")
#ims = [[plt.imshow(np.transpose(i,(1,2,0)), animated=True)] for i in img_list]
#ani = animation.ArtistAnimation(fig, ims, interval=1000, repeat_delay=1000, blit=True)
#HTML(ani.to_jshtml())
| 52.12037
| 140
| 0.75644
| 1,981
| 11,258
| 3.801615
| 0.081777
| 0.121763
| 0.179525
| 0.117514
| 0.832426
| 0.803612
| 0.790201
| 0.761253
| 0.719958
| 0.543354
| 0
| 0.008708
| 0.092112
| 11,258
| 215
| 141
| 52.362791
| 0.728109
| 0.298188
| 0
| 0.384058
| 0
| 0
| 0.121574
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.130435
| 0
| 0.130435
| 0.007246
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
dbf3f4d8f988efa6f80be843b6b91cbce4f8ee82
| 188
|
py
|
Python
|
store/admin.py
|
Kihara-tony/Bookstore
|
0023b86dc20297d22a3c01c340be9c50f6578109
|
[
"Unlicense"
] | null | null | null |
store/admin.py
|
Kihara-tony/Bookstore
|
0023b86dc20297d22a3c01c340be9c50f6578109
|
[
"Unlicense"
] | 5
|
2020-06-05T22:51:31.000Z
|
2021-09-08T01:16:57.000Z
|
store/admin.py
|
Kihara-tony/Bookstore
|
0023b86dc20297d22a3c01c340be9c50f6578109
|
[
"Unlicense"
] | null | null | null |
from django.contrib import admin
from .models import Teacher,Books,Profile
# Register your models here.
admin.site.register(Teacher)
admin.site.register(Books)
admin.site.register(Profile)
| 31.333333
| 41
| 0.824468
| 27
| 188
| 5.740741
| 0.481481
| 0.174194
| 0.329032
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.079787
| 188
| 6
| 42
| 31.333333
| 0.895954
| 0.138298
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.4
| 0
| 0.4
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
dbf93baa075190a525d65c3a6289cc630451fc3a
| 167
|
py
|
Python
|
family_api/admin.py
|
zchuhui/django-rest-framework-example
|
82ee470b581473a0e9f5772ede75a90f2dfe1c54
|
[
"Apache-2.0"
] | null | null | null |
family_api/admin.py
|
zchuhui/django-rest-framework-example
|
82ee470b581473a0e9f5772ede75a90f2dfe1c54
|
[
"Apache-2.0"
] | null | null | null |
family_api/admin.py
|
zchuhui/django-rest-framework-example
|
82ee470b581473a0e9f5772ede75a90f2dfe1c54
|
[
"Apache-2.0"
] | null | null | null |
from django.contrib import admin
# Register your models here.
from .models import Relationship,Person
admin.site.register(Relationship)
admin.site.register(Person)
| 18.555556
| 39
| 0.814371
| 22
| 167
| 6.181818
| 0.545455
| 0.132353
| 0.25
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.107784
| 167
| 8
| 40
| 20.875
| 0.912752
| 0.155689
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
e01fcddc55beb5ab4224200b63c007cf3be993c6
| 4,481
|
py
|
Python
|
scripts/field/ds_tuto_3_0.py
|
G00dBye/YYMS
|
1de816fc842b6598d5b4b7896b6ab0ee8f7cdcfb
|
[
"MIT"
] | 54
|
2019-04-16T23:24:48.000Z
|
2021-12-18T11:41:50.000Z
|
scripts/field/ds_tuto_3_0.py
|
G00dBye/YYMS
|
1de816fc842b6598d5b4b7896b6ab0ee8f7cdcfb
|
[
"MIT"
] | 3
|
2019-05-19T15:19:41.000Z
|
2020-04-27T16:29:16.000Z
|
scripts/field/ds_tuto_3_0.py
|
G00dBye/YYMS
|
1de816fc842b6598d5b4b7896b6ab0ee8f7cdcfb
|
[
"MIT"
] | 49
|
2020-11-25T23:29:16.000Z
|
2022-03-26T16:20:24.000Z
|
# Created by MechAviv
# ID :: [931050000]
# Hidden Street : Extraction Room 1
sm.curNodeEventEnd(True)
sm.setTemporarySkillSet(0)
sm.setInGameDirectionMode(True, True, False, False)
sm.setStandAloneMode(True)
def failMessage(crack):
sm.chatScript("Tap the Control Key repeatedly to break the wall.")
sm.showEffect("Effect/Direction6.img/effect/tuto/guide1/0", 3000, 0, -100, 20, 0, False, 0)
sm.showEffect("Effect/Direction6.img/effect/tuto/breakEgg/" + str(crack), 6600, 0, 0, 1, 0, False, 0)
if not "1" in sm.getQRValue(23206):
sm.createQuestWithQRValue(23206, "1")
sm.levelUntil(10)
sm.sendDelay(3000)
sm.showFieldEffect("demonSlayer/text12", 0)
sm.sendDelay(5000)
sm.forcedInput(1)
sm.sendDelay(10)
sm.forcedInput(0)
sm.setSpeakerID(2159311)
sm.removeEscapeButton()
sm.setPlayerAsSpeaker()
sm.setSpeakerType(3)
sm.sendNext("........")
sm.showEffect("Effect/Direction6.img/effect/tuto/balloonMsg0/14", 2000, 130, 50, 10, 0, False, 0)
sm.sendDelay(2000)
sm.setSpeakerID(2159311)
sm.removeEscapeButton()
sm.setPlayerAsSpeaker()
sm.setSpeakerType(3)
sm.sendNext("(I think I hear something...)")
sm.showEffect("Effect/Direction6.img/effect/tuto/balloonMsg0/15", 2000, -130, 50, 10, 0, False, 0)
sm.sendDelay(2000)
sm.setSpeakerID(2159311)
sm.removeEscapeButton()
sm.setPlayerAsSpeaker()
sm.setSpeakerType(3)
sm.sendNext("(Where am I? Am I still alive...?)")
sm.showEffect("Effect/Direction6.img/effect/tuto/balloonMsg0/16", 2000, 130, 50, 10, 0, False, 0)
sm.sendDelay(2000)
sm.setSpeakerID(2159311)
sm.removeEscapeButton()
sm.setPlayerAsSpeaker()
sm.setSpeakerType(3)
sm.sendNext("(Ugh... My energy... Something is stealing my energy!)")
sm.showEffect("Effect/Direction6.img/effect/tuto/balloonMsg0/17", 2000, -130, 50, 10, 0, False, 0)
sm.sendDelay(2000)
sm.setSpeakerID(2159311)
sm.removeEscapeButton()
sm.setPlayerAsSpeaker()
sm.setSpeakerType(3)
sm.sendNext("(I must escape before they drain all my power!)")
sm.setPatternInputCount(0)
sm.chatScript("Tap the Control Key repeatedly to break the wall.")
sm.showEffect("Effect/Direction6.img/effect/tuto/guide1/0", 3000, 0, -100, 20, 0, False, 0)
while not sm.patternInputRequest("17#17#17#", 2, 2, 3000) and sm.getPatternInputCount() < 7:
failMessage(0)
sm.setPatternInputCount(0)
sm.playSound("demonSlayer/punch", 100)
sm.playSound("demonSlayer/crackEgg", 100)
sm.chatScript("Tap the Control Key repeatedly to break the wall.")
sm.showEffect("Effect/Direction6.img/effect/tuto/guide1/0", 3000, 0, -100, 20, 0, False, 0)
sm.showEffect("Effect/Direction6.img/effect/tuto/breakEgg/0", 6600, 0, 0, 1, 0, False, 0)
while not sm.patternInputRequest("17#17#17#", 2, 2, 3000) and sm.getPatternInputCount() < 7:
failMessage(0)
sm.setPatternInputCount(0)
sm.playSound("demonSlayer/punch", 100)
sm.playSound("demonSlayer/crackEgg", 100)
sm.showEffect("Effect/Direction6.img/effect/tuto/balloonMsg0/7", 2000, 130, 100, 10, 0, False, 0)
sm.chatScript("Tap the Control Key repeatedly to break the wall.")
sm.showEffect("Effect/Direction6.img/effect/tuto/guide1/0", 3000, 0, -100, 20, 0, False, 0)
sm.showEffect("Effect/Direction6.img/effect/tuto/breakEgg/0", 6600, 0, 0, 1, 0, False, 0)
while not sm.patternInputRequest("17#17#17#", 2, 2, 3000) and sm.getPatternInputCount() < 7:
failMessage(0)
sm.setPatternInputCount(0)
sm.playSound("demonSlayer/punch", 100)
sm.playSound("demonSlayer/crackEgg", 100)
sm.chatScript("Tap the Control Key repeatedly to break the wall.")
sm.showEffect("Effect/Direction6.img/effect/tuto/guide1/0", 3000, 0, -100, 20, 0, False, 0)
sm.showEffect("Effect/Direction6.img/effect/tuto/breakEgg/1", 6600, 0, 0, 1, 0, False, 0)
while not sm.patternInputRequest("17#17#17#", 2, 2, 3000) and sm.getPatternInputCount() < 7:
failMessage(1)
sm.setPatternInputCount(0)
sm.showEffect("Effect/Direction6.img/effect/tuto/breakEgg/0", 3600, 0, 0, 1, 0, False, 0)
sm.sendDelay(3000)
sm.showEffect("Effect/Direction6.img/effect/tuto/breakEgg/1", 3600, 0, 0, 1, 0, False, 0)
sm.showEffect("Effect/Direction6.img/effect/tuto/balloonMsg1/1", 2000, -130, 50, 10, 0, False, 0)
sm.playSound("demonSlayer/crackEgg", 100)
sm.sendDelay(1000)
sm.showEffect("Effect/Direction6.img/effect/tuto/breakEgg/2", 9000, 0, 0, 1, 0, False, 0)
sm.showEffect("Effect/Direction6.img/effect/tuto/balloonMsg1/2", 2000, 130, 50, 10, 0, False, 0)
sm.sendDelay(1000)
sm.playSound("demonSlayer/breakEgg", 100)
sm.showFieldEffect("demonSlayer/whiteOut", 0)
sm.warpInstanceIn(931050020, 0)
| 35.283465
| 105
| 0.738451
| 660
| 4,481
| 5.013636
| 0.163636
| 0.023572
| 0.103354
| 0.160774
| 0.798429
| 0.793291
| 0.783318
| 0.779087
| 0.674222
| 0.650045
| 0
| 0.11663
| 0.096853
| 4,481
| 126
| 106
| 35.563492
| 0.701013
| 0.015845
| 0
| 0.634409
| 0
| 0
| 0.339083
| 0.192919
| 0
| 0
| 0
| 0
| 0
| 1
| 0.010753
| false
| 0
| 0
| 0
| 0.010753
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
e026f95f826d4723e778086c5e7c53aac129329d
| 92
|
py
|
Python
|
shakenfist/exceptions.py
|
mcarden/shakenfist
|
cb90ffe81a3d0201949ddea8d4b36ce1b6c11246
|
[
"Apache-2.0"
] | null | null | null |
shakenfist/exceptions.py
|
mcarden/shakenfist
|
cb90ffe81a3d0201949ddea8d4b36ce1b6c11246
|
[
"Apache-2.0"
] | null | null | null |
shakenfist/exceptions.py
|
mcarden/shakenfist
|
cb90ffe81a3d0201949ddea8d4b36ce1b6c11246
|
[
"Apache-2.0"
] | null | null | null |
class HTTPError(Exception):
pass
class VersionSpecificationError(Exception):
pass
| 13.142857
| 43
| 0.76087
| 8
| 92
| 8.75
| 0.625
| 0.371429
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.173913
| 92
| 6
| 44
| 15.333333
| 0.921053
| 0
| 0
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.5
| 0
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
e04215b1a377992f554f9a6da13ebdda9f9ac242
| 1,088
|
py
|
Python
|
mywebsite/models.py
|
anton-donchev/mywebsite
|
1be02d08fc26477fc6dd7b0e7c94bf8de9c02bde
|
[
"MIT"
] | null | null | null |
mywebsite/models.py
|
anton-donchev/mywebsite
|
1be02d08fc26477fc6dd7b0e7c94bf8de9c02bde
|
[
"MIT"
] | null | null | null |
mywebsite/models.py
|
anton-donchev/mywebsite
|
1be02d08fc26477fc6dd7b0e7c94bf8de9c02bde
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# coding=utf-8
from werkzeug.security import generate_password_hash, check_password_hash
from flask_login import UserMixin
from datetime import datetime
from mywebsite import db, login_manager
@login_manager.user_loader
def load_user(id):
return Admin.query.get(int(id))
class Admin(UserMixin, db.Model):
id = db.Column(db.Integer, primary_key=True)
username = db.Column(db.String(64), index=True, unique=True)
email = db.Column(db.String(120), index=True, unique=True)
password_hash = db.Column(db.String(128))
status = db.Column(db.String(32), index=True)
timestamp = db.Column(db.DateTime, index=True, default=datetime.utcnow)
def __repr__(self):
return f"<Admin {self.username}>"
@property
def password(self):
raise AttributeError("'password' is not a readable attribute!")
@password.setter
def password(self, password):
self.password_hash = generate_password_hash(password)
def check_password(self, password):
return check_password_hash(self.password_hash, password)
| 29.405405
| 75
| 0.725184
| 150
| 1,088
| 5.113333
| 0.426667
| 0.109518
| 0.078227
| 0.083442
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.012088
| 0.163603
| 1,088
| 36
| 76
| 30.222222
| 0.830769
| 0.030331
| 0
| 0
| 1
| 0
| 0.058879
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.208333
| false
| 0.375
| 0.166667
| 0.125
| 0.791667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
|
0
| 5
|
0edf3b1979c01112d2ac2ce8599c5385039d8532
| 3,282
|
py
|
Python
|
tests/test_minimap2_pipeline.py
|
ignasrum/hocort
|
5bf137ecaf8816cc8d951bb5168588eb87811097
|
[
"MIT"
] | null | null | null |
tests/test_minimap2_pipeline.py
|
ignasrum/hocort
|
5bf137ecaf8816cc8d951bb5168588eb87811097
|
[
"MIT"
] | null | null | null |
tests/test_minimap2_pipeline.py
|
ignasrum/hocort
|
5bf137ecaf8816cc8d951bb5168588eb87811097
|
[
"MIT"
] | null | null | null |
from hocort.pipelines.minimap2 import Minimap2
import tempfile
import os
temp_dir = tempfile.TemporaryDirectory()
path = os.path.dirname(__file__)
idx = f'{path}/test_data/minimap2/genome.mmi'
seq1 = f'{path}/test_data/sequences/sequences1.fastq'
out1 = f'{temp_dir.name}/out1.fastq'
seq2 = f'{path}/test_data/sequences/sequences2.fastq'
out2 = f'{temp_dir.name}/out2.fastq'
no_path = ''
def test_pipeline_temp_dir():
path = '.'
returncode = Minimap2(path).run(idx, seq1, out1, seq2=seq2, out2=out2)
assert returncode == 0
def test_pipeline_mapq():
returncode = Minimap2().run(idx, seq1, out1, seq2=seq2, out2=out2, mapq=2)
assert returncode == 0
def test_pipeline_idx_no_path():
returncode = Minimap2().run(no_path, seq1, out1)
assert returncode == 1
def test_pipeline_seq1_no_path():
returncode = Minimap2().run(idx, no_path, out1)
assert returncode == 1
def test_pipeline_out1_no_path():
returncode = Minimap2().run(idx, seq1, no_path)
assert returncode == 0
def test_pipeline_seq1_seq2_no_path():
returncode = Minimap2().run(idx, no_path, out1, seq2=no_path)
assert returncode == 1
def test_pipeline_seq2_no_path():
returncode = Minimap2().run(idx, seq1, out1, seq2=no_path)
assert returncode == 0
def test_pipeline_hcfilter_true_1():
returncode = Minimap2().run(idx, seq1, out1, hcfilter='t')
assert returncode == 0
def test_pipeline_hcfilter_false_1():
returncode = Minimap2().run(idx, seq1, out1, hcfilter='f')
assert returncode == 0
def test_pipeline_hcfilter_true_2():
returncode = Minimap2().run(idx, seq1, out1, seq2=seq2, out2=out2, hcfilter='t')
assert returncode == 0
def test_pipeline_hcfilter_false_2():
returncode = Minimap2().run(idx, seq1, out1, seq2=seq2, out2=out2, hcfilter='f')
assert returncode == 0
def test_pipeline_1():
returncode = Minimap2().run(idx, seq1, out1)
assert returncode == 0
def test_pipeline_2():
returncode = Minimap2().run(idx, seq1, out1, seq2=seq2, out2=out2)
assert returncode == 0
def test_pipeline_custom_options_1():
options = []
returncode = Minimap2().run(idx, seq1, out1, options=options)
assert returncode == 0
def test_pipeline_custom_options_2():
options = []
returncode = Minimap2().run(idx, seq1, out1, seq2=seq2, out2=out2, options=options)
assert returncode == 0
def test_pipeline_sam_1():
intermediary = 'SAM'
returncode = Minimap2().run(idx, seq1, out1, intermediary=intermediary)
assert returncode == 0
def test_pipeline_sam_2():
intermediary = 'SAM'
returncode = Minimap2().run(idx, seq1, out1, seq2=seq2, out2=out2, intermediary=intermediary)
assert returncode == 0
def test_pipeline_bam_1():
    intermediary = 'BAM'
    returncode = Minimap2().run(idx, seq1, out1, intermediary=intermediary)
    assert returncode == 0
def test_pipeline_bam_2():
    intermediary = 'BAM'
    returncode = Minimap2().run(idx, seq1, out1, seq2=seq2, out2=out2, intermediary=intermediary)
    assert returncode == 0
def test_pipeline_seq2_no_out2():
returncode = Minimap2().run(idx, seq1, out1, seq2=seq2)
assert returncode == 1
def test_pipeline_noseq2_out2():
returncode = Minimap2().run(idx, seq1, out1, out2=out2)
assert returncode == 0
| 31.257143
| 97
| 0.708714
| 443
| 3,282
| 5.047404
| 0.108352
| 0.065742
| 0.140877
| 0.203936
| 0.850626
| 0.805009
| 0.743292
| 0.622093
| 0.453041
| 0.38864
| 0
| 0.055313
| 0.162706
| 3,282
| 104
| 98
| 31.557692
| 0.75837
| 0
| 0
| 0.432099
| 0
| 0
| 0.058196
| 0.053016
| 0
| 0
| 0
| 0
| 0.259259
| 1
| 0.259259
| false
| 0
| 0.037037
| 0
| 0.296296
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
0eeb34c85c4c175585aaf02e5531af46cb4686c7
| 214
|
py
|
Python
|
mak/build_framework/configure/arch/mips.py
|
motor-dev/Motor
|
98cb099fe1c2d31e455ed868cc2a25eae51e79f0
|
[
"BSD-3-Clause"
] | 4
|
2015-05-13T16:28:36.000Z
|
2017-05-24T15:34:14.000Z
|
mak/build_framework/configure/arch/mips.py
|
motor-dev/Motor
|
98cb099fe1c2d31e455ed868cc2a25eae51e79f0
|
[
"BSD-3-Clause"
] | null | null | null |
mak/build_framework/configure/arch/mips.py
|
motor-dev/Motor
|
98cb099fe1c2d31e455ed868cc2a25eae51e79f0
|
[
"BSD-3-Clause"
] | 1
|
2017-03-21T08:28:07.000Z
|
2017-03-21T08:28:07.000Z
|
def configure(conf):
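    # Configure hook: record the MIPS architecture facts on the build environment.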
conf.env.ARCHITECTURE = 'mips'
conf.env.VALID_ARCHITECTURES = ['mips']
conf.env.ARCH_FAMILY = 'mips'
conf.env.ARCH_LP64 = False
conf.env.append_unique('DEFINES', ['_MIPS'])
| 30.571429
| 48
| 0.672897
| 28
| 214
| 4.964286
| 0.535714
| 0.251799
| 0.23741
| 0.215827
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.011236
| 0.168224
| 214
| 6
| 49
| 35.666667
| 0.769663
| 0
| 0
| 0
| 0
| 0
| 0.11215
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| false
| 0
| 0
| 0
| 0.166667
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
0eec8e83919cdeb2106c1fcb7cebc1a419715cc5
| 114
|
py
|
Python
|
stataLogObject/Supports/__init__.py
|
sbaker-dev/stataLogObject
|
f72b80e4c69827f556d181dbfec66237c8464636
|
[
"MIT"
] | null | null | null |
stataLogObject/Supports/__init__.py
|
sbaker-dev/stataLogObject
|
f72b80e4c69827f556d181dbfec66237c8464636
|
[
"MIT"
] | null | null | null |
stataLogObject/Supports/__init__.py
|
sbaker-dev/stataLogObject
|
f72b80e4c69827f556d181dbfec66237c8464636
|
[
"MIT"
] | null | null | null |
from .supports import clean_line, extract_values, clean_value, FOREST_DICT, methods_in_line
from .Errors import *
| 38
| 91
| 0.833333
| 17
| 114
| 5.235294
| 0.764706
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.105263
| 114
| 2
| 92
| 57
| 0.872549
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
0ef47d2ba12968611e973f562249e06bb3e99777
| 258
|
py
|
Python
|
rotkehlchen/tests/unit/test_etherscan.py
|
rotkehlchenio/rotkehlchen
|
98f49cd3ed26c641fec03b78eff9fe1872385fbf
|
[
"BSD-3-Clause"
] | 137
|
2018-03-05T11:53:29.000Z
|
2019-11-03T16:38:42.000Z
|
rotkehlchen/tests/unit/test_etherscan.py
|
rotkehlchenio/rotkehlchen
|
98f49cd3ed26c641fec03b78eff9fe1872385fbf
|
[
"BSD-3-Clause"
] | 385
|
2018-03-08T12:43:41.000Z
|
2019-11-10T09:15:36.000Z
|
rotkehlchen/tests/unit/test_etherscan.py
|
rotkehlchenio/rotkehlchen
|
98f49cd3ed26c641fec03b78eff9fe1872385fbf
|
[
"BSD-3-Clause"
] | 59
|
2018-03-08T10:08:27.000Z
|
2019-10-26T11:30:44.000Z
|
from rotkehlchen.externalapis.etherscan import _hashes_tuple_to_list
def test_hashes_tuple_to_list():
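    # Each tuple pairs a tx hash with a number; the helper should return only the hashes, ordered by that number.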
hashes = {('0x1', 1), ('0x2', 2), ('0x3', 3), ('0x4', 4), ('0x5', 5)}
assert _hashes_tuple_to_list(hashes) == ['0x1', '0x2', '0x3', '0x4', '0x5']
| 36.857143
| 79
| 0.639535
| 37
| 258
| 4.135135
| 0.594595
| 0.215686
| 0.254902
| 0.333333
| 0.339869
| 0.339869
| 0
| 0
| 0
| 0
| 0
| 0.112613
| 0.139535
| 258
| 6
| 80
| 43
| 0.576577
| 0
| 0
| 0
| 0
| 0
| 0.116279
| 0
| 0
| 0
| 0.116279
| 0
| 0.25
| 1
| 0.25
| false
| 0
| 0.25
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
0efcc647d5e2bea619fbe1de9c3ac6ac210090de
| 289
|
py
|
Python
|
playgrounds/test.py
|
Mariox222/htmlStruc
|
ebc4f9ae62b44b94a339ae34f0d8577738ff2801
|
[
"MIT"
] | null | null | null |
playgrounds/test.py
|
Mariox222/htmlStruc
|
ebc4f9ae62b44b94a339ae34f0d8577738ff2801
|
[
"MIT"
] | null | null | null |
playgrounds/test.py
|
Mariox222/htmlStruc
|
ebc4f9ae62b44b94a339ae34f0d8577738ff2801
|
[
"MIT"
] | null | null | null |
from dataclasses import dataclass
from dataclasses import field
""" @dataclass
class Node:
#name: str = field(default="")
attributes: list[str] = field(default_factory=list)
#isCloseTag: bool = field(default=True)
#depth: int = field(default=0) """
x: list[str] = list()
| 24.083333
| 55
| 0.681661
| 36
| 289
| 5.444444
| 0.555556
| 0.244898
| 0.214286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.004202
| 0.176471
| 289
| 12
| 56
| 24.083333
| 0.819328
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
160b1ded5fb8662b107af9b3b8d464b9918e275b
| 359
|
py
|
Python
|
python/replicate/exceptions.py
|
kennyworkman/replicate
|
df9358847cdbb3d0e87018511e0a392d750d818a
|
[
"Apache-2.0"
] | 810
|
2021-02-09T09:26:26.000Z
|
2022-03-25T14:06:13.000Z
|
python/replicate/exceptions.py
|
kennyworkman/replicate
|
df9358847cdbb3d0e87018511e0a392d750d818a
|
[
"Apache-2.0"
] | 347
|
2021-02-08T07:24:29.000Z
|
2022-03-31T23:05:29.000Z
|
python/replicate/exceptions.py
|
kennyworkman/replicate
|
df9358847cdbb3d0e87018511e0a392d750d818a
|
[
"Apache-2.0"
] | 43
|
2020-10-30T19:55:42.000Z
|
2021-01-18T22:41:49.000Z
|
from . import constants
class DoesNotExist(Exception):
pass
class ReadError(Exception):
pass
class WriteError(Exception):
pass
class RepositoryConfigurationError(Exception):
pass
class IncompatibleRepositoryVersion(Exception):
pass
class CorruptedRepositorySpec(Exception):
pass
class ConfigNotFound(Exception):
pass
| 11.966667
| 47
| 0.752089
| 31
| 359
| 8.709677
| 0.419355
| 0.337037
| 0.4
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.18663
| 359
| 29
| 48
| 12.37931
| 0.924658
| 0
| 0
| 0.466667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.466667
| 0.066667
| 0
| 0.533333
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 0
|
0
| 5
|
16454461da9204839872985eeb76753164f40f68
| 312
|
py
|
Python
|
OST_helper/UI/fields/__init__.py
|
HomeletW/OST
|
5e359d00a547af194a2a1a2591a53c93d8f40b84
|
[
"MIT"
] | 1
|
2020-07-31T16:43:13.000Z
|
2020-07-31T16:43:13.000Z
|
OST_helper/UI/fields/__init__.py
|
HomeletW/OST
|
5e359d00a547af194a2a1a2591a53c93d8f40b84
|
[
"MIT"
] | null | null | null |
OST_helper/UI/fields/__init__.py
|
HomeletW/OST
|
5e359d00a547af194a2a1a2591a53c93d8f40b84
|
[
"MIT"
] | null | null | null |
from OST_helper.UI.fields.course_panel import CoursePair, CoursePanel
from OST_helper.UI.fields.info_panel import OtherInfoPanel, PersonalInfoPanel
from OST_helper.UI.fields.menu_bar import MenuBar
from OST_helper.UI.fields.status_bar import StatusBar
from OST_helper.UI.fields.utility_panel import UtilityPanel
| 52
| 77
| 0.875
| 47
| 312
| 5.595745
| 0.425532
| 0.13308
| 0.247148
| 0.285171
| 0.39924
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.070513
| 312
| 5
| 78
| 62.4
| 0.906897
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
165d63dc97b77ccebb9d275adb559016c6b9a146
| 151
|
py
|
Python
|
data/__init__.py
|
yezz123/Apollo
|
f27a655a6a5ee9186867b380840411feca21290f
|
[
"MIT"
] | 18
|
2021-07-28T22:01:48.000Z
|
2022-02-07T10:57:21.000Z
|
data/__init__.py
|
yezz123/Apollo
|
f27a655a6a5ee9186867b380840411feca21290f
|
[
"MIT"
] | null | null | null |
data/__init__.py
|
yezz123/Apollo
|
f27a655a6a5ee9186867b380840411feca21290f
|
[
"MIT"
] | 5
|
2021-07-29T01:47:28.000Z
|
2022-01-05T02:04:54.000Z
|
#!/usr/bin/python3
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
| 37.75
| 55
| 0.86755
| 20
| 151
| 6.45
| 0.65
| 0.325581
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.007194
| 0.07947
| 151
| 4
| 56
| 37.75
| 0.920863
| 0.112583
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
1695aedbbb339b924fdfe0e7abba346dbe8f3381
| 53
|
py
|
Python
|
weatherpy/__init__.py
|
cmcdowell/weatherpy
|
7da7bed9db946dfab9318d67aab5fbda1e4fed21
|
[
"MIT"
] | 2
|
2015-11-28T09:01:47.000Z
|
2016-04-13T13:00:11.000Z
|
weatherpy/__init__.py
|
cmcdowell/weatherpy
|
7da7bed9db946dfab9318d67aab5fbda1e4fed21
|
[
"MIT"
] | null | null | null |
weatherpy/__init__.py
|
cmcdowell/weatherpy
|
7da7bed9db946dfab9318d67aab5fbda1e4fed21
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
from .response import Response
| 13.25
| 29
| 0.773585
| 8
| 53
| 5.125
| 0.875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.132075
| 53
| 3
| 30
| 17.666667
| 0.891304
| 0.377358
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
16d58b58f3b103201c63b5661f230d51a75528da
| 44
|
py
|
Python
|
config.py
|
marcusmguerrier/sql-challenge
|
0f0fb2236fa78509eec97cd2030c9f2e58f34d2c
|
[
"ADSL"
] | null | null | null |
config.py
|
marcusmguerrier/sql-challenge
|
0f0fb2236fa78509eec97cd2030c9f2e58f34d2c
|
[
"ADSL"
] | null | null | null |
config.py
|
marcusmguerrier/sql-challenge
|
0f0fb2236fa78509eec97cd2030c9f2e58f34d2c
|
[
"ADSL"
] | null | null | null |
# username = postgres
# password = postgres
| 14.666667
| 21
| 0.727273
| 4
| 44
| 8
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.181818
| 44
| 2
| 22
| 22
| 0.888889
| 0.886364
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
16ef541d2550e782b662ec4437a77a4ce60f5e55
| 5,836
|
py
|
Python
|
tests/artillery_transformer/test_dist.py
|
swagger-atlas/atlas
|
64a0a6e3107da9f7cf894880823badfa84e11f25
|
[
"Apache-2.0"
] | 2
|
2019-04-25T10:22:53.000Z
|
2020-10-09T06:57:02.000Z
|
tests/artillery_transformer/test_dist.py
|
swagger-atlas/atlas
|
64a0a6e3107da9f7cf894880823badfa84e11f25
|
[
"Apache-2.0"
] | 9
|
2019-04-11T17:27:57.000Z
|
2021-05-08T13:12:10.000Z
|
tests/artillery_transformer/test_dist.py
|
swagger-atlas/atlas
|
64a0a6e3107da9f7cf894880823badfa84e11f25
|
[
"Apache-2.0"
] | 1
|
2019-04-18T22:18:37.000Z
|
2019-04-18T22:18:37.000Z
|
import os
from os import path
from unittest import mock
from atlas.modules.transformer.artillery.dist import ArtilleryDist, settings
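# Every filesystem touchpoint (shutil, os.path checks, os.makedirs) is patched below, so these tests never touch disk.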
class TestDist:
def test_start(self):
instance = ArtilleryDist()
instance.create_folder = mock.MagicMock()
instance.copy_files = mock.MagicMock()
instance.copy_folders = mock.MagicMock()
instance.start()
assert instance.create_folder.mock_calls == [
mock.call(settings.DIST_FOLDER),
mock.call(settings.ARTILLERY_FOLDER, os.path.join(instance.path, settings.DIST_FOLDER))
]
instance.copy_files.assert_called_once()
instance.copy_folders.assert_called_once()
@mock.patch('atlas.modules.transformer.artillery.dist.shutil')
@mock.patch('atlas.modules.transformer.artillery.dist.os.path.isfile')
def test_copy_files_all_files(self, patch_os_is_file, patched_shell):
patch_os_is_file.return_value = True
instance = ArtilleryDist()
instance.path = ""
instance.copy_files()
source_path = path.join(settings.BASE_DIR, "atlas", "modules", "data_provider", "artillery")
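        # shutil is mocked, so copy() calls are only recorded; each expected entry compares equal to a 1-tuple of positional args.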
source_files = [path.join(source_path, file) for file in os.listdir(source_path)
if file not in {"constants.js", "settings.js", settings.ARTILLERY_RESOURCES}]
d_path = path.join(settings.DIST_FOLDER, settings.ARTILLERY_FOLDER, settings.ARTILLERY_LIB_FOLDER)
expected_sources = [((_file, d_path),) for _file in source_files]
expected_sources.extend([
(
(
path.join(settings.INPUT_FOLDER, settings.ARTILLERY_FOLDER, settings.ARTILLERY_HOOK_FILE),
path.join(settings.DIST_FOLDER, settings.ARTILLERY_FOLDER)
),
),
(
(
path.join(settings.OUTPUT_FOLDER, settings.ARTILLERY_FOLDER, settings.ARTILLERY_FILE),
path.join(settings.DIST_FOLDER, settings.ARTILLERY_FOLDER)
),
),
(
(
path.join(settings.OUTPUT_FOLDER, settings.ARTILLERY_FOLDER, settings.ARTILLERY_YAML),
path.join(settings.DIST_FOLDER, settings.ARTILLERY_FOLDER)
),
),
(
(
path.join(settings.OUTPUT_FOLDER, settings.SWAGGER_FILE),
path.join(settings.DIST_FOLDER)
),
)
])
assert patched_shell.copy.call_args_list == expected_sources
@mock.patch('atlas.modules.transformer.artillery.dist.shutil')
@mock.patch('atlas.modules.transformer.artillery.dist.os.path.isfile')
def test_copy_files_no_files(self, patch_os_is_file, patched_shell):
patch_os_is_file.return_value = False
instance = ArtilleryDist()
instance.path = ""
instance.copy_files()
expected_sources = [
(
(
path.join(settings.INPUT_FOLDER, settings.ARTILLERY_FOLDER, settings.ARTILLERY_HOOK_FILE),
path.join(settings.DIST_FOLDER, settings.ARTILLERY_FOLDER)
),
),
(
(
path.join(settings.OUTPUT_FOLDER, settings.ARTILLERY_FOLDER, settings.ARTILLERY_FILE),
path.join(settings.DIST_FOLDER, settings.ARTILLERY_FOLDER)
),
),
(
(
path.join(settings.OUTPUT_FOLDER, settings.ARTILLERY_FOLDER, settings.ARTILLERY_YAML),
path.join(settings.DIST_FOLDER, settings.ARTILLERY_FOLDER)
),
),
(
(
path.join(settings.OUTPUT_FOLDER, settings.SWAGGER_FILE),
path.join(settings.DIST_FOLDER)
),
)
]
assert patched_shell.copy.call_args_list == expected_sources
@mock.patch('atlas.modules.transformer.artillery.dist.shutil')
@mock.patch('atlas.modules.transformer.artillery.dist.os.path.exists')
def test_copy_folders_destination_exists(self, patch_os_path_exists, patched_shell):
patch_os_path_exists.return_value = True
instance = ArtilleryDist()
instance.path = ""
instance.copy_folders()
patched_shell.rmtree.assert_called()
patched_shell.copytree.assert_called()
@mock.patch('atlas.modules.transformer.artillery.dist.shutil')
@mock.patch('atlas.modules.transformer.artillery.dist.os.path.exists')
def test_copy_folders_destination_not_exists(self, patch_os_path_exists, patched_shell):
patch_os_path_exists.return_value = False
instance = ArtilleryDist()
instance.path = ""
instance.copy_folders()
patched_shell.rmtree.assert_not_called()
patched_shell.copytree.assert_called()
@mock.patch('atlas.modules.transformer.artillery.dist.os.makedirs')
@mock.patch('atlas.modules.transformer.artillery.dist.os.path.exists')
def test_create_folder_path_exists(self, patch_os_path_exists, patched_makedir):
patch_os_path_exists.return_value = True
instance = ArtilleryDist()
instance.path = ""
instance.create_folder('folder')
patched_makedir.assert_not_called()
@mock.patch('atlas.modules.transformer.artillery.dist.os.makedirs')
@mock.patch('atlas.modules.transformer.artillery.dist.os.path.exists')
def test_create_folder_path_not_exists(self, patch_os_path_exists, patched_makedir):
patch_os_path_exists.return_value = False
instance = ArtilleryDist()
instance.path = ""
instance.create_folder('folder')
patched_makedir.assert_called()
| 36.024691
| 110
| 0.63194
| 612
| 5,836
| 5.74183
| 0.124183
| 0.106431
| 0.130905
| 0.118384
| 0.791975
| 0.78173
| 0.776892
| 0.774047
| 0.758395
| 0.730222
| 0
| 0
| 0.271247
| 5,836
| 161
| 111
| 36.248447
| 0.82624
| 0
| 0
| 0.556452
| 0
| 0
| 0.118403
| 0.10658
| 0
| 0
| 0
| 0
| 0.08871
| 1
| 0.056452
| false
| 0
| 0.032258
| 0
| 0.096774
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|