hexsha
string | size
int64 | ext
string | lang
string | max_stars_repo_path
string | max_stars_repo_name
string | max_stars_repo_head_hexsha
string | max_stars_repo_licenses
list | max_stars_count
int64 | max_stars_repo_stars_event_min_datetime
string | max_stars_repo_stars_event_max_datetime
string | max_issues_repo_path
string | max_issues_repo_name
string | max_issues_repo_head_hexsha
string | max_issues_repo_licenses
list | max_issues_count
int64 | max_issues_repo_issues_event_min_datetime
string | max_issues_repo_issues_event_max_datetime
string | max_forks_repo_path
string | max_forks_repo_name
string | max_forks_repo_head_hexsha
string | max_forks_repo_licenses
list | max_forks_count
int64 | max_forks_repo_forks_event_min_datetime
string | max_forks_repo_forks_event_max_datetime
string | content
string | avg_line_length
float64 | max_line_length
int64 | alphanum_fraction
float64 | qsc_code_num_words_quality_signal
int64 | qsc_code_num_chars_quality_signal
float64 | qsc_code_mean_word_length_quality_signal
float64 | qsc_code_frac_words_unique_quality_signal
float64 | qsc_code_frac_chars_top_2grams_quality_signal
float64 | qsc_code_frac_chars_top_3grams_quality_signal
float64 | qsc_code_frac_chars_top_4grams_quality_signal
float64 | qsc_code_frac_chars_dupe_5grams_quality_signal
float64 | qsc_code_frac_chars_dupe_6grams_quality_signal
float64 | qsc_code_frac_chars_dupe_7grams_quality_signal
float64 | qsc_code_frac_chars_dupe_8grams_quality_signal
float64 | qsc_code_frac_chars_dupe_9grams_quality_signal
float64 | qsc_code_frac_chars_dupe_10grams_quality_signal
float64 | qsc_code_frac_chars_replacement_symbols_quality_signal
float64 | qsc_code_frac_chars_digital_quality_signal
float64 | qsc_code_frac_chars_whitespace_quality_signal
float64 | qsc_code_size_file_byte_quality_signal
float64 | qsc_code_num_lines_quality_signal
float64 | qsc_code_num_chars_line_max_quality_signal
float64 | qsc_code_num_chars_line_mean_quality_signal
float64 | qsc_code_frac_chars_alphabet_quality_signal
float64 | qsc_code_frac_chars_comments_quality_signal
float64 | qsc_code_cate_xml_start_quality_signal
float64 | qsc_code_frac_lines_dupe_lines_quality_signal
float64 | qsc_code_cate_autogen_quality_signal
float64 | qsc_code_frac_lines_long_string_quality_signal
float64 | qsc_code_frac_chars_string_length_quality_signal
float64 | qsc_code_frac_chars_long_word_length_quality_signal
float64 | qsc_code_frac_lines_string_concat_quality_signal
float64 | qsc_code_cate_encoded_data_quality_signal
float64 | qsc_code_frac_chars_hex_words_quality_signal
float64 | qsc_code_frac_lines_prompt_comments_quality_signal
float64 | qsc_code_frac_lines_assert_quality_signal
float64 | qsc_codepython_cate_ast_quality_signal
float64 | qsc_codepython_frac_lines_func_ratio_quality_signal
float64 | qsc_codepython_cate_var_zero_quality_signal
bool | qsc_codepython_frac_lines_pass_quality_signal
float64 | qsc_codepython_frac_lines_import_quality_signal
float64 | qsc_codepython_frac_lines_simplefunc_quality_signal
float64 | qsc_codepython_score_lines_no_logic_quality_signal
float64 | qsc_codepython_frac_lines_print_quality_signal
float64 | qsc_code_num_words
int64 | qsc_code_num_chars
int64 | qsc_code_mean_word_length
int64 | qsc_code_frac_words_unique
null | qsc_code_frac_chars_top_2grams
int64 | qsc_code_frac_chars_top_3grams
int64 | qsc_code_frac_chars_top_4grams
int64 | qsc_code_frac_chars_dupe_5grams
int64 | qsc_code_frac_chars_dupe_6grams
int64 | qsc_code_frac_chars_dupe_7grams
int64 | qsc_code_frac_chars_dupe_8grams
int64 | qsc_code_frac_chars_dupe_9grams
int64 | qsc_code_frac_chars_dupe_10grams
int64 | qsc_code_frac_chars_replacement_symbols
int64 | qsc_code_frac_chars_digital
int64 | qsc_code_frac_chars_whitespace
int64 | qsc_code_size_file_byte
int64 | qsc_code_num_lines
int64 | qsc_code_num_chars_line_max
int64 | qsc_code_num_chars_line_mean
int64 | qsc_code_frac_chars_alphabet
int64 | qsc_code_frac_chars_comments
int64 | qsc_code_cate_xml_start
int64 | qsc_code_frac_lines_dupe_lines
int64 | qsc_code_cate_autogen
int64 | qsc_code_frac_lines_long_string
int64 | qsc_code_frac_chars_string_length
int64 | qsc_code_frac_chars_long_word_length
int64 | qsc_code_frac_lines_string_concat
null | qsc_code_cate_encoded_data
int64 | qsc_code_frac_chars_hex_words
int64 | qsc_code_frac_lines_prompt_comments
int64 | qsc_code_frac_lines_assert
int64 | qsc_codepython_cate_ast
int64 | qsc_codepython_frac_lines_func_ratio
int64 | qsc_codepython_cate_var_zero
int64 | qsc_codepython_frac_lines_pass
int64 | qsc_codepython_frac_lines_import
int64 | qsc_codepython_frac_lines_simplefunc
int64 | qsc_codepython_score_lines_no_logic
int64 | qsc_codepython_frac_lines_print
int64 | effective
string | hits
int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
293a1bcbc6d90a969408d79aa6adc718e9ac7de4
| 92
|
py
|
Python
|
run.py
|
b0nbon1/stack-overflow-lite-API
|
fccb7b9b7bf39434e6a26ffd5d8db99d27d4f680
|
[
"MIT"
] | 5
|
2019-04-08T21:12:46.000Z
|
2019-04-16T08:12:31.000Z
|
run.py
|
b0nbon1/stack-overflow-lite-API
|
fccb7b9b7bf39434e6a26ffd5d8db99d27d4f680
|
[
"MIT"
] | null | null | null |
run.py
|
b0nbon1/stack-overflow-lite-API
|
fccb7b9b7bf39434e6a26ffd5d8db99d27d4f680
|
[
"MIT"
] | null | null | null |
"""Runn the app"""
# local import
from app import create_app
app = create_app('development')
| 23
| 31
| 0.73913
| 14
| 92
| 4.714286
| 0.571429
| 0.272727
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.130435
| 92
| 4
| 31
| 23
| 0.825
| 0.282609
| 0
| 0
| 0
| 0
| 0.180328
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
2952e64663e3b517bbf6c69b4c9be78e790cfb65
| 5,340
|
py
|
Python
|
acquisitions/migrations/0022_auto_20160726_2026.py
|
18F/acqstackdb
|
7d939e7deb1cb8749f16fe6b6bc092f5db5c4469
|
[
"CC0-1.0"
] | 2
|
2016-06-03T16:33:34.000Z
|
2016-07-22T12:10:31.000Z
|
acquisitions/migrations/0022_auto_20160726_2026.py
|
18F/acqstackdb
|
7d939e7deb1cb8749f16fe6b6bc092f5db5c4469
|
[
"CC0-1.0"
] | 26
|
2016-06-02T11:21:15.000Z
|
2016-07-18T14:10:03.000Z
|
acquisitions/migrations/0022_auto_20160726_2026.py
|
18F/acqstackdb
|
7d939e7deb1cb8749f16fe6b6bc092f5db5c4469
|
[
"CC0-1.0"
] | 2
|
2017-07-14T08:33:32.000Z
|
2021-02-15T10:16:18.000Z
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2016-07-26 20:26
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('acquisitions', '0021_auto_20160712_2135'),
]
operations = [
migrations.AlterModelOptions(
name='agency',
options={'ordering': ('name',), 'verbose_name_plural': 'Agencies'},
),
migrations.AlterModelOptions(
name='step',
options={'ordering': ('steptrackthroughmodel__order',)},
),
migrations.AlterField(
model_name='acquisition',
name='competition_strategy',
field=models.CharField(blank=True, choices=[('A/E Procedures', 'A/E Procedures'), ('Competed under SAP', 'Competed under SAP'), ('Competitive Delivery Order Fair Opportunity Provided', 'Competitive Delivery Order Fair Opportunity Provided'), ('Competitive Schedule Buy', 'Competitive Schedule Buy'), ('Fair Opportunity', 'Fair Opportunity'), ('Follow On to Competed Action (FAR 6.302-1)', 'Follow On to Competed Action (FAR 6.302-1)'), ('Follow On to Competed Action', 'Follow On to Competed Action'), ('Full and Open after exclusion of sources (competitive small business set-asides, competitive 8a)', 'Full and Open after exclusion of sources (competitive small business set-asides, competitive 8a)'), ('Full and Open Competition Unrestricted', 'Full and Open Competition Unrestricted'), ('Full and Open Competition', 'Full and Open Competition'), ('Limited Sources FSS Order', 'Limited Sources FSS Order'), ('Limited Sources', 'Limited Sources'), ('Non-Competitive Delivery Order', 'Non-Competitive Delivery Order'), ('Not Available for Competition (e.g., 8a sole source, HUBZone & SDVOSB sole source, Ability One, all > SAT)', 'Not Available for Competition (e.g., 8a sole source, HUBZone & SDVOSB sole source, Ability One, all > SAT)'), ('Not Competed (e.g., sole source, urgency, etc., all > SAT)', 'Not Competed (e.g., sole source, urgency, etc., all > SAT)'), ('Not Competed under SAP (e.g., Urgent, Sole source, Logical Follow-On, 8a, HUBZone & SDVOSB sole source, all < SAT)', 'Not Competed under SAP (e.g., Urgent, Sole source, Logical Follow-On, 8a, HUBZone & SDVOSB sole source, all < SAT)'), ('Partial Small Business Set-Aside', 'Partial Small Business Set-Aside'), ('Set-Aside', 'Set-Aside'), ('Sole Source', 'Sole Source')], max_length=100, null=True),
),
migrations.AlterField(
model_name='acquisition',
name='contract_type',
field=models.CharField(blank=True, choices=[('Cost No Fee', 'Cost No Fee'), ('Cost Plus Award Fee', 'Cost Plus Award Fee'), ('Cost Plus Fixed Fee', 'Cost Plus Fixed Fee'), ('Cost Plus Incentive Fee', 'Cost Plus Incentive Fee'), ('Cost Sharing', 'Cost Sharing'), ('Fixed Price Award Fee', 'Fixed Price Award Fee'), ('Fixed Price Incentive', 'Fixed Price Incentive'), ('Fixed Price Labor Hours', 'Fixed Price Labor Hours'), ('Fixed Price Level of Effort', 'Fixed Price Level of Effort'), ('Fixed Price Time and Materials', 'Fixed Price Time and Materials'), ('Fixed Price with Economic Price Adjustment', 'Fixed Price with Economic Price Adjustment'), ('Fixed Price', 'Fixed Price'), ('Interagency Agreement', 'Interagency Agreement'), ('Labor Hours and Time and Materials', 'Labor Hours and Time and Materials'), ('Labor Hours', 'Labor Hours'), ('Order Dependent', 'Order Dependent'), ('Time and Materials', 'Time and Materials')], max_length=100, null=True),
),
migrations.AlterField(
model_name='acquisition',
name='procurement_method',
field=models.CharField(blank=True, choices=[('Ability One', 'Ability One'), ('Basic Ordering Agreement', 'Basic Ordering Agreement'), ('Blanket Purchase Agreement-BPA', 'Blanket Purchase Agreement-BPA'), ('BPA Call', 'BPA Call'), ('Call Order under GSA Schedules BPA', 'Call Order under GSA Schedules BPA'), ('Commercial Item Contract', 'Commercial Item Contract'), ('Contract modification', 'Contract modification'), ('Contract', 'Contract'), ('Definitive Contract other than IDV', 'Definitive Contract other than IDV'), ('Definitive Contract', 'Definitive Contract'), ('Government-wide Agency Contract-GWAC', 'Government-wide Agency Contract-GWAC'), ('GSA Schedule Contract', 'GSA Schedule Contract'), ('GSA Schedule', 'GSA Schedule'), ('GSA Schedules Program BPA', 'GSA Schedules Program BPA'), ('Indefinite Delivery Indefinite Quantity-IDIQ', 'Indefinite Delivery Indefinite Quantity-IDIQ'), ('Indefinite Delivery Vehicle (IDV)', 'Indefinite Delivery Vehicle (IDV)'), ('Indefinite Delivery Vehicle Base Contract', 'Indefinite Delivery Vehicle Base Contract'), ('Multi-Agency Contract', 'Multi-Agency Contract'), ('Negotiated', 'Negotiated'), ('Order under GSA Federal Supply Schedules Program', 'Order under GSA Federal Supply Schedules Program'), ('Order under GSA Schedules Program BPA', 'Order under GSA Schedules Program BPA'), ('Order under GSA Schedules Program', 'Order under GSA Schedules Program'), ('Order under IDV', 'Order under IDV'), ('Purchase Order', 'Purchase Order'), ('Sealed Bid', 'Sealed Bid')], max_length=100, null=True),
),
]
| 136.923077
| 1,852
| 0.68839
| 647
| 5,340
| 5.64915
| 0.250386
| 0.038304
| 0.028454
| 0.036115
| 0.665663
| 0.58632
| 0.479617
| 0.327497
| 0.28591
| 0.264295
| 0
| 0.012884
| 0.171536
| 5,340
| 38
| 1,853
| 140.526316
| 0.813291
| 0.012547
| 0
| 0.419355
| 1
| 0.129032
| 0.706641
| 0.009677
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.064516
| 0
| 0.16129
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
4623fc8082a8955099775873e9b30efb4c7473c2
| 11,259
|
py
|
Python
|
tests/geometry.py
|
Pithikos/python-rectangles
|
3dcee1786b876117674c0a1371b0a0f1d5fc313f
|
[
"MIT"
] | 25
|
2015-05-29T21:12:00.000Z
|
2022-02-16T06:29:29.000Z
|
tests/geometry.py
|
Pithikos/python-rectangles
|
3dcee1786b876117674c0a1371b0a0f1d5fc313f
|
[
"MIT"
] | 3
|
2016-11-22T12:42:36.000Z
|
2017-05-22T10:26:23.000Z
|
tests/geometry.py
|
Pithikos/python-rectangles
|
3dcee1786b876117674c0a1371b0a0f1d5fc313f
|
[
"MIT"
] | 9
|
2018-03-07T16:44:30.000Z
|
2021-01-13T00:26:53.000Z
|
import sys, os
if os.getcwd().endswith('python-rectangles'):
sys.path.insert(0, os.path.abspath('.'))
elif os.getcwd().endswith('python-rectangles/tests'):
sys.path.insert(0, os.path.abspath('..'))
from geometry import *
import math
# ---------------------------- Tools -----------------------------
def Line(x1, y1, x2, y2):
return Point(x1, y1), Point(x2, y2)
# ---------------------------- Test Points -----------------------------
p1=Point(10, 20)
p2=Point(10, 20)
p3=Point(20, 30)
p4=Point(20, 30)
assert p1==p2
assert p1!=p3
assert p2!=p3
# --- Point in rect ---
r=Rect(0, 0, 1, 1)
assert r.is_point_inside_rect(Point(1, 0))
assert r.is_point_inside_rect(Point(1, 1))
assert not r.is_point_inside_rect(Point(-1, -1))
assert not r.is_point_inside_rect(Point(-1, 0))
assert not r.is_point_inside_rect(Point(-1, 1))
assert not r.is_point_inside_rect(Point(0, -1))
assert r.is_point_inside_rect(Point(0, 0))
assert r.is_point_inside_rect(Point(0, 1))
assert not r.is_point_inside_rect(Point(1, -1))
assert r.is_point_inside_rect(Point(1, 0))
assert r.is_point_inside_rect(Point(1, 1))
assert not r.is_point_inside_rect(Point(2, -1))
assert not r.is_point_inside_rect(Point(2, 0))
assert not r.is_point_inside_rect(Point(2, 1))
assert not r.is_point_inside_rect(Point(-1, -1))
assert not r.is_point_inside_rect(Point(-1, 0))
assert r.is_point_inside_rect(Point(0.5, 0.5))
# --- Distance ---
assert Point(0, 0).distance_to_point(Point(0, 1))==1
assert Point(0, 0).distance_to_point(Point(1, 0))==1
assert Point(0, 0).distance_to_point(Point(1, 1))==math.sqrt(2)
assert Point(0, 2).distance_to_point(Point(1, 1))==math.sqrt(2)
assert Point(0, 0).distance_to_point(Point(20, 20))==math.sqrt(20**2+20**2)
assert Point(20, 20).distance_to_point(Point(0, 0))==math.sqrt(20**2+20**2)
assert Point(-20, -20).distance_to_point(Point(0, 0))==math.sqrt(20**2+20**2)
assert Point(0, 0).distance_to_point(Point(-20, -20))==math.sqrt(20**2+20**2)
# --- Area of triangle
assert triangle_area_at_points(Point(0, 0), Point(0, 0), Point(0, 0))==0
assert round(triangle_area_at_points(Point(0, 0), Point(1, 1), Point(1, 0)), 2)==round(0.5, 2)
assert round(triangle_area_at_points(Point(0, 0), Point(1, 2), Point(1, 0)), 2)==round(1, 2)
# --- Point faces edge
edge = (Point(0, 0), Point(1, 1))
assert point_faces_edge(edge, Point(0, 2))
assert point_faces_edge(edge, Point(-1, 1))
assert point_faces_edge(edge, Point(0, 1))
assert point_faces_edge(edge, Point(1, -1))
assert point_faces_edge(edge, Point(5, -5))
assert not point_faces_edge(edge, Point(5, -7))
assert not point_faces_edge(edge, Point(0, 3))
edge = (Point(-5, -10), Point(-6, -11))
assert point_faces_edge(edge, Point(-5, -10))
assert point_faces_edge(edge, Point(-6, -11))
assert point_faces_edge(edge, Point(-6, -10))
assert not point_faces_edge(edge, Point(0, 0))
edge = (Point(-5, -100), Point(-5, 100))
assert point_faces_edge(edge, Point(-100, 0))
assert point_faces_edge(edge, Point(-100, 100))
assert not point_faces_edge(edge, Point(-100, 101))
# --- Distance between edge and point
edge = (Point(0, 0), Point(1, 1))
assert round(distance_between_edge_and_point(edge, Point(0, 1)), 2)==0.71
assert round(distance_between_edge_and_point(edge, Point(0, 2)), 2)==1.41
assert distance_between_edge_and_point(edge, Point(0, 0))==0
edge = (Point(0, 1), Point(2, 6)) # slope = 2, offset = (y=1)
assert distance_between_edge_and_point(edge, Point(0, 1))==0
assert distance_between_edge_and_point(edge, Point(2, 6))==0
edge = (Point(0, 0), Point(1, 2)) # slope = 2, offset = 0
assert round(distance_between_edge_and_point(edge, Point(0, 2)), 2)==0.89
edge = (Point(0.5, 0.5), Point(2.5, 0.5))
assert round(distance_between_edge_and_point(edge, Point(0, 1)), 2)==0.71
assert round(distance_between_edge_and_point(edge, Point(0, 0)), 2)==0.71
assert round(distance_between_edge_and_point(edge, Point(1, 1)), 2)==0.5
assert round(distance_between_edge_and_point(edge, Point(1, 0)), 2)==0.5
# -------------------------- Test properties ---------------------------
# Positive numbers
r1=Rect(100, 100, 30, 20)
assert r1.l_top == Point(100.0, 100.0)
assert r1.r_top == Point(130.0, 100.0)
assert r1.l_bot == Point(100.0, 120.0)
assert r1.r_bot == Point(130.0, 120.0)
assert r1.center == Point(100+30/2, 100+20/2)
assert r1.width == 30.0
assert r1.height == 20.0
assert r1.is_point_inside_rect(Point(100, 100))
assert r1.is_point_inside_rect(Point(130, 100))
assert r1.is_point_inside_rect(Point(130, 120))
assert r1.is_point_inside_rect(Point(100, 120))
assert not r1.is_point_inside_rect(Point(131, 100))
# ---------------- Test relations with other rectangles ----------------
# --- Alignment
r1=Rect(0, 0, 50, 50)
r2=Rect(40, 40, 20, 20)
r1.align_with_top_edge_of(r2)
assert r1.l_top.y==r2.l_top.y
assert r1.r_top.y==r2.r_top.y
assert r1.l_bot.y==r1.l_top.y+r1.height
assert r1.r_bot.y==r1.l_top.y+r1.height
r1=Rect(0, 0, 50, 50)
r2=Rect(40, 40, 20, 20)
r1.align_with_left_edge_of(r2)
assert r1.l_top.x==r2.l_top.x
assert r1.l_bot.x==r2.l_bot.x
assert r1.r_bot.x==r1.l_top.x+r1.width
assert r1.r_top.x==r1.l_top.x+r1.width
# --- Overlapping
r1=Rect(100, 100, 30, 20)
r2=Rect(110, 100, 30, 20) # a bit to the right compared to r1
r3=Rect(100, 110, 30, 20) # a bit to the bottom compared to r1
r4=Rect(150, 150, 50, 50) # doesn't overlap at all
assert r1.overlaps_with(r2)
assert r1.overlaps_with(r3)
assert not r1.overlaps_with(r4)
# (commutative property)
assert r2.overlaps_with(r1)
assert r3.overlaps_with(r1)
assert not r4.overlaps_with(r1)
# --- on x axis
r1=Rect(0, 0, 50, 50)
r2=Rect(0, 10, 50, 50)
r3=Rect(0, 500, 50, 50)
r4=Rect(500, 0, 50, 50)
assert r1.overlaps_on_x_axis_with(r2)
assert r1.overlaps_on_x_axis_with(r3)
assert not r1.overlaps_on_x_axis_with(r4)
# (commutative property)
assert r2.overlaps_on_x_axis_with(r1)
assert r3.overlaps_on_x_axis_with(r1)
assert not r4.overlaps_on_x_axis_with(r1)
# --- y axis
r1=Rect(0, 0, 50, 50)
r2=Rect(10, 0, 50, 50)
r3=Rect(50, 0, 50, 50)
r4=Rect(100, 0, 50, 50)
r5=Rect( 0, 100, 50, 50)
assert r1.overlaps_on_y_axis_with(r2)
assert r1.overlaps_on_y_axis_with(r3)
assert r1.overlaps_on_y_axis_with(r4)
assert not r1.overlaps_on_y_axis_with(r5)
# (commutative property)
assert r2.overlaps_on_y_axis_with(r1)
assert r3.overlaps_on_y_axis_with(r1)
assert r4.overlaps_on_y_axis_with(r1)
assert not r5.overlaps_on_y_axis_with(r1)
# other cases
rect1 = Rect(1, 0, 1, 1)
rect2 = Rect(0, 0, 6, 1)
assert rect1.overlaps_with(rect2)
# EDGE OVERLAPPING
# edge overlap on x axis
e1 = Line(0, 0, 100, 0)
assert lines_overlap_on_x_axis(e1, Line(50, 0, 150, 0))
assert lines_overlap_on_x_axis(Line(50, 0, 150, 0), e1)
assert lines_overlap_on_x_axis(Line(50, 0, 60, 0), e1)
assert lines_overlap_on_x_axis(e1, Line(50, 0, 60, 0))
assert lines_overlap_on_x_axis(e1, Line(50, 100, 150, 200))
assert lines_overlap_on_x_axis(Line(50, -190, 150, -200), e1)
assert not lines_overlap_on_x_axis(e1, Line(-1, 0, -100, 0))
# edge overlap on y axis
e1 = Line(0, 0, 0, 100)
assert lines_overlap_on_y_axis(e1, Line(0, 50, 0, 150))
assert lines_overlap_on_y_axis(Line(0, 50, 0, 150), e1)
assert lines_overlap_on_y_axis(e1, Line(0, 50, 0, 60))
assert lines_overlap_on_y_axis(Line(0, 50, 0, 60), e1)
assert not lines_overlap_on_y_axis(e1, Line(0, 101, 0, 110))
assert not lines_overlap_on_y_axis(e1, Line(0, -1, 0, -10))
# edge intersect detection
e1 = Line(0, 0, 1, 1)
assert lines_intersect(e1, Line(0, 0, 0, 1))
assert lines_intersect(e1, Line(0, 0, 1, 0))
assert lines_intersect(e1, Line(0, 0, 1, 1))
assert lines_intersect(e1, Line(1, 1, 1, 1))
assert lines_intersect(e1, Line(2, 2, 1, 1))
assert not lines_intersect(e1, Line(1.1, 1.1, 1.2, 1.2))
assert lines_intersect(e1, Line(1, 0, 0, 1))
assert not lines_intersect(e1, Line(-0.1, -0.1, -1, 1))
# more complex cases
r1 = Rect(0, 5, 50, 50)
r2 = Rect(35, 0, 1, 1)
assert r1.overlaps_on_x_axis_with(r2)
assert not r1.overlaps_with(r2)
assert round(r1.distance_to_rect(r2)) == 4.0
# --- Distance between rectangles
w, h = 1, 1
# positives
r1=Rect( 0, 0, w, h)
r2=Rect( 1, 0, w, h)
r3=Rect( 2, 0, w, h)
r4=Rect( 0, 1, w, h)
r5=Rect( 1, 1, w, h)
r6=Rect( 2, 1, w, h)
r7=Rect( 0, 2, w, h)
r8=Rect( 1, 2, w, h)
r9=Rect( 2, 2, w, h)
r0=Rect( 0.5, 0.5, w, h)
assert r1.distance_to_rect(r2)==0.0
assert r1.distance_to_rect(r3)==1.0
assert r1.distance_to_rect(r4)==0.0
assert r1.distance_to_rect(r5)==0.0
assert r1.distance_to_rect(r6)==1.0
assert r1.distance_to_rect(r7)==1.0
assert r1.distance_to_rect(r8)==1.0
assert round(r1.distance_to_rect(r9), 2)==1.41
assert r1.distance_to_rect(r0)==0
#negative x
r1=Rect( 0, 0, w, h)
r2=Rect(-1, 0, w, h)
r3=Rect(-2, 0, w, h)
r4=Rect( 0, 1, w, h)
r5=Rect(-1, 1, w, h)
r6=Rect(-2, 1, w, h)
r7=Rect( 0, 2, w, h)
r8=Rect(-1, 2, w, h)
r9=Rect(-2, 2, w, h)
r0=Rect(-0.5, 0.5, w, h)
assert r1.distance_to_rect(r2)==0.0
assert r1.distance_to_rect(r3)==1.0
assert r1.distance_to_rect(r4)==0.0
assert r1.distance_to_rect(r5)==0.0
assert r1.distance_to_rect(r6)==1.0
assert r1.distance_to_rect(r7)==1.0
assert r1.distance_to_rect(r8)==1.0
assert round(r1.distance_to_rect(r9), 2)==1.41
assert r1.distance_to_rect(r0)==0
# negative y
r1=Rect( 0, 0, w, h)
r2=Rect( 1, 0, w, h)
r3=Rect( 2, 0, w, h)
r4=Rect( 0,-1, w, h)
r5=Rect( 1,-1, w, h)
r6=Rect( 2,-1, w, h)
r7=Rect( 0,-2, w, h)
r8=Rect( 1,-2, w, h)
r9=Rect( 2,-2, w, h)
r0=Rect( 0.5,-0.5, w, h)
assert r1.distance_to_rect(r2)==0.0
assert r1.distance_to_rect(r3)==1.0
assert r1.distance_to_rect(r4)==0.0
assert r1.distance_to_rect(r5)==0.0
assert r1.distance_to_rect(r6)==1.0
assert r1.distance_to_rect(r7)==1.0
assert r1.distance_to_rect(r8)==1.0
assert round(r1.distance_to_rect(r9), 2)==1.41
assert r1.distance_to_rect(r0)==0
#negative x and y
r1=Rect( 0, 0, w, h)
r2=Rect(-1, 0, w, h)
r3=Rect(-2, 0, w, h)
r4=Rect( 0,-1, w, h)
r5=Rect(-1,-1, w, h)
r6=Rect(-2,-1, w, h)
r7=Rect( 0,-2, w, h)
r8=Rect(-1,-2, w, h)
r9=Rect(-2,-2, w, h)
r0=Rect(-0.5,-0.5, w, h)
assert r1.distance_to_rect(r2)==0.0
assert r1.distance_to_rect(r3)==1.0
assert r1.distance_to_rect(r4)==0.0
assert r1.distance_to_rect(r5)==0.0
assert r1.distance_to_rect(r6)==1.0
assert r1.distance_to_rect(r7)==1.0
assert r1.distance_to_rect(r8)==1.0
assert round(r1.distance_to_rect(r9), 2)==1.41
assert r1.distance_to_rect(r0)==0
# overlap
r1=Rect( 0, 0, 50, 50)
r2=Rect( 10, 10, 50, 50)
assert r1.distance_to_rect(r2)==0
# overlap on x axis
r1=Rect( 0, 0, 50, 50) # __
r2=Rect( 0, 60, 50, 50) # __
assert round(r1.distance_to_rect(r2), 2)==10
r1=Rect( 0, 0, 50, 50) # __
r2=Rect( 10, 60, 50, 50) # __
assert round(r1.distance_to_rect(r2), 2)==10
r1=Rect( 10, 60, 50, 50) # __
r2=Rect( 0, 0, 50, 50) # __
assert round(r1.distance_to_rect(r2), 2)==10
# overlap on y axis
r1=Rect( 0, 0, 50, 50) # ||
r2=Rect( 60, 0, 50, 50)
assert round(r1.distance_to_rect(r2), 2)==10
r1=Rect( 0, 0, 50, 50) # |.
r2=Rect( 60, 10, 50, 50) # '
assert round(r1.distance_to_rect(r2), 2)==10
r1=Rect( 0, 20, 50, 50) # .
r2=Rect( 60, 0, 50, 50) # |'
assert round(r1.distance_to_rect(r2), 2)==10
# ----------------------------------------------------------------------
print("No errors")
| 33.810811
| 94
| 0.671108
| 2,274
| 11,259
| 3.140281
| 0.061566
| 0.067217
| 0.073939
| 0.098586
| 0.839238
| 0.798768
| 0.748915
| 0.639826
| 0.57877
| 0.512393
| 0
| 0.127065
| 0.129052
| 11,259
| 332
| 95
| 33.912651
| 0.601163
| 0.090861
| 0
| 0.340824
| 0
| 0
| 0.005105
| 0.002258
| 0
| 0
| 0
| 0
| 0.610487
| 1
| 0.003745
| false
| 0
| 0.011236
| 0.003745
| 0.018727
| 0.003745
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
4627c1074de5ba034558d3930e787f37d27fd104
| 83
|
py
|
Python
|
cntm_backend/cntm/apps.py
|
dzimmerer/cntm2018
|
c61987d882a53cc9f428e49f7d91dd0a1413d2e5
|
[
"Apache-2.0"
] | 2
|
2018-02-08T21:45:52.000Z
|
2018-03-19T14:45:01.000Z
|
cntm_backend/cntm/apps.py
|
dzimmerer/cntm2018
|
c61987d882a53cc9f428e49f7d91dd0a1413d2e5
|
[
"Apache-2.0"
] | null | null | null |
cntm_backend/cntm/apps.py
|
dzimmerer/cntm2018
|
c61987d882a53cc9f428e49f7d91dd0a1413d2e5
|
[
"Apache-2.0"
] | null | null | null |
from django.apps import AppConfig
class CntmConfig(AppConfig):
name = 'cntm'
| 13.833333
| 33
| 0.73494
| 10
| 83
| 6.1
| 0.9
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.180723
| 83
| 5
| 34
| 16.6
| 0.897059
| 0
| 0
| 0
| 0
| 0
| 0.048193
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
462aa5c3346a5f2bd03cfb570251e723c51a3093
| 55
|
py
|
Python
|
dependencies/extrae/src/others/pyextrae/profile/__init__.py
|
TANGO-Project/compss-tango
|
d9e007b6fe4f8337d4f267f95f383d8962602ab8
|
[
"Apache-2.0"
] | 3
|
2018-03-05T14:52:22.000Z
|
2019-02-08T09:58:24.000Z
|
dependencies/extrae/src/others/pyextrae/profile/__init__.py
|
TANGO-Project/compss-tango
|
d9e007b6fe4f8337d4f267f95f383d8962602ab8
|
[
"Apache-2.0"
] | 1
|
2019-11-13T14:30:21.000Z
|
2019-11-13T14:30:21.000Z
|
dependencies/extrae/src/others/pyextrae/profile/__init__.py
|
TANGO-Project/compss-tango
|
d9e007b6fe4f8337d4f267f95f383d8962602ab8
|
[
"Apache-2.0"
] | null | null | null |
from pyextrae.common.extrae import *
startProfiling()
| 13.75
| 36
| 0.8
| 6
| 55
| 7.333333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.109091
| 55
| 3
| 37
| 18.333333
| 0.897959
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
46823f6e46ef7fe88a8c8762e9b0d3b640fbb629
| 274
|
py
|
Python
|
tests/__init__.py
|
questionlp/libwwdtm
|
02b667cefd6c3bf971dc626110cd80fd32e61096
|
[
"Apache-2.0"
] | 2
|
2019-07-24T20:06:48.000Z
|
2019-11-13T04:12:34.000Z
|
tests/__init__.py
|
questionlp/libwwdtm
|
02b667cefd6c3bf971dc626110cd80fd32e61096
|
[
"Apache-2.0"
] | 1
|
2021-04-20T18:45:49.000Z
|
2021-04-20T18:45:49.000Z
|
tests/__init__.py
|
questionlp/libwwdtm
|
02b667cefd6c3bf971dc626110cd80fd32e61096
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright (c) 2018-2019 Linh Pham
# wwdtm is relased under the terms of the Apache License 2.0
"""Explicitly listing all modules in this package"""
from tests import test_guest, test_host, test_location, test_panelist, test_scorekeeper, test_show
| 39.142857
| 98
| 0.759124
| 43
| 274
| 4.697674
| 0.860465
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.047009
| 0.145985
| 274
| 6
| 99
| 45.666667
| 0.816239
| 0.591241
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
46a38efa5d90121cc3e5a1b4b3c935b5c10e04a0
| 50
|
py
|
Python
|
setup.py
|
Murabei-OpenSource-Codes/pybats-detection
|
a5c7ad4bd4bd5be6d25ebb9665cf3637d21ca831
|
[
"Apache-2.0"
] | 1
|
2022-01-19T01:08:18.000Z
|
2022-01-19T01:08:18.000Z
|
setup.py
|
Murabei-OpenSource-Codes/pybats-detection
|
a5c7ad4bd4bd5be6d25ebb9665cf3637d21ca831
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
Murabei-OpenSource-Codes/pybats-detection
|
a5c7ad4bd4bd5be6d25ebb9665cf3637d21ca831
|
[
"Apache-2.0"
] | null | null | null |
"""Setup."""
import setuptools
setuptools.setup()
| 12.5
| 18
| 0.72
| 5
| 50
| 7.2
| 0.6
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.08
| 50
| 3
| 19
| 16.666667
| 0.782609
| 0.12
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
46aac7f635101386e3b4e9f6808ea2d46aad7d65
| 216
|
py
|
Python
|
terra_sdk/client/lcd/__init__.py
|
fabio-nukui/terra.py
|
adee2e1abf41a05a1c39d52b664bd7cf7c9bc975
|
[
"MIT"
] | 66
|
2021-10-21T23:29:38.000Z
|
2022-03-30T15:58:13.000Z
|
terra_sdk/client/lcd/__init__.py
|
fabio-nukui/terra.py
|
adee2e1abf41a05a1c39d52b664bd7cf7c9bc975
|
[
"MIT"
] | 50
|
2021-10-19T06:11:56.000Z
|
2022-03-31T17:06:57.000Z
|
terra_sdk/client/lcd/__init__.py
|
fabio-nukui/terra.py
|
adee2e1abf41a05a1c39d52b664bd7cf7c9bc975
|
[
"MIT"
] | 39
|
2021-11-07T17:28:31.000Z
|
2022-03-31T15:03:57.000Z
|
from .lcdclient import AsyncLCDClient, LCDClient
from .params import PaginationOptions
from .wallet import AsyncWallet, Wallet
__all__ = ["AsyncLCDClient", "LCDClient", "AsyncWallet", "Wallet", "PaginationOptions"]
| 36
| 87
| 0.796296
| 20
| 216
| 8.4
| 0.45
| 0.27381
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.101852
| 216
| 5
| 88
| 43.2
| 0.865979
| 0
| 0
| 0
| 0
| 0
| 0.263889
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.75
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
d3c865164eedb320086fc6d02fe41ee6b9f04a86
| 101
|
py
|
Python
|
dummyauthenticator/__init__.py
|
Naba7/dummyauthenticator
|
4d2c39fc99a5665691e5d5dbc6156e5f29151e3e
|
[
"BSD-3-Clause"
] | 18
|
2019-03-22T10:50:42.000Z
|
2021-12-10T03:46:24.000Z
|
dummyauthenticator/__init__.py
|
Naba7/dummyauthenticator
|
4d2c39fc99a5665691e5d5dbc6156e5f29151e3e
|
[
"BSD-3-Clause"
] | 7
|
2018-10-08T07:46:40.000Z
|
2021-02-12T08:19:56.000Z
|
dummyauthenticator/__init__.py
|
Naba7/dummyauthenticator
|
4d2c39fc99a5665691e5d5dbc6156e5f29151e3e
|
[
"BSD-3-Clause"
] | 8
|
2018-12-13T08:30:54.000Z
|
2021-03-15T07:27:23.000Z
|
from dummyauthenticator.dummyauthenticator import DummyAuthenticator
__all__ = [DummyAuthenticator]
| 25.25
| 68
| 0.881188
| 7
| 101
| 12.142857
| 0.571429
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.079208
| 101
| 3
| 69
| 33.666667
| 0.913978
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
d3e328f383f79dcadab88566ca65a72bcb3a7ede
| 201
|
py
|
Python
|
tripadvisor/apps.py
|
baajarmeh/tripadvisor-scraper
|
e5dd7bf0864e4f87ff909e57e1ed531eeb30f9dd
|
[
"Apache-2.0"
] | 7
|
2018-06-26T14:02:32.000Z
|
2022-01-14T01:42:19.000Z
|
tripadvisor/apps.py
|
baajarmeh/tripadvisor-scraper
|
e5dd7bf0864e4f87ff909e57e1ed531eeb30f9dd
|
[
"Apache-2.0"
] | null | null | null |
tripadvisor/apps.py
|
baajarmeh/tripadvisor-scraper
|
e5dd7bf0864e4f87ff909e57e1ed531eeb30f9dd
|
[
"Apache-2.0"
] | 1
|
2020-03-27T15:48:11.000Z
|
2020-03-27T15:48:11.000Z
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.apps import AppConfig
from suit.apps import DjangoSuitConfig
class TripadvisorConfig(AppConfig):
name = 'tripadvisor'
| 20.1
| 39
| 0.771144
| 23
| 201
| 6.521739
| 0.73913
| 0.133333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.005814
| 0.144279
| 201
| 9
| 40
| 22.333333
| 0.866279
| 0.104478
| 0
| 0
| 0
| 0
| 0.061798
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.6
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
d3fa1d7c1de446e1123a9f727600d6971b849d3c
| 94
|
py
|
Python
|
pixiv_crawler/__init__.py
|
Akaisorani/pixiv-crawl
|
b661109b631e9bb0462f7219c39243cd40afbe6d
|
[
"MIT"
] | 40
|
2017-04-06T14:20:24.000Z
|
2021-10-31T10:09:13.000Z
|
pixiv_crawler/__init__.py
|
Akaisorani/pixiv-crawl
|
b661109b631e9bb0462f7219c39243cd40afbe6d
|
[
"MIT"
] | 6
|
2018-11-20T14:41:44.000Z
|
2020-08-03T07:58:14.000Z
|
pixiv_crawler/__init__.py
|
Akaisorani/pixiv-crawl
|
b661109b631e9bb0462f7219c39243cd40afbe6d
|
[
"MIT"
] | 7
|
2018-04-15T06:03:25.000Z
|
2021-05-25T19:03:42.000Z
|
name = "pixiv_crawler"
__author__ = "Akaisora"
from pixiv_crawler.scraper_manga import *
| 18.8
| 42
| 0.755319
| 11
| 94
| 5.818182
| 0.818182
| 0.375
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.159574
| 94
| 4
| 43
| 23.5
| 0.810127
| 0
| 0
| 0
| 0
| 0
| 0.233333
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
31095f2f12ef1570530e7745cf913677450025bc
| 17
|
py
|
Python
|
subscriptions/__version__.py
|
primal100/stripe-subscriptions
|
05b3c9a1253cf09e7ef17ef4c2ed872e16812641
|
[
"MIT"
] | null | null | null |
subscriptions/__version__.py
|
primal100/stripe-subscriptions
|
05b3c9a1253cf09e7ef17ef4c2ed872e16812641
|
[
"MIT"
] | null | null | null |
subscriptions/__version__.py
|
primal100/stripe-subscriptions
|
05b3c9a1253cf09e7ef17ef4c2ed872e16812641
|
[
"MIT"
] | null | null | null |
version = "0.5.3"
| 17
| 17
| 0.588235
| 4
| 17
| 2.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.2
| 0.117647
| 17
| 1
| 17
| 17
| 0.466667
| 0
| 0
| 0
| 0
| 0
| 0.277778
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
31125bbb21aa7c647603e26b14a5f723059b0245
| 15,252
|
py
|
Python
|
blender/2.79/scripts/addons/space_view3d_brush_menus/texture_menu.py
|
uzairakbar/bpy2.79
|
3a3e0004ac6783c4e4b89d939e4432de99026a85
|
[
"MIT"
] | 2
|
2019-11-27T09:05:42.000Z
|
2020-02-20T01:25:23.000Z
|
space_view3d_brush_menus/texture_menu.py
|
1-MillionParanoidTterabytes/blender-addons-master
|
acc8fc23a38e6e89099c3e5079bea31ce85da06a
|
[
"Unlicense"
] | null | null | null |
space_view3d_brush_menus/texture_menu.py
|
1-MillionParanoidTterabytes/blender-addons-master
|
acc8fc23a38e6e89099c3e5079bea31ce85da06a
|
[
"Unlicense"
] | 4
|
2020-02-19T20:02:26.000Z
|
2022-02-11T18:47:56.000Z
|
# gpl author: Ryan Inch (Imaginer)
import bpy
from bpy.types import Menu
from . import utils_core
class TextureMenu(Menu):
bl_label = "Texture Options"
bl_idname = "VIEW3D_MT_sv3_texture_menu"
@classmethod
def poll(self, context):
return utils_core.get_mode() in (
'SCULPT',
'VERTEX_PAINT',
'TEXTURE_PAINT'
)
def draw(self, context):
layout = self.layout
if utils_core.get_mode() == 'SCULPT':
self.sculpt(layout, context)
elif utils_core.get_mode() == 'VERTEX_PAINT':
self.vertpaint(layout, context)
else:
self.texpaint(layout, context)
def sculpt(self, layout, context):
has_brush = utils_core.get_brush_link(context, types="brush")
tex_slot = has_brush.texture_slot if has_brush else None
# Menus
layout.row().menu(Textures.bl_idname)
layout.row().menu(TextureMapMode.bl_idname)
layout.row().separator()
# Checkboxes
if tex_slot:
if tex_slot.map_mode != '3D':
if tex_slot.map_mode in ('RANDOM', 'VIEW_PLANE', 'AREA_PLANE'):
layout.row().prop(tex_slot, "use_rake", toggle=True)
layout.row().prop(tex_slot, "use_random", toggle=True)
# Sliders
layout.row().prop(tex_slot, "angle",
text=utils_core.PIW + "Angle", slider=True)
if tex_slot.tex_paint_map_mode in ('RANDOM', 'VIEW_PLANE') and tex_slot.use_random:
layout.row().prop(tex_slot, "random_angle",
text=utils_core.PIW + "Random Angle", slider=True)
# Operator
if tex_slot.tex_paint_map_mode == 'STENCIL':
if has_brush.texture and has_brush.texture.type == 'IMAGE':
layout.row().operator("brush.stencil_fit_image_aspect")
layout.row().operator("brush.stencil_reset_transform")
else:
layout.row().label("No Texture Slot available", icon="INFO")
def vertpaint(self, layout, context):
has_brush = utils_core.get_brush_link(context, types="brush")
tex_slot = has_brush.texture_slot if has_brush else None
# Menus
layout.row().menu(Textures.bl_idname)
layout.row().menu(TextureMapMode.bl_idname)
# Checkboxes
if tex_slot:
if tex_slot.tex_paint_map_mode != '3D':
if tex_slot.tex_paint_map_mode in ('RANDOM', 'VIEW_PLANE'):
layout.row().prop(tex_slot, "use_rake", toggle=True)
layout.row().prop(tex_slot, "use_random", toggle=True)
# Sliders
layout.row().prop(tex_slot, "angle",
text=utils_core.PIW + "Angle", slider=True)
if tex_slot.tex_paint_map_mode in ('RANDOM', 'VIEW_PLANE') and tex_slot.use_random:
layout.row().prop(tex_slot, "random_angle",
text=utils_core.PIW + "Random Angle", slider=True)
# Operator
if tex_slot.tex_paint_map_mode == 'STENCIL':
if has_brush.texture and has_brush.texture.type == 'IMAGE':
layout.row().operator("brush.stencil_fit_image_aspect")
layout.row().operator("brush.stencil_reset_transform")
else:
layout.row().label("No Texture Slot available", icon="INFO")
def texpaint(self, layout, context):
has_brush = utils_core.get_brush_link(context, types="brush")
tex_slot = has_brush.texture_slot if has_brush else None
mask_tex_slot = has_brush.mask_texture_slot if has_brush else None
# Texture Section
layout.row().label(text="Texture", icon='TEXTURE')
# Menus
layout.row().menu(Textures.bl_idname)
layout.row().menu(TextureMapMode.bl_idname)
# Checkboxes
if tex_slot:
if tex_slot.tex_paint_map_mode != '3D':
if tex_slot.tex_paint_map_mode in ('RANDOM', 'VIEW_PLANE'):
layout.row().prop(tex_slot, "use_rake", toggle=True)
layout.row().prop(tex_slot, "use_random", toggle=True)
# Sliders
layout.row().prop(tex_slot, "angle",
text=utils_core.PIW + "Angle", slider=True)
if tex_slot.tex_paint_map_mode in ('RANDOM', 'VIEW_PLANE') and tex_slot.use_random:
layout.row().prop(tex_slot, "random_angle",
text=utils_core.PIW + "Random Angle", slider=True)
# Operator
if tex_slot.tex_paint_map_mode == 'STENCIL':
if has_brush.texture and has_brush.texture.type == 'IMAGE':
layout.row().operator("brush.stencil_fit_image_aspect")
layout.row().operator("brush.stencil_reset_transform")
else:
layout.row().label("No Texture Slot available", icon="INFO")
layout.row().separator()
# Texture Mask Section
layout.row().label(text="Texture Mask", icon='MOD_MASK')
# Menus
layout.row().menu(MaskTextures.bl_idname)
layout.row().menu(MaskMapMode.bl_idname)
layout.row().menu(MaskPressureModeMenu.bl_idname)
# Checkboxes
if mask_tex_slot:
if mask_tex_slot.mask_map_mode in ('RANDOM', 'VIEW_PLANE'):
layout.row().prop(mask_tex_slot, "use_rake", toggle=True)
layout.row().prop(mask_tex_slot, "use_random", toggle=True)
# Sliders
layout.row().prop(mask_tex_slot, "angle",
text=utils_core.PIW + "Angle", icon_value=5, slider=True)
if mask_tex_slot.mask_map_mode in ('RANDOM', 'VIEW_PLANE') and mask_tex_slot.use_random:
layout.row().prop(mask_tex_slot, "random_angle",
text=utils_core.PIW + "Random Angle", slider=True)
# Operator
if mask_tex_slot.mask_map_mode == 'STENCIL':
if has_brush.mask_texture and has_brush.mask_texture.type == 'IMAGE':
layout.row().operator("brush.stencil_fit_image_aspect")
prop = layout.row().operator("brush.stencil_reset_transform")
prop.mask = True
else:
layout.row().label("Mask Texture not available", icon="INFO")
class Textures(Menu):
bl_label = "Brush Texture"
bl_idname = "VIEW3D_MT_sv3_texture_list"
def init(self):
if utils_core.get_mode() == 'SCULPT':
datapath = "tool_settings.sculpt.brush.texture"
elif utils_core.get_mode() == 'VERTEX_PAINT':
datapath = "tool_settings.vertex_paint.brush.texture"
elif utils_core.get_mode() == 'TEXTURE_PAINT':
datapath = "tool_settings.image_paint.brush.texture"
else:
datapath = ""
return datapath
def draw(self, context):
datapath = self.init()
has_brush = utils_core.get_brush_link(context, types="brush")
current_texture = eval("bpy.context.{}".format(datapath)) if \
has_brush else None
layout = self.layout
# get the current texture's name
if current_texture:
current_texture = current_texture.name
layout.row().label(text="Brush Texture")
layout.row().separator()
# add an item to set the texture to None
utils_core.menuprop(layout.row(), "None", "None",
datapath, icon='RADIOBUT_OFF', disable=True,
disable_icon='RADIOBUT_ON',
custom_disable_exp=(None, current_texture),
path=True)
# add the menu items
for item in bpy.data.textures:
utils_core.menuprop(layout.row(), item.name,
'bpy.data.textures["%s"]' % item.name,
datapath, icon='RADIOBUT_OFF',
disable=True,
disable_icon='RADIOBUT_ON',
custom_disable_exp=(item.name, current_texture),
path=True)
class TextureMapMode(Menu):
bl_label = "Brush Mapping"
bl_idname = "VIEW3D_MT_sv3_texture_map_mode"
def draw(self, context):
layout = self.layout
has_brush = utils_core.get_brush_link(context, types="brush")
layout.row().label(text="Brush Mapping")
layout.row().separator()
if has_brush:
if utils_core.get_mode() == 'SCULPT':
path = "tool_settings.sculpt.brush.texture_slot.map_mode"
# add the menu items
for item in has_brush. \
texture_slot.bl_rna.properties['map_mode'].enum_items:
utils_core.menuprop(
layout.row(), item.name, item.identifier, path,
icon='RADIOBUT_OFF',
disable=True,
disable_icon='RADIOBUT_ON'
)
elif utils_core.get_mode() == 'VERTEX_PAINT':
path = "tool_settings.vertex_paint.brush.texture_slot.tex_paint_map_mode"
# add the menu items
for item in has_brush. \
texture_slot.bl_rna.properties['tex_paint_map_mode'].enum_items:
utils_core.menuprop(
layout.row(), item.name, item.identifier, path,
icon='RADIOBUT_OFF',
disable=True,
disable_icon='RADIOBUT_ON'
)
else:
path = "tool_settings.image_paint.brush.texture_slot.tex_paint_map_mode"
# add the menu items
for item in has_brush. \
texture_slot.bl_rna.properties['tex_paint_map_mode'].enum_items:
utils_core.menuprop(
layout.row(), item.name, item.identifier, path,
icon='RADIOBUT_OFF',
disable=True,
disable_icon='RADIOBUT_ON'
)
else:
layout.row().label("No brushes available", icon="INFO")
class MaskTextures(Menu):
bl_label = "Mask Texture"
bl_idname = "VIEW3D_MT_sv3_mask_texture_list"
def draw(self, context):
layout = self.layout
datapath = "tool_settings.image_paint.brush.mask_texture"
has_brush = utils_core.get_brush_link(context, types="brush")
current_texture = eval("bpy.context.{}".format(datapath)) if \
has_brush else None
layout.row().label(text="Mask Texture")
layout.row().separator()
if has_brush:
# get the current texture's name
if current_texture:
current_texture = current_texture.name
# add an item to set the texture to None
utils_core.menuprop(
layout.row(), "None", "None",
datapath, icon='RADIOBUT_OFF', disable=True,
disable_icon='RADIOBUT_ON',
custom_disable_exp=(None, current_texture),
path=True
)
# add the menu items
for item in bpy.data.textures:
utils_core.menuprop(
layout.row(), item.name, 'bpy.data.textures["%s"]' % item.name,
datapath, icon='RADIOBUT_OFF', disable=True,
disable_icon='RADIOBUT_ON',
custom_disable_exp=(item.name, current_texture),
path=True
)
else:
layout.row().label("No brushes available", icon="INFO")
class MaskMapMode(Menu):
bl_label = "Mask Mapping"
bl_idname = "VIEW3D_MT_sv3_mask_map_mode"
def draw(self, context):
layout = self.layout
path = "tool_settings.image_paint.brush.mask_texture_slot.mask_map_mode"
has_brush = utils_core.get_brush_link(context, types="brush")
layout.row().label(text="Mask Mapping")
layout.row().separator()
if has_brush:
items = has_brush. \
mask_texture_slot.bl_rna.properties['mask_map_mode'].enum_items
# add the menu items
for item in items:
utils_core.menuprop(
layout.row(), item.name, item.identifier, path,
icon='RADIOBUT_OFF',
disable=True,
disable_icon='RADIOBUT_ON'
)
else:
layout.row().label("No brushes available", icon="INFO")
class TextureAngleSource(Menu):
bl_label = "Texture Angle Source"
bl_idname = "VIEW3D_MT_sv3_texture_angle_source"
def draw(self, context):
layout = self.layout
has_brush = utils_core.get_brush_link(context, types="brush")
if has_brush:
if utils_core.get_mode() == 'SCULPT':
items = has_brush. \
bl_rna.properties['texture_angle_source_random'].enum_items
path = "tool_settings.sculpt.brush.texture_angle_source_random"
elif utils_core.get_mode() == 'VERTEX_PAINT':
items = has_brush. \
bl_rna.properties['texture_angle_source_random'].enum_items
path = "tool_settings.vertex_paint.brush.texture_angle_source_random"
else:
items = has_brush. \
bl_rna.properties['texture_angle_source_random'].enum_items
path = "tool_settings.image_paint.brush.texture_angle_source_random"
# add the menu items
for item in items:
utils_core.menuprop(
layout.row(), item[0], item[1], path,
icon='RADIOBUT_OFF',
disable=True,
disable_icon='RADIOBUT_ON'
)
else:
layout.row().label("No brushes available", icon="INFO")
class MaskPressureModeMenu(Menu):
bl_label = "Mask Pressure Mode"
bl_idname = "VIEW3D_MT_sv3_mask_pressure_mode_menu"
def draw(self, context):
layout = self.layout
path = "tool_settings.image_paint.brush.use_pressure_masking"
layout.row().label(text="Mask Pressure Mode")
layout.row().separator()
# add the menu items
for item in context.tool_settings.image_paint.brush. \
bl_rna.properties['use_pressure_masking'].enum_items:
utils_core.menuprop(
layout.row(), item.name, item.identifier, path,
icon='RADIOBUT_OFF',
disable=True,
disable_icon='RADIOBUT_ON'
)
| 38.321608
| 100
| 0.552977
| 1,690
| 15,252
| 4.724852
| 0.076923
| 0.073262
| 0.027051
| 0.026299
| 0.859987
| 0.828679
| 0.757671
| 0.68015
| 0.676018
| 0.659487
| 0
| 0.002003
| 0.345463
| 15,252
| 397
| 101
| 38.418136
| 0.797856
| 0.032586
| 0
| 0.647687
| 0
| 0
| 0.171104
| 0.081103
| 0
| 0
| 0
| 0
| 0
| 1
| 0.042705
| false
| 0
| 0.010676
| 0.003559
| 0.135231
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
31139178ff5be5fe8dd99e7fdd7cdf1b6de74c36
| 9,974
|
py
|
Python
|
util/unauth/weakPassScan.py
|
Shinpachi8/SubDomainsResultDeal
|
0303d95bd96b8f1e696c6534f686f30809763970
|
[
"Apache-2.0"
] | 1
|
2020-04-23T08:01:36.000Z
|
2020-04-23T08:01:36.000Z
|
util/unauth/weakPassScan.py
|
Shinpachi8/SubDomainsResultDeal
|
0303d95bd96b8f1e696c6534f686f30809763970
|
[
"Apache-2.0"
] | null | null | null |
util/unauth/weakPassScan.py
|
Shinpachi8/SubDomainsResultDeal
|
0303d95bd96b8f1e696c6534f686f30809763970
|
[
"Apache-2.0"
] | 5
|
2017-09-24T15:54:03.000Z
|
2020-05-01T15:33:03.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'this script can bruter ftp/ssh/mysql'
__author__ = 'reber'
import Queue
import threading
import time
import logging
import socket
from optparse import OptionParser
import paramiko
from ftplib import FTP
import MySQLdb
#################公有类#################
class CommonFun(object):
"""docstring for CommonFun"""
def __init__(self):
super(CommonFun, self).__init__()
def set_log(self,lname):
logger = logging.getLogger(lname)
logger.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
ch.setFormatter(formatter)
logger.addHandler(ch)
def show_log(self, lname, msg):
a = logging.getLogger(lname)
a.debug(msg)
def show_result(self, lname, rlist):
if rlist:
print "###################################################################"
for x in rlist:
self.show_log(lname,x)
else:
print "not found..."
#################SSH爆破模块#################
class SshBruter(CommonFun):
"""docstring for SshBruter"""
def __init__(self, *args):
super(SshBruter, self).__init__()
(options,arg) = args
self.host = options.host
self.userfile = options.userfile
self.passfile = options.passfile
self.threadnum = options.threadnum
self.timeout = options.timeout
self.result = []
self.set_log(self.host)
self.qlist = Queue.Queue()
self.is_exit = False
print self.host,self.userfile,self.passfile,self.threadnum
def get_queue(self):
with open(self.userfile, 'r') as f:
ulines = f.readlines()
with open(self.passfile, 'r') as f:
plines = f.readlines()
for name in ulines:
for pwd in plines:
name = name.strip()
pwd = pwd.strip()
self.qlist.put(name + ':' + pwd)
def thread(self):
while not self.qlist.empty():
if not self.is_exit:
name,pwd = self.qlist.get().split(':')
try:
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh.connect(hostname=self.host,port=22,username=name,password=pwd,timeout=self.timeout)
time.sleep(0.05)
ssh.close()
s = "[OK] %s:%s" % (name,pwd)
self.show_log(self.host,s)
self.result.append(s)
except socket.timeout:
self.show_log(self.host,"Timeout...")
self.qlist.put(name + ':' + pwd)
time.sleep(3)
except Exception, e:
error = "[Error] %s:%s" % (name,pwd)
self.show_log(self.host,error)
pass
else:
break
def run(self):
self.get_queue()
starttime = time.time()
threads = []
for x in xrange(1,self.threadnum+1):
t = threading.Thread(target=self.thread)
threads.append(t)
t.setDaemon(True) #主线程完成后不管子线程有没有结束,直接退出
t.start()
try:
while True:
if self.qlist.empty():
break
else:
time.sleep(1)
except KeyboardInterrupt:
self.is_exit = True
print "Exit the program..."
print "Waiting..."
time.sleep(5)
self.show_result(self.host,self.result)
finishetime = time.time()
print "Used time: %f" % (finishetime-starttime)
#################FTP爆破模块#################
class FtpBruter(CommonFun):
"""docstring for FtpBruter"""
def __init__(self, *args):
super(FtpBruter, self).__init__()
(options,arg) = args
self.host = options.host
self.userfile = options.userfile
self.passfile = options.passfile
self.threadnum = options.threadnum
self.timeout = options.timeout
self.result = []
self.set_log(self.host)
self.qlist = Queue.Queue()
print self.host,self.userfile,self.passfile,self.threadnum
def get_queue(self):
with open(self.userfile, 'r') as f:
ulines = f.readlines()
with open(self.passfile, 'r') as f:
plines = f.readlines()
for name in ulines:
for pwd in plines:
name = name.strip()
pwd = pwd.strip()
self.qlist.put(name + ':' + pwd)
def thread(self):
while not self.qlist.empty():
name,pwd = self.qlist.get().split(':')
try:
ftp = FTP()
ftp.connect(self.host, 21, self.timeout)
ftp.login(name, pwd)
time.sleep(0.05)
ftp.quit()
s = "[OK] %s:%s" % (name,pwd)
self.show_log(self.host,s)
self.result.append(s)
except socket.timeout:
self.show_log(self.host,"Timeout...")
self.qlist.put(name + ':' + pwd)
time.sleep(1)
except Exception, e:
error = "[Error] %s:%s" % (name,pwd)
self.show_log(self.host,error)
pass
def run(self):
self.get_queue()
starttime = time.time()
threads = []
for x in xrange(1,self.threadnum+1):
t = threading.Thread(target=self.thread)
threads.append(t)
t.setDaemon(True) #主线程完成后不管子线程有没有结束,直接退出
t.start()
try:
while True:
if self.qlist.empty():
break
else:
time.sleep(1)
except KeyboardInterrupt:
self.is_exit = True
print "Exit the program..."
print "Waiting..."
time.sleep(5)
self.show_result(self.host,self.result)
finishetime = time.time()
print "Used time: %f" % (finishetime-starttime)
#################MySQL爆破模块#################
class MysqlBruter(CommonFun):
"""docstring for MysqlBruter"""
def __init__(self, *args):
super(MysqlBruter, self).__init__()
(options,arg) = args
self.host = options.host
self.userfile = options.userfile
self.passfile = options.passfile
self.threadnum = options.threadnum
self.timeout = options.timeout
self.result = []
self.set_log(self.host)
self.qlist = Queue.Queue()
print self.host,self.userfile,self.passfile,self.threadnum
def get_queue(self):
with open(self.userfile, 'r') as f:
ulines = f.readlines()
with open(self.passfile, 'r') as f:
plines = f.readlines()
for name in ulines:
for pwd in plines:
name = name.strip()
pwd = pwd.strip()
self.qlist.put(name + ':' + pwd)
def thread(self):
while not self.qlist.empty():
name,pwd = self.qlist.get().split(':')
try:
conn = MySQLdb.connect(host=self.host, user=name, passwd=pwd, db='mysql', port=3306)
if conn:
# time.sleep(0.05)
conn.close()
s = "[OK] %s:%s" % (name,pwd)
self.show_log(self.host,s)
self.result.append(s)
except socket.timeout:
self.show_log(self.host,"Timeout")
self.qlist.put(name + ':' + pwd)
time.sleep(3)
except Exception, e:
error = "[Error] %s:%s" % (name,pwd)
self.show_log(self.host,error)
pass
def run(self):
self.get_queue()
starttime = time.time()
threads = []
for x in xrange(1,self.threadnum+1):
t = threading.Thread(target=self.thread)
threads.append(t)
t.setDaemon(True) #主线程完成后不管子线程有没有结束,直接退出
t.start()
try:
while True:
if self.qlist.empty():
break
else:
time.sleep(1)
except KeyboardInterrupt:
self.is_exit = True
print "Exit the program..."
print "Waiting..."
time.sleep(5)
self.show_result(self.host,self.result)
finishetime = time.time()
print "Used time: %f" % (finishetime-starttime)
def main():
parser = OptionParser(usage='Usage: python %prog [options] type')
parser.add_option('-i','--host',dest='host',help='target ip')
parser.add_option('-o','--timeout',type=int,dest='timeout',default=5,help='timeout')
parser.add_option('-t','--thread',type=int,dest='threadnum',default=10,help='threadnum')
parser.add_option('-L','--userfile',dest='userfile',default='username.txt',help='userfile')
parser.add_option('-P','--passfile',dest='passfile',default='password.txt',help='passfile')
(options, args) = parser.parse_args()
if not args:
parser.print_help()
exit()
if args[0]=='ssh':
if options.host:
ssh = SshBruter(options, args)
ssh.run()
else:
parser.print_help()
elif args[0]=='ftp':
if options.host:
ftp = FtpBruter(options, args)
ftp.run()
else:
parser.print_help()
elif args[0]=='mysql':
if options.host:
mysql = MysqlBruter(options, args)
mysql.run()
else:
parser.print_help()
else:
print "type must be ssh or ftp or mysql"
if __name__ == '__main__':
main()
| 31.663492
| 107
| 0.510628
| 1,080
| 9,974
| 4.634259
| 0.162963
| 0.038362
| 0.026374
| 0.026973
| 0.647952
| 0.631568
| 0.631568
| 0.626174
| 0.613786
| 0.613786
| 0
| 0.005993
| 0.347504
| 9,974
| 315
| 108
| 31.663492
| 0.763061
| 0.014839
| 0
| 0.724528
| 0
| 0
| 0.068255
| 0.007003
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0.056604
| 0.033962
| null | null | 0.071698
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 4
|
312b726b7e5ecb26482fbfae18fa550d78818cd1
| 435
|
py
|
Python
|
Python/PlusOneTest.py
|
TonnyL/Windary
|
39f85cdedaaf5b85f7ce842ecef975301fc974cf
|
[
"MIT"
] | 205
|
2017-11-16T08:38:46.000Z
|
2022-03-06T05:50:03.000Z
|
Python/PlusOneTest.py
|
santosh241/Windary
|
39f85cdedaaf5b85f7ce842ecef975301fc974cf
|
[
"MIT"
] | 3
|
2018-04-10T10:17:52.000Z
|
2020-12-11T08:00:09.000Z
|
Python/PlusOneTest.py
|
santosh241/Windary
|
39f85cdedaaf5b85f7ce842ecef975301fc974cf
|
[
"MIT"
] | 28
|
2018-04-10T06:42:42.000Z
|
2021-09-14T14:15:39.000Z
|
from unittest import TestCase
from PlusOne import PlusOne
class TestPlusOne(TestCase):
def test_plusOne(self):
po = PlusOne()
self.assertEqual(po.plusOne([1]), [2])
self.assertEqual(po.plusOne([9]), [1, 0])
self.assertEqual(po.plusOne([9, 9]), [1, 0, 0])
self.assertEqual(po.plusOne([2, 8, 9, 9, 9]), [2, 9, 0, 0, 0])
self.assertEqual(po.plusOne([2, 8, 8, 9]), [2, 8, 9, 0])
| 22.894737
| 70
| 0.574713
| 65
| 435
| 3.830769
| 0.261538
| 0.216867
| 0.341365
| 0.481928
| 0.429719
| 0.2249
| 0.2249
| 0.2249
| 0
| 0
| 0
| 0.084337
| 0.236782
| 435
| 18
| 71
| 24.166667
| 0.665663
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.5
| 1
| 0.1
| false
| 0
| 0.2
| 0
| 0.4
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
31404d6a297c546b7e49eff01d6b4e30888bf2a2
| 149
|
py
|
Python
|
app/config.py
|
shakedmanes/mandolin-cloud
|
faf6fe2d583d3d654ff0dc92b3f0389bbd5b7c4d
|
[
"MIT"
] | 3
|
2019-08-19T09:10:23.000Z
|
2020-12-02T09:45:50.000Z
|
app/config.py
|
shakedmanes/mandolin-cloud
|
faf6fe2d583d3d654ff0dc92b3f0389bbd5b7c4d
|
[
"MIT"
] | null | null | null |
app/config.py
|
shakedmanes/mandolin-cloud
|
faf6fe2d583d3d654ff0dc92b3f0389bbd5b7c4d
|
[
"MIT"
] | null | null | null |
import os
class BaseConfig(object):
"""Default configuration options for flask"""
SITE_NAME = os.environ.get('APP_NAME', 'mandolin-cloud')
| 21.285714
| 60
| 0.711409
| 19
| 149
| 5.473684
| 0.894737
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.154362
| 149
| 6
| 61
| 24.833333
| 0.825397
| 0.261745
| 0
| 0
| 0
| 0
| 0.211538
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
314cb1f81db06f5646dd964ee4351b26d94c250e
| 3,286
|
py
|
Python
|
wandb/vendor/prompt_toolkit/contrib/regular_languages/__init__.py
|
dreamflasher/client
|
c8267f1c6b8b6970172d622bb8fbf7cc773d78b2
|
[
"MIT"
] | 6,989
|
2017-07-18T06:23:18.000Z
|
2022-03-31T15:58:36.000Z
|
wandb/vendor/prompt_toolkit/contrib/regular_languages/__init__.py
|
dreamflasher/client
|
c8267f1c6b8b6970172d622bb8fbf7cc773d78b2
|
[
"MIT"
] | 1,978
|
2017-07-18T09:17:58.000Z
|
2022-03-31T14:28:43.000Z
|
wandb/vendor/prompt_toolkit/contrib/regular_languages/__init__.py
|
dreamflasher/client
|
c8267f1c6b8b6970172d622bb8fbf7cc773d78b2
|
[
"MIT"
] | 1,228
|
2017-07-18T09:03:13.000Z
|
2022-03-29T05:57:40.000Z
|
r"""
Tool for expressing the grammar of an input as a regular language.
==================================================================
The grammar for the input of many simple command line interfaces can be
expressed by a regular language. Examples are PDB (the Python debugger); a
simple (bash-like) shell with "pwd", "cd", "cat" and "ls" commands; arguments
that you can pass to an executable; etc. It is possible to use regular
expressions for validation and parsing of such a grammar. (More about regular
languages: http://en.wikipedia.org/wiki/Regular_language)
Example
-------
Let's take the pwd/cd/cat/ls example. We want to have a shell that accepts
these three commands. "cd" is followed by a quoted directory name and "cat" is
followed by a quoted file name. (We allow quotes inside the filename when
they're escaped with a backslash.) We could define the grammar using the
following regular expression::
grammar = \s* (
pwd |
ls |
(cd \s+ " ([^"]|\.)+ ") |
(cat \s+ " ([^"]|\.)+ ")
) \s*
What can we do with this grammar?
---------------------------------
- Syntax highlighting: We could use this for instance to give file names
different colour.
- Parse the result: .. We can extract the file names and commands by using a
regular expression with named groups.
- Input validation: .. Don't accept anything that does not match this grammar.
When combined with a parser, we can also recursively do
filename validation (and accept only existing files.)
- Autocompletion: .... Each part of the grammar can have its own autocompleter.
"cat" has to be completed using file names, while "cd"
has to be completed using directory names.
How does it work?
-----------------
As a user of this library, you have to define the grammar of the input as a
regular expression. The parts of this grammar where autocompletion, validation
or any other processing is required need to be marked using a regex named
group. Like ``(?P<varname>...)`` for instance.
When the input is processed for validation (for instance), the regex will
execute, the named group is captured, and the validator associated with this
named group will test the captured string.
There is one tricky bit:
Ofter we operate on incomplete input (this is by definition the case for
autocompletion) and we have to decide for the cursor position in which
possible state the grammar it could be and in which way variables could be
matched up to that point.
To solve this problem, the compiler takes the original regular expression and
translates it into a set of other regular expressions which each match prefixes
of strings that would match the first expression. (We translate it into
multiple expression, because we want to have each possible state the regex
could be in -- in case there are several or-clauses with each different
completers.)
TODO: some examples of:
- How to create a highlighter from this grammar.
- How to create a validator from this grammar.
- How to create an autocompleter from this grammar.
- How to create a parser from this grammar.
"""
from .compiler import compile
| 42.675325
| 79
| 0.688071
| 485
| 3,286
| 4.659794
| 0.408247
| 0.034071
| 0.019469
| 0.015929
| 0.070796
| 0.035398
| 0.023894
| 0
| 0
| 0
| 0
| 0
| 0.220633
| 3,286
| 76
| 80
| 43.236842
| 0.882468
| 0.987827
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.013158
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
3169e58d073baf05bd3b85c464514dfcc235a90e
| 65
|
py
|
Python
|
lambda.py
|
ricardodarocha/Pyjamas
|
8b12f3966740655c3a7bb1cad782689973059869
|
[
"MIT"
] | 1
|
2019-12-14T16:27:30.000Z
|
2019-12-14T16:27:30.000Z
|
lambda.py
|
ricardodarocha/Pyjamas
|
8b12f3966740655c3a7bb1cad782689973059869
|
[
"MIT"
] | null | null | null |
lambda.py
|
ricardodarocha/Pyjamas
|
8b12f3966740655c3a7bb1cad782689973059869
|
[
"MIT"
] | 2
|
2019-12-14T06:54:38.000Z
|
2021-11-08T10:31:48.000Z
|
reajuste = lambda salario, taxareajuste : salario += taxareajuste
| 65
| 65
| 0.8
| 6
| 65
| 8.666667
| 0.666667
| 0.730769
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.123077
| 65
| 1
| 65
| 65
| 0.912281
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
316ad913d19aa1e5367045049f8d28a22b8bda37
| 463
|
py
|
Python
|
core/net_errors.py
|
nikon-petr/perceptron
|
40509070e1d5c2407e5778af9bccde1eda284efb
|
[
"MIT"
] | null | null | null |
core/net_errors.py
|
nikon-petr/perceptron
|
40509070e1d5c2407e5778af9bccde1eda284efb
|
[
"MIT"
] | null | null | null |
core/net_errors.py
|
nikon-petr/perceptron
|
40509070e1d5c2407e5778af9bccde1eda284efb
|
[
"MIT"
] | null | null | null |
class IncorrectInputVectorLength(Exception):
pass
class NetIsNotInitialized(Exception):
pass
class IncorrectFactorValue(Exception):
pass
class NetIsNotCalculated(Exception):
pass
class IncorrectExpectedOutputVectorLength(Exception):
pass
class NetConfigIndefined(Exception):
pass
class NetConfigIncorrect(Exception):
pass
class JsonFileNotFound(Exception):
pass
class JsonFileStructureIncorrect(Exception):
pass
| 13.617647
| 53
| 0.773218
| 36
| 463
| 9.944444
| 0.333333
| 0.326816
| 0.402235
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.168467
| 463
| 34
| 54
| 13.617647
| 0.92987
| 0
| 0
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.5
| 0
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 4
|
31802c55dbfe8b8397bbd035c7ff9bff835d52d0
| 2,641
|
py
|
Python
|
graphpipe/graphpipefb/IOMetadata.py
|
LaudateCorpus1/graphpipe-py
|
eb474b29860d4a0dc713b834586dec527028e688
|
[
"UPL-1.0"
] | 38
|
2018-08-15T15:56:15.000Z
|
2020-09-18T09:19:45.000Z
|
graphpipe/graphpipefb/IOMetadata.py
|
LaudateCorpus1/graphpipe-py
|
eb474b29860d4a0dc713b834586dec527028e688
|
[
"UPL-1.0"
] | 3
|
2018-08-16T04:56:53.000Z
|
2019-02-21T09:16:36.000Z
|
graphpipe/graphpipefb/IOMetadata.py
|
LaudateCorpus1/graphpipe-py
|
eb474b29860d4a0dc713b834586dec527028e688
|
[
"UPL-1.0"
] | 11
|
2018-08-16T09:10:05.000Z
|
2022-02-18T04:45:20.000Z
|
# automatically generated by the FlatBuffers compiler, do not modify
# namespace: graphpipe
import flatbuffers
class IOMetadata(object):
    """FlatBuffers accessor for an IOMetadata table (auto-generated code).

    Wraps a raw buffer; each getter reads its field lazily via the vtable
    offset (slots 4/6/8/10) and returns a default when the field is absent.
    """
    __slots__ = ['_tab']
    @classmethod
    def GetRootAsIOMetadata(cls, buf, offset):
        # Read the root table offset from the buffer, then position a new
        # accessor at that table.
        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
        x = IOMetadata()
        x.Init(buf, n + offset)
        return x
    # IOMetadata
    def Init(self, buf, pos):
        # Attach this accessor to `buf` at table position `pos`.
        self._tab = flatbuffers.table.Table(buf, pos)
    # IOMetadata: name field (slot 4); empty bytes when unset.
    def Name(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
        if o != 0:
            return self._tab.String(o + self._tab.Pos)
        return bytes()
    # IOMetadata: description field (slot 6); empty bytes when unset.
    def Description(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
        if o != 0:
            return self._tab.String(o + self._tab.Pos)
        return bytes()
    # IOMetadata: j-th element of the int64 shape vector (slot 8); 0 when unset.
    def Shape(self, j):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
        if o != 0:
            a = self._tab.Vector(o)
            return self._tab.Get(flatbuffers.number_types.Int64Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 8))
        return 0
    # IOMetadata: whole shape vector as a numpy array; 0 when unset.
    def ShapeAsNumpy(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
        if o != 0:
            return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int64Flags, o)
        return 0
    # IOMetadata: length of the shape vector; 0 when unset.
    def ShapeLength(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
        if o != 0:
            return self._tab.VectorLen(o)
        return 0
    # IOMetadata: uint8 type code (slot 10); 0 when unset.
    def Type(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
        if o != 0:
            return self._tab.Get(flatbuffers.number_types.Uint8Flags, o + self._tab.Pos)
        return 0
# Builder helpers (auto-generated): serialize an IOMetadata table with 4 slots.
# Call IOMetadataStart, add fields (string/vector offsets must be created on the
# builder beforehand), then IOMetadataEnd to obtain the table offset.
def IOMetadataStart(builder): builder.StartObject(4)
def IOMetadataAddName(builder, name): builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(name), 0)
def IOMetadataAddDescription(builder, description): builder.PrependUOffsetTRelativeSlot(1, flatbuffers.number_types.UOffsetTFlags.py_type(description), 0)
def IOMetadataAddShape(builder, shape): builder.PrependUOffsetTRelativeSlot(2, flatbuffers.number_types.UOffsetTFlags.py_type(shape), 0)
# Shape elements are int64: 8-byte elements with 8-byte alignment.
def IOMetadataStartShapeVector(builder, numElems): return builder.StartVector(8, numElems, 8)
def IOMetadataAddType(builder, type): builder.PrependUint8Slot(3, type, 0)
def IOMetadataEnd(builder): return builder.EndObject()
| 37.197183
| 154
| 0.688376
| 316
| 2,641
| 5.610759
| 0.243671
| 0.067118
| 0.161309
| 0.197406
| 0.456853
| 0.423576
| 0.328821
| 0.285956
| 0.285956
| 0.285956
| 0
| 0.016675
| 0.205225
| 2,641
| 70
| 155
| 37.728571
| 0.828013
| 0.062098
| 0
| 0.346939
| 1
| 0
| 0.001621
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.306122
| false
| 0
| 0.020408
| 0.040816
| 0.632653
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 4
|
318586b0ce470220bf289f9c66a976db9e14c909
| 376
|
py
|
Python
|
Ranmath/MatrixReconstructors/SingleMatrixReconstructor.py
|
pawel-ta/ranmath
|
f52a15b10bdb5830a50c43da11fed5f182026587
|
[
"MIT"
] | null | null | null |
Ranmath/MatrixReconstructors/SingleMatrixReconstructor.py
|
pawel-ta/ranmath
|
f52a15b10bdb5830a50c43da11fed5f182026587
|
[
"MIT"
] | null | null | null |
Ranmath/MatrixReconstructors/SingleMatrixReconstructor.py
|
pawel-ta/ranmath
|
f52a15b10bdb5830a50c43da11fed5f182026587
|
[
"MIT"
] | null | null | null |
from .AbstractReconstructor import AbstractReconstructor
import numpy.linalg as la
import numpy as np
from copy import deepcopy
class SingleMatrixReconstructor(AbstractReconstructor):
    """Rebuild a matrix from its eigendecomposition as V @ diag(w) @ V^-1."""
    def __init__(self):
        super().__init__()
    def reconstruct(self, eigenvectors, eigenvalues):
        # Reassemble A = V * diag(w) * V^-1; .real drops the numerical
        # imaginary residue left by a complex eigendecomposition.
        scaled_basis = eigenvectors @ np.diag(eigenvalues)
        inverse_basis = la.inv(eigenvectors)
        return (scaled_basis @ inverse_basis).real
| 23.5
| 80
| 0.760638
| 39
| 376
| 7.128205
| 0.589744
| 0.194245
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.162234
| 376
| 15
| 81
| 25.066667
| 0.88254
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.222222
| false
| 0
| 0.444444
| 0.111111
| 0.888889
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 4
|
3189a7de210069fc59f46d79f74a30607865397b
| 243
|
py
|
Python
|
plugin.video.iwn/resources/lib/__init__.py
|
TheWardoctor/wardoctors-repo
|
893f646d9e27251ffc00ca5f918e4eb859a5c8f0
|
[
"Apache-2.0"
] | 1
|
2019-03-05T09:38:10.000Z
|
2019-03-05T09:38:10.000Z
|
plugin.video.iwn/resources/lib/__init__.py
|
TheWardoctor/wardoctors-repo
|
893f646d9e27251ffc00ca5f918e4eb859a5c8f0
|
[
"Apache-2.0"
] | null | null | null |
plugin.video.iwn/resources/lib/__init__.py
|
TheWardoctor/wardoctors-repo
|
893f646d9e27251ffc00ca5f918e4eb859a5c8f0
|
[
"Apache-2.0"
] | 1
|
2021-11-05T20:48:09.000Z
|
2021-11-05T20:48:09.000Z
|
# #!/usr/bin/env python
####################################################################################################
# Blank Init File
####################################################################################################
| 60.75
| 100
| 0.115226
| 7
| 243
| 4
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.032922
| 243
| 4
| 101
| 60.75
| 0.119149
| 0.148148
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
3195516bba3c002d11bed7aa2adaeb584b55db98
| 305
|
py
|
Python
|
pyawx/exceptions/__init__.py
|
irunasroot/pyawx-client
|
ad2cdae2997d3026774ed89506c5fc5ac15f0002
|
[
"Apache-2.0"
] | null | null | null |
pyawx/exceptions/__init__.py
|
irunasroot/pyawx-client
|
ad2cdae2997d3026774ed89506c5fc5ac15f0002
|
[
"Apache-2.0"
] | null | null | null |
pyawx/exceptions/__init__.py
|
irunasroot/pyawx-client
|
ad2cdae2997d3026774ed89506c5fc5ac15f0002
|
[
"Apache-2.0"
] | null | null | null |
"""
exceptions/__init__.py
Comments:
Author: Dennis Whitney
Email: dennis@runasroot.com
Copyright (c) 2021, iRunAsRoot
"""
class ValueReadOnly(Exception):
    """Raised when attempting to write a read-only value."""
    pass
class ValueNotAllowed(Exception):
    """Raised when a value is not permitted in the given context."""
    pass
class UnauthorizedAccess(Exception):
    """Raised when access to a resource is not authorized."""
    pass
class UnknownEndpoint(Exception):
    """Raised when a requested endpoint is not recognized."""
    pass
| 12.708333
| 36
| 0.744262
| 31
| 305
| 7.193548
| 0.677419
| 0.233184
| 0.242152
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.015686
| 0.163934
| 305
| 23
| 37
| 13.26087
| 0.858824
| 0.377049
| 0
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.5
| 0
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 4
|
31b41fab4a40f5e996c206128d9b5d1175495eba
| 59,387
|
py
|
Python
|
Leg-UP/models/attacker/aushplus_helper.py
|
sharanmayank/ShillingAttack
|
783f135a4fcc709e7ce478c2e6f2e7e6c5ad2ace
|
[
"MIT"
] | null | null | null |
Leg-UP/models/attacker/aushplus_helper.py
|
sharanmayank/ShillingAttack
|
783f135a4fcc709e7ce478c2e6f2e7e6c5ad2ace
|
[
"MIT"
] | null | null | null |
Leg-UP/models/attacker/aushplus_helper.py
|
sharanmayank/ShillingAttack
|
783f135a4fcc709e7ce478c2e6f2e7e6c5ad2ace
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# @Time : 2020/12/3 20:03
# @Author : chensi
# @File : tkde.py
# @Software : PyCharm
# @Desciption : None
import random
import numpy as np
import torch
from torch import nn
from utils.utils import *
from utils.loss import *
import higher
# tf = None
# try:
# import tensorflow.compat.v1 as tf
#
# tf.disable_v2_behavior()
# except:
# import tensorflow as tf
# Pin every RNG source (Python, NumPy, Torch CPU and CUDA) so attack
# generation is reproducible across runs.
seed = 1234
random.seed(seed)
np.random.seed(seed)
# tf.set_random_seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
import time
import torch.nn.functional as F
import math
import torch.optim as optim
# =============================================================================================
# =============================================================================================
# =============================================================================================
# =============================================================================================
# =============================================================================================
class BaseGenerator(nn.Module):
    """Base class for fake-profile generators.

    Holds the target device and profile width, plus two helper constants:
    ``epsilon`` (keeps derived interval lengths strictly positive) and
    ``helper_tensor`` (2.5, used by subclasses to rescale a tanh output
    from [-1, 1] into [0, 5]).
    """

    def __init__(self, device, input_dim):
        super(BaseGenerator, self).__init__()
        self.input_dim = input_dim
        self.device = device
        self.epsilon = torch.tensor(1e-4).to(self.device)  # keeps boundaries apart
        self.helper_tensor = torch.tensor(2.5).to(device)

    def project(self, fake_tensor):
        """Round in place to the nearest integer rating and clip to [0, 5]."""
        rounded = torch.round(fake_tensor)
        floored = torch.where(rounded < 0, torch.zeros_like(rounded).to(self.device), rounded)
        capped = torch.where(floored > 5, torch.tensor(5.).to(self.device), floored)
        # Write through .data so the projection does not enter the autograd graph.
        fake_tensor.data = capped
        return fake_tensor

    def forward(self, input):
        # Subclasses must implement the actual generation step.
        raise NotImplementedError
class BaseDiscretGenerator(BaseGenerator):
    """Generator with a learned discretization of continuous scores into
    ratings {0..5}.

    Learns, per item, a minimum boundary plus four interval lengths; the five
    resulting boundaries split the real line into six rating buckets. The
    bucket membership is computed with HeaviTanh, whose backward pass is a
    smooth tanh surrogate, so the discretization stays trainable.
    """
    def __init__(self, device, input_dim):
        super(BaseDiscretGenerator, self).__init__(device, input_dim)
        # Per-item first boundary, then 4 nonnegative interval lengths that
        # stack on top of it (see get_boundary_values).
        self.min_boundary_value = torch.nn.Parameter(torch.rand([self.input_dim]), requires_grad=True)
        self.register_parameter("min_boundary_value", self.min_boundary_value)
        self.interval_lengths = torch.nn.Parameter(torch.rand([self.input_dim, 4]), requires_grad=True)
        self.register_parameter("interval_lengths", self.interval_lengths)
        pass
    def forward(self, input):
        # fake_tensor = (self.main(input) * self.helper_tensor) + self.helper_tensor
        # # project
        # fake_dsct_distribution, fake_dsct_value = self.project(fake_tensor)
        # return fake_dsct_value
        raise NotImplementedError
    def project(self, fake_tensor):
        """Vectorized discretization of a (batch, input_dim) score tensor.

        Returns (distribution over 6 ratings with shape (batch, input_dim, 6),
        expected rating value with shape (batch, input_dim)).
        """
        Heaviside = HeaviTanh.apply
        boundary_values = self.get_boundary_values()
        # Flatten scores and repeat item ids so each score is paired with its
        # item's 5 boundaries.
        cnt_ratings = fake_tensor.flatten()
        iids = np.expand_dims(np.arange(self.input_dim), 0).repeat(fake_tensor.shape[0], axis=0).flatten()
        boundary_values_per_rating = boundary_values[iids]
        def _project_helper(ratings, boundary_values_input):
            def get_target_dst_rating_prob(target_dst_rating, input_cnt_rating, boundary_values, device):
                # boundary_values = boundary_values.reshape([-1, 4])
                # input_cnt_rating = input_cnt_rating.reshape([-1])
                # Probability (0/1 forward, soft backward) that each score
                # falls in the bucket of `target_dst_rating`: product over all
                # boundaries of "score is on the correct side".
                rating_prob = torch.ones(input_cnt_rating.shape[0]).to(self.device)
                for boundary_idx in range(5):
                    """
                    :param target_dst_rating: r_i_j
                    :param boundary_idx: k
                    :param input_cnt_rating: a_i_j
                    :param boundary_value: b_j_k
                    :return:
                    """
                    # +1 if this boundary should be below the target rating, -1 otherwise
                    p_1 = torch.sign(target_dst_rating - boundary_idx - torch.tensor(0.5).to(device))
                    # signed distance of the score from this boundary
                    p_2 = input_cnt_rating - boundary_values[:, boundary_idx]
                    # 1 when the score is on the required side of the boundary
                    rating_prob *= Heaviside(p_1 * p_2, torch.tensor(1.).to(device))
                return rating_prob
            cur_dsct_distribution = []
            for rating_dsct in range(6):
                p = get_target_dst_rating_prob(rating_dsct, ratings, boundary_values_input, self.device)
                cur_dsct_distribution += [p]
            dsct_distribution = torch.cat([torch.unsqueeze(p, 1) for p in cur_dsct_distribution], 1)
            return dsct_distribution
        fake_dsct_distribution = _project_helper(cnt_ratings, boundary_values_per_rating).reshape(
            [-1, self.input_dim, 6])
        # Expected value of the distribution over ratings 0..5.
        fake_dsct_value = torch.matmul(fake_dsct_distribution,
                                       torch.tensor(np.arange(0., 6.)).type(torch.float32).to(self.device))
        return fake_dsct_distribution, fake_dsct_value
    def project_old(self, fake_tensor):
        # Slower per-item/per-rating loop version of project(); kept for reference.
        boundary_values = self.get_boundary_values()
        fake_dsct_distribution = []
        for iid in range(self.input_dim):
            cur_dsct_distribution = []
            for rating_dsct in range(6):
                rating_prob = torch.ones(fake_tensor.shape[0]).to(self.device)
                for boundary_idx in range(5):
                    rating_prob *= self.is_in_interval(rating_dsct,
                                                       boundary_idx,
                                                       fake_tensor[:, iid],
                                                       boundary_values[iid][boundary_idx])
                cur_dsct_distribution += [rating_prob]
            fake_dsct_distribution += [torch.cat([torch.unsqueeze(p, 1) for p in cur_dsct_distribution], 1)]
        fake_dsct_distribution = torch.cat([torch.unsqueeze(p, 1) for p in fake_dsct_distribution], 1)
        fake_dsct_value = torch.matmul(fake_dsct_distribution,
                                       torch.tensor(np.arange(6.0)).type(torch.float32).to(self.device))
        return fake_dsct_distribution, fake_dsct_value
    def get_boundary_values(self):
        """Cumulative boundaries per item, shape (input_dim, 5); relu+epsilon
        guarantees strictly increasing boundaries."""
        boundary_values = torch.zeros([self.input_dim, 5]).to(self.device)
        boundary_values[:, 0] = self.min_boundary_value
        for i in range(1, 5):
            cur_interval_length = torch.relu(self.interval_lengths[:, i - 1]) + self.epsilon
            boundary_values[:, i] = boundary_values[:, i - 1] + cur_interval_length
        return boundary_values
    def is_in_interval(self, rating_dsct, boundary_idx, rating_cnt, boundary_value):
        # Single-boundary membership test used by project_old.
        tensor_aux_0_5 = torch.tensor(0.5).to(self.device)
        tensor_aux_1 = torch.tensor(1.).to(self.device)
        Heaviside = HeaviTanh.apply
        """
        :param rating_dsct: r_i_j
        :param boundary_idx: k
        :param rating_cnt: a_i_j
        :param boundary_value: b_j_k
        :return:
        """
        # +1 if this boundary should be below the rating, -1 otherwise
        p_1 = torch.sign(rating_dsct - boundary_idx - tensor_aux_0_5)
        # signed distance of the score from the boundary
        p_2 = rating_cnt - boundary_value
        # 1 when the score lies on the required side
        return Heaviside(p_1 * p_2, tensor_aux_1)
class RecsysGenerator(BaseGenerator):
    """Generator whose fake profiles are a directly-learned parameter tensor
    (no network): forward masks the parameter by the observed entries of the
    input and projects onto valid ratings."""

    def __init__(self, device, init_tensor):
        super(RecsysGenerator, self).__init__(device, init_tensor.shape[1])
        # Optimize the fake ratings directly, seeded from a detached copy
        # of init_tensor.
        seed_values = init_tensor.clone().detach().requires_grad_(True)
        self.fake_parameter = torch.nn.Parameter(seed_values, requires_grad=True)
        self.register_parameter("fake_tensor", self.fake_parameter)

    def forward(self, input=None):
        # Keep only the positions that are rated in `input`, then snap to
        # integer ratings. First tuple slot (distribution) is unused here.
        masked = self.fake_parameter * (input > 0)
        return None, self.project(masked)
class DiscretGenerator_AE(BaseDiscretGenerator):
    """Autoencoder generator with learned discretization.

    Encoder dims `p_dims` and decoder dims `q_dims` (defaults to the reverse
    of p_dims) define a stack of Linear layers; the final tanh output is
    rescaled into [0, 5] and discretized by the parent's project().
    """
    def __init__(self, device, p_dims, q_dims=None):
        super(DiscretGenerator_AE, self).__init__(device, input_dim=p_dims[0])
        self.p_dims = p_dims
        if q_dims:
            assert q_dims[0] == p_dims[-1], "In and Out dimensions must equal to each other"
            assert q_dims[-1] == p_dims[0], "Latent dimension for p- and q- network mismatches."
            self.q_dims = q_dims
        else:
            # Mirror the encoder to build the decoder.
            self.q_dims = p_dims[::-1]
        self.dims = self.p_dims + self.q_dims[1:]
        self.layers = nn.ModuleList([nn.Linear(d_in, d_out) for
                                     d_in, d_out in zip(self.dims[:-1], self.dims[1:])])
        # self.drop = nn.Dropout(dropout)
        self.init_weights()
    def forward(self, input):
        # L2-normalize the input profile, run the MLP: ReLU on hidden layers,
        # tanh on the last so the output lies in [-1, 1].
        h = F.normalize(input)
        # h = self.drop(h)
        for i, layer in enumerate(self.layers):
            h = layer(h)
            if i != len(self.layers) - 1:
                h = F.relu(h)
            else:
                h = torch.nn.Tanh()(h)
        # Rescale [-1, 1] -> [0, 5] via helper_tensor (2.5).
        fake_tensor = (h * self.helper_tensor) + self.helper_tensor
        # project
        fake_dsct_distribution, fake_dsct_value = self.project(fake_tensor)
        # Only emit ratings where the input profile had one.
        sampled_filler = (input > 0)
        return fake_dsct_distribution, fake_dsct_value * sampled_filler
    def init_weights(self):
        for layer in self.layers:
            # Xavier Initialization for weights
            size = layer.weight.size()
            fan_out = size[0]
            fan_in = size[1]
            std = np.sqrt(2.0 / (fan_in + fan_out))
            layer.weight.data.normal_(0.0, std)
            # Normal Initialization for Biases
            layer.bias.data.normal_(0.0, 0.001)
class RoundGenerator_AE(BaseGenerator):
    """Autoencoder generator that discretizes by simple rounding/clipping
    (BaseGenerator.project) instead of learned boundaries.

    Same Linear stack as DiscretGenerator_AE; returns (None, values) since
    there is no rating distribution in this variant.
    """
    def __init__(self, device, p_dims, q_dims=None):
        super(RoundGenerator_AE, self).__init__(device, input_dim=p_dims[0])
        self.p_dims = p_dims
        if q_dims:
            assert q_dims[0] == p_dims[-1], "In and Out dimensions must equal to each other"
            assert q_dims[-1] == p_dims[0], "Latent dimension for p- and q- network mismatches."
            self.q_dims = q_dims
        else:
            # Mirror the encoder to build the decoder.
            self.q_dims = p_dims[::-1]
        self.dims = self.p_dims + self.q_dims[1:]
        self.layers = nn.ModuleList([nn.Linear(d_in, d_out) for
                                     d_in, d_out in zip(self.dims[:-1], self.dims[1:])])
        # self.drop = nn.Dropout(dropout)
        self.init_weights()
    def forward(self, input):
        # L2-normalize, run the MLP (ReLU hidden, tanh last), rescale to [0, 5].
        h = F.normalize(input)
        # h = self.drop(h)
        for i, layer in enumerate(self.layers):
            h = layer(h)
            if i != len(self.layers) - 1:
                h = F.relu(h)
            else:
                h = torch.nn.Tanh()(h)
        fake_tensor = (h * self.helper_tensor) + self.helper_tensor
        # project
        fake_dsct_value = self.project(fake_tensor)
        # Mask by positions rated in the input profile.
        return None, fake_dsct_value * (input > 0)
    def init_weights(self):
        for layer in self.layers:
            # Xavier Initialization for weights
            size = layer.weight.size()
            fan_out = size[0]
            fan_in = size[1]
            std = np.sqrt(2.0 / (fan_in + fan_out))
            layer.weight.data.normal_(0.0, std)
            # Normal Initialization for Biases
            layer.bias.data.normal_(0.0, 0.001)
# =============================================================================================
# =============================================================================================
# =============================================================================================
# =============================================================================================
# =============================================================================================
# Variants below: minimum rating value is 1 (projection clips to [1, 5])
class BaseGenerator_1(nn.Module):
    """Variant of BaseGenerator whose projection clips ratings to [1, 5]
    (minimum rating is 1 instead of 0).

    Carries the same helper constants: ``epsilon`` keeps derived interval
    lengths strictly positive, ``helper_tensor`` (2.5) rescales a tanh
    output from [-1, 1] into [0, 5] in subclasses.
    """

    def __init__(self, device, input_dim):
        super(BaseGenerator_1, self).__init__()
        self.input_dim = input_dim
        self.device = device
        self.epsilon = torch.tensor(1e-4).to(self.device)  # keeps boundaries apart
        self.helper_tensor = torch.tensor(2.5).to(device)

    def project(self, fake_tensor):
        """Round in place to the nearest integer rating and clip to [1, 5]."""
        rounded = torch.round(fake_tensor)
        raised = torch.where(rounded < 1, torch.ones_like(rounded).to(self.device), rounded)
        capped = torch.where(raised > 5, torch.tensor(5.).to(self.device), raised)
        # Write through .data so the projection does not enter the autograd graph.
        fake_tensor.data = capped
        return fake_tensor

    def forward(self, input):
        # Subclasses must implement the actual generation step.
        raise NotImplementedError
# NOTE(review): this class inherits BaseGenerator (min rating 0), not
# BaseGenerator_1, even though it models ratings {1..5}. It overrides
# project()/project_old() itself and __init__ signatures match, so behavior
# is unaffected — but the base-class choice looks accidental; confirm.
class BaseDiscretGenerator_1(BaseGenerator):
    """Learned discretization into ratings {1..5}: per item a minimum
    boundary plus three interval lengths give four boundaries and five
    rating buckets. Bucket membership uses HeaviTanh (hard step forward,
    tanh surrogate backward) so it stays trainable."""
    def __init__(self, device, input_dim):
        super(BaseDiscretGenerator_1, self).__init__(device, input_dim)
        # self.min_boundary_value = torch.nn.Parameter(torch.rand([self.input_dim]), requires_grad=True)
        self.min_boundary_value = torch.nn.Parameter(torch.ones([self.input_dim]), requires_grad=True)
        self.register_parameter("min_boundary_value", self.min_boundary_value)
        # self.interval_lengths = torch.nn.Parameter(torch.rand([self.input_dim, 3]), requires_grad=True)
        self.interval_lengths = torch.nn.Parameter(torch.ones([self.input_dim, 3]), requires_grad=True)
        self.register_parameter("interval_lengths", self.interval_lengths)
        pass
    def forward(self, input):
        # fake_tensor = (self.main(input) * self.helper_tensor) + self.helper_tensor
        # # project
        # fake_dsct_distribution, fake_dsct_value = self.project(fake_tensor)
        # return fake_dsct_value
        raise NotImplementedError
    def project_old(self, fake_tensor):
        # Slower per-item/per-rating loop version of project(); kept for reference.
        boundary_values = self.get_boundary_values()
        fake_dsct_distribution = []
        for iid in range(self.input_dim):
            cur_dsct_distribution = []
            for rating_dsct in range(5):
                rating_prob = torch.ones(fake_tensor.shape[0]).to(self.device)
                for boundary_idx in range(4):
                    rating_prob *= self.is_in_interval(rating_dsct,
                                                       boundary_idx,
                                                       fake_tensor[:, iid],
                                                       boundary_values[iid][boundary_idx])
                cur_dsct_distribution += [rating_prob]
            fake_dsct_distribution += [torch.cat([torch.unsqueeze(p, 1) for p in cur_dsct_distribution], 1)]
        fake_dsct_distribution = torch.cat([torch.unsqueeze(p, 1) for p in fake_dsct_distribution], 1)
        fake_dsct_value = torch.matmul(fake_dsct_distribution,
                                       torch.tensor(np.arange(1., 6.)).type(torch.float32).to(self.device))
        return fake_dsct_distribution, fake_dsct_value
    def project(self, fake_tensor):
        """Vectorized discretization of a (batch, input_dim) score tensor.

        Returns (distribution over 5 ratings with shape (batch, input_dim, 5),
        expected rating value over ratings 1..5 with shape (batch, input_dim)).
        """
        Heaviside = HeaviTanh.apply
        boundary_values = self.get_boundary_values()
        # Flatten scores and repeat item ids so each score is paired with its
        # item's 4 boundaries.
        cnt_ratings = fake_tensor.flatten()
        iids = np.expand_dims(np.arange(self.input_dim), 0).repeat(fake_tensor.shape[0], axis=0).flatten()
        boundary_values_per_rating = boundary_values[iids]
        def _project_helper(ratings, boundary_values_input):
            def get_target_dst_rating_prob(target_dst_rating, input_cnt_rating, boundary_values, device):
                # boundary_values = boundary_values.reshape([-1, 4])
                # input_cnt_rating = input_cnt_rating.reshape([-1])
                # Product over boundaries of "score is on the correct side".
                rating_prob = torch.ones(input_cnt_rating.shape[0]).to(self.device)
                for boundary_idx in range(4):
                    """
                    :param target_dst_rating: r_i_j
                    :param boundary_idx: k
                    :param input_cnt_rating: a_i_j
                    :param boundary_value: b_j_k
                    :return:
                    """
                    # +1 if this boundary should be below the target rating, -1 otherwise
                    p_1 = torch.sign(target_dst_rating - boundary_idx - torch.tensor(0.5).to(device))
                    # signed distance of the score from this boundary
                    p_2 = input_cnt_rating - boundary_values[:, boundary_idx]
                    # 1 when the score is on the required side of the boundary
                    rating_prob *= Heaviside(p_1 * p_2, torch.tensor(1.).to(device))
                return rating_prob
            cur_dsct_distribution = []
            for rating_dsct in range(5):
                p = get_target_dst_rating_prob(rating_dsct, ratings, boundary_values_input, self.device)
                cur_dsct_distribution += [p]
            dsct_distribution = torch.cat([torch.unsqueeze(p, 1) for p in cur_dsct_distribution], 1)
            return dsct_distribution
        fake_dsct_distribution = _project_helper(cnt_ratings, boundary_values_per_rating).reshape(
            [-1, self.input_dim, 5])
        # Expected value of the distribution over ratings 1..5.
        fake_dsct_value = torch.matmul(fake_dsct_distribution,
                                       torch.tensor(np.arange(1., 6.)).type(torch.float32).to(self.device))
        return fake_dsct_distribution, fake_dsct_value
    def get_boundary_values(self):
        """Cumulative boundaries per item, shape (input_dim, 4); relu+epsilon
        guarantees strictly increasing boundaries."""
        boundary_values = torch.zeros([self.input_dim, 4]).to(self.device)
        boundary_values[:, 0] = self.min_boundary_value
        for i in range(1, 4):
            cur_interval_length = torch.relu(self.interval_lengths[:, i - 1]) + self.epsilon
            boundary_values[:, i] = boundary_values[:, i - 1] + cur_interval_length
        return boundary_values
    def is_in_interval(self, rating_dsct, boundary_idx, rating_cnt, boundary_value):
        # Single-boundary membership test used by project_old.
        tensor_aux_0_5 = torch.tensor(0.5).to(self.device)
        tensor_aux_1 = torch.tensor(1.).to(self.device)
        Heaviside = HeaviTanh.apply
        """
        :param rating_dsct: r_i_j
        :param boundary_idx: k
        :param rating_cnt: a_i_j
        :param boundary_value: b_j_k
        :return:
        """
        # +1 if this boundary should be below the rating, -1 otherwise
        p_1 = torch.sign(rating_dsct - boundary_idx - tensor_aux_0_5)
        # signed distance of the score from the boundary
        p_2 = rating_cnt - boundary_value
        # 1 when the score lies on the required side
        return Heaviside(p_1 * p_2, tensor_aux_1)
class DiscretGenerator_AE_1(BaseDiscretGenerator_1):
    """Autoencoder generator with learned discretization into ratings {1..5}.

    Same Linear stack as DiscretGenerator_AE; discretization comes from the
    BaseDiscretGenerator_1 parent.
    """
    def __init__(self, device, p_dims, q_dims=None):
        super(DiscretGenerator_AE_1, self).__init__(device, input_dim=p_dims[0])
        self.p_dims = p_dims
        if q_dims:
            assert q_dims[0] == p_dims[-1], "In and Out dimensions must equal to each other"
            assert q_dims[-1] == p_dims[0], "Latent dimension for p- and q- network mismatches."
            self.q_dims = q_dims
        else:
            # Mirror the encoder to build the decoder.
            self.q_dims = p_dims[::-1]
        self.dims = self.p_dims + self.q_dims[1:]
        self.layers = nn.ModuleList([nn.Linear(d_in, d_out) for
                                     d_in, d_out in zip(self.dims[:-1], self.dims[1:])])
        # self.drop = nn.Dropout(dropout)
        self.init_weights()
    def forward(self, input):
        # L2-normalize, run the MLP (ReLU hidden, tanh last), rescale to [0, 5].
        h = F.normalize(input)
        # h = self.drop(h)
        for i, layer in enumerate(self.layers):
            h = layer(h)
            if i != len(self.layers) - 1:
                h = F.relu(h)
            else:
                h = torch.nn.Tanh()(h)
        fake_tensor = (h * self.helper_tensor) + self.helper_tensor
        # project
        fake_dsct_distribution, fake_dsct_value = self.project(fake_tensor)
        # Mask by positions rated in the input profile.
        sampled_filler = (input > 0)
        # sampled_filler = (torch.rand(fake_dsct_value.shape) < (90 / 1924)).float()
        # filler_num = np.sum(sampled_filler.detach().cpu().numpy()*(fake_dsct_value.detach().cpu().numpy()>0),1).mean()
        # if filler_num<90:
        return fake_dsct_distribution, fake_dsct_value * sampled_filler
    def init_weights(self):
        for layer in self.layers:
            # Xavier Initialization for weights
            size = layer.weight.size()
            fan_out = size[0]
            fan_in = size[1]
            std = np.sqrt(2.0 / (fan_in + fan_out))
            layer.weight.data.normal_(0.0, std)
            # Normal Initialization for Biases
            layer.bias.data.normal_(0.0, 0.001)
class RoundGenerator_AE_1(BaseGenerator_1):
    """Autoencoder generator that discretizes by rounding/clipping to [1, 5]
    (BaseGenerator_1.project). Returns (None, values): no distribution."""
    def __init__(self, device, p_dims, q_dims=None):
        super(RoundGenerator_AE_1, self).__init__(device, input_dim=p_dims[0])
        self.p_dims = p_dims
        if q_dims:
            assert q_dims[0] == p_dims[-1], "In and Out dimensions must equal to each other"
            assert q_dims[-1] == p_dims[0], "Latent dimension for p- and q- network mismatches."
            self.q_dims = q_dims
        else:
            # Mirror the encoder to build the decoder.
            self.q_dims = p_dims[::-1]
        self.dims = self.p_dims + self.q_dims[1:]
        self.layers = nn.ModuleList([nn.Linear(d_in, d_out) for
                                     d_in, d_out in zip(self.dims[:-1], self.dims[1:])])
        # self.drop = nn.Dropout(dropout)
        self.init_weights()
    def forward(self, input):
        # L2-normalize, run the MLP (ReLU hidden, tanh last), rescale to [0, 5].
        h = F.normalize(input)
        # h = self.drop(h)
        for i, layer in enumerate(self.layers):
            h = layer(h)
            if i != len(self.layers) - 1:
                h = F.relu(h)
            else:
                h = torch.nn.Tanh()(h)
        fake_tensor = (h * self.helper_tensor) + self.helper_tensor
        # project
        fake_dsct_value = self.project(fake_tensor)
        # Mask by positions rated in the input profile.
        sampled_filler = (input > 0)
        return None, fake_dsct_value * sampled_filler
    def init_weights(self):
        for layer in self.layers:
            # Xavier Initialization for weights
            size = layer.weight.size()
            fan_out = size[0]
            fan_in = size[1]
            std = np.sqrt(2.0 / (fan_in + fan_out))
            layer.weight.data.normal_(0.0, std)
            # Normal Initialization for Biases
            layer.bias.data.normal_(0.0, 0.001)
class DiscretRecsysGenerator_1(BaseDiscretGenerator_1):
    """Directly-learned fake profiles with learned {1..5} discretization:
    the parameter tensor is projected by the parent and masked by the
    observed entries of the input."""
    def __init__(self, device, init_tensor):
        super(DiscretRecsysGenerator_1, self).__init__(device, init_tensor.shape[1])
        """
        fake_parameter
        """
        # Optimize the fake ratings directly, seeded from a detached copy.
        fake_tensor = init_tensor.clone().detach().requires_grad_(True)
        self.fake_parameter = torch.nn.Parameter(fake_tensor, requires_grad=True)
        self.register_parameter("fake_tensor", self.fake_parameter)
        pass
    def forward(self, input=None):
        # Discretize the learned parameter, then keep only rated positions.
        fake_dsct_distribution, fake_dsct_value = self.project(self.fake_parameter)
        sampled_filler = (input > 0)
        return fake_dsct_distribution, fake_dsct_value * sampled_filler
# =============================================================================================
# =============================================================================================
# =============================================================================================
# =============================================================================================
# =============================================================================================
class HeaviTanh(torch.autograd.Function):
    """Heaviside step with a smooth surrogate gradient.

    Forward computes the exact step: 0 for x <= 0, 1 otherwise.
    Backward uses the derivative of the tanh relaxation
    h(x, k) = 1/2 + 1/2 * tanh(k x), i.e. a term proportional to
    1 - tanh(k x)^2, so the non-differentiable step still passes
    useful gradients. ``k`` itself receives no gradient.
    """

    @staticmethod
    def forward(ctx, x, k):
        ctx.save_for_backward(x, k)
        zeros = torch.zeros_like(x)
        ones = torch.ones_like(x)
        # Exact hard step: 0 where x <= 0, 1 elsewhere.
        return torch.where(x <= zeros, zeros, ones)

    @staticmethod
    def backward(ctx, dy):
        x, k = ctx.saved_tensors
        # Surrogate derivative from the tanh smoothing of the step.
        surrogate = 1 - (x * k).tanh().pow(2)
        return dy * surrogate, None
class Discriminator(nn.Module):
    """MLP discriminator: input_dim -> 512 -> 128 -> 1, ReLU hidden
    activations and a sigmoid output probability."""

    def __init__(self, input_dim):
        super(Discriminator, self).__init__()
        stack = [
            nn.Linear(input_dim, 512),
            nn.ReLU(True),
            nn.Linear(512, 128),
            nn.ReLU(True),
            nn.Linear(128, 1),
            nn.Sigmoid(),
        ]
        self.main = nn.Sequential(*stack)

    def forward(self, input):
        """Return the per-sample probability of being real, shape (batch, 1)."""
        return self.main(input)
class BaseTrainer(object):
    """Skeleton recommender trainer: subclasses provide the model
    (_initialize), one training epoch (train_epoch), and recommend();
    this class supplies batching, loss helpers, the evaluation loop and
    the overall fit() driver."""
    def __init__(self):
        # All filled in by subclasses / callers before fit() is invoked.
        self.args = None
        self.n_users = None
        self.n_items = None
        self.net = None
        self.optimizer = None
        self.metrics = None
        self.golden_metric = "Recall@50"
    @staticmethod
    def minibatch(*tensors, **kwargs):
        """Mini-batch generator for pytorch tensor."""
        # Single tensor yields slices; several tensors yield aligned tuples.
        batch_size = kwargs.get('batch_size', 128)
        if len(tensors) == 1:
            tensor = tensors[0]
            for i in range(0, len(tensor), batch_size):
                yield tensor[i:i + batch_size]
        else:
            for i in range(0, len(tensors[0]), batch_size):
                yield tuple(x[i:i + batch_size] for x in tensors)
    @staticmethod
    def mult_ce_loss(data, logits):
        # ========================================
        # surrogate network loss function
        # ========================================
        # Func_WeightedMSELoss = lambda weight, input, target: \
        # torch.where(target > 2, torch.tensor(weight), torch.tensor(1.)) \
        # * MSELoss(reduce=False, size_average=False)(input, target)
        #
        # Func_MSELoss = MSELoss(reduce=False, size_average=False)
        # adv_grads = torch.autograd.grad(adv_loss, data_tensor)[0]
        # # Copy fmodel's parameters to default trainer.net().
        # model.load_state_dict(fmodel.state_dict())
        """Multi-class cross-entropy loss."""
        # Per-row CE of logits against the (unnormalized) target counts,
        # normalized by each row's total target mass.
        log_probs = F.log_softmax(logits, dim=-1)
        loss = -log_probs * data
        instance_data = data.sum(1)
        instance_loss = loss.sum(1)
        # Avoid divide by zeros.
        res = instance_loss / (instance_data + 0.1)  # PSILON)
        return res
    @staticmethod
    def weighted_mse_loss(data, logits, weight_pos=1, weight_neg=0):
        """Mean square error loss."""
        # Observed entries (data > 0) weighted weight_pos, the rest weight_neg.
        weights = torch.ones_like(data) * weight_neg
        weights[data > 0] = weight_pos
        res = weights * (data - logits) ** 2
        return res.sum(1)
    @staticmethod
    def _array2sparsediag(x):
        # Build a sparse diagonal matrix with the entries of 1-D array x.
        values = x
        indices = np.vstack([np.arange(x.size), np.arange(x.size)])
        i = torch.LongTensor(indices)
        v = torch.FloatTensor(values)
        shape = [x.size, x.size]
        return torch.sparse.FloatTensor(i, v, torch.Size(shape))
    @property
    def _initialized(self):
        # A trainer counts as initialized once its model exists.
        return self.net is not None
    def _initialize(self):
        """Initialize model and optimizer."""
        # See actual implementation in each trainer.
        raise NotImplementedError
    def recommend(self, data, top_k, return_preds=False, allow_repeat=False):
        """Generate a top-k recommendation (ranked) list."""
        # See actual implementation in each trainer.
        raise NotImplementedError
    def train_epoch(self, data):
        """Train model for one epoch"""
        # See actual implementation in each trainer.
        raise NotImplementedError
    def train_epoch_wrapper(self, train_data, epoch_num):
        """Wrapper for train_epoch with some logs."""
        time_st = time.time()
        epoch_loss = self.train_epoch(train_data)
        print("Training [{:.1f} s], epoch: {}, loss: {:.4f}".format(
            time.time() - time_st, epoch_num, epoch_loss))
    def evaluate_epoch(self, train_data, test_data, epoch_num):
        """Evaluate model performance on test data."""
        # NOTE(review): the summary/return at the bottom is commented out, so
        # this currently computes avg_val_metrics and returns None; fit()'s
        # `result = ...` is therefore always None. Confirm before relying on it.
        t1 = time.time()
        n_rows = train_data.shape[0]
        n_evaluate_users = test_data.shape[0]
        total_metrics_len = sum(len(x) for x in self.metrics)
        total_val_metrics = np.zeros([n_rows, total_metrics_len], dtype=np.float32)
        recommendations = self.recommend(train_data, top_k=100)
        valid_rows = list()
        for i in range(train_data.shape[0]):
            # Ignore augmented users, evaluate only on real users.
            if i >= n_evaluate_users:
                continue
            targets = test_data[i].indices
            if targets.size <= 0:
                continue
            recs = recommendations[i].tolist()
            metric_results = list()
            for metric in self.metrics:
                result = metric(targets, recs)
                metric_results.append(result)
            total_val_metrics[i, :] = np.concatenate(metric_results)
            valid_rows.append(i)
        # Average evaluation results by user.
        total_val_metrics = total_val_metrics[valid_rows]
        avg_val_metrics = (total_val_metrics.mean(axis=0)).tolist()
        # Summary evaluation results into a dict.
        # ind, result = 0, OrderedDict()
        # for metric in self.metrics:
        # values = avg_val_metrics[ind:ind + len(metric)]
        # if len(values) <= 1:
        # result[str(metric)] = values[0]
        # else:
        # for name, value in zip(str(metric).split(','), values):
        # result[name] = value
        # ind += len(metric)
        #
        # print("Evaluation [{:.1f} s], epoch: {}, {} ".format(
        # time.time() - t1, epoch_num, str(result)))
        # return result
    def fit(self, train_data, test_data):
        """Full model training loop."""
        if not self._initialized:
            self._initialize()
        if self.args.save_feq > self.args.epochs:
            raise ValueError("Model save frequency should be smaller than"
                             " total training epochs.")
        start_epoch = 1
        best_checkpoint_path = ""
        best_perf = 0.0
        for epoch_num in range(start_epoch, self.args.epochs + 1):
            # Train the model.
            self.train_epoch_wrapper(train_data, epoch_num)
            if epoch_num % self.args.save_feq == 0:
                result = self.evaluate_epoch(train_data, test_data, epoch_num)
                # Save model checkpoint if it has better performance.
                # if result[self.golden_metric] > best_perf:
                # str_metric = "{}={:.4f}".format(self.golden_metric,
                # result[self.golden_metric])
                # print("Having better model checkpoint with"
                # " performance {}".format(str_metric))
                # checkpoint_path = os.path.join(
                # self.args.output_dir,
                # self.args.model['model_name'])
                # save_checkpoint(self.net, self.optimizer,
                # checkpoint_path,
                # epoch=epoch_num)
                #
                # best_perf = result[self.golden_metric]
                # best_checkpoint_path = checkpoint_path
        # Load best model and evaluate on test data.
        print("Loading best model checkpoint.")
        self.restore(best_checkpoint_path)
        self.evaluate_epoch(train_data, test_data, -1)
        return
    def restore(self, path):
        # Checkpoint restoring is a no-op in the base trainer.
        return
class WeightedMF(nn.Module):
    """Matrix-factorization scorer: the rating matrix is approximated as
    P @ Q^T with user factors P (n_users x dim) and item factors Q
    (n_items x dim), both initialized N(0, 0.1)."""

    def __init__(self, n_users, n_items, hidden_dim):
        super(WeightedMF, self).__init__()
        self.n_users = n_users
        self.n_items = n_items
        self.dim = hidden_dim
        # Item factors are drawn before user factors (keeps RNG order stable).
        self.Q = nn.Parameter(torch.zeros([self.n_items, self.dim]).normal_(mean=0, std=0.1))
        self.P = nn.Parameter(torch.zeros([self.n_users, self.dim]).normal_(mean=0, std=0.1))
        self.params = nn.ParameterList([self.Q, self.P])

    def forward(self, user_id=None, item_id=None):
        """Score the full matrix, a single user's row, or a single item's column."""
        if user_id is None and item_id is None:
            return torch.mm(self.P, self.Q.t())
        if user_id is not None:
            # [[user_id]] keeps a 2-D (1 x dim) slice so mm stays valid.
            return torch.mm(self.P[[user_id]], self.Q.t())
        if item_id is not None:
            return torch.mm(self.P, self.Q[[item_id]].t())
class WMFTrainer(BaseTrainer):
    """Trainer for :class:`WeightedMF`.

    ``fit_adv`` trains normally for ``epoch_num - unroll_steps`` epochs, then
    switches to a differentiable optimizer (the ``higher`` package) for the
    remaining epochs so gradients can flow back into ``data_tensor``.
    """

    def __init__(self, n_users, n_items, device, hidden_dim, lr, weight_decay, batch_size,
                 weight_pos, weight_neg, verbose=False):
        super(WMFTrainer, self).__init__()
        self.device = device
        self.n_users = n_users
        self.n_items = n_items
        self.hidden_dim = hidden_dim
        # Optimizer hyper-parameters.
        self.lr = lr
        self.weight_decay = weight_decay
        self.batch_size = batch_size
        # Loss weights for observed (pos) vs. unobserved (neg) entries.
        self.weight_pos = weight_pos
        self.weight_neg = weight_neg
        self.verbose = verbose

    def _initialize(self):
        """Create the WeightedMF model and its Adam optimizer on the device."""
        self.net = WeightedMF(n_users=self.n_users,
                              n_items=self.n_items,
                              hidden_dim=self.hidden_dim).to(self.device)
        self.optimizer = optim.Adam(self.net.parameters(), lr=self.lr,
                                    weight_decay=self.weight_decay)
        self.dim = self.net.dim

    def fit_adv(self, data_tensor, epoch_num, unroll_steps):
        """Train a surrogate model and return differentiable predictions.

        Args:
            data_tensor: dense user-item matrix with ``requires_grad=True``.
            epoch_num: total number of training epochs.
            unroll_steps: number of final epochs run in differentiable
                (``higher``) mode.

        Returns:
            Full prediction matrix from the unrolled surrogate model;
            gradients w.r.t. ``data_tensor`` can be taken through it.

        Raises:
            ValueError: if ``data_tensor`` does not require gradients.
        """
        self._initialize()
        import higher
        if not data_tensor.requires_grad:
            raise ValueError("To compute adversarial gradients, data_tensor "
                             "should have requires_grad=True.")
        data_tensor = data_tensor.to(self.device)
        n_rows = data_tensor.shape[0]
        idx_list = np.arange(n_rows)
        model = self.net.to(self.device)
        # Phase 1: ordinary training; detach the data so no graph is kept.
        for i in range(1, epoch_num - unroll_steps + 1):
            t1 = time.time()
            np.random.shuffle(idx_list)
            model.train()
            epoch_loss = 0.0
            for batch_idx in self.minibatch(idx_list, batch_size=self.batch_size):
                loss = self.weighted_mse_loss(data=data_tensor[batch_idx].detach(),
                                              logits=model(user_id=batch_idx),
                                              weight_pos=self.weight_pos,
                                              weight_neg=self.weight_neg).sum()
                epoch_loss += loss.item()
                self.optimizer.zero_grad()
                loss.backward()
                self.optimizer.step()
            if self.verbose:
                print("Training [{:.1f} s], epoch: {}, loss: {:.4f}".format(
                    time.time() - t1, i, epoch_loss), flush=True)
        # Phase 2: unrolled training through a differentiable optimizer.
        with higher.innerloop_ctx(model, self.optimizer) as (fmodel, diffopt):
            if self.verbose:
                print("Switching to higher mode...")
            for i in range(epoch_num - unroll_steps + 1, epoch_num + 1):
                t1 = time.time()
                np.random.shuffle(idx_list)
                fmodel.train()
                epoch_loss = 0.0
                for batch_idx in self.minibatch(idx_list, batch_size=self.batch_size):
                    # The data is NOT detached here: the graph must reach it.
                    loss = self.weighted_mse_loss(data=data_tensor[batch_idx],
                                                  logits=fmodel(user_id=batch_idx),
                                                  weight_pos=self.weight_pos,
                                                  weight_neg=self.weight_neg).sum()
                    epoch_loss += loss.item()
                    diffopt.step(loss)
                if self.verbose:
                    print("Training (higher mode) [{:.1f} s],"
                          " epoch: {}, loss: {:.4f}".format(time.time() - t1, i, epoch_loss), flush=True)
            if self.verbose:
                print("Finished surrogate model training,"
                      " {} copies of surrogate model params.".format(len(fmodel._fast_params)), flush=True)
            fmodel.eval()
            predictions = fmodel()
        return predictions

    def recommend(self, data, top_k, return_preds=False, allow_repeat=False):
        """Compute top-k recommendations for every user row of ``data``.

        Args:
            data: sparse user-item matrix (rows must support ``.toarray()``).
            top_k: number of items to return per user (<= 0 skips ranking).
            return_preds: if True, also return the raw prediction tensor.
            allow_repeat: if False, already-interacted items are masked to
                -inf before ranking.

        Returns:
            ``recommendations`` array of shape (n_users, top_k), plus the
            concatenated prediction tensor when ``return_preds`` is True.
        """
        # Set model to eval mode.
        model = self.net.to(self.device)
        model.eval()
        n_rows = data.shape[0]
        idx_list = np.arange(n_rows)
        recommendations = np.empty([n_rows, top_k], dtype=np.int64)
        all_preds = list()
        with torch.no_grad():
            # BUGFIX: this previously read self.args.valid_batch_size, but
            # WMFTrainer never defines self.args (AttributeError at runtime);
            # use the trainer's own batch_size, consistent with fit_adv.
            for batch_idx in self.minibatch(
                    idx_list, batch_size=self.batch_size):
                batch_data = data[batch_idx].toarray()
                preds = model(user_id=batch_idx)
                if return_preds:
                    all_preds.append(preds)
                if not allow_repeat:
                    # Mask out items the user already interacted with.
                    preds[batch_data.nonzero()] = -np.inf
                if top_k > 0:
                    _, recs = preds.topk(k=top_k, dim=1)
                    recommendations[batch_idx] = recs.cpu().numpy()
        if return_preds:
            return recommendations, torch.cat(all_preds, dim=0).cpu()
        else:
            return recommendations
class ItemAE(nn.Module):
    """Item-side autoencoder: tanh-activated encoder, linear-output decoder."""

    def __init__(self, input_dim, hidden_dims):
        super(ItemAE, self).__init__()
        # Encoder dims run input -> hidden; the decoder mirrors them back.
        self.q_dims = [input_dim, hidden_dims]
        self.p_dims = self.q_dims[::-1]

        def _linear_stack(dims):
            pairs = zip(dims[:-1], dims[1:])
            return nn.ModuleList([nn.Linear(d_in, d_out) for d_in, d_out in pairs])

        self.q_layers = _linear_stack(self.q_dims)
        self.p_layers = _linear_stack(self.p_dims)

    def encode(self, input):
        """Map ``input`` to the latent code; tanh after every encoder layer."""
        h = input
        for layer in self.q_layers:
            h = torch.tanh(layer(h))
        return h

    def decode(self, z):
        """Map a latent code back to input space; no tanh on the final layer."""
        h = z
        last = len(self.p_layers) - 1
        for i, layer in enumerate(self.p_layers):
            h = layer(h)
            if i != last:
                h = torch.tanh(h)
        return h

    def forward(self, input):
        return self.decode(self.encode(input))

    def loss(self, data, outputs):
        """Weighted MSE between the reconstruction and the data."""
        return BaseTrainer.weighted_mse_loss(data=data, logits=outputs)
class ItemAETrainer(BaseTrainer):
    """Trainer for :class:`ItemAE`; data is transposed so rows are items.

    ``fit_adv`` mirrors :class:`WMFTrainer`: plain training first, then an
    unrolled differentiable phase via the ``higher`` package.
    """

    def __init__(self, n_users, n_items, hidden_dims, device, lr, l2, batch_size, weight_pos, weight_neg,
                 verbose=False):
        super(ItemAETrainer, self).__init__()
        self.n_users = n_users
        self.n_items = n_items
        self.hidden_dims = hidden_dims
        self.device = device
        # Optimizer hyper-parameters (l2 is Adam's weight decay).
        self.lr = lr
        self.l2 = l2
        self.batch_size = batch_size
        # Loss weights for observed / unobserved entries.
        self.weight_pos = weight_pos
        self.weight_neg = weight_neg
        self.verbose = verbose

    def _initialize(self):
        """Create the ItemAE model (input dim = n_users) and its optimizer."""
        self.net = ItemAE(self.n_users, self.hidden_dims).to(self.device)
        self.optimizer = optim.Adam(self.net.parameters(),
                                    lr=self.lr, weight_decay=self.l2)

    def train_epoch(self, data):
        """Run one training epoch and return the summed epoch loss."""
        # Transpose so each row is an item vector (ItemAE reconstructs items).
        data = data.transpose()
        n_rows = data.shape[0]
        idx_list = np.arange(n_rows)
        # Set model to training mode.
        model = self.net.to(self.device)
        model.train()
        np.random.shuffle(idx_list)
        epoch_loss = 0.0
        # BUGFIX: previously read self.args.batch_size, but this trainer
        # stores the value as self.batch_size (self.args does not exist).
        batch_size = (self.batch_size
                      if self.batch_size > 0 else len(idx_list))
        for batch_idx in self.minibatch(idx_list, batch_size=batch_size):
            batch_tensor = data[batch_idx].to(self.device)
            # Compute loss.
            outputs = model(batch_tensor)
            loss = model.loss(data=batch_tensor,
                              outputs=outputs).sum()
            epoch_loss += loss.item()
            self.optimizer.zero_grad()
            loss.backward()
            self.optimizer.step()
        return epoch_loss

    def fit_adv(self, data_tensor, epoch_num, unroll_steps):
        """Train a surrogate ItemAE and return differentiable predictions.

        Args:
            data_tensor: dense user-item matrix with ``requires_grad=True``.
            epoch_num: total number of training epochs.
            unroll_steps: number of final epochs run in ``higher`` mode.

        Returns:
            Prediction matrix transposed back to user-item orientation;
            gradients w.r.t. ``data_tensor`` can be taken through it.

        Raises:
            ValueError: if ``data_tensor`` does not require gradients.
        """
        import higher
        if not data_tensor.requires_grad:
            raise ValueError("To compute adversarial gradients, data_tensor "
                             "should have requires_grad=True.")
        self._initialize()
        data_tensor = data_tensor.to(self.device)
        # Work on the transposed matrix: rows are items.
        data_tensor = data_tensor.t()
        n_rows = data_tensor.shape[0]
        idx_list = np.arange(n_rows)
        model = self.net.to(self.device)
        optimizer = self.optimizer
        batch_size = (self.batch_size if self.batch_size > 0 else len(idx_list))
        # Phase 1: ordinary training on detached data.
        for i in range(1, epoch_num - unroll_steps + 1):
            t1 = time.time()
            np.random.shuffle(idx_list)
            model.train()
            epoch_loss = 0.0
            for batch_idx in self.minibatch(idx_list, batch_size=batch_size):
                batch_tensor = data_tensor[batch_idx].detach()
                # Compute loss.
                outputs = model(batch_tensor)
                loss = model.loss(data=batch_tensor, outputs=outputs).sum()
                epoch_loss += loss.item()
                optimizer.zero_grad()
                loss.backward()
                optimizer.step()
            if self.verbose:
                print("Training [{:.1f} s], epoch: {}, loss: {:.4f}".format(
                    time.time() - t1, i, epoch_loss))
        # Phase 2: unrolled training through a differentiable optimizer.
        with higher.innerloop_ctx(model, optimizer) as (fmodel, diffopt):
            if self.verbose:
                print("Switching to higher mode...")
            for i in range(epoch_num - unroll_steps + 1, epoch_num + 1):
                t1 = time.time()
                np.random.shuffle(idx_list)
                epoch_loss = 0.0
                fmodel.train()
                for batch_idx in self.minibatch(idx_list, batch_size=batch_size):
                    # NOT detached: the graph must reach data_tensor.
                    batch_tensor = data_tensor[batch_idx]
                    outputs = fmodel(batch_tensor)
                    loss = fmodel.loss(data=batch_tensor, outputs=outputs).sum()
                    epoch_loss += loss.item()
                    diffopt.step(loss)
                if self.verbose:
                    print("Training (higher mode) [{:.1f} s],"
                          " epoch: {}, loss: {:.4f}".format(time.time() - t1, i, epoch_loss))
            if self.verbose:
                print("Finished surrogate model training,"
                      " {} copies of surrogate model params.".format(len(fmodel._fast_params)))
            fmodel.eval()
            all_preds = list()
            for batch_idx in self.minibatch(np.arange(n_rows),
                                            batch_size=batch_size):
                all_preds += [fmodel(data_tensor[batch_idx])]
            # Transpose back to user x item orientation.
            predictions = torch.cat(all_preds, dim=0).t()
        return predictions
class SVDpp(nn.Module):
    """SVD++-style rating model with implicit-feedback item factors.

    Scores are squashed with a sigmoid and scaled into [0, 5].
    """
    def __init__(self, n_users, n_items, hidden_dims, data):
        super(SVDpp, self).__init__()
        self.n_users = n_users
        self.n_items = n_items
        # Only the first entry of hidden_dims is used as the factor size.
        self.dim = hidden_dims[0]
        self.data = data
        # Q1: explicit item factors; Q2: implicit-feedback item factors.
        self.Q1 = nn.Parameter(
            torch.zeros([self.n_items, self.dim]).normal_(mean=0, std=0.1))
        self.Q2 = nn.Parameter(
            torch.zeros([self.n_items, self.dim]).normal_(mean=0, std=0.1))
        self.P = nn.Parameter(
            torch.zeros([self.n_users, self.dim]).normal_(mean=0, std=0.1))
        # Per-user and per-item bias terms.
        self.bu = nn.Parameter(torch.zeros(self.n_users))
        self.bi = nn.Parameter(torch.zeros(self.n_items))
        # store each users' interaction history
        self.Ni = list()
        for user in self.data:
            self.Ni.append(user.nonzero().squeeze(1))
        # Global mean of the interaction data.
        self.u = self.data.float().mean()
    def forward(self, user_id=None, item_id=None):
        """Predict ratings: full matrix, a user slice, or an item slice.

        NOTE(review): in the all-None branch (and the item_id branch)
        ``b[[user_id]]`` indexes with ``user_id is None``; the caller
        (SVDppTrainer) squeezes a leading dimension off the result, which
        suggests this inserts an extra axis -- confirm the indexing semantics
        before relying on those paths.
        """
        # bias computing
        bu = self.bu.expand((self.n_items, self.n_users)).t()
        bi = self.bi.expand((self.n_users, self.n_items))
        b = bu + bi
        # user features computing
        P = list()
        for i in self.Ni:
            yi = self.Q2[i]
            Yi = self.Q2[i].sum(dim=0)
            length = len(yi)
            # Implicit-feedback term normalized by sqrt(|N(u)|).
            P.append(Yi / math.sqrt(length))
        P = torch.cat(P).view((self.n_users, self.dim))
        P = P + self.P
        if user_id is None and item_id is None:
            return torch.sigmoid(torch.mm(P, self.Q1.t()) + b[[user_id]] + self.u) * 5
        if user_id is not None:
            return torch.sigmoid(torch.mm(P[[user_id]], self.Q1.t()) + b[[user_id]] + self.u) * 5
        if item_id is not None:
            return torch.sigmoid(torch.mm(P, self.Q1[[item_id]].t()) + b[[user_id]] + self.u) * 5
class SVDppTrainer(BaseTrainer):
    """Trainer for :class:`SVDpp`.

    NOTE(review): this trainer calls module-level ``minibatch`` and
    ``mse_loss`` helpers (unlike the other trainers, which use
    ``self.minibatch`` / ``self.weighted_mse_loss``) -- confirm those names
    exist at module scope.
    """
    def __init__(self, n_users, n_items, hidden_dims, device, lr, l2, batch_size, weight_alpha):
        super(SVDppTrainer, self).__init__()
        self.n_users = n_users
        self.n_items = n_items
        self.hidden_dims = hidden_dims
        self.device = device
        self.lr = lr
        # L2 regularization strength (used as Adam weight_decay).
        self.l2 = l2
        self.batch_size = batch_size
        # Weight passed through to mse_loss.
        self.weight_alpha = weight_alpha
    def _initialize(self, data):
        """Build the SVDpp model (it needs the data for user histories)."""
        self.net = SVDpp(
            n_users=self.n_users,
            n_items=self.n_items,
            hidden_dims=self.hidden_dims,
            data=data
        ).to(self.device)
        self.optimizer = optim.Adam(self.net.parameters(), lr=self.lr, weight_decay=self.l2)
    def fit_adv(self, data_tensor, epoch_num, unroll_steps):
        """Train a surrogate SVD++ model, unrolling the last steps via higher.

        Returns the full prediction matrix with the leading singleton
        dimension squeezed off; gradients w.r.t. ``data_tensor`` can flow
        through it.
        """
        self._initialize(data_tensor)
        import higher
        if not data_tensor.requires_grad:
            raise ValueError("To compute adversarial gradients, data_tensor "
                             "should have requires_grad=True.")
        data_tensor = data_tensor.to(self.device)
        n_rows = data_tensor.shape[0]
        n_cols = data_tensor.shape[1]
        idx_list = np.arange(n_rows)
        model = self.net.to(self.device)
        optimizer = self.optimizer
        batch_size = (self.batch_size
                      if self.batch_size > 0 else len(idx_list))
        # Phase 1: ordinary training.
        for i in range(1, epoch_num - unroll_steps + 1):
            t1 = time.time()
            np.random.shuffle(idx_list)
            model.train()
            epoch_loss = 0.0
            for batch_idx in minibatch(
                    idx_list, batch_size=batch_size):
                # Compute loss
                loss = mse_loss(data=data_tensor[batch_idx],
                                logits=model(user_id=batch_idx),
                                weight=self.weight_alpha).sum()
                epoch_loss += loss.item()
                optimizer.zero_grad()
                # retain_graph=True: the graph reaches data_tensor, which is
                # reused across batches.
                loss.backward(retain_graph=True)
                optimizer.step()
            print("Training [{:.1f} s], epoch: {}, loss: {:.4f}".format(
                time.time() - t1, i, epoch_loss))
        # Phase 2: unrolled differentiable training.
        with higher.innerloop_ctx(model, optimizer) as (fmodel, diffopt):
            print("Switching to higher mode...")
            for i in range(epoch_num - unroll_steps + 1, epoch_num + 1):
                t1 = time.time()
                np.random.shuffle(idx_list)
                fmodel.train()
                epoch_loss = 0.0
                for batch_idx in minibatch(
                        idx_list, batch_size=batch_size):
                    # Compute loss
                    loss = mse_loss(data=data_tensor[batch_idx],
                                    logits=fmodel(user_id=batch_idx),
                                    weight=self.weight_alpha).sum()
                    epoch_loss += loss.item()
                    diffopt.step(loss)
                print("Training (higher mode) [{:.1f} s],"
                      " epoch: {}, loss: {:.4f}".format(time.time() - t1, i, epoch_loss))
            print("Finished surrogate model training,"
                  " {} copies of surrogate model params.".format(len(fmodel._fast_params)))
            fmodel.eval()
            predictions = fmodel()
        # Drop the extra leading dimension introduced by the bias indexing
        # in SVDpp.forward (see NOTE there).
        return predictions.squeeze(0)
class NMF(nn.Module):
    """Non-negative matrix factorization: data ~ W @ H."""

    def __init__(self, n_users, n_items, hidden_dim, data):
        super(NMF, self).__init__()
        self.n_users, self.n_items = n_users, n_items
        # NOTE: attribute keeps the original spelling ("hideen_dim") so any
        # external readers of this attribute keep working.
        self.hideen_dim = hidden_dim
        self.data = data
        # Scale the random init so W @ H roughly matches the data's mean.
        self.scale = torch.sqrt(torch.mean(self.data.detach()) / self.hideen_dim)
        self.W = torch.nn.Parameter(
            torch.abs(torch.rand([self.n_users, self.hideen_dim]) * self.scale),
            requires_grad=True)
        self.H = torch.nn.Parameter(
            torch.abs(torch.rand([self.hideen_dim, self.n_items]) * self.scale),
            requires_grad=True)

    def forward(self, user_id=None, item_id=None):
        """Reconstruction W @ H: full matrix, user rows, or an H row slice."""
        if user_id is not None:
            return torch.mm(self.W[[user_id]], self.H)
        if item_id is not None:
            # NOTE(review): this indexes rows of H (the hidden axis), not
            # item columns -- behavior preserved as-is; verify intent.
            return torch.mm(self.W, self.H[[item_id]])
        return torch.mm(self.W, self.H)
class NMFTrainer(BaseTrainer):
    """Trainer for :class:`NMF`.

    Non-negativity is maintained by projecting parameters through
    :meth:`plus` after every optimizer step.  NOTE(review): the minibatch
    loops below call module-level ``minibatch`` and pass ``self.batch_size``
    directly, ignoring the ``batch_size`` fallback computed in ``fit_adv`` --
    confirm the intended behavior when ``batch_size <= 0``.
    """
    def __init__(self, n_users, n_items,
                 batch_size, device,
                 k=128, solver='autograd', eps=1e-7,
                 alpha=0.99,
                 loss='l2',
                 lr=1e-2):
        super(NMFTrainer, self).__init__()
        self.n_users = n_users
        self.n_items = n_items
        self.batch_size = batch_size
        # k: latent factor dimension.
        self.k = k
        # Name of the configured loss; NOTE(review): only l2 is wired up below.
        self.loss = loss
        self.lr = lr
        # RMSprop smoothing constant.
        self.alpha = alpha
        self.solver = solver
        # Floor used when projecting parameters back to positive values.
        self.eps = eps
        self.device = device
    @staticmethod
    def weighted_mse_loss(data, logits, weight_pos=1.0, weight_neg=0.0):
        """Mean square error loss, weighted per observed/unobserved entry."""
        weights = torch.ones_like(data) * weight_neg
        weights[data > 0] = weight_pos
        res = weights * (data - logits) ** 2
        return res.sum(1)
    @staticmethod
    def l2(x, y):
        """Plain (mean) squared error between x and y."""
        return torch.nn.MSELoss()(x, y)
    @staticmethod
    def kl_dev(x, y):
        """Generalized KL divergence, averaged over elements."""
        return (x * torch.log(x / y) - x + y).mean()
    def _initialize(self, data_tensor):
        """Build the NMF model and its RMSprop optimizer."""
        self.net = NMF(self.n_users, self.n_items, self.k, data_tensor)
        # for autograd solver
        self.opt = torch.optim.RMSprop(self.net.parameters(), alpha=self.alpha, lr=self.lr, weight_decay=1e-6)
    def plus(self, X):
        """Project X onto positives by replacing negative entries with eps."""
        X[X < 0] = self.eps
        return X
    def fit_adv(self, data_tensor, epoch_num, unroll_steps):
        """Train a surrogate NMF, unrolling the final steps via ``higher``.

        Returns the full reconstruction matrix; gradients w.r.t.
        ``data_tensor`` can flow through it.
        """
        self._initialize(data_tensor)
        import higher
        if not data_tensor.requires_grad:
            raise ValueError("To compute adversarial gradients, data_tensor "
                             "should have requires_grad=True.")
        data_tensor = data_tensor.to(self.device)
        n_rows = data_tensor.shape[0]
        n_cols = data_tensor.shape[1]
        idx_list = np.arange(n_rows)
        model = self.net.to(self.device)
        optimizer = self.opt
        batch_size = (self.batch_size
                      if self.batch_size > 0 else len(idx_list))
        # Phase 1: ordinary training with non-negativity projection.
        for i in range(1, epoch_num - unroll_steps + 1):
            t1 = time.time()
            np.random.shuffle(idx_list)
            model.train()
            epoch_loss = 0.0
            for batch_idx in minibatch(
                    idx_list, batch_size=self.batch_size):
                # Compute loss
                # loss = NMFTrainer.weighted_mse_loss(data=data_tensor[batch_idx],
                #                                     logits=model(user_id=batch_idx),
                #                                     weight_pos=1.0,
                #                                     weight_neg=-1.0 * self.eps).sum()
                loss = NMFTrainer.l2(data_tensor[batch_idx], model(user_id=batch_idx))
                epoch_loss += loss.item()
                optimizer.zero_grad()
                # retain_graph=True: the graph reaches data_tensor, which is
                # reused across batches/epochs.
                loss.backward(retain_graph=True)
                optimizer.step()
                # Keep the factors non-negative after each gradient step.
                for p in model.parameters():
                    p.data = self.plus(p.data)
            print("Training [{:.1f} s], epoch: {}, loss: {:.4f}".format(
                time.time() - t1, i, epoch_loss))
        # Phase 2: unrolled differentiable training.
        with higher.innerloop_ctx(model, optimizer) as (fmodel, diffopt):
            print("Switching to higher mode...")
            for i in range(epoch_num - unroll_steps + 1, epoch_num + 1):
                t1 = time.time()
                np.random.shuffle(idx_list)
                fmodel.train()
                epoch_loss = 0.0
                for batch_idx in minibatch(
                        idx_list, batch_size=self.batch_size):
                    # Compute loss
                    # loss = NMFTrainer.weighted_mse_loss(data=data_tensor[batch_idx],
                    #                                     logits=fmodel(user_id=batch_idx),
                    #                                     weight_pos=1.0,
                    #                                     weight_neg=-1.0*self.eps).sum()
                    loss = NMFTrainer.l2(data_tensor[batch_idx], fmodel(user_id=batch_idx))
                    epoch_loss += loss.item()
                    diffopt.step(loss)
                    # Projection applied to the functional model's params too.
                    for p in fmodel.parameters():
                        p.data = self.plus(p.data)
                print("Training (higher mode) [{:.1f} s],"
                      " epoch: {}, loss: {:.4f}".format(time.time() - t1, i, epoch_loss))
            print("Finished surrogate model training,"
                  " {} copies of surrogate model params.".format(len(fmodel._fast_params)))
            fmodel.eval()
            predictions = fmodel()
        return predictions
from numpy.random import RandomState
class PMF(nn.Module):
    """Probabilistic MF: user/item embeddings plus scalar bias terms."""

    def __init__(self, n_users, n_items, n_factors=128, is_sparse=False, no_cuda=None):
        super(PMF, self).__init__()
        self.n_users = n_users
        self.n_items = n_items
        self.n_factors = n_factors
        self.no_cuda = no_cuda
        # Fixed seed so the factor initialization is reproducible.
        self.random_state = RandomState(1)

        self.user_embeddings = nn.Embedding(n_users, n_factors, sparse=is_sparse)
        self.user_embeddings.weight.data = torch.from_numpy(
            0.1 * self.random_state.rand(n_users, n_factors)).float()
        self.item_embeddings = nn.Embedding(n_items, n_factors, sparse=is_sparse)
        self.item_embeddings.weight.data = torch.from_numpy(
            0.1 * self.random_state.rand(n_items, n_factors)).float()

        # Per-user and per-item scalar biases, initialized uniformly.
        self.ub = nn.Embedding(n_users, 1)
        self.ib = nn.Embedding(n_items, 1)
        self.ub.weight.data.uniform_(-.01, .01)
        self.ib.weight.data.uniform_(-.01, .01)

    def forward(self, user_id, item_id):
        """Predicted rating block for the given user/item index tensors."""
        users = self.user_embeddings(user_id)
        items = self.item_embeddings(item_id)
        return torch.mm(users, items.T) + self.ub(user_id) + self.ib(item_id).T
class PMFTrainer(BaseTrainer):
    """Trainer for :class:`PMF` using SGD with momentum.

    NOTE(review): the index arrays below use dtype np.int16, which overflows
    beyond 32767 users/items -- confirm dataset sizes or widen the dtype.
    NOTE(review): relies on a module-level ``mse_loss`` (third positional
    argument 1 is a weight) -- confirm its signature at module scope.
    """
    def __init__(self, n_users, n_items, device, hidden_dim, lr, weight_decay, batch_size,
                 momentum, verbose=False):
        super(PMFTrainer, self).__init__()
        self.device = device
        #
        self.n_users = n_users
        self.n_items = n_items
        #
        self.hidden_dim = hidden_dim
        # Optimizer hyper-parameters.
        self.lr = lr
        self.weight_decay = weight_decay
        self.momentum = momentum
        self.batch_size = batch_size
        #
        self.verbose = verbose
        pass
    def _initialize(self):
        """Build the PMF model and its SGD(momentum) optimizer."""
        self.net = PMF(n_users=self.n_users,
                       n_items=self.n_items,
                       n_factors=self.hidden_dim).to(self.device)
        # self.optimizer = optim.Adam(self.net.parameters(), lr=self.lr, weight_decay=self.weight_decay)
        self.optimizer = optim.SGD(self.net.parameters(), lr=self.lr, weight_decay=self.weight_decay, momentum=self.momentum)
        # Debug output: list the trainable parameter names.
        for name, param in self.net.named_parameters():
            print(name)
    def fit_adv(self, data_tensor, epoch_num, unroll_steps):
        """Train a surrogate PMF, unrolling the last steps via ``higher``.

        Returns the full prediction matrix; gradients w.r.t. ``data_tensor``
        can flow through it.
        """
        self._initialize()
        import higher
        if not data_tensor.requires_grad:
            raise ValueError("To compute adversarial gradients, data_tensor "
                             "should have requires_grad=True.")
        #
        data_tensor = data_tensor.to(self.device)
        n_rows = data_tensor.shape[0]
        idx_list = np.arange(n_rows)
        #
        model = self.net.to(self.device)
        # Full index vectors; every batch scores a user block against ALL items.
        user_idx = np.array(range(self.n_users), dtype=np.int16)
        item_idx = np.array(range(self.n_items), dtype=np.int16)
        for i in range(1, epoch_num - unroll_steps + 1):
            t1 = time.time()
            np.random.shuffle(idx_list)
            model.train()
            epoch_loss = 0.0
            for batch_idx in self.minibatch(idx_list, batch_size=self.batch_size):
                # Compute loss
                loss = mse_loss(data_tensor[batch_idx].float(),
                                model(user_id=torch.tensor(batch_idx).long(),
                                      item_id=torch.tensor(item_idx).long()).float(),
                                1).sum()
                epoch_loss += loss.item()
                self.optimizer.zero_grad()
                # retain_graph=True: the graph reaches data_tensor, which is
                # reused across batches.
                loss.backward(retain_graph=True)
                self.optimizer.step()
            if self.verbose:
                print("Training [{:.1f} s], epoch: {}, loss: {:.4f}".format(
                    time.time() - t1, i, epoch_loss), flush=True)
        with higher.innerloop_ctx(model, self.optimizer) as (fmodel, diffopt):
            if self.verbose:
                print("Switching to higher mode...")
            for i in range(epoch_num - unroll_steps + 1, epoch_num + 1):
                t1 = time.time()
                np.random.shuffle(idx_list)
                fmodel.train()
                epoch_loss = 0.0
                for batch_idx in self.minibatch(idx_list, batch_size=self.batch_size):
                    # Compute loss
                    # ===========warning=================
                    loss = mse_loss(data_tensor[batch_idx].float(),
                                    fmodel(user_id=torch.tensor(batch_idx).long(),
                                           item_id=torch.tensor(item_idx).long()).float(),
                                    1).sum()
                    # ====================================
                    epoch_loss += loss.item()
                    diffopt.step(loss)
                if self.verbose:
                    print("Training (higher mode) [{:.1f} s],"
                          " epoch: {}, loss: {:.4f}".format(time.time() - t1, i, epoch_loss), flush=True)
            #
            if self.verbose:
                print("Finished surrogate model training,"
                      " {} copies of surrogate model params.".format(len(fmodel._fast_params)), flush=True)
            fmodel.eval()
            predictions = fmodel(torch.tensor(user_idx).long(), torch.tensor(item_idx).long())
            print(predictions)
        return predictions
| 38.840419
| 125
| 0.551383
| 7,163
| 59,387
| 4.333938
| 0.069664
| 0.019327
| 0.015075
| 0.007087
| 0.76543
| 0.742881
| 0.725905
| 0.710443
| 0.694144
| 0.674655
| 0
| 0.013682
| 0.315709
| 59,387
| 1,528
| 126
| 38.865838
| 0.750234
| 0.127604
| 0
| 0.656405
| 0
| 0
| 0.040153
| 0
| 0
| 0
| 0
| 0.000654
| 0.007944
| 1
| 0.089374
| false
| 0.008937
| 0.016882
| 0.006951
| 0.184707
| 0.023833
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
31b44a654f4e853f7c24f653ac8a36ad960df7df
| 75
|
py
|
Python
|
BOJ_Python/24266.py
|
tnsgh9603/BOJ_CPP
|
432b1350f6c67cce83aec3e723e30a3c6b5dbfda
|
[
"MIT"
] | null | null | null |
BOJ_Python/24266.py
|
tnsgh9603/BOJ_CPP
|
432b1350f6c67cce83aec3e723e30a3c6b5dbfda
|
[
"MIT"
] | null | null | null |
BOJ_Python/24266.py
|
tnsgh9603/BOJ_CPP
|
432b1350f6c67cce83aec3e723e30a3c6b5dbfda
|
[
"MIT"
] | null | null | null |
import sys
num = int(sys.stdin.readline())
print(num * num * num)
print(3)
| 15
| 31
| 0.68
| 13
| 75
| 3.923077
| 0.615385
| 0.235294
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.015625
| 0.146667
| 75
| 5
| 32
| 15
| 0.78125
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.25
| 0
| 0.25
| 0.5
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 4
|
9ed5992090eeb7bba5868b5ccaff9c66e4e62254
| 610
|
py
|
Python
|
lib/utils.py
|
LeDernier/avalanche_simulation
|
9a73b9862356270add9adf0319ac92d910827ac1
|
[
"Unlicense"
] | null | null | null |
lib/utils.py
|
LeDernier/avalanche_simulation
|
9a73b9862356270add9adf0319ac92d910827ac1
|
[
"Unlicense"
] | null | null | null |
lib/utils.py
|
LeDernier/avalanche_simulation
|
9a73b9862356270add9adf0319ac92d910827ac1
|
[
"Unlicense"
] | null | null | null |
#########################################################################################################################################################################
# Author : Remi Monthiller, remi.monthiller@etu.enseeiht.fr
# Adapted from the code of Raphael Maurin, raphael.maurin@imft.fr
# 30/10/2018
#
# Incline plane simulations
#
#########################################################################################################################################################################
def lengthVector3(vect):
    """Return the Euclidean (L2) norm of a 3-component vector."""
    squared = vect[0] ** 2.0 + vect[1] ** 2.0 + vect[2] ** 2.0
    return squared ** 0.5
| 46.923077
| 169
| 0.296721
| 45
| 610
| 4.022222
| 0.6
| 0.044199
| 0.033149
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.038869
| 0.072131
| 610
| 12
| 170
| 50.833333
| 0.280919
| 0.259016
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0
| 0
| 0.5
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 4
|
7328c8e72a13ab852db005ef4c10aa743256ffb8
| 344
|
py
|
Python
|
src/FFEAT/ffeat/__init__.py
|
PatrikValkovic/MasterThesis
|
6e9f3b186541db6c8395ebc96ace7289d01c805b
|
[
"MIT"
] | null | null | null |
src/FFEAT/ffeat/__init__.py
|
PatrikValkovic/MasterThesis
|
6e9f3b186541db6c8395ebc96ace7289d01c805b
|
[
"MIT"
] | null | null | null |
src/FFEAT/ffeat/__init__.py
|
PatrikValkovic/MasterThesis
|
6e9f3b186541db6c8395ebc96ace7289d01c805b
|
[
"MIT"
] | null | null | null |
###############################
#
# Created by Patrik Valkovic
# 3/9/2021
#
###############################
from .Pipe import Pipe, STANDARD_REPRESENTATION
from .NormalizedPipe import NormalizedPipe
from . import flow
from . import measure
from . import utils
from . import strategies
from . import genetic
from . import pso
from . import cma
| 20.235294
| 47
| 0.630814
| 38
| 344
| 5.684211
| 0.526316
| 0.324074
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.020339
| 0.142442
| 344
| 16
| 48
| 21.5
| 0.711864
| 0.101744
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
732fa2f9aa6599c1773b284aa81c6537453cf733
| 439
|
py
|
Python
|
FirebaseLoginScreen/main.py
|
solointer11/KivyBusApp
|
2eaa0158e3594c4b2ffc81affd0ec177adc60d8d
|
[
"MIT"
] | 1
|
2020-07-18T22:07:39.000Z
|
2020-07-18T22:07:39.000Z
|
FirebaseLoginScreen/main.py
|
solointer11/KivyBusApp
|
2eaa0158e3594c4b2ffc81affd0ec177adc60d8d
|
[
"MIT"
] | null | null | null |
FirebaseLoginScreen/main.py
|
solointer11/KivyBusApp
|
2eaa0158e3594c4b2ffc81affd0ec177adc60d8d
|
[
"MIT"
] | null | null | null |
if __name__ == "__main__":
from kivy.app import App
from kivy import utils
# -- This import can be done in kv lang or python
class MainApp(App):
#login_primary_color = utils.get_color_from_hex("#ABCDEF")#(1, 0, 0, 1)
#login_secondary_color = utils.get_color_from_hex("#060809")#(1, 1, 0, 1)
#login_tertiary_color = utils.get_color_from_hex("#434343")#(0,0, 1, 1)
pass
MainApp().run()
| 31.357143
| 81
| 0.637813
| 67
| 439
| 3.835821
| 0.492537
| 0.116732
| 0.151751
| 0.210117
| 0.291829
| 0.291829
| 0
| 0
| 0
| 0
| 0
| 0.070796
| 0.22779
| 439
| 14
| 82
| 31.357143
| 0.687316
| 0.57631
| 0
| 0
| 0
| 0
| 0.045455
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.166667
| 0.333333
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
|
0
| 4
|
b407ea921989eec447bc1d366976486f8c6fcc21
| 1,228
|
py
|
Python
|
ciperpus_test_client.py
|
R-N/ciperpus_test
|
6075133fbd6cf1b7dd16434a9eb9611d3bd72929
|
[
"MIT"
] | null | null | null |
ciperpus_test_client.py
|
R-N/ciperpus_test
|
6075133fbd6cf1b7dd16434a9eb9611d3bd72929
|
[
"MIT"
] | null | null | null |
ciperpus_test_client.py
|
R-N/ciperpus_test
|
6075133fbd6cf1b7dd16434a9eb9611d3bd72929
|
[
"MIT"
] | null | null | null |
from ciperpus_exception import *
from ciperpus_test_exception import *
from ciperpus_test_context import *
from ciperpus_client import ciperpus_client
class ciperpus_test_client:
    """Test-facing wrapper around ``ciperpus_client``.

    Each action runs inside a ``ciperpus_test_context`` so an expected error
    can be asserted instead of propagating.
    """
    def __init__(self, client=None):
        # Use the provided client, or create a fresh default one.
        if client is None:
            self.client = ciperpus_client()
        else:
            self.client = client
    def login(self, username, password, expect_error=None, use_button=True):
        # NOTE(review): ``use_button`` is accepted but never used -- confirm.
        with ciperpus_test_context(expect_error) as context:
            self.client.login(username, password)
    def logout(self, expect_error=None):
        with ciperpus_test_context(expect_error) as context:
            self.client.logout()
    def dashboard(self, expect_error=None):
        # NOTE(review): this calls ``logout()`` rather than any dashboard
        # action -- looks like a copy-paste slip; confirm intended behavior.
        with ciperpus_test_context(expect_error) as context:
            self.client.logout()
    @property
    def url(self):
        """Current URL reported by the underlying client."""
        return self.client.url
    def check_url(self, endpoint):
        """Delegate the endpoint URL check to the underlying client."""
        return self.client.check_url(endpoint)
    def close(self):
        return self.client.close()
    def quit(self):
        return self.client.quit()
    def __enter__(self):
        return self
    def __exit__(self, exc_type, exc_value, traceback):
        # Always quit the client; exceptions are not suppressed (returns None).
        self.quit()
test_client_instance = None


def get_test_client():
    """Return the process-wide ciperpus_test_client, creating it lazily."""
    global test_client_instance
    if test_client_instance is None:
        test_client_instance = ciperpus_test_client()
    return test_client_instance
| 24.56
| 73
| 0.773616
| 173
| 1,228
| 5.184971
| 0.254335
| 0.111483
| 0.100334
| 0.076923
| 0.362319
| 0.2932
| 0.232999
| 0.232999
| 0.232999
| 0.232999
| 0
| 0
| 0.140879
| 1,228
| 50
| 74
| 24.56
| 0.850237
| 0
| 0
| 0.135135
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.297297
| false
| 0.054054
| 0.108108
| 0.135135
| 0.594595
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
|
0
| 4
|
b412227b154ac29c5efea1e06da298740ad7c046
| 130
|
py
|
Python
|
test.py
|
canyoupleasecreateanaccount/pytest-slack
|
1894d6168394a1daffdeff5ced05814a68594cdd
|
[
"MIT"
] | null | null | null |
test.py
|
canyoupleasecreateanaccount/pytest-slack
|
1894d6168394a1daffdeff5ced05814a68594cdd
|
[
"MIT"
] | null | null | null |
test.py
|
canyoupleasecreateanaccount/pytest-slack
|
1894d6168394a1daffdeff5ced05814a68594cdd
|
[
"MIT"
] | null | null | null |
z = "Tests were ran for service: matchService \n" \
"Passed: \n" \
"Failed: \n" \
"Error: \n" \
"Skipped: \n"
| 21.666667
| 51
| 0.492308
| 16
| 130
| 4
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.315385
| 130
| 5
| 52
| 26
| 0.719101
| 0
| 0
| 0
| 0
| 0
| 0.669231
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.2
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 4
|
b4179e48e94d2df8b9be7d67d89d129fb24f7f10
| 41
|
py
|
Python
|
Day_1_Scientific_Python/snippets/02-pandas_introduction65.py
|
Morisset/python-workshop
|
ec8b0c4f08a24833e53a22f6b52566a08715c9d0
|
[
"BSD-3-Clause"
] | 183
|
2016-08-24T12:32:07.000Z
|
2022-03-26T14:05:04.000Z
|
Day_1_Scientific_Python/snippets/02-pandas_introduction65.py
|
Morisset/python-workshop
|
ec8b0c4f08a24833e53a22f6b52566a08715c9d0
|
[
"BSD-3-Clause"
] | 100
|
2016-12-15T03:44:06.000Z
|
2022-03-07T08:14:07.000Z
|
Day_1_Scientific_Python/snippets/02-pandas_introduction65.py
|
Morisset/python-workshop
|
ec8b0c4f08a24833e53a22f6b52566a08715c9d0
|
[
"BSD-3-Clause"
] | 204
|
2016-08-24T14:22:58.000Z
|
2022-03-29T15:09:03.000Z
|
df.loc[df['Sex'] == 'male', 'Age'].mean()
| 41
| 41
| 0.512195
| 7
| 41
| 3
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.073171
| 41
| 1
| 41
| 41
| 0.552632
| 0
| 0
| 0
| 0
| 0
| 0.238095
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
b43ff936903c6eb6884da38660988e5cfb3d3c3a
| 342
|
py
|
Python
|
py17track/errors.py
|
ofalvai/py17track
|
4e2705adcebf2a5ea6d9da2e39d6886013afd9b4
|
[
"MIT"
] | 23
|
2018-07-28T17:44:03.000Z
|
2022-03-14T19:30:27.000Z
|
py17track/errors.py
|
ofalvai/py17track
|
4e2705adcebf2a5ea6d9da2e39d6886013afd9b4
|
[
"MIT"
] | 62
|
2018-10-31T03:58:05.000Z
|
2022-03-14T20:18:41.000Z
|
py17track/errors.py
|
ofalvai/py17track
|
4e2705adcebf2a5ea6d9da2e39d6886013afd9b4
|
[
"MIT"
] | 9
|
2020-10-16T10:49:42.000Z
|
2022-02-17T04:24:26.000Z
|
"""Define module exceptions."""
class SeventeenTrackError(Exception):
    """Base error for this package."""


class InvalidTrackingNumberError(SeventeenTrackError):
    """Raised when a tracking number is invalid."""


class RequestError(SeventeenTrackError):
    """Raised when an HTTP request fails."""
| 17.1
| 57
| 0.704678
| 34
| 342
| 7.088235
| 0.588235
| 0.074689
| 0.224066
| 0.26556
| 0.290456
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.187135
| 342
| 19
| 58
| 18
| 0.866906
| 0.394737
| 0
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.5
| 0
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 4
|
b45d024c5f144ec7d9bd572d324d3eea4f2b4820
| 317
|
py
|
Python
|
backend/rumergy_backend/rumergy/models/__init__.py
|
Firefly-Tech/rumergy-webapp
|
859054bd9ee710a11b393027bb9cb1bad55d0f00
|
[
"MIT"
] | 1
|
2021-11-08T00:28:37.000Z
|
2021-11-08T00:28:37.000Z
|
backend/rumergy_backend/rumergy/models/__init__.py
|
Firefly-Tech/rumergy-webapp
|
859054bd9ee710a11b393027bb9cb1bad55d0f00
|
[
"MIT"
] | 1
|
2021-11-02T02:17:37.000Z
|
2021-11-02T02:17:37.000Z
|
backend/rumergy_backend/rumergy/models/__init__.py
|
Firefly-Tech/rumergy-webapp
|
859054bd9ee710a11b393027bb9cb1bad55d0f00
|
[
"MIT"
] | 1
|
2021-10-18T22:27:04.000Z
|
2021-10-18T22:27:04.000Z
|
from .user_profile import UserProfile
from .access_request import AccessRequest
from .data_log import DataLog
from .meter_model import MeterModel
from .building import Building
from .meter import Meter
from .meter_data import MeterData
from .data_log_measures import DataLogMeasures
from .data_point import DataPoint
| 31.7
| 46
| 0.858044
| 44
| 317
| 6
| 0.454545
| 0.090909
| 0.083333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.113565
| 317
| 9
| 47
| 35.222222
| 0.939502
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
c30b201d7ad864cdb0e856d23ae2e8b0ca901a7f
| 101
|
py
|
Python
|
python/testData/inspections/PyUnresolvedReferencesInspection/InstanceAttributeCreatedThroughWithStatementInAnotherFile/foo.py
|
truthiswill/intellij-community
|
fff88cfb0dc168eea18ecb745d3e5b93f57b0b95
|
[
"Apache-2.0"
] | 2
|
2019-04-28T07:48:50.000Z
|
2020-12-11T14:18:08.000Z
|
python/testData/inspections/PyUnresolvedReferencesInspection/InstanceAttributeCreatedThroughWithStatementInAnotherFile/foo.py
|
truthiswill/intellij-community
|
fff88cfb0dc168eea18ecb745d3e5b93f57b0b95
|
[
"Apache-2.0"
] | 173
|
2018-07-05T13:59:39.000Z
|
2018-08-09T01:12:03.000Z
|
python/testData/inspections/PyUnresolvedReferencesInspection/InstanceAttributeCreatedThroughWithStatementInAnotherFile/foo.py
|
truthiswill/intellij-community
|
fff88cfb0dc168eea18ecb745d3e5b93f57b0b95
|
[
"Apache-2.0"
] | 2
|
2020-03-15T08:57:37.000Z
|
2020-04-07T04:48:14.000Z
|
class Foo(object):
    # Inspection-test fixture: the instance attribute ``scope`` is created by
    # naming ``self.scope`` as the ``with`` target, not by a plain assignment.
    # Do not "simplify" this — the unusual form is the thing under test.
    def __init__(self):
        # ``open('scope')``'s return value is bound to ``self.scope``; the
        # file object itself is never read, and the block body is empty.
        with open('scope') as self.scope:
            pass
| 25.25
| 41
| 0.564356
| 13
| 101
| 4.076923
| 0.846154
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.316832
| 101
| 4
| 42
| 25.25
| 0.768116
| 0
| 0
| 0
| 0
| 0
| 0.04902
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0.25
| 0
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 4
|
c3161a1bfc0ebbf49c7e875757b2ef002e247b84
| 953
|
py
|
Python
|
lib/systems/anisole.py
|
pulsar-chem/BPModule
|
f8e64e04fdb01947708f098e833600c459c2ff0e
|
[
"BSD-3-Clause"
] | null | null | null |
lib/systems/anisole.py
|
pulsar-chem/BPModule
|
f8e64e04fdb01947708f098e833600c459c2ff0e
|
[
"BSD-3-Clause"
] | null | null | null |
lib/systems/anisole.py
|
pulsar-chem/BPModule
|
f8e64e04fdb01947708f098e833600c459c2ff0e
|
[
"BSD-3-Clause"
] | null | null | null |
import pulsar as psr
def load_ref_system():
    """ Returns anisole as found in the IQMol fragment library.
    All credit to https://github.com/nutjunkie/IQmol

    Returns:
        The object produced by ``psr.make_system`` for the hard-coded
        geometry below — an element / x / y / z table (presumably Cartesian
        coordinates in Angstroms; units are not stated here, TODO confirm
        against pulsar's make_system documentation).
    """
    # The multiline string is parsed at runtime by psr.make_system; its text
    # is behavior, so it is reproduced verbatim.
    return psr.make_system("""
C 0.6504 1.1978 -0.0297
C 1.9454 0.7011 -0.0190
C 2.1752 -0.6726 0.0122
C 1.1032 -1.5553 0.0323
C -0.2051 -1.0813 0.0222
C -0.4284 0.2985 -0.0085
H 0.4708 2.2786 -0.0544
H 2.7923 1.3949 -0.0349
H 3.2006 -1.0551 0.0208
H 1.2845 -2.6349 0.0565
H -1.0364 -1.7948 0.0383
O -1.6672 0.9093 -0.0224
C -2.8002 0.0778 0.0128
H -2.8636 -0.5772 -0.8639
H -3.6200 0.8011 -0.0005
H -2.8459 -0.5255 0.9269
""")
| 39.708333
| 63
| 0.41553
| 141
| 953
| 2.787234
| 0.567376
| 0.015267
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.486815
| 0.482686
| 953
| 23
| 64
| 41.434783
| 0.310345
| 0.109129
| 0
| 0
| 0
| 0
| 0.898673
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.05
| true
| 0
| 0.05
| 0
| 0.15
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
c31c4347b4c4487f06bb4cfdc0a77ff446995c75
| 64
|
py
|
Python
|
FWCore/Integration/python/forbidden_cff.py
|
nistefan/cmssw
|
ea13af97f7f2117a4f590a5e654e06ecd9825a5b
|
[
"Apache-2.0"
] | 1
|
2020-08-12T08:37:04.000Z
|
2020-08-12T08:37:04.000Z
|
FWCore/Integration/python/forbidden_cff.py
|
nistefan/cmssw
|
ea13af97f7f2117a4f590a5e654e06ecd9825a5b
|
[
"Apache-2.0"
] | null | null | null |
FWCore/Integration/python/forbidden_cff.py
|
nistefan/cmssw
|
ea13af97f7f2117a4f590a5e654e06ecd9825a5b
|
[
"Apache-2.0"
] | 1
|
2019-03-19T13:44:54.000Z
|
2019-03-19T13:44:54.000Z
|
import FWCore.ParameterSet.Config as cms
import restricted_cff
| 16
| 40
| 0.859375
| 9
| 64
| 6
| 0.888889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.109375
| 64
| 3
| 41
| 21.333333
| 0.947368
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
c334e1370343a51072d6b6f11e1cdd14b02fd88f
| 361
|
py
|
Python
|
preprocessing/cleaning/FilterSpaces.py
|
mrForest13/sentiment-analysis
|
f747800b78dbf5d9c598d52cbe9f92a44b90cc42
|
[
"Apache-2.0"
] | null | null | null |
preprocessing/cleaning/FilterSpaces.py
|
mrForest13/sentiment-analysis
|
f747800b78dbf5d9c598d52cbe9f92a44b90cc42
|
[
"Apache-2.0"
] | null | null | null |
preprocessing/cleaning/FilterSpaces.py
|
mrForest13/sentiment-analysis
|
f747800b78dbf5d9c598d52cbe9f92a44b90cc42
|
[
"Apache-2.0"
] | null | null | null |
from preprocessing.Processor import Processor
class FilterSpacesProcessor(Processor):
    """Pipeline stage that collapses runs of whitespace in the text column."""

    def process(self, data):
        # Normalize whitespace in-place, then delegate to the next stage.
        data['text'] = self.remove_unnecessary_spaces(data)
        return self.next_processor.process(data)

    @staticmethod
    def remove_unnecessary_spaces(data):
        # str.split() with no argument discards every whitespace run;
        # re-joining with a single space collapses them.
        def collapse(text):
            return " ".join(text.split())

        return data["text"].swifter.apply(collapse)
| 25.785714
| 72
| 0.711911
| 41
| 361
| 6.146341
| 0.560976
| 0.063492
| 0.18254
| 0.214286
| 0.261905
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.174515
| 361
| 13
| 73
| 27.769231
| 0.845638
| 0
| 0
| 0
| 0
| 0
| 0.024931
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.125
| 0.125
| 0.75
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
|
0
| 4
|
c339ce8297c0aad1a314edcb843f427becbadd79
| 1,548
|
py
|
Python
|
src/niveristand/clientapi/__init__.py
|
ioancornea/niveristand-python
|
a7fd578aefa904e9eb0bab00762af0ebba21ada0
|
[
"MIT"
] | 6
|
2018-07-04T10:59:43.000Z
|
2022-03-24T13:34:33.000Z
|
src/niveristand/clientapi/__init__.py
|
ioancornea/niveristand-python
|
a7fd578aefa904e9eb0bab00762af0ebba21ada0
|
[
"MIT"
] | 14
|
2018-11-05T20:05:33.000Z
|
2022-03-10T12:54:58.000Z
|
src/niveristand/clientapi/__init__.py
|
ioancornea/niveristand-python
|
a7fd578aefa904e9eb0bab00762af0ebba21ada0
|
[
"MIT"
] | 15
|
2018-07-04T07:58:49.000Z
|
2022-02-22T16:35:26.000Z
|
from niveristand.clientapi._datatypes import BooleanValue
from niveristand.clientapi._datatypes import BooleanValueArray
from niveristand.clientapi._datatypes import ChannelReference
from niveristand.clientapi._datatypes import DoubleValue
from niveristand.clientapi._datatypes import DoubleValueArray
from niveristand.clientapi._datatypes import I32Value
from niveristand.clientapi._datatypes import I32ValueArray
from niveristand.clientapi._datatypes import I64Value
from niveristand.clientapi._datatypes import I64ValueArray
from niveristand.clientapi._datatypes import U32Value
from niveristand.clientapi._datatypes import U32ValueArray
from niveristand.clientapi._datatypes import U64Value
from niveristand.clientapi._datatypes import U64ValueArray
from niveristand.clientapi._datatypes import VectorChannelReference
from niveristand.clientapi._realtimesequencedefinitionapi.erroraction import ErrorAction
from niveristand.clientapi.realtimesequence import RealTimeSequence
from niveristand.clientapi.stimulusprofileapi import StimulusProfileState
__all__ = ["BooleanValue",
"BooleanValueArray",
"ChannelReference",
"DoubleValue",
"DoubleValueArray",
"I32Value",
"I32ValueArray",
"I64Value",
"I64ValueArray",
"U32Value",
"U32ValueArray",
"U64Value",
"U64ValueArray",
"VectorChannelReference",
"ErrorAction",
"RealTimeSequence",
"StimulusProfileState",
]
| 40.736842
| 88
| 0.763566
| 121
| 1,548
| 9.61157
| 0.206612
| 0.219261
| 0.350817
| 0.397249
| 0.469475
| 0
| 0
| 0
| 0
| 0
| 0
| 0.025177
| 0.178941
| 1,548
| 37
| 89
| 41.837838
| 0.889851
| 0
| 0
| 0
| 0
| 0
| 0.145349
| 0.014212
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.485714
| 0
| 0.485714
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
c33d4eca6c592b71e641e5dcbdadf82d15d2244f
| 4,788
|
py
|
Python
|
release/stubs.min/Autodesk/Revit/DB/Structure/__init___parts/RebarContainerParameterManager.py
|
htlcnn/ironpython-stubs
|
780d829e2104b2789d5f4d6f32b0ec9f2930ca03
|
[
"MIT"
] | 182
|
2017-06-27T02:26:15.000Z
|
2022-03-30T18:53:43.000Z
|
release/stubs.min/Autodesk/Revit/DB/Structure/__init___parts/RebarContainerParameterManager.py
|
htlcnn/ironpython-stubs
|
780d829e2104b2789d5f4d6f32b0ec9f2930ca03
|
[
"MIT"
] | 28
|
2017-06-27T13:38:23.000Z
|
2022-03-15T11:19:44.000Z
|
release/stubs.min/Autodesk/Revit/DB/Structure/__init___parts/RebarContainerParameterManager.py
|
htlcnn/ironpython-stubs
|
780d829e2104b2789d5f4d6f32b0ec9f2930ca03
|
[
"MIT"
] | 67
|
2017-06-28T09:43:59.000Z
|
2022-03-20T21:17:10.000Z
|
class RebarContainerParameterManager(object,IDisposable):
    """ Provides implementation of RebarContainer parameters overrides. """
    # NOTE: auto-generated IronPython stub for the Revit API. Every method
    # body is ``pass``; the docstrings mirror the .NET overload signatures
    # and are the contract surfaced to IDE tooling — do not hand-edit them.
    def AddOverride(self,paramId,value):
        """
        AddOverride(self: RebarContainerParameterManager,paramId: ElementId,value: int)
        Adds an override for the given parameter as its value will be displayed for the
        Rebar Container element.
        paramId: The id of the parameter
        value: The override value of the parameter.
        AddOverride(self: RebarContainerParameterManager,paramId: ElementId,value: float)
        Adds an override for the given parameter as its value will be displayed for the
        Rebar Container element.
        paramId: The id of the parameter
        value: The override value of the parameter.
        AddOverride(self: RebarContainerParameterManager,paramId: ElementId,value: ElementId)
        Adds an override for the given parameter as its value will be displayed for the
        Rebar Container element.
        paramId: The id of the parameter
        value: The override value of the parameter.
        AddOverride(self: RebarContainerParameterManager,paramId: ElementId,value: str)
        Adds an override for the given parameter as its value will be displayed for the
        Rebar Container element.
        paramId: The id of the parameter
        value: The override value of the parameter.
        """
        pass
    def AddSharedParameterAsOverride(self,paramId):
        """
        AddSharedParameterAsOverride(self: RebarContainerParameterManager,paramId: ElementId)
        Adds a shared parameter as one of the parameter overrides stored by this Rebar
        Container element.
        paramId: The id of the shared parameter element
        """
        pass
    def ClearOverrides(self):
        """
        ClearOverrides(self: RebarContainerParameterManager)
        Clears any overridden values from all parameters of the associated
        RebarContainer element.
        """
        pass
    def Dispose(self):
        """ Dispose(self: RebarContainerParameterManager) """
        pass
    def IsOverriddenParameterModifiable(self,paramId):
        """
        IsOverriddenParameterModifiable(self: RebarContainerParameterManager,paramId: ElementId) -> bool
        Checks if overridden parameter is modifiable.
        paramId: Overridden parameter id
        Returns: True if the parameter is modifiable,false if the parameter is readonly.
        """
        pass
    def IsParameterOverridden(self,paramId):
        """
        IsParameterOverridden(self: RebarContainerParameterManager,paramId: ElementId) -> bool
        Checks if the parameter has an override
        paramId: The id of the parameter element
        Returns: True if the parameter has an override
        """
        pass
    def IsRebarContainerParameter(self,paramId):
        """
        IsRebarContainerParameter(self: RebarContainerParameterManager,paramId: ElementId) -> bool
        Checks if the parameter is a Rebar Container parameter
        paramId: The id of the parameter element
        Returns: True if the parameter is a Rebar Container parameter
        """
        pass
    def ReleaseUnmanagedResources(self,*args):
        """ ReleaseUnmanagedResources(self: RebarContainerParameterManager,disposing: bool) """
        pass
    def RemoveOverride(self,paramId):
        """
        RemoveOverride(self: RebarContainerParameterManager,paramId: ElementId)
        Removes an overridden value from the given parameter.
        paramId: The id of the parameter
        """
        pass
    def SetOverriddenParameterModifiable(self,paramId):
        """
        SetOverriddenParameterModifiable(self: RebarContainerParameterManager,paramId: ElementId)
        Sets this overridden parameter to be modifiable.
        paramId: Overridden parameter id
        """
        pass
    def SetOverriddenParameterReadonly(self,paramId):
        """
        SetOverriddenParameterReadonly(self: RebarContainerParameterManager,paramId: ElementId)
        Sets this overridden parameter to be readonly.
        paramId: Overridden parameter id
        """
        pass
    def __enter__(self,*args):
        """ __enter__(self: IDisposable) -> object """
        pass
    def __exit__(self,*args):
        """ __exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object) """
        pass
    def __init__(self,*args):
        """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
        pass
    def __repr__(self,*args):
        """ __repr__(self: object) -> str """
        pass
    # Stub convention: read-only .NET property modeled as a property() whose
    # getter returns a placeholder object; the bare string below is the
    # generated attribute documentation and is kept verbatim.
    IsValidObject=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Specifies whether the .NET object represents a valid Revit entity.
Get: IsValidObject(self: RebarContainerParameterManager) -> bool
"""
| 24.680412
| 215
| 0.71533
| 503
| 4,788
| 6.675944
| 0.198807
| 0.064324
| 0.05003
| 0.163788
| 0.525015
| 0.506552
| 0.450268
| 0.421382
| 0.39458
| 0.350506
| 0
| 0
| 0.211571
| 4,788
| 193
| 216
| 24.80829
| 0.889536
| 0.694862
| 0
| 0.46875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.46875
| false
| 0.46875
| 0
| 0
| 0.53125
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
| 0
|
0
| 4
|
c346e7dcb789d9a57933e9423921d7e66c3c59a8
| 2,010
|
py
|
Python
|
data_structures/linked-list/test_ll_merge.py
|
zarkle/data-structures-and-algorithms
|
0485b95f5aabc0ee255cd7e50b48a6ccec851e00
|
[
"MIT"
] | 1
|
2021-01-28T07:32:17.000Z
|
2021-01-28T07:32:17.000Z
|
data_structures/linked-list/test_ll_merge.py
|
zarkle/data-structures-and-algorithms
|
0485b95f5aabc0ee255cd7e50b48a6ccec851e00
|
[
"MIT"
] | null | null | null |
data_structures/linked-list/test_ll_merge.py
|
zarkle/data-structures-and-algorithms
|
0485b95f5aabc0ee255cd7e50b48a6ccec851e00
|
[
"MIT"
] | 1
|
2020-04-10T08:01:50.000Z
|
2020-04-10T08:01:50.000Z
|
from ll_merge import merge_lists as ml
# The *_ll parameters are pytest fixtures defined in conftest (not visible
# here) — presumably linked lists with lengths 4 (short_ll), 10 (long_ll),
# 8 (small_ll) and 0 (empty_ll), inferred from the len() assertions below;
# verify against conftest.
def test_merge_list(short_ll, long_ll):
    """test merged list with lists of varying lengths"""
    # ml apparently returns the merged list's head value (8 here).
    assert ml(short_ll, long_ll) == 8
    assert len(long_ll) == 10
def test_merge_list_values(short_ll, long_ll):
    """test merged list with lists of varying lengths"""
    ml(short_ll, long_ll)
    assert long_ll.head.val == 8
    assert long_ll.head._next.val == 16
def test_merge_list_two(long_ll, short_ll):
    """test merged list with lists of varying lengths"""
    assert ml(long_ll, short_ll) == 16
    assert len(long_ll) == 10
def test_merge_list_two_values(long_ll, short_ll):
    """test merged list with lists of varying lengths"""
    ml(long_ll, short_ll)
    assert long_ll.head.val == 16
    assert long_ll.head._next.val == 8
def test_merge_list_same(short_ll, small_ll):
    """test merged list with lists of same length"""
    assert ml(short_ll, small_ll) == 8
    assert len(small_ll) == 8
def test_merge_list_same_values(short_ll, small_ll):
    """test merged list with lists of same length"""
    ml(short_ll, small_ll)
    assert small_ll.head.val == 8
    assert small_ll.head._next.val == 4
    # Walks seven nodes in — checks interleaving deep into the merged list.
    assert small_ll.head._next._next._next._next._next._next._next.val == 1
def test_merge_list_empty(short_ll, empty_ll):
    """test merged list when one list is empty"""
    assert ml(short_ll, empty_ll) == 8
    assert len(short_ll) == 4
def test_merge_list_empty_values(short_ll, empty_ll):
    """test merged list when one list is empty"""
    ml(short_ll, empty_ll)
    assert short_ll.head.val == 8
    assert short_ll.head._next.val == 7
def test_merge_list_empty_first(empty_ll, short_ll):
    """test merged list when one list is empty"""
    assert ml(empty_ll, short_ll) == 8
    assert len(short_ll) == 4
def test_merge_list_empty_first_values(empty_ll, short_ll):
    """test merged list when one list is empty"""
    ml(empty_ll, short_ll)
    assert short_ll.head.val == 8
    assert short_ll.head._next.val == 7
| 29.130435
| 75
| 0.703483
| 346
| 2,010
| 3.783237
| 0.109827
| 0.139037
| 0.091673
| 0.122231
| 0.903743
| 0.735676
| 0.591291
| 0.591291
| 0.591291
| 0.540871
| 0
| 0.015951
| 0.189055
| 2,010
| 68
| 76
| 29.558824
| 0.787117
| 0.215423
| 0
| 0.216216
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.567568
| 1
| 0.27027
| false
| 0
| 0.027027
| 0
| 0.297297
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
c365c5f6a562d4aede79aea0f69f00400ee84f7a
| 38
|
py
|
Python
|
CanvasSync/settings/__init__.py
|
jnbli/CanvasSync
|
763eeb91d092aaaf225ea46abcfc5dd0a4a0f8c0
|
[
"MIT"
] | 34
|
2017-08-28T23:35:11.000Z
|
2022-03-29T00:09:50.000Z
|
CanvasSync/settings/__init__.py
|
jnbli/CanvasSync
|
763eeb91d092aaaf225ea46abcfc5dd0a4a0f8c0
|
[
"MIT"
] | 23
|
2017-02-07T16:42:46.000Z
|
2022-03-13T07:49:35.000Z
|
CanvasSync/settings/__init__.py
|
jnbli/CanvasSync
|
763eeb91d092aaaf225ea46abcfc5dd0a4a0f8c0
|
[
"MIT"
] | 26
|
2017-02-11T08:59:31.000Z
|
2022-03-15T09:20:05.000Z
|
""" CanvasSync by Mathias Perslev """
| 19
| 37
| 0.684211
| 4
| 38
| 6.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.157895
| 38
| 1
| 38
| 38
| 0.8125
| 0.763158
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
5ef4215a969655dd8506fb5e4bf815ff31aa21e9
| 145
|
py
|
Python
|
chap7/even_or_odd.py
|
wikilike7/python-crash-course
|
85cd7a2ab6e43a554c282b6e0c1c44c415cca3a3
|
[
"MIT"
] | null | null | null |
chap7/even_or_odd.py
|
wikilike7/python-crash-course
|
85cd7a2ab6e43a554c282b6e0c1c44c415cca3a3
|
[
"MIT"
] | null | null | null |
chap7/even_or_odd.py
|
wikilike7/python-crash-course
|
85cd7a2ab6e43a554c282b6e0c1c44c415cca3a3
|
[
"MIT"
] | 1
|
2019-03-05T09:31:27.000Z
|
2019-03-05T09:31:27.000Z
|
# Prompt for an integer and report whether it is even or odd.
number = int(input('Enter a number: '))
parity_message = 'The number is even' if number % 2 == 0 else 'The number is odd'
print(parity_message)
| 20.714286
| 34
| 0.627586
| 23
| 145
| 3.956522
| 0.608696
| 0.175824
| 0.307692
| 0.351648
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.017699
| 0.22069
| 145
| 7
| 35
| 20.714286
| 0.787611
| 0
| 0
| 0
| 0
| 0
| 0.349315
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.333333
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
6f0ce1607f3e882dea2f9fb9d0c7dd0008bbdd70
| 136
|
py
|
Python
|
doc/architecture/examples/attribute-control/proxied-attr.py
|
elmordo/Py3AMF
|
ac12211459d6e11de3fb4f03a43bc0e688c6c1f6
|
[
"MIT"
] | 87
|
2015-01-25T14:54:00.000Z
|
2021-11-16T13:12:40.000Z
|
doc/architecture/examples/attribute-control/proxied-attr.py
|
thijstriemstra/pyamf
|
d13915dfc68d06eb69ffc3e4e2a23257383568cc
|
[
"MIT"
] | 36
|
2015-01-05T01:24:59.000Z
|
2021-09-15T20:40:33.000Z
|
doc/architecture/examples/attribute-control/proxied-attr.py
|
thijstriemstra/pyamf
|
d13915dfc68d06eb69ffc3e4e2a23257383568cc
|
[
"MIT"
] | 37
|
2015-01-04T03:31:28.000Z
|
2022-01-20T04:38:49.000Z
|
import pyamf
class Person(object):
    """Example model whose ``address`` attribute is proxied by pyamf."""
    # pyamf reads this inner ``__amf__`` class for per-class encoding
    # options; ``proxy`` names the attributes to wrap in proxy objects.
    class __amf__:
        proxy = ('address',)
# Module-level side effect: registers Person under its ActionScript alias
# so pyamf can (de)serialize it by that name.
pyamf.register_class(Person, 'com.acme.app.Person')
| 19.428571
| 51
| 0.683824
| 17
| 136
| 5.176471
| 0.705882
| 0.25
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.176471
| 136
| 7
| 51
| 19.428571
| 0.785714
| 0
| 0
| 0
| 0
| 0
| 0.189781
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.2
| 0
| 0.6
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 4
|
6f39e4ffcd734d85bd6e06a14af21c8cd63041b3
| 1,229
|
py
|
Python
|
opencontrail/files/test_files/conftest.py
|
casek14/salt-formula-opencontrail
|
65d4ca1356cbb52341c94fdd44d5b79fc77f9392
|
[
"Apache-2.0"
] | 3
|
2017-06-15T15:29:03.000Z
|
2018-07-19T11:35:20.000Z
|
opencontrail/files/test_files/conftest.py
|
casek14/salt-formula-opencontrail
|
65d4ca1356cbb52341c94fdd44d5b79fc77f9392
|
[
"Apache-2.0"
] | 13
|
2017-02-15T06:28:22.000Z
|
2018-05-04T14:57:18.000Z
|
opencontrail/files/test_files/conftest.py
|
casek14/salt-formula-opencontrail
|
65d4ca1356cbb52341c94fdd44d5b79fc77f9392
|
[
"Apache-2.0"
] | 12
|
2017-02-05T23:11:33.000Z
|
2017-10-05T01:17:08.000Z
|
# pytest settings and fixtures
from stepler import *
from stepler.conftest import * # noqa
from stepler.conftest import __all__
from stepler.conftest import pytest_plugins
from stepler.glance.fixtures import * # noqa
from stepler.keystone.fixtures import * # noqa
from stepler.neutron.fixtures import * # noqa
from stepler.nova.fixtures import * # noqa
from vapor.fixtures.contrail import * # noqa
from vapor.fixtures.contrail_resources import * # noqa
from vapor.fixtures.different_tenants_resources import * # noqa
from vapor.fixtures.dns import * # noqa
from vapor.fixtures.images import * # noqa
from vapor.fixtures.instance_ip import * # noqa
from vapor.fixtures.ipams import * # noqa
from vapor.fixtures.lbaas import * # noqa
from vapor.fixtures.networks import * # noqa
from vapor.fixtures.nodes import * # noqa
from vapor.fixtures.policies import * # noqa
from vapor.fixtures.security_groups import * # noqa
from vapor.fixtures.service_chain import * # noqa
from vapor.fixtures.skip import * # noqa
from vapor.fixtures.subnets import * # noqa
from vapor.fixtures.system_services import * # noqa
from vapor.fixtures.virtual_interface import * # noqa
pytest_plugins = [
'vapor.plugins.xfail',
]
| 38.40625
| 64
| 0.771359
| 162
| 1,229
| 5.765432
| 0.246914
| 0.235546
| 0.314775
| 0.345824
| 0.620985
| 0.152034
| 0
| 0
| 0
| 0
| 0
| 0
| 0.148088
| 1,229
| 31
| 65
| 39.645161
| 0.892073
| 0.112286
| 0
| 0
| 0
| 0
| 0.017807
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.892857
| 0
| 0.892857
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
6f67d691806c52489c531270cc293cfdb5b2a538
| 211
|
py
|
Python
|
appspider/spiders/wenshucourt/__init__.py
|
SullivanLin/appspider
|
116457ac93bd90e79c9eb4e9db37fcb3427fbd35
|
[
"MIT"
] | 10
|
2018-09-17T07:45:12.000Z
|
2020-08-01T18:33:32.000Z
|
appspider/spiders/wenshucourt/__init__.py
|
pingfangx/appspider
|
d974cfbf9d926b686e4e5f550f55d045955bb370
|
[
"MIT"
] | null | null | null |
appspider/spiders/wenshucourt/__init__.py
|
pingfangx/appspider
|
d974cfbf9d926b686e4e5f550f55d045955bb370
|
[
"MIT"
] | 6
|
2018-07-25T16:30:40.000Z
|
2020-08-01T18:36:12.000Z
|
# -*- coding: utf-8 -*-
# @Time : 2018/4/12 20:16
# @Author : ddvv
# @Site : http://ddvv.life
# @File : __init__.py.py
# @Software: PyCharm
def main():
    """Entry point placeholder — intentionally does nothing yet."""
    return None


if __name__ == "__main__":
    main()
| 16.230769
| 29
| 0.545024
| 28
| 211
| 3.678571
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.075949
| 0.251185
| 211
| 13
| 30
| 16.230769
| 0.575949
| 0.649289
| 0
| 0
| 0
| 0
| 0.117647
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| true
| 0.25
| 0
| 0
| 0.25
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 4
|
488e6f11e1a5bd7e3145a73f5e24ca20e637d24c
| 73
|
py
|
Python
|
tests/rest_framework/__init__.py
|
A-Ashiq/django-filter
|
5a2548afebba2d30f5fede12e7bf4e3f6ef16920
|
[
"BSD-3-Clause"
] | 2,512
|
2016-02-19T11:48:54.000Z
|
2022-03-30T03:26:15.000Z
|
tests/rest_framework/__init__.py
|
A-Ashiq/django-filter
|
5a2548afebba2d30f5fede12e7bf4e3f6ef16920
|
[
"BSD-3-Clause"
] | 979
|
2015-11-23T08:14:39.000Z
|
2022-03-26T02:54:18.000Z
|
tests/rest_framework/__init__.py
|
A-Ashiq/django-filter
|
5a2548afebba2d30f5fede12e7bf4e3f6ef16920
|
[
"BSD-3-Clause"
] | 572
|
2016-02-25T16:07:00.000Z
|
2022-02-24T20:49:47.000Z
|
# Points Django at the explicit AppConfig for this test app (the
# ``default_app_config`` convention — deprecated since Django 3.2, where a
# lone AppConfig is auto-discovered; kept presumably for older Django).
default_app_config = 'tests.rest_framework.apps.RestFrameworkTestConfig'
| 36.5
| 72
| 0.876712
| 8
| 73
| 7.625
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.041096
| 73
| 1
| 73
| 73
| 0.871429
| 0
| 0
| 0
| 0
| 0
| 0.671233
| 0.671233
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
48a12d2bbb12271a0fabea408592390227552cb9
| 39,329
|
py
|
Python
|
parserutils/tests/collection_tests.py
|
consbio/parserutils
|
50e0e4b6afd807a7cf230b2b2fccfe0b287bc2ab
|
[
"BSD-3-Clause"
] | null | null | null |
parserutils/tests/collection_tests.py
|
consbio/parserutils
|
50e0e4b6afd807a7cf230b2b2fccfe0b287bc2ab
|
[
"BSD-3-Clause"
] | 1
|
2021-03-03T23:02:44.000Z
|
2021-03-17T19:20:32.000Z
|
parserutils/tests/collection_tests.py
|
consbio/parserutils
|
50e0e4b6afd807a7cf230b2b2fccfe0b287bc2ab
|
[
"BSD-3-Clause"
] | null | null | null |
import unittest
from copy import deepcopy
from ..collections import accumulate_items, setdefaults
from ..collections import filter_empty, flatten_items
from ..collections import remove_duplicates, rfind, rindex, reduce_value, wrap_value
class DictsTestCase(unittest.TestCase):
def test_accumulate_items(self):
    """ Tests accumulate_items with general inputs """
    # Test with empty items: any falsy/empty iterable yields an empty dict.
    self.assertEqual(accumulate_items(None), {})
    self.assertEqual(accumulate_items(b''), {})
    self.assertEqual(accumulate_items(''), {})
    self.assertEqual(accumulate_items(dict()), {})
    self.assertEqual(accumulate_items(list()), {})
    self.assertEqual(accumulate_items(set()), {})
    self.assertEqual(accumulate_items(tuple()), {})
    self.assertEqual(accumulate_items(x for x in b''), {})
    self.assertEqual(accumulate_items((x for x in '')), {})
    # Test with items containing single key/val
    # (each (key, val) pair becomes key -> [val])
    self.assertEqual(accumulate_items({(None, None)}), {None: [None]})
    self.assertEqual(accumulate_items([(b'', '')]), {b'': ['']})
    self.assertEqual(accumulate_items((['', b''],)), {'': [b'']})
    self.assertEqual(accumulate_items((k, v) for k, v in [['key', 'val']]), {'key': ['val']})
    self.assertEqual(accumulate_items(((k, v) for k, v in [(0, 1)])), {0: [1]})
    # Test with items containing single key/val, reducing each
    # (per these assertions, reduce_each=True unwraps one-element value
    # lists to the bare value; multi-element lists stay lists — see below)
    self.assertEqual(accumulate_items({(None, None)}, reduce_each=True), {None: None})
    self.assertEqual(accumulate_items([(b'', '')], reduce_each=True), {b'': ''})
    self.assertEqual(accumulate_items((['', b''],), reduce_each=True), {'': b''})
    self.assertEqual(accumulate_items(((k, v) for k, v in [['key', 'val']]), reduce_each=True), {'key': 'val'})
    self.assertEqual(accumulate_items(((k, v) for k, v in [(0, 1)]), reduce_each=True), {0: 1})
    # Test with items containing single vals under multiple keys, with and without reduction
    self.assertEqual(
        accumulate_items([('key1', 'val'), ('key2', 'val'), ('key3', 'val')]),
        {'key1': ['val'], 'key2': ['val'], 'key3': ['val']}
    )
    self.assertEqual(
        accumulate_items([('key1', 'val'), ('key2', 'val'), ('key3', 'val')], reduce_each=True),
        {'key1': 'val', 'key2': 'val', 'key3': 'val'}
    )
    # Test with items containing multiple vals under a single key, with and without reduction
    self.assertEqual(
        accumulate_items([('key', 'val1'), ('key', 'val2'), ('key', 'val3')]),
        {'key': ['val1', 'val2', 'val3']}
    )
    self.assertEqual(
        accumulate_items([('key', 'val1'), ('key', 'val2'), ('key', 'val3')], reduce_each=True),
        {'key': ['val1', 'val2', 'val3']}
    )
    self.assertEqual(
        accumulate_items(
            [('key', 'val1'), ('key', 'val2'), ('key2', ['val1', 'val2']), ('key3', 'val3')], reduce_each=True
        ),
        {'key': ['val1', 'val2'], 'key2': ['val1', 'val2'], 'key3': 'val3'}
    )
    # Test with items containing multiple vals under multiple keys, with and without reduction
    self.assertEqual(
        accumulate_items([('key3', 'val1'), ('key2', 'val2'), ('key1', 'val3')]),
        {'key1': ['val3'], 'key2': ['val2'], 'key3': ['val1']}
    )
    self.assertEqual(
        accumulate_items([('key3', 'val1'), ('key2', 'val2'), ('key1', 'val3')], reduce_each=True),
        {'key1': 'val3', 'key2': 'val2', 'key3': 'val1'}
    )
def test_setdefaults(self):
    """ Tests setdefaults with general inputs """
    # Test with invalid dict and empty defaults
    # (per these assertions, non-dict first arguments pass through unchanged)
    self.assertEqual(setdefaults(None, None), None)
    self.assertEqual(setdefaults(b'', None), b'')
    self.assertEqual(setdefaults('', None), '')
    self.assertEqual(setdefaults({}, None), {})
    self.assertEqual([x for x in setdefaults((c for c in 'abc'), None)], [c for c in 'abc'])
    # Test with invalid dict and valid defaults
    self.assertEqual(setdefaults(None, 'x'), None)
    self.assertEqual(setdefaults(b'', 'y'), b'')
    self.assertEqual(setdefaults('', 'z'), '')
    self.assertEqual([x for x in setdefaults((c for c in 'abc'), 'xyz')], [c for c in 'abc'])
    # Test with empty dict and valid defaults
    # (string/list defaults insert keys with value None; dict defaults keep
    # their explicit values, falsy ones included)
    self.assertEqual(setdefaults({}, 'a'), {'a': None})
    self.assertEqual(setdefaults({}, ['b']), {'b': None})
    self.assertEqual(setdefaults({}, {'c': None}), {'c': None})
    self.assertEqual(setdefaults({}, {'c': False}), {'c': False})
    self.assertEqual(setdefaults({}, {'c': True}), {'c': True})
    self.assertEqual(setdefaults({}, {'c': 0}), {'c': 0})
    self.assertEqual(setdefaults({}, {'c': 1}), {'c': 1})
    self.assertEqual(setdefaults({}, {'c': 2.3}), {'c': 2.3})
    self.assertEqual(setdefaults({}, {'d': 'ddd'}), {'d': 'ddd'})
    self.assertEqual(setdefaults({}, [{'e': 'eee'}, {'f': 'fff'}]), {'e': 'eee', 'f': 'fff'})
    self.assertEqual(setdefaults({}, {'x': 'xxx', 'y': 'yyy'}), {'x': 'xxx', 'y': 'yyy'})
    self.assertEqual(setdefaults({'z': 'zzz'}, None), {'z': 'zzz'})
def test_setdefaults_str(self):
    """ Tests setdefaults with defaults specified as strings """
    # A dotted string ('a.b') describes a nested key path; the leaf gets
    # None unless an existing value already occupies part of the path.
    inputs = 'a.b'
    d = {}
    o = deepcopy(setdefaults(d, inputs))
    self.assertEqual(d, o)  # Output should equal input
    self.assertEqual(o, {'a': {'b': None}})  # Test against a hard value
    self.assertEqual(setdefaults(d, inputs), o)  # Test unchanged with multiple runs
    d = {'a': 'xxx'}
    o = deepcopy(setdefaults(d, inputs))
    self.assertEqual(d, o)
    self.assertEqual(o, {'a': 'xxx'})
    self.assertEqual(setdefaults(d, inputs), o)
    d = {'a': {'b': 'xxx'}}
    o = deepcopy(setdefaults(d, inputs))
    self.assertEqual(d, o)
    self.assertEqual(o, {'a': {'b': 'xxx'}})
    self.assertEqual(setdefaults(d, inputs), o)
    d = {'c': 'xxx'}
    o = deepcopy(setdefaults(d, inputs))
    self.assertEqual(d, o)
    self.assertEqual(o, {'a': {'b': None}, 'c': 'xxx'})
    self.assertEqual(setdefaults(d, inputs), o)
    # Three-level path: 'a.b.c'
    inputs = 'a.b.c'
    d = {}
    o = deepcopy(setdefaults(d, inputs))
    self.assertEqual(d, o)
    self.assertEqual(o, {'a': {'b': {'c': None}}})
    self.assertEqual(setdefaults(d, inputs), o)
    d = {'a': {'b': {'c': 'xxx'}}}
    o = deepcopy(setdefaults(d, inputs))
    self.assertEqual(d, o)
    self.assertEqual(o, {'a': {'b': {'c': 'xxx'}}})
    self.assertEqual(setdefaults(d, inputs), o)
def test_setdefaults_dict_nested(self):
    """ Tests setdefaults with nested defaults specified as dicts """
    # Dotted keys expand into nested dicts sharing the 'a' parent
    inputs = {'a.b': 'bbb', 'a.c': 'ccc'}
    d = {}
    o = deepcopy(setdefaults(d, inputs))
    self.assertEqual(d, o)  # Output should equal input
    self.assertEqual(o, {'a': {'b': 'bbb', 'c': 'ccc'}})  # Test against a hard value
    self.assertEqual(setdefaults(d, inputs), o)  # Test unchanged with multiple runs
    # A non-dict value at 'a' blocks the nested defaults entirely
    d = {'a': 'xxx'}
    o = deepcopy(setdefaults(d, inputs))
    self.assertEqual(d, o)
    self.assertEqual(o, {'a': 'xxx'})
    self.assertEqual(setdefaults(d, inputs), o)
    # An existing 'a.b' is preserved while the missing 'a.c' is defaulted
    d = {'a': {'b': 'xxx'}}
    o = deepcopy(setdefaults(d, inputs))
    self.assertEqual(d, o)
    self.assertEqual(o['a']['c'], 'ccc')
    self.assertEqual(o['a']['b'], 'xxx')
    self.assertEqual(setdefaults(d, inputs), o)
    # Mirror case: existing 'a.c' preserved, missing 'a.b' defaulted
    d = {'a': {'c': 'xxx'}}
    o = deepcopy(setdefaults(d, inputs))
    self.assertEqual(d, o)
    self.assertEqual(o['a']['b'], 'bbb')
    self.assertEqual(o['a']['c'], 'xxx')
    self.assertEqual(setdefaults(d, inputs), o)
    # An unrelated top-level key is untouched while defaults are added
    d = {'c': 'xxx'}
    o = deepcopy(setdefaults(d, inputs))
    self.assertEqual(d, o)
    self.assertEqual(o['a'], {'b': 'bbb', 'c': 'ccc'})
    self.assertEqual(o['a']['c'], 'ccc')
    self.assertEqual(o['c'], 'xxx')
    self.assertEqual(setdefaults(d, inputs), o)
    # Deeper nesting with non-string default values (bool and list)
    inputs = {'a.b.c': True, 'd.e.f': [123.456]}
    d = {}
    o = deepcopy(setdefaults(d, inputs))
    self.assertEqual(d, o)
    self.assertEqual(o['a'], {'b': {'c': True}})
    self.assertEqual(o['d'], {'e': {'f': [123.456]}})
    self.assertEqual(setdefaults(d, inputs), o)
    # An existing leaf under 'a' is preserved; the 'd' branch is still defaulted
    d = {'a': {'b': {'c': 'xxx'}}}
    o = deepcopy(setdefaults(d, inputs))
    self.assertEqual(d, o)
    self.assertEqual(o['a'], {'b': {'c': 'xxx'}})
    self.assertEqual(o['d'], {'e': {'f': [123.456]}})
    self.assertEqual(setdefaults(d, inputs), o)
    # Mirror case: existing leaf under 'd' preserved; 'a' branch defaulted
    d = {'d': {'e': {'f': 'xxx'}}}
    o = deepcopy(setdefaults(d, inputs))
    self.assertEqual(d, o)
    self.assertEqual(o['a'], {'b': {'c': True}})
    self.assertEqual(o['d'], {'e': {'f': 'xxx'}})
    self.assertEqual(setdefaults(d, inputs), o)
def test_setdefaults_dict_overlapping(self):
    """ Tests setdefaults with overlapping defaults specified as dicts """
    # Both dotted keys share the 'a' root but diverge at the second level
    inputs = {'a.b.c': 'ccc', 'a.c.d.e': 'eee'}
    d = {}
    o = deepcopy(setdefaults(d, inputs))
    self.assertEqual(d, o)  # Output should equal input
    self.assertEqual(o, {'a': {'b': {'c': 'ccc'}, 'c': {'d': {'e': 'eee'}}}})  # Test against a hard value
    self.assertEqual(setdefaults(d, inputs), o)  # Test unchanged with multiple runs
    # A non-dict value at 'a' blocks both branches
    d = {'a': 'xxx'}
    o = deepcopy(setdefaults(d, inputs))
    self.assertEqual(d, o)
    self.assertEqual(o, {'a': 'xxx'})
    self.assertEqual(setdefaults(d, inputs), o)
    # Existing 'a.b' is preserved; the 'a.c.d.e' branch is still defaulted
    d = {'a': {'b': 'xxx'}}
    o = deepcopy(setdefaults(d, inputs))
    self.assertEqual(d, o)
    self.assertEqual(o['a']['b'], 'xxx')
    self.assertEqual(o['a']['c'], {'d': {'e': 'eee'}})
    self.assertEqual(setdefaults(d, inputs), o)
    # Mirror case: existing 'a.c' preserved; 'a.b.c' is defaulted
    d = {'a': {'c': 'xxx'}}
    o = deepcopy(setdefaults(d, inputs))
    self.assertEqual(d, o)
    self.assertEqual(o['a']['b'], {'c': 'ccc'})
    self.assertEqual(o['a']['c'], 'xxx')
    self.assertEqual(setdefaults(d, inputs), o)
    # An unrelated top-level key is untouched while defaults are added
    d = {'c': 'xxx'}
    o = deepcopy(setdefaults(d, inputs))
    self.assertEqual(d, o)
    self.assertEqual(o['a'], {'b': {'c': 'ccc'}, 'c': {'d': {'e': 'eee'}}})
    self.assertEqual(o['c'], 'xxx')
    self.assertEqual(setdefaults(d, inputs), o)
def test_setdefaults_other(self):
    """ Tests setdefaults with defaults specified as list, set, and tuple """
    # Two dotted strings in a list expand into sibling keys under 'a'
    inputs = ['a.b', 'a.c']
    d = {}
    o = deepcopy(setdefaults(d, inputs))
    self.assertEqual(d, o)  # Output should equal input
    self.assertEqual(setdefaults(d, inputs), {'a': {'b': None, 'c': None}})  # Test against a hard value
    self.assertEqual(setdefaults(d, inputs), o)  # Test unchanged with multiple runs
    # A non-dict value at 'a' blocks both defaults
    d = {'a': 'xxx'}
    o = deepcopy(setdefaults(d, inputs))
    self.assertEqual(d, o)
    self.assertEqual(setdefaults(d, inputs), {'a': 'xxx'})
    self.assertEqual(setdefaults(d, inputs), o)
    # Existing 'a.b' is preserved; the missing 'a.c' is defaulted to None
    d = {'a': {'b': 'xxx'}}
    o = deepcopy(setdefaults(d, inputs))
    self.assertEqual(d, o)
    self.assertEqual(setdefaults(d, inputs)['a']['b'], 'xxx')
    self.assertEqual(setdefaults(d, inputs)['a']['c'], None)
    self.assertEqual(setdefaults(d, inputs), o)
    # An unrelated top-level key is untouched while defaults are added
    d = {'c': 'xxx'}
    o = deepcopy(setdefaults(d, inputs))
    self.assertEqual(d, o)
    self.assertEqual(setdefaults(d, inputs)['a'], {'b': None, 'c': None})
    self.assertEqual(setdefaults(d, inputs)['c'], 'xxx')
    self.assertEqual(setdefaults(d, inputs), o)
class ListTupleSetTestCase(unittest.TestCase):
    """ Tests for the sequence utilities: filter_empty, flatten_items,
    remove_duplicates, rfind, rindex, reduce_value and wrap_value.

    Fix applied in review: the "multiple match" section of test_rindex
    called rfind instead of rindex (copy-paste mistake), so rindex's
    multiple-match behavior was never actually exercised.
    """

    def test_filter_empty(self):
        """ Tests filter_empty with general inputs """
        # Test None case: nothing to filter but default applies
        self.assertEqual(filter_empty(None), None)
        self.assertEqual(filter_empty(None, 'None'), 'None')
        # Test empty string case: nothing to filter but default applies
        self.assertEqual(filter_empty(b''), None)
        self.assertEqual(filter_empty(b'', 'None'), 'None')
        self.assertEqual(filter_empty(''), None)
        self.assertEqual(filter_empty('', 'None'), 'None')
        # Test empty collections case: nothing to filter but default applies
        self.assertEqual(filter_empty(list()), None)
        self.assertEqual(filter_empty(list(), 'None'), 'None')
        self.assertEqual(filter_empty(set()), None)
        self.assertEqual(filter_empty(set(), 'None'), 'None')
        self.assertEqual(filter_empty(tuple()), None)
        self.assertEqual(filter_empty(tuple(), 'None'), 'None')
        self.assertEqual(filter_empty(x for x in ''), None)
        self.assertEqual(filter_empty((x for x in ''), 'None'), 'None')
        # Test when there's nothing to filter
        self.assertEqual(filter_empty(False), False)
        self.assertEqual(filter_empty(True), True)
        self.assertEqual(filter_empty(0), 0)
        self.assertEqual(filter_empty(1), 1)
        self.assertEqual(filter_empty('a'), 'a')
        self.assertEqual(filter_empty('abc'), 'abc')
        self.assertEqual(filter_empty({'a': 'aaa'}), {'a': 'aaa'})
        self.assertEqual(filter_empty({'b': 'bbb', 'c': 'ccc'}), {'b': 'bbb', 'c': 'ccc'})
        self.assertEqual(filter_empty(c for c in 'abc'), ['a', 'b', 'c'])
        self.assertEqual(filter_empty((c for c in 'abc')), ['a', 'b', 'c'])
        # Test when there's nothing to filter, but with unused default
        self.assertEqual(filter_empty(0, '0'), 0)
        self.assertEqual(filter_empty(1, '1'), 1)
        self.assertEqual(filter_empty('a', 'None'), 'a')
        self.assertEqual(filter_empty('abc', 'None'), 'abc')
        self.assertEqual(filter_empty((c for c in 'abc'), 'None'), ['a', 'b', 'c'])
        # Test with filterable values
        self.assertEqual(filter_empty([None]), None)
        self.assertEqual(filter_empty({None}), None)
        self.assertEqual(filter_empty((None,)), None)
        self.assertEqual(filter_empty([b'']), None)
        self.assertEqual(filter_empty({b''}), None)
        self.assertEqual(filter_empty((b'',)), None)
        self.assertEqual(filter_empty(['']), None)
        self.assertEqual(filter_empty({''}), None)
        self.assertEqual(filter_empty(('',)), None)
        self.assertEqual(filter_empty(x for x in (None, b'', '')), None)
        self.assertEqual(filter_empty((x for x in (None, b'', ''))), None)
        # Test with filterable values and defaults
        self.assertEqual(filter_empty([None, b'', ''], {}), {})
        self.assertEqual(filter_empty({b'', None, ''}, []), [])
        self.assertEqual(filter_empty((b'', '', None), []), [])
        self.assertEqual(filter_empty([list(), set(), tuple(), dict()], {}), {})
        self.assertEqual(filter_empty((tuple(), dict(), list(), set()), []), [])
        self.assertEqual(filter_empty(x for x in (None, b'', '')), None)
        self.assertEqual(filter_empty((x for x in (tuple(), dict(), list(), set())), {}), {})
        # Test with values that should not be filtered
        self.assertEqual(filter_empty([0]), [0])
        self.assertEqual(filter_empty([1]), [1])
        self.assertEqual(filter_empty(['x']), ['x'])
        self.assertEqual(filter_empty({'y'}), {'y'})
        self.assertEqual(filter_empty(('z',)), ('z',))
        self.assertEqual(filter_empty(c for c in '0'), ['0'])
        self.assertEqual(filter_empty((c for c in '1')), ['1'])
        # Test with combinations of values
        self.assertEqual(filter_empty([None, 0, '', 1]), [0, 1])
        self.assertEqual(filter_empty(['a', None, 'b', b'', 'c']), ['a', 'b', 'c'])
        self.assertEqual(filter_empty({None, '', 'a', 'b', 'c'}), {'a', 'b', 'c'})
        self.assertEqual(filter_empty(('a', 'b', None, 'c', b'')), ('a', 'b', 'c'))
        self.assertEqual(filter_empty(t for t in ('a', 'b', tuple(), 'c', set())), ['a', 'b', 'c'])
        self.assertEqual(filter_empty((t for t in ('a', 'b', 'c', set(), list()))), ['a', 'b', 'c'])
        # Test with non-filterable collections
        self.assertEqual(filter_empty({'a': 'aaa'}), {'a': 'aaa'})
        self.assertEqual([x for x in filter_empty(c for c in 'abc')], ['a', 'b', 'c'])
        self.assertEqual([x for x in filter_empty((c for c in 'xyz'))], ['x', 'y', 'z'])

    def test_flatten_items(self):
        """ Tests flatten_items with general inputs """
        # Test None case: nothing to filter but default applies
        self.assertEqual(flatten_items(None), None)
        self.assertEqual(flatten_items(None, True), None)
        # Test empty string case: nothing to filter but default applies
        self.assertEqual(flatten_items(b''), b'')
        self.assertEqual(flatten_items(b'', True), b'')
        self.assertEqual(flatten_items(''), '')
        self.assertEqual(flatten_items('', True), '')
        self.assertEqual(flatten_items(dict()), dict())
        self.assertEqual(flatten_items(dict(), True), dict())
        # Test empty collections case: nothing to flatten but default applies
        self.assertEqual(flatten_items(list()), list())
        self.assertEqual(flatten_items(list(), True), list())
        self.assertEqual(flatten_items(set()), set())
        self.assertEqual(flatten_items(set(), True), set())
        self.assertEqual(flatten_items(tuple()), tuple())
        self.assertEqual(flatten_items(tuple(), True), tuple())
        # Test when there's nothing to flatten
        self.assertEqual(flatten_items(False), False)
        self.assertEqual(flatten_items(False, True), False)
        self.assertEqual(flatten_items(True), True)
        self.assertEqual(flatten_items(True, True), True)
        self.assertEqual(flatten_items(0), 0)
        self.assertEqual(flatten_items(0, True), 0)
        self.assertEqual(flatten_items(1), 1)
        self.assertEqual(flatten_items(1, True), 1)
        self.assertEqual(flatten_items('a'), 'a')
        self.assertEqual(flatten_items('a', True), 'a')
        self.assertEqual(flatten_items('abc'), 'abc')
        self.assertEqual(flatten_items('abc', True), 'abc')
        self.assertEqual(flatten_items({'a': 'aaa'}), {'a': 'aaa'})
        self.assertEqual(flatten_items({'a': 'aaa'}, True), {'a': 'aaa'})
        self.assertEqual(flatten_items({'b': 'bbb', 'c': 'ccc'}), {'b': 'bbb', 'c': 'ccc'})
        self.assertEqual(flatten_items({'b': 'bbb', 'c': 'ccc'}, True), {'b': 'bbb', 'c': 'ccc'})
        # Test with single value collections with nothing to flatten, without defaults
        for flat in (None, b'', '', 'abc', 0, 1, True, False):
            self.assertEqual(flatten_items([flat]), [flat])
            self.assertEqual(flatten_items([flat], True), [flat])
            self.assertEqual(flatten_items({flat}), {flat})
            self.assertEqual(flatten_items({flat}, True), {flat})
            self.assertEqual(flatten_items((flat,)), (flat,))
            self.assertEqual(flatten_items((flat,), True), (flat,))
            self.assertEqual(flatten_items(f for f in [flat]), [flat])
            self.assertEqual(flatten_items((f for f in (flat,)), True), [flat])
        # Test with multiple values with nothing to flatten
        for flat in ([None, b'', ''], (False, True, 0, 1, 'a'), {'False', 'True', '0', '1', 'a'}):
            for flat_type in (list, tuple, set):
                flat_in = flat_type(flat)
                flat_out = flat_in
                self.assertEqual(flatten_items(flat_in), flat_out)
                self.assertEqual(flatten_items(flat_in, True), flat_out)
                self.assertEqual(flatten_items(f for f in flat_in), list(flat_out))
                self.assertEqual(flatten_items((f for f in flat_in), True), list(flat_out))
        # Test with collection values (some unhashable) that should be flattened, but not recursed
        for flat_type in (list, tuple):
            flat_in = flat_type([tuple(), 'a', set(), 'bc', list(), b'xyz', dict()])
            flat_out = flat_in
            self.assertEqual(flatten_items(flat_in), flat_out)
            self.assertEqual(flatten_items(flat_in, True), flat_out)
            self.assertEqual(flatten_items(f for f in flat_in), list(flat_out))
            self.assertEqual(flatten_items((f for f in flat_in), True), list(flat_out))
        # Test with values that should be flattened and recursed in many combinations
        self.assertEqual(flatten_items([('a', 'b', 'c'), 'd', {'e'}, ['f', 'g']]), ['a', 'b', 'c', 'd', 'e', 'f', 'g'])
        self.assertEqual(flatten_items((0, [1, 2, 3], 4, 5, {6}, 7)), (0, 1, 2, 3, 4, 5, 6, 7))
        self.assertEqual(
            flatten_items(x for x in ((False, True), {'xyz'}, 7, 8, 9, ['10'])), [False, True, 'xyz', 7, 8, 9, '10']
        )
        not_yet_flat = [tuple(c for c in 'abc'), 'd', list(c for c in '123'), [None, {False}, {True}]]
        for flat_type in (list, tuple):
            flat_to_recurse = flat_type(not_yet_flat)
            flat_no_recurse = flat_type(['a', 'b', 'c', 'd', '1', '2', '3', None, {False}, {True}])
            flat_after_recurse = flat_type(['a', 'b', 'c', 'd', '1', '2', '3', None, False, True])
            self.assertEqual(flatten_items(flat_to_recurse), flat_no_recurse)
            self.assertEqual(flatten_items(flat_to_recurse, True), flat_after_recurse)
            self.assertEqual(flatten_items(f for f in flat_to_recurse), list(flat_no_recurse))
            self.assertEqual(flatten_items((f for f in flat_to_recurse), True), list(flat_after_recurse))

    def test_remove_duplicates(self):
        """ Tests remove_duplicates with general inputs """
        # Test with non-iterable values
        self.assertEqual(remove_duplicates(None), None)
        self.assertEqual(remove_duplicates(b''), b'')
        self.assertEqual(remove_duplicates(''), '')
        self.assertEqual(remove_duplicates(0), 0)
        self.assertEqual(remove_duplicates(1), 1)
        self.assertEqual(remove_duplicates(False), False)
        self.assertEqual(remove_duplicates(True), True)
        self.assertEqual(remove_duplicates([]), [])
        self.assertEqual(remove_duplicates({}), {})
        self.assertEqual(remove_duplicates(tuple()), tuple())
        self.assertEqual(remove_duplicates(set()), set())
        # Test with iterable values with nothing to remove
        self.assertEqual(remove_duplicates('abc'), 'abc')
        self.assertEqual(remove_duplicates(b'abc'), b'abc')
        self.assertEqual(remove_duplicates(['a', 'b', 'c']), ['a', 'b', 'c'])
        self.assertEqual(remove_duplicates(('a', 'b', 'c')), ('a', 'b', 'c'))
        self.assertEqual(remove_duplicates({'a', 'b', 'c'}), {'a', 'b', 'c'})
        self.assertEqual(remove_duplicates(x for x in 'abc'), ['a', 'b', 'c'])
        self.assertEqual(remove_duplicates({'a': 'aaa'}), {'a': 'aaa'})
        self.assertEqual(remove_duplicates({'b': 'bbb', 'c': 'ccc'}), {'b': 'bbb', 'c': 'ccc'})
        self.assertEqual(remove_duplicates([('a',), ('b', 'c')]), [('a',), ('b', 'c')])
        # Test with iterable unhashable values with nothing to remove
        self.assertEqual(remove_duplicates([{'a', 'b', 'c'}], is_unhashable=True), [{'a', 'b', 'c'}])
        self.assertEqual(remove_duplicates([{'a': 'bc'}, {'d': 'ef'}], is_unhashable=True), [{'a': 'bc'}, {'d': 'ef'}])
        # Test that unexpected unhashable values raise TypeError
        with self.assertRaises(TypeError):
            remove_duplicates([{'a', 'b', 'c'}], is_unhashable=False)
        with self.assertRaises(TypeError):
            remove_duplicates([{'a': 'bc'}, {'d': 'ef'}], is_unhashable=False)
        # Test with iterable values with duplicates to remove
        str_test = u'abcabcdefdefghiabcdef'
        self.assertEqual(remove_duplicates(str_test), u'abcdefghi')
        self.assertEqual(remove_duplicates(str_test, in_reverse=True), u'ghiabcdef')
        bin_test = b'abcabcdefdefghiabcdef'
        self.assertEqual(remove_duplicates(bin_test), b'abcdefghi')
        self.assertEqual(remove_duplicates(bin_test, in_reverse=True), b'ghiabcdef')
        list_test = [x for x in 'abcabcdefdefghiabcdef']
        self.assertEqual(remove_duplicates(list_test), ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i'])
        self.assertEqual(remove_duplicates(list_test, in_reverse=True), ['g', 'h', 'i', 'a', 'b', 'c', 'd', 'e', 'f'])
        tuple_test = tuple(x for x in 'abcabcdefdefghiabcdef')
        self.assertEqual(remove_duplicates(tuple_test), ('a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i'))
        self.assertEqual(remove_duplicates(tuple_test, in_reverse=True), ('g', 'h', 'i', 'a', 'b', 'c', 'd', 'e', 'f'))
        gen_test = (x for x in 'abcabcdefdefghiabcdef')
        self.assertEqual(remove_duplicates(x for x in gen_test), ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i'])
        gen_test = (x for x in 'abcabcdefdefghiabcdef')
        self.assertEqual(remove_duplicates(gen_test, in_reverse=True), ['g', 'h', 'i', 'a', 'b', 'c', 'd', 'e', 'f'])
        # Test with iterable values with all unhashable duplicates to remove
        list_test = [set(x) for x in 'abcdefabc']
        self.assertEqual(remove_duplicates(list_test, is_unhashable=True), [{'a'}, {'b'}, {'c'}, {'d'}, {'e'}, {'f'}])
        self.assertEqual(
            remove_duplicates(list_test, in_reverse=True, is_unhashable=True),
            [{'d'}, {'e'}, {'f'}, {'a'}, {'b'}, {'c'}]
        )
        tuple_test = tuple(set(x) for x in 'abcdefabc')
        self.assertEqual(remove_duplicates(tuple_test, is_unhashable=True), ({'a'}, {'b'}, {'c'}, {'d'}, {'e'}, {'f'}))
        self.assertEqual(
            remove_duplicates(tuple_test, in_reverse=True, is_unhashable=True),
            ({'d'}, {'e'}, {'f'}, {'a'}, {'b'}, {'c'})
        )
        gen_test = (set(x) for x in 'abcdefabc')
        self.assertEqual(
            remove_duplicates((x for x in gen_test), is_unhashable=True),
            [{'a'}, {'b'}, {'c'}, {'d'}, {'e'}, {'f'}]
        )
        gen_test = (set(x) for x in 'abcdefabc')
        self.assertEqual(
            remove_duplicates(gen_test, in_reverse=True, is_unhashable=True),
            [{'d'}, {'e'}, {'f'}, {'a'}, {'b'}, {'c'}]
        )
        # Test with iterable values with some unhashable duplicates to remove
        list_test = [{'a'}, 'b', {'c'}, 'b', {'a'}]
        self.assertEqual(remove_duplicates(list_test, is_unhashable=True), [{'a'}, 'b', {'c'}])
        self.assertEqual(remove_duplicates(list_test, in_reverse=True, is_unhashable=True), [{'c'}, 'b', {'a'}])
        tuple_test = ({'a'}, 'b', {'c'}, 'b', {'a'})
        self.assertEqual(remove_duplicates(tuple_test, is_unhashable=True), ({'a'}, 'b', {'c'}))
        self.assertEqual(remove_duplicates(tuple_test, in_reverse=True, is_unhashable=True), ({'c'}, 'b', {'a'}))
        gen_test = (x for x in ({'a'}, 'b', {'c'}, 'b', {'a'}))
        self.assertEqual(remove_duplicates(gen_test, is_unhashable=True), [{'a'}, 'b', {'c'}])
        gen_test = (x for x in ({'a'}, 'b', {'c'}, 'b', {'a'}))
        self.assertEqual(remove_duplicates(gen_test, in_reverse=True, is_unhashable=True), [{'c'}, 'b', {'a'}])

    def test_rfind(self):
        """ Tests rfind with general inputs """
        # Test empty cases: nothing to find
        self.assertEqual(rfind(None, 'x'), -1)
        self.assertEqual(rfind(b'', 'x'), -1)
        self.assertEqual(rfind(b'', b'x'), -1)
        self.assertEqual(rfind('', 'x'), -1)
        self.assertEqual(rfind('', b'x'), -1)
        self.assertEqual(rfind(list(), 'x'), -1)
        self.assertEqual(rfind(tuple(), 'x'), -1)
        self.assertEqual(rfind(set(), 'x'), -1)
        self.assertEqual(rfind(dict(), 'x'), -1)
        # Test missing cases: still nothing to find
        self.assertEqual(rfind(b'abc', 'x'), -1)
        self.assertEqual(rfind(b'abc', b'x'), -1)
        self.assertEqual(rfind(u'abc', 'x'), -1)
        self.assertEqual(rfind(u'abc', b'x'), -1)
        self.assertEqual(rfind(['a', 'b', 'c'], 'x'), -1)
        self.assertEqual(rfind(('a', 'b', 'c'), 'x'), -1)
        self.assertEqual(rfind({'a', 'b', 'c'}, 'x'), -1)
        self.assertEqual(rfind({'a': 'aaa', 'b': 'bbb', 'c': 'ccc'}, 'x'), -1)
        # Test invalid cases: still nothing to find (sets/dicts are unordered)
        self.assertEqual(rfind({'x', 'y', 'z'}, 'x'), -1)
        self.assertEqual(rfind({'x': 'xxx', 'y': 'yyy', 'z': 'zzz'}, 'x'), -1)
        # Test one match cases: find at first, middle and last
        self.assertEqual(rfind(b'xyz', 'x'), 0)
        self.assertEqual(rfind(b'yxz', 'x'), 1)
        self.assertEqual(rfind(b'zyx', 'x'), 2)
        self.assertEqual(rfind(b'xyz', b'x'), 0)
        self.assertEqual(rfind(b'yxz', b'x'), 1)
        self.assertEqual(rfind(b'zyx', b'x'), 2)
        self.assertEqual(rfind(u'xyz', 'x'), 0)
        self.assertEqual(rfind(u'yxz', 'x'), 1)
        self.assertEqual(rfind(u'zyx', 'x'), 2)
        self.assertEqual(rfind(u'xyz', b'x'), 0)
        self.assertEqual(rfind(u'yxz', b'x'), 1)
        self.assertEqual(rfind(u'zyx', b'x'), 2)
        self.assertEqual(rfind(['x', 'y', 'z'], 'x'), 0)
        self.assertEqual(rfind(['y', 'x', 'z'], 'x'), 1)
        self.assertEqual(rfind(['z', 'y', 'x'], 'x'), 2)
        self.assertEqual(rfind(('x', 'y', 'z'), 'x'), 0)
        self.assertEqual(rfind(('y', 'x', 'z'), 'x'), 1)
        self.assertEqual(rfind(('z', 'y', 'x'), 'x'), 2)
        # Test multiple match cases: find at middle and last
        self.assertEqual(rfind(b'xxz', 'x'), 1)
        self.assertEqual(rfind(b'xyx', 'x'), 2)
        self.assertEqual(rfind(b'xxz', b'x'), 1)
        self.assertEqual(rfind(b'xyx', b'x'), 2)
        self.assertEqual(rfind(u'xxz', 'x'), 1)
        self.assertEqual(rfind(u'xyx', 'x'), 2)
        self.assertEqual(rfind(u'xxz', b'x'), 1)
        self.assertEqual(rfind(u'xyx', b'x'), 2)
        self.assertEqual(rfind(['x', 'x', 'z'], 'x'), 1)
        self.assertEqual(rfind(['x', 'y', 'x'], 'x'), 2)
        self.assertEqual(rfind(('x', 'x', 'z'), 'x'), 1)
        self.assertEqual(rfind(('x', 'y', 'x'), 'x'), 2)

    def test_rindex(self):
        """ Tests rindex with general inputs """
        # Test valid empty cases: raise ValueError
        for empty in (b'', '', list(), tuple()):
            with self.assertRaises(ValueError):
                rindex(empty, b'x')
            with self.assertRaises(ValueError):
                rindex(empty, u'x')
        # Test invalid empty cases: raise TypeError
        for empty in (None, set(), dict()):
            with self.assertRaises(TypeError):
                rindex(empty, b'x')
            with self.assertRaises(TypeError):
                rindex(empty, u'x')
        # Test valid missing cases: raise ValueError
        for missing in (b'abc', u'abc', ['a', 'b', 'c'], ('a', 'b', 'c')):
            with self.assertRaises(ValueError):
                rindex(missing, b'x')
            with self.assertRaises(ValueError):
                rindex(missing, u'x')
        # Test invalid missing cases: raise TypeError
        for invalid in ({'a', 'b', 'c'}, {'a': 'aaa', 'b': 'bbb', 'c': 'ccc'}):
            with self.assertRaises(TypeError):
                rindex(invalid, 'x')
        # Test invalid matching cases: raise TypeError
        for invalid in ({'x', 'y', 'z'}, {'x': 'xxx', 'y': 'yyy', 'z': 'zzz'}):
            with self.assertRaises(TypeError):
                rindex(invalid, 'x')
        # Test one match cases: find at first, middle and last
        self.assertEqual(rindex(b'xyz', 'x'), 0)
        self.assertEqual(rindex(b'yxz', 'x'), 1)
        self.assertEqual(rindex(b'zyx', 'x'), 2)
        self.assertEqual(rindex(b'xyz', b'x'), 0)
        self.assertEqual(rindex(b'yxz', b'x'), 1)
        self.assertEqual(rindex(b'zyx', b'x'), 2)
        self.assertEqual(rindex(u'xyz', 'x'), 0)
        self.assertEqual(rindex(u'yxz', 'x'), 1)
        self.assertEqual(rindex(u'zyx', 'x'), 2)
        self.assertEqual(rindex(u'xyz', b'x'), 0)
        self.assertEqual(rindex(u'yxz', b'x'), 1)
        self.assertEqual(rindex(u'zyx', b'x'), 2)
        self.assertEqual(rindex(['x', 'y', 'z'], 'x'), 0)
        self.assertEqual(rindex(['y', 'x', 'z'], 'x'), 1)
        self.assertEqual(rindex(['z', 'y', 'x'], 'x'), 2)
        self.assertEqual(rindex(('x', 'y', 'z'), 'x'), 0)
        self.assertEqual(rindex(('y', 'x', 'z'), 'x'), 1)
        self.assertEqual(rindex(('z', 'y', 'x'), 'x'), 2)
        # Test multiple match cases: find at middle and last
        # (fixed: these previously called rfind by copy-paste mistake)
        self.assertEqual(rindex(b'xxz', 'x'), 1)
        self.assertEqual(rindex(b'xyx', 'x'), 2)
        self.assertEqual(rindex(b'xxz', b'x'), 1)
        self.assertEqual(rindex(b'xyx', b'x'), 2)
        self.assertEqual(rindex(u'xxz', 'x'), 1)
        self.assertEqual(rindex(u'xyx', 'x'), 2)
        self.assertEqual(rindex(u'xxz', b'x'), 1)
        self.assertEqual(rindex(u'xyx', b'x'), 2)
        self.assertEqual(rindex(['x', 'x', 'z'], 'x'), 1)
        self.assertEqual(rindex(['x', 'y', 'x'], 'x'), 2)
        self.assertEqual(rindex(('x', 'x', 'z'), 'x'), 1)
        self.assertEqual(rindex(('x', 'y', 'x'), 'x'), 2)

    def test_reduce_value(self):
        """ Tests reduce_value with general inputs """
        # Test None case: nothing to reduce but default applies
        self.assertEqual(reduce_value(None), '')
        self.assertEqual(reduce_value(None, 'None'), 'None')
        # Test empty string case: nothing to reduce but default applies
        self.assertEqual(reduce_value(b''), '')
        self.assertEqual(reduce_value(b'', 'None'), 'None')
        self.assertEqual(reduce_value(''), '')
        self.assertEqual(reduce_value('', 'None'), 'None')
        # Test empty collections case: nothing to reduce but default applies
        self.assertEqual(reduce_value(list()), '')
        self.assertEqual(reduce_value(list(), 'None'), 'None')
        self.assertEqual(reduce_value(set()), '')
        self.assertEqual(reduce_value(set(), 'None'), 'None')
        self.assertEqual(reduce_value(tuple()), '')
        self.assertEqual(reduce_value(tuple(), 'None'), 'None')
        # Test when there's nothing to reduce
        self.assertEqual(reduce_value(0), 0)
        self.assertEqual(reduce_value(1), 1)
        self.assertEqual(reduce_value('a'), 'a')
        self.assertEqual(reduce_value('abc'), 'abc')
        self.assertEqual(reduce_value({'a': 'aaa'}), {'a': 'aaa'})
        self.assertEqual(reduce_value({'b': 'bbb', 'c': 'ccc'}), {'b': 'bbb', 'c': 'ccc'})
        # Test when there's nothing to reduce, but with unused default
        self.assertEqual(reduce_value(0, None), 0)
        self.assertEqual(reduce_value(1, None), 1)
        self.assertEqual(reduce_value('a', None), 'a')
        self.assertEqual(reduce_value('abc', None), 'abc')
        # Test with reducible values (single-item collections unwrap)
        self.assertEqual(reduce_value([None]), None)
        self.assertEqual(reduce_value([b'']), b'')
        self.assertEqual(reduce_value(['']), '')
        self.assertEqual(reduce_value([0]), 0)
        self.assertEqual(reduce_value([1]), 1)
        self.assertEqual(reduce_value(['x']), 'x')
        self.assertEqual(reduce_value({'y'}), 'y')
        self.assertEqual(reduce_value(('z',)), 'z')
        # Test with non-reducible values (multi-item collections pass through)
        self.assertEqual(reduce_value([None, None]), [None, None])
        self.assertEqual(reduce_value([b'', '']), [b'', ''])
        self.assertEqual(reduce_value([0, 0]), [0, 0])
        self.assertEqual(reduce_value([1, 1]), [1, 1])
        self.assertEqual(reduce_value(['a', 'b', 'c']), ['a', 'b', 'c'])
        self.assertEqual(reduce_value({'a', 'b', 'c'}), {'a', 'b', 'c'})
        self.assertEqual(reduce_value(('a', 'b', 'c')), ('a', 'b', 'c'))
        # Test with non-reducible collections
        self.assertEqual(reduce_value({'a': 'aaa'}), {'a': 'aaa'})
        self.assertEqual([x for x in reduce_value(c for c in 'abc')], [c for c in 'abc'])

    def test_wrap_value(self):
        """ Tests wrap_value with general inputs """
        # Test when there's nothing to wrap
        self.assertEqual(wrap_value(None), [])
        self.assertEqual(wrap_value(b''), [])
        self.assertEqual(wrap_value(''), [])
        # Test with wrappable values
        self.assertEqual(wrap_value(0), [0])
        self.assertEqual(wrap_value(1), [1])
        self.assertEqual(wrap_value('a'), ['a'])
        self.assertEqual(wrap_value('abc'), ['abc'])
        self.assertEqual(wrap_value({'a': 'aaa'}), [{'a': 'aaa'}])
        self.assertEqual(wrap_value({'b': 'bbb', 'c': 'ccc'}), [{'b': 'bbb', 'c': 'ccc'}])
        # Test with already wrapped values
        self.assertEqual(wrap_value([0]), [0])
        self.assertEqual(wrap_value([1]), [1])
        self.assertEqual(wrap_value(['x']), ['x'])
        self.assertEqual(wrap_value({'y'}), {'y'})
        self.assertEqual(wrap_value(('z',)), ('z',))
        # Test with empty collections
        self.assertEqual(wrap_value(dict()), [])
        self.assertEqual(wrap_value(list()), [])
        self.assertEqual(wrap_value(set()), [])
        self.assertEqual(wrap_value(tuple()), [])
        # Test with non-empty collections, filtering out empty
        self.assertEqual(wrap_value([None]), [])
        self.assertEqual(wrap_value([b'']), [])
        self.assertEqual(wrap_value(['']), [])
        self.assertEqual(wrap_value([None, None]), [])
        self.assertEqual(wrap_value([b'', '']), [])
        # Test with non-empty collections, preserving empty
        self.assertEqual(wrap_value([None], include_empty=True), [None])
        self.assertEqual(wrap_value([b''], include_empty=True), [b''])
        self.assertEqual(wrap_value([''], include_empty=True), [''])
        self.assertEqual(wrap_value([None, None], include_empty=True), [None, None])
        self.assertEqual(wrap_value([b'', ''], include_empty=True), [b'', ''])
        # Test with non-empty collections
        self.assertEqual(wrap_value([0, 1, 2]), [0, 1, 2])
        self.assertEqual(wrap_value({0, 1, 2}), {0, 1, 2})
        self.assertEqual(wrap_value((0, 1, 2)), (0, 1, 2))
        self.assertEqual(wrap_value(['a', 'b', 'c']), ['a', 'b', 'c'])
        self.assertEqual(wrap_value({'a', 'b', 'c'}), {'a', 'b', 'c'})
        self.assertEqual(wrap_value(('a', 'b', 'c')), ('a', 'b', 'c'))
        # Test with non-wrappable collections
        self.assertEqual([x for x in wrap_value(c for c in 'abc')], [c for c in 'abc'])

    def test_reduce_wrap_value(self):
        """ Tests reduce_value after wrapping """
        values = ([0], [1], ['a'], ['abc'], [{'a': 'aaa'}], [{'b': 'bbb', 'c': 'ccc'}])
        for value in values:
            self.assertEqual(wrap_value(reduce_value(value)), value)

    def test_wrap_reduce_value(self):
        """ Tests wrap_value after reducing """
        values = (0, 1, 'a', 'abc', {'a': 'aaa'}, {'b': 'bbb', 'c': 'ccc'})
        for value in values:
            self.assertEqual(reduce_value(wrap_value(value)), value)
| 47.556227
| 119
| 0.559257
| 4,924
| 39,329
| 4.376726
| 0.038993
| 0.308338
| 0.010719
| 0.073593
| 0.860563
| 0.760986
| 0.672637
| 0.580484
| 0.529581
| 0.513712
| 0
| 0.009996
| 0.236874
| 39,329
| 826
| 120
| 47.613801
| 0.70806
| 0.103867
| 0
| 0.311475
| 0
| 0
| 0.058409
| 0.003592
| 0
| 0
| 0
| 0
| 0.742623
| 1
| 0.02459
| false
| 0
| 0.008197
| 0
| 0.036066
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
48bd61142d021f69ef1afbee8fc0c9bcc1de6924
| 97
|
py
|
Python
|
Trakttv.bundle/Contents/Libraries/Shared/elapsed/__init__.py
|
disrupted/Trakttv.bundle
|
24712216c71f3b22fd58cb5dd89dad5bb798ed60
|
[
"RSA-MD"
] | 1,346
|
2015-01-01T14:52:24.000Z
|
2022-03-28T12:50:48.000Z
|
Trakttv.bundle/Contents/Libraries/Shared/elapsed/__init__.py
|
alcroito/Plex-Trakt-Scrobbler
|
4f83fb0860dcb91f860d7c11bc7df568913c82a6
|
[
"RSA-MD"
] | 474
|
2015-01-01T10:27:46.000Z
|
2022-03-21T12:26:16.000Z
|
Trakttv.bundle/Contents/Libraries/Shared/elapsed/__init__.py
|
alcroito/Plex-Trakt-Scrobbler
|
4f83fb0860dcb91f860d7c11bc7df568913c82a6
|
[
"RSA-MD"
] | 191
|
2015-01-02T18:27:22.000Z
|
2022-03-29T10:49:48.000Z
|
from elapsed.main import setup, reset, clock, format_report, print_report
__version__ = '1.0.0'
| 24.25
| 73
| 0.773196
| 15
| 97
| 4.6
| 0.866667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.035294
| 0.123711
| 97
| 3
| 74
| 32.333333
| 0.776471
| 0
| 0
| 0
| 0
| 0
| 0.051546
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.5
| 0
| 0.5
| 0.5
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
|
0
| 4
|
48caca64ece5ddf28eb6e7ebfd6c164999bd9e1a
| 201
|
py
|
Python
|
Python/count_digit.py
|
OluSure/Hacktoberfest2021-1
|
ad1bafb0db2f0cdeaae8f87abbaa716638c5d2ea
|
[
"MIT"
] | 215
|
2021-10-01T08:18:16.000Z
|
2022-03-29T04:12:03.000Z
|
Python/count_digit.py
|
OluSure/Hacktoberfest2021-1
|
ad1bafb0db2f0cdeaae8f87abbaa716638c5d2ea
|
[
"MIT"
] | 51
|
2021-10-01T08:16:42.000Z
|
2021-10-31T13:51:51.000Z
|
Python/count_digit.py
|
OluSure/Hacktoberfest2021-1
|
ad1bafb0db2f0cdeaae8f87abbaa716638c5d2ea
|
[
"MIT"
] | 807
|
2021-10-01T08:11:45.000Z
|
2021-11-21T18:57:09.000Z
|
# Python program to find how many digits are in a given integer,
# e.g. 123 -> 3, 737327 -> 6.
#
# The original used floor(log10(n)) + 1, which raises a math domain error
# for n <= 0 and can be off by one for large integers because log10 works
# in float precision (e.g. around 10**15 and above). len(str(...)) is
# exact for integers of any size.


def count_digits(number):
    """ Return the number of decimal digits in *number* (sign ignored). """
    return len(str(abs(number)))


if __name__ == '__main__':
    print(count_digits(int(input())))
| 40.2
| 135
| 0.726368
| 40
| 201
| 3.65
| 0.85
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.10119
| 0.164179
| 201
| 4
| 136
| 50.25
| 0.767857
| 0.661692
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0.5
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
|
0
| 4
|
48da7762aaf7b80b67e6478bb0ffad66cc035fe0
| 114
|
py
|
Python
|
BasicPythonPrograms/test.py
|
Pushkar745/PythonProgramming
|
ea60e97b70d46fb63ef203913c8b3f9570232dd3
|
[
"Apache-2.0"
] | null | null | null |
BasicPythonPrograms/test.py
|
Pushkar745/PythonProgramming
|
ea60e97b70d46fb63ef203913c8b3f9570232dd3
|
[
"Apache-2.0"
] | null | null | null |
BasicPythonPrograms/test.py
|
Pushkar745/PythonProgramming
|
ea60e97b70d46fb63ef203913c8b3f9570232dd3
|
[
"Apache-2.0"
] | null | null | null |
# name = input("Enter the name ")  # kept disabled, as in the original
# The original built a set of tuples from undefined bare names (sam, tom,
# harry) and then subscripted it -- that raises NameError, and sets do not
# support indexing anyway. A dict keyed by name is the intended structure
# and supports direct lookup.
phoneBook = {'sam': 99912222, 'tom': 11122222, 'harry': 12299933}
print(phoneBook['sam'])
| 28.5
| 58
| 0.701754
| 15
| 114
| 5.333333
| 0.866667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.240385
| 0.087719
| 114
| 4
| 59
| 28.5
| 0.528846
| 0.254386
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.5
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 4
|
48fd834792ae8e326fe94bde2c5771cb50359178
| 882
|
py
|
Python
|
it_2/soluzione_it2/conto.py
|
StefanoExc/bank_repo
|
82a3da2ce804ce67726d650d92cc594d3009c495
|
[
"MIT"
] | null | null | null |
it_2/soluzione_it2/conto.py
|
StefanoExc/bank_repo
|
82a3da2ce804ce67726d650d92cc594d3009c495
|
[
"MIT"
] | null | null | null |
it_2/soluzione_it2/conto.py
|
StefanoExc/bank_repo
|
82a3da2ce804ce67726d650d92cc594d3009c495
|
[
"MIT"
] | null | null | null |
class Conto:
    """ A bank account with an account number, an owner and a balance. """

    def __init__(self, numero_conto, cliente, saldo=0.00):
        """
        :param numero_conto: the account number
        :param cliente: the account owner; repr() reads its nome_cliente attribute
        :param saldo: the opening balance (defaults to 0.00)
        """
        self.__numero_conto = numero_conto
        self.__cliente = cliente
        self.__saldo = saldo

    # Getter and setter definitions #

    @property
    def numero_conto(self):
        return self.__numero_conto

    @numero_conto.setter
    def numero_conto(self, numero_conto):
        self.__numero_conto = numero_conto

    @property
    def cliente(self):
        return self.__cliente

    @cliente.setter
    def cliente(self, cliente):
        self.__cliente = cliente

    @property
    def saldo(self):
        return self.__saldo

    @saldo.setter
    def saldo(self, saldo):
        self.__saldo = saldo

    def __repr__(self):
        # f-string instead of '+' concatenation: the original raised TypeError
        # whenever numero_conto (or nome_cliente) was not already a str.
        return f"Conto {self.numero_conto} intestato a cliente {self.cliente.nome_cliente} con saldo {self.saldo}€"
| 25.941176
| 138
| 0.62585
| 103
| 882
| 5
| 0.23301
| 0.234951
| 0.174757
| 0.12233
| 0.201942
| 0
| 0
| 0
| 0
| 0
| 0
| 0.004747
| 0.283447
| 882
| 34
| 138
| 25.941176
| 0.808544
| 0.034014
| 0
| 0.36
| 0
| 0
| 0.045936
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.32
| false
| 0
| 0
| 0.16
| 0.52
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 4
|
d299c2f39a30abf842318418f8031bff36cc4d16
| 56
|
py
|
Python
|
nest_py/knoweng/flask/accounts/__init__.py
|
KnowEnG/platform
|
7356fabf5e2db4171ef1f910514436b69ecaa701
|
[
"MIT"
] | 2
|
2020-02-12T22:20:51.000Z
|
2020-07-31T03:19:51.000Z
|
nest_py/knoweng/flask/accounts/__init__.py
|
KnowEnG/platform
|
7356fabf5e2db4171ef1f910514436b69ecaa701
|
[
"MIT"
] | 1
|
2021-06-02T00:29:02.000Z
|
2021-06-02T00:29:02.000Z
|
nest_py/knoweng/flask/accounts/__init__.py
|
KnowEnG/platform
|
7356fabf5e2db4171ef1f910514436b69ecaa701
|
[
"MIT"
] | 1
|
2018-01-03T22:56:27.000Z
|
2018-01-03T22:56:27.000Z
|
"""This package contains code for KnowEnG accounts.
"""
| 18.666667
| 51
| 0.732143
| 7
| 56
| 5.857143
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.142857
| 56
| 2
| 52
| 28
| 0.854167
| 0.857143
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
d29cf2f8335f9f98296972e767763a003e28a942
| 259
|
py
|
Python
|
toontown/estate/DistributedGardenPlotAI.py
|
TheFamiliarScoot/open-toontown
|
678313033174ea7d08e5c2823bd7b473701ff547
|
[
"BSD-3-Clause"
] | 99
|
2019-11-02T22:25:00.000Z
|
2022-02-03T03:48:00.000Z
|
toontown/estate/DistributedGardenPlotAI.py
|
TheFamiliarScoot/open-toontown
|
678313033174ea7d08e5c2823bd7b473701ff547
|
[
"BSD-3-Clause"
] | 42
|
2019-11-03T05:31:08.000Z
|
2022-03-16T22:50:32.000Z
|
toontown/estate/DistributedGardenPlotAI.py
|
TheFamiliarScoot/open-toontown
|
678313033174ea7d08e5c2823bd7b473701ff547
|
[
"BSD-3-Clause"
] | 57
|
2019-11-03T07:47:37.000Z
|
2022-03-22T00:41:49.000Z
|
from direct.directnotify import DirectNotifyGlobal
from direct.distributed.DistributedObjectAI import DistributedObjectAI
class DistributedGardenPlotAI(DistributedObjectAI):
    """Server-side (AI) distributed object representing a garden plot.

    NOTE(review): only the notify logging category is defined here; all other
    behavior is inherited from DistributedObjectAI.
    """
    notify = DirectNotifyGlobal.directNotify.newCategory('DistributedGardenPlotAI')
| 43.166667
| 83
| 0.880309
| 19
| 259
| 12
| 0.578947
| 0.087719
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.069498
| 259
| 5
| 84
| 51.8
| 0.946058
| 0
| 0
| 0
| 0
| 0
| 0.088803
| 0.088803
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.5
| 0
| 1
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
d29fc6d65e348fc08e6fc81315621b39f17a5d32
| 609
|
py
|
Python
|
Aulas/Aula08/poli_ex1.py
|
matheusmenezs/com220
|
d699f00892df1259249ae012aa2a02f63ae0f06f
|
[
"MIT"
] | null | null | null |
Aulas/Aula08/poli_ex1.py
|
matheusmenezs/com220
|
d699f00892df1259249ae012aa2a02f63ae0f06f
|
[
"MIT"
] | null | null | null |
Aulas/Aula08/poli_ex1.py
|
matheusmenezs/com220
|
d699f00892df1259249ae012aa2a02f63ae0f06f
|
[
"MIT"
] | null | null | null |
from abc import ABC, abstractmethod
class Documento(ABC):
    """Abstract base for viewable documents identified by a name."""

    def __init__(self, nome):
        # Name-mangled to keep the attribute private to this class.
        self.__nome = nome

    def getNome(self):
        """Return the document's name exactly as given to the constructor."""
        return self.__nome

    @abstractmethod
    def visualizar(self):
        """Return a description of how this document is displayed."""
class Pdf(Documento):
    """Concrete document displayed with Adobe Acrobat."""
    def visualizar(self):
        return 'Mostra no Adobe Acrobat'
class Word(Documento):
    """Concrete document displayed with Microsoft Word."""
    def visualizar(self):
        return 'Mostra no Word'
if __name__ == "__main__":
    # Demonstration of polymorphism: each subclass provides visualizar().
    documentos = [Pdf('PDF1'), Word('DOC1'), Pdf('PDF2')]
    for doc in documentos:
        print('{}: {}'.format(doc.getNome(), doc.visualizar()))
| 24.36
| 75
| 0.62069
| 67
| 609
| 5.402985
| 0.462687
| 0.066298
| 0.140884
| 0.143646
| 0.220994
| 0.220994
| 0.220994
| 0
| 0
| 0
| 0
| 0.006608
| 0.254516
| 609
| 25
| 75
| 24.36
| 0.790749
| 0
| 0
| 0.157895
| 0
| 0
| 0.103279
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.263158
| false
| 0.052632
| 0.052632
| 0.157895
| 0.631579
| 0.052632
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
|
0
| 4
|
d2c6a60bce75b3b4e4f4c2c3aa919a6c52104d3b
| 333
|
py
|
Python
|
glycowork/motif.py
|
Old-Shatterhand/glycowork
|
544fde03dd38cf95fb97792e050d7ff68f5637b1
|
[
"MIT"
] | 22
|
2021-04-22T23:53:26.000Z
|
2022-03-21T00:36:32.000Z
|
glycowork/motif.py
|
Old-Shatterhand/glycowork
|
544fde03dd38cf95fb97792e050d7ff68f5637b1
|
[
"MIT"
] | 3
|
2021-04-23T13:01:07.000Z
|
2022-03-16T19:13:12.000Z
|
glycowork/motif.py
|
Old-Shatterhand/glycowork
|
544fde03dd38cf95fb97792e050d7ff68f5637b1
|
[
"MIT"
] | 2
|
2021-07-06T14:13:40.000Z
|
2021-12-15T15:12:37.000Z
|
# AUTOGENERATED! DO NOT EDIT! File to edit: 04_motif.ipynb (unless otherwise specified).
__all__ = []
# Cell
from .motif.analysis import *
from .motif.annotate import *
from .motif.graph import *
from .motif.processing import *
from .motif.query import *
from .motif.tokenization import *
from .glycan_data.loader import df_species
| 27.75
| 88
| 0.765766
| 46
| 333
| 5.391304
| 0.586957
| 0.217742
| 0.302419
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.006969
| 0.138138
| 333
| 12
| 89
| 27.75
| 0.857143
| 0.273273
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.875
| 0
| 0.875
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
d2cbe87acfd56206b5acbacd66973eb038ef52e7
| 2,196
|
py
|
Python
|
core/users/serializers.py
|
sharebears/pulsar-core
|
430e879c7a4c4a758641af56da552355e6c76a45
|
[
"MIT"
] | null | null | null |
core/users/serializers.py
|
sharebears/pulsar-core
|
430e879c7a4c4a758641af56da552355e6c76a45
|
[
"MIT"
] | null | null | null |
core/users/serializers.py
|
sharebears/pulsar-core
|
430e879c7a4c4a758641af56da552355e6c76a45
|
[
"MIT"
] | null | null | null |
from core.mixins import Attribute, Serializer
from core.users.permissions import (
ApikeyPermissions,
InvitePermissions,
UserPermissions,
)
class UserSerializer(Serializer):
    """Declarative serialization schema for user objects.

    Plain ``Attribute()`` fields are always rendered; fields constructed with
    ``permission=UserPermissions.MODERATE`` are gated on that permission.
    NOTE(review): exact semantics of ``self_access``/``nested`` live in
    core.mixins.Attribute — not visible from this file.
    """
    id = Attribute()
    username = Attribute()
    enabled = Attribute()
    user_class = Attribute()
    secondary_classes = Attribute()
    uploaded = Attribute()
    downloaded = Attribute()
    email = Attribute(permission=UserPermissions.MODERATE)
    locked = Attribute(permission=UserPermissions.MODERATE)
    invites = Attribute(permission=UserPermissions.MODERATE)
    inviter = Attribute(
        permission=UserPermissions.MODERATE, self_access=False, nested=False
    )
    api_keys = Attribute(permission=UserPermissions.MODERATE, nested=False)
    basic_permissions = Attribute(
        permission=UserPermissions.MODERATE, self_access=False, nested=False
    )
    permissions = Attribute(
        permission=UserPermissions.MODERATE_ADVANCED, nested=False
    )
class InviteSerializer(Serializer):
    """Declarative serialization schema for invite objects.

    Every field is gated on ``InvitePermissions.VIEW_OTHERS``; ``from_ip``
    and ``inviter`` additionally disable self access.
    """
    code = Attribute(permission=InvitePermissions.VIEW_OTHERS)
    email = Attribute(permission=InvitePermissions.VIEW_OTHERS)
    time_sent = Attribute(permission=InvitePermissions.VIEW_OTHERS)
    expired = Attribute(permission=InvitePermissions.VIEW_OTHERS)
    invitee = Attribute(permission=InvitePermissions.VIEW_OTHERS)
    from_ip = Attribute(
        permission=InvitePermissions.VIEW_OTHERS, self_access=False
    )
    inviter = Attribute(
        permission=InvitePermissions.VIEW_OTHERS,
        nested=False,
        self_access=False,
    )
class APIKeySerializer(Serializer):
    """Declarative serialization schema for API key objects.

    Every field is uniformly gated on ``ApikeyPermissions.VIEW_OTHERS``.
    """
    hash = Attribute(permission=ApikeyPermissions.VIEW_OTHERS)
    user_id = Attribute(permission=ApikeyPermissions.VIEW_OTHERS)
    last_used = Attribute(permission=ApikeyPermissions.VIEW_OTHERS)
    ip = Attribute(permission=ApikeyPermissions.VIEW_OTHERS)
    user_agent = Attribute(permission=ApikeyPermissions.VIEW_OTHERS)
    revoked = Attribute(permission=ApikeyPermissions.VIEW_OTHERS)
    permanent = Attribute(permission=ApikeyPermissions.VIEW_OTHERS)
    timeout = Attribute(permission=ApikeyPermissions.VIEW_OTHERS)
    permissions = Attribute(permission=ApikeyPermissions.VIEW_OTHERS)
| 37.862069
| 76
| 0.765483
| 200
| 2,196
| 8.255
| 0.26
| 0.264688
| 0.196245
| 0.21805
| 0.571775
| 0.142944
| 0.082374
| 0.082374
| 0.082374
| 0
| 0
| 0
| 0.155738
| 2,196
| 57
| 77
| 38.526316
| 0.890507
| 0
| 0
| 0.078431
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.039216
| 0
| 0.686275
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 4
|
960c425b50d6d2abbe01bf06fae09064e31c3bb4
| 13,578
|
py
|
Python
|
naoqi_proxy_python_classes/ALPeoplePerception.py
|
FabianGroeger96/hslu-roblab-hs18
|
60fca783609f04dee785a96356646a586a63b768
|
[
"MIT"
] | null | null | null |
naoqi_proxy_python_classes/ALPeoplePerception.py
|
FabianGroeger96/hslu-roblab-hs18
|
60fca783609f04dee785a96356646a586a63b768
|
[
"MIT"
] | null | null | null |
naoqi_proxy_python_classes/ALPeoplePerception.py
|
FabianGroeger96/hslu-roblab-hs18
|
60fca783609f04dee785a96356646a586a63b768
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# Class autogenerated from /home/sam/Downloads/aldebaran_sw/nao/naoqi-sdk-2.1.4.13-linux64/include/alproxies/alpeopleperceptionproxy.h
# by Sammy Pfeiffer's <Sammy.Pfeiffer at student.uts.edu.au> generator
# You need an ALBroker running
from naoqi import ALProxy
class ALPeoplePerception(object):
    """Lazily-connected wrapper around the NAOqi ALPeoplePerception service.

    Every public method (re)connects through :meth:`_ensure_proxy` and then
    forwards the call to the remote service, so the wrapper may be built
    before the service is reachable.
    """

    def __init__(self, session):
        """Store the NAOqi *session*; the service proxy is fetched lazily."""
        self.proxy = None
        self.session = session

    def force_connect(self):
        """Eagerly (re)fetch the ALPeoplePerception service proxy."""
        self.proxy = self.session.service("ALPeoplePerception")

    def _ensure_proxy(self):
        """Return the service proxy, connecting on first use.

        Factored out of every public method below, each of which previously
        repeated the same two-line lazy-connect sequence.
        """
        if not self.proxy:
            self.proxy = self.session.service("ALPeoplePerception")
        return self.proxy

    def getCurrentPeriod(self):
        """Gets the current period.
        :returns int: Refresh period (in milliseconds).
        """
        return self._ensure_proxy().getCurrentPeriod()

    def getCurrentPrecision(self):
        """Gets the current precision.
        :returns float: Precision of the extractor.
        """
        return self._ensure_proxy().getCurrentPrecision()

    def getEventList(self):
        """Get the list of events updated in ALMemory.
        :returns std::vector<std::string>: Array of events updated by this extractor in ALMemory
        """
        return self._ensure_proxy().getEventList()

    def getMaximumBodyHeight(self):
        """Gets the current maximum body height used for human detection (3D mode only).
        :returns float: Maximum height in meters.
        """
        return self._ensure_proxy().getMaximumBodyHeight()

    def getMaximumDetectionRange(self):
        """Gets the current maximum detection and tracking range.
        :returns float: Maximum range in meters.
        """
        return self._ensure_proxy().getMaximumDetectionRange()

    def getMemoryKeyList(self):
        """Get the list of events updated in ALMemory.
        :returns std::vector<std::string>: Array of events updated by this extractor in ALMemory
        """
        return self._ensure_proxy().getMemoryKeyList()

    def getMinimumBodyHeight(self):
        """Gets the current minimum body height used for human detection (3D mode only).
        :returns float: Minimum height in meters.
        """
        return self._ensure_proxy().getMinimumBodyHeight()

    def getMyPeriod(self, name):
        """Gets the period for a specific subscription.
        :param str name: Name of the module which has subscribed.
        :returns int: Refresh period (in milliseconds).
        """
        return self._ensure_proxy().getMyPeriod(name)

    def getMyPrecision(self, name):
        """Gets the precision for a specific subscription.
        :param str name: name of the module which has subscribed
        :returns float: precision of the extractor
        """
        return self._ensure_proxy().getMyPrecision(name)

    def getOutputNames(self):
        """Get the list of values updated in ALMemory.
        :returns std::vector<std::string>: Array of values updated by this extractor in ALMemory
        """
        return self._ensure_proxy().getOutputNames()

    def getSubscribersInfo(self):
        """Gets the parameters given by the module.
        :returns AL::ALValue: Array of names and parameters of all subscribers.
        """
        return self._ensure_proxy().getSubscribersInfo()

    def getTimeBeforePersonDisappears(self):
        """Gets the time after which a person, supposed not to be in the field of view of the camera disappears if it has not been detected.
        :returns float: Time in seconds.
        """
        return self._ensure_proxy().getTimeBeforePersonDisappears()

    def getTimeBeforeVisiblePersonDisappears(self):
        """Gets the time after which a person, supposed to be in the field of view of the camera disappears if it has not been detected.
        :returns float: Time in seconds.
        """
        return self._ensure_proxy().getTimeBeforeVisiblePersonDisappears()

    def isFaceDetectionEnabled(self):
        """Gets the current status of face detection.
        :returns bool: True if face detection is enabled, False otherwise.
        """
        return self._ensure_proxy().isFaceDetectionEnabled()

    def isFastModeEnabled(self):
        """Gets the current status of fast mode.
        :returns bool: True if fast mode is enabled, False otherwise.
        """
        return self._ensure_proxy().isFastModeEnabled()

    def isGraphicalDisplayEnabled(self):
        """Gets the current status of graphical display in Choregraphe.
        :returns bool: True if graphical display is enabled, False otherwise.
        """
        return self._ensure_proxy().isGraphicalDisplayEnabled()

    def isMovementDetectionEnabled(self):
        """Gets the current status of movement detection in Choregraphe.
        :returns bool: True if movement detection is enabled, False otherwise.
        """
        return self._ensure_proxy().isMovementDetectionEnabled()

    def isPaused(self):
        """Gets extractor pause status
        :returns bool: True if the extractor is paused, False if not
        """
        return self._ensure_proxy().isPaused()

    def isProcessing(self):
        """Gets extractor running status
        :returns bool: True if the extractor is currently processing images, False if not
        """
        return self._ensure_proxy().isProcessing()

    def pause(self, status):
        """Changes the pause status of the extractor
        :param bool status: New pause satus
        """
        return self._ensure_proxy().pause(status)

    def ping(self):
        """Just a ping. Always returns true
        :returns bool: returns true
        """
        return self._ensure_proxy().ping()

    def resetPopulation(self):
        """Empties the tracked population.
        """
        return self._ensure_proxy().resetPopulation()

    def setFaceDetectionEnabled(self, enable):
        """Turns face detection on or off.
        :param bool enable: True to turn it on, False to turn it off.
        """
        return self._ensure_proxy().setFaceDetectionEnabled(enable)

    def setFastModeEnabled(self, enable):
        """Turns fast mode on or off.
        :param bool enable: True to turn it on, False to turn it off.
        """
        return self._ensure_proxy().setFastModeEnabled(enable)

    def setGraphicalDisplayEnabled(self, enable):
        """Turns graphical display in Choregraphe on or off.
        :param bool enable: True to turn it on, False to turn it off.
        """
        return self._ensure_proxy().setGraphicalDisplayEnabled(enable)

    def setMaximumBodyHeight(self, height):
        """Sets the maximum human body height (3D mode only).
        :param float height: Maximum height in meters.
        """
        return self._ensure_proxy().setMaximumBodyHeight(height)

    def setMaximumDetectionRange(self, range):
        """Sets the maximum range for human detection and tracking.
        :param float range: Maximum range in meters.
        """
        # NOTE: parameter name 'range' shadows the builtin but is kept for
        # interface compatibility with the autogenerated original.
        return self._ensure_proxy().setMaximumDetectionRange(range)

    def setMinimumBodyHeight(self, height):
        """Sets the minimum human body height (3D mode only).
        :param float height: Minimum height in meters.
        """
        return self._ensure_proxy().setMinimumBodyHeight(height)

    def setMovementDetectionEnabled(self, enable):
        """Turns movement detection on or off.
        :param bool enable: True to turn it on, False to turn it off.
        """
        return self._ensure_proxy().setMovementDetectionEnabled(enable)

    def setTimeBeforePersonDisappears(self, seconds):
        """Sets the time after which a person, supposed not to be in the field of view of the camera disappears if it has not been detected.
        :param float seconds: Time in seconds.
        """
        return self._ensure_proxy().setTimeBeforePersonDisappears(seconds)

    def setTimeBeforeVisiblePersonDisappears(self, seconds):
        """Sets the time after which a person, supposed to be in the field of view of the camera disappears if it has not been detected.
        :param float seconds: Time in seconds.
        """
        return self._ensure_proxy().setTimeBeforeVisiblePersonDisappears(seconds)

    def subscribe(self, name, period, precision):
        """Subscribes to the extractor, which starts writing information to
        memory using the keys described by getOutputNames(). These can be read
        via ALMemory.getData("keyName"). Calling ALMemory.subscribeToEvent()
        with a callback subscribes automatically instead.
        :param str name: Name of the module which subscribes.
        :param int period: Refresh period (in milliseconds) if relevant.
        :param float precision: Precision of the extractor if relevant.
        """
        return self._ensure_proxy().subscribe(name, period, precision)

    def subscribe2(self, name):
        """Name-only overload of :meth:`subscribe`.
        :param str name: Name of the module which subscribes.
        """
        # NOTE(review): forwards to proxy.subscribe (not subscribe2), exactly
        # as in the autogenerated original — NAOqi dispatches on arity.
        return self._ensure_proxy().subscribe(name)

    def unsubscribe(self, name):
        """Unsubscribes from the extractor.
        :param str name: Name of the module which had subscribed.
        """
        return self._ensure_proxy().unsubscribe(name)

    def updatePeriod(self, name, period):
        """Updates the period if relevant.
        :param str name: Name of the module which has subscribed.
        :param int period: Refresh period (in milliseconds).
        """
        return self._ensure_proxy().updatePeriod(name, period)

    def updatePrecision(self, name, precision):
        """Updates the precision if relevant.
        :param str name: Name of the module which has subscribed.
        :param float precision: Precision of the extractor.
        """
        return self._ensure_proxy().updatePrecision(name, precision)

    def version(self):
        """Returns the version of the module.
        :returns str: A string containing the version of the module.
        """
        return self._ensure_proxy().version()
| 38.355932
| 413
| 0.657608
| 1,550
| 13,578
| 5.756774
| 0.143871
| 0.113975
| 0.109268
| 0.085173
| 0.673092
| 0.660988
| 0.637566
| 0.637566
| 0.613359
| 0.602376
| 0
| 0.00119
| 0.257107
| 13,578
| 353
| 414
| 38.464589
| 0.883414
| 0.390264
| 0
| 0.483871
| 1
| 0
| 0.092059
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.251613
| false
| 0
| 0.006452
| 0
| 0.503226
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 4
|
8251c5b3dc0d72ef24d09797185a3b2bc3b9df0b
| 488
|
py
|
Python
|
src/python/psic/common/tests/test_h.py
|
UNCG-CSE/Poststorm_Imagery
|
f963d451050b793d7e8350137e0b145c80e739b5
|
[
"MIT"
] | 7
|
2019-09-21T15:14:58.000Z
|
2019-11-04T18:52:37.000Z
|
src/python/psic/common/tests/test_h.py
|
UNCG-CSE/Poststorm_Imagery
|
f963d451050b793d7e8350137e0b145c80e739b5
|
[
"MIT"
] | 744
|
2019-09-11T01:15:44.000Z
|
2020-06-09T23:56:16.000Z
|
src/python/psic/common/tests/test_h.py
|
UNCG-CSE/Poststorm_Imagery
|
f963d451050b793d7e8350137e0b145c80e739b5
|
[
"MIT"
] | 6
|
2019-10-09T11:08:15.000Z
|
2020-09-16T06:57:33.000Z
|
from unittest import TestCase
from psic.common import h
class TestHelper(TestCase):
    """Tests for the psic.common.h helper module."""

    def test_to_readable_bytes(self):
        """Non-numeric input renders '???'; sizes pick a binary-prefix unit."""
        cases = (
            ('taco', '???'),
            (None, '???'),
            (1, 'KiB'),
            (1024 ** 1 + 1, 'KiB'),
            (1024 ** 2 + 1, 'MiB'),
            (1024 ** 3 + 1, 'GiB'),
        )
        for value, unit in cases:
            self.assertIn(unit, h.to_readable_bytes(value))
| 32.533333
| 64
| 0.651639
| 70
| 488
| 4.328571
| 0.357143
| 0.231023
| 0.346535
| 0.316832
| 0.534653
| 0.389439
| 0.20462
| 0
| 0
| 0
| 0
| 0.04798
| 0.188525
| 488
| 14
| 65
| 34.857143
| 0.717172
| 0
| 0
| 0
| 0
| 0
| 0.045082
| 0
| 0
| 0
| 0
| 0
| 0.6
| 1
| 0.1
| false
| 0
| 0.2
| 0
| 0.4
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
82572d225631b3591fc4f4fb9e4b55a355626503
| 10,418
|
py
|
Python
|
ecl/tests/unit/compute/v2/test_limits.py
|
keiichi-hikita/eclsdk
|
c43afb982fd54eb1875cdc22d46044644d804c4a
|
[
"Apache-2.0"
] | 5
|
2017-04-07T06:23:04.000Z
|
2019-11-19T00:52:34.000Z
|
ecl/tests/unit/compute/v2/test_limits.py
|
keiichi-hikita/eclsdk
|
c43afb982fd54eb1875cdc22d46044644d804c4a
|
[
"Apache-2.0"
] | 16
|
2018-09-12T11:14:40.000Z
|
2021-04-19T09:02:44.000Z
|
ecl/tests/unit/compute/v2/test_limits.py
|
keiichi-hikita/eclsdk
|
c43afb982fd54eb1875cdc22d46044644d804c4a
|
[
"Apache-2.0"
] | 14
|
2017-05-11T14:26:26.000Z
|
2021-07-14T14:00:06.000Z
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import testtools
from ecl.compute.v2 import limits
# Canned "absolute" limits payload mirroring a Compute API response; used both
# as constructor kwargs and as expected values in the test cases below.
ABSOLUTE_LIMITS = {
    "maxImageMeta": 128,
    "maxPersonality": 5,
    "maxPersonalitySize": 10240,
    "maxSecurityGroupRules": 20,
    "maxSecurityGroups": 10,
    "maxServerMeta": 128,
    "maxTotalCores": 20,
    "maxTotalFloatingIps": 10,
    "maxTotalInstances": 10,
    "maxTotalKeypairs": 100,
    "maxTotalRAMSize": 51200,
    "maxServerGroups": 10,
    "maxServerGroupMembers": 10,
    "totalFloatingIpsUsed": 1,
    "totalSecurityGroupsUsed": 2,
    "totalRAMUsed": 4,
    "totalInstancesUsed": 5,
    "totalServerGroupsUsed": 6,
    "totalCoresUsed": 7,
    #: New missing attributes
    "totalSnapshotsUsed": 0,
    "maxTotalBackups": 0,
    "maxTotalVolumeGigabytes": 0,
    "maxTotalSnapshots": 0,
    "maxTotalBackupGigabytes": 0,
    "totalBackupGigabytesUsed": 0,
    "maxTotalVolumes": 0,
    "totalVolumesUsed": 0,
    "totalBackupsUsed": 0,
    "totalGigabytesUsed": 0,
}
# Single rate-limit entry as returned under the "rate" key of the API body.
RATE_LIMIT = {
    "limit": [
        {
            "next-available": "2012-11-27T17:22:18Z",
            "remaining": 120,
            "unit": "MINUTE",
            "value": 120,
            "verb": "POST"
        },
    ],
    "regex": ".*",
    "uri": "*"
}
# Full response body combining the absolute and rate fixtures above.
LIMITS_BODY = {
    "limits": {
        "absolute": ABSOLUTE_LIMITS,
        "rate": [RATE_LIMIT]
    }
}
class TestAbsoluteLimits(testtools.TestCase):
    """Tests for limits.AbsoluteLimits."""

    def test_basic(self):
        """A bare AbsoluteLimits has no keys/path/service and allows nothing."""
        sot = limits.AbsoluteLimits()
        self.assertIsNone(sot.resource_key)
        self.assertIsNone(sot.resources_key)
        self.assertEqual("", sot.base_path)
        self.assertIsNone(sot.service)
        for flag in (sot.allow_create, sot.allow_get, sot.allow_update,
                     sot.allow_delete, sot.allow_list):
            self.assertFalse(flag)

    def test_make_it(self):
        """Each JSON limit key maps onto the matching resource attribute."""
        sot = limits.AbsoluteLimits(**ABSOLUTE_LIMITS)
        key_to_attr = (
            ("maxImageMeta", "image_meta"),
            ("maxPersonality", "personality"),
            ("maxPersonalitySize", "personality_size"),
            ("maxSecurityGroupRules", "security_group_rules"),
            ("maxSecurityGroups", "security_groups"),
            ("maxServerMeta", "server_meta"),
            ("maxTotalCores", "cores"),
            ("maxTotalFloatingIps", "floating_ips"),
            ("maxTotalInstances", "instances"),
            ("maxTotalKeypairs", "keypairs"),
            ("maxTotalRAMSize", "ram"),
            ("maxServerGroups", "server_groups"),
            ("maxServerGroupMembers", "server_group_members"),
            ("totalFloatingIpsUsed", "floating_ips_used"),
            ("totalSecurityGroupsUsed", "security_groups_used"),
            ("totalRAMUsed", "ram_used"),
            ("totalInstancesUsed", "instances_used"),
            ("totalServerGroupsUsed", "server_groups_used"),
            ("totalCoresUsed", "cores_used"),
            # new missing attributes
            ("totalSnapshotsUsed", "snapshots_used"),
            ("maxTotalBackups", "backups"),
            ("maxTotalVolumeGigabytes", "volume_gigabytes"),
            ("maxTotalSnapshots", "snapshots"),
            ("maxTotalBackupGigabytes", "backup_gigabytes"),
            ("totalBackupGigabytesUsed", "backup_gigabytes_used"),
            ("maxTotalVolumes", "volumes"),
            ("totalVolumesUsed", "volumes_used"),
            ("totalBackupsUsed", "backups_used"),
            ("totalGigabytesUsed", "gigabytes_used"),
        )
        for key, attr in key_to_attr:
            self.assertEqual(ABSOLUTE_LIMITS[key], getattr(sot, attr))
class TestRateLimit(testtools.TestCase):
    """Tests for limits.RateLimit."""

    def test_basic(self):
        """A bare RateLimit has no keys/path/service and allows nothing."""
        sot = limits.RateLimit()
        self.assertIsNone(sot.resource_key)
        self.assertIsNone(sot.resources_key)
        self.assertEqual("", sot.base_path)
        self.assertIsNone(sot.service)
        for flag in (sot.allow_create, sot.allow_get, sot.allow_update,
                     sot.allow_delete, sot.allow_list):
            self.assertFalse(flag)

    def test_make_it(self):
        """Constructor kwargs surface as regex/uri/limits attributes."""
        sot = limits.RateLimit(**RATE_LIMIT)
        self.assertEqual(RATE_LIMIT["regex"], sot.regex)
        self.assertEqual(RATE_LIMIT["uri"], sot.uri)
        self.assertEqual(RATE_LIMIT["limit"], sot.limits)
class TestLimits(testtools.TestCase):
    """Tests for the composite ``limits.Limits`` resource."""

    # (ABSOLUTE_LIMITS key, attribute on the ``absolute`` sub-resource),
    # in the same order the original one-per-line assertions used.
    _ABSOLUTE_ATTRS = (
        ('maxImageMeta', 'image_meta'),
        ('maxPersonality', 'personality'),
        ('maxPersonalitySize', 'personality_size'),
        ('maxSecurityGroupRules', 'security_group_rules'),
        ('maxSecurityGroups', 'security_groups'),
        ('maxServerMeta', 'server_meta'),
        ('maxTotalCores', 'cores'),
        ('maxTotalFloatingIps', 'floating_ips'),
        ('maxTotalInstances', 'instances'),
        ('maxTotalKeypairs', 'keypairs'),
        ('maxTotalRAMSize', 'ram'),
        ('maxServerGroups', 'server_groups'),
        ('maxServerGroupMembers', 'server_group_members'),
        ('totalFloatingIpsUsed', 'floating_ips_used'),
        ('totalSecurityGroupsUsed', 'security_groups_used'),
        ('totalRAMUsed', 'ram_used'),
        ('totalInstancesUsed', 'instances_used'),
        ('totalServerGroupsUsed', 'server_groups_used'),
        ('totalCoresUsed', 'cores_used'),
        ('totalSnapshotsUsed', 'snapshots_used'),
        ('maxTotalBackups', 'backups'),
        ('maxTotalVolumeGigabytes', 'volume_gigabytes'),
        ('maxTotalSnapshots', 'snapshots'),
        ('maxTotalBackupGigabytes', 'backup_gigabytes'),
        ('totalBackupGigabytesUsed', 'backup_gigabytes_used'),
        ('maxTotalVolumes', 'volumes'),
        ('totalVolumesUsed', 'volumes_used'),
        ('totalBackupsUsed', 'backups_used'),
        ('totalGigabytesUsed', 'gigabytes_used'),
    )

    def test_basic(self):
        sot = limits.Limits()
        self.assertEqual("limits", sot.resource_key)
        self.assertEqual("/limits", sot.base_path)
        self.assertEqual("compute", sot.service.service_type)
        # Only GET is supported on the limits resource.
        self.assertTrue(sot.allow_get)
        for flag in ('allow_create', 'allow_update', 'allow_delete',
                     'allow_list'):
            self.assertFalse(getattr(sot, flag))

    def test_get(self):
        session = mock.Mock()
        response = mock.Mock()
        session.get.return_value = response
        response.json.return_value = LIMITS_BODY

        sot = limits.Limits().get(session)

        # Every absolute limit from the body must land on its attribute.
        for key, attr in self._ABSOLUTE_ATTRS:
            self.assertEqual(ABSOLUTE_LIMITS[key],
                             getattr(sot.absolute, attr))
        self.assertEqual(RATE_LIMIT["uri"], sot.rate[0].uri)
        self.assertEqual(RATE_LIMIT["regex"], sot.rate[0].regex)
        self.assertEqual(RATE_LIMIT["limit"], sot.rate[0].limits)
| 41.177866
| 79
| 0.629775
| 939
| 10,418
| 6.816826
| 0.200213
| 0.161693
| 0.208405
| 0.262771
| 0.707077
| 0.703953
| 0.645212
| 0.499766
| 0.23012
| 0.101547
| 0
| 0.010258
| 0.270109
| 10,418
| 252
| 80
| 41.34127
| 0.831536
| 0.054713
| 0
| 0.341122
| 0
| 0
| 0.168599
| 0.04759
| 0
| 0
| 0
| 0
| 0.420561
| 1
| 0.028037
| false
| 0
| 0.014019
| 0
| 0.056075
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
825f522e3e610055105d809ea60488f5782849c8
| 2,636
|
py
|
Python
|
tests/test_2d.py
|
John-Hennig/KDE-diffusion
|
f0daee3294533808786ad6287fc1d70211bcc6dd
|
[
"MIT"
] | 5
|
2020-05-13T00:57:08.000Z
|
2021-08-29T12:39:03.000Z
|
tests/test_2d.py
|
john-hen/KDE-diffusion
|
5a540fe863fb022e6cc68b5737f732e8bee99f96
|
[
"MIT"
] | 1
|
2021-05-16T09:41:13.000Z
|
2021-05-16T09:41:13.000Z
|
tests/test_2d.py
|
john-hen/KDE-diffusion
|
5a540fe863fb022e6cc68b5737f732e8bee99f96
|
[
"MIT"
] | 2
|
2022-01-03T15:03:57.000Z
|
2022-02-07T08:43:46.000Z
|
"""Tests the 2d kernel density estimation."""
########################################
# Dependencies #
########################################
from kde_diffusion import kde2d
from pathlib import Path
from numpy import isclose, load
from pytest import raises
########################################
# Fixtures #
########################################
reference = None


def setup_module():
    """Load the 2d reference data set once for all tests in this module."""
    global reference
    data_file = Path(__file__).parent / 'reference2d.npz'
    reference = load(data_file)
########################################
# Test #
########################################
def test_reference():
    """Compare kde2d output against the stored 2d reference results."""
    x = reference['x']
    y = reference['y']
    assert reference['N'] == len(x)
    n = reference['n']
    (xmin, xmax) = (reference['xmin'], reference['xmax'])
    (ymin, ymax) = (reference['ymin'], reference['ymax'])
    (density, grid, bandwidth) = kde2d(x, y, n, ((xmin, xmax), (ymin, ymax)))
    # The grid covers [min, max) with n points per axis, so the last point
    # sits one grid step below the upper limit.
    assert isclose(grid[0].min(), xmin)
    assert isclose(grid[0].max(), xmax - (xmax - xmin)/n)
    assert isclose(grid[1].min(), ymin)
    assert isclose(grid[1].max(), ymax - (ymax - ymin)/n)
    for (key, value) in (('density', density),
                         ('grid', grid),
                         ('bandwidth', bandwidth)):
        assert isclose(value, reference[key]).all()
def test_arguments():
    """Check the grid limits produced by each way of passing bounds."""
    data = [-2, -1, 0, +1, +2] * 5

    def corners(grid):
        # (x-min, x-max, y-min, y-max) of the returned grid.
        return (grid[0].min(), grid[0].max(), grid[1].min(), grid[1].max())

    # Default limits: derived from the data.
    (density, grid, bandwidth) = kde2d(data, data, 16)
    assert len(grid[0]) == 16
    assert len(grid[1]) == 16
    for (actual, wanted) in zip(corners(grid), (-3.0, +2.625, -3.0, +2.625)):
        assert isclose(actual, wanted)

    # Explicit limits: per-axis, mixed, and scalar forms.
    cases = (
        ((2, None), (-2, +1.75, -3, +2.625)),
        ((None, 2), (-3, +2.625, -2, +1.75)),
        (2,         (-2, +1.75, -2, +1.75)),
    )
    for (limits, expected) in cases:
        (density, grid, bandwidth) = kde2d(data, data, 16, limits)
        for (actual, wanted) in zip(corners(grid), expected):
            assert isclose(actual, wanted)

    # Mismatched sample lengths are rejected.
    with raises(ValueError):
        kde2d(data[:5], data[:5]*2, 16)
    # Too few samples are rejected.
    with raises(ValueError):
        kde2d(data[:5], data[:5], 16)
| 34.233766
| 78
| 0.518589
| 315
| 2,636
| 4.31746
| 0.190476
| 0.219853
| 0.2625
| 0.132353
| 0.508088
| 0.446324
| 0.336029
| 0.336029
| 0.336029
| 0.336029
| 0
| 0.050914
| 0.232549
| 2,636
| 76
| 79
| 34.684211
| 0.62086
| 0.030349
| 0
| 0.254545
| 0
| 0
| 0.025534
| 0
| 0
| 0
| 0
| 0
| 0.472727
| 0
| null | null | 0
| 0.072727
| null | null | 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
827ebef698412f9042922d3f464e790e6b1e0672
| 470
|
py
|
Python
|
Python/euler006/euler006_test.py
|
troberson/exercises-euler
|
03ffafb1016d252ca297f2ab6f02552df1377496
|
[
"BSD-3-Clause"
] | 1
|
2020-02-12T20:40:39.000Z
|
2020-02-12T20:40:39.000Z
|
Python/euler006/euler006_test.py
|
troberson/exercises-euler
|
03ffafb1016d252ca297f2ab6f02552df1377496
|
[
"BSD-3-Clause"
] | null | null | null |
Python/euler006/euler006_test.py
|
troberson/exercises-euler
|
03ffafb1016d252ca297f2ab6f02552df1377496
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python3
import euler006
def test_sum_of_squares_of_1_to_10_is_385():
    """sum_of_squares over 1..10 gives 1^2 + ... + 10^2 = 385."""
    total = euler006.sum_of_squares(range(1, 11))
    assert total == 385
def test_square_of_sum_of_1_to_10_is_3025():
    """square_of_sum over 1..10 gives (55)^2 = 3025."""
    squared = euler006.square_of_sum(range(1, 11))
    assert squared == 3025
def test_sum_square_difference_of_1_to_10_is_2640():
    """Difference between square-of-sum and sum-of-squares for 1..10."""
    difference = euler006.sum_square_difference(range(1, 11))
    assert difference == 2640
def test_final_sum_square_difference_of_1_to_100_is_25164150():
    """main() solves the full 1..100 problem."""
    answer = euler006.main()
    assert answer == 25164150
| 23.5
| 63
| 0.782979
| 82
| 470
| 3.963415
| 0.329268
| 0.086154
| 0.061538
| 0.064615
| 0.215385
| 0.147692
| 0
| 0
| 0
| 0
| 0
| 0.183575
| 0.119149
| 470
| 19
| 64
| 24.736842
| 0.601449
| 0.044681
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.444444
| 1
| 0.444444
| true
| 0
| 0.111111
| 0
| 0.555556
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 1
| 1
| 0
| 0
| 0
| 1
| 0
|
0
| 4
|
82941e7ba7132e8fb1137a78c937ea721d640dd5
| 890
|
py
|
Python
|
tests/test_alphabetmatcher.py
|
natemarks/alphabetmatcher
|
9620b36c4e4ed702e339ea3d701b19745b2c2554
|
[
"MIT"
] | null | null | null |
tests/test_alphabetmatcher.py
|
natemarks/alphabetmatcher
|
9620b36c4e4ed702e339ea3d701b19745b2c2554
|
[
"MIT"
] | null | null | null |
tests/test_alphabetmatcher.py
|
natemarks/alphabetmatcher
|
9620b36c4e4ed702e339ea3d701b19745b2c2554
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for `alphabetmatcher` package."""
import pytest
def test_empty_string():
    """An empty input string must not be a successful match."""
    from alphabetmatcher.alphabetmatcher import Matcher
    assert not Matcher('').success()
def test_exact_match():
    """The plain lowercase alphabet is a successful match."""
    from alphabetmatcher.alphabetmatcher import Matcher
    assert Matcher('abcdefghijklmnopqrstuvwxyz').success()
def test_case_mixing():
    """Uppercase letters mixed into the alphabet still match."""
    from alphabetmatcher.alphabetmatcher import Matcher
    assert Matcher('abcdefGhijklmnopqrStuvwxyz').success()
def test_reorder_and_junk():
    """Reordered letters plus junk characters still match."""
    from alphabetmatcher.alphabetmatcher import Matcher
    assert Matcher('CBAhjsvdf734y4tu9820h%$%$&%defGhijklmnopqrStuvwxyz').success()
def test_repeats():
    """Repeated letters do not prevent a match."""
    from alphabetmatcher.alphabetmatcher import Matcher
    assert Matcher('aaaaaaaaaabcdefGhijklmnopqrStuvwxyz').success()
| 20.227273
| 70
| 0.733708
| 89
| 890
| 7.224719
| 0.393258
| 0.054432
| 0.264386
| 0.311042
| 0.618974
| 0.584759
| 0.584759
| 0.323484
| 0.323484
| 0.323484
| 0
| 0.012129
| 0.166292
| 890
| 43
| 71
| 20.697674
| 0.854447
| 0.088764
| 0
| 0.428571
| 0
| 0
| 0.170398
| 0.170398
| 0
| 0
| 0
| 0
| 0.238095
| 1
| 0.238095
| false
| 0
| 0.285714
| 0
| 0.52381
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 4
|
829e162fc8ce3fb260c319fe7d7b1789e33d5ae3
| 185
|
py
|
Python
|
addons/dropbox/settings/defaults.py
|
gaybro8777/osf.io
|
30408511510a40bc393565817b343ef5fd76ab14
|
[
"Apache-2.0"
] | 628
|
2015-01-15T04:33:22.000Z
|
2022-03-30T06:40:10.000Z
|
addons/dropbox/settings/defaults.py
|
gaybro8777/osf.io
|
30408511510a40bc393565817b343ef5fd76ab14
|
[
"Apache-2.0"
] | 4,712
|
2015-01-02T01:41:53.000Z
|
2022-03-30T14:18:40.000Z
|
addons/dropbox/settings/defaults.py
|
Johnetordoff/osf.io
|
de10bf249c46cede04c78f7e6f7e352c69e6e6b5
|
[
"Apache-2.0"
] | 371
|
2015-01-12T16:14:08.000Z
|
2022-03-31T18:58:29.000Z
|
# OAuth app keys
# NOTE(review): None here means "not configured"; presumably overridden by
# deployment-specific settings — confirm against the addon's settings loader.
DROPBOX_KEY = None
DROPBOX_SECRET = None
# Key name used during the Dropbox OAuth CSRF check (per the constant's
# name; verify against the auth view that reads it).
DROPBOX_AUTH_CSRF_TOKEN = 'dropbox-auth-csrf-token'
# Max file size permitted by frontend in megabytes
MAX_UPLOAD_SIZE = 150
| 20.555556
| 51
| 0.794595
| 29
| 185
| 4.827586
| 0.689655
| 0.157143
| 0.214286
| 0.285714
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.018987
| 0.145946
| 185
| 8
| 52
| 23.125
| 0.867089
| 0.340541
| 0
| 0
| 0
| 0
| 0.193277
| 0.193277
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
82bc386f8ea6078c89409c1bed3f32733e9ec8ce
| 269
|
py
|
Python
|
tests/test_test1.py
|
inTestiGator/pytest-focus
|
5c136a8c7af3f3a4a149aae5e5e2512c5bf2d9ea
|
[
"MIT"
] | 3
|
2019-04-03T01:29:27.000Z
|
2021-10-01T06:33:01.000Z
|
tests/test_test1.py
|
inTestiGator/pytest-focus
|
5c136a8c7af3f3a4a149aae5e5e2512c5bf2d9ea
|
[
"MIT"
] | 26
|
2019-04-02T19:12:22.000Z
|
2019-05-05T01:03:34.000Z
|
tests/test_test1.py
|
inTestiGator/pytest-focus
|
5c136a8c7af3f3a4a149aae5e5e2512c5bf2d9ea
|
[
"MIT"
] | 1
|
2019-05-04T21:52:23.000Z
|
2019-05-04T21:52:23.000Z
|
"""
practice test cases for testing plugin
"""
def test_iequals1():
    """
    practice test case 1 for testing plugin
    """
    value = 1
    assert value == 1
def test_iequals2():
    """
    practice test case 2 for testing plugin
    """
    value = 2
    assert value == 2
| 13.45
| 43
| 0.561338
| 36
| 269
| 4.138889
| 0.388889
| 0.241611
| 0.322148
| 0.228188
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.044199
| 0.327138
| 269
| 19
| 44
| 14.157895
| 0.779006
| 0.438662
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.333333
| 1
| 0.333333
| false
| 0
| 0
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
7d7836806c92e26e325f2b91aaef484d06b69690
| 659
|
py
|
Python
|
tests/models.py
|
joebowen/channelwormdjango
|
b5d940c5ca3c48afaac328de75971f2dc9f35956
|
[
"MIT"
] | null | null | null |
tests/models.py
|
joebowen/channelwormdjango
|
b5d940c5ca3c48afaac328de75971f2dc9f35956
|
[
"MIT"
] | null | null | null |
tests/models.py
|
joebowen/channelwormdjango
|
b5d940c5ca3c48afaac328de75971f2dc9f35956
|
[
"MIT"
] | null | null | null |
import sciunit
import capabilities
class IonChannelModel(sciunit.Model, capabilities.Generates_IV_Curve,
                      capabilities.Receives_Current, capabilities.Generates_Membrane_Potential):
    """A generic ion channel model."""

    def __init__(self, name=None, iv_curve=None, current=None, voltage=None):
        super(IonChannelModel, self).__init__(name=name)
        # BUG FIX: these arguments were previously accepted but discarded,
        # so generate_iv_curve() raised AttributeError (self.iv_curve was
        # never assigned anywhere).
        self.iv_curve = iv_curve
        self.current = current
        self.voltage = voltage

    def generate_iv_curve(self):
        """Return the IV curve supplied at construction time."""
        return self.iv_curve

    def receive_current(self):
        # Not clear what this should return
        pass

    def generate_membrane_potential(self):
        # Not clear what this should return
        pass
| 31.380952
| 82
| 0.672231
| 74
| 659
| 5.716216
| 0.445946
| 0.066194
| 0.056738
| 0.07565
| 0.170213
| 0.170213
| 0.170213
| 0.170213
| 0
| 0
| 0
| 0
| 0.256449
| 659
| 20
| 83
| 32.95
| 0.863265
| 0.144158
| 0
| 0.166667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0.166667
| 0.166667
| 0.083333
| 0.666667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
| 0
|
0
| 4
|
7d892d9a0b5782d58d6d5b58b028fd88a0001c8c
| 48
|
py
|
Python
|
settings.py
|
mdgrotheer/twitter-intelligence
|
a256081be6ca5a38c5e34a019438792175374317
|
[
"MIT"
] | 202
|
2018-07-06T11:56:32.000Z
|
2022-02-16T22:19:26.000Z
|
settings.py
|
mdgrotheer/twitter-intelligence
|
a256081be6ca5a38c5e34a019438792175374317
|
[
"MIT"
] | 7
|
2018-09-28T09:47:21.000Z
|
2021-10-01T15:05:10.000Z
|
settings.py
|
mdgrotheer/twitter-intelligence
|
a256081be6ca5a38c5e34a019438792175374317
|
[
"MIT"
] | 56
|
2018-08-19T18:56:05.000Z
|
2022-03-26T11:41:36.000Z
|
# Google Maps API key — 'YOUR_API_KEY' is a placeholder; supply a real key
# before running (and keep it out of version control).
GOOGLE_MAP_API_KEY = 'YOUR_API_KEY'
# Port number constant; the code that binds to it is not visible in this file.
PORT = 5000
| 16
| 35
| 0.791667
| 9
| 48
| 3.666667
| 0.777778
| 0.363636
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.095238
| 0.125
| 48
| 2
| 36
| 24
| 0.690476
| 0
| 0
| 0
| 0
| 0
| 0.25
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
7d9c91e5e12108694668a3c18558e8f0bebe6b19
| 154
|
py
|
Python
|
gui.py
|
bsivanantham/WorldModels
|
188a58f6e056bec2320eb5ae45dac1bcb109af8a
|
[
"MIT"
] | null | null | null |
gui.py
|
bsivanantham/WorldModels
|
188a58f6e056bec2320eb5ae45dac1bcb109af8a
|
[
"MIT"
] | null | null | null |
gui.py
|
bsivanantham/WorldModels
|
188a58f6e056bec2320eb5ae45dac1bcb109af8a
|
[
"MIT"
] | null | null | null |
from tkinter import *

# Demo script: a root window with two sliders, then the Tk event loop.
master = Tk()

# Vertical slider (default orientation), range 0-42.
w = Scale(master, from_=0, to=42)
w.pack()

# NOTE(review): `w` is rebound here — the first Scale stays packed in the
# window but its Python handle is lost.
w = Scale(master, from_=0, to=200, orient=HORIZONTAL)
w.pack()

# Blocks until the window is closed.
mainloop()
| 17.111111
| 53
| 0.675325
| 26
| 154
| 3.923077
| 0.576923
| 0.117647
| 0.235294
| 0.313725
| 0.372549
| 0.372549
| 0
| 0
| 0
| 0
| 0
| 0.05303
| 0.142857
| 154
| 9
| 54
| 17.111111
| 0.719697
| 0
| 0
| 0.285714
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.142857
| 0
| 0.142857
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
7dcfe03e6c432ecac8cc4f5820a66a8051e09fed
| 79
|
py
|
Python
|
instance/config.py
|
amiinegal/News-app
|
0c793604f097167fae53ff91e0d296d2e893108f
|
[
"MIT",
"Unlicense"
] | null | null | null |
instance/config.py
|
amiinegal/News-app
|
0c793604f097167fae53ff91e0d296d2e893108f
|
[
"MIT",
"Unlicense"
] | null | null | null |
instance/config.py
|
amiinegal/News-app
|
0c793604f097167fae53ff91e0d296d2e893108f
|
[
"MIT",
"Unlicense"
] | null | null | null |
# NOTE(review): these are shell `export` statements inside what the repo
# names config.py — this is not valid Python. Presumably the file is sourced
# by a shell or read as text; confirm how it is consumed.
# SECURITY: a live-looking API key is committed here — rotate the key and
# move both values to an untracked secrets store / environment.
export NEWS_API_KEY='cb4e9f2ecd7343a19992a9b5043a14db'
export SECRET_KEY='2030'
| 39.5
| 54
| 0.886076
| 9
| 79
| 7.444444
| 0.777778
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.302632
| 0.037975
| 79
| 2
| 55
| 39.5
| 0.578947
| 0
| 0
| 0
| 0
| 0
| 0.45
| 0.4
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
7dd5457bc86e8c6ca79122b74de713080dc741a7
| 458
|
py
|
Python
|
src/encoded/tests/test_schema_computational_model.py
|
procha2/encoded
|
e9f122362b71f3b8641023b8d2d5ad531d3484b7
|
[
"MIT"
] | 102
|
2015-05-20T01:17:43.000Z
|
2022-03-07T06:03:55.000Z
|
src/encoded/tests/test_schema_computational_model.py
|
procha2/encoded
|
e9f122362b71f3b8641023b8d2d5ad531d3484b7
|
[
"MIT"
] | 901
|
2015-01-07T23:11:57.000Z
|
2022-03-18T13:56:12.000Z
|
src/encoded/tests/test_schema_computational_model.py
|
procha2/encoded
|
e9f122362b71f3b8641023b8d2d5ad531d3484b7
|
[
"MIT"
] | 65
|
2015-02-06T23:00:26.000Z
|
2022-01-22T07:58:44.000Z
|
import pytest
def test_unique_software(testapp, computational_model_unique_software):
    """Posting a model with unique software entries yields HTTP 201."""
    response = testapp.post_json(
        '/computational_model',
        computational_model_unique_software,
        expect_errors=True,
    )
    assert response.status_code == 201
def test_non_unique_software(testapp, computational_model_non_unique_software):
    """Posting a model with duplicated software entries yields HTTP 422."""
    response = testapp.post_json(
        '/computational_model',
        computational_model_non_unique_software,
        expect_errors=True,
    )
    assert response.status_code == 422
| 41.636364
| 111
| 0.829694
| 59
| 458
| 5.983051
| 0.355932
| 0.23796
| 0.144476
| 0.192635
| 0.90085
| 0.66289
| 0.66289
| 0.66289
| 0.66289
| 0.385269
| 0
| 0.014493
| 0.09607
| 458
| 10
| 112
| 45.8
| 0.838164
| 0
| 0
| 0
| 0
| 0
| 0.087336
| 0
| 0
| 0
| 0
| 0
| 0.285714
| 1
| 0.285714
| false
| 0
| 0.142857
| 0
| 0.428571
| 0
| 0
| 0
| 0
| null | 1
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
815cddf45be731d8cf2f66e4d25f571763a19a3c
| 818
|
py
|
Python
|
game/khet/model/pieces/piece.py
|
xelahalo/khet
|
c4aca94703c24c01d106959849240b890fa6744b
|
[
"MIT"
] | null | null | null |
game/khet/model/pieces/piece.py
|
xelahalo/khet
|
c4aca94703c24c01d106959849240b890fa6744b
|
[
"MIT"
] | null | null | null |
game/khet/model/pieces/piece.py
|
xelahalo/khet
|
c4aca94703c24c01d106959849240b890fa6744b
|
[
"MIT"
] | 1
|
2022-03-19T22:25:54.000Z
|
2022-03-19T22:25:54.000Z
|
from abc import ABC, abstractclassmethod, abstractmethod

from game.util.constants import Color
class Piece(ABC):
    """Abstract base for a Khet board piece holding a color and a rotation."""

    def __init__(self, color, rotation):
        self._color = color
        self._rotation = rotation

    @property
    def color(self):
        """The piece's color (read-only)."""
        return self._color

    @property
    def rotation(self):
        """Current rotation of the piece (settable)."""
        return self._rotation

    @rotation.setter
    def rotation(self, value):
        self._rotation = value

    # BUG FIX: these were declared with the deprecated ``abstractclassmethod``,
    # which also (incorrectly) turned them into classmethods even though they
    # take ``self``. Plain ``abstractmethod`` gives the intended per-instance
    # abstract interface; subclasses override them exactly as before.
    @abstractmethod
    def get_value(self):
        """Return the piece's value (defined by subclasses)."""

    @abstractmethod
    def copy(self):
        """Return a copy of this piece."""

    @abstractmethod
    def on_hit(self, source_dir):
        """
        Returns:
            True if it should be destroyed, reflected direction otherwise
        """

    def __str__(self, char):
        # Wrap the supplied character in the piece's color escape codes.
        return self._color.value + str(char) + Color.RESET.value
| 21.526316
| 69
| 0.627139
| 90
| 818
| 5.511111
| 0.422222
| 0.072581
| 0.080645
| 0.120968
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.292176
| 818
| 38
| 70
| 21.526316
| 0.856649
| 0.085575
| 0
| 0.307692
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.307692
| false
| 0.115385
| 0.076923
| 0.115385
| 0.538462
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
|
0
| 4
|
c4a70002d1dd7fd344e6e1d96e9a073cf2e15074
| 50
|
py
|
Python
|
reinforcement_learning/ppoc/__init__.py
|
mizolotu/izi
|
d2d00813919259aad3dcdbc54039c30cbb16b125
|
[
"MIT"
] | null | null | null |
reinforcement_learning/ppoc/__init__.py
|
mizolotu/izi
|
d2d00813919259aad3dcdbc54039c30cbb16b125
|
[
"MIT"
] | null | null | null |
reinforcement_learning/ppoc/__init__.py
|
mizolotu/izi
|
d2d00813919259aad3dcdbc54039c30cbb16b125
|
[
"MIT"
] | null | null | null |
from reinforcement_learning.ppo2.ppo2 import PPO2
| 25
| 49
| 0.88
| 7
| 50
| 6.142857
| 0.714286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.065217
| 0.08
| 50
| 1
| 50
| 50
| 0.869565
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
c4caccc931cd14eb8356fedc07f913f67eece1a6
| 759
|
py
|
Python
|
data/smart_garden/exceptions.py
|
linxaddict/pytomatoes
|
fe281c6ba1a1f7d8ba87a5286afde9d1bd4f4d58
|
[
"MIT"
] | null | null | null |
data/smart_garden/exceptions.py
|
linxaddict/pytomatoes
|
fe281c6ba1a1f7d8ba87a5286afde9d1bd4f4d58
|
[
"MIT"
] | 3
|
2020-04-24T21:18:24.000Z
|
2020-05-21T19:58:55.000Z
|
data/smart_garden/exceptions.py
|
linxaddict/pytomatoes
|
fe281c6ba1a1f7d8ba87a5286afde9d1bd4f4d58
|
[
"MIT"
] | null | null | null |
from typing import Optional
from aiohttp import ClientResponse
class SmartGardenException(Exception):
    """Base error for the smart-garden client.

    Carries the underlying exception (if any) and the HTTP response
    (if any) alongside the usual ``Exception`` args.
    """

    def __init__(self, internal_error: Optional[Exception] = None, response: Optional[ClientResponse] = None,
                 *args: object) -> None:
        super().__init__(*args)
        # Keep both the raw response and the root cause for inspection.
        self.response = response
        self.internal_error = internal_error

    def __str__(self) -> str:
        # String form delegates to the stored response object.
        return str(self.response)
class SmartGardenResponseError(SmartGardenException):
    """Response-related smart-garden error."""
class SmartGardenUnauthorizedError(SmartGardenException):
    """Authorization-related smart-garden error."""
class SmartGardenConnectionError(SmartGardenException):
    """Connection-related smart-garden error."""
class SmartGardenPayloadError(SmartGardenException):
    """Payload-related smart-garden error."""
class SmartGardenInvalidUrl(SmartGardenException):
    """URL-related smart-garden error."""
| 21.083333
| 109
| 0.73913
| 65
| 759
| 8.4
| 0.415385
| 0.21978
| 0.212454
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.189723
| 759
| 35
| 110
| 21.685714
| 0.887805
| 0
| 0
| 0.25
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.1
| false
| 0.25
| 0.1
| 0.05
| 0.55
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
|
0
| 4
|
f218cb7db1e92faf46e8fa3abb1e13c80a412b8d
| 63
|
py
|
Python
|
tf_netbuilder/__init__.py
|
thunfischtoast/tf_netbuilder
|
728826ac5e4e58a39ea862cecc86ad249a19e278
|
[
"Apache-2.0"
] | 10
|
2020-11-06T13:44:44.000Z
|
2021-11-20T11:20:23.000Z
|
tf_netbuilder/__init__.py
|
thunfischtoast/tf_netbuilder
|
728826ac5e4e58a39ea862cecc86ad249a19e278
|
[
"Apache-2.0"
] | 2
|
2021-01-11T06:41:54.000Z
|
2021-03-31T10:41:49.000Z
|
tf_netbuilder/__init__.py
|
thunfischtoast/tf_netbuilder
|
728826ac5e4e58a39ea862cecc86ad249a19e278
|
[
"Apache-2.0"
] | 10
|
2020-11-12T23:02:28.000Z
|
2022-01-29T12:07:41.000Z
|
from .builder_cfg import NetBuilderConfig
from . import config
| 21
| 41
| 0.84127
| 8
| 63
| 6.5
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.126984
| 63
| 2
| 42
| 31.5
| 0.945455
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
480eec09f650fb02f729ef7913c9a06e9683a75b
| 177
|
py
|
Python
|
tests/unit/array/mixins/test_empty.py
|
sugatoray/docarray
|
e62c1ad045ea7236912c702aebe87c3a25db110d
|
[
"Apache-2.0"
] | null | null | null |
tests/unit/array/mixins/test_empty.py
|
sugatoray/docarray
|
e62c1ad045ea7236912c702aebe87c3a25db110d
|
[
"Apache-2.0"
] | 1
|
2022-01-11T00:59:52.000Z
|
2022-01-11T00:59:52.000Z
|
tests/unit/array/mixins/test_empty.py
|
sugatoray/docarray
|
e62c1ad045ea7236912c702aebe87c3a25db110d
|
[
"Apache-2.0"
] | null | null | null |
from docarray import DocumentArray
def test_empty_non_zero():
    """DocumentArray.empty honours its count argument (and defaults to 0)."""
    ten = DocumentArray.empty(10)
    assert len(ten) == 10
    none = DocumentArray.empty()
    assert len(none) == 0
| 19.666667
| 34
| 0.677966
| 24
| 177
| 4.875
| 0.583333
| 0.25641
| 0.34188
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.035971
| 0.214689
| 177
| 8
| 35
| 22.125
| 0.805755
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.333333
| 1
| 0.166667
| false
| 0
| 0.166667
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
481f03491bcab44bab66cdcc98df28fc68505154
| 275
|
py
|
Python
|
stl/utility/__init__.py
|
pieter-hendriks/STL-monitoring
|
114b73b1f4b0687b11b8842b3c4a1c8af7b0d9df
|
[
"MIT"
] | null | null | null |
stl/utility/__init__.py
|
pieter-hendriks/STL-monitoring
|
114b73b1f4b0687b11b8842b3c4a1c8af7b0d9df
|
[
"MIT"
] | null | null | null |
stl/utility/__init__.py
|
pieter-hendriks/STL-monitoring
|
114b73b1f4b0687b11b8842b3c4a1c8af7b0d9df
|
[
"MIT"
] | null | null | null |
""" Utility functions / helpers for STL implementation. """
from .interval import Interval
from .helpers import cm2inch, getTimeListIntersection
from .singleton import Singleton
from .plothelper import PlotHelper
from .linesegment import LineSegment
from .point import Point
| 34.375
| 59
| 0.821818
| 31
| 275
| 7.290323
| 0.483871
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.004149
| 0.123636
| 275
| 7
| 60
| 39.285714
| 0.93361
| 0.185455
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
482af87c62026dbba8ed7391038adcded047cb33
| 3,471
|
py
|
Python
|
tests/contracts/KT1At3oM7k94ccMmFCqjAZy42QyaDh2uNqhD/test_micheline_coding_KT1At3.py
|
juztin/pytezos-1
|
7e608ff599d934bdcf129e47db43dbdb8fef9027
|
[
"MIT"
] | 1
|
2021-05-20T16:52:08.000Z
|
2021-05-20T16:52:08.000Z
|
tests/contracts/KT1At3oM7k94ccMmFCqjAZy42QyaDh2uNqhD/test_micheline_coding_KT1At3.py
|
juztin/pytezos-1
|
7e608ff599d934bdcf129e47db43dbdb8fef9027
|
[
"MIT"
] | 1
|
2020-12-30T16:44:56.000Z
|
2020-12-30T16:44:56.000Z
|
tests/contracts/KT1At3oM7k94ccMmFCqjAZy42QyaDh2uNqhD/test_micheline_coding_KT1At3.py
|
tqtezos/pytezos
|
a4ac0b022d35d4c9f3062609d8ce09d584b5faa8
|
[
"MIT"
] | 1
|
2022-03-20T19:01:00.000Z
|
2022-03-20T19:01:00.000Z
|
from unittest import TestCase
from tests import get_data
from pytezos.michelson.converter import build_schema, decode_micheline, encode_micheline
class MichelineCodingTestKT1At3(TestCase):
    """Round-trip micheline coding tests for contract KT1At3...uNqhD.

    Each test decodes a stored micheline expression with the contract's
    schema and re-encodes it, asserting the round trip is lossless.
    (Previously every test repeated the same four-line body; the logic now
    lives in ``_assert_inverse`` — test method names are unchanged.)
    """

    # Directory holding all JSON fixtures for this contract.
    contract_dir = 'contracts/KT1At3oM7k94ccMmFCqjAZy42QyaDh2uNqhD'

    @classmethod
    def setUpClass(cls):
        cls.maxDiff = None
        cls.code = get_data(path=cls.contract_dir + '/code_KT1At3.json')
        cls.schema = dict(
            parameter=build_schema(cls.code[0]),
            storage=build_schema(cls.code[1])
        )

    def _assert_inverse(self, section, filename):
        """Decode ``filename`` under schema ``section`` ('parameter' uses
        code[0], 'storage' uses code[1]) and re-encode it, asserting the
        result equals the original expression."""
        code_idx = 0 if section == 'parameter' else 1
        expected = get_data(path=self.contract_dir + '/' + filename)
        decoded = decode_micheline(expected, self.code[code_idx],
                                   self.schema[section])
        actual = encode_micheline(decoded, self.schema[section])
        self.assertEqual(expected, actual)

    def test_micheline_inverse_storage_KT1At3(self):
        self._assert_inverse('storage', 'storage_KT1At3.json')

    def test_micheline_inverse_parameter_oozfkT(self):
        self._assert_inverse('parameter', 'parameter_oozfkT.json')

    def test_micheline_inverse_parameter_oo2UMR(self):
        self._assert_inverse('parameter', 'parameter_oo2UMR.json')

    def test_micheline_inverse_parameter_onwvUM(self):
        self._assert_inverse('parameter', 'parameter_onwvUM.json')

    def test_micheline_inverse_parameter_opGhNz(self):
        self._assert_inverse('parameter', 'parameter_opGhNz.json')

    def test_micheline_inverse_parameter_oowDU8(self):
        self._assert_inverse('parameter', 'parameter_oowDU8.json')

    def test_micheline_inverse_parameter_onpSQy(self):
        self._assert_inverse('parameter', 'parameter_onpSQy.json')

    def test_micheline_inverse_parameter_oopBY6(self):
        self._assert_inverse('parameter', 'parameter_oopBY6.json')
| 46.905405
| 88
| 0.715356
| 351
| 3,471
| 6.868946
| 0.136752
| 0.066363
| 0.110328
| 0.074658
| 0.81377
| 0.790543
| 0.774782
| 0.729158
| 0.505599
| 0.480299
| 0
| 0.033958
| 0.185537
| 3,471
| 73
| 89
| 47.547945
| 0.818889
| 0
| 0
| 0.491803
| 0
| 0
| 0.214636
| 0.174301
| 0
| 0
| 0
| 0
| 0.131148
| 1
| 0.147541
| false
| 0
| 0.04918
| 0
| 0.213115
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
48462ff8c9477e614ea783c1bb87468aa265722b
| 703
|
py
|
Python
|
python3/lib/python3.6/site-packages/tensorflow/_api/v1/xla/experimental/__init__.py
|
TruongThuyLiem/keras2tensorflow
|
726f2370160701081cb43fbd8b56154c10d7ad63
|
[
"MIT"
] | 3
|
2020-10-12T15:47:01.000Z
|
2022-01-14T19:51:26.000Z
|
python3/lib/python3.6/site-packages/tensorflow/_api/v1/xla/experimental/__init__.py
|
TruongThuyLiem/keras2tensorflow
|
726f2370160701081cb43fbd8b56154c10d7ad63
|
[
"MIT"
] | null | null | null |
python3/lib/python3.6/site-packages/tensorflow/_api/v1/xla/experimental/__init__.py
|
TruongThuyLiem/keras2tensorflow
|
726f2370160701081cb43fbd8b56154c10d7ad63
|
[
"MIT"
] | 2
|
2020-08-03T13:02:06.000Z
|
2020-11-04T03:15:44.000Z
|
# This file is MACHINE GENERATED! Do not edit.
# Generated by: tensorflow/python/tools/api/generator/create_python_api.py script.
"""Public API for tf.xla.experimental namespace.
"""
from __future__ import print_function as _print_function
from tensorflow.python.compiler.xla.jit import experimental_jit_scope as jit_scope
from tensorflow.python.compiler.xla.xla import compile
del _print_function
import sys as _sys
from tensorflow.python.util import deprecation_wrapper as _deprecation_wrapper
# Replace this module object in sys.modules with a DeprecationWrapper
# (unless it is already wrapped); the wrapper's behavior is defined in
# tensorflow.python.util.deprecation_wrapper.
if not isinstance(_sys.modules[__name__], _deprecation_wrapper.DeprecationWrapper):
    _sys.modules[__name__] = _deprecation_wrapper.DeprecationWrapper(
        _sys.modules[__name__], "xla.experimental")
| 37
| 83
| 0.825036
| 93
| 703
| 5.83871
| 0.462366
| 0.117864
| 0.110497
| 0.103131
| 0.324125
| 0.209945
| 0.209945
| 0.209945
| 0.209945
| 0
| 0
| 0
| 0.099573
| 703
| 18
| 84
| 39.055556
| 0.85782
| 0.244666
| 0
| 0
| 1
| 0
| 0.030593
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.555556
| 0
| 0.555556
| 0.222222
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
485feaa51217869955fe58e3b35fdcdfdd385268
| 210
|
py
|
Python
|
python/problem2_better.py
|
mo/project-euler
|
7d6e59c0d82216b9a1d59e4f2472e53b8f330574
|
[
"MIT"
] | null | null | null |
python/problem2_better.py
|
mo/project-euler
|
7d6e59c0d82216b9a1d59e4f2472e53b8f330574
|
[
"MIT"
] | null | null | null |
python/problem2_better.py
|
mo/project-euler
|
7d6e59c0d82216b9a1d59e4f2472e53b8f330574
|
[
"MIT"
] | null | null | null |
# Project Euler problem 2 variant: sum the even-valued Fibonacci terms
# strictly below 1,000,000.
special_sum = 0
n_minus_1 = n_minus_2 = 1
fib_n = 0
while fib_n < 1000000:
    # Advance the window: fib_n takes the previous term's value.
    fib_n, n_minus_1, n_minus_2 = n_minus_1, n_minus_2, n_minus_1 + n_minus_2
    # Bug fix: fib_n is produced AFTER the while-condition was checked, so a
    # term >= 1,000,000 could slip in here; re-check the limit before adding.
    if fib_n < 1000000 and fib_n % 2 == 0:
        special_sum += fib_n
print(special_sum)
| 26.25
| 74
| 0.733333
| 49
| 210
| 2.653061
| 0.244898
| 0.369231
| 0.215385
| 0.246154
| 0.430769
| 0.430769
| 0.323077
| 0.323077
| 0.323077
| 0.323077
| 0
| 0.114943
| 0.171429
| 210
| 7
| 75
| 30
| 0.632184
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.142857
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
48704e2e1ef7305aa5a6a78d426b35e7c88258f2
| 4,990
|
py
|
Python
|
tests/fixtures/rsa_pair.py
|
pdyba/lambdalizator
|
0371b8d3e25249096a9c7e7cf90fc590a99ad536
|
[
"MIT"
] | 3
|
2020-09-26T11:05:32.000Z
|
2021-09-25T08:58:10.000Z
|
tests/fixtures/rsa_pair.py
|
pdyba/lambdalizator
|
0371b8d3e25249096a9c7e7cf90fc590a99ad536
|
[
"MIT"
] | 15
|
2020-09-29T12:10:55.000Z
|
2021-11-17T10:42:21.000Z
|
tests/fixtures/rsa_pair.py
|
pdyba/lambdalizator
|
0371b8d3e25249096a9c7e7cf90fc590a99ad536
|
[
"MIT"
] | 1
|
2020-09-26T11:05:38.000Z
|
2020-09-26T11:05:38.000Z
|
# Test fixtures for RSA/JWT handling.
#
# NOTE: several JWK values below are split across consecutive string literals;
# Python implicitly concatenates adjacent literals, so each field is one long
# base64url string at runtime.

# A pre-computed JWT (header.payload.signature) signed with SAMPLE_PRIVATE_KEY;
# tests compare generated tokens against this expected value.
EXPECTED_TOKEN = "eyJhbGciOiJSUzI1NiIsImtpZCI6Ijk0OTRhZDc1LTNmNTQtNDE1NS04NGZhLWMxYTE3ZGEyMmIzNSIsInR5cCI6IkpXVCJ9.eyJhbGxvdyI6eyIqIjoiKiJ9LCJkZW55Ijp7fX0.nDqCxO2Q1iXpxzbH7syxuyqw7kCY0sDfi9RX-VSUMTRN5aWTLt1bcPw4oN_jx89-YHBzDwnwBc07RsMgpFuo4zz2LU9PF0ciYxMNX-atTNsaIn05NkXT08au2AYb0DRCDS76MZ4QNi-4mRpLrj1SD4mSCwGtc2WNw9f0J0Vm4ZCYPVW6BqpcHcaFXzcFZ6EIoooaK6GvdTOjy498lWsAXjAen2U6Jles_BwFjqW1lW_ky4WV4J9NnK3v5wWKgR1Pg4R4LpnhIXe0dU_l64JHoJA3YcYxl-qilHfoBduc3La4kRKk7FAQDIqbOv4uN03BIoDXLH5t2uJ1Sm79Pe0ngGd5pSBmfUDKOGsHtx_3_9ZKfp-E2IVS0C7r36p4Ue0gKQzn0pXxa591bxm_puJAQ399SdbmlOJsM2cVFYAtlUQvWgErc57WcUJ0Qe4jEycury7hagNbP2fLn-7Gg4gZHiZ_Ul7L6GukbDfCHnhxSS4P3t3cVtWuslZi16hDhNbOTKD95y7PXvHePvI57ALV2v0RecQ5Blwurt1OuDRSjCYXyO6U4Y9MBHcd1wMtDoVW0jjvjXvqkEhuB52Zajh_yTNnJo0OAHpuK5wldVpECGFVx1rkW1ypKqlukGIgD--m6ElKnl6jw5VWSbdh2TJsZHnzjovbQUeqZOeMxwX6SE8"

# RSA private key in JWK form (RFC 7517/7518 field names: p/q primes,
# d private exponent, e public exponent, kid key id, qi/dp/dq CRT params,
# n modulus).
SAMPLE_PRIVATE_KEY = {
    "p": "5Cwk4zIvta07E37iZVlnzqeQX2jV1GaHKSUFtUVeaMVY_FBQ5Yr5ux7bxTKrikSs22QI8z1x6GVuEH3MGeh3qjOyUTfJJyOcS_RVmQxdYwjkxOsN953SWMvPRhkLd-svB8LI4Ylwo1NrlSBJOQqM2xvtoQ7KkEAJquvX_UHkTuMuSCcpuDyp-qUkvgfbSUgvCacmwNf-bXh2kKVM14YKt3el4vYITetyy96jzSLKf6V36AX8PFbSOS7oZbeO3dgfGp0MDcpF0flu8McaSipBXoHbrjtxx0MOJjpU3Fhpy6Q7o7TX3-OC7fKplseHol4dxkRCsiDoyCxjhUzPHShy8Q",
    "kty": "RSA",
    "q": "1PdEoZDjRNefrOxin6a4HEMnooopdLLbNrTmFcd3k3vIiKKK"
    "--8eg0zMQMYNe5QXDsiDuly3GTZkptDC93CxWQfjRftmC9gaz_pmdMhOPncgMfGfJAt0Ic57d023rmoZzBQecGKr_3lFxV29cs8bB1ppGxdIlweCXfvvbTubcU3CUyZrAjZqfXqxj4B78PQ96BIbFBTgtCpvSW0YhaswMMFpRnb8grMDQzkB0pXnq-GiBGc3wWKrWWrQjW5sLklwZUsQJz0GdZbLkHiq2nSM_wera6FgshyHvJFHSa9gpREq5rZdMsERp1C2Dd5h8W2cwohpuWqjQQVYGqSXgbN30Q",
    "d": "eYbYTGZ5uklUa2c2LUvbWyRLe8fL3fjFVG87xV76AwxkJZXmn_Mzv0c2F3rbiFjAL"
    "-BAWRwK2hrojzWMAztN0u3o13rQh4LasNrz9nPA"
    "-jCzIu1JnmBwwNBfY6x2LOOQPlrXI9SBbe94gB4xQUkb8yhzIlWk7jpPzbcKxi19r0SG06UVOEpB6z06cPGOEFrpKpgEaOulYx0H2G1s4vBADQaEFvamN94-sE_PhNjve-HjS3Lz3lhnm0ajFjpLsUqE0dIpeLWmEM3jMU7Zz6c9mI3V2aBUiuNYi9MeUvs9usmbX5krvijfJxDJsxmt1OK3EJSW_kju00hQYRi3jvmKqKnId7bHQSEaXa3FX5FTaardaaCUZSoEl6_WiypxyYg4PHp37HbXnZClBKWZot5lPWHn-Uf-2E2TXdqvysZRfc8NiBVrvVOzQDAkndR-wU3QgL5Efi6vrAFGkb5Ra7WvZ5fjkbQvb5VjKj0kKjJS3__CzLfAp_mQymy6U2wJE_YRxwwavFfaa53YhUdhiYM5P83Tj1UNG48ilVEFIzXWjgjfBMyNsgGF70mINJcc47GUXXh49GeWsHB9DqUMqyEbcVlEr_7tgkVHI7Dko8QlFjlLZRRa-JZApYlcRhaQGni0oUOMELP9ZRtArP3ib8hfdqqcBratNM_BB0bczBAJRQE",
    "e": "AQAB",
    "kid": "9494ad75-3f54-4155-84fa-c1a17da22b35",
    "qi": "znwFS129727wjq6Whq6wfD1cxkLs9hnS7"
    "-cEbZ8k_p7gE2vXucaK_AN1hIeU6HAGHgoP1RH7rUMN8"
    "-YzxfAsj8X0g0u8Qte7evf7jaj4YqPDGy7dTTA2ALLcWAD"
    "-djrDAU40ZJXnzM6hIIPbn_uMXAB5W0q_icV5xoIwxJog4ReUle0qMRS0iQCNocPBHydWt0zP"
    "-Cqz9MIPd0ctNEf8E55Go90_yAWibQlo4PgLF48UI5BG0NhmPa1nI9Kpt36TwAoyKwscGysSgPGNxgzj4t2PxatXO7Xm47dfNi_yhBSHq9TNcw2laMs9e77G9gluTAOPqYC06zRC7Rr3m9lXOA",
    "dp": "Z_3KjhW4ctfR_e"
    "-tZT2bNy9deG6CTjywS0tJT7We8qdHCC_evs9ZRDQrO7P9RJZKJe9wuNN_T8iyoieDVyeBKnxHQAbp0cHEIUXpoUhmY5WRFkJ-6iTu0nOJM0yE0pHIrIPVJB2MzZNei-fcF3g8fDw9UFM6dQYKofC9TvqyAFZAKLhYplRXsBmGJmnUQpD4hzC8U9Xdaq0ldIUyAWRhC_8nBsrVPBYcCtic1QiPPCABByl7LVDwnQlI99rx7R_sBSggb0SKD8ncCzbjP3wEsPsEUWNcVtGz6C5bsNVG2n4uhE0OukapzKL1MfgcVB8K-Orxbtfa4CiC7yTznDlsMQ",
    "dq": "Pl5f-hUNiea_"
    "-4uK4oiX2KcOH3ro4yVSL7ZQv8YXzdhthR5dJ6UCwZ8nHj0iS7O2AP1WHqjycm7MkVIIFyEovxMhSyhx3TwfthL2GHNk_sQyaI4DdjHog9INtIXNKkYmYe7ubylmh74DYeavCcV_e-rNZ0KtXpWzZ0TV_J59SnRkWaehpRc8npzlDUqqgYl169YJmhr3J6xZxR4vFU5qIY0zAJDuKHS2muRCFWMTYvIEWdfEq1zzI4-1ngXdpryZLwEJrQQhNSTBXwEHwExr0nBzkmTDhcX3NpExWHIFErJxZvm3V5rVSbPIbU1YT7UzOIFsvQFu6Cbhg4P6XuCpUQ",
    "n": "vdDyT3d33_NmNKBsF4OjIjtOsyOMrH8xhL9C8Jx6yvPcHNWkBHorIEyp7CcKp2gCo7jch6TlkH483cEwhw1GyyfrMPKh4P1uwNHpFI2eEDPqw_wyORVIIT8FPb_QhDxV5GFiWWecr_0DWNf9murqa_T7p5YUK3XVIhewFPDf0iHsV87OJLB8AoIsUfOCym5tvskTuxsMaIpYJZETe7upE_Xg-nVhyXhpFAJEw7RlYebrSEtoFpN6TwYuxutocZ4jNLn1x5t-YHWnyLYUIxN5_fuuzVGIAleY9T3WJXurYGnCUwQjgT7OvqM2K_xej0vFOp_P5C2YSxBX0SZG2322gDQiEqz7G2BCZ7I3PA4XVWV0KENwhxgz5GS7zjuZPIWm4oKwBRlYluTdWpc7A9w7LRs3tCJl6t_ReTlblnT9Dq7l5Na36IBOpesY77apE4BFlFdJZvhF_qrkHEQwo4ckOGrFlG8M3iV8UclHvVzBOPTi-sZoOOiytuSKbn5HikB2CV7k0GIfAJS6Q-RUHSYVFEH7IfmbvE0YkwglDggJWIfwtZG8IVuOFZunmWrYaKdMAvIJQjjv7oAwqrRD6HlXAPGHiTb_BRVaYqOD8ugjZ9ZO2tags12QyMOJq8XQ1mjegp0F-MNKlp_zff5xJbKETu2VUDFQmBqU6gLw_2r23cE",
}

# Public half of the same key pair: modulus and exponent only, same kid.
SAMPLE_PUBLIC_KEY = {
    "kty": "RSA",
    "e": "AQAB",
    "kid": "9494ad75-3f54-4155-84fa-c1a17da22b35",
    "n": "vdDyT3d33_NmNKBsF4OjIjtOsyOMrH8xhL9C8Jx6yvPcHNWkBHorIEyp7CcKp2gCo7jch6TlkH483cEwhw1GyyfrMPKh4P1uwNHpFI2eEDPqw_wyORVIIT8FPb_QhDxV5GFiWWecr_0DWNf9murqa_T7p5YUK3XVIhewFPDf0iHsV87OJLB8AoIsUfOCym5tvskTuxsMaIpYJZETe7upE_Xg-nVhyXhpFAJEw7RlYebrSEtoFpN6TwYuxutocZ4jNLn1x5t-YHWnyLYUIxN5_fuuzVGIAleY9T3WJXurYGnCUwQjgT7OvqM2K_xej0vFOp_P5C2YSxBX0SZG2322gDQiEqz7G2BCZ7I3PA4XVWV0KENwhxgz5GS7zjuZPIWm4oKwBRlYluTdWpc7A9w7LRs3tCJl6t_ReTlblnT9Dq7l5Na36IBOpesY77apE4BFlFdJZvhF_qrkHEQwo4ckOGrFlG8M3iV8UclHvVzBOPTi-sZoOOiytuSKbn5HikB2CV7k0GIfAJS6Q-RUHSYVFEH7IfmbvE0YkwglDggJWIfwtZG8IVuOFZunmWrYaKdMAvIJQjjv7oAwqrRD6HlXAPGHiTb_BRVaYqOD8ugjZ9ZO2tags12QyMOJq8XQ1mjegp0F-MNKlp_zff5xJbKETu2VUDFQmBqU6gLw_2r23cE",
}
| 172.068966
| 839
| 0.938477
| 158
| 4,990
| 29.170886
| 0.778481
| 0.002604
| 0.003471
| 0.006943
| 0.305923
| 0.305923
| 0.305923
| 0.305923
| 0.288566
| 0.288566
| 0
| 0.15567
| 0.028056
| 4,990
| 28
| 840
| 178.214286
| 0.794639
| 0
| 0
| 0.285714
| 0
| 0.035714
| 0.93988
| 0.926854
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
4880034d3f0a1caff60f4b886038b5b54c1a617f
| 97
|
py
|
Python
|
src/python/WMCore/Agent/Daemon/__init__.py
|
khurtado/WMCore
|
f74e252412e49189a92962945a94f93bec81cd1e
|
[
"Apache-2.0"
] | 21
|
2015-11-19T16:18:45.000Z
|
2021-12-02T18:20:39.000Z
|
src/python/WMCore/Agent/Daemon/__init__.py
|
khurtado/WMCore
|
f74e252412e49189a92962945a94f93bec81cd1e
|
[
"Apache-2.0"
] | 5,671
|
2015-01-06T14:38:52.000Z
|
2022-03-31T22:11:14.000Z
|
src/python/WMCore/Agent/Daemon/__init__.py
|
khurtado/WMCore
|
f74e252412e49189a92962945a94f93bec81cd1e
|
[
"Apache-2.0"
] | 67
|
2015-01-21T15:55:38.000Z
|
2022-02-03T19:53:13.000Z
|
#!/usr/bin/env python
"""
__init__
Module containing methods for daemonizing
applications.
"""
| 10.777778
| 41
| 0.742268
| 11
| 97
| 6.181818
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.134021
| 97
| 8
| 42
| 12.125
| 0.809524
| 0.886598
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
6f9dd510493d691a79dc8b2b14bd6c8fae68f816
| 3,682
|
py
|
Python
|
test/distributed/pipeline/sync/skip/test_verify_skippables.py
|
Hacky-DH/pytorch
|
80dc4be615854570aa39a7e36495897d8a040ecc
|
[
"Intel"
] | 60,067
|
2017-01-18T17:21:31.000Z
|
2022-03-31T21:37:45.000Z
|
test/distributed/pipeline/sync/skip/test_verify_skippables.py
|
Hacky-DH/pytorch
|
80dc4be615854570aa39a7e36495897d8a040ecc
|
[
"Intel"
] | 66,955
|
2017-01-18T17:21:38.000Z
|
2022-03-31T23:56:11.000Z
|
test/distributed/pipeline/sync/skip/test_verify_skippables.py
|
Hacky-DH/pytorch
|
80dc4be615854570aa39a7e36495897d8a040ecc
|
[
"Intel"
] | 19,210
|
2017-01-18T17:45:04.000Z
|
2022-03-31T23:51:56.000Z
|
# Copyright 2019 Kakao Brain
#
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import pytest
from torch import nn
from torch.distributed.pipeline.sync.skip import Namespace, skippable, verify_skippables
def test_matching():
    """A stash followed by a matching pop verifies cleanly."""
    @skippable(stash=["foo"])
    class Producer(nn.Module):
        pass

    @skippable(pop=["foo"])
    class Consumer(nn.Module):
        pass

    verify_skippables(nn.Sequential(Producer(), Consumer()))
def test_stash_not_pop():
    """A stash with no matching pop is rejected."""
    @skippable(stash=["foo"])
    class Producer(nn.Module):
        pass

    with pytest.raises(TypeError) as exc_info:
        verify_skippables(nn.Sequential(Producer()))
    assert "no module declared 'foo' as poppable but stashed" in str(exc_info.value)
def test_pop_unknown():
    """Popping a name that was never stashed is rejected."""
    @skippable(pop=["foo"])
    class Consumer(nn.Module):
        pass

    with pytest.raises(TypeError) as exc_info:
        verify_skippables(nn.Sequential(Consumer()))
    assert "'0' declared 'foo' as poppable but it was not stashed" in str(exc_info.value)
def test_stash_again():
    """Stashing the same name twice before it is popped is rejected."""
    @skippable(stash=["foo"])
    class FirstProducer(nn.Module):
        pass

    @skippable(stash=["foo"])
    class SecondProducer(nn.Module):
        pass

    @skippable(pop=["foo"])
    class Consumer(nn.Module):
        pass

    with pytest.raises(TypeError) as exc_info:
        verify_skippables(nn.Sequential(FirstProducer(), SecondProducer(), Consumer()))
    assert "'1' redeclared 'foo' as stashable" in str(exc_info.value)
def test_pop_again():
    """Popping the same name twice is rejected."""
    @skippable(stash=["foo"])
    class Producer(nn.Module):
        pass

    @skippable(pop=["foo"])
    class FirstConsumer(nn.Module):
        pass

    @skippable(pop=["foo"])
    class SecondConsumer(nn.Module):
        pass

    with pytest.raises(TypeError) as exc_info:
        verify_skippables(nn.Sequential(Producer(), FirstConsumer(), SecondConsumer()))
    assert "'2' redeclared 'foo' as poppable" in str(exc_info.value)
def test_stash_pop_together_different_names():
    """A layer may pop one name and stash another in the same step."""
    @skippable(stash=["foo"])
    class Producer(nn.Module):
        pass

    @skippable(pop=["foo"], stash=["bar"])
    class Relay(nn.Module):
        pass

    @skippable(pop=["bar"])
    class Consumer(nn.Module):
        pass

    verify_skippables(nn.Sequential(Producer(), Relay(), Consumer()))
def test_stash_pop_together_same_name():
    """One layer declaring the same name as both stash and pop is rejected."""
    @skippable(stash=["foo"], pop=["foo"])
    class SelfLoop(nn.Module):
        pass

    with pytest.raises(TypeError) as exc_info:
        verify_skippables(nn.Sequential(SelfLoop()))
    assert "'0' declared 'foo' both as stashable and as poppable" in str(exc_info.value)
def test_double_stash_pop():
    """Re-using the same name for a second stash/pop pair without namespaces
    is rejected, reporting both offending layers."""
    @skippable(stash=["foo"])
    class FirstProducer(nn.Module):
        pass

    @skippable(pop=["foo"])
    class FirstConsumer(nn.Module):
        pass

    @skippable(stash=["foo"])
    class SecondProducer(nn.Module):
        pass

    @skippable(pop=["foo"])
    class SecondConsumer(nn.Module):
        pass

    with pytest.raises(TypeError) as exc_info:
        verify_skippables(
            nn.Sequential(FirstProducer(), FirstConsumer(), SecondProducer(), SecondConsumer())
        )
    message = str(exc_info.value)
    assert "'2' redeclared 'foo' as stashable" in message
    assert "'3' redeclared 'foo' as poppable" in message
def test_double_stash_pop_but_isolated():
    """Two stash/pop pairs with the same name are fine when each pair is
    isolated into its own Namespace."""
    @skippable(stash=["foo"])
    class FirstProducer(nn.Module):
        pass

    @skippable(pop=["foo"])
    class FirstConsumer(nn.Module):
        pass

    @skippable(stash=["foo"])
    class SecondProducer(nn.Module):
        pass

    @skippable(pop=["foo"])
    class SecondConsumer(nn.Module):
        pass

    first_ns = Namespace()
    second_ns = Namespace()
    verify_skippables(
        nn.Sequential(
            FirstProducer().isolate(first_ns),
            FirstConsumer().isolate(first_ns),
            SecondProducer().isolate(second_ns),
            SecondConsumer().isolate(second_ns),
        )
    )
| 24.065359
| 114
| 0.636611
| 467
| 3,682
| 4.937902
| 0.197002
| 0.076323
| 0.114484
| 0.118387
| 0.776236
| 0.722463
| 0.722463
| 0.670859
| 0.640503
| 0.593235
| 0
| 0.020891
| 0.219989
| 3,682
| 152
| 115
| 24.223684
| 0.782033
| 0.058664
| 0
| 0.721154
| 0
| 0
| 0.102631
| 0
| 0
| 0
| 0
| 0
| 0.067308
| 1
| 0.086538
| false
| 0.211538
| 0.028846
| 0
| 0.326923
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 4
|
6fca82fb7a3cb4065768ef651df5a3e1e00bf4c5
| 253
|
py
|
Python
|
mailto/tasks.py
|
hckjck/django-mailto
|
3f36661a6a345ce2e87b16d14c3060f75f4da467
|
[
"BSD-3-Clause"
] | null | null | null |
mailto/tasks.py
|
hckjck/django-mailto
|
3f36661a6a345ce2e87b16d14c3060f75f4da467
|
[
"BSD-3-Clause"
] | 2
|
2015-04-01T09:44:30.000Z
|
2015-04-01T11:01:51.000Z
|
mailto/tasks.py
|
hckjck/django-mailto
|
3f36661a6a345ce2e87b16d14c3060f75f4da467
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from mailto.models import mailto
try:
    from celery import shared_task
except ImportError:
    # celery is an optional dependency: without it, no task is registered
    # and callers must invoke mailto() synchronously.
    pass
else:
    @shared_task
    def task_mailto(args, kwargs):
        """Celery task wrapper: forwards positional/keyword args to mailto()."""
        mailto(*args, **kwargs)
| 16.866667
| 38
| 0.6917
| 32
| 253
| 5.21875
| 0.59375
| 0.11976
| 0.191617
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.005051
| 0.217391
| 253
| 15
| 39
| 16.866667
| 0.838384
| 0.083004
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.111111
| false
| 0.111111
| 0.444444
| 0
| 0.555556
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
|
0
| 4
|
6ff32bfc9f9631144f5ffe2cc6345bda2076d633
| 2,234
|
py
|
Python
|
timingCalculations/SET/GaussianPFAcalc.py
|
annafriebe/timing
|
685d05417bcd6bb1b640e30b37e52b33c7da66d8
|
[
"MIT"
] | null | null | null |
timingCalculations/SET/GaussianPFAcalc.py
|
annafriebe/timing
|
685d05417bcd6bb1b640e30b37e52b33c7da66d8
|
[
"MIT"
] | null | null | null |
timingCalculations/SET/GaussianPFAcalc.py
|
annafriebe/timing
|
685d05417bcd6bb1b640e30b37e52b33c7da66d8
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import math
import numpy as np
import scipy.stats as stats
from matplotlib import pyplot as plt
from PoissonPFAcalc import calcT
nGenerated = 100
def calcPFAGaussian(z):
    """Fit a Gaussian to the sample ``z`` and estimate a probability of
    false alarm via Monte-Carlo reference sets.

    Prints the fitted moments, beta and PFA; returns (mean, stddev).
    """
    sample_mean = np.mean(z)
    print("mean: ", sample_mean)
    sample_std = math.sqrt(np.var(z))
    print("stddev: ", sample_std)
    # nGenerated synthetic samples drawn from the fitted distribution.
    synthetic = np.random.normal(sample_mean, sample_std, (nGenerated, len(z)))
    log_lik_data = np.log(stats.norm.pdf(z, sample_mean, sample_std))
    log_lik_gen = np.log(stats.norm.pdf(synthetic, sample_mean, sample_std))
    expW = np.mean(log_lik_gen)
    varW = np.var(log_lik_gen)
    measuredT = calcT(log_lik_data, expW, varW)
    # Test statistic for each synthetic sample, compared against the data's.
    generatedT = np.zeros(nGenerated)
    for k in range(nGenerated):
        generatedT[k] = calcT(log_lik_gen[k], expW, varW)
    beta = np.count_nonzero(generatedT <= measuredT) / nGenerated
    print("Beta:", beta)
    PFA = min(beta, 1 - beta)
    print("PFA:", PFA)
    return sample_mean, sample_std
def calcPFASkewNorm(z):
    """Fit a skew-normal to the sample ``z`` and estimate a probability of
    false alarm via Monte-Carlo reference sets.

    Prints fit parameters, sample moments, beta and PFA; returns (a, loc, scale).
    """
    a, loc, scale = stats.skewnorm.fit(z)
    print("a", a)
    print("loc", loc)
    print("scale", scale)
    sample_mean = np.mean(z)
    print("mean: ", sample_mean)
    sample_std = math.sqrt(np.var(z))
    print("stddev: ", sample_std)
    # nGenerated synthetic samples drawn from the fitted skew-normal.
    synthetic = stats.skewnorm(a, loc, scale).rvs((nGenerated, len(z)))
    log_lik_data = np.log(stats.skewnorm.pdf(z, a, loc, scale))
    log_lik_gen = np.log(stats.skewnorm.pdf(synthetic, a, loc, scale))
    expW = np.mean(log_lik_gen)
    varW = np.var(log_lik_gen)
    measuredT = calcT(log_lik_data, expW, varW)
    # Test statistic for each synthetic sample, compared against the data's.
    generatedT = np.zeros(nGenerated)
    for k in range(nGenerated):
        generatedT[k] = calcT(log_lik_gen[k], expW, varW)
    beta = np.count_nonzero(generatedT <= measuredT) / nGenerated
    print("Beta:", beta)
    PFA = min(beta, 1 - beta)
    print("PFA:", PFA)
    return a, loc, scale
#TODO, draw z and probability distribution
#print(generatedData)
| 31.914286
| 82
| 0.681289
| 263
| 2,234
| 5.779468
| 0.258555
| 0.055263
| 0.029605
| 0.015789
| 0.702632
| 0.702632
| 0.609211
| 0.609211
| 0.609211
| 0.609211
| 0
| 0.002776
| 0.193823
| 2,234
| 69
| 83
| 32.376812
| 0.841199
| 0.060877
| 0
| 0.615385
| 1
| 0
| 0.026278
| 0
| 0
| 0
| 0
| 0.014493
| 0
| 1
| 0.038462
| false
| 0
| 0.096154
| 0
| 0.173077
| 0.211538
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
b5003da2268625bc95d8dceef5949216a61e4057
| 87
|
py
|
Python
|
cancan/apps.py
|
pgorecki/django-cancango
|
c2859db27169af57862d7974b326140f253465f1
|
[
"MIT"
] | 34
|
2020-09-02T11:28:03.000Z
|
2022-03-17T08:18:02.000Z
|
cancan/apps.py
|
pgorecki/django-cancango
|
c2859db27169af57862d7974b326140f253465f1
|
[
"MIT"
] | 2
|
2020-09-23T12:51:20.000Z
|
2022-02-10T14:42:46.000Z
|
cancan/apps.py
|
pgorecki/django-cancango
|
c2859db27169af57862d7974b326140f253465f1
|
[
"MIT"
] | null | null | null |
from django.apps import AppConfig
class CanCanConfig(AppConfig):
    """Django application configuration for the ``cancan`` app."""

    # Label under which Django registers this application.
    name = "cancan"
| 14.5
| 33
| 0.747126
| 10
| 87
| 6.5
| 0.9
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.172414
| 87
| 5
| 34
| 17.4
| 0.902778
| 0
| 0
| 0
| 0
| 0
| 0.068966
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
82eb9e4a3ecf573f68cb616c0cb06f994216d93b
| 276
|
py
|
Python
|
osuapi/endpoints.py
|
khazhyk/osssss
|
6286e4c61cc9510f791256e8a2598bbee13cda7f
|
[
"MIT"
] | 20
|
2017-03-21T06:04:32.000Z
|
2021-11-04T21:11:58.000Z
|
osuapi/endpoints.py
|
khazhyk/osuapi
|
6286e4c61cc9510f791256e8a2598bbee13cda7f
|
[
"MIT"
] | 31
|
2016-08-05T02:12:20.000Z
|
2021-01-11T21:12:47.000Z
|
osuapi/endpoints.py
|
khazhyk/osuapi
|
6286e4c61cc9510f791256e8a2598bbee13cda7f
|
[
"MIT"
] | 19
|
2016-08-03T18:34:02.000Z
|
2021-12-06T09:20:00.000Z
|
"""API endpoints."""
API_BASE = "https://osu.ppy.sh/api"
USER = API_BASE + "/get_user"
USER_BEST = API_BASE + "/get_user_best"
USER_RECENT = API_BASE + "/get_user_recent"
SCORES = API_BASE + "/get_scores"
BEATMAPS = API_BASE + "/get_beatmaps"
MATCH = API_BASE + "/get_match"
| 27.6
| 43
| 0.702899
| 43
| 276
| 4.116279
| 0.325581
| 0.276836
| 0.338983
| 0.237288
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.130435
| 276
| 9
| 44
| 30.666667
| 0.7375
| 0.050725
| 0
| 0
| 0
| 0
| 0.371094
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
82f1bac901fd9d43bcbaf44c4d6bac9556909ab8
| 97
|
py
|
Python
|
mlm/subset_model/__main__.py
|
ririw/kaggle-bimbo-pymc3
|
fbf016751e2459b9fa6c8d058aad9c75fca57731
|
[
"MIT"
] | null | null | null |
mlm/subset_model/__main__.py
|
ririw/kaggle-bimbo-pymc3
|
fbf016751e2459b9fa6c8d058aad9c75fca57731
|
[
"MIT"
] | null | null | null |
mlm/subset_model/__main__.py
|
ririw/kaggle-bimbo-pymc3
|
fbf016751e2459b9fa6c8d058aad9c75fca57731
|
[
"MIT"
] | null | null | null |
from mlm.subset_model import SubsetModelCLI

# Entry point: run the subset-model command-line interface when executed as
# ``python -m mlm.subset_model``.
if __name__ == '__main__':
    SubsetModelCLI.run()
| 19.4
| 43
| 0.762887
| 11
| 97
| 5.909091
| 0.909091
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.14433
| 97
| 4
| 44
| 24.25
| 0.783133
| 0
| 0
| 0
| 0
| 0
| 0.082474
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.333333
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
82f6a0cbbf2b7e08a8c33dbf03ef202f8e47dd20
| 421
|
py
|
Python
|
tests/test_maybe/test_maybe_unwrap.py
|
ksurta/returns
|
9746e569303f214d035462ae3dffe5c49abdcfa7
|
[
"BSD-2-Clause"
] | null | null | null |
tests/test_maybe/test_maybe_unwrap.py
|
ksurta/returns
|
9746e569303f214d035462ae3dffe5c49abdcfa7
|
[
"BSD-2-Clause"
] | null | null | null |
tests/test_maybe/test_maybe_unwrap.py
|
ksurta/returns
|
9746e569303f214d035462ae3dffe5c49abdcfa7
|
[
"BSD-2-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
import pytest
from returns.maybe import Nothing, Some
from returns.primitives.exceptions import UnwrapFailedError
def test_unwrap_success():
    """``Some`` containers unwrap to their wrapped value."""
    container = Some(5)
    assert container.unwrap() == 5
def test_unwrap_failure():
    """Unwrapping ``Nothing`` raises ``UnwrapFailedError``."""
    failing = Nothing
    with pytest.raises(UnwrapFailedError):
        assert failing.unwrap()
| 23.388889
| 59
| 0.71734
| 51
| 421
| 5.843137
| 0.529412
| 0.073826
| 0.087248
| 0.147651
| 0.167785
| 0
| 0
| 0
| 0
| 0
| 0
| 0.008596
| 0.171021
| 421
| 17
| 60
| 24.764706
| 0.845272
| 0.27791
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.25
| 1
| 0.25
| true
| 0
| 0.375
| 0
| 0.625
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
82fc2f9abb6c40b0c27c9515fdac1aac4a77ba41
| 131
|
py
|
Python
|
tests/conftest.py
|
nylas/mypy-tools
|
5c5a7a18fd38a469372a99fd64e84ef3851cf15f
|
[
"MIT"
] | 27
|
2017-10-12T02:32:59.000Z
|
2021-07-01T04:38:17.000Z
|
tests/conftest.py
|
nylas/mypy-tools
|
5c5a7a18fd38a469372a99fd64e84ef3851cf15f
|
[
"MIT"
] | 6
|
2017-10-16T23:20:47.000Z
|
2021-03-25T21:44:30.000Z
|
tests/conftest.py
|
nylas/mypy-tools
|
5c5a7a18fd38a469372a99fd64e84ef3851cf15f
|
[
"MIT"
] | 5
|
2017-10-12T02:33:04.000Z
|
2018-12-13T05:57:12.000Z
|
import sys

# Tell pytest to skip the test tree written for the *other* interpreter
# major version: under Python 3 ignore the "py2" suite, otherwise "py3".
collect_ignore = ["py2" if sys.version_info[0] > 2 else "py3"]
| 18.714286
| 32
| 0.70229
| 19
| 131
| 4.631579
| 0.684211
| 0.443182
| 0.431818
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.036036
| 0.152672
| 131
| 7
| 33
| 18.714286
| 0.756757
| 0
| 0
| 0
| 0
| 0
| 0.045455
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.166667
| 0
| 0.166667
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
82fd70d563323f0a082928808a18d3e295a11aeb
| 327
|
py
|
Python
|
utilities/__init__.py
|
jakeaylmer/ice_edge_latitude
|
327aecbfc742b8deb7c055ed57beab8b9bb931f6
|
[
"MIT"
] | null | null | null |
utilities/__init__.py
|
jakeaylmer/ice_edge_latitude
|
327aecbfc742b8deb7c055ed57beab8b9bb931f6
|
[
"MIT"
] | null | null | null |
utilities/__init__.py
|
jakeaylmer/ice_edge_latitude
|
327aecbfc742b8deb7c055ed57beab8b9bb931f6
|
[
"MIT"
] | null | null | null |
"""
---------------------------------------------------------
For sub-package documentation, refer the relevant
function documentation.
---------------------------------------------------------
"""
import sys
import os
sys.path.append(os.path.dirname(__file__))
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
| 25.153846
| 62
| 0.48318
| 30
| 327
| 5
| 0.533333
| 0.12
| 0.173333
| 0.2
| 0.253333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.06422
| 327
| 12
| 63
| 27.25
| 0.490196
| 0.577982
| 0
| 0
| 0
| 0
| 0.015385
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
d206208dc77cb07161822176c5a7566e3dafea64
| 388
|
py
|
Python
|
src/SPH/SolidSPHHydroBaseInst.cc.py
|
markguozhiming/spheral
|
bbb982102e61edb8a1d00cf780bfa571835e1b61
|
[
"BSD-Source-Code",
"BSD-3-Clause-LBNL",
"FSFAP"
] | 1
|
2020-10-21T01:56:55.000Z
|
2020-10-21T01:56:55.000Z
|
src/SPH/SolidSPHHydroBaseInst.cc.py
|
markguozhiming/spheral
|
bbb982102e61edb8a1d00cf780bfa571835e1b61
|
[
"BSD-Source-Code",
"BSD-3-Clause-LBNL",
"FSFAP"
] | null | null | null |
src/SPH/SolidSPHHydroBaseInst.cc.py
|
markguozhiming/spheral
|
bbb982102e61edb8a1d00cf780bfa571835e1b61
|
[
"BSD-Source-Code",
"BSD-3-Clause-LBNL",
"FSFAP"
] | null | null | null |
text = """
//------------------------------------------------------------------------------
// Explict instantiation.
//------------------------------------------------------------------------------
#include "SolidSPHHydroBase.cc"
#include "SolidSPHEvaluateDerivatives.cc"
#include "Geometry/Dimension.hh"
namespace Spheral {
template class SolidSPHHydroBase< Dim< %(ndim)s > >;
}
"""
| 29.846154
| 80
| 0.427835
| 21
| 388
| 7.904762
| 0.809524
| 0.108434
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.074742
| 388
| 12
| 81
| 32.333333
| 0.462396
| 0
| 0
| 0.181818
| 0
| 0
| 0.963918
| 0.610825
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
d221e61c12e17b97a96e4ce6b93655b302f747d2
| 690
|
py
|
Python
|
5kyu/last-digit-of-a-big-number.py
|
PabloCorbCon/codewars-examples
|
b792a73d78d764aeb3fc2231f97e06f976136854
|
[
"Apache-2.0"
] | 1
|
2021-02-26T16:29:04.000Z
|
2021-02-26T16:29:04.000Z
|
5kyu/last-digit-of-a-big-number.py
|
PabloCorbCon/codewars-examples
|
b792a73d78d764aeb3fc2231f97e06f976136854
|
[
"Apache-2.0"
] | null | null | null |
5kyu/last-digit-of-a-big-number.py
|
PabloCorbCon/codewars-examples
|
b792a73d78d764aeb3fc2231f97e06f976136854
|
[
"Apache-2.0"
] | null | null | null |
# Define a function that takes in two non-negative integers a and b and returns the last decimal digit of a^b.
# Note that a and b may be very large!
# For example, the last decimal digit of 9^7 is 9, since 9^7=4782969.
# The last decimal digit of (2^200)^2300, which has over 10^92 decimal digits, is 6.
# Also, please take 0^0 = 1
#
# You may assume that the input will always be valid.
# Examples
#
# last_digit(4, 1) # returns 4
# last_digit(4, 2) # returns 6
# last_digit(9, 7) # returns 9
# last_digit(10, 10 ** 10) # returns 0
# last_digit(2 ** 200, 2 ** 300) # returns 6
def last_digit(a, b):
    """Return the last decimal digit of a ** b, with 0 ** 0 defined as 1."""
    # Three-argument pow performs modular exponentiation without building the
    # full power; reducing the base mod 10 first changes nothing but keeps the
    # intent explicit. pow(0, 0, 10) == 1, matching the required convention.
    return pow(a % 10, b, 10)
| 40.588235
| 111
| 0.62029
| 123
| 690
| 3.430894
| 0.463415
| 0.127962
| 0.099526
| 0.135071
| 0.149289
| 0
| 0
| 0
| 0
| 0
| 0
| 0.110442
| 0.278261
| 690
| 16
| 112
| 43.125
| 0.736948
| 0.885507
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0
| 0
| 0.5
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 4
|
d229d2c9dcb0d06fc42d31d488172e38cbb6d68b
| 144
|
py
|
Python
|
techtest/social_media/urls.py
|
vittoriozamboni/techtest-backend
|
783a55a8ea18738c92445ace3e218402b1731fa0
|
[
"MIT"
] | null | null | null |
techtest/social_media/urls.py
|
vittoriozamboni/techtest-backend
|
783a55a8ea18738c92445ace3e218402b1731fa0
|
[
"MIT"
] | null | null | null |
techtest/social_media/urls.py
|
vittoriozamboni/techtest-backend
|
783a55a8ea18738c92445ace3e218402b1731fa0
|
[
"MIT"
] | null | null | null |
from django.conf.urls import include, url
urlpatterns = [
url(r'^api/', include('social_media.api.urls', namespace='social_media_api'))
]
| 20.571429
| 81
| 0.715278
| 20
| 144
| 5
| 0.65
| 0.22
| 0.28
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.125
| 144
| 6
| 82
| 24
| 0.793651
| 0
| 0
| 0
| 0
| 0
| 0.291667
| 0.145833
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.25
| 0
| 0.25
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
d22a222c63dedf14c9a5365be0ce520b0e648da1
| 105
|
py
|
Python
|
bdpy/__init__.py
|
birkin/borrowdirect.py
|
98b7e605c1f2a97eea7ed049b6f04914197a48e8
|
[
"Unlicense",
"MIT"
] | 2
|
2015-11-17T15:27:22.000Z
|
2016-10-28T17:13:30.000Z
|
bdpy/__init__.py
|
birkin/borrowdirect.py
|
98b7e605c1f2a97eea7ed049b6f04914197a48e8
|
[
"Unlicense",
"MIT"
] | 1
|
2015-05-28T19:04:48.000Z
|
2015-11-02T21:14:15.000Z
|
bdpy/__init__.py
|
birkin/borrowdirect.py
|
98b7e605c1f2a97eea7ed049b6f04914197a48e8
|
[
"Unlicense",
"MIT"
] | 1
|
2015-03-27T20:52:45.000Z
|
2015-03-27T20:52:45.000Z
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from .borrowdirect import BorrowDirect
| 17.5
| 39
| 0.761905
| 12
| 105
| 6.25
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.011111
| 0.142857
| 105
| 5
| 40
| 21
| 0.822222
| 0.2
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
d234b6c93697c102ff4480721422bea295483173
| 2,500
|
py
|
Python
|
marlgrid/pz_envs/yummyyucky.py
|
aivaslab/marlgrid
|
10b53d27ce224fadeeb5830d6034350a69feb4b4
|
[
"Apache-2.0"
] | null | null | null |
marlgrid/pz_envs/yummyyucky.py
|
aivaslab/marlgrid
|
10b53d27ce224fadeeb5830d6034350a69feb4b4
|
[
"Apache-2.0"
] | null | null | null |
marlgrid/pz_envs/yummyyucky.py
|
aivaslab/marlgrid
|
10b53d27ce224fadeeb5830d6034350a69feb4b4
|
[
"Apache-2.0"
] | null | null | null |
from ..base_AEC import *
from ..objects import *
from random import randrange
import random
import math
class YummyYuckyEnv0(para_MultiGridEnv):
    """Simplest yummy/yucky layout: one good and one bad colored goal,
    placed symmetrically about the grid center with a random mirror."""

    mission = "yummy yucky simple: go to the correct color, of 2."
    metadata = {}

    def _gen_grid(self, width, height):
        """Build a walled grid with a green (+1) and a blue (-1) goal."""
        self.grid = MultiGrid((width, height))
        good_idx = 0  # green is the rewarded color
        flip = random.choice([-1, 1])  # randomly swap the goals left/right
        colors = ['green', 'blue']
        # Surround the playing field with walls.
        self.grid.wall_rect(0, 0, width, height)
        for idx in range(2):
            reward = 1 if idx == good_idx else -1
            self.put_obj(
                Goal(color=colors[idx], reward=reward),
                width // 2 + 1 * (idx * 2 - 1) * flip,
                height // 2,
            )
        self.agent_spawn_kwargs = {"top": (1, 1)}
        self.place_agents(**self.agent_spawn_kwargs)
class YummyYuckyEnv1(para_MultiGridEnv):
    """Yummy/yucky task with mirrored goal pairs around the grid center.

    NOTE(review): block indentation was inferred during review — confirm that
    both ``put_obj`` calls of the second loop belong inside the loop body.
    """
    mission = "yummy yucky"
    metadata = {}

    def _gen_grid(self, width, height):
        # Create an empty grid
        self.grid = MultiGrid((width, height))
        chosen = 0  # index of the rewarded color (fixed to green; was random.choice([0,1]))
        mirror1 = random.choice([-1,1])  # random left/right flip for the horizontal pair
        mirror2 = random.choice([-1,1])  # random up/down flip for the vertical pairs
        c = ['green', 'blue']
        # Generate the surrounding walls
        self.grid.wall_rect(0, 0, width, height)
        # Horizontal pair: good color (+1) on one side of center, bad (-1) on the other.
        for x in range(2):
            r = 1 if x == chosen else -1
            self.put_obj(Goal(color=c[x], reward=r), width//2 + 3*(x*2-1)*mirror1, height//2)
        # Vertical pairs: each column gets one goal of each color, mirrored by mirror2.
        for x in range(2):
            r = 1 if x == chosen else -1
            self.put_obj(Goal(color=c[x], reward=r), width//2 + 3*(x*2-1), height//2-3*(x*2-1)*mirror2)
            # c[not x] flips the color index (bool -> 0/1); note the reward r is NOT flipped here.
            self.put_obj(Goal(color=c[not x], reward=r), width//2 + 3*(x*2-1), height//2+3*(x*2-1)*mirror2)
        self.agent_spawn_kwargs = {"top":(1,1)}
        self.place_agents(**self.agent_spawn_kwargs)
class YummyYuckyEnv3(para_MultiGridEnv):
    """Yummy/yucky task with four goals on a circle around the grid center."""

    mission = "yummy yucky"
    metadata = {}

    def _gen_grid(self, width, height):
        """Build a walled grid with four colored goals; one color rewards +1."""
        self.grid = MultiGrid((width, height))
        # Bug fix: `chosen` and `c` were referenced below but never defined in
        # this method (NameError at runtime), and the loop indexes four colors.
        chosen = 0  # green is the rewarded color, matching Env0/Env1
        # NOTE(review): color set assumed from the sibling envs plus two more —
        # confirm against the marlgrid color palette.
        c = ['green', 'blue', 'red', 'purple']
        # Generate the surrounding walls
        self.grid.wall_rect(0, 0, width, height)
        # Place the four goals at radius 3 on the N/E/S/W compass points.
        for x in range(4):
            r = 1 if x == chosen else -1
            self.put_obj(Goal(color=c[x], reward=r),
                         width//2 + int(3*math.cos(x*3.14/2)),
                         height//2 + int(3*math.sin(x*3.14/2)))
        self.agent_spawn_kwargs = {"color": "green", "view_offset": 0}
        self.place_agents(**self.agent_spawn_kwargs)
| 28.735632
| 129
| 0.5748
| 365
| 2,500
| 3.843836
| 0.216438
| 0.070563
| 0.01283
| 0.085531
| 0.810406
| 0.75196
| 0.71846
| 0.693514
| 0.693514
| 0.693514
| 0
| 0.044981
| 0.2708
| 2,500
| 86
| 130
| 29.069767
| 0.72463
| 0.0824
| 0
| 0.653061
| 0
| 0
| 0.051954
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.061224
| false
| 0
| 0.102041
| 0
| 0.346939
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
d238445108b6a7a1476130f941cdfead6a1e2990
| 369
|
py
|
Python
|
Level1/Lessons76501/gamjapark.py
|
StudyForCoding/ProgrammersLevel
|
dc957b1c02cc4383a93b8cbf3d739e6c4d88aa25
|
[
"MIT"
] | null | null | null |
Level1/Lessons76501/gamjapark.py
|
StudyForCoding/ProgrammersLevel
|
dc957b1c02cc4383a93b8cbf3d739e6c4d88aa25
|
[
"MIT"
] | null | null | null |
Level1/Lessons76501/gamjapark.py
|
StudyForCoding/ProgrammersLevel
|
dc957b1c02cc4383a93b8cbf3d739e6c4d88aa25
|
[
"MIT"
] | 1
|
2021-04-05T07:35:59.000Z
|
2021-04-05T07:35:59.000Z
|
# "Sum with signs": add each absolute value, positive when its sign flag
# is truthy, negative otherwise.
def solution(absolutes, signs):
    total = 0
    for value, positive in zip(absolutes, signs):
        total += value if positive else -value
    return total
'''
테스트 1 〉 통과 (0.11ms, 10.2MB)
테스트 2 〉 통과 (0.12ms, 10.2MB)
테스트 3 〉 통과 (0.11ms, 10.2MB)
테스트 4 〉 통과 (0.11ms, 10.3MB)
테스트 5 〉 통과 (0.12ms, 10.3MB)
테스트 6 〉 통과 (0.11ms, 10.2MB)
테스트 7 〉 통과 (0.10ms, 10.2MB)
테스트 8 〉 통과 (0.12ms, 10.3MB)
테스트 9 〉 통과 (0.12ms, 10.2MB)
'''
| 23.0625
| 66
| 0.585366
| 92
| 369
| 2.445652
| 0.369565
| 0.12
| 0.16
| 0.142222
| 0.515556
| 0.471111
| 0.355556
| 0
| 0
| 0
| 0
| 0.217241
| 0.214092
| 369
| 16
| 67
| 23.0625
| 0.527586
| 0.01626
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0
| 0
| 0.5
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 4
|
d24ed0652ef6bf199feeca107d971a6eb055a99b
| 297
|
py
|
Python
|
maatpy/classifiers/__init__.py
|
sanzgiri/MaatPy
|
381a0d31f1afdd2c53b9ccbb410eb0df6b4b9965
|
[
"MIT"
] | 11
|
2019-05-17T03:50:18.000Z
|
2021-08-23T22:18:23.000Z
|
maatpy/classifiers/__init__.py
|
sanzgiri/MaatPy
|
381a0d31f1afdd2c53b9ccbb410eb0df6b4b9965
|
[
"MIT"
] | 3
|
2021-04-08T14:01:15.000Z
|
2021-06-21T15:41:31.000Z
|
maatpy/classifiers/__init__.py
|
sanzgiri/MaatPy
|
381a0d31f1afdd2c53b9ccbb410eb0df6b4b9965
|
[
"MIT"
] | 7
|
2019-06-09T06:16:59.000Z
|
2021-11-12T01:45:52.000Z
|
from .smoteboost import SMOTEBoost
from .smotebagging import SMOTEBagging
from .adacost import AdaCost
from .balanced_random_forest import BalancedRandomForestClassifier
# Public API of maatpy.classifiers.
# NOTE(review): 'BalancedBaggingClassifier' is listed here but no import
# for it is visible in this section of the file — confirm it is imported
# above, otherwise `from maatpy.classifiers import *` will fail.
__all__ = ['BalancedBaggingClassifier', 'SMOTEBoost', 'SMOTEBagging',
'AdaCost', 'BalancedRandomForestClassifier']
| 37.125
| 69
| 0.811448
| 24
| 297
| 9.791667
| 0.458333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.117845
| 297
| 7
| 70
| 42.428571
| 0.896947
| 0
| 0
| 0
| 0
| 0
| 0.282828
| 0.185185
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
d264013af4cc5dd8eb56627916a5d6c9d605e8ff
| 871
|
py
|
Python
|
1.py
|
2spmohanty/Performance
|
5a86a56f40bad1e12654fe1bb737affb4e0edd26
|
[
"Apache-2.0"
] | null | null | null |
1.py
|
2spmohanty/Performance
|
5a86a56f40bad1e12654fe1bb737affb4e0edd26
|
[
"Apache-2.0"
] | null | null | null |
1.py
|
2spmohanty/Performance
|
5a86a56f40bad1e12654fe1bb737affb4e0edd26
|
[
"Apache-2.0"
] | null | null | null |
from collections import OrderedDict, namedtuple, defaultdict
import glob
# Parsed per-instance settings (populated by the commented-out code below),
# keyed by instance id.
instance_data = {}
# Raw comma-separated KEY:VALUE configuration blobs, keyed by instance id.
instance_dict = {'1': ',PRIMARY_LDU_NAME:10.172.109.23,PRIMARY_LDU_USER_NAME:administrator@skyscraper.local,PRIMARY_LDU_PASSWD:vc_password,DATACENTER:Datacenter3,CLUSTER:cls,HOST_NAME:w1-hs4-n2203.eng.vmware.com,SRC_PNIC:vmnic1,DATASTORE:vsanDatastore,SRC_DISK:vmhba2,DEST_DATACENTER:Datacenter3,DEST_CLUSTER:cls,DEST_HOST_NAME:w1-hs4-n2204.eng.vmware.com,PNIC:vmnic1,DEST_DATASTORE:vsanDatastore,DEST_DISK:vmhba2,STAT_COLLLECTION_LIST:pnic,datastore,mem,disk'}
"""
for instance in instance_dict:
print instance
instance_data[instance] = dict( (x, y) for x, y in (item.split(":") for item in instance_dict[instance].strip(",").split(",")))
instance = '1'
for x,y in (item.split(":") for item in instance_dict[instance].strip(",").split(",")):
"""
# Fixed: the original `z = dict(1, 2)` raised TypeError at import time
# (dict() takes no two scalar positional arguments). A literal mapping
# preserves the apparent intent of a single {1: 2} entry.
z = {1: 2}
| 43.55
| 460
| 0.769231
| 129
| 871
| 5
| 0.457364
| 0.093023
| 0.065116
| 0.074419
| 0.170543
| 0.170543
| 0.170543
| 0.170543
| 0.170543
| 0.170543
| 0
| 0.0399
| 0.079219
| 871
| 19
| 461
| 45.842105
| 0.764339
| 0
| 0
| 0
| 0
| 0.2
| 0.768014
| 0.766257
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.2
| 0.4
| 0
| 0.4
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
|
0
| 4
|
d272041d547cbdbd15af0ea5dd177119519b09bf
| 1,839
|
py
|
Python
|
sniffersapp/equipment/migrations/0005_auto_20180812_1938.py
|
jamesokane/Oneworksite-Application
|
1749ffa89430be75394ae0d43905f3dd30a24fc6
|
[
"MIT"
] | null | null | null |
sniffersapp/equipment/migrations/0005_auto_20180812_1938.py
|
jamesokane/Oneworksite-Application
|
1749ffa89430be75394ae0d43905f3dd30a24fc6
|
[
"MIT"
] | 7
|
2020-06-05T19:27:52.000Z
|
2022-03-11T23:34:52.000Z
|
sniffersapp/equipment/migrations/0005_auto_20180812_1938.py
|
jamesokane/Oneworksite-Application
|
1749ffa89430be75394ae0d43905f3dd30a24fc6
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.0.6 on 2018-08-12 09:38
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated schema migration for the equipment app.

    Drops most detail columns from ``equipment`` (fuel, make, model,
    purchase info, etc.), adds a free-text ``description`` field in their
    place, and deletes the ``Equipment_AdditionalInfo`` model after first
    removing its FK fields. Do not edit operation order: field removals on
    ``equipment_additionalinfo`` must precede ``DeleteModel``.
    """

    dependencies = [
        ('equipment', '0004_auto_20180703_2320'),
    ]

    operations = [
        # Detach Equipment_AdditionalInfo's FKs before the model is deleted.
        migrations.RemoveField(
            model_name='equipment_additionalinfo',
            name='created_user',
        ),
        migrations.RemoveField(
            model_name='equipment_additionalinfo',
            name='equipment_id',
        ),
        # Drop the per-unit detail columns from equipment.
        migrations.RemoveField(
            model_name='equipment',
            name='fuel',
        ),
        migrations.RemoveField(
            model_name='equipment',
            name='height_restrictor',
        ),
        migrations.RemoveField(
            model_name='equipment',
            name='maintenance',
        ),
        migrations.RemoveField(
            model_name='equipment',
            name='make',
        ),
        migrations.RemoveField(
            model_name='equipment',
            name='model',
        ),
        migrations.RemoveField(
            model_name='equipment',
            name='purchase_amount',
        ),
        migrations.RemoveField(
            model_name='equipment',
            name='purchase_date',
        ),
        migrations.RemoveField(
            model_name='equipment',
            name='rubber_tracks',
        ),
        migrations.RemoveField(
            model_name='equipment',
            name='size',
        ),
        migrations.RemoveField(
            model_name='equipment',
            name='year',
        ),
        # Replacement: a single optional free-text description.
        migrations.AddField(
            model_name='equipment',
            name='description',
            field=models.CharField(blank=True, max_length=120),
        ),
        migrations.DeleteModel(
            name='Equipment_AdditionalInfo',
        ),
    ]
| 26.271429
| 63
| 0.527461
| 141
| 1,839
| 6.695035
| 0.390071
| 0.206568
| 0.247881
| 0.381356
| 0.59322
| 0.59322
| 0.228814
| 0
| 0
| 0
| 0
| 0.02911
| 0.364872
| 1,839
| 69
| 64
| 26.652174
| 0.77911
| 0.02447
| 0
| 0.619048
| 1
| 0
| 0.183036
| 0.053013
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.015873
| 0
| 0.063492
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
d2729b5f01f7cf3acb36db7aab4ed90fb10ad701
| 144
|
py
|
Python
|
pymatflow/cp2k/__init__.py
|
DeqiTang/pymatflow
|
bd8776feb40ecef0e6704ee898d9f42ded3b0186
|
[
"MIT"
] | 6
|
2020-03-06T16:13:08.000Z
|
2022-03-09T07:53:34.000Z
|
pymatflow/cp2k/__init__.py
|
DeqiTang/pymatflow
|
bd8776feb40ecef0e6704ee898d9f42ded3b0186
|
[
"MIT"
] | 1
|
2021-10-02T02:23:08.000Z
|
2021-11-08T13:29:37.000Z
|
pymatflow/cp2k/__init__.py
|
DeqiTang/pymatflow
|
bd8776feb40ecef0e6704ee898d9f42ded3b0186
|
[
"MIT"
] | 1
|
2021-07-10T16:28:14.000Z
|
2021-07-10T16:28:14.000Z
|
from .cp2k import Cp2k
from .static import StaticRun
from .opt import OptRun
from .phonopy import PhonopyRun
from .md import MdRun
| 14.4
| 32
| 0.736111
| 20
| 144
| 5.3
| 0.55
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.018018
| 0.229167
| 144
| 9
| 33
| 16
| 0.936937
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
964897838ab34a87d4f61c62299d67615a7c424a
| 142
|
py
|
Python
|
ds4se/infoxplainer/causality/eval/traceability.py
|
WM-CSCI-435-F19/data-science-4-software-engineering
|
3692163df710550d4ee5b399a2a184968a0f18c6
|
[
"Apache-2.0"
] | 5
|
2020-12-08T00:38:24.000Z
|
2021-11-16T20:00:59.000Z
|
ds4se/infoxplainer/causality/eval/traceability.py
|
WM-CSCI-435-F19/data-science-4-software-engineering
|
3692163df710550d4ee5b399a2a184968a0f18c6
|
[
"Apache-2.0"
] | 110
|
2020-09-26T18:36:35.000Z
|
2022-03-12T00:54:35.000Z
|
ds4se/infoxplainer/causality/eval/traceability.py
|
WM-CSCI-435-F19/data-science-4-software-engineering
|
3692163df710550d4ee5b399a2a184968a0f18c6
|
[
"Apache-2.0"
] | 3
|
2020-12-09T19:23:10.000Z
|
2021-02-16T12:54:16.000Z
|
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/4.4_infoxplainer.causality.eval.traceability.ipynb (unless otherwise specified).
__all__ = []
| 47.333333
| 128
| 0.78169
| 19
| 142
| 5.578947
| 0.894737
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.015748
| 0.105634
| 142
| 3
| 129
| 47.333333
| 0.818898
| 0.887324
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.