hexsha
string
size
int64
ext
string
lang
string
max_stars_repo_path
string
max_stars_repo_name
string
max_stars_repo_head_hexsha
string
max_stars_repo_licenses
list
max_stars_count
int64
max_stars_repo_stars_event_min_datetime
string
max_stars_repo_stars_event_max_datetime
string
max_issues_repo_path
string
max_issues_repo_name
string
max_issues_repo_head_hexsha
string
max_issues_repo_licenses
list
max_issues_count
int64
max_issues_repo_issues_event_min_datetime
string
max_issues_repo_issues_event_max_datetime
string
max_forks_repo_path
string
max_forks_repo_name
string
max_forks_repo_head_hexsha
string
max_forks_repo_licenses
list
max_forks_count
int64
max_forks_repo_forks_event_min_datetime
string
max_forks_repo_forks_event_max_datetime
string
content
string
avg_line_length
float64
max_line_length
int64
alphanum_fraction
float64
qsc_code_num_words_quality_signal
int64
qsc_code_num_chars_quality_signal
float64
qsc_code_mean_word_length_quality_signal
float64
qsc_code_frac_words_unique_quality_signal
float64
qsc_code_frac_chars_top_2grams_quality_signal
float64
qsc_code_frac_chars_top_3grams_quality_signal
float64
qsc_code_frac_chars_top_4grams_quality_signal
float64
qsc_code_frac_chars_dupe_5grams_quality_signal
float64
qsc_code_frac_chars_dupe_6grams_quality_signal
float64
qsc_code_frac_chars_dupe_7grams_quality_signal
float64
qsc_code_frac_chars_dupe_8grams_quality_signal
float64
qsc_code_frac_chars_dupe_9grams_quality_signal
float64
qsc_code_frac_chars_dupe_10grams_quality_signal
float64
qsc_code_frac_chars_replacement_symbols_quality_signal
float64
qsc_code_frac_chars_digital_quality_signal
float64
qsc_code_frac_chars_whitespace_quality_signal
float64
qsc_code_size_file_byte_quality_signal
float64
qsc_code_num_lines_quality_signal
float64
qsc_code_num_chars_line_max_quality_signal
float64
qsc_code_num_chars_line_mean_quality_signal
float64
qsc_code_frac_chars_alphabet_quality_signal
float64
qsc_code_frac_chars_comments_quality_signal
float64
qsc_code_cate_xml_start_quality_signal
float64
qsc_code_frac_lines_dupe_lines_quality_signal
float64
qsc_code_cate_autogen_quality_signal
float64
qsc_code_frac_lines_long_string_quality_signal
float64
qsc_code_frac_chars_string_length_quality_signal
float64
qsc_code_frac_chars_long_word_length_quality_signal
float64
qsc_code_frac_lines_string_concat_quality_signal
float64
qsc_code_cate_encoded_data_quality_signal
float64
qsc_code_frac_chars_hex_words_quality_signal
float64
qsc_code_frac_lines_prompt_comments_quality_signal
float64
qsc_code_frac_lines_assert_quality_signal
float64
qsc_codepython_cate_ast_quality_signal
float64
qsc_codepython_frac_lines_func_ratio_quality_signal
float64
qsc_codepython_cate_var_zero_quality_signal
bool
qsc_codepython_frac_lines_pass_quality_signal
float64
qsc_codepython_frac_lines_import_quality_signal
float64
qsc_codepython_frac_lines_simplefunc_quality_signal
float64
qsc_codepython_score_lines_no_logic_quality_signal
float64
qsc_codepython_frac_lines_print_quality_signal
float64
qsc_code_num_words
int64
qsc_code_num_chars
int64
qsc_code_mean_word_length
int64
qsc_code_frac_words_unique
null
qsc_code_frac_chars_top_2grams
int64
qsc_code_frac_chars_top_3grams
int64
qsc_code_frac_chars_top_4grams
int64
qsc_code_frac_chars_dupe_5grams
int64
qsc_code_frac_chars_dupe_6grams
int64
qsc_code_frac_chars_dupe_7grams
int64
qsc_code_frac_chars_dupe_8grams
int64
qsc_code_frac_chars_dupe_9grams
int64
qsc_code_frac_chars_dupe_10grams
int64
qsc_code_frac_chars_replacement_symbols
int64
qsc_code_frac_chars_digital
int64
qsc_code_frac_chars_whitespace
int64
qsc_code_size_file_byte
int64
qsc_code_num_lines
int64
qsc_code_num_chars_line_max
int64
qsc_code_num_chars_line_mean
int64
qsc_code_frac_chars_alphabet
int64
qsc_code_frac_chars_comments
int64
qsc_code_cate_xml_start
int64
qsc_code_frac_lines_dupe_lines
int64
qsc_code_cate_autogen
int64
qsc_code_frac_lines_long_string
int64
qsc_code_frac_chars_string_length
int64
qsc_code_frac_chars_long_word_length
int64
qsc_code_frac_lines_string_concat
null
qsc_code_cate_encoded_data
int64
qsc_code_frac_chars_hex_words
int64
qsc_code_frac_lines_prompt_comments
int64
qsc_code_frac_lines_assert
int64
qsc_codepython_cate_ast
int64
qsc_codepython_frac_lines_func_ratio
int64
qsc_codepython_cate_var_zero
int64
qsc_codepython_frac_lines_pass
int64
qsc_codepython_frac_lines_import
int64
qsc_codepython_frac_lines_simplefunc
int64
qsc_codepython_score_lines_no_logic
int64
qsc_codepython_frac_lines_print
int64
effective
string
hits
int64
050d0065c6551bdc4fb38c58599bd6f7f819f7e7
741
py
Python
clutch/schema/request/session/shared.py
ExeArco/clutch
223244b1294b496ba16c8a2433eb453cce593a62
[ "MIT" ]
34
2018-04-14T12:53:07.000Z
2021-08-21T12:52:23.000Z
clutch/schema/request/session/shared.py
ExeArco/clutch
223244b1294b496ba16c8a2433eb453cce593a62
[ "MIT" ]
18
2018-04-30T20:32:44.000Z
2021-10-03T15:24:33.000Z
clutch/schema/request/session/shared.py
ExeArco/clutch
223244b1294b496ba16c8a2433eb453cce593a62
[ "MIT" ]
10
2019-01-31T20:21:13.000Z
2021-10-03T10:01:26.000Z
from typing import Tuple from pydantic import BaseModel, Field from clutch.compat import Literal DataRateUnits = Tuple[ Literal["KB/s"], Literal["MB/s"], Literal["GB/s"], Literal["TB/s"] ] DataSizeUnits = Tuple[Literal["KB"], Literal["MB"], Literal["GB"], Literal["TB"]] ByteDefinition = Literal[1000, 1024] class UnitsRequest(BaseModel): speed_units: DataRateUnits = Field(..., alias="speed-units") speed_bytes: ByteDefinition = Field(..., alias="speed-bytes") size_units: DataSizeUnits = Field(..., alias="size-units") size_bytes: ByteDefinition = Field(..., alias="size-bytes") memory_units: DataSizeUnits = Field(..., alias="memory-units") memory_bytes: ByteDefinition = Field(..., alias="memory-bytes")
35.285714
81
0.696356
87
741
5.862069
0.321839
0.117647
0.141176
0.170588
0
0
0
0
0
0
0
0.012403
0.129555
741
20
82
37.05
0.778295
0
0
0
0
0
0.121457
0
0
0
0
0
0
1
0
false
0
0.2
0
0.666667
0
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
2
05144cf1f77433a7caac5fc8ed5d6205e9dd98fc
4,242
py
Python
PyOpenWorm/capability.py
BioComSoftware/PyOpenWorm
32084f3570b4ea7fbdb1a4d20bd469d4af6ab28f
[ "MIT" ]
1
2019-03-22T12:02:36.000Z
2019-03-22T12:02:36.000Z
PyOpenWorm/capability.py
BioComSoftware/PyOpenWorm
32084f3570b4ea7fbdb1a4d20bd469d4af6ab28f
[ "MIT" ]
null
null
null
PyOpenWorm/capability.py
BioComSoftware/PyOpenWorm
32084f3570b4ea7fbdb1a4d20bd469d4af6ab28f
[ "MIT" ]
null
null
null
''' Defines 'capabilities', pieces of functionality that an object needs which must be injected. A given capability can be provided by more than one capability provider, but, for a given set of providers, only one will be bound at a time. Logically, each provider that provides the capability is asked, in a user-provided preference order, whether it can provide the capability for the *specific* object and the first one which can provide the capability is bound to the object. The core idea is dependency injection: a capability does not modify the object: the object receives the provider and an identifier for the capability provided, but how the object uses the provider is up to the object. This is important because the user of the object should not condition its behavior on the particular capability provider used, although it may know about which capabilities the object has. Note, that there may be some providers that lose their ability to provide a capability. This loss should be communicated with a 'CannotProvideCapability' exception when the relevant methods are called on the provider. This *may* allow certain operations to be retried with a provider lower on the capability order, *but* a provider that throws CannotProvide may validly be asked if it can provide the capability again -- if it *still* cannot provide the capability, it should communicate that when asked. Providers may keep state between calls to provide a capability, but their correctness must not depend on any ordering of method calls except that, of course, their ``__init__`` is called first. 
''' import six from yarom.utils import FCN class _Singleton(type): _instances = {} def __call__(cls, *args, **kwargs): if cls not in cls._instances: cls._instances[cls] = super(_Singleton, cls).__call__(*args, **kwargs) return cls._instances[cls] class Capability(six.with_metaclass(_Singleton)): def __str__(self): return FCN(type(self)) class Provider(object): def provides(self, cap): ''' Returns a SupportChecker if the provider provides for the given capability; otherwise, returns None ''' if cap in getattr(self, 'provided_capabilities', ()): return self class SupportChecker(object): def __call__(self, ob): ''' Returns an object that actually provides the capability ''' return None def to(self, ob): ''' For use in a 'fluent' API. Ex: a.provides(cap).to(obj) ''' return self(ob) class Capable(object): @property def needed_capabilities(self): return [] def accept_capability_provider(self, cap, provider): ''' The Capable should replace any previously accepted provider with the one given. ''' raise NotImplementedError() class CannotProvideCapability(Exception): ''' Thrown by a *provider* when it cannot provide the capability during the object's execution ''' def __init__(self, cap, provider): super(CannotProvideCapability, self).__init__('Provider, {}, cannot, now, provide the capability, {}' .format(provider, cap)) self._cap = cap self._provider = provider class NoProviderAvailable(Exception): def __init__(self, cap, receiver=None): super(NoProviderAvailable, self).__init__('No providers currently provide {}{}' .format(cap, ' for ' + repr(receiver) if receiver else '')) self._cap = cap def provide(ob, provs): if is_capable(ob): unsafe_provide(ob, provs) def unsafe_provide(ob, provs): for cap in ob.needed_capabilities: provider = get_provider(ob, cap, provs) if not provider: raise NoProviderAvailable(cap, ob) ob.accept_capability_provider(cap, provider) def get_providers(cap, provs): for p in provs: provfn = p.provides(cap) if provfn: yield p, provfn def 
get_provider(ob, cap, provs): for p, provides_to in get_providers(cap, provs): provfn = provides_to(ob) if provfn: return provfn def is_capable(ob): return isinstance(ob, Capable)
32.381679
109
0.690005
561
4,242
5.103387
0.317291
0.045407
0.041914
0.024101
0.032134
0
0
0
0
0
0
0
0.235502
4,242
130
110
32.630769
0.882825
0.464168
0
0.068966
0
0
0.052486
0.009669
0
0
0
0
0
1
0.241379
false
0
0.034483
0.051724
0.551724
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
1
0
0
2
05192c33194bf4769bd738c48e10fc6868bce444
1,528
py
Python
app/app.py
EricDalrymple91/simple-flask-api-with-sqlalchemy
ebedb7c0ca6f345931691b4d25cf9d05dae147c0
[ "MIT" ]
null
null
null
app/app.py
EricDalrymple91/simple-flask-api-with-sqlalchemy
ebedb7c0ca6f345931691b4d25cf9d05dae147c0
[ "MIT" ]
null
null
null
app/app.py
EricDalrymple91/simple-flask-api-with-sqlalchemy
ebedb7c0ca6f345931691b4d25cf9d05dae147c0
[ "MIT" ]
null
null
null
""" python app/app.py -> http://0.0.0.0:8080/ """ from app.models.database import db, ma from flask_session import Session from flask_api import FlaskAPI, status from flask_assets import Environment from flask_cors import CORS from flask import jsonify import logging import time from routes.main_db import main_db_bp from routes.secondary_db import secondary_db_bp app = FlaskAPI(__name__) app.logger.setLevel(logging.INFO) CORS(app, resources=r'/api/*', supports_credentials=True) app.config.from_object('config') Environment(app) db.init_app(app) ma.init_app(app) Session(app) app.register_blueprint(main_db_bp) app.register_blueprint(secondary_db_bp) # Server status @app.route("/") def server_status(): # Across config.py, app.py, ../setup.py return jsonify({'status': 'ONLINE', 'version': '0.1'}), status.HTTP_200_OK # For timeout testing @app.route("/timeout_test/<seconds>") def timeout_test(seconds): time.sleep(int(seconds)) return jsonify({'timeout_test': f'{seconds} seconds'}), status.HTTP_200_OK # Error handling routes (Can't use blueprints) @app.errorhandler(400) def bad_request(_): return jsonify({'error': 'Bad request'}), status.HTTP_400_BAD_REQUEST @app.errorhandler(404) def not_found(_): return jsonify({'error': 'Not found'}), status.HTTP_404_NOT_FOUND @app.errorhandler(405) def not_allowed(_): return jsonify({'error': 'Method not allowed'}), status.HTTP_405_METHOD_NOT_ALLOWED if __name__ == "__main__": app.run(debug=False, host='0.0.0.0', port=8080)
24.253968
87
0.746728
230
1,528
4.717391
0.356522
0.01106
0.01106
0.007373
0
0
0
0
0
0
0
0.031065
0.115183
1,528
62
88
24.645161
0.77145
0.104058
0
0
0
0
0.114223
0.016949
0
0
0
0
0
1
0.131579
false
0
0.263158
0.105263
0.526316
0.052632
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
1
1
0
0
2
0521a62c712af1bac9e5fc5f7127ba53182cf6f0
695
py
Python
networkx-d3-v2/lib/appengine_sessions/views.py
suraj-testing2/Clock_Websites
0e65331da40cfd3766f1bde17f0a9c7ff6666dea
[ "Apache-2.0" ]
null
null
null
networkx-d3-v2/lib/appengine_sessions/views.py
suraj-testing2/Clock_Websites
0e65331da40cfd3766f1bde17f0a9c7ff6666dea
[ "Apache-2.0" ]
null
null
null
networkx-d3-v2/lib/appengine_sessions/views.py
suraj-testing2/Clock_Websites
0e65331da40cfd3766f1bde17f0a9c7ff6666dea
[ "Apache-2.0" ]
null
null
null
from appengine_sessions.backends.db import SessionStore from appengine_sessions.mapper import DeleteMapper from appengine_sessions.models import Session from datetime import datetime from django.contrib.sessions.backends.base import SessionBase from django.http import HttpResponse from django.views.generic.base import View class SessionCleanUpCron(View): """ View used by cron to clear sessions that have expired """ def get(self, request, *args, **kwargs): mapper = DeleteMapper(Session, filters={ 'lt': ('expire_date', datetime.utcnow())}) mapper.start() return HttpResponse('Session cleaner mapper started')
30.217391
61
0.716547
79
695
6.253165
0.582278
0.078947
0.12753
0
0
0
0
0
0
0
0
0
0.204317
695
22
62
31.590909
0.893309
0.076259
0
0
0
0
0.069132
0
0
0
0
0
0
1
0.076923
false
0
0.538462
0
0.769231
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
2
05221801aa6a3eded4f2603e43dd16fb0815d68d
220
py
Python
cloud_formation/matlab_test.py
AgenttiX/fys-1320
fb598ece7a3e180bf25af320eb210a34394f6309
[ "MIT" ]
2
2021-02-01T15:35:07.000Z
2021-10-17T19:06:34.000Z
cloud_formation/matlab_test.py
AgenttiX/fys-1320
fb598ece7a3e180bf25af320eb210a34394f6309
[ "MIT" ]
null
null
null
cloud_formation/matlab_test.py
AgenttiX/fys-1320
fb598ece7a3e180bf25af320eb210a34394f6309
[ "MIT" ]
null
null
null
# This file is simply for testing, whether Matlab API for Python works correctly # It should run without any output or errors # pylint: disable=import-error import matlab.engine matlabeng = matlab.engine.start_matlab()
31.428571
80
0.795455
33
220
5.272727
0.818182
0.137931
0
0
0
0
0
0
0
0
0
0
0.145455
220
6
81
36.666667
0.925532
0.681818
0
0
0
0
0
0
0
0
0
0
0
1
0
false
0
0.5
0
0.5
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
2
052492bc3be7d6e95961ccfa3571ce5dd96e578e
1,566
py
Python
examples/miniapps/movie_lister/app_csv.py
vlad-ghita/python-dependency-injector
5cf5bdda24851dd97cfa0f5054f4a8c35ddac014
[ "BSD-3-Clause" ]
null
null
null
examples/miniapps/movie_lister/app_csv.py
vlad-ghita/python-dependency-injector
5cf5bdda24851dd97cfa0f5054f4a8c35ddac014
[ "BSD-3-Clause" ]
null
null
null
examples/miniapps/movie_lister/app_csv.py
vlad-ghita/python-dependency-injector
5cf5bdda24851dd97cfa0f5054f4a8c35ddac014
[ "BSD-3-Clause" ]
null
null
null
"""A naive example of dependency injection on Python. Example implementation of dependency injection in Python from Martin Fowler's article about dependency injection and inversion of control: http://www.martinfowler.com/articles/injection.html This mini application uses ``movies`` library, that is configured to work with csv file movies database. """ import movies import movies.finders import example.db import example.main import settings import fixtures import dependency_injector.containers as containers import dependency_injector.providers as providers @containers.override(movies.MoviesModule) class MyMoviesModule(containers.DeclarativeContainer): """IoC container for overriding movies module component providers.""" finder = providers.Factory(movies.finders.CsvMovieFinder, csv_file_path=settings.MOVIES_CSV_PATH, delimiter=',', **movies.MoviesModule.finder.kwargs) class CsvApplication(containers.DeclarativeContainer): """IoC container of csv application component providers.""" main = providers.Callable(example.main.main, movie_lister=movies.MoviesModule.lister) init_db = providers.Callable(example.db.init_csv, movies_data=fixtures.MOVIES_SAMPLE_DATA, csv_file_path=settings.MOVIES_CSV_PATH, delimiter=',') if __name__ == '__main__': CsvApplication.init_db() CsvApplication.main()
31.32
78
0.690294
163
1,566
6.478528
0.466258
0.053977
0.039773
0.079545
0.077652
0.077652
0.077652
0.077652
0
0
0
0
0.237548
1,566
49
79
31.959184
0.884422
0.298212
0
0.083333
0
0
0.009242
0
0
0
0
0
0
1
0
false
0
0.333333
0
0.541667
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
2
0524a4a8cf744b6c238a820ab0b7c57ade94346c
1,141
py
Python
src/calculator.py
HSShin0/integer_calculator
0e6f7ee86e0d69781666543833aaa5328ec2829a
[ "MIT" ]
null
null
null
src/calculator.py
HSShin0/integer_calculator
0e6f7ee86e0d69781666543833aaa5328ec2829a
[ "MIT" ]
1
2020-09-15T07:10:48.000Z
2020-09-15T07:10:48.000Z
src/calculator.py
HSShin0/integer_calculator
0e6f7ee86e0d69781666543833aaa5328ec2829a
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- """Basic calculation classes including add, sub, mul, and div. - Author: Hyungseok Shin - Contact: hsshin@jmarple.ai """ from abc import ABC class Calculator(ABC): """An abstract class of basic computations.""" def operate(self: "Calculator", left: int, right: int) -> int: """Operate the defined calcuation with two given operands.""" raise NotImplementedError class Adder(Calculator): """Addition.""" def operate(self: "Adder", left: int, right: int) -> int: """Add two integers.""" return left + right class Subtractor(Calculator): """Subtraction.""" def operate(self: "Subtractor", left: int, right: int) -> int: """Subtract two integers.""" return left - right class Multiplier(Calculator): """Muliplication.""" def operate(self: "Multiplier", left: int, right: int) -> int: """Multiply two integers.""" return left * right class Divider(Calculator): """Division.""" def operate(self: "Divider", left: int, right: int) -> int: """Divide two integers.""" return left // right
23.285714
69
0.613497
127
1,141
5.511811
0.440945
0.071429
0.1
0.107143
0.298571
0.132857
0
0
0
0
0
0.001143
0.233129
1,141
48
70
23.770833
0.798857
0.321648
0
0
0
0
0.058577
0
0
0
0
0
0
1
0.3125
false
0
0.0625
0
0.9375
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
1
0
0
2
0526aef62f1678e7e49ee86c745b6fe48131f60c
890
py
Python
glue_genomics_data/bedpe_factory.py
gluesolutions/glue-genomics-data
0e8b055908727b2c385ef64ce4d116de5cd995a9
[ "BSD-3-Clause" ]
null
null
null
glue_genomics_data/bedpe_factory.py
gluesolutions/glue-genomics-data
0e8b055908727b2c385ef64ce4d116de5cd995a9
[ "BSD-3-Clause" ]
15
2021-07-22T14:47:12.000Z
2021-09-28T14:03:26.000Z
glue_genomics_data/bedpe_factory.py
gluesolutions/glue-genomics-data
0e8b055908727b2c385ef64ce4d116de5cd995a9
[ "BSD-3-Clause" ]
null
null
null
from glue.config import data_factory from glue.core import Data import pandas as pd from pathlib import Path from glue_genomics_viewers.data import BedPeData __all__ = ['is_bedpe', 'read_bedpe'] def is_bedpe(filename, **kwargs): return filename.endswith('.bedpe') @data_factory('BEDPE data loader', is_bedpe, priority=999) def read_bedpe(file_name): """ Read a bed paired-end file denoting linkages between regions Most of the time these are large datasets we want to display on the GenomeTrackViewer and so we load them as the custom BedPeData type that knows how to handled tiled/multi-resolution data. Although alternatively we could view them as simple datasets that we might want to filter by strength. """ data = BedPeData(file_name) data.engine.index() #This returns quickly if file is already indexed return data
29.666667
72
0.739326
132
890
4.871212
0.613636
0.037325
0
0
0
0
0
0
0
0
0
0.004219
0.201124
890
30
73
29.666667
0.900141
0.452809
0
0
0
0
0.092551
0
0
0
0
0
0
1
0.153846
false
0
0.384615
0.076923
0.692308
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
2
05548dfc3b346930922e63cbc3666895eb15c3b2
5,733
py
Python
xmppserver/conf.py
ovekaaven/django-xmpp-server
aa391173b4cdfc98e2f6de29d24aa4273b3620c3
[ "MIT" ]
null
null
null
xmppserver/conf.py
ovekaaven/django-xmpp-server
aa391173b4cdfc98e2f6de29d24aa4273b3620c3
[ "MIT" ]
null
null
null
xmppserver/conf.py
ovekaaven/django-xmpp-server
aa391173b4cdfc98e2f6de29d24aa4273b3620c3
[ "MIT" ]
null
null
null
from django.conf import settings as django_settings class Settings(object): def __getattribute__(self, attr): if attr == attr.upper(): try: # try django settings first return getattr(django_settings, 'XMPP_' + attr) except AttributeError: # fallback to our defaults pass return super(Settings, self).__getattribute__(attr) DOMAIN = None """ XMPP domain name of your server. Should normally be set to your primary domain name. If you do not set this, the server will resort to using the domain name provided by the client. """ ALLOW_REGISTRATION = False """ Whether to allow in-band registration (XEP-0077). Enabling this will allow anyone to create users on your Django site through XMPP, unless you restrict it with a custom :ref:`authentication hook <hooks>`. """ REGISTRATION_URL = None """ URL to your user registration page. If you do not allow in-band registration, users who want to register will be told to visit this URL instead. If unset, an URL will be constructed from the XMPP domain. """ ALLOW_ANONYMOUS_LOGIN = False """ Whether to allow anonymous logins. """ ALLOW_PLAIN_PASSWORD = True """ Whether to allow XMPP authentication with plaintext passwords. If you use HTTPS, then this is typically OK. """ ALLOW_WEBUSER_LOGIN = True """ Whether to allow XMPP authentication through session cookies. The XMPP client must supply an empty password, otherwise regular password checking is done. This option does not apply if you're relying on BOSH prebinding or session tokens. If so, you may want to set it to False, to avoid redundant Django session database lookups. """ ALLOW_LEGACY_AUTH = False """ Whether to allow non-SASL authentication (XEP-0078). This is a compatibility option and should normally not be needed. """ CREDENTIALS_URL = None """ The URL to the session token generation view. Used by the template tags. To avoid issues with browser same-origin policies, this URL should not have a hostname. If unset, the URL will be deduced from your project's URLconf. 
""" CREDENTIALS_MAX_AGE = 30 """ Expiration time, in seconds, of session tokens. Should only be long enough for the XMPP client to retrieve a token and use it to log in. Currently, these are stateless HMAC tokens, meaning they could be used more than once. To reduce the chances of this, the expiration time should be short. (Unless you use an :ref:`authentication hook <hooks>` that makes sure tokens can only be used once, but xmppserver does not currently provide such a hook.) """ BOSH_URL = None """ The URL to the BOSH consumer. Used by the template tags. If unset, the URL will be deduced from your project's URLconf. """ BOSH_PREBIND_URL = None """ The URL to the BOSH prebind view. Used by the template tags. To avoid issues with browser same-origin policies, this URL should not have a hostname. If unset, the URL will be deduced from your project's URLconf. """ BOSH_MIN_WAIT = 10 """ Minimum allowed wait time for BOSH requests, in seconds. Lower values may improve reliability slightly, but also increase bandwidth usage and server load. """ BOSH_MAX_WAIT = 60 """ Maximum allowed wait time for BOSH requests, in seconds. Lower values may improve reliability slightly, but also increase bandwidth usage and server load. """ BOSH_MAX_HOLD = 2 """ Maximum number of waiting BOSH requests. Higher values may improve throughput slightly, but also increase server load. """ BOSH_MAX_INACTIVITY = 120 """ Time before an inactive BOSH client is presumed dead, in seconds. """ WEBSOCKETS_URL = None """ The URL to the WebSockets consumer. Used by the template tags. If unset, the URL will be deduced from your project's URLconf. """ TCP_SERVER = True """ Whether to allow starting the plain XMPP server. To actually start it, you must also add the following to your ``routing.py``:: from xmppserver import xmpp_server xmpp_server.start_xmpp_server() """ TCP_CLIENT_PORT = 5222 """ The XMPP client-to-server port to listen on. 
""" TCP_SERVER_PORT = 5269 """ The XMPP server-to-server port to listen on. This feature is not yet implemented. """ TCP_REQUIRE_TLS = True """ Whether to require TLS-secured connections. """ TLS_CERT_PATH = None """ Path to the X.509 certificate, in PEM format. Required for TLS. """ TLS_PRIV_KEY_PATH = None """ Path to the X.509 private key, in PEM format. Required for TLS. """ TLS_CACERT_PATHS = [] """ Paths to CA certificates to be used for validating client certificates, in PEM format. This feature is not yet implemented. """ SERVER = None """ If you need the template tags to return a full URL, you can set this to the hostname of your XMPP server. You shouldn't do this unless you have to, since the browser's same-origin policies may kick in. The XMPP Server does alleviate this by supporting CORS, but not all browsers support it. And even if they do, BOSH connections will take longer to establish. (WebSockets are not affected, though.) """ SERVER_SECURE = True """ Whether your XMPP server uses HTTPS. Used by the template tags if XMPP_SERVER is set. """ settings = Settings()
30.822581
82
0.671027
807
5,733
4.697646
0.33829
0.02374
0.022158
0.022422
0.286732
0.286732
0.224743
0.187286
0.187286
0.187286
0
0.00761
0.266527
5,733
185
83
30.989189
0.893936
0.008721
0
0
0
0
0.004129
0
0
0
0
0
0
1
0.028571
false
0.057143
0.028571
0
0.857143
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
1
0
0
1
0
0
2
0557578a3652e17b2c2606979622a652d6656714
1,831
py
Python
gtfsviewer/gtfs.py
ayayron/gtfs-streamlit
1743895a9bb50394b5edb7a5f0472345ad256581
[ "MIT" ]
null
null
null
gtfsviewer/gtfs.py
ayayron/gtfs-streamlit
1743895a9bb50394b5edb7a5f0472345ad256581
[ "MIT" ]
1
2022-03-12T00:38:31.000Z
2022-03-12T00:38:31.000Z
gtfsviewer/gtfs.py
ayayron/gtfs-streamlit
1743895a9bb50394b5edb7a5f0472345ad256581
[ "MIT" ]
null
null
null
from dataclasses import dataclass from enum import Enum from typing import Optional @dataclass(frozen=True) class Agency: id: str name: str url: str tz: str lang: str @dataclass(frozen=True) class Calendar: service_id: int monday: bool tuesday: bool wednesday: bool tursday: bool friday: bool saturday: bool sunday: bool start_date: str end_date: str @dataclass(frozen=True) class CalendarDates: service_id: int date: str exception_type: int @dataclass(frozen=True) class FareAttributes: id: int price: float currency: str payment_method: str transfers: Optional[int] transfer_duration: Optional[int] # time in seconds @dataclass(frozen=True) class FareRules: fare_id: int route_id: int origin_id: int destination_id: int contains_id: int class RouteType(Enum): express = "EXPRESS" rapid = "RAPID" @dataclass(frozen=True) class Routes: id: int agency_id: str short_name: str long_name: str description: str type: RouteType url: str color: str text_color: str @dataclass(frozen=True) class Shapes: id: int lon: float lat: float seq: int distance_from_start: int @dataclass(frozen=True) class StopTimes: trip_id: int arrival_time: str departure_time: str stop_id: str stop_sequence: str stop_headsign: str pickup_type: str drop_off_type: str shape_dist_traveled: Optional[int] @dataclass(frozen=True) class Stops: id: int name: str description: str lat: float lon: float zone_id: int url: str @dataclass(frozen=True) class Trips: route_id: int service_id: int trip_id: int trip_headsign: str direction_id: int block_id: int shape_id: int
15.921739
55
0.66521
244
1,831
4.840164
0.344262
0.08044
0.160881
0.203218
0.160034
0
0
0
0
0
0
0
0.262698
1,831
114
56
16.061404
0.874815
0.008192
0
0.347826
0
0
0.006615
0
0
0
0
0
0
1
0
false
0
0.032609
0
0.891304
0
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
2
055b7dd58ab75ff5661129b37b2b775ba09df9c8
665
py
Python
hanabi_ai/players/example_discarder.py
akaps/hanabi_ai
474ec9a827f1a28b5e06cbf8139fbce1d8e5e1e1
[ "MIT" ]
1
2019-06-02T13:36:56.000Z
2019-06-02T13:36:56.000Z
hanabi_ai/players/example_discarder.py
akaps/hanabi_ai
474ec9a827f1a28b5e06cbf8139fbce1d8e5e1e1
[ "MIT" ]
106
2018-08-29T07:17:38.000Z
2021-02-13T18:28:31.000Z
hanabi_ai/players/example_discarder.py
akaps/hanabi_ai
474ec9a827f1a28b5e06cbf8139fbce1d8e5e1e1
[ "MIT" ]
1
2018-09-01T22:10:59.000Z
2018-09-01T22:10:59.000Z
import hanabi_ai.model.table as game import hanabi_ai.model.moves as moves from hanabi_ai.players.player import HanabiPlayer # To create an AI for Hanabi, make a new class in the ai folder, # extend HanabiPlayer, and implement do_turn class Discarder(HanabiPlayer): def __init__(self): pass def do_turn(self, player_index, game_info): if game_info.disclosures < game.NUM_DISCLOSURES: return moves.HanabiDiscardAction(player_index, 0) return moves.HanabiDiscloseRankAction(player_index, game_info.next_player(player_index), 1)
36.944444
82
0.654135
81
665
5.148148
0.54321
0.105516
0.067146
0.091127
0
0
0
0
0
0
0
0.004228
0.288722
665
17
83
39.117647
0.877378
0.157895
0
0
0
0
0
0
0
0
0
0
0
1
0.166667
false
0.083333
0.25
0
0.666667
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
1
0
0
1
0
0
2
0576e4be852d31a412de6b56f511d3469cb9ef4a
787
py
Python
autoperf/harpy/timings.py
mingyuchoo/django_study
608e6bddb37b03329a24a334184143a9881b3676
[ "MIT" ]
null
null
null
autoperf/harpy/timings.py
mingyuchoo/django_study
608e6bddb37b03329a24a334184143a9881b3676
[ "MIT" ]
7
2019-12-04T23:52:28.000Z
2022-02-10T09:23:02.000Z
autoperf/harpy/timings.py
mingyuchoo/django_study
608e6bddb37b03329a24a334184143a9881b3676
[ "MIT" ]
null
null
null
class Timings(object): def __init__(self, j): self.raw = j if "blocked" in self.raw: self.blocked = self.raw["blocked"] else: self.blocked = -1 if "dns" in self.raw: self.dns = self.raw["dns"] else: self.dns = -1 if "connect" in self.raw: self.connect = self.raw["connect"] else: self.connect = -1 self.send = self.raw["send"] self.wait = self.raw["wait"] self.receive = self.raw["receive"] if "ssl" in self.raw: self.ssl = self.raw["ssl"] else: self.ssl = -1 if "comment" in self.raw: self.comment = self.raw["comment"] else: self.comment = ''
23.848485
46
0.466328
94
787
3.861702
0.212766
0.269972
0.123967
0.179063
0
0
0
0
0
0
0
0.008457
0.398983
787
32
47
24.59375
0.758985
0
0
0.192308
0
0
0.087675
0
0
0
0
0
0
1
0.038462
false
0
0
0
0.076923
0
0
0
0
null
1
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
2
05834f1f9f9f327826c893bf9c0e5c2a90cc1fac
768
py
Python
cobweb/scrapyLearn/mysqlpjt/mysqlpjt/pipelines.py
ljm516/python-repo
a1071cbf4e4a021cc1ccd902edd4a60246ea4fcc
[ "Apache-2.0" ]
null
null
null
cobweb/scrapyLearn/mysqlpjt/mysqlpjt/pipelines.py
ljm516/python-repo
a1071cbf4e4a021cc1ccd902edd4a60246ea4fcc
[ "Apache-2.0" ]
6
2020-03-24T16:48:36.000Z
2022-02-11T03:42:50.000Z
cobweb/scrapyLearn/mysqlpjt/mysqlpjt/pipelines.py
ljm516/python-repo
a1071cbf4e4a021cc1ccd902edd4a60246ea4fcc
[ "Apache-2.0" ]
null
null
null
# -*- coding: utf-8 -*- # Define your item pipelines here # # Don't forget to add your pipeline to the ITEM_PIPELINES setting # See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html import pymysql class MysqlpjtPipeline(object): def __init__(self): # 与数据库建立连接 self.conn = pymysql.connect(host='localhost', user='root', passwd='root', db='cobweb') def process_item(self, item, spider): name = item['name'][0] key = item['keywd'][0] self.conn.autocommit(True) sql = "insert into mytb (title, keywd) VALUES ('" + name + "', '" + key + "')" print('sql: {sql}'.format(sql=sql)) self.conn.query(sql) return item def close_spider(self, spider): self.conn.connect()
26.482759
94
0.611979
99
768
4.676768
0.626263
0.069114
0
0
0
0
0
0
0
0
0
0.005085
0.231771
768
28
95
27.428571
0.779661
0.247396
0
0
0
0
0.155867
0
0
0
0
0
0
1
0.214286
false
0.071429
0.071429
0
0.428571
0.071429
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
1
0
0
0
0
0
2
554c4083cd12a673c39d24cb3994a9098b754fc8
370
py
Python
chapter16/tests/unit_tests.py
PacktPublishing/Mastering-Azure-Machine-Learning-Second-Edition
1ca0cf19fd49f6781c589ae4d9bde56135791cf1
[ "MIT" ]
1
2022-03-07T20:15:08.000Z
2022-03-07T20:15:08.000Z
chapter16/tests/unit_tests.py
PacktPublishing/Mastering-Azure-Machine-Learning-Second-Edition
1ca0cf19fd49f6781c589ae4d9bde56135791cf1
[ "MIT" ]
null
null
null
chapter16/tests/unit_tests.py
PacktPublishing/Mastering-Azure-Machine-Learning-Second-Edition
1ca0cf19fd49f6781c589ae4d9bde56135791cf1
[ "MIT" ]
1
2022-03-22T17:57:41.000Z
2022-03-22T17:57:41.000Z
import unittest import pandas as pd class TestDataFrameStats(unittest.TestCase): def setUp(self): # initialize and load df self.df = pd.DataFrame(data={'data': [0,1,2,3]}) def test_min(self): self.assertGreaterEqual(self.df.min().values[0], 0) def test_max(self): self.assertLessEqual(self.df.max().values[0], 100)
26.428571
59
0.637838
51
370
4.588235
0.54902
0.076923
0
0
0
0
0
0
0
0
0
0.034722
0.221622
370
14
60
26.428571
0.777778
0.059459
0
0
0
0
0.011527
0
0
0
0
0
0.222222
1
0.333333
false
0
0.222222
0
0.666667
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
1
0
0
2
555419e216f1f582d11a2be27e87821b30029b6b
32,565
py
Python
tests/pykafka/test_protocol.py
wikimedia/operations-debs-python-pykafka
d8380957a923e67a9d66c2e1e48312dd10c5271d
[ "Apache-2.0" ]
null
null
null
tests/pykafka/test_protocol.py
wikimedia/operations-debs-python-pykafka
d8380957a923e67a9d66c2e1e48312dd10c5271d
[ "Apache-2.0" ]
null
null
null
tests/pykafka/test_protocol.py
wikimedia/operations-debs-python-pykafka
d8380957a923e67a9d66c2e1e48312dd10c5271d
[ "Apache-2.0" ]
null
null
null
import operator import unittest2 from pykafka import protocol from pykafka.common import CompressionType from pykafka.utils.compat import buffer class TestMetadataAPI(unittest2.TestCase): maxDiff = None def test_request(self): req = protocol.MetadataRequest() msg = req.get_bytes() self.assertEqual( msg, bytearray( # header b'\x00\x00\x00\x15' # len(buffer) b'\x00\x03' # ApiKey b'\x00\x00' # api version b'\x00\x00\x00\x00' # correlation id b'\x00\x07' # len(client id) b'pykafka' # client id # end header b'\x00\x00\x00\x00' # len(topics) ) ) def test_response(self): cluster = protocol.MetadataResponse( buffer( b'\x00\x00\x00\x01' # len(brokers) b'\x00\x00\x00\x00' # node id b'\x00\x09' # len(host) b'localhost' # host b'\x00\x00#\x84' # port b'\x00\x00\x00\x01' # len(topic metadata) b'\x00\x00' # error code b'\x00\x04' # len(topic name) b'test' # topic name b'\x00\x00\x00\x02' # len(partition metadata) b'\x00\x00' # partition error code b'\x00\x00\x00\x00' # partition id b'\x00\x00\x00\x00' # leader b'\x00\x00\x00\x01' # len(replicas) b'\x00\x00\x00\x00' # replica b'\x00\x00\x00\x01' # len(isr) b'\x00\x00\x00\x00' # isr b'\x00\x00' # partition error code b'\x00\x00\x00\x01' # partition id b'\x00\x00\x00\x00' # leader b'\x00\x00\x00\x01' # len(replicas) b'\x00\x00\x00\x00' # replicas b'\x00\x00\x00\x01' # len(isr) b'\x00\x00\x00\x00' # isr ) ) self.assertEqual(cluster.brokers[0].host, b'localhost') self.assertEqual(cluster.brokers[0].port, 9092) self.assertEqual(cluster.topics[b'test'].partitions[0].leader, cluster.brokers[0].id) self.assertEqual(cluster.topics[b'test'].partitions[0].replicas, [cluster.brokers[0].id]) self.assertEqual(cluster.topics[b'test'].partitions[0].isr, [cluster.brokers[0].id]) def test_partition_error(self): # Response has a UnknownTopicOrPartition error for test/0 response = protocol.MetadataResponse( buffer( b'\x00\x00\x00\x01' # len(brokers) b'\x00\x00\x00\x00' # node is b'\x00\x09' # len(host) b'localhost' # host b'\x00\x00#\x84' # port 
b'\x00\x00\x00\x01' # len(topic metadata) b'\x00\x00' # error code b'\x00\x04' # len(topic name) b'test' # topic name b'\x00\x00\x00\x02' # len(partition metadata) b'\x00\x03' # partition error code b'\x00\x00\x00\x00' # partition id b'\x00\x00\x00\x00' # leader b'\x00\x00\x00\x01' # len(replicas) b'\x00\x00\x00\x00' # replica b'\x00\x00\x00\x01' # len(isr) b'\x00\x00\x00\x00' # isr b'\x00\x00' # partition error code b'\x00\x00\x00\x01' # partition id b'\x00\x00\x00\x00' # leader b'\x00\x00\x00\x01' # len(replicas) b'\x00\x00\x00\x00' # replica b'\x00\x00\x00\x01' # len(isr) b'\x00\x00\x00\x00' # isr ) ) self.assertEqual(response.topics[b'test'].partitions[0].err, 3) def test_topic_error(self): # Response has a UnknownTopicOrPartition error for test/0 response = protocol.MetadataResponse( buffer( b'\x00\x00\x00\x01' # len(brokers) b'\x00\x00\x00\x00' # node id b'\x00\x09' # len(host) b'localhost' # host b'\x00\x00#\x84' # port b'\x00\x00\x00\x01' # len(topic metadata) b'\x00\x03' # error code b'\x00\x04' # len(topic name) b'test' # topic name b'\x00\x00\x00\x02' # len(partition metadata) b'\x00\x00' # partition error code b'\x00\x00\x00\x00' # partition id b'\x00\x00\x00\x00' # leader b'\x00\x00\x00\x01' # len(replicas) b'\x00\x00\x00\x00' # replica b'\x00\x00\x00\x01' # len(isr) b'\x00\x00\x00\x00' # isr b'\x00\x00' # partition error code b'\x00\x00\x00\x01' # partition id b'\x00\x00\x00\x00' # leader b'\x00\x00\x00\x01' # len(replicas) b'\x00\x00\x00\x00' # replicas b'\x00\x00\x00\x01' # len(isr) b'\x00\x00\x00\x00' # isr ) ) self.assertEqual(response.topics[b'test'].err, 3) class TestProduceAPI(unittest2.TestCase): maxDiff = None test_messages = [ protocol.Message(b'this is a test message', partition_key=b'asdf'), protocol.Message(b'this is also a test message', partition_key=b'test_key'), protocol.Message(b"this doesn't have a partition key"), ] def test_request(self): message = self.test_messages[0] req = protocol.ProduceRequest() req.add_message(message, 
b'test', 0) msg = req.get_bytes() self.assertEqual( msg, bytearray( b"\x00\x00\x00a\x00\x00\x00\x00\x00\x00\x00\x00\x00\x07pykafka" # header b'\x00\x01' # required acks b'\x00\x00\'\x10' # timeout b'\x00\x00\x00\x01' # len(topics) b'\x00\x04' # len(topic name) b'test' # topic name b'\x00\x00\x00\x01' # len (partitions) b'\x00\x00\x00\x00' # partition b'\x00\x00\x004' # message set size b'\xff\xff\xff\xff\xff\xff\xff\xff' # offset b'\x00\x00\x00(' # message size b'\x0e\x8a\x19O' # crc b'\x00' # magic byte b'\x00' # attributes b'\x00\x00\x00\x04' # len(key) b'asdf' # key b'\x00\x00\x00\x16' # len(value) b"this is a test message" # value ) ) def test_gzip_compression(self): req = protocol.ProduceRequest(compression_type=CompressionType.GZIP) [req.add_message(m, b'test_gzip', 0) for m in self.test_messages] msg = req.get_bytes() self.assertEqual(len(msg), 207) # this isn't a good test def test_snappy_compression(self): req = protocol.ProduceRequest(compression_type=CompressionType.SNAPPY) [req.add_message(m, b'test_snappy', 0) for m in self.test_messages] msg = req.get_bytes() self.assertEqual(len(msg), 212) # this isn't a good test def test_partition_error(self): # Response has a UnknownTopicOrPartition error for test/0 response = protocol.ProduceResponse( buffer( b'\x00\x00\x00\x01' # len(topics) b'\x00\x04' # len(topic name) b'test' # topic name b'\x00\x00\x00\x01' # len(partitions) b'\x00\x00\x00\x00' # partition b'\x00\x03' # error code b'\x00\x00\x00\x00\x00\x00\x00\x02' # offset ) ) self.assertEqual(response.topics[b'test'][0].err, 3) def test_response(self): response = protocol.ProduceResponse( buffer( b'\x00\x00\x00\x01' # len(topics) b'\x00\x04' # len(topic name) b'test' # topic name b'\x00\x00\x00\x01' # len(partitions) b'\x00\x00\x00\x00' # partition b'\x00\x00' # error code b'\x00\x00\x00\x00\x00\x00\x00\x02' # offset ) ) self.assertEqual( response.topics, {b'test': {0: protocol.ProducePartitionResponse(0, 2)}} ) class TestFetchAPI(unittest2.TestCase): 
maxDiff = None expected_data = [ { 'partition_key': b'asdf', 'compression_type': 0, 'value': b'this is a test message', 'offset': 0, 'partition_id': 0, 'produce_attempt': 0, 'delivery_report_q': None, 'partition': None }, { 'partition_key': b'test_key', 'compression_type': 0, 'value': b'this is also a test message', 'offset': 1, 'partition_id': 0, 'produce_attempt': 0, 'delivery_report_q': None, 'partition': None }, { 'partition_key': None, 'compression_type': 0, 'value': b"this doesn't have a partition key", 'offset': 2, 'partition_id': 0, 'produce_attempt': 0, 'delivery_report_q': None, 'partition': None }] def msg_to_dict(self, msg): """Helper to extract data from Message slots""" attr_names = protocol.Message.__slots__ f = operator.attrgetter(*attr_names) return dict(zip(attr_names, f(msg))) def test_request(self): preq = protocol.PartitionFetchRequest(b'test', 0, 1) req = protocol.FetchRequest(partition_requests=[preq, ]) msg = req.get_bytes() self.assertEqual( msg, bytearray( b'\x00\x00\x00;\x00\x01\x00\x00\x00\x00\x00\x00\x00\x07pykafka' # header b'\xff\xff\xff\xff' # replica id b'\x00\x00\x03\xe8' # max wait time b'\x00\x00\x04\x00' # min bytes b'\x00\x00\x00\x01' # len(topics) b'\x00\x04' # len(topic name) b'test' # topic name b'\x00\x00\x00\x01' # len(partitions) b'\x00\x00\x00\x00' # partition b'\x00\x00\x00\x00\x00\x00\x00\x01' # fetch offset b'\x00\x10\x00\x00' # max bytes ) ) def test_partition_error(self): # Response has a UnknownTopicOrPartition error for test/0 response = protocol.FetchResponse( buffer( b'\x00\x00\x00\x01' # len(topics) b'\x00\x04' # len(topic name) b'test' # len(topic) b'\x00\x00\x00\x01' # len (partitions) b'\x00\x00\x00\x00' # partition id b'\x00\x03' # error code b'\x00\x00\x00\x00\x00\x00\x00\x02' # highwater mark offset b'\x00\x00\x00B' # message set size b'\x00\x00\x00\x00\x00\x00\x00\x01' # offset b'\x00\x00\x006' # message size b'\xa3 ^B' # crc b'\x00' # magic byte b'\x00' # attributes b'\x00\x00\x00\x12' # len(key) 
b'test_partition_key' # key b'\x00\x00\x00\x16' # len(value) b'this is a test message' # value ) ) self.assertEqual(response.topics[b'test'][0].err, 3) def test_response(self): resp = protocol.FetchResponse( buffer( b'\x00\x00\x00\x01' # len(topics) b'\x00\x04' # len(topic name) b'test' # len(topic) b'\x00\x00\x00\x01' # len (partitions) b'\x00\x00\x00\x00' # partition id b'\x00\x00' # error code b'\x00\x00\x00\x00\x00\x00\x00\x02' # highwater mark offset b'\x00\x00\x00B' # message set size b'\x00\x00\x00\x00\x00\x00\x00\x01' # offset b'\x00\x00\x006' # message size b'\xa3 ^B' # crc b'\x00' # magic byte b'\x00' # attributes b'\x00\x00\x00\x12' # len(key) b'test_partition_key' # key b'\x00\x00\x00\x16' # len(value) b'this is a test message' # value ) ) self.assertEqual(len(resp.topics[b'test'][0].messages), 1) self.assertEqual(resp.topics[b'test'][0].max_offset, 2) message = resp.topics[b'test'][0].messages[0] self.assertEqual(message.value, b'this is a test message') self.assertEqual(message.partition_key, b'test_partition_key') self.assertEqual(message.compression_type, 0) self.assertEqual(message.offset, 1) def test_gzip_decompression(self): msg = b''.join([ b'\x00\x00\x00\x01' # len(topics) b'\x00\t' # len(topic name) b'test_gzip' # topic name b'\x00\x00\x00\x01' # len(partitions) b'\x00\x00\x00\x00' # partition b'\x00\x00' # error code b'\x00\x00\x00\x00\x00\x00\x00\x03' # highwater mark offset b'\x00\x00\x00\x9b' # message set size b'\x00\x00\x00\x00\x00\x00\x00\x02' # offset b'\x00\x00\x00\x8f' # message size b'\xbb\xe7\x1f\xb8' # crc b'\x00' # magic byte b'\x01' # attributes b'\xff\xff\xff\xff' # len(key) b'\x00\x00\x00\x81' # len(value) b'\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\x00c`\x80\x03\r\xbe.I\x7f0\x8b%\xb18%\rH\x8b\x95dd\x16+\x00Q\xa2BIjq\x89Bnjqqbz*T=#\x10\x1b\xb2\xf3\xcb\xf4\x81y\x1c 
\x15\xf1\xd9\xa9\x95@\xb64\\_Nq>v\xcdL@\xac\x7f\xb5(\xd9\x98\x81\xe1?\x10\x00y\x8a`M)\xf9\xa9\xc5y\xea%\n\x19\x89e\xa9@\x9d\x05\x89E%\x99%\x99\xf9y\n@\x93\x01N1\x9f[\xac\x00\x00\x00' # value ]) response = protocol.FetchResponse(msg) for i in range(len(self.expected_data)): self.assertDictEqual( self.msg_to_dict(response.topics[b'test_gzip'][0].messages[i]), self.expected_data[i]) def test_snappy_decompression(self): msg = b''.join([ b'\x00\x00\x00\x01' # len(topics) b'\x00\x0b' # len(topic name) b'test_snappy' # topic name b'\x00\x00\x00\x01' # len(partitions) b'\x00\x00\x00\x00' # partition b'\x00\x00' # error code b'\x00\x00\x00\x00\x00\x00\x00\x03' # highwater mark offset b'\x00\x00\x00\xb5' # message set size b'\x00\x00\x00\x00\x00\x00\x00\x02' # offset b'\x00\x00\x00\xa9' # message size b'\xc1\xf2\xa3\xe1' # crc b'\x00' # magic bytes b'\x02' # attributes b'\xff\xff\xff\xff' # len(key) b'\x00\x00\x00\x9b' # len(value) b'\x82SNAPPY\x00\x00\x00\x00\x01\x00\x00\x00\x01\x00\x00\x00\x87\xac\x01\x00\x00\x19\x01\x10(\x0e\x8a\x19O\x05\x0fx\x04asdf\x00\x00\x00\x16this is a test message\x05$(\x00\x00\x01\x00\x00\x001\x07\x0f\x1c\x8e\x05\x10\x00\x08\x01"\x1c_key\x00\x00\x00\x1b\x158\x08lsoV=\x00H\x02\x00\x00\x00/\xd5rc3\x00\x00\xff\xff\xff\xff\x00\x00\x00!\x055ldoesn\'t have a partition key' # value ]) response = protocol.FetchResponse(msg) for i in range(len(self.expected_data)): self.assertDictEqual( self.msg_to_dict(response.topics[b'test_snappy'][0].messages[i]), self.expected_data[i]) class TestOffsetAPI(unittest2.TestCase): maxDiff = None def test_request(self): preq = protocol.PartitionOffsetRequest(b'test', 0, -1, 1) req = protocol.OffsetRequest(partition_requests=[preq, ]) msg = req.get_bytes() self.assertEqual( msg, bytearray( b'\x00\x00\x003\x00\x02\x00\x00\x00\x00\x00\x00\x00\x07pykafka' # header b'\xff\xff\xff\xff' # replica id b'\x00\x00\x00\x01' # len(topics) b'\x00\x04' # len(topic name) b'test' # topic name b'\x00\x00\x00\x01' # len(partitions) 
b'\x00\x00\x00\x00' # partition b'\xff\xff\xff\xff\xff\xff\xff\xff' # time b'\x00\x00\x00\x01' # max number of offsets ) ) def test_partition_error(self): # Response has a UnknownTopicOrPartition error for test/0 response = protocol.OffsetResponse( buffer( b'\x00\x00\x00\x01' # len(topics) b'\x00\x04' # len(topic name) b'test' # topic name b'\x00\x00\x00\x01' # len(partitions) b'\x00\x00\x00\x00' # partitoin b'\x00\x03' # error code b'\x00\x00\x00\x01' # len(offsets) b'\x00\x00\x00\x00\x00\x00\x00\x02' # offset ) ) self.assertEqual(response.topics[b'test'][0].err, 3) def test_response(self): resp = protocol.OffsetResponse( buffer( b'\x00\x00\x00\x01' # len(topics) b'\x00\x04' # len(topic name) b'test' # topic name b'\x00\x00\x00\x01' # len(partitions) b'\x00\x00\x00\x00' # partitoin b'\x00\x00' # error code b'\x00\x00\x00\x01' # len(offsets) b'\x00\x00\x00\x00\x00\x00\x00\x02' # offset ) ) self.assertEqual(resp.topics[b'test'][0].offset, [2]) class TestOffsetCommitFetchAPI(unittest2.TestCase): maxDiff = None def test_consumer_metadata_request(self): req = protocol.GroupCoordinatorRequest(b'test') msg = req.get_bytes() self.assertEqual( msg, bytearray( b'\x00\x00\x00\x17\x00\n\x00\x00\x00\x00\x00\x00\x00\x07pykafka' # header b'\x00\x04' # len(group id) b'test' # group id ) ) def test_consumer_metadata_response(self): response = protocol.GroupCoordinatorResponse( buffer( b'\x00\x00' # error code b'\x00\x00\x00\x00' # coordinator id b'\x00\r' # len(coordinator host) b'emmett-debian' # coordinator host b'\x00\x00#\x84' # coordinator port ) ) self.assertEqual(response.coordinator_id, 0) self.assertEqual(response.coordinator_host, b'emmett-debian') self.assertEqual(response.coordinator_port, 9092) def test_offset_commit_request(self): preq = protocol.PartitionOffsetCommitRequest( b'test', 0, 68, 1426632066, b'testmetadata') req = protocol.OffsetCommitRequest( b'test', 1, b'pykafka', partition_requests=[preq, ]) msg = req.get_bytes() self.assertEqual( msg, bytearray( 
b'\x00\x00\x00T\x00\x08\x00\x01\x00\x00\x00\x00\x00\x07pykafka' b'\x00\x04' # len(consumer group id) b'test' # consumer group id b'\x00\x00\x00\x01' # consumer group generation id b'\x00\x07' # len(consumer id) b'pykafka' # consumer id b'\x00\x00\x00\x01' # len(topics) b'\x00\x04' # len(topic name) b'test' # topic name b'\x00\x00\x00\x01' # len(partitions) b'\x00\x00\x00\x00' # partition b'\x00\x00\x00\x00\x00\x00\x00D' # offset b'\x00\x00\x00\x00U\x08\xad\x82' # timestamp b'\x00\x0c' # len(metadata) b'testmetadata' # metadata ) ) def test_offset_commit_response(self): response = protocol.OffsetCommitResponse( buffer( b'\x00\x00\x00\x01' # len(topics) b'\x00\x0c' # len(topic name) b'emmett.dummy' # topic name b'\x00\x00\x00\x01' # len(partitions) b'\x00\x00\x00\x00' # partition b'\x00\x00' # error code ) ) self.assertEqual(response.topics[b'emmett.dummy'][0].err, 0) def test_offset_fetch_request(self): preq = protocol.PartitionOffsetFetchRequest(b'testtopic', 0) req = protocol.OffsetFetchRequest(b'test', partition_requests=[preq, ]) msg = req.get_bytes() self.assertEqual( msg, bytearray( b'\x00\x00\x00.\x00\t\x00\x01\x00\x00\x00\x00\x00\x07pykafka' # header b'\x00\x04' # len(consumer group) b'test' # consumer group b'\x00\x00\x00\x01' # len(topics) b'\x00\t' # len(topic name) b'testtopic' # topic name b'\x00\x00\x00\x01' # len(partitions) b'\x00\x00\x00\x00' # partition ) ) def test_offset_fetch_response(self): response = protocol.OffsetFetchResponse( buffer( b'\x00\x00\x00\x01' # len(topics) b'\x00\x0c' # len(topic name) b'emmett.dummy' # topic name b'\x00\x00\x00\x01' # len(partitions) b'\x00\x00\x00\x00' # partition b'\x00\x00\x00\x00\x00\x00\x00\x01' # offset b'\x00\x00' # len(metadata) b'\x00\x00' # error code ) ) self.assertEqual(response.topics[b'emmett.dummy'][0].metadata, b'') self.assertEqual(response.topics[b'emmett.dummy'][0].offset, 1) class TestGroupMembershipAPI(unittest2.TestCase): maxDiff = None def test_consumer_group_protocol_metadata(self): meta 
= protocol.ConsumerGroupProtocolMetadata() msg = meta.get_bytes() self.assertEqual( msg, bytearray( b'\x00\x00\x00\x00' # version b'\x00\x01' # len(subscription) b'\x00\n' # len(topic name) b'dummytopic' # topic name b'\x00\x00\x00\x0c' # len(userdata) b'testuserdata') # userdata ) def test_join_group_request(self): req = protocol.JoinGroupRequest(b'dummygroup', member_id=b'testmember') msg = req.get_bytes() self.assertEqual( msg, bytearray( b'\x00\x00\x00|\x00\x0b\x00\x00\x00\x00\x00\x00\x00\x07pykafka' # header b'\x00\n' # len(groupid) b'dummygroup' # groupid b'\x00\x00u0' # session timeout b'\x00\n' # len(memberid) b'testmember' # memberid b'\x00\x08' # len(protocol type) b'consumer' # protocol type b'\x00\x00\x00\x01' # len(group protocols) b'\x00\x19' # len(protocol name) b'pykafkaassignmentstrategy' # protocol name b'\x00\x00\x00"' # len(protocol metadata) b'\x00\x00\x00\x00\x00\x01\x00\ndummytopic\x00\x00\x00\x0ctestuserdata' # protocol metadata ) ) def test_join_group_response(self): response = protocol.JoinGroupResponse( bytearray( b'\x00\x00' # error code b'\x00\x00\x00\x01' # generation id b'\x00\x17' # len (group protocol) b'dummyassignmentstrategy' # group protocol b'\x00,' # len(leader id) b'pykafka-b2361322-674c-4e26-9194-305962636e57' # leader id b'\x00,' # len(member id) b'pykafka-b2361322-674c-4e26-9194-305962636e57' # member id b'\x00\x00\x00\x01' # leb(members) b'\x00,' # len(member id) b'pykafka-b2361322-674c-4e26-9194-305962636e57' # member id b'\x00\x00\x00"' # len(member metadata) b'\x00\x00\x00\x00\x00\x01\x00\ndummytopic\x00\x00\x00\x0ctestuserdata\x00\x00\x00\x00' # member metadata ) ) self.assertEqual(response.generation_id, 1) self.assertEqual(response.group_protocol, b'dummyassignmentstrategy') self.assertEqual(response.leader_id, b'pykafka-b2361322-674c-4e26-9194-305962636e57') self.assertEqual(response.member_id, b'pykafka-b2361322-674c-4e26-9194-305962636e57') self.assertEqual(response.members, 
{b'pykafka-b2361322-674c-4e26-9194-305962636e57': b'\x00\x00\x00\x00\x00\x01\x00\ndummytopic\x00\x00\x00\x0ctestuserdata'}) def test_member_assignment_construction(self): assignment = protocol.MemberAssignment([(b"mytopic1", [3, 5, 7, 9]), (b"mytopic2", [2, 4, 6, 8])]) msg = assignment.get_bytes() self.assertEqual( msg, bytearray( b'\x00\x01' # version b'\x00\x00\x00\x02' # len(partition assignment) b'\x00\x08' # len(topic) b'mytopic1' # topic b'\x00\x00\x00\x04' # len(partitions) b'\x00\x00\x00\x03' # partition b'\x00\x00\x00\x05' # partition b'\x00\x00\x00\x07' # partition b'\x00\x00\x00\t' # partition b'\x00\x08' # len(topic) b'mytopic2' # topic b'\x00\x00\x00\x04' # len(partitions) b'\x00\x00\x00\x02' # partition b'\x00\x00\x00\x04' # partition b'\x00\x00\x00\x06' # partition b'\x00\x00\x00\x08' # partition ) ) def test_sync_group_request(self): req = protocol.SyncGroupRequest( b'dummygroup', 1, b'testmember1', [ protocol.MemberAssignment([(b"mytopic1", [3, 5, 7, 9]), (b"mytopic2", [3, 5, 7, 9])], member_id=b"a"), protocol.MemberAssignment([(b"mytopic1", [2, 4, 6, 8]), (b"mytopic2", [2, 4, 6, 8])], member_id=b"b") ]) msg = req.get_bytes() self.assertEqual( msg, bytearray( b'\x00\x00\x00\xc4\x00\x0e\x00\x00\x00\x00\x00\x00\x00\x07pykafka' # header b'\x00\n' # len(group id) b'dummygroup' # group id b'\x00\x00\x00\x01' # generation id b'\x00\x0b' # len(member id) b'testmember1' # member id b'\x00\x00\x00\x02' # len(group assignment) b'\x00\x01' # len(member id) b'a' # member id b'\x00\x00\x00B' # len(member assignment) b'\x00\x01\x00\x00\x00\x02\x00\x08mytopic1\x00\x00\x00\x04\x00\x00\x00\x03\x00\x00\x00\x05\x00\x00\x00\x07\x00\x00\x00\t\x00\x08mytopic2\x00\x00\x00\x04\x00\x00\x00\x03\x00\x00\x00\x05\x00\x00\x00\x07\x00\x00\x00\t' # member assignment b'\x00\x01' # len(member id) b'b' # member id b'\x00\x00\x00B' # len(member assignment) 
b'\x00\x01\x00\x00\x00\x02\x00\x08mytopic1\x00\x00\x00\x04\x00\x00\x00\x02\x00\x00\x00\x04\x00\x00\x00\x06\x00\x00\x00\x08\x00\x08mytopic2\x00\x00\x00\x04\x00\x00\x00\x02\x00\x00\x00\x04\x00\x00\x00\x06\x00\x00\x00\x08' # member assignment ) ) def test_sync_group_response(self): response = protocol.SyncGroupResponse( bytearray( b'\x00\x00' # error code b'\x00\x00\x00H' # len(member assignment) b'\x00\x01\x00\x00\x00\x01\x00\x14testtopic_replicated\x00\x00\x00\n\x00\x00\x00\x06\x00\x00\x00\x07\x00\x00\x00\x08\x00\x00\x00\t\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x02\x00\x00\x00\x03\x00\x00\x00\x04\x00\x00\x00\x05,pyk' # member assignment ) ) self.assertEqual(response.error_code, 0) expected_assignment = [(b'testtopic_replicated', [6, 7, 8, 9, 0, 1, 2, 3, 4, 5])] self.assertEqual(response.member_assignment.partition_assignment, expected_assignment) def test_heartbeat_request(self): req = protocol.HeartbeatRequest(b'dummygroup', 1, b'testmember') msg = req.get_bytes() self.assertEqual( msg, bytearray( b'\x00\x00\x00-\x00\x0c\x00\x00\x00\x00\x00\x00\x00\x07pykafka' # header b'\x00\n' # len(group id) b'dummygroup' # group id b'\x00\x00\x00\x01' # generation id b'\x00\n' # len(member id) b'testmember' # member id ) ) def test_heartbeat_response(self): response = protocol.HeartbeatResponse( bytearray( b'\x00\x00' # error code ) ) self.assertEqual(response.error_code, 0) def test_leave_group_request(self): req = protocol.LeaveGroupRequest(b'dummygroup', b'testmember') msg = req.get_bytes() self.assertEqual( msg, bytearray( b'\x00\x00\x00)\x00\r\x00\x00\x00\x00\x00\x00\x00\x07pykafka' # header b'\x00\n' # len(group id) b'dummygroup' # group id b'\x00\n' # len(member id) b'testmember' # member id ) ) def test_leave_group_response(self): response = protocol.LeaveGroupResponse( bytearray( b'\x00\x00' # error code ) ) self.assertEqual(response.error_code, 0) if __name__ == '__main__': unittest2.main()
44.917241
410
0.477691
3,712
32,565
4.140894
0.088631
0.24904
0.22074
0.104743
0.726368
0.676729
0.64921
0.617136
0.574458
0.559365
0
0.148214
0.398956
32,565
724
411
44.979282
0.637102
0.138216
0
0.608434
0
0.027108
0.269503
0.109944
0
0
0
0
0.075301
1
0.051205
false
0
0.00753
0
0.081325
0
0
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
2
5562848a302f8f16f948dbeb9d46bc69fdc98ff8
483
py
Python
examples/mouse/make_reports.py
jpkulasingham/Eelbrain
1061ce0b781a8e55ec187723b58491a5cde32e08
[ "BSD-3-Clause" ]
32
2015-04-05T15:38:55.000Z
2022-03-12T02:35:09.000Z
examples/mouse/make_reports.py
jpkulasingham/Eelbrain
1061ce0b781a8e55ec187723b58491a5cde32e08
[ "BSD-3-Clause" ]
22
2015-07-14T14:55:44.000Z
2022-03-15T17:46:11.000Z
examples/mouse/make_reports.py
jpkulasingham/Eelbrain
1061ce0b781a8e55ec187723b58491a5cde32e08
[ "BSD-3-Clause" ]
29
2015-04-23T13:51:09.000Z
2021-08-12T11:28:21.000Z
# skip test: data unavailable """Generate group level result resports for the Mouse experiment This script generates HTML reports based on the experiment defined in ``mouse.py``. """ from mouse import e e.set(epoch='target', inv='fixed-3-dSPM') e.make_report('surprise', mask='frontotemporal-lh', pmin=0.05, baseline=False, tstart=0.3, tstop=0.5, raw='1-40') e.make_report('surprise', mask='frontotemporal-lh', pmin=0.05, baseline=False, tstart=0.3, tstop=0.5, raw='fastica1-40')
37.153846
120
0.73499
80
483
4.4125
0.6375
0.028329
0.062323
0.107649
0.436261
0.436261
0.436261
0.436261
0.436261
0.436261
0
0.048499
0.10352
483
12
121
40.25
0.766744
0.362319
0
0
1
0
0.275748
0
0
0
0
0
0
1
0
true
0
0.25
0
0.25
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
2
5575bd4041f8e2638bc9fdd187ab3a9422fab620
818
py
Python
code/kg2/generate_umls2rdf_conf.py
meghasin/RTX
4e7b73263f60840d206ce759111214c28bfc999d
[ "MIT" ]
3
2021-11-09T19:41:40.000Z
2021-12-26T21:51:38.000Z
code/kg2/generate_umls2rdf_conf.py
meghasin/RTX
4e7b73263f60840d206ce759111214c28bfc999d
[ "MIT" ]
190
2021-05-22T01:25:49.000Z
2022-03-20T05:05:37.000Z
code/kg2/generate_umls2rdf_conf.py
meghasin/RTX
4e7b73263f60840d206ce759111214c28bfc999d
[ "MIT" ]
1
2021-05-26T22:51:26.000Z
2021-05-26T22:51:26.000Z
#!/usr/bin/env python3 import MySQLdb import getpass # Generate configuration for the "umls2rdf" software, by querying which data sources are # available in an installed UMLS mysql database; this script prints the configuration # information as CSV to stdout. password = getpass.getpass("Please enter the password for accessing a UMLS mysql database: ") conn = MySQLdb.connect(host='kg2dev.saramsey.org', user='ubuntu', passwd=password, db='umls', use_unicode=True) cursor = conn.cursor() cursor.execute("select distinct RSAB from MRSAB") # SAR: add where clause to specify language = ENG? for record in cursor.fetchall(): sab = record[0] print(sab + ',' + 'umls-' + sab.lower() + '.ttl' + ',' + 'load_on_codes')
37.181818
101
0.649144
101
818
5.227723
0.752475
0.034091
0.064394
0
0
0
0
0
0
0
0
0.006483
0.245721
818
21
102
38.952381
0.849271
0.331296
0
0
1
0
0.271218
0
0
0
0
0
0
1
0
false
0.230769
0.153846
0
0.153846
0.076923
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
0
1
0
0
0
0
0
2
557a8da7abc7823e3358cd28e1c0d3d091116a4d
1,017
py
Python
app/models/admin.py
redsnowc/odst
ae49802477d6bfccdde39e3445bfe55468ce9d5c
[ "MIT" ]
null
null
null
app/models/admin.py
redsnowc/odst
ae49802477d6bfccdde39e3445bfe55468ce9d5c
[ "MIT" ]
1
2021-06-02T00:03:50.000Z
2021-06-02T00:03:50.000Z
app/models/admin.py
redsnowc/odst
ae49802477d6bfccdde39e3445bfe55468ce9d5c
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- """ :author: 秋荏苒 :copyright: © 2019 by 秋荏苒 <nuanyang.44@gmail.com>. :license: MIT, see LICENSE for more details. """ from flask_login import UserMixin from werkzeug.security import generate_password_hash, check_password_hash from app.libs.extensions import db from app.models.base import Base class Admin(Base, UserMixin): id = db.Column(db.Integer, primary_key=True) username = db.Column(db.String(20)) password_hash = db.Column(db.String(256)) blog_title = db.Column(db.String(60)) blog_sub_title = db.Column(db.String(100)) name = db.Column(db.String(30)) about = db.Column(db.Text) theme = db.Column(db.String(10), default='darkly') blog_index_image_url = db.Column(db.String(256)) blog_description = db.Column(db.String(200)) def set_password(self, password): self.password_hash = generate_password_hash(password) def check_password(self, password): return check_password_hash(self.password_hash, password)
32.806452
73
0.709931
148
1,017
4.736486
0.472973
0.114123
0.142653
0.182596
0.125535
0.065621
0
0
0
0
0
0.031765
0.164208
1,017
30
74
33.9
0.791765
0.12881
0
0
0
0
0.006928
0
0
0
0
0
0
1
0.105263
false
0.315789
0.210526
0.052632
0.947368
0
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
1
0
0
0
0
0
2
557f2f3ff719147960ada82a33b5ec7f049814f7
4,931
py
Python
src/Army.py
gle8098/hundred-years-war
c74820337dd4ea6129c0b2aaa012be5666814e76
[ "WTFPL" ]
null
null
null
src/Army.py
gle8098/hundred-years-war
c74820337dd4ea6129c0b2aaa012be5666814e76
[ "WTFPL" ]
null
null
null
src/Army.py
gle8098/hundred-years-war
c74820337dd4ea6129c0b2aaa012be5666814e76
[ "WTFPL" ]
null
null
null
from src.ArmyEconomics import ArmyEconomics from src.Squad import Squad from src.ArmyEconomicsProxy import ArmyEconomicsProxy from src.CountriesConstants import CountryConstants from src.CombatElement import CombatElement from src.BattleEventsListener import BattleEventsListener from src import GameplayParameters class Army(CombatElement): _country: CountryConstants = None _squads = None _unit_factory = None _coins = GameplayParameters.INITIAL_MONEY_COUNT _is_battle_state = False _economics = None _subscribers = None def __init__(self, country, unit_factory): self._squads = [] self._country = country self._unit_factory = unit_factory self._economics = ArmyEconomicsProxy(self, ArmyEconomics(self)) self._subscribers = [] # Base Getters def get_country(self): return self._country def get_unit_factory(self): return self._unit_factory def get_coins_count(self): return self._coins def economics_development(self): return self._economics def is_in_battle(self): return self._is_battle_state def get_health(self): result = 0 for squad in self._squads: result += squad.get_health() return result # Coins Work def spend_coins(self, amount): if self._coins >= amount: self._coins -= amount return True return False def income(self, coins): self._coins += coins # Squad Related def add_squad(self, squad): self._squads.append(squad) def _get_alive_squards(self): result = [] for squad in self._squads: if squad.is_alive(): result.append(squad) return result def _find_most_squad(self, comparator) -> Squad: result = None for squad in self._get_alive_squards(): if result is None or comparator(squad.get_health(), result.get_health()): result = squad return result def find_least_healthy_squad(self) -> Squad: return self._find_most_squad(lambda x, y: x < y) def find_most_healthy_squad(self) -> Squad: return self._find_most_squad(lambda x, y: x > y) def get_random_squad(self): from random import randint arr = self._get_alive_squards() if len(arr) is 0: return None return arr[randint(0, len(arr) 
- 1)] def get_squads(self): return tuple(self._squads) # Event Bus Related def add_battle_events_subscriber(self, subscriber): if not isinstance(subscriber, BattleEventsListener): raise TypeError("Subscriber is not instance of BattleEventsListener") self._subscribers.append(subscriber) def remove_battle_events_subscriber(self, subscriber): if subscriber in self._subscribers: self._subscribers.remove(subscriber) return True return False def _send_event(self, event_sender): for subscriber in self._subscribers: event_sender(subscriber) ATTACK_EVENT_TYPES = {'reflect': 1, 'damage': 2, 'unit_killed': 3, 'squad_killed': 4} def on_attacked_result(self, attack_obj, event_type): def handler(listener): if event_type == Army.ATTACK_EVENT_TYPES['reflect']: listener.on_reflected_attack(attack_obj) elif event_type == Army.ATTACK_EVENT_TYPES['damage']: listener.on_unit_damaged(attack_obj) elif event_type == Army.ATTACK_EVENT_TYPES['unit_killed']: listener.on_unit_killed(attack_obj) elif event_type == Army.ATTACK_EVENT_TYPES['squad_killed']: listener.on_squad_killed(attack_obj) else: raise ValueError("Unknown event type") self._send_event(handler) # Battle Related def set_martial_law(self): self._is_battle_state = True self._send_event(lambda listener: listener.on_battle_begins()) def remove_martial_law(self): self._is_battle_state = False self._send_event(lambda listener: listener.on_battle_ends()) self.reset_battle_parameters() def take_strike(self, attack): if attack.attacking not in self._squads: raise ValueError("Attacked squad in not is this army") attack.attacking.take_strike(attack) return self.is_alive() def reset_battle_parameters(self): for squad in self._squads: squad.reset_battle_parameters() # To String def __str__(self): res = 'Army[coins={}]:\n'.format(self._coins) for squad in self._squads: res += ' ' + str(squad) + "\n" return res
31.608974
90
0.630704
565
4,931
5.210619
0.20708
0.027174
0.023777
0.023777
0.241508
0.167459
0.131793
0.110734
0.081522
0.038723
0
0.00229
0.291624
4,931
155
91
31.812903
0.840538
0.016224
0
0.095652
0
0
0.041587
0
0
0
0
0
0
1
0.226087
false
0
0.069565
0.069565
0.53913
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
1
0
0
2
55834af41ed7ec29168673a0b9eb32a326abe92b
89
py
Python
django_python3_ldap/__init__.py
akhoslabinary/ldap
bdaf747b20cc6f30aa78c58a753244434742d213
[ "BSD-3-Clause" ]
null
null
null
django_python3_ldap/__init__.py
akhoslabinary/ldap
bdaf747b20cc6f30aa78c58a753244434742d213
[ "BSD-3-Clause" ]
null
null
null
django_python3_ldap/__init__.py
akhoslabinary/ldap
bdaf747b20cc6f30aa78c58a753244434742d213
[ "BSD-3-Clause" ]
null
null
null
""" Django LDAP user authentication backend for Python 3. """ __version__ = (0, 11, 2)
12.714286
53
0.674157
12
89
4.666667
1
0
0
0
0
0
0
0
0
0
0
0.069444
0.191011
89
6
54
14.833333
0.708333
0.595506
0
0
0
0
0
0
0
0
0
0
0
1
0
false
0
0
0
0
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
2
55858905a4fd59663d2bf2214125da7846ecdc2d
3,193
py
Python
tests/test_compare.py
digitalkaoz/py_human_name_compare
d1d53c81f3c109500a0036deb69e3f7f70f32dd8
[ "MIT" ]
null
null
null
tests/test_compare.py
digitalkaoz/py_human_name_compare
d1d53c81f3c109500a0036deb69e3f7f70f32dd8
[ "MIT" ]
null
null
null
tests/test_compare.py
digitalkaoz/py_human_name_compare
d1d53c81f3c109500a0036deb69e3f7f70f32dd8
[ "MIT" ]
null
null
null
import unittest from human_name_compare import match_name name_sets = [ "Robert Schönthal", "Robert R. Schönthal", "Robert R Schönthal", "Robert R Peter Schönthal", "R. Schönthal", "R Schönthal", "Robert Robert Schönthal" ] no_sets = [ "Robert Müller", "Peter Schönthal", ] class CompareTestCase(unittest.TestCase): def test_names(self): self.assertTrue(match_name('Dr. med. Claudius Gall, Bac. phil. MBA', 'Claudius Gall')) self.assertFalse(match_name('Martin Auer', 'Martin Bauer')) self.assertTrue(match_name('Martin Drewes', 'Martin J Drewes')) self.assertTrue(match_name('Dagmar Fuhrer', 'Dagmar Führer-Sakel')) self.assertTrue(match_name('Ch Grohé', 'Christian Grohé')) self.assertTrue(match_name('Young Jun Kim', 'Youngjun Kim')) self.assertTrue(match_name('T Majic', 'Tomislav Maji')) self.assertTrue(match_name('Susan Kralisch', 'Susann Kralisch')) self.assertTrue(match_name('Sara Kammerer', 'Sarah Kammerer')) self.assertFalse(match_name('Bernhard König','Burkhard König')) self.assertFalse(match_name('Uwe Töpfer', 'Udo Töpfer')) self.assertFalse(match_name('Maria-Christina Jung', 'Carin Jung')) self.assertTrue(match_name('Darius Schlemmer', 'Dariusz Schlemmer')) self.assertTrue(match_name('Horst von Schlemmer', 'Horst Schlemmer')) self.assertTrue(match_name('Horst-Eugen Schlemmer', 'HORST SCHLEMMER')) self.assertTrue(match_name('Horst-Eugen Schlemmer', 'Horst Eugen Schlemmer')) self.assertTrue(match_name('Horst E. Schlemmer', 'Horst Eugen Schlemmer')) self.assertFalse(match_name('Horst E. 
Schlemmer', 'Horst Klaus Schlemmer')) self.assertTrue(match_name('Jörn Müller', 'Joern Mueller')) self.assertTrue(match_name('Andrè Müller', 'Andre Mueller')) self.assertTrue(match_name('H.-Eberhard Börngen', 'Horst E Börngen')) self.assertFalse(match_name('Lutz König', 'Lars König')) self.assertFalse(match_name('Kerstin König', 'Kristin König')) for n in name_sets: for on in name_sets: self.assertTrue(match_name(n, on), "{} matches on {}".format(n, on)) def test_names_no_matches(self): for n in name_sets: for on in no_sets: self.assertFalse(match_name(n, on), "{} must not match {}".format(n, on)) def test_split_medic_names(self): name = "Victor-Felix Hugo Mauthner" expected = ['%Mauthner', 'Victor% Mauthner', 'V% Mauthner', 'Victor% Felix% Mauthner', 'Victor% F% Mauthner', 'V% F% Mauthner', 'Victor% Felix% Hugo% Mauthner', 'Victor% Felix% H% Mauthner', 'Victor% F% Hugo% Mauthner', 'Victor% F% H% Mauthner', 'V% F% Hugo% Mauthner', 'V% F% H% Mauthner' ] for n in expected: self.assertTrue(match_name(name, n.replace('%', '')), n) if __name__ == '__main__': unittest.main()
40.417722
94
0.606013
363
3,193
5.192837
0.292011
0.128912
0.181432
0.219629
0.285942
0.177188
0.089125
0.089125
0.066844
0.066844
0
0
0.261823
3,193
78
95
40.935897
0.799745
0
0
0.030303
0
0
0.357344
0
0
0
0
0
0.393939
1
0.045455
false
0
0.030303
0
0.090909
0
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
2
55891ba9a79529570cd750d4cc9fdbd7ca107075
6,588
py
Python
myagents.py
obedotto/aiagent
f882ad524cf17fa1d88adf5ed27e538977be3a07
[ "BSD-3-Clause" ]
null
null
null
myagents.py
obedotto/aiagent
f882ad524cf17fa1d88adf5ed27e538977be3a07
[ "BSD-3-Clause" ]
null
null
null
myagents.py
obedotto/aiagent
f882ad524cf17fa1d88adf5ed27e538977be3a07
[ "BSD-3-Clause" ]
null
null
null
import random class Thing: """ This represents any physical object that can appear in an Environment. """ def is_alive(self): """Things that are 'alive' should return true.""" return hasattr(self, 'alive') and self.alive def show_state(self): """Display the agent's internal state. Subclasses should override.""" print("I don't know how to show_state.") class Agent(Thing): """ An Agent is a subclass of Thing """ def __init__(self, program=None): self.alive = True self.performance = 0 self.program = program def can_grab(self, thing): """Return True if this agent can grab this thing. Override for appropriate subclasses of Agent and Thing.""" return False def TableDrivenAgentProgram(table): """ [Figure 2.7] This agent selects an action based on the percept sequence. It is practical only for tiny domains. To customize it, provide as table a dictionary of all {percept_sequence:action} pairs. """ percepts = [] def program(percept): action =None """ Write your code here """ return action return program loc_A, loc_B = (0, 0), (1, 0) # The two locations for the Vacuum world def TableDrivenVacuumAgent(): """ Tabular approach towards vacuum world """ table = {((loc_A, 'Clean'),): 'Right', ((loc_A, 'Dirty'),): 'Suck', ((loc_B, 'Clean'),): 'Left', ((loc_B, 'Dirty'),): 'Suck', ((loc_A, 'Dirty'), (loc_A, 'Clean')): 'Right', ((loc_A, 'Clean'), (loc_B, 'Dirty')): 'Suck', ((loc_B, 'Clean'), (loc_A, 'Dirty')): 'Suck', ((loc_B, 'Dirty'), (loc_B, 'Clean')): 'Left', ((loc_A, 'Dirty'), (loc_A, 'Clean'), (loc_B, 'Dirty')): 'Suck', ((loc_B, 'Dirty'), (loc_B, 'Clean'), (loc_A, 'Dirty')): 'Suck'} return Agent(TableDrivenAgentProgram(table)) class Environment: """Abstract class representing an Environment. 'Real' Environment classes inherit from this. Your Environment will typically need to implement: percept: Define the percept that an agent sees. execute_action: Define the effects of executing an action. Also update the agent.performance slot. 
The environment keeps a list of .things and .agents (which is a subset of .things). Each agent has a .performance slot, initialized to 0. Each thing has a .location slot, even though some environments may not need this.""" def __init__(self): self.things = [] self.agents = [] def percept(self, agent): """Return the percept that the agent sees at this point. (Implement this.)""" raise NotImplementedError def execute_action(self, agent, action): """Change the world to reflect this action. (Implement this.)""" raise NotImplementedError def default_location(self, thing): """Default location to place a new thing with unspecified location.""" return None def is_done(self): """By default, we're done when we can't find a live agent.""" return not any(agent.is_alive() for agent in self.agents) def step(self): """Run the environment for one time step. If the actions and exogenous changes are independent, this method will do. If there are interactions between them, you'll need to override this method.""" if not self.is_done(): actions = [] for agent in self.agents: if agent.alive: actions.append(agent.program(self.percept(agent))) else: actions.append("") for (agent, action) in zip(self.agents, actions): self.execute_action(agent, action) def run(self, steps=1000): """Run the Environment for given number of time steps.""" for step in range(steps): if self.is_done(): return self.step() def add_thing(self, thing, location=None): """Add a thing to the environment, setting its location. For convenience, if thing is an agent program we make a new agent for it. 
(Shouldn't need to override this.)""" if not isinstance(thing, Thing): thing = Agent(thing) if thing in self.things: print("Can't add the same thing twice") else: thing.location = location if location is not None else self.default_location(thing) self.things.append(thing) if isinstance(thing, Agent): thing.performance = 0 self.agents.append(thing) def delete_thing(self, thing): """Remove a thing from the environment.""" try: self.things.remove(thing) except ValueError as e: print(e) print(" in Environment delete_thing") print(" Thing to be removed: {} at {}".format(thing, thing.location)) print(" from list: {}".format([(thing, thing.location) for thing in self.things])) if thing in self.agents: self.agents.remove(thing) class TrivialVacuumEnvironment(Environment): """This environment has two locations, A and B. Each can be Dirty or Clean. The agent perceives its location and the location's status. This serves as an example of how to implement a simple Environment.""" def __init__(self): super().__init__() self.status = {loc_A: random.choice(['Clean', 'Dirty']), loc_B: random.choice(['Clean', 'Dirty'])} def thing_classes(self): return [ TableDrivenVacuumAgent] def percept(self, agent): """Returns the agent's location, and the location status (Dirty/Clean).""" return agent.location, self.status[agent.location] def execute_action(self, agent, action): """Change agent's location and/or location's status; track performance. Score 10 for each dirt cleaned; -1 for each move.""" """ Write your code here """ def default_location(self, thing): """Agents start in either location at random.""" return random.choice([loc_A, loc_B]) if __name__ == "__main__": agent = TableDrivenVacuumAgent() environment = TrivialVacuumEnvironment() environment.add_thing(agent) print(environment.status) environment.run() print(agent.performance)
34.673684
95
0.600638
811
6,588
4.794081
0.260173
0.012346
0.011574
0.013374
0.114712
0.064815
0.048611
0.02392
0.013374
0
0
0.003417
0.289162
6,588
189
96
34.857143
0.82682
0.344414
0
0.121212
0
0
0.075895
0
0
0
0
0.010582
0
1
0.212121
false
0
0.010101
0.010101
0.373737
0.080808
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
1
0
0
1
0
0
0
0
0
0
0
2
559b470fd09bf54c6a5d3e70ac07bb9e3353dda1
392
py
Python
Python Projects/django_login_reg/log_reg_app/migrations/0002_user_password.py
that-other-alexa/bootcamp-projects
68c1fc700c879b2742f33241998e815fe54dea99
[ "MIT" ]
null
null
null
Python Projects/django_login_reg/log_reg_app/migrations/0002_user_password.py
that-other-alexa/bootcamp-projects
68c1fc700c879b2742f33241998e815fe54dea99
[ "MIT" ]
null
null
null
Python Projects/django_login_reg/log_reg_app/migrations/0002_user_password.py
that-other-alexa/bootcamp-projects
68c1fc700c879b2742f33241998e815fe54dea99
[ "MIT" ]
null
null
null
# Generated by Django 3.1.1 on 2020-09-13 05:19 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('log_reg_app', '0001_initial'), ] operations = [ migrations.AddField( model_name='user', name='password', field=models.CharField(default=None, max_length=255), ), ]
20.631579
65
0.596939
44
392
5.204545
0.840909
0
0
0
0
0
0
0
0
0
0
0.078571
0.285714
392
18
66
21.777778
0.739286
0.114796
0
0
1
0
0.101449
0
0
0
0
0
0
1
0
false
0.083333
0.083333
0
0.333333
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
0
1
0
0
0
0
0
2
55adac48d4d4e534437c38ca12f50657368f2137
400
py
Python
c05/p158.py
HiAwesome/python-algorithm
eda61b52b4b25940caf58a40641122772b8c9203
[ "Apache-2.0" ]
1
2020-11-05T08:55:30.000Z
2020-11-05T08:55:30.000Z
c05/p158.py
HiAwesome/python-algorithm
eda61b52b4b25940caf58a40641122772b8c9203
[ "Apache-2.0" ]
null
null
null
c05/p158.py
HiAwesome/python-algorithm
eda61b52b4b25940caf58a40641122772b8c9203
[ "Apache-2.0" ]
null
null
null
def sequentialSearch(alist, item): pos = 0 found = False while pos < len(alist) and not found: if alist[pos] == item: found = True else: pos += 1 return found if __name__ == '__main__': test_list = [1, 2, 32, 8, 17, 19, 42, 13, 0] print(sequentialSearch(test_list, 3)) print(sequentialSearch(test_list, 13)) """ False True """
17.391304
48
0.56
52
400
4.096154
0.576923
0.112676
0.234742
0.2723
0
0
0
0
0
0
0
0.069091
0.3125
400
22
49
18.181818
0.705455
0
0
0
0
0
0.020942
0
0
0
0
0
0
1
0.076923
false
0
0
0
0.153846
0.153846
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
2
e9521d67a905dc22c3b2e0748a513a9a3afc403a
912
py
Python
md2html/__version__.py
g0man/md2html
c7f021298556e60984497464c1f523ac2443e868
[ "MIT" ]
1
2018-08-03T01:25:38.000Z
2018-08-03T01:25:38.000Z
md2html/__version__.py
g0man/md2html
c7f021298556e60984497464c1f523ac2443e868
[ "MIT" ]
null
null
null
md2html/__version__.py
g0man/md2html
c7f021298556e60984497464c1f523ac2443e868
[ "MIT" ]
null
null
null
# version_info should conform to PEP 386 # (major, minor, micro, alpha/beta/rc/final, #) # (1, 1, 2, 'alpha', 0) => "1.1.2.dev" # (1, 2, 0, 'beta', 2) => "1.2b2" __version_info__ = (0, 1, 0, 'alpha', 0) def _get_version(): # pragma: no cover " Returns a PEP 386-compliant version number from version_info. " assert len(__version_info__) == 5 assert __version_info__[3] in ('alpha', 'beta', 'rc', 'final') parts = 2 if __version_info__[2] == 0 else 3 main = '.'.join(map(str, __version_info__[:parts])) sub = '' if __version_info__[3] == 'alpha' and __version_info__[4] == 0: # TODO: maybe append some sort of git info here?? sub = '.dev' elif __version_info__[3] != 'final': mapping = {'alpha': 'a', 'beta': 'b', 'rc': 'c'} sub = mapping[__version_info__[3]] + str(__version_info__[4]) return str(main + sub) __version__ = _get_version()
35.076923
69
0.605263
133
912
3.699248
0.428571
0.268293
0.097561
0.065041
0
0
0
0
0
0
0
0.05042
0.217105
912
26
70
35.076923
0.638655
0.308114
0
0
0
0
0.164244
0
0
0
0
0.038462
0.133333
1
0.066667
false
0
0
0
0.133333
0
0
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
2
e954e12e98a793a8f412d1373ab4543b2c8f2aca
237
py
Python
lib/castiron/options.py
wijjo/castiron
6cb8b542f887546e3569ca5292b721a7b42e2e3e
[ "Apache-2.0" ]
null
null
null
lib/castiron/options.py
wijjo/castiron
6cb8b542f887546e3569ca5292b721a7b42e2e3e
[ "Apache-2.0" ]
null
null
null
lib/castiron/options.py
wijjo/castiron
6cb8b542f887546e3569ca5292b721a7b42e2e3e
[ "Apache-2.0" ]
null
null
null
class Options(object): def __init__(self, dry_run=False, unoptimized=False, verbose=False, debug=False): self.dry_run = dry_run self.unoptimized = unoptimized self.verbose = verbose self.debug = debug
33.857143
85
0.670886
29
237
5.241379
0.413793
0.118421
0.131579
0
0
0
0
0
0
0
0
0
0.236287
237
6
86
39.5
0.839779
0
0
0
0
0
0
0
0
0
0
0
0
1
0.166667
false
0
0
0
0.333333
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
2
e96525f4438647fcdf889ae8212d6957df7f141f
240
py
Python
PhysicsTools/PatAlgos/python/recoLayer0/duplicatedElectrons_cfi.py
ckamtsikis/cmssw
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
[ "Apache-2.0" ]
852
2015-01-11T21:03:51.000Z
2022-03-25T21:14:00.000Z
PhysicsTools/PatAlgos/python/recoLayer0/duplicatedElectrons_cfi.py
ckamtsikis/cmssw
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
[ "Apache-2.0" ]
30,371
2015-01-02T00:14:40.000Z
2022-03-31T23:26:05.000Z
PhysicsTools/PatAlgos/python/recoLayer0/duplicatedElectrons_cfi.py
ckamtsikis/cmssw
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
[ "Apache-2.0" ]
3,240
2015-01-02T05:53:18.000Z
2022-03-31T17:24:21.000Z
import FWCore.ParameterSet.Config as cms # Remove duplicates from the electron list electronsNoDuplicates = cms.EDFilter("DuplicatedElectronCleaner", ## reco electron input source electronSource = cms.InputTag("gsfElectrons"), )
26.666667
65
0.779167
24
240
7.791667
0.875
0
0
0
0
0
0
0
0
0
0
0
0.141667
240
8
66
30
0.907767
0.279167
0
0
0
0
0.218935
0.147929
0
0
0
0
0
1
0
false
0
0.25
0
0.25
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
2
e96b52ade17ce44056a0244362f9b82c48a240f4
188
py
Python
CS1410/Code/mode.py
Davidjbennett/DavidBennett.github.io
09a2652b7ace8741bf23c6432abd58ee790b9f0c
[ "MIT" ]
3
2021-05-18T16:17:29.000Z
2022-01-20T15:46:59.000Z
CS1410/Code/mode.py
Davidjbennett/DavidBennett
09a2652b7ace8741bf23c6432abd58ee790b9f0c
[ "MIT" ]
null
null
null
CS1410/Code/mode.py
Davidjbennett/DavidBennett
09a2652b7ace8741bf23c6432abd58ee790b9f0c
[ "MIT" ]
null
null
null
words = [word.upper() for word in open('gettysburg.txt').read().split()] theDictionary = {} for word in words: theDictionary[word] = theDictionary.get(word,0) + 1 print(theDictionary)
31.333333
72
0.707447
25
188
5.32
0.6
0.105263
0.135338
0
0
0
0
0
0
0
0
0.012195
0.12766
188
5
73
37.6
0.79878
0
0
0
0
0
0.074468
0
0
0
0
0
0
1
0
false
0
0
0
0
0.2
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
2
e9866a497ac17b3733cabd6268dbaa332b40695b
3,456
py
Python
support/sendmail.py
plsang/tvmed-framework
212f938941550ae3d4d8f48e9c88008ff6203a0a
[ "Unlicense" ]
null
null
null
support/sendmail.py
plsang/tvmed-framework
212f938941550ae3d4d8f48e9c88008ff6203a0a
[ "Unlicense" ]
null
null
null
support/sendmail.py
plsang/tvmed-framework
212f938941550ae3d4d8f48e9c88008ff6203a0a
[ "Unlicense" ]
null
null
null
import smtplib import os import sys from email.MIMEMultipart import MIMEMultipart from email.MIMEBase import MIMEBase from email.MIMEText import MIMEText from email.Utils import COMMASPACE, formatdate from email import Encoders from optparse import OptionParser parser = OptionParser() parser.add_option( "-x", "--server", dest = "server", default = "smtp.gmail.com", help = "specify SMTP server URL", metavar = "URL") parser.add_option( "-v", "--verbose", dest = "verb", action = "count", help = "increase verbosity level") parser.add_option( "-r", "--port", dest = "port", default = 587, type = "int", help = "specify SMTP server port", metavar = "PORT") parser.add_option( "-p", "--password", dest = "password", default = "", help = "specify SMTP password", metavar = "PASSWD") parser.add_option( "-u", "--username", dest = "username", default = "", help = "specify SMTP user name", metavar = "NAME") parser.add_option( "-f", "--from", dest = "send_from", default = "", help = "specify the FROM field") parser.add_option( "-t", "--to", dest = "send_to", default = "", help = "specify the TO field") parser.add_option( "-b", "--body", dest = "body", default = "", help = "specify the body", metavar = "TXT") parser.add_option( "-s", "--subject", dest = "subject", default = "", help = "specify the subject") # -------------------------------------------------------------------- def make_msg(send_from, send_to, subject, body, files): # -------------------------------------------------------------------- assert type(send_to)==list assert type(files)==list msg = MIMEMultipart() msg['From'] = send_from msg['To'] = COMMASPACE.join(send_to) msg['Date'] = formatdate(localtime=True) msg['Subject'] = subject msg.attach(MIMEText(body)) for f in files: part = MIMEBase('application', "octet-stream") part.set_payload(open(f,"rb").read()) Encoders.encode_base64(part) part.add_header('Content-Disposition', 'attachment; filename="%s"' % os.path.basename(f)) msg.attach(part) if options.verb: print msg 
return msg # -------------------------------------------------------------------- def send_tsl(msg, server, port, username, password): # -------------------------------------------------------------------- smtp = smtplib.SMTP(server, port) smtp.ehlo() smtp.starttls() smtp.ehlo() smtp.login(username, password) smtp.sendmail(msg['From'], msg['To'], msg.as_string()) smtp.close() # -------------------------------------------------------------------- if __name__ == "__main__": # -------------------------------------------------------------------- (options, args) = parser.parse_args() msg = make_msg(options.send_from, [options.send_to], options.subject, options.body, args) send_tsl(msg, options.server, options.port, options.username, options.password) ; sys.exit(0)
24.863309
74
0.478299
324
3,456
4.993827
0.333333
0.050062
0.083436
0.051916
0
0
0
0
0
0
0
0.002414
0.280671
3,456
138
75
25.043478
0.648431
0.119502
0
0.155963
0
0
0.156785
0
0
0
0
0
0.018349
0
null
null
0.06422
0.082569
null
null
0.009174
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
1
0
0
0
0
0
2
e98780c12fbbeccfab047f43580583cd576c0c6f
305
py
Python
tests/test_main.py
postrational/osmtilecalc
a744696e05c885c935d410522244ad5409dd104c
[ "MIT" ]
null
null
null
tests/test_main.py
postrational/osmtilecalc
a744696e05c885c935d410522244ad5409dd104c
[ "MIT" ]
null
null
null
tests/test_main.py
postrational/osmtilecalc
a744696e05c885c935d410522244ad5409dd104c
[ "MIT" ]
null
null
null
from tile_calculator.main import parse_args def test_main(): parser = parse_args(["54", "18", "1", "10"]) assert parser.latitude == 54 assert parser.longitude == 18 assert parser.radius == 1 assert parser.zoom_level == 10 assert not parser.miles assert not parser.total_only
25.416667
48
0.678689
43
305
4.674419
0.55814
0.238806
0.149254
0
0
0
0
0
0
0
0
0.058091
0.209836
305
11
49
27.727273
0.775934
0
0
0
0
0
0.022951
0
0
0
0
0
0.666667
1
0.111111
false
0
0.111111
0
0.222222
0
0
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
1
0
0
0
0
0
0
0
0
0
2
e98dae30aa9dadfccd3eb1fd79e4172c895611a1
214
py
Python
gunicorn.conf.py
barry-ran/werobot
6836d4705a16a0e5aef4ffacebf6a5c8aa66d7fd
[ "MIT" ]
1
2021-10-20T07:46:57.000Z
2021-10-20T07:46:57.000Z
gunicorn.conf.py
barry-ran/werobot
6836d4705a16a0e5aef4ffacebf6a5c8aa66d7fd
[ "MIT" ]
null
null
null
gunicorn.conf.py
barry-ran/werobot
6836d4705a16a0e5aef4ffacebf6a5c8aa66d7fd
[ "MIT" ]
4
2021-08-10T01:13:10.000Z
2021-12-22T03:41:21.000Z
import multiprocessing debug = False bind = "0.0.0.0:5000" pidfile = "gunicorn.pid" workers = multiprocessing.cpu_count()*2 + 1 worker_class = "gevent" # daemon=True 在docker中不需要daemon运行,反而会导致看不到gunicorn输出而增加排查问题的难度
30.571429
62
0.78972
26
214
6.423077
0.846154
0.035928
0.035928
0
0
0
0
0
0
0
0
0.051813
0.098131
214
7
62
30.571429
0.813472
0.280374
0
0
0
0
0.196078
0
0
0
0
0
0
1
0
false
0
0.166667
0
0.166667
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
2
e9972e902c7d4ad49dfe5773af7119925b6c7968
2,891
py
Python
ratatosk/lib/variation/tabix.py
percyfal/ratatosk
71ee4741952219c1fccf9cb6c172a354610d499c
[ "Apache-2.0" ]
6
2015-07-13T16:58:27.000Z
2016-09-01T11:56:30.000Z
ratatosk/lib/variation/tabix.py
percyfal/ratatosk
71ee4741952219c1fccf9cb6c172a354610d499c
[ "Apache-2.0" ]
null
null
null
ratatosk/lib/variation/tabix.py
percyfal/ratatosk
71ee4741952219c1fccf9cb6c172a354610d499c
[ "Apache-2.0" ]
4
2015-01-14T06:49:34.000Z
2019-05-11T23:43:50.000Z
# Copyright (c) 2013 Per Unneberg # # Licensed under the Apache License, Version 2.0 (the "License"); you may not # use this file except in compliance with the License. You may obtain a copy of # the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations under # the License. """ Provide wrappers for `tabix <http://sourceforge.net/projects/samtools/files/tabix/>`_ Classes ------- """ import os import luigi import ratatosk.lib.files.input from ratatosk.job import JobTask, JobWrapperTask from ratatosk.jobrunner import DefaultShellJobRunner from ratatosk.utils import rreplace, fullclassname from ratatosk.log import get_logger logger = get_logger() class InputVcfFile(ratatosk.lib.files.input.InputVcfFile): pass class TabixJobRunner(DefaultShellJobRunner): pass class TabixJobTask(JobTask): executable = "" def job_runner(self): return TabixJobRunner() def exe(self): return self.sub_executable def main(self): return None class Bgzip(TabixJobTask): sub_executable = luigi.Parameter(default="bgzip") parent_task = luigi.Parameter(default="ratatosk.lib.variation.tabix.InputVcfFile") suffix = luigi.Parameter(default=".vcf.gz") options = luigi.Parameter(default=("-f",)) def args(self): return [self.input()[0]] # Since this is such a common operation, add the task here class BgUnzip(TabixJobTask): sub_executable = luigi.Parameter(default="bgzip") parent_task = luigi.Parameter(default="ratatosk.lib.variation.tabix.Bgzip") suffix = luigi.Parameter(default=".vcf") def opts(self): retval = list(self.options) if not "-d" in retval: retval += ["-d"] return retval def args(self): return [self.input()[0]] class Tabix(TabixJobTask): sub_executable = luigi.Parameter(default="tabix") parent_task = 
luigi.Parameter(default="ratatosk.lib.variation.tabix.Bgzip") suffix = luigi.Parameter(default=".vcf.gz.tbi") def args(self): return [self.input()[0]] class IndexedBgzip(JobWrapperTask): suffix = luigi.Parameter(default=(".vcf.gz", ".vcf.gz.tbi"), is_list=True) parent_task = luigi.Parameter(default="ratatosk.lib.variation.tabix.Bgzip") def requires(self): zipcls = ratatosk.lib.variation.tabix.Bgzip indexcls = ratatosk.lib.variation.tabix.Tabix return [zipcls(target=self.source()[0]), indexcls(target=rreplace(self.source()[0], zipcls().sfx(), indexcls().sfx(), 1), parent_task=fullclassname(zipcls))]
30.755319
103
0.695607
361
2,891
5.531856
0.385042
0.084126
0.126189
0.075113
0.317476
0.302454
0.24637
0.232849
0.200801
0.200801
0
0.005955
0.186787
2,891
93
104
31.086022
0.843471
0.245936
0
0.245283
0
0
0.094532
0.066265
0
0
0
0
0
1
0.150943
false
0.037736
0.132075
0.113208
0.811321
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
1
1
0
0
2
e9a688faf71a2e8bd61f4efc91959e21628d1db5
841
py
Python
server/forestgame/test_colour.py
Nick-Pearson/forestgame
8a37225adbe6da9df7851eba34ad06806da0ce48
[ "0BSD" ]
null
null
null
server/forestgame/test_colour.py
Nick-Pearson/forestgame
8a37225adbe6da9df7851eba34ad06806da0ce48
[ "0BSD" ]
5
2021-03-10T14:18:45.000Z
2022-03-12T00:28:29.000Z
server/forestgame/test_colour.py
Nick-Pearson/forestgame
8a37225adbe6da9df7851eba34ad06806da0ce48
[ "0BSD" ]
null
null
null
import unittest from forestgame.colour import colour_to_hex class ToHexTest(unittest.TestCase): def test_convert_solid_black(self): hex_code = colour_to_hex((0, 0, 0)) self.assertEqual(hex_code, "#000000") def test_convert_solid_whex_codeite(self): hex_code = colour_to_hex((255, 255, 255)) self.assertEqual(hex_code, "#FFFFFF") def test_convert_solid_red(self): hex_code = colour_to_hex((255, 0, 0)) self.assertEqual(hex_code, "#FF0000") def test_convert_solid_green(self): hex_code = colour_to_hex((0, 255, 0)) self.assertEqual(hex_code, "#00FF00") def test_convert_solid_blue(self): hex_code = colour_to_hex((0, 0, 255)) self.assertEqual(hex_code, "#0000FF") def test_convert_low_values(self): hex_code = colour_to_hex((15, 15, 15)) self.assertEqual(hex_code, "#0F0F0F")
29
45
0.719382
128
841
4.375
0.265625
0.15
0.1375
0.182143
0.469643
0.339286
0.216071
0.085714
0
0
0
0.075843
0.153389
841
28
46
30.035714
0.710674
0
0
0
0
0
0.049941
0
0
0
0
0
0.285714
1
0.285714
false
0
0.095238
0
0.428571
0
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
0
0
0
2
e9b1ada3f713ed4dd315cbbcbd80498751208753
606
py
Python
netbox_rbac/macros.py
shakefu/netbox-rbac
ed7acbf00ae0a289d417465addfb723f8dca8cec
[ "MIT" ]
5
2020-01-07T08:22:07.000Z
2020-06-02T16:00:06.000Z
netbox_rbac/macros.py
shakefu/netbox-rbac
ed7acbf00ae0a289d417465addfb723f8dca8cec
[ "MIT" ]
5
2020-05-05T17:08:01.000Z
2021-10-29T21:13:46.000Z
netbox_rbac/macros.py
shakefu/netbox-rbac
ed7acbf00ae0a289d417465addfb723f8dca8cec
[ "MIT" ]
2
2020-05-08T00:39:12.000Z
2020-11-19T21:53:46.000Z
from wcmatch.fnmatch import fnmatch

# Sentinel distinguishing "attribute absent" from an attribute whose value is None.
_MISSING = object()


def get(obj, path):
    """Follow a dotted attribute *path* from *obj*; return None if any hop is absent."""
    current = obj
    for name in path.split("."):
        current = getattr(current, name, _MISSING)
        if current is _MISSING:
            return None
    return current


def match(obj, *args):
    """fnmatch the first resolvable path's value against the pattern(s); False if none resolve."""
    paths, values = args[:-1], args[-1]
    return _walk_match(obj, paths, values, False)


def match_or_none(obj, *args):
    """Like match(), but defaults to True when no path resolves on *obj*."""
    paths, values = args[:-1], args[-1]
    return _walk_match(obj, paths, values, True)


def _walk_match(obj, paths, values, default):
    """Return the fnmatch result for the first path that yields a non-None value."""
    for path in paths:
        candidate = get(obj, path)
        if candidate is None:
            continue
        return fnmatch(candidate, values)
    return default
17.823529
49
0.615512
83
606
4.39759
0.349398
0.150685
0.09863
0.139726
0.342466
0.279452
0.279452
0.279452
0.279452
0.279452
0
0
0.283828
606
33
50
18.363636
0.841014
0
0
0.105263
0
0
0.00165
0
0
0
0
0
0
1
0.210526
false
0
0.052632
0
0.578947
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
1
0
0
2
75686236f633eaa74d39b52bb49f39c0d83f27f0
378
py
Python
orders/choices.py
Zadigo/mycommerce
145031ebb359389e680a820577a4b6b2d382646d
[ "MIT" ]
null
null
null
orders/choices.py
Zadigo/mycommerce
145031ebb359389e680a820577a4b6b2d382646d
[ "MIT" ]
null
null
null
orders/choices.py
Zadigo/mycommerce
145031ebb359389e680a820577a4b6b2d382646d
[ "MIT" ]
null
null
null
from django.db.models import Choices


class ShipmentChoices(Choices):
    """Shipping-method choices for orders.

    Values double as the human-readable labels; presumably these are the
    French carrier offerings (La Poste / Chronopost family) — verify against
    the checkout UI before adding or renaming members.
    """

    CHRONOPOST = 'Chronopost'
    COLISSIMO_STANDARD = 'Colissimo - Livraison standard'
    COLISSIMO_INTERNATIONAL = 'Colissimo International'
    COLIPOSTE_DOM_TOM = 'Coliposte DOM-TOM'
    COLIPOSTE_INTERNATIONAL = 'Coliposte International'
    RELAIS_COLIS = 'Relais colis'
    EN_MAGASIN = 'En magasin'
34.363636
57
0.756614
38
378
7.342105
0.526316
0.121864
0.107527
0.172043
0
0
0
0
0
0
0
0
0.169312
378
10
58
37.8
0.888535
0
0
0
0
0
0.330688
0
0
0
0
0
0
1
0
false
0
0.111111
0
1
0
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
2
756f0213852e33c13d8f43a7a2f1fe6b016c8670
652
py
Python
ServerComponent/Analysers/Models.py
CDU55/FakeNews
707bd48dd78851081d98ad21bbdadfc2720bd644
[ "MIT" ]
null
null
null
ServerComponent/Analysers/Models.py
CDU55/FakeNews
707bd48dd78851081d98ad21bbdadfc2720bd644
[ "MIT" ]
37
2020-10-20T08:30:53.000Z
2020-12-22T13:15:45.000Z
ServerComponent/Analysers/Models.py
CDU55/FakeNews
707bd48dd78851081d98ad21bbdadfc2720bd644
[ "MIT" ]
1
2020-10-19T14:55:23.000Z
2020-10-19T14:55:23.000Z
class AnalysisElement:
    """A single validation outcome: a result flag plus its accompanying message."""

    def __init__(self, validationResult, validationMessage):
        self.validation_result = validationResult
        self.validation_message = validationMessage


class AnalysisResult:
    """An ordered collection of AnalysisElement objects."""

    def __init__(self):
        self.elements = []

    def add_element(self, element: AnalysisElement):
        """Append one validation element to this result."""
        self.elements.append(element)


class DataSetEntry:
    """One dataset row: a positional index and its classification label."""

    def __init__(self, index, classification):
        self.index = index
        self.classification = classification


class DataSetEntries:
    """An ordered collection of DataSetEntry objects."""

    def __init__(self):
        self.entries = []

    def add_element(self, entry: DataSetEntry):
        """Append one dataset entry to this collection."""
        self.entries.append(entry)
24.148148
60
0.696319
62
652
7
0.33871
0.064516
0.101382
0.069124
0
0
0
0
0
0
0
0
0.220859
652
26
61
25.076923
0.854331
0
0
0.111111
0
0
0
0
0
0
0
0
0
1
0.333333
false
0
0
0
0.555556
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
1
0
0
2
75745e1d864863a9fd3849bbdcd4cb89b05d83db
502
py
Python
backend/app/models/user.py
xi102/annotation_tools
eb7e195651e5d7738f48f303ee5d4ecb1b6d10e1
[ "MIT" ]
null
null
null
backend/app/models/user.py
xi102/annotation_tools
eb7e195651e5d7738f48f303ee5d4ecb1b6d10e1
[ "MIT" ]
null
null
null
backend/app/models/user.py
xi102/annotation_tools
eb7e195651e5d7738f48f303ee5d4ecb1b6d10e1
[ "MIT" ]
null
null
null
import random

from app import db


class User(db.Model):
    """ORM model for an application user account."""

    __tablename__ = 'user'

    # Auto-incrementing primary key.
    id = db.Column(db.Integer, primary_key=True)
    # Login name, unique per account (max 16 chars).
    username = db.Column(db.String(16), unique=True)
    # NOTE(review): stored as a plain 20-char column with no visible hashing
    # step here — confirm hashing happens elsewhere before persisting.
    password = db.Column(db.String(20))
    email = db.Column(db.String(20), unique=True)
    # NOTE(review): evaluated once at class-definition time, so every User
    # instance shares the same `code` for the process lifetime — confirm
    # that per-instance codes are not expected here.
    code = str(int(random.uniform(0, 1) * 1000000))

    def __repr__(self):
        # Debug representation (label says 'Role'; presumably copied from a
        # Role model — verify intent).
        return '<Role %r>' % self.username

    def forgetpass(self):
        """Return a fresh random 0-6 digit numeric string, presumably a password-reset code."""
        codes = str(int(random.uniform(0, 1) * 1000000))
        return codes
25.1
56
0.63745
71
502
4.380282
0.521127
0.102894
0.128617
0.154341
0.29582
0.180064
0.180064
0
0
0
0
0.061224
0.219124
502
19
57
26.421053
0.732143
0
0
0
0
0
0.025896
0
0
0
0
0
0
1
0.142857
false
0.142857
0.142857
0.071429
0.928571
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
1
0
0
1
0
0
2
7576b7bfaa4141e7e958dc5ad82c566881c44d4d
1,617
py
Python
tests/pdf/test_pdf_extraction.py
mikesperry/IndicoIo-python
caa155b8b31b76df3f86f559ce5324f061a03e40
[ "MIT" ]
4
2015-08-20T22:42:19.000Z
2016-03-14T01:28:45.000Z
tests/pdf/test_pdf_extraction.py
mikesperry/IndicoIo-python
caa155b8b31b76df3f86f559ce5324f061a03e40
[ "MIT" ]
null
null
null
tests/pdf/test_pdf_extraction.py
mikesperry/IndicoIo-python
caa155b8b31b76df3f86f559ce5324f061a03e40
[ "MIT" ]
null
null
null
import os.path

import PIL
from PIL import Image
# Explicitly import the JPEG plugin: a bare `import PIL` does not guarantee
# plugin submodules are loaded, so `PIL.JpegImagePlugin.JpegImageFile` could
# raise AttributeError depending on import order.
from PIL.JpegImagePlugin import JpegImageFile
from PIL.PpmImagePlugin import PpmImageFile
import six

from indicoio import pdf_extraction
from .indico_pdf_base import PDFTestCase

# Fixture PDF shipped next to this test module.
DIR = os.path.dirname(os.path.realpath(__file__))
PDF = os.path.join(DIR, 'data', 'test.pdf')


class PDFExtractionTestCase(PDFTestCase):
    """Integration tests for the pdf_extraction endpoint (text, images, tables, URL input)."""

    def test_pdf_extraction(self):
        results = pdf_extraction(PDF)
        assert 'text' in results.keys()
        assert 'metadata' in results.keys()
        assert isinstance(results.get('text'), six.string_types)
        assert isinstance(results.get('metadata'), dict)

    def test_image_support(self):
        results = pdf_extraction(PDF, images=True)
        assert 'text' in results.keys()
        assert 'metadata' in results.keys()
        assert 'images' in results.keys()
        assert isinstance(results.get('images'), list)
        # Extracted images are decoded by Pillow as JPEGs.
        assert isinstance(results.get('images')[0], JpegImageFile)

    def test_table_support(self):
        results = pdf_extraction(PDF, tables=True)
        assert 'text' in results.keys()
        assert 'metadata' in results.keys()
        assert 'tables' in results.keys()
        assert isinstance(results.get('tables'), list)

    def test_url_support(self):
        # Remote PDFs are fetched by the service; requires network access.
        url = "https://papers.nips.cc/paper/4824-imagenet-classification-with-deep-convolutional-neural-networks.pdf"
        results = pdf_extraction(url)
        assert 'text' in results.keys()
        assert 'metadata' in results.keys()
        assert isinstance(results.get('text'), six.string_types)
        assert isinstance(results.get('metadata'), dict)
35.152174
117
0.688312
200
1,617
5.46
0.3
0.082418
0.119048
0.173993
0.544872
0.485348
0.423077
0.351648
0.351648
0.351648
0
0.003852
0.197279
1,617
45
118
35.933333
0.837442
0
0
0.333333
0
0.027778
0.132962
0
0
0
0
0
0.472222
1
0.111111
false
0
0.194444
0
0.333333
0
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
1
0
0
0
0
0
0
0
0
0
2
7580d57583a4f93e3374285e5bae1fdc9be25742
269
py
Python
polls/migrations/0010_merge_20190616_1730.py
WarwickAnimeSoc/aniMango
f927c2bc6eb484561ab38172ebebee6f03c8b13b
[ "MIT" ]
null
null
null
polls/migrations/0010_merge_20190616_1730.py
WarwickAnimeSoc/aniMango
f927c2bc6eb484561ab38172ebebee6f03c8b13b
[ "MIT" ]
6
2016-10-18T14:52:05.000Z
2020-06-18T15:14:41.000Z
polls/migrations/0010_merge_20190616_1730.py
WarwickAnimeSoc/aniMango
f927c2bc6eb484561ab38172ebebee6f03c8b13b
[ "MIT" ]
6
2020-02-07T17:37:37.000Z
2021-01-15T00:01:43.000Z
# Generated by Django 2.2.2 on 2019-06-16 16:30 from django.db import migrations class Migration(migrations.Migration): dependencies = [ ('polls', '0009_auto_20180927_2204'), ('polls', '0008_auto_20181215_1420'), ] operations = [ ]
17.933333
47
0.643123
33
269
5.060606
0.757576
0.023952
0
0
0
0
0
0
0
0
0
0.228155
0.234201
269
14
48
19.214286
0.582524
0.167286
0
0
1
0
0.252252
0.207207
0
0
0
0
0
1
0
false
0
0.125
0
0.5
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
2
758de34e4320003f1d672bd03309c80db5640ae1
1,108
py
Python
Chapter11/Exercise_11.05/bookr/reviews/api_views.py
lmoshood/The-Django-Workshop
52e86a8f93cb38bf70d50e9b8d2c6d7dac416f62
[ "MIT" ]
null
null
null
Chapter11/Exercise_11.05/bookr/reviews/api_views.py
lmoshood/The-Django-Workshop
52e86a8f93cb38bf70d50e9b8d2c6d7dac416f62
[ "MIT" ]
null
null
null
Chapter11/Exercise_11.05/bookr/reviews/api_views.py
lmoshood/The-Django-Workshop
52e86a8f93cb38bf70d50e9b8d2c6d7dac416f62
[ "MIT" ]
1
2020-05-27T13:41:58.000Z
2020-05-27T13:41:58.000Z
from django.contrib.auth import authenticate
from rest_framework import viewsets
from rest_framework.authtoken.models import Token
from rest_framework.authentication import TokenAuthentication
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.status import HTTP_404_NOT_FOUND, HTTP_200_OK
from rest_framework.views import APIView

from .models import Book
from .serializers import BookSerializer


class Login(APIView):
    """Exchange a username/password pair for a DRF auth token."""

    def post(self, request):
        # Credentials come straight from the request payload.
        user = authenticate(
            username=request.data.get("username"),
            password=request.data.get("password"),
        )
        if not user:
            error_body = {'error': 'Credentials are incorrect or user does not exist'}
            return Response(error_body, status=HTTP_404_NOT_FOUND)
        # get_or_create returns (token, created); only the token is needed here.
        token, _ = Token.objects.get_or_create(user=user)
        return Response({'token': token.key}, status=HTTP_200_OK)


class BookViewSet(viewsets.ReadOnlyModelViewSet):
    """Read-only list/detail endpoints for books, gated by token auth."""

    queryset = Book.objects.all()
    serializer_class = BookSerializer
    authentication_classes = [TokenAuthentication]
    permission_classes = [IsAuthenticated]
36.933333
117
0.784296
134
1,108
6.313433
0.440299
0.066194
0.140662
0.035461
0
0
0
0
0
0
0
0.012618
0.141697
1,108
29
118
38.206897
0.876972
0
0
0
0
0
0.066847
0
0
0
0
0
0
1
0.045455
false
0.045455
0.454545
0
0.863636
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
2
758ea46ad07de73a3db7423b437adc1e8df2dffb
420
py
Python
tests/conftest.py
MVilstrup/remake
ddeacae55ef3c329a7780c9ad4e508833cd9112e
[ "MIT" ]
null
null
null
tests/conftest.py
MVilstrup/remake
ddeacae55ef3c329a7780c9ad4e508833cd9112e
[ "MIT" ]
null
null
null
tests/conftest.py
MVilstrup/remake
ddeacae55ef3c329a7780c9ad4e508833cd9112e
[ "MIT" ]
null
null
null
""" Dummy conftest.py for remake. If you don't know what this is for, just leave it empty. Read more about conftest.py under: - https://docs.pytest.org/en/stable/fixture.html - https://docs.pytest.org/en/stable/writing_plugins.html """ import pytest import pandas as pd @pytest.fixture def abc_df(): return pd.DataFrame({ "letters": ["a", "b", "c"], "numbers": [1, 2, 3] })
23.333333
60
0.62619
63
420
4.142857
0.761905
0.076628
0.114943
0.137931
0.199234
0.199234
0
0
0
0
0
0.009174
0.221429
420
18
61
23.333333
0.788991
0.542857
0
0
0
0
0.10303
0
0
0
0
0
0
1
0.125
true
0
0.25
0.125
0.5
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
1
0
0
0
2
7596cc3e44d3f7c3bc764887fc9388014519281c
935
py
Python
libs/pubfiles.py
dpttw/SEMimage
08492dee6c1ef8d494f5a2fcd5f9621e815ba8a0
[ "MIT" ]
null
null
null
libs/pubfiles.py
dpttw/SEMimage
08492dee6c1ef8d494f5a2fcd5f9621e815ba8a0
[ "MIT" ]
null
null
null
libs/pubfiles.py
dpttw/SEMimage
08492dee6c1ef8d494f5a2fcd5f9621e815ba8a0
[ "MIT" ]
2
2016-11-08T17:00:36.000Z
2021-10-03T23:01:52.000Z
# by Guillaume Sousa Amaral from pymongo import MongoClient MONGO_MGI_USER = "mgi_user" MONGO_MGI_PASSWORD = "mgi_password" MGI_DB = "mgi" MONGODB_URI = "mongodb://" + MONGO_MGI_USER + ":" + MONGO_MGI_PASSWORD + "@localhost/" + MGI_DB def _connect(): """ Connect to the database :return: database connection """ try: # Connect to mongodb print 'Attempt connection to database...' client = MongoClient(MONGODB_URI) print 'Connected to database with success.' try: # connect to the db 'mgi' print 'Attempt connection to collection...' db = client[MGI_DB] print 'Connected to collection with success.' return db except Exception, e: print 'Unable to connect to the collection.' except Exception, e: print 'Unable to connect to MongoDB.' db = _connect() xml_data_col = db['xmldata'] xml_data_col.update({}, {"$set": {"ispublished": True}}, upsert=False, multi=True)
28.333333
96
0.685561
121
935
5.123967
0.380165
0.072581
0.058065
0.048387
0.196774
0.122581
0.122581
0.122581
0
0
0
0
0.20107
935
32
97
29.21875
0.829987
0.072727
0
0.181818
0
0
0.354167
0
0
0
0
0
0
0
null
null
0.090909
0.045455
null
null
0.272727
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
1
0
0
0
0
0
2
75984d82bba1182d73a82d9c5b06ea48fbb3a95c
1,449
py
Python
deploy/tensorrt/test.py
tienln4/PaddleOCR-1
786448503b3f23140798bdecde19e985027257b2
[ "DOC", "Apache-2.0" ]
null
null
null
deploy/tensorrt/test.py
tienln4/PaddleOCR-1
786448503b3f23140798bdecde19e985027257b2
[ "DOC", "Apache-2.0" ]
null
null
null
deploy/tensorrt/test.py
tienln4/PaddleOCR-1
786448503b3f23140798bdecde19e985027257b2
[ "DOC", "Apache-2.0" ]
null
null
null
# Scratch/debug script for an ONNX license-plate-recognition model.
# Most of the file is commented-out experiments (image resizing, ground-truth
# filtering, pre/post-processing); only the model load and session creation
# below actually execute.
import cv2
import os
import onnxruntime

# --- Leftover experiment: resize images with a ~3:1 aspect ratio to 100x32 ---
# img_dir = "valid/images"
# for fn in os.listdir(img_dir):
# fp =os.path.join(img_dir, fn)
# img = cv2.imread(fp)
# H, W, C = img.shape
# r = W/H
# if r < 3.2 and r > 2.9:
# img = cv2.resize(img, (100, 32))
# cv2.imwrite(os.path.join("X/images", fn), img)

# --- Leftover experiment: filter ground-truth labels to the kept images ---
# list_img = os.listdir("X/images")
# gt = open("X/gt.txt", "a")
# lb = open("valid/gt.txt")
# for line in lb:
# line_ = line.split("\t")
# img_name = line_[0].split('/')[1]
# if img_name in list_img:
# gt.write(line)

import onnxruntime as ort
import onnx
# from caffe2.python.onnx import backend
import numpy as np

# NOTE(review): hard-coded absolute path — will only run on the author's
# machine; consider an argument or environment variable.
onnx_path = "/data/tienln/workspace/vehicle-analysis/models/lpr/inference.onnx"
predictor = onnx.load(onnx_path)
# onnx.checker.check_model(predictor)
# onnx.helper.printable_graph(predictor.graph)
# predictor = backend.prepare(predictor, device="CPU") # default CPU

# Create an inference session; no input is actually run below.
ort_session = ort.InferenceSession(onnx_path)

# --- Leftover experiment: preprocessing + inference on a single image ---
# input_name = ort_session.get_inputs()[0].name
# result_path = "./detect_imgs_results_onnx"
# orig_image = cv2.imread("")
# image = cv2.cvtColor(orig_image, cv2.COLOR_BGR2RGB)
# image = cv2.resize(image, (320, 240))
# image_mean = np.array([127, 127, 127])
# image = (image - image_mean) / 128
# image = np.transpose(image, [2, 0, 1])
# image = np.expand_dims(image, axis=0)
# image = image.astype(np.float32)
# x = ort_session.run(None, {input_name: image})
30.1875
79
0.665977
227
1,449
4.118943
0.444934
0.034225
0.02139
0
0
0
0
0
0
0
0
0.038143
0.167702
1,449
48
80
30.1875
0.737148
0.773637
0
0
0
0
0.220339
0.220339
0
0
0
0
0
1
0
false
0
0.666667
0
0.666667
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
2
75a2f21b09f6a7d7c0521259fcd45019b58911f9
131
py
Python
tests/__init__.py
plattrap/script-venv
3e576ba3a74da94d8ec354081ae52fd05702ebc8
[ "BSD-3-Clause" ]
2
2018-03-22T20:57:54.000Z
2019-02-07T21:21:06.000Z
tests/__init__.py
plattrap/script-venv
3e576ba3a74da94d8ec354081ae52fd05702ebc8
[ "BSD-3-Clause" ]
611
2018-03-18T20:24:16.000Z
2022-03-31T02:24:15.000Z
tests/__init__.py
plattrap/script-venv
3e576ba3a74da94d8ec354081ae52fd05702ebc8
[ "BSD-3-Clause" ]
3
2018-03-21T23:54:19.000Z
2020-08-06T03:36:13.000Z
# -*- coding: utf-8 -*- """Tests package for Script Venv.""" __author__ = """Struan Lyall Judd""" __email__ = 'sv@scifi.geek.nz'
18.714286
36
0.625954
17
131
4.352941
1
0
0
0
0
0
0
0
0
0
0
0.009009
0.152672
131
6
37
21.833333
0.657658
0.40458
0
0
0
0
0.458333
0
0
0
0
0
0
1
0
false
0
0
0
0
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
2
75b1e632346b67ff88749762c7e6df8f20e5bbbe
3,258
py
Python
app/main/forms.py
martingathu/Blog
0ea51a12e6ccf6e325bfe72237ab52e8ebf7fdcc
[ "MIT" ]
null
null
null
app/main/forms.py
martingathu/Blog
0ea51a12e6ccf6e325bfe72237ab52e8ebf7fdcc
[ "MIT" ]
null
null
null
app/main/forms.py
martingathu/Blog
0ea51a12e6ccf6e325bfe72237ab52e8ebf7fdcc
[ "MIT" ]
null
null
null
from flask_wtf import FlaskForm
from flask_wtf.file import FileField, FileAllowed
from wtforms import StringField, PasswordField, SubmitField, TextAreaField, SelectField, BooleanField
# DataRequired replaces the deprecated Required alias (same validator);
# the redundant second `from wtforms import ValidationError` import was removed.
from wtforms.validators import InputRequired, Length, EqualTo, ValidationError, Email, DataRequired

from ..models import User


def invalid_credentials(form, field):
    """Username and password checker.

    Raises ValidationError when the username does not exist or the
    password does not verify against the stored hash.
    """
    username_entered = form.username.data
    password_entered = field.data
    user_object = User.query.filter_by(username=username_entered).first()
    if user_object is None or not user_object.verify_password(password_entered):
        raise ValidationError("Username or Password is incorrect")


class RegistrationForm(FlaskForm):
    """Restration form"""
    email = StringField('Your Email Address', validators=[InputRequired(), Email()])
    username = StringField('username', validators=[InputRequired(message='Username required'),
                                                   Length(min=4, max=25, message='Username must be between 4 and 25 characters')])
    password = PasswordField('password', validators=[InputRequired(message='Password required'),
                                                     Length(min=4, max=25, message='Password must be between 4 and 25 characters')])
    confirm_pswd = PasswordField('confirm Password', validators=[InputRequired(message='Password required'),
                                                                 EqualTo('password', message="Passwords must match")])
    remember = BooleanField('Subscribe')
    submit_button = SubmitField('Sign up')

    def validate_email(self, data_field):
        # Reject emails already attached to an account.
        if User.query.filter_by(email=data_field.data).first():
            raise ValidationError('There is an account with that email')

    def validate_username(self, data_field):
        # Reject usernames already taken.
        if User.query.filter_by(username=data_field.data).first():
            raise ValidationError('That username is taken')


class LoginForm(FlaskForm):
    """Login form"""
    username = StringField('username', validators=[InputRequired(message="Username required")])
    password = PasswordField('password', validators=[InputRequired(message="Password required"),
                                                     invalid_credentials])
    remember = BooleanField('Remember Me')
    submit_button = SubmitField('Sign In')


class PostForm(FlaskForm):
    """Form for creating a blog post."""
    topic = StringField('Topic', validators=[InputRequired(message="Topic required")])
    category = SelectField('Category',
                           choices=[('product', 'product'), ('interview', 'interview'), ('promotion', 'promotion')],
                           validators=[InputRequired(message="Category required")])
    description = StringField('Description', validators=[InputRequired(message="Description required")])
    submit = SubmitField('Post')


class UpdateProfile(FlaskForm):
    """Form for editing the current user's profile."""
    bio = TextAreaField('Tell us about you.', validators=[DataRequired()])
    email = StringField('Your Email Address', validators=[InputRequired(), Email()])
    username = StringField('username', validators=[InputRequired(message='Username required'),
                                                   Length(min=4, max=25, message='Username must be between 4 and 25 characters')])
    profile_pic_path = FileField('Update Profile Picture', validators=[FileAllowed(['jpg', 'png'])])
    submit = SubmitField('Submit')


class CommentsForm(FlaskForm):
    """Form for adding a comment."""
    comment = TextAreaField('Type Comment.', validators=[DataRequired()])
    submit = SubmitField('Submit')
54.3
185
0.741559
351
3,258
6.809117
0.310541
0.105858
0.112971
0.021339
0.370293
0.356485
0.302092
0.280753
0.160669
0.160669
0
0.006374
0.133211
3,258
60
186
54.3
0.839943
0.017188
0
0.136364
0
0
0.212739
0
0
0
0
0
0
1
0.068182
false
0.159091
0.136364
0
0.795455
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
1
0
0
1
0
0
2
75c26b42b2af4b7444091c13e6ecc04c1a1f63d9
18,761
py
Python
scenegraph/exp-official/taskographyv5tiny5bagslots5_hierarchical/hierarchical_test_stats.py
taskography/3dscenegraph-dev
2c261241230fbea1f1c687ff793478248f25c02c
[ "MIT" ]
1
2022-01-30T22:06:57.000Z
2022-01-30T22:06:57.000Z
scenegraph/exp-official/taskographyv5tiny5bagslots5_hierarchical/hierarchical_test_stats.py
taskography/3dscenegraph-dev
2c261241230fbea1f1c687ff793478248f25c02c
[ "MIT" ]
null
null
null
scenegraph/exp-official/taskographyv5tiny5bagslots5_hierarchical/hierarchical_test_stats.py
taskography/3dscenegraph-dev
2c261241230fbea1f1c687ff793478248f25c02c
[ "MIT" ]
null
null
null
STATS = [ { "num_node_expansions": 0, "search_time": 0.0260686, "total_time": 0.161712, "plan_length": 64, "plan_cost": 64, "objects_used": 261, "objects_total": 379, "neural_net_time": 0.10646533966064453, "num_replanning_steps": 3, "wall_time": 2.435692071914673 }, { "num_node_expansions": 0, "search_time": 0.0279492, "total_time": 0.17491, "plan_length": 52, "plan_cost": 52, "objects_used": 276, "objects_total": 379, "neural_net_time": 0.058258056640625, "num_replanning_steps": 4, "wall_time": 3.361152410507202 }, { "num_node_expansions": 0, "search_time": 0.0313502, "total_time": 0.245342, "plan_length": 61, "plan_cost": 61, "objects_used": 273, "objects_total": 379, "neural_net_time": 0.06083822250366211, "num_replanning_steps": 4, "wall_time": 4.374004602432251 }, { "num_node_expansions": 0, "search_time": 0.0462309, "total_time": 0.237932, "plan_length": 59, "plan_cost": 59, "objects_used": 243, "objects_total": 379, "neural_net_time": 0.05759286880493164, "num_replanning_steps": 3, "wall_time": 2.5270650386810303 }, { "num_node_expansions": 0, "search_time": 0.0368982, "total_time": 0.233488, "plan_length": 54, "plan_cost": 54, "objects_used": 272, "objects_total": 379, "neural_net_time": 0.05734562873840332, "num_replanning_steps": 4, "wall_time": 3.6436092853546143 }, { "num_node_expansions": 0, "search_time": 0.0155738, "total_time": 0.0682826, "plan_length": 73, "plan_cost": 73, "objects_used": 117, "objects_total": 217, "neural_net_time": 0.029698610305786133, "num_replanning_steps": 1, "wall_time": 0.906505823135376 }, { "num_node_expansions": 0, "search_time": 0.0171021, "total_time": 0.0927101, "plan_length": 63, "plan_cost": 63, "objects_used": 122, "objects_total": 217, "neural_net_time": 0.05449986457824707, "num_replanning_steps": 1, "wall_time": 1.0803697109222412 }, { "num_node_expansions": 0, "search_time": 0.020779, "total_time": 0.09933, "plan_length": 55, "plan_cost": 55, "objects_used": 124, "objects_total": 217, "neural_net_time": 
0.03200268745422363, "num_replanning_steps": 1, "wall_time": 1.2762155532836914 }, { "num_node_expansions": 0, "search_time": 0.116974, "total_time": 0.160087, "plan_length": 118, "plan_cost": 118, "objects_used": 113, "objects_total": 217, "neural_net_time": 0.032900094985961914, "num_replanning_steps": 1, "wall_time": 0.9846065044403076 }, { "num_node_expansions": 0, "search_time": 0.00864599, "total_time": 0.0618746, "plan_length": 51, "plan_cost": 51, "objects_used": 121, "objects_total": 217, "neural_net_time": 0.03256845474243164, "num_replanning_steps": 1, "wall_time": 0.9345319271087646 }, { "num_node_expansions": 0, "search_time": 0.0150995, "total_time": 0.139498, "plan_length": 55, "plan_cost": 55, "objects_used": 184, "objects_total": 320, "neural_net_time": 0.04656338691711426, "num_replanning_steps": 1, "wall_time": 1.576615810394287 }, { "num_node_expansions": 0, "search_time": 0.0195704, "total_time": 0.0776481, "plan_length": 76, "plan_cost": 76, "objects_used": 157, "objects_total": 320, "neural_net_time": 0.044793128967285156, "num_replanning_steps": 1, "wall_time": 0.9117276668548584 }, { "num_node_expansions": 0, "search_time": 0.0218047, "total_time": 0.0813003, "plan_length": 91, "plan_cost": 91, "objects_used": 169, "objects_total": 320, "neural_net_time": 0.0447840690612793, "num_replanning_steps": 1, "wall_time": 1.0234675407409668 }, { "num_node_expansions": 0, "search_time": 0.0318204, "total_time": 0.112719, "plan_length": 82, "plan_cost": 82, "objects_used": 168, "objects_total": 320, "neural_net_time": 0.048575401306152344, "num_replanning_steps": 1, "wall_time": 1.182650089263916 }, { "num_node_expansions": 0, "search_time": 0.0214701, "total_time": 0.12052, "plan_length": 68, "plan_cost": 68, "objects_used": 193, "objects_total": 305, "neural_net_time": 0.046851396560668945, "num_replanning_steps": 1, "wall_time": 1.3422448635101318 }, { "num_node_expansions": 0, "search_time": 0.0238054, "total_time": 0.0993613, "plan_length": 53, 
"plan_cost": 53, "objects_used": 214, "objects_total": 305, "neural_net_time": 0.04596352577209473, "num_replanning_steps": 1, "wall_time": 1.352567434310913 }, { "num_node_expansions": 0, "search_time": 0.0134198, "total_time": 0.0667924, "plan_length": 86, "plan_cost": 86, "objects_used": 169, "objects_total": 305, "neural_net_time": 0.043943166732788086, "num_replanning_steps": 1, "wall_time": 0.9647877216339111 }, { "num_node_expansions": 0, "search_time": 0.231443, "total_time": 0.312803, "plan_length": 84, "plan_cost": 84, "objects_used": 194, "objects_total": 305, "neural_net_time": 0.04476618766784668, "num_replanning_steps": 1, "wall_time": 1.5609796047210693 }, { "num_node_expansions": 0, "search_time": 0.0272906, "total_time": 0.101185, "plan_length": 63, "plan_cost": 63, "objects_used": 211, "objects_total": 305, "neural_net_time": 0.04451155662536621, "num_replanning_steps": 1, "wall_time": 1.2499635219573975 }, { "num_node_expansions": 0, "search_time": 0.0767934, "total_time": 0.113212, "plan_length": 156, "plan_cost": 156, "objects_used": 114, "objects_total": 212, "neural_net_time": 0.03154778480529785, "num_replanning_steps": 1, "wall_time": 0.8447625637054443 }, { "num_node_expansions": 0, "search_time": 0.665671, "total_time": 0.694063, "plan_length": 92, "plan_cost": 92, "objects_used": 109, "objects_total": 212, "neural_net_time": 0.029831647872924805, "num_replanning_steps": 0, "wall_time": 1.1394996643066406 }, { "num_node_expansions": 0, "search_time": 0.0104928, "total_time": 0.0622191, "plan_length": 68, "plan_cost": 68, "objects_used": 159, "objects_total": 365, "neural_net_time": 0.05487489700317383, "num_replanning_steps": 1, "wall_time": 0.8508009910583496 }, { "num_node_expansions": 0, "search_time": 0.0328946, "total_time": 0.126837, "plan_length": 89, "plan_cost": 89, "objects_used": 203, "objects_total": 365, "neural_net_time": 0.053848981857299805, "num_replanning_steps": 2, "wall_time": 1.7260208129882812 }, { 
"num_node_expansions": 0, "search_time": 0.0234049, "total_time": 0.0851975, "plan_length": 75, "plan_cost": 75, "objects_used": 168, "objects_total": 365, "neural_net_time": 0.053121328353881836, "num_replanning_steps": 1, "wall_time": 1.030031681060791 }, { "num_node_expansions": 0, "search_time": 0.013401, "total_time": 0.0713078, "plan_length": 59, "plan_cost": 59, "objects_used": 169, "objects_total": 365, "neural_net_time": 0.05536675453186035, "num_replanning_steps": 1, "wall_time": 1.1071906089782715 }, { "num_node_expansions": 0, "search_time": 0.104847, "total_time": 0.195061, "plan_length": 52, "plan_cost": 52, "objects_used": 167, "objects_total": 302, "neural_net_time": 0.04499459266662598, "num_replanning_steps": 1, "wall_time": 1.4144911766052246 }, { "num_node_expansions": 0, "search_time": 0.0765767, "total_time": 0.131634, "plan_length": 66, "plan_cost": 66, "objects_used": 159, "objects_total": 302, "neural_net_time": 0.0444490909576416, "num_replanning_steps": 1, "wall_time": 1.1225917339324951 }, { "num_node_expansions": 0, "search_time": 0.0633749, "total_time": 0.159814, "plan_length": 70, "plan_cost": 70, "objects_used": 163, "objects_total": 302, "neural_net_time": 0.044997453689575195, "num_replanning_steps": 1, "wall_time": 1.3655714988708496 }, { "num_node_expansions": 0, "search_time": 0.134185, "total_time": 0.447814, "plan_length": 68, "plan_cost": 68, "objects_used": 230, "objects_total": 365, "neural_net_time": 0.05536222457885742, "num_replanning_steps": 3, "wall_time": 4.771425485610962 }, { "num_node_expansions": 0, "search_time": 0.0165639, "total_time": 0.0940178, "plan_length": 62, "plan_cost": 62, "objects_used": 183, "objects_total": 365, "neural_net_time": 0.05884861946105957, "num_replanning_steps": 1, "wall_time": 1.2651619911193848 }, { "num_node_expansions": 0, "search_time": 0.124357, "total_time": 0.248529, "plan_length": 81, "plan_cost": 81, "objects_used": 214, "objects_total": 365, "neural_net_time": 
0.05655694007873535, "num_replanning_steps": 3, "wall_time": 2.9309194087982178 }, { "num_node_expansions": 0, "search_time": 0.0150311, "total_time": 0.101787, "plan_length": 63, "plan_cost": 63, "objects_used": 192, "objects_total": 365, "neural_net_time": 0.0555422306060791, "num_replanning_steps": 2, "wall_time": 1.666846752166748 }, { "num_node_expansions": 0, "search_time": 0.0150945, "total_time": 0.0514842, "plan_length": 90, "plan_cost": 90, "objects_used": 175, "objects_total": 365, "neural_net_time": 0.05377364158630371, "num_replanning_steps": 1, "wall_time": 0.8902196884155273 }, { "num_node_expansions": 0, "search_time": 0.0176192, "total_time": 0.0796522, "plan_length": 92, "plan_cost": 92, "objects_used": 178, "objects_total": 362, "neural_net_time": 0.05463862419128418, "num_replanning_steps": 1, "wall_time": 1.0109508037567139 }, { "num_node_expansions": 0, "search_time": 0.0104213, "total_time": 0.0470441, "plan_length": 77, "plan_cost": 77, "objects_used": 160, "objects_total": 362, "neural_net_time": 0.054520368576049805, "num_replanning_steps": 1, "wall_time": 0.7542357444763184 }, { "num_node_expansions": 0, "search_time": 0.00448661, "total_time": 0.0177422, "plan_length": 84, "plan_cost": 84, "objects_used": 136, "objects_total": 362, "neural_net_time": 0.05472111701965332, "num_replanning_steps": 0, "wall_time": 0.4157280921936035 }, { "num_node_expansions": 0, "search_time": 0.045955, "total_time": 0.190108, "plan_length": 74, "plan_cost": 74, "objects_used": 221, "objects_total": 322, "neural_net_time": 0.047133445739746094, "num_replanning_steps": 1, "wall_time": 1.6114940643310547 }, { "num_node_expansions": 0, "search_time": 0.0459863, "total_time": 0.178842, "plan_length": 103, "plan_cost": 103, "objects_used": 215, "objects_total": 441, "neural_net_time": 0.06811332702636719, "num_replanning_steps": 1, "wall_time": 1.7026808261871338 }, { "num_node_expansions": 0, "search_time": 0.0375162, "total_time": 0.17094, "plan_length": 71, 
"plan_cost": 71, "objects_used": 216, "objects_total": 441, "neural_net_time": 0.06928086280822754, "num_replanning_steps": 1, "wall_time": 3.0769970417022705 }, { "num_node_expansions": 0, "search_time": 0.15904, "total_time": 0.361705, "plan_length": 123, "plan_cost": 123, "objects_used": 224, "objects_total": 441, "neural_net_time": 0.06914591789245605, "num_replanning_steps": 2, "wall_time": 2.922773838043213 }, { "num_node_expansions": 0, "search_time": 0.127007, "total_time": 0.591304, "plan_length": 97, "plan_cost": 97, "objects_used": 249, "objects_total": 441, "neural_net_time": 0.06865596771240234, "num_replanning_steps": 2, "wall_time": 5.019821643829346 }, { "num_node_expansions": 0, "search_time": 0.0700383, "total_time": 0.268587, "plan_length": 95, "plan_cost": 95, "objects_used": 227, "objects_total": 441, "neural_net_time": 0.06840014457702637, "num_replanning_steps": 1, "wall_time": 2.3747305870056152 }, { "num_node_expansions": 0, "search_time": 0.0111234, "total_time": 0.0605018, "plan_length": 76, "plan_cost": 76, "objects_used": 171, "objects_total": 417, "neural_net_time": 0.06322216987609863, "num_replanning_steps": 1, "wall_time": 1.0112266540527344 }, { "num_node_expansions": 0, "search_time": 0.0183385, "total_time": 0.106249, "plan_length": 87, "plan_cost": 87, "objects_used": 194, "objects_total": 417, "neural_net_time": 0.0725412368774414, "num_replanning_steps": 1, "wall_time": 1.364563226699829 }, { "num_node_expansions": 0, "search_time": 0.0190809, "total_time": 0.111638, "plan_length": 58, "plan_cost": 58, "objects_used": 216, "objects_total": 417, "neural_net_time": 0.06367969512939453, "num_replanning_steps": 3, "wall_time": 2.054568290710449 }, { "num_node_expansions": 0, "search_time": 0.210535, "total_time": 0.334857, "plan_length": 123, "plan_cost": 123, "objects_used": 203, "objects_total": 417, "neural_net_time": 0.06560730934143066, "num_replanning_steps": 2, "wall_time": 2.3334832191467285 }, { "num_node_expansions": 0, 
"search_time": 0.0219698, "total_time": 0.130103, "plan_length": 62, "plan_cost": 62, "objects_used": 197, "objects_total": 417, "neural_net_time": 0.06369471549987793, "num_replanning_steps": 1, "wall_time": 1.6010661125183105 }, { "num_node_expansions": 0, "search_time": 0.0101631, "total_time": 0.0415831, "plan_length": 76, "plan_cost": 76, "objects_used": 112, "objects_total": 232, "neural_net_time": 0.03584003448486328, "num_replanning_steps": 1, "wall_time": 0.7372550964355469 }, { "num_node_expansions": 0, "search_time": 0.0111653, "total_time": 0.0418656, "plan_length": 53, "plan_cost": 53, "objects_used": 110, "objects_total": 232, "neural_net_time": 0.032355308532714844, "num_replanning_steps": 0, "wall_time": 0.5313618183135986 }, { "num_node_expansions": 0, "search_time": 0.00524832, "total_time": 0.0220788, "plan_length": 56, "plan_cost": 56, "objects_used": 106, "objects_total": 232, "neural_net_time": 0.03553891181945801, "num_replanning_steps": 1, "wall_time": 0.7103500366210938 }, { "num_node_expansions": 0, "search_time": 0.0136476, "total_time": 0.0672117, "plan_length": 78, "plan_cost": 78, "objects_used": 130, "objects_total": 212, "neural_net_time": 0.02926468849182129, "num_replanning_steps": 1, "wall_time": 0.8608791828155518 }, { "num_node_expansions": 0, "search_time": 0.016218, "total_time": 0.0887494, "plan_length": 75, "plan_cost": 75, "objects_used": 133, "objects_total": 212, "neural_net_time": 0.029755353927612305, "num_replanning_steps": 1, "wall_time": 1.1875898838043213 }, { "num_node_expansions": 0, "search_time": 0.0276188, "total_time": 0.0960435, "plan_length": 87, "plan_cost": 87, "objects_used": 132, "objects_total": 212, "neural_net_time": 0.04944968223571777, "num_replanning_steps": 2, "wall_time": 1.4252169132232666 }, { "num_node_expansions": 0, "search_time": 0.00914205, "total_time": 0.0572312, "plan_length": 60, "plan_cost": 60, "objects_used": 125, "objects_total": 212, "neural_net_time": 0.05208992958068848, 
"num_replanning_steps": 1, "wall_time": 0.8581404685974121 } ]
28.863077
48
0.550717
1,999
18,761
4.817409
0.167584
0.0919
0.095327
0.100935
0.592212
0.578401
0.561578
0.099792
0
0
0
0.262209
0.316774
18,761
650
49
28.863077
0.489078
0
0
0.36
0
0
0.371282
0
0
0
0
0
0
1
0
false
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
1
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
2
75c6cb3c332857eb979642c87cb41bc755711d03
549
py
Python
OSBot-GraphDB/tests/api/test_Graph_SV.py
pbx-gs/OSBot-jira
7677afee1f80398ddcccd6b45423bf6adc20b970
[ "Apache-2.0" ]
1
2019-08-06T15:40:45.000Z
2019-08-06T15:40:45.000Z
OSBot-GraphDB/tests/api/test_Graph_SV.py
pbx-gs/OSBot-jira
7677afee1f80398ddcccd6b45423bf6adc20b970
[ "Apache-2.0" ]
1
2021-09-03T09:55:39.000Z
2021-09-03T09:55:39.000Z
OSBot-GraphDB/tests/api/test_Graph_SV.py
filetrust/OSBot-jira
d753fff59cf938cf94a51bf8bc7981691524b686
[ "Apache-2.0" ]
2
2021-04-02T05:58:29.000Z
2021-09-03T09:43:29.000Z
from unittest import TestCase from osbot_utils.utils.Dev import Dev from osbot_graphsv.api.Graph_SV import Graph_SV from osbot_graphsv.demo.Demo_Data import Demo_Data class test_Graph_SV(TestCase): def setUp(self): self.demo_data = Demo_Data() self.graph_sv = Graph_SV(self.demo_data.root_folder) self.result = None def tearDown(self): if self.result is not None: Dev.pprint(self.result) def test__init__(self): assert type(self.graph_sv.file_system).__name__ == 'File_System'
26.142857
72
0.708561
82
549
4.426829
0.402439
0.115702
0.088154
0
0
0
0
0
0
0
0
0
0.209472
549
20
73
27.45
0.836406
0
0
0
0
0
0.020036
0
0
0
0
0
0.071429
1
0.214286
false
0
0.285714
0
0.571429
0.071429
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
1
0
0
2
75d7468ce9e5ecfed680e1d3ce9872582acf7a04
869
py
Python
drf_spectacular/validation/__init__.py
matthewshirley/drf-spectacular
8856ab73ba1cdf99cc6ef8dd9c2762dd86721f86
[ "BSD-3-Clause" ]
1
2021-03-08T21:42:45.000Z
2021-03-08T21:42:45.000Z
drf_spectacular/validation/__init__.py
matthewshirley/drf-spectacular
8856ab73ba1cdf99cc6ef8dd9c2762dd86721f86
[ "BSD-3-Clause" ]
null
null
null
drf_spectacular/validation/__init__.py
matthewshirley/drf-spectacular
8856ab73ba1cdf99cc6ef8dd9c2762dd86721f86
[ "BSD-3-Clause" ]
1
2020-05-03T21:40:45.000Z
2020-05-03T21:40:45.000Z
import json import os import jsonschema JSON_SCHEMA_SPEC_PATH = os.path.join(os.path.dirname(__file__), 'openapi3_schema.json') def validate_schema(api_schema): """ Validate generated API schema against OpenAPI 3.0.X json schema specification. Note: On conflict, the written specification always wins over the json schema. OpenApi3 schema specification taken from: https://github.com/OAI/OpenAPI-Specification/blob/master/schemas/v3.0/schema.json https://github.com/OAI/OpenAPI-Specification/blob/6d17b631fff35186c495b9e7d340222e19d60a71/schemas/v3.0/schema.json """ with open(JSON_SCHEMA_SPEC_PATH) as fh: openapi3_schema_spec = json.load(fh) # coerce any remnants of objects to basic types api_schema = json.loads(json.dumps(api_schema)) jsonschema.validate(instance=api_schema, schema=openapi3_schema_spec)
34.76
119
0.7687
119
869
5.445378
0.470588
0.069444
0.04321
0.055556
0.188272
0.126543
0.126543
0
0
0
0
0.050938
0.141542
869
24
120
36.208333
0.817694
0.512083
0
0
1
0
0.05102
0
0
0
0
0
0
1
0.111111
false
0
0.333333
0
0.444444
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
2
75fea4ce328064ceb779c9f7141d957fdebc0fd5
371
py
Python
src/astbuilder/cpp_gen_fwd_decl.py
mballance/pyastgen
08f85da180fefd9c602ef911a8c225c11f5dd4a0
[ "Apache-2.0" ]
null
null
null
src/astbuilder/cpp_gen_fwd_decl.py
mballance/pyastgen
08f85da180fefd9c602ef911a8c225c11f5dd4a0
[ "Apache-2.0" ]
null
null
null
src/astbuilder/cpp_gen_fwd_decl.py
mballance/pyastgen
08f85da180fefd9c602ef911a8c225c11f5dd4a0
[ "Apache-2.0" ]
null
null
null
''' Created on May 28, 2022 @author: mballance ''' from astbuilder.visitor import Visitor from astbuilder.ast import Ast class CppGenFwdDecl(Visitor): def __init__(self, out): self.out = out def gen(self, ast : Ast): ast.accept(self) def visitAstClass(self, c): self.out.println("class I%s;" % c.name)
19.526316
47
0.598383
47
371
4.638298
0.553191
0.09633
0
0
0
0
0
0
0
0
0
0.022727
0.28841
371
19
48
19.526316
0.80303
0.115903
0
0
0
0
0.031153
0
0
0
0
0
0
1
0.333333
false
0
0.222222
0
0.666667
0.111111
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
1
0
0
2
75fecc08c0b59f462e5cbb954dd13e8aaca1a951
218
py
Python
pygate/__init__.py
isabella232/pygate-webapp
c6ebfd0ca29457d3d39e97df1ba8ad16999ad913
[ "MIT" ]
6
2020-07-07T05:43:56.000Z
2020-10-14T23:23:58.000Z
pygate/__init__.py
isabella232/pygate-webapp
c6ebfd0ca29457d3d39e97df1ba8ad16999ad913
[ "MIT" ]
9
2020-07-28T21:52:15.000Z
2020-09-24T16:51:37.000Z
pygate/__init__.py
isabella232/pygate-webapp
c6ebfd0ca29457d3d39e97df1ba8ad16999ad913
[ "MIT" ]
2
2020-08-18T18:36:56.000Z
2022-03-24T07:23:28.000Z
""" Intialize the Pygate application """ from flask import Flask from flask_sqlalchemy import SQLAlchemy app = Flask(__name__) app.config.from_object("config") db = SQLAlchemy(app) from pygate import routes, models
16.769231
39
0.779817
29
218
5.655172
0.517241
0.109756
0
0
0
0
0
0
0
0
0
0
0.133028
218
12
40
18.166667
0.867725
0.146789
0
0
0
0
0.033708
0
0
0
0
0
0
1
0
false
0
0.5
0
0.5
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
2
f949e155111c48dd8234f83ea771004bf9bcb53e
400
py
Python
python/estrutura de dados/array/ExemploAula.py
luiscarlosjunior/100-days-of-code
80d0b42fb773ad0813cfa452da2e1ded6dbb0c3b
[ "MIT" ]
null
null
null
python/estrutura de dados/array/ExemploAula.py
luiscarlosjunior/100-days-of-code
80d0b42fb773ad0813cfa452da2e1ded6dbb0c3b
[ "MIT" ]
null
null
null
python/estrutura de dados/array/ExemploAula.py
luiscarlosjunior/100-days-of-code
80d0b42fb773ad0813cfa452da2e1ded6dbb0c3b
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- """ Created on Fri Apr 24 10:04:30 2020 @author: luisc """ #x = 'Estou aqui' try: print(p) except NameError: print('Ops, deu um erro de variável não inicializada') except: print('Outro tipo de erro') finally: print('Estou sempre aqui') # Raise - lança uma exceção para o usuário """ valor = -10 if valor < 0: raise Exception('O valor é negativo') """
15.384615
58
0.6325
60
400
4.216667
0.783333
0
0
0
0
0
0
0
0
0
0
0.051447
0.2225
400
26
59
15.384615
0.762058
0.3275
0
0
0
0
0.434783
0
0
0
0
0
0
1
0
true
0
0
0
0
0.5
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
1
0
2
f96d501ddd0bef41fa87ebf9409aea7f8808acc8
2,187
py
Python
members/crm/migrations/0002_auto_20171121_0725.py
ocwc/ocwc-members
3ede8e0ff830e2aaff4ae09f9aaefd3eaa83146b
[ "MIT" ]
null
null
null
members/crm/migrations/0002_auto_20171121_0725.py
ocwc/ocwc-members
3ede8e0ff830e2aaff4ae09f9aaefd3eaa83146b
[ "MIT" ]
7
2015-11-27T15:59:52.000Z
2022-01-13T00:38:38.000Z
members/crm/migrations/0002_auto_20171121_0725.py
ocwc/ocwc-members
3ede8e0ff830e2aaff4ae09f9aaefd3eaa83146b
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('crm', '0001_initial'), ] operations = [ migrations.AlterField( model_name='membershipapplication', name='membership_type', field=models.IntegerField(default=None, null=True, blank=True, choices=[(5, b'Institutional Members'), (10, b'Institutional Members - MRC'), (11, b'Institutional Members - DC'), (12, b'Institutional Members - DC - MRC'), (9, b'Associate Institutional Members'), (17, b'Associate Institutional Members - DC'), (6, b'Organizational Members'), (13, b'Organizational Members - DC'), (18, b'Organizational Members - MRC'), (7, b'Associate Consortium Members'), (14, b'Associate Consortium Members - DC'), (8, b'Corporate Members - Basic'), (15, b'Corporate Members - Premium'), (16, b'Corporate Members - Sustaining')]), ), migrations.AlterField( model_name='organization', name='membership_status', field=models.IntegerField(choices=[(1, b'Applied'), (2, b'Current'), (3, b'Grace'), (4, b'Expired'), (5, b'Pending'), (6, b'Cancelled'), (7, b'Sustaining'), (99, b'Example')]), ), migrations.AlterField( model_name='organization', name='membership_type', field=models.IntegerField(choices=[(5, b'Institutional Members'), (10, b'Institutional Members - MRC'), (11, b'Institutional Members - DC'), (12, b'Institutional Members - DC - MRC'), (9, b'Associate Institutional Members'), (17, b'Associate Institutional Members - DC'), (6, b'Organizational Members'), (13, b'Organizational Members - DC'), (18, b'Organizational Members - MRC'), (7, b'Associate Consortium Members'), (14, b'Associate Consortium Members - DC'), (8, b'Corporate Members - Basic'), (15, b'Corporate Members - Premium'), (16, b'Corporate Members - Sustaining')]), ), migrations.AlterField( model_name='organization', name='slug', field=models.CharField(default=b'', unique=True, max_length=60), ), ]
62.485714
627
0.638317
248
2,187
5.572581
0.314516
0.173661
0.121563
0.083936
0.719971
0.719971
0.670767
0.623734
0.623734
0.623734
0
0.035429
0.199817
2,187
34
628
64.323529
0.754286
0.009602
0
0.464286
0
0
0.44732
0.009704
0
0
0
0
0
1
0
false
0
0.071429
0
0.178571
0
0
0
0
null
0
0
0
0
1
0
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
2
f97a197a5ace1c26bd7ec72569622d579f98f065
880
py
Python
perftracker/migrations/0038_auto_20181001_0209.py
istillc0de/perftracker-perfguru
79d0f1740f4072503ba1bf06b871590de6bcf470
[ "Apache-2.0" ]
11
2018-03-12T05:40:02.000Z
2020-01-30T10:13:36.000Z
perftracker/migrations/0038_auto_20181001_0209.py
istillc0de/perftracker-perfguru
79d0f1740f4072503ba1bf06b871590de6bcf470
[ "Apache-2.0" ]
4
2019-05-12T18:38:22.000Z
2020-07-20T07:13:34.000Z
perftracker/migrations/0038_auto_20181001_0209.py
istillc0de/perftracker-perfguru
79d0f1740f4072503ba1bf06b871590de6bcf470
[ "Apache-2.0" ]
25
2018-07-19T12:12:09.000Z
2022-02-15T09:16:31.000Z
# Generated by Django 2.0.3 on 2018-09-30 23:09 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('perftracker', '0037_auto_20181001_0118'), ] operations = [ migrations.AddField( model_name='artifactmetamodel', name='compression', field=models.BooleanField(default=False, help_text='Decompress on download/view'), ), migrations.AddField( model_name='artifactmetamodel', name='inline', field=models.BooleanField(default=False, help_text='View document in browser (do not download)'), ), migrations.AlterField( model_name='artifactmetamodel', name='mime', field=models.CharField(default='', help_text='Artifact file mime type', max_length=32), ), ]
30.344828
109
0.614773
88
880
6.034091
0.613636
0.050847
0.146893
0.169492
0.34275
0.34275
0.161959
0
0
0
0
0.051563
0.272727
880
28
110
31.428571
0.778125
0.051136
0
0.363636
1
0
0.237695
0.027611
0
0
0
0
0
1
0
false
0
0.045455
0
0.181818
0
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
2
f996ca79275f8904af11339d8d0d55f901235ab1
75
py
Python
py/areaDoCirculo.py
Ellian-aragao/URI
e53b9380c5be0e59fdd002553ea33a04a7c35439
[ "Unlicense" ]
null
null
null
py/areaDoCirculo.py
Ellian-aragao/URI
e53b9380c5be0e59fdd002553ea33a04a7c35439
[ "Unlicense" ]
null
null
null
py/areaDoCirculo.py
Ellian-aragao/URI
e53b9380c5be0e59fdd002553ea33a04a7c35439
[ "Unlicense" ]
null
null
null
area = float(input()) area *= 3.14159 * area print('A={:.4f}'.format(area))
25
30
0.613333
12
75
3.833333
0.75
0
0
0
0
0
0
0
0
0
0
0.104478
0.106667
75
3
30
25
0.58209
0
0
0
0
0
0.105263
0
0
0
0
0
0
1
0
false
0
0
0
0
0.333333
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
2
f9bb01c2946f782aad448ea6ffa47adeddfb3192
1,240
py
Python
one_page/customapp/templatetags/testtag.py
aptuz/mezzanine_onepage_theme
485ecb79ee46a4dce2a961bdaecd6ce360e33a01
[ "MIT" ]
null
null
null
one_page/customapp/templatetags/testtag.py
aptuz/mezzanine_onepage_theme
485ecb79ee46a4dce2a961bdaecd6ce360e33a01
[ "MIT" ]
null
null
null
one_page/customapp/templatetags/testtag.py
aptuz/mezzanine_onepage_theme
485ecb79ee46a4dce2a961bdaecd6ce360e33a01
[ "MIT" ]
null
null
null
from __future__ import unicode_literals from future.builtins import int from collections import defaultdict from django.core.urlresolvers import reverse from django.template.defaultfilters import linebreaksbr, urlize from mezzanine import template from mezzanine.conf import settings from mezzanine.generic.forms import ThreadedCommentForm from mezzanine.generic.models import ThreadedComment from mezzanine.utils.importing import import_dotted_path from mezzanine.pages.models import Page, RichTextPage register = template.Library() @register.assignment_tag def allpages(): page_fields = [ 'content', 'created', 'description', 'expiry_date', 'gen_description', u'id', 'keywords', u'keywords_string', 'publish_date', 'short_url', 'slug', 'status', 'title', 'titles', 'updated'] output = [] # import pdb;pdb.set_trace() AllPages = RichTextPage.objects.all() for item in AllPages: temp = {} for fld in page_fields: temp[fld] = getattr(item, fld) output.append(temp) return { 'pages': output } @register.filter() def remove_slash(value): return '#' + value[1:-1] @register.filter() def lower(value): # import pdb;pdb.set_trace() return value.lower()
30.243902
208
0.724194
149
1,240
5.90604
0.516779
0.088636
0.045455
0.034091
0.045455
0
0
0
0
0
0
0.001938
0.167742
1,240
41
209
30.243902
0.850775
0.042742
0
0.064516
0
0
0.110549
0
0
0
0
0
0
1
0.096774
false
0
0.354839
0.064516
0.548387
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
2
f9c42903d1d9fecf7020794b2db23282c3b7fc56
794
py
Python
src/cython/using.py
Brunopaes/python-sandbox
c51f716785bf11789b13f9bb24bfd76108eaec08
[ "MIT" ]
5
2020-08-04T00:13:06.000Z
2022-03-18T09:18:29.000Z
src/cython/using.py
Brunopaes/python-sandbox
c51f716785bf11789b13f9bb24bfd76108eaec08
[ "MIT" ]
null
null
null
src/cython/using.py
Brunopaes/python-sandbox
c51f716785bf11789b13f9bb24bfd76108eaec08
[ "MIT" ]
2
2020-06-26T13:42:17.000Z
2020-08-04T00:13:10.000Z
import run_python import run_cython import datetime import logging logging.basicConfig(filename="benchmark.log", level=logging.NOTSET) iterations = [ 100, 1000, 5000, 10000, 15000, 20000, 25000, 30000, ] logging.info('Timestamp, Python, cython, Iterations') for iteration in iterations: begin = datetime.datetime.now() for i in range(iteration): run_cython.image_transformation() python_d = abs(begin - datetime.datetime.now()) begin = datetime.datetime.now() for i in range(iteration): run_python.image_transformation() cython_d = abs(begin - datetime.datetime.now()) msg = '{}, {}, {}, {}'.format(datetime.datetime.now(), python_d, cython_d, iteration) logging.info(msg)
22.055556
78
0.644836
90
794
5.577778
0.4
0.159363
0.189243
0.191235
0.298805
0.298805
0.187251
0.187251
0.187251
0.187251
0
0.059308
0.235516
794
35
79
22.685714
0.76771
0
0
0.142857
0
0
0.080605
0
0
0
0
0
0
1
0
false
0
0.142857
0
0.142857
0
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
2
f9e3b8381dabcfc6210cdce46b7fae1ba5d1813f
203
py
Python
torabot/mods/feed/ut.py
Answeror/torabot
b6260190ec1f0dc8bf3f7ba3512c0522668c59ed
[ "MIT" ]
42
2015-01-20T10:45:08.000Z
2021-04-17T05:10:27.000Z
torabot/mods/feed/ut.py
Answeror/torabot
b6260190ec1f0dc8bf3f7ba3512c0522668c59ed
[ "MIT" ]
4
2015-01-23T05:40:44.000Z
2016-12-19T03:52:20.000Z
torabot/mods/feed/ut.py
Answeror/torabot
b6260190ec1f0dc8bf3f7ba3512c0522668c59ed
[ "MIT" ]
8
2015-05-07T03:51:05.000Z
2019-03-20T05:40:47.000Z
def entry_id(entry): for field in ['id', 'link']: ret = getattr(entry, field, None) if ret: return ret raise Exception('no id field found in entry: {}'.format(entry))
29
67
0.571429
28
203
4.107143
0.607143
0
0
0
0
0
0
0
0
0
0
0
0.295567
203
6
68
33.833333
0.804196
0
0
0
0
0
0.17734
0
0
0
0
0
0
1
0.166667
false
0
0
0
0.333333
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
2
dda9b2b073293fee194d0918db8aeffa28c0308a
212
py
Python
1401-1500/1413-Minimum Value to Get Positive Step by Step Sum/1413-Minimum Value to Get Positive Step by Step Sum.py
jiadaizhao/LeetCode
4ddea0a532fe7c5d053ffbd6870174ec99fc2d60
[ "MIT" ]
49
2018-05-05T02:53:10.000Z
2022-03-30T12:08:09.000Z
1401-1500/1413-Minimum Value to Get Positive Step by Step Sum/1413-Minimum Value to Get Positive Step by Step Sum.py
jolly-fellow/LeetCode
ab20b3ec137ed05fad1edda1c30db04ab355486f
[ "MIT" ]
11
2017-12-15T22:31:44.000Z
2020-10-02T12:42:49.000Z
1401-1500/1413-Minimum Value to Get Positive Step by Step Sum/1413-Minimum Value to Get Positive Step by Step Sum.py
jolly-fellow/LeetCode
ab20b3ec137ed05fad1edda1c30db04ab355486f
[ "MIT" ]
28
2017-12-05T10:56:51.000Z
2022-01-26T18:18:27.000Z
class Solution: def minStartValue(self, nums: List[int]) -> int: total = minSum = 0 for num in nums: total += num minSum = min(minSum, total) return 1 - minSum
26.5
52
0.528302
25
212
4.48
0.68
0
0
0
0
0
0
0
0
0
0
0.015152
0.377358
212
7
53
30.285714
0.833333
0
0
0
0
0
0
0
0
0
0
0
0
1
0.142857
false
0
0
0
0.428571
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
2
ddb2972f36ed8e2d4a692066c4a74f8cffcd09ee
1,365
py
Python
extra/fit_functions/br8.py
cyankaet/bumps
427d077fd95f2d9a09eeb8677d045547061cff42
[ "MIT" ]
44
2015-03-28T06:48:43.000Z
2022-01-09T11:29:00.000Z
extra/fit_functions/br8.py
cyankaet/bumps
427d077fd95f2d9a09eeb8677d045547061cff42
[ "MIT" ]
68
2015-08-21T11:28:54.000Z
2022-03-30T22:14:13.000Z
extra/fit_functions/br8.py
cyankaet/bumps
427d077fd95f2d9a09eeb8677d045547061cff42
[ "MIT" ]
27
2015-06-22T19:25:27.000Z
2021-06-15T18:20:06.000Z
#!/usr/bin/env python """ Bevington & Robinson's model of dual exponential decay References:: [5] Bevington & Robinson (1992). Data Reduction and Error Analysis for the Physical Sciences, Second Edition, McGraw-Hill, Inc., New York. """ from numpy import exp, sqrt, vstack, array, asarray def dual_exponential(t, A, B, C, tauA, tauB): """ Computes dual exponential decay. y = A exp(-t/tauA) + B exp(-t/tauB) + C """ t = asarray(t) return C + A*exp(-t/tauA) + B*exp(-t/tauB) # data from Chapter 8 of [5]. data = array([[15, 775], [30, 479], [45, 380], [60, 302], [75, 185], [90, 157], [105,137], [120, 119], [135, 110], [150, 89], [165, 74], [180, 61], [195, 66], [210, 68], [225, 48], [240, 54], [255, 51], [270, 46], [285, 55], [300, 29], [315, 28], [330, 37], [345, 49], [360, 26], [375, 35], [390, 29], [405, 31], [420, 24], [435, 25], [450, 35], [465, 24], [480, 30], [495, 26], [510, 28], [525, 21], [540, 18], [555, 20], [570, 27], [585, 17], [600, 17], [615, 14], [630, 17], [645, 24], [660, 11], [675, 22], [690, 17], [705, 12], [720, 10], [735, 13], [750, 16], [765, 9], [780, 9], [795, 14], [810, 21], [825, 17], [840, 13], [855, 12], [870, 18], [885, 10]]) # Set uncertainty to sqrt(counts) data = { 'x': data[0], 'y': data[1], 'dy': sqrt(data[1]) } #coeff = {'A': 1, 'B': 1, 'C': 1, 'tauA': 1, 'tauB': 1}
33.292683
64
0.532601
230
1,365
3.156522
0.695652
0.022039
0.055096
0.024793
0.049587
0.049587
0.049587
0.049587
0
0
0
0.284799
0.2
1,365
40
65
34.125
0.380037
0.316484
0
0
0
0
0.00443
0
0
0
0
0
0
1
0.058824
false
0
0.058824
0
0.176471
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
1
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
2
ddb3c6668f58416007498d2c442645d8eba37c50
870
py
Python
users/admin.py
TCKACHIKSIS/lms
fd06eb7a2baa9b9f82caa5223c86ba500f88333c
[ "MIT" ]
8
2021-02-09T12:15:27.000Z
2022-03-14T07:41:02.000Z
users/admin.py
TCKACHIKSIS/lms
fd06eb7a2baa9b9f82caa5223c86ba500f88333c
[ "MIT" ]
70
2021-04-14T12:45:17.000Z
2021-08-04T04:51:34.000Z
users/admin.py
TCKACHIKSIS/lms
fd06eb7a2baa9b9f82caa5223c86ba500f88333c
[ "MIT" ]
3
2021-08-03T08:22:01.000Z
2022-02-27T23:20:05.000Z
from django.contrib import admin from django.contrib.auth.admin import UserAdmin from django.contrib.auth.forms import ReadOnlyPasswordHashField from django.contrib.auth.forms import UserChangeForm as UserChangeFormDefault from users.models import User as MyUser class UserChangeForm(UserChangeFormDefault): """A form for updating users. Includes all the fields on the user, but replaces the password field with admin's password hash display field. """ password = ReadOnlyPasswordHashField( label=("Password"), help_text=( "Raw passwords are not stored, so there is no way to see " "this user's password, but you can change the password " "using <a href=\"../password/\">this form</a>.") ) class MyUserAdmin(UserAdmin): form = UserChangeForm admin.site.register(MyUser, MyUserAdmin)
31.071429
77
0.717241
107
870
5.82243
0.542056
0.064205
0.109149
0.101124
0.102729
0.102729
0
0
0
0
0
0
0.203448
870
27
78
32.222222
0.89899
0.157471
0
0
0
0
0.207283
0
0
0
0
0
0
1
0
false
0.375
0.3125
0
0.5625
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
1
1
0
0
0
0
2
ddbd603df4d50ff702a926989598bb5193036223
360
py
Python
mergeit/extras/filters/version_redmine_filter.py
insolite/mergeit
27ca0eacab9b1d2fe6bafe5a43184a80e6169cb5
[ "MIT" ]
2
2016-07-04T13:32:30.000Z
2016-07-16T02:51:54.000Z
mergeit/extras/filters/version_redmine_filter.py
insolite/mergeit
27ca0eacab9b1d2fe6bafe5a43184a80e6169cb5
[ "MIT" ]
1
2016-08-06T12:47:28.000Z
2016-08-06T12:47:28.000Z
mergeit/extras/filters/version_redmine_filter.py
insolite/mergeit
27ca0eacab9b1d2fe6bafe5a43184a80e6169cb5
[ "MIT" ]
null
null
null
from mergeit.extras.filters import RedmineFilter class VersionRedmineFilter(RedmineFilter): def run(self, source_match, source_branch, target_branch): task = self.get_task(source_match.groupdict()['task_id']) # TODO: compare commit message task with branch name? return target_branch.format(redmine_version=task['fixed_version']['name'])
45
119
0.766667
45
360
5.933333
0.666667
0.082397
0
0
0
0
0
0
0
0
0
0
0.130556
360
8
120
45
0.853035
0.141667
0
0
0
0
0.077922
0
0
0
0
0.125
0
1
0.2
false
0
0.2
0
0.8
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
2
ddc2e4480e106c32f1e4eca626f14364f1473804
345
py
Python
ketu/k2/__init__.py
dfm/turnstile
13a9a3b489b458396a6ad1e8a2d1e89a0dd6312d
[ "MIT" ]
10
2015-02-19T09:13:24.000Z
2020-04-25T10:50:38.000Z
ketu/k2/__init__.py
dfm/turnstile
13a9a3b489b458396a6ad1e8a2d1e89a0dd6312d
[ "MIT" ]
1
2015-07-10T19:50:31.000Z
2015-07-11T03:51:15.000Z
ketu/k2/__init__.py
dfm/turnstile
13a9a3b489b458396a6ad1e8a2d1e89a0dd6312d
[ "MIT" ]
7
2015-04-20T06:42:28.000Z
2019-02-25T03:04:45.000Z
# -*- coding: utf-8 -*- __all__ = ["photometry", "epic", "Data", "Inject", "Likelihood", "Summary", "FP", "fit_traptransit"] from . import photometry, epic from .data import Data from .inject import Inject from .likelihood import Likelihood from .summary import Summary from .fp import FP from .traptransit import fit_traptransit
23
75
0.704348
42
345
5.642857
0.357143
0.118143
0
0
0
0
0
0
0
0
0
0.003497
0.171014
345
14
76
24.642857
0.825175
0.06087
0
0
0
0
0.180124
0
0
0
0
0
0
1
0
false
0
0.777778
0
0.777778
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
2
ddf32fe03a5fb599a47b798f1592d170ddca1b42
156
py
Python
lico/__init__.py
sjoerdk/lico
86045f4df4f6a9d6155032a84bfc959b6eb7edd3
[ "MIT" ]
null
null
null
lico/__init__.py
sjoerdk/lico
86045f4df4f6a9d6155032a84bfc959b6eb7edd3
[ "MIT" ]
null
null
null
lico/__init__.py
sjoerdk/lico
86045f4df4f6a9d6155032a84bfc959b6eb7edd3
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- '''Top-level package for lico.''' __author__ = '''Sjoerd Kerkstra''' __email__ = 'w.s.kerkstra@example.com' __version__ = '0.1.1'
19.5
38
0.634615
21
156
4.142857
0.904762
0
0
0
0
0
0
0
0
0
0
0.02963
0.134615
156
7
39
22.285714
0.614815
0.320513
0
0
0
0
0.44
0.24
0
0
0
0
0
1
0
false
0
0
0
0
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
2
ddf4aeea631029e7987b9caa0550304bdff44ccf
1,000
py
Python
timeseries/features/orderbook_features.py
sarb9/volatility-prediction-with-orderbook-data
bf47f26632502af82dd6196e64582c375df876b7
[ "MIT" ]
null
null
null
timeseries/features/orderbook_features.py
sarb9/volatility-prediction-with-orderbook-data
bf47f26632502af82dd6196e64582c375df876b7
[ "MIT" ]
null
null
null
timeseries/features/orderbook_features.py
sarb9/volatility-prediction-with-orderbook-data
bf47f26632502af82dd6196e64582c375df876b7
[ "MIT" ]
null
null
null
from typing import Dict, Tuple from pathlib import Path from data_service.orderbook_dataset import OrderbookDataset from timeseries.features.feature import register_feature @register_feature("orderbook") def read_orderbook_from_file( dataset_file: Dict = {}, save: Dict = {}, load: Dict = {}, ): path: Path = Path(dataset_file["directory"]) / dataset_file["file_name"] dataset: OrderbookDataset = OrderbookDataset() dataset.load_from_file(path, save=save, load=load) return dataset.books @register_feature("spread", input_functions=["orderbook", "mid_price"]) def extract_spread(orderbook, mid_price): return abs(orderbook[:, 1, 0, 0] - orderbook[:, 0, 0, 0]) / mid_price @register_feature("bid_volume", input_functions=["orderbook"]) def extract_bid_volume(orderbook): return orderbook[:, 0, 1, :].sum(axis=1) @register_feature("ask_volume", input_functions=["orderbook"]) def extract_ask_volume(orderbook): return orderbook[:, 1, 1, :].sum(axis=1)
29.411765
76
0.73
127
1,000
5.519685
0.307087
0.10699
0.098431
0.082739
0.11127
0.11127
0
0
0
0
0
0.013857
0.134
1,000
33
77
30.30303
0.795612
0
0
0
0
0
0.089
0
0
0
0
0
0
1
0.173913
false
0
0.173913
0.130435
0.521739
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
1
1
0
0
2
ddf95f32e06fc2c51f71e92f91be786fe2f62eb8
761
bzl
Python
build_defs/internal_do_not_use/j2kt_common.bzl
dymecard/j2cl
ee2191aea6a795853f08e8864e2ccd553da03b0a
[ "Apache-2.0" ]
null
null
null
build_defs/internal_do_not_use/j2kt_common.bzl
dymecard/j2cl
ee2191aea6a795853f08e8864e2ccd553da03b0a
[ "Apache-2.0" ]
null
null
null
build_defs/internal_do_not_use/j2kt_common.bzl
dymecard/j2cl
ee2191aea6a795853f08e8864e2ccd553da03b0a
[ "Apache-2.0" ]
null
null
null
"""Stub implementation containing j2kt_provider helpers.""" def _to_j2kt_jvm_name(name): """Convert a label name used in j2cl to be used in j2kt jvm""" if name.endswith("-j2cl"): name = name[:-5] return "%s-j2kt-jvm" % name def _to_j2kt_native_name(name): """Convert a label name used in j2cl to be used in j2kt native""" if name.endswith("-j2cl"): name = name[:-5] return "%s-j2kt-native" % name def _compile(**kwargs): pass def _native_compile(**kwargs): pass def _jvm_compile(**kwargs): pass j2kt_common = struct( to_j2kt_jvm_name = _to_j2kt_jvm_name, to_j2kt_native_name = _to_j2kt_native_name, compile = _compile, native_compile = _native_compile, jvm_compile = _jvm_compile, )
24.548387
69
0.674113
111
761
4.297297
0.252252
0.075472
0.092243
0.081761
0.486373
0.431866
0.36478
0.36478
0.36478
0.36478
0
0.0299
0.208936
761
30
70
25.366667
0.762458
0.22339
0
0.333333
0
0
0.06087
0
0
0
0
0
0
1
0.238095
false
0.142857
0
0
0.333333
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
1
0
0
0
0
0
2
fb060eda833b066929efa3710bdae928c70955d1
207
py
Python
gadget/plugins/bilibili/__init__.py
SRainot/gadgetBot
f8e31882229f0033356a7751c8a2b57858eb817c
[ "MIT" ]
null
null
null
gadget/plugins/bilibili/__init__.py
SRainot/gadgetBot
f8e31882229f0033356a7751c8a2b57858eb817c
[ "MIT" ]
null
null
null
gadget/plugins/bilibili/__init__.py
SRainot/gadgetBot
f8e31882229f0033356a7751c8a2b57858eb817c
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- # Author:w k import nonebot as rcnb __plugin_name = 'Bilibili' Bilibili = rcnb.CommandGroup('Bilibili', only_to_me=False) from . import get_cover from . import live_subscription
15.923077
58
0.729469
29
207
4.965517
0.793103
0.138889
0
0
0
0
0
0
0
0
0
0.005714
0.154589
207
12
59
17.25
0.817143
0.154589
0
0
0
0
0.093023
0
0
0
0
0
0
1
0
false
0
0.6
0
0.6
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
2
fb1454faf08dc5f59104f45a90d238eaafc1ab03
1,767
py
Python
src/infi/diskmanagement/wmi/model/__init__.py
Infinidat/infi.diskmanagement
f0f6cbc2bf9e7a24a288cf66a7f2d133bd2833d5
[ "BSD-3-Clause" ]
12
2015-05-20T14:22:28.000Z
2022-01-20T04:42:47.000Z
src/infi/diskmanagement/wmi/model/__init__.py
Infinidat/infi.diskmanagement
f0f6cbc2bf9e7a24a288cf66a7f2d133bd2833d5
[ "BSD-3-Clause" ]
2
2017-08-15T08:27:40.000Z
2021-06-02T16:07:41.000Z
src/infi/diskmanagement/wmi/model/__init__.py
Infinidat/infi.diskmanagement
f0f6cbc2bf9e7a24a288cf66a7f2d133bd2833d5
[ "BSD-3-Clause" ]
5
2015-05-20T14:22:36.000Z
2018-11-06T04:57:35.000Z
from .. import WmiObject from infi.pyutils.lazy import cached_method DISKDRIVES_QUERY = "SELECT * FROM Win32_DiskDrive" VOLUME_QUERY = "SELECT * FROM Win32_Volume" VOLUME_CLUSTER_SIZE_QUERY = "SELECT Name, Blocksize FROM Win32_Volume WHERE FileSystem='NTFS'" DISKDRIVE_TO_DISKPARTITIONS_QUERY = r'SELECT * FROM Win32_DiskDriveToDiskPartition WHERE Antecedent="{}"' DISKPARTITION_QUERY = r'SELECT * FROM Win32_DiskPartition WHERE DeviceID={}' class DiskPartition(WmiObject): @property def Name(self): return self.get_wmi_attribute("Name") def __repr__(self): return "DiskPartition <{}>".format(self.Name) class DiskDrive(WmiObject): @property def Name(self): return self.get_wmi_attribute("Name") @property def SerialNumber(self): return self.get_wmi_attribute("SerialNumber") def __repr__(self): return "DiskDrive <{}>".format(self.Name) class Volume(WmiObject): @property def DeviceID(self): return self.get_wmi_attribute("DeviceID") @property def Name(self): return self.get_wmi_attribute("Name") @property def Blocksize(self): return self.get_wmi_attribute("Blocksize") def Format(self, ClusterSize=0, EnableCompression=False, FileSystem="NTFS", QuickFormat=True): method = self._object.Methods_("Format") parameters = method.InParameters.SpawnInstance_() parameters.Properties_.Item("ClusterSize").Value = ClusterSize parameters.Properties_.Item("EnableCompression").Value = EnableCompression parameters.Properties_.Item("FileSystem").Value = FileSystem parameters.Properties_.Item("QuickFormat").Value = QuickFormat _ = self._object.ExecMethod_(method.Name, parameters)
33.339623
105
0.7176
190
1,767
6.436842
0.284211
0.065413
0.068684
0.083401
0.255928
0.221586
0.15045
0.15045
0.15045
0.15045
0
0.007555
0.176005
1,767
52
106
33.980769
0.832418
0
0
0.35
0
0
0.208499
0.016997
0
0
0
0
0
1
0.225
false
0
0.05
0.2
0.55
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
0
0
0
2
fb158b888220edaef6d37784f625154c58da18b3
400
py
Python
tet-moments/triangle.py
hertzsprung/high-order-transport
50d9633642dcd4d8fba54b9b408e69dc0f12d9e7
[ "MIT" ]
null
null
null
tet-moments/triangle.py
hertzsprung/high-order-transport
50d9633642dcd4d8fba54b9b408e69dc0f12d9e7
[ "MIT" ]
null
null
null
tet-moments/triangle.py
hertzsprung/high-order-transport
50d9633642dcd4d8fba54b9b408e69dc0f12d9e7
[ "MIT" ]
1
2020-02-13T09:16:36.000Z
2020-02-13T09:16:36.000Z
import numpy as np import math from simplex import Simplex class Triangle(Simplex): def area(self): # https://math.stackexchange.com/a/516223/89878 return 0.5 * self._detA() def moment(self, p, q, r): return self._moment(p, q, r, d=2) def __str__(self): return 'Triangle(' + str(self.a) + ', ' + \ str(self.b) + ', ' + str(self.c) + ')'
25
55
0.56
56
400
3.892857
0.553571
0.12844
0.027523
0
0
0
0
0
0
0
0
0.048611
0.28
400
15
56
26.666667
0.708333
0.1125
0
0
0
0
0.03966
0
0
0
0
0
0
1
0.272727
false
0
0.272727
0.272727
0.909091
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
0
0
0
2
fb25b0fc2011daff4149e3e9da440cee5e5f8059
555
py
Python
old/PPO with Sonic the Hedgehog/utilities.py
sirmammingtonham/droneee
1c0e1921a902b26958d298f3a0204465bf3e960d
[ "Unlicense" ]
null
null
null
old/PPO with Sonic the Hedgehog/utilities.py
sirmammingtonham/droneee
1c0e1921a902b26958d298f3a0204465bf3e960d
[ "Unlicense" ]
null
null
null
old/PPO with Sonic the Hedgehog/utilities.py
sirmammingtonham/droneee
1c0e1921a902b26958d298f3a0204465bf3e960d
[ "Unlicense" ]
null
null
null
import tensorflow as tf # Get the variables def find_trainable_variables(key): with tf.variable_scope(key): return tf.trainable_variables() # Make directory def make_path(f): # exist_ok: if the folder already exist makes no exception error return os.makedirs(f, exist_ok=True) def discount_with_dones(rewards, dones, gamma): discounted = [] r = 0 for reward, done in zip(rewards[::-1], dones[::-1]): r = reward + gamma*r*(1.-done) # fixed off by one bug discounted.append(r) return discounted[::-1]
26.428571
68
0.672072
81
555
4.493827
0.617284
0.098901
0.043956
0
0
0
0
0
0
0
0
0.011468
0.214414
555
20
69
27.75
0.823395
0.209009
0
0
0
0
0
0
0
0
0
0
0
1
0.230769
false
0
0.076923
0.076923
0.538462
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
1
0
0
2
fb371b6c6c962a5f7a0accdfedb6f97b7982c73b
1,983
py
Python
tests/test_modules.py
illegalbyte/fullpod
6c10cff38cfaf96a2c3f72a1c1c9887e8efb6c92
[ "MIT" ]
3
2022-01-07T16:40:24.000Z
2022-03-22T11:55:42.000Z
tests/test_modules.py
illegalbyte/allcasts
6c10cff38cfaf96a2c3f72a1c1c9887e8efb6c92
[ "MIT" ]
null
null
null
tests/test_modules.py
illegalbyte/allcasts
6c10cff38cfaf96a2c3f72a1c1c9887e8efb6c92
[ "MIT" ]
null
null
null
import unittest import os class TestWget(unittest.TestCase): def test_wget_installed(self): ''' wget should be available ''' try: import wget except ImportError: self.fail("wget not available - run 'pip install wget'") def test_wget_download(self): ''' wget should be able to download a file ''' import wget wget.download("https://www.google.com/images/branding/googlelogo/1x/googlelogo_color_272x92dp.png", "google.png") self.assertTrue(os.path.isfile("google.png")) os.remove("google.png") class TestXMLtoDict(unittest.TestCase): def test_xmltodict_installed(self): ''' xmltodict should be available ''' try: import xmltodict except ImportError: self.fail("xmltodict not available - run: 'pip install xmltodict'") def test_xmltodict_parse(self): ''' xmltodict should be able to parse a file ''' import xmltodict with open('example_rss.xml', 'r') as f: podcast_dict = xmltodict.parse(f.read()) self.assertTrue(podcast_dict) def test_xmltodict_parse_url(self): ''' xmltodict should be able to parse a url ''' import urllib import xmltodict with urllib.request.urlopen('https://www.npr.org/rss/podcast.php?id=510289') as url: podcast_dict = xmltodict.parse(url.read()) self.assertTrue(podcast_dict) class TestColorama(unittest.TestCase): def test_colorama_installed(self): ''' colorama should be available ''' try: import colorama except ImportError: self.fail("colorama not available - run: 'pip install colorama'") def test_colorama_init(self): ''' colorama should be able to init ''' import colorama colorama.init() def test_colorama_foreground(self): ''' colorama should be able to set foreground color ''' import colorama as col col.init() col.Fore.GREEN col.Fore.RESET def test_colorama_background(self): ''' colorama should be able to set background color ''' import colorama as col col.init() col.Back.GREEN col.Back.RESET
23.05814
115
0.714574
268
1,983
5.190299
0.283582
0.045291
0.051761
0.060388
0.308411
0.156722
0.13803
0.096334
0
0
0
0.007313
0.172466
1,983
86
116
23.05814
0.840341
0.166919
0
0.387755
0
0
0.207341
0
0
0
0
0
0.061224
1
0.183673
false
0
0.306122
0
0.55102
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
2
34a4840aad0f1a2df60de9fdd28cbc73705b7c03
1,028
py
Python
lintcode/107.1.py
jianershi/algorithm
c3c38723b9c5f1cc745550d89e228f92fd4abfb2
[ "MIT" ]
1
2021-01-08T06:57:49.000Z
2021-01-08T06:57:49.000Z
lintcode/107.1.py
jianershi/algorithm
c3c38723b9c5f1cc745550d89e228f92fd4abfb2
[ "MIT" ]
null
null
null
lintcode/107.1.py
jianershi/algorithm
c3c38723b9c5f1cc745550d89e228f92fd4abfb2
[ "MIT" ]
1
2021-01-08T06:57:52.000Z
2021-01-08T06:57:52.000Z
""" 107. Word Break https://www.lintcode.com/problem/word-break/description?_from=ladder&&fromId=160 Memoization """ class Solution: """ @param: s: A string @param: dict: A dictionary of words dict @return: A boolean """ def wordBreak(self, s, dict): # write your code here if dict: max_dict_word_length = max([len(x) for x in dict]) else: max_dict_word_length = 0 return self.can_break(s, 0, dict, max_dict_word_length, {}) def can_break(self, s, i, dict, max_dict_word_length, memo): if i in memo: return memo[i] if len(s) == i: return True for index in range(i, len(s)): if index - i > max_dict_word_length: break if s[i:index + 1] not in dict: continue if self.can_break(s, index + 1, dict, max_dict_word_length, memo): memo[i] = True return memo[i] return False
28.555556
80
0.539883
141
1,028
3.780142
0.368794
0.078799
0.123827
0.19137
0.172608
0.093809
0
0
0
0
0
0.015152
0.357977
1,028
36
81
28.555556
0.792424
0.20428
0
0.095238
0
0
0
0
0
0
0
0.027778
0
1
0.095238
false
0
0
0
0.380952
0
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
2
34ad0313dfd9f10ea89d9d002684c0c9cc5228a4
340
py
Python
src/game_of_life/python_coderetreat_socramob/cr_socramob05/coordinate.py
hemmerling/codingdojo
3e8860b78e96ac15cde6a12db3b2431e8b63714f
[ "Apache-2.0" ]
null
null
null
src/game_of_life/python_coderetreat_socramob/cr_socramob05/coordinate.py
hemmerling/codingdojo
3e8860b78e96ac15cde6a12db3b2431e8b63714f
[ "Apache-2.0" ]
null
null
null
src/game_of_life/python_coderetreat_socramob/cr_socramob05/coordinate.py
hemmerling/codingdojo
3e8860b78e96ac15cde6a12db3b2431e8b63714f
[ "Apache-2.0" ]
null
null
null
class Coordinate: coordX = 0 coordY = 0 def __init__(self, coordX=0, coordY=0): self.coordX = coordX self.coordY = coordY pass def set(self, coordX, coordY): self.coordX = coordX self.coordY = coordY return coordX, coordY if __name__ == '__main__': pass
20
44
0.552941
38
340
4.631579
0.368421
0.227273
0.147727
0.159091
0.363636
0.363636
0
0
0
0
0
0.018433
0.361765
340
16
45
21.25
0.792627
0
0
0.461538
0
0
0.024691
0
0
0
0
0
0
1
0.153846
false
0.153846
0
0
0.461538
0
0
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
1
0
0
0
0
0
2
34ba4fa4a35f6d44c71a62a56834c29bd6667d43
236
py
Python
linux/bin/hex2bin.py
nevesnunes/env
7a5e3816334337e04a87e1a2e4dc322215901744
[ "MIT" ]
4
2020-04-07T14:45:02.000Z
2021-12-28T22:43:16.000Z
linux/bin/hex2bin.py
nevesnunes/env
7a5e3816334337e04a87e1a2e4dc322215901744
[ "MIT" ]
null
null
null
linux/bin/hex2bin.py
nevesnunes/env
7a5e3816334337e04a87e1a2e4dc322215901744
[ "MIT" ]
2
2020-04-08T03:12:06.000Z
2021-03-04T20:33:03.000Z
#!/usr/bin/env python3 import binascii import sys hex_bytes = sys.stdin.buffer.read() hex_bytes_clean = ''.join(str(hex_bytes, encoding='UTF8').split()) raw_bytes = binascii.a2b_hex(hex_bytes_clean) sys.stdout.buffer.write(raw_bytes)
23.6
66
0.771186
38
236
4.552632
0.578947
0.184971
0.150289
0
0
0
0
0
0
0
0
0.013825
0.080508
236
9
67
26.222222
0.78341
0.088983
0
0
0
0
0.018692
0
0
0
0
0
0
1
0
false
0
0.333333
0
0.333333
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
2
34cf72e3b0e33aed734201ad91556ce487d89f75
691
py
Python
tests/Traverser/mainsrt.py
goshow-jp/Kraken
7088b474b6cc2840cea7ab642c5938e4a3290b6c
[ "BSD-3-Clause" ]
null
null
null
tests/Traverser/mainsrt.py
goshow-jp/Kraken
7088b474b6cc2840cea7ab642c5938e4a3290b6c
[ "BSD-3-Clause" ]
null
null
null
tests/Traverser/mainsrt.py
goshow-jp/Kraken
7088b474b6cc2840cea7ab642c5938e4a3290b6c
[ "BSD-3-Clause" ]
1
2021-12-08T08:31:48.000Z
2021-12-08T08:31:48.000Z
from kraken import plugins from kraken.core.maths import Vec3 from kraken.core.objects.object_3d import Object3D from kraken.core.objects.locator import Locator from kraken.core.objects.constraints.pose_constraint import PoseConstraint from kraken.core.traverser.traverser import Traverser from kraken_examples.mainSrt_component import MainSrtComponentRig mainComponent = MainSrtComponentRig('main') trav = Traverser() trav.addRootItem(mainComponent) items = trav.traverse(discoverCallback = trav.discoverChildren) trav.reset() trav.addRootItems(items) def callback(**args): item = args.get('item', None) print 'Visited '+item.getPath() trav.traverse(itemCallback = callback)
27.64
74
0.811867
83
691
6.710843
0.493976
0.125673
0.125673
0.113106
0
0
0
0
0
0
0
0.004823
0.099855
691
24
75
28.791667
0.890675
0
0
0
0
0
0.023188
0
0
0
0
0
0
0
null
null
0
0.411765
null
null
0.058824
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
1
0
0
0
0
2
34d0622610259a0dd008168079dfa5962bd64938
5,056
py
Python
facet/database/base.py
edponce/FACET
0dca2d728813a4865e72b2e8fd6b114a0c63d5b0
[ "MIT" ]
2
2020-10-16T19:27:21.000Z
2021-11-04T15:06:54.000Z
facet/database/base.py
edponce/FACET
0dca2d728813a4865e72b2e8fd6b114a0c63d5b0
[ "MIT" ]
24
2020-07-13T01:49:36.000Z
2020-10-30T21:54:00.000Z
facet/database/base.py
edponce/FACET
0dca2d728813a4865e72b2e8fd6b114a0c63d5b0
[ "MIT" ]
null
null
null
from abc import ( ABC, abstractmethod, ) __all__ = [ 'BaseDatabase', 'BaseKVDatabase', ] class BaseDatabase(ABC): """Interface with basic database commands. Notes: * Follows the framework design pattern - parent class controls the execution flow and subclass provides the details. * 'open' method is an alias to 'connect', so derived classes should only provide the latter one. * Supports context manager schemes using 'open()' or 'connect()': >>> with open(...) as db >>> ... * Semantics of database connections: * An instance represents a single connection to a database. * Configuration of instance persists during disconnects, so reconnections are supported. * Configuration options can be changed during reconnection. * Commits, disconnects, and close operations can be invoked repeatedly without triggering exceptions. * Close operation performs a commit, then disconnects. """ def __enter__(self): return self def __exit__(self, type, value, traceback): self.close() def open(self, *args, **kwargs): self.connect(*args, **kwargs) def close(self, *args, commit_kwargs=None, **kwargs): if commit_kwargs is None: commit_kwargs = {} self.commit(**commit_kwargs) self.disconnect(*args, **kwargs) def commit(self): pass @property @abstractmethod def backend(self): """Returns a reference to the backend connection/database object.""" pass @abstractmethod def configuration(self): """Returns general configuration information as a mapping.""" pass @abstractmethod def connect(self): pass @abstractmethod def disconnect(self): pass @abstractmethod def clear(self): pass @abstractmethod def ping(self): pass class BaseKVDatabase(BaseDatabase): """Interface for key-value store database. Notes: * Follows the framework design pattern - parent class controls the execution flow and subclass provides the details. * API is similar to Python dictionaries, but with limited functionalities for performance reasons. 
""" def __getitem__(self, key): return self.get(key) def __setitem__(self, key, value): self.set(key, value) def __delitem__(self, key): self.delete(key) def __iter__(self): return iter(self.keys()) def items(self): # NOTE: Derived databases might have direct methods to key/values. return ((k, self.get(k)) for k in self.keys()) def values(self): # NOTE: Derived databases might have direct methods to key/values. return (self.get(k) for k in self.keys()) def update(self, data): if hasattr(data, 'keys'): for k in data.keys(): self.set(k, data[k]) else: for k, v in data: self.set(k, v) def setdefault(self, key, value=None): if key in self: value = self.get(key) else: self.set(key, value) return value def copy( self, database: 'BaseKVDatabase', *, bulk_size: int = 1000, nrows: int = -1, **kwargs, ): """Copy rows from current database to another one. Args: database (BaseKVDatabase): Database to populate with a copy of items in current database. bulk_size (int): Number of rows to process before committing data. nrows (int): Max number of rows to copy. If negative value, all rows are copied. Kwargs: Options forwarded to 'database.commit()'. Notes: * Checks if database objects are the same but cannot detect if both objects refer to the same backend database. """ # Early exit to skip commit operation when self-referencing or # no rows are requested, if self is database: return if nrows < 0: nrows = len(self) # NOTE: (Idea) If databases use the same serializers, then data # could be copied as is. To support this, disable serializers # before copying data and enable them after copy completes. 
for i, (k, v) in zip(range(1, nrows + 1), self.items()): database.set(k, v) if i % bulk_size == 0: database.commit(**kwargs) database.commit(**kwargs) @abstractmethod def __len__(self): pass @abstractmethod def __contains__(self, key): pass @abstractmethod def get(self, key): pass @abstractmethod def set(self, key, value): pass @abstractmethod def keys(self): pass @abstractmethod def delete(self, key): pass
25.535354
78
0.586432
573
5,056
5.099476
0.335079
0.069815
0.071869
0.042779
0.148528
0.129363
0.129363
0.129363
0.129363
0.112252
0
0.002652
0.328718
5,056
197
79
25.664975
0.858279
0.430182
0
0.303922
0
0
0.016504
0
0
0
0
0
0
1
0.254902
false
0.127451
0.009804
0.04902
0.352941
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
1
0
0
0
0
0
2
34d08c3567651ec09a3d6cdc71714bd4c6ee509f
5,018
py
Python
train.py
BobAnkh/std-project3
b27589b2c43412943b4e46302881a57df3990637
[ "MIT" ]
1
2021-09-12T06:01:49.000Z
2021-09-12T06:01:49.000Z
train.py
BobAnkh/std-project3
b27589b2c43412943b4e46302881a57df3990637
[ "MIT" ]
null
null
null
train.py
BobAnkh/std-project3
b27589b2c43412943b4e46302881a57df3990637
[ "MIT" ]
null
null
null
import json import os import pytorch_lightning as pl from pytorch_lightning.callbacks import ModelCheckpoint from torch.utils.data import DataLoader, random_split from src.dataloader import ActionTrainDataset, AudioTrainDataset, VideoTrainDataset from src.image_center import pre_process from src.model import ActionAngle, ActionLoc, AudioClassifier, ImageClassifier def audio_train(path): print('Starting training audio classification!') trainData = AudioTrainDataset(path) dataset_sizes = { 'train': round(0.8 * len(trainData)), 'val': len(trainData) - round(0.8 * len(trainData)) } [train, val] = random_split(trainData, dataset_sizes.values()) trainLoader = DataLoader(train, batch_size=64, num_workers=os.cpu_count()) valLoader = DataLoader(val, batch_size=64, num_workers=os.cpu_count()) checkpoint_callback = ModelCheckpoint( monitor='val_acc', dirpath='lightning_logs/', filename='audio-{epoch:02d}-{val_acc:.4f}-{train_acc:.4f}', save_top_k=1, mode='max', ) trainer = pl.Trainer(gpus=2, accelerator='ddp', max_epochs=100, callbacks=[checkpoint_callback]) model = AudioClassifier() trainer.fit(model, train_dataloader=trainLoader, val_dataloaders=valLoader) print('Finish training. 
Model saved in folder lightning_logs!') def action_angle_train(path, mask_processed_path): print('Starting training action angle!') trainData = ActionTrainDataset(mask_processed_path, path) dataset_sizes = { 'train': round(0.8 * len(trainData)), 'val': len(trainData) - round(0.8 * len(trainData)) } [train, val] = random_split(trainData, dataset_sizes.values()) trainLoader = DataLoader(train, batch_size=4, num_workers=os.cpu_count()) valLoader = DataLoader(val, batch_size=4, num_workers=os.cpu_count()) checkpoint_callback = ModelCheckpoint( monitor='val_sim', dirpath='lightning_logs/', filename='action-angle-{epoch:02d}-{val_sim:.4f}', save_top_k=1, mode='max', ) trainer = pl.Trainer(gpus=2, accelerator='ddp', max_epochs=500, callbacks=[checkpoint_callback]) model = ActionAngle() trainer.fit(model, train_dataloader=trainLoader, val_dataloaders=valLoader) print('Finish training. Model saved in folder lightning_logs!') def action_loc_train(path, mask_processed_path): print('Starting training action location!') trainData = ActionTrainDataset(mask_processed_path, path) dataset_sizes = { 'train': round(0.8 * len(trainData)), 'val': len(trainData) - round(0.8 * len(trainData)) } [train, val] = random_split(trainData, dataset_sizes.values()) trainLoader = DataLoader(train, batch_size=4, num_workers=os.cpu_count()) valLoader = DataLoader(val, batch_size=4, num_workers=os.cpu_count()) checkpoint_callback = ModelCheckpoint( monitor='val_loss', dirpath='lightning_logs/', filename='loc-action-{epoch:02d}-{val_loss:.4f}-{train_loss:.4f}', save_top_k=3, mode='min', ) trainer = pl.Trainer(gpus=2, accelerator='ddp', max_epochs=500, callbacks=[checkpoint_callback]) model = ActionLoc() trainer.fit(model, train_dataloader=trainLoader, val_dataloaders=valLoader) print('Finish training. 
Model saved in folder lightning_logs!') def image_train(path): print('Starting training image classification!') trainData = VideoTrainDataset(path) dataset_sizes = { 'train': round(0.8 * len(trainData)), 'val': len(trainData) - round(0.8 * len(trainData)) } [train, val] = random_split(trainData, dataset_sizes.values()) trainLoader = DataLoader(train, batch_size=128, num_workers=os.cpu_count()) valLoader = DataLoader(val, batch_size=128, num_workers=os.cpu_count()) checkpoint_callback = ModelCheckpoint( monitor='val_acc', dirpath='lightning_logs/', filename='image-{epoch:02d}-{val_acc:.4f}-{train_acc:.4f}', save_top_k=1, mode='max', ) trainer = pl.Trainer(gpus=2, accelerator='ddp', max_epochs=50, callbacks=[checkpoint_callback]) model = ImageClassifier() trainer.fit(model, train_dataloader=trainLoader, val_dataloaders=valLoader) print('Finish training. Model saved in folder lightning_logs!') if __name__ == '__main__': object_info, object_list = pre_process('dataset/train') json.dump(object_info, open('mask_processed.json', 'w', encoding='utf-8'), ensure_ascii=False) audio_train('./dataset/train') action_angle_train('./dataset/train', 'mask_processed.json') action_loc_train('./dataset/train', 'mask_processed.json') image_train('./dataset/train')
40.467742
98
0.66102
572
5,018
5.582168
0.195804
0.045099
0.017538
0.025055
0.720013
0.701221
0.679925
0.679925
0.676793
0.643595
0
0.016544
0.217019
5,018
123
99
40.796748
0.796131
0
0
0.540541
0
0
0.166202
0.037067
0
0
0
0
0
1
0.036036
false
0
0.072072
0
0.108108
0.072072
0
0
0
null
0
0
0
0
1
0
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
2
34ee7c4232c5f790ebb690a5eec409ab19352905
1,091
py
Python
helpdesk/admin.py
bobbybabu007/django-simple-helpdesk
d507682e82808ba182736d33e26824dcad8d801f
[ "BSD-2-Clause" ]
14
2016-10-11T21:29:32.000Z
2021-09-21T13:51:16.000Z
helpdesk/admin.py
bobbybabu007/django-simple-helpdesk
d507682e82808ba182736d33e26824dcad8d801f
[ "BSD-2-Clause" ]
7
2015-11-14T19:15:47.000Z
2021-06-24T18:35:42.000Z
helpdesk/admin.py
bobbybabu007/django-simple-helpdesk
d507682e82808ba182736d33e26824dcad8d801f
[ "BSD-2-Clause" ]
17
2015-06-01T17:35:58.000Z
2021-09-11T22:01:19.000Z
from django.contrib import admin from django.contrib.contenttypes.admin import GenericTabularInline from helpdesk.models import Ticket, Project, State, Comment, HelpdeskProfile, MailAttachment, ProjectAlias class AttachmentInline(GenericTabularInline): model = MailAttachment @admin.register(Ticket) class TicketAdmin(admin.ModelAdmin): list_display = ('title', 'project', 'state', 'priority', 'assignee') readonly_fields = ('created', 'updated') inlines = [AttachmentInline] @admin.register(Project) class ProjectAdmin(admin.ModelAdmin): list_display = ('machine_name', 'title') @admin.register(State) class StateAdmin(admin.ModelAdmin): list_display = ('machine_name', 'title') @admin.register(Comment) class CommentAdmin(admin.ModelAdmin): list_display = ('created', 'author', 'ticket') @admin.register(HelpdeskProfile) class HelpdeskProfileAdmin(admin.ModelAdmin): list_display = ('user', 'label') raw_id_fields = ['user'] @admin.register(ProjectAlias) class ProjectAliasAdmin(admin.ModelAdmin): list_display = ('email', 'project')
25.97619
106
0.749771
110
1,091
7.336364
0.409091
0.096654
0.141264
0.193309
0.136307
0.136307
0.136307
0.136307
0.136307
0
0
0
0.122823
1,091
41
107
26.609756
0.84326
0
0
0.076923
0
0
0.114574
0
0
0
0
0
0
1
0
false
0
0.115385
0
0.769231
0
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
2
34f7fc5bc44aedfa4436541c24a16188a8d79a89
204
py
Python
stimuli/Python/one_file_per_item/en/102_# str_if 12.py
ALFA-group/neural_program_comprehension
0253911f376cf282af5a5627e38e0a591ad38860
[ "MIT" ]
6
2020-04-24T08:16:51.000Z
2021-11-01T09:50:46.000Z
stimuli/Python/one_file_per_item/en/102_# str_if 12.py
ALFA-group/neural_program_comprehension
0253911f376cf282af5a5627e38e0a591ad38860
[ "MIT" ]
null
null
null
stimuli/Python/one_file_per_item/en/102_# str_if 12.py
ALFA-group/neural_program_comprehension
0253911f376cf282af5a5627e38e0a591ad38860
[ "MIT" ]
4
2021-02-17T20:21:31.000Z
2022-02-14T12:43:23.000Z
letters = ["a", "e", "t", "o", "u"] word = "CreepyNuts" if (word[1] in letters) and (word[6] in letters): print(0) elif (word[1] in letters) or (word[6] in letters): print(1) else: print(2)
18.545455
50
0.563725
35
204
3.285714
0.542857
0.313043
0.121739
0.243478
0.330435
0
0
0
0
0
0
0.04375
0.215686
204
10
51
20.4
0.675
0
0
0
0
0
0.073892
0
0
0
0
0
0
1
0
false
0
0
0
0
0.375
0
0
0
null
1
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
2
9b30f32bd9e67a028906cb2d5b8c5ab8b0669c93
996
py
Python
pyscf/future/__init__.py
robert-anderson/pyscf
cdc56e168cb15f47e8cdc791a92d689fa9b655af
[ "Apache-2.0" ]
2
2019-05-28T05:25:56.000Z
2019-11-09T02:16:43.000Z
pyscf/future/__init__.py
robert-anderson/pyscf
cdc56e168cb15f47e8cdc791a92d689fa9b655af
[ "Apache-2.0" ]
36
2018-08-22T19:44:03.000Z
2020-05-09T10:02:36.000Z
pyscf/future/__init__.py
robert-anderson/pyscf
cdc56e168cb15f47e8cdc791a92d689fa9b655af
[ "Apache-2.0" ]
4
2018-02-14T16:28:28.000Z
2019-08-12T16:40:30.000Z
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import sys sys.stderr.write(''' Warning Modules in the "future" directory (dmrgscf, fciqmcscf, shciscf, icmspt, xianci) have been moved to pyscf/pyscf directory. You can still import these modules. from the "future" directory, and they work the same as before. To avoid name conflicts with python built-in module "future", this directory will be deleted in future release. ''')
34.344828
79
0.764056
153
996
4.973856
0.660131
0.078844
0.034166
0.04205
0
0
0
0
0
0
0
0.01444
0.165663
996
28
80
35.571429
0.901324
0.586345
0
0
0
0
0.874055
0
0
0
0
0
0
1
0
true
0
0.222222
0
0.222222
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
2
9b48f7e905ce4692f5e4348364875080fb97ddc3
587
py
Python
Dataset/Leetcode/train/46/418.py
kkcookies99/UAST
fff81885aa07901786141a71e5600a08d7cb4868
[ "MIT" ]
null
null
null
Dataset/Leetcode/train/46/418.py
kkcookies99/UAST
fff81885aa07901786141a71e5600a08d7cb4868
[ "MIT" ]
null
null
null
Dataset/Leetcode/train/46/418.py
kkcookies99/UAST
fff81885aa07901786141a71e5600a08d7cb4868
[ "MIT" ]
null
null
null
class Solution(object): def XXX(self, nums): """ :type nums: List[int] :rtype: List[List[int]] """ def backtrace(nums,track): if len(nums) == len(track): res.append(track[:]) return for i in range(len(nums)): if nums[i] in track: continue track.append(nums[i]) backtrace(nums,track) track.remove(track[-1]) track = [] res = [] backtrace(nums,track) return res
25.521739
39
0.422487
58
587
4.275862
0.431034
0.157258
0.217742
0
0
0
0
0
0
0
0
0.003135
0.456559
587
22
40
26.681818
0.774295
0
0
0.125
0
0
0
0
0
0
0
0
0
0
null
null
0
0
null
null
0
0
0
0
null
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
2
9b7d17de91a9fdc4dd894fc26f2da604b315ab7c
7,461
py
Python
dlmnn/unused/tffuncs.py
SkafteNicki/Deep_LMNN
e70b495befdc6f6f1b24029e470c42a3b3821a93
[ "MIT" ]
1
2021-09-14T10:06:53.000Z
2021-09-14T10:06:53.000Z
dlmnn/unused/tffuncs.py
SkafteNicki/Deep_LMNN
e70b495befdc6f6f1b24029e470c42a3b3821a93
[ "MIT" ]
1
2018-12-12T07:18:42.000Z
2018-12-12T07:18:42.000Z
dlmnn/unused/tffuncs.py
SkafteNicki/Deep_LMNN
e70b495befdc6f6f1b24029e470c42a3b3821a93
[ "MIT" ]
null
null
null
#!/usr/bin/env python3 # -*- coding: utf-8 -*- """ Created on Tue May 29 09:58:39 2018 @author: nsde """ #%% #%% def tf_mahalanobisTransformer(X, scope='mahalanobis_transformer'): """ Creates a transformer function that for an given input matrix X, calculates the linear transformation L*X_i """ with tf.variable_scope(scope, reuse=tf.AUTO_REUSE, values=[X]): X = tf.cast(X, tf.float32) L = tf.get_variable("L", initializer=np.eye(50, 50, dtype=np.float32)) return tf.matmul(X, L) #%% def tf_convTransformer(X, scope='conv_transformer'): """ Creates a transformer function that for an given input tensor X, computes the convolution of that tensor with some weights W. """ with tf.variable_scope(scope, reuse=tf.AUTO_REUSE, values=[X]): X = tf.cast(X, tf.float32) W = tf.get_variable("W", initializer=np.random.normal(size=(3,3,1,10)).astype('float32')) return tf.nn.conv2d(X, W, strides=[1,1,1,1], padding="VALID") #%% def keras_mahalanobisTransformer(X, scope='mahalanobis_transformer'): X = tf.cast(X, tf.float32) with tf.variable_scope(scope, reuse=tf.AUTO_REUSE, values=[X]): S = Sequential() S.add(InputLayer(input_shape=(50,))) S.add(Dense(50, use_bias=False, kernel_initializer='identity')) return S.call(X) #%% if __name__ == '__main__': # Make sure that variables are shared X=tf.cast(np.random.normal(size=(100,50)), tf.float32) tr = KerasTransformer(input_shape=(50,)) tr.add(Dense(50, use_bias=False, kernel_initializer='identity')) tr.add(Dense(20, use_bias=False, kernel_initializer='identity')) tr.add(Dense(10, use_bias=False, kernel_initializer='identity')) trans_func1 = tr.get_function() res1=trans_func1(X) res2=trans_func1(X) # Check that we only have created three variables for w in tf.trainable_variables(): print(w) #%% def tf_pairwiseMahalanobisDistance2(X1, X2, L): ''' For a given mahalanobis distance parametrized by L, find the pairwise squared distance between all observations in matrix X1 and matrix X2. 
Input X1: N x d matrix, each row being an observation X2: M x d matrix, each row being an observation L: d x d matrix Output D: N x M matrix, with squared distances ''' with tf.name_scope('pairwiseMahalanobisDistance2'): X1, X2 = tf.cast(X1, tf.float32), tf.cast(X2, tf.float32) X1L = tf.matmul(X1, L) X2L = tf.matmul(X2, L) term1 = tf.pow(tf.norm(X1L, axis=1),2.0) term2 = tf.pow(tf.norm(X2L, axis=1),2.0) term3 = 2.0*tf.matmul(X1L, tf.transpose(X2L)) return tf.transpose(tf.maximum(tf.cast(0.0, dtype=X1L.dtype), term1 + tf.transpose(term2 - term3))) #%% def tf_mahalanobisTransformer(X, L): ''' Transformer for the mahalanobis distance ''' with tf.name_scope('mahalanobisTransformer'): X, L = tf.cast(X, tf.float32), tf.cast(L, tf.float32) return tf.matmul(X, L) #%% def tf_pairwiseConvDistance2(X1, X2, W): ''' For a given set of convolutional weights W, calculate the convolution with tensor X1 and X2 and then calculates the pairwise squared distance (euclidean) between conv features Input X: N x h x w x c tensor, each slice being an image Y: M x h x w x c tensor, each slice being an image W: f1 x f2 x c x nf tensor, where f1, f2 are the filter sizes and nf is the number of filters Output D: N x M matrix, with squared conv distances ''' with tf.name_scope('pairwiseConvDistance2'): X1, X2 = tf.cast(X1, tf.float32), tf.cast(X2, tf.float32) N, M = tf.shape(X1)[0], tf.shape(X2)[0] n_filt = tf.shape(W)[3] convX1 = tf.nn.conv2d(X1, W, strides=[1,1,1,1], padding='SAME') # N x height x width x n_filt convX2 = tf.nn.conv2d(X2, W, strides=[1,1,1,1], padding='SAME') # M x height x width x n_filt convX1_perm = tf.transpose(convX1, perm=[3,0,1,2]) # n_filt x N x height x width convX2_perm = tf.transpose(convX2, perm=[3,0,1,2]) # n_filt x M x height x width convX1_resh = tf.reshape(convX1_perm, (n_filt, N, -1)) # n_filt x N x (height*width) convX2_resh = tf.reshape(convX2_perm, (n_filt, M, -1)) # n_filt x M x (height*width) term1 = tf.expand_dims(tf.pow(tf.norm(convX1_resh, axis=2), 
2.0), 2) # n_filt x N x 1 term2 = tf.expand_dims(tf.pow(tf.norm(convX2_resh, axis=2), 2.0), 1) # n_filt x 1 x M term3 = 2.0*tf.matmul(convX1_resh, tf.transpose(convX2_resh, perm=[0,2,1])) # n_filt x N x M summ = term1 + term2 - term3 # n_filt x N x M return tf.maximum(tf.cast(0.0, tf.float32), tf.reduce_sum(summ, axis=0)) # N x M #%% def tf_convTransformer(X, W): ''' Transformer for the conv distance ''' with tf.name_scope('convTransformer'): X, W = tf.cast(X, tf.float32), tf.cast(W, tf.float32) return tf.nn.conv2d(X, W, strides=[1,1,1,1], padding='SAME') #%% def tf_nonlin_pairwiseConvDistance2(X1, X2, W): ''' For a given set of convolutional weights W, calculate the convolution with tensor X1 and X2 and then calculates the pairwise squared distance (euclidean) between conv features Input X: N x h x w x c tensor, each slice being an image Y: M x h x w x c tensor, each slice being an image W: f1 x f2 x c x nf tensor, where f1, f2 are the filter sizes and nf is the number of filters Output D: N x M matrix, with squared conv distances ''' with tf.name_scope('pairwiseConvDistance2'): X1, X2 = tf.cast(X1, tf.float32), tf.cast(X2, tf.float32) N, M = tf.shape(X1)[0], tf.shape(X2)[0] n_filt = tf.shape(W)[3] convX1 = tf.nn.conv2d(X1, W, strides=[1,1,1,1], padding='SAME') # N x height x width x n_filt convX2 = tf.nn.conv2d(X2, W, strides=[1,1,1,1], padding='SAME') # M x height x width x n_filt convX1 = tf.nn.relu(convX1) convX2 = tf.nn.relu(convX2) convX1_perm = tf.transpose(convX1, perm=[3,0,1,2]) # n_filt x N x height x width convX2_perm = tf.transpose(convX2, perm=[3,0,1,2]) # n_filt x M x height x width convX1_resh = tf.reshape(convX1_perm, (n_filt, N, -1)) # n_filt x N x (height*width) convX2_resh = tf.reshape(convX2_perm, (n_filt, M, -1)) # n_filt x M x (height*width) term1 = tf.expand_dims(tf.pow(tf.norm(convX1_resh, axis=2), 2.0), 2) # n_filt x N x 1 term2 = tf.expand_dims(tf.pow(tf.norm(convX2_resh, axis=2), 2.0), 1) # n_filt x 1 x M term3 = 
2.0*tf.matmul(convX1_resh, tf.transpose(convX2_resh, perm=[0,2,1])) # n_filt x N x M summ = term1 + term2 - term3 # n_filt x N x M return tf.maximum(tf.cast(0.0, tf.float32), tf.reduce_sum(summ, axis=0)) # N x M #%% def tf_nonlin_convTransformer(X, W): ''' Transformer for the conv distance ''' with tf.name_scope('convTransformer'): X, W = tf.cast(X, tf.float32), tf.cast(W, tf.float32) return tf.nn.relu(tf.nn.conv2d(X, W, strides=[1,1,1,1], padding='SAME')) #%% def tf_mode(array): ''' Find the mode of the input array. Expects 1D array ''' with tf.name_scope('mode'): unique, _, count = tf.unique_with_counts(array) max_idx = tf.argmax(count, axis=0) return unique[max_idx]
44.147929
101
0.630747
1,244
7,461
3.69373
0.155949
0.028292
0.020892
0.015234
0.732971
0.700109
0.684657
0.679869
0.647443
0.614581
0
0.052641
0.233615
7,461
169
102
44.147929
0.750962
0.308136
0
0.483146
0
0
0.054098
0.028066
0
0
0
0
0
1
0.11236
false
0
0
0
0.224719
0.011236
0
0
0
null
0
0
0
0
1
0
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
2
9b8acd0d1d067d0ee02a8dfa5e3ba3514941cafe
253
py
Python
cursoemvideo/python/exercicio/027(primeiroUltimoNome).py
mateusjustino/cursos
10927bf62f89b5847bb0acd998e9e9191472d0f4
[ "MIT" ]
null
null
null
cursoemvideo/python/exercicio/027(primeiroUltimoNome).py
mateusjustino/cursos
10927bf62f89b5847bb0acd998e9e9191472d0f4
[ "MIT" ]
null
null
null
cursoemvideo/python/exercicio/027(primeiroUltimoNome).py
mateusjustino/cursos
10927bf62f89b5847bb0acd998e9e9191472d0f4
[ "MIT" ]
null
null
null
nome = ' Pedro da Silva Moreira' nomeSeparado = nome.split() print('O primeiro nome é:') print(nomeSeparado[0]) #assim print('O último nome é:') print(nomeSeparado[-1]) #ou assim print('O último nome é:') print(nomeSeparado[len(nomeSeparado)-1])
19.461538
40
0.703557
37
253
4.810811
0.459459
0.101124
0.168539
0.370787
0.438202
0.438202
0.438202
0.438202
0
0
0
0.013761
0.13834
253
12
41
21.083333
0.802752
0.051383
0
0.25
0
0
0.327731
0
0
0
0
0
0
1
0
false
0
0
0
0
0.75
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
1
0
2
9b8d7494ccada2f0417446c39f325d154dd972da
307
py
Python
tests/extractor/test_extractor.py
victordibia/signver
e547177b5dab542c6d242566675ddb9468dadc08
[ "MIT" ]
4
2021-09-06T13:02:05.000Z
2022-03-20T15:22:45.000Z
tests/extractor/test_extractor.py
victordibia/signver
e547177b5dab542c6d242566675ddb9468dadc08
[ "MIT" ]
null
null
null
tests/extractor/test_extractor.py
victordibia/signver
e547177b5dab542c6d242566675ddb9468dadc08
[ "MIT" ]
1
2022-03-04T16:20:52.000Z
2022-03-04T16:20:52.000Z
from logging import log from tensorflow.python.ops.variables import model_variables from signver.extractor import MetricExtractor def test_extractor_load(): model_path = "models/extractor/metric" extractor = MetricExtractor() extractor.load(model_path) assert extractor.model is not None
25.583333
59
0.791531
38
307
6.263158
0.578947
0.109244
0.151261
0.184874
0
0
0
0
0
0
0
0
0.14658
307
11
60
27.909091
0.908397
0
0
0
0
0
0.074919
0.074919
0
0
0
0
0.125
1
0.125
false
0
0.375
0
0.5
0
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
2
9b923db117d6fd1c71cc7e4fa23ed7ff5f2df80c
480
py
Python
data-strucutre/dictionaries.py
howtoautomateinth/awesome-python
b081de60a6b34cfb6f13e15dc63ac3dc4f4787cd
[ "Apache-2.0" ]
3
2020-05-21T04:50:07.000Z
2021-03-28T04:39:07.000Z
data-strucutre/dictionaries.py
howtoautomateinth/awesome-python
b081de60a6b34cfb6f13e15dc63ac3dc4f4787cd
[ "Apache-2.0" ]
null
null
null
data-strucutre/dictionaries.py
howtoautomateinth/awesome-python
b081de60a6b34cfb6f13e15dc63ac3dc4f4787cd
[ "Apache-2.0" ]
3
2020-03-02T06:30:49.000Z
2021-05-29T03:13:56.000Z
#!/usr/bin/python # -*- coding: utf-8 -*- # simple dictionary mybasket = {'apple':2.99,'orange':1.99,'milk':5.8} print(mybasket['apple']) # dictionary with list inside mynestedbasket = {'apple':2.99,'orange':1.99,'milk':['chocolate','stawbery']} print(mynestedbasket['milk'][1].upper()) # append more key mybasket['pizza'] = 4.5 print(mybasket) # get only keys print(mybasket.keys()) # get only values print(mybasket.values()) # get pair values print(mybasket.items())
22.857143
81
0.675
67
480
4.835821
0.522388
0.200617
0.049383
0.08642
0.12963
0.12963
0.12963
0
0
0
0
0.042453
0.116667
480
21
82
22.857143
0.721698
0.304167
0
0
0
0
0.186544
0
0
0
0
0
0
1
0
false
0
0
0
0
0.666667
0
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
1
0
2
9bb16486d67ab1cd31c631ac13a9a3a1da83b48d
620
py
Python
pbpstats/resources/enhanced_pbp/stats_nba/free_throw.py
pauldevos/pbpstats
71c0b5e2bd45d0ca031646c70cd1c1f30c6a7152
[ "MIT" ]
null
null
null
pbpstats/resources/enhanced_pbp/stats_nba/free_throw.py
pauldevos/pbpstats
71c0b5e2bd45d0ca031646c70cd1c1f30c6a7152
[ "MIT" ]
null
null
null
pbpstats/resources/enhanced_pbp/stats_nba/free_throw.py
pauldevos/pbpstats
71c0b5e2bd45d0ca031646c70cd1c1f30c6a7152
[ "MIT" ]
null
null
null
from pbpstats.resources.enhanced_pbp.stats_nba.enhanced_pbp_item import ( StatsEnhancedPbpItem, ) from pbpstats.resources.enhanced_pbp import FreeThrow class StatsFreeThrow(FreeThrow, StatsEnhancedPbpItem): """ Class for free throw events """ def __init__(self, *args): super().__init__(*args) @property def is_made(self): """ returns True if shot was made, False otherwise """ return "MISS " not in self.description def get_offense_team_id(self): """ returns team id that took the shot """ return self.team_id
22.962963
73
0.646774
71
620
5.394366
0.619718
0.086162
0.109661
0.151436
0.167102
0
0
0
0
0
0
0
0.266129
620
26
74
23.846154
0.841758
0.175806
0
0
0
0
0.011136
0
0
0
0
0
0
1
0.25
false
0
0.166667
0
0.666667
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
1
0
0
2
9bb1842cb0c3a935743c2e944c304e7e27db1478
175
py
Python
launcher.py
DukeX9/David-TelegramBot-Docker
5269cde874b5d2082f5c38a86ada1f23943cb650
[ "MIT" ]
null
null
null
launcher.py
DukeX9/David-TelegramBot-Docker
5269cde874b5d2082f5c38a86ada1f23943cb650
[ "MIT" ]
null
null
null
launcher.py
DukeX9/David-TelegramBot-Docker
5269cde874b5d2082f5c38a86ada1f23943cb650
[ "MIT" ]
null
null
null
from bot import init app = init() if __name__ == '__main__': # note the threaded arg which allow # your app to have more than one thread app.run(threaded=True)
17.5
43
0.674286
27
175
4.074074
0.851852
0
0
0
0
0
0
0
0
0
0
0
0.245714
175
9
44
19.444444
0.833333
0.405714
0
0
0
0
0.08
0
0
0
0
0
0
1
0
false
0
0.25
0
0.25
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
2
9bb43931b46f3af13d3eaba7dfd666d7ed3ff6d0
3,846
py
Python
pse_summary/settings/base.py
ralphqq/pse-indices-scoreboard
f11a01915ac5a17b663db604afe996c5ed928fb9
[ "MIT" ]
1
2019-10-08T16:54:07.000Z
2019-10-08T16:54:07.000Z
pse_summary/settings/base.py
ralphqq/pse-indices-scoreboard
f11a01915ac5a17b663db604afe996c5ed928fb9
[ "MIT" ]
8
2020-02-12T01:17:41.000Z
2021-12-13T20:06:12.000Z
pse_summary/settings/base.py
ralphqq/pse-indices-scoreboard
f11a01915ac5a17b663db604afe996c5ed928fb9
[ "MIT" ]
null
null
null
""" Django settings for pse_summary project. Generated by 'django-admin startproject' using Django 2.2.3. For more information on this file, see https://docs.djangoproject.com/en/2.2/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/2.2/ref/settings/ """ import os from celery.schedules import crontab from dotenv import load_dotenv # Build paths inside the project like this: os.path.join(BASE_DIR, ...) BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath( __file__ )))) # Load environment variables from .env file load_dotenv(os.path.join(BASE_DIR, '.env')) # Application definition INSTALLED_APPS = [ 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', 'django.contrib.humanize', 'main_board', 'index_scraper', ] MIDDLEWARE = [ 'django.middleware.security.SecurityMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ] ROOT_URLCONF = 'pse_summary.urls' TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [], 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', ], }, }, ] WSGI_APPLICATION = 'pse_summary.wsgi.application' # Database # https://docs.djangoproject.com/en/2.2/ref/settings/#databases DATABASES = { 'default': { 'ENGINE': 'django.db.backends.postgresql', 'NAME': os.environ.get('DB_NAME'), 'USER': os.environ.get('DB_USERNAME'), 'PASSWORD': os.environ.get('DB_PASSWORD'), 'HOST': 
os.environ.get('DB_HOST', ''), 'PORT': os.environ.get('DB_PORT'), 'TEST': { 'NAME': 'test_pse_summary_db' }, } } # Password validation # https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators AUTH_PASSWORD_VALIDATORS = [ { 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', }, { 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', }, { 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', }, { 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', }, ] # Internationalization # https://docs.djangoproject.com/en/2.2/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE = 'Asia/Manila' USE_I18N = True USE_L10N = True USE_TZ = True # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/2.2/howto/static-files/ STATIC_URL = '/static/' STATIC_ROOT = os.path.join(BASE_DIR, 'static') # Celery settings CELERY_BROKER_URL = os.environ.get( 'CELERY_BROKER_URL', 'redis://localhost:6379' ) CELERY_RESULT_BACKEND = os.environ.get( 'CELERY_RESULT_BACKEND', 'redis://localhost:6379' ) CELERY_ACCEPT_CONTENT = ['application/json'] CELERY_RESULT_SERIALIZER = 'json' CELERY_TASK_SERIALIZER = 'json' CELERY_TIMEZONE = TIME_ZONE # Celery Beat Schedule CELERY_BEAT_SCHEDULE = { 'get-values-during-business-hours': { 'task': 'index_getter', 'schedule': crontab(minute='*/3', hour='9-16', day_of_week='1-5') } }
25.812081
91
0.681487
433
3,846
5.900693
0.376443
0.081409
0.046575
0.058708
0.186693
0.166732
0.105675
0.094325
0.046967
0
0
0.011055
0.176807
3,846
148
92
25.986486
0.795957
0.210608
0
0.020833
1
0
0.482415
0.364964
0
0
0
0
0
1
0
false
0.0625
0.03125
0
0.03125
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
0
1
0
0
0
0
0
2
9bbe8dff525decb04bd791562f13040180a0090c
346
py
Python
md-envs/md_envs/__init__.py
IBM/logical-agent-driven-polymer-discovery
9f2040ca0b19a2bc149ff840aa98a3f122606c32
[ "Apache-2.0" ]
null
null
null
md-envs/md_envs/__init__.py
IBM/logical-agent-driven-polymer-discovery
9f2040ca0b19a2bc149ff840aa98a3f122606c32
[ "Apache-2.0" ]
null
null
null
md-envs/md_envs/__init__.py
IBM/logical-agent-driven-polymer-discovery
9f2040ca0b19a2bc149ff840aa98a3f122606c32
[ "Apache-2.0" ]
null
null
null
from gym.envs.registration import register register( id='eADPD-v1', entry_point='md_envs.envs:AgentDrivenPolymerDiscovery', ) register( id='logical-eADPD-v1', entry_point='md_envs.envs:LogicalAgentDrivenPolymerDiscovery', ) register( id='direct-eADPD-v1', entry_point='md_envs.envs:DirectAgentDrivenPolymerDiscovery', )
21.625
66
0.754335
38
346
6.710526
0.447368
0.117647
0.141176
0.2
0.317647
0.317647
0.317647
0
0
0
0
0.009868
0.121387
346
16
67
21.625
0.828947
0
0
0.230769
0
0
0.495677
0.383285
0
0
0
0
0
1
0
true
0
0.076923
0
0.076923
0
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
2
32ff0f5a6da73d601925d20173aeb929eb765c44
448
py
Python
exercicio3.py
cecilialourenco/Exercicios
960231651566a35e94f9e075124280ef3ddf1bb9
[ "Unlicense" ]
null
null
null
exercicio3.py
cecilialourenco/Exercicios
960231651566a35e94f9e075124280ef3ddf1bb9
[ "Unlicense" ]
null
null
null
exercicio3.py
cecilialourenco/Exercicios
960231651566a35e94f9e075124280ef3ddf1bb9
[ "Unlicense" ]
null
null
null
'''Faça um programa que leia 4 notas, mostre as notas e a média na tela.''' notas = [] nota1 = float(input('Digite a primeira nota: ')) notas.append(nota1) nota2 = float(input('Digite a segunda nota: ')) notas.append(nota2) nota3 = float(input('Digite a terceira nota: ')) notas.append(nota3) nota4 = float(input('Digite a quarta nota: ')) notas.append(nota4) print(notas) media = float(input('A média é: {}'. format(sum(notas)/len(notas))))
23.578947
75
0.689732
69
448
4.478261
0.478261
0.161812
0.20712
0.220065
0
0
0
0
0
0
0
0.023438
0.142857
448
18
76
24.888889
0.78125
0.154018
0
0
0
0
0.284182
0
0
0
0
0
0
1
0
false
0
0
0
0
0.090909
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
2
fd01f25ecbcb738ebf3eb6464ff69f891397c315
290
py
Python
tudo/ex017.py
Ramon-Erik/Exercicios-Python
158a7f1846dd3d486aa0517fa337d46d73aab649
[ "MIT" ]
1
2021-07-08T00:35:57.000Z
2021-07-08T00:35:57.000Z
tudo/ex017.py
Ramon-Erik/Exercicios-Python
158a7f1846dd3d486aa0517fa337d46d73aab649
[ "MIT" ]
null
null
null
tudo/ex017.py
Ramon-Erik/Exercicios-Python
158a7f1846dd3d486aa0517fa337d46d73aab649
[ "MIT" ]
null
null
null
import emoji from math import hypot cateto_opos = float(input('Cateto oposto: ')) cateto_adja = float(input('Cateto adjacente: ')) hipotenusa = (hypot(cateto_opos, cateto_adja)) print(emoji.emojize(f'De acordo com os dados, a hipotenusa é igual a {hipotenusa:.2f} :nerd_face::thumbs_up:.'))
48.333333
112
0.758621
44
290
4.863636
0.659091
0.102804
0.140187
0
0
0
0
0
0
0
0
0.003861
0.106897
290
6
112
48.333333
0.822394
0
0
0
0
0.166667
0.412371
0.079038
0
0
0
0
0
1
0
false
0
0.333333
0
0.333333
0.166667
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
2
fd26146f953fd6c149d4d0ab9b1beecc7baf6caf
332
py
Python
ics/structures/stop_dhcp_server_command.py
intrepidcs/python_ics
7bfa8c2f893763608f9255f9536a2019cfae0c23
[ "Unlicense" ]
45
2017-10-17T08:42:08.000Z
2022-02-21T16:26:48.000Z
ics/structures/stop_dhcp_server_command.py
intrepidcs/python_ics
7bfa8c2f893763608f9255f9536a2019cfae0c23
[ "Unlicense" ]
106
2017-03-07T21:10:39.000Z
2022-03-29T15:32:46.000Z
ics/structures/stop_dhcp_server_command.py
intrepidcs/python_ics
7bfa8c2f893763608f9255f9536a2019cfae0c23
[ "Unlicense" ]
17
2017-04-04T12:30:22.000Z
2022-01-28T05:30:25.000Z
# This file was auto generated; Do not modify, if you value your sanity! import ctypes import enum class stop_dhcp_server_command(ctypes.Structure): _pack_ = 2 _fields_ = [ ('networkId', ctypes.c_uint16), ] _StopDHCPServerCommand = stop_dhcp_server_command StopDHCPServerCommand = stop_dhcp_server_command
19.529412
72
0.753012
41
332
5.731707
0.707317
0.102128
0.178723
0.268085
0.357447
0
0
0
0
0
0
0.01107
0.183735
332
16
73
20.75
0.856089
0.210843
0
0
1
0
0.034749
0
0
0
0
0
0
1
0
false
0
0.222222
0
0.555556
0
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
2
fd3ac65f567e5909162483e0e75534a983ee2851
2,612
py
Python
abiflows/database/mongoengine/utils.py
gmatteo/abiflows
bde67408b024c34c17d78c9fcc672b42be71e1e5
[ "BSD-3-Clause" ]
4
2018-12-13T09:12:17.000Z
2019-06-14T15:16:08.000Z
abiflows/database/mongoengine/utils.py
gmatteo/abiflows
bde67408b024c34c17d78c9fcc672b42be71e1e5
[ "BSD-3-Clause" ]
3
2018-01-03T17:08:22.000Z
2018-08-23T22:29:41.000Z
abiflows/database/mongoengine/utils.py
abinit/abiflows
bde67408b024c34c17d78c9fcc672b42be71e1e5
[ "BSD-3-Clause" ]
13
2017-07-22T01:05:20.000Z
2021-06-08T10:57:53.000Z
# coding: utf-8 """ Utilities to handle mongoengine classes and connections. """ import contextlib from pymatgen.util.serialization import pmg_serialize from monty.json import MSONable from mongoengine import connect from mongoengine.context_managers import switch_collection from mongoengine.connection import DEFAULT_CONNECTION_NAME class DatabaseData(MSONable): """ Basic class providing data to connect to a collection in the database and switching to that collection. Wraps mongoengine's connect and switch_collection functions. """ def __init__(self, database, host=None, port=None, collection=None, username=None, password=None): """ Args: database: name of the database host: the host name of the mongod instance to connect to port: the port that the mongod instance is running on collection: name of the collection username: username to authenticate with password: password to authenticate with """ #TODO handle multiple collections? # note: if making collection a list (or a dict), make it safe for mutable default arguments, otherwise there # will probably be problems with the switch_collection self.database = database self.host = host self.port = port self.collection = collection self.username = username self.password = password @classmethod def from_dict(cls, d): d = d.copy() d.pop("@module", None) d.pop("@class", None) return cls(**d) @pmg_serialize def as_dict(self): return dict(database=self.database, host=self.host, port=self.port, collection=self.collection, username=self.username, password=self.password) @pmg_serialize def as_dict_no_credentials(self): return dict(database=self.database, host=self.host, port=self.port, collection=self.collection) def connect_mongoengine(self, alias=DEFAULT_CONNECTION_NAME): """ Open the connection to the selected database """ return connect(db=self.database, host=self.host, port=self.port, username=self.username, password=self.password, alias=alias) @contextlib.contextmanager def switch_collection(self, cls): """ Switches to the 
chosen collection using Mongoengine's switch_collection. """ if self.collection: with switch_collection(cls, self.collection) as new_cls: yield new_cls else: yield cls
34.826667
116
0.665008
311
2,612
5.501608
0.327974
0.056108
0.037405
0.035067
0.188194
0.163647
0.116891
0.116891
0.09585
0.09585
0
0.000518
0.261103
2,612
74
117
35.297297
0.88601
0.321593
0
0.054054
0
0
0.008
0
0
0
0
0.013514
0
1
0.162162
false
0.108108
0.162162
0.054054
0.459459
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
1
0
0
0
0
1
0
0
0
0
0
2
fd4135c5a31407306c87f27c0c48e1d8bf5ef87b
18,727
py
Python
accounting/models.py
venkat0708/BalajiVV
ddf74d26a7ecae3f3bc5a902dcab09bf8f30e448
[ "MIT" ]
null
null
null
accounting/models.py
venkat0708/BalajiVV
ddf74d26a7ecae3f3bc5a902dcab09bf8f30e448
[ "MIT" ]
null
null
null
accounting/models.py
venkat0708/BalajiVV
ddf74d26a7ecae3f3bc5a902dcab09bf8f30e448
[ "MIT" ]
null
null
null
from django.db import models from django.core.validators import RegexValidator, MinValueValidator,MaxValueValidator from django.core.urlresolvers import reverse from django.dispatch import receiver from django.db.models.signals import post_save, pre_save, pre_delete from django.utils import timezone from core.models import BaseEntity from products.models import Service from customers.models import Staff, Customer, Vendor class CommissionStructure(BaseEntity): """ commission for staff based on services""" staff = models.ForeignKey( Staff, related_name='commissions' ) service = models.ForeignKey( Service, related_name='commissions' ) amount = models.IntegerField( default=500, validators=[ MinValueValidator( 10, message = 'Amount should be greater than 10' ), MaxValueValidator( 100000, message = 'Amount should be less than 100000' ), ] ) class Meta: unique_together = ("staff", "service") def __str__(self): return str(self.staff.name)+ ' '+ str(self.service.name) def get_absolute_url(self): return reverse('customers:Staff_Detail', kwargs={'pk': self.staff.id}) def get_update_url(self): return reverse('accounting:CommissionStructure_Update', kwargs={'pk': self.id}) def get_delete_url(self): return reverse('accounting:CommissionStructure_Delete', kwargs={'pk': self.id}) class Payout(BaseEntity): """ Payout to all vendors""" MODE_CHOICES=( ('BANK', 'Bank'), ('CHEQUE', 'Cheque'), ('DD', 'Demand Draft'), ('CASH', 'Cash'), ) vendor = models.ForeignKey( Vendor, related_name='bill_payouts' ) date = models.DateField( verbose_name='payment date' ) time = models.TimeField( verbose_name='payment time' ) amount = models.IntegerField( default=500, validators=[ MinValueValidator( 10, message = 'Amount should be greater than 10' ), MaxValueValidator( 10000000, message = 'Amount should be less than 10000000' ), ] ) mode = models.CharField( max_length =15, choices = MODE_CHOICES, default = 'CASH', ) def __str__(self): return self.vendor.name def get_absolute_url(self): return 
reverse('accounting:Payout_Detail', kwargs={'id': self.id}) def get_update_url(self): return reverse('accounting:Payout_Update', kwargs={'id': self.id}) def get_delete_url(self): return reverse('accounting:Payout_Delete', kwargs={'id': self.id}) class PayCommissionOrSalary(BaseEntity): """ Payout to all staff members""" MODE_CHOICES=( ('BANK', 'Bank'), ('CHEQUE', 'Cheque'), ('DD', 'Demand Draft'), ('CASH', 'Cash'), ) staff = models.ForeignKey( Staff, related_name= 'commissions_payouts' ) date = models.DateField( verbose_name='payment date' ) time = models.TimeField( verbose_name='payment time' ) amount = models.IntegerField( default=500, validators=[ MinValueValidator( 10, message = 'Amount should be greater than 10' ), MaxValueValidator( 10000000, message = 'Amount should be less than 10000000' ), ] ) mode = models.CharField( max_length =15, choices = MODE_CHOICES, default = 'CASH', ) def get_absolute_url(self): return reverse('accounting:PayCommissionOrSalary_Detail', kwargs={'id': self.id}) def get_update_url(self): return reverse('accounting:PayCommissionOrSalary_Update', kwargs={'id': self.id}) def get_delete_url(self): return reverse('accounting:PayCommissionOrSalary_Delete', kwargs={'id': self.id}) class Payin(BaseEntity): """ Payins from all customers""" MODE_CHOICES=( ('BANK', 'Bank'), ('CHEQUE', 'Cheque'), ('DD', 'Demand Draft'), ('CASH', 'Cash'), ) customer = models.ForeignKey( 'customers.Customer', related_name='customer_payins', blank = True, null = True, ) event = models.ForeignKey( 'booking.Event', related_name='event_payins', blank = True, null =True, ) date = models.DateField( verbose_name='payment date' ) time = models.TimeField( verbose_name='payment time' ) amount = models.IntegerField( default=500, validators=[ MinValueValidator( 10, message = 'Amount should be greater than 10' ), MaxValueValidator( 10000000, message = 'Amount should be less than 10000000' ), ] ) mode = models.CharField( max_length =15, choices = MODE_CHOICES, default = 'CASH', 
) def get_absolute_url(self): return reverse('accounting:Payin_Detail', kwargs={'id': self.id}) def get_update_url(self): return reverse('accounting:Payin_Update', kwargs={'id': self.id}) def get_delete_url(self): return reverse('accounting:Payin_Delete', kwargs={'id': self.id}) class Invoice(BaseEntity): """ Invoices are generated based on events state""" STATUS_CHOICES=( ('CREATED', 'Created'), ('CONFIRMED', 'Confirmed'), ('PARTIAL_PAYMENT', 'Partially Paid'), ('RECEIVED', 'Received'), ('CLOSED', 'Closed') ) customer = models.ForeignKey( Customer, related_name='invoices', ) event = models.ForeignKey( 'booking.Event', related_name='invoice', ) generated_date = models.DateField( verbose_name='date invoice generated' ) due_date = models.DateField( verbose_name='date payment is expected' ) paid_date = models.DateField( verbose_name='date payment is expected', blank =True, null = True, ) status = models.CharField( max_length =15, choices = STATUS_CHOICES, default = 'CREATED', ) amount = models.IntegerField( default=500, validators=[ MinValueValidator( 10, message = 'Amount should be greater than 10' ), MaxValueValidator( 10000000, message = 'Amount should be less than 10000000' ), ] ) paid = models.IntegerField( default=500, validators=[ MinValueValidator( 10, message = 'Amount should be greater than 10' ), MaxValueValidator( 10000000, message = 'Amount should be less than 10000000' ), ] ) payins = models.ManyToManyField( Payin, related_name='invoices', null = True, blank = True, ) def get_absolute_url(self): return reverse('accounting:Invoice_Detail', kwargs={'id': self.id}) def get_update_url(self): return reverse('accounting:Invoice_Update', kwargs={'id': self.id}) def get_delete_url(self): return reverse('accounting:Invoice_Delete', kwargs={'id': self.id}) class Bill(BaseEntity): """ Invoices are generated based on events state""" STATUS_CHOICES=( ('CREATED', 'Created'), ('CONFIRMED', 'Confirmed'), ('PARTIAL_PAYMENT', 'Partially Paid'), ('PAID', 'Paid'), 
('CLOSED', 'Closed') ) vendor = models.ForeignKey( Vendor, related_name='vendor', ) booked_service = models.ForeignKey( 'booking.Booked_Service', related_name='billed_services', ) generated_date = models.DateField( verbose_name='date bill generated' ) due_date = models.DateField( verbose_name='date payout is expected' ) paid_date = models.DateField( verbose_name='date payout is made', null = True, blank = True, ) status = models.CharField( max_length =15, choices = STATUS_CHOICES, default = 'CREATED', ) amount = models.IntegerField( default=500, validators=[ MinValueValidator( 10, message = 'Amount should be greater than 10' ), MaxValueValidator( 10000000, message = 'Amount should be less than 10000000' ), ] ) paid = models.IntegerField( default=500, validators=[ MinValueValidator( 0, message = 'Amount should be greater than 0' ), MaxValueValidator( 10000000, message = 'Amount should be less than 10000000' ), ] ) payouts = models.ManyToManyField( Payout, related_name='bills', null = True, blank = True, ) @property def due_amount(self): return self.amount - self.paid def __str__(self): return self.vendor.name def get_absolute_url(self): return reverse('accounting:Bill_Detail', kwargs={'id': self.id}) def get_update_url(self): return reverse('accounting:Bill_Update', kwargs={'id': self.id}) def get_delete_url(self): return reverse('accounting:Bill_Delete', kwargs={'id': self.id}) class Commission(BaseEntity): """ Commissions are generated based on events state""" STATUS_CHOICES=( ('CREATED', 'Created'), ('CONFIRMED', 'Confirmed'), ('PARTIAL_PAYMENT', 'Partially Paid'), ('PAID', 'paid fully'), ('CLOSED', 'Closed') ) staff = models.ForeignKey( Staff, related_name='staff_commissions', ) event = models.ForeignKey( 'booking.Event', related_name='event_commissions', blank = True, null =True, ) booked_service = models.ForeignKey( 'booking.Booked_Service', related_name='commissions', ) generated_date = models.DateField( verbose_name='date commission generated' ) due_date = 
models.DateField( verbose_name='date commission is expected' ) paid_date = models.DateField( verbose_name='date commission is paid', null = True, blank =True ) status = models.CharField( max_length =15, choices = STATUS_CHOICES, default = 'CREATED', ) amount = models.IntegerField( default=500, validators=[ MinValueValidator( 10, message = 'Amount should be greater than 10' ), MaxValueValidator( 10000000, message = 'Amount should be less than 10000000' ), ] ) paid = models.IntegerField( default=500, validators=[ MinValueValidator( 0, message = 'Amount should be greater than 0' ), MaxValueValidator( 10000000, message = 'Amount should be less than 10000000' ), ] ) payouts = models.ManyToManyField( PayCommissionOrSalary, related_name='commissions', null = True, blank = True, ) def get_absolute_url(self): return reverse('accounting:Commission_Detail', kwargs={'id': self.id}) def get_update_url(self): return reverse('accounting:Commission_Update', kwargs={'id': self.id}) def get_delete_url(self): return reverse('accounting:Commission_Delete', kwargs={'id': self.id}) @receiver(post_save, sender = PayCommissionOrSalary) def update_commissions_salaries_based_on_PayCommissionOrSalary_post_save(sender, instance, created, **kwargs): payout = instance amount = payout.amount commissions = Commission.objects.filter(staff = payout.staff).order_by('generated_date') for commission in commissions: if amount >= commission.amount: commission.paid = commission.amount commission.status = 'PAID' amount -= commission.amount commission.paid_date = timezone.now().date() commission.payouts.add(payout) commission.save() elif amount > 0: commission.paid += amount if commission.paid < commission.amount: commission.status = 'PARTIAL_PAYMENT' else: commission.status = 'PAID' commission.paid_date = timezone.now().date() amount -= commission.paid commission.payouts.add(payout) commission.save() @receiver(pre_save, sender = PayCommissionOrSalary) def 
update_bill_based_on_PayCommissionOrSalary_pre_save(sender, instance, **kwargs): print('triggered pre save PayCommissionOrSalary') payout = instance try: past_payout = PayCommissionOrSalary.objects.get(pk = payout.id) amount = past_payout.amount commissions = past_payout.commissions.all() for commission in commissions: if amount >=0: if commission.paid < amount: commission.paid = 0 commission.status = 'CONFIRMED' commission.save() commission -= commission.paid elif commission.paid >= amount: commission.paid -= amount commission.status = 'PARTIAL_PAYMENT' commission.save() amount -= commission.paid except: pass @receiver(pre_delete, sender = PayCommissionOrSalary) def update_bill_based_on_PayCommissionOrSalary_pre_delete(sender, instance, **kwargs): print('triggered pre delete PayCommissionOrSalary' ) payout = instance try: delted_payout = PayCommissionOrSalary.objects.get(pk = payout.id) amount = delted_payout.amount commissions = delted_payout.commissions.all() print(commissions) for commission in commissions: if amount >0: if commission.paid <= amount: commission.paid = 0 commission.status = 'CONFIRMED' commission.save() amount -= commission.paid elif commission.paid > amount: commission.paid -= amount commission.status = 'PARTIAL_PAYMENT' commission.save() amount -= commission.paid except: print('failed') @receiver(post_save, sender = Payout) def update_bill_based_on_payout_post_save(sender, instance, created, **kwargs): print('triggered post save payout') payout = instance amount = payout.amount bills = Bill.objects.filter(vendor = payout.vendor).order_by('generated_date') for bill in bills: if bill.paid < bill.amount: if amount >= bill.amount: bill.paid = bill.amount bill.status = 'PAID' amount -= bill.amount bill.paid_date = timezone.now().date() bill.payouts.add(payout) bill.save() elif amount > 0: bill.paid += amount if bill.paid < bill.amount: bill.status = 'PARTIAL_PAYMENT' else: bill.status = 'PAID' bill.paid_date = timezone.now().date() amount -= 
bill.paid bill.payouts.add(payout) bill.save() @receiver(pre_save, sender = Payout) def update_bill_based_on_payout_pre_save(sender, instance, **kwargs): print('triggered pre save payout') payout = instance try: past_payout = Payout.objects.get(pk = payout.id) amount = past_payout.amount print(amount) bills = past_payout.bills.all() for bill in bills: if amount >0: if bill.paid <= amount: bill.paid = 0 bill.status = 'CONFIRMED' bill.save() amount -= bill.paid elif bill.paid > amount: bill.paid -= amount bill.status = 'PARTIAL_PAYMENT' bill.save() amount -= bill.paid except: pass @receiver(pre_delete, sender = Payout) def update_bill_based_on_payout_pre_delete(sender, instance, **kwargs): print('triggered pre delete payout' ) payout = instance try: delted_payout = Payout.objects.get(pk = payout.id) amount = delted_payout.amount print(amount) bills = delted_payout.bills.all() for bill in bills: if amount >0: if bill.paid <= amount: print(bill.paid) bill.paid = 0 bill.status = 'CONFIRMED' bill.paid_date = None bill.save() amount -= bill.paid elif bill.paid > amount: bill.paid -= amount bill.status = 'PARTIAL_PAYMENT' bill.paid_date = None bill.save() amount -= bill.paid except: pass
29.772655
110
0.533081
1,679
18,727
5.812984
0.088148
0.025615
0.027971
0.043033
0.816496
0.765676
0.664652
0.622541
0.58627
0.479098
0
0.020743
0.371869
18,727
628
111
29.820064
0.808977
0.013457
0
0.657356
0
0
0.141989
0.036081
0
0
0
0
0
1
0.057728
false
0.005587
0.01676
0.046555
0.234637
0.018622
0
0
0
null
0
0
0
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
2
fd4424d02208e046d11632fd9a9d8ceb09610607
287
py
Python
Anylink-server/add_user.py
orike122/anylink
844199c0b094670642a13603b1edeb763e8c50fc
[ "MIT" ]
3
2020-02-02T19:48:50.000Z
2020-02-02T20:05:14.000Z
Anylink-server/add_user.py
orike122/anylink
844199c0b094670642a13603b1edeb763e8c50fc
[ "MIT" ]
4
2021-01-28T19:53:40.000Z
2022-03-25T18:44:41.000Z
Anylink-server/add_user.py
orike122/anylink
844199c0b094670642a13603b1edeb763e8c50fc
[ "MIT" ]
null
null
null
from config import Configuration if __name__ == "__main__": config = Configuration("/home/orikeidar01/config.json", "anylink") config.database.add_user("test@gmail.com", "ECD71870D1963316A97E3AC3408C9835AD8CF0F3C1BC703527C30265534F75AE", "anylink")
41
107
0.707317
23
287
8.434783
0.782609
0
0
0
0
0
0
0
0
0
0
0.192308
0.184669
287
6
108
47.833333
0.636752
0
0
0
0
0
0.449477
0.324042
0
0
0
0
0
1
0
false
0
0.2
0
0.2
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
2
b5c4f240f5c89da02cb112a0309854a25d615d5c
981
py
Python
tests/conftest.py
mr-mixas/nested.py
23857f248a6411b15961a0e2a169c2f14421ccb7
[ "Apache-2.0" ]
null
null
null
tests/conftest.py
mr-mixas/nested.py
23857f248a6411b15961a0e2a169c2f14421ccb7
[ "Apache-2.0" ]
null
null
null
tests/conftest.py
mr-mixas/nested.py
23857f248a6411b15961a0e2a169c2f14421ccb7
[ "Apache-2.0" ]
null
null
null
import os
import pytest
from io import StringIO

# Rewrite asserts in the shared helper module so failures show rich diffs.
pytest.register_assert_rewrite('tests.common')


@pytest.fixture
def content():
    """Fixture: a callable that returns the full text of a given file."""
    def _reader(filename):
        with open(filename) as handle:
            return handle.read()
    return _reader


@pytest.fixture
def expected(request):
    """Load the expected-output file named '<test_module>.<test_function>.exp'
    that sits next to the requesting test module."""
    base = os.path.splitext(request.module.__file__)[0]
    exp_file = '.'.join((base, request.function.__name__, 'exp'))
    with open(exp_file) as handle:
        return handle.read()


@pytest.fixture
def rpath(request):
    """Fixture: a callable resolving *filename* (relative to the requesting
    test module) into a path relative to the project root (one level above
    this conftest)."""
    def _path_resolver(filename):
        module_dir = os.path.dirname(request.module.__file__)
        target = os.path.join(module_dir, filename)
        project_root = os.path.join(os.path.dirname(__file__), '..')
        return os.path.relpath(target, project_root)
    return _path_resolver


@pytest.fixture
def stringio():
    """A fresh in-memory text stream."""
    return StringIO()


class _StringIOTTY(StringIO):
    """StringIO variant that pretends to be attached to a terminal."""

    def isatty(self):
        return True


@pytest.fixture
def stringio_tty():
    """A fresh in-memory text stream whose isatty() reports True."""
    return _StringIOTTY()
17.210526
59
0.629969
113
981
5.230089
0.380531
0.060914
0.135364
0.060914
0.192893
0.192893
0.192893
0.101523
0
0
0
0.00137
0.255861
981
56
60
17.517857
0.808219
0
0
0.243243
0
0
0.019368
0
0
0
0
0
0.027027
1
0.216216
false
0
0.081081
0.081081
0.540541
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
1
0
0
2
b5c62ca5b3c4f2884cd5d03e834c6d0ef771ac38
773
py
Python
100 Exercicios/Mundo 1/EX-27 - PRIMEIRO E ÚLTIMO NOME DE UMA PESSOA.py
marevandro/Python-Exercicios
e377b6ab7343f82e44f32ac39abf2e1e4dcb529c
[ "MIT" ]
null
null
null
100 Exercicios/Mundo 1/EX-27 - PRIMEIRO E ÚLTIMO NOME DE UMA PESSOA.py
marevandro/Python-Exercicios
e377b6ab7343f82e44f32ac39abf2e1e4dcb529c
[ "MIT" ]
null
null
null
100 Exercicios/Mundo 1/EX-27 - PRIMEIRO E ÚLTIMO NOME DE UMA PESSOA.py
marevandro/Python-Exercicios
e377b6ab7343f82e44f32ac39abf2e1e4dcb529c
[ "MIT" ]
null
null
null
"""Read a person's full name and print the first and last names separately.

(Exercise 27 — original statement in Portuguese: 'Faça um programa que leia
o nome completo de uma pessoa, mostrando em seguida o primeiro e o último
nome separadamente.')
"""
print('''Não e necessário utilizarmos o len nessa situação, ficou entendido no último exercício que a leitura pode ser vista de trás pra frente utilizando -1''')

# Fix: the original bound these throwaway pause values to `next`, shadowing
# the builtin of the same name; use `_` instead.
_ = input("Aperte enter para continuar: ")

name = str(input('Digite seu nome completo: ')).strip()
n1 = name.split()  # whitespace-separated name parts
print(f"Seu primeiro nome é {n1[0]}.")
print(f"Seu ultimo nome é {n1[-1]}.")

_ = input("""Vamos verificar a resolução do curso em video. Aperte ENTER: """)

# Reference solution from the course video.
n = str(input('Digite seu nome completo: '))
nome = n.split()
print('Muito prazer em te conhecer!')
print('Seu primeiro nome é {}'.format(nome[0]))
# nome[-1] is the idiomatic equivalent of the original nome[len(nome)-1]
print('Seu último nome é {}'.format(nome[-1]))
38.65
82
0.71022
126
773
4.357143
0.539683
0.03643
0.03643
0.061931
0.105647
0.105647
0
0
0
0
0
0.012121
0.146184
773
20
83
38.65
0.819697
0.153946
0
0
0
0
0.639445
0
0
0
0
0
0
1
0
false
0
0
0
0
0.428571
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
null
0
0
0
0
0
0
0
0
0
0
0
1
0
2
b5c7a55fe855fc2212bba3c62bfa56f41cb12f40
3,213
py
Python
scripts/field/enter_402000630.py
G00dBye/YYMS
1de816fc842b6598d5b4b7896b6ab0ee8f7cdcfb
[ "MIT" ]
54
2019-04-16T23:24:48.000Z
2021-12-18T11:41:50.000Z
scripts/field/enter_402000630.py
G00dBye/YYMS
1de816fc842b6598d5b4b7896b6ab0ee8f7cdcfb
[ "MIT" ]
3
2019-05-19T15:19:41.000Z
2020-04-27T16:29:16.000Z
scripts/field/enter_402000630.py
G00dBye/YYMS
1de816fc842b6598d5b4b7896b6ab0ee8f7cdcfb
[ "MIT" ]
49
2020-11-25T23:29:16.000Z
2022-03-26T16:20:24.000Z
# Created by MechAviv
# Map ID :: 402000630
# Desert Cavern : Below the Sinkhole
# Update Quest Record EX | Quest ID: [34931] | Data: dir=1;exp=1


def _open_dialogue(speaker_id):
    """Boilerplate box-chat setup issued before every line of dialogue.

    Calls are made in exactly the order the original script used; only the
    speaker ID varies between occurrences.
    """
    sm.setSpeakerID(speaker_id)
    sm.setSpeakerType(3)
    sm.removeEscapeButton()
    sm.flipDialogue()
    sm.setBoxChat()
    sm.boxChatPlayerAsSpeaker()
    sm.setBoxOverrideSpeaker()
    sm.flipBoxChat()
    sm.flipBoxChatPlayerAsSpeaker()
    sm.setColor(1)


# --- cutscene intro: lock controls, zoom, fade in -------------------------
sm.curNodeEventEnd(True)
sm.setTemporarySkillSet(0)
sm.setInGameDirectionMode(True, False, False, False)
sm.setStandAloneMode(True)
sm.removeAdditionalEffect()
sm.zoomCamera(0, 2000, 0, -142, -250)
sm.blind(1, 255, 0, 0, 0, 0, 0)
sm.sendDelay(1200)
sm.blind(0, 0, 0, 0, 0, 1000, 0)
sm.sendDelay(1400)
sm.sendDelay(500)
sm.zoomCamera(3000, 1000, 3000, 100, 0)
sm.sendDelay(3500)

# --- dialogue -------------------------------------------------------------
_open_dialogue(3001510)
sm.sendNext("#face2#So this is what's below the sand.")

_open_dialogue(3001510)
sm.sendSay("#face2#And now we've all been separated.")

_open_dialogue(3001500)
sm.sendSay("#face0#Well...")

# Brief flash with an illustration overlay.
sm.blind(1, 150, 0, 0, 0, 500, 0)
sm.playSound("Sound/SoundEff.img/PinkBean/expectation", 100)
sm.OnOffLayer_On(500, "d0", 0, -80, -1, "Effect/Direction17.img/effect/ark/illust/7/1", 4, 1, -1, 0)
sm.sendDelay(1000)
sm.blind(0, 0, 0, 0, 0, 500, 0)
sm.OnOffLayer_Off(500, "d0", 0)
sm.sendDelay(500)

_open_dialogue(3001500)
sm.sendNext("#face0#At least we got this.")

_open_dialogue(3001510)
sm.sendSay("#face0#Wow! You managed to catch that while we were falling? Impressive!")

_open_dialogue(3001510)
sm.sendSay("#face2#I'm not happy about being this far underground. What was that demolitions dummy thinking?!")

_open_dialogue(3001510)
sm.sendSay("#face2#Now we're going to waste a bunch of time we don't have tracking everyone down.")

# --- outro: fade out, reset camera, restore player control ----------------
sm.showFadeTransition(0, 1000, 3000)
sm.zoomCamera(0, 1000, 2147483647, 2147483647, 2147483647)
sm.moveCamera(True, 0, 0, 0)
sm.sendDelay(300)
sm.removeOverlapScreen(1000)
sm.moveCamera(True, 0, 0, 0)
sm.setStandAloneMode(False)
sm.setTemporarySkillSet(0)
sm.setInGameDirectionMode(False, True, False, False)
21.42
111
0.76564
430
3,213
5.716279
0.313953
0.014646
0.014646
0.054109
0.639138
0.590724
0.584622
0.557771
0.557771
0.557771
0
0.088175
0.089325
3,213
149
112
21.563758
0.75188
0.042639
0
0.716981
0
0.018868
0.150765
0.027027
0
0
0
0
0
1
0
true
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
2
b5d98bf2d5dd851ded7bd9a59d520b9776cf8d07
139
py
Python
clinepunk/__init__.py
TaylorMonacelli/clinepunk
d9a354ad5b03305f9283fb39895f69c01b67d6f6
[ "MIT" ]
null
null
null
clinepunk/__init__.py
TaylorMonacelli/clinepunk
d9a354ad5b03305f9283fb39895f69c01b67d6f6
[ "MIT" ]
null
null
null
clinepunk/__init__.py
TaylorMonacelli/clinepunk
d9a354ad5b03305f9283fb39895f69c01b67d6f6
[ "MIT" ]
null
null
null
"""Top-level package for clinepunk.""" __author__ = """Taylor Monacelli""" __email__ = "taylormonacelli@gmail.com" __version__ = "0.1.14"
23.166667
39
0.71223
16
139
5.4375
1
0
0
0
0
0
0
0
0
0
0
0.032258
0.107914
139
5
40
27.8
0.669355
0.230216
0
0
0
0
0.465347
0.247525
0
0
0
0
0
1
0
false
0
0
0
0
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
2
b5f09e43123f9b60ca620739b8050060364eeeaa
638
py
Python
wecode/posts/admin.py
fullyalive/project_wecode
c68df66325fff7a982b9059a207d147679f8311d
[ "MIT" ]
null
null
null
wecode/posts/admin.py
fullyalive/project_wecode
c68df66325fff7a982b9059a207d147679f8311d
[ "MIT" ]
2
2019-02-22T17:43:18.000Z
2022-02-10T11:19:41.000Z
wecode/posts/admin.py
fullyalive/project_wecode
c68df66325fff7a982b9059a207d147679f8311d
[ "MIT" ]
null
null
null
from django.contrib import admin
from django_summernote.admin import SummernoteModelAdmin

from . import models


@admin.register(models.Post)
class PostAdmin(SummernoteModelAdmin):
    """Admin for Post; SummernoteModelAdmin supplies a rich-text editor widget."""
    # Disabled. NOTE(review): as written this is a plain string, not a
    # 1-tuple — it needs a trailing comma ('post_type',) if re-enabled.
    # list_filter = ('post_type')
    # Columns shown in the change list.
    list_display = ['id','title', 'post_type', 'creator', 'view_count','created_at']
    # Fields shown on the edit form.
    fields = ['title', 'post_type', 'creator', 'description', 'view_count', 'isImportant']
    # Change-list columns that link to the edit page.
    list_display_links = ('title','creator')


@admin.register(models.PostLike)
class LikeAdmin(admin.ModelAdmin):
    """Default admin for PostLike; no customization."""
    pass


@admin.register(models.PostComment)
class CommentAdmin(SummernoteModelAdmin):
    """Admin for PostComment with Summernote editing; no extra options."""
    pass
26.583333
61
0.702194
67
638
6.522388
0.492537
0.089245
0.130435
0.091533
0
0
0
0
0
0
0
0
0.169279
638
23
62
27.73913
0.824528
0.04232
0
0.125
0
0
0.177924
0
0
0
0
0
0
1
0
false
0.125
0.25
0
0.625
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
1
0
0
1
0
0
2
b5f95018616cf739ff08d74f4e51a3903e9e3555
3,181
py
Python
scripts/planetlab/gen_commands.py
SyncFree/SwiftCloud
ff87f80412211d2ac3647f27b016ba7f62b4dd0d
[ "Apache-2.0" ]
26
2015-09-29T14:55:56.000Z
2021-07-06T06:34:25.000Z
scripts/planetlab/gen_commands.py
marsleezm/cprdt
268838bb33423c30ad82b1bbef8789432eb6ed40
[ "Apache-2.0" ]
null
null
null
scripts/planetlab/gen_commands.py
marsleezm/cprdt
268838bb33423c30ad82b1bbef8789432eb6ed40
[ "Apache-2.0" ]
8
2015-04-29T09:32:34.000Z
2021-06-27T14:20:58.000Z
#!/usr/bin/python
# Generate a command file for adding bogus users to a file
# CLARIFY LICENSING: THIS SCRIPT COMES FROM WALTER PAPER PUBLISHED IN OSDI 2011
# NOTE(review): this is Python 2 code (print statements, py2 integer
# division). Run it under python2 only.
import sys, os, base64, random, time

# User names loaded from the input file; populated by readUsers().
allUsers = []
# Each simulated login issues this many commands before logging out.
commands_per_user=10

def doStatusLine(user):
    # Emit a 'status' command line.
    # NOTE(review): `activities` is never defined in this file — selecting
    # the 'status' command will raise NameError. Presumably it was defined
    # in a companion module or lost in a copy; confirm before use.
    return 'status;"{0}";\n'.format(activities[random.randint(0, len(activities)-1)])

def doPostLine(user):
    # Emit a 'post' command targeting a random user's wall.
    recipient = allUsers[random.randint(0, len(allUsers) - 1)]
    return 'post;{0};"What up, dawg";\n'.format(recipient)

def doReadLine(user):
    # Emit a 'read' command for a random peer.
    peer = allUsers[random.randint(0, len(allUsers) - 1)]
    return 'read;{0};\n'.format(peer)

def doFriendLine(user):
    # Emit a 'friend' command for a random peer.
    peer = allUsers[random.randint(0, len(allUsers) - 1)]
    return 'friend;{0};\n'.format(peer)

def doSeeFriendLine(user):
    # Emit a 'see_friends' command for a random peer.
    peer = allUsers[random.randint(0, len(allUsers) - 1)]
    return 'see_friends;{0};\n'.format(peer)

def doLogin(user):
    return 'login;{0};passwd;\n'.format(user)

def doLogout(user):
    return 'logout;{0};\n'.format(user)

def doMixedLine(user):
    # Placeholder; 'mixed' is dispatched to doMixed() instead (weight 0 below).
    return 0

# A mixed load
def doMixed(commandCount, fOut):
    # Build a weighted pool of command generators, then emit commandCount
    # commands in sessions of commands_per_user, drawing uniformly from
    # the pool (so weights become probabilities).
    mix = []
    for k, v in commands.items():
        for i in range(0,v[1]):
            mix.append(v[0])
    # py2 integer division: commandCount must be a multiple of
    # commands_per_user or the remainder is silently dropped.
    for i in range(0, commandCount/commands_per_user):
        # NOTE(review): % binds tighter than -, so this is
        # (i % len(allUsers)) - 1, which starts at the LAST user for i == 0.
        # Possibly intended i % (len(allUsers) - 1); confirm.
        userName = allUsers[i % len(allUsers) - 1]
        #login
        fOut.write('login;{0};passwd;\n'.format(userName))
        for i in range(0, commands_per_user):
            fOut.write(mix[random.randint(0, len(mix)-1)](userName))
        #logout
        fOut.write('logout;{0};\n'.format(userName))

# How to distribute the operations
# Maps command name -> (generator function, weight used by doMixed).
commands = {
    'status' : (doStatusLine , 5),
    'post' : (doPostLine, 5),
    'read' : (doReadLine, 80),
    'friend' : (doFriendLine , 2),
    'see_friends' : (doSeeFriendLine , 8),
    'mixed' : (doMixedLine , 0),
}

def readUsers(usersFile):
    # Populate allUsers from a semicolon-delimited file (field 1 is the name).
    # NOTE(review): the file handle is never closed.
    fIn = open(usersFile, 'r')
    for line in fIn:
        allUsers.append(line.split(';')[1])

def main():
    # Usage: gen_commands.py <command_type> <user_file> <count> [<out_file>]
    numUsers=0
    if (len(sys.argv) != 4 and len(sys.argv) != 5):
        print "Usage ", sys.argv[0],"<command_type> <user_file> <count> [<out_file>]"
        sys.exit()
    commandType = sys.argv[1]
    fName = sys.argv[2]
    if len(sys.argv) == 5:
        outFile = sys.argv[4]
        fOut = open(outFile, 'w')
    else:
        fOut = sys.stdout
    commandCount = int(sys.argv[3])
    # Resolve the command name to its generator; unknown names list the
    # supported commands and exit.
    foundOne = False
    doCommand = doStatusLine
    for k, v in commands.items():
        if sys.argv[1] == k:
            foundOne = True
            doCommand = v[0]
    if not foundOne:
        print 'supported commands'
        for k, v in commands.items():
            print k
        sys.exit(-1)
    random.seed(time.time())
    readUsers(fName)
    if sys.argv[1] == 'mixed':
        doMixed(commandCount, fOut)
    else:
        # py2 integer division, same remainder caveat as doMixed().
        for i in range(0, commandCount/commands_per_user):
            # Choose a user to run the next 10 commands under
            #userName = allUsers[random.randint(0, len(allUsers) - 1)]
            # NOTE(review): same (i % len(allUsers)) - 1 precedence quirk
            # as in doMixed(); the inner loop also reuses index name `i`.
            userName = allUsers[i % len(allUsers) - 1]
            #login
            fOut.write('login;{0};passwd;\n'.format(userName))
            for i in range(0, commands_per_user):
                fOut.write(doCommand(userName))
            #logout
            fOut.write('logout;{0};\n'.format(userName))

if __name__ == "__main__":
    main()
26.508333
83
0.623703
440
3,181
4.456818
0.288636
0.039266
0.028557
0.060683
0.374299
0.335543
0.304946
0.287608
0.267211
0.18154
0
0.026506
0.217227
3,181
119
84
26.731092
0.761044
0.101855
0
0.216867
1
0
0.106503
0
0
0
0
0
0
0
null
null
0.036145
0.012048
null
null
0.036145
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
2