source string | points list | n_points int64 | path string | repo string |
|---|---|---|---|---|
# -*- coding: utf-8 -*-
'''
Module for gathering disk information on Windows
:depends: - win32api Python module
'''
from __future__ import absolute_import
# Import python libs
import ctypes
import string
# Import salt libs
import salt.utils
try:
import win32api
except ImportError:
pass
# Define the module's virtual name
__virtualname__ = 'disk'
def __virtual__():
'''
Only works on Windows systems
'''
if salt.utils.is_windows():
return __virtualname__
return False
def usage():
'''
Return usage information for volumes mounted on this minion
CLI Example:
.. code-block:: bash
salt '*' disk.usage
'''
drives = []
ret = {}
drive_bitmask = ctypes.windll.kernel32.GetLogicalDrives()
for letter in string.uppercase:
if drive_bitmask & 1:
drives.append(letter)
drive_bitmask >>= 1
for drive in drives:
try:
(available_bytes,
total_bytes,
total_free_bytes) = win32api.GetDiskFreeSpaceEx(
'{0}:\\'.format(drive)
)
used = total_bytes - total_free_bytes
capacity = used / float(total_bytes) * 100
ret['{0}:\\'.format(drive)] = {
'filesystem': '{0}:\\'.format(drive),
'1K-blocks': total_bytes / 1024,
'used': used / 1024,
'available': total_free_bytes / 1024,
'capacity': '{0:.0f}%'.format(capacity),
}
except Exception:
ret['{0}:\\'.format(drive)] = {
'filesystem': '{0}:\\'.format(drive),
'1K-blocks': None,
'used': None,
'available': None,
'capacity': None,
}
return ret
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": false
},
{
"point_num": 2,
"id": "any_function_over_40_lines",
"question": "Is any function in this file longer than 40 lines?",
"answer": true
... | 3 | salt/modules/win_disk.py | bogdanr/salt |
import unittest
def soma(param, param1):
return param + param1
class BasicoTests(unittest.TestCase):
def test_soma(self):
resultado = soma(1, 2)
self.assertEqual(3, resultado)
resultado = soma(3, 2)
self.assertEqual(5, resultado)
if __name__ == '__main__':
unittest.main()
| [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
... | 3 | testes_basicos.py | renzon/pypraticot2 |
import sqlite3
from abc import ABCMeta, abstractmethod
from model.dao.daoexception import DAOException
class AbstractDAO(object):
__metaclass__ = ABCMeta
def __init__(self, conn):
self._conn = conn
"""
base CRUD operation
"""
# GENERIC CREATE FUNCTION
def _insert(self, request, parameters):
with self._conn as conn:
try:
c = conn.cursor()
c.execute(request, parameters)
conn.commit()
return c.lastrowid
except sqlite3.Error as ex:
conn.rollback()
DAOException(self, ex)
# GENERIC READ FUNCTION
def _read(self, request, parameters=None):
with self._conn as conn:
try:
c = conn.cursor()
if parameters is None:
c.execute(request)
else:
c.execute(request, parameters)
return c.fetchall()
except Exception as ex:
DAOException(self, ex)
# GENERIC UPDATE FUNCTION
def _update(self, request, parameters):
with self._conn as conn:
try:
c = conn.cursor()
c.execute(request, parameters)
conn.commit()
return True
except Exception as ex:
conn.rollback()
DAOException(self, ex)
return False
# GENERIC DELETE FUNCTION
def _delete(self, request, obj_id):
with self._conn as conn:
try:
c = conn.cursor()
c.execute(request, obj_id)
conn.commit()
return True
except Exception as ex:
conn.rollback()
DAOException(self, ex)
return False
| [
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
},
{
"point_num": 2,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (exclu... | 3 | model/dao/abstractdao.py | ChatNoir76/Championnat |
import typing
def floor_sum(n: int) -> int:
s = 0
i = 1
while i <= n:
x = n // i
j = n // x + 1
s += x * (j - i)
i = j
return s
def main() -> typing.NoReturn:
n = int(input())
print(floor_sum(n))
main()
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "all_return_types_annotated",
"question": "Does every function in this file have a return type annotation?",
"answe... | 3 | jp.atcoder/abc230/abc230_e/28080338.py | kagemeka/atcoder-submissions |
import tensorflow as tf
from tensorflow.keras import losses
import tensorflow.python.keras.backend as K
class ImageGradientDifferenceLoss(losses.Loss):
def __init__(self):
super().__init__()
def call(self, y_true, y_pred):
# for 5D inputs
gdl = 0
for i in range(y_true.shape[1]):
dy_true, dx_true = tf.image.image_gradients(y_true[:,i])
dy_pred, dx_pred = tf.image.image_gradients(y_pred[:,i])
gdl+=K.mean(K.abs(dy_pred - dy_true) + K.abs(dx_pred - dx_true))
print(gdl)
return gdl
class LPLoss(losses.Loss):
def __init__(self, l_num=2):
self.l_num = l_num #NOTE: tensorflow.loss must set at __init__.
super().__init__()
def call(self, y_true, y_pred, l_num=2):
mse = tf.math.reduce_mean((y_true - y_pred)**self.l_num, axis=0)
return tf.math.reduce_mean(mse)
class MaskSeq2seqLoss(losses.Loss):
def __init__(self, mask):
super().__init__()
self.mask = mask
def call(self, y_true, y_pred):
pass
class MaskMSELoss(tf.keras.losses.Loss):
def __init__(self, mask):
super().__init__()
self.mask = mask
def call(self, y_true, y_pred):
mse = tf.math.reduce_mean(tf.square(y_true - y_pred), axis=0)
mask_mse = tf.math.multiply(mse, self.mask)
return tf.math.reduce_mean(mask_mse)
class MaskSSIMLoss(tf.keras.losses.Loss):
def __init__(self, mask):
super().__init__()
self.mask = mask
def call(self, y_true, y_pred):
y_true_ = tf.math.multiply(y_true, self.mask)
y_pred_ = tf.math.multiply(y_pred, self.mask)
return 1 - tf.reduce_mean(
tf.image.ssim(y_true_, y_pred_, max_val=1.0, filter_size=3))
| [
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
},
{
"point_num": 2,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excludi... | 3 | src/factory/loss.py | leelew/HRSEPP |
#!/usr/bin/python3
"""
Flask App that integrates with AirBnB static HTML Template
"""
from flask import Flask, render_template, url_for
from models import storage
import uuid
# flask setup
app = Flask(__name__)
app.url_map.strict_slashes = False
port = 5000
host = '0.0.0.0'
# begin flask page rendering
@app.teardown_appcontext
def teardown_db(exception):
"""
after each request, this method calls .close() (i.e. .remove()) on
the current SQLAlchemy Session
"""
storage.close()
@app.route('/3-hbnb/')
def hbnb_filters(the_id=None):
"""
handles request to custom template with states, cities & amentities
"""
state_objs = storage.all('State').values()
states = dict([state.name, state] for state in state_objs)
amens = storage.all('Amenity').values()
places = storage.all('Place').values()
users = dict([user.id, "{} {}".format(user.first_name, user.last_name)]
for user in storage.all('User').values())
return render_template('3-hbnb.html',
states=states,
amens=amens,
places=places,
users=users,
cache_id=uuid.uuid4()
)
if __name__ == "__main__":
"""
MAIN Flask App"""
app.run(host=host, port=port)
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true... | 3 | web_dynamic/3-hbnb.py | Sranciato/AirBnB_clone_v4 |
import pytest
import pyansys
from pyansys import examples
from vtki.plotting import running_xserver
import os
@pytest.mark.skipif(not running_xserver(), reason="Requires active X Server")
def test_show_hex_archive():
examples.show_hex_archive(off_screen=True)
def test_load_result():
examples.load_result()
@pytest.mark.skipif(not running_xserver(), reason="Requires active X Server")
def test_show_displacement():
examples.show_displacement(interactive=False)
@pytest.mark.skipif(not running_xserver(), reason="Requires active X Server")
def test_show_stress():
examples.show_stress(interactive=False)
def test_load_km():
examples.load_km()
@pytest.mark.skipif(not running_xserver(), reason="Requires active X Server")
def test_show_cell_qual():
examples.show_cell_qual(meshtype='tet', off_screen=True)
examples.show_cell_qual(meshtype='hex', off_screen=True)
@pytest.mark.skipif(not running_xserver(), reason="Requires active X Server")
def test_cylinderansys_182():
exec_file = '/usr/ansys_inc/v182/ansys/bin/ansys182'
if os.path.isfile(exec_file):
assert examples.ansys_cylinder_demo(as_test=True)
@pytest.mark.skipif(not running_xserver(), reason="Requires active X Server")
def test_cylinderansys_150():
exec_file = '/usr/ansys_inc/v150/ansys/bin/ansys150'
if os.path.isfile(exec_file):
assert examples.ansys_cylinder_demo(exec_file, as_test=True)
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"ans... | 3 | tests/test_examples.py | guyms/pyansys |
import pytest
from mitmproxy.contentviews import protobuf
from . import full_eval
datadir = "mitmproxy/contentviews/test_protobuf_data/"
def test_view_protobuf_request(tdata):
v = full_eval(protobuf.ViewProtobuf())
p = tdata.path(datadir + "protobuf01")
with open(p, "rb") as f:
raw = f.read()
content_type, output = v(raw)
assert content_type == "Protobuf"
assert output == [[('text', '1: 3bbc333c-e61c-433b-819a-0b9a8cc103b8')]]
with pytest.raises(ValueError, match="Failed to parse input."):
v(b'foobar')
@pytest.mark.parametrize("filename", ["protobuf02", "protobuf03"])
def test_format_pbuf(filename, tdata):
path = tdata.path(datadir + filename)
with open(path, "rb") as f:
input = f.read()
with open(path + "-decoded") as f:
expected = f.read()
assert protobuf.format_pbuf(input) == expected
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"... | 3 | test/mitmproxy/contentviews/test_protobuf.py | 0x7c48/mitmproxy |
# Copyright (c) 2015 Rackspace, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
from poppy.manager.base import notifications
from tests.unit import base
class TestProviderWrapper(base.TestCase):
def setUp(self):
super(TestProviderWrapper, self).setUp()
self.notifications_wrapper_obj = notifications.NotificationWrapper()
def test_create(self):
mock_obj = mock.Mock()
mock_ext = mock.Mock(obj=mock_obj)
self.notifications_wrapper_obj.send(mock_ext,
"test_subject",
"test_mail_content")
mock_ext.obj.services_controller.send.assert_called_once_with(
"test_subject", "test_mail_content")
| [
{
"point_num": 1,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},... | 3 | tests/unit/manager/default/test_notification_wrapper.py | satroutr/poppy |
from django.db import models
from django.contrib.auth.models import User
from django.db.models.signals import post_save
from django.dispatch import receiver
class Profile(models.Model):
user = models.OneToOneField(User, on_delete=models.CASCADE)
full_name = models.CharField(max_length=100)
email = models.EmailField(max_length=150)
phone = models.CharField(max_length=10)
def __str__(self):
return self.user.username
@receiver(post_save, sender=User)
def update_profile_signal(sender, instance, created, **kwargs):
if created:
Profile.objects.create(user=instance)
instance.profile.save()
| [
{
"point_num": 1,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 2,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/c... | 3 | users/models.py | mohilkhare1708/descriptiveAnswerChecker |
from django.db import models
from django.core.urlresolvers import reverse
from accounts.models import Company
class Contact(models.Model):
company = models.ForeignKey(Company)
name = models.CharField(max_length=256)
identification = models.CharField(max_length=256, null=True, blank=True)
phone = models.CharField(max_length=256, null=True, blank=True)
address = models.CharField(max_length=256, null=True, blank=True)
email = models.EmailField(null=True, blank=True)
is_customer = models.BooleanField()
is_provider = models.BooleanField()
def get_absolute_url(self):
return reverse('contacts:detail', kwargs={'pk': self.pk})
def __str__(self):
return self.name
| [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},... | 3 | accountit/contacts/models.py | nicolasmesa/Accountit |
import copy
from bsm.config.util import package_path
from bsm.config.util import check_conflict_package
from bsm.util import safe_mkdir
from bsm.util.config import dump_config
from bsm.operation import Base
from bsm.logger import get_logger
_logger = get_logger()
class InstallPackageConfigError(Exception):
pass
class InstallPackageConfig(Base):
def execute(self, package, category, subdir, version, category_origin, subdir_origin, version_origin, from_install=False):
config_package_name = 'package_install' if from_install else 'package_runtime'
pkg_cfg_origin = copy.deepcopy(self._config[config_package_name].package_config(category_origin, subdir_origin, package, version_origin)['config_origin'])
pkg_cfg_origin['version'] = version
pkg_path = package_path(self._config['app'], self._config['category'], category, subdir, package, version)
ctg_cf, sd_cf, pkg_cf, ver_cf = check_conflict_package(pkg_path['main_dir'], self._config['package_runtime'])
if ctg_cf:
raise InstallPackageConfigError('Package path conflicts with package "{0}", category "{1}", subdir "{2}", version "{3}"'.format(pkg_cf, ctg_cf, sd_cf, ver_cf))
safe_mkdir(pkg_path['config_dir'])
dump_config(pkg_cfg_origin, pkg_path['config_file'])
self._config.reset()
| [
{
"point_num": 1,
"id": "every_class_has_docstring",
"question": "Does every class in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": false
},
{... | 3 | bsm/operation/install_package_config.py | bsmsoft/bsm |
import pytest
from runner import ProjectType
from glotter import project_test, project_fixture
from test.utilities import clean_list
invalid_permutations = (
'description,in_params,expected', [
(
'no input',
None,
'Usage: please provide a list of integers (e.g. "8, 3, 1, 2")'
), (
'empty input',
'""',
'Usage: please provide a list of integers (e.g. "8, 3, 1, 2")'
)
]
)
valid_permutations = (
'description,in_params,expected', [
(
'sample input no rotation',
'"3, 1, 2, 8"',
'29'
), (
'sample input one rotation',
'"1, 2, 8, 3"',
'29'
), (
'sample input many rotations',
'"8, 3, 1, 2"',
'29'
)
]
)
@project_fixture(ProjectType.MaximumArrayRotation.key)
def linear_search(request):
request.param.build()
yield request.param
request.param.cleanup()
@project_test(ProjectType.MaximumArrayRotation.key)
@pytest.mark.parametrize(valid_permutations[0], valid_permutations[1],
ids=[p[0] for p in valid_permutations[1]])
def test_maximimum_array_rotation_valid(description, in_params, expected, linear_search):
actual = linear_search.run(params=in_params)
assert actual.strip().lower() == expected
@project_test(ProjectType.MaximumArrayRotation.key)
@pytest.mark.parametrize(invalid_permutations[0], invalid_permutations[1],
ids=[p[0] for p in invalid_permutations[1]])
def test_maximimum_array_rotation_invalid(description, in_params, expected, linear_search):
actual = linear_search.run(params=in_params)
assert actual.strip() == expected
| [
{
"point_num": 1,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
},
{
"point_num": 2,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a ty... | 3 | test/projects/test_maximum_array_rotation.py | jrg94/sample-programs-in-every-language |
from django.db import models
from django.utils import timezone
import datetime
# Create your models here.
class Question(models.Model):
question_text = models.CharField(max_length=200)
pub_date = models.DateTimeField('date published')
# def was_published_recently(self):
# return self.pub_date >= timezone.now() - datetime.timedelta(days=1)
def was_published_recently(self):
now = timezone.now()
return now - datetime.timedelta(days=1) <= self.pub_date <= now
def __str__(self):
return self.question_text
class Choice(models.Model):
question = models.ForeignKey(Question, on_delete=models.CASCADE)
choice_text = models.CharField(max_length=200)
votes = models.IntegerField(default=0)
def __str__(self):
return self.choice_text
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
... | 3 | E73/framework-tutorial/mysite/polls/models.py | wendy006/Web-Dev-Course |
from django.contrib.postgres.fields import ArrayField
from django.db import models
from django.test import TestCase
from postgres_stats import Percentile
from .models import Number
class TestAggregates(TestCase):
def setUp(self):
numbers = [31, 83, 237, 250, 305, 314, 439, 500, 520, 526, 527, 533,
540, 612, 831, 854, 857, 904, 928, 973]
for n in numbers:
Number.objects.create(n=n)
def test_percentile_median(self):
results = Number.objects.all().aggregate(
median=Percentile('n', 0.5, output_field=models.FloatField()))
assert results['median'] == 526.5
def test_percentile_continuous(self):
results = Number.objects.all().aggregate(
quartiles=Percentile('n', [0.25, 0.5, 0.75],
output_field=ArrayField(models.FloatField())))
assert results['quartiles'] == [311.75, 526.5, 836.75]
def test_percentile_not_continuous(self):
results = Number.objects.all().aggregate(
quartiles=Percentile('n', [0.25, 0.5, 0.75],
continuous=False,
output_field=ArrayField(models.FloatField())))
assert results['quartiles'] == [305, 526, 831]
| [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": ... | 3 | tests/test_aggregates.py | rtidatascience/django-postgres-power |
from flask import Flask
from flask_graphql import GraphQLView
from models import db_session
from schema import schema, Department
app = Flask(__name__)
app.debug = True
@app.route('/')
def index():
return '<p> Hello World!</p>'
app.add_url_rule(
'/graphql',
view_func=GraphQLView.as_view(
'graphql',
schema=schema,
graphiql=True # for having the GraphiQL interface
)
)
@app.teardown_appcontext
def shutdown_session(exception=None):
db_session.remove()
if __name__ == '__main__':
app.run() | [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"ans... | 3 | graphql/flask-graphql-basic_example2/app.py | CALlanoR/virtual_environments |
__author__ = 'gkour'
import matplotlib.animation as manimation
class Animation:
def __init__(self, fig, file_name):
FFMpegWriter = manimation.writers['ffmpeg']
metadata = dict(title='Movie Test', artist='Matplotlib', comment='Movie support!')
self.writer = FFMpegWriter(fps=1, metadata=metadata)
self.writer.setup(fig, file_name+".mp4", 100)
def add_frame(self):
self.writer.grab_frame()
| [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
... | 3 | Animation.py | kourgeorge/plus-maze-simulator |
from unittest import TestCase
from expects import expect, equal, raise_error
from slender import List
class TestSelect(TestCase):
def test_select_if_func_is_valid(self):
e = List([1, 2, 3, 4, 5])
expect(e.select(lambda item: item > 3).to_list()).to(equal([4, 5]))
def test_select_if_func_is_invalid_for_all_items(self):
e = List([1, 2, 3, 4, 5])
expect(e.select(lambda item: item > 6).to_list()).to(equal([]))
def test_select_if_func_is_different(self):
e = List([1, 2, 3, 4])
expect(lambda: e.select('...')).to(raise_error(TypeError))
| [
{
"point_num": 1,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer":... | 3 | slender/tests/list/test_select.py | torokmark/slender |
"""Tests for the backend"""
from .tools import logprint, AppTestCase, load_file_to_dict, load_json
class GetConstructsAsGenbankTests(AppTestCase):
endpoint = 'get_constructs_as_genbanks'
defaults = dict(
database_token='',
constructsData={}
)
def test_emma_2_constructs_with_one_combinatorial(self):
json = load_json('emma_2_constructs_with_one_combinatorial.json')
response = self.run_job(json_request=json)
self.assertTrue('zip_file' in response)
class GetConstructsAsPDFTests(AppTestCase):
endpoint = 'get_constructs_as_pdf'
defaults = dict(constructsData={})
def test_emma_no_annotation_to_pdf(self):
json = load_json('emma_no_annotation_to_pdf.json')
response = self.run_job(json_request=json)
self.assertTrue('pdf_file' in response)
class SendOrderToEGFTests(AppTestCase):
endpoint = 'send_order_to_egf'
defaults = dict(constructsData={}, customer={})
def test_send_order_to_egf(self):
json = load_json('emma_send_order_to_egf.json')
response = self.run_job(json_request=json)
assert 'message' in response
self.assertTrue('order was sent' in response['message'])
| [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true... | 3 | backend/app/tests/tests.py | Edinburgh-Genome-Foundry/dab |
# coding: utf-8
"""
OpenAPI Petstore
This spec is mainly for testing Petstore server and contains fake endpoints, models. Please do not use this for any other purpose. Special characters: \" \\ # noqa: E501
The version of the OpenAPI document: 1.0.0
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
import typing # noqa: F401
from frozendict import frozendict # noqa: F401
import decimal # noqa: F401
from datetime import date, datetime # noqa: F401
from frozendict import frozendict # noqa: F401
from petstore_api.schemas import ( # noqa: F401
AnyTypeSchema,
ComposedSchema,
DictSchema,
ListSchema,
StrSchema,
IntSchema,
Int32Schema,
Int64Schema,
Float32Schema,
Float64Schema,
NumberSchema,
UUIDSchema,
DateSchema,
DateTimeSchema,
DecimalSchema,
BoolSchema,
BinarySchema,
NoneSchema,
none_type,
Configuration,
Unset,
unset,
ComposedBase,
ListBase,
DictBase,
NoneBase,
StrBase,
IntBase,
Int32Base,
Int64Base,
Float32Base,
Float64Base,
NumberBase,
UUIDBase,
DateBase,
DateTimeBase,
BoolBase,
BinaryBase,
Schema,
_SchemaValidator,
_SchemaTypeChecker,
_SchemaEnumMaker
)
class IntegerEnumBig(
_SchemaEnumMaker(
enum_value_to_name={
10: "POSITIVE_10",
11: "POSITIVE_11",
12: "POSITIVE_12",
}
),
IntSchema
):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
@classmethod
@property
def POSITIVE_10(cls):
return cls(10)
@classmethod
@property
def POSITIVE_11(cls):
return cls(11)
@classmethod
@property
def POSITIVE_12(cls):
return cls(12)
| [
{
"point_num": 1,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"ans... | 3 | samples/openapi3/client/petstore/python-experimental/petstore_api/model/integer_enum_big.py | chanjarster/openapi-generator |
class Decorator:
"""Clase de decorador simple."""
def __init__(self, a,b):
self.a = a
self.b = b
def suma(self):
print(self.a+self.b)
self.c=self.a*self.b
def resta(self):
print(self.c)
elvis=Decorator(10,5)
elvis.resta() | [
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": ... | 3 | pruebas/prueba54.py | elviscruz45/Selenium |
import pytest
from dependency_injector import Scope
from dependency_injector.errors import InvalidScopeError
from ..utils import Context
from . import ioc
class Service1:
pass
class Service2:
def __init__(self, service1: Service1):
self.service1 = service1
def test_inject_transient_into_dependent_should_fail(ioc):
ioc.register(Service1, Scope.TRANSIENT)
ioc.register(Service2, Scope.DEPENDENT)
context = Context()
with pytest.raises(InvalidScopeError):
ioc.get(Service2, context=context)
def test_inject_dependent_into_singleton_should_fail(ioc):
ioc.register(Service1, Scope.DEPENDENT)
ioc.register(Service2, Scope.SINGLETON)
with pytest.raises(InvalidScopeError):
ioc.get(Service2)
def test_inject_transient_into_singleton_should_fail(ioc):
ioc.register(Service1, Scope.TRANSIENT)
ioc.register(Service2, Scope.SINGLETON)
with pytest.raises(InvalidScopeError):
ioc.get(Service2)
def test_dependent_scope_cleanup(ioc):
ioc.register(Service1, Scope.DEPENDENT)
context = Context()
ioc.get(Service1, context=context)
assert id(context) in ioc.store_dependent
del context
assert len(ioc.store_dependent) == 0
| [
{
"point_num": 1,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 2,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answ... | 3 | test/sync_tests/test_scopes.py | livioribeiro/dependency-injector |
from rest_framework import status
from rest_framework.views import exception_handler as drf_exception_handler
from django.http import JsonResponse
def default_handler(exc, context):
# https://www.django-rest-framework.org/api-guide/exceptions/
# Call REST framework's default exception handler first,
# to get the standard error response.
response = drf_exception_handler(exc, context)
# Now add the HTTP status code to the response.
if response is not None:
if not response.data.get("detail"):
response.data = {
"detail": response.data,
"status_code": response.status_code,
}
else:
response.data["status_code"] = response.status_code
return response
def bad_request(request, exception, *args, **kwargs):
"""
Generic 400 error handler.
"""
data = {"error": "Bad Request (400)"}
return JsonResponse(data, status=status.HTTP_400_BAD_REQUEST)
| [
{
"point_num": 1,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than ... | 3 | degvabank/degvabank/core/exception_handler.py | Vixx-X/DEGVABanck-backend |
import os
import signal
import sys
import logging
import time
class Watcher(object):
"""this class solves two problems with multithreaded
programs in Python, (1) a signal might be delivered
to any thread (which is just a malfeature) and (2) if
the thread that gets the signal is waiting, the signal
is ignored (which is a bug).
The watcher is a concurrent process (not thread) that
waits for a signal and the process that contains the
threads. See Appendix A of The Little Book of Semaphores.
http://greenteapress.com/semaphores/
I have only tested this on Linux. I would expect it to
work on the Macintosh and not work on Windows.
"""
def __init__(self):
""" Creates a child thread, which returns. The parent
thread waits for a KeyboardInterrupt and then kills
the child thread.
"""
self.child = os.fork()
if self.child == 0:
return
else:
self.watch()
def watch(self):
try:
os.wait()
except KeyboardInterrupt:
logging.debug('Watcher process received KeyboardInterrupt')
signals = (
('SIGUSR2', 1),
('SIGTERM', 3),
('SIGKILL', 5),
)
for sig, sleep_time in signals:
if not os.path.exists('/proc/%d' % self.child):
logging.debug('Process terminated!')
break
else:
logging.debug('Sending %s signal to child process' % sig)
try:
os.kill(self.child, getattr(signal, sig))
except OSError:
pass
logging.debug('Waiting 1 second after sending %s' % sig)
time.sleep(sleep_time)
sys.exit()
def watch():
Watcher()
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
}... | 3 | grab/tools/watch.py | subeax/grab |
import unittest
from scrubadub.filth import Filth
from scrubadub.exceptions import InvalidReplaceWith, FilthMergeError
class FilthTestCase(unittest.TestCase):
def test_disallowed_replace_with(self):
"""replace_with should fail gracefully"""
filth = Filth()
with self.assertRaises(InvalidReplaceWith):
filth.replace_with('surrogate')
with self.assertRaises(InvalidReplaceWith):
filth.replace_with('something_invalid')
def test_nonoverlapping_filth(self):
"""can't merge non-overlapping filth"""
a_filth = Filth(beg=0, end=3, text="the")
b_filth = Filth(beg=4, end=7, text="end")
with self.assertRaises(FilthMergeError):
a_filth.merge(b_filth)
with self.assertRaises(FilthMergeError):
b_filth.merge(a_filth)
def test_text_merge(self):
"""make sure text length is correct"""
class SomeFilth(Filth):
type = 'something'
text = "the end"
a_filth = SomeFilth(beg=0, end=3, text=text[:3])
b_filth = SomeFilth(beg=1, end=7, text=text[1:])
c_filth = a_filth.merge(b_filth)
self.assertEqual(c_filth.text, text)
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true... | 3 | tests/test_filth.py | robclewley/scrubadub |
# coding: UTF-8
from __future__ import absolute_import
from datetime import datetime
from flask import current_app
from flask.json import JSONEncoder
from flask.ext.assets import Environment
from flask.ext.migrate import Migrate
from flask.ext.sqlalchemy import SQLAlchemy
db = SQLAlchemy()
class JSONSerializationMixin(object):
    """Mixin turning SQLAlchemy model instances into plain dicts and back."""

    # Attribute names excluded from serialize().
    ignored_fields = []
    # Extra property names appended to the serialized dict.
    serialize_property = []

    @classmethod
    def deserialize(cls, data):
        """Build an instance from a dict of constructor keyword arguments."""
        return cls(**data)

    def serialize(self):
        """Return a dict of non-underscore attribute values plus listed properties."""
        state_attrs = dict(self._sa_instance_state.attrs)
        result = {}
        for name, attr in state_attrs.items():
            if name.startswith('_') or name in self.ignored_fields:
                continue
            result[name] = attr.value
        for prop_name in self.serialize_property:
            result[prop_name] = getattr(self, prop_name)
        return result
class AppJSONEncoder(JSONEncoder):
    """Flask JSON encoder aware of app models and datetimes."""

    def default(self, o):
        # Models expose their own dict form via the serialization mixin.
        if isinstance(o, JSONSerializationMixin):
            return o.serialize()
        # NOTE(review): the trailing 'Z' implies UTC but naive datetimes are
        # emitted unconverted — confirm all stored datetimes are UTC.
        if isinstance(o, datetime):
            return o.strftime('%Y-%m-%dT%H:%M:%SZ')
        return super(AppJSONEncoder, self).default(o)
def init_app(app):
    """Wire assets, the database and migrations onto the Flask *app*."""
    Environment(app)
    db.init_app(app)
    Migrate(app, db)
| [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
... | 3 | sisgep1/base.py | ArthurPBressan/sisgep1 |
from transform import Transform
import tensorflow as tf
class StyleTransferTester:
    """Runs a pre-trained image-transform network over one content image.

    Builds the TF1 graph at construction time; test() restores the model
    and returns the stylized image.
    """

    def __init__(self, session, content_image, model_path):
        # TensorFlow session used for both restore and inference.
        self.sess = session
        # Content image as an array; its shape fixes the placeholder shape.
        self.x0 = content_image
        # Path to the pre-trained checkpoint to restore.
        self.model_path = model_path
        # Image transform network (defines the forward pass).
        self.transform = Transform()
        # Build the style-transfer graph once, up front.
        self._build_graph()
    def _build_graph(self):
        """Create placeholder -> transform-net -> clipped output tensors."""
        # Graph input: one image, same shape as the content image.
        self.x = tf.placeholder(tf.float32, shape=self.x0.shape, name='input')
        self.xi = tf.expand_dims(self.x, 0) # add one dim for batch
        # Result image from transform-net; input scaled to [0, 1].
        self.y_hat = self.transform.net(self.xi/255.0)
        self.y_hat = tf.squeeze(self.y_hat) # remove one dim for batch
        # Keep pixel values in valid 8-bit range.
        self.y_hat = tf.clip_by_value(self.y_hat, 0., 255.)
    def test(self):
        """Restore the checkpoint and return the transformed image array."""
        # Initialize parameters before restoring the saved ones.
        self.sess.run(tf.global_variables_initializer())
        # Load pre-trained model weights.
        saver = tf.train.Saver()
        saver.restore(self.sess, self.model_path)
        # Run the graph on the content image.
        output = self.sess.run(self.y_hat, feed_dict={self.x: self.x0})
        return output
| [
{
"point_num": 1,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 2,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/c... | 3 | style_transfer_tester.py | altonelli/tensorflow-fast-style-transfer |
import contextvars
import gettext
import os.path
from glob import glob
from app.t_string import TString
BASE_DIR = ""
LOCALE_DEFAULT = "en_US"
LOCALE_DIR = "locale"
# Locale codes discovered as subdirectories of <BASE_DIR>/<LOCALE_DIR>.
locales = frozenset(
    map(
        os.path.basename,
        filter(os.path.isdir, glob(os.path.join(BASE_DIR, LOCALE_DIR, "*"))),
    )
)
# One compiled "bot" gettext catalog per discovered locale.
gettext_translations = {
    locale: gettext.translation(
        "bot",
        languages=(locale,),
        localedir=os.path.join(BASE_DIR, LOCALE_DIR),
    )
    for locale in locales
}
# The default locale needs no catalog: identity translations.
gettext_translations["en_US"] = gettext.NullTranslations()
locales |= {"en_US"}
def use_current_gettext(*args, **kwargs) -> str:
    """Translate using the catalog for the locale in ``current_locale``.

    Falls back to plain :func:`gettext.gettext` when no catalogs were
    loaded, and to the ``LOCALE_DEFAULT`` catalog when the active locale
    has none.

    :return: The gettext for the current locale
    :rtype: str
    """
    if not gettext_translations:
        return gettext.gettext(*args, **kwargs)
    active = current_locale.get()
    translations = gettext_translations.get(
        active, gettext_translations[LOCALE_DEFAULT]
    )
    return translations.gettext(*args, **kwargs)
def translate(string: str) -> str:
    """Immediately translate *string* for the current locale.

    :param string: The text that needs translation
    :type string: str
    :return: The translated text
    :rtype: str
    """
    return str(TString(string, use_current_gettext))
def lazy_translate(string: str) -> TString:
    """Wrap *string* so translation happens when it is rendered, not now.

    :param string: The text that needs translation
    :type string: str
    :return: The TString object that can be translated later
    :rtype: TString
    """
    return TString(string, use_current_gettext)
# Context variable holding the active locale for the current task/thread.
current_locale: contextvars.ContextVar = contextvars.ContextVar("i18n")


def set_current_locale():
    """Reset the current locale to ``LOCALE_DEFAULT``."""
    current_locale.set(LOCALE_DEFAULT)


# Initialize so current_locale.get() never raises LookupError.
set_current_locale()
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answ... | 3 | app/i18n.py | CircuitsBots/discord-i18n |
import numpy as np
def accuracy(y: np.ndarray, y_prediction: np.ndarray) -> np.float32:
    """Calculates accuracy for true labels and predicted labels.

    Both arrays carry one row per sample and one column per class; a sample
    counts as correct when the argmax class of both rows agrees.

    :params y: True labels.
    :params y_prediction: Predicted labels, same shape as ``y``.
    :return: Fraction of correctly classified samples.
    :raises ValueError: If true labels and predicted labels are not of the same shape.
    """
    if y.shape != y_prediction.shape:
        raise ValueError(
            f"Expected true labels and predicted labels to be of same shape, received true labels with shape {str(y.shape)} and predicted labels with shape {str(y_prediction.shape)} instead."
        )
    true_classes = np.argmax(y, axis=1)
    predicted_classes = np.argmax(y_prediction, axis=1)
    return (true_classes == predicted_classes).sum() / y.shape[0]
def train_to_test_accuracy_gap(
    train_accuracy: np.float32, test_accuracy: np.float32
) -> np.float32:
    """Calculates the gap between the train and test accuracy of a classifier.

    The gap is train accuracy minus test accuracy, so a positive gap means
    the model does better on the training set.

    :params train_accuracy: The train accuracy.
    :params test_accuracy: The test accuracy.
    :return: The gap between the train and test accuracy.
    """
    return train_accuracy - test_accuracy
def train_to_test_accuracy_ratio(
    train_accuracy: np.float32, test_accuracy: np.float32
) -> np.float32:
    """Calculates the ratio between the train and test accuracy of a classifier.

    The ratio is calculated by dividing the train accuracy by the test
    accuracy (the original docstring said the opposite of what the code
    does). A ratio above 1 means the model does better on the train set.

    :params train_accuracy: The train accuracy.
    :params test_accuracy: The test accuracy (must be non-zero).
    :return: The ratio between the train and test accuracy.
    """
    return train_accuracy / test_accuracy
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": true
},
{
"point_num": 2,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/c... | 3 | privacy_evaluator/metrics/basics.py | mariesig/privacy-evaluator |
from unittest import TestCase
from ddtrace.ext.http import URL
from ddtrace.filters import FilterRequestsOnUrl
from ddtrace.span import Span
class FilterRequestOnUrlTests(TestCase):
    """FilterRequestsOnUrl drops traces whose URL tag matches its patterns."""

    def _trace_for(self, url):
        # Build a one-span trace tagged with the given URL.
        span = Span(name="Name", tracer=None)
        span.set_tag(URL, url)
        return [span]

    def test_is_match(self):
        url_filter = FilterRequestsOnUrl("http://examp.*.com")
        self.assertIsNone(url_filter.process_trace(self._trace_for(r"http://example.com")))

    def test_is_not_match(self):
        url_filter = FilterRequestsOnUrl("http://examp.*.com")
        self.assertIsNotNone(url_filter.process_trace(self._trace_for(r"http://anotherexample.com")))

    def test_list_match(self):
        url_filter = FilterRequestsOnUrl([r"http://domain\.example\.com", r"http://anotherdomain\.example\.com"])
        self.assertIsNone(url_filter.process_trace(self._trace_for(r"http://anotherdomain.example.com")))

    def test_list_no_match(self):
        url_filter = FilterRequestsOnUrl([r"http://domain\.example\.com", r"http://anotherdomain\.example\.com"])
        self.assertIsNotNone(url_filter.process_trace(self._trace_for(r"http://cooldomain.example.com")))
| [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
... | 3 | tests/tracer/test_filters.py | mbmblbelt/dd-trace-py |
"""Kraken - objects.Attributes.StringAttribute module.
Classes:
StringAttribute - Base Attribute.
"""
from kraken.core.objects.attributes.attribute import Attribute
from kraken.core.kraken_system import ks
class StringAttribute(Attribute):
    """String Attribute. Implemented value type checking."""
    def __init__(self, name, value="", parent=None):
        # NOTE: `basestring` makes this Python-2-only code.
        super(StringAttribute, self).__init__(name, value=value, parent=parent)
        if not isinstance(value, basestring):
            raise TypeError("Value is not of type 'str':" + str(value))
    def setValue(self, value):
        """Sets the value of the attribute.

        Arguments:
        value -- Value to set the attribute to (must be a string).

        Return:
        True if successful.

        Raises TypeError when value is not a string.
        """
        if not isinstance(value, basestring):
            raise TypeError("Value is not of type 'str':" + str(value))
        super(StringAttribute, self).setValue(str(value))
        return True
    def getRTVal(self):
        """Returns and RTVal object for this attribute.

        Return:
        RTVal wrapping the current string value.
        """
        return ks.rtVal('String', self._value)
    def getDataType(self):
        """Returns the name of the data type for this attribute.

        Return:
        string ('String')
        """
        return 'String'
{
"point_num": 1,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
... | 3 | Python/kraken/core/objects/attributes/string_attribute.py | FabricExile/Kraken |
# https://codeforces.com/problemset/problem/230/B
# ! Only those numbers are T-prime which are perfect sq and their sq. root is also prime
# ? By using the sieve of Eratosthenes algorithm, we are calculating the prime numbers up to 1000000 and checking they are prime or not
# ? Also checking that number is perfect Square
# ? By fulfilling these two conditions we can check for T-prime number
# ? limit = 1000000
def calculate_prime_flag_for_each_number_upto_limit(limit=1000000):
    """Sieve of Eratosthenes: list where index i is True iff i is prime.

    The original read a global ``limit`` that was only defined in a comment,
    so calling it raised NameError. It is now a parameter defaulting to
    1,000,000 (the problem's input bound), keeping the no-argument call
    site working.

    :param limit: exclusive upper bound of the sieve.
    """
    prime_flag = [True] * limit
    prime_flag[0] = prime_flag[1] = False
    for i in range(2, limit):
        if prime_flag[i]:
            # Start at i*i: smaller multiples were struck by smaller primes.
            for j in range(i * i, limit, i):
                prime_flag[j] = False
    return prime_flag
def check_if_a_number_is_t_prime(n):
    """Return True iff *n* is T-prime (square of a prime).

    Relies on the module-level ``prime_flag`` sieve for the primality of
    the square root.
    """
    if n == 4:
        return True
    # 4 aside, every square of a prime is odd and at least 9.
    if n < 4 or n % 2 == 0:
        return False
    root = n ** 0.5
    if root != int(root):
        return False
    return prime_flag[int(root)]
# NOTE(review): the sieve reads a global `limit` that only appears in a
# comment above (`# ? limit = 1000000`), so this call raises NameError as
# written — define `limit` before running.
prime_flag = calculate_prime_flag_for_each_number_upto_limit()
# First input line: how many numbers follow.
total_numbers = int(input())
input_array = list(map(int,input().split()))
# Print YES/NO per queried number, as the judge expects.
for i in input_array:
    if check_if_a_number_is_t_prime(i)==True:
        print("YES")
    else:
        print("NO")
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": tru... | 3 | Codeforces/230B T primes.py | a3X3k/Competitive-programing-hacktoberfest-2021 |
def write(path, content):
    """Append *content* plus a trailing newline to the file at *path*."""
    with open(path, "a+") as out:
        out.write(content + '\n')
def read2mem(path):
    """Read the whole text file at *path* into memory and return it.

    Undecodable bytes are replaced with U+FFFD instead of aborting. The
    original caught UnicodeDecodeError by closing the file and then kept
    calling readlines() on the closed handle (raising ValueError), and
    accumulated the result with O(n^2) string concatenation.
    """
    chunks = []
    with open(path, errors='replace') as f:
        while True:
            lines = f.readlines(100)
            if not lines:
                break
            chunks.extend(lines)
    return ''.join(chunks)
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": tru... | 3 | GDLnotes/src/util/file_helper.py | dachmx/tfnotes |
"""empty message
Revision ID: 98f414e72943
Revises: 4f65c2238756
Create Date: 2019-04-30 23:46:07.283010
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '98f414e72943'
down_revision = '4f65c2238756'
branch_labels = None
depends_on = None
def upgrade():
    """Apply this revision: add nullable DateTime column `juma` to `timings`."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('timings', sa.Column('juma', sa.DateTime(), nullable=True))
    # ### end Alembic commands ###
def downgrade():
    """Revert this revision: drop the `juma` column from `timings`."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_column('timings', 'juma')
    # ### end Alembic commands ###
| [
{
"point_num": 1,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
},
{
"point_num": 2,
"id": "all_return_types_annotated",
"question": "Does every function in this file have a return type annotation?... | 3 | migrations/versions/98f414e72943_.py | ansu5555/mt-backend |
#!/usr/bin/env python3
# Copyright (c) 2015-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test p2p mempool message.
Test that nodes are disconnected if they send mempool messages when bloom
filters are not enabled.
"""
from test_framework.mininode import *
from test_framework.test_framework import BitcoinLamboTestFramework
from test_framework.util import *
class P2PMempoolTests(BitcoinLamboTestFramework):
    """A peer sending `mempool` must be dropped when bloom filters are off."""

    def set_test_params(self):
        self.setup_clean_chain = True
        self.num_nodes = 1
        # Disable the bloom-filter service so `mempool` requests are refused.
        self.extra_args = [["-peerbloomfilters=0"]]

    def run_test(self):
        # Connect a mininode peer to the only node.
        peer = NodeConnCB()
        conn = NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], peer)
        peer.add_connection(conn)
        NetworkThread().start()
        peer.wait_for_verack()
        # Request the mempool; the node must disconnect us for it.
        peer.send_message(msg_mempool())
        peer.wait_for_disconnect()
        # The mininode must be gone from the node's peer list.
        assert_equal(len(self.nodes[0].getpeerinfo()), 0)
# Script entry point: run this functional test standalone.
if __name__ == '__main__':
    P2PMempoolTests().main()
| [
{
"point_num": 1,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
... | 3 | test/functional/p2p-mempool.py | btclambo/BitcoinLambo |
import unittest
from checkov.terraform.checks.resource.gcp.GoogleCloudSqlDatabaseRequireSsl import check
from checkov.common.models.enums import CheckResult
class GoogleCloudSqlDatabaseRequireSsl(unittest.TestCase):
    """Checkov check: Cloud SQL instances must require SSL connections."""

    def test_failure(self):
        # No ip_configuration.require_ssl present -> check must fail.
        conf = {'name': ['google_cluster'], 'monitoring_service': ['none']}
        self.assertEqual(CheckResult.FAILED, check.scan_resource_conf(conf=conf))

    def test_success(self):
        # require_ssl explicitly True -> check must pass.
        conf = {'settings': [{'tier': ['1'], 'ip_configuration': [{'require_ssl': [True]}]}]}
        self.assertEqual(CheckResult.PASSED, check.scan_resource_conf(conf=conf))
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| [
{
"point_num": 1,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},... | 3 | tests/terraform/checks/resource/gcp/test_GoogleCloudSqlDatabaseRequireSsl.py | cclauss/checkov |
from Element.FlutterFind import FlutterFind
from selenium.common.exceptions import WebDriverException, NoSuchElementException
from Utilitys.WaitUtils import WaitUtils
class FlutterElement(FlutterFind):
    """Helpers around the Flutter Appium driver with a one-shot retry."""

    def __init__(self, driver):
        FlutterFind.__init__(self)
        self.driver = driver
        # Polling interval / timeout in seconds for waits.
        self.interval = 0.5
        self.timeout = 20

    def _attempt_twice(self, action):
        """Run *action* up to two times; raise NoSuchElementException if both fail.

        Factors out the duplicated try/retry blocks of the original methods.
        """
        for attempt in (1, 2):
            try:
                return action()
            except Exception:
                if attempt == 2:
                    raise NoSuchElementException

    def find_flutter_element_and_click(self, value):
        """Find the element identified by *value* and click it (no retry,
        matching the original behaviour)."""
        try:
            self.driver.find_flutter_element(value).click()
        except Exception:
            raise NoSuchElementException

    def flutter_scroll_to_text(self, value):
        """Scroll the element identified by *value* into view, retrying once."""
        def action():
            WaitUtils.flutter_wait_for_element(self.driver, value)
            self.driver.execute_script(
                "flutter:scrollIntoView", value, 0.1)
        self._attempt_twice(action)

    def find_flutter_element_sendkeys(self, locator, value):
        """Send *value* to the element at *locator*, retrying once."""
        def action():
            # NOTE(review): the wait targets `value` (the text to send), not
            # `locator` — looks suspicious, confirm intended.
            WaitUtils.flutter_wait_for_element(self.driver, value)
            self.driver.elementSendKeys(locator, value)
        self._attempt_twice(action)
| [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cl... | 3 | Element/FlutterElement.py | sunnyyukaige/Automation-core |
import os.path
try:
    from bpython._version import __version__ as version
except ImportError:
    # Version file is generated at build time; fall back when absent.
    version = 'unknown'
__version__ = version
# Absolute path of the directory containing this module.
package_dir = os.path.abspath(os.path.dirname(__file__))
def embed(locals_=None, args=None, banner=None):
    """Drop into a bpython curses shell.

    :param locals_: namespace dict exposed inside the shell.
    :param args: CLI-style argument list for bpython; defaults to
        ``['-i', '-q']`` (interactive, quiet).
    :param banner: text printed before the prompt.
    """
    # Avoid a mutable default argument (the original default list would be
    # shared across calls and could be mutated by bpython's main()).
    if args is None:
        args = ['-i', '-q']
    from bpython.cli import main
    return main(args, locals_, banner)
def bpython_curses_shell_runner(env, help):
    """Pyramid shell-runner hook: open bpython with *env* as the namespace
    and *help* (plus a newline) as the banner."""
    return embed(locals_=env, banner=help + '\n')
{
"point_num": 1,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docst... | 3 | pyramid_bpython_curses.py | iivmok/pyramid_bpython_curses |
from congregation.config.network import NetworkConfig
from congregation.config.codegen import CodeGenConfig, JiffConfig
class Config:
    """Aggregates the system's sub-configurations, keyed by their cfg_key."""

    def __init__(self):
        # Maps cfg_key -> config object (network / codegen / jiff).
        self.system_configs = {}

    def add_config(self, cfg: "NetworkConfig | CodeGenConfig | JiffConfig"):
        """Register *cfg* under its ``cfg_key``; returns self for chaining.

        (The original annotation was a list literal, which is not a valid
        type hint; a quoted union documents the same intent lazily.)
        """
        self.system_configs[cfg.cfg_key] = cfg
        return self

    @staticmethod
    def from_env():
        """Build a Config with network, codegen and JIFF configs from the environment."""
        conf = Config()
        conf.add_config(NetworkConfig.from_env())
        conf.add_config(CodeGenConfig.from_env())
        conf.add_config(JiffConfig.from_env())
        return conf
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answ... | 3 | congregation/config/config.py | CCD-HRI/congregation |
import csv
# ==========================================================
# Function for reading a CSV file into a dictionary format
# ==========================================================
def read_variables_csv(csvfile):
    """
    Builds a Python dictionary object from an input CSV file.
    Helper function to read a CSV file on the disk, where user stores the limits/ranges of the process variables.
    Output of this function can be used directly with any DOE builder function
    The CSV file should be in the same directory

    Returns a dict mapping column name -> list of float values, or -1 on
    any read/parse error (original contract preserved).
    """
    dict_key = {}
    try:
        # Single pass over the file. The original re-opened and re-read the
        # whole file once per column (O(columns * rows) file reads) and used
        # a bare `except:`.
        with open(csvfile) as f:
            reader = csv.DictReader(f)
            for field in reader.fieldnames:
                dict_key[field] = []
            for row in reader:
                for field in dict_key:
                    dict_key[field].append(float(row[field]))
        return dict_key
    except Exception:
        print("Error in reading the specified file from the disk. Please make sure it is in current directory.")
        return -1
# ===============================================================
# Function for writing the design matrix into an output CSV file
# ===============================================================
def write_csv(df, filename, rounding=2):
    """
    Writes a CSV file on to the disk from the computed design matrix

    filename: To be specified by the user. Just a name is fine. .CSV extension will be added automatically.
    rounding: Number up to which decimal the output will be rounded off. Often needed for practical DOE plans.

    Returns -1 on failure, None on success (original contract preserved).
    """
    df_copy = round(df, rounding)
    try:
        # endswith() instead of a substring test: the original
        # `'.csv' not in filename` misclassified names like 'my.csv.bak'.
        if not filename.endswith('.csv'):
            filename = filename + '.csv'
        df_copy.to_csv(filename, index=False)
    except Exception:
        # Narrowed from a bare `except:`; still silent-by-design.
        return -1
{
"point_num": 1,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
},
{
"point_num": 2,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside... | 3 | doepy/Test/doepy/read_write.py | Sigmun/doepy |
from app import db
from datetime import datetime, timedelta
class User(db.Model):
    """Bank account holder; one row per registered user."""
    __tablename__ = 'users'
    # One-to-many links to this user's transfers and login records.
    transfer_historys = db.relationship('TransferHistory', backref='users')
    login_logs = db.relationship('LoginLog', backref='users')
    id = db.Column(db.Integer, primary_key=True)
    username = db.Column(db.String(20), nullable=False, unique=True)
    # NOTE(review): password is a plain String(20); no hashing is visible
    # here — confirm hashing happens elsewhere before storage.
    password = db.Column(db.String(20), nullable=False)
    accountNo = db.Column(db.String(10), nullable=False, unique=True)
    # Balance stored as a string; callers must parse it for arithmetic.
    balance = db.Column(db.String(20), nullable=False)
    tel = db.Column(db.String(20), nullable=False)
    token = db.Column(db.String(20))
    citizenID = db.Column(db.String(13))
    def __repr__(self):
        return '<id {}>'.format(self.id)
class TransferHistory(db.Model):
    """One transfer of `amount` from `account` to `to_account`."""
    __tablename__ = 'transfer_historys'
    id = db.Column(db.Integer, primary_key=True)
    account = db.Column(db.String(10))
    to_account = db.Column(db.String(10))
    amount = db.Column(db.String(10))
    # Pass the callable, not its result: `default=datetime.now()` evaluated
    # once at import time, stamping every row with the process start time.
    datetime = db.Column(db.DateTime, default=datetime.now)
    user_id = db.Column(db.Integer, db.ForeignKey('users.id'), nullable=False)
    def __repr__(self):
        return '<id {}>'.format(self.id)
class LoginLog(db.Model):
    """Timestamped record of a user login."""
    __tablename__ = 'login_logs'
    id = db.Column(db.Integer, primary_key=True)
    # Pass the callable, not its result: `default=datetime.now()` evaluated
    # once at import time, stamping every row with the process start time.
    datetime = db.Column(db.DateTime, default=datetime.now)
    user_id = db.Column(db.Integer, db.ForeignKey('users.id'), nullable=False)
    def __repr__(self):
        return '<id {}>'.format(self.id)
class TokenCount(db.Model):
    """Holds an integer token count."""
    __tablename__ = 'token_counts'
    id = db.Column(db.Integer, primary_key=True)
    token_count = db.Column(db.Integer, nullable=False)
    def __repr__(self):
        return '<id {}>'.format(self.id)
| [
{
"point_num": 1,
"id": "every_class_has_docstring",
"question": "Does every class in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": false
},
{... | 3 | Backend/models.py | Saboor-Hakimi/WaTF-Bank |
from re import findall, match, sub
from colorifix.colorifix import erase
from pymortafix._getchar import _Getch
def get_sub_from_matching(dictionary, matching):
    """Given a Match from an alternation of *dictionary*'s keys (one group
    per key, in key order), return the replacement for whichever key's
    group matched — or ``dictionary.get(None)`` when no group did."""
    keys = list(dictionary)
    hit = next((i for i, group in enumerate(matching.groups()) if group), None)
    matched_key = keys[hit] if hit is not None else None
    return dictionary.get(matched_key)
def multisub(sub_dict, string, sequential=False):
    """Infinite sub in one iteration # sub_dict: {what_to_sub:substitution}

    With sequential=True each pattern is applied in turn (later patterns
    see earlier replacements); otherwise all patterns are combined into
    one alternation and applied in a single pass.
    """
    if sequential:
        for pattern, replacement in sub_dict.items():
            string = sub(pattern, replacement, string)
        return string
    combined = "|".join("({})".format(pattern) for pattern in sub_dict.keys())
    return sub(combined, lambda m: get_sub_from_matching(sub_dict, m), string)
def strict_input(
    text, wrong_text=None, choices=None, regex=None, check=None, flush=False
):
    """Get user input with some requirements.

    Re-prompts (with *wrong_text* if given) until the input satisfies at
    least one supplied validator: membership in *choices*, a *regex*
    match, or a truthy *check* callable. With no validators, any input is
    accepted. *flush* erases the prompt lines after each read.
    """
    inp = input(text)
    if flush:
        # Erase one line per newline in the prompt, plus the input line.
        erase(len(findall(r"\n", text)) + 1)
    # Loop while every supplied validator rejects the input; the final
    # `(choices or regex or check)` term accepts anything when no
    # validator was supplied at all.
    while (
        (not choices or choices and inp not in choices)
        and (not regex or regex and not match(regex, inp))
        and (not check or check and not check(inp))
        and (choices or regex or check)
    ):
        if wrong_text:
            inp = input(wrong_text)
        else:
            inp = input(text)
        if flush:
            erase(len(findall(r"\n", wrong_text or text)) + 1)
    return inp
def direct_input(choices=None):
    """Get user single char input w/o return, with optional restricted choices.

    Keeps reading keys until one is in *choices* (or returns the first key
    when *choices* is falsy).
    """
    inkey = _Getch()
    k = inkey()
    # The key may come back as str or bytes; normalize to str (with
    # replacement for undecodable bytes) for the membership test.
    while (
        choices
        and (isinstance(k, str) and k or k.decode(errors="replace")) not in choices
    ):
        k = inkey()
    return isinstance(k, str) and k or k.decode(errors="replace")
| [
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answ... | 3 | pymortafix/utils.py | Mortafix/PyMortafix |
#swatch.py
#by TakHayashi
#execution time measurement module by closure.
#usage: see test code in __main__ of this.
from time import perf_counter,time,process_time
#don't use process_time : resolution > 16ms
def startwatch():
    """Start a stopwatch and return a `lap(form=None)` closure.

    Each call to the closure returns (time since start, time since the
    previous lap). Pass a format spec such as '5.3f' to get both values
    back as formatted strings instead of floats.
    """
    start = perf_counter()
    previous = start
    def lap(form=None):
        nonlocal start, previous
        now = perf_counter()
        since_start = now - start
        since_last = now - previous
        previous = now
        if not form:
            return since_start, since_last
        return (
            '{1:{0}}'.format(form, since_start),
            '{1:{0}}'.format(form, since_last),
        )
    return lap
# Self-test: two independent stopwatches timing three busy loops.
if __name__ == '__main__':
    N=1000000
    lap = startwatch()
    lap2 = startwatch()
    # Distinct closures -> distinct ids (each has its own state).
    print(id(lap), id(lap2))
    for i in range(N):
        pass
    t, dt = lap()
    print(t, dt)
    for i in range(N):
        pass
    t, dt = lap('5.3f')
    print(t, dt)
    for i in range(N):
        pass
    t, dt = lap('7.5f')
    print(t, dt)
    # lap2 was never called, so its elapsed covers all three loops.
    t, dt = lap2()
    print(t, dt)
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"... | 3 | solver/swatch.py | TakHayashi/EnjoyProg |
class NeuralNet():
    """Abstract interface for a game-playing neural network.

    Subclasses implement training on self-play examples, prediction of
    (policy, value) for a board, and checkpoint save/load.
    """
    def __init__(self, game):
        # Base class holds no state; subclasses consume `game`.
        pass
    def train(self, examples):
        """
        This function trains the neural network with examples obtained from
        self-play.

        Input:
            examples: a list of training examples, where each example is of form
                      (board, pi, v). pi is the MCTS informed policy vector for
                      the given board, and v is its value. The examples has
                      board in its canonical form.
        """
        pass
    def predict(self, board):
        """
        Input:
            board: current board in its canonical form.

        Returns:
            pi: a policy vector for the current board- a numpy array of length
                game.getActionSize
            v: a float in [-1,1] that gives the value of the current board
        """
        pass
    def save_checkpoint(self, folder, filename):
        """
        Saves the current neural network (with its parameters) in
        folder/filename
        """
        pass
    def load_checkpoint(self, folder, filename):
        """
        Loads parameters of the neural network from folder/filename
        """
        pass
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
},
{
"point_num": 2,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excludi... | 3 | pommerman/NN/neural_net.py | MaxU11/playground |
import cv2
import os,shutil
import numpy as np
from Adb import Adb
import time
class Photo():
    '''
    Extract image information and compare images.
    '''
    def __init__(self,img_path) -> None:
        '''
        Load the image at *img_path* with OpenCV into self.img.
        NOTE(review): cv2.imread returns None for unreadable paths — no
        check is done here; confirm callers handle that.
        '''
        self.img = cv2.imread(img_path)
class sourceData():
    '''
    Collect and score test screenshots.
    '''
    def __init__(self) -> None:
        pass
    @staticmethod
    def getScreenPhoto():
        '''Capture 100 device screenshots over adb, one every ~3 seconds.'''
        adb = Adb(device='d5c42b2a')
        for _ in range(100):
            adb.screenCap()
            target = os.path.join(
                '.', 'photo',
                time.strftime("%Y-%m-%d_%H-%M-%S.png", time.localtime()),
            )
            adb.pullBackScreenCap(target)
            print("截图", time.asctime(time.localtime()))
            time.sleep(3)
    @staticmethod
    def calcOujilide(img):
        '''Mean channel value over the fixed ROI rows 938:1035, cols 1935:2247.'''
        roi = img[938:1035, 1935:2247]
        return np.sum(roi) / (roi.shape[0] * roi.shape[1] * roi.shape[2])
    @staticmethod
    def calcFangcha(img):
        '''
        Similarity score for the ROI rows 938:1013, cols 1935:2247:
        variance of the per-column means (mean over rows and channels,
        axis=(0, 2)), so identical regions score near 0.
        '''
        roi = img[938:1013, 1935:2247]
        per_column_mean = np.mean(roi, axis=(0, 2))
        return np.var(per_column_mean)
# Fixed: the original used `is '__main__'`, an identity comparison against a
# string literal (implementation-dependent; SyntaxWarning on CPython 3.8+).
if __name__ == '__main__':
    # Reference score from the live screenshot.
    static_num = sourceData.calcFangcha(cv2.imread(os.path.join("adb","screen.png")))
    for img_name in os.listdir(os.path.join("photo")):
        img = cv2.imread(os.path.join("photo",img_name))
        img_num = sourceData.calcFangcha(img)
        # Absolute difference against the reference score.
        chazhi = abs(static_num-img_num)
        # chazhi = (abs(static_num**2-img_num**2))**0.5
        print(img_name,"的差值为",chazhi)
        if chazhi<20:
            # Similar enough: keep a copy and the cropped ROI.
            print("Copy this file: ",img_name)
            shutil.copyfile(os.path.join("photo",img_name),os.path.join("photo2",img_name))
            print("Write this file: ",img_name)
            cv2.imwrite(os.path.join("photo3",img_name),img[938:1013,1935:2247])
# '''截图 400s'''
# sourceData.getScreenPhoto()
{
"point_num": 1,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
},
{
"point_num": 2,
"id": "every_class_has_docstring",
"question": "Does every class in this file have a docstring?... | 3 | Photo.py | Rougnt/ArkNightAutoClick |
#!/usr/bin/env python
from testcases import gen_random, gen_fake_random
from time import time
# Wall-clock start for the elapsed-time report at the bottom.
begin = time()
# Toggle between randomly generated and fixed conflict test cases.
RANDOM = True
# Number of nodes to color.
conf_cnt = 7
if RANDOM:
    test_cases = gen_random(10, conf_cnt)
else:
    test_cases = gen_fake_random()
def check_valid(colors: list) -> bool:
    """Return True iff *colors* is a conflict-free palette: inside every
    conflict group in the module-level ``test_cases``, all assigned colors
    are pairwise distinct."""
    for conflict_group in test_cases:
        assigned = [colors[i] for i in conflict_group]
        if len(set(assigned)) != len(conflict_group):
            # At least two conflicting nodes share a color.
            return False
    return True
def get_color_count(colors: list) -> int:
    """Return the number of distinct colors used in a palette."""
    distinct = set(colors)
    return len(distinct)
# Enumerate every coloring of conf_cnt nodes; node 0 is fixed to color 0
# (colorings are equivalent up to renaming, so this prunes symmetry).
palettes = [[0]]
for _ in range(1, conf_cnt):
    new_palettes = []
    for palette in palettes:
        for i in range(conf_cnt):
            new_palettes.append(palette + [i])
    palettes = new_palettes
# Scan all candidates, keeping every valid palette with minimal color count.
min_color = conf_cnt
min_palette = []
for palette in palettes:
    if not check_valid(palette):
        continue
    color_count = get_color_count(palette)
    if color_count < min_color:
        min_color = color_count
        min_palette = [palette]
    elif color_count == min_color:
        min_palette.append(palette)
end = time()
print("Min color count: %d" % min_color)
print("Possible coloring palettes: \n%s" %
      ('\n'.join([str(p) for p in min_palette])))
# NOTE(review): the minimum count is printed twice — likely a leftover.
print("Min color count: %d" % min_color)
print("\nTime elapsed: %.6fs" % (end - begin))
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "all_return_types_annotated",
"question": "Does every function in this file have a return type annotation?",
"answe... | 3 | chapter-1/1-9/src/brute_force.py | yuetsin/beauty-of-programming |
### AGENDA
import os
class Agenda:
    """Simple phone book mapping a contact name to its list of numbers."""

    def __init__(self):
        # name -> list of phone-number strings
        self.contatos = dict()
        # Total numbers stored (not distinct contacts).
        self.size = 0

    def __str__(self):
        return str(self.contatos)

    def addContato(self, nome, numero):
        """Add *numero* under *nome*, creating the contact if needed.

        setdefault replaces the original if/else branches; the stray
        `pass` statements were removed.
        """
        self.contatos.setdefault(nome, list()).append(numero)
        self.size += 1

    def count(self):
        """Return how many numbers have been added in total."""
        return self.size
novaAgenda = Agenda()
# Dead manual-test snippet kept as-is (note: calls AddContato, not addContato).
"""
novaAgenda.AddContato("narto", "123456")
novaAgenda.AddContato("narto", "654321")
novaAgenda.AddContato("narto", "024680")
novaAgenda.AddContato("junior", "654321")
novaAgenda.AddContato("junior", "024680")
"""
#print(novaAgenda)
canExit = False
canPrintAgenda = False
# Windows-only screen clear ("cls").
os.system("cls")
# Menu loop: 1 = add contact, 2 = list contacts, anything else = quit.
while not canExit:
    print()
    print("AGENDA")
    print("1 - Adicionar contato.")
    print("2 - Listar contatos.")
    print("Qualquer outro valor para sair.")
    if canPrintAgenda:
        print()
        print("Imprimindo Contatos")
        if novaAgenda.count() > 0:
            print(novaAgenda)
            pass
        else:
            print("Nao há contatos registrados.")
            pass
        canPrintAgenda = False
        pass
    print()
    # NOTE(review): non-numeric input raises ValueError here — unhandled.
    op = int(input("Digite sua opção: "))
    if op == 1:
        os.system("cls")
        print()
        print("Adicionando contato")
        nome = input("Digite o nome do contato: ")
        numero = input("Digite um numero: ")
        novaAgenda.addContato(nome, numero)
        pass
    elif op == 2:
        # Defer printing to the top of the next loop iteration.
        canPrintAgenda = True
        pass
    else:
        canExit = True
        os.system("cls")
        pass
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": false
},
{
"point_num": 2,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer":... | 3 | atividade-lista4/q3-done.py | nartojunior/ppgti0006-atividades |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkbaas.endpoint import endpoint_data
class SyncFabricChaincodeStatusRequest(RpcRequest):
    """RPC request for the Baas ``SyncFabricChaincodeStatus`` API (2018-07-31)."""

    def __init__(self):
        RpcRequest.__init__(self, 'Baas', '2018-07-31', 'SyncFabricChaincodeStatus')
        self.set_method('POST')
        # Attach endpoint resolution data when the SDK core supports it.
        for attr, resolver in (
            ("endpoint_map", endpoint_data.getEndpointMap),
            ("endpoint_regional", endpoint_data.getEndpointRegional),
        ):
            if hasattr(self, attr):
                setattr(self, attr, resolver())

    def get_OrganizationId(self):
        """Return the ``OrganizationId`` body parameter (or None)."""
        return self.get_body_params().get('OrganizationId')

    def set_OrganizationId(self, OrganizationId):
        """Set the ``OrganizationId`` body parameter."""
        self.add_body_params('OrganizationId', OrganizationId)

    def get_ChaincodeId(self):
        """Return the ``ChaincodeId`` body parameter (or None)."""
        return self.get_body_params().get('ChaincodeId')

    def set_ChaincodeId(self, ChaincodeId):
        """Set the ``ChaincodeId`` body parameter."""
        self.add_body_params('ChaincodeId', ChaincodeId)
{
"point_num": 1,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},... | 3 | aliyun-python-sdk-baas/aliyunsdkbaas/request/v20180731/SyncFabricChaincodeStatusRequest.py | yndu13/aliyun-openapi-python-sdk |
from io import BytesIO
import PIL.Image
from django.test import TestCase
from django.core.files.images import ImageFile
from wagtail.tests.utils import WagtailTestUtils
from wagtail.images.models import Image
from wagtail_meta_preview.utils import get_focal
# Taken from wagtail.images.test.utils
def get_test_image_file(filename="test.png", colour="white", size=(640, 480)):
    """Build an in-memory PNG wrapped in a Django ``ImageFile`` for tests."""
    buffer = BytesIO()
    PIL.Image.new("RGBA", size, colour).save(buffer, "PNG")
    return ImageFile(buffer, name=filename)
class TestUtils(TestCase, WagtailTestUtils):
    """Tests for the wagtail_meta_preview.utils helpers."""

    def setUp(self):
        # A plain white image with no focal point configured.
        self.image = Image(
            title="Test image", file=get_test_image_file(colour="white"),
        )

    def test_focal(self):
        # Without an explicit focal point, get_focal reports the centre.
        self.assertEqual(get_focal(self.image), {"x": "50.00%", "y": "50.00%"})
| [
{
"point_num": 1,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
... | 3 | tests/test_utils.py | marteinn/wagtail-meta-preview |
furl = None
try:
from furl import furl
except ImportError:
pass
import six
from sqlalchemy import types
from .scalar_coercible import ScalarCoercible
class URLType(types.TypeDecorator, ScalarCoercible):
    """
    URLType stores furl_ objects into database.

    .. _furl: https://github.com/gruns/furl

    ::

        from sqlalchemy_utils import URLType
        from furl import furl


        class User(Base):
            __tablename__ = 'user'

            id = sa.Column(sa.Integer, primary_key=True)
            website = sa.Column(URLType)


        user = User(website=u'www.example.com')

        # website is coerced to furl object, hence all nice furl operations
        # come available
        user.website.args['some_argument'] = '12'
        print user.website
        # www.example.com?some_argument=12
    """

    impl = types.UnicodeText

    def process_bind_param(self, value, dialect):
        # Serialise furl objects to text; pass plain strings through unchanged.
        if furl is not None and isinstance(value, furl):
            return six.text_type(value)
        if isinstance(value, six.string_types):
            return value

    def process_result_value(self, value, dialect):
        # Without furl installed (or for NULL), return the raw value as-is.
        if furl is None or value is None:
            return value
        return furl(value)

    def _coerce(self, value):
        # Coerce assigned strings to furl objects when furl is available.
        if furl is None:
            return value
        if value is not None and not isinstance(value, furl):
            return furl(value)
        return value
| [
{
"point_num": 1,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than cla... | 3 | sqlalchemy_utils/types/url.py | kelvinhammond/sqlalchemy-utils |
from unittest import TestCase
from tests import get_data
from pytezos.michelson.converter import build_schema, decode_micheline, encode_micheline, micheline_to_michelson
class StorageTestKT1DkY42MwUA1eC9jHNU2MVHqcLqhLvvbxaW(TestCase):
    """Round-trip and formatting checks for one zeronet contract's storage."""

    @classmethod
    def setUpClass(cls):
        cls.maxDiff = None
        # Fixture: a JSON snapshot of the contract's script code and storage.
        cls.contract = get_data('storage/zeronet/KT1DkY42MwUA1eC9jHNU2MVHqcLqhLvvbxaW.json')

    def test_storage_encoding_KT1DkY42MwUA1eC9jHNU2MVHqcLqhLvvbxaW(self):
        # decode -> encode must reproduce the original Micheline expression.
        type_expr = self.contract['script']['code'][1]
        val_expr = self.contract['script']['storage']
        schema = build_schema(type_expr)
        decoded = decode_micheline(val_expr, type_expr, schema)
        actual = encode_micheline(decoded, schema)
        self.assertEqual(val_expr, actual)

    def test_storage_schema_KT1DkY42MwUA1eC9jHNU2MVHqcLqhLvvbxaW(self):
        # Building a schema from the first script section must not raise.
        _ = build_schema(self.contract['script']['code'][0])

    def test_storage_format_KT1DkY42MwUA1eC9jHNU2MVHqcLqhLvvbxaW(self):
        # Converting Micheline to Michelson source must not raise.
        _ = micheline_to_michelson(self.contract['script']['code'])
        _ = micheline_to_michelson(self.contract['script']['storage'])
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answ... | 3 | tests/storage/cases/test_KT1DkY42MwUA1eC9jHNU2MVHqcLqhLvvbxaW.py | juztin/pytezos-1 |
"""Removing settings table
Revision ID: 2932df901655
Revises: 155c6ce689ed
Create Date: 2014-04-03 10:45:09.592592
"""
# revision identifiers, used by Alembic.
revision = '2932df901655'
down_revision = '155c6ce689ed'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Drop the legacy ``settings`` table."""
    op.drop_table('settings')
def downgrade():
    """Recreate the ``settings`` table and seed its single default row."""
    op.create_table(
        'settings',
        sa.Column('id', sa.Integer, primary_key=True),
        sa.Column(
            'lambda_score',
            sa.Float,
            server_default=sa.text('0.0'),
            nullable=False
        )
    )
    # Restore the single row this migration originally removed with the table.
    connection = op.get_bind()
    connection.execute('INSERT INTO settings(lambda_score) VALUES(0.0)')
| [
{
"point_num": 1,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"ans... | 3 | holmes/migrations/versions/2932df901655_removing_settings_table.py | scorphus/holmes-api |
import json
from adapters.base_adapter import Adapter
from adapters.adapter_with_battery import AdapterWithBattery
from devices.switch.on_off_switch import OnOffSwitch
class _SirenCommandsMixin:
    """Shared siren behaviour: a single on/off switch whose ON command maps to
    the zigbee2mqtt 'emergency' warning and any other command to 'stop'.

    Extracted to resolve the duplication between SirenAdapter and
    SirenAdapterWithBattery (they differ only in their base class).
    """

    def _setup_switch(self):
        # One on/off switch device rendered with the siren icon (13).
        self.switch = OnOffSwitch('switch', 'state')
        self.switch.set_icon(13)
        self.devices.append(self.switch)

    def handle_command(self, alias, device, command, level, color):
        """Translate a Domoticz switch command into a zigbee2mqtt warning message."""
        device_data = self._get_legacy_device_data()
        self.switch.handle_command(device_data, command, level, color)
        # ON triggers the siren ('emergency'); anything else silences it.
        mode = 'emergency' if command.upper() == 'ON' else 'stop'
        return {
            'topic': device_data['friendly_name'] + '/set',
            'payload': json.dumps({
                'warning': {
                    'mode': mode,
                    'duration': 10
                }
            })
        }


class SirenAdapter(_SirenCommandsMixin, Adapter):
    """Siren without battery reporting."""

    def __init__(self):
        super().__init__()
        self._setup_switch()


class SirenAdapterWithBattery(_SirenCommandsMixin, AdapterWithBattery):
    """Siren that additionally reports its battery level via the base class."""

    def __init__(self):
        super().__init__()
        self._setup_switch()
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
},
{
"point_num": 2,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excludi... | 3 | adapters/generic/siren.py | russdan/domoticz-zigbee2mqtt-plugin |
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import json
from threading import Event
import time
from odoo.http import request
class EventManager(object):
    """Collects device events and lets long-polling sessions wait for them."""

    def __init__(self):
        self.events = []    # chronological log of every device event
        self.sessions = {}  # session_id -> session dict (devices, event, result, ...)

    def _delete_expired_sessions(self, max_time=70):
        '''
        Clears sessions that are no longer called.
        :param max_time: time a session can stay unused before being deleted
        '''
        now = time.time()
        expired_sessions = [
            session
            for session in self.sessions
            if now - self.sessions[session]['time_request'] > max_time
        ]
        for session in expired_sessions:
            del self.sessions[session]

    def add_request(self, listener):
        """Register a listener session and return its session dict.

        ``listener`` must carry ``session_id`` and ``devices`` keys; the
        stored ``event`` is set by ``device_changed`` when a matching
        device fires.
        """
        self.session = {
            'session_id': listener['session_id'],
            'devices': listener['devices'],
            'event': Event(),
            'result': {},
            'time_request': time.time(),
        }
        self._delete_expired_sessions()
        self.sessions[listener['session_id']] = self.session
        return self.sessions[listener['session_id']]

    def device_changed(self, device):
        """Record an event for ``device`` and wake the sessions listening to it."""
        event = {
            **device.data,
            'device_identifier': device.device_identifier,
            'time': time.time(),
            'request_data': json.loads(request.params['data']) if request and 'data' in request.params else None,
        }
        self.events.append(event)
        for session in self.sessions:
            # is_set() replaces the deprecated camelCase isSet() alias.
            if device.device_identifier in self.sessions[session]['devices'] and not self.sessions[session]['event'].is_set():
                self.sessions[session]['result'] = event
                self.sessions[session]['event'].set()


event_manager = EventManager()
| [
{
"point_num": 1,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than... | 3 | addons/hw_drivers/event_manager.py | SHIVJITH/Odoo_Machine_Test |
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper functions shared by modules in aqt/jax."""
def normalize_axes(axes, ndim):
  """Return `axes` as a tuple with negative entries wrapped against `ndim`.

  Returning a tuple by convention; len() of it then gives the rank cheaply.
  """
  return tuple(axis if axis >= 0 else ndim + axis for axis in axes)
def broadcast_rank(source, target):
  """Broadcasts source to match target's rank following Numpy semantics."""
  # Prepend singleton dimensions until source has as many axes as target.
  leading = (1,) * (target.ndim - source.ndim)
  return source.reshape(leading + source.shape)
| [
{
"point_num": 1,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
},
{
"point_num": 2,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside... | 3 | aqt/jax/utils.py | DionysisChristopoulos/google-research |
from gosubl import gs
import os
import sublime_plugin
def _stx(v):
    """Switch a view still using a legacy GoSublime syntax file to the
    current GoSublime-Go syntax."""
    legacy = (
        'GoSublime.tmLanguage',
        'GoSublime-next.tmLanguage',
    )
    fn = 'Packages/GoSublime/syntax/GoSublime-Go.tmLanguage'
    # Bail out if the replacement syntax is not shipped with this install.
    if not os.path.exists(gs.dist_path('syntax/GoSublime-Go.tmLanguage')):
        return

    stx = v.settings().get('syntax')
    if not stx:
        return

    name = stx.replace('\\', '/').split('/')[-1]
    if name not in legacy:
        return

    print('GoSublime: changing syntax of `%s` from `%s` to `%s`' % (
        (v.file_name() or ('view://%s' % v.id())),
        stx,
        fn
    ))
    v.set_syntax_file(fn)
class Ev(sublime_plugin.EventListener):
    """Sublime listener that migrates legacy syntax whenever a view is shown."""

    def on_load(self, view):
        _stx(view)

    def on_activated(self, view):
        _stx(view)
| [
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answ... | 3 | GoSublime/gssynforce.py | MiYogurt/my-sublimetext3-plugin |
import socket
from six.moves.urllib.parse import urlparse
from frappe import get_conf
REDIS_KEYS = ("redis_cache", "redis_queue", "redis_socketio")
def is_open(ip, port, timeout=10):
    """Return True if a TCP connection to ``ip:port`` succeeds within ``timeout`` seconds."""
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.settimeout(timeout)
    try:
        sock.connect((ip, int(port)))
        sock.shutdown(socket.SHUT_RDWR)
        return True
    except socket.error:
        return False
    finally:
        # Always release the socket, whether or not the connect succeeded.
        sock.close()
def check_database():
    """Return ``{db_type: reachable}`` for the configured database server."""
    config = get_conf()
    db_type = config.get("db_type", "mariadb")
    # Default port depends on the engine: MariaDB 3306, PostgreSQL 5432.
    default_port = 3306 if db_type == "mariadb" else 5432
    host = config.get("db_host", "localhost")
    port = config.get("db_port", default_port)
    return {db_type: is_open(host, port)}
def check_redis(redis_services=None):
    """Return ``{service_key: reachable}`` for each configured redis service.

    :param redis_services: iterable of config keys; defaults to REDIS_KEYS.
    """
    config = get_conf()
    status = {}
    for key in (redis_services or REDIS_KEYS):
        # Config values are URLs; netloc carries "host:port".
        host, port = urlparse(config.get(key)).netloc.split(":")
        status[key] = is_open(host, port)
    return status
def check_connection(redis_services=None):
    """Return the combined reachability status of the database and redis."""
    return {**check_database(), **check_redis(redis_services)}
| [
{
"point_num": 1,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
},
{
"point_num": 2,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a ty... | 3 | frappe/utils/connections.py | erpnext-tm/frappe |
"""Test ModiscoFile
"""
import pandas as pd
from bpnet.modisco.files import ModiscoFile, ModiscoFileGroup
from bpnet.modisco.core import Pattern, Seqlet
def test_modisco_file(mf, contrib_file):
    """Smoke-test the public accessors of a ModiscoFile fixture."""
    # contrib_file required for `mf.get_ranges()`
    assert len(mf.patterns()) > 0
    # Single-pattern lookup by its metacluster-qualified name.
    p = mf.get_pattern("metacluster_0/pattern_0")
    assert isinstance(p, Pattern)
    assert len(mf.patterns()) > 0
    assert isinstance(mf.patterns()[0], Pattern)
    assert len(mf.pattern_names()) > 0
    assert isinstance(mf.pattern_names()[0], str)
    assert mf.tasks() == ['Oct4/profile/wn']
    assert 'patterns' in mf.stats()
    # Tabular accessors return pandas DataFrames.
    assert isinstance(mf.seqlet_df_instances(), pd.DataFrame)
    assert mf.n_seqlets("metacluster_0/pattern_0") > 0
    assert isinstance(mf.load_ranges(), pd.DataFrame)
    assert isinstance(mf.seqlets()['metacluster_0/pattern_0'][0], Seqlet)
def test_modisco_file_group(mfg):
    """Smoke-test ModiscoFileGroup accessors; pattern names are task-prefixed."""
    p = mfg.get_pattern("Oct4/metacluster_0/pattern_0")
    assert isinstance(p, Pattern)
    assert len(mfg.patterns()) > 0
    assert isinstance(mfg.patterns()[0], Pattern)
    assert len(mfg.pattern_names()) > 0
    assert isinstance(mfg.pattern_names()[0], str)
    assert mfg.tasks() == ['Oct4/profile/wn']  # since we used two times the Oct4 task
    assert mfg.n_seqlets("Oct4/metacluster_0/pattern_0") > 0
| [
{
"point_num": 1,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than... | 3 | tests/modisco/test_modisco_file.py | mlweilert/bpnet |
from typing import Optional
from sqlalchemy.orm import Session
from app.models.actor import Actor
from app.schemas.actor import ActorCreate, ActorInDB
from app.core.exceptions import exceptions
class CRUDActor:
    """Create/read database helpers for :class:`Actor` rows."""

    def get_by_name(self, db: Session, name: str) -> Optional[Actor]:
        """Return the actor named ``name``, or None when absent."""
        return db.query(Actor).filter(Actor.name == name).first()

    def create(self, db: Session, actor_create: ActorCreate) -> Actor:
        """Persist a new actor; raise NAME_NOT_AVAILABLE if the name is taken."""
        if self.get_by_name(db, actor_create.name):
            raise exceptions.NAME_NOT_AVAILABLE
        # Validate through the DB schema before constructing the ORM row.
        record = Actor(**ActorInDB(**actor_create.dict()).dict())
        db.add(record)
        db.commit()
        db.refresh(record)
        return record


crud_actor = CRUDActor()
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": tru... | 3 | app/crud/crud_actor.py | luovkle/FastAPI-Movie-Manager |
# Importing the base class
from KratosMultiphysics.CoSimulationApplication.base_classes.co_simulation_predictor import CoSimulationPredictor
# Other imports
import KratosMultiphysics.CoSimulationApplication.co_simulation_tools as cs_tools
def Create(settings, solver_wrapper):
    """Factory used by the CoSimulation framework to instantiate this predictor."""
    cs_tools.SettingsTypeCheck(settings)
    return LinearPredictor(settings, solver_wrapper)
class LinearPredictor(CoSimulationPredictor):
    """Predicts interface data by linear extrapolation of the last two steps."""

    def Predict(self):
        newest = self.interface_data.GetData(0)
        older = self.interface_data.GetData(1)
        # Linear extrapolation: x_{n+1} = 2*x_n - x_{n-1}
        self._UpdateData(2*newest - older)
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
... | 3 | applications/CoSimulationApplication/python_scripts/predictors/linear.py | lkusch/Kratos |
from functools import wraps
from itertools import count
from typing import Callable
from logzero import logger
from chaoslib.types import Journal
counter = None
def initcounter(f: Callable) -> Callable:
    """Decorator: reset the module-level ``counter`` before each call to ``f``."""
    @wraps(f)
    def inner(*args, **kwargs) -> None:
        global counter
        counter = count()
        f(*args, **kwargs)
    return inner


def keepcount(f: Callable) -> Callable:
    """Decorator: advance the module-level ``counter`` every time ``f`` runs."""
    @wraps(f)
    def inner(*args, **kwargs) -> None:
        next(counter)
        f(*args, **kwargs)
    return inner
@keepcount
def after_activity_control(**kwargs):
    """Control hook: counts and logs every activity execution."""
    logger.info("Activity is called")
@initcounter
def configure_control(**kwargs):
    """Control hook: resets the activity counter when the experiment is configured."""
    logger.info("configure is called")
def after_experiment_control(state: Journal, **kwargs):
    """Control hook: store the number of counted activities in the journal."""
    state["counted_activities"] = next(counter)
| [
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answ... | 3 | tests/fixtures/controls/dummy_with_decorated_control.py | Mickael-Roger/chaostoolkit-lib |
from machine import ADC , Pin
from Blocky.Pin import getPin
class Light:
    """Analog light sensor on a Blocky port, read through the ESP32 ADC."""

    def __init__(self, port, sensitive=4):
        pin = getPin(port)
        if pin[2] is None:
            # Port has no ADC-capable pin: reset the board rather than run broken.
            from machine import reset
            reset()
        self.adc = ADC(Pin(pin[2]))
        # Map sensitivity level to ADC attenuation; unknown levels leave the
        # default attenuation untouched (same as the original elif chain).
        attenuation = {
            1: ADC.ATTN_0DB,
            2: ADC.ATTN_2_5DB,
            3: ADC.ATTN_6DB,
            4: ADC.ATTN_11DB,
        }.get(sensitive)
        if attenuation is not None:
            self.adc.atten(attenuation)

    def read(self):
        """Return the raw ADC reading."""
        return self.adc.read()
| [
{
"point_num": 1,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
... | 3 | Light.py | curlyz/ESP32_Firmware |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
from numpy.testing import assert_allclose
from gammapy.datasets import Datasets
from gammapy.modeling.tests.test_fit import MyDataset
@pytest.fixture(scope="session")
def datasets():
return Datasets([MyDataset(name="test-1"), MyDataset(name="test-2")])
def test_datasets_init(datasets):
    """Datasets accepts both a plain list of Dataset objects and a Datasets."""
    # Passing a Python list of `Dataset` objects should work
    Datasets(list(datasets))
    # Passing an existing `Datasets` object should work
    Datasets(datasets)
def test_datasets_types(datasets):
    """All members of the fixture share one dataset type."""
    assert datasets.is_all_same_type
def test_datasets_likelihood(datasets):
    """Total fit statistic matches the regression reference value."""
    likelihood = datasets.stat_sum()
    assert_allclose(likelihood, 14472200.0002)
def test_datasets_str(datasets):
    """String representation mentions the container type."""
    assert "Datasets" in str(datasets)
def test_datasets_getitem(datasets):
    """Members are addressable by name."""
    assert datasets["test-1"].name == "test-1"
    assert datasets["test-2"].name == "test-2"
def test_names(datasets):
    """`names` lists member names in insertion order."""
    assert datasets.names == ["test-1", "test-2"]
def test_Datasets_mutation():
    """List-like mutation (insert/extend/remove/append/pop) keeps names unique."""
    dat = MyDataset(name="test-1")
    dats = Datasets([MyDataset(name="test-2"), MyDataset(name="test-3")])
    dats2 = Datasets([MyDataset(name="test-4"), MyDataset(name="test-5")])
    dats.insert(0, dat)
    assert dats.names == ["test-1", "test-2", "test-3"]
    dats.extend(dats2)
    assert dats.names == ["test-1", "test-2", "test-3", "test-4", "test-5"]
    dat3 = dats[3]
    dats.remove(dats[3])
    assert dats.names == ["test-1", "test-2", "test-3", "test-5"]
    dats.append(dat3)
    assert dats.names == ["test-1", "test-2", "test-3", "test-5", "test-4"]
    dats.pop(3)
    assert dats.names == ["test-1", "test-2", "test-3", "test-4"]
    # Duplicate names must be rejected by every insertion path.
    with pytest.raises(ValueError, match="Dataset names must be unique"):
        dats.append(dat)
    with pytest.raises(ValueError, match="Dataset names must be unique"):
        dats.insert(0, dat)
    with pytest.raises(ValueError, match="Dataset names must be unique"):
        dats.extend(dats2)
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": false
},
{
"point_num": 2,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding... | 3 | gammapy/datasets/tests/test_datasets.py | Rishank2610/gammapy |
#cas
def get_infos():
    """Return screen/font metrics and drawing primitives for TI CAS calculators.

    The ``poly_*`` callables wrap ``ti_graphics`` so callers can draw without
    platform knowledge; ``screen_y0`` shifts all drawing down by 30 pixels.
    """
    import ti_graphics, ti_system
    fnop = lambda : None
    # Placeholders first; the real implementations defined below shadow them.
    screen_w, screen_h, screen_y0, font_w, font_h, poly_set_pixel, poly_fill_rect, poly_draw_ellipse, poly_fill_circle, poly_get_key, poly_draw_string = 320, 210, 30, 10, 15, fnop, fnop, fnop, fnop, ti_system.wait_key, fnop
    def poly_fill_rect(x, y, w, h, c):
        ti_graphics.setColor(c)
        ti_graphics.fillRect(x, y + screen_y0, w, h)
    def poly_set_pixel(x, y, c):
        ti_graphics.setPixel(x, y + screen_y0, c)
    def poly_draw_ellipse(x, y, rx, ry, c):
        ti_graphics.setColor(c)
        x0, y0 = x - rx, y - ry
        # Draw up to 2x2 arcs to compensate for fractional top-left corners.
        for dy in range(1 + (y0 > int(y0))):
            for dx in range(1 + (x0 > int(x0))):
                ti_graphics.drawArc(x0 + dx, y0 + dy + screen_y0, 2 * rx, 2 * ry, 0, 3600)
    def poly_fill_circle(x, y, r, c):
        ti_graphics.setColor(c)
        # Bug fix: the original referenced the undefined name ``xx`` here.
        ti_graphics.fillCircle(x, y + screen_y0, r)
    def poly_draw_string(s, x, y, cf, cb):
        # Clear the cell background, then draw the glyph in the foreground color.
        poly_fill_rect(x, y, font_w, font_h, cb)
        ti_graphics.setColor(cf)
        ti_graphics.drawString(s, x, y + screen_y0)
    return screen_w, screen_h, font_h, poly_set_pixel, poly_fill_rect, poly_draw_ellipse, poly_fill_circle, poly_draw_string, poly_get_key
| [
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answ... | 3 | ch05/solar_ce/polysol.py | jabrena/space-math |
from extractor import ActivityExtractor
from fitparse import FitFile
import uuid
import os
from utils import get_base_path
import json
class FitFileActivityExtractor(ActivityExtractor):
    """Extracts activity records from a single Garmin .fit file.

    One .fit file maps to exactly one activity, so the per-id accessors are
    unsupported and a fresh UUID is generated per upload.
    """

    PROVIDER_NAME = 'fitfile'

    def __init__(self, file_stream):
        self.fitfile = FitFile(file_stream)
        # Fresh id per upload; .fit files carry no id usable here.
        self.activity_id = str(uuid.uuid4())

    def get_activity(self, id):
        raise NotImplementedError("One fit file always contains data for one activity only")

    def get_activities(self):
        """Return the list of record dicts for this file's single activity.

        NOTE(review): records up to index 196 are skipped — a hard-coded demo
        trim of the file's beginning; confirm before production use.
        """
        data = []
        for index, record in enumerate(self.fitfile.get_messages('record')):
            if index > 196: # for the demo, we need to cut off the beginning of the file
                # timestamp keeps its raw value; other metrics use parsed values.
                record_dict = {metric.name: metric.value if metric.name != "timestamp" else metric.raw_value
                               for metric in record}
                record_dict.update({
                    'activityId': self.activity_id,
                    'type': 'fitfile_upload',
                    "position_lat": self.semicircles_to_degrees(record_dict["position_lat"]),
                    "position_long": self.semicircles_to_degrees(record_dict["position_long"])})
                data.append(record_dict)
        return data

    def persist_activity(self, id):
        raise NotImplementedError("One fit file always contains data for one activity only")

    def persist_activities(self):
        """Dump all records of this activity to a JSON file under fitfile/data/."""
        file_path = os.path.join(get_base_path(), 'fitfile', 'data', f'activity-{self.activity_id}.json')
        activities = self.get_activities()
        with open(file_path, 'w') as f:
            json.dump(activities, f, indent=4, sort_keys=True)

    def get_activity_ids(self, activities):
        """Return the single generated activity id (the input is ignored)."""
        return [self.activity_id]

    def semicircles_to_degrees(self, semicircles):
        # FIT encodes coordinates as semicircles: 2**31 semicircles == 180 degrees.
        return semicircles * 180 / 2 ** 31
| [
{
"point_num": 1,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written ... | 3 | kafka-producer/file/extractor.py | peng-data-minimization/fitness-data-pipeline |
import numpy as np
import pytest
import networkx as nx
import ot
import tw
class TestBuildValidTreeMetric(object):
    """tw.distance must reject edge lists that do not form a tree."""

    @pytest.mark.parametrize(
        "num_node, edges",
        [
            # Both edge lists describe cycles, which are not valid trees.
            (5, [(i % 5, (i + 1) % 5, i + 1) for i in range(5)]),
            (3, [(i % 3, (i + 1) % 3, i + 1) for i in range(3)]),
        ],
    )
    def test_invalid_tree(self, num_node, edges):
        with pytest.raises(ValueError):
            # Unit mass on the first node vs. unit mass on the last node.
            first_prob = np.zeros(num_node)
            second_prob = np.zeros(num_node)
            first_prob[0] = 1.0
            second_prob[-1] = 1.0
            tw.distance(first_prob, second_prob, edges)
class TestTreeWasserstein(object):
    """Tree-Wasserstein distance must agree with exact EMD on random trees."""

    def test_tree_wasserstein(self):
        for i in range(100):
            # Random tree with unit edge weights and two random distributions.
            num_node = np.random.randint(10, 200)
            G = nx.generators.random_tree(num_node)
            edges = [(fr, to, 1) for (fr, to) in list(G.edges())]
            first_prob = np.random.rand(num_node)
            first_prob = first_prob / first_prob.sum()
            second_prob = np.random.rand(num_node)
            second_prob = second_prob / second_prob.sum()
            twd = tw.distance(first_prob, second_prob, edges)
            # Reference: exact optimal transport on the shortest-path metric.
            adj_dict = dict(nx.all_pairs_shortest_path_length(G))
            metric = np.array(
                [[adj_dict[i][j] for i in range(num_node)] for j in range(num_node)]
            )
            ans = ot.lp.emd2(first_prob, second_prob, metric)
            assert np.allclose([twd], [ans]), f"i: {i}, TW : {twd}, WD : {ans}"
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": false
... | 3 | tests/test_treewasserstein.py | InkToYou/TreeWasserstein |
#
# Custom boto3 session for use with temporary credentials with auto refresh
#
# https://github.com/boto/boto3/issues/443
# https://gist.github.com/jappievw/2c54fd3150fd6e80cc05a7b4cdea60f6
#
# thanks @jappievw https://gist.github.com/jappievw
#
from boto3 import Session
from botocore.credentials import (CredentialProvider, CredentialResolver,
RefreshableCredentials, create_assume_role_refresher)
from botocore.session import get_session
def make_refreshable_assume_role_session(main_session, assume_role_params):
    """Return a boto3 Session whose assume-role credentials refresh automatically.

    :param main_session: boto3 Session used to call STS for the role.
    :param assume_role_params: keyword arguments for ``sts.assume_role``.
    """
    resolver = CredentialResolver(providers=[
        SessionWithRefreshableAssumeRoleProvider(main_session, assume_role_params),
    ])
    botocore_session = get_session()
    botocore_session.register_component('credential_provider', resolver)
    return Session(botocore_session=botocore_session,
                   region_name=main_session.region_name)
class SessionWithRefreshableAssumeRoleProvider(CredentialProvider):
    """Credential provider that assumes a role and refreshes it automatically."""

    METHOD = 'custom-refreshable-assume-role'

    def __init__(self, main_session, assume_role_params):
        self._main_session = main_session
        self._assume_role_params = assume_role_params
        super().__init__()

    def load(self):
        """Build refreshable credentials backed by ``sts.assume_role``."""
        refresher = create_assume_role_refresher(
            self._main_session.client('sts'), self._assume_role_params)
        return RefreshableCredentials.create_from_metadata(
            metadata=refresher(),
            refresh_using=refresher,
            method=self.METHOD)
| [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},... | 3 | zentral/utils/boto3.py | arubdesu/zentral |
import paypalhttp
from urllib.parse import quote # Python 3+
# noinspection PyDictCreation
class OrdersValidateRequest:
    """
    Validates a payment method and checks it for contingencies.
    """

    def __init__(self, order_id):
        self.verb = "POST"
        # The order id is URL-quoted into the path template.
        self.path = "/v2/checkout/orders/{order_id}/validate-payment-method?".replace("{order_id}", quote(str(order_id)))
        self.headers = {"Content-Type": "application/json"}
        self.body = None

    def pay_pal_client_metadata_id(self, pay_pal_client_metadata_id):
        """Attach the PayPal-Client-Metadata-Id header."""
        self.headers["PayPal-Client-Metadata-Id"] = str(pay_pal_client_metadata_id)

    def request_body(self, order_action_request):
        """Set the request body; returns self for call chaining."""
        self.body = order_action_request
        return self
| [
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
},
{
"point_num": 2,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excludi... | 3 | _sdk/_paypal/orders/orders_validate_request.py | Memberships-Affiliate-Management-API/membership_and_affiliate_api |
from conans import ConanFile, CMake, tools
from os import path
class RoseArrayTestConan(ConanFile):
    """Conan test package: builds and runs the consumer test executable."""

    settings = "os", "compiler", "build_type", "arch"
    generators = "cmake"

    def build(self):
        # Standard CMake configure + build of the test_package project.
        cmake = CMake(self)
        cmake.configure()
        cmake.build()

    def test(self):
        # Run the built binary to prove the package links and executes.
        self.run(path.join("bin", "test_package"))
| [
{
"point_num": 1,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 2,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answ... | 3 | test_package/conanfile.py | markushedvall/rose-array |
import time
from bitarray import bitarray
from huffman import freq_string, huffCode
def traverse(it, tree):
    """
    return False, when it has no more elements, or the leave node
    resulting from traversing the tree
    """
    try:
        branch = tree[next(it)]
    except StopIteration:
        return False
    if isinstance(branch, list) and len(branch) == 2:
        return traverse(it, branch)
    return branch  # leaf node (symbol, or [] when the path is unpopulated)


def insert(tree, sym, ba):
    """
    insert symbol which is mapped to bitarray into tree
    """
    bit = ba[0]
    if len(ba) == 1:
        # Final bit: this slot must still be empty, otherwise two codes collide.
        if tree[bit] != []:
            raise ValueError("prefix code ambiguous")
        tree[bit] = sym
        return
    if tree[bit] == []:
        tree[bit] = [[], []]
    insert(tree[bit], sym, ba[1:])


def decode(codedict, bitsequence):
    """
    this function does the same thing as the bitarray decode method
    """
    # Build the binary decoding tree from the code dictionary.
    tree = [[], []]
    for sym, ba in codedict.items():
        insert(tree, sym, ba)
    # Repeatedly walk the tree until the bit iterator is exhausted.
    symbols = []
    it = iter(bitsequence)
    while True:
        leaf = traverse(it, tree)
        if leaf is False:
            break
        if leaf == []:
            raise ValueError("prefix code does not match data")
        symbols.append(leaf)
    return symbols
def main():
    """Benchmark the pure-Python decode() above against bitarray's C decode."""
    txt = open('README').read()
    code = huffCode(freq_string(txt))
    # Large sample: the README repeated 2000 times, Huffman-encoded.
    sample = 2000 * txt
    a = bitarray()
    a.encode(code, sample)
    # Time the decode function above
    start_time = time.time()
    res = decode(code, a)
    Py_time = time.time() - start_time
    assert ''.join(res) == sample
    print('Py_time: %.6f sec' % Py_time)
    # Time the decode method which is implemented in C
    start_time = time.time()
    res = a.decode(code)
    C_time = time.time() - start_time
    assert ''.join(res) == sample
    print('C_time: %.6f sec' % C_time)
    print('Ratio: %f' % (Py_time / C_time))
if __name__ == '__main__':
    main()
| [
{
"point_num": 1,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
},
{
"point_num": 2,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a ty... | 3 | examples/decoding.py | DominicBurkart/bitarray |
"""Abstraction to send a TunnelingRequest and wait for TunnelingResponse."""
from __future__ import annotations
from typing import TYPE_CHECKING
from xknx.knxip import (
CEMIFrame,
CEMIMessageCode,
KNXIPFrame,
TunnellingAck,
TunnellingRequest,
)
from .request_response import RequestResponse
if TYPE_CHECKING:
from xknx.io.udp_client import UDPClient
from xknx.telegram import IndividualAddress, Telegram
from xknx.xknx import XKNX
class Tunnelling(RequestResponse):
    """Class to TunnelingRequest and wait for TunnelingResponse."""

    def __init__(
        self,
        xknx: XKNX,
        udp_client: UDPClient,
        data_endpoint: tuple[str, int] | None,
        telegram: Telegram,
        src_address: IndividualAddress,
        sequence_counter: int,
        communication_channel_id: int,
    ):
        """Initialize Tunnelling class.

        ``data_endpoint`` is an optional separate (host, port) address used
        when sending the tunnelling request.
        """
        self.xknx = xknx
        self.udp_client = udp_client
        self.data_endpoint_addr = data_endpoint
        self.src_address = src_address
        # Base class drives the send/await-TunnellingAck cycle.
        super().__init__(xknx, self.udp_client, TunnellingAck)
        self.telegram = telegram
        self.sequence_counter = sequence_counter
        self.communication_channel_id = communication_channel_id

    async def send_request(self) -> None:
        """Build knxipframe (within derived class) and send via UDP."""
        # NOTE(review): ``self.udpclient`` (no underscore) is presumably set by
        # the RequestResponse base class; confirm it matches ``self.udp_client``.
        self.udpclient.send(self.create_knxipframe(), addr=self.data_endpoint_addr)

    def create_knxipframe(self) -> KNXIPFrame:
        """Create KNX/IP Frame object to be sent to device."""
        # Wrap the telegram in a CEMI L_DATA.req frame with our source address.
        cemi = CEMIFrame.init_from_telegram(
            self.xknx,
            telegram=self.telegram,
            code=CEMIMessageCode.L_DATA_REQ,
            src_addr=self.src_address,
        )
        tunnelling_request = TunnellingRequest(
            self.xknx,
            communication_channel_id=self.communication_channel_id,
            sequence_counter=self.sequence_counter,
            cemi=cemi,
        )
        return KNXIPFrame.init_from_body(tunnelling_request)
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": false
},
{
"point_num": 2,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": fals... | 3 | xknx/io/request_response/tunnelling.py | gr0vity-dev/xknx |
"""Functions to deal with hases."""
import hashlib
def read_in_chunks(file_object, chunk_size=1024):
    """
    Lazily read a file piece by piece (generator).

    Default chunk size: 1k.
    borrowed from http://stackoverflow.com/a/519653

    Parameters
    ----------
    file_object : file object
        File to read in chunks.
    chunk_size : int
        size of chunks to read (default 1024)

    Yields
    ------
    datachunks
        requested data
    """
    chunk = file_object.read(chunk_size)
    while chunk:
        yield chunk
        chunk = file_object.read(chunk_size)
def generate_sha256_hash(filepath):
    """
    Take a filepath and return a hex digest of a sha2 hash.

    Parameters
    ----------
    filepath : str
        Path to file.

    Returns
    -------
    str
        requested hash, or None if the file could not be opened
    """
    try:
        file_object = open(filepath, "rb")
    except IOError:
        # Preserve the original best-effort contract: unreadable file -> None.
        return None
    # 'with' guarantees the handle is closed even if reading raises;
    # the original only closed it on the success path.
    with file_object:
        sha_result = hashlib.sha256()
        for chunk in read_in_chunks(file_object):
            sha_result.update(chunk)
    return sha_result.hexdigest()
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": true
},
{
"point_num": 2,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/c... | 3 | rubberband/utils/hasher.py | ambros-gleixner/rubberband |
import torch
import torch.nn as nn
from torch.nn.functional import mse_loss
class PSNRLoss(nn.Module):
    r"""Criterion computing the PSNR between two images. Given an m x n image:

    .. math::

        \text{PSNR} = 10 \log_{10} \bigg(\frac{\text{MAX}_I^2}{MSE(I,T)}\bigg)

    where

    .. math::

        \text{MSE}(I,T) = \frac{1}{mn}\sum_{i=0}^{m-1}\sum_{j=0}^{n-1} [I(i,j) - T(i,j)]^2

    and :math:`\text{MAX}_I` is the maximum possible input value
    (e.g for floating point images :math:`\text{MAX}_I=1`).

    Arguments:
        max_val (float): Maximum value of input

    Shape:
        - input: :math:`(*)`
        - approximation: :math:`(*)` same shape as input
        - output: :math:`()` a scalar

    Examples:
        >>> kornia.losses.psnr_loss(torch.ones(1), 1.2*torch.ones(1), 2)
        tensor(20.0000) # 10 * log(4/((1.2-1)**2)) / log(10)

    Reference:
        https://en.wikipedia.org/wiki/Peak_signal-to-noise_ratio#Definition
    """

    def __init__(self, max_val: float) -> None:
        # Store the dynamic range; the actual computation lives in psnr_loss().
        super().__init__()
        self.max_val: float = max_val

    def forward(self, input: torch.Tensor, target: torch.Tensor) -> torch.Tensor:  # type: ignore
        return psnr_loss(input, target, self.max_val)
def psnr_loss(input: torch.Tensor, target: torch.Tensor, max_val: float) -> torch.Tensor:
    r"""Compute the PSNR between *input* and *target*.

    See :class:`~kornia.losses.PSNRLoss` for details.
    """
    # Both arguments must be tensors of identical shape.
    if not torch.is_tensor(input) or not torch.is_tensor(target):
        raise TypeError(f"Expected 2 torch tensors but got {type(input)} and {type(target)}")
    if input.shape != target.shape:
        raise TypeError(f"Expected tensors of equal shapes, but got {input.shape} and {target.shape}")
    mse = mse_loss(input, target, reduction='mean')
    # Move the peak value onto the input's device/dtype before dividing.
    peak = torch.tensor(max_val).to(input.device).to(input.dtype)
    return 10 * torch.log10(peak * peak / mse)
| [
{
"point_num": 1,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": true
},
{
"point_num": 2,
"id": "all_return_types_annotated",
"question": "Does every function in this file have a return t... | 3 | kornia/losses/psnr.py | pmeier/kornia |
from pathlib import Path
from hashlib import sha256
import os
import tarfile
import bldr.util
def targz_pack_atomic(tgz_next_name: Path, tgz_name: Path, source_path: Path):
    """
    Atomically create a new .tar.gz by writing to the "next" .tar.gz
    and renaming when complete.
    """
    targz_pack(tgz_next_name, source_path)
    # Path.replace() overwrites the destination in one atomic step on both
    # POSIX and Windows. The previous unlink()+rename() left a window during
    # which tgz_name did not exist at all, defeating the atomic swap.
    tgz_next_name.replace(tgz_name)


def targz_pack(tgz_name: Path, source_path: Path):
    """
    Create a new .tar.gz from the specified folder.

    Examples:
        history/current -> history/current.tar.gz
        history/generated/current -> history/generated/current.tar.gz
    """
    # Archive the folder under its own name so unpacking recreates it.
    with tarfile.open(tgz_name, "w:gz") as tar:
        tar.add(source_path, arcname=source_path.name)
def targz_unpack(tgz_path: Path, target_path: Path):
    """
    Expand a .tar.gz to the specified folder.

    Examples:
        history/current.tar.gz -> history/current
        history/generated/current.tar.gz -> history/generated/current
    """
    # No archive: just make sure an (empty) target folder exists.
    if not tgz_path.exists():
        target_path.mkdir(parents=True, exist_ok=True)
        return
    # Replace any previous contents of the target folder.
    if target_path.exists():
        bldr.util.rmtree(target_path)
    target_path.mkdir(parents=True)
    # NOTE(review): extractall() trusts member paths; fine for archives this
    # tool created itself, unsafe for untrusted tarballs (path traversal).
    with tarfile.open(tgz_path, "r:gz") as archive:
        archive.extractall(path=target_path.parent)
def targz_sha(tgz_path: Path) -> str:
    """Return the SHA-256 hex digest of the archive's raw bytes."""
    digest = sha256()
    digest.update(tgz_path.read_bytes())
    return digest.hexdigest()
{
"point_num": 1,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
},
{
"point_num": 2,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a ty... | 3 | bldr/gen/history.py | bldr-cmd/bldr-cmd |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
class AlipayOpenAppSilanApigraytwoQueryResponse(AlipayResponse):
    # Auto-generated response wrapper for alipay.open.app.silan.apigraytwo.query.
    def __init__(self):
        super(AlipayOpenAppSilanApigraytwoQueryResponse, self).__init__()
    def parse_response_content(self, response_content):
        # Delegate raw parsing to the base class; generated subclasses usually
        # populate response fields from the returned dict afterwards.
        response = super(AlipayOpenAppSilanApigraytwoQueryResponse, self).parse_response_content(response_content)
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
}... | 3 | alipay/aop/api/response/AlipayOpenAppSilanApigraytwoQueryResponse.py | snowxmas/alipay-sdk-python-all |
from custom_components.xiaomi_cloud_map_extractor.common.vacuum import XiaomiCloudVacuum
class XiaomiCloudVacuumV2(XiaomiCloudVacuum):
    """Cloud vacuum flavour that fetches maps via the v2 interim-file API."""

    def __init__(self, connector, country, user_id, device_id, model):
        super().__init__(connector, country, user_id, device_id, model)

    def get_map_url(self, map_name):
        """Return a download URL for the named map object, or None on failure."""
        obj_name = f"{self._user_id}/{self._device_id}/{map_name}"
        url = f"{self._connector.get_api_url(self._country)}/v2/home/get_interim_file_url"
        params = {
            "data": f'{{"obj_name":"{obj_name}"}}'
        }
        response = self._connector.execute_api_call(url, params)
        if response is None or "result" not in response:
            return None
        result = response["result"]
        if "url" not in result:
            return None
        return result["url"]

    def should_get_map_from_vacuum(self):
        """Maps always come from the cloud for this protocol version."""
        return False
| [
{
"point_num": 1,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than cla... | 3 | custom_components/xiaomi_cloud_map_extractor/common/vacuum_v2.py | GuyKh/Home-Assistant-custom-components-Xiaomi-Cloud-Map-Extractor |
from collections import defaultdict
def solution(n, results):
    """Count players whose rank is fully determined by the match results.

    :param n: number of players, numbered 1..n
    :param results: list of [winner, loser] match results
    :return: number of players with a decidable rank
    """
    answer = 0
    graph = defaultdict(list)
    for result in results:  # graph maps "player number" -> [results that player won]
        graph[result[0]].append(result)
    for key in range(1, n + 1):  # expand every result derivable by transitivity (syllogism)
        index = 0
        while True:
            if graph[key]:  # this player has at least one win to expand
                graph = syllogism(graph, graph[key][index])  # extend via transitivity
                if len(graph[key]) == index + 1:  # nothing new was added; next player
                    break
                else:  # a new result appeared; expand from it as well
                    index += 1
            else:  # this player has no wins; skip
                break
    cnt = [0] * n
    for key in graph.keys():  # count appearances of each player across results (edge count per vertex)
        for i in graph[key]:
            cnt[i[0] - 1] += 1
            cnt[i[1] - 1] += 1
    for i in cnt:  # a player connected to all n-1 others has a fully determined rank
        if i >= n - 1:
            answer += 1
    return answer
def syllogism(graph, result):
    """Extend graph with results implied by transitivity:
    if A beat B and B beat C, record that A beat C."""
    winner, loser = result
    known_wins = graph[winner]
    for follow_up in graph[loser]:
        implied = [winner, follow_up[1]]
        if implied not in known_wins:
            known_wins.append(implied)
    return graph
| [
{
"point_num": 1,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
},
{
"point_num": 2,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fe... | 3 | Programmers/rank.py | Park-Young-Hun/Algorithm |
# -*- coding: utf-8 -*-
# MIT License
#
# Copyright (c) 2018 Justin Bewley Lo (justinbewley.com)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
from pyrcm.terminal.rediscmd import RedisClusterCmd
class RedisClusterCmd_Exists(RedisClusterCmd):
    """EXISTS command: check whether a key is present in the cluster."""
    CMDNAME = 'EXISTS'
    CMDDETAILS = {
        'read_or_write': 'read',
        'description': 'returns whether the key exists in the cluster',
        'example': '{} [key]'.format(CMDNAME)
    }

    def __init__(self, rc_client, args):
        super(RedisClusterCmd_Exists, self).__init__(rc_client, args)

    def get_args_error(self):
        """Return an error message for malformed args, or None when valid."""
        if len(self.args) != 2:
            return "invalid number of arguments"
        return None

    def runcmd(self):
        """Execute EXISTS against the single key argument."""
        return self.rc_client.exists(self.args[1])
| [
{
"point_num": 1,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer":... | 3 | pyrcm/terminal/rediscmd_exists.py | booleys1012/redisclustermon |
from flask import g
from app.comm.CompositeOperate import CompositeOperate
from app.comm.SqlExecute import SqlExecute
from app.module_config import table_module_map
class CommentController(CompositeOperate):
    """Comment endpoint: decorates fetched comments with the current user's
    like status and nests replies under their root comments."""
    def __init__(self, module):
        super(CommentController, self).__init__(module)
    def after_deal_get(self):
        """Post-process a GET: add 'is_like' flags and fold replies into 'sub'."""
        comments = g.result.get("data")
        # Current user's id, if authenticated
        user_id = g.flask_httpauth_user.get('id', None) if g.flask_httpauth_user else None
        # Like records keyed by comment id
        comment_licks_dict = dict()
        if user_id is not None:
            sql_query = table_module_map['bloglikelog'].sql_query_default
            # NOTE(review): user_id comes from the authenticated session, but
            # interpolating it into SQL via f-string is still injection-prone;
            # prefer a parameterized query.
            sql_query = f'{sql_query} where bll_userid={user_id}'
            user_likes = SqlExecute.query_sql_data(sql_query)
            comment_licks_dict = {like['bll_blogcommentid']:like['bll_status'] for like in user_likes}
        # Mark every comment with the user's like status; keep only root comments
        new_comments = []
        for comment in comments:
            comment['is_like'] = comment_licks_dict.get(comment['id']) or 0
            if not comment['bc_commentupid']:
                new_comments.append(comment)
        for comment in new_comments:
            # Attach the replies of each root comment
            comment['sub'] = [sub for sub in comments if sub['bc_commentupid']==comment['id']]
        g.result['data'] = new_comments
    def before_deal_post(self):
        """Stamp the authenticated user's id onto the comment being created."""
        g.json_data["data"]["bc_createuid"] = g.flask_httpauth_user.get('id')
| [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answe... | 3 | blog_server/app/api/general/CommentController.py | szhu9903/flask-react-blog |
import math
#Class for Popularity based Recommender System model
class popularity_recommender_py():
    """Popularity-based recommender.

    Scores users by the log-damped total strength of interactions they
    received and recommends the overall most popular users.
    """

    def __init__(self):
        self.train_data = None                   # interaction DataFrame passed to create()
        self.user_id = None                      # id column/user handle passed to create()
        self.popularity_recommendations = None   # top-10 users by popularity score

    def smooth_user_preference(self, x):
        """Dampen a raw interaction strength with log2(1 + x)."""
        return math.log(1 + x, 2)

    # Create the popularity based recommender system model
    def create(self, train_data, user_id):
        """Build the popularity model from *train_data*.

        NOTE: this adds an 'eventStrength' column to the caller's DataFrame
        (kept for backward compatibility with existing callers).
        """
        self.train_data = train_data
        self.user_id = user_id
        # Relative weight of each interaction type.
        event_type_strength = {
            'Followed': 1.0,
            'Like': 1.0,
            'Love': 2.0,
            'Commented': 4.0,
            'Replied': 4.0
        }
        train_data['eventStrength'] = train_data['action'].apply(lambda x: event_type_strength[x]).copy()
        # Keep only users with at least two distinct (user, post) interactions.
        users_interactions_count = train_data.groupby(['user_id', 'post_id']).size().groupby('user_id').size()
        users_with_enough_interactions = users_interactions_count[users_interactions_count >= 2].reset_index()[['user_id']]
        interactions_from_selected_users = train_data.merge(users_with_enough_interactions, how='right', left_on='user_id', right_on='user_id')
        # Popularity score per user = smoothed sum of received interaction strength.
        interactions_full = interactions_from_selected_users.groupby(['user_id'])['eventStrength'].sum().apply(self.smooth_user_preference).reset_index()
        popular_users = interactions_full.sort_values('eventStrength', ascending=False)
        # Keep the top 10 recommendations.
        self.popularity_recommendations = popular_users.head(10)

    # Use the popularity based recommender system model to
    # make recommendations
    def recommend(self, user_id):
        """Recommend the most popular users, excluding *user_id* itself."""
        user_recommendations = self.popularity_recommendations
        popular_users = user_recommendations[user_recommendations['user_id'] != user_id]
        # Drop the score column on a fresh copy. The original used
        # drop(..., inplace=True) on a filtered slice, which triggers pandas'
        # SettingWithCopyWarning and can silently fail on a view.
        return popular_users.drop(columns=['eventStrength'])
| [
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
},
{
"point_num": 2,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excludi... | 3 | rec.py | hngi/lucidblog_hng_ml_team |
from pi_monitor.core import Event
import sendgrid
from sendgrid.helpers.mail import Content, Email, Mail
from python_http_client import exceptions
class Sendgrid:
    """Action that emails an Event's message through the SendGrid API."""

    def __init__(self, config):
        config = _process_config(config)
        # BUG FIX: was config['api_ky'], which always raised KeyError even
        # though _process_config had just validated the 'api_key' field.
        self.api_key = config['api_key']
        self.from_email = config['from_email']
        self.to_email = config['to_email']

    def act(self, event: Event):
        """Send *event*'s message as a plain-text email; return the API response."""
        print("{} acting on {}".format(__name__, event))
        sg = sendgrid.SendGridAPIClient(
            apikey=self.api_key,
        )
        from_email = Email(self.from_email)
        to_email = Email(self.to_email)
        subject = "pivpn update"
        content = Content(
            "text/plain", event.message,
        )
        mail = Mail(from_email, subject, to_email, content)
        try:
            response = sg.client.mail.send.post(request_body=mail.get())
        except exceptions.BadRequestsError as e:
            # Surface SendGrid's error payload before re-raising.
            print(e.body)
            raise
        return response
def _process_config(config):
    """
    Make sure config object has required values.

    Raises ValueError naming the first missing field; returns the
    config unchanged otherwise.
    """
    for field in ("api_key", "from_email", "to_email"):
        if field not in config:
            raise ValueError("required field {} not found in config file".format(field))
    return config
| [
{
"point_num": 1,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written ... | 3 | pi_monitor/actions/sendgrid.py | arlenk/pivpn-monitor |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
class DeleteRepoTriggerRequest(RpcRequest):
    """RPC request for the acr DeleteRepoTrigger API (product 'cr', version 2018-12-01).

    Generated-SDK style: each query parameter has a get_/set_ accessor pair
    backed by the request's query-parameter dict.
    """
    def __init__(self):
        RpcRequest.__init__(self, 'cr', '2018-12-01', 'DeleteRepoTrigger','acr')
        self.set_method('POST')
    def get_RepoId(self):
        # Id of the repository whose trigger is being deleted.
        return self.get_query_params().get('RepoId')
    def set_RepoId(self,RepoId):
        self.add_query_param('RepoId',RepoId)
    def get_TriggerId(self):
        # Id of the trigger to delete.
        return self.get_query_params().get('TriggerId')
    def set_TriggerId(self,TriggerId):
        self.add_query_param('TriggerId',TriggerId)
    def get_InstanceId(self):
        # Id of the container-registry instance.
        return self.get_query_params().get('InstanceId')
    def set_InstanceId(self,InstanceId):
        self.add_query_param('InstanceId',InstanceId)
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
}... | 3 | aliyun-python-sdk-cr/aliyunsdkcr/request/v20181201/DeleteRepoTriggerRequest.py | LittleJober/aliyun-openapi-python-sdk |
import asyncio
from mitmproxy.test.taddons import RecordingMaster
async def err():
    """Coroutine that immediately raises, to exercise the loop's exception handler."""
    raise RuntimeError
async def test_exception_handler():
    """An unhandled task exception should be logged by the master at error level."""
    m = RecordingMaster(None)
    running = asyncio.create_task(m.run())
    # Fire-and-forget a failing task; its exception reaches the loop handler.
    asyncio.create_task(err())
    await m.await_log("Traceback", level="error")
    m.shutdown()
    await running
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"ans... | 3 | test/mitmproxy/test_master.py | fedosgad/mitmproxy |
import cv2
from libs.ffmpeg_reader import FFMPEG_VideoReader
def mat2bytes(image):
    """Encode an image array as JPEG bytes.

    The channel order is reversed first (BGR <-> RGB).
    NOTE(review): cv2.imencode expects BGR input -- presumably the caller
    supplies RGB frames; confirm.
    """
    image = image[:, :, ::-1]
    # tobytes() replaces ndarray.tostring(), a long-deprecated alias that
    # was removed in NumPy 2.0; the produced bytes are identical.
    return cv2.imencode('.jpg', image)[1].tobytes()
class VideoCapture:
    """Frame source backed by FFMPEG_VideoReader with a cv2-like interface.

    Frames are returned as JPEG-encoded bytes (via mat2bytes); the last
    decoded frame is cached so repeated requests for the same position
    are served without touching the decoder.
    """

    def __init__(self, video_source, start_frame=0, shift_time=0):
        self.video = FFMPEG_VideoReader(video_source, True)
        self.video.initialize(starttime=0)
        # Duration reported by ffmpeg, converted to milliseconds.
        self.duration = self.video.ffmpeg_duration * 1000
        self.start_frame = start_frame
        self.shift = shift_time
        self.pos = start_frame
        self.set_position(self.start_frame)
        self.cached_frame = None

    def isOpened(self):
        """Return True when a video backend is attached (cv2-style name kept)."""
        return self.video is not None

    def set_position(self, position=0):
        """Seek to *position* (frame index) by skipping frames."""
        self.pos = position
        self.video.skip_frames(position)

    def get_position(self):
        """Return the reader's current frame index."""
        return self.video.pos

    def get_frame(self, pos):
        """Return JPEG bytes for frame *pos*, serving repeats from the cache.

        On a decode error the last good frame (or None) is returned.
        BUG FIX: the original fell through to `return frame` with *frame*
        unbound on the OSError path, raising UnboundLocalError.
        """
        if pos == self.pos and self.cached_frame is not None:
            return self.cached_frame
        self.pos = pos
        try:
            frame = self.video.get_frame(t=pos)
        except OSError:
            print('Bad frame!')
            return self.cached_frame
        frame = mat2bytes(frame)
        self.cached_frame = frame
        return frame

    def get_time(self, pos=None):
        """Convert a frame index (default: current position) to milliseconds."""
        if pos is None:
            pos = self.video.pos
        fps = self.video.fps
        hours = int(pos // 3600 // fps)
        minutes = int((pos - (hours * 3600 * fps)) // (60 * fps))
        secondes = int((pos - (hours * 3600 * fps) - minutes * (60 * fps)) // fps)
        return (secondes + minutes * 60 + hours * 3600) * 1000

    def length(self):
        """Return the total number of frames."""
        return self.video.nframes
| [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cl... | 3 | libs/video_processing.py | tyommik/rewiever |
class Fighter:
    """Combat component: hit points, defense and attack power.

    Expects its .owner attribute to be set to the carrying entity by the
    entity/component wiring elsewhere.
    """

    def __init__(self, hp, defense, power):
        self.max_hp = hp
        self.hp = hp
        self.defense = defense
        self.power = power

    def take_damage(self, amount):
        """Subtract *amount* from hp and report death.

        Returns a list of result dicts ({'dead': owner} when killed).
        BUG FIX: the original checked `self.hp < 0`, leaving a fighter
        alive at exactly 0 hit points; death triggers at <= 0.
        """
        results = []
        self.hp -= amount
        if self.hp <= 0:
            results.append({'dead': self.owner})
        return results

    def attack(self, target):
        """Attack *target* (an entity with a .fighter component).

        Damage is attacker power minus defender defense; returns the
        message results plus any results from the defender's take_damage.
        """
        results = []
        damage = self.power - target.fighter.defense
        if damage > 0:
            results.append({'message': '{0} attacks {1} for {2} hit points.'.format(
                self.owner.name.capitalize(), target.name, str(damage))})
            results.extend(target.fighter.take_damage(damage))
        else:
            results.append({'message': '{0} attacks {1} but does no damage.'.format(
                self.owner.name.capitalize(), target.name)})
        return results
| [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cl... | 3 | components/fighter.py | StormCloud71/tdl-roguelike-tute |
import grpc
from sea.servicer import ServicerMeta, msg2dict, stream2dict
from sea import exceptions
from sea.pb2 import default_pb2
from tests.wd.protos import helloworld_pb2
def test_meta_servicer(app, logstream):
    """ServicerMeta should convert RpcExceptions into gRPC status/details,
    log every handler call, and pass normal return values through."""
    class HelloContext():
        # Minimal stand-in for grpc.ServicerContext recording code/details.
        def __init__(self):
            self.code = None
            self.details = None
        def set_code(self, code):
            self.code = code
        def set_details(self, details):
            self.details = details
    class HelloServicer(metaclass=ServicerMeta):
        def return_error(self, request, context):
            raise exceptions.BadRequestException('error')
        def return_normal(self, request, context):
            return 'Got it!'
    # Start from an empty log so the position checks below are meaningful.
    logstream.truncate(0)
    logstream.seek(0)
    servicer = HelloServicer()
    context = HelloContext()
    # The exception is swallowed into an Empty message + INVALID_ARGUMENT status.
    ret = servicer.return_error(None, context)
    assert isinstance(ret, default_pb2.Empty)
    assert context.code is grpc.StatusCode.INVALID_ARGUMENT
    assert context.details == 'error'
    # The failing call must have been logged.
    p = logstream.tell()
    assert p > 0
    content = logstream.getvalue()
    assert 'HelloServicer.return_error' in content
    # A normal call passes through unchanged and is also logged.
    ret = servicer.return_normal(None, context)
    assert ret == 'Got it!'
    assert logstream.tell() > p
def test_msg2dict(app):
    """msg2dict should read listed attributes from plain objects and convert
    protobuf messages to plain dicts."""
    app.name = 'v-name'
    app.msg = 'v-msg'
    ret = msg2dict(app, ['name', 'msg', 'tz'])
    # 'tz' is not set above -- presumably resolved from app config; expected default.
    assert ret == {'name': 'v-name', 'msg': 'v-msg', 'tz': 'Asia/Shanghai'}
    request = helloworld_pb2.HelloRequest(name="value")
    ret = msg2dict(request)
    assert ret == {"name": "value"}
def test_stream2dict():
    """stream2dict should convert each protobuf message in a stream to a dict."""
    def stream_generator():
        # Lazily yield five distinct HelloRequest messages.
        for i in range(5):
            yield helloworld_pb2.HelloRequest(name=str(i))
    ret = stream2dict(stream_generator())
    for i, part in enumerate(ret):
        assert part == {"name": str(i)}
| [
{
"point_num": 1,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": fals... | 3 | tests/test_servicer.py | yangtt0509/sea |
"""Zeroconf usage utility to warn about multiple instances."""
from contextlib import suppress
import logging
from typing import Any
import zeroconf
from homeassistant.helpers.frame import (
MissingIntegrationFrame,
get_integration_frame,
report_integration,
)
from .models import HaZeroconf
_LOGGER = logging.getLogger(__name__)
def install_multiple_zeroconf_catcher(hass_zc: HaZeroconf) -> None:
    """Wrap the Zeroconf class to return the shared instance if multiple instances are detected."""
    def new_zeroconf_new(self: zeroconf.Zeroconf, *k: Any, **kw: Any) -> HaZeroconf:
        # Report the misuse, then hand back the single shared instance.
        _report(
            "attempted to create another Zeroconf instance. Please use the shared Zeroconf via await homeassistant.components.zeroconf.async_get_instance(hass)",
        )
        return hass_zc
    def new_zeroconf_init(self: zeroconf.Zeroconf, *k: Any, **kw: Any) -> None:
        # __new__ already returned the shared, fully initialized instance,
        # so re-initialization must be a no-op.
        return
    # Monkeypatch the library class itself so any code constructing
    # zeroconf.Zeroconf() transparently receives the shared instance.
    zeroconf.Zeroconf.__new__ = new_zeroconf_new  # type: ignore
    zeroconf.Zeroconf.__init__ = new_zeroconf_init  # type: ignore
def _report(what: str) -> None:
    """Report incorrect usage.

    Async friendly.
    """
    integration_frame = None
    # Try to attribute the call to a custom integration; the zeroconf
    # integration itself is excluded from the stack search.
    with suppress(MissingIntegrationFrame):
        integration_frame = get_integration_frame(exclude_integrations={"zeroconf"})
    if not integration_frame:
        # No integration could be identified -- fall back to a generic warning
        # with a stack trace so the source can still be tracked down.
        _LOGGER.warning(
            "Detected code that %s; Please report this issue", what, stack_info=True
        )
        return
    report_integration(what, integration_frame)
| [
{
"point_num": 1,
"id": "all_return_types_annotated",
"question": "Does every function in this file have a return type annotation?",
"answer": true
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
... | 3 | homeassistant/components/zeroconf/usage.py | basicpail/core |
import logging
import requestresponder
from edge.httputility import HttpUtility
class ProxyWriter(requestresponder.RequestResponder):
    """Request responder that proxies a GET to an upstream URL and relays the response."""

    def __init__(self, configFilePath):
        super(ProxyWriter, self).__init__(configFilePath)

    def get(self, requestHandler):
        """Handle GET by fetching the proxied URL; onResponse relays the result.

        NOTE(review): the original wrapped this in
        `except BaseException as exception: raise exception`, which only
        destroyed the original traceback (Python 2) and added nothing;
        the exception now propagates intact. The unused `result` local
        was removed as well.
        """
        super(ProxyWriter, self).get(requestHandler)
        httpUtility = HttpUtility()
        httpUtility.getResponse(self._generateUrl(requestHandler), self.onResponse)

    def onResponse(self, response):
        """Relay the upstream response (headers + body); 404 with error text on failure."""
        if response.error:
            self.requestHandler.set_status(404)
            self.requestHandler.write(str(response.error))
            self.requestHandler.finish()
        else:
            # Copy upstream headers through, then allow cross-origin access.
            for name, value in response.headers.iteritems():  # .iteritems(): this codebase runs on Python 2
                logging.debug('header: '+name+':'+value)
                self.requestHandler.set_header(name, value)
            self.requestHandler.set_header('Access-Control-Allow-Origin', '*')
            self.requestHandler.write(response.body)
            self.requestHandler.finish()

    def _generateUrl(self, requestHandler):
        """Build the upstream URL for this request; subclasses must override."""
        pass
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": tru... | 3 | src/main/python/libraries/edge/writer/proxywriter.py | mayadebellis/incubator-sdap-edge |
def testaArq(arq):
"""
-> Verifica se existe o arquivo arq
:arq: Nome do arquivo a ser testado.
:return: retorna True se o arquivo for encontrado,
caso contrário False
"""
try:
a = open(arq)
except FileNotFoundError: # O arquivo não foi encontrado
print('Arquivo não encontrado!')
return False
else:
return True
def criaArq(arq=''):
    """
    -> Create a text file, if it does not already exist.

    :param arq: file name.
    :return: None
    """
    try:
        a = open(arq, 'xt')
    except FileExistsError:
        # Nothing was opened, so there is nothing to close here.
        # BUG FIX: the original's `finally: a.close()` raised
        # UnboundLocalError on this path because `a` was never bound.
        print(f'ERRO: o arquivo \"{arq}\" já existe!')
    else:
        print(f'O arquivo \"{arq}\" foi criado com sucesso!')
        a.close()
    return
def leArq(arq=''):
    """
    -> Open and display the items of a text file.

    :param arq: file name.
    :return: None
    """
    # TODO: not implemented yet.
    return
def editaArq(arq):
    """
    -> Open a text file and append a new item at the end of the file.

    :param arq: file name.
    :return: None
    """
    # TODO: not implemented yet.
    return
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": true
},
{
"point_num": 2,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/c... | 3 | bibli/arquivo/__init__.py | EduardoPessanha/Python |
import torch
import torch.nn as nn
import torch.nn.functional as F
import sys
from Modules.MultiHeadAttention import MultiHeadAttention
class Attention(nn.Module):
    """Thin module stacking (currently one) multi-head self-attention layer."""

    def __init__(self, dim):
        super(Attention, self).__init__()
        self.encoders = self._build_model(dim)

    def _build_model(self, dim):
        """Create the attention layers (query/key/value dims all equal to *dim*)."""
        layers = []
        # (removed the original's no-op `dim = dim` self-assignment)
        layers.append(MultiHeadAttention(dim, dim, dim))
        return nn.ModuleList(layers)

    def forward(self, inputs):
        """Apply each encoder as self-attention (inputs attend to themselves)."""
        net_inputs = inputs
        # NOTE(review): the result of .contiguous() is discarded -- it is not
        # an in-place op, so this line has no effect; kept to preserve behavior.
        net_inputs.contiguous()
        for enc in self.encoders:
            net_inputs = enc(net_inputs, net_inputs)
        return net_inputs
| [
{
"point_num": 1,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written ... | 3 | Modules/Attention.py | drat/Neural-Voice-Cloning-With-Few-Samples |
# This Python file uses the following encoding: utf-8
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from builtins import object
import dpaycli as stm
class SharedInstance(object):
    """Singleton holder for the shared DPay instance."""
    # The one shared DPay instance (created lazily by shared_dpay_instance()).
    instance = None
    # Keyword arguments used when the shared instance is built.
    config = {}
def shared_dpay_instance():
    """ This method will initialize ``SharedInstance.instance`` and return it.
    The purpose of this method is to have offer single default
    dpay instance that can be reused by multiple classes.

    .. code-block:: python

        from dpaycli.account import Account
        from dpaycli.instance import shared_dpay_instance

        account = Account("test")
        # is equivalent with
        account = Account("test", dpay_instance=shared_dpay_instance())

    """
    if not SharedInstance.instance:
        # Drop stale cached blockchain objects before building a new instance.
        clear_cache()
        SharedInstance.instance = stm.DPay(**SharedInstance.config)
    return SharedInstance.instance
def set_shared_dpay_instance(dpay_instance):
    """ This method allows us to override default dpay instance for all users of
        ``SharedInstance.instance``.

        :param dpaycli.dpay.DPay dpay_instance: DPay instance
    """
    # Invalidate caches tied to the previous instance before swapping it out.
    clear_cache()
    SharedInstance.instance = dpay_instance
def clear_cache():
    """ Clear Caches
    """
    # Imported here, presumably to avoid a circular import at module load time.
    from .blockchainobject import BlockchainObject
    BlockchainObject.clear_cache()
def set_shared_config(config):
    """ This allows to set a config that will be used when calling
        ``shared_dpay_instance`` and allows to define the configuration
        without requiring to actually create an instance
    """
    # NOTE(review): raising AssertionError for a type check is unusual
    # (TypeError would be conventional), but callers may depend on it.
    if not isinstance(config, dict):
        raise AssertionError()
    SharedInstance.config.update(config)
    # if one is already set, delete it so the next access rebuilds the
    # shared instance with the merged configuration
    if SharedInstance.instance:
        clear_cache()
        SharedInstance.instance = None
| [
{
"point_num": 1,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
... | 3 | dpaycli/instance.py | dpays/dpay-cli |
import tkinter as tk
import pyautogui
import time
class Application(tk.Frame):
    """Small tkinter window whose buttons send repeated chat messages via pyautogui."""

    def __init__(self, master=None):
        super().__init__(master)
        self.master = master
        self.pack()
        self.criar_opcoes()

    def criar_opcoes(self):
        """Build the instruction label and the three message-count option buttons."""
        # Instruction text (rendered as a button, matching the original layout).
        self.hi_there = tk.Button(self)
        self.hi_there["text"] = " Escolha o número de mensagens que quer enviar:\n Após clicar no numero de mensagens,\nvoce tem 5 segundo para apertar em algum local que tenha 'chat'"
        self.hi_there.pack(side="top")
        # One button per message count. NOTE: self.hi_there is rebound each
        # time, so only the last button stays referenced -- harmless, since
        # tkinter keeps all widgets alive through the parent frame.
        self.hi_there = tk.Button(self)
        self.hi_there["text"] = "10 mensagens"
        self.hi_there["command"] = self.opcao1
        self.hi_there.pack(side="top")
        self.hi_there = tk.Button(self)
        self.hi_there["text"] = "20 mensagens"
        self.hi_there["command"] = self.opcao2
        self.hi_there.pack(side="top")
        self.hi_there = tk.Button(self)
        self.hi_there["text"] = "30 mensagens"
        self.hi_there["command"] = self.opcao3
        self.hi_there.pack(side="top")
        # Quit button. NOTE(review): assigning to self.quit shadows
        # tk.Frame.quit(); kept as-is for compatibility with the original.
        self.quit = tk.Button(self, text="QUIT", fg="red",
                              command=self.master.destroy)
        self.quit.pack(side="right")

    def _enviar(self, mensagem, quantidade):
        """Wait 5 seconds (time to focus the chat window), then type
        *mensagem* followed by Enter, *quantidade* times."""
        time.sleep(5)
        for _ in range(quantidade):
            pyautogui.typewrite(mensagem)
            pyautogui.press('enter')

    def opcao1(self):
        """Send 10 messages."""
        self._enviar('eai', 10)

    def opcao2(self):
        """Send 20 messages."""
        self._enviar('eai coisa feia', 20)

    def opcao3(self):
        """Send 30 messages."""
        self._enviar('eai', 30)
# Build the main window and hand control to Tk's event loop.
janela = tk.Tk()
janela.title("Felipe Gomes")
janela.geometry("352x200")
aplicacao = Application(master=janela)
aplicacao.mainloop()
| [
{
"point_num": 1,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer":... | 3 | novo.py | Felipe-Gs/Exerciccios-Python3 |
# coding: utf-8
"""
RadioManager
RadioManager # noqa: E501
OpenAPI spec version: 2.0
Contact: support@pluxbox.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import radiomanager_sdk
from radiomanager_sdk.models.broadcast import Broadcast # noqa: E501
from radiomanager_sdk.rest import ApiException
class TestBroadcast(unittest.TestCase):
    """Unit-test stubs for the generated ``Broadcast`` model."""

    def setUp(self):
        # No fixtures are required for the stub yet.
        pass

    def tearDown(self):
        # Nothing to clean up.
        pass

    def testBroadcast(self):
        """Test Broadcast"""
        # FIXME: construct object with mandatory attributes with example values
        # model = radiomanager_sdk.models.broadcast.Broadcast()  # noqa: E501
        pass
# Allow running this stub module directly, which executes the test case above.
if __name__ == '__main__':
    unittest.main()
| [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
... | 3 | test/test_broadcast.py | Pluxbox/radiomanager-python-client |
import os
import sqlite3
class DBRecorder:
    """Persist records into a SQLite database.

    The target location comes from ``config['output']``: ``dir`` (a user
    path; ``~`` is expanded) and ``file_name``.  The connection is opened
    lazily on first use.
    """

    def __init__(self, config):
        """Store *config*; no connection is opened yet."""
        self.config = config
        self.conn = None
        self.cursor = None

    def connect_db(self):
        """Open (creating directories as needed) the configured database.

        Note: ``sqlite3.connect`` raises on failure and never returns
        ``None``, so the old post-connect success check was unreachable
        and has been removed.
        """
        path = os.path.expanduser(self.config['output']['dir'])
        if not os.path.exists(path):
            os.makedirs(path)
        self.conn = sqlite3.connect(
            os.path.join(path, self.config['output']['file_name']))
        self.cursor = self.conn.cursor()

    def create_db_table(self):
        """Create the ``record`` table if it does not already exist."""
        if not self.conn:
            self.connect_db()
        columns = [
            'hash text primary key not null',
            'summary text not null',
            'description text',
            'date text',
            'author text',
        ]
        # IF NOT EXISTS replaces the old try/except that swallowed *any*
        # OperationalError (which would also have hidden genuine SQL errors).
        self.cursor.execute(
            'create table if not exists record({})'.format(','.join(columns)))

    def add_db_record(self, row):
        """Insert one record; rows whose primary key already exists are skipped.

        :param dict row: column-name -> value mapping for the new record
        """
        if not self.conn:
            self.connect_db()
        placeholders = ','.join('?' for _ in row)
        sql_str = 'insert into record({}) values ({})'.format(
            ','.join(row.keys()), placeholders)
        try:
            self.cursor.execute(sql_str, tuple(row.values()))
            self.conn.commit()
        except sqlite3.DatabaseError:
            # Duplicate primary key (or similar) -- keep the existing row.
            pass

    def close(self):
        """Close cursor and connection; safe even if never connected."""
        if self.cursor:
            self.cursor.close()
        if self.conn:
            self.conn.close()
        self.cursor = None
        self.conn = None
| [
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": ... | 3 | Recorder.py | shiheyuan/GitGrabber |
import abc
class Writer(abc.ABC):
    """Abstract sink for debugger output events.

    Concrete subclasses receive frame, variable, profiler and timing
    events and render them to some destination.
    """

    @abc.abstractmethod
    def write_cur_frame(self, frame_info, output):
        """Record the frame currently being executed."""
        pass

    @abc.abstractmethod
    def write_frame_exec(self, frame_info, exec_time, exec_times):
        """Record one execution of a frame and its timing."""
        pass

    @abc.abstractmethod
    def write_add(self, var, val, history, *, action, plural):
        """Record a newly created variable (or added element, per *plural*)."""
        pass

    @abc.abstractmethod
    def write_change(self, var, val_before, val_after, history, *, action):
        """Record a variable changing from *val_before* to *val_after*."""
        pass

    @abc.abstractmethod
    def write_remove(self, var, val, history, *, action):
        """Record a variable (or element) being removed."""
        pass

    @abc.abstractmethod
    def write_variable_summary(self, var_history):
        """Write a summary of all recorded variable history."""
        pass

    @abc.abstractmethod
    def write_profiler_summary(self, frame_exec_times):
        """Write a summary of per-frame execution times."""
        pass

    @abc.abstractmethod
    def write_time_summary(self, exec_start_time, exec_stop_time):
        """Write overall wall-clock timing for the traced run."""
        pass
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding sel... | 3 | vardbg/output/writer.py | vishal2612200/vardbg |
#!/usr/bin/env python
#from .core import *
import numpy as np
import pandas as pd
import shutil
import urllib
import urlparse
from os.path import splitext, basename
import os
from os import sys, path
from pprint import pprint
import StringIO
import db
from gp import *
from core import *
from IPython.core.debugger import Tracer
class Annotation(UploadCsvConvert):
    """CSV-to-database upload task targeting the ``annotation`` table.

    NOTE(review): depends on ``UploadCsvConvert`` and ``SyncDB`` brought in
    by the wildcard imports above; the module is Python 2 era
    (``urlparse``, ``StringIO``) -- confirm before porting.
    """

    def __init__(self, xe):
        # Fixed destination column layout for rows written to `annotation`.
        xe.attrib['newCols'] = 'gid,annotation_type_id,content,annotation_field1,ds,tax_id'
        UploadCsvConvert.__init__(self,xe=xe,dest='annotation')
        # Column resolved via get_type_col_value_sql() during conversion.
        self.type_col = 'annotation_type_id'

    def get_type_col_value_sql(self):
        """Return SQL mapping an annotation type name to its numeric id."""
        return 'SELECT annotation_type_id FROM %s.annotation_type WHERE annotation_type_name = ?' % SyncDB.DATABASE
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
}... | 3 | database/task_class/annotation.py | cozy9/Metascape |
# -*- coding: utf-8 -*-
"""Public forms."""
from flask_wtf import Form
from wtforms import PasswordField, StringField
from wtforms.validators import DataRequired
from xixi.user.models import User
class LoginForm(Form):
    """Login form."""

    username = StringField('Username', validators=[DataRequired()])
    password = PasswordField('Password', validators=[DataRequired()])

    def __init__(self, *args, **kwargs):
        """Create instance."""
        super(LoginForm, self).__init__(*args, **kwargs)
        # Populated by validate() when authentication succeeds.
        self.user = None

    def validate(self):
        """Validate the form and authenticate the user."""
        # Field-level validators run first; bail out on any failure.
        if not super(LoginForm, self).validate():
            return False
        user = User.query.filter_by(username=self.username.data).first()
        self.user = user
        if not user:
            self.username.errors.append('Unknown username')
            return False
        if not user.check_password(self.password.data):
            self.password.errors.append('Invalid password')
            return False
        if not user.active:
            self.username.errors.append('User not activated')
            return False
        return True
| [
{
"point_num": 1,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true... | 3 | xixi/public/forms.py | defbobo/xixi |
"""
The MIT License (MIT)
Copyright (c) 2021 Bang & Olufsen a/s
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from enum import Enum
from beoremote.events.entity import Entity
class StatusEvent(Entity):
    """Event reporting whether a preceding operation succeeded or failed."""

    class State(str, Enum):
        # str mixin: members compare equal to their literal string values.
        ok = "ok"
        error = "error"

    def __init__(
        self, type: str, state: State, message: str = None
    ):  # pylint: disable=redefined-builtin
        # Event type string, stored as given by the caller.
        self.type = type
        # Outcome of the operation (State.ok / State.error).
        self.state = state
        # Optional human-readable detail; defaults to None.
        self.message = message
| [
{
"point_num": 1,
"id": "every_class_has_docstring",
"question": "Does every class in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": false
},
{... | 3 | beoremote/events/statusEvent.py | bang-olufsen/beoremote-halo |
import git
import os
import sys
import subprocess
from tools.config import Config
from tools.logger import log_error, log_info
def exit_if_not_executed_in_ide_environment():
    '''Exit unless both fileserver credential variables are set in the environment.'''
    # Bug fix: the original `("A" and "B") in os.environ` evaluated the
    # string conjunction first, so only the *second* variable was ever
    # checked.  Each variable must be tested individually.
    required = ("ICSD_FILESERVER_USER", "ICSD_FILESERVER_PASSWD")
    if not all(var in os.environ for var in required):
        log_error("Please use CobiGen IDE initialized console and set the variables in the variables-customized.bat.")
        sys.exit()
def is_valid_branch(config: Config) -> bool:
    '''Check whether the branch chosen for release exists on the remote.'''
    remote_heads = git.cmd.Git(config.root_path).execute(
        "git ls-remote --heads origin " + config.branch_to_be_released + " | wc -l")
    # An empty command output means the remote has no such head.
    if remote_heads == "":
        log_info("Branch is not known remotely.")
        return False
    log_info("Branch is valid.")
    return True
def exit_if_origin_is_not_correct(config: Config):
    '''Abort when the current repository's origin does not match the configured repo.'''
    remote_info = git.cmd.Git(config.root_path).execute("git remote -v")
    if config.github_repo in remote_info:
        return
    log_error("Origin of the current repository is not '" + config.github_repo + "', Please go to correct directory.")
    sys.exit()
def check_running_in_bash():
    '''Exit unless a POSIX-like shell environment (e.g. git bash) is available.'''
    try:
        # subprocess.DEVNULL replaces the old `open(os.devnull, 'w')`,
        # which leaked an open file handle (it was never closed).
        subprocess.call("ls", stdout=subprocess.DEVNULL)
    except OSError:
        # Narrowed from a bare `except:`; a missing `ls` binary raises OSError.
        log_error("Please run the script in a linux like environment (e.g. git bash)")
        sys.exit()
| [
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer":... | 3 | scripts/src/tools/validation.py | iriberri/tools-cobigen |
#!/usr/bin/env python3
import isce
from isceobj.Sensor import createSensor
import shelve
import argparse
import glob
from isceobj.Util import Poly1D
from isceobj.Planet.AstronomicalHandbook import Const
import os
def cmdLineParse():
    '''
    Command line parser.
    '''
    parser = argparse.ArgumentParser(
        description='Unpack CSK SLC data and store metadata in pickle file.')
    # (flags, options) specs for each accepted argument.
    arg_specs = (
        (('-i', '--input'),
         dict(dest='h5dir', type=str, required=True, help='Input CSK directory')),
        (('-d', '--dop_file'),
         dict(dest='dopFile', type=str, default=None, help='Doppler file')),
        (('-o', '--output'),
         dict(dest='slcdir', type=str, required=True, help='Output SLC directory')),
    )
    for flags, options in arg_specs:
        parser.add_argument(*flags, **options)
    return parser.parse_args()
def unpack(hdf5, slcname, dopFile, parse=False):
    '''
    Unpack HDF5 to binary SLC file.
    '''
    sensor = createSensor('UAVSAR_STACK')
    sensor.configure()
    sensor.metadataFile = hdf5
    sensor.dopplerFile = dopFile
    sensor.extractImage()
    # Make sure the output directory exists before shelving metadata.
    if not os.path.isdir(slcname):
        os.mkdir(slcname)
    shelf_path = os.path.join(slcname, 'data')
    with shelve.open(shelf_path) as db:
        db['frame'] = sensor.frame
if __name__ == '__main__':
    '''
    Main driver.
    '''
    opts = cmdLineParse()
    # Drop a single trailing slash from the directory arguments.
    if opts.slcdir.endswith('/'):
        opts.slcdir = opts.slcdir[:-1]
    if opts.h5dir.endswith('/'):
        opts.h5dir = opts.h5dir[:-1]
    unpack(opts.h5dir, opts.slcdir, opts.dopFile)
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": true
},
{
"point_num": 2,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/c... | 3 | uavsar_rtc_mlc/Docker_Install/share/stripmapStack/unpackFrame_UAVSAR.py | sgk0/isce_docker_tools |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.