source string | points list | n_points int64 | path string | repo string |
|---|---|---|---|---|
import threading
import time
class Timer(object):
    """Repeating or one-shot timer built on threading.Timer.

    Invokes ``callback_func(*args, **kwargs)`` after ``interval`` seconds,
    re-arming itself unless ``oneshot`` is true.
    """

    def __init__(self, interval, callback_func, oneshot=False, args=None, kwargs=None):
        self._interval = interval
        self._oneshot = oneshot
        self._f = callback_func
        # Normalise here so no mutable default is shared between instances.
        self._args = [] if args is None else args
        self._kwargs = {} if kwargs is None else kwargs
        self._timer = None
        self._start_time = None
        self._elapsed_time = None
        self._remaining_time = None

    def _callback(self):
        # Fire the user callback, then either finish or re-arm.
        self._f(*self._args, **self._kwargs)
        if self._oneshot:
            self.stop()
        else:
            self.start()

    def stop(self):
        """Cancel a pending run and record elapsed/remaining seconds."""
        if self._timer:
            self._timer.cancel()
            self._elapsed_time = time.time() - self._start_time
            self._remaining_time = self._interval - self._elapsed_time
            self._timer = None

    def start(self, interval=None):
        """Arm the timer; a non-None ``interval`` replaces the stored one."""
        if interval is not None:
            self._interval = interval
        self._timer = threading.Timer(self._interval, self._callback)
        self._start_time = time.time()
        self._timer.start()

    @property
    def elapsed(self):
        # Seconds that had passed when stop() was last called.
        return self._elapsed_time

    @property
    def remaining(self):
        # Seconds that were left when stop() was last called.
        return self._remaining_time

    @property
    def running(self):
        return self._timer is not None
| [
{
"point_num": 1,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docs... | 3 | thym/timer.py | apgeorg/thyme |
from schemdraw import Drawing
from schemdraw import elements as elm
from .input import Input
def draw_void():
    """Draw the common-drain FET polarization circuit with placeholder labels."""
    d = Drawing()
    jfet = d.add(elm.transistors.JFetP().right().reverse())
    # Supply branch from the drain.
    d.add(elm.SourceV().at(jfet.drain).up().label('Vdd').reverse())
    d.add(elm.Ground().left())
    # Gate network: Rg to ground plus the input coupling capacitor.
    d.add(elm.Line().left().at(jfet.gate).length(d.unit / 4))
    d.push()
    d.add(elm.Dot())
    d.add(elm.Resistor().down().label('Rg'))
    d.add(elm.Ground())
    d.pop()
    d.add(elm.Capacitor().left())
    d.add(elm.Dot().label('Vi'))
    # Source network: Rs to ground plus the output coupling capacitor.
    d.add(elm.Dot().at(jfet.source).down())
    d.push()
    d.add(elm.Resistor().down().label('Rs').length(d.unit * 0.857))
    d.add(elm.Ground())
    d.pop()
    d.add(elm.Capacitor().right().label('C2'))
    d.add(elm.Dot().label('Vo'))
    return d
def draw(amplifier_input: Input):
    """Draw the common-drain FET polarization circuit labelled from the input."""
    vdd_label = str(amplifier_input.Vdd)
    rg_label = str(amplifier_input.Rg)
    rs_label = str(amplifier_input.Rs)
    d = Drawing()
    jfet = d.add(elm.transistors.JFetP().right().reverse())
    # Supply branch from the drain.
    d.add(elm.SourceV().at(jfet.drain).up().label(vdd_label).reverse())
    d.add(elm.Ground().left())
    # Gate network: Rg to ground plus the input coupling capacitor.
    d.add(elm.Line().left().at(jfet.gate).length(d.unit / 4))
    d.push()
    d.add(elm.Dot())
    d.add(elm.Resistor().down().label(rg_label))
    d.add(elm.Ground())
    d.pop()
    d.add(elm.Capacitor().left())
    d.add(elm.Dot().label('Vi'))
    # Source network: Rs to ground plus the output coupling capacitor.
    d.add(elm.Dot().at(jfet.source).down())
    d.push()
    d.add(elm.Resistor().down().label(rs_label).length(d.unit * 0.857))
    d.add(elm.Ground())
    d.pop()
    d.add(elm.Capacitor().right().label('C2'))
    d.add(elm.Dot().label('Vo'))
    return d
| [
{
"point_num": 1,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"ans... | 3 | pyelectric/electronic/amplifier/common_drain_polarization_fet/drawing.py | luanws/pyelectric |
#!/usr/bin/env python3
"""
Exercise 13.4 from Serre "Linear Representations of Finite Groups ", 1977.
"""
import sys, os
import numpy
from element import Linear, Z, Q
from action import mulclose
# 4x4 matrix algebra over the rationals; hosts the quaternion representation.
M = Linear(4, Q)

def quaternion(a, b, c, d):
    """Return the 4x4 rational matrix representing a + b*i + c*j + d*k."""
    # build matrix representation of quaternion
    A = M.get([
        [a, -b, -c, -d],
        [b, a, -d, c],
        [c, d, a, -b],
        [d, -c, b, a]])
    return A

# The standard quaternion units as matrices; `basis` fixes their order.
e = quaternion(1, 0, 0, 0)
i = quaternion(0, 1, 0, 0)
j = quaternion(0, 0, 1, 0)
k = quaternion(0, 0, 0, 1)
basis = [e, i, j, k]
def dot(a, b):
    """Frobenius inner product: elementwise product of the two matrix
    values, summed over all entries."""
    lhs = numpy.array(a.value)
    rhs = numpy.array(b.value)
    return (lhs * rhs).sum()
def get_rep(A, left=True):
    """Matrix of the left (or right) multiplication action of A in the
    quaternion basis, with entries <A.u, v>/4."""
    rows = []
    for v in basis:
        row = []
        for u in basis:
            # Act on the basis element from the requested side.
            B = A * u if left else u * A
            row.append(dot(B, v) / 4)
        rows.append(row)
    return M.get(rows)
def test():
    """Verify the quaternion relations, then build and inspect the
    24-element group generated by Q_8 and an order-3 element."""
    # Standard quaternion identities: i^2 = j^2 = k^2 = ijk = -1.
    assert i*i == -e
    assert j*j == -e
    assert k*k == -e
    for a in [i, j, k]:
        for b in [i, j, k]:
            if a!=b:
                assert a*b == -b*a
    assert i*j*k == -e
    one = Q.one
    # A = (i + j + k - 1)/2 has order 3.
    A = (one/2)*(i+j+k-e)
    assert A!=e
    assert A**2!=e
    assert A**3==e
    Q_8 = mulclose([i, j, k])
    assert len(Q_8)==8
    # Q_8 acts by right multiplication, C_3 by left multiplication
    Arep = get_rep(A)
    Qrep = [get_rep(V, False) for V in [i, j, k]]
    # The two actions commute, so together they generate a group of order 24.
    for V in Qrep:
        #print(V)
        assert V*Arep == Arep*V
    G = mulclose(Qrep + [Arep])
    assert len(G) == 24
    # Print each element's order and the character (trace) row beneath it.
    chi = []
    G = list(G)
    G.sort(key = get_order)
    for g in G:
        print(str(get_order(g)).rjust(3), end=" ")
        chi.append(g.trace())
    print()
    for x in chi:
        print(str(x).rjust(3), end=" ")
    print()
def get_order(g):
    """Order of group element g: the smallest n >= 1 with g**n == identity."""
    power = g
    count = 1
    # Invariant: power == g**count.  The identity is the unique
    # idempotent, so stop as soon as power*power == power.
    while power * power != power:
        power = power * g
        count += 1
    return count
if __name__ == "__main__":
    # Run the self-checks when executed as a script.
    test()
| [
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answ... | 3 | bruhat/serre_example.py | punkdit/bruhat |
import astropy.io.fits as fits
from specutils import SpectrumList
from specutils.io.registers import data_loader
from .loaders import FITS_FILE_EXTS, SINGLE_SPLIT_LABEL
# Loader configuration for GALAH FITS files: maps each HDU index to its
# role and names the WCS keywords used to reconstruct the wavelength axis.
GALAH_CONFIG = {
    "hdus": {
        "0": {"purpose": "science"},
        "1": {"purpose": "error_stdev"},
        "2": {"purpose": "unreduced_science"},
        "3": {"purpose": "unreduced_error_stdev"},
        "4": {"purpose": "skip"},
    },
    "wcs": {
        "pixel_reference_point_keyword": "CRPIX1",
        "pixel_reference_point_value_keyword": "CRVAL1",
        "pixel_width_keyword": "CDELT1",
        "wavelength_unit": "Angstrom",
    },
    "units": {"flux_unit": "count"},
    "all_standard_units": False,
    "all_keywords": False,
    "valid_wcs": False,
}
def identify_galah(origin, *args, **kwargs):
    """
    Identify if the current file is a GALAH file.

    Accepts either an open HDUList or a path/file object; anything we open
    ourselves is closed again before returning.
    """
    file_obj = args[0]
    if isinstance(file_obj, fits.hdu.hdulist.HDUList):
        hdulist = file_obj
        we_opened = False
    else:
        hdulist = fits.open(file_obj, **kwargs)
        we_opened = True
    try:
        # REFERENC may be absent; default to "" so the membership test
        # cannot raise TypeError on None (bug in the original).
        return "galah" in hdulist[0].header.get("REFERENC", "")
    finally:
        # Only close handles we opened ourselves.
        if we_opened:
            hdulist.close()
@data_loader(
    label="GALAH", extensions=FITS_FILE_EXTS, dtype=SpectrumList,
    identifier=identify_galah,
)
def galah_loader(fname):
    """Load a GALAH FITS file into a SpectrumList via the single-split reader."""
    return SpectrumList.read(fname, format=SINGLE_SPLIT_LABEL, **GALAH_CONFIG)
| [
{
"point_num": 1,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"ans... | 3 | src/ssv/galah.py | einshoe/ssv-py |
# AUTOGENERATED! DO NOT EDIT! File to edit: notebooks/07_gbe.sst.data_provider.ipynb (unless otherwise specified).
__all__ = ['SSTDataProvider']
# Cell
from fastcore.foundation import patch
from ..data_provider import GBEProvider
from ...data_provider import get_efficiently
import numpy as np
# Cell
class SSTDataProvider(GBEProvider):
    '''This class builds upon GBEProvider to get the working memory task data.'''

    def __init__(self, data_folder_path):
        # All state lives in the GBE provider base class.
        GBEProvider.__init__(self, data_folder_path)
# Cell
@patch
def decode_sst_strings(self:SSTDataProvider, gbe_data):
    """Decode raw FruitTapGame (stop-signal task) strings into a trial-level
    DataFrame with RT, stop-signal delay (SSD), response and error
    indicators (omission/comission/premature), and accuracy."""
    df = self.decode_gbe_strings(gbe_data, 'FruitTapGame')
    # Removing left/right distinctions
    df['rt'] = df.lefttime.astype(int) + df.righttime.astype(int)
    df['is_stop'] = (df.stop.astype(int) > 0).astype(float)
    df.loc[df.rt==0,'rt'] = np.nan # Setting 0 RTs to nan
    df['responded'] = (df.rt.isna()==False).astype(float)
    # Calculating SSD
    crw = 650 # ToDo: I'll have to double check this is correct; in Smittenaar it's reported as 500ms, but Ying used 650ms (it's correct as we use the center of response window)
    df['ssd'] = crw - df.gobaddelay.astype(int)
    # SSD is only defined on stop trials.
    df.loc[df.is_stop==False,'ssd'] = np.nan
    # Error analysis
    df['omission'] = ((df.is_stop==0) & ((df.rt.isna()) | (df.rt >= 800))).astype(float)
    df['comission'] = ((df.is_stop==1) & (df.rt.isna()==False)).astype(float)
    df['premature'] = (df.rt <= 500).astype(float)
    # Creating convenience variables and restructuring
    df['accuracy'] = df.success.astype(int)
    df = df[[
        'gbe_index',
        'trial_number',
        'anticipation',
        'is_stop','gobaddelay','ssd',
        'responded',
        'rt',
        'accuracy',
        'omission',
        'comission',
        'premature']]
    return df
# Cell
@patch
@get_efficiently
def get_sst_data(self:SSTDataProvider):
    """Return the decoded stop-signal-task data for all GBE sessions."""
    return self.decode_sst_strings(self.get_gbe_data())
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": false... | 3 | trr265/gbe/sst/data_provider.py | hgzech/trr265 |
from django.db import DatabaseError
from django.forms import ModelForm, Field, ValidationError, BooleanField
from django.forms.widgets import CheckboxInput
from explorer.models import Query, MSG_FAILED_BLACKLIST
_ = lambda x: x
class SqlField(Field):
    def validate(self, value):
        """
        Ensure that the SQL passes the blacklist and executes. Execution check is skipped if params are present.
        :param value: The SQL for this Query model.
        """
        query = Query(sql=value)
        passes_blacklist, failing_words = query.passes_blacklist()
        if passes_blacklist:
            error = None
        else:
            error = MSG_FAILED_BLACKLIST % ', '.join(failing_words)
        # Only attempt execution when the query takes no parameters.
        if not error and not query.available_params():
            try:
                query.execute_query_only()
            except DatabaseError as e:
                error = str(e)
        if error:
            raise ValidationError(_(error), code="InvalidSql")
class QueryForm(ModelForm):
    """ModelForm for Query with SQL validation and an optional snapshot flag."""

    sql = SqlField()
    snapshot = BooleanField(widget=CheckboxInput, required=False)

    def clean(self):
        # Preserve the original creator; a posted value must not override it.
        if self.instance and self.data.get('created_by_user', None):
            self.cleaned_data['created_by_user'] = self.instance.created_by_user
        return super(QueryForm, self).clean()

    @property
    def created_by_user_email(self):
        creator = self.instance.created_by_user
        return creator.email if creator else '--'

    @property
    def created_by_user_id(self):
        creator = self.instance.created_by_user
        return creator.id if creator else ''

    class Meta:
        model = Query
        fields = ['title', 'sql', 'description', 'created_by_user', 'snapshot']
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
}... | 3 | explorer/forms.py | nivasse/django-sql-explorer |
import time
from decorator import decorate
class HighPerformanceTimer(object):
    """Context manager / decorator that measures wall time and reports the
    elapsed duration, in microseconds, to ``callback`` on exit."""

    def __init__(self, callback):
        self._callback = callback

    def _new_timer(self):
        # A fresh instance per timed region keeps usage thread-safe and
        # reentrant (no shared _start attribute).
        return self.__class__(self._callback)

    def __enter__(self):
        self._start = time.perf_counter_ns()

    def __exit__(self, typ, value, traceback):
        elapsed_ns = time.perf_counter_ns() - self._start
        # Time can go backwards; clamp at zero.
        if elapsed_ns < 0:
            elapsed_ns = 0
        self._callback(elapsed_ns / 1000)

    def __call__(self, f):
        def wrapped(func, *args, **kwargs):
            # Obtaining new instance of timer every time
            # ensures thread safety and reentrancy.
            with self._new_timer():
                return func(*args, **kwargs)
        return decorate(f, wrapped)
| [
{
"point_num": 1,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": true
},
{
"point_num": 2,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (exc... | 3 | src/serenity/utils/metrics.py | dthomp87/serenity |
import datetime
from vit.formatter import DateTime
# TODO: Remove this once tasklib bug is fixed.
from tasklib.serializing import SerializingObject
serializer = SerializingObject({})
class UdaDate(DateTime):
    """Formatter for user-defined date attributes (UDAs)."""

    def format(self, dt, task):
        if not dt:
            return self.markup_none(self.colorize())
        # TODO: Remove this once tasklib bug is fixed.
        # https://github.com/robgolding/tasklib/issues/30
        if not isinstance(dt, datetime.datetime):
            dt = serializer.timestamp_deserializer(dt)
        text = dt.strftime(self.custom_formatter or self.formatter.report)
        return (len(text), (self.colorize(dt), text))

    def colorize(self, dt=None):
        # Delegate to the UDA-date colorizer for this column.
        return self.colorizer.uda_date(self.column, dt)
| [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},... | 3 | vit/formatter/uda_date.py | kinifwyne/vit |
from abc import ABC, abstractmethod
from typing import Iterable, Tuple, Iterator, List
from .._typing import KeyType, LabelType, ItemType
from ._Binner import Binner
class TwoPassBinner(Binner[KeyType, LabelType], ABC):
    """
    Class for binners which require an initial pass over
    the set of items being binned to generate state (e.g.
    statistics).
    """

    def _bin_items(self, items: Iterable[ItemType]) -> Iterator[Tuple[LabelType, ItemType]]:
        # Materialise the iterable: it is consumed twice (configure, then bin).
        cached = list(items)
        self._configure(cached)
        return super()._bin_items(cached)

    @abstractmethod
    def _configure(self, items: List[ItemType]):
        """
        Configures this binner on the given binnables items. Can modify
        the items list as well.

        :param items: The items to configure ourselves against.
        """
        pass
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
... | 3 | src/wai/bynning/binners/_TwoPassBinner.py | waikato-datamining/bynning |
#!/usr/bin/env python
"""Types-related part of GRR API client library."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from typing import Any
from grr_api_client import errors
from grr_api_client import utils
from grr_response_proto import flows_pb2
class UnknownFlowName(errors.Error):
  # Raised by CreateFlowArgs when no flow descriptor matches the given name.
  pass
class Types(object):
  """Object that helps users to deal with GRR type system."""

  def __init__(self, context=None):
    super(Types, self).__init__()
    if not context:
      raise ValueError("context can't be empty")
    self._context = context
    # Lazily-populated cache mapping flow name -> descriptor.
    self._flow_descriptors = None

  def CreateFlowRunnerArgs(self):
    """Creates flow runner args object."""
    return flows_pb2.FlowRunnerArgs()

  def CreateHuntRunnerArgs(self):
    """Creates hunt runner args object."""
    return flows_pb2.HuntRunnerArgs()

  # TODO: Delete this method as it is not really type-safe.
  def CreateFlowArgs(self, flow_name=None) -> Any:
    """Creates flow arguments object for a flow with a given name."""
    if not self._flow_descriptors:
      # One round-trip fetches every descriptor; cache for later calls.
      response = self._context.SendRequest("ListFlowDescriptors", None)
      self._flow_descriptors = {item.name: item for item in response.items}
    try:
      descriptor = self._flow_descriptors[flow_name]
    except KeyError:
      raise UnknownFlowName(flow_name)
    return utils.CopyProto(utils.UnpackAny(descriptor.default_args))

  def UnpackAny(self, proto_any):
    """Resolves the type and unpacks the given protobuf Any object."""
    return utils.UnpackAny(proto_any)
| [
{
"point_num": 1,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written ... | 3 | api_client/python/grr_api_client/types.py | isabella232/grr |
from docutils.parsers.rst import Directive
from docutils import nodes
from sphinx.util.nodes import set_source_info
import os
import re
def setup(app):
    """Sphinx extension hook: register the ``fp_output`` directive."""
    app.add_directive('fp_output', OutputDirective)
class OutputDirective(Directive):
    """Directive ``fp_output <method> [obj_name]``: renders the canned
    transcript in ``fp_output/<method>.txt`` as a Python literal block
    that looks like a REPL call on ``obj_name`` (default ``fdm``)."""

    required_arguments = 1
    optional_arguments = 1

    def run(self):
        method = self.arguments[0]
        try:
            obj_name = self.arguments[1]
        except IndexError:
            obj_name = 'fdm'
        suffix = '.txt'
        # Validate explicitly: assert statements are stripped under -O,
        # and the name is interpolated into a file path below.
        if not re.match('^[a-zA-Z][a-zA-Z0-9_]*$', method):
            raise ValueError('invalid fp_output method name: %r' % method)
        srcdir = self.state.document.settings.env.srcdir
        with open(os.path.join(srcdir, 'fp_output', method + suffix)) as fd:
            content = fd.read()
        if '\n\n' in content:
            # Two blocks: the first lists call parameters (one per line),
            # the second is the captured output.
            method = method.split('_params')[0]
            params, result = content.split('\n\n')
            params = ', '.join(params.split('\n'))
        else:
            params, result = '', content
        out = f">>> {obj_name}.{method}({params})\n{result}"
        literal = nodes.literal_block(out, out)
        literal['language'] = 'python'
        set_source_info(self, literal)
        # Attach to the most recently generated node instead of returning it.
        self.state.parent.children[-1].children[-1].append(literal)
        return []
| [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},... | 3 | docs/fpoutput.py | functionistic/firepyer |
###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2017, John McNamara, jmcnamara@cpan.org
#
from ..excel_comparsion_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
    """
    Test file created by XlsxWriter against a file created by Excel.
    """

    def setUp(self):
        self.maxDiff = None
        filename = 'hyperlink27.xlsx'
        test_dir = 'xlsxwriter/test/comparison/'
        self.got_filename = test_dir + '_test_' + filename
        self.exp_filename = test_dir + 'xlsx_files/' + filename
        self.ignore_files = []
        self.ignore_elements = {}

    def test_create_file(self):
        """Test the creation of a simple XlsxWriter file with hyperlinks."""
        workbook = Workbook(self.got_filename)
        # Turn off default URL format for testing.
        workbook.default_url_format = None
        worksheet = workbook.add_worksheet()

        # UNC path plus a quoted internal sheet reference.
        url = r"external:\\Vboxsvr\share\foo bar.xlsx#'Some Sheet'!A1"
        worksheet.write_url('A1', url)

        workbook.close()
        self.assertExcelEqual()
| [
{
"point_num": 1,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
... | 3 | xlsxwriter/test/comparison/test_hyperlink27.py | haiyangd/XlsxWriter |
import pathgraph
import robotsearch
import unittest
class TestGraphMethods(unittest.TestCase):
    """Tests for graph construction and duplicate-edge rejection."""

    def test_create_undirected_graph(self):
        graph = pathgraph.graph_by_type("undirected")
        self.assertTrue(isinstance(graph, pathgraph.UndirectedGraph))

    def test_create_directed_graph(self):
        graph = pathgraph.graph_by_type("directed")
        self.assertTrue(isinstance(graph, pathgraph.DirectedGraph))

    def test_add_duplicate_edge_undirected(self):
        graph = pathgraph.graph_by_type("undirected")
        destination = pathgraph.DestinationNode("B", 1)
        # First insertion succeeds; an identical second one is rejected.
        self.assertTrue(graph.add_edge(fromKey="A", destination=destination))
        self.assertFalse(graph.add_edge(fromKey="A", destination=destination))

    def test_add_duplicate_edge_directed(self):
        graph = pathgraph.graph_by_type("directed")
        destination = pathgraph.DestinationNode("B", 1)
        self.assertTrue(graph.add_edge(fromKey="A", destination=destination))
        self.assertFalse(graph.add_edge(fromKey="A", destination=destination))
def main():
    # Delegate to unittest's CLI runner (discovers the TestCase above).
    unittest.main()

if __name__ == "__main__":
    main()
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
}... | 3 | python/main.py | jamesGadoury/robot-search |
#!/usr/bin/env python3
#
# Copyright (c) 2018 Roberto Riggio
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Bin counter Poller Apps."""
from empower.core.app import EmpowerApp
class BinCounterPoller(EmpowerApp):
    """Bin Counter Poller Apps.

    Command Line Parameters:

        tenant_id: tenant id
        every: loop period in ms (optional, default 5000ms)

    Example:

        ./empower-runtime.py apps.pollers.bincounterspoller \
            --tenant_id=52313ecb-9d00-4b7d-b873-b55d3d9ada26D
    """

    def lvap_join(self, lvap):
        """New LVAP."""
        # Start polling packet-size bin counters for the newly joined LVAP.
        self.bin_counter(lvap=lvap.addr,
                         bins=[512, 1514, 8192])
def launch(tenant_id):
    """ Initialize the module. """
    # Factory invoked by the empower runtime to instantiate the app.
    return BinCounterPoller(tenant_id=tenant_id)
| [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},... | 3 | empower/apps/pollers/bincounterpoller.py | paolasoto88/empower-runtime-v17 |
'''input
57
12
'''
# -*- coding: utf-8 -*-
# AtCoder Beginner Contest
# Problem B
def is_harshad_number(number):
    """Return "Yes" if number is divisible by its digit sum, else "No"."""
    return "Yes" if number % sum_digit(number) == 0 else "No"
def sum_digit(number):
    """Return the sum of the decimal digits of number."""
    # Generator over the digit characters; the original's nested list()
    # wrappers were redundant.
    return sum(int(digit) for digit in str(number))
if __name__ == '__main__':
    # Read the integer, test the Harshad property, and report Yes/No.
    number = int(input())
    result = is_harshad_number(number)
    print(result)
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": tru... | 3 | ABC/abc051-abc100/abc080/b.py | KATO-Hiro/AtCoder |
# -*- coding: utf-8 -*-
# pylint: disable=undefined-variable,no-name-in-module
from datetime import datetime, timedelta, timezone
import time
# Timezone-aware epoch reference; all millisecond math is relative to this.
UNIX_EPOCH = datetime(1970, 1, 1, tzinfo=timezone.utc)

def now():
    """Return the current UTC time as a timezone-aware datetime."""
    return datetime.now(timezone.utc)
def now_in_millis():
    """Return the current Unix time as whole milliseconds."""
    millis = time.time() * 1000
    return int(round(millis))
def epoch_millis(dt):
    """Convert a datetime to epoch milliseconds.

    Integers are assumed to already be epoch millis and pass through
    unchanged, as does None.

    :raises ValueError: for any other argument type.
    """
    if isinstance(dt, datetime):
        return int(round((dt - UNIX_EPOCH).total_seconds() * 1000))
    # isinstance instead of `type(dt) == int`; bool is excluded to keep the
    # original behaviour of rejecting True/False.
    if dt is None or (isinstance(dt, int) and not isinstance(dt, bool)):
        return dt
    raise ValueError("Cannot convert argument to epoch milliseconds")
def datetime_from_millis(millis):
    """Return the timezone-aware UTC datetime for the given epoch millis."""
    return UNIX_EPOCH + timedelta(milliseconds=millis)
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answ... | 3 | client/verta/verta/_internal_utils/time_utils/_time_utils_py3.py | houqp/modeldb |
# coding: utf-8
"""
Hydrogen Nucleus API
The Hydrogen Nucleus API # noqa: E501
OpenAPI spec version: 1.9.5
Contact: info@hydrogenplatform.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import nucleus_api
from nucleus_api.models.order_reconcile_return_object import OrderReconcileReturnObject # noqa: E501
from nucleus_api.rest import ApiException
class TestOrderReconcileReturnObject(unittest.TestCase):
    """OrderReconcileReturnObject unit test stubs"""

    def setUp(self):
        # No fixtures required for this generated model stub.
        pass

    def tearDown(self):
        pass

    def testOrderReconcileReturnObject(self):
        """Test OrderReconcileReturnObject"""
        # FIXME: construct object with mandatory attributes with example values
        # model = nucleus_api.models.order_reconcile_return_object.OrderReconcileReturnObject()  # noqa: E501
        pass
if __name__ == '__main__':
    # Allow running this generated test module directly.
    unittest.main()
| [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
... | 3 | atom/nucleus/python/test/test_order_reconcile_return_object.py | AbhiGupta03/SDK |
#! /usr/bin/env python3
import logging
import sys
def analyze(fin):
    """Parse ``name time unit`` lines from fin into (name, seconds) pairs.

    Blank lines are skipped; malformed lines and non-second units are
    logged and ignored.
    """
    parsed = []
    for raw in fin.readlines():
        stripped = raw.strip()
        if not stripped:
            continue
        parts = stripped.split()
        if len(parts) != 3:
            logging.error("skipping unexpected line '%s'", stripped)
            continue
        name, value, unit = parts
        if unit != "s":
            logging.error("unexpected time unit: '%s'", unit)
            continue
        parsed.append((name, float(value)))
    return parsed
def report(result, threshold=0.5):
    """Print a FAIL line for every test that exceeds threshold seconds.

    :param result: list of (name, seconds) pairs from analyze().
    :param threshold: per-test duration limit in seconds.
    :returns: (total_seconds, fail_count)
    """
    # Renamed from `sum`, which shadowed the builtin.
    total = 0.0
    fails = 0
    for name, seconds in result:
        total += seconds
        if seconds > threshold:
            fails += 1
            print("FAIL: %s lasts too long (%f s)" % (name, seconds))
    return total, fails
if __name__ == "__main__":
    # Read timing lines from stdin; optional argv[1] overrides the threshold.
    result = analyze(sys.stdin)
    if len(sys.argv) > 1:
        threshold = float(sys.argv[1])
    else:
        threshold = 0.5
    sum, fails = report(result, threshold)
    print("total: %f s" % sum)
    # Non-zero exit status signals at least one over-threshold test.
    if fails > 0:
        sys.exit(-1)
    sys.exit(0)
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": fal... | 3 | tools/analyze-test-timing.py | tacr-iotcloud/base |
import time
def jstype(obj, typ):
    """Return True if the JavaScript typeof(obj) equals typ.

    Transcrypt-only helper: the pragma injects raw JS that defines ``t``
    in the transpiled output, so this is not runnable under CPython.
    """
    __pragma__('js', '{}', '''
    var t = typeof(obj)''')
    if t == typ:
        return True
    return False
class PyDate:
    '''
    Descendants get a self.value property, which is always in sync
    with an internal self.ts = unixtime'''
    # _value caches a JS Date object; ts is the canonical unix timestamp.
    _value = ts = None

    def get_value(self):
        ''' the js Date we return is based on the unixtime ts '''
        if self._value:
            t = self._value.getTime() / 1000
            # Cached Date still agrees with ts: return it unchanged.
            if t == self.ts:
                return self._value
        # will set the new self._value to self.ts and return it
        # NOTE(review): set_value() is called without its `ts` argument —
        # this presumably relies on Transcrypt passing undefined (falsy);
        # it would be a TypeError under CPython. Confirm in transpiled JS.
        return self.set_value()

    def set_value(self, ts):
        ''' accept none, js data and unix time
        on none our self.ts wins
        '''
        if ts:
            if not jstype(ts, 'number'):
                # Not a number: assume a JS Date; derive ts from it.
                self._value = ts
                self.ts = ts.getTime() / 1000
                return self._value
            # ts = unixtime:
            self.ts = ts
            self._value = __new__(Date(ts * 1000))
            return self._value
        if not self.ts:
            # No timestamp known yet: default to "now".
            self.ts = time.time()
        return self.set_value(self.ts)

    value = property(get_value, set_value)
| [
{
"point_num": 1,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
},
{
"point_num": 2,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a ty... | 3 | doc/kendo/src/ch1/tools.py | axiros/transcrypt_material |
# coding=utf-8
# --------------------------------------------------------------------------
#
# Copyright (c) Microsoft Corporation. All rights reserved.
#
# The MIT License (MIT)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the ""Software""), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
# --------------------------------------------------------------------------
import datetime
from bodytime import AutoRestTimeTestService
import pytest
@pytest.fixture
def client():
    """Yield an AutoRestTimeTestService client bound to the local test server."""
    with AutoRestTimeTestService(base_url="http://localhost:3000") as client:
        yield client
class TestTime(object):
    """Round-trip tests for the time body endpoints."""

    def test_get(self, client):
        expected = datetime.time(11, 34, 56)
        assert client.time.get() == expected

    def test_put(self, client):
        response = client.time.put(datetime.time(8, 7, 56))
        assert response == "Nice job posting time"
| [
{
"point_num": 1,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
... | 3 | test/vanilla/AcceptanceTests/test_time.py | Azure/autorest.azure-functions-python |
# -*- coding: utf-8 -*-
from enum import Enum
import numpy as np
class DatasetXY:
    """2-D integer point dataset with labels y = f(x), split into training
    and evaluation subsets (roughly 1-in-``traing_data_ratio`` points go
    to evaluation)."""

    class XType(Enum):
        ALL = 1     # all points of the plane
        RANDOM = 2  # random uniformly-drawn integer points (np.random.randint)

    def __init__(self, t, x_lims, groups, traing_data_ratio, f):
        """
        :param t: XType selecting how the x points are generated.
        :param x_lims: (low, high) bounds applied to both coordinates.
        :param groups: stored as-is for callers; not used here.
        :param traing_data_ratio: 1-in-N chance a point goes to evaluation.
        :param f: labelling function mapping [x1, x2] -> y.
        """
        self.x_lims = x_lims
        self.groups = groups
        self.traing_data_ratio = traing_data_ratio
        ''' setting x '''
        self.x_range = range(self.x_lims[0], self.x_lims[1])
        self.x_range_len = self.x_lims[1] - self.x_lims[0]
        self.x_range_area = self.x_range_len ** 2
        if t == DatasetXY.XType.ALL:
            x = np.array([[[r, c] for c in self.x_range]
                          for r in self.x_range])
            x = x.reshape((self.x_range_area, 2))
        elif t == DatasetXY.XType.RANDOM:
            x = np.random.randint(self.x_lims[0], self.x_lims[1],
                                  (self.x_range_area, 2))
        else:
            # Fail fast instead of leaving x unbound (NameError below).
            raise ValueError("unknown x type: %r" % (t,))
        self.x_list = x.tolist()
        ''' setting y '''
        self.y_list = [f(e) for e in self.x_list]
        ''' splitting training and evaluation data '''
        self.traing_x_list = []
        self.traing_y_list = []
        self.evaltn_x_list = []
        self.evaltn_y_list = []
        for e_x, e_y in zip(self.x_list, self.y_list):
            # 1-in-ratio points go to the evaluation split.
            if np.random.randint(1, self.traing_data_ratio + 1) == 1:
                self.evaltn_x_list.append(e_x)
                self.evaltn_y_list.append(e_y)
            else:
                self.traing_x_list.append(e_x)
                self.traing_y_list.append(e_y)
| [
{
"point_num": 1,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": false
}... | 3 | src/datasetxy.py | danielefdf/nene |
import json
import logging.config
import os
# Fallback logging configuration used when logconfig.json cannot be loaded:
# console output at DEBUG plus rotating info/error log files.
default_config = {
    "version": 1,
    "disable_existing_loggers": False,
    "formatters": {
        "simple": {
            "format": "%(asctime)s :: %(name)s :: %(levelname)s :: %(message)s"
        }
    },
    "handlers": {
        "console": {
            "class": "logging.StreamHandler",
            "level": "DEBUG",
            "formatter": "simple",
            "stream": "ext://sys.stdout"
        },
        "info_file_handler": {
            "class": "logging.handlers.RotatingFileHandler",
            "level": "INFO",
            "formatter": "simple",
            "filename": "gee_assets_info.log",
            "maxBytes": 10485760,
            "backupCount": 20,
            "encoding": "utf8"
        },
        "error_file_handler": {
            "class": "logging.handlers.RotatingFileHandler",
            "level": "ERROR",
            "formatter": "simple",
            "filename": "gee_assets_errors.log",
            "maxBytes": 10485760,
            "backupCount": 20,
            "encoding": "utf8"
        }
    },
    "root": {
        "level": "INFO",
        "handlers": ["console", "info_file_handler", "error_file_handler"]
    }
}
def setup_logging():
    """Configure logging from logconfig.json next to this module, falling
    back to default_config when the file is missing or malformed."""
    path = os.path.join(os.path.dirname(__file__), 'logconfig.json')
    try:
        with open(path, 'rt') as f:
            config = json.load(f)
    except Exception:
        # Broad catch is deliberate at this boundary: any load failure is
        # logged and the default configuration is applied instead.
        logging.exception('Could not load logconfig.json. Loading default logging configuration.')
        config = default_config
    logging.config.dictConfig(config)
def get_credential(file_path):
    """Load a JSON credential file and return its (username, password) pair."""
    with open(file_path) as fh:
        creds = json.load(fh)
    # Both fields are mandatory; fail loudly if either is missing.
    assert "username" in creds
    assert "password" in creds
    return creds["username"], creds["password"]
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": fals... | 3 | geeup/config.py | thipokKub/geeup |
class Mobile:
    """A basic phone that can dial a number and ring with the stock tone."""

    def dial(self, number):
        """Print a message simulating dialing *number*."""
        message = f"dialing number {number}"
        print(message)

    def ring(self):
        """Print a message simulating the built-in ringtone."""
        print("ringing using built in tones.....")
class SmartMobile(Mobile):
    """A Mobile variant that replaces the stock ringtone."""

    def ring(self):
        """Override Mobile.ring to use a custom tone instead."""
        print("ringing using custom ring tones .... ")
{
"point_num": 1,
"id": "every_class_has_docstring",
"question": "Does every class in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{... | 3 | June21/ClassesandObjects/inheritance_101.py | pythonbykhaja/intesivepython |
# Copyright 2009-2010 Yelp
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""An implementation of wc as an MRJob.
This is meant as an example of why mapper_final is useful."""
from mrjob.job import MRJob
class MRWordCountUtility(MRJob):
    """MRJob implementing wc: counts chars, words and lines.

    Demonstrates mapper_final(): counters accumulate per line and are
    only emitted once the mapper has seen all of its input.
    """

    def __init__(self, *args, **kwargs):
        """Initialize the running counters on top of the MRJob setup."""
        super(MRWordCountUtility, self).__init__(*args, **kwargs)
        # Per-mapper totals, flushed by mapper_final().
        self.chars = 0
        self.words = 0
        self.lines = 0

    def mapper(self, _, line):
        """Accumulate counts for one line; emits nothing itself."""
        # Unreachable yield keeps this a generator, as mrjob expects.
        if False:
            yield
        self.chars += len(line) + 1  # +1 for the stripped newline
        self.words += sum(1 for word in line.split() if word.strip())
        self.lines += 1

    def mapper_final(self):
        """Emit the totals accumulated by this mapper."""
        for name, total in (('chars', self.chars),
                            ('words', self.words),
                            ('lines', self.lines)):
            yield name, total

    def reducer(self, key, values):
        """Sum per-mapper totals for each counter name."""
        yield key, sum(values)
# Standard mrjob entry point: parse CLI args and run the job.
if __name__ == '__main__':
    MRWordCountUtility.run()
| [
{
"point_num": 1,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written... | 3 | mrjob/examples/mr_wc.py | cleemesser/mrjob |
from flask import Flask, json
import pyodbc
# Module-level DB connection shared by all requests.
# NOTE(review): credentials and host are hard-coded in source — move to
# environment variables/config and confirm this is not a production secret.
conn = pyodbc.connect('DRIVER={PostgreSQL Unicode};SERVER=10.4.28.183;DATABASE=postgres;UID=postgres;PWD=developer2020')
app = Flask(__name__)
def random_products(conn):
    """Return all top-level category rows as (categoryid, name) tuples.

    :param conn: an open DB-API connection providing ``cursor()``.
    :returns: list of fetched rows.
    """
    cursor = conn.cursor()
    try:
        cursor.execute('select categoryid, name from categories c where parentid is null')
        rows = cursor.fetchall()
        # Kept from the original code: commit any open transaction state on
        # this shared connection even though the statement is a SELECT.
        cursor.commit()
        return rows
    finally:
        cursor.close()  # avoid leaking a cursor per request
@app.route('/')
def hello():
    """Root endpoint: dump the top-level category rows as plain text."""
    rows = random_products(conn)
    return str(rows)
# Run the Flask development server when executed directly.
if __name__ == '__main__':
    app.run()
| [
{
"point_num": 1,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"ans... | 3 | randoms_products/main.py | pechuga22/services-kiero |
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
class RCNN(nn.Module):
    """Recurrent CNN text classifier.

    A bidirectional LSTM runs over pretrained embeddings; its outputs are
    concatenated with the embeddings themselves, projected, max-pooled over
    time, and fed to a linear classifier.
    """

    def __init__(self, vocab_size, embed_dim, output_dim, hidden_dim, num_layers, dropout, weight):
        """Build the model.

        :param vocab_size: number of embedding rows
        :param embed_dim: embedding width
        :param output_dim: number of output classes
        :param hidden_dim: LSTM hidden size per direction
        :param num_layers: stacked LSTM layers
        :param dropout: dropout probability between LSTM layers
        :param weight: numpy array (vocab_size, embed_dim) of pretrained embeddings
        """
        super(RCNN, self).__init__()
        self.embedding = nn.Embedding(vocab_size, embed_dim)
        self.embedding.weight.data.copy_(torch.from_numpy(weight))
        self.lstm = nn.LSTM(embed_dim, hidden_dim, num_layers=num_layers,
                            bidirectional=True, dropout=dropout)
        # Projection input: forward + backward hidden states plus the raw embedding.
        self.linear = nn.Linear(2 * hidden_dim + embed_dim, hidden_dim)
        self.fc = nn.Linear(hidden_dim, output_dim)
        # Kept for interface compatibility; not applied in forward().
        self.dropout = nn.Dropout(dropout)

    def forward(self, text):
        """Classify a batch of token-id sequences.

        :param text: LongTensor, assumed shape (batch, seq_len) — TODO confirm
        :returns: logits of shape (batch, output_dim)
        """
        embeds = self.embedding(text)
        embeds = embeds.permute(1, 0, 2)  # (seq, batch, embed) for the LSTM
        output, (hidden, _) = self.lstm(embeds)
        # Concatenate LSTM outputs with the embeddings along the feature dim.
        output = torch.cat((output, embeds), 2)
        output = output.permute(1, 0, 2)  # back to (batch, seq, features)
        output = self.linear(output).permute(0, 2, 1)  # (batch, hidden, seq)
        pool = F.max_pool1d(output, output.size(2)).squeeze(2)  # max over time
        return self.fc(pool)
| [
{
"point_num": 1,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter tha... | 3 | pytorch/classification/rcnn/model.py | czhongyu/information-extraction |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class PriceInformation(object):
    """Value object holding a price amount and its type for the Alipay API."""

    # Names of the serializable fields, in output order.
    _FIELDS = ('amount', 'type')

    def __init__(self):
        # Backing storage for the two public properties.
        self._amount = None
        self._type = None

    @property
    def amount(self):
        """The price amount."""
        return self._amount

    @amount.setter
    def amount(self, value):
        self._amount = value

    @property
    def type(self):
        """The price type."""
        return self._type

    @type.setter
    def type(self, value):
        self._type = value

    def to_alipay_dict(self):
        """Serialize to a plain dict, recursing into nested API objects."""
        params = dict()
        for key in self._FIELDS:
            value = getattr(self, key)
            if value:
                if hasattr(value, 'to_alipay_dict'):
                    params[key] = value.to_alipay_dict()
                else:
                    params[key] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build a PriceInformation from a dict; return None for empty input."""
        if not d:
            return None
        o = PriceInformation()
        for key in PriceInformation._FIELDS:
            if key in d:
                setattr(o, key, d[key])
        return o
| [
{
"point_num": 1,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than ... | 3 | alipay/aop/api/domain/PriceInformation.py | antopen/alipay-sdk-python-all |
import os
from pathlib import Path
import pytest
from flair.data import Corpus, Token
from flair.models import SequenceTagger
from ner_sample.models.flair_ner import FlairNERModel
from tests.mocks import MockDataLoader
def test_flair_inference_ner_mock_data(pretrained_model, dataset_loader):
    """The model must produce exactly one prediction per test sentence."""
    _, test = dataset_loader.get_dataset()
    preds = pretrained_model.predict(test)
    assert len(preds) == len(test)
def test_flair_inference_correct(
    pretrained_model: FlairNERModel, dataset_loader: MockDataLoader
):
    """Token-level accuracy against the gold NER labels must exceed 0.7."""
    _, test = dataset_loader.get_dataset()
    predictions = pretrained_model.predict(test)
    correct = 0
    total = 0
    for sentence, prediction in zip(test, predictions):
        # Compare each predicted tag with the gold tag, token by token.
        for i, gold_token in enumerate(sentence.tokens):
            pred = prediction.tokens[i].annotation_layers["ner"][0].value
            actual = gold_token.annotation_layers["gold_ner"][0].value
            if pred == actual:
                correct += 1
            total += 1
    assert float(correct) / total > 0.7
| [
{
"point_num": 1,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
},
{
"point_num": 2,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a ty... | 3 | samples/ner_sample/tests/test_flair_ner.py | katyamust/ml-expr-fw |
import os
import unittest
from flask import current_app
from flask_testing import TestCase
from core import masakhane
class TestDevelopmentConfig(TestCase):
    """Checks for the development configuration profile."""

    def create_app(self):
        """Load DevelopmentConfig and hand the app to flask_testing."""
        masakhane.config.from_object('core.config.DevelopmentConfig')
        return masakhane

    def test_app_is_development(self):
        """Dev profile: dev secret key, app registered, test DB URI."""
        self.assertEqual(masakhane.config['SECRET_KEY'], "super-secret-key")
        self.assertIsNotNone(current_app)
        self.assertEqual(
            masakhane.config['SQLALCHEMY_DATABASE_URI'],
            os.getenv('DATABASE_TEST_URL', "sqlite:///masakhane.db"),
        )
class TestTestingConfig(TestCase):
    """Checks for the staging/testing configuration profile."""

    def create_app(self):
        """Load StagingConfig and hand the app to flask_testing."""
        masakhane.config.from_object('core.config.StagingConfig')
        return masakhane

    def test_app_is_testing(self):
        """Staging profile: testing key, TESTING on, test DB URI."""
        self.assertEqual(masakhane.config['SECRET_KEY'], "key_testing")
        self.assertTrue(masakhane.config['TESTING'])
        self.assertEqual(
            masakhane.config['SQLALCHEMY_DATABASE_URI'],
            os.getenv('DATABASE_TEST_URL', "sqlite:///masakhane.db"),
        )
class TestProductionConfig(TestCase):
    """Checks for the production configuration profile."""

    def create_app(self):
        """Load ProductionConfig and hand the app to flask_testing."""
        masakhane.config.from_object('core.config.ProductionConfig')
        return masakhane

    def test_app_is_production(self):
        """Production profile: production key and TESTING disabled."""
        self.assertEqual(masakhane.config['SECRET_KEY'], "key_production")
        self.assertFalse(masakhane.config['TESTING'])
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
... | 3 | src/server/core/tests/test_config.py | Freshia/masakhane-web |
"""Base UptimeRobot entity."""
from __future__ import annotations
from pyuptimerobot import UptimeRobotMonitor
from homeassistant.helpers.entity import EntityDescription
from homeassistant.helpers.update_coordinator import (
CoordinatorEntity,
DataUpdateCoordinator,
)
from .const import ATTR_TARGET, ATTRIBUTION, DOMAIN
class UptimeRobotEntity(CoordinatorEntity):
    """Base UptimeRobot entity."""

    _attr_attribution = ATTRIBUTION

    def __init__(
        self,
        coordinator: DataUpdateCoordinator,
        description: EntityDescription,
        monitor: UptimeRobotMonitor,
    ) -> None:
        """Initialize Uptime Robot entities."""
        super().__init__(coordinator)
        self.entity_description = description
        self._monitor = monitor
        monitor_id = str(self.monitor.id)
        self._attr_device_info = {
            "identifiers": {(DOMAIN, monitor_id)},
            "name": self.monitor.friendly_name,
            "manufacturer": "Uptime Robot Team",
            "entry_type": "service",
            "model": self.monitor.type.name,
            "configuration_url": f"https://uptimerobot.com/dashboard#{self.monitor.id}",
        }
        self._attr_extra_state_attributes = {ATTR_TARGET: self.monitor.url}
        self._attr_unique_id = monitor_id

    @property
    def _monitors(self) -> list[UptimeRobotMonitor]:
        """Return all monitors from the coordinator."""
        return self.coordinator.data or []

    @property
    def monitor(self) -> UptimeRobotMonitor:
        """Return the monitor for this entity, falling back to the initial one."""
        for candidate in self._monitors:
            if str(candidate.id) == self.entity_description.key:
                return candidate
        return self._monitor

    @property
    def monitor_available(self) -> bool:
        """Return True if the monitor is up (API status 2)."""
        return bool(self.monitor.status == 2)
| [
{
"point_num": 1,
"id": "all_return_types_annotated",
"question": "Does every function in this file have a return type annotation?",
"answer": true
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
... | 3 | homeassistant/components/uptimerobot/entity.py | FlavorFx/core |
import pytest
import networkx as nx
import scse.controller.miniscot as miniSCOT
from scse.main.cli import MiniSCOTDebuggerApp
# Simulation length (time steps) shared by all the tests below.
_HORIZON = 10
def _create_miniscot():
    """Build a SupplyChainEnvironment with the module's default horizon and one ASIN."""
    return miniSCOT.SupplyChainEnvironment(
        time_horizon=_HORIZON,
        asin_selection=1,
    )
def test_importable():
    """The environment runs to completion and the clock reaches the horizon."""
    state = _create_miniscot().run()
    assert state['clock'] == _HORIZON
def test_fulfilled():
    """At least one unit must reach the customer node during the run."""
    state = _create_miniscot().run()
    network = state['network']
    # TODO remove the 'CUST1' hard-coding
    delivered = network.nodes['Customer']['delivered']
    assert delivered > 0
def test_profiles():
    """A named demo profile loads and runs for the full horizon."""
    env = miniSCOT.SupplyChainEnvironment(
        time_horizon=_HORIZON,
        asin_selection=1,
        profile='newsvendor_demo_profile',
    )
    assert env.run()['clock'] == _HORIZON
def test_cli_transcript():
    """Drive the debugger CLI through start/next/run without errors."""
    app = MiniSCOTDebuggerApp()
    app.do_start("-seed 12345 -horizon 1")
    app.do_next("")
    app.do_run("")
    # Reaching this point without an exception is the whole test.
    assert True
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answ... | 3 | test/unit/test_supply_chain_simulation_environment.py | bellmast/supply-chain-simulation-environment |
# -*- coding: utf-8 -*-
#
from typing import Optional
from ..category import Category
class Utils(Category):
    """VK API ``utils.*`` method bindings.

    Every method forwards its own arguments to ``self._request`` via
    ``locals()``, so each local name becomes a request parameter.
    Do NOT introduce extra local variables inside these bodies —
    they would be sent to the API as parameters.
    """

    def check_link(
        self,
        url: str = None,
        **kwargs
    ) -> dict:
        # utils.checkLink — check whether a URL is blocked by VK.
        return self._request("checkLink", locals())

    def delete_from_last_shortened(
        self,
        key: str = None,
        **kwargs
    ) -> dict:
        # utils.deleteFromLastShortened — remove a shortened link by key.
        return self._request("deleteFromLastShortened", locals())

    def get_last_shortened_links(
        self,
        count: Optional[int] = None,
        offset: Optional[int] = None,
        **kwargs
    ) -> dict:
        # utils.getLastShortenedLinks — paginated list of recent short links.
        return self._request("getLastShortenedLinks", locals())

    def get_link_stats(
        self,
        key: str = None,
        source: Optional[str] = None,
        access_key: Optional[str] = None,
        interval: Optional[str] = None,
        intervals_count: Optional[int] = None,
        extended: Optional[bool] = None,
        **kwargs
    ) -> dict:
        # utils.getLinkStats — click statistics for a shortened link.
        return self._request("getLinkStats", locals())

    def get_server_time(
        self,
        **kwargs
    ) -> dict:
        # utils.getServerTime — current VK server time.
        return self._request("getServerTime", locals())

    def get_short_link(
        self,
        url: str = None,
        private: Optional[bool] = None,
        **kwargs
    ) -> dict:
        # utils.getShortLink — create a vk.cc short link.
        return self._request("getShortLink", locals())

    def resolve_screen_name(
        self,
        screen_name: str = None,
        **kwargs
    ) -> dict:
        # utils.resolveScreenName — map a screen name to an object id/type.
        return self._request("resolveScreenName", locals())
| [
{
"point_num": 1,
"id": "all_return_types_annotated",
"question": "Does every function in this file have a return type annotation?",
"answer": true
},
{
"point_num": 2,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (exc... | 3 | pyvdk/api/categories/utils.py | UT1C/pyVDK |
# Copyright (c) 2020 Manfred Moitzi
# License: MIT License
import ezdxf
def new_doc(pdmode: int, pdsize: float = 1):
    """Create an R2000 DXF document with the given point display mode and size."""
    drawing = ezdxf.new('R2000')
    drawing.header['$PDMODE'] = pdmode
    drawing.header['$PDSIZE'] = pdsize
    return drawing
# Point display size shared by every generated file.
PDSIZE = 0.5
# $PDMODE values to render: the base glyphs (0-4) plus, presumably, each
# glyph combined with the 32/64/96 display modifiers — verify against the
# DXF $PDMODE documentation.
MODES = [
    0, 1, 2, 3, 4,
    32, 33, 34, 35, 36,
    64, 65, 66, 67, 68,
    96, 97, 98, 99, 100,
]
def add_point(x, angle: float, color: int):
    """Add a POINT at (x, 3) and render its virtual entities 2 units below.

    Relies on the module-level ``msp`` and ``pdmode`` bound in the loop below.
    """
    point = msp.add_point((x, 3), dxfattribs={'color': color, 'angle': angle})
    virtual = point.virtual_entities(PDSIZE, pdmode)
    for entity in (e.translate(0, -2, 0) for e in virtual):
        msp.add_entity(entity)
# Generate one DXF file per point mode: a framing polyline plus five sample
# points (varying color and angle) with their exploded virtual entities.
for pdmode in MODES:
    doc = new_doc(pdmode, PDSIZE)
    msp = doc.modelspace()
    msp.add_lwpolyline([(0, 0), (10, 0), (10, 4), (0, 4)], close=True)
    add_point(1, 0, 1)
    add_point(3, 30, 2)
    add_point(5, 45, 3)
    add_point(7, 60, 4)
    add_point(9, 90, 6)
    doc.set_modelspace_vport(10, (5, 2))
    doc.saveas(f'points_pdmode_{pdmode}.dxf')
| [
{
"point_num": 1,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
},
{
"point_num": 2,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (... | 3 | examples_dxf/create_point_examples.py | jpsantos-mf/ezdxf |
""" Pymode utils. """
import os.path
import sys
import threading
import warnings
from contextlib import contextmanager
import vim # noqa
from ._compat import StringIO, PY2
# Mirror vim's g:pymode_debug flag; when truthy, silence_stderr() is a no-op.
DEBUG = int(vim.eval('g:pymode_debug'))
# Suppress all Python warnings inside the editor process.
warnings.filterwarnings('ignore')
# Single lock shared by all silence_stderr() calls. The original code
# acquired a brand-new Lock() each time, which serialized nothing.
_stderr_lock = threading.Lock()


@contextmanager
def silence_stderr():
    """Temporarily replace sys.stderr with a StringIO (no-op when DEBUG is set)."""
    if DEBUG:
        yield
    else:
        with _stderr_lock:
            stderr = sys.stderr
            sys.stderr = StringIO()
        try:
            yield
        finally:
            # Restore stderr even if the managed block raised.
            with _stderr_lock:
                sys.stderr = stderr
def patch_paths():
    """Prepend the bundled library directories to sys.path.

    Adds ``libs`` plus the Python-version-specific backports directory
    (``libs2`` for Python 2, ``libs3`` for Python 3).
    """
    here = os.path.dirname(__file__)
    sys.path.insert(0, os.path.join(here, 'libs'))
    if PY2:
        sys.path.insert(0, os.path.join(here, 'libs2'))
    else:
        sys.path.insert(0, os.path.join(here, 'libs3'))
| [
{
"point_num": 1,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
... | 3 | bundle/vim-python-mode/pymode/utils.py | ninegrid/dotfiles-vim |
from zeeguu.core.test.rules.article_rule import ArticleRule
from zeeguu.core.test.rules.base_rule import BaseRule
from zeeguu.core.test.rules.language_rule import LanguageRule
from zeeguu.core.test.rules.url_rule import UrlRule
from zeeguu.core.model.text import Text
class TextRule(BaseRule):
    """A Rule testing class for the zeeguu.core.model.Text model class.

    Creates a Text object with random data and saves it to the database.
    """

    def __init__(self, length=59):
        super().__init__()
        self.text = self._create_model_object(length)
        self.save(self.text)

    def _create_model_object(self, length):
        """Build a Text from random content/language/article, retrying on collision."""
        content = self.faker.text(max_nb_chars=length)
        language = LanguageRule().random
        article = ArticleRule().article
        text = Text(content, language, article.url, article)
        # Recurse with fresh random data if an equal object is already stored.
        if self._exists_in_db(text):
            return self._create_model_object(length)
        return text

    @staticmethod
    def _exists_in_db(obj):
        """An database existence check is not necessary since no primary key
        constraints can be violated.
        """
        return False
| [
{
"point_num": 1,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than clas... | 3 | zeeguu/core/test/rules/text_rule.py | mircealungu/Zeeguu-API-2 |
from argparse import ArgumentParser
from typing import Any
from django.core.management.base import BaseCommand, CommandError
from zerver.lib.actions import do_delete_old_unclaimed_attachments
from zerver.models import get_old_unclaimed_attachments
class Command(BaseCommand):
    """Management command that purges old unclaimed attachments."""

    help = """Remove unclaimed attachments from storage older than a supplied
numerical value indicating the limit of how old the attachment can be.
One week is taken as the default value."""

    def add_arguments(self, parser: ArgumentParser) -> None:
        """Register the --weeks age cutoff and the --for-real safety flag."""
        # NOTE(review): the help text says one week is the default, but the
        # actual default below is 5 weeks — confirm which is intended.
        parser.add_argument('-w', '--weeks',
                            dest='delta_weeks',
                            default=5,
                            type=int,
                            help="Limiting value of how old the file can be.")
        parser.add_argument('-f', '--for-real',
                            action='store_true',
                            help="Actually remove the files from the storage.")

    def handle(self, *args: Any, **options: Any) -> None:
        """List the matching attachments, then delete them unless dry-running."""
        delta_weeks = options['delta_weeks']
        print(f"Deleting unclaimed attached files older than {delta_weeks} weeks")
        # Show what is about to be removed before doing anything destructive.
        for old_attachment in get_old_unclaimed_attachments(delta_weeks):
            print(f"* {old_attachment.file_name} created at {old_attachment.create_time}")
        print("")
        if not options["for_real"]:
            raise CommandError("This was a dry run. Pass -f to actually delete.")
        do_delete_old_unclaimed_attachments(delta_weeks)
        print("")
        print("Unclaimed files deleted.")
| [
{
"point_num": 1,
"id": "all_return_types_annotated",
"question": "Does every function in this file have a return type annotation?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer... | 3 | zerver/management/commands/delete_old_unclaimed_attachments.py | cozyrohan/zulip |
"""
This module contains the logic of the plugin
in charge of creating fake nodes returning
the the data inside an input directory
"""
from typing import Dict, List
from suzieq.poller.worker.inventory.inventory import Inventory
from suzieq.poller.worker.nodes.files import FileNode
class InputDirInventory(Inventory):
    """Debugging pseudo-inventory: not a real source plugin.

    Overrides Inventory to expose a single fake node that replays the
    content of an input directory.
    """

    def __init__(self, add_task_fn, **kwargs) -> None:
        # Extract our private option before forwarding the rest upstream.
        self.input_dir = kwargs.pop('input_dir', None)
        super().__init__(add_task_fn, **kwargs)

    async def build_inventory(self) -> Dict[str, FileNode]:
        """Create the single FileNode serving the input directory's data.

        Returns:
            Dict[str, FileNode]: mapping of hostname to the fake node.
        """
        node = FileNode()
        # pylint: disable=protected-access
        await node._init(self.input_dir)
        self._nodes = {node.hostname: node}
        return self._nodes

    async def _get_device_list(self) -> List[Dict]:
        """No real devices back this inventory."""
        return []
| [
{
"point_num": 1,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer":... | 3 | suzieq/poller/worker/inventory/dir.py | LucaNicosia/suzieq |
#!/user/bin/python
'''Fibonacci Number Modulo m
Compute the nth Fibonacci number modulo m.
Input: Integers 0 <= n <= 10^18 and 2 <= m <= 10^5
Output: nth Fibonacci number modulo m, F(n) mod m.
To compute F(n) mod m, calculate r = n mod P(m), where P(m) is the length
of the Pisano period for m, then return F(r) mod m.
A Pisano period is the period of an integer sequence which is
obtained by reducing each term of a primary sequence modulo some
integer m ≥ 1.
The Pisano period is defined as the length of the period of the
sequence obtained by reading the Fibonacci sequence modulo m
'''
import sys
def get_fibonacci_huge_naive(n, m):
    """Compute F(n) mod m by iterating all n Fibonacci numbers (O(n))."""
    if n <= 1:
        return n
    a, b = 0, 1
    for _ in range(n - 1):
        a, b = b, a + b
    return b % m
def get_fibonacci_huge_fast(n, m):
    '''Compute F(n) mod m via the Pisano period.

    The Fibonacci sequence taken modulo m is periodic (the Pisano period),
    so F(n) mod m == F(n mod period) mod m, which makes n up to 10^18
    tractable. The original body referenced an undefined ``current`` and
    always raised NameError.
    '''
    # Find the Pisano period: the pair (0, 1) reappearing marks a restart.
    previous, current = 0, 1
    period = 1
    for i in range(1, 6 * m + 1):  # the Pisano period never exceeds 6*m
        previous, current = current, (previous + current) % m
        if (previous, current) == (0, 1):
            period = i
            break
    # Reduce n into one period, then iterate the short remainder.
    n %= period
    if n <= 1:
        return n
    previous, current = 0, 1
    for _ in range(n - 1):
        previous, current = current, (previous + current) % m
    return current % m
if __name__ == '__main__':
    # Read "n m" from stdin; use a distinct name instead of shadowing
    # the builtin input().
    data = sys.stdin.read()
    n, m = map(int, data.split())
    print(get_fibonacci_huge_fast(n, m))
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/... | 3 | 01-algorithmic-design-and-techniques/week-2/fibonacci-huge.py | andrewnachtigal/UCSD-Algorithms |
# Unless explicitly stated otherwise all files in this repository are licensed
# under the Apache License Version 2.0.
# This product includes software developed at Datadog (https://www.datadoghq.com/).
# Copyright 2016-2019 Datadog, Inc.
from checks import AgentCheck
from common import assert_init_config_init, assert_agent_config_init, assert_instance_init
class TestCheck(AgentCheck):
    """Check stub that validates constructor wiring of the Agent configs."""

    def __init__(self, *args, **kwargs):
        super(TestCheck, self).__init__(*args, **kwargs)
        # Verify init/agent/instance configs all reached __init__ intact.
        assert_init_config_init(self)
        assert_agent_config_init(self, True)
        assert_instance_init(self)

    def check(self, instance):
        """No-op: this check only exercises the constructor."""
        pass
| [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
}... | 3 | pkg/collector/py/tests/kwargs_init_signature.py | arminioa/datadog-agent |
from dataclasses import dataclass
from mchqr.detector import Detector
from mchqr.dev import NotOverriden, subclasses_dict
from mchqr.image import ImageList
from mchqr.solution import AlgoSolution
from multiprocessing import Pool
from time import perf_counter_ns
@dataclass
class BaseAlgorithm:
    """Template for QR-detection algorithms.

    Holds the detector and the image list, times run(), and leaves the
    actual detection strategy to subclasses.
    """

    detector: Detector
    images: ImageList

    def measure(self):
        """Run the algorithm and return (elapsed nanoseconds, solutions)."""
        started = perf_counter_ns()
        solutions = self.run()
        elapsed = perf_counter_ns() - started
        return elapsed, solutions

    def run(self) -> AlgoSolution:
        """Subclasses must implement the detection strategy."""
        raise NotOverriden(self.run)
class ProcessPool(BaseAlgorithm):
    """Detect QR codes in all images in parallel, one process per image."""

    def run(self) -> AlgoSolution:
        with Pool(len(self.images)) as workers:
            pairs = workers.map(self.detector, self.images)
        return dict(pairs)
class Sequence(BaseAlgorithm):
    """Detect QR codes in all images one after another in this process."""

    def run(self) -> AlgoSolution:
        results = {}
        for image in self.images:
            key, value = self.detector(image)
            results[key] = value
        return results
# Registry of the algorithm classes above — presumably mapping subclass
# names to classes via subclasses_dict; verify against mchqr.dev.
algos = subclasses_dict(BaseAlgorithm)
| [
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
},
{
"point_num": 2,
"id": "every_class_has_docstring",
"question": "Does every class in this file have a docstring?",
"answer": false... | 3 | source/mchqr/algo.py | Matej-Chmel/parallel-qr-code-scanner |
import requests, time
from pythontools.core import logger, tools
def uploadToHastebin(content):
    """Upload a string or list of lines to hastebin and return the paste URL.

    :param content: a str, or a list whose items are str()-ed and newline-terminated
    :returns: the paste URL, or None after logging an error for other types
    """
    url = 'https://hastebin.com'
    if isinstance(content, str):
        data = content
    elif isinstance(content, list):
        # Match the original framing: every item gets a trailing newline.
        data = "".join(str(item) + "\n" for item in content)
    else:
        logger.log("§cError: Please insert string or list!")
        return
    response = requests.post(url + '/documents', data=data.encode('utf-8'))
    return url + '/' + response.json()['key']
# Active named timers: name -> start timestamp (seconds since epoch).
logTimes = {}
def startLogTime(name):
    """Start (or restart) the timer identified by *name*."""
    logTimes[name] = time.time()
def endLogTime(name, log=True):
    """Stop the named timer and return its formatted duration.

    Logs an error and returns None when the timer was never started.
    """
    if name not in logTimes:
        logger.log(f"§cError: {name} not exist!")
        return
    convertedTime = tools.convertTime(time.time() - logTimes.pop(name), millis=True)
    if log:
        logger.log(f"§8[§bTIME§8] §e{name} finished in §6{convertedTime}")
    return convertedTime
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answ... | 3 | pythontools/dev/dev.py | CrawlerCode/PythonTools |
from selfdescribing import SelfDescribing
__author__ = "Jon Reid"
__copyright__ = "Copyright 2011 hamcrest.org"
__license__ = "BSD, see License.txt"
class Matcher(SelfDescribing):
    """A matcher over acceptable values.

    A matcher can describe itself, giving feedback when it fails. Do not
    implement this protocol directly: *extend*
    :py:class:`~hamcrest.core.base_matcher.BaseMatcher` instead, which keeps
    implementations compatible as the
    :py:class:`~hamcrest.core.matcher.Matcher` API grows.
    """

    def matches(self, item, mismatch_description=None):
        """Evaluate the matcher against *item*.

        When the match fails and *mismatch_description* is provided, append
        an explanation of why the item was not accepted.

        :param item: The object against which the matcher is evaluated.
        :param mismatch_description: Optional description populated on failure.
        :returns: ``True`` if *item* matches, otherwise ``False``.
        """
        raise NotImplementedError('matches')

    def describe_mismatch(self, item, mismatch_description):
        """Append a concise description of why *item* was rejected.

        The text becomes part of a larger failure message, so keep it short.
        Assumes ``matches(item)`` is ``False`` without checking it.

        :param item: The item the matcher rejected.
        :param mismatch_description: The description to be built or appended to.
        """
        raise NotImplementedError('describe_mismatch')
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding ... | 3 | src/hamcrest/core/matcher.py | pexip/os-pyhamcrest |
# coding: utf-8
"""
Talend Management Console Public API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: 2.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import swagger_client
from swagger_client.api.plans__executions_api import PlansExecutionsApi # noqa: E501
from swagger_client.rest import ApiException
class TestPlansExecutionsApi(unittest.TestCase):
    """PlansExecutionsApi unit test stubs"""

    def setUp(self):
        """Create a fresh API client for each test."""
        self.api = swagger_client.api.plans__executions_api.PlansExecutionsApi()  # noqa: E501

    def tearDown(self):
        """Nothing to clean up."""
        pass

    def test_execute(self):
        """Test case for execute

        Execute Plan  # noqa: E501
        """
        pass

    def test_get_execution_status(self):
        """Test case for get_execution_status

        Get Plan execution status  # noqa: E501
        """
        pass
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| [
{
"point_num": 1,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},... | 3 | pytmcapi/test/test_plans__executions_api.py | mverrilli/tmc-api-clients |
from numpy.testing import assert_array_almost_equal
from .test_carsons import ABCN_line_z_primitive
from carsons.carsons import CarsonsEquations
def test_compatibility_with_dict_of_phases():
    """CarsonsEquations must keep accepting legacy line models whose wire
    attributes (resistance, GMR, positions, phases) are plain dicts keyed
    by conductor label, reproducing the reference ABCN z-primitive.
    """
    class BackwardsCompatibleModel():
        # Minimal ABCN overhead-line fixture in the old dict-based format.
        def __init__(self):
            # Per-conductor resistance — units presumably ohm/m; TODO confirm
            # against the reference fixture in test_carsons.
            self.resistance = {
                "A": 0.000115575,
                "B": 0.000115575,
                "C": 0.000115575,
                "N": 0.000367852,
            }
            # Geometric mean radius per conductor (meters — assumed).
            self.geometric_mean_radius = {
                "A": 0.00947938,
                "B": 0.00947938,
                "C": 0.00947938,
                "N": 0.00248107,
            }
            # (x, y) conductor positions (meters — assumed).
            self.wire_positions = {
                "A": (0.762, 8.5344),
                "B": (2.1336, 8.5344),
                "C": (0, 8.5344),
                "N": (1.2192, 7.3152),
            }
            # Phase label carried by each conductor.
            self.phases = {
                "A": "A",
                "B": "B",
                "C": "C",
                "N": "N",
            }
    # we are compatible models that provide 'phases'
    # as a dictionary
    model = BackwardsCompatibleModel()
    z_primative = CarsonsEquations(model).build_z_primitive()
    assert_array_almost_equal(
        z_primative,
        ABCN_line_z_primitive(),
        decimal=4
    )
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": false
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true... | 3 | tests/test_dict_compatibility.py | kdheepak/carsons |
import pytest
import wave.data.likeness as likeness
class TestLikenessFDS:
    """Likeness scores for 6-bit (FDS-style, ceiling 64) wavetables."""

    def testRoundedLikenessFDS(self):
        """A 0..63 ramp vs. silence scores a rounded 31.46% likeness."""
        instance = likeness.WaveLikeness(
            base=list(range(64)),
            comparison=[0] * 64,
            ceiling=64,
        )
        assert 31.46 == instance.getLikeness()

    def testExactRoundedLikenessFDS(self):
        """Identical wavetables score exactly 100%."""
        instance = likeness.WaveLikeness(
            base=list(range(64)),
            comparison=list(range(64)),
            ceiling=64,
        )
        assert 100.00 == instance.getLikeness()

    def testSinglePercentageNamco(self):
        """Per-sample percentages: exact match is 100%, max distance is 0%."""
        instance = likeness.WaveLikeness(
            base=list(range(64)),
            comparison=[0] * 64,
            ceiling=64,
        )
        assert 100.00 == instance.getPercentage(0, 0)
        assert 0.00 == instance.getPercentage(63, 63)
class TestLikenessNamco:
    """Likeness scores for 4-bit (Namco-style, ceiling 16) wavetables."""

    def testExactRoundedLikenessNamco(self):
        """Identical wavetables score exactly 100%."""
        instance = likeness.WaveLikeness(
            base=list(range(16)),
            comparison=list(range(16)),
            ceiling=16,
        )
        assert 100.00 == instance.getLikeness()

    def testRoundedLikenessNamco(self):
        """A 0..15 ramp vs. silence scores a rounded 33.71% likeness."""
        instance = likeness.WaveLikeness(
            base=list(range(16)),
            comparison=[0] * 16,
            ceiling=16,
        )
        assert 33.71 == instance.getLikeness()

    def testSinglePercentageNamco(self):
        """Per-sample percentages: exact match is 100%, max distance is 0%."""
        instance = likeness.WaveLikeness(
            base=list(range(16)),
            comparison=[0] * 16,
            ceiling=16,
        )
        assert 100.00 == instance.getPercentage(0, 0)
        assert 0.00 == instance.getPercentage(15, 15)
| [
{
"point_num": 1,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true... | 3 | tests/test_likeness.py | herbeeg/famitracker-instrument-generator |
#!/usr/bin/python
import os
import sys
import json
if sys.version_info[0] < 3:
import thread
else:
import _thread
import threading
class AbstractConnector():
    """Base class for device connectors.

    Holds the protocol/adaptor objects, a connected flag and the
    disconnect callback; every operation is a stub meant to be
    overridden by concrete connector implementations.
    """
    def __init__(self, local_device):
        self.Protocol = None
        self.Adaptor = None
        self.LocalDevice = local_device
        # Flags
        self.IsConnected = False
        # Callbacks
        self.OnDeviceDisconnectCallback = None
    def SetProtocol(self, protocol):
        """Attach the protocol object used to talk to the device."""
        self.Protocol = protocol
    def SetAdaptor(self, adaptor):
        """Attach the transport adaptor object."""
        self.Adaptor = adaptor
    def SetDeviceDisconnectCallback(self, callback):
        """Register a callable invoked when the device disconnects."""
        self.OnDeviceDisconnectCallback = callback
    def Connect(self, type):
        """Stub connect; returns the current connection flag.

        NOTE(review): the parameter shadows the ``type`` builtin; kept
        as-is for interface compatibility with subclasses.
        """
        return self.IsConnected
    def Disconnect(self):
        """Stub disconnect; returns the current connection flag."""
        return self.IsConnected
    def IsValidDevice(self):
        """Stub validity check; default treats every device as valid."""
        return True
    def GetUUID(self):
        """Stub; no UUID available by default."""
        return ""
    def GetDeviceInfo(self):
        """Stub; no device info available by default."""
        return ""
    def SetSensorInfo(self, info):
        """Stub; reports success without storing anything."""
        return True
    def GetSensorInfo(self, info):
        """Stub; no sensor info available by default."""
        return ""
    def GetSensorListInfo(self):
        """Stub; no sensor list available by default."""
        return ""
{
"point_num": 1,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
},
{
"point_num": 2,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answ... | 3 | MkSAbstractConnector.py | MakeSenseCorp/mksdk-py |
from ...models import UserProfile
from django.core.management.base import BaseCommand
def list_organization_names():
    """Return the organization names of all user profiles, skipping blanks.

    Profiles whose ``organization_name`` is empty or None are excluded.
    """
    return [
        profile.organization_name
        for profile in UserProfile.objects.all()
        if profile.organization_name
    ]
class Command(BaseCommand):
    """Management command that prints every organization name."""
    help = 'List all organization names '
    def handle(self, *args, **options):
        """Print one organization name per line, or a notice when none exist."""
        orgs = list_organization_names()
        for i in orgs:
            print(i)
        if not orgs:
            print('No organizations to display.')
| [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
}... | 3 | apps/accounts/management/commands/get_all_organizations.py | dtisza1/bluebutton-web-server |
from django import forms
#from django.contrib.auth.models import User
from users.models import User
from products.models import Product
from categories.models import Category
class RegisterForm(forms.Form):
    """Registration form that rejects usernames and emails already in use."""
    username = forms.CharField(label='Nombre de Usuario',required=True, min_length=4, max_length=50,widget=forms.TextInput(attrs={
        'class':'form-control', 'id':'username'
    }))
    email = forms.EmailField(required=True, widget=forms.EmailInput(attrs={
        'class':'form-control', 'id':'email', 'placeholder':'email@ejemplo.com'
    }))
    password = forms.CharField(label='Contraseña',required=True, min_length=4, max_length=50,widget=forms.PasswordInput(attrs={
        'class':'form-control', 'id':'password'
    }))
    def clean_username(self):
        """Validate that the username is not already taken."""
        username = self.cleaned_data.get('username')
        if User.objects.filter(username=username).exists():
            raise forms.ValidationError('El nombre de usuario se encuentra en uso')
        return username
    def clean_email(self):
        """Validate that the email is not already taken."""
        email = self.cleaned_data.get('email')
        if User.objects.filter(email=email).exists():
            raise forms.ValidationError('El email se encuentra en uso')
        return email
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
... | 3 | MarvelLA/forms.py | IvanLezcano/TrabajoPracticoFinalPoloticDjango |
from os import path
from flask import Flask, render_template, send_from_directory, jsonify
from flask_webpack import Webpack
from version import __version__
import fabfile
import fabric
import time
here = path.abspath(path.dirname(__file__))
app = Flask(__name__)
webpack = Webpack()
app.config["WEBPACK_MANIFEST_PATH"] = path.join(here, "manifest.json")
webpack.init_app(app)
lb_host = 'localhost'
@app.route("/")
def index():
    """Serve the single-page application shell."""
    return render_template("index.html")
@app.route("/assets/<path:filename>")
def send_asset(filename):
    """Serve a static asset from the bundled ``public`` directory."""
    return send_from_directory(path.join(here, "public"), filename)
@app.route("/vips")
def vips():
    """Return the configured VIPs as JSON."""
    return jsonify(fabfile.get_vips())
@app.route("/vip_status/<vip>")
def vip_status(vip):
    """Run the remote status task for *vip* on the load balancer host."""
    r = fabric.api.execute(fabfile.status, vip, hosts=[lb_host])
    # NOTE(review): looks like this waits for the remote state to settle
    # before the result is returned - confirm whether it is still needed.
    time.sleep(1)
    return jsonify(r[lb_host])
@app.route("/enable_service/<vip>/<service>")
def enable_service(vip, service):
    """Enable *service* on *vip* via the remote fabric task."""
    fabric.api.execute(fabfile.enable, vip, service, hosts=[lb_host])
    return jsonify({})
@app.route("/disable_service/<vip>/<service>")
def disable_service(vip, service):
    """Disable *service* on *vip* via the remote fabric task."""
    fabric.api.execute(fabfile.disable, vip, service, hosts=[lb_host])
    return jsonify({})
| [
{
"point_num": 1,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written ... | 3 | pyreact/__init__.py | bzhou/pyreact |
import os
import testinfra.utils.ansible_runner
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all')
PACKAGE = 'logrotate'
PACKAGE_BINARY = '/usr/sbin/logrotate'
def test_logrotate_package_installed(host):
    """
    Tests if logrotate package is installed.
    """
    # Consistency fix: use the module-level PACKAGE constant instead of
    # repeating the literal package name, like the other tests do with
    # PACKAGE_BINARY.
    assert host.package(PACKAGE).is_installed
def test_logrotate_binary_exists(host):
    """
    Tests if logrotate binary exists.
    """
    assert host.file(PACKAGE_BINARY).exists
def test_logrotate_binary_file(host):
    """
    Tests if logrotate binary is a file type.
    """
    assert host.file(PACKAGE_BINARY).is_file
def test_logrotate_binary_which(host):
    """
    Tests the output to confirm logrotate's binary location.
    """
    assert host.check_output('which logrotate') == PACKAGE_BINARY
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": true
},
{
"point_num": 2,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self... | 3 | molecule/default/tests/test_default.py | darkwizard242/ansible-role-logrotate |
'''Autogenerated by xml_generate script, do not edit!'''
from OpenGL import platform as _p, arrays
# Code generation uses this
from OpenGL.raw.WGL import _types as _cs
# End users want this...
from OpenGL.raw.WGL._types import *
from OpenGL.raw.WGL import _errors
from OpenGL.constant import Constant as _C
import ctypes
_EXTENSION_NAME = 'WGL_EXT_extensions_string'
def _f( function ):
    # Bind *function* as a WGL platform entry point for this extension,
    # with the standard OpenGL error checker attached.
    return _p.createFunction( function,_p.PLATFORM.WGL,'WGL_EXT_extensions_string',error_checker=_errors._error_checker)
@_f
@_p.types(ctypes.c_char_p,)
def wglGetExtensionsStringEXT():pass
| [
{
"point_num": 1,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
},
{
"point_num": 2,
"id": "all_return_types_annotated",
"question": "Does every function in this file have a return type annotation?... | 3 | OpenGLWrapper_JE/venv/Lib/site-packages/OpenGL/raw/WGL/EXT/extensions_string.py | JE-Chen/je_old_repo |
import click
import psycopg2 as pg2
from flask import current_app, g
from flask.cli import with_appcontext
from psycopg2.extras import DictCursor
def get_db():
    """Return the request-scoped PostgreSQL connection, creating it lazily."""
    if 'db' not in g:
        g.db = pg2.connect(
            **current_app.config['DATABASE'],
        )
        # DictCursor lets rows be addressed by column name.
        g.db.cursor_factory = DictCursor
    return g.db
def close_db(e=None):
    """Close the request's connection, if one was opened."""
    db = g.pop('db', None)
    if db is not None:
        db.close()
def init_db():
    """Execute schema.sql against the configured database."""
    db = get_db()
    # Autocommit so the schema statements apply immediately.
    db.autocommit = True
    cur = db.cursor()
    with current_app.open_resource('schema.sql') as f:
        cur.execute(f.read().decode('utf8'))
    cur.close()
    db.autocommit = False
@click.command('init-db')
@with_appcontext
def init_db_command():
    """Clear the existing data and create new tables."""
    init_db()
    click.echo('Initialized the database.')
def init_app(app):
    """Register teardown and CLI hooks on the Flask application."""
    app.teardown_appcontext(close_db)
    app.cli.add_command(init_db_command)
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer"... | 3 | abrv/db.py | shwnchpl/abrv |
import bisect
from typing import List, Sequence, TypeVar, Union, overload
from seqtk._helper import slice_to_indices
_T = TypeVar("_T")
class concatenate(Sequence[_T]):
    """Read-only sequence view that chains several sequences into one.

    Item lookup is O(log n) via bisect over the cumulative sizes of the
    underlying sequences; the data itself is never copied.
    """
    def __init__(self, sequences: Sequence[Sequence[_T]]):
        self._sequences = sequences
        self._cumulative_sizes = self._calculate_cumsum(sequences)
    @staticmethod
    def _calculate_cumsum(sequences: Sequence[Sequence[_T]]) -> List[int]:
        """Return the running total of the lengths of *sequences*."""
        r = []
        s = 0
        for seq in sequences:
            seq_size = len(seq)
            r.append(seq_size + s)
            s += seq_size
        return r
    def __len__(self) -> int:
        # BUG FIX: with zero underlying sequences the cumulative list is
        # empty and indexing [-1] raised IndexError; an empty
        # concatenation simply has length 0.
        return self._cumulative_sizes[-1] if self._cumulative_sizes else 0
    @overload
    def __getitem__(self, index: int) -> _T:
        ...
    @overload
    def __getitem__(self, index: slice) -> List[_T]:
        ...
    def __getitem__(self, index: Union[int, slice]) -> Union[_T, List[_T]]:
        """Return the item at *index*, or a list of items for a slice."""
        if isinstance(index, slice):
            return [
                self._getitem_with_integer_index(i) for i in slice_to_indices(index)
            ]
        return self._getitem_with_integer_index(index)
    def _getitem_with_integer_index(self, index: int) -> _T:
        """Map a flat *index* onto (sequence, offset) and fetch the item."""
        # bisect_right finds the first cumulative size greater than index,
        # i.e. the sequence that contains this flat position.
        seq_index = bisect.bisect_right(self._cumulative_sizes, index)
        offset = self._cumulative_sizes[seq_index - 1] if seq_index > 0 else 0
        intra_seq_sample_index = index - offset
        return self._sequences[seq_index][intra_seq_sample_index]
| [
{
"point_num": 1,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written... | 3 | seqtk/_concatenate.py | yuyu2172/seqtk |
from fosscord.ext import tasks
import fosscord
class MyClient(fosscord.Client):
    """Client that posts an incrementing counter to a channel every minute."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # an attribute we can access from our task
        self.counter = 0
        # start the task to run in the background
        self.my_background_task.start()
    async def on_ready(self):
        """Log the bot identity once the session is ready."""
        print(f"Logged in as {self.user} (ID: {self.user.id})")
        print("------")
    @tasks.loop(seconds=60)  # task runs every 60 seconds
    async def my_background_task(self):
        """Increment the counter and send it to the configured channel."""
        channel = self.get_channel(1234567)  # channel ID goes here
        self.counter += 1
        await channel.send(self.counter)
    @my_background_task.before_loop
    async def before_my_task(self):
        """Delay the loop's first run until login has completed."""
        await self.wait_until_ready()  # wait until the bot logs in
client = MyClient()
client.run("token")
| [
{
"point_num": 1,
"id": "all_return_types_annotated",
"question": "Does every function in this file have a return type annotation?",
"answer": false
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer... | 3 | examples/background_task.py | Random-Access-Exomemory/fosscord.py |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
import unittest
import os
from PIL import Image
from pytest import raises
from kraken.pageseg import segment
from kraken.lib.exceptions import KrakenInputException
thisfile = os.path.abspath(os.path.dirname(__file__))
resources = os.path.abspath(os.path.join(thisfile, 'resources'))
class TestPageSeg(unittest.TestCase):
    """
    Tests of the page segmentation functionality
    """
    def test_segment_color(self):
        """
        Test correct handling of color input.
        """
        # Color images are not valid segmentation input; segment() must raise.
        with raises(KrakenInputException):
            with Image.open(os.path.join(resources, 'input.jpg')) as im:
                segment(im)
    def test_segment_bw(self):
        """
        Tests segmentation of bi-level input.
        """
        with Image.open(os.path.join(resources, 'bw.png')) as im:
            lines = segment(im)
            # test if line count is roughly correct
            self.assertAlmostEqual(len(lines['boxes']), 30, msg='Segmentation differs '
                                   'wildly from true line count', delta=5)
            # check if lines do not extend beyond image
            for box in lines['boxes']:
                self.assertLess(0, box[0], msg='Line x0 < 0')
                self.assertLess(0, box[1], msg='Line y0 < 0')
                self.assertGreater(im.size[0], box[2], msg='Line x1 > {}'.format(im.size[0]))
                self.assertGreater(im.size[1], box[3], msg='Line y1 > {}'.format(im.size[1]))
| [
{
"point_num": 1,
"id": "any_function_over_40_lines",
"question": "Is any function in this file longer than 40 lines?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": true
},... | 3 | tests/test_pageseg.py | UB-Mannheim/kraken |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Crap class
but make code more compact. lmao
WARNING! WARNING!
HIGH CONCENTRATION OF SHIT!
and in future here will be adding more and more methods and classes
but i'm not shure
"""
import os
def success(message):
    """Render *message* inside a dismissable Bootstrap success alert."""
    template = (
        '<div class="alert alert-success alert-dismissable">'
        '<button type="button" class="close" data-dismiss="alert">×</button>'
        '{}</div>'
    )
    return template.format(message)
def warning(message):
    """Render *message* inside a dismissable Bootstrap danger alert."""
    template = (
        '<div class="alert alert-danger alert-dismissable">'
        '<button type="button" class="close" data-dismiss="alert">×</button>'
        '{}</div>'
    )
    return template.format(message)
def playlist(path):
    """Return the entries of directory *path* as HTML ``<option>`` elements.

    Each directory entry becomes one ``<option>`` tag, concatenated in
    ``os.listdir`` order.

    :param path: directory to list
    :return: raw HTML string of ``<option>`` tags
    """
    # BUG FIX: the original used the Python-2-only `unicode` builtin
    # (`unicode.encode(unicode(str(i), 'utf-8'), 'utf8')`), which raises
    # NameError on Python 3. str.format on the name directly produces the
    # same markup on both versions.
    raw_html = ''
    for name in os.listdir(path):
        raw_html += '<option>{}</option>'.format(name)
    return raw_html
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer"... | 3 | web_audio/helpers.py | SaxAlien/crap-code |
from __future__ import print_function
from sys import argv
from pprint import pformat
from twisted.internet.task import react
from twisted.web.client import Agent, readBody
from twisted.web.http_headers import Headers
def cbRequest(response):
    """Print response metadata, then read and print the body asynchronously."""
    print('Response version:', response.version)
    print('Response code:', response.code)
    print('Response phrase:', response.phrase)
    print('Response headers:')
    print(pformat(list(response.headers.getAllRawHeaders())))
    d = readBody(response)
    d.addCallback(cbBody)
    # Returning the Deferred keeps the reactor alive until the body arrives.
    return d
def cbBody(body):
    """Print the fully-buffered response body."""
    print('Response body:')
    print(body)
def main(reactor, url=b"http://example.com/"):
    """Issue a GET request for *url* and chain the response printers."""
    agent = Agent(reactor)
    d = agent.request(
        'GET', url,
        Headers({'User-Agent': ['Twisted Web Client Example']}),
        None)
    d.addCallback(cbRequest)
    return d
react(main, argv[1:])
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": tru... | 3 | docs/web/howto/listings/client/responseBody.py | hawkowl/twisted |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import time
from datetime import datetime
def get_timestamp():
    """Return the current Unix time as a whole number of seconds."""
    return int(time.time())
def throttle_period_expired(timestamp, throttle):
    """Return True when more than *throttle* seconds passed since *timestamp*.

    A falsy *timestamp* (None, 0, ...) means nothing was recorded yet and
    counts as expired. ``datetime`` values are measured against
    ``datetime.utcnow()``; any other value is treated as a Unix timestamp.
    """
    if not timestamp:
        return True
    if isinstance(timestamp, datetime):
        elapsed = (datetime.utcnow() - timestamp).total_seconds()
        return elapsed > throttle
    return (get_timestamp() - timestamp) > throttle
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"... | 3 | website/util/time.py | DanielSBrown/osf.io |
class Graph():
    """Directed graph stored as an adjacency list (dict: vertex -> list).

    Vertices are assumed to be integers labelled 0..n-1, matching the
    list-based ``visited`` bookkeeping used by ``DFS``.
    """
    def __init__(self):
        self.vertex = {}
    # for printing the Graph vertexes
    def printGraph(self):
        """Print the raw adjacency dict, then one edge listing per vertex."""
        print(self.vertex)
        for i in self.vertex.keys():
            print(i, ' -> ', ' -> '.join([str(j) for j in self.vertex[i]]))
    # for adding the edge beween two vertexes
    def addEdge(self, fromVertex, toVertex):
        """Add a directed edge fromVertex -> toVertex."""
        # check if vertex is already present,
        if fromVertex in self.vertex.keys():
            self.vertex[fromVertex].append(toVertex)
        else:
            # else make a new vertex
            self.vertex[fromVertex] = [toVertex]
    def DFS(self):
        """Depth-first traversal over every component, printing each vertex."""
        # visited array for storing already visited nodes
        visited = [False] * len(self.vertex)
        # call the recursive helper function
        for i in range(len(self.vertex)):
            if visited[i] == False:
                self.DFSRec(i, visited)
    def DFSRec(self, startVertex, visited):
        """Visit *startVertex*, then recurse into its unvisited neighbours."""
        # mark start vertex as visited
        visited[startVertex] = True
        print(startVertex, end=' ')
        # BUG FIX: recurse only into startVertex's adjacency list. The old
        # code looped over every key of the graph, which produced a
        # key-order walk of all vertices rather than a depth-first
        # traversal along edges. .get() handles sink vertices that never
        # appear as a key.
        for neighbour in self.vertex.get(startVertex, []):
            if visited[neighbour] == False:
                self.DFSRec(neighbour, visited)
if __name__ == '__main__':
g = Graph()
g.addEdge(0, 1)
g.addEdge(0, 2)
g.addEdge(1, 2)
g.addEdge(2, 0)
g.addEdge(2, 3)
g.addEdge(3, 3)
g.printGraph()
print('DFS:')
g.DFS()
# OUTPUT:
# 0 -> 1 -> 2
# 1 -> 2
# 2 -> 0 -> 3
# 3 -> 3
# DFS:
# 0 1 2 3
| [
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": false
},
{
"point_num": 2,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (exclud... | 3 | Graph/P02_DepthFirstSearch.py | Abhishekkumar001/Data-Structures-using-Python-master |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.mport threading
from unittest import mock
import designate.rpc
from designate.sink import service
import designate.tests
from designate.tests import fixtures
class TestSinkService(designate.tests.TestCase):
    """Unit tests for the sink service lifecycle."""
    def setUp(self):
        """Capture logging and build a sink service with the fake handler."""
        super(TestSinkService, self).setUp()
        self.stdlog = fixtures.StandardLogging()
        self.useFixture(self.stdlog)
        self.CONF.set_override('enabled_notification_handlers', ['fake'],
                               'service:sink')
        self.service = service.Service()
    @mock.patch.object(designate.rpc, 'get_notification_listener')
    def test_service_start(self, mock_notification_listener):
        """start() must create a notification listener."""
        self.service.start()
        self.assertTrue(mock_notification_listener.called)
    def test_service_stop(self):
        """stop() must log that the sink service is stopping."""
        self.service.stop()
        self.assertIn('Stopping sink service', self.stdlog.logger.output)
    def test_service_name(self):
        """The service reports its name as 'sink'."""
        self.assertEqual('sink', self.service.service_name)
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false... | 3 | designate/tests/unit/sink/test_service.py | mrlesmithjr/designate |
from django.db import models
from django.contrib.auth.models import AbstractBaseUser
from django.contrib.auth.models import PermissionsMixin
from django.contrib.auth.models import BaseUserManager
class UserProfileManager(BaseUserManager):
    """Manager for user profiles"""
    def create_user(self, email, name, password=None):
        """Create a new user profile"""
        if not email:
            raise ValueError('User must have an email address')
        # normalize_email canonicalizes the address before storage.
        email = self.normalize_email(email)
        user = self.model(email=email, name=name)
        # Hash the password instead of storing it in plain text.
        user.set_password(password)
        user.save(using=self._db)
        return user
    def create_superuser(self, email, name, password):
        """Create and save a new superuser with given details"""
        user = self.create_user(email,name, password)
        user.is_superuser = True
        user.is_staff = True
        # Second save persists the elevated flags set above.
        user.save(using=self._db)
        return user
class UserProfile(AbstractBaseUser, PermissionsMixin):
    """Database model for users in system"""
    # Unique email doubles as the login identifier (see USERNAME_FIELD).
    email = models.EmailField(max_length=255, unique=True)
    name = models.CharField(max_length=255)
    is_active = models.BooleanField(default=True)
    is_staff = models.BooleanField(default=False)
    # All ORM access goes through the custom manager above.
    objects = UserProfileManager()
    # Authenticate with email instead of a username.
    USERNAME_FIELD = 'email'
    REQUIRED_FIELDS = ['name']
    def get_full_name(self):
        """Retrieve full name of user"""
        return self.name
    def get_short_name(self):
        """Retrieve short name of user"""
        return self.name
    def __str__(self):
        """Return string representation of our user"""
        return self.email
| [
{
"point_num": 1,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than ... | 3 | profiles_api/models.py | Davit2018/profiles-rest-api |
#!/usr/bin/python3
import pytest
def test_weight(WBTC, WETH, accounts, SwapRouter, NonfungiblePositionManager, CellarPoolShareContract):
    """After two identical deposits, each position's liquidity must stay
    proportional to its configured tick weight (liq_i/weight_i constant
    across all NFTs, within 1/ACCURACY relative error)."""
    ACCURACY = 10 ** 6
    # Acquire WBTC via an exact-output swap paid in ETH, then let the
    # cellar pull it for the deposits below.
    SwapRouter.exactOutputSingle([WETH, WBTC, 3000, accounts[0], 2 ** 256 - 1, 10 ** 7, 2 * 10 ** 18, 0], {"from": accounts[0], "value": 2 * 10 ** 18})
    WBTC.approve(CellarPoolShareContract, 10 ** 7, {"from": accounts[0]})
    ETH_amount = 10 ** 18
    WBTC_amount = 5 * 10 ** 6
    cellarAddParams = [WBTC_amount, ETH_amount, 0, 0, 2 ** 256 - 1]
    CellarPoolShareContract.addLiquidityForUniV3(cellarAddParams, {"from": accounts[0], "value": ETH_amount})
    cellarAddParams = [WBTC_amount, ETH_amount, 0, 0, 2 ** 256 - 1]
    CellarPoolShareContract.addLiquidityForUniV3(cellarAddParams, {"from": accounts[0], "value": ETH_amount})
    # Position 0 is the reference; every other position must keep the
    # same liquidity-to-weight ratio.
    token_id_0 = NonfungiblePositionManager.tokenOfOwnerByIndex(CellarPoolShareContract, 0)
    liq_0 = NonfungiblePositionManager.positions(token_id_0)[7]
    weight_0 = CellarPoolShareContract.cellarTickInfo(0)[3]
    NFT_count = NonfungiblePositionManager.balanceOf(CellarPoolShareContract)
    for i in range(NFT_count - 1):
        token_id = NonfungiblePositionManager.tokenOfOwnerByIndex(CellarPoolShareContract, i + 1)
        liq = NonfungiblePositionManager.positions(token_id)[7]
        weight = CellarPoolShareContract.cellarTickInfo(i + 1)[3]
        # Cross-multiplied proportionality check avoids integer division.
        assert approximateCompare(liq_0 * weight, liq * weight_0, ACCURACY)
def approximateCompare(a, b, accuracy):
    """Return True when *a* and *b* differ by less than a 1/accuracy fraction.

    The absolute difference scaled by *accuracy* is compared against the
    larger operand, i.e. the relative error must be below 1/accuracy.
    """
    if a == b:
        # BUG FIX: the old code returned False for a == b == 0 because
        # 0 * accuracy < 0 is false; equal values always compare equal.
        return True
    if a > b:
        return (a - b) * accuracy < a
    return (b - a) * accuracy < b
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": false
},
{
"point_num": 2,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding... | 3 | tests/test_05_weight.py | VolumeFi/somm-wbtc-eth-test-cellar |
class Node:
    """A single linked-list element: a value plus a successor pointer."""
    def __init__(self, data):
        self.data = data
        self.next = None
class LinkedList:
    """Minimal singly linked list supporting append and traversal."""
    def __init__(self):
        self.head = None
    def add(self, data):
        """Append *data* at the tail of the list."""
        new_node = Node(data)
        if self.head is None:
            # Empty list: the new node becomes the head.
            self.head = new_node
            return
        # Walk to the last node, then link the new one after it.
        tail = self.head
        while tail.next is not None:
            tail = tail.next
        tail.next = new_node
    def display(self):
        """Return all stored values, head to tail, as a Python list."""
        values = []
        node = self.head
        while node is not None:
            values.append(node.data)
            node = node.next
        return values
{
"point_num": 1,
"id": "every_class_has_docstring",
"question": "Does every class in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},... | 3 | python/hash_table/linked_list.py | samuelclark907/data-structures-and-algorithms |
#!/usr/bin/env python3
#
# Copyright (C) 2019 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions
# and limitations under the License.
""" This module allows you to create and write tf record dataset. """
import argparse
import os
from text_detection.annotation import write_to_tfrecords
def parse_args():
    """ Parses arguments. """
    parser = argparse.ArgumentParser()
    parser.add_argument('--input_datasets', required=True,
                        help='Comma-separated datasets paths.')
    parser.add_argument('--output', required=True,
                        help='Path where output tf record will be written to.')
    parser.add_argument('--imshow_delay', type=int, default=-1,
                        help='If it is non-negative, this script will draw detected and groundtruth boxes')
    return parser.parse_args()
def main():
    """ Parse CLI arguments and write the tf record dataset. """
    args = parse_args()
    # Make sure the destination directory exists before writing.
    os.makedirs(os.path.dirname(args.output), exist_ok=True)
    write_to_tfrecords(output_path=args.output, datasets=args.input_datasets.split(','),
                       imshow_delay=args.imshow_delay)
if __name__ == '__main__':
main()
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": true... | 3 | tensorflow_toolkit/text_detection/tools/create_dataset.py | morkovka1337/openvino_training_extensions |
#!/usr/bin/python3
import os.path
import sys
import unittest
import devpipeline_core.toolsupport
sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..", "common"))
import mockconfig
_TOOL_FNS = {"bar": (lambda: None, "bar")}
class TestToolBuilder(unittest.TestCase):
    """Tests for devpipeline_core.toolsupport.tool_builder."""
    def test_tool_exists(self):
        """Building a configured tool succeeds; the stub builder yields None."""
        config = {"foo": "bar"}
        tool = devpipeline_core.toolsupport.tool_builder(config, "foo", _TOOL_FNS)
        self.assertEqual(None, tool)
    def test_tool_missing(self):
        """A component without the 'foo' key raises MissingToolKey."""
        config = mockconfig.MockComponent("a", {})
        def _build_tool():
            return devpipeline_core.toolsupport.tool_builder(config, "foo", _TOOL_FNS)
        self.assertRaises(devpipeline_core.toolsupport.MissingToolKey, _build_tool)
if __name__ == "__main__":
unittest.main()
| [
{
"point_num": 1,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": true
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
... | 3 | test/toolsupport/test_toolbuilder.py | dev-pipeline/devpipeline-core |
import logging
import argparse
import blink.main_dense as main_dense
logger = logging.getLogger(__name__)
class EntityLinker:
    """Thin wrapper around BLINK dense entity linking.

    Loads the bi-encoder/cross-encoder models once at construction and
    exposes linking as a simple callable.
    """
    def __init__(self, model_path, logger=None):
        """Load all BLINK models and indexes found under *model_path*."""
        self.logger = logger
        self.models_path = model_path
        # File names below follow the standard BLINK model distribution.
        self.config = {
            "test_entities": None,
            "test_mentions": None,
            "interactive": False,
            "biencoder_model": self.models_path+"biencoder_wiki_large.bin",
            "biencoder_config": self.models_path+"biencoder_wiki_large.json",
            "entity_catalogue": self.models_path+"entity.jsonl",
            "entity_encoding": self.models_path+"all_entities_large.t7",
            "crossencoder_model": self.models_path+"crossencoder_wiki_large.bin",
            "crossencoder_config": self.models_path+"crossencoder_wiki_large.json",
            "fast": True,  # set this to be true if speed is a concern
            "output_path": "logs/",  # logging directory
            "faiss_index": "flat",
            "index_path": self.models_path+"index.pkl",
            "top_k": 30
        }
        self.args = argparse.Namespace(**self.config)
        self.models = main_dense.load_models(self.args, logger=self.logger)
    def __call__(self, data_to_link):
        """Run dense linking on *data_to_link*; return predictions only."""
        # main_dense.run returns several intermediate results; only the
        # predictions are exposed (scores are discarded).
        _, _, _, _, _, predictions, scores, = main_dense.run(self.args, logger=self.logger,
            biencoder=self.models[0], biencoder_params=self.models[1], crossencoder=self.models[2],
            crossencoder_params=self.models[3], candidate_encoding=self.models[4],
            title2id=self.models[5], id2title=self.models[6], id2text=self.models[7],
            wikipedia_id2local_id=self.models[8], faiss_indexer=self.models[9],
            test_data=data_to_link)
        return predictions
| [
{
"point_num": 1,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 2,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/c... | 3 | qakgc/linker/linker.py | pbmstrk/odqa |
import cv2
import numpy as np
class sharpening:
    """Sharpens an image with a 3x3 kernel, then reduces the added noise."""
    def __init__(self):
        pass
    def sharp(self, image):
        """Return *image* after sharpening and bilateral noise reduction."""
        # Classic 3x3 sharpen kernel: boost the center, subtract neighbours.
        sharpen_kernel = np.array([[0, -1, 0], [-1, 5, -1], [0, -1, 0]])
        result = cv2.filter2D(image, -1, sharpen_kernel)
        # Bilateral filter smooths noise while keeping edges intact.
        result = cv2.bilateralFilter(result, 9, 75, 75)
        return result
# Create an image object
image = cv2.imread("./car.jpg")
tmp_canvas = sharpening()
res = tmp_canvas.sharp(image)
cv2.imwrite('sharped.jpg', res)
cv2.imshow('original',image)
cv2.imshow('sharp',res)
cv2.waitKey(0)
cv2.destroyAllWindows()
| [
{
"point_num": 1,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 2,
"id": "all_return_types_annotated",
"question": "Does every function in this file have a return type annotation?",
"answer... | 3 | sharpening.py | adityaRakhecha/Image-Filters |
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License
import scipy.stats as osp_stats
from jax import lax
from jax._src.lax.lax import _const as _lax_const
from jax._src.numpy.util import _wraps
from jax._src.numpy.lax_numpy import _promote_args_inexact, where, inf
@_wraps(osp_stats.chi2.logpdf, update_doc=False)
def logpdf(x, df, loc=0, scale=1):
    # Promote all arguments to a common inexact dtype first.
    x, df, loc, scale = _promote_args_inexact("chi2.logpdf", x, df, loc, scale)
    one = _lax_const(x, 1)
    two = _lax_const(x, 2)
    # Standardize the input: y = (x - loc) / scale.
    y = lax.div(lax.sub(x, loc), scale)
    df_on_two = lax.div(df, two)
    # Unnormalized log-density: (df/2 - 1) * log(y) - y/2.
    kernel = lax.sub(lax.mul(lax.sub(df_on_two, one), lax.log(y)), lax.div(y,two))
    # Log normalization constant: -(lgamma(df/2) + (df/2) * log(2)).
    nrml_cnst = lax.neg(lax.add(lax.lgamma(df_on_two),lax.div(lax.mul(lax.log(two), df),two)))
    log_probs = lax.add(lax.sub(nrml_cnst, lax.log(scale)), kernel)
    # Below the support boundary (x < loc) the log-density is -inf.
    return where(lax.lt(x, loc), -inf, log_probs)
@_wraps(osp_stats.chi2.pdf, update_doc=False)
def pdf(x, df, loc=0, scale=1):
    # pdf = exp(logpdf); defined in terms of logpdf for consistency.
    return lax.exp(logpdf(x, df, loc, scale))
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": tru... | 3 | jax/_src/scipy/stats/chi2.py | mariogeiger/jax |
# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from __future__ import unicode_literals
from ..preprocess import Erode
def test_Erode_inputs():
    """Check that Erode's input spec traits carry the expected metadata."""
    input_map = dict(
        args=dict(argstr='%s', ),
        debug=dict(
            argstr='-debug',
            position=1,
        ),
        dilate=dict(
            argstr='-dilate',
            position=1,
        ),
        environ=dict(
            nohash=True,
            usedefault=True,
        ),
        in_file=dict(
            argstr='%s',
            extensions=None,
            mandatory=True,
            position=-2,
        ),
        number_of_passes=dict(argstr='-npass %s', ),
        out_filename=dict(
            argstr='%s',
            extensions=None,
            genfile=True,
            position=-1,
        ),
        quiet=dict(
            argstr='-quiet',
            position=1,
        ),
    )
    inputs = Erode.input_spec()
    # Every declared (trait, metadata) pair must match the live spec.
    for key, metadata in list(input_map.items()):
        for metakey, value in list(metadata.items()):
            assert getattr(inputs.traits()[key], metakey) == value
def test_Erode_outputs():
    """Check that Erode's output spec traits carry the expected metadata."""
    output_map = dict(out_file=dict(extensions=None, ), )
    outputs = Erode.output_spec()
    for key, metadata in list(output_map.items()):
        for metakey, value in list(metadata.items()):
            assert getattr(outputs.traits()[key], metakey) == value
| [
{
"point_num": 1,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
... | 3 | nipype/interfaces/mrtrix/tests/test_auto_Erode.py | vferat/nipype |
import pytest
from rethinkdb import utils_common
@pytest.fixture
def parser():
    """CommonOptionsParser preloaded with the two options under test:
    --export (repeatable, custom 'db_table' type) and
    --clients (custom 'pos_int' type, default 3).
    """
    opt_parser = utils_common.CommonOptionsParser()
    opt_parser.add_option(
        "-e",
        "--export",
        dest="db_tables",
        metavar="DB|DB.TABLE",
        default=[],
        type='db_table',
        action="append")
    opt_parser.add_option(
        "--clients",
        dest="clients",
        metavar="NUM",
        default=3,
        type="pos_int")
    return opt_parser
def test_option_parser_int_pos(parser):
    """--clients NUM (separate token) parses as a positive int."""
    options, args = parser.parse_args(['--clients', '4'], connect=False)
    assert options.clients == 4
def test_option_parser_int_pos_equals(parser):
    """--clients=NUM (equals form) parses the same as the separate-token form."""
    options, args = parser.parse_args(['--clients=4'], connect=False)
    assert options.clients == 4
def test_option_parser_int_pos_default(parser):
    """Omitting --clients leaves the declared default of 3."""
    options, args = parser.parse_args([], connect=False)
    assert options.clients == 3
def test_option_parser_int_pos_fail(parser):
    """Non-numeric --clients makes optparse error out (SystemExit)."""
    with pytest.raises(SystemExit):
        parser.parse_args(['--clients=asdf'], connect=False)
def test_option_parser_int_pos_zero(parser):
    """Zero is rejected by the 'pos_int' type (must be strictly positive)."""
    with pytest.raises(SystemExit):
        parser.parse_args(['--clients=0'], connect=False)
def test_option_parser_db_table(parser):
    """'db.table' splits into a (db, table) tuple."""
    options, args = parser.parse_args(['--export=example.table'], connect=False)
    assert options.db_tables == [('example', 'table')]
def test_option_parser_db_table_append(parser):
    """Repeated --export flags accumulate (action='append')."""
    options, args = parser.parse_args(['--export=example.table', '--export=example.another'], connect=False)
    assert options.db_tables == [('example', 'table'), ('example', 'another')]
def test_option_parser_db_table_only_db(parser):
    """A bare database name yields (db, None)."""
    options, args = parser.parse_args(['--export=example'], connect=False)
    assert options.db_tables == [('example', None)]
def test_option_parser_db_table_fail(parser):
    """An empty --export value is rejected (SystemExit)."""
    with pytest.raises(SystemExit):
        parser.parse_args(['--export='], connect=False)
| [
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
},
{
"point_num": 2,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excludi... | 3 | tests/test_utils_common.py | jayvdb/rethinkdb-python |
from pygame import Rect
# noinspection PyPackageRequirements
from OpenGL import GL
from albow.openGL.GLViewport import GLViewport
class GLOrtho(GLViewport):
    """
    An OpenGL drawing area with an orthographic projection.

    Use it exactly like a GLViewport, except that `setup_projection()` is
    supplied for you, built from the six clipping-plane parameters given
    at construction time.
    """

    def __init__(self, rect: Rect=None, xmin=-1, xmax=1, ymin=-1, ymax=1, near=-1, far=1, **kwds):
        """
        Create a GLOrtho with the given initial projection parameters.

        Args:
            rect: A pygame Rect
            xmin: Left vertical clipping plane coordinate.
            xmax: Right vertical clipping plane coordinate.
            ymin: Bottom horizontal clipping plane coordinate.
            ymax: Top horizontal clipping plane coordinate.
            near: Distance to the near depth clipping plane
                (negative when the plane lies behind the viewer).
            far: Distance to the far depth clipping plane
                (negative when the plane lies behind the viewer).
            **kwds: Forwarded unchanged to GLViewport.
        """
        super().__init__(rect, **kwds)
        # Cache the six glOrtho() arguments for setup_projection().
        self.xmin, self.xmax = xmin, xmax
        self.ymin, self.ymax = ymin, ymax
        self.near, self.far = near, far

    def setup_projection(self):
        """Install the orthographic projection via GL.glOrtho."""
        GL.glOrtho(self.xmin, self.xmax, self.ymin, self.ymax, self.near, self.far)
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": false
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true... | 3 | albow/openGL/GLOrtho.py | hasii2011/albow-python-3 |
"""
Given a graph, a source vertex in the graph and a number k,
find if there is a simple path (without any cycle) starting
from given source and ending at any other vertex that is greater than k.
"""
import math
from gfg.graphs.ds import GraphM # type: ignore
def path_greater(graph: list, source: int, k: int, vertices: int) -> list:
    """
    Find a simple path starting at `source` whose total weight exceeds `k`.

    Greedy solution won't always work; every possible simple path from the
    source must be tried, so backtracking is applied here.

    Args:
        graph: adjacency matrix; a weight of 0 or math.inf means "no edge".
        source: index of the start vertex.
        k: threshold the path's total weight must exceed.
        vertices: vertex count (unused; kept for interface compatibility).

    Returns:
        The vertex list of the first simple path found with total weight > k,
        or [] when no such path exists.

    Time Complexity: O(n!)
    """
    def backtrack(vertex: int, edge_weight: int, visited: set, total: int) -> int:
        # Try to extend the current path with `vertex`, reached over an edge
        # of weight `edge_weight`. Returns a value > k as soon as the running
        # total exceeds k; otherwise returns `total` unchanged after the
        # branch is exhausted (path/visited are restored on failure).
        if vertex in visited:
            return total
        visited.add(vertex)
        path.append(vertex)
        cur_total = total + edge_weight
        if cur_total > k:
            return cur_total
        for neighbor, weight in enumerate(graph[vertex]):
            if neighbor not in visited and 0 < weight < math.inf:
                cur_total = backtrack(neighbor, weight, visited, cur_total)
                if cur_total > k:
                    return cur_total
        # Dead end: undo this extension so other branches may reuse `vertex`.
        visited.remove(vertex)
        path.pop()
        return total

    path: list = [source]
    for neighbor, weight in enumerate(graph[source]):
        # BUG FIX: the original shadowed `source` with the loop variable and
        # seeded `visited` with the *neighbor*, so backtrack() bailed out on
        # its first check and the function always returned [].
        if 0 < weight < math.inf and backtrack(neighbor, weight, {source}, 0) > k:
            return path
    return []
if __name__ == "__main__":
g = GraphM(9)
g.add_edge(0, 1, 4)
g.add_edge(0, 7, 8)
g.add_edge(1, 2, 8)
g.add_edge(1, 7, 11)
g.add_edge(2, 3, 7)
g.add_edge(2, 5, 4)
g.add_edge(2, 8, 2)
g.add_edge(3, 4, 9)
g.add_edge(3, 5, 14)
g.add_edge(4, 5, 10)
g.add_edge(5, 6, 2)
g.add_edge(6, 7, 1)
g.add_edge(6, 8, 6)
g.add_edge(7, 8, 7)
print(path_greater(g.graph, 0, 60, g.num_vertices))
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": false
},
{
"point_num": 2,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding... | 3 | gfg/graphs/path_length_greater_than_k.py | rrwt/daily-coding-challenge |
"""User models"""
# Django
from django.db import models
from django.contrib.auth.models import AbstractUser
from django.core.validators import RegexValidator
# Utilities
from cride.utils.models import CRideModel
class User(CRideModel, AbstractUser):
    """User model.
    Extend from Django's Abstract User, change the username field
    to email and add some extra fields.
    """
    # Login identifier; made unique and promoted to USERNAME_FIELD below.
    email = models.EmailField(
        'email address',
        unique=True,
        error_messages={
            'unique': 'A user with that email already exist.'
        }
    )
    # NOTE(review): phone_regex is defined but not attached to
    # phone_number's `validators` list below -- confirm that is intended.
    phone_regex = RegexValidator(
        regex=r'\+?1?\d{9,15}$',
        message="Phone number must be entered in the format: +999999999. Up to 15 digits allowed."
    )
    phone_number = models.CharField(max_length=17, blank=True)
    # Authenticate with the email address; the fields below are the ones
    # `createsuperuser` will still prompt for.
    USERNAME_FIELD = 'email'
    REQUIRED_FIELDS = ['username','first_name','last_name']
    is_client = models.BooleanField(
        'client',
        default=True,
        help_text=(
            'Help easily distinguish users and perform queries. '
            'Clients are the main type of users.'
        )
    )
    is_verified = models.BooleanField(
        'verified',
        default=False,
        help_text='Set to true when the user have verified its email address.'
    )
    def __str__(self):
        """Return username"""
        return self.username
    def get_short_name(self):
        """Return username"""
        return self.username
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": true
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},... | 3 | cride/users/models/users.py | MkAngelo/cride |
#!/usr/bin/env python
import os
# Path to the local BRENDA BLAST database; fill in before running.
PATH_TO_BRENDA = ''

def run_blast(querys, evalue_cutoff, identity_cutoff, threads, db_path=PATH_TO_BRENDA):
    """Run blastp of `querys` against BRENDA and collect query/hit id pairs.

    Args:
        querys: path to the FASTA file of query sequences.
        evalue_cutoff: e-value threshold passed to blastp.
        identity_cutoff: identity threshold (currently unused here;
            kept for interface compatibility -- TODO confirm intent).
        threads: number of blastp threads.
        db_path: BLAST database path (defaults to PATH_TO_BRENDA).

    Returns:
        (query_list, hit_list): parallel lists of the query and subject ids
        taken from the first two columns of the tabular (outfmt 6) report.
    """
    blast_cmd = (
        f"blastp -query {querys} -db {db_path} -out {querys}_vs_brenda.txt "
        f"-evalue {evalue_cutoff} -outfmt 6 -num_threads {threads}"
    )
    os.system(blast_cmd)
    query_list = []
    hit_list = []
    # BUG FIX: the original did `line.split("\t")[0,1]`, which raises
    # TypeError (list indices must be integers) -- take the first two
    # columns instead.  The file handle is now closed via `with`.
    with open(f'{querys}_vs_brenda.txt', 'r') as blast_outfile:
        for line in blast_outfile:
            fields = line.split("\t")
            query_list.append(fields[0])
            hit_list.append(fields[1])
    return query_list, hit_list
def ec_filter(query_list, hit_list, ec_number):
    """Keep queries whose hit annotation carries exactly the wanted EC number.

    Each hit's EC number is parsed from the first '|'-separated field
    (">EC:x.y.z|..."), and -- joined with underscores -- compared against
    `ec_number` (e.g. "1_2_3").  Hits with fewer than three EC components
    are always discarded.

    Returns:
        The set of query ids whose hit matches `ec_number`.
    """
    # BUG FIX: the original popped from a copy using the *enumeration*
    # index, which drifts after the first removal (wrong elements removed,
    # eventually IndexError).  Build the surviving list instead.
    passed_query = []
    for query, anno in zip(query_list, hit_list):
        anno_ec_number = anno.split("|")[0].replace(">EC:", "").split(".")
        if len(anno_ec_number) < 3:
            continue
        if ec_number == "_".join(anno_ec_number):
            passed_query.append(query)
    return set(passed_query)
def read_query(query_file):
    """Load a FASTA file into {generated_name: sequence} and log the mapping.

    Sequence lines are concatenated with '*' characters stripped.  Each
    header is renamed to "protein_<i>" and the original header line is
    recorded in ./name_map.txt.

    NOTE(review): the counter starts at 1 and is incremented *before* the
    first header, so generated names begin at "protein_2".  Preserved here
    because existing name_map.txt consumers may rely on it -- confirm
    before changing.

    Returns:
        dict mapping generated protein names to their sequences.
    """
    protein_dict = {}
    i = 1
    # Context managers guarantee both files are closed even on error.
    with open(query_file, 'r') as src, open('name_map.txt', 'w+') as name_map:
        for line in src:
            if line.startswith(">"):
                i += 1
                name_map.write(f"protein_{i}\t{line}")
                protein_dict[f"protein_{i}"] = ""
            else:
                protein_dict[f"protein_{i}"] += line.strip().replace("*", "")
    return protein_dict
| [
{
"point_num": 1,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
},
{
"point_num": 2,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fe... | 3 | gpse/helper.py | JinyuanSun/GASSER |
'''A class for estimator names.
'''
from dmrg_helpers.extract.tuple_to_key import tuple_to_key
from sqlite3 import register_adapter, register_converter
class EstimatorName(object):
    """Wraps the single-site operator names that make up an estimator.

    Instances of this class are what the sqlite3 adapter/converter pair
    below store into and rebuild from the database.

    Parameters
    ----------
    operators: a tuple of strings.
        Names of the single-site operators composing the correlator.
    """

    def __init__(self, operators):
        super(EstimatorName, self).__init__()
        self.operators = operators
def adapt_estimator_name(estimator_name):
    '''Adapt an EstimatorName to its database string representation.

    Registered below with sqlite3.register_adapter so EstimatorName values
    can be bound directly in queries.

    Parameters
    ----------
    estimator_name: an EstimatorName.
        The estimator name you want to adapt.

    Returns
    -------
    a string in the format stored in the database.
    '''
    operators = estimator_name.operators
    return tuple_to_key(operators)
def convert_estimator_name(s):
    '''Rebuild an EstimatorName from its database representation.

    Registered below with sqlite3.register_converter for columns declared
    with the 'estimator_name' type.

    Parameters
    ----------
    s : a string
        An estimator name as stored in the database (colon-separated).

    Returns
    -------
    an EstimatorName object.
    '''
    return EstimatorName(s.split(':'))
# Teach sqlite3 to store EstimatorName values and to rebuild them from
# columns declared with the 'estimator_name' type.
register_adapter(EstimatorName, adapt_estimator_name)
register_converter('estimator_name', convert_estimator_name)
| [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": ... | 3 | dmrg_helpers/extract/estimator_name.py | iglpdc/dmrg_helpers |
import pixelsort.util as util
def lightness(pixel):
    """Sort key: perceived lightness of *pixel* (delegates to util)."""
    return util.lightness(pixel)
def intensity(pixel):
    """Sort key: sum of the R, G and B channels."""
    return sum(pixel[:3])
def hue(pixel):
    """Sort key: hue of *pixel* (delegates to util)."""
    return util.hue(pixel)
def saturation(pixel):
    """Sort key: saturation of *pixel* (delegates to util)."""
    return util.saturation(pixel)
def minimum(pixel):
    """Sort key: the smallest of the R, G and B channels."""
    return min(pixel[:3])
# Registry mapping the sorting-function CLI choice to its key function.
choices = {
    "lightness": lightness,
    "hue": hue,
    "intensity": intensity,
    "minimum": minimum,
    "saturation": saturation
}
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding sel... | 3 | pixelsort/sorting.py | petarnguyen/pixelsort |
import numpy as np
def turn(n):
"""Formula from WIkipedia.
n could be numpy array of integers
"""
return (((n & -n) << 1) & n) != 0
def dragon(N):
"""Generate dragon curve
Returns a pair of integer arrays, (x,y), each 2^N elements long
"""
t = turn(np.linspace(0, 2**N-1, 2**N, dtype=np.int32))
a = np.remainder(np.cumsum(t*2-1), 4)
# 1 | 0
# --+--
# 2 | 3
dx = np.array([1, -1, -1, 1], dtype=np.int32)
dy = np.array([1, 1, -1, -1], dtype=np.int32)
x = np.cumsum(dx[a])
y = np.cumsum(dy[a])
return x-((dx-1)//2)[a],y-((dy-1)//2)[a]
def dragon_binary_diagram(N):
"""Draw dragon curve on a bitmap
Returned bitmap size is 2^N x 2^N
"""
#Prepare canvas to draw curve
D = np.zeros((2**N,2**N), dtype=np.float32)
#Get curve. Scale is 2x.
dx, dy = dragon(2*N-1)
dx *= 2
dy *= 2
#Center the curve.
cx, cy = (int(dx.mean()), int(dy.mean()))
x0 = cx - D.shape[0]//2
y0 = cy - D.shape[1]//2
dx -= x0
dy -= y0
#Given array of coordinates, writes 1 at theese coordinates, when they are inside canvas.
def putOnesAt(dx,dy):
inside = (dx >= 0) & (dx < D.shape[0]) & (dy>=0) & (dy<D.shape[0])
#Take part of x,y coordinates that are inside the image, and write repeated pattern by them
#
D[dx[inside],dy[inside]] = 1
#Draw corners
putOnesAt(dx,dy)
#Draw midpoints between corners
dx1 = (dx[0:-1]+dx[1:])//2
dy1 = (dy[0:-1]+dy[1:])//2
putOnesAt(dx1,dy1)
return D
def showdragon(N):
pp.plot(*(dragon(N)+()))
pp.show()
if __name__=="__main__":
from matplotlib import pyplot as pp
order = 16
print("Showing dragon curve of order {}".format(order))
showdragon(order)
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": false
},
{
"point_num": 2,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding... | 3 | dragon.py | dmishin/fft-image-experiments |
# -*- coding: utf-8 -*-
from . import Base
import sqlalchemy as db
class JobAlternateTitle(Base):
    """SQLAlchemy model: an alternate title for a job in jobs_master."""
    __tablename__ = 'jobs_alternate_titles'
    uuid = db.Column(db.String, primary_key=True)
    title = db.Column(db.String)
    # nlp_a: presumably an NLP-normalized form of the title -- confirm upstream.
    nlp_a = db.Column(db.String)
    job_uuid = db.Column(db.String, db.ForeignKey('jobs_master.uuid'))
    def __init__(self, uuid, title, nlp_a, job_uuid):
        """Populate all column values for a new row."""
        self.uuid = uuid
        self.title = title
        self.nlp_a = nlp_a
        self.job_uuid = job_uuid
    def __repr__(self):
        """Short identifier for debugging/logging."""
        return '<uuid {}>'.format(self.uuid)
| [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/... | 3 | api_sync/v1/models/jobs_alternate_titles.py | robinsonkwame/skills-airflow |
def test_termination(instance, workspace, run):
    """Terminating a launched run succeeds once; a second terminate is refused."""
    instance.launch_run(run.run_id, workspace)
    assert instance.run_launcher.terminate(run.run_id)
    assert not instance.run_launcher.terminate(run.run_id)
def test_missing_run(instance, workspace, run, monkeypatch):
    """terminate() refuses while the run record cannot be found, then
    succeeds once the original lookup is restored."""
    instance.launch_run(run.run_id, workspace)
    def missing_run(*_args, **_kwargs):
        # Stand-in for get_run_by_id simulating a missing record.
        return None
    original = instance.get_run_by_id
    monkeypatch.setattr(instance, "get_run_by_id", missing_run)
    assert not instance.run_launcher.terminate(run.run_id)
    monkeypatch.setattr(instance, "get_run_by_id", original)
    assert instance.run_launcher.terminate(run.run_id)
def test_missing_tag(instance, workspace, run):
    """terminate() requires both the ecs/task_arn and ecs/cluster run tags;
    blanking either one makes it refuse."""
    instance.launch_run(run.run_id, workspace)
    original = instance.get_run_by_id(run.run_id).tags
    instance.add_run_tags(run.run_id, {"ecs/task_arn": ""})
    assert not instance.run_launcher.terminate(run.run_id)
    instance.add_run_tags(run.run_id, original)
    instance.add_run_tags(run.run_id, {"ecs/cluster": ""})
    assert not instance.run_launcher.terminate(run.run_id)
    instance.add_run_tags(run.run_id, original)
    assert instance.run_launcher.terminate(run.run_id)
def test_eventual_consistency(instance, workspace, run, monkeypatch):
    """terminate() refuses while ECS describe_tasks reports no tasks
    (eventual consistency), then succeeds once the task is visible."""
    instance.launch_run(run.run_id, workspace)
    def empty(*_args, **_kwargs):
        # Simulate ECS not yet reporting the launched task.
        return {"tasks": []}
    original = instance.run_launcher.ecs.describe_tasks
    monkeypatch.setattr(instance.run_launcher.ecs, "describe_tasks", empty)
    assert not instance.run_launcher.terminate(run.run_id)
    monkeypatch.setattr(instance.run_launcher.ecs, "describe_tasks", original)
    assert instance.run_launcher.terminate(run.run_id)
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answ... | 3 | python_modules/libraries/dagster-aws/dagster_aws_tests/ecs_tests/launcher_tests/test_termination.py | asamoal/dagster |
# Copyright 2018, OpenCensus Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class Measurement(object):
    """A measure paired with one recorded value.

    :type measure: :class: '~opencensus.stats.measure.Measure'
    :param measure: the measure this measurement was taken against

    :type value: int or float
    :param value: the recorded value
    """

    def __init__(self, measure, value):
        self._measure = measure
        self._value = value

    @property
    def value(self):
        """Recorded value of this measurement"""
        return self._value

    @property
    def measure(self):
        """Measure this measurement belongs to"""
        return self._measure
class MeasurementInt(Measurement):
    """Measurement whose recorded value is an integer."""
    def __init__(self, measure, value):
        super(MeasurementInt, self).__init__(measure, value)
class MeasurementFloat(Measurement):
    """Measurement whose recorded value is a float."""
    def __init__(self, measure, value):
        super(MeasurementFloat, self).__init__(measure, value)
| [
{
"point_num": 1,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
},
{
"point_num": 2,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inherita... | 3 | .venv/lib/python3.8/site-packages/opencensus/stats/measurement.py | MarkusMeyer13/graph-teams-presence |
# -*- coding: utf-8 -*-
# import cvxpy as cvx
from typing import Optional, Tuple, Union
import numpy as np
from lmi_solver.lmi0_oracle import lmi0_oracle
from lmi_solver.lmi_oracle import lmi_oracle
Arr = Union[np.ndarray]
Cut = Tuple[Arr, float]
class mle_oracle:
    def __init__(self, Sig: Arr, Y: Arr):
        """Maximum likelyhood estimation:

            min  log det Ω(p) + Tr( Ω(p)^{-1} Y )
            s.t. 2Y ⪰ Ω(p) ⪰ 0,

        Arguments:
            Sig (Arr): Covariance matrix
            Y (Arr): Biased covariance matrix
        """
        self.Y = Y
        self.Sig = Sig
        self.lmi0 = lmi0_oracle(Sig)      # enforces Ω(p) ⪰ 0
        self.lmi = lmi_oracle(Sig, 2 * Y)  # enforces 2Y ⪰ Ω(p)
        # self.lmi2 = lmi2_oracle(Sig, 2*Y)

    def __call__(self, x: Arr, t: float) -> Tuple[Cut, Optional[float]]:
        """Assess x against the best-so-far value t.

        Arguments:
            x (Arr): coefficients of basis functions
            t (float): the best-so-far optimal value

        Returns:
            Tuple[Cut, Optional[float]]: a cutting plane (gradient, offset),
            plus the improved objective value when x is feasible and better
            than t (None otherwise).
        """
        # Feasibility cuts from the two LMI constraints take priority.
        if cut := self.lmi(x):
            return cut, None
        if cut := self.lmi0(x):
            return cut, None
        # R := lmi0.Q.sqrt() -- presumably a triangular factor of Ω computed
        # during the lmi0 check; S is then Ω^{-1}.  TODO confirm in lmi0_oracle.
        R = self.lmi0.Q.sqrt()
        invR = np.linalg.inv(R)
        S = invR @ invR.T
        SY = S @ self.Y
        diag = np.diag(R)
        # Objective value: log det Ω (via 2·Σ log diag R) + Tr(Ω^{-1} Y).
        f1 = 2 * np.sum(np.log(diag)) + np.trace(SY)
        n = len(x)
        m = len(self.Y)
        g = np.zeros(n)
        # Gradient w.r.t. each basis coefficient.
        for i in range(n):
            SFsi = S @ self.Sig[i]
            # g[i] = sum(S[k] @ self.Sig[k] for k in range(m))
            g[i] = np.trace(SFsi)
            g[i] -= sum(SFsi[k, :] @ SY[:, k] for k in range(m))
        # BUG FIX (dead store): the original assigned `f = f1 - t` and then
        # immediately recomputed the same value in the walrus expression.
        if (f := f1 - t) >= 0:
            return (g, f), None
        return (g, 0.0), f1
| [
{
"point_num": 1,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docs... | 3 | src/corr_solver/mle_corr_oracle.py | luk036/corr-solver |
import numpy as np
def euclidean_distance(p1, p2):
    """
    Row-wise Euclidean distance between two matrices.

    @params:
        p1, p2: np.ndarray
            matrices of matching shape; distance is taken per row.
    """
    diff = p1 - p2
    return np.sqrt((diff ** 2).sum(axis=1))
def entropy(p):
    """
    Binary (Shannon) entropy of probability p -- our uncertainty
    measure when constructing the decision tree.

    @params:
        p: float in [0, 1]
    """
    if p in (0, 1):
        # Degenerate distributions carry no uncertainty.
        return 0
    return -(p * np.log2(p) + (1 - p) * np.log2(1 - p))
def information_gain(left_child, right_child):
    """
    Information gain of a split: parent entropy minus the size-weighted
    entropies of the two children.

    @params:
        left_child: list
            class labels (0/1) routed to the left child
        right_child: list
            class labels (0/1) routed to the right child
    """
    parent = left_child + right_child
    # Proportion of the positive class (label 1) in each node; an empty
    # node contributes zero.
    p_par = parent.count(1) / len(parent) if len(parent) > 0 else 0
    p_left = left_child.count(1) / len(left_child) if len(left_child) \
        > 0 else 0
    p_right = right_child.count(1) / len(right_child) if len(right_child) \
        > 0 else 0
    # BUG FIX: this is a module-level function, so the original
    # `self.entropy(...)` raised NameError; call entropy() directly.
    infogain_p = entropy(p_par)
    infogain_l = entropy(p_left)
    infogain_r = entropy(p_right)
    return infogain_p - len(left_child) / len(parent) * infogain_l - \
        len(right_child) / len(parent) * infogain_r
| [
{
"point_num": 1,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",... | 3 | utils/data_operations.py | spitzc32/CropMe |
# coding=UTF-8
from operators.op import op
from camera.TCPCamera import TCPCamera
from camera.socket.SocketCamera import SocketCamera
from SocketCamerasHandler import SocketCamerasHandler
class TCPSocketCamera(SocketCamera, TCPCamera):
    '''
    # TCP Socket Camera (``Abstract Class``)

    Combines ``SocketCamera`` and ``TCPCamera``: a camera whose data and
    control flow travel over a TCP socket.

    Attributes (inherited/expected) ::
        serverip / serverport / serverkey: server endpoint ("ip:port") this
            camera is connected to.
        socket / socketkey: the TCP socket object and this camera's own
            "ip:port" key.
        handler: connection-flow handler [SocketCameraHandler].
        thread: listens for and processes incoming requests.
        sendingThread: prepares and sends messages to the server.
        [More attributes can be added]
    '''

    def __init__(self, serverip, serverport, ip="", port="", cameraid=None, sessionid=None, type="BOTH"):
        # `type` mirrors the parent signature (intentionally shadows the builtin).
        super().__init__(serverip=serverip, serverport=serverport, ip=ip,
                         port=port, cameraid=cameraid, sessionid=sessionid,
                         type=type)

    def getHandler(self):
        '''Build the handler that drives this camera's connection flow.'''
        return SocketCamerasHandler(camera=self)

    def sendMessageToServer(self, message):
        '''Queue *message* on the outgoing buffer bound for the server.'''
        self.addOutputMessage(message)
| [
{
"point_num": 1,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": true
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": ... | 4 | digital-twin/deviceManager/camera/socket/TCPSocketCamera.py | matbmoser/SOTA |
import os
import sys
import json
from .version import __version__
from satsearch import Search
from satstac import Items
from satsearch.parser import SatUtilsParser
import satsearch.config as config
def main(items=None, printmd=None, printcal=False, found=False,
         save=None, download=None, requestor_pays=False, **kwargs):
    """ Main function for performing a search.

    items: path to a previously saved Items file; when None a fresh search
        is performed with **kwargs.
    printmd: metadata keys to print; printcal: print a calendar view.
    found: only report the hit count (no items fetched).
    save: filename to write all item metadata to (JSON).
    download: asset keys to download ('ALL' downloads every asset).
    requestor_pays: forwarded to the per-item download call.
    """
    if items is None:
        ## if there are no items then perform a search
        search = Search.search(**kwargs)
        if found:
            # Only report how many items matched; skip fetching them.
            num = search.found()
            print('%s items found' % num)
            return num
        items = search.items()
    else:
        # otherwise, load a search from a file
        items = Items.load(items)
    print('%s items found' % len(items))
    # print metadata
    if printmd is not None:
        print(items.summary(printmd))
    # print calendar
    if printcal:
        print(items.calendar())
    # save all metadata in JSON file
    if save is not None:
        items.save(filename=save)
    # download files given `download` keys
    if download is not None:
        if 'ALL' in download:
            # get complete set of assets
            download = set([k for i in items for k in i.assets])
        for key in download:
            items.download(key=key, path=config.DATADIR, filename=config.FILENAME, requestor_pays=requestor_pays)
    return items
def cli():
    """Command-line entry point: parse argv and dispatch to main()."""
    parser = SatUtilsParser.newbie(description='sat-search (v%s)' % __version__)
    kwargs = parser.parse_args(sys.argv[1:])
    # if a filename, read the GeoJSON file
    if 'intersects' in kwargs:
        if os.path.exists(kwargs['intersects']):
            with open(kwargs['intersects']) as f:
                kwargs['intersects'] = json.loads(f.read())
    cmd = kwargs.pop('command', None)
    # NOTE(review): main() runs only when a sub-command was supplied; a
    # bare invocation is a no-op -- confirm that is intended.
    if cmd is not None:
        main(**kwargs)
if __name__ == "__main__":
cli()
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": fal... | 3 | satsearch/main.py | lishrimp/sat-search |
class ObjectFactory:
    """Generic factory: callables register under a key and create() invokes them."""

    def __init__(self):
        # key -> builder callable
        self._builders = {}

    def register_builder(self, key, builder):
        """Associate *builder* (any callable) with *key*."""
        self._builders[key] = builder

    def create(self, key, **kwargs):
        """Build an object via the builder registered under *key*.

        Raises:
            ValueError: when no (truthy) builder is registered for *key*.
        """
        factory_fn = self._builders.get(key)
        if not factory_fn:
            raise ValueError(key)
        return factory_fn(**kwargs)
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": ... | 3 | pattern/object_factory.py | FurfurV/DistributedSystems |
'''
lab8
'''
#3.1
def count_words(input_str):
    """Number of whitespace-separated words in *input_str*."""
    words = input_str.split()
    return len(words)
#3.2
demo_str = 'Hello World!'
print(count_words(demo_str))
#3.3
def find_min_num(input_list):
    """Smallest non-string item of *input_list* (strings are skipped).

    BUG FIX: the original seeded the running minimum with input_list[0]
    even when that element was a string, making later `str >= int`
    comparisons raise TypeError; it also raised IndexError on [].
    Returns None when the list holds no non-string items.
    """
    non_strings = (item for item in input_list if not isinstance(item, str))
    return min(non_strings, default=None)
#3.4
demo_list=[1,2,3,4,5,6]
print(find_min_num(demo_list))
#3.5
mix_list=[1,2,3,'a',4,5,6]
print(find_min_num(mix_list)) | [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"ans... | 3 | lab8.py | ConAllen3/ia241 |
from Data import Data
import scoring.dictionary.SSS93 as dictionary
class SSS93(Data):
    # Raw score slot; populated by scoring_raw().
    scores = {'raw' : None }#

    def scoring_raw(self, score):
        """Accumulate the raw score and the per-factor scores, then attach
        the level interpretation for the raw total."""
        score.set(dictionary.factors_names, 0)
        for i, item in self.items():
            try:
                answer = int(item.get('user_answered'))
                factors = dictionary.factors[i + 1]
                # Answers are 1-based options; scores use answer-1.
                score.increase('raw', answer - 1)
                for factor in factors:
                    score.increase(factor, answer - 1)
            except Exception:
                # Unanswered / malformed items are skipped silently.
                pass
        raw_score = score.get('raw')
        interpretation = self.get_level_interpretation(raw_score)
        score.set('interpretation', interpretation)

    def get_level_interpretation(self, raw_score):
        """Map raw_score onto its interpretation interval, or None when no
        interval contains it.

        BUG FIX: the original returned None from an `else` branch on the
        first interval that did not match, so only the first interval was
        ever checked.
        """
        for interval in dictionary.level_interpretation:
            if interval[0] <= raw_score <= interval[1]:
                return dictionary.level_interpretation[interval]
        return None
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
}... | 3 | scoring/SSS93.py | majazeh/risloo-samples |
import requests
import asyncio
import aiohttp
from requests.exceptions import ConnectionError
from my_fake_useragent import UserAgent
import random
def get_page(url, options={}):
    """Fetch *url* and return the response body, or None on failure.

    A random User-Agent is used when my_fake_useragent works; otherwise a
    static one.  NOTE: the mutable default for *options* is kept for
    interface compatibility; it is never mutated here.
    """
    try:
        ua_string = UserAgent().random()
    except Exception:
        # BUG FIX: the original fallback called ua.random() again (with
        # `ua` possibly unbound), so the fallback itself always failed.
        ua_string = ('Mozilla/5.0 (Windows NT 10.0; Win64; x64) '
                     'AppleWebKit/537.36 (KHTML, like Gecko) '
                     'Chrome/91.0.4472.124 Safari/537.36')
    base_headers = {
        'User-Agent': ua_string,
        'Accept-Encoding': 'gzip, deflate, sdch',
        'Accept-Language': 'zh-CN,zh;q=0.8'
    }
    # Caller-supplied headers override the defaults.
    headers = dict(base_headers, **options)
    print('Getting', url)
    try:
        r = requests.get(url, headers=headers)
        print('Getting result', url, r.status_code)
        if r.status_code == 200:
            return r.text
    except ConnectionError:
        print('Crawling Failed', url)
    # Non-200 responses and connection failures both yield None.
    return None
class Downloader(object):
    """
    An asynchronous downloader that fetches the proxy-source pages
    concurrently (fast, but easy to get banned by the sources).
    """
    def __init__(self, urls):
        # urls: pages to fetch; bodies accumulate in self._htmls in
        # completion order (not necessarily input order).
        self.urls = urls
        self._htmls = []
    async def download_single_page(self, url):
        # One session per page; append the decoded body when the GET completes.
        async with aiohttp.ClientSession() as session:
            async with session.get(url) as resp:
                self._htmls.append(await resp.text())
    def download(self):
        # NOTE(review): get_event_loop() is deprecated on newer Pythons and
        # re-use after loop closure would fail -- confirm call pattern.
        loop = asyncio.get_event_loop()
        tasks = [self.download_single_page(url) for url in self.urls]
        loop.run_until_complete(asyncio.wait(tasks))
    @property
    def htmls(self):
        # Accessing the property triggers a fresh synchronous download.
        self.download()
        return self._htmls
| [
{
"point_num": 1,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 2,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self... | 3 | proxypool/utils.py | WiseJason/proxy |
# Python
import os
import sys
import traceback
# Pycompss
from pycompss.api.task import task
from pycompss.api.parameter import FILE_IN, FILE_OUT
# Adapters commons pycompss
from biobb_adapters.pycompss.biobb_commons import task_config
# Wrapped Biobb
from biobb_chemistry.babelm.babel_convert import BabelConvert # Importing class instead of module to avoid name collision
task_time_out = int(os.environ.get('TASK_TIME_OUT', 0))
@task(input_path=FILE_IN, output_path=FILE_OUT,
      on_failure="IGNORE", time_out=task_time_out)
def _babelconvert(input_path, output_path, properties, **kwargs):
    # PyCOMPSs task: runs BabelConvert remotely on a worker node.
    # Presumably strips PMI-related environment entries that would confuse
    # the task environment -- see task_config for details.
    task_config.pop_pmi(os.environ)
    try:
        BabelConvert(input_path=input_path, output_path=output_path, properties=properties, **kwargs).launch()
    except Exception as e:
        # Print the traceback, then re-raise so PyCOMPSs records the failure.
        traceback.print_exc()
        raise e
    finally:
        # Flush so worker output reaches the PyCOMPSs log files.
        sys.stdout.flush()
        sys.stderr.flush()
def babel_convert(input_path, output_path, properties=None, **kwargs):
    """Launch the BabelConvert PyCOMPSs task unless its output already exists.

    Skipping when output_path is None or the file is present mirrors the
    adapter convention of not re-running completed tasks.
    """
    # Simplified: the original condition carried a redundant `and True`.
    if output_path is None or os.path.exists(output_path):
        print("WARN: Task BabelConvert already executed.")
    else:
        _babelconvert(input_path, output_path, properties, **kwargs)
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding ... | 3 | biobb_adapters/pycompss/biobb_chemistry/babelm/babel_convert.py | bioexcel/biobb_adapters |
#coding:utf8
'''
Created on 2013-10-25
@author: lan (www.9miao.com)
'''
PLAYER_PLAYER =1
PLAYER_PET = 2
MONSTER_MONSTER =1
MATRIXLIST = [100001,100002,100003,100004,100005,100006,100007,100008,100009]
class BattleSide(object):
    '''One side of a battle, built from a character's pet formation.'''
    def __init__(self,character):
        '''Initialize the battle side.

        Walks the 9 formation slots ("eyes") of the character's matrix;
        every non-empty slot contributes the corresponding pet as a member.
        '''
        self.members = []
        # member id -> formation slot number (1..9)
        self.matrixSetting = {}
        for eyeNo in range(1,10):
            memID = character.matrix._matrixSetting.get('eyes_%d'%eyeNo)
            # NOTE(review): .get() returns None for a missing key, which is
            # not == 0 -- confirm all 'eyes_N' keys are always present.
            if memID==0:
                continue
            else:
                pet = character.pet.getPet(memID)
                self.members.append(pet)
                self.matrixSetting[memID] = eyeNo
    def getCharacterEyeNo(self,characterId):
        '''Return the formation slot of the given character, or None if absent.'''
        eyeNo = self.matrixSetting.get(characterId)
        return eyeNo
    def getMembers(self):
        '''Collect the fight data of every member on this side.'''
        fighters = []
        for member in self.members:
            data = member.getFightData()
            fighters.append(data)
        return fighters
| [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
... | 3 | server/src/project_n/app/game/core/fight/battleSide.py | isuhao/gamein9miao |
'''
Function:
define the darknet
Author:
Charles
'''
import torch
import torch.nn as nn
'''define darknet53'''
class Darknet53(nn.Module):
    """Darknet-53 backbone skeleton (layers still to be implemented)."""

    def __init__(self, **kwargs):
        super(Darknet53, self).__init__()

    def forward(self, x):
        """Forward pass placeholder; currently does nothing and returns None."""
        pass
{
"point_num": 1,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},... | 3 | modules/backbones/darknet.py | DetectionBLWX/YOLOv3 |
import os, logging
from flask import Flask, request
from werkzeug.serving import run_simple
from galaxylearning.utils.utils import return_data_decorator, LoggerFactory
app = Flask(__name__)
BASE_MODEL_PATH = os.path.join(os.path.abspath("."), "res", "models")
logger = LoggerFactory.getLogger(__name__, logging.INFO)
@app.route("/", methods=['GET'])
@return_data_decorator
def test_client():
    """Health-check endpoint confirming the client HTTP service is up.

    Fix: @app.route is now the outermost decorator, so Flask registers the
    function already wrapped by return_data_decorator. With the previous
    order, @app.route registered the bare function first and the outer
    wrapper never ran for incoming requests.
    NOTE(review): if return_data_decorator does not apply functools.wraps,
    confirm the Flask endpoint name stays unique across wrapped views.
    """
    return "Hello galaxylearning client", 200
@return_data_decorator
@app.route("/aggregatepars", methods=['POST'])
def submit_aggregate_pars():
    """Receive aggregated model-parameter files and store them per job.

    The job id is parsed from each uploaded filename (second-to-last
    "_"-separated token). Each payload is written to
    <BASE_MODEL_PATH>/models_<job_id>/tmp_aggregate_pars/avg_pars_<n>,
    where n is one less than the current file count in that directory.
    """
    # NOTE(review): @app.route is applied before @return_data_decorator here,
    # so Flask registers the unwrapped view and the outer decorator never runs
    # for requests — confirm whether @app.route should be outermost.
    logger.info("receive aggregate files")
    recv_aggregate_files = request.files
    for filename in recv_aggregate_files:
        # Filename encodes the job id, e.g. "<prefix>_<job_id>_<suffix>".
        job_id = filename.split("_")[-2]
        tmp_aggregate_file = recv_aggregate_files[filename]
        job_base_model_dir = os.path.join(BASE_MODEL_PATH, "models_{}".format(job_id), "tmp_aggregate_pars")
        # Assumes the job directory already exists — TODO confirm it is created at job setup.
        latest_num = len(os.listdir(job_base_model_dir)) - 1
        latest_tmp_aggretate_file_path = os.path.join(job_base_model_dir, "avg_pars_{}".format(latest_num))
        with open(latest_tmp_aggretate_file_path, "wb") as f:
            for line in tmp_aggregate_file.readlines():
                f.write(line)
    logger.info("recv success")
    return "ok", 200
def start_communicate_client(client_ip, client_port):
app.url_map.strict_slashes = False
run_simple(hostname=client_ip, port=int(client_port), application=app, threaded=True)
logger.info("galaxy learning client started") | [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"... | 3 | galaxylearning/core/communicate_client.py | ZJU-DistributedAI/GalaxyLearning |
import RPi.GPIO as GPIO
from time import sleep
import sys
#Set warnings off (optional)
GPIO.setwarnings(False)
GPIO.setmode(GPIO.BCM)
#Set Button and LED pins
JOYSTICK_BUTTON = 12
MAIN_SWITCH = 22
LED = 6
class button():
    '''
    A simple push-button wrapper around RPi.GPIO.

    Configures the given pin as an input with the requested pull resistor,
    drives the module-level LED pin as an output, and mirrors the button
    state onto the LED from an edge-detect callback.
    '''
    def __init__(self, pin, pud_up_down):
        # pin: BCM pin number to watch; pud_up_down: GPIO.PUD_UP or GPIO.PUD_DOWN.
        print("'def __init__(self," + str(pin)+ "): '")
        GPIO.setup(pin, GPIO.IN, pull_up_down=pud_up_down)
        GPIO.setup(LED,GPIO.OUT)
        # Watch both edges so press and release are both reported.
        GPIO.add_event_detect(pin, GPIO.BOTH, callback=self.push_button_callback, bouncetime=300)
    #    GPIO.add_event_detect(pin, GPIO.FALLING, callback=self.release_button_callback, bouncetime=300)
    def push_button_callback(self, channel):
        # channel is the BCM pin number that triggered the edge.
        print(channel)
        sleep(0.1)  # short settle delay before sampling the pin level
        if GPIO.input(channel):
            print("Rising edge detected on " + str(channel) )
            GPIO.output(LED,GPIO.HIGH)
        else:
            print("Falling edge detected on " + str(channel) )
            GPIO.output(LED,GPIO.LOW)
def main(args=None):
    """Wire up the two buttons and idle until Ctrl-C, then release GPIO."""
    main_switch = button(MAIN_SWITCH, GPIO.PUD_DOWN)
    joystick_button = button(JOYSTICK_BUTTON, GPIO.PUD_UP)
    try:
        while True:
            # Heartbeat only; all real work happens in the edge callbacks.
            print(".")
            sleep(5)
    except KeyboardInterrupt:
        print("LedLightNode **** 💀 Ctrl-C detected...")
    finally:
        print("LedLightNode **** 🪦 Ending... ")
        print( str(sys.exc_info()[1]) ) # Need ´import sys´
        # Release every GPIO channel this process configured.
        GPIO.cleanup()
if __name__ == "__main__":
main() | [
{
"point_num": 1,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docs... | 3 | pet_mk_viii/UnitTest/push_button.py | Pet-Series/Pet-Mk-VII |
# Copyright 2019 BlueCat Networks (USA) Inc. and its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
from unittest import mock
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
sys.modules["bluecat"] = mock.Mock()
sys.modules["bluecat.api_exception"] = mock.Mock()
sys.modules["config"] = mock.Mock()
sys.modules["config.default_config"] = mock.Mock()
sys.modules["main_app"] = mock.Mock()
sys.modules["bluecat_portal.workflows.trouble_shooting_ui.common"] = mock.Mock()
sys.modules["bluecat_portal.config"] = mock.Mock()
sys.modules["bluecat_portal"] = mock.Mock()
def route_fake(app, path, methods=None):
    """Test stand-in for bluecat.route: a no-op decorator factory.

    All routing arguments are ignored; the decorated view function is
    returned unchanged.
    """
    def passthrough(func):
        return func
    return passthrough
def workflow_permission_required_fake(path):
    """Test stand-in for bluecat.util.workflow_permission_required.

    Ignores *path* and returns the decorated function unchanged.
    """
    def passthrough(func):
        return func
    return passthrough
mock.patch('bluecat.route', route_fake).start()
mock.patch('bluecat.util.workflow_permission_required', workflow_permission_required_fake).start()
mock.patch('bluecat.util.exception_catcher', lambda x: x).start()
| [
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
},
{
"point_num": 2,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excludi... | 3 | tests/context.py | haihuynh-bluecat/trouble_shooting_ui |
# coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
OpenAPI spec version: release-1.16
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import kubernetes.client
from kubernetes.client.models.v1_event_series import V1EventSeries # noqa: E501
from kubernetes.client.rest import ApiException
class TestV1EventSeries(unittest.TestCase):
    """Unit test stubs for the generated V1EventSeries model."""
    def setUp(self):
        # No fixtures needed for these generated stubs.
        pass
    def tearDown(self):
        # Nothing to clean up.
        pass
    def testV1EventSeries(self):
        """Smoke-test construction of V1EventSeries (stub, not yet implemented)."""
        # FIXME: construct object with mandatory attributes with example values
        # model = kubernetes.client.models.v1_event_series.V1EventSeries()  # noqa: E501
        pass
if __name__ == '__main__':
unittest.main()
| [
{
"point_num": 1,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},... | 3 | kubernetes/test/test_v1_event_series.py | L3T/python |
#!
from datetime import date
import os
from os import listdir, path
import shutil
import logging
logging.basicConfig(level=logging.INFO, format='%(message)s')
#
# path utils
#
def this_folder():
    """Absolute path of the directory containing this script."""
    here = path.abspath(__file__)
    return path.dirname(here)
def par_folder():
    """Path to the parent of this script's folder ('..'-joined, not resolved)."""
    here = this_folder()
    return path.join(here, os.pardir)
def make_if_none(directory):
    """Create *directory* (including parents) unless it already exists."""
    if os.path.exists(directory):
        return
    os.makedirs(directory)
#
# bootswatch paths: each entry is (theme root directory, bootstrap major version)
#
root_generator = path.join(this_folder(), 'generator')
root_3rd = path.join(root_generator, '3rd')
root_bootswatch = path.join(root_3rd, 'bootswatch')
root_themes = [(root_bootswatch, '3'),
               (path.join(root_bootswatch, '2'), '2'),
               (path.join(root_bootswatch, '4-alpha'), '4')]
#
# copy each theme's bootstrap.min.css into bootswatch-themes/<version>/<theme>.min.css
#
for r, ver in root_themes:
    to = path.join(root_generator, 'bootswatch-themes', ver)
    make_if_none(to)
    for theme_folders in os.listdir(r):
        # A folder counts as a theme only if it ships a minified bootstrap build.
        test_file = path.join(r, theme_folders, 'bootstrap.min.css')
        if path.exists(test_file):
            shutil.copy(path.join(r, theme_folders, 'bootstrap.min.css'), path.join(to, theme_folders + '.min.css'))
| [
{
"point_num": 1,
"id": "all_return_types_annotated",
"question": "Does every function in this file have a return type annotation?",
"answer": false
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
... | 3 | collect_themes.py | rockonedege/markdown-cv |
from fastapi import Depends, HTTPException, status, Header
from fastapi.security import OAuth2PasswordBearer
from pydantic import ValidationError
from jose import jwt
from webapi.db.config import async_session
from webapi.db import models, schemas
from webapi.db.dals.user_dal import UserDAL
from webapi.setting import settings
from webapi.utils import security
reusable_oauth2 = OAuth2PasswordBearer(
tokenUrl=f'/api/admin/login/access_token/'
)
class DALGetter:
    """FastAPI dependency factory yielding a DAL bound to a fresh DB session.

    Usage: Depends(DALGetter(SomeDAL)) — each request gets its own session,
    within a session.begin() transaction scope.
    """

    def __init__(self, dal_cls):
        # The DAL class to instantiate per request.
        self.dal_cls = dal_cls

    async def __call__(self):
        # One session and one transaction per dependency resolution.
        async with async_session() as session, session.begin():
            yield self.dal_cls(session)
async def get_current_user(
        dal: UserDAL = Depends(DALGetter(UserDAL)), token: str = Depends(reusable_oauth2)
) -> models.User:
    """FastAPI dependency: resolve the current user from a Bearer JWT.

    Decodes the token with the app's SECRET_KEY, validates the payload into
    a TokenPayload, and loads the user via UserDAL using the "sub" claim.

    Raises:
        HTTPException 401: if the token is malformed, fails schema
            validation, or references a user that does not exist.
    """
    credentials_exception = HTTPException(
        status_code=status.HTTP_401_UNAUTHORIZED,
        detail="Could not validate credentials",
        headers={"WWW-Authenticate": "Bearer"},
    )
    try:
        # jwt.decode verifies the signature with the configured algorithm.
        payload = jwt.decode(
            token, settings.SECRET_KEY, algorithms=[security.ALGORITHM]
        )
        token_data = schemas.token.TokenPayload(**payload)
    except (jwt.JWTError, ValidationError):
        raise credentials_exception
    user = await dal.get(id=token_data.sub)
    if user is None:
        raise credentials_exception
    return user
| [
{
"point_num": 1,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
},
{
"point_num": 2,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inherita... | 3 | webapi/utils/dependencies.py | xqhgit/fastapi-vue-blog |
# https://www.runoob.com/python/python-object.html
# !/usr/bin/python
# -*- coding: UTF-8 -*-
class Parent:  # define the parent class
    """Tutorial parent class demonstrating inheritance and class attributes."""
    parentAttr = 100  # class attribute, shared by all instances and subclasses
    def __init__(self):
        # Constructor message (runtime output kept in Chinese).
        print("调用父类构造函数")
    def parentMethod(self):
        print('调用父类方法')
    def setAttr(self, attr):
        # Writes the class-level attribute, so the change is visible everywhere.
        Parent.parentAttr = attr
    def getAttr(self):
        print("父类属性 :", Parent.parentAttr)
class Child(Parent):  # define the subclass
    """Subclass overriding __init__ and adding childMethod."""
    def __init__(self):
        # Note: does NOT call super().__init__(), so the parent message never prints.
        print("调用子类构造方法")
    def childMethod(self):
        print('调用子类方法')
c = Child()  # instantiate the subclass (prints the child constructor message)
c.childMethod()  # call the subclass method
c.parentMethod()  # call the inherited parent method
c.setAttr(200)  # parent method again - set the shared class attribute
c.getAttr()  # parent method again - read the attribute back (prints 200)
| [
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": false
},
{
"point_num": 2,
"id": "every_class_has_docstring",
"question": "Does every class in this file have a docstring?",
"answer": fals... | 3 | Python高级编程/0面向对象 继承调用顺序.py | shao1chuan/pythonbook |
min_x = 0
min_y = 0
def init_game():
    """Reset the module-level search-window origin back to (0, 0)."""
    global min_x, min_y
    min_x = min_y = 0
def get_position_middle(max, current):
    """Half the distance between *max* and *current*, truncated toward zero.

    Note: the parameter names shadow the builtins; kept for interface
    stability with existing callers.
    """
    half = (max - current) / 2
    return int(abs(half))
def get_position_middle_reversed(min, current):
    """Like get_position_middle, but a zero step is promoted to 1."""
    step = get_position_middle(min, current)
    return step or 1
def try_guest_hacker_v1(direction_hacker, matrix_width, matrix_height, player_x, player_y):
    """Legacy single-step guess: move one cell in the hinted direction,
    clamped to the matrix bounds.

    direction_hacker: string containing any of "U", "D", "L", "R".
    Returns [x, y]. An axis with no hint stays at 0 (legacy behavior kept).

    Fix: the clamps were inverted — min(0, y-1) always produced <= 0 and
    max(height-1, y+1) always jumped to the far edge. Up/left now clamp at
    0 via max(), down/right clamp at the last cell via min().
    """
    position_move_y = 0
    if "U" in direction_hacker:
        position_move_y = max(0, player_y - 1)
    if "D" in direction_hacker:
        position_move_y = min(matrix_height - 1, player_y + 1)
    position_move_x = 0
    if "L" in direction_hacker:
        position_move_x = max(0, player_x - 1)
    if "R" in direction_hacker:
        position_move_x = min(matrix_width - 1, player_x + 1)
    return [position_move_x, position_move_y]
def try_guest_hacker(direction_hacker, matrix_width, matrix_height, player_x, player_y):
    '''Binary-search step toward the hidden target.

    Each call halves the remaining search window on the hinted axes and
    returns the new guess [x, y]. The window's lower bounds live in the
    module globals min_x/min_y (mutated here); upper bounds are rebuilt
    per call from the matrix size or the current position.
    NOTE(review): relies on init_game() being called between games, since
    min_x/min_y persist across calls.

    Example matrix (2 = player guess, 1 = target):
    [[0 0 0 0 2 1 0 0]
     [0 0 0 0 0 0 0 0]]
    '''
    global min_x, min_y
    max_x = matrix_width
    max_y = matrix_height
    player_new_x = player_x
    player_new_y = player_y
    position_move_x = 0
    position_move_y = 0
    if "U" in direction_hacker:
        # Target is above: shrink the window's top to the current row,
        # step up by at least one (the *_reversed variant never returns 0).
        max_y = player_new_y
        position_move_y = 0 - get_position_middle_reversed(min_y, player_new_y)
    if "D" in direction_hacker:
        # Target is below: raise the lower bound, step half-way down.
        min_y = player_new_y
        position_move_y = get_position_middle(max_y, player_new_y)
    if "L" in direction_hacker:
        # Target is to the left: shrink the right edge, step left.
        max_x = player_new_x
        position_move_x = 0 - get_position_middle_reversed(min_x, player_new_x)
    if "R" in direction_hacker:
        # Target is to the right: raise the left bound, step half-way right.
        min_x = player_new_x
        position_move_x = get_position_middle(max_x, player_new_x)
    player_new_x = player_new_x + position_move_x
    player_new_y = player_new_y + position_move_y
    return [player_new_x, player_new_y]
| [
{
"point_num": 1,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
},
{
"point_num": 2,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a ty... | 3 | python/binary_search/game_binary_search.py | alfreddagenais/kilukru-dev-articles |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any
from azure.core.configuration import Configuration
from azure.core.pipeline import policies
from .._version import VERSION
class AutoRestDurationTestServiceConfiguration(Configuration):
    """Configuration for AutoRestDurationTestService (async client).

    Note that all parameters used to create this instance are saved as instance
    attributes.
    """
    def __init__(self, **kwargs: Any) -> None:
        super(AutoRestDurationTestServiceConfiguration, self).__init__(**kwargs)
        # Advertise the SDK name/version in the User-Agent unless overridden.
        kwargs.setdefault("sdk_moniker", "autorestdurationtestservice/{}".format(VERSION))
        self._configure(**kwargs)
    def _configure(self, **kwargs: Any) -> None:
        # Build the HTTP pipeline policies, honouring caller-supplied overrides.
        # Async retry/redirect policies are used: this is the aio configuration.
        self.user_agent_policy = kwargs.get("user_agent_policy") or policies.UserAgentPolicy(**kwargs)
        self.headers_policy = kwargs.get("headers_policy") or policies.HeadersPolicy(**kwargs)
        self.proxy_policy = kwargs.get("proxy_policy") or policies.ProxyPolicy(**kwargs)
        self.logging_policy = kwargs.get("logging_policy") or policies.NetworkTraceLoggingPolicy(**kwargs)
        self.http_logging_policy = kwargs.get("http_logging_policy") or policies.HttpLoggingPolicy(**kwargs)
        self.retry_policy = kwargs.get("retry_policy") or policies.AsyncRetryPolicy(**kwargs)
        self.custom_hook_policy = kwargs.get("custom_hook_policy") or policies.CustomHookPolicy(**kwargs)
        self.redirect_policy = kwargs.get("redirect_policy") or policies.AsyncRedirectPolicy(**kwargs)
        # No default credential: authentication_policy stays None unless provided.
        self.authentication_policy = kwargs.get("authentication_policy")
| [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "all_return_types_annotated",
"question": "Does every function in this file have a return type annotation?",
"answer"... | 3 | test/vanilla/Expected/AcceptanceTests/BodyDuration/bodyduration/aio/_configuration.py | qwordy/autorest.python |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.