source string | points list | n_points int64 | path string | repo string |
|---|---|---|---|---|
from django.contrib import admin
from django.contrib.admin import ModelAdmin
from common.admin import BaseRegionalAdminMixin
from containers.models import Container
@admin.register(Container)
class ContainerAdmin(BaseRegionalAdminMixin, ModelAdmin):
    # Admin for Container records; regional scoping comes from BaseRegionalAdminMixin.
    ordering = ("size_class",)
    # Columns shown on the changelist page.
    list_display = (
        "id",
        "size_class",
        "standard",
        "volume",
        "deposit",
        "delivery_fee",
        "image_url",
    )
    def has_change_permission(self, request, obj=None):
        # Only superusers may edit containers.
        return request.user.is_superuser
    def has_delete_permission(self, request, obj=None):
        # Only superusers may delete containers.
        return request.user.is_superuser
| [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/... | 3 | containers/admin.py | stanwood/traidoo-api |
# IMPORTATION STANDARD
# IMPORTATION THIRDPARTY
import pytest
# IMPORTATION INTERNAL
from openbb_terminal.stocks.government import quiverquant_model
@pytest.fixture(scope="module")
def vcr_config():
    # Scrub the real API token from recorded VCR cassettes.
    return {
        "filter_headers": [("Authorization", "MOCK_TOKEN")],
    }
@pytest.mark.vcr
@pytest.mark.parametrize(
    "gov_type, ticker",
    [
        # ("congress", None),
        ("congress", "COIN"),
        # ("senate", None),
        ("senate", "CONE"),
        # ("house", None),
        ("house", "SPY"),
        # ("contracts", None),
        ("contracts", "AMRC"),
        # ("quarter-contracts", None),
        ("quarter-contracts", "SSTK"),
        # ("corporate-lobbying", None),
        ("corporate-lobbying", "HBI"),
    ],
)
def test_get_government_trading(gov_type, recorder, ticker):
    # Snapshot-test each government data type against a recorded cassette.
    result_df = quiverquant_model.get_government_trading(
        gov_type=gov_type,
        ticker=ticker,
    )
    recorder.capture(result_df.head(10))
@pytest.mark.default_cassette("test_analyze_qtr_contracts")
@pytest.mark.vcr
@pytest.mark.parametrize("analysis", ["total", "upmom", "downmom"])
def test_analyze_qtr_contracts(analysis, recorder):
    # Snapshot-test each quarterly-contract analysis mode against one shared cassette.
    result_df = quiverquant_model.analyze_qtr_contracts(
        analysis=analysis,
        num=10,
    )
    recorder.capture(result_df.head(10))
| [
{
"point_num": 1,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
},
{
"point_num": 2,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fe... | 3 | tests/openbb_terminal/stocks/government/test_quiverquant_model.py | tehcoderer/GamestonkTerminal |
class Solution:
    # Index-based column check, O(n*m) time, O(1) extra space.
    # Renamed: the original file defined two methods both called
    # `minDeletionSize`, so this first one was silently shadowed (dead code).
    # `minDeletionSize` below keeps its original binding for callers.
    def minDeletionSizeIndexed(self, strs: List[str]) -> int:
        """Count columns of *strs* that are not sorted top-to-bottom."""
        n, m = len(strs), len(strs[0])
        def delete(j):
            # True if column j breaks non-decreasing order anywhere.
            for i in range(n-1):
                if strs[i][j] > strs[i+1][j]:
                    return True
            return False
        return sum(delete(j) for j in range(m))
    # One liner using zip (Top Voted), O(n*m) time and space
    def minDeletionSize(self, A):
        """Count unsorted columns by transposing the rows with zip."""
        return sum(any(a > b for a, b in zip(col, col[1:])) for col in zip(*A))
| [
{
"point_num": 1,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
},
{
"point_num": 2,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a ty... | 3 | sols/944.py | Paul11100/LeetCode |
import os
import io
from stanza_wrapper import parse
__here__ = os.path.dirname(os.path.realpath(__file__))
txt = b'''Dit is een tekst. Er zijn twee zinnen.'''
def assert_equal(val1, val2):
    """Thin wrapper so equality checks read uniformly in the tests below."""
    assert val1 == val2
def test_tokenize():
    """Parse the two-sentence Dutch sample; check token count, last offset, layer count."""
    my_obj = parse(io.BytesIO(txt))
    token_list = list(my_obj.get_tokens())
    assert_equal(len(token_list), 10)
    last_token = token_list[-1]
    # get_offset() returns the offset as a string; the last token ends at the final byte.
    assert_equal(last_token.get_offset(), str(len(txt)-1))
    # Check linguistic processor layers
    layers = list(my_obj.get_linguisticProcessors())
    assert_equal(len(layers), 3)
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": fals... | 3 | tests/test_tokenizer.py | Filter-Bubble/stanza_wrapper |
#!/usr/bin/env python
import logging
import os
from pprint import pformat
from .cleaner import delete_tweets, print_tweet_urls, print_user_info
from .util import parse_options, write_credentials_template
def main():
    """CLI entry point: parse options, configure logging, dispatch the subcommand."""
    args = parse_options()
    _set_log_config(args=args)
    logging.debug('args:' + os.linesep + pformat(vars(args)))
    if args.subcommand == 'init':
        # Write a credentials YAML template for the user to fill in.
        write_credentials_template()
    elif args.subcommand == 'user':
        print_user_info(zip_path=args.zip_path)
    elif args.subcommand == 'urls':
        print_tweet_urls(zip_path=args.zip_path, pattern=args.pattern)
    elif args.subcommand == 'delete':
        delete_tweets(
            zip_path=args.zip_path, cred_yml_path=args.cred_yml_path,
            ignore_errors=args.ignore_errors, pattern=args.pattern
        )
def _set_log_config(args):
if args.debug:
lv = logging.DEBUG
elif args.info:
lv = logging.INFO
else:
lv = logging.WARNING
logging.basicConfig(
format='%(asctime)s %(levelname)-8s %(message)s',
datefmt='%Y-%m-%d %H:%M:%S', level=lv
)
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": fals... | 3 | deltw/cli.py | dceoy/extract-twitter-id |
import cv2
class SimplePreprocessor:
    """Resize images to a fixed (width, height), ignoring aspect ratio."""
    def __init__(self, width, height, inter=cv2.INTER_AREA):
        # Target dimensions and the OpenCV interpolation flag used by preprocess().
        self.width = width
        self.height = height
        self.inter = inter
    def preprocess(self, image):
        """Return *image* resized to the stored target dimensions."""
        target_dims = (self.width, self.height)
        return cv2.resize(image, target_dims, interpolation=self.inter)
{
"point_num": 1,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 2,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/c... | 3 | 22. Neural Networks from Scratch/preprocessing/simplepreprocessor.py | Arjitg450/Machine-Learning-Case-Studies |
# -*- coding: utf-8 -*-
"""
Created on Wed Nov 28 01:15:31 2018
@author: Andres
"""
import pandas as pd
url = 'http://catalogo.datosabiertos.gob.ec/api/action/datastore_search?resource_id=8513f446-1c94-426e-8592-d4cbdd295f33&limit=1000'
datos = pd.read_json(url, typ='frame')
datos =pd.DataFrame.from_dict(datos["result"]["records"]).set_index("_id")
#datos[ datos['Sexo'] != 'MASCULINO' ]
datos.loc[289,'Canton']
class Homicidio:
    """A single homicide record mirroring the columns of the open-data table."""
    def __init__(self,Canton,Circuito,Distrito,Edad,Estado_Civil,Fecha_infraccion,Hora_infraccion,Nacionalidad,Provincia,Sexo,Zona,tipo_muert_matriz):
        # Note: the attribute is Estado_civil (lower-case c) even though the
        # parameter is Estado_Civil — preserved for compatibility with callers.
        self.Canton = Canton
        self.Circuito = Circuito
        self.Distrito = Distrito
        self.Edad = Edad
        self.Estado_civil = Estado_Civil
        self.Fecha_infraccion = Fecha_infraccion
        self.Hora_infraccion = Hora_infraccion
        self.Nacionalidad = Nacionalidad
        self.Provincia = Provincia
        self.Sexo = Sexo
        self.Zona = Zona
        self.tipo = tipo_muert_matriz
    def get_list(self):
        """Return the fields in the column order used by the dataframe."""
        return [
            self.Canton, self.Circuito, self.Distrito, self.Edad,
            self.Estado_civil, self.Fecha_infraccion, self.Hora_infraccion,
            self.Nacionalidad, self.Provincia, self.Sexo, self.Zona, self.tipo,
        ]
def insertar(Canton,Circuito,Distrito,Edad,Estado_Civil,Fecha_infraccion,Hora_infraccion,Nacionalidad,Provincia,Sexo,Zona,tipo_muert_matriz):
    """Append a new homicide record to the module-level ``datos`` DataFrame.

    All parameters are the record's column values, in table order.
    """
    global datos
    homicidio = Homicidio(Canton, Circuito, Distrito, Edad, Estado_Civil, Fecha_infraccion,
                          Hora_infraccion, Nacionalidad, Provincia, Sexo, Zona, tipo_muert_matriz)
    serie = pd.Series(homicidio.get_list(), index=datos.columns)
    # DataFrame.append() was removed in pandas 2.0; pd.concat is the supported
    # way to add a row. (The original also computed an unused `_id` value.)
    datos = pd.concat([datos, serie.to_frame().T], ignore_index=True)
insertar("MIAMI", "CI","W","211","SOLTERO", "2019-04-05T00:00:00","2015-12-02T23:00:00",
"EEUU","FLORIDA","MASCULINO", "ZONA 80","Asesinatos"
)
| [
{
"point_num": 1,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
},
{
"point_num": 2,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fe... | 3 | 03_spyder/proyecto_spyder.py | 2018-B-GR1-Python/Velasco-Yepez-Andres-David |
# coding: utf-8
"""
UltraCart Rest API V2
UltraCart REST API Version 2
OpenAPI spec version: 2.0.0
Contact: support@ultracart.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import ultracart
from ultracart.rest import ApiException
from ultracart.models.item_option import ItemOption
class TestItemOption(unittest.TestCase):
    """ItemOption unit test stubs."""
    def setUp(self):
        # No fixtures required yet.
        pass
    def tearDown(self):
        # Nothing to clean up.
        pass
    def testItemOption(self):
        """Test ItemOption (placeholder)."""
        # FIXME: construct object with mandatory attributes with example values
        # model = ultracart.models.item_option.ItemOption()
        pass
if __name__ == '__main__':
unittest.main()
| [
{
"point_num": 1,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
... | 3 | test/test_item_option.py | gstingy/uc_python_api |
#
# Copyright Soramitsu Co., Ltd. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
#
import irohalib
import commons
import primitive_pb2
admin = commons.new_user('admin@first')
alice = commons.new_user('alice@second')
iroha = irohalib.Iroha(admin['id'])
@commons.hex
def genesis_tx():
    """Build and sign the genesis transaction granting alice can_get_all_acc_txs (multidomain)."""
    test_permissions = [primitive_pb2.can_get_all_acc_txs]
    genesis_commands = commons.genesis_block(admin, alice, test_permissions, multidomain=True)
    tx = iroha.transaction(genesis_commands)
    irohalib.IrohaCrypto.sign_transaction(tx, admin['key'])
    return tx
@commons.hex
def account_transactions_query():
    """alice queries admin's transactions — permitted by can_get_all_acc_txs from the genesis block."""
    query = iroha.query('GetAccountTransactions', creator_account=alice['id'], account_id=admin['id'], page_size=10)
    irohalib.IrohaCrypto.sign_query(query, alice['key'])
    return query
| [
{
"point_num": 1,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"ans... | 3 | example/python/permissions/can_get_all_acc_txs.py | artyom-yurin/iroha-archive |
import os
from unittest import mock
def delete_file(filename):
    """Unlink *filename* repeatedly until os.path.exists reports it gone."""
    while os.path.exists(filename):
        os.unlink(filename)
# mock.patch decorators are applied bottom-up, so the mock for the decorator
# closest to the function is injected FIRST. The original signature named the
# parameters in the wrong order: `mock_exists` actually received the unlink mock.
@mock.patch('os.path.exists', side_effect=(True, False, False))
@mock.patch('os.unlink')
def test_delete_file(mock_unlink, mock_exists):
    """exists() yields True once then False: unlink must run exactly once."""
    # first try: exists -> True, unlink, exists -> False
    delete_file('some non-existing file')
    # second try: exists -> False immediately
    delete_file('some non-existing file')
    mock_unlink.assert_called_once()
| [
{
"point_num": 1,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than... | 3 | books/masteringPython/cp10/source_test/mock_delete.py | Bingwen-Hu/hackaway |
from typing import Any, Callable, Dict, Iterable, List
import spotipy
from spotipy.oauth2 import SpotifyOAuth
from . import constants
from .types import Track
def get_saved_tracks() -> List[Track]:
    """Gets list of saved tracks using Spotify API.

    Returns
    -------
    List[Track]
        list of users saved tracks
    """
    spotify = _get_spotify_session()
    # Each paginated entry wraps the actual track dict under the "track" key.
    return [
        Track.from_spotify_track_dict(track_dict["track"])
        for track_dict in _paginated_iter(spotify.current_user_saved_tracks)
    ]
def _paginated_iter(iter_callable: Callable[[int, int], Dict[str, Any]], limit: int = 20) -> Iterable[Dict[str, Any]]:
"""Paginated iterator for spotiy paginated return.
Parameters
----------
iter_callable : Callable[[int, int], Dict[str, Any]]
paginated iterable callable to iterate over
limit : int
limit of results to send in single API call, by default 20
Yields
-------
Generator[Dict[str, Any], None, None]
yields items for iterable callable
"""
# init offset and loop condition
offset: int = 0
is_next: bool = True
while is_next:
paginated_dict = iter_callable(limit=limit, offset=offset)
for item in paginated_dict["items"]:
yield item
is_next = paginated_dict["next"]
offset += limit
def _get_spotify_session(scope: str = "user-library-read") -> spotipy.Spotify:
    """Gets spotify session.

    Parameters
    ----------
    scope : str
        scope of session, by default "user-library-read"

    Returns
    -------
    spotipy.Spotify
        spotify session
    """
    # Credentials come from module constants; the redirect URI must match the
    # one registered for the app in the Spotify developer dashboard.
    return spotipy.Spotify(auth_manager=SpotifyOAuth(
        client_id=constants.SPOTIPY_CLIENT_ID,
        client_secret=constants.SPOTIPY_CLIENT_SECRET,
        redirect_uri="http://localhost:8080",
        scope=scope,
    ))
| [
{
"point_num": 1,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 2... | 3 | spotify_2_apple_music/spotify.py | ryansingman/spotify-2-apple-music |
"""change SystemParameter key to name
Revision ID: cae154d4dcb4
Revises: 17e220f9fafb
Create Date: 2021-09-24 13:42:12.722846
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'cae154d4dcb4'
down_revision = '17e220f9fafb'
branch_labels = None
depends_on = None
def upgrade():
    """Rename system_parameter.key to name (adds 'name', drops 'key')."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('system_parameter', sa.Column('name', sa.String(), nullable=True))
    # NOTE(review): values are not copied from 'key' into 'name' before the drop —
    # existing data in 'key' is lost. Confirm this is intended.
    op.drop_column('system_parameter', 'key')
    # ### end Alembic commands ###
def downgrade():
    """Revert the rename: re-add 'key' and drop 'name' (data in 'name' is lost)."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('system_parameter', sa.Column('key', sa.VARCHAR(), autoincrement=False, nullable=True))
    op.drop_column('system_parameter', 'name')
    # ### end Alembic commands ###
| [
{
"point_num": 1,
"id": "all_return_types_annotated",
"question": "Does every function in this file have a return type annotation?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answe... | 3 | apis/alembic/versions/cae154d4dcb4_change_systemparameter_key_to_name.py | iii-org/devops-system |
from .._py2 import *
class CacheError(Exception):
    '''Base class for exceptions related to the cache'''
    pass
class PageNotCachedError(CacheError):
    '''Exception raised when a non-existent page is requested'''
    def __init__(self):
        super().__init__('This page has not been cached yet.')
class PageClearedError(CacheError):
    '''Exception raised when a deleted page is requested'''
    def __init__(self):
        super().__init__('This page has been cleared.')
class CacheMemoryError(CacheError):
    '''Exception raised when max data already stored in cache'''
    def __init__(self):
        # The original concatenation lacked a separating space, producing the
        # garbled message "...call /cache/clear or/cache/clear/{page_name}".
        super().__init__(
            'Cannot save any more pages, call /cache/clear or '
            '/cache/clear/{page_name}')
class CacheOverwriteError(CacheError):
    '''Exception raised when attempted overwriting of page'''
    def __init__(self):
        super().__init__(
            'Cannot overwrite page, choose a different name')
| [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": false
},
{
"point_num": 2,
"id": "every_class_has_docstring",
"question": "Does every class in this file have a docstring?",
"answer": true
},
{
... | 3 | mixnmatchttp/cache/exc.py | aayla-secura/simple_CORS_https_server |
from bs4 import BeautifulSoup
from ..occurrences.occurrences import Occurrences
from ..occurrences.occurrence_interface import OccurrenceInterface
class Recommendation36:
    """
    Recommendation 36:
    - Provide controls for audio content.
    """
    def __init__(self, sourcecode):
        # HTML source to evaluate.
        self.rec = 36
        self.sourcecode = sourcecode
        self.occurrences = Occurrences()
    def avaliacao(self):
        """Record an occurrence for every <audio> element and return the occurrence list."""
        soap = BeautifulSoup(self.sourcecode, 'html.parser')
        # Original used `for audios in audios:`, shadowing the result list with
        # the loop variable; a distinct name keeps both accessible and readable.
        for audio in soap.find_all('audio'):
            self.occurrences.add(OccurrenceInterface(self.rec, 2, audio, 2))
        return self.occurrences.list_of_occurrences
| [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
... | 3 | crawling/recommendations/recommendation36.py | carlosbognar/TCC-WCGA-1 |
'''
LeetCode LinkedList Q.876 Middle of the Linked List
Recusion and Slow/Fast Pointer Solution
'''
def middleNode(self, head: ListNode) -> ListNode:
    """Return the middle node of the list (second of the two middles when even).

    Iterative slow/fast pointers: fast moves two steps per slow step, so slow
    sits on the middle when fast runs off the end — same traversal as the
    recursive formulation, without the call stack.
    """
    slow = fast = head
    while fast and fast.next:
        slow = slow.next
        fast = fast.next.next
    return slow
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding ... | 3 | Q876_Middle_Linked_List.py | amitjoshi9627/LeetCode-Solutions |
from plenum.common.messages.node_messages import Nomination, Primary
from plenum.server.replica import Replica
from plenum.test.test_node import TestNode
def checkNomination(node: TestNode, nomineeName: str):
    """Assert *node* nominated *nomineeName* on at least one protocol instance.

    Returns the name of the first replica whose recorded nomination matches.
    """
    matches = [replica.name for instId, replica in enumerate(node.elector.replicas) if
               node.elector.didReplicaNominate(instId) is True and
               replica.name in node.elector.nominations[instId] and
               node.elector.nominations[instId][replica.name][0] ==
               Replica.generateName(nomineeName, instId)]
    assert len(matches) > 0
    return matches[0]
# TODO Think of a better name for this function
def getSelfNominationByNode(node: TestNode) -> int:
    """
    This function returns the index of the protocol instance for which it nominated itself
    @param node: the node
    @return: the protocol instance index, or None if no self-nomination was recorded
    """
    for instId, replica in enumerate(node.elector.replicas):
        name = Replica.generateName(node.name, instId)
        # nominations[instId][name] is a tuple whose first element is the nominee.
        if node.elector.nominations.get(instId, {}).get(name, [None, ])[
            0] == name:
            return instId
def nominationByNode(name: str, byNode: TestNode, instId: int):
    """Build a Nomination message for *name* as sent by *byNode* on instance *instId*."""
    return Nomination(name, instId, byNode.viewNo,
                      byNode.replicas[instId].lastOrderedPPSeqNo[1])
def primaryByNode(name: str, byNode: TestNode, instId: int):
    """Build a Primary message for *name* as sent by *byNode* on instance *instId*."""
    return Primary(name, instId, byNode.viewNo,
                   byNode.replicas[instId].lastOrderedPPSeqNo[1])
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding ... | 3 | plenum/test/primary_election/helpers.py | spivachuk/plenum |
import pygame
import random
import math
import enemies.enemy
class ZigZagEnemy(enemies.enemy.Enemy):
    """Enemy that turns 90 degrees in a random direction every 3 seconds."""
    def __init__(self, game):
        super().__init__(game)
        # Repeating timer calls changeAngle every 3000 ms.
        self.timer = self.game.getRepeateTimer()
        self.timer.duration = 3000
        self.timer.action = self.changeAngle
    def changeAngle(self):
        # Turn a quarter turn, clockwise or counter-clockwise at random.
        if random.randint(0, 1) == 0:
            self.angle += math.pi / 2
        else:
            self.angle -= math.pi / 2
    def update(self, elapsed, gameScene):
        """Move along the current heading; activate on entering the playfield, die on leaving."""
        self.y += math.sin(self.angle) * self.vspeed * elapsed
        self.x += math.cos(self.angle) * self.hspeed * elapsed
        self.rect.x = self.x
        self.rect.y = self.y
        # Not yet active and still off-screen: keep approaching.
        if not self.active and not self.inGame():
            pass
        # First time on-screen: mark active so leaving later kills it.
        if not self.active and self.inGame():
            self.active = True
        if self.active and self.inGame():
            pass
        # Was active and left the playfield: remove sprite and stop its timer.
        if self.active and not self.inGame():
            self.kill()
            self.timer.cancel()
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding se... | 3 | PyShooter/enemies/zigzagenemy.py | ildave/PyShooter |
load("//:deps.bzl",
"bazel_gazelle",
"bazel_skylib",
"org_pubref_rules_node",
"build_bazel_rules_nodejs",
"build_bazel_rules_typescript",
"com_google_protobuf",
"io_bazel_rules_closure",
"io_bazel_rules_go",
"io_bazel_rules_webtesting",
"ts_protoc_gen",
)
def ts_proto_compile(**kwargs):
    """Load all workspace dependencies required for ts proto compilation."""
    bazel_gazelle(**kwargs)
    bazel_skylib(**kwargs)
    org_pubref_rules_node(**kwargs)
    build_bazel_rules_nodejs(**kwargs)
    build_bazel_rules_typescript(**kwargs)
    com_google_protobuf(**kwargs)
    io_bazel_rules_closure(**kwargs)
    # The original invoked io_bazel_rules_go twice; one call is sufficient.
    io_bazel_rules_go(**kwargs)
    io_bazel_rules_webtesting(**kwargs)
    ts_protoc_gen(**kwargs)
def ts_grpc_compile(**kwargs):
    """gRPC compilation needs the same dependency set as plain proto compilation."""
    ts_proto_compile(**kwargs)
def ts_proto_library(**kwargs):
    """Library rule deps are the same as the compile rule deps."""
    ts_proto_compile(**kwargs)
def ts_grpc_library(**kwargs):
    """gRPC library needs both the gRPC and proto library dependency sets."""
    ts_grpc_compile(**kwargs)
    ts_proto_library(**kwargs)
| [
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer":... | 3 | github.com/improbable-eng/ts-protoc-gen/deps.bzl | mirandacong/rules_proto |
from ignition.dsl.flame.tensors.tensor_names import (convert_name,
add_lower_ind, add_upper_ind, set_lower_ind, set_upper_ind, to_latex)
def test_convert_name():
    """convert_name maps a tensor name to its rank-appropriate form (2=matrix, 1=vector, 0=scalar)."""
    assert(convert_name("A", 2) == "A")
    assert(convert_name("A", 1) == "a")
    assert(convert_name("A", 0) == "alpha")
    assert(convert_name("a", 2) == "A")
    assert(convert_name("a", 1) == "a")
    assert(convert_name("a", 0) == "alpha")
    assert(convert_name("alpha", 2) == "A")
    assert(convert_name("alpha", 1) == "a")
    assert(convert_name("alpha", 0) == "alpha")
    # Subscripts/superscripts are preserved; only the base name changes.
    assert(convert_name("A_TL", 2) == "A_TL")
    assert(convert_name("a^T", 2) == "A^T")
    assert(convert_name("alpha_01", 2) == "A_01")
    # Names without a greek/case mapping pass through unchanged.
    assert(convert_name("foo_bar", 1) == "foo_bar")
def test_inds():
    """set_upper_ind/set_lower_ind replace the respective index, keeping the other."""
    assert(set_upper_ind("A", "TL") == "A^TL")
    assert(set_upper_ind("A_01", "TL") == "A_01^TL")
    assert(set_upper_ind("A_TL^T", "TL") == "A_TL^TL")
    assert(set_lower_ind("A", "TL") == "A_TL")
    assert(set_lower_ind("A_01", "TL") == "A_TL")
    assert(set_lower_ind("A_TL^T", "TL") == "A_TL^T")
def test_to_latex():
    """to_latex braces multi-char subscripts and maps greek names to LaTeX commands."""
    assert(to_latex("A_01^T") == "A_{01}^T")
    assert(to_latex("alpha") == "\\alpha")
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer"... | 3 | ignition/dsl/flame/tensors/tests/test_tensor_names.py | IgnitionProject/ignition |
# -*- encoding: utf-8 -*-
from django.core.management import BaseCommand
from django.db.models import CharField, TextField, Q
from django.apps import apps
from bpp.models import Sumy, Typ_KBN, Charakter_Formalny
from bpp.util import usun_nieuzywany_typ_charakter
class Command(BaseCommand):
    # help (Polish): "Removes unused formal characters and KBN types (use after DBF import)"
    help = "Usuwa nieuzywane charaktery formalne i typy KBN (stosuj po imporcie z DBF)"
    def add_arguments(self, parser):
        # --dry-run: report what would be removed without deleting anything.
        parser.add_argument("--dry-run", action="store_true")
    def handle(self, dry_run=False, *args, **options):
        """Delete unused Typ_KBN and Charakter_Formalny rows (or preview with dry_run)."""
        usun_nieuzywany_typ_charakter(Typ_KBN, "typ_kbn", dry_run)
        usun_nieuzywany_typ_charakter(Charakter_Formalny, "charakter_formalny", dry_run)
| [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answe... | 3 | src/bpp/management/commands/usun_nieuzywane_charaktery_kbny.py | iplweb/django-bpp |
import json
import logging
import time
from six.moves.urllib.request import urlopen
SPACEL_URL = 'https://ami.pbl.io/spacel/%s.json'
logger = logging.getLogger('spacel.aws.ami')
class AmiFinder(object):
    """Resolves SpaceL AMI ids per region from a published JSON manifest."""
    def __init__(self, channel=None, cache_bust=None):
        # Release channel substituted into the manifest URL ('stable' by default).
        self._channel = channel or 'stable'
        # When truthy, append a unique query string and bypass the local cache.
        self.cache_bust = cache_bust
        # url -> parsed manifest dict (only populated when not cache-busting).
        self._cache = {}
    def spacel_ami(self, region):
        """Return the SpaceL AMI id for *region*."""
        ami = self._ami(SPACEL_URL, region)
        logger.debug('Found SpaceL AMI %s in %s', ami, region)
        return ami
    def _ami(self, url, region):
        """Fetch (or reuse a cached) manifest from *url* and return its entry for *region*."""
        url %= self._channel
        if not self.cache_bust:
            manifest = self._cache.get(url)
            if manifest:
                return manifest.get(region)
            logger.debug('AMI manifest %s not cached, fetching...', url)
        else:
            # Unique timestamp query defeats any intermediate HTTP caches.
            url += '?cache=%s' % time.time()
        opened = urlopen(url)
        manifest = json.loads(opened.read().decode('utf-8'))
        if not self.cache_bust:
            self._cache[url] = manifest
        return manifest.get(region)
| [
{
"point_num": 1,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docs... | 3 | src/spacel/aws/ami.py | mycloudandme/spacel-provision |
# -*- coding: utf-8 -*-
# Copyright 2010 Dirk Holtwick, holtwick.it
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from turbogears.decorator import weak_signature_decorator
import xhtml2pdf.pisa as pisa
from six import StringIO
import cherrypy
def to_pdf(filename=None, content_type="application/pdf"):
    """Decorator factory: render a controller's output through pisa into a PDF response.

    Parameters:
        filename: when given, a Content-Disposition attachment header is set.
        content_type: MIME type of the successful response.

    On a pisa error the original (non-PDF) output is returned unchanged.
    """
    def entangle(func):
        def decorated(func, *args, **kw):
            output = func(*args, **kw)
            # six.StringIO IS the class (io.StringIO); the original called
            # StringIO.StringIO(), which raises AttributeError at runtime.
            dst = StringIO()
            result = pisa.CreatePDF(
                StringIO(output),
                dst
            )
            if not result.err:
                cherrypy.response.headers["Content-Type"] = content_type
                if filename:
                    cherrypy.response.headers["Content-Disposition"] = "attachment; filename=" + filename
                output = dst.getvalue()
            return output
        return decorated
    return weak_signature_decorator(entangle)
topdf = to_pdf
| [
{
"point_num": 1,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
},
{
"point_num": 2,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside... | 3 | xhtml2pdf/turbogears.py | trib3/xhtml2pdf |
# coding=utf-8
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.modules import conv
from torch.nn.modules.utils import _single
from ..functions.max_sv import max_singular_value
class SNConv1d(conv._ConvNd):
    """1-D convolution whose weight is divided by its estimated largest singular
    value on every forward pass (spectral normalization)."""
    def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True):
        kernel_size = _single(kernel_size)
        stride = _single(stride)
        padding = _single(padding)
        dilation = _single(dilation)
        super(SNConv1d, self).__init__(
            in_channels, out_channels, kernel_size, stride, padding, dilation,
            False, _single(0), groups, bias)
        # Persistent singular-vector estimate reused across forward passes
        # (presumably power iteration inside max_singular_value — see that helper).
        self.register_buffer('u', torch.Tensor(1, out_channels).normal_())
    @property
    def W_(self):
        # Flatten weight to (out_channels, -1) and normalize by its max singular value.
        w_mat = self.weight.view(self.weight.size(0), -1)
        sigma, _u = max_singular_value(w_mat, self.u)
        # Persist the refined estimate for the next call.
        self.u.copy_(_u)
        return self.weight / sigma
    def forward(self, input):
        """Standard conv1d, but with the spectrally-normalized weight W_."""
        return F.conv1d(input, self.W_, self.bias, self.stride, self.padding, self.dilation, self.groups)
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding se... | 3 | src/snlayers/snconv1d.py | Zihang97/PAGAN |
from . import get_connector, get_engine
import pandas as pd
def get_dataframe(table_name, limit=None):
    """Read *table_name* into a DataFrame, optionally capped at *limit* rows.

    NOTE(review): table_name and limit are interpolated directly into the SQL
    string — acceptable only for trusted internal callers; confirm these values
    are never user-supplied.
    """
    # limit query
    limit_query=""
    if limit:
        limit_query="limit {}".format(limit)
    # create query
    query = "SELECT * FROM {} {}".format(table_name, limit_query)
    # get dataframe from sql query
    df = pd.read_sql(query, con=get_connector())
    return df
def dataframe_to_table(df, table_name):
    """Write *df* to SQL table *table_name*, replacing any existing table of that name."""
    df.to_sql(table_name,
              con=get_engine(),
              index=False,
              if_exists='replace')
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding sel... | 3 | db/pandas.py | NovaSBE-DSKC/retention-evaluation |
import os
from swaglyrics.cli import lyrics
from swaglyrics import SameSongPlaying
from flask import Flask, render_template
from SwSpotify import spotify, SpotifyNotRunning
app = Flask(__name__, template_folder=os.path.join(os.path.dirname(os.path.abspath(__file__)), 'templates'))
# use relative path of the template folder
song = None
artist = None
@app.route('/')
def tab():
    """Render the lyrics page for the track Spotify is currently playing."""
    # format lyrics for the browser tab template
    global song, artist
    try:
        song, artist = spotify.current()
        current_lyrics = lyrics(song, artist)
    except SpotifyNotRunning:
        current_lyrics = 'Nothing playing at the moment.'
    current_lyrics = current_lyrics.split('\n')  # break lyrics line by line
    return render_template('lyrics.html', lyrics=current_lyrics, song=song, artist=artist)
@app.route('/songChanged', methods=['GET'])
def song_changed():
    """Return 'yes' when the playing track differs from the last rendered one, else 'no'."""
    # to refresh lyrics when song changed
    global song, artist
    try:
        if spotify.current() == (song, artist):
            raise SameSongPlaying
        else:
            return 'yes'
    except (SpotifyNotRunning, SameSongPlaying):
        return 'no'
if __name__ == '__main__':
app.run()
| [
{
"point_num": 1,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"ans... | 3 | swaglyrics/tab.py | Tantan4321/SwagLyrics-For-Spotify |
import logging
from .Container import Container
class SyslogUdpClientContainer(Container):
    """Test container that sends a syslog message over UDP to minifi-cpp-flow every second."""
    def __init__(self, name, vols, network, image_store, command=None):
        super().__init__(name, 'syslog-udp-client', vols, network, image_store, command)
    def get_startup_finished_log_entry(self):
        # Log line the test harness waits for before considering the container up.
        return "Syslog UDP client started"
    def deploy(self):
        """Start the container once; subsequent calls are no-ops via set_deployed()."""
        if not self.set_deployed():
            return
        logging.info('Creating and running a Syslog udp client docker container...')
        self.client.containers.run(
            "ubuntu:20.04",
            detach=True,
            name=self.name,
            network=self.network.name,
            entrypoint='/bin/bash -c "echo Syslog UDP client started; while true; do logger --udp -n minifi-cpp-flow -P 514 sample_log; sleep 1; done"')
        logging.info('Added container \'%s\'', self.name)
| [
{
"point_num": 1,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than clas... | 3 | docker/test/integration/minifi/core/SyslogUdpClientContainer.py | nandorsoma/nifi-minifi-cpp |
#!/usr/bin/ctx python
# -*- coding: UTF-8 -*-
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import pytest
import time
import redis
from pprint import pprint as pp
from sensor.constants import (TEST_CAHNNEL_ID, CONFIG)
class PublishMock(object):
    """Publishes fake sensor exception events onto the test Redis channel."""
    def __init__(self):
        self.q = redis.StrictRedis(**CONFIG)
        # self._p = self.q.pubsub()
        # self._p.subscribe(TEST_CAHNNEL_ID)
    def send_fatal(self):
        # Publish an event marked FATAL.
        self.q.publish(TEST_CAHNNEL_ID, {'type': 'EXCEPTION',
                                         'exception_type': 'FATAL',
                                         'stdout': 'This is the stdout message',
                                         'stderr': 'and this is the stderr message',
                                         'rc': 444})
    def send_warning(self):
        # Publish a non-fatal ('OTHER') event.
        self.q.publish(TEST_CAHNNEL_ID, {'type': 'EXCEPTION',
                                         'exception_type': 'OTHER',
                                         'stdout': 'This is the stdout message',
                                         'stderr': 'and this is the stderr message',
                                         'rc': 444})
pub = PublishMock()
@pytest.mark.monitor
def test_ctx(context, track):
    """Exercise model group add/delete and tracker register/unregister.

    Fix: the original re-captured ``l1 = len(model)`` immediately before the
    post-delete assertion, which made that check compare the length to itself
    (always true). The length captured before the group was added is used
    instead, so the assertion actually verifies the deletion.
    """
    print()
    model, _ = context
    pp(model)
    l1 = len(model)
    print('Adding test group')
    model['test'] = []
    print('ctx.test >>', model.test)
    print('type(ctx.test) >>', type(model.test))
    assert len(model) - 1 == l1, 'Length was not increased'
    print('Deleting test group')
    del model['test']
    pp(model)
    assert len(model) == l1, 'Length was not decreased'
    track.register(model.all.address, ['netwok', 'filesystem'])
    pub.send_warning()
    time.sleep(2)
    track.unregister(model.all.address, ['netwok', 'filesystem'])
    pub.send_warning()
    time.sleep(2)
| [
{
"point_num": 1,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},... | 3 | tests/test_monitor.py | autostack/pytest-sensor |
from suds.client import Client
from suds import WebFault
from model.project import Project
class SoapHelper:
    """Helper around the MantisBT SOAP (MantisConnect) API."""

    # Single definition of the WSDL endpoint; it was duplicated in each method.
    WSDL_URL = "http://localhost:8080/mantisbt-1.2.20/api/soap/mantisconnect.php?wsdl"

    def __init__(self, app):
        self.app = app

    def can_login(self, username, password):
        """Return True if mc_login accepts the credentials, False on WebFault."""
        client = Client(self.WSDL_URL)
        try:
            client.service.mc_login(username, password)
            return True
        except WebFault:
            return False

    def get_project_list(self, username, password):
        """Return the user's accessible projects as Project objects."""
        client = Client(self.WSDL_URL)
        projects = client.service.mc_projects_get_user_accessible(username, password)
        # Iterate the result directly instead of indexing via range(len(...)).
        return [Project(name=p.name, description=p.description) for p in projects]
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false... | 3 | fixture/soap.py | nicholas-y/python_mantis |
__version__ = "0.4.4"
from typing import Dict
_OPT_DEFAULTS: Dict[str, bool] = dict(
specialized_code=True,
optimize_einsums=True,
jit_script_fx=True,
)
def set_optimization_defaults(**kwargs) -> None:
    r"""Globally set the default optimization settings.

    Parameters
    ----------
    **kwargs
        Keyword arguments to set the default optimization settings.

    Raises
    ------
    ValueError
        If a keyword does not name a known optimization option.
    """
    for name, value in kwargs.items():
        if name not in _OPT_DEFAULTS:
            raise ValueError(f"Unknown optimization option: {name}")
        _OPT_DEFAULTS[name] = value
def get_optimization_defaults() -> Dict[str, bool]:
    r"""Get the global default optimization settings."""
    # Shallow copy so callers cannot mutate the module-level defaults.
    return {**_OPT_DEFAULTS}
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true... | 3 | e3nn/__init__.py | claycurry34/e3nn |
# -*- coding: utf-8 -*-
# Copyright 2021 Red Hat
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
"""
Unit test file for netaddr test plugin: mac
"""
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import unittest
from ansible_collections.ansible.utils.plugins.test.mac import _mac
class TestMac(unittest.TestCase):
    """Unit tests for the ansible.utils 'mac' test plugin."""

    def setUp(self):
        pass

    def test_invalid_data(self):
        """Check passing invalid argspec"""
        # missing argument
        with self.assertRaises(TypeError) as error:
            _mac()
        self.assertIn("argument", str(error.exception))

    def test_valid_data(self):
        """Check passing valid data as per criteria"""
        accepted_formats = (
            "02:16:3e:e4:16:f3",
            "02-16-3e-e4-16-f3",
            "0216.3ee4.16f3",
            "02163ee416f3",
        )
        for candidate in accepted_formats:
            self.assertEqual(_mac(mac=candidate), True)
        self.assertEqual(_mac(mac="string"), False)
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answ... | 3 | venv/lib/python3.6/site-packages/ansible_collections/ansible/utils/tests/unit/plugins/test/test_mac.py | usegalaxy-no/usegalaxy |
"""Checks for obsolete messages in PO files.
Returns an error code if a PO file has an obsolete message.
"""
import argparse
import sys
def check_obsolete_messages(filenames, quiet=False):
    """Warns about all obsolete messages found in a set of PO files.

    Parameters
    ----------
    filenames : list
        Set of file names to check.
    quiet : bool, optional
        Enabled, don't print output to stderr when an obsolete message is found.

    Returns
    -------
    int: 0 if no obsolete messages found, 1 otherwise.
    """
    exitcode = 0
    for filename in filenames:
        # PO files are UTF-8 by convention; be explicit so the check does not
        # depend on the platform default encoding.
        with open(filename, encoding="utf-8") as f:
            content_lines = f.readlines()
        _inside_obsolete_message = False
        for i, line in enumerate(content_lines):
            if not _inside_obsolete_message and line.startswith("#~ "):
                _inside_obsolete_message = True
                exitcode = 1
                if not quiet:
                    # Fix: report the actual file name (the message previously
                    # printed a "(unknown)" placeholder).
                    sys.stderr.write(f"Found obsolete message at {filename}:{i + 1}\n")
            elif _inside_obsolete_message and not line.startswith("#~ "):
                # Only the first line of each obsolete block is reported.
                _inside_obsolete_message = False
    return exitcode
def main():
    """CLI entry point: parse arguments and run the obsolete-message check."""
    argument_parser = argparse.ArgumentParser()
    argument_parser.add_argument(
        "filenames", nargs="*", help="Filenames to check for obsolete messages"
    )
    argument_parser.add_argument("-q", "--quiet", action="store_true", help="Supress output")
    options = argument_parser.parse_args()
    return check_obsolete_messages(options.filenames, quiet=options.quiet)


if __name__ == "__main__":
    raise SystemExit(main())
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": fal... | 3 | hooks/obsolete_messages.py | mondeja/pre-commit-po-hooks |
import tflite_runtime.interpreter as tflite
from PIL import Image
from io import BytesIO
from urllib import request
import numpy as np
#import model
# Load the compiled TFLite model once at import time so warm Lambda
# invocations reuse the interpreter instead of re-reading the model.
interpreter = tflite.Interpreter(model_path='cats-dogs-v2.tflite')
interpreter.allocate_tensors()

# get input and output index
input_index = interpreter.get_input_details()[0]['index']
output_index = interpreter.get_output_details()[0]['index']
def download_image(url):
    """Fetch an image over HTTP(S) and return it as a PIL Image."""
    with request.urlopen(url) as resp:
        payload = resp.read()
    return Image.open(BytesIO(payload))
def prepare_image(img, target_size):
    """Ensure the image is RGB and resize it with nearest-neighbour sampling."""
    rgb = img if img.mode == 'RGB' else img.convert('RGB')
    return rgb.resize(target_size, Image.NEAREST)
# url = 'https://upload.wikimedia.org/wikipedia/commons/1/18/Vombatus_ursinus_-Maria_Island_National_Park.jpg'
def preprocessor(img):
    """Scale pixel values to [0, 1] floats and prepend a batch dimension."""
    scaled = np.array(img, dtype='float32') / 255
    return scaled[np.newaxis, ...]
def predict(url):
    """Download an image, run the cats/dogs model and return {label: score}.

    Uses the module-level interpreter and tensor indices. The model is
    assumed to output a single score where < 0.5 means 'cat' and >= 0.5
    means 'dog' — TODO confirm against the training code.
    """
    img = download_image(url)
    img = prepare_image(img, (150, 150))
    X = preprocessor(img)
    interpreter.set_tensor(input_index, X)
    interpreter.invoke()
    preds = interpreter.get_tensor(output_index)
    float_predictions = preds[0].tolist()
    label = ['cat' if preds[0] < 0.5 else 'dog']
    return dict(zip(label, float_predictions))
def lambda_handler(event, context):
    """AWS Lambda entry point: classify the image referenced by event['url']."""
    return predict(event['url'])
| [
{
"point_num": 1,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"ans... | 3 | 9_Serverless/lambda_function.py | snikhil17/mlzoomcamp |
#!/usr/bin/env python3
import os
import logging
import json
import argparse
from gen import engine_pb2
from gen import issue_pb2
logger = logging.getLogger(__name__)
parser = argparse.ArgumentParser()
__source_dir = "/dracon/source"
def parse_flags(args: object) -> argparse.Namespace:
    """
    Parses the input flags for a producer

    Registers -in/-out on the module-level parser and parses *args*
    (a sequence of argument strings, e.g. sys.argv[1:]).
    """
    parser.add_argument('-in', help='tool results file')
    parser.add_argument('-out', help='producer output file')
    return parser.parse_args(args)
def parse_in_file_json(args: object) -> dict:
    """
    A generic method to return a tool's JSON results file as a dict

    The path comes from the -in flag ('in' is a keyword, hence vars()).
    Uses json.load on the open handle instead of read()+loads, with an
    explicit UTF-8 encoding.
    """
    results_file = vars(args)['in']
    with open(results_file, encoding='utf-8') as f:
        return json.load(f)
def write_dracon_out(args: object, tool_name: str, issues: [issue_pb2.Issue]):
    """
    A method to write the resulting protobuf to the output file

    Rewrites source-directory paths in each issue to be relative, stamps
    the issue source, then appends the serialized LaunchToolResponse to
    the file given by the -out flag.
    """
    out_file = vars(args)['out']
    source = __get_meta_source()
    clean_issues = []
    for iss in issues:
        iss.description = iss.description.replace(__source_dir, ".")
        iss.title = iss.title.replace(__source_dir, ".")
        iss.target = iss.target.replace(__source_dir, ".")
        iss.source = source
        clean_issues.append(iss)
    # Fix: serialize the sanitized list. (The issues were mutated in place so
    # the output happened to be identical, but passing `issues` left
    # `clean_issues` dead and the intent unclear.)
    ltr = engine_pb2.LaunchToolResponse(
        tool_name=tool_name,
        issues=clean_issues
    )
    # Append ('ab') so multiple tool responses can accumulate in one file.
    with open(out_file, 'ab') as f:
        f.write(ltr.SerializeToString())
__meta_src_file = ".source.dracon"
def __get_meta_source() -> str:
    """
    This obtains the source address in the __meta_src_file from the source workspace
    """
    meta_src_path = os.path.join(__source_dir, __meta_src_file)
    # Guard clause: no metadata file means the source address is unknown.
    if not os.path.exists(meta_src_path):
        return "unknown"
    with open(meta_src_path) as f:
        return f.read().strip()
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": false
},
{
"point_num": 2,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding... | 3 | producers/producer.py | ValntinDragan/dracon-1 |
__all__ = ("group_attempts", "fails_filter", "reduce_to_failures",)
def group_attempts(sequence, filter_func=None):
    """Group resolver events by the package being inspected.

    Yields (package, events) pairs: a new group starts at each
    ('inspecting', pkg) tuple, and subsequent events that pass
    filter_func are collected into that group. Events flagged as
    ignored frames are replaced inline by their own filtered sub-events.
    Events before the first 'inspecting' marker are discarded.
    """
    if filter_func is None:
        # Default: keep every event.
        filter_func = lambda x:True
    last, l = None, []
    for x in sequence:
        if isinstance(x, tuple) and x[0] == 'inspecting':
            if l:
                yield last, l
            last, l = x[1], []
        elif last is not None:
            if filter_func(x):
                # inline ignored frames
                if getattr(x, 'ignored', False):
                    l.extend(y for y in x.events if filter_func(y))
                else:
                    l.append(x)
    # Flush the trailing group, if it collected anything.
    if l:
        yield last, l
def fails_filter(x):
    """Keep only events that represent failures.

    Frames are failures when not x.succeeded; ('viable', flag) tuples
    when the flag is falsy; any other tuple except 'inspecting' markers.
    """
    if isinstance(x, tuple):
        tag = x[0]
        if tag == "viable":
            return not x[1]
        return tag != "inspecting"
    return not x.succeeded
def reduce_to_failures(frame):
    """Recursively collapse a resolver frame into its failure tree.

    Returns [] for a successful frame; otherwise a list whose head is the
    frame itself, followed by (package, failures) pairs where nested
    frames are reduced recursively and tuple events are kept as-is.
    """
    if frame.succeeded:
        return []
    l = [frame]
    for pkg, nodes in group_attempts(frame.events, fails_filter):
        l2 = []
        for x in nodes:
            if not isinstance(x, tuple):
                # Nested frame: recurse into its own failures.
                l2.append(reduce_to_failures(x))
            else:
                l2.append(x)
        l.append((pkg, l2))
    return l
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding ... | 3 | src/pkgcore/resolver/util.py | thesamesam/pkgcore |
from django import forms
from sme_uniforme_apps.proponentes.models import Anexo
class AnexoForm(forms.ModelForm):
    """ModelForm for Anexo that makes 'tipo_documento' mandatory."""

    def __init__(self, *args, **kwargs):
        super(AnexoForm, self).__init__(*args, **kwargs)
        # Enforce the field at the form level regardless of the model definition.
        self.fields['tipo_documento'].required = True

    class Meta:
        model = Anexo
        fields = '__all__'
fields = '__all__' | [
{
"point_num": 1,
"id": "every_class_has_docstring",
"question": "Does every class in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": false
},
{... | 3 | sme_uniforme_apps/proponentes/models/forms.py | prefeiturasp/SME-PortalUniforme-BackEnd |
import os
from unittest import TestCase
class TestCLI(TestCase):
    """Placeholder tests for the CLI helpers (bodies still to be written)."""

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def test_get_command(self):
        pass

    def test_index_of(self):
        pass

    def test_contains(self):
        pass

    def test_get_or_die(self):
        pass

    def test_get_or_default(self):
        pass

    def test_get_existing_filename_or_die(self):
        # Fix: renamed from get_existing_filename_or_die — without the
        # 'test_' prefix the test runner never discovered this stub.
        pass
class TestApplication(TestCase):
    """Placeholder tests for the interactive Application (all stubs)."""

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def test_set_prompt(self):
        pass

    def test_get_prompt(self):
        pass

    def test_get_command_from_user_input(self):
        pass

    def test_main(self):
        pass

    def test_on_q(self):
        pass

    def test_on_quit(self):
        pass
| [
{
"point_num": 1,
"id": "every_class_has_docstring",
"question": "Does every class in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
... | 3 | tests/test_cli.py | simonski/pycommon |
from .BasicTypeAttr import BasicTypeAttr
class StringAttr(BasicTypeAttr):
    """Model attribute for string-typed columns; normalizes Min/Max to ints."""

    def __init__(self, attr):
        BasicTypeAttr.__init__(self, attr)
        # Spec values arrive as strings; coerce the length bounds to int.
        if self.get('Max') is not None:
            self['Max'] = int(self['Max'])
        if self.get('Min') is not None:
            self['Min'] = int(self['Min'])

    def printWarnings(self, out):
        """Warn when neither a max length nor an explicit SQLType is given."""
        if self.get('Max') in (None, '') and not self.get('SQLType'):
            out.write('warning: model %s: class %s: attr %s: max string length unspecified\n' % (
                self.model().name(), self.klass().name(), self.name()))
| [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
... | 3 | webware/MiddleKit/Core/StringAttr.py | PeaceWorksTechnologySolutions/w4py3-middlekit |
import unittest
import tests.io.generate_pazy_udpout as gp
import os
import shutil
class TestPazyCoupledStatic(unittest.TestCase):
    """
    Test Pazy wing static coupled case and compare against a benchmark result.
    As of the time of writing, benchmark result has not been verified but it
    serves as a backward compatibility check for code improvements.
    """

    # Directory of this test file; generated cases live beneath it.
    route_test_dir = os.path.abspath(os.path.dirname(os.path.realpath(__file__)))

    def test_dynamic_aoa(self):
        """Generate and run the Pazy UDP-output case at u_inf=50, alpha=0."""
        u_inf = 50
        alpha = 0
        case_name = 'pazy_uinf{:04g}_alpha{:04g}'.format(u_inf * 10, alpha * 10)
        # Discretisation parameters passed through to the case generator —
        # presumably panel counts and a scaling factor; confirm against
        # generate_pazy_udpout.
        M = 4
        N = 16
        Msf = 1
        cases_folder = self.route_test_dir + '/cases/'
        output_folder = self.route_test_dir + '/cases/'
        # run case
        gp.generate_pazy_udp(u_inf, case_name, output_folder, cases_folder,
                             alpha=alpha,
                             M=M,
                             N=N,
                             Msf=Msf,
                             cd=self.route_test_dir)

    def tearDown(self):
        """Remove the generated case folder after each test."""
        cases_folder = self.route_test_dir + '/cases/'
        if os.path.isdir(cases_folder):
            shutil.rmtree(cases_folder)
if __name__ == '__main__':
unittest.main() | [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
}... | 3 | tests/io/test_pazy_udpout.py | ACea15/sharpy |
# The two squares of the vacuum world.
A = 'A'
B = 'B'

# Mutable world state: dirt status per square plus the agent's location.
Environment = {
    A: 'Dirty',
    B: 'Dirty',
    'Current': A
}
def REFLEX_VACUUM_AGENT(loc_st):  # Determine action
    """Simple reflex agent: suck if dirty, otherwise move to the other square."""
    location, status = loc_st
    if status == 'Dirty':
        return 'Suck'
    if location == A:
        return 'Right'
    if location == B:
        return 'Left'
def Sensors():  # Sense Environment
    """Return the (location, status) percept for the agent's current square."""
    location = Environment['Current']
    return (location, Environment[location])
def Actuators(action):  # Modify Environment
    """Apply the agent's action ('Suck'/'Right'/'Left') to the global Environment."""
    location = Environment['Current']
    if action == 'Suck':
        Environment[location] = 'Clean'
    elif action == 'Right' and location == A:
        Environment['Current'] = B
    elif action == 'Left' and location == B:
        Environment['Current'] = A
def run(n, make_agent):  # run the agent through n steps
    """Run the sense/act loop, printing a table of each step.

    Note: range(1, n) performs n-1 steps, not n.
    """
    print(' Current New')
    print('location status action location status')
    for i in range(1, n):
        (location, status) = Sensors()  # Sense Environment before action
        print("{:12s}{:8s}".format(location, status), end='')
        action = make_agent(Sensors())
        Actuators(action)
        (location, status) = Sensors()  # Sense Environment after action
        print("{:8s}{:12s}{:8s}".format(action, location, status))
if __name__ == '__main__':
run(10, REFLEX_VACUUM_AGENT)
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding se... | 3 | Lecture_3_Agents/Exercise1/Exercises/reflex_vacuum_agent.py | aleksander-GD/AI-F20 |
import unittest
import os
from datetime import datetime
from util import Singleton
from .. import GithubService, GithubRemote, GithubRealRemote
@Singleton
class GithubMockRemote(GithubRemote):
    """Offline stand-in for the GitHub remote, used to avoid API rate limits."""

    def get_notifications(self):
        """Return one canned Issue notification."""
        return [{"type": "Issue", "title": "This is an issue"}]

    def connect(self, key):
        # No-op: there is nothing to authenticate against.
        pass
class TestGithubService(unittest.TestCase):
    """Tests GithubService against the real remote or the mock, per env var."""

    # Don't want to exceed the limit... mock on DONOTMOCK
    if "DONOTMOCK_GITHUB" in os.environ:
        github_service = GithubService.instance(GithubRealRemote.instance())
    else:
        print("Mocking remotes...")
        github_service = GithubService.instance(GithubMockRemote.instance())

    def test_get_notifications_returns_valid_notification(self):
        """Notifications must expose string 'title' and 'type' fields."""
        self.github_service.connect()
        self.assertTrue(
            type(self.github_service.get_notifications()[0]["title"]) is str
        )
        self.assertTrue(type(self.github_service.get_notifications()[0]["type"]) is str)
| [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "every_class_has_docstring",
"question": "Does every class in this file have a docstring?",
"answer": false
},
{
... | 3 | services/github/test/test_service.py | Ovakefali13/buerro |
# -*- coding: utf-8 -*-
# Copyright (c) 2020, omar jaber and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
from frappe import _
class OKRPerformanceProfile(Document):
    """OKR profile document; validates key-result targets against time frames."""

    def validate(self):
        self.validate_kr_time_frame_total()

    def validate_kr_time_frame_total(self):
        """Warn when an objective's summed targets exceed its time frame."""
        if not (self.objectives and self.key_results):
            return
        for objective in self.objectives:
            total_target_days = sum(
                key_result.target_to_be_achieved_by
                for key_result in self.key_results
                if key_result.objective == objective.objective
            )
            if total_target_days > objective.time_frame:
                frappe.msgprint(_("""Total of Achieve Target by for the objective <b>{0}</b> is greater than the Time Frame""").format(objective.objective))
| [
{
"point_num": 1,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
... | 3 | one_fm/one_fm/doctype/okr_performance_profile/okr_performance_profile.py | askmetoo/One-FM |
from DAL.DAL_XRayPatient import DAL_XRayPatient
class BUS_XRayPatient():
    """Business-logic facade over DAL_XRayPatient queries."""

    def __init__(self):
        self.dalXRayPatient = DAL_XRayPatient()

    def firstLoadLinkXRay(self, IDPatient):
        """Return the X-ray links for a patient (initial load, all types)."""
        return self.dalXRayPatient.selectLinkXRayViaIDPatient(IDPatient=IDPatient)

    def loadLinkXRay(self, IDPatient, XRayType):
        """Return the X-ray links for a patient filtered by X-ray type."""
        return self.dalXRayPatient.selectLinkXRay(IDPatient=IDPatient, XRayType=XRayType)
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
... | 3 | eHealth_Version1.0_01_13_2022_12_47_pm_Release/BUS/BUS_XRayPatient.py | kyvipro113/Graduation_Thesis |
from django.contrib.sitemaps import Sitemap
from Blog.models import Post
class BlogSitemap(Sitemap):
    """Sitemap over published blog posts."""

    changefreq = "weekly"
    priority = 0.5

    def items(self):
        # Only posts with status=True are listed.
        return Post.objects.filter(status=True)

    def lastmod(self, obj):
        return obj.published_date
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
}... | 3 | Blog/sitemaps.py | myselfajp/MyFirstPage |
from django.contrib.auth.models import User
from rest_framework.test import APITestCase
class FVHAPITestCase(APITestCase):
    """Base API test case with a dict-subset assertion and user helpers."""

    def assert_dict_contains(self, superset, subset, path=''):
        """Recursively assert that every key in subset matches in superset.

        Nested dicts are compared key-by-key; the failure message reports
        the dotted path of the first mismatching key.
        """
        for key, expected in subset.items():
            full_path = path + key
            received = superset.get(key, None)
            if isinstance(expected, dict) and isinstance(received, dict):
                self.assert_dict_contains(superset[key], expected, full_path + '.')
            else:
                assert received == expected, 'Value mismatch for key {}: {} != {}'.format(
                    full_path, expected, received
                )

    def create_user(self):
        """Create and return a fixed courier user."""
        return User.objects.create(
            username='courier', first_name='Coranne', last_name='Courier', email='coranne@couriersrus.com')

    def create_and_login_user(self):
        """Create the courier user and authenticate the test client as them."""
        user = self.create_user()
        self.client.force_login(user)
        return user
| [
{
"point_num": 1,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
},
{
"point_num": 2,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a ty... | 3 | django_server/feedback_map/rest/tests/base.py | ForumViriumHelsinki/FVHFeedbackMap |
# Go through a directory, gather all files with
# specific postfix and attempt converting them into
# .wav files with 16-bit, 16khz sampling rate.
import argparse
import os
import subprocess
from tqdm import tqdm
parser = argparse.ArgumentParser("Gather audio files from directory and turn them into .wav files")
parser.add_argument("directory", help="Directory to scan through")
parser.add_argument("output", help="Directory where converted samples should be placed to")
parser.add_argument("--postfix", type=str, default=".flac", help="Postfix of files to convert")
FFMPEG_PATH = "ffmpeg"
FFMPEG_TEMPLATE = FFMPEG_PATH + " -y -hide_banner -loglevel panic -i {input_file} -acodec pcm_s16le -ac 1 -ar 16000 {output_file}"
def convert_file(input_file, output_file):
    """Convert one audio file to 16-bit/16 kHz mono WAV with ffmpeg.

    Fix: the original interpolated paths into a shell command string
    (shell=True), which broke on file names containing spaces or shell
    metacharacters. Passing an argument list without a shell is both
    safe and equivalent.
    """
    subprocess.check_output(
        [
            FFMPEG_PATH,
            "-y", "-hide_banner", "-loglevel", "panic",
            "-i", input_file,
            "-acodec", "pcm_s16le", "-ac", "1", "-ar", "16000",
            output_file,
        ]
    )
def main(args):
    """Walk args.directory and convert every matching file into args.output.

    The output tree mirrors the input tree; files keep their relative
    paths with the postfix replaced by '.wav'.
    """
    progress_bar = tqdm()
    for dirpath, dirnames, filenames in os.walk(args.directory):
        for filename in filenames:
            if filename.endswith(args.postfix):
                original_path = os.path.join(dirpath, filename)
                # NOTE(review): str.replace substitutes every occurrence, so a
                # path that contains args.directory or the postfix substring
                # elsewhere would also be rewritten — confirm inputs are safe.
                target_path = original_path.replace(args.directory, args.output)
                target_path = target_path.replace(args.postfix, ".wav")
                # Make sure directories exist
                os.makedirs(os.path.dirname(target_path), exist_ok=True)
                convert_file(original_path, target_path)
                progress_bar.update(1)
if __name__ == "__main__":
args = parser.parse_args()
main(args) | [
{
"point_num": 1,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docs... | 3 | utils/audio_files_to_wav16.py | entn-at/asv-cm-reinforce |
"""
42. Storing files according to a custom storage system
``FileField`` and its variations can take a ``storage`` argument to specify how
and where files should be stored.
"""
import random
import tempfile
from django.db import models
from django.core.files.base import ContentFile
from django.core.files.storage import FileSystemStorage
temp_storage_location = tempfile.mkdtemp()
temp_storage = FileSystemStorage(location=temp_storage_location)
# Write out a file to be used as default content
temp_storage.save('tests/default.txt', ContentFile('default content'))
class Storage(models.Model):
    """Model exercising FileField upload_to callables and a custom storage."""

    def custom_upload_to(self, filename):
        # Fixed destination regardless of the uploaded file name.
        return 'foo'

    def random_upload_to(self, filename):
        # This returns a different result each time,
        # to make sure it only gets called once.
        return '%s/%s' % (random.randint(100, 999), filename)

    normal = models.FileField(storage=temp_storage, upload_to='tests')
    custom = models.FileField(storage=temp_storage, upload_to=custom_upload_to)
    # The field name 'random' shadows the random module only as a class
    # attribute; random_upload_to still resolves the module global at call time.
    random = models.FileField(storage=temp_storage, upload_to=random_upload_to)
    default = models.FileField(storage=temp_storage, upload_to='tests', default='tests/default.txt')
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
}... | 3 | src/django-nonrel/tests/modeltests/files/models.py | adamjmcgrath/glancydesign |
#!/usr/bin/env python
# Copyright (C) 2015 Dmitry Rodionov
# This software may be modified and distributed under the terms
# of the MIT license. See the LICENSE file for details.
from os import path
from time import sleep
def sanitize_path(raw_path):
    """ Replace spaces with backslashes+spaces """
    return "\\ ".join(raw_path.split(" "))
def path_for_script(script):
    """ Return the full path for the given script
    (joined onto this module's own directory). """
    return path.join(current_directory(), script)
def current_directory():
    """Return the absolute directory containing this module."""
    # split()[0] is the directory component, i.e. dirname.
    return path.split(path.abspath(__file__))[0]
def filelines(source_file):
    """ A generator that returns lines of the file.
    If there're no new lines it waits until the file is updated.

    Starts from the current end of file (tail -f style) and never
    terminates on its own.
    """
    # Go to the end of the file
    source_file.seek(0, 2)
    while True:
        line = source_file.readline()
        if not line:
            # Sleep briefly
            sleep(0.1)
            continue
        yield line
| [
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
},
{
"point_num": 2,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
... | 3 | analyzer/darwin/lib/dtrace/common.py | Yuanmessi/Bold-Falcon |
# coding: utf-8
"""
metal-api
API to manage and control plane resources like machines, switches, operating system images, machine sizes, networks, IP addresses and more # noqa: E501
OpenAPI spec version: v0.15.7
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import metal_python
from metal_python.models.v1_filesystem_layout_constraints import V1FilesystemLayoutConstraints # noqa: E501
from metal_python.rest import ApiException
class TestV1FilesystemLayoutConstraints(unittest.TestCase):
    """V1FilesystemLayoutConstraints unit test stubs"""

    def setUp(self):
        # No fixtures required yet.
        pass

    def tearDown(self):
        # Nothing to clean up.
        pass

    def testV1FilesystemLayoutConstraints(self):
        """Test V1FilesystemLayoutConstraints"""
        # FIXME: construct object with mandatory attributes with example values
        # model = metal_python.models.v1_filesystem_layout_constraints.V1FilesystemLayoutConstraints()  # noqa: E501
        pass
if __name__ == '__main__':
unittest.main()
| [
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": false
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer":... | 3 | test/test_v1_filesystem_layout_constraints.py | metal-stack/metal-python |
"""Helper methods for components within Home Assistant."""
from __future__ import annotations
from collections.abc import Iterable, Sequence
import re
from typing import TYPE_CHECKING
from homeassistant.const import CONF_PLATFORM
if TYPE_CHECKING:
from .typing import ConfigType
def config_per_platform(
    config: ConfigType, domain: str
) -> Iterable[tuple[str | None, ConfigType]]:
    """Break a component config into different platforms.

    For example, will find 'switch', 'switch 2', 'switch 3', .. etc

    Async friendly.
    """
    for config_key in extract_domain_configs(config, domain):
        platform_config = config[config_key]
        if not platform_config:
            # Skip empty platform sections.
            continue
        if not isinstance(platform_config, list):
            platform_config = [platform_config]
        for item in platform_config:
            try:
                platform = item.get(CONF_PLATFORM)
            except AttributeError:
                # Non-mapping entries carry no platform name.
                platform = None
            yield platform, item
def extract_domain_configs(config: ConfigType, domain: str) -> Sequence[str]:
    """Extract keys from config for given domain name.

    Matches the bare domain ('switch') and suffixed variants
    ('switch 2'), in the mapping's iteration order.

    Async friendly.
    """
    pattern = re.compile(fr"^{domain}(| .+)$")
    # Iterating the mapping directly yields its keys; .keys() was redundant.
    return [key for key in config if pattern.match(key)]
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": false
},
{
"point_num": 2,
"id": "all_return_types_annotated",
"question": "Does every function in this file have a return type annotation?",
"a... | 3 | homeassistant/helpers/__init__.py | PiotrMachowski/core |
from typing import List, Tuple
from ....source_shared.base import Base
from ....utilities.byte_io_mdl import ByteIO
class StudioTrivert(Base):
    """One triangle vertex: indices into the vertex/normal arrays plus a UV pair."""

    def __init__(self):
        self.vertex_index = 0
        self.normal_index = 0
        # Two uint16 texture coordinates — presumably texel-space, not
        # normalized; confirm against the studio model format spec.
        self.uv = []

    def read(self, reader: ByteIO):
        """Read vertex index, normal index and the UV pair (all uint16)."""
        self.vertex_index = reader.read_uint16()
        self.normal_index = reader.read_uint16()
        self.uv = [reader.read_uint16(), reader.read_uint16()]
class StudioMesh(Base):
    """Mesh header (counts/offsets/skin) plus its triangle vertex list."""

    def __init__(self):
        self.triangle_count = 0
        self.triangle_offset = 0
        self.skin_ref = 0
        self.normal_count = 0
        self.normal_offset = 0
        self.triangles: List[StudioTrivert] = []

    def read(self, reader: ByteIO):
        """Read the 5-int header, then the trivert list at triangle_offset.

        Reads triangle_count * 3 triverts (three per triangle). The seek
        happens inside save_current_pos(), which presumably restores the
        reader position on exit — see ByteIO.
        """
        (self.triangle_count, self.triangle_offset,
         self.skin_ref,
         self.normal_count, self.normal_offset) = reader.read_fmt('5i')
        with reader.save_current_pos():
            reader.seek(self.triangle_offset)
            for _ in range(self.triangle_count * 3):
                trivert = StudioTrivert()
                trivert.read(reader)
                self.triangles.append(trivert)
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
... | 3 | goldsrc/mdl_v6/structs/mesh.py | tltneon/SourceIO |
import pytest
@pytest.mark.webtest
def test_send_http():
    """Marked 'webtest' so it can be selected/deselected with -m."""
    print('========== Hello *********************************')
    assert True

def test_something_quick():
    pass

def test_another():
    pass

class TestClass(object):
    """Shows that methods inside Test* classes are collected too."""
    def test_method(self):
        pass
# Run marked tests
# pytest -v -m webtest
# pytest -v -m "not webtest"
# Using -k expr to select tests based on their name
# pytest -v -k http
# pytest -k "not send_http" -v
# pytest -k "http or quick" -v
#
| [
{
"point_num": 1,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},... | 3 | 14_Lesson14/test_marking/test_example1.py | turovod/Otus |
# coding=utf-8
from __future__ import unicode_literals
from django import forms
from models import Result
# Create your forms here.
class QueryForm(forms.ModelForm):
    """Form that validates and normalizes a comma-separated author list."""

    class Meta:
        model = Result
        fields = ('doc_id', 'authorList',)

    def clean(self):
        """Normalize authorList to 'a, b, ...'; reject malformed/short lists."""
        form_doc_id = self.cleaned_data.get('doc_id')
        form_authorList = self.cleaned_data.get('authorList')
        # Append ", " so split(",") always yields a trailing element.
        form_authorList = form_authorList + ", "
        authorList = form_authorList.split(",")
        if len(authorList) > 1:
            # NOTE(review): comparing a string with `> 0` only works on
            # Python 2 (where it is always True); on Python 3 it raises
            # TypeError. Looks like a leftover — confirm intent.
            if authorList[len(authorList) - 1] != "" and authorList[len(authorList) - 1] > 0:
                #Remove space element in list
                if " " in authorList:
                    authorList = space_remover(authorList)
                # Remove blank element in list
                if "" in authorList:
                    authorList = blank_remover(authorList)
                if len(authorList) > 1:
                    # Rebuild the canonical "a, b" string and write it back to
                    # both the raw data and cleaned_data.
                    form_authorList = ', '.join([str(auth) for auth in authorList])
                    self.data[u'authorList'] = form_authorList
                    self.cleaned_data[u'authorList'] = form_authorList
                else:
                    raise forms.ValidationError("Author List too short")
            else:
                raise forms.ValidationError("Author List input incorrect")
        else:
            raise forms.ValidationError("Author List too short")
        super(QueryForm, self).clean()
def space_remover(authorList):
    """Remove every single-space entry from authorList, in place.

    Iterative replacement for the original recursive version: no
    recursion-depth limit for long lists, same in-place mutation,
    same return value (the same list object).
    """
    while " " in authorList:
        authorList.remove(" ")
    return authorList
def blank_remover(authorList):
    """Remove every empty-string element from *authorList* in place; return it."""
    while "" in authorList:
        authorList.remove("")
    return authorList
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": ... | 3 | deep_stylo/forms.py | Ninad998/FinalYearProject |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Preprocessor for stemming
Created on Thu Oct 7 14:00:58 2021
@author: zqirui
"""
from code.preprocessing.preprocessor import Preprocessor
from code.util import string_to_words_list
import nltk
class Stemmer(Preprocessor):
    """Preprocessor that reduces every word of each tweet to its stem."""

    def __init__(self, input_column, output_column):
        """Register the single input column and the output column with the base class."""
        super().__init__([input_column], output_column)

    def _get_values(self, inputs : list) -> list:
        """Stem the words of every tweet in ``inputs[0]``.

        Parameters
        ----------
        inputs : list
            list of tokenized words per tweet.

        Returns
        -------
        list
            list of stemmed words per tweet (each entry is ``str()`` of a list).
        """
        snowball = nltk.stem.snowball.SnowballStemmer("english")
        results = []
        for raw_tweet in inputs[0]:
            tokens = string_to_words_list(raw_tweet)
            stems = [snowball.stem(token) for token in tokens]
            results.append(str(stems))
        print("{0} tweets were stemmed!".format(len(results)))
        return results
| [
{
"point_num": 1,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},... | 3 | code/preprocessing/stemmer.py | zqirui/MLinPractice |
import json
import requests
from time import sleep
from urllib3.exceptions import HTTPError
from .exceptions import ErroApiBNMP
REGISTROS = 50
DADOS = {
'criterio': {
'orgaoJulgador': {
'uf': 'RJ',
'municipio': '',
'descricao': '',
},
'orgaoJTR': {},
'parte': {},
},
'paginador': {
'paginaAtual': None,
'registrosPorPagina': REGISTROS
},
'fonetica': 'true',
'ordenacao': {
'porNome': 'false',
'porData': 'false',
},
}
def _procura_mandados(pagina):
    """POST one page query to the BNMP endpoint and return its 'mandados' list.

    Mutates the module-level DADOS payload with the requested page number and
    raises ErroApiBNMP on any non-200 HTTP status.
    """
    DADOS['paginador']['paginaAtual'] = pagina
    cabecalhos = {
        'Content-Type': 'application/json',
        'USER_AGENT': 'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1)'
    }
    resposta = requests.post(
        url='http://www.cnj.jus.br/bnmp/rest/pesquisar',
        data=json.dumps(DADOS),
        headers=cabecalhos,
    )
    if resposta.status_code != 200:
        raise ErroApiBNMP('Erro ao chamar api BNMP: %d' % resposta.status_code)
    return resposta.json().get('mandados')
def _tentativa_api_mandados(metodo, *args, **kwargs):
for tentativa in range(3):
try:
retorno = metodo(*args, **kwargs)
return retorno
except HTTPError:
sleep(0.1)
continue
else:
raise ErroApiBNMP('Máximo de tentativas esgotadas')
def mandados_de_prisao():
    """Yield every arrest warrant, walking the BNMP API page by page.

    Stops as soon as a page comes back empty or missing.
    """
    pagina = 1
    while True:
        lote = _tentativa_api_mandados(_procura_mandados, pagina)
        if not lote:
            break
        for mandado in lote:
            yield mandado
        pagina += 1
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answ... | 3 | robobnmp/cliente.py | MinisterioPublicoRJ/robobnmp |
import re
import sys
cookie_re = re.compile(br"coding[:=]\s*([-\w.]+)")
if sys.version_info[0] == 2:
default_encoding = "ascii"
else:
default_encoding = "utf-8"
def guess_encoding(fp):
    """Inspect the first two lines of binary file *fp* for a PEP 263 coding cookie.

    Returns the declared encoding, or the module default when none is found.
    """
    for _attempt in (0, 1):
        line = fp.readline()
        match = cookie_re.search(line)
        if match is not None:
            return match.group(1).decode("ascii")
    return default_encoding
def _run():
    """Locate and execute the plugin's main script inside a py2app bundle."""
    global __file__
    import os
    import site  # noqa: F401
    # Mark the process as a frozen macOS plugin for code that checks sys.frozen.
    sys.frozen = "macosx_plugin"
    base = os.environ["RESOURCEPATH"]
    if "ARGVZERO" in os.environ:
        argv0 = os.path.basename(os.environ["ARGVZERO"])
    else:
        argv0 = None
    # SCRIPT_MAP and DEFAULT_SCRIPT are injected into this module's globals
    # by the py2app loader before _run() is called.
    script = SCRIPT_MAP.get(argv0, DEFAULT_SCRIPT)  # noqa: F821
    sys.argv[0] = __file__ = path = os.path.join(base, script)
    if sys.version_info[0] == 2:
        with open(path, "rU") as fp:
            source = fp.read() + "\n"
    else:
        # Detect the declared source encoding before re-reading as text.
        with open(path, "rb") as fp:
            encoding = guess_encoding(fp)
        with open(path, "r", encoding=encoding) as fp:
            source = fp.read() + "\n"
        # Strip a UTF-8 byte-order mark, which compile() would reject.
        BOM = b"\xef\xbb\xbf".decode("utf-8")
        if source.startswith(BOM):
            source = source[1:]
    # Run the script in this module's globals so it behaves like __main__.
    exec(compile(source, script, "exec"), globals(), globals())
| [
{
"point_num": 1,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
... | 3 | venv/lib/python3.9/site-packages/py2app/bootstrap/boot_aliasplugin.py | dequeb/asmbattle |
from flask_restful import Resource, reqparse
from models.user import UserModel
from resources.email import Email
class ResetPassword(Resource):
    """Endpoints for requesting and validating password-reset tokens."""

    parser = reqparse.RequestParser()
    parser.add_argument("email", required=True)

    def post(self):
        """Email a reset-password link when the address matches a known user."""
        args = ResetPassword.parser.parse_args()
        account = UserModel.find_by_email(args["email"])
        if not account:
            return {"message": "Invalid email"}, 400
        Email.send_reset_password_msg(account)
        return {"message": "Email sent"}, 200

    def get(self, token):
        """Report whether *token* is still a valid reset-password token."""
        account = UserModel.validate_reset_password(token)
        if not account:
            return {"message": "Expired token"}, 422
        return {"message": "Valid token", "user_id": account.id}, 200
| [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
}... | 3 | resources/reset_password.py | donovan-PNW/dwellinglybackend |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Used for testing the stop of an instance
"""
import os
from types import SimpleNamespace
# Set env to make debugging in interactive shell more comfortable
os.environ['AWS_SPAWNER_TEST'] = '1'
import spawner
from models import Server
from tornado import gen
#%% Configure
class DummyUser():
    """Minimal stand-in for a JupyterHub user object used in spawner tests."""

    def __init__(self, name,):
        self.name = name
        self.last_activity = ''
        # Fake server record carrying just the attributes the spawner touches.
        self.server = SimpleNamespace(ip='', base_url='')
        self.url = ''
        self.settings = {}
# Build a fake user and make sure a Server record exists for it.
dummyUser = DummyUser(name='developmentUser')
try:
    server = Server.get_server(user_id=dummyUser.name)
except:
    # No record yet: create an empty one for this development user.
    server = Server.new_server(server_id = '' , user_id = dummyUser.name , ebs_volume_id = '')
assert server.user_id == dummyUser.name
# Options expected by InstanceSpawner.set_debug_options below.
dummyUserOptions = {'EBS_VOL_ID' : '',
                    'EBS_VOL_SIZE' : 3,
                    'INSTANCE_TYPE': 't2.nano'}
dummyHubOptions = SimpleNamespace(**{'public_host' : '',
                                     'api_url' : '',
                                     'base_url' : ''})
# NOTE(review): reads a local api_token.txt; the file handle is never closed.
dummyApiToken = open('api_token.txt','r').read()
dummyOAuthID = '1234'
#%%Prepare Instance
instanceSpawner = spawner.InstanceSpawner()
instanceSpawner.set_debug_options(dummyUser = dummyUser, dummyUserOptions=dummyUserOptions,
                                  dummyHubOptions=dummyHubOptions, dummyApiToken = dummyApiToken,
                                  dummyOAuthID = dummyOAuthID)
#%%
@gen.coroutine
def stop(instance):
ret = yield instance.stop()
return ret
output = stop(instanceSpawner) | [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
... | 3 | test_stop.py | tomcatling/jupyterhub_aws_spawner |
"""
Tests for the spm info utility
"""
import shutil
import pytest
from tests.support.case import SPMCase
@pytest.mark.windows_whitelisted
@pytest.mark.destructive_test
class SPMInfoTest(SPMCase):
    """
    Validate the spm info command
    """

    def setUp(self):
        # Build the SPM test package from the shared config before each test.
        self.config = self._spm_config()
        self._spm_build_files(self.config)

    @pytest.mark.slow_test
    def test_spm_info(self):
        """
        test spm build
        """
        self._spm_create_update_repo(self.config)
        install = self.run_spm("install", self.config, "apache")
        get_info = self.run_spm("info", self.config, "apache")
        # The info output must mention these fragments of the package metadata.
        check_info = ["Supported OSes", "Supported OS", "installing Apache"]
        for info in check_info:
            self.assertIn(info, "".join(get_info))

    def tearDown(self):
        # Remove the temporary SPM tree created by setUp.
        shutil.rmtree(self._tmp_spm)
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
... | 3 | tests/integration/spm/test_info.py | babs/salt |
#!/usr/bin/env python3
from datetime import datetime, timezone
from reporting.category import Category
from reporting.activity import Activity
#{
# "_class": "org.jenkinsci.plugins.workflow.job.WorkflowRun",
# "id": "1",
# "url": "http://jk.domain/job/ancestor/job/grandfather/job/father/1/",
# "result": "SUCCESS",
# "timestamp": 1493222401537,
# "duration": 51364
#}
class JenkinsBuild:
    """One Jenkins build record, parsed from the JSON shape documented above."""

    def __init__(self, hierarchy_id, json_dict):
        self.id = json_dict['id']
        self.hierarchy_id = hierarchy_id
        self.url = json_dict['url']
        self.result = json_dict['result']
        # 1 for a green build, 0 for anything else (FAILURE, ABORTED, ...).
        self.numeric_result = 1 if json_dict['result'] == 'SUCCESS' else 0
        started_ms = json_dict['timestamp']
        self.start_timestamp = self._posix_timestamp_to_local(started_ms)
        self.finish_timestamp = self._posix_timestamp_to_local(started_ms + json_dict['duration'])
        self.duration = json_dict['duration']

    def to_activity(self):
        """Convert this build into a reporting Activity under the Jenkins category."""
        category = Category('Jenkins', self.hierarchy_id)
        return Activity(category, self.id, self.numeric_result,
                        self.start_timestamp, self.finish_timestamp, self.duration)

    def _posix_timestamp_to_local(self, posix_timestamp_in_ms):
        """Render a millisecond POSIX timestamp as a local-time ISO-8601 string."""
        seconds = posix_timestamp_in_ms / 1000
        stamped = datetime.fromtimestamp(seconds).replace(tzinfo = timezone.utc)
        return stamped.astimezone(tz = None).isoformat()
{
"point_num": 1,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
},
{
"point_num": 2,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fe... | 3 | statsSend/jenkins/jenkinsBuild.py | luigiberrettini/build-deploy-stats |
class Rectangle:
    """An axis-aligned rectangle described by its two side lengths."""

    def __init__(self, length, width):
        self.length = length
        self.width = width

    def area(self):
        """Return the enclosed area (length times width)."""
        return self.width * self.length

    def perimeter(self):
        """Return the total length of all four sides."""
        return 2 * self.length + 2 * self.width
class Square(Rectangle):
    """A rectangle whose two sides are the same length."""

    def __init__(self, length):
        # Delegate to the Rectangle initializer with equal sides.
        super(Square, self).__init__(*(length, length))
| [
{
"point_num": 1,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than clas... | 3 | test/test_files/test_inheritance/super_test_3.py | SoftwareUnderstanding/inspect4py |
from typing import TYPE_CHECKING, Optional
from ..core.notification.utils import get_site_context
from ..core.notify_events import NotifyEventType
from ..graphql.core.utils import to_global_id_or_none
if TYPE_CHECKING:
from ..account.models import User
from ..app.models import App
from ..plugins.manager import PluginsManager
from .models import Invoice
def get_invoice_payload(invoice):
    """Serialize *invoice* into the flat dict used by notification payloads."""
    parent_order = invoice.order
    return {
        "id": to_global_id_or_none(invoice),
        "number": invoice.number,
        "download_url": invoice.url,
        "order_id": to_global_id_or_none(parent_order),
    }
def send_invoice(
    invoice: "Invoice",
    staff_user: "User",
    app: Optional["App"],
    manager: "PluginsManager",
):
    """Send an invoice to user of related order with URL to download it."""
    order = invoice.order
    payload = {
        "invoice": get_invoice_payload(invoice),
        "recipient_email": order.get_customer_email(),  # type: ignore
        "requester_user_id": to_global_id_or_none(staff_user),
        "requester_app_id": to_global_id_or_none(app) if app else None,
        **get_site_context(),
    }
    # Route the notification through the order's channel when one exists.
    channel_slug = order.channel.slug if order and order.channel else None
    manager.notify(
        NotifyEventType.INVOICE_READY, payload, channel_slug=channel_slug
    )  # type: ignore
    manager.invoice_sent(invoice, order.get_customer_email())  # type: ignore
| [
{
"point_num": 1,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than ... | 3 | saleor/invoice/notifications.py | nestfiy/saleor |
import numpy as np
class LowLevelController:
    """Low level controller of a point mass robot with dynamics:
        x_{k+1} = x_k + v_k * Ts * cos(psi_k)
        y_{k+1} = y_k + v_k * Ts * sin(psi_k)
        v_{k+1} = v_k + Ts * a_k
        psi_{k+1} = psi_k + Ts * omega_k
        omega_{k+1} = omega_k + Ts * epsilon_k

    Where a_k and epsilon_k are the inputs and are the translational and rotational
    accelerations respectively.
    For now we assume, that it is a perfect controller which is able to produce
    the exact commanded outputs if they are reachable with the provided
    input constraints.
    """

    def __init__(self,
                 params):
        """Initializes a LowLevelController from a nested params dict."""
        self._init_from_params(params)

    def get_inputs(self, state, cmd_vel):
        """Produce control inputs for the commanded velocities.

        Parameters
        ----------
        state : array-like
            robot state; index 2 is v_k, index 4 is omega_k.
        cmd_vel : array-like
            commanded velocities np.array([v_des, omega_des]).

        Returns
        -------
        np.ndarray
            [a_k, epsilon_k], each clamped to the configured actuator limits.
        """
        v_des = cmd_vel[0]
        omega_des = cmd_vel[1]
        v_k = state[2]
        omega_k = state[4]
        # translational acceleration needed to reach v_des in one step,
        # saturated at the actuator limits:
        a_k = (v_des - v_k) / self._Ts
        a_k = min(max(a_k, self._acc_min), self._acc_max)
        # angular acceleration:
        epsilon_k = (omega_des - omega_k) / self._Ts
        # BUG FIX: the original assigned the upper clamp to a typo'd variable
        # (`a_epsilon_kk = self._epsilon_max`), so epsilon_k was returned
        # UNclamped whenever it exceeded _epsilon_max.
        epsilon_k = min(max(epsilon_k, self._epsilon_min), self._epsilon_max)
        return np.array([a_k, epsilon_k])

    def _init_from_params(self, params):
        """Initializes some variables from the params."""
        self._Ts = params["general"]["Ts"]
        self._acc_min = params["LowLevelController"]["acc_min"]
        self._acc_max = params["LowLevelController"]["acc_max"]
        self._epsilon_min = params["LowLevelController"]["epsilon_min"]
        self._epsilon_max = params["LowLevelController"]["epsilon_max"]
| [
{
"point_num": 1,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
},
{
"point_num": 2,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined insid... | 3 | gradplanner/controller/low_level_controller.py | ferenctorok/potential_field_planner |
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
try:
from marionette.marionette import Actions
except:
from marionette_driver.marionette import Actions
from gaiatest import GaiaTestCase
from gaiatest.apps.search.app import Search
from gaiatest.apps.system.regions.activities import Activities
from gaiatest.apps.system.app import System
class TestBrowserSaveImage(GaiaTestCase):
    """Verify that long-pressing an image in the browser saves it to the sdcard."""

    def setUp(self):
        GaiaTestCase.setUp(self)
        # The test image is served over the local network by the test harness.
        self.connect_to_local_area_network()
        self.test_url = self.marionette.absolute_url('IMG_0001.jpg')

    def test_browser_save_image(self):
        """
        https://moztrap.mozilla.org/manage/case/6889/
        """
        # Check that there are no images on sdcard before saving
        self.assertEqual(0, len(self.data_layer.sdcard_files('.jpeg')))
        search = Search(self.marionette)
        search.launch()
        browser = search.go_to_url(self.test_url)
        browser.switch_to_content()
        # Long tap on the image inside the browser content
        image = self.marionette.find_element('css selector', 'img')
        Actions(self.marionette).\
            press(image).\
            wait(3).\
            release().\
            wait(1).\
            perform()
        # Choose "save image" from the context-menu activity sheet.
        activities = Activities(self.marionette)
        activities.tap_save_image()
        # Wait for the download notification to appear and then clear.
        system = System(self.marionette)
        system.wait_for_notification_toaster_displayed()
        system.wait_for_notification_toaster_not_displayed()
        # Exactly one jpeg should now exist on the sdcard.
        self.assertEqual(1, len(self.data_layer.sdcard_files('.jpeg')))
| [
{
"point_num": 1,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": fals... | 3 | tests/python/gaia-ui-tests/gaiatest/tests/functional/browser/test_browser_save_image.py | marshall/gaia |
#!/usr/bin/python
# Add security groups to CloudFormation template files.
# This will modify the CloudFormation template files,
import sys, json, glob
from ansible.module_utils.basic import *
from cfyaml import yaml
def read_template(file_name):
    """Load the CloudFormation template at *file_name* and return it as a dict."""
    with open(file_name, 'r') as handle:
        return yaml.load(handle)
def write_template(file_name, template):
    """Serialise *template* as block-style YAML into *file_name*."""
    rendered = yaml.dump(template, default_flow_style=False)
    with open(file_name, 'w') as handle:
        handle.write(rendered)
def add_security_groups_to_elb(elb_resource, security_groups, template):
    """Append *security_groups* to the ELB resource named *elb_resource*.

    Scans the resource keys for a substring match before touching the
    template; mutates *template* in place and returns it.
    """
    for resource_name in template['Resources']:
        # Substring match, as in the original: any key containing the
        # elb_resource name triggers the append on the named resource.
        if elb_resource in resource_name:
            groups = template['Resources'][elb_resource]['Properties']['SecurityGroups']
            groups.extend(security_groups)
    return template
def main():
    """Ansible entry point: add security groups to the ELB in every template."""
    module = AnsibleModule(
        argument_spec = dict(
            template_dir = dict(required=True, type='str'),
            elb_resource = dict(required=True, type='str'),
            security_groups = dict(required=True, type='list')
        )
    )
    params = module.params
    matching_files = glob.glob(params['template_dir'] + "*.yaml")
    for file_name in matching_files:
        doc = read_template(file_name)
        doc = add_security_groups_to_elb(params['elb_resource'],
                                         params['security_groups'], doc)
        write_template(file_name, doc)
    module.exit_json(changed = True, message = ", ".join(matching_files))
if __name__ == '__main__':
main()
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": fal... | 3 | ansible/library/cf_security_groups.py | pzurzolo/aem-aws-stack-builder |
#!/usr/bin/env python3
import re
from ranking.management.modules.common import REQ, BaseModule, parsed_table
from ranking.management.modules.excepts import ExceptionParseStandings
class Statistic(BaseModule):
    """Standings parser for techgig.com contests."""

    def get_standings(self, users=None, statistics=None):
        """Fetch the standings page through a proxy and parse its HTML table.

        Returns {'result': {member: row}}; raises ExceptionParseStandings
        when no HTML table can be located in the page.
        """
        season = self.get_season()

        def standings_page(req):
            # Connect callback executed by the proxy helper for each attempt.
            return req.get(self.standings_url)

        print(self.standings_url)
        with REQ(
            with_proxy=True,
            args_proxy=dict(
                time_limit=3,
                n_limit=30,
                connect=standings_page,
            ),
        ) as req:
            # Page body returned by the successful proxied connect call.
            page = req.proxer.get_connect_ret()
            html_table = re.search('<table[^>]*>.*?</table>', page, re.MULTILINE | re.DOTALL)
            if not html_table:
                raise ExceptionParseStandings('Not found html table')
            # Map the site's column headers onto the common field names.
            mapping = {
                'Rank': 'place',
                'Name': 'name',
                'Language': 'language',
            }
            table = parsed_table.ParsedTable(html_table.group(0), header_mapping=mapping)
            result = {}
            for r in table:
                row = dict()
                for k, v in r.items():
                    if v.value:
                        row[k] = v.value
                # Rows carry no stable handle; synthesise one from name + season.
                if 'member' not in row:
                    row['member'] = f'{row["name"]} {season}'
                result[row['member']] = row
            return {'result': result}
| [
{
"point_num": 1,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
},
{
"point_num": 2,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined insid... | 3 | ranking/management/modules/techgig.py | aropan/clist |
"""Routines to make lines
"""
from Numeric import *
from math import cos, sin, pi, atan2
def R(theta):
    """Return the 3x3 homogeneous rotation matrix for angle *theta* (radians)."""
    c, s = cos(theta), sin(theta)
    m = zeros([3,3], Float)
    m[0,0] = m[1,1] = c
    m[0,1] = s
    m[1,0] = -s
    m[2,2] = 1
    return m
def T(tx, ty):
    """Return the 3x3 homogeneous translation matrix for (*tx*, *ty*)."""
    m = zeros([3,3], Float)
    m[0,0] = m[1,1] = m[2,2] = 1.0
    m[2,0] = tx
    m[2,1] = ty
    return m
offset = 0.2
d1 = array((0,offset,1))
d2 = array((0,-offset,1))
def make_double_line_slow(p1, p2):
    """Return the four corner points of a double line from *p1* to *p2*.

    Reference implementation: transforms the offset points through an explicit
    rotation+translation matrix.
    """
    x1, y1 = p1
    x2, y2 = p2
    dx = x2 - x1
    dy = y2 - y1
    theta = atan2(dy, dx)
    # BUG FIX: the original called math.sqrt, but this module only does
    # `from math import cos, sin, pi, atan2`, so the name `math` is undefined
    # (NameError at runtime).  Compute the length without it.
    d = (dx * dx + dy * dy) ** 0.5
    d3 = array((d, offset, 1))
    d4 = array((d, -offset, 1))
    M = matrixmultiply(R(theta), T(x1, y1))
    a1 = matrixmultiply(d1, M)[0:2]
    a2 = matrixmultiply(d2, M)[0:2]
    a3 = matrixmultiply(d3, M)[0:2]
    a4 = matrixmultiply(d4, M)[0:2]
    return a1, a2, a3, a4
R90 = R(pi/2.0)
def make_double_line(p1, p2, split=3):
    """Return the four corner points of a double line from *p1* to *p2*.

    Fast variant: the perpendicular offset is obtained by rotating the scaled
    direction vector 90 degrees.  *split* is accepted for API compatibility
    but is unused.
    """
    x1, y1 = p1
    x2, y2 = p2
    # BUG FIX: dropped the original `d = math.sqrt(dx*dx + dy*dy)` -- `math`
    # is not imported in this module (NameError at runtime) and the value
    # was never used afterwards anyway.
    dx = (x2 - x1) * 0.05
    dy = (y2 - y1) * 0.05
    ox, oy = matrixmultiply([dx, dy, 1], R90)[0:2]
    return ((x1 + ox, y1 + oy),
            (x1 - ox, y1 - oy),
            (x2 + ox, y2 + oy),
            (x2 - ox, y2 - oy))
| [
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
},
{
"point_num": 2,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (exclu... | 3 | Depict/LineGenerator.py | UnixJunkie/frowns |
"""Module for testing Amino Acid DelIns Classifier."""
import unittest
from variation.classifiers import AminoAcidDelInsClassifier
from .classifier_base import ClassifierBase
class TestAminoAcidDelInsClassifier(ClassifierBase, unittest.TestCase):
    """Unit tests for the amino-acid delins classifier."""

    def classifier_instance(self):
        """Build a fresh AminoAcidDelInsClassifier for the shared test base."""
        return AminoAcidDelInsClassifier()

    def fixture_name(self):
        """Name of the fixture set exercised by these tests."""
        return 'amino_acid_delins'
| [
{
"point_num": 1,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": true
},... | 3 | tests/classifiers/test_amino_acid_delins.py | cancervariants/variant-normalization |
# Copyright 2019 Extreme Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import logging
from st2common.models.db.executionstate import ActionExecutionStateDB
from st2common.persistence.executionstate import ActionExecutionState
LOG = logging.getLogger(__name__)
def setup_query(liveaction_id, runnertype_db, query_context):
    """Register an execution-state record so the runner's query module polls it.

    Raises Exception when the runner type declares no query module.
    """
    query_module = getattr(runnertype_db, 'query_module', None)
    if not query_module:
        raise Exception('The runner "%s" does not have a query module.' % runnertype_db.name)
    ActionExecutionState.add_or_update(ActionExecutionStateDB(
        execution_id=liveaction_id,
        query_module=query_module,
        query_context=query_context,
    ))
def remove_query(liveaction_id):
    """Delete the execution-state record for *liveaction_id*.

    Returns True when a record was found and deleted, False otherwise.
    """
    matches = ActionExecutionState.query(execution_id=liveaction_id)
    if matches:
        ActionExecutionState.delete(matches, publish=False, dispatch_trigger=False)
        return True
    return False
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/... | 3 | st2common/st2common/services/queries.py | saucetray/st2 |
from django import forms
from .models import Comment, Answer
class CommentForm(forms.ModelForm):
    """Comment form for authenticated users; only the body text is editable."""
    class Meta:
        model = Comment
        fields = ('text',)
        widgets = {
            #'author':forms.TextInput(attrs={'class':'textinputclass'}),
            # medium-editor styling for the comment body textarea.
            'text':forms.Textarea(attrs={'class':'editable medium-editor-textarea'}),
        }
class AnonymousCommentForm(forms.ModelForm):
    """Comment form for anonymous visitors: also asks for the author name."""
    class Meta:
        model = Comment
        fields = ('author','text')
        widgets = {
            'author':forms.TextInput(attrs={'class':'textinputclass'}),
            # medium-editor styling for the comment body textarea.
            'text':forms.Textarea(attrs={'class':'editable medium-editor-textarea'}),
        }
class AnswerForm(forms.Form):
    """Single-field form for posting a new answer (optional, max 200 chars)."""
    answer_content = forms.CharField(label='',max_length=200,widget=forms.TextInput(attrs={'class': 'form-control','placeholder':'New Answer ..'}),required=False)
    def clean(self):
        # No extra validation; defer entirely to the base implementation.
        cleaned_data = super(AnswerForm, self).clean()
        return cleaned_data
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": false
},
{
"point_num": 2,
"id": "every_class_has_docstring",
"question": "Does every class in this file have a docstring?",
"answer": false
},
{... | 3 | djangofiles/BlogProject/blog/forms.py | manvith263/tricalidee |
# coding: utf-8
"""
Strava API v3
The [Swagger Playground](https://developers.strava.com/playground) is the easiest way to familiarize yourself with the Strava API by submitting HTTP requests and observing the responses before you write any client code. It will show what a response will look like with different endpoints depending on the authorization scope you receive from your athletes. To use the Playground, go to https://www.strava.com/settings/api and change your “Authorization Callback Domain” to developers.strava.com. Please note, we only support Swagger 2.0. There is a known issue where you can only select one scope at a time. For more information, please check the section “client code” at https://developers.strava.com/docs. # noqa: E501
OpenAPI spec version: 3.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import swagger_client
from swagger_client.models.temperature_stream import TemperatureStream # noqa: E501
from swagger_client.rest import ApiException
class TestTemperatureStream(unittest.TestCase):
    """TemperatureStream unit test stubs"""

    def setUp(self):
        """No fixtures required for the generated stub."""
        pass

    def tearDown(self):
        """Nothing to clean up."""
        pass

    def testTemperatureStream(self):
        """Test TemperatureStream"""
        # FIXME: construct object with mandatory attributes with example values
        # model = swagger_client.models.temperature_stream.TemperatureStream()  # noqa: E501
        pass
if __name__ == '__main__':
unittest.main()
| [
{
"point_num": 1,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
... | 3 | xapp-image-base/swagger/test/test_temperature_stream.py | martinsallandm/hw-xapp-python-lenovo |
# Copyright (c) 2019, NVIDIA CORPORATION.
import urllib.parse
from utils import assert_eq
import nvstrings
urls1 = ["http://www.hellow.com", "/home/nvidia/nfs", "123.45 ~ABCDEF"]
urls2 = [
"http://www.hellow.com?k1=acc%C3%A9nted&k2=a%2F/b.c",
"%2Fhome%2fnfs",
"987%20ZYX",
]
def test_encode_url():
    """url_encode must match urllib.parse.quote with '~' kept safe."""
    device_strings = nvstrings.to_device(urls1)
    got = device_strings.url_encode()
    expected = [urllib.parse.quote(url, safe="~") for url in urls1]
    assert_eq(got, expected)
def test_decode_url():
    """url_decode must match urllib.parse.unquote on percent-encoded input."""
    device_strings = nvstrings.to_device(urls2)
    got = device_strings.url_decode()
    expected = [urllib.parse.unquote(url) for url in urls2]
    assert_eq(got, expected)
| [
{
"point_num": 1,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"ans... | 3 | python/nvstrings/tests/test_url.py | williamBlazing/cudf |
# Import all dependencies
import re, os, sys
from shutil import copyfile
def GetNewName(oldName, parameters):
    """Return *oldName* with its picture index decremented and zero-padded to 5 digits.

    For example 'IMG_0042.jpg' becomes 'IMG_00041.jpg'.  Raises ValueError when
    the name contains no numeric part.  *parameters* is currently unused but
    kept for interface compatibility with renameFiles().
    """
    # BUG FIX: the class previously read [a-zA_Z_\.] -- the 'A_Z' fragment is a
    # character RANGE from 'A' to '_' that also matched '[', '\', ']' and '^'.
    # The intended class is clearly [a-zA-Z_\.].
    pattern = r'([a-zA-Z_\.]+)([0-9]+)(.*)'
    beginning = re.sub(pattern, r'\1', oldName)
    pictureIndex = int(re.sub(pattern, r'\2', oldName)) - 1
    ending = re.sub(pattern, r'\3', oldName)
    # Left-pad the new index to 5 digits (longer indices pass through as-is).
    # NOTE(review): an input index of 0 yields '000-1'; confirm indices start at 1.
    pictureIndexString = str(pictureIndex)
    pictureIndexString = ('0' * (5 - len(pictureIndexString))) + pictureIndexString
    return beginning + pictureIndexString + ending
def renameFiles(inputFolder, outputFolder, parameters):
    """Copy every file under *inputFolder* into *outputFolder* under its new name.

    Files whose names GetNewName rejects (ValueError) are skipped with a message.
    """
    if not os.path.exists(outputFolder):
        os.mkdir(outputFolder)
    # Walk the whole tree; handle each directory's files in reverse-sorted order.
    for dirname, dirnames, filenames in os.walk(inputFolder):
        filenames.sort(reverse=True)
        for filename in filenames:
            try:
                target = GetNewName(filename, parameters)
                source_path = os.path.join(dirname, filename)
                target_path = os.path.join(outputFolder, target)
                print('renaming ' + source_path + ' into ' + target_path)
                copyfile(source_path, target_path)
            except ValueError:
                print('Wrong filename. Skipping this file.')
if __name__ == "__main__":
    # Take the folders from argv when given, otherwise prompt interactively.
    if len(sys.argv) >= 3:
        inputFolder = sys.argv[1]
        outputFolder = sys.argv[2]
    else:
        inputFolder = input('>> Input folder : ')
        outputFolder = input('>> Output folder : ')
    renameFiles(inputFolder, outputFolder, ['', '', ''])
{
"point_num": 1,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
},
{
"point_num": 2,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a ty... | 3 | renameFiles.py | fkorsa/PythonScripts |
import pymysql.cursors
import ldap
def get_domain_name():
    """
    Returns the domain name of the current configuration from a config file

    Returns
    -------
    string
        the domain name
    """
    with open("/var/www/logic_webapp/webapp_config") as config:
        first_line = config.readline()
    # Right-hand side of 'KEY=value', with the trailing newline stripped.
    return first_line.split("=")[1].rstrip()
def get_db_password():
    """Return the DB password parsed from the second line of the webapp config."""
    with open("/var/www/logic_webapp/webapp_config") as config:
        second_line = config.readlines()[1]
    # Right-hand side of 'KEY=value', with the trailing newline stripped.
    return second_line.split("=")[1].rstrip()
def create_slurm_db_connection(host, port, user, password, db):
    """Open and return a PyMySQL connection to the Slurm accounting database.

    Parameters
    ----------
    host : string
        hostname on which is located the DB
    port : integer
        port on which the connection is to be established
    user : string
        user name with which the connection is to be established
    password : string
        password of the user on the database (of the user `user`)
    db : string
        name of the database which will be queried

    Returns
    -------
    PyMySQL Connection object
    """
    conn = pymysql.connect(
        host=host,
        port=port,
        user=user,
        password=password,
        db=db,
    )
    print("[+] Slurm accounting DB connection is up! [+]")
    return conn
def create_ldap_connection(host):
    """Initialise and anonymously bind an LDAP connection.

    Parameters
    ----------
    host : hostname with the LDAP database in the form of (ldap://host)

    Returns
    -------
    LDAP connection object
    """
    conn = ldap.initialize(host)
    # Do not chase referrals; required for many Active Directory setups.
    conn.set_option(ldap.OPT_REFERRALS, 0)
    conn.simple_bind_s()
    return conn
| [
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
},
{
"point_num": 2,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excludi... | 3 | webapp/external_access.py | Quoding/petricore |
from copy import deepcopy
# Parse the puzzle input: the first line holds the drawn numbers, the rest
# are bingo boards separated by blank lines (rows of whitespace-split ints).
with open("day-04/input.txt", "r") as file:
    numbers = [int(i) for i in file.readline().split(",")]
    boards = [
        [[int(i) for i in row.split()] for row in board.strip().split("\n")]
        for board in file.read().strip().split("\n\n")
    ]
def find_first(numbers, boards, mark=False):
    """Play the draw sequence until some board completes a row or column.

    Hits are overwritten in place with *mark*; completion is detected with an
    identity check against *mark*.  Returns (board_index, winning_number,
    boards) for the first board to win.
    """
    def complete(line):
        return all(cell is mark for cell in line)

    for drawn in numbers:
        for board_index, board in enumerate(boards):
            for row in board:
                for column_index, cell in enumerate(row):
                    if cell == drawn:
                        row[column_index] = mark
                        column = list(zip(*board))[column_index]
                        if complete(row) or complete(column):
                            return board_index, drawn, boards
def part_1(numbers, boards):
    """Score of the first winning board: sum of unmarked cells * last draw."""
    working = deepcopy(boards)
    winner_idx, last_number, _ = find_first(numbers, working)
    # Marked cells are False, which sums as 0, so a flat sum gives the
    # unmarked total directly.
    return sum(sum(working[winner_idx], [])) * last_number
def part_2(numbers, boards):
    """Score of the last board to win: repeatedly discard winners until one
    board remains, then score it like part 1."""
    remaining = deepcopy(boards)
    while len(remaining) > 1:
        idx, _, remaining = find_first(numbers, remaining)
        remaining.pop(idx)
    idx, last_number, remaining = find_first(numbers, remaining)
    return sum(sum(remaining[idx], [])) * last_number
# Solve and print both puzzle parts.
print(part_1(numbers, boards))
print(part_2(numbers, boards))
| [
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answ... | 3 | day-04/main.py | andrewyazura/aoc-2021 |
from starling_sim.basemodel.agent.agent import Agent
class SpatialAgent(Agent):
    """An agent located in the simulation environment.

    On top of the base Agent attributes it carries an origin position and a
    current position, which starts at the origin.
    """

    # JSON schema fragment describing the agent's input parameters.
    SCHEMA = {
        "properties": {
            "origin": {
                "type": ["number", "string"],
                "title": "Origin position",
                "description": "origin position id (inferred from geometry)",
            }
        },
        "required": ["origin"]
    }

    def __init__(self, simulation_model, agent_id, origin, **kwargs):
        super().__init__(simulation_model, agent_id, **kwargs)
        # The agent starts out positioned at its origin.
        self.origin = origin
        self.position = origin

    def __str__(self):
        return "[id={}, origin={}]".format(self.id, self.origin)
| [
{
"point_num": 1,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},... | 3 | starling_sim/basemodel/agent/spatial_agent.py | tellae/starling |
#!/usr/bin/env python
##########################################################
# File Name: classfier.py
# Author: gaoyu
# mail: gaoyu14@pku.edu.cn
# Created Time: 2018-04-30 20:21:33
##########################################################
from base_loss import *
from pycaffe import L
class ClassfierLoss(BaseLoss):
    """GAN classifier loss builder.

    Produces 0/1 softmax labels for a concatenated (ground-truth, predicted)
    batch and attaches a SoftmaxWithLoss + Accuracy pair to the net.
    """

    def GAN_label(self):
        """Return the concatenated label blob for the current session.

        Session "G" (training the generator): gt labelled 0, pr labelled 1.
        Session "P" (training the classifier): gt labelled 1, pr labelled 0.
        Raises when `self.session` is anything else.
        """
        assert(hasattr(self, "session"))
        batch_size = self._param["data"]["batch_size"]
        # Constant-filled dummy blob holding one label value per batch item.
        dummydata = lambda name, value: \
            L.DummyData(
                name = name,
                dummy_data_param = dict(
                    shape = dict(dim = [batch_size,]),
                    data_filler = dict(value = value)))
        neg_label = dummydata("NegLabel", 0)
        pos_label = dummydata("PosLabel", 1)
        # concat bottom 0 as gt, bottom 1 as pr
        # when training generator, gt should be false, pr should be true
        # when training classfier, gt should be true, pr should be false
        if self.session == "G":
            return L.Concat(neg_label, pos_label,
                name = "OppSoftmaxLabel",
                axis = 0)
        elif self.session == "P":
            return L.Concat(pos_label, neg_label,
                name = "SoftmaxLabel",
                axis = 0)
        else:
            raise Exception("LossError", "Unknown session")

    def add_loss(self, classfier, session = None):
        """Attach the GAN softmax loss (weight 1000) plus an accuracy layer
        for `classfier`; a non-None `session` overrides the stored one."""
        self.session = session or self.session
        softmax_label = self.GAN_label()
        loss = L.SoftmaxWithLoss(classfier, softmax_label,
            loss_weight = 1000)
        accuracy = L.Accuracy(classfier, softmax_label)
        # NOTE(review): attribute names keep the original (misspelled)
        # "ClassfierLoss" / "Accuraccy"; downstream code may rely on them.
        self.net.ClassfierLoss = loss
        self.net.Accuraccy = accuracy
        return loss
| [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
}... | 3 | net/gan/loss/classfier.py | bacTlink/caffe_tmpname |
import torch
import torch.nn as nn
import torch.nn.functional as F
class QNetwork(nn.Module):
    """Fully-connected Q-value approximator (Actor / Policy model)."""

    def __init__(self, state_size, action_size, seed, fc1_units=64, fc2_units=64):
        """Build a three-layer MLP mapping states to per-action values.

        Params
        ======
            state_size (int): Dimension of each state
            action_size (int): Dimension of each action
            seed (int): Random seed
            fc1_units (int): Number of nodes in first hidden layer
            fc2_units (int): Number of nodes in second hidden layer
        """
        super(QNetwork, self).__init__()
        self.seed = torch.manual_seed(seed)
        # state -> fc1 -> fc2 -> action values
        self.fc1 = nn.Linear(state_size, fc1_units)
        self.fc2 = nn.Linear(fc1_units, fc2_units)
        self.fc3 = nn.Linear(fc2_units, action_size)

    def forward(self, state):
        """Map a batch of states to Q-values, one per action."""
        hidden = state
        for layer in (self.fc1, self.fc2):
            hidden = F.relu(layer(hidden))
        return self.fc3(hidden)
{
"point_num": 1,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docst... | 3 | p1_navigation/model.py | cshreyastech/deep-reinforcement-learning |
from addons.base.apps import BaseAddonAppConfig
from addons.dropbox.views import dropbox_root_folder
from addons.dropbox import routes
class DropboxAddonAppConfig(BaseAddonAppConfig):
    """Django app config wiring the Dropbox storage addon into the platform."""

    name = 'addons.dropbox'
    label = 'addons_dropbox'
    full_name = 'Dropbox'
    short_name = 'dropbox'
    configs = ['accounts', 'node']
    has_hgrid_files = True
    max_file_size = 150  # MB
    routes = [routes.auth_routes, routes.api_routes]
    owners = ['user', 'node']
    categories = ['storage']

    @property
    def get_hgrid_data(self):
        # Callable used to serialize the addon's root folder for the file grid.
        return dropbox_root_folder

    # Log action identifiers recorded against nodes.
    FOLDER_SELECTED = 'dropbox_folder_selected'
    NODE_AUTHORIZED = 'dropbox_node_authorized'
    NODE_DEAUTHORIZED = 'dropbox_node_deauthorized'
    actions = (FOLDER_SELECTED, NODE_AUTHORIZED, NODE_DEAUTHORIZED, )

    @property
    def user_settings(self):
        # Resolved via get_model at access time rather than at import time.
        return self.get_model('UserSettings')

    @property
    def node_settings(self):
        return self.get_model('NodeSettings')
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
}... | 3 | addons/dropbox/apps.py | alexschiller/osf.io |
from typing import List
from entities import Entity
class Enemy(Entity):
    """An entity of kind "enemy", carrying separate tiles for how it is drawn
    to friendly and enemy viewers."""
    def __init__(self, pos_x: int, pos_y: int, tile: str, friendly_tile: str, enemy_tile: str):
        super().__init__("enemy", pos_x, pos_y, tile)
        # Tile shown to allies vs. the tile shown to opposing entities.
        self.friendly_tile = friendly_tile
        self.enemy_tile = enemy_tile
class Battle:
    """Groups the teams taking part in a fight; each team is a list of entities."""
    def __init__(self, teams: List[List[Entity]]):
        self.teams = teams
{
"point_num": 1,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 2,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self... | 3 | battles.py | legend-plus/LegendBot |
class Buffer:
    """Growable FIFO byte buffer backed by a bytearray."""

    def __init__(self):
        self._buffer = bytearray()

    def write(self, data: bytes):
        """Append `data` at the end of the buffer."""
        self._buffer += data

    def read(self, size: int):
        """Remove and return the first `size` bytes (as a bytearray copy)."""
        chunk = self._buffer[:size]
        del self._buffer[:size]
        return chunk

    def peek(self, size: int):
        """Return the first `size` bytes without consuming them."""
        return self._buffer[:size]

    def count(self):
        """Number of buffered bytes (same as len(buffer))."""
        return len(self._buffer)

    def __len__(self):
        return len(self._buffer)
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/... | 3 | utils.py | HarukaMa/bitshares-p2p |
# U S Σ R Δ T O R / Ümüd
""" U S Σ R Δ T O R """
from userbot import LOGS
from telethon.tl.types import DocumentAttributeFilename
def __list_all_modules():
    """Return the module names of every importable *.py file in this
    package directory, excluding __init__.py."""
    import glob
    from os.path import basename, dirname, isfile

    module_files = glob.glob(dirname(__file__) + "/*.py")
    return [
        basename(path)[:-3]  # strip the ".py" extension
        for path in module_files
        if isfile(path) and path.endswith(".py") and not path.endswith("__init__.py")
    ]
# Discover every module in this package and publish the sorted list so the
# bot loader can import them by name.
ALL_MODULES = sorted(__list_all_modules())
LOGS.info("Yüklənəcək modullar: %s", str(ALL_MODULES))
__all__ = ALL_MODULES + ["ALL_MODULES"]
async def MEDIACHECK(reply):
    """Validate that `reply` is a message with downloadable photo/video media.

    Returns a (media_data, type) tuple where type is "img" or "vid", or
    False when the message carries no usable media (animated stickers,
    audio and voice messages are rejected).
    """
    type = "img"  # NOTE(review): shadows the builtin `type`; kept as-is
    if reply and reply.media:
        if reply.photo:
            data = reply.photo
        elif reply.document:
            # Reject Telegram animated stickers.
            if DocumentAttributeFilename(file_name='AnimatedSticker.tgs') in reply.media.document.attributes:
                return False
            if reply.gif or reply.video:
                type = "vid"
            if reply.audio or reply.voice:
                return False
            data = reply.media.document
        else:
            return False
    else:
        return False
    # `data` is always bound here; the redundant None check is preserved.
    if not data or data is None:
        return False
    else:
        return (data, type)
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": false
},
{
"point_num": 2,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
... | 3 | userbot/modules/__init__.py | caerus19/Userator |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Oct 7 14:53:52 2021
@author: ml
"""
import ast
import nltk
from src.feature_extraction.feature_extractor import FeatureExtractor
class BigramFeature(FeatureExtractor):
    """Feature extractor that collects token bigrams across a whole column."""

    def __init__(self, input_column):
        # The output column is named "<input_column>_bigrams".
        super().__init__([input_column], "{0}_bigrams".format(input_column))

    def _set_variables(self, inputs):
        """Concatenate every row's token list and compute the bigrams."""
        all_tokens = []
        for row in inputs:
            # Each cell holds the string repr of a token list; parse it back.
            all_tokens.extend(ast.literal_eval(row.item()))
        self._bigrams = nltk.bigrams(all_tokens)
{
"point_num": 1,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true... | 3 | src/feature_extraction/bigrams.py | chbroecker/MLinPractice |
"""
Python program to search for an element in the linked list using recursion
"""
class Node:
    """Single element of a singly linked list."""

    def __init__(self, data):
        self.data = data
        self.next = None  # link to the following node; None at the tail
class Linked_list:
    """Minimal singly linked list with append, display and recursive search."""

    def __init__(self):
        self.head = None
        self.last_node = None  # tail pointer for O(1) appends

    def append(self, data):
        """Add a new node holding `data` at the tail."""
        node = Node(data)
        if self.last_node is None:
            self.head = node
        else:
            self.last_node.next = node
        self.last_node = node

    def display(self):
        """Print all stored items on one line, space separated."""
        current = self.head
        while current is not None:
            print(current.data, end=" ")
            current = current.next

    def find_index(self, key):
        """Return the 0-based index of `key`, or -1 if it is absent."""
        return self.find_index_helper(key, 0, self.head)

    def find_index_helper(self, key, start, node):
        """Recursively walk the list, counting positions from `start`."""
        if node is None:
            return -1
        if node.data == key:
            return start
        return self.find_index_helper(key, start + 1, node.next)
# Interactive driver: build a list from user input, then search it.
a_llist = Linked_list()
n = int(input("How many elements would you like to add : "))
for i in range(n):
    data = int(input("Enter data item : "))
    a_llist.append(data)
print("The linked list : ", end=" ")
a_llist.display()
print()
key = int(input("What data item would you like to search : "))
index = a_llist.find_index(key)
if index == -1:
    print(str(key) + ' was not found')
else:
    # Fixed: the original concatenation printed e.g. "5is at index2."
    # without spaces, and "lined list" was a typo for "linked list".
    print(str(key) + " is at index " + str(index) + ".")
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "every_class_has_docstring",
"question": "Does every class in this file have a docstring?",
"answer": false
},
... | 3 | Advance_Python/Linked_List/Search_Ele_In_SLL.py | siddharth-143/Python |
"""
Add a keyword and a description field which are helpful for SEO optimization.
"""
from __future__ import absolute_import, unicode_literals
from django.db import models
from django.utils.translation import ugettext_lazy as _
from feincms import extensions
class Extension(extensions.Extension):
    """FeinCMS extension adding SEO meta keyword/description fields."""

    def handle_model(self):
        """Add `meta_keywords` and `meta_description` text fields to the model."""
        self.model.add_to_class('meta_keywords', models.TextField(
            _('meta keywords'),
            blank=True,
            help_text=_('Keywords are ignored by most search engines.')))
        self.model.add_to_class('meta_description', models.TextField(
            _('meta description'),
            blank=True,
            help_text=_('This text is displayed on the search results page. '
                        'It is however not used for the SEO ranking. '
                        'Text longer than 140 characters is truncated.')))

    def handle_modeladmin(self, modeladmin):
        """Expose the new fields in the admin: make them searchable and show
        them in a collapsed fieldset."""
        modeladmin.extend_list(
            'search_fields',
            ['meta_keywords', 'meta_description'],
        )
        modeladmin.add_extension_options(_('Search engine optimization'), {
            'fields': ('meta_keywords', 'meta_description'),
            'classes': ('collapse',),
        })
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false... | 3 | feincms/module/extensions/seo.py | barseghyanartur/feincms |
from ldap3 import Server, Connection, ALL
from utils.config import get_config
_client = None
_base_dn = None
def init(serverUrl):
    """Lazily create and return the module-wide LDAP connection (singleton)."""
    global _client
    if _client is not None:
        return _client
    server = Server(serverUrl, get_info=ALL)
    # Anonymous bind (no user/password), bound immediately.
    _client = Connection(server, None, None, auto_bind=True)
    return _client
def init_from_config():
    """Read the ldap section of the app config, remember the base DN and
    initialise the shared client."""
    global _base_dn
    ldap_conf = get_config()['ldap']
    _base_dn = ldap_conf['base_dn']
    return init(ldap_conf['server'])
def user_exists(username):
    """Check whether a person entry with uid=`username` exists in LDAP."""
    init_from_config()
    # NOTE(review): despite the original name, this string is used as the
    # search *base* DN, not as a filter -- confirm intent against ldap3 docs.
    base = "uid={},{}".format(username, _base_dn)
    return _client.search(base, '(objectclass=person)')
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": fals... | 3 | utils/ldap_client.py | bsquizz/qontract-reconcile |
from tkinter import *
from PIL import Image,ImageTk
from resizeimage import resizeimage
#import os
class CountryIMG:
    """Loads the list of European countries and a resized map image for each.

    Populates self.Country as [image, name] pairs; countries whose map file
    cannot be loaded are skipped.
    """

    def __init__(self):
        self.size = 400  # target square size in pixels for the resized maps
        # NOTE(review): file handle is opened here and never explicitly closed.
        self.TXT =open('CountryImages\Europ\Panstwa.txt',"r",encoding="utf-8")
        self.CountryName=[]
        self.CountryImage = []
        for line in self.TXT:
            line = line.replace('\n','')
            self.name=line
            line = line.lower()
            line = self.removeAccents(line)
            try:
                # Map files are named "mapa-<ascii lowercase name>.gif".
                self.PATH= "CountryImages\Europ\mapa-"+line+".gif"
                #self.PATH = "CountryImages\Europ\kosowo.jpg"
                self.IMG = Image.open(self.PATH)
                self.ImageResized = resizeimage.resize_cover(self.IMG,[self.size, self.size])
                self.countryImage = ImageTk.PhotoImage(self.ImageResized)
                self.CountryName.append(self.name)
                self.CountryImage.append(self.countryImage)
            except:
                # NOTE(review): bare except treats *any* failure (missing file,
                # PIL error) as "no map for this country" and skips it.
                line = line.upper()
        self.Country= [list(a) for a in zip(self.CountryImage, self.CountryName)]

    def removeAccents(self,input_text):
        """Replace Polish accented characters with ASCII and spaces with dashes."""
        self.strange='ąćęłńóśżź '
        self.ascii_replacements='acelnoszz-'
        self.translator=str.maketrans(self.strange,self.ascii_replacements)
        return input_text.translate(self.translator)
{
"point_num": 1,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},... | 3 | CountryIMG.py | AGH-Narzedzia-Informatyczne/Project-Maze |
#!/usr/bin/env python3
# Copyright (c) 2017-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the RPC call related to the uptime command.
Test corresponds to code in rpc/server.cpp.
"""
import time
from test_framework.test_framework import IsocoinTestFramework
class UptimeTest(IsocoinTestFramework):
    """Functional test for the `uptime` RPC (see rpc/server.cpp)."""

    def set_test_params(self):
        # Single node, starting from a fresh chain.
        self.num_nodes = 1
        self.setup_clean_chain = True

    def run_test(self):
        self._test_uptime()

    def _test_uptime(self):
        # Jump the node's mock clock forward and verify uptime reflects it.
        wait_time = 10
        self.nodes[0].setmocktime(int(time.time() + wait_time))
        assert(self.nodes[0].uptime() >= wait_time)


if __name__ == '__main__':
    UptimeTest().main()
| [
{
"point_num": 1,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},... | 3 | test/functional/rpc_uptime.py | jardelfrank42/paymecoin |
import os
import pytest
import numpy as np
from deepforest import _io as io
# Two buffer configurations shared by all tests below: one that persists
# estimators/predictors/data to disk, and one with buffering disabled.
open_buffer = io.Buffer(use_buffer=True,
                        buffer_dir="./",
                        store_est=True,
                        store_pred=True,
                        store_data=True)
close_buffer = io.Buffer(use_buffer=False)

# Dummy payload used as train/test data.
X = np.zeros((42, 42), dtype=np.uint8)
def test_buffer_name():
    """An enabled buffer exposes a string name; a disabled one has none."""
    assert isinstance(open_buffer.name, str)
    assert close_buffer.name is None
def test_store_data_close_buffer():
    """When `store_data` is False, the buffer directly returns the array."""
    assert isinstance(close_buffer.cache_data(0, X), np.ndarray)
def test_store_data_open_buffer():
    """
    When `store_data` is True, the buffer dumps the array and returns a
    memmap object, for both training and evaluation data.
    """
    layer_idx = 0
    for is_train, pattern in (
        (True, "joblib_train_{}.mmap"),
        (False, "joblib_test_{}.mmap"),
    ):
        ret = open_buffer.cache_data(layer_idx, X, is_training_data=is_train)
        assert isinstance(ret, np.memmap)
        assert os.path.exists(
            os.path.join(open_buffer.data_dir_, pattern.format(layer_idx)))
def test_load_estimator_missing():
    """Loading an estimator from a non-existent path raises FileNotFoundError."""
    msg = "Missing estimator in the path: unknown.est."
    with pytest.raises(FileNotFoundError, match=msg):
        open_buffer.load_estimator("unknown.est")
def test_load_predictor_missing():
    """Loading a predictor from a non-existent path raises FileNotFoundError."""
    msg = "Missing predictor in the path: unknown.est."
    with pytest.raises(FileNotFoundError, match=msg):
        open_buffer.load_predictor("unknown.est")
| [
{
"point_num": 1,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",... | 3 | tests/test_buffer.py | pjgao/Deep-Forest |
#!/usr/bin/env python
"""
Simple implementation for mixup. The loss and onehot functions origin from: https://github.com/moskomule/mixup.pytorch
Hongyi Zhang, Moustapha Cisse, Yann N. Dauphin, David Lopez-Paz: mixup: Beyond Empirical Risk Minimization
https://arxiv.org/abs/1710.09412
"""
__all__ = [ 'mixup_cross_entropy_loss', 'mixup' ]
import numpy as np
import torch
from torch.autograd import Variable
def mixup_cross_entropy_loss(input, target, size_average=True):
    """Cross entropy against *soft* (probability) targets.

    Origin: https://github.com/moskomule/mixup.pytorch
    PyTorch's built-in cross entropy expects hard labels, so this variant
    computes loss(p, q) = -sum_i q_i log p_i directly.
    """
    assert input.size() == target.size()
    assert isinstance(input, Variable) and isinstance(target, Variable)
    # Clamp the softmax output away from zero so the log stays finite.
    log_probs = torch.log(torch.nn.functional.softmax(input, dim=1).clamp(1e-5, 1))
    total = - torch.sum(log_probs * target)
    # Average over the batch dimension unless the caller wants the raw sum.
    return total / input.size()[0] if size_average else total
def onehot(targets, num_classes):
    """Convert an index tensor into a one-hot float tensor.

    Origin: https://github.com/moskomule/mixup.pytorch
    :param targets: index tensor
    :param num_classes: number of classes
    """
    assert isinstance(targets, torch.LongTensor)
    grid = torch.zeros(targets.size()[0], num_classes)
    return grid.scatter_(1, targets.view(-1, 1), 1)
def mixup(inputs, targets, num_classes, alpha=2):
    """Mixup augmentation on a batch of 1x32x32 mel-spectrograms.

    Each sample is blended with a randomly chosen partner using a
    Beta(alpha, alpha) mixing weight; labels are blended the same way.
    """
    batch = inputs.size()[0]
    lam = torch.Tensor(np.random.beta(alpha, alpha, batch))
    partner = np.random.permutation(batch)
    x1, x2 = inputs, inputs[partner, :, :, :]
    y1 = onehot(targets, num_classes)
    y2 = onehot(targets[partner,], num_classes)
    # Broadcast the per-sample weight over the image dims, then the label dims.
    lam = lam.view(batch, 1, 1, 1)
    mixed_inputs = lam * x1 + (1 - lam) * x2
    lam = lam.view(batch, 1)
    mixed_targets = lam * y1 + (1 - lam) * y2
    return mixed_inputs, mixed_targets
| [
{
"point_num": 1,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docst... | 3 | kws/utils/mixup.py | bozliu/E2E-Keyword-Spotting |
#!/usr/bin/env python3
import argparse
import tarfile
import tempfile
from pathlib import Path
def work(path_in: Path, path_out: Path):
    """Split one annotation file into per-sentence files plus a `fileinfos`
    header file, and pack them into a ``.tar.gz`` archive at `path_out`.

    Lines starting with ``# S-ID:`` open a new sentence block; every line
    lacking a ``MEMO:`` field gets an empty one appended.
    """
    path_out.parent.mkdir(exist_ok=True, parents=True)
    blocks = []
    sentence_ids = []
    header_lines = []
    with path_in.open() as reader:
        for line in reader:
            if line.startswith('# S-ID:'):
                blocks.append('')
                sentence_ids.append(line[7:].split(' ')[0].strip())
                header_lines.append(line)
            if 'MEMO:' not in line:
                line = line.strip() + ' MEMO:\n'
            blocks[-1] += line
    with tempfile.TemporaryDirectory() as tempdir:
        staging = Path(tempdir).joinpath(path_in.stem)
        staging.mkdir(exist_ok=True)
        with staging.joinpath('fileinfos').open('w') as writer:
            writer.write(''.join(header_lines))
        for sid, content in zip(sentence_ids, blocks):
            with staging.joinpath(sid).open('w') as writer:
                writer.write(content)
        # Archive the whole staging directory under its bare name.
        with tarfile.open(path_out, mode='w:gz') as gzf:
            gzf.add(staging, staging.name)
def operation(path_in: Path, path_out_dir: Path) -> None:
    """Convert every file inside directory `path_in` into an archive at
    `path_out_dir`/<stem>/<stem>.tar.gz."""
    path_out_dir.mkdir(exist_ok=True, parents=True)
    for source in sorted(path_in.iterdir()):
        target: Path = path_out_dir.joinpath(source.stem, f'{source.stem}.tar.gz')
        work(source, target)
def get_opts() -> argparse.Namespace:
    """Parse command line options: required --input/-i and --output/-o paths."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--input", "-i", type=Path, required=True)
    parser.add_argument("--output", "-o", type=Path, required=True)
    return parser.parse_args()
def main() -> None:
    """Entry point: parse CLI options and convert the input directory."""
    options = get_opts()
    operation(options.input, options.output)
if __name__ == '__main__':
main()
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer"... | 3 | scripts/dist2katool.py | shirayu/ita-corpus-chuwa |
# -*- coding: utf-8 -*-
# Created by restran on 2016/10/10
from __future__ import unicode_literals, absolute_import
import logging
import os
import sys
import psycopg2
# 把项目的目录加入的环境变量中,这样才可以导入 common.base
sys.path.insert(1, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from common.base import TaskExecutor, read_dict
logger = logging.getLogger(__name__)
"""
PostgreSQL 弱口令爆破
"""
# Shared attack state and configuration (module-level).
found_password = []
# NOTE(review): ip.txt is read at import time -- the file must exist first.
ip_list = read_dict('ip.txt', clear_none=True)
username_list = ['postgres', 'root']
database_name = None
def weak_pass(password, ip, db, port_list=None):
    """Try `password` for each candidate username against a PostgreSQL
    server at `ip`, over every port in `port_list` (default [5432, 1521]).

    Returns True on the first successful login (recording the credentials
    in `found_password`), False when every combination fails.
    """
    # Hoisted: the original filled the default inside the username loop,
    # which worked but obscured the intent.
    if port_list is None:
        port_list = [5432, 1521]
    for name in username_list:
        for port in port_list:
            try:
                # Fixed: use the `db` parameter instead of always reading the
                # module-level `database_name` (the parameter was ignored;
                # callers pass database_name, so behavior is unchanged).
                psycopg2.connect(host=ip,
                                 port=port,
                                 database=db,
                                 user=name,
                                 password=password, )
                logger.info('[True ] %s %s:%s' % (ip, name, password))
                found_password.append((ip, '%s:%s' % (name, password)))
                return True
            except Exception as e:
                logger.info(e)
                logger.info('[False] %s %s:%s' % (ip, name, password))
    return False
def main():
    """Run the dictionary attack for every IP in `ip_list`, then report
    which credentials succeeded."""
    password_list = read_dict('password.dic')
    for ip in ip_list:
        TaskExecutor(password_list).run(weak_pass, ip, database_name)
    for credentials in found_password:
        logger.info('%s password is %s' % credentials)
if __name__ == "__main__":
main()
| [
{
"point_num": 1,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
},
{
"point_num": 2,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fe... | 3 | password/postgresql.py | restran/hacker-scripts |
"""
Test base objects with context
"""
from pii_manager import PiiEnum, PiiEntity
from pii_manager.api import PiiManager
def _pii(pos):
    """Build the expected GOV_ID entity found at character offset `pos`."""
    return PiiEntity(PiiEnum.GOV_ID, pos, "3451-K", country="vo", name="vogonian ID")
# Test cases: (document text, expected PII entities). The last two cases
# must detect nothing -- the context word is too far away or wrong.
TEST = [
    ("my Vogon ID is 3451-K", [_pii(15)]),
    ("the number 3451-K is my Vogonian ID", [_pii(11)]),
    ("the Vogon ID are 3451-K", []),  # context outside window
    ("my Betelgeuse ID is 3451-K", []),  # context does not match
]

# ------------------------------------------------------------------------

# Regex-based task descriptor with a context constraint.
DUMMY_REGEX = {
    "pii": PiiEnum.GOV_ID,
    "type": "regex",
    "task": r"""\b\d{4}-\w\b""",
    "lang": "en",
    "name": "vogonian ID",
    "country": "vo",
    "doc": "a toy example to match a government id",
    "context": {"value": ["Vogon ID", "vogonian id"], "width": [12, 20]},
}
def test10_context_regex():
    """
    Check a PII task with contexts, regex variant
    """
    proc = PiiManager("en", mode="extract")
    proc.add_tasks([DUMMY_REGEX])
    for text, expected in TEST:
        assert list(proc(text)) == expected
# ------------------------------------------------------------------------

# Same toy task as DUMMY_REGEX, but implemented via a PiiTask class.
DUMMY_CLASS = {
    "pii": PiiEnum.GOV_ID,
    "type": "PiiTask",
    "task": "unit.api.test_manager_add.DummyPii",
    "lang": "en",
    "country": "vo",
    "name": "vogonian ID",
    "doc": "a toy example to match a government id",
    "context": {"value": ["Vogon ID", "vogonian id"], "width": [12, 20]},
}
def test20_context_class():
    """
    Check a PII task with contexts, class variant
    """
    proc = PiiManager("en", mode="extract")
    proc.add_tasks([DUMMY_CLASS])
    for text, expected in TEST:
        assert list(proc(text)) == expected
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": fals... | 3 | pii-manager/test/unit/api/test_manager_ctx.py | ruinunca/data_tooling |
import ckan.model as model
from sqlalchemy import Column, types
from sqlalchemy.ext.declarative import declarative_base
log = __import__('logging').getLogger(__name__)
Base = declarative_base()
class MunicipalityBoundingBox(Base):
    '''
    Contains bbox data for every Finnish municipality
    '''
    __tablename__ = 'municipality_bounding_boxes'

    # Municipality name is the primary key; coordinates are stored as text.
    name = Column(types.UnicodeText, nullable=False, primary_key=True)
    lat_min = Column(types.UnicodeText, nullable=False)
    lat_max = Column(types.UnicodeText, nullable=False)
    lng_min = Column(types.UnicodeText, nullable=False)
    lng_max = Column(types.UnicodeText, nullable=False)

    @classmethod
    def bulk_save(cls, objects):
        """Insert `objects` in a single bulk operation and commit."""
        model.Session.bulk_save_objects(objects)
        model.Session.commit()
def ytp_main_init_tables(engine):
    """Create the extension's tables (MunicipalityBoundingBox) if missing."""
    Base.metadata.create_all(engine)
    log.info('Tables initialized: MunicipalityBoundingBox')
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"ans... | 3 | modules/ckanext-ytp_main/ckanext/ytp/model.py | vrk-kpa/opendata-ckan |
# -*- coding: utf-8 -*-
from pyramid_oereb.standard.xtf_import.util import parse_string, parse_multilingual_text, parse_ref
class PublicLawRestriction(object):
    """Reads public-law-restriction elements from an XTF import and stores
    them as ORM instances in the given session."""

    # XML tag names of the child elements parsed below.
    TAG_INFORMATION = 'Aussage'
    TAG_SUB_THEME = 'SubThema'
    TAG_OTHER_THEME = 'WeiteresThema'
    TAG_TYPE_CODE = 'ArtCode'
    TAG_TYPE_CODE_LIST = 'ArtCodeliste'
    TAG_LAW_STATUS = 'Rechtsstatus'
    TAG_PUBLISHED_FROM = 'publiziertAb'
    TAG_VIEW_SERVICE = 'DarstellungsDienst'
    TAG_RESPONSIBLE_OFFICE = 'ZustaendigeStelle'

    def __init__(self, session, model, topic_code):
        # session: DB session to add parsed instances to.
        # model: ORM model class instantiated per element.
        # topic_code: topic assigned to every parsed restriction.
        self._session = session
        self._model = model
        self._topic_code = topic_code

    def parse(self, public_law_restriction):  # pragma: no cover
        """Build one model instance from an XTF element (keyed by its TID
        attribute) and add it to the session (not committed here)."""
        instance = self._model(
            id=public_law_restriction.attrib['TID'],
            information=parse_multilingual_text(public_law_restriction, self.TAG_INFORMATION),
            topic=self._topic_code,
            sub_theme=parse_string(public_law_restriction, self.TAG_SUB_THEME),
            other_theme=parse_string(public_law_restriction, self.TAG_OTHER_THEME),
            type_code=parse_string(public_law_restriction, self.TAG_TYPE_CODE),
            type_code_list=parse_string(public_law_restriction, self.TAG_TYPE_CODE_LIST),
            law_status=parse_string(public_law_restriction, self.TAG_LAW_STATUS),
            published_from=parse_string(public_law_restriction, self.TAG_PUBLISHED_FROM),
            view_service_id=parse_ref(public_law_restriction, self.TAG_VIEW_SERVICE),
            office_id=parse_ref(public_law_restriction, self.TAG_RESPONSIBLE_OFFICE)
        )
        self._session.add(instance)
| [
{
"point_num": 1,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},... | 3 | pyramid_oereb/standard/xtf_import/public_law_restriction.py | pvalsecc/pyramid_oereb |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np # type: ignore
import onnx
from ..base import Base
from . import expect
from ..utils import all_numeric_dtypes
class Max(Base):
    """ONNX backend test cases for the element-wise Max operator."""

    @staticmethod
    def export():  # type: () -> None
        """Float test cases: three inputs, one input, and two inputs."""
        data_0 = np.array([3, 2, 1]).astype(np.float32)
        data_1 = np.array([1, 4, 4]).astype(np.float32)
        data_2 = np.array([2, 5, 3]).astype(np.float32)
        result = np.array([3, 5, 4]).astype(np.float32)
        node = onnx.helper.make_node(
            'Max',
            inputs=['data_0', 'data_1', 'data_2'],
            outputs=['result'],
        )
        expect(node, inputs=[data_0, data_1, data_2], outputs=[result],
               name='test_max_example')

        # Max of a single input is the input itself.
        node = onnx.helper.make_node(
            'Max',
            inputs=['data_0'],
            outputs=['result'],
        )
        expect(node, inputs=[data_0], outputs=[data_0],
               name='test_max_one_input')

        # Two-input case: expected result computed with np.maximum.
        result = np.maximum(data_0, data_1)
        node = onnx.helper.make_node(
            'Max',
            inputs=['data_0', 'data_1'],
            outputs=['result'],
        )
        expect(node, inputs=[data_0, data_1], outputs=[result],
               name='test_max_two_inputs')

    @staticmethod
    def export_max_all_numeric_types():  # type: () -> None
        """Two-input Max across every supported numeric dtype."""
        for op_dtype in all_numeric_dtypes:
            data_0 = np.array([3, 2, 1]).astype(op_dtype)
            data_1 = np.array([1, 4, 4]).astype(op_dtype)
            result = np.array([3, 4, 4]).astype(op_dtype)
            node = onnx.helper.make_node(
                'Max',
                inputs=['data_0', 'data_1'],
                outputs=['result'],
            )
            expect(node, inputs=[data_0, data_1], outputs=[result],
                   name='test_max_{0}'.format(np.dtype(op_dtype).name))
| [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},... | 3 | onnx/backend/test/case/node/max.py | cnheider/onnx |
import pytest
from gi.repository import Gtk
from gaphas.canvas import Canvas
from gaphas.item import Element, Line
from gaphas.view import GtkView
class Box(Element):
    """Rectangular test item drawn from its top-left handle."""

    def draw(self, context):
        cr = context.cairo
        origin = self.handles()[0].pos
        cr.rectangle(origin.x, origin.y, self.width, self.height)
        cr.stroke()
@pytest.fixture
def canvas():
    """Fresh, empty canvas."""
    return Canvas()


@pytest.fixture
def connections(canvas):
    """The canvas' connections registry."""
    return canvas.connections


@pytest.fixture
def view(canvas):
    """GtkView over the canvas, pre-sized to 400x400."""
    view = GtkView(canvas)
    # resize, as if a widget is configured
    view._qtree.resize((0, 0, 400, 400))
    view.update()
    return view
@pytest.fixture
def scrolled_window(view):
    """View embedded in a scrolled window (GTK3 and GTK4 compatible)."""
    scrolled_window = Gtk.ScrolledWindow()
    scrolled_window.add(
        view
    ) if Gtk.get_major_version() == 3 else scrolled_window.set_child(view)
    view.update()
    return scrolled_window


@pytest.fixture
def window(view):
    """Top-level window holding the view; destroyed after the test."""
    if Gtk.get_major_version() == 3:
        window = Gtk.Window.new(Gtk.WindowType.TOPLEVEL)
        window.add(view)
        window.show_all()
    else:
        window = Gtk.Window.new()
        window.set_child(view)
    yield window
    window.destroy()
@pytest.fixture
def box(canvas, connections):
    """A Box item added to the canvas."""
    box = Box(connections)
    canvas.add(box)
    return box


@pytest.fixture
def line(canvas, connections):
    """A Line item (tail at (100, 100)) added to the canvas."""
    line = Line(connections)
    line.tail.pos = (100, 100)
    canvas.add(line)
    return line


@pytest.fixture
def handler():
    """Callable event recorder: collected call args are on `handler.events`."""
    events = []

    def handler(*args):
        events.append(args)

    handler.events = events  # type: ignore[attr-defined]
    return handler
| [
{
"point_num": 1,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": true
},
{
"point_num": 2,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (exc... | 3 | tests/conftest.py | gaphor/gaphas |
from tool.runners.python import SubmissionPy
class RemiSubmission(SubmissionPy):
    """Intcode interpreter for Advent of Code 2019 day 5 part 1."""

    def get_param(self, p, opcode, index, param):
        """Resolve the `index`-th parameter according to its mode digit in
        `opcode`: 0 = position (read memory), 1 = immediate (literal)."""
        modes = opcode // 100
        for _ in range(index):
            modes //= 10
        mode = modes % 10
        if mode == 0:
            return p[param]
        elif mode == 1:
            return param

    def execute(self, p, p_input):
        """Run program `p` with a single input value; return all outputs.

        Supported opcodes: 1 add, 2 multiply, 3 read input, 4 output,
        99 halt. Writes always go to the position given by the raw operand.
        """
        p_output = []
        pc = 0
        while True:
            opcode = p[pc]
            if opcode % 100 == 1:
                a = self.get_param(p, opcode, 0, p[pc + 1])
                b = self.get_param(p, opcode, 1, p[pc + 2])
                p[p[pc + 3]] = a + b
                pc += 4
            elif opcode % 100 == 2:
                a = self.get_param(p, opcode, 0, p[pc + 1])
                b = self.get_param(p, opcode, 1, p[pc + 2])
                p[p[pc + 3]] = a * b
                pc += 4
            elif opcode % 100 == 3:
                p[p[pc + 1]] = p_input
                pc += 2
            elif opcode % 100 == 4:
                p_output.append(self.get_param(p, opcode, 0, p[pc + 1]))
                pc += 2
            elif opcode % 100 == 99:
                break
        return p_output

    def run(self, s):
        """Parse the comma-separated program, run with input 1, and return
        the final (diagnostic) output."""
        p = [int(n) for n in s.split(",")]
        outputs = self.execute(p.copy(), 1)
        return outputs[-1]
| [
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
},
{
"point_num": 2,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (exclu... | 3 | day-05/part-1/remi.py | TPXP/adventofcode-2019 |
from bin.Settings.SettingsEntity import SettingsEntity
from bin.Settings.SettingsSerialEntity import SettingsSerialEntity
from bin.Utility.SerialDiscoverer import get_port_name
class DeviceFactoryAbstract:
    """Base class for device factories that hold a settings entity."""

    def __init__(self, settings_entity=None):
        # Bug fix: a SettingsEntity("") default in the signature is built
        # once at definition time and shared by every instance (mutable
        # default argument); create a fresh one per call instead.
        if settings_entity is None:
            settings_entity = SettingsEntity("")
        self.settings_entity = settings_entity

    def create(self, *args, **kwargs):
        """Create a device; must be overridden by subclasses."""
        # Bug fix: `raise NotImplemented()` raised a TypeError because
        # NotImplemented is a constant, not an exception type.
        raise NotImplementedError()

    def get_settings_entity(self):
        """Return the settings entity backing this factory."""
        return self.settings_entity
class DeviceManagerFactorySerialAbstract(DeviceFactoryAbstract):
    """Device factory base for devices reachable over a serial port."""

    def __init__(self, settings_entity=None):
        # Bug fix: avoid the shared, definition-time SettingsEntity("")
        # default (mutable default argument); build one per instantiation.
        if settings_entity is None:
            settings_entity = SettingsEntity("")
        DeviceFactoryAbstract.__init__(self, settings_entity)
        self.port = settings_entity.get_entry(SettingsSerialEntity.PORT)

    def port_exists(self):
        """Resolve the configured port name and store it back in settings."""
        port = get_port_name(self.port)
        self.settings_entity.add_entry(SettingsSerialEntity.PORT, port)
        return port

    def get_name(self):
        """Return a human-readable description of this factory."""
        return "Factory of {} @port: {}".format(
            self.__class__.__name__, self.port_exists())
class DeviceFactory(DeviceFactoryAbstract):
    """Factory that wraps a device-manager factory of a specific type."""

    def __init__(self, manager_factory):
        # Validate eagerly so a mismatched manager factory fails at
        # construction time rather than at first use.
        assert isinstance(manager_factory, self.get_manager_factory_type())
        self.device_manager_factory = manager_factory
        DeviceFactoryAbstract.__init__(self, manager_factory.get_settings_entity())

    def get_manager_factory_type(self):
        """Return the manager-factory class this factory accepts."""
        # Bug fix: `raise NotImplemented("")` raised a TypeError because
        # NotImplemented is not callable; NotImplementedError is correct.
        raise NotImplementedError()
| [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "every_class_has_docstring",
"question": "Does every class in this file have a docstring?",
"answer": false
},
{
... | 3 | src/bin/Devices/DeviceFactory.py | rCorvidae/OrionPI |
import torch.nn as nn
import torch.nn.functional as F
from utils.noisy_liner import NoisyLinear
from torch.nn import LayerNorm
class NoisyRNNAgent(nn.Module):
    """GRU-based agent whose action head is a noisy linear layer."""

    def __init__(self, input_shape, args):
        super(NoisyRNNAgent, self).__init__()
        self.args = args
        hidden_dim = args.rnn_hidden_dim
        self.fc1 = nn.Linear(input_shape, hidden_dim)
        self.rnn = nn.GRUCell(hidden_dim, hidden_dim)
        self.fc2 = NoisyLinear(hidden_dim, args.n_actions, True, args.device)
        if getattr(args, "use_layer_norm", False):
            self.layer_norm = LayerNorm(hidden_dim)

    def init_hidden(self):
        """Return a zeroed (1, hidden_dim) state on the model's device."""
        # new(...) allocates on the same device/dtype as fc1's weight.
        return self.fc1.weight.new(1, self.args.rnn_hidden_dim).zero_()

    def forward(self, inputs, hidden_state):
        """Compute Q-values and the next hidden state.

        Assumes *inputs* is (batch, n_agents, features); both outputs are
        reshaped back to (batch, n_agents, -1).
        """
        batch, n_agents, n_features = inputs.size()
        flat_inputs = inputs.view(-1, n_features)
        activations = F.relu(self.fc1(flat_inputs), inplace=True)
        prev_hidden = hidden_state.reshape(-1, self.args.rnn_hidden_dim)
        next_hidden = self.rnn(activations, prev_hidden)
        if getattr(self.args, "use_layer_norm", False):
            q_values = self.fc2(self.layer_norm(next_hidden))
        else:
            q_values = self.fc2(next_hidden)
        return (q_values.view(batch, n_agents, -1),
                next_hidden.view(batch, n_agents, -1))
{
"point_num": 1,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than... | 3 | src/modules/agents/noisy_agents.py | mariuslindegaard/6.867_MARL_project |
"""
Makes python 2 behave more like python 3.
Ideally we import this globally so all our python 2 interpreters will assist in spotting errors early.
"""
# future imports are harmless if they implement behaviour that already exists in the current interpreter version
from __future__ import absolute_import, division, print_function
import sys
from collections import OrderedDict
if sys.version_info.major == 2:
    # Override dict and make items() behave like iteritems() to retain performance
    class dict(dict):
        """dict whose view methods return lazy iterators, as in Python 3.

        Shadowing the builtin name is deliberate: modules importing from
        this module transparently get the Python-3-like behaviour.
        """
        def items(self):
            return super(dict, self).iteritems()
        def keys(self):
            return super(dict, self).iterkeys()
        def values(self):
            return super(dict, self).itervalues()
    class OrderedDict(OrderedDict):
        """OrderedDict with lazy, Python-3-style view methods."""
        def items(self):
            return super(OrderedDict, self).iteritems()
        def keys(self):
            return super(OrderedDict, self).iterkeys()
        def values(self):
            return super(OrderedDict, self).itervalues()
    # Override range with xrange to mimic python3's range
    range = xrange
    # cStringIO stands in for Python 3's io module for in-memory streams.
    import cStringIO as io
else:
    # On Python 3 the py2-only names simply alias their unified successors.
    unicode = str
    long = int
    import io
# Make the typing names (plus a generic TypeVar T) available when the
# `typing` module exists; on interpreters without it, annotations are
# simply unavailable.
try:
    from typing import *
    T = TypeVar('T')
except ImportError:
    # Bug fix: the original bare `except:` also swallowed SystemExit,
    # KeyboardInterrupt and any unrelated error; only a missing `typing`
    # module is expected here.
    pass
| [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "every_class_has_docstring",
"question": "Does every class in this file have a docstring?",
"answer": false
},
{
... | 3 | errorCheckTool/py23.py | peerke88/error-check-tool |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.