source string | points list | n_points int64 | path string | repo string |
|---|---|---|---|---|
from django.db import models
from django.contrib.auth.models import (
AbstractBaseUser,
PermissionsMixin,
BaseUserManager
)
class UserProfileManager(BaseUserManager):
    """Manager for user profiles.

    Provides the helpers Django uses to create regular users and superusers
    for a model keyed by email instead of username.
    """

    def create_user(self, email, name, password=None):
        """Create and save a new user profile.

        Args:
            email: unique login email; required.
            name: display name.
            password: optional; when None, set_password stores an unusable
                password, so the user cannot authenticate with one.

        Raises:
            ValueError: if email is falsy.
        """
        if not email:
            raise ValueError('Users must have an email address.')
        # Normalize lower-cases the domain part for consistent lookups.
        email = self.normalize_email(email)
        user = self.model(email=email, name=name)
        # Hash the password (never stored in clear text).
        user.set_password(password)
        user.save(using=self._db)
        return user

    def create_superuser(self, email, name, password):
        """Create and save a new superuser with staff/superuser flags set."""
        user = self.create_user(email, name, password)
        user.is_superuser = True
        user.is_staff = True
        user.save(using=self._db)
        return user
class UserProfile(AbstractBaseUser, PermissionsMixin):
    """Database model for users in the system, authenticated by email."""

    email = models.EmailField(max_length=255, unique=True)
    name = models.CharField(max_length=255)
    is_active = models.BooleanField(default=True)
    is_staff = models.BooleanField(default=False)

    # Custom manager so createsuperuser/management commands work.
    objects = UserProfileManager()

    # Authenticate with email rather than Django's default username field.
    USERNAME_FIELD = 'email'
    REQUIRED_FIELDS = ['name']

    def get_full_name(self):
        """Retrieve full name of user."""
        return self.name

    def get_short_name(self):
        """Retrieve short name of user."""
        return self.name

    def __str__(self):
        """Return string representation of user (the login email)."""
        return self.email
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": true
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer":... | 3 | profiles_api/models.py | steve-carey/profiles-rest-api |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkdrds.endpoint import endpoint_data
class DescribeDrdsSqlAuditStatusRequest(RpcRequest):
    """Auto-generated RPC request for Drds DescribeDrdsSqlAuditStatus (2019-01-23).

    Follows the Aliyun SDK code-generator layout; keep edits minimal.
    """

    def __init__(self):
        RpcRequest.__init__(self, 'Drds', '2019-01-23', 'DescribeDrdsSqlAuditStatus', 'drds')
        self.set_method('POST')
        # endpoint_map / endpoint_regional only exist on newer core versions,
        # hence the hasattr guards.
        if hasattr(self, "endpoint_map"):
            setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
        if hasattr(self, "endpoint_regional"):
            setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())

    def get_DrdsInstanceId(self):
        """Return the DrdsInstanceId query parameter (None if unset)."""
        return self.get_query_params().get('DrdsInstanceId')

    def set_DrdsInstanceId(self, DrdsInstanceId):
        """Set the DrdsInstanceId query parameter."""
        self.add_query_param('DrdsInstanceId', DrdsInstanceId)
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},... | 3 | aliyun-python-sdk-drds/aliyunsdkdrds/request/v20190123/DescribeDrdsSqlAuditStatusRequest.py | yndu13/aliyun-openapi-python-sdk |
import wx
class MyPanel(wx.Panel):
    """Panel that reports button-click events as they propagate upward."""

    def __init__(self, *args, **kw):
        super(MyPanel, self).__init__(*args, **kw)
        self.Bind(wx.EVT_BUTTON, self.OnButtonClicked)

    def OnButtonClicked(self, e):
        # Bug fix: this handler lives on the Panel, but the original message
        # (copy-pasted from MyButton) claimed the event came from Button.
        print('Evénement est déclanché de la classe Panel')
        # e.Skip()  # uncomment to let the event continue up to the Frame
class MyButton(wx.Button):
    """Button that handles its own click event first."""

    def __init__(self, *args, **kw):
        super(MyButton, self).__init__(*args, **kw)
        self.Bind(wx.EVT_BUTTON, self.OnButtonClicked)

    def OnButtonClicked(self, e):
        print('Evénement est déclanché de la classe Button')
        # e.Skip()  # uncomment to propagate the click to the parent Panel
class Example(wx.Frame):
    """Top-level frame demonstrating wx event propagation."""

    def __init__(self, *args, **kw):
        super(Example, self).__init__(*args, **kw)
        self.InitUI()

    def InitUI(self):
        """Build the UI: a panel containing a single 'Ok' button."""
        mpnl = MyPanel(self)
        MyButton(mpnl, label='Ok', pos=(15, 15))
        self.Bind(wx.EVT_BUTTON, self.OnButtonClicked)
        self.SetTitle('M2I & MQL - Propagate event')
        self.Centre()

    def OnButtonClicked(self, e):
        print('Evénement est déclanché de la classe Frame')
        # Frame is the last stop; Skip() is harmless here but keeps the
        # propagation pattern consistent with the other handlers.
        e.Skip()
def main():
    """Create the wx application and show the event-propagation demo frame."""
    app = wx.App()
    ex = Example(None)
    ex.Show()
    app.MainLoop()


if __name__ == '__main__':
    main()
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding sel... | 3 | wxPython/wxPython examples/eventpropagate.py | otmanabdoun/IHM-Python |
from ... import options as opts
from ...charts.chart import Chart
from ...commons.types import Numeric, Optional, Sequence, Union
from ...globals import ChartType
class Map(Chart):
    """
    <<< Map >>>

    Map are mainly used for visualization of geographic area data.
    """

    def __init__(self, init_opts: opts.InitOpts = opts.InitOpts()):
        # NOTE(review): the default InitOpts() instance is created once at
        # definition time and shared across calls — this matches the wider
        # pyecharts code base but is worth confirming.
        super().__init__(init_opts=init_opts)

    def add(
        self,
        series_name: str,
        data_pair: Sequence,
        maptype: str = "china",
        *,
        is_selected: bool = True,
        is_roam: bool = True,
        center: Optional[Sequence] = None,
        zoom: Optional[Numeric] = 1,
        name_map: Optional[dict] = None,
        symbol: Optional[str] = None,
        is_map_symbol_show: bool = True,
        label_opts: Union[opts.LabelOpts, dict] = opts.LabelOpts(),
        tooltip_opts: Union[opts.TooltipOpts, dict, None] = None,
        itemstyle_opts: Union[opts.ItemStyleOpts, dict, None] = None,
    ):
        """Add a map series.

        data_pair is a sequence of (name, value) pairs; maptype selects the
        registered geo JS asset. Returns self so calls can be chained.
        """
        # The map type doubles as the name of the JS dependency to load.
        self.js_dependencies.add(maptype)
        data = [{"name": n, "value": v} for n, v in data_pair]
        self._append_legend(series_name, is_selected)
        self.options.get("series").append(
            {
                "type": ChartType.MAP,
                "name": series_name,
                "symbol": symbol,
                "label": label_opts,
                "mapType": maptype,
                "data": data,
                "roam": is_roam,
                "center": center,
                "zoom": zoom,
                "nameMap": name_map,
                "showLegendSymbol": is_map_symbol_show,
                "tooltip": tooltip_opts,
                "itemStyle": itemstyle_opts,
            }
        )
        return self
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": false
},
{
"point_num": 2,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
... | 3 | data analysis/pyecharts/pyecharts/charts/basic_charts/map.py | mrxgavin/Coursework |
from functools import wraps
def double_return(fn):
    """Decorator: call *fn* and return its single result twice, in a list."""
    @wraps(fn)
    def wrapper(*args, **kwargs):
        value = fn(*args, **kwargs)
        # Two references to the same result object, as a two-element list.
        return [value] * 2
    return wrapper
@double_return
def add(x, y):
    """Return x + y (the decorator doubles it into a list)."""
    return x + y


print(add(1, 2))  # [3, 3]


@double_return
def greet(name):
    """Return a greeting string (the decorator doubles it into a list)."""
    return "Hi, I'm " + name


print(greet("Norberto Sánchez-Dichi"))
{
"point_num": 1,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": true
},
{
"point_num": 2,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (exc... | 3 | Python3/Exercises/DoubleReturn/double_return.py | norbertosanchezdichi/TIL |
class Verifications(object):
    """Match free-form user input against keyword-tagged functionalities."""

    def __init__(self):
        pass

    def correct_functionality(self, user_input, keywords, functionalities):
        """Return the functionality whose keywords occur most in user_input.

        Mutates each functionality dict in place, resetting and refilling its
        'keywords_found' list. Returns None when no keyword matched at all.
        """
        # Reset the per-functionality match lists.
        for func in functionalities:
            func['keywords_found'] = []

        # Attribute each matching keyword to its owning functionality
        # (id_function is 1-based, the list is 0-based).
        for kw in keywords:
            if kw['value'] in user_input:
                functionalities[kw['id_function'] - 1]['keywords_found'].append(kw)

        # Keep the first functionality with the strictly highest match count.
        best = functionalities[0]
        for candidate in functionalities[1:]:
            if len(candidate['keywords_found']) > len(best['keywords_found']):
                best = candidate

        return best if best['keywords_found'] else None
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": false
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true... | 3 | app/modules/verifications.py | PaulloClara/cav |
import DistributedElevator
import DistributedBossElevator
from ElevatorConstants import *
from direct.directnotify import DirectNotifyGlobal
from toontown.toonbase import TTLocalizer
class DistributedBBElevator(DistributedBossElevator.DistributedBossElevator):
    """Client-side Bossbot HQ boss elevator."""

    def __init__(self, cr):
        DistributedBossElevator.DistributedBossElevator.__init__(self, cr)
        self.type = ELEVATOR_BB
        self.countdownTime = ElevatorData[self.type]['countdown']
        self.elevatorPoints = BossbotElevatorPoints

    def setupElevator(self):
        """Load the BB elevator model, locate its doors, place it in the hood."""
        geom = base.cr.playGame.hood.loader.geom
        self.elevatorModel = loader.loadModel('phase_12/models/bossbotHQ/BB_Elevator')
        # Door node names vary between model exports; try both spellings.
        self.leftDoor = self.elevatorModel.find('**/left-door')
        if self.leftDoor.isEmpty():
            self.leftDoor = self.elevatorModel.find('**/left_door')
        self.rightDoor = self.elevatorModel.find('**/right-door')
        if self.rightDoor.isEmpty():
            self.rightDoor = self.elevatorModel.find('**/right_door')
        locator = geom.find('**/elevator_locator')
        self.elevatorModel.reparentTo(locator)
        # Deliberately skips DistributedBossElevator's override and runs the
        # generic DistributedElevator setup instead.
        DistributedElevator.DistributedElevator.setupElevator(self)

    def getDestName(self):
        """Return the localized destination name for this elevator."""
        return TTLocalizer.ElevatorBossBotBoss
| [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},... | 3 | toontown/building/DistributedBBElevator.py | AnonymousDeveloper65535/open-toontown |
from locust import HttpUser, task, events
from locust_plugins import jmeter_listener
class DemoBlazeUser(HttpUser):
    """Minimal locust user that repeatedly loads the demoblaze home page."""
    host = "https://www.demoblaze.com"

    @task
    def t(self):
        self.client.get("/")


@events.init.add_listener
def on_locust_init(environment, **kwargs):
    """Attach the JMeter-format results listener when locust starts up."""
    jmeter_listener.JmeterListener(env=environment, testplan="examplePlan")
| [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},... | 3 | examples/jmeter_listener_example.py | Maffey/locust-plugins |
def binary_search(a, k, low, high):
    """Recursively search sorted list *a* for *k* within [low, high].

    Returns an index where a[index] == k, or -1 when k is absent.

    Fixes over the original:
    - an empty range (low > high, e.g. an empty list) now returns -1; the
      original evaluated a[mid] with mid == -1, misreading the last element
      or raising IndexError on an empty list;
    - when a[mid] > k the recursion shrinks to [low, mid - 1] (mid is
      already ruled out), removing the need for the fragile slice-length
      base case and the unreachable trailing else branch.
    """
    if low > high:  # empty range — k cannot be present
        return -1
    mid = (low + high) // 2
    if a[mid] == k:
        return mid
    if a[mid] > k:
        return binary_search(a, k, low, mid - 1)
    return binary_search(a, k, mid + 1, high)
def binary_search_v2(a, k):
    """Iteratively search sorted list *a* for *k*; return its index or -1.

    Fixes an infinite loop in the original: when a[mid] > k it set
    high = mid instead of mid - 1, so a one-element window whose value
    exceeded k never shrank (e.g. searching for 1 in [0, 2, ...] looped
    forever). mid is already known not to hold k, so it is excluded.
    """
    low, high = 0, len(a) - 1
    while low <= high:
        mid = (low + high) // 2
        if a[mid] == k:
            return mid
        if a[mid] > k:
            high = mid - 1
        else:
            low = mid + 1
    return -1
if __name__ == "__main__":
    # Smoke test — binary search requires sorted input.
    a = [0, 2, 3, 4, 5, 6, 7, 8, 9, 18]
    # a = sorted(a)
    # print(a)
    # print(binary_search(a,72, 0, len(a)-1))
    print(binary_search_v2(a, 18))
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding ... | 3 | code-everyday-challenge/n196_repeat.py | ved93/deliberate-practice-challenges |
import torch
import torch.nn as nn
class LayerNorm(nn.Module):
    """
    Layer Normalization (https://arxiv.org/abs/1607.06450).

    Normalizes the last dimension toward zero mean / unit std, then applies
    a learned affine transform (gamma scale, beta shift).
    """

    def __init__(self, hidden_size, eps=1e-6):
        super(LayerNorm, self).__init__()
        self.eps = eps
        self.gamma = nn.Parameter(torch.ones(hidden_size))
        self.beta = nn.Parameter(torch.zeros(hidden_size))

    def forward(self, x):
        mu = x.mean(-1, keepdim=True)
        # torch.std defaults to the unbiased (n-1) estimator here.
        sigma = x.std(-1, keepdim=True)
        normalized = self.gamma * (x - mu) / (sigma + self.eps)
        return normalized + self.beta
class T5LayerNorm(nn.Module):
    """
    T5-style layer norm: RMS scaling only — no bias and no mean subtraction.
    """

    def __init__(self, hidden_size, eps=1e-6):
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        # Accumulate the mean square in float32 for numerical stability,
        # even when the activations are half precision.
        squared = hidden_states.to(torch.float32) ** 2
        variance = squared.mean(-1, keepdim=True)
        inv_rms = torch.rsqrt(variance + self.variance_epsilon)
        scaled = hidden_states * inv_rms
        # Cast back to the parameter dtype before applying the gain.
        return self.weight * scaled.type_as(self.weight)
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answ... | 3 | uer/layers/layer_norm.py | krevas/ET-BERT |
from collections import UserDict
from miniworld.model.network.connections.JSONEncoder import JSONStrMixin
# TODO: REMOVE
class NodeDictMixin:
    """Mixin for mapping types keyed by (EmulationNode, EmulationNode) pairs."""

    #########################################
    # Structure Converting
    #########################################

    def to_ids(self):
        """
        Convert all :py:class:`.EmulationNode` keys to their ids.

        Returns
        -------
        UserDict
            A new instance of the same class with every EmulationNode in the
            key pairs replaced by its ``id``.

        Examples
        --------
        >>> x = NodeDict({(EmulationNode(1), EmulationNode(2)): {'loss': 0.5, 'bandwidth': 500}})
        >>> x.to_ids()
        {(1, 2): {'loss': 0.5, 'bandwidth': 500}}
        """
        # Assumes self.items() yields ((node, node), value) pairs — the host
        # class must be a mapping (e.g. NodeDict below).
        converted_to_ids = {(emu_node_x.id, emu_node_y.id): val_inner
                            for (emu_node_x, emu_node_y), val_inner in self.items()}
        return self.__class__(converted_to_ids)
class NodeDict(JSONStrMixin, UserDict):
    """UserDict of node-pair connection data with a JSON string representation."""
    pass
| [
{
"point_num": 1,
"id": "every_class_has_docstring",
"question": "Does every class in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": false
},
{... | 3 | miniworld/model/network/connections/NodeDictMixin.py | miniworld-project/miniworld_core |
import unittest
from letter_n_gram_list import letter_n_gram_list
from inc_freq import inc_freq
class TestLetterNGramList(unittest.TestCase):
    """Table-driven tests for letter_n_gram_list."""

    def test_letter_n_gram_list(self):
        # Input strings, including multi-byte Japanese text and empties.
        ss = [
            "今日は",
            "今日は",
            "今日は",
            "今日は",
            "abcd",
            "abcd",
            "abcd",
            "",
            ""
        ]
        # n-gram sizes, paired element-wise with ss.
        ns = [
            1,
            2,
            3,
            4,
            3,
            4,
            5,
            1,
            2
        ]
        # Expected outputs; n longer than the string yields [].
        expecteds = [
            ["今", "日", "は"],
            ["今日", "日は"],
            ["今日は"],
            [],
            ["abc", "bcd"],
            ["abcd"],
            [],
            [],
            []
        ]
        for s, n, expected in zip(ss, ns, expecteds):
            self.assertEqual(expected, letter_n_gram_list(s, n))


class TestIncFreq(unittest.TestCase):
    """Tests for inc_freq, which mutates the counts dict in place."""

    def test_inc_freq(self):
        ss = [
            "Is this a pen?",
            "Hello."
        ]
        d = {"a": 0, "e": 1, "s": 2, ".": 3}
        # Counts accumulate across calls because d is shared between them.
        expecteds = [
            {"a": 1, "e": 2, "s": 4, ".": 3},
            {"a": 1, "e": 3, "s": 4, ".": 4},
        ]
        for s, expected in zip(ss, expecteds):
            inc_freq(s, d)
            self.assertEqual(expected, d)


if __name__ == "__main__":
    unittest.main()
| [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": false
},
{
"point_num": 2,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answ... | 3 | src/pre3/test.py | shimech/higashi |
import yaml
import os
# Directory containing this file
filePath = os.path.dirname(__file__)
# Real (symlink-resolved) directory of this file
fileNamePath = os.path.split(os.path.realpath(__file__))[0]
# Full path of the YAML configuration file
yamlPath = os.path.join(fileNamePath, 'config.yml')
# encoding='utf-8' avoids mojibake when the config contains Chinese text
yml_read = yaml.load(open(yamlPath, 'r', encoding='utf-8').read(), Loader=yaml.FullLoader)
"""使用装饰器创建类"""
def Blive(cls):
    """Class decorator: copy every key of the 'BLive' config section onto cls."""
    Dict = yml_read['BLive']
    for name, value in Dict.items():
        setattr(cls, name, value)
    return cls


def database(cls):
    """Class decorator: copy every key of the 'database' config section onto cls."""
    Dict = yml_read['database']
    for name, value in Dict.items():
        setattr(cls, name, value)
    return cls


def api(cls):
    """Class decorator: copy every key of the 'api' config section onto cls."""
    Dict = yml_read['api']
    for name, value in Dict.items():
        setattr(cls, name, value)
    return cls
@Blive
class BLive(object):
    """Holds the 'BLive' config values as class attributes."""
    pass


@database
class database(object):
    # NOTE(review): this class shadows the `database` decorator function
    # defined above; later code can no longer reach the function by name.
    pass


@api
class api(object):
    # NOTE(review): shadows the `api` decorator function, same as above.
    pass
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": false
},
{
"point_num": 2,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
}... | 3 | config.py | retroxz/bilidm |
from linptech.crc8 import crc8
import logging
class Packet(object):
    '''
    Base class for Packet.
    Mainly used for for packet generation and
    Packet.parse_msg(buf) for parsing message.
    parse_msg() returns subclass, if one is defined for the data type.

    Packets are handled as hex strings throughout this class.
    '''

    def __init__(self, data=None, optional="00"*7):
        # data / optional are hex-encoded payload strings.
        if data is None:
            logging.warning('Packet.data is None')
        else:
            self.data = data
        if optional is None:
            logging.info('Packet.optional is None.')
        else:
            self.optional = optional

    @staticmethod
    def check(packet):
        """
        check packet with crc

        Layout (hex chars): "55" sync byte, [2:10] header, [10:12] header
        CRC8, [12:-2] payload, [-2:] payload CRC8. The "550" prefix also
        pins the first header nibble to 0 — presumably the length high
        nibble; TODO confirm against the Linptech protocol spec.
        """
        if packet.startswith("550") and \
           crc8(packet[2:10]) == packet[10:12] and \
           crc8(packet[12:-2]) == packet[-2:]:
            return True
        else:
            return False

    @staticmethod
    def parse(packet):
        """
        parse an packet to data and optional for receive

        Returns (data, optional) on success, or None when the CRC check or
        slicing fails.
        """
        if Packet.check(packet):
            try:
                data_len = int(packet[4:6], 16)  # payload length in bytes
                data = packet[12:12 + data_len*2]
                optional = packet[12 + data_len*2:26 + data_len*2]
                return data, optional
            except Exception as e:
                logging.error("parse packet wrong:%s", e)
                return
        else:
            logging.error("packet is invalid")
            return

    @staticmethod
    def create(data=None, optional="00"*7):
        """
        Creates an packet ready for sending.
        Uses data and optional.

        Header is length + type marker "0701"; header and payload each get
        their own CRC8.
        """
        try:
            data_len = "{0:>02}".format(hex(int(len(data)/2))[2:])
            m1 = "00" + data_len + "0701"
            m2 = data + optional
            packet = "55" + m1 + crc8(m1) + m2 + crc8(m2)
            return packet
        except Exception as e:
            logging.error("create packet wrong:%s", e)
            return
if __name__ == "__main__":
    # Manual smoke test: build a packet from a sample payload.
    logging.getLogger().setLevel(logging.INFO)
    data = "1f8000004581020101"
    Packet.create(data)
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding se... | 3 | linptech/packet.py | yangguozhanzhao/linptech |
def tipleft():
    """Tip the bin left: pulse the motor forward, pause, then reverse.

    Durations are hand-calibrated; the 0.435 s return pulse differs from
    tipright's 0.4 s — presumably compensating for motor asymmetry; confirm
    on hardware before changing.
    """
    from time import sleep
    import RPi.GPIO as GPIO
    GPIO.setmode(GPIO.BCM)
    GPIO.setwarnings(False)
    forward = 18   # BCM pin driving forward rotation
    backward = 17  # BCM pin driving backward rotation
    GPIO.setup(forward, GPIO.OUT)
    GPIO.setup(backward, GPIO.OUT)
    GPIO.output(forward, 1)
    sleep(0.3)
    GPIO.output(forward, 0)
    sleep(1)
    GPIO.output(backward, 1)
    sleep(0.435)
    GPIO.output(backward, 0)
def tipright():
    """Tip the bin right: pulse the motor backward, pause, then forward.

    Mirror of tipleft with the directions swapped; see tipleft for notes on
    the calibrated sleep durations.
    """
    from time import sleep
    import RPi.GPIO as GPIO
    GPIO.setmode(GPIO.BCM)
    GPIO.setwarnings(False)
    forward = 18   # BCM pin driving forward rotation
    backward = 17  # BCM pin driving backward rotation
    GPIO.setup(forward, GPIO.OUT)
    GPIO.setup(backward, GPIO.OUT)
    GPIO.output(backward, 1)
    sleep(0.3)
    GPIO.output(backward, 0)
    sleep(1)
    GPIO.output(forward, 1)
    sleep(0.4)
    GPIO.output(forward, 0)


# Manual test: runs as soon as the module is executed.
tipleft()
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": fals... | 3 | models(RaspberryPI)/research/object_detection/motortest2.py | Caitmaire/EcoBinTesting |
# -*- coding: utf-8 -*-
"""
Copyright 2020 Giuliano Franca
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
====================================================================================================
How to use:
* Run this script using either Python2.x or Python3.x
Requirements:
* Maya 2018 or above.
This code supports Pylint. Rc file in project.
"""
import maya.cmds as cmds
import gfTools.gfCore as gfCore
def launchMayaNodes():
    """Launch gfToolsNodes for Autodesk Maya."""
    # Pick the plug-in binary matching the host OS (.mll/.so/.bundle).
    if gfCore.platform() == "Windows":
        cmds.loadPlugin("gfToolsNodes.mll")
    elif gfCore.platform() == "Linux":
        cmds.loadPlugin("gfToolsNodes.so")
    else:
        cmds.loadPlugin("gfToolsNodes.bundle")


def launchGFUtilitiesBeltMaya():
    """Launch gfUtilitiesBelt for Autodesk Maya."""
    # pylint: disable=import-error, undefined-variable
    from gfUtilitiesBelt import run
    # NOTE(review): bare `reload` is a Python 2 builtin; under Python 3 this
    # raises NameError — importlib.reload would be needed if Maya runs py3.
    reload(run)
    run.main()
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true... | 3 | scripts/maya/gfToolsLauncher.py | giulianofranca/gfTools |
#! /opt/stack/bin/python
# @copyright@
# Copyright (c) 2006 - 2019 Teradata
# All rights reserved. Stacki(r) v5.x stacki.com
# https://github.com/Teradata/stacki/blob/master/LICENSE.txt
# @copyright@
import crypt
import os
import random
import time
import string
class Password:
    """Helpers for random passwords, SHA-512 crypt salts, and crypt hashes."""

    def get_rand(self, num_bytes=16, choices=string.ascii_letters + string.digits):
        """Return num_bytes random characters drawn from choices."""
        picked = []
        for _ in range(num_bytes):
            # Note: The `getrandom` system call will never block once the entropy
            # pool has been initialized, which seems to happen before any of our
            # code would ever run, probably due to only needing to collect 4096 bits
            # of entropy. The RDRAND x86 instruction has been available since 2012
            # and the Linux kernel will use it to initialize the entropy pool.
            try:
                # Re-seed per character with 64 bits of OS entropy,
                # non-blocking.
                random.seed(os.getrandom(8, flags=os.GRND_NONBLOCK))
            except BlockingIOError:
                # Blocked getting entropy — fall back to a nanosecond clock.
                random.seed(int(time.time() * pow(10, 9)))
            picked.append(random.choice(choices))
        return ''.join(picked)

    def get_salt(self):
        """Return a SHA-512 crypt(3) salt: '$6$' plus 16 random characters."""
        return '$6${}'.format(
            self.get_rand(choices=string.ascii_letters + string.digits + './')
        )

    def get_cleartext_pw(self, c_pw=None):
        """Return c_pw unchanged, or a fresh random password when falsy."""
        return c_pw if c_pw else self.get_rand()

    def get_crypt_pw(self, c_pw=None):
        """Return the crypt(3) hash of c_pw (random password when omitted)."""
        return crypt.crypt(self.get_cleartext_pw(c_pw), self.get_salt())
| [
{
"point_num": 1,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 2,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/c... | 3 | common/src/stack/pylib/stack/password.py | sammeidinger/stack |
# -*- coding: utf-8 -*-
import irc3
from irc3.plugins.command import command
from random import choice
@irc3.plugin
class Plugin(object):
def __init__(self, bot):
self.bot = bot
@command(permission=None)
def flip(self, mask, target, args):
"""Flip a coin
%%flip
"""
yield choice(['Heads', 'Tails'])
| [
{
"point_num": 1,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
},
{
"point_num": 2,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a ty... | 3 | bangbot/plugins/flip.py | Asara/bangbot3 |
class Ant(object):
    """An ant agent for ACO: tracks its current/previous position and tour.

    Bug fix: `tour` (and `last_position`) were class attributes, so the tour
    list was a single object shared by every Ant instance — add_step on one
    ant polluted all others until clear_tour rebound it. Both are now
    per-instance state initialized in __init__.
    """

    def __init__(self, name, initial_position):
        super(Ant, self).__init__()
        self.name = name
        self.__position = initial_position
        self.last_position = None
        self.tour = []

    @property
    def position(self):
        return self.__position

    @position.setter
    def position(self, position):
        # Remember where the ant came from before moving it.
        self.last_position = self.__position
        self.__position = position

    def add_step(self, step):
        self.tour.append(step)

    def clear_tour(self):
        self.tour = []

    def __repr__(self):
        return f'<Ant {self.name}: {self.position}>'
| [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": ... | 3 | src/ant.py | andaviaco/aco |
from datetime import datetime, timedelta
class Report():
    """Builds chart-ready report data (labels + values) from annotation counts."""

    def __init__(self, dbm, data):
        # dbm: database manager exposing count_two_d_annos_per_* queries.
        # data: request dict; only reportType and peId are mandatory.
        self.now = datetime.now()
        self.today = self.now.strftime('%Y-%m-%d')
        self.dbm = dbm
        self.report_type = data['reportType']
        self.pe_id = data['peId']
        # Optional filters default to None (no filter) / today.
        self.date_from = None
        if 'dateFrom' in data:
            self.date_from = data['dateFrom']
        self.date_to = self.today
        if 'dateTo' in data:
            self.date_to = data['dateTo']
        self.anno_type = None
        if 'annoType' in data:
            self.anno_type = data['annoType']
        self.user_id = None
        if 'userId' in data:
            self.user_id = data['userId']
        self.iteration = None
        if 'iteration' in data:
            self.iteration = data['iteration']

    def get_report(self):
        """Return {'labels': [...], 'data': [...]} for the requested type.

        Unknown report types yield empty lists.
        """
        report = dict()
        data = []
        labels = []
        if self.report_type == 'annosPerLabel':
            data, labels = self.__get_annos_per_label()
        elif self.report_type == 'annosPerDay':
            data, labels = self.__get_annos_per_day()
        #labels.append(self.today)
        report['labels'] = labels
        report['data'] = data
        return report

    def __get_annos_per_label(self):
        """Collect per-label counts; reads row[1] as label, row[2] as count."""
        data = []
        labels = []
        two_d_anno_counts = self.dbm.count_two_d_annos_per_label(
            self.pe_id, self.user_id, self.iteration, self.date_from, self.date_to)
        for row in two_d_anno_counts:
            data.append(row[2])
            labels.append(row[1])
        return data, labels

    def __get_annos_per_day(self):
        """Collect per-day counts; reads row[0] as count.

        Labels are built as '{row[3]}-{row[2]}-{row[1]}' — presumably
        year-month-day from day/month/year columns; TODO confirm the column
        order in dbm.count_two_d_annos_per_day.
        """
        data = []
        labels = []
        two_d_anno_counts = self.dbm.count_two_d_annos_per_day(
            self.pe_id, self.user_id, self.iteration, self.date_from, self.date_to)
        for row in two_d_anno_counts:
            data.append(row[0])
            labels.append('{}-{}-{}'.format(row[3], row[2], row[1]))
        return data, labels
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": ... | 3 | backend/lost/logic/report.py | Avni-Hirpara/lost |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import pytest
import solutions.day5.solver as solver
def test_solver1():
    """Part 1 example: escaping the sample jump list takes 5 steps."""
    # act
    output = solver.solver1([0, 3, 0, 1, -3])
    # assert
    assert output == 5


def test_solver1_jumps():
    """solver1 mutates its jump list in place; pin the final offsets."""
    # arrange
    jumps = [0, 3, 0, 1, -3]
    # act
    solver.solver1(jumps)
    # assert
    assert jumps == [2, 5, 0, 1, -2]


def test_solver2():
    """Part 2 example: the sample list takes 10 steps under the new rule."""
    assert solver.solver2([0, 3, 0, 1, -3]) == 10


def test_solver2_jumps():
    """solver2 mutates its jump list in place; pin the final offsets."""
    # arrange
    jumps = [0, 3, 0, 1, -3]
    # act
    solver.solver2(jumps)
    # assert
    assert jumps == [2, 3, 2, 3, -1]


# added after finding solution — regression-pin the accepted puzzle answers
def test_part1():
    assert solver.part1('solutions/day5') == 381680


def test_part2():
    assert solver.part2('solutions/day5') == 29717847
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answ... | 3 | solutions/day5/test_solver.py | NunoMCSilva/My-Advent-of-Code-2017-Solutions |
from .labels import LabelsPlugin
from electrum_blk.plugin import hook
class Plugin(LabelsPlugin):
    """Command-line variant of the labels-sync plugin."""

    @hook
    def load_wallet(self, wallet, window):
        # window is unused on the command line; just start label syncing.
        self.start_wallet(wallet)

    def on_pulled(self, wallet):
        self.logger.info('labels pulled from server')
| [
{
"point_num": 1,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than cla... | 3 | electrum_blk/plugins/labels/cmdline.py | nedcloud-blackchain/electrum-blk |
import time
import thriftpy2
from thriftpy2.utils import serialize, deserialize
from thriftpy2.protocol import TBinaryProtocolFactory, TCyBinaryProtocolFactory
addressbook = thriftpy2.load("addressbook.thrift")
def make_addressbook():
    """Build a small AddressBook fixture used by the benchmarks."""
    phone1 = addressbook.PhoneNumber()
    phone1.type = addressbook.PhoneType.MOBILE
    phone1.number = b'555-1212'
    phone2 = addressbook.PhoneNumber()
    phone2.type = addressbook.PhoneType.HOME
    phone2.number = b'555-1234'
    person = addressbook.Person()
    person.name = b"Alice"
    person.phones = [phone1, phone2]
    person.created_at = 1400000000
    ab = addressbook.AddressBook()
    ab.people = {person.name: person}
    return ab


# Pre-serialized fixture shared by the decode benchmark.
ab_encoded = serialize(make_addressbook())


def encode(n, proto_factory=TBinaryProtocolFactory()):
    """Serialize the fixture n times and print the elapsed seconds."""
    ab = make_addressbook()
    start = time.time()
    for i in range(n):
        serialize(ab, proto_factory)
    end = time.time()
    print("encode\t-> {}".format(end - start))


def decode(n, proto_factory=TBinaryProtocolFactory()):
    """Deserialize the fixture n times and print the elapsed seconds."""
    ab = addressbook.AddressBook()
    start = time.time()
    for i in range(n):
        deserialize(ab, ab_encoded, proto_factory)
    end = time.time()
    print("decode\t-> {}".format(end - start))


def main():
    """Run encode/decode benchmarks for both protocol implementations."""
    n = 100000
    print("binary protocol struct benchmark for {} times:".format(n))
    encode(n)
    decode(n)
    print("\ncybin protocol struct benchmark for {} times:".format(n))
    encode(n, TCyBinaryProtocolFactory())
    decode(n, TCyBinaryProtocolFactory())


if __name__ == "__main__":
    main()
| [
{
"point_num": 1,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written ... | 3 | benchmark/benchmark_struct.py | JonnoFTW/thriftpy2 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
class SetAppEnvConfigRequest(RpcRequest):
    """Auto-generated RPC request for Workbench-ide SetAppEnvConfig (2021-01-21).

    Follows the Aliyun SDK code-generator layout; keep edits minimal.
    """

    def __init__(self):
        RpcRequest.__init__(self, 'Workbench-ide', '2021-01-21', 'SetAppEnvConfig')
        self.set_method('POST')

    def get_DeployStage(self):  # String
        return self.get_query_params().get('DeployStage')

    def set_DeployStage(self, DeployStage):  # String
        self.add_query_param('DeployStage', DeployStage)

    def get_EnvConfigList(self):  # String
        return self.get_query_params().get('EnvConfigList')

    def set_EnvConfigList(self, EnvConfigList):  # String
        self.add_query_param('EnvConfigList', EnvConfigList)

    def get_AppId(self):  # Long
        return self.get_query_params().get('AppId')

    def set_AppId(self, AppId):  # Long
        self.add_query_param('AppId', AppId)

    def get_CurrentOrgId(self):  # String
        return self.get_query_params().get('CurrentOrgId')

    def set_CurrentOrgId(self, CurrentOrgId):  # String
        self.add_query_param('CurrentOrgId', CurrentOrgId)
| [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
}... | 3 | aliyun-python-sdk-workbench-ide/aliyunsdkworkbench_ide/request/v20210121/SetAppEnvConfigRequest.py | yndu13/aliyun-openapi-python-sdk |
"""probing statistics as dict
Revision ID: 03ddf61b5b3c
Revises: 8b783114dc9a
Create Date: 2022-01-11 19:00:16.273260
"""
import sqlmodel.sql.sqltypes
# revision identifiers, used by Alembic.
from sqlalchemy.dialects.postgresql import JSON
from alembic import op
revision = "03ddf61b5b3c"
down_revision = "8b783114dc9a"
branch_labels = None
depends_on = None
def upgrade():
    """Widen measurement_agent.probing_statistics from a string column to JSON."""
    op.alter_column(
        "measurement_agent",
        "probing_statistics",
        existing_type=sqlmodel.sql.sqltypes.AutoString(),
        type_=JSON(),
        # Cast existing rows so PostgreSQL accepts the type change in place.
        postgresql_using="probing_statistics::json",
    )
def downgrade():
    """Revert probing_statistics back to a plain string column.

    NOTE(review): no ``postgresql_using`` cast here — presumably the
    implicit JSON -> text cast suffices; confirm against PostgreSQL.
    """
    op.alter_column(
        "measurement_agent",
        "probing_statistics",
        existing_type=JSON(),
        type_=sqlmodel.sql.sqltypes.AutoString(),
    )
| [
{
"point_num": 1,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"ans... | 3 | alembic/versions/03ddf61b5b3c_probing_statistics_as_dict.py | dioptra-io/iris |
import requests
from bs4 import BeautifulSoup
def main():
    """Demo: fetch one tutorial page and print the text of a few elements."""
    url = 'http://blog.castman.net/web-crawler-tutorial/ch1/connect.html'
    # Same three lookups as before, expressed as a loop: an existing tag,
    # the title, and a tag that does not exist (prints None).
    for tag in ('p', 'title', 'not_exist'):
        print(get_elem_text(url, tag))
def get_elem_text(url, elem):
    """Fetch *url* and return the text of the first *elem* tag.

    Returns None when the request fails, the status is not 200, or the
    element is missing from the page.

    Fix: the original bare ``except:`` swallowed *every* exception,
    including KeyboardInterrupt/SystemExit, hiding real bugs. Catch only
    the two failure modes this code can actually produce: a network/HTTP
    error from requests, and the AttributeError raised when
    ``soup.find(elem)`` returns None for a missing element.
    """
    try:
        resp = requests.get(url)
        if resp.status_code == 200:
            soup = BeautifulSoup(resp.text, 'html.parser')
            return soup.find(elem).text
    except (requests.RequestException, AttributeError):
        print('Can not reach the website or the element.')
    # Non-200 responses fall through silently, as before.
    return None
if __name__ == '__main__':
    main()  # run the demo only when executed as a script
{
"point_num": 1,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
... | 3 | ch1/hw_main.py | x24870/web-crawler |
import tensorflow as tf
import os
def _get_training_data(FLAGS):
    ''' Building the input pipeline for training and inference using TFRecords files.

    :param FLAGS: config object providing ``tf_records_train_path`` and
        ``batch_size``.
    :return: tuple of (training dataset batched by FLAGS.batch_size,
        inference dataset batched one sample at a time).
    '''
    filenames = [FLAGS.tf_records_train_path + '/' + f
                 for f in os.listdir(FLAGS.tf_records_train_path)]

    def _pipeline(batch_size):
        # Shared pipeline: parse -> shuffle -> repeat -> batch -> prefetch.
        # buffer_size=1 effectively disables shuffling (window of one element).
        dataset = tf.data.TFRecordDataset(filenames)
        dataset = dataset.map(parse)
        dataset = dataset.shuffle(buffer_size=1)
        dataset = dataset.repeat()
        dataset = dataset.batch(batch_size)
        return dataset.prefetch(buffer_size=1)

    # The original wrote this identical pipeline out twice (differing only
    # in batch size); build both from one helper instead.
    return _pipeline(FLAGS.batch_size), _pipeline(1)
def _get_test_data(FLAGS):
    ''' Building the input pipeline for test data.

    :param FLAGS: config object providing ``tf_records_test_path``.
    :return: repeating dataset of single samples (batch size 1).
    '''
    filenames=[FLAGS.tf_records_test_path+'/'+f for f in os.listdir(FLAGS.tf_records_test_path)]
    dataset = tf.data.TFRecordDataset(filenames)
    dataset = dataset.map(parse)
    # buffer_size=1 effectively disables shuffling (window of one element).
    dataset = dataset.shuffle(buffer_size=1)
    dataset = dataset.repeat()
    dataset = dataset.batch(1)
    dataset = dataset.prefetch(buffer_size=1)
    return dataset
def parse(serialized):
    ''' Parser for the TFRecords file.

    Decodes one serialized Example into a fixed-length float vector of
    3952 movie ratings (TF1-era parsing API).
    '''
    features={'movie_ratings':tf.FixedLenFeature([3952], tf.float32),
              }
    parsed_example=tf.parse_single_example(serialized,
                                           features=features,
                                           )
    movie_ratings = tf.cast(parsed_example['movie_ratings'], tf.float32)
    return movie_ratings
{
"point_num": 1,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than... | 3 | src/data/dataset.py | artem-oppermann/Deep-Autoencoders-For-Collaborative-Filtering |
#!/usr/bin/env python3
# Copyright (c) 2017 The DigiByte Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test digibyte-cli"""
from test_framework.test_framework import DigiByteTestFramework
from test_framework.util import assert_equal
class TestDigiByteCli(DigiByteTestFramework):
    """Functional test: `digibyte-cli getinfo` must match the getinfo RPC."""

    def set_test_params(self):
        # A single fresh node is enough to compare CLI and RPC output.
        self.setup_clean_chain = True
        self.num_nodes = 1

    def run_test(self):
        """Main test logic"""
        self.log.info("Compare responses from getinfo RPC and `digibyte-cli getinfo`")
        cli_get_info = self.nodes[0].cli.getinfo()
        rpc_get_info = self.nodes[0].getinfo()
        assert_equal(cli_get_info, rpc_get_info)

if __name__ == '__main__':
    TestDigiByteCli().main()
| [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
}... | 3 | test/functional/digibyte_cli.py | zejii/digibyte |
# -*- coding: utf-8 -*-
#
# This file is part of Flask-CLI
# Copyright (C) 2015 CERN.
#
# Flask-AppFactory is free software; you can redistribute it and/or
# modify it under the terms of the Revised BSD License; see LICENSE
# file for more details.
"""Flask extension to enable CLI."""
import types
from . import AppGroup
class FlaskCLI(object):
    """Flask-CLI extension.

    Initialization of the extension:

    >>> from flask import Flask
    >>> from flask_cli import FlaskCLI
    >>> app = Flask('myapp')
    >>> FlaskCLI(app)

    or alternatively using the factory pattern:

    >>> app = Flask('myapp')
    >>> ext = FlaskCLI()
    >>> ext.init_app(app)
    """

    def __init__(self, app=None):
        """Initialize the Flask-CLI."""
        if app is not None:
            self.init_app(app)

    def init_app(self, app):
        """Initialize a Flask application."""
        # Follow the Flask guidelines on usage of app.extensions
        if not hasattr(app, 'extensions'):
            app.extensions = {}
        # Guard against double initialization of the same app.
        if 'flask-cli' in app.extensions:
            raise RuntimeError("Flask-CLI application already initialized")
        app.extensions['flask-cli'] = self
        self.setup_pre10(app)

    def setup_pre10(self, app):
        """Setup Flask pre-1.0 application object."""
        # Flask >= 1.0 already ships a `cli` attribute; nothing to patch then.
        if hasattr(app, 'cli'):
            return
        from flask_cli.app import make_shell_context, shell_context_processor
        # Backport the CLI group and shell-context machinery onto the app.
        app.cli = AppGroup(app.name)
        app.shell_context_processors = []
        app.make_shell_context = types.MethodType(make_shell_context, app)
        app.shell_context_processor = types.MethodType(
            shell_context_processor, app)
| [
{
"point_num": 1,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
},
{
"point_num": 2,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (... | 3 | env/lib/python3.8/site-packages/flask_cli/ext.py | thenack/sneaky |
from typing import Any
from ..envs.complex_simplify import ComplexSimplify
from ..types import MathyEnvDifficulty, MathyEnvProblemArgs
from .mathy_gym_env import MathyGymEnv, safe_register
class GymComplexTerms(MathyGymEnv):
    """Gym environment wrapping the ComplexSimplify Mathy environment."""

    def __init__(self, difficulty: MathyEnvDifficulty, **kwargs: Any):
        # Zero-argument super() is equivalent here (Python 3).
        super().__init__(
            env_class=ComplexSimplify,
            env_problem_args=MathyEnvProblemArgs(difficulty=difficulty),
            **kwargs,
        )
class ComplexTermsEasy(GymComplexTerms):
    """ComplexSimplify problems at easy difficulty."""

    def __init__(self, **kwargs: Any):
        super().__init__(difficulty=MathyEnvDifficulty.easy, **kwargs)
class ComplexTermsNormal(GymComplexTerms):
    """ComplexSimplify problems at normal difficulty."""

    def __init__(self, **kwargs: Any):
        super().__init__(difficulty=MathyEnvDifficulty.normal, **kwargs)
class ComplexTermsHard(GymComplexTerms):
    """ComplexSimplify problems at hard difficulty."""

    def __init__(self, **kwargs: Any):
        super().__init__(difficulty=MathyEnvDifficulty.hard, **kwargs)
# Register the three difficulty variants with gym under stable environment ids.
safe_register(id="mathy-complex-easy-v0", entry_point="mathy_envs.gym:ComplexTermsEasy")
safe_register(
    id="mathy-complex-normal-v0", entry_point="mathy_envs.gym:ComplexTermsNormal"
)
safe_register(id="mathy-complex-hard-v0", entry_point="mathy_envs.gym:ComplexTermsHard")
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": false... | 3 | mathy_envs/gym/gym_complex_simplify.py | mathy/mathy_envs |
"""empty message
Revision ID: 4432129ea292
Revises:
Create Date: 2020-05-13 18:27:44.141674
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '4432129ea292'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
    """Add the nullable integer ``price`` column to ``menus``."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('menus', sa.Column('price', sa.Integer(), nullable=True))
    # ### end Alembic commands ###
def downgrade():
    """Drop the ``price`` column from ``menus``."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_column('menus', 'price')
    # ### end Alembic commands ###
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "all_return_types_annotated",
"question": "Does every function in this file have a return type annotation?",
"answe... | 3 | migrations/versions/4432129ea292_.py | Brenda-M/food-booth |
from __future__ import annotations
from json import dumps, loads
from typing import Any
import attr
from ..abc import Serializer
from ..marshalling import marshal_object, unmarshal_object
@attr.define(kw_only=True, eq=False)
class JSONSerializer(Serializer):
    """Serializer that round-trips arbitrary objects through JSON.

    Objects exposing ``__getstate__`` are marshalled into a
    ``{magic_key: [class_ref, state]}`` wrapper so they can be
    reconstructed on load; plain JSON types pass through untouched.
    """

    # Key used to tag marshalled objects inside the JSON document.
    magic_key: str = '_apscheduler_json'
    dump_options: dict[str, Any] = attr.field(factory=dict)
    load_options: dict[str, Any] = attr.field(factory=dict)

    def __attrs_post_init__(self):
        # Install the (un)marshalling hooks into the json dump/load options.
        self.dump_options['default'] = self._default_hook
        self.load_options['object_hook'] = self._object_hook

    def _default_hook(self, obj):
        # Fallback for types json cannot encode natively.
        if hasattr(obj, '__getstate__'):
            cls_ref, state = marshal_object(obj)
            return {self.magic_key: [cls_ref, state]}
        raise TypeError(f'Object of type {obj.__class__.__name__!r} is not JSON serializable')

    def _object_hook(self, obj_state: dict[str, Any]):
        # Reverse of _default_hook: rebuild tagged objects, pass others through.
        if self.magic_key in obj_state:
            ref, *rest = obj_state[self.magic_key]
            return unmarshal_object(ref, *rest)
        return obj_state

    def serialize(self, obj) -> bytes:
        return dumps(obj, ensure_ascii=False, **self.dump_options).encode('utf-8')

    def deserialize(self, serialized: bytes):
        return loads(serialized, **self.load_options)

    def serialize_to_unicode(self, obj) -> str:
        return dumps(obj, ensure_ascii=False, **self.dump_options)

    def deserialize_from_unicode(self, serialized: str):
        return loads(serialized, **self.load_options)
| [
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
},
{
"point_num": 2,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excludi... | 3 | src/apscheduler/serializers/json.py | axuy/apscheduler |
from unittest.mock import patch
from django.core.management import call_command
from django.db.utils import OperationalError
from django.test import TestCase
class CommandTest(TestCase):
    """Tests for the ``wait_for_db`` management command."""

    def test_wait_for_db_ready(self):
        """test waiting for db when db is available"""
        # Patch the connection handler so the db looks immediately available.
        with patch('django.db.utils.ConnectionHandler.__getitem__') as gi:
            gi.return_value = True
            call_command('wait_for_db')
            self.assertEqual(gi.call_count, 1)

    @patch('time.sleep', return_value=True)
    def test_wait_for_db(self, ts):
        """test waiting for db"""
        # First five lookups fail, the sixth succeeds -> six attempts total.
        with patch('django.db.utils.ConnectionHandler.__getitem__') as gi:
            gi.side_effect = [OperationalError] * 5 + [True]
            call_command('wait_for_db')
            self.assertEqual(gi.call_count, 6)
| [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": true
},... | 3 | app/core/tests/test_commands.py | jingr1986/recipie-app-api |
"""This superclass represents our get_value abstract class"""
from abc import abstractmethod
from typing import List
from utils.setting import Setting
class SettingSFA(Setting):
    '''Each setting (topology) has to implement these methods to obtain
    the SFA (separated flow analysis) bounds'''

    @abstractmethod
    def sfa_arr_bound(self, param_list: List[float]) -> float:
        """
        SFA analysis paying burst with the arrivals

        :param param_list: theta parameter
        """
        pass

    @abstractmethod
    def sfa_min_bound(self, param_list: List[float]) -> float:
        """
        SFA analysis paying burst with the min rate

        :param param_list: theta parameter
        """
        pass

    @abstractmethod
    def sfa_rate_diff_bound(self, param_list: List[float]) -> float:
        """
        SFA analysis paying burst with the min rate and rate-diff penalty

        :param param_list: theta parameter
        """
        pass

    @abstractmethod
    def sfa_ac_bound(self, param_list: List[float]) -> float:
        """
        SFA analysis using analytic combinatorics

        :param param_list: theta parameter
        """
        pass

    @abstractmethod
    def sfa_explicit(self, param_list: List[float]) -> float:
        """
        SFA explicit computation

        :param param_list: theta parameter
        """
        pass
| [
{
"point_num": 1,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docs... | 3 | src/utils/setting_sfa.py | paulnikolaus/python-mgf-calculator |
import discord
import config
import requests
client = discord.Client()

@client.event
async def on_ready():
    """Announce which configured guild the bot connected to.

    Fix: the original for/break loop fell through with the *last* guild
    when no name matched the configured one (and raised NameError when the
    bot was in no guilds at all). Use for/else so a missing guild is
    reported instead of silently printing the wrong one.
    """
    for guild in client.guilds:
        if guild.name == config.DISCORD_GUILD_NAME:
            print(
                f'{client.user} is connected to {guild.name}(id: {guild.id})'
            )
            break
    else:
        print(f'{client.user} is not a member of guild {config.DISCORD_GUILD_NAME}')
@client.event
async def on_message(message):
    """Respond to a handful of keyword commands in chat."""
    # Ignore the bot's own messages to avoid reply loops.
    if message.author == client.user:
        return
    wordbank = ['cat', 'puppy', 'bunny', 'giraffe', 'poop']
    if message.content == 'pycascade':
        response = 'Hello everyone! Welcome and have a great time!'
        await message.channel.send(response)
    elif message.content in wordbank:
        await message.channel.send("please don't use bad words")
    elif 'pokemon' in message.content:
        # input: pokemon pikachu
        # Second word is the pokemon name; queries an Azure function for weaknesses.
        pokemon = message.content.split()[1]
        req = requests.get(f"https://getpokemonweakness.azurewebsites.net/api/getweakness?pokemon={pokemon}")
        await message.channel.send(req.content)

client.run(config.DISCORD_BOT_TOKEN)
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": fals... | 3 | bot.py | ijoosong/pycascadediscordbot |
from typing import List
from functools import reduce
mapping = {"F": 0, "B": 1, "L": 0, "R": 1}
def read_input(in_file: str) -> List[str]:
    """Read newline-separated seat descriptions from *in_file*.

    Returns [] when the file cannot be opened or read.

    Fixes: the original never closed the file on the success path, and its
    IOError handler referenced ``f_desc`` before assignment — if ``open``
    itself failed, the handler raised NameError instead of returning [].
    A ``with`` block handles both.
    """
    try:
        with open(in_file, "r") as f_desc:
            return [line.replace("\n", "") for line in f_desc.readlines()]
    except IOError:
        return []
def compute_id(seat_desc: str) -> int:
    """Compute a boarding-pass seat id from a description like 'FBFBBFFRLR'.

    F/L are 0-bits and B/R are 1-bits, and since the column is exactly the
    last 3 bits, ``row * 8 + column`` equals the whole description read as
    one binary number. This replaces the manual per-character power-of-two
    accumulation (and the dependence on the module-level ``mapping`` dict)
    with a single translate + int() conversion; results are identical for
    any description length >= 3.
    """
    return int(seat_desc.translate(str.maketrans("FBLR", "0101")), 2)
in_file = "day5/in-day-5.txt"
seats = read_input(in_file)
sorted_seats_ids = sorted([compute_id(x) for x in seats])
# Our seat is the single missing id between two occupied neighbours:
# find the first gap of size > 1 in the sorted ids.
for i in range(len(sorted_seats_ids) - 1):
    if sorted_seats_ids[i + 1] - sorted_seats_ids[i] != 1:
        print(sorted_seats_ids[i] + 1)
        break
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "all_return_types_annotated",
"question": "Does every function in this file have a return type annotation?",
"answe... | 3 | day5/day5-part2.py | GeorgianBadita/advent-of-code-2020 |
#!/bin/python
from deap import tools
from deap import base
import promoterz
import statistics
class Locale():
    """One evolutionary locale (island): owns a population, statistics and
    a hall of fame, and advances one epoch per run() call via the injected
    ``loop`` callable."""

    def __init__(self, World, name, position, loop):
        self.World = World
        self.name = name
        self.EPOCH = 0
        self.position = position
        self.EvolutionStatistics = []
        # Keep the 30 best individuals ever seen in this locale.
        self.HallOfFame = tools.HallOfFame(30)
        self.extratools = promoterz.evolutionHooks.getLocaleEvolutionToolbox(
            World, self
        )
        # GENERATION METHOD SELECTION;
        # to easily employ various GA algorithms,
        # this base EPOCH processor loads a GenerationMethod file,
        # which should contain a genToolbox function to generate
        # fully working DEAP toolbox, and a reconstructTradeSettings
        # function to convert parameters from individue to usable strategy Settings;
        # Check promoterz/representation;
        #genconf.Strategy = Strategy # ovrride strat defined on settings if needed;
        # --initial population
        self.population = World.tools.population(World.genconf.POP_SIZE)
        self.lastEvaluation = None
        self.lastEvaluationOnSecondary = None
        # --INIT STATISTICS;
        self.stats = statistics.getStatisticsMeter()
        self.InitialBestScores, self.FinalBestScores = [], []
        self.POP_SIZE = World.genconf.POP_SIZE
        self.loop = loop

    def run(self):
        """Run one evolutionary epoch of this locale."""
        print(self.name)
        self.loop(self.World, self)
        self.EPOCH += 1
| [
{
"point_num": 1,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than... | 3 | promoterz/locale.py | caux/japonicus |
from ..core import maxsum
from ..cli import argparse
class Main:
    """CLI driver: parses arguments and runs the max-sum computation.

    Note: ``setArgs`` is camelCase; it is part of the public interface of
    this class (called from main() below) and is kept as-is.
    """

    def __init__(self):
        self.args = None  # raw argv list, injected via setArgs()

    def setArgs(self,args):
        """Store the raw command-line arguments for run()."""
        self.args = args

    def run(self):
        """Parse self.args, run MaxSum and print the result."""
        parser = argparse.MaxSumArgparse()
        args = parser.parse(self.args)
        p = maxsum.MaxSum()
        p.setIntegers(args.integers)
        p.setAccumulate(args.accumulate)
        print(p.run())
def main(args):
    """Convenience entry point: build a Main and run it with *args*."""
    # NOTE: local name `main` shadows this function inside its own body;
    # harmless here since the function is not called recursively.
    main = Main()
    main.setArgs(args)
    main.run()

if __name__ == '__main__':
    import sys
    main(sys.argv[1:])
| [
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"ans... | 3 | maxsum/main/main.py | arunstiwari/python-docker-demo |
# -*- coding: utf-8 -*-
from sqlalchemy import event, exc, select
from sqlalchemy.engine import Connection, Engine
def pessimistic_connection_handling(some_engine: Engine) -> None:
    """Install pessimistic disconnect handling on *some_engine*.

    Every engine-level connect runs ``SELECT 1`` first; if the pooled
    connection turns out to be stale, it is re-validated transparently.

    Fix: the retry path called ``connection.scalar(select[1])`` —
    subscripting the ``select`` function instead of calling it — which
    raised TypeError instead of re-running the ping.
    """
    @event.listens_for(some_engine, 'engine_connect')
    def ping_connection(
        connection: Connection, branch: bool
    ) -> None:
        if branch:
            # 'branch' refers to a sub-connection of a connection;
            # we don't want to bother pinging on these.
            return

        # turn off 'close with result'. This flag is only used with
        # 'connectionless' execution, otherwise will be False in any case.
        save_should_close_with_result = connection.should_close_with_result
        connection.should_close_with_result = False

        try:
            # run a SELECT 1. use a core select() so that
            # the SELECT of a scalar value without a table is
            # appropriately formatted for the backend.
            connection.scalar(select([1]))
        except exc.DBAPIError as err:
            # catch SQLAlchemy's DBAPIError, which is a wrapper
            # for the DBAPI's exception. It includes a .connection_invalidated
            # attribute which specifies if this connection is a 'disconnect'
            # condition, which is based on inspection of the original exception
            # by the dialect in use.
            if err.connection_invalidated:
                # run the same SELECT again - the connection will re-validate
                # itself and establish a new connection. The disconnect detection
                # here also causes the whole connection pool to be invalidated
                # so that all stale connections are discarded.
                connection.scalar(select([1]))  # BUG FIX: was `select[1]`
            else:
                raise
        finally:
            # restore 'close with result'.
            connection.should_close_with_result = save_should_close_with_result
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": false
},
{
"point_num": 2,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding... | 3 | raven/utils/core.py | dantin/raven |
"""Djinni manager tool"""
import os
import ezored.functions as fn
import ezored.logging as log
from ezored import constants as const
# -----------------------------------------------------------------------------
def run(params={}):
    """Entry point: dispatch the first CLI argument to an action."""
    args = params['args']
    # Only the 'generate' action exists; anything else — no arguments,
    # an empty action string, or an unknown action — shows the help screen.
    if args and args[0] == 'generate':
        generate(params)
    else:
        help(params)
# -----------------------------------------------------------------------------
def generate(params={}):
    """Run generate.py inside every djinni module directory."""
    dirs = fn.find_dirs_simple(os.path.join(
        fn.root_dir(),
        const.DIR_NAME_FILES,
        const.DIR_NAME_DJINNI),
        '*'
    )
    if dirs:
        log.info('Generating files for all modules...')
        dirs.sort()
        for item in dirs:
            # Only directories that ship a generate.py are djinni modules.
            if fn.file_exists(os.path.join(item, 'generate.py')):
                dir_name = os.path.basename(item)
                log.info('Generating djinni files for "{0}"...'.format(dir_name))
                fn.run_simple(['python', 'generate.py'], item)
        log.ok()
    else:
        log.error('No djinni modules to generate')
# -----------------------------------------------------------------------------
def help(params={}):
    """Print the list of actions supported by this tool."""
    log.colored('Available actions:\n', log.PURPLE)
    log.normal(' - generate')

# -----------------------------------------------------------------------------
def get_description(params={}):
    """Return the one-line description shown in the ezored command list."""
    return 'Djinni manager tool'
| [
{
"point_num": 1,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docs... | 3 | files/commands/djinni/djinni.py | uilianries/ezored |
#!/usr/bin/env python
import rospy,copy
from geometry_msgs.msg import Twist
from std_srvs.srv import Trigger, TriggerResponse
from pimouse_ros.msg import LightSensorValues
class WallStop():
    """Drive forward until the light sensors report a nearby wall."""

    def __init__(self):
        self.cmd_vel = rospy.Publisher('/cmd_vel',Twist,queue_size=1)
        self.sensor_values = LightSensorValues()
        rospy.Subscriber('/lightsensors',LightSensorValues,self.callback)

    def callback(self,messages):
        # Cache the latest sensor reading for the control loop.
        self.sensor_values = messages

    def run(self):
        """10 Hz control loop: 0.2 m/s forward until sensor sum reaches 500."""
        rate = rospy.Rate(10)
        data = Twist()
        while not rospy.is_shutdown():
            data.linear.x = 0.2 if self.sensor_values.sum_all < 500 else 0.0
            self.cmd_vel.publish(data)
            rate.sleep()

if __name__ == '__main__':
    rospy.init_node('wall_stop')
    # Wait for the motor services, switch motors on, and guarantee they
    # are switched off again on node shutdown.
    rospy.wait_for_service('/motor_on')
    rospy.wait_for_service('/motor_off')
    rospy.on_shutdown(rospy.ServiceProxy('/motor_off',Trigger).call)
    rospy.ServiceProxy('/motor_on',Trigger).call()
    WallStop().run()
| [
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
},
{
"point_num": 2,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": ... | 3 | scripts/wall_stop.py | kenjiinukai/pimouse_run_corridor |
class Animal(object):
    """Base type for the toy animal hierarchy."""
    pass

class Duck(Animal):
    """Animal for which can_quack() returns True."""
    pass

class Snake(Animal):
    """Animal for which can_quack() returns False."""
    pass

class Platypus(Animal):
    """Animal can_quack() does not know about — triggers its RuntimeError."""
    pass
def can_quack(animal):
    """Return whether *animal* quacks by switching on its concrete type.

    NOTE(review): type-testing every subclass like this violates the
    Liskov substitution principle — any new Animal subclass (Platypus)
    blows up at runtime. Judging by the surrounding file, this appears to
    be a deliberate demonstration of that anti-pattern; confirm before
    "fixing" it.
    """
    if isinstance(animal, Duck):
        return True
    elif isinstance(animal, Snake):
        return False
    else:
        raise RuntimeError('Unknown animal!')

if __name__ == '__main__':
    print(can_quack(Duck()))
    print(can_quack(Snake()))
    print(can_quack(Platypus()))  # raises RuntimeError by design
| [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": false
},
{
"point_num": 2,
"id": "every_class_has_docstring",
"question": "Does every class in this file have a docstring?",
"answer": false
},
{... | 3 | solid/3-bad-lsp.py | CtrlWebInc/montreal-django-tests |
from aleph.tests.util import TestCase
class StreamApiTestCase(TestCase):
    """API tests for the /api/2/entities/_stream endpoint."""

    def setUp(self):
        # No extra fixtures beyond the base class setup.
        super(StreamApiTestCase, self).setUp()

    def test_entities(self):
        self.load_fixtures()
        # Anonymous access is rejected...
        res = self.client.get('/api/2/entities/_stream')
        assert res.status_code == 403, res
        # ...but an admin receives the full newline-delimited stream.
        _, headers = self.login(is_admin=True)
        res = self.client.get('/api/2/entities/_stream', headers=headers)
        assert res.status_code == 200, res
        lines = len(res.data.split(b'\n'))
        assert 19 == lines, lines
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
... | 3 | aleph/tests/test_stream_api.py | jalmquist/aleph |
import numpy as np
from typing import Optional
from oneflow import Tensor
from oneflow.utils.data import Dataset
from ....types import arr_type
from ....types import arrays_type
from ....misc.toolkit import to_flow
class TensorDataset(Dataset):
    """Dataset wrapping feature/label arrays (numpy arrays or flow tensors).

    numpy inputs are converted to flow tensors up front. ``y`` may be None
    (unlabeled data — items then carry a constant 0 label), and ``others``
    are optional extra per-sample arrays appended to each item tuple.
    """

    def __init__(
        self,
        x: arr_type,
        y: Optional[arr_type],
        others: Optional[arrays_type] = None,
    ):
        if isinstance(x, np.ndarray):
            x = to_flow(x)
        if isinstance(y, np.ndarray):
            y = to_flow(y)
        if others is not None:
            # Convert any non-tensor extras; leave tensors untouched.
            others = [v if isinstance(v, Tensor) else to_flow(v) for v in others]
        self.x = x
        self.y = y
        self.others = others

    def __getitem__(self, index: int) -> arrays_type:
        # Unlabeled datasets yield a constant 0 label.
        label = 0 if self.y is None else self.y[index]
        item = [self.x[index], label]
        if self.others is not None:
            item.extend(v[index] for v in self.others)
        return tuple(item)

    def __len__(self) -> int:
        return self.x.shape[0]
| [
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
},
{
"point_num": 2,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excludi... | 3 | cflow/api/cv/data/core.py | carefree0910/carefree-flow |
from quark_core_api.core import QuarkApplication
from quark_core_api.context import ApplicationContext
from quark_core_api.common import ContextInitializer
import json
import os
# Application context rooted at the user's home directory; workspaces are
# created under `location`.
app_dir = os.path.expanduser("~\\")
app = QuarkApplication(ApplicationContext(app_dir, ContextInitializer.application))
location = "D:\\quark"
def create_ws(name, loc):
    """Create a workspace in the global app."""
    return app.create_workspace(name, loc)

def create_xp(ws, name):
    """Create an experiment inside workspace *ws*."""
    return ws.create_experiment(name)

def create_script(ws, name):
    """Create a placeholder script inside workspace *ws*."""
    return ws.create_script(name,"{} script text goes here...".format(name))

def add_script(xp, stage, name):
    """Attach script *name* to *stage* of experiment *xp*."""
    xp.add_script(stage, name)

def add_param(xp, name, value):
    """Add parameter *name* = *value* to experiment *xp*."""
    xp.add_parameter(name, value)

def print_workspaces():
    """Print the id and name of every workspace in the global app."""
    for ws in app.workspaces:
        print(ws, app.workspaces[ws].name)
# Ad-hoc exploration: dump the LGBM_CV experiment's pipeline steps and stages.
# The commented calls below were used once to create the experiment/scripts.
# create_xp(app.workspaces[20181001131931], "LGBM_CV")
# add_script(app.workspaces[20181001131931].experiments["LGBM_CV"], "prep", "preprocess")
# add_script(app.workspaces[20181001131931].experiments["LGBM_CV"], "prep", "remove_nan")
# add_script(app.workspaces[20181001131931].experiments["LGBM_CV"], "prep", "clean")
pipeline = app.workspaces[20181001131931].experiments["LGBM_CV"].pipeline
for s in pipeline.steps:
    print(s.name)
for s in pipeline.stages:
    print(s)
# from quark_core_api.context import ApplicationContext, ContextInitializer
# app_ctx_init = {
# "workspaces": [{"id":1, "name":"ws-1", "dir":"home"}]
# }
# def test_application_context():
# ctx = ApplicationContext(None, ContextInitializer(app_ctx_init, None))
# ctx.create_storage("app")
# print (ctx.workspaces[0])
# test_application_context()
| [
{
"point_num": 1,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written ... | 3 | _test_.py | arcticle/Quark |
"""implements splash screen
taken from http://code.activestate.com/recipes/534124-elegant-tkinter-splash-screen/
"""
import time
try:
import tkinter.tix as Tix
except ImportError: # Python 2
import Tix
class SplashScreen( object ):
    """Context manager showing a centered splash image while the app loads.

    Withdraws the Tk root on enter, displays the image in an undecorated
    toplevel, and keeps the splash up at least *minSplashTime* seconds
    before destroying it on exit.
    """

    def __init__( self, tkRoot, imageFilename, minSplashTime=0 ):
        self._root = tkRoot
        self._image = Tix.PhotoImage( file=imageFilename )
        self._splash = None
        # Absolute deadline before which the splash must not disappear.
        self._minSplashTime = time.time() + minSplashTime

    def __enter__( self ):
        # Remove the app window from the display
        self._root.withdraw( )
        # Calculate the geometry to center the splash image
        scrnWt = self._root.winfo_screenwidth( )
        scrnHt = self._root.winfo_screenheight( )
        imgWt = self._image.width()
        imgHt = self._image.height()
        imgXPos = (scrnWt / 2) - (imgWt / 2)
        imgYPos = (scrnHt / 2) - (imgHt / 2)
        # Create the splash screen
        self._splash = Tix.Toplevel()
        self._splash.overrideredirect(1)
        self._splash.geometry( '+%d+%d' % (imgXPos, imgYPos) )
        Tix.Label( self._splash, image=self._image, cursor='watch' ).pack( )
        # Force Tk to draw the splash screen outside of mainloop()
        self._splash.update( )

    def __exit__( self, exc_type, exc_value, traceback ):
        # Make sure the minimum splash time has elapsed
        timeNow = time.time()
        if timeNow < self._minSplashTime:
            time.sleep( self._minSplashTime - timeNow )
        del self._image
        # Destroy the splash window
        self._splash.destroy( )
        # Display the application window
        #self._root.deiconify( )

if __name__ == '__main__':
    tkRoot = Tix.Tk( )
    # Demo: show the splash (>= 2s) while pretending to load for 15s.
    with SplashScreen( tkRoot, 'res/install.gif', 2.0 ):
        time.sleep(15)
    print("Ready...")
    tkRoot.mainloop( )
| [
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
},
{
"point_num": 2,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (exclu... | 3 | splash.py | migrate2iaas/free-s3drive |
#!/usr/bin/env python
import asyncio
import sys
import stream
async def process_in_term():
    """Run ./example.sh in a new xterm window and wait for it to exit."""
    process = await asyncio.create_subprocess_exec(*["xterm", "-e", "./example.sh"])
    print("Create called")
    return await process.wait()
async def interact():
    """Prompt on stdin via stream.ainput and echo what was read.

    Fix: a stray trailing comma after the await made ``read`` a
    one-element tuple instead of the value returned by ainput (the
    printed ``type(read)`` exposed the mistake).
    """
    read = await stream.ainput(prompt="Koi", loop=loop)
    print("Received {} of type {}".format(read, type(read)))
if __name__ == "__main__":
    loop = asyncio.get_event_loop()
    # Run two terminal subprocesses and the stdin prompt concurrently.
    rc = loop.run_until_complete(asyncio.wait([
        process_in_term(),
        process_in_term(),
        interact(),
    ]))
    loop.close()
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"ans... | 3 | async_proto.py | KoolChain/Grisou |
from django.db import models
class StatusQuerySet(models.QuerySet):
    """QuerySet with shortcuts for filtering by the ``status`` field."""

    def approved(self):
        return self.filter(status="approved")

    def waiting(self):
        return self.filter(status="waiting")

    def rejected(self):
        return self.filter(status="rejected")
class CommitManager(models.Manager):
    """Manager exposing status-filtered commit querysets as properties."""

    def get_queryset(self):
        # Use the custom queryset class so the status shortcuts are available.
        return StatusQuerySet(self.model, using=self._db)

    @property
    def approved_commits(self):
        return self.get_queryset().approved()

    @property
    def waiting_commits(self):
        return self.get_queryset().waiting()

    @property
    def rejected_commits(self):
        return self.get_queryset().rejected()
| [
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": ... | 3 | core/cooggerapp/models/managers/commit.py | bisguzar/coogger |
import sys
from types import ModuleType
from typing import Dict
__all__ = ["MonkeyJail"]
_sys_modules: Dict[str, ModuleType] = {}
class MonkeyJail:
    """Context manager that swaps gevent-patched modules out of sys.modules.

    On enter, every module gevent monkey-patched (listed in
    ``monkey.saved``, plus ``selectors``, which is handled explicitly)
    is removed from ``sys.modules`` and whatever was jailed previously in
    the module-level ``_sys_modules`` cache is put back. On exit the swap
    is reversed: the current modules are jailed and the originals restored.
    """

    def __init__(self):
        # Modules removed from sys.modules on enter, restored on exit.
        self.saved = {}

    def __enter__(self):
        from gevent import monkey
        for key in list(monkey.saved) + ["selectors"]:
            if key in sys.modules:
                self.saved[key] = sys.modules.pop(key)
        sys.modules.update(_sys_modules)

    def __exit__(self, exc_type, exc_val, exc_tb):
        for key in list(self.saved) + ["selectors"]:
            if key in sys.modules:
                _sys_modules[key] = sys.modules[key]
        sys.modules.update(self.saved)
| [
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer":... | 3 | asyncio_gevent/gevent_loop/monkey_jail.py | gfmio/asyncio-gevent |
# -*- coding=utf-8 -*-
r"""
"""
def showinfo(title: str, message: str) -> None:
    """Placeholder for a platform 'showinfo' message box; currently a no-op."""
    pass


def showwarning(title: str, message: str) -> None:
    """Placeholder for a platform 'showwarning' message box; currently a no-op."""
    pass


def showerror(title: str, message: str) -> None:
    """Placeholder for a platform 'showerror' message box; currently a no-op."""
    pass


def askquestion(title: str, message: str) -> None:
    """Placeholder for a platform 'askquestion' dialog; currently a no-op."""
    pass


def askokcancel(title: str, message: str) -> None:
    """Placeholder for a platform 'askokcancel' dialog; currently a no-op."""
    pass


def askyesno(title: str, message: str) -> None:
    """Placeholder for a platform 'askyesno' dialog; currently a no-op."""
    pass


def askyesnocancel(title: str, message: str) -> None:
    """Placeholder for a platform 'askyesnocancel' dialog; currently a no-op."""
    pass


def askretrycancel(title: str, message: str) -> None:
    """Placeholder for a platform 'askretrycancel' dialog; currently a no-op."""
    pass
| [
{
"point_num": 1,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": true
},
{
"point_num": 2,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or few... | 3 | docs/newplatform.py | PlayerG9/PyMessageBox |
class Solution:
    def rob(self, nums):
        """House Robber II: houses are arranged in a circle, so the first
        and last house cannot both be robbed.

        Runs the linear house-robber DP twice in one pass: once over
        nums[:-1] (excluding the last house) and once over nums[1:]
        (excluding the first), returning the better result.

        :type nums: List[int]
        :rtype: int
        """
        # A single house has no circular conflict; the two-pass trick would
        # exclude it from both passes, so handle it up front.
        if len(nums) == 1:
            return nums[0]
        # Fix: removed a leftover debug print() from the DP loop.
        last, now = 0, 0      # DP state over nums[:-1]
        last1, now1 = 0, 0    # DP state over nums[1:]
        for i, n in enumerate(nums):
            if i < len(nums) - 1:
                last, now = now, max(n + last, now)
            if i > 0:
                last1, now1 = now1, max(n + last1, now1)
        return max(now, now1)
class Solution:
    def rob(self, nums):
        """Circular house robber: best of robbing nums[1:] vs nums[:-1]."""
        def best(line):
            # Linear DP: `take` is the optimum so far, `skip` the optimum
            # up to the previous house.
            take = skip = 0
            for value in line:
                take, skip = max(take, skip + value), take
            return take

        # For a single house both slices would be empty, so keep the whole
        # list in that case (len != 1 makes the start index 0).
        start = 1 if len(nums) != 1 else 0
        return max(best(nums[start:]), best(nums[:-1]))
{
"point_num": 1,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": true
},
{
"point_num": 2,
"id": "every_class_has_docstring",
"question": "Does every class in this file have a docstring?",
"answer": f... | 3 | Dynamic Programming/213. House Robber II.py | beckswu/Leetcode |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from conans import ConanFile, tools
import os
class DoctestConan(ConanFile):
    """Conan recipe packaging the doctest header-only testing framework."""

    name = "doctest"
    version = "1.2.6"
    url = "https://github.com/bincrafters/conan-doctest"
    description = "C++98/C++11 single header testing framework"
    license = "MIT"
    exports = ["LICENSE.md"]
    source_subfolder = "source_subfolder"

    def source(self):
        """Download and unpack the upstream release tarball."""
        source_url = "https://github.com/onqtam/doctest"
        tools.get("{0}/archive/{1}.tar.gz".format(source_url, self.version))
        # Renaming to self.source_subfolder is a convention that keeps the
        # later packaging steps independent of the archive's folder name.
        extracted_dir = self.name + "-" + self.version
        os.rename(extracted_dir, self.source_subfolder)

    def package(self):
        """Copy the license file and the single doctest header."""
        self.copy(pattern="LICENSE.txt", dst="licenses", src=self.source_subfolder)
        self.copy(pattern="*doctest.h*", dst="include",
                  src=os.path.join(self.source_subfolder, "doctest"))

    def package_id(self):
        """Header-only package: one package id regardless of settings."""
        self.info.header_only()
| [
{
"point_num": 1,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true... | 3 | conanfile.py | franc0is/conan-doctest |
from manim import *
class SquareToCircle(Scene):
    """Minimal scene: a single Transform animation."""
    def construct(self):
        square = Square()
        circle = Circle()
        self.play(Transform(square, circle))


class SceneWithMultipleCalls(Scene):
    """Scene issuing ten separate play() calls."""
    def construct(self):
        number = Integer(0)
        self.add(number)
        for i in range(10):
            self.play(Animation(Square()))


class SceneWithMultipleWaitCalls(Scene):
    """Scene alternating Create animations with one-second waits."""
    def construct(self):
        self.play(Create(Square()))
        self.wait(1)
        self.play(Create(Square().shift(DOWN)))
        self.wait(1)
        self.play(Create(Square().shift(2 * DOWN)))
        self.wait(1)
        self.play(Create(Square().shift(3 * DOWN)))
        self.wait(1)


class NoAnimations(Scene):
    """Scene with only static mobjects and a wait — no play() calls."""
    def construct(self):
        dot = Dot().set_color(GREEN)
        self.add(dot)
        self.wait(1)


class SceneWithStaticWait(Scene):
    """Scene whose wait() is static: nothing on screen changes over time."""
    def construct(self):
        self.add(Square())
        self.wait()


class SceneWithNonStaticWait(Scene):
    """Scene whose wait() is non-static because a mobject has a dt-updater."""
    def construct(self):
        s = Square()
        # Non static wait are triggered by mobject with time based updaters.
        s.add_updater(lambda mob, dt: None)
        self.add(s)
        self.wait()
| [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": false
},
{
"point_num": 2,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
}... | 3 | tests/test_scene_rendering/simple_scenes.py | fargetan/manim |
from django import template
from ..forms import PlanForm
register = template.Library()
@register.inclusion_tag("drfstripe/_change_plan_form.html", takes_context=True)
def change_plan_form(context):
    """Render the change-plan form, pre-selecting the request user's
    current subscription plan as the form's initial value."""
    context.update({
        "form": PlanForm(initial={
            "plan": context["request"].user.customer.current_subscription.plan
        })
    })
    return context
@register.inclusion_tag("drfstripe/_subscribe_form.html", takes_context=True)
def subscribe_form(context):
    """Render an empty subscribe form for users without a subscription."""
    context.update({
        "form": PlanForm()
    })
    return context
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"ans... | 3 | drfstripe/templatetags/payments_tags.py | brandon-fox/django-rest-framework-stripe |
"""xy-tag."""
import io
import os
import re
from setuptools import find_packages, setup
VERSION_RE = re.compile(r"""__version__ = ['"]([0-9b.]+)['"]""")
HERE = os.path.abspath(os.path.dirname(__file__))
def read(*args):
    """Read complete file contents.

    *args* are path components joined onto HERE; returns the file's text
    decoded as UTF-8.
    """
    # Fix: use a context manager so the file handle is closed promptly
    # instead of leaking until garbage collection.
    with io.open(os.path.join(HERE, *args), encoding="utf-8") as handle:
        return handle.read()
def get_version():
    """Read the version from this module."""
    init_source = read("src", "xy_tag", "__init__.py")
    match = VERSION_RE.search(init_source)
    return match.group(1)
def get_requirements():
    """Read the requirements file.

    Returns (requirements, dependency_links): plain requirement specs go
    into the first list, URL-style entries (containing "+") into the second.
    """
    requirements = []
    dependencies = []
    for line in read("requirements.txt").splitlines():
        line = line.strip()
        # Skip blank lines and comments.
        if not line or line.startswith("#"):
            continue
        bucket = dependencies if "+" in line else requirements
        bucket.append(line)
    return requirements, dependencies
# Resolve requirements once at import time so setup() receives plain lists.
INSTALL_REQUIRES, DEPENDENCY_LINKS = get_requirements()

setup(
    name="xy-tag",
    version=get_version(),
    # Package sources live under src/ (the "src layout").
    packages=find_packages("src"),
    package_dir={"": "src"},
    url="https://github.com/mattsb42-meta/xy-tag",
    author="Matt Bullock",
    author_email="m@ttsb42.com",
    maintainer="Matt Bullock",
    description="xy-tag",
    long_description=read("README.rst"),
    keywords="xy-tag xy_tag",
    data_files=["README.rst", "CHANGELOG.rst", "LICENSE", "requirements.txt"],
    license="Apache 2.0",
    install_requires=INSTALL_REQUIRES,
    dependency_links=DEPENDENCY_LINKS,
    classifiers=[
        "Development Status :: 1 - Planning",
        "Intended Audience :: Developers",
        "Natural Language :: English",
        "License :: OSI Approved :: Apache Software License",
        "Programming Language :: Python",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
        "Programming Language :: Python :: Implementation :: CPython"
    ]
)
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": true
},
{
"point_num": 2,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answ... | 3 | setup.py | mattsb42/xy-tag |
# coding: utf-8
from __future__ import unicode_literals
import re
from ..utils import unsmuggle_url
from .common import InfoExtractor
class JWPlatformIE(InfoExtractor):
    """Extractor for media hosted on JW Platform / the JW Player CDN."""

    _VALID_URL = r"(?:https?://(?:content\.jwplatform|cdn\.jwplayer)\.com/(?:(?:feed|player|thumb|preview)s|jw6|v2/media)/|jwplatform:)(?P<id>[a-zA-Z0-9]{8})"
    _TESTS = [
        {
            "url": "http://content.jwplatform.com/players/nPripu9l-ALJ3XQCI.js",
            "md5": "fa8899fa601eb7c83a64e9d568bdf325",
            "info_dict": {
                "id": "nPripu9l",
                "ext": "mov",
                "title": "Big Buck Bunny Trailer",
                "description": "Big Buck Bunny is a short animated film by the Blender Institute. It is made using free and open source software.",
                "upload_date": "20081127",
                "timestamp": 1227796140,
            },
        },
        {
            "url": "https://cdn.jwplayer.com/players/nPripu9l-ALJ3XQCI.js",
            "only_matching": True,
        },
    ]

    @staticmethod
    def _extract_url(webpage):
        """Return the first embedded JW Platform player URL, or None."""
        found = JWPlatformIE._extract_urls(webpage)
        return found[0] if found else None

    @staticmethod
    def _extract_urls(webpage):
        """Return every JW Platform player URL embedded in *webpage*."""
        return re.findall(
            r'<(?:script|iframe)[^>]+?src=["\']((?:https?:)?//(?:content\.jwplatform|cdn\.jwplayer)\.com/players/[a-zA-Z0-9]{8})',
            webpage,
        )

    def _real_extract(self, url):
        """Fetch the media's JSON description from the CDN and parse it."""
        url, smuggled_data = unsmuggle_url(url, {})
        # Geo-bypass countries may be smuggled in by an embedding extractor.
        self._initialize_geo_bypass({
            "countries": smuggled_data.get("geo_countries"),
        })
        video_id = self._match_id(url)
        media_json = self._download_json(
            "https://cdn.jwplayer.com/v2/media/" + video_id, video_id
        )
        return self._parse_jwplayer_data(media_json, video_id)
| [
{
"point_num": 1,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
},
{
"point_num": 2,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inherit... | 3 | nextdl/extractor/jwplatform.py | devenu85/nextdl |
import unittest
import os
from simpleh5.utilities.search_utilities import _build_search_string
class TestBuildString(unittest.TestCase):
    """Unit tests for _build_search_string."""

    def _check(self, query, expected_match, expected_vars):
        # Shared assertion helper: run the builder and compare both outputs.
        match_string, uservars = _build_search_string(query)
        self.assertEqual(match_string, expected_match)
        self.assertDictEqual(uservars, expected_vars)

    def test_string_single(self):
        self._check(['strs', '==', 'abc'], "(n0==b'abc')", {'n0': 'strs'})

    def test_string_single_unicode(self):
        # Note: the extra parentheses do not create a nested tuple; this is
        # a single (column, op, value) triple. The pound sign exercises the
        # UTF-8 byte-escaping path.
        self._check(('strs', '==', '£'), "(n0==b'\\xc2\\xa3')", {'n0': 'strs'})

    def test_string_compound(self):
        query = [
            [('strs', '==', 'abc'), ('strs', '==', '£')],
            ['nums', '>', 1.3],
        ]
        self._check(query, "((n0==b'abc')|(n0==b'\\xc2\\xa3'))&(n1>1.3)",
                    {'n0': 'strs', 'n1': 'nums'})

    def test_string_double_compound(self):
        # meaningless logic — only the string building matters here
        query = [
            [('strs', '!=', 'abc'), ('strs', '!=', 'cba')],
            ('strs', '==', '£'),
        ]
        self._check(query, "((n0!=b'abc')|(n0!=b'cba'))&(n0==b'\\xc2\\xa3')",
                    {'n0': 'strs'})
if __name__ == "__main__":
unittest.main()
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false... | 3 | simpleh5/tests/test_simpleh5_search_util.py | ssolari/simpleh5 |
import discord
from discord.ext import commands
from x86 import helpers
class Ping:
    """Ping command"""

    @commands.command()
    @commands.cooldown(6, 12)
    async def ping(self, ctx):
        """Ping the bot"""
        # Fix: removed a dead triple-quoted debug block and commented-out
        # experimentation code that shipped inside the function body.
        # Websocket heartbeat latency, reported in milliseconds.
        hb_latency = round(ctx.bot.latency * 1000, 2)
        # Per-shard latency is only meaningful when the bot is actually
        # sharded (more than one latency entry) and the command runs in a
        # guild that has a shard id.
        if ctx.guild and len(ctx.bot.latencies) > 1:
            current_shard_latency = ctx.bot.latencies[ctx.guild.shard_id]
            shb_latency = round(current_shard_latency * 1000, 2)
            latency_message = f'\n**```ini\nHeartbeat Latency: [{hb_latency} ms] \nCurrent Shard Heartbeat: [{shb_latency} ms]```**'
        else:
            latency_message = f'\n**```ini\nHeartbeat Latency: [{hb_latency} ms] ```**'
        embed = discord.Embed(description=latency_message, color=await helpers.get_color())
        ping_message = await ctx.send(embed=embed)
        # Round-trip time: delta between our reply's creation timestamp and
        # the invoking message's timestamp.
        rt_ping = round((ping_message.created_at - ctx.message.created_at).total_seconds() * 1000, 2)
        embed = discord.Embed(description=f'{latency_message[:-6]} \nRound-trip time: [{rt_ping} ms] ```**', color=await helpers.get_color())
        await ping_message.edit(embed=embed)
def setup(bot):
    """Entry point used by discord.py's extension loader to register the cog."""
    bot.add_cog(Ping())
| [
{
"point_num": 1,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": fals... | 3 | cogs/utils/ping.py | zz-xx/robox86 |
from PIL import Image
import matplotlib.pyplot as plt
from .annotator import Annotator
class BoxAnnotator(Annotator):
    """Annotator for drawing bounding boxes; not implemented yet."""

    def __init__(self, annotations_limit=None, window_timeout=0):
        super().__init__(annotations_limit, window_timeout)

    def run(self, img_absolute_path: str):
        """Annotation entry point; always raises until implemented."""
        raise NotImplementedError("BoxAnnotator is not yet implemented")
| [
{
"point_num": 1,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"ans... | 3 | annotation/annotators/box_annotator.py | mtszkw/pointpicker |
from watchmen_boot.guid.snowflake import get_surrogate_key
from watchmen.pipeline.core.dependency.graph.label import Label
from watchmen.pipeline.core.dependency.graph.node import Node
from watchmen.pipeline.core.dependency.graph.property import Property
def buildPipelineNode(pipeline):
    """Build a dependency-graph Node representing *pipeline*."""
    return Node(**{
        'id': pipeline.pipelineId,
        'object_id': "pipeline",
        'name': pipeline.name,
        'labels': buildPipelineLabels(),
        'properties': buildPipelineProperties({"name": pipeline.name}),
    })


def buildPipelineProperties(properties: dict):
    """Convert a plain dict into a list of graph Property objects."""
    return [
        Property(**{
            'id': get_surrogate_key(),
            'name': name,
            'value': value,
        })
        for name, value in properties.items()
    ]


def buildPipelineLabels():
    """Return the fixed label set for pipeline nodes (type=pipeline)."""
    return [
        Label(**{
            'id': get_surrogate_key(),
            'name': 'type',
            'value': 'pipeline',
        })
    ]
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": fals... | 3 | watchmen/pipeline/core/dependency/model/pipeline.py | Insurance-Metrics-Measure-Advisory/watchman-data-connector |
#import PyPDF2 # PyPDF2 extracts texts from PDF markup. We found that it worked relatively poor with CVPR papers. Spaces between words are often omitted in the outputs.
import textract # textract uses external OCR command "tesseract" to extract texts. The workflow is to first convert pdf files to ppm images and then apply OCR to extract texts.
from nltk.tokenize import word_tokenize
import os, re
import django
django.setup()
from papers.settings import BASE_DIR
import xml.etree.ElementTree as ET
def get_stopwords():
    """Load the project's stop-word list (one word per line)."""
    with open("{}/static/stopwords.txt".format(BASE_DIR)) as handle:
        return [line.strip() for line in handle]


STOPWORDS = get_stopwords()


def extract_keywords_from_pdf(pdf_file):
    """OCR a PDF with tesseract and return its cleaned keyword tokens."""
    text = str(textract.process(pdf_file, method='tesseract', language='eng', layout="layout"))
    tokens = [token.strip() for token in word_tokenize(text)]
    # The OCR output is a stringified bytes object, so hyphenated line
    # breaks appear as a literal backslash-n sequence.
    tokens = [token.replace('-\\n', '') for token in tokens]
    keywords = [token for token in tokens if token not in STOPWORDS]
    keywords = [re.sub('[^0-9a-zA-Z]+', '', token).lower() for token in keywords]
    return [token for token in keywords if len(token) > 2]
def parse_cermine_output(cermine_file):
tree = ET.parse(cermine_file)
root = tree.getroot()
| [
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answ... | 3 | crawler/pdf.py | mental689/paddict |
import ConfigParser
import os
import sys
def irida_config():
    """Return the configuration as {section: {option: value}}, or None on failure."""
    try:
        parsed = read_config()
        return {section: dict(parsed.items(section)) for section in parsed.sections()}
    except Exception as e:
        print(str(e), ' Could not read configuration file')


def read_config():
    """Parse config.ini located next to the running script (Python 2)."""
    config = ConfigParser.RawConfigParser()
    script_dir = os.path.dirname(os.path.abspath(sys.argv[0]))
    config_path = script_dir + "/" + "config.ini"
    try:
        config.read(config_path)
        return config
    except Exception as e:
        # Best-effort: report the problem and fall through (returns None).
        print(str(e))
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": tru... | 3 | src/Config/irida_con.py | Public-Health-Bioinformatics/sequdas-upload |
"""Define the command line iterface."""
import os
import glob
def _file_path():
"""Determine the file path."""
return os.environ.get("EFI_MONITOR_FILE_PATH", "/sys/firmware/efi/efivars/dump*")
def _files():
"""Find the dump_files."""
return glob.glob(_file_path())
def check():
"""Check for efi dump files."""
for a_file in _files():
print(a_file)
def clear():
"""Clear out efi dump files."""
for a_file in _files():
os.unlink(a_file)
| [
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer":... | 3 | efi_monitor/_cli.py | ambauma/efi-monitor |
from seminars.seminar_oop.employee import Employee
class HRRegistry:
    """In-memory employee registry keyed by an auto-incrementing integer id."""

    def __init__(self, name: str, location: str):
        self.name = name
        self.location = location
        self.registry = {}   # id -> Employee
        self.counter = 1     # next id to assign

    def add_employee(self, employee: Employee):
        """Store *employee* under the next free id and advance the counter."""
        self.registry[self.counter] = employee
        self.counter += 1
# Demo: register two employees and print the registry after each step.
employee_1 = Employee('Ivan', 25, 40000)
employee_2 = Employee('Boris', 30, 10000000)
hr_registry = HRRegistry('HSE', 'Kostina')
print(hr_registry.registry)
hr_registry.add_employee(employee_1)
print(hr_registry.registry)
hr_registry.add_employee(employee_2)
print(hr_registry.registry)
| [
{
"point_num": 1,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than cla... | 3 | seminars/seminar_oop/hr_registry.py | dmitry-uraev/programming-2021-19fpl |
import numpy as np
from scipy.io import wavfile
def normalize(data):
    """Scale *data* linearly into the range [-1, 1] (float32)."""
    shifted = np.float32(data) - np.min(data)
    return (shifted / np.max(shifted) - 0.5) * 2


def make_batch(path):
    """Load a wav file (first channel) and return (inputs, targets)."""
    samples = wavfile.read(path)[1][:, 0]
    return get_wavenet_data(samples)


def get_wavenet_data(data, resolution=256):
    """Quantize a 1-D signal into *resolution* amplitude bins.

    Returns (inputs, targets): inputs are the binned amplitudes of
    data[:-1] with shape (1, len-1, 1); targets are the integer bin
    indices of data[1:] with shape (1, len-1).
    """
    scaled = normalize(data)
    # scaled_f = np.sign(scaled) * (np.log(1 + 255*np.abs(scaled)) / np.log(1 + 255))
    bins = np.linspace(-1, 1, resolution)
    # Quantize inputs, then map them back onto the bin values.
    input_idx = np.digitize(scaled[:-1], bins, right=False) - 1
    inputs = bins[input_idx][None, :, None]
    # Targets stay as integer class indices.
    targets = (np.digitize(scaled[1:], bins, right=False) - 1)[None, :]
    return inputs, targets


def get_normalized_data(data):
    """Return (data[:-1], data[1:]) of the normalized signal."""
    # A bit of a hack, sorry no time
    scaled = normalize(data)
    return scaled[:-1], scaled[1:]
| [
{
"point_num": 1,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docs... | 3 | lib/fast-wavenet/wavenet/utils.py | rpp0/em-operation-extraction |
import unittest
from crypto_tools.stats import letter_frequency
class TestLetterFrequency(unittest.TestCase):
    """Tests for letter_frequency.LetterFrequency."""

    def test_count_empty(self):
        # An empty text has no letters, hence an empty frequency table.
        self.assertEqual(letter_frequency.LetterFrequency("").calculate(), {})

    def test_count_text(self):
        counts = letter_frequency.LetterFrequency(
            "The quick brown fox jumps over the lazy dog").calculate()
        self.assertTrue(0.0285 <= counts["a"] <= 0.0286)
        self.assertTrue(0.0571 <= counts["t"] <= 0.0572)
if __name__ == '__main__':
unittest.main()
| [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},... | 3 | tests/test_letter_frequency.py | roccobarbi/crypto_tools |
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
import os
import pytest
from azure.communication.sms.aio import SmsClient
from azure.communication.sms import (
PhoneNumber, SendSmsOptions
)
from _shared.asynctestcase import AsyncCommunicationTestCase
from _shared.testcase import (
BodyReplacerProcessor, ResponseReplacerProcessor
)
class SMSClientTestAsync(AsyncCommunicationTestCase):
    """Recorded/live end-to-end tests for the async SmsClient."""

    def __init__(self, method_name):
        super(SMSClientTestAsync, self).__init__(method_name)

    def setUp(self):
        super(SMSClientTestAsync, self).setUp()
        # In playback mode a fixed fake number keeps recordings stable;
        # otherwise the live number comes from the environment.
        if self.is_playback():
            self.phone_number = "+18000005555"
        else:
            self.phone_number = os.getenv("PHONE_NUMBER")
        # Scrub phone numbers, message ids and the resource name from
        # recordings before they are persisted.
        self.recording_processors.extend([
            BodyReplacerProcessor(keys=["to", "from", "messageId"]),
            ResponseReplacerProcessor(keys=[self._resource_name])])

    @AsyncCommunicationTestCase.await_prepared_test
    @pytest.mark.live_test_only
    async def test_send_sms_async(self):
        """Send one SMS to self and assert a message id comes back."""
        sms_client = SmsClient.from_connection_string(self.connection_str)
        async with sms_client:
            # calling send() with sms values
            sms_response = await sms_client.send(
                from_phone_number=PhoneNumber(self.phone_number),
                to_phone_numbers=[PhoneNumber(self.phone_number)],
                message="Hello World via SMS",
                send_sms_options=SendSmsOptions(enable_delivery_report=True))  # optional property
            assert sms_response.message_id is not None
| [
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": false
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer":... | 3 | sdk/communication/azure-communication-sms/tests/test_sms_client_e2e_async.py | vbarbaresi/azure-sdk-for-python |
# Copyright (c) 2016 SwiftStack, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class MetaMiddleware(object):
    """WSGI middleware bridging the X-ProxyFS-BiModal header and account
    sysmeta: the request header becomes sysmeta on the way in, and sysmeta
    response headers become X-ProxyFS-BiModal on the way out."""

    def __init__(self, app, conf):
        self.app = app

    def __call__(self, env, start_response):
        # Snapshot matching keys first so we never mutate env mid-iteration.
        incoming = [h for h in env if h.upper() == 'HTTP_X_PROXYFS_BIMODAL']
        for header in incoming:
            # only last one, if multiple, will determine value
            env['HTTP_X_ACCOUNT_SYSMETA_PROXYFS_BIMODAL'] = env.pop(header)

        def meta_response(status, response_headers, exc_info=None):
            # Translate sysmeta response headers back to X-ProxyFS-BiModal.
            matched = [(h, v) for (h, v) in response_headers
                       if h.upper() == 'X-ACCOUNT-SYSMETA-PROXYFS-BIMODAL']
            for pair in matched:
                response_headers.remove(pair)
            for _, value in matched:
                # potentially multiple instances of same header
                response_headers.append(('X-ProxyFS-BiModal', value))
            return start_response(status, response_headers, exc_info)

        return self.app(env, meta_response)
def filter_factory(global_conf, **local_conf):
    """Paste filter factory: merge global and local config, wrap the app."""
    conf = global_conf.copy()
    conf.update(local_conf)

    def meta_filter(app):
        # Closure over the merged config; one middleware per wrapped app.
        return MetaMiddleware(app, conf)

    return meta_filter
| [
{
"point_num": 1,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than cla... | 3 | meta_middleware/meta_middleware/middleware.py | kevin-wyx/ProxyFS |
#
# Copyright (c) Ionplus AG and contributors. All rights reserved.
# Licensed under the MIT license. See LICENSE file in the project root for details.
#
from legacy import calculation_sets, routines, targets, results
class Generator(object):
    """Creates the legacy compatibility tables/views/routines in the target
    schema, substituting the concrete schema names into the query templates."""

    def __init__(self, db_session, source_schema, target_schema, machine_number, isotope_number):
        self.db_session = db_session
        self.source_schema = source_schema
        self.target_schema = target_schema
        self.machine_number = machine_number
        self.isotope_number = isotope_number
        # Placeholder -> concrete schema name substitutions.
        self.schema_mappings = [
            ('_brahma_', source_schema),
            ('_legacy_', target_schema),
        ]

    def run(self):
        """Create all legacy objects, in dependency order."""
        for statement in (
                calculation_sets.calc_corr_t,
                calculation_sets.calc_sample_t,
                calculation_sets.calc_set_t,
                targets.sampletype_t,
                targets.target_v,
                results.workana_v,
                results.workproto_v_nt,
                routines.setCycleEnableNT,
                routines.setRunEnableNT):
            self._prepare_and_execute(statement)

    def _prepare_and_execute(self, query):
        # Bind the machine/isotope parameters every statement expects.
        self._execute(self._prepare(query),
                      machine_number=self.machine_number,
                      isotope_number=self.isotope_number)

    def _prepare(self, query):
        # Substitute schema placeholders into the raw SQL template.
        for placeholder, schema in self.schema_mappings:
            query = query.replace(placeholder, schema)
        return query

    def _execute(self, query, **kwargs):
        with self.db_session.cursor() as cursor:
            cursor.execute(query, kwargs)
        self.db_session.commit()
| [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": ... | 3 | legacy/generator.py | eclissi91/brahma |
class Color(object):
    """RGBA color with 8-bit channels; defaults to opaque white.

    ``hex`` and ``str()`` render as AARRGGBB; equality and hashing are
    value-based over all four channels.
    """

    def __init__(self, r=255, g=255, b=255, a=255):
        self.r = r
        self.g = g
        self.b = b
        self.a = a

    @property
    def hex(self):
        """AARRGGBB hex string, e.g. 'FFFFFFFF' for opaque white."""
        return '%0.2X%0.2X%0.2X%0.2X' % (self.a, self.r, self.g, self.b)

    def __hash__(self):
        # Pack ARGB into one int so equal colors hash equally.
        return (self.a << 24) + (self.r << 16) + (self.g << 8) + (self.b)

    def __eq__(self, other):
        # Fix: comparing against a non-Color operand (e.g. an int) used to
        # raise AttributeError; returning NotImplemented lets Python fall
        # back, so `color == 5` is simply False. None still compares False.
        if not isinstance(other, Color):
            return NotImplemented
        return (self.r == other.r and self.g == other.g
                and self.b == other.b and self.a == other.a)

    def __str__(self):
        return self.hex


Color.WHITE = Color(255, 255, 255, 255)
Color.BLACK = Color(0, 0, 0, 255)
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},... | 3 | pyexcelerate/Color.py | AlexHill/PyExcelerate |
from GeneralUtils import flatten
def collatz(n):
    """Yield the Collatz trajectory of *n*, from n itself down to 1.

    Yields nothing for n <= 1 (matching the original loop condition).

    Fix: the previous implementation yielded every intermediate value
    twice — once at the top of the loop and again right after updating n.
    """
    if n <= 1:
        return
    while n > 1:
        yield n
        # Halve even values; apply 3n+1 to odd values.
        n = n // 2 if n % 2 == 0 else 3 * n + 1
    yield n  # the terminating 1
def collatz_inv(n):
    """Return the Collatz predecessor(s) of *n*.

    2n is always a predecessor; when n % 6 == 4 the odd value (n-1)/3 is a
    predecessor too, and a (2n, (n-1)//3) pair is returned instead.
    """
    doubled = 2 * n
    if n % 6 == 4:
        return doubled, (n - 1) // 3
    return doubled
def collatz_inv_graph(n, levels):
    """Expand the inverse-Collatz frontier of *n* and print it each level."""
    frontier = [n]
    for _ in range(levels):
        frontier = flatten([collatz_inv(m) for m in frontier])
        print(frontier)


def collatz_inv_print(levels):
    """Print predecessor edges of the Collatz graph, level by level from 1."""
    seen = set([1])
    frontier = [1]
    for _ in range(levels):
        for m in frontier:
            print(f"{collatz_inv(m)} → {m}")
        expanded = set(flatten([collatz_inv(m) for m in frontier]))
        # Keep only values not reached before, and record them as seen.
        new_values = expanded - seen
        seen = seen | new_values
        frontier = list(new_values)
#
#for i in collatz(119):
# print(i)
#collatz_inv_graph(1,9) | [
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
},
{
"point_num": 2,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excludi... | 3 | Computation/Collatz.py | SymmetricChaos/FiniteFields |
import numpy as np
import skimage
from skimage import io, transform, exposure, data, color
from skimage.color import *
import matplotlib.pyplot as plt
from matplotlib.pyplot import imshow
def unmix_purple_img(purp_img, loud=False):
    """
    Accepts a purple image object as a parameter and returns the image
    with the colors unmixed (hematoxylin channel) for easier segmentation.
    """
    # CellProfiler's hematoxylin stain vector tiled into a 3x3 stain matrix.
    hematoxylin_matrix = np.ones((3, 3)) * (0.644, 0.717, 0.267)
    rgb = purp_img[:, :, [0, 1, 2]]  # stain separation needs exactly 3 channels
    separated = separate_stains(rgb, hematoxylin_matrix)
    if loud:
        # Show the original next to the unmixed channel for inspection.
        fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(8, 8))
        ax[0].set_title("Original")
        ax[0].imshow(purp_img)
        ax[1].set_title("Hematoxylin")
        ax[1].imshow(separated[:, :, 0])
    return separated[:, :, 0]


def unmix_pink_imgs(pink_img, loud=False):
    """
    Same as unmix_purple_img but takes a pink image as a parameter
    (uses skimage's built-in RBD stain matrix, channel 1).
    """
    rgb = pink_img[:, :, [0, 1, 2]]
    separated = separate_stains(rgb, rbd_from_rgb)
    if loud:
        fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(8, 8))
        ax[0].set_title("Original")
        ax[0].imshow(pink_img)
        ax[1].set_title("RBD")
        ax[1].imshow(separated[:, :, 1])
    return separated[:, :, 1]
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": true
},
{
"point_num": 2,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answ... | 3 | functions/colors.py | broadinstitute/segmentation_experiments |
# Exercises the import machinery end to end: plain imports, recursive
# imports, "from ... import *", and module-level __getattr__/__dir__.
import simple


class Simple2:
    """Local class, unrelated to simple.Simple; only carries an info tag."""
    def __init__(self):
        self.info = "SimpleClass2"


class Simple3(simple.Simple):
    """Subclass of the imported class; forwards to the parent initializer."""
    def __init__(self):
        simple.Simple.__init__(self)


text = "text in simple"
assert simple.text == text

_s = simple.Simple()
_s3 = Simple3()
# The subclass must inherit the parent's info attribute unchanged.
assert _s.info == _s3.info

import recursive_import
_s = recursive_import.myClass()
assert str(_s) == "success!"

import from_import_test.b
assert from_import_test.b.v == 1
import from_import_test.c
assert from_import_test.c.v == 1

# test of keyword "global" in functions of an imported module
import global_in_imported
assert global_in_imported.X == 15

from delegator import Delegator
delegate = Delegator([])

# issue 768
import modtest

# issue 1261
import colorsys
colorsys.ONE_THIRD  # no AttributeError
from colorsys import *
try:
    # star-import must honor colorsys.__all__, which excludes ONE_THIRD
    ONE_THIRD
    raise Exception("should have raised NameError")
except NameError:
    pass

# use "__getattr__" and "__dir__" at module level (PEP 562)
assert simple.strange == "a strange name"
assert dir(simple) == ["Simple", "text", "strange", "unknown"]

# issue 1483
from foobar import *
assert str(Foo()) == "foo"

print('passed all tests')
| [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
... | 3 | www/tests/test_import.py | sejalseth/brython |
import komand
from .schema import LabelIssueInput, LabelIssueOutput
# Custom imports below
class LabelIssue(komand.Action):
    """Action that adds one or more comma-separated labels to a Jira issue."""

    def __init__(self):
        super(self.__class__, self).__init__(
            name='label_issue',
            description='Label Issue',
            input=LabelIssueInput(),
            output=LabelIssueOutput())

    def run(self, params=None):
        """Add label to issue"""
        # Fix: params previously defaulted to a mutable {} shared across
        # calls; use the None-sentinel idiom instead.
        if params is None:
            params = {}
        issue = self.connection.client.issue(id=params['id'])
        if not issue:
            raise Exception('Error: No issue found with ID: ' + params['id'])
        labels = params['label'].split(',')
        # Only append labels the issue does not already carry.
        for label in labels:
            if label not in issue.fields.labels:
                issue.fields.labels.append(label)
        self.logger.info('Adding labels to issue %s: %s', params['id'], issue.fields.labels)
        issue.update(fields={'labels': issue.fields.labels})
        return {'success': True}

    def test(self):
        """Connectivity check; returns a success dict when the connection tests OK."""
        t = self.connection.test()
        if t:
            return {'success': True}
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answ... | 3 | jira/komand_jira/actions/label_issue/action.py | xhennessy-r7/insightconnect-plugins |
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 29 11:46:26 2022
@author: Pedro
"""
def search(lista, target) -> int:
    """Return the index of the first element of *lista* equal to *target*, or -1.

    Replaces the C-style ``for i in range(len(lista))`` scan with the
    EAFP idiom: ``list.index`` performs the same ``==`` linear scan in C.
    """
    try:
        return lista.index(target)
    except ValueError:
        return -1
def search2(lista, target) -> int:
    """Linear search: index of the first element equal to *target*, else -1."""
    # One generator expression; next() yields the fallback when nothing matches.
    return next((idx for idx, elem in enumerate(lista) if elem == target), -1)
{
"point_num": 1,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
},
{
"point_num": 2,
"id": "all_return_types_annotated",
"question": "Does every function in this file have a return... | 3 | teoria/clase 29-03/algoritmos.py | pgentil/PC |
import base64
import os
from typing import Iterator, List, Tuple
from lms.extractors.base import Extractor, File
from lms.models.errors import BadUploadFile
from lms.utils.files import ALLOWED_IMAGES_EXTENSIONS
class Imagefile(Extractor):
    """Extractor for single-image submissions: resolves the exercise id from
    the file name and wraps the base64-encoded image bytes in a File."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        basename = os.path.basename(self.filename)
        self.filename_no_ext, _, self.ext = basename.rpartition('.')

    def can_extract(self) -> bool:
        """True when the file extension is one of the allowed image types."""
        return self.ext.lower() in ALLOWED_IMAGES_EXTENSIONS

    def get_exercise(self, to_extract: bytes) -> Tuple[int, List[File]]:
        """Return (exercise_id, [image file]) or raise BadUploadFile when the
        exercise id cannot be derived from the file name."""
        exercise_id = 0
        if self.filename:
            exercise_id, _ = self._clean(self.filename_no_ext)
        if not exercise_id:
            raise BadUploadFile("Can't resolve exercise id.", self.filename)
        encoded = base64.b64encode(to_extract)
        return (exercise_id, [File(f'/main.{self.ext.lower()}', encoded)])

    def get_exercises(self) -> Iterator[Tuple[int, List[File]]]:
        """Yield at most one (exercise_id, files) pair, skipping empty content."""
        exercise_id, files = self.get_exercise(self.file_content)
        if exercise_id and files and files[0].code:
            yield (exercise_id, files)
| [
{
"point_num": 1,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},... | 3 | lms/extractors/imagefile.py | PureDreamer/lms |
from flask import abort, jsonify
from flask_restful import Resource
from flask_simplelogin import login_required
from test.models import Product
class ProductResource(Resource):
    """REST collection resource for products (list via GET, create via POST)."""

    def get(self):
        """Return every product as JSON, or an empty 204 when none exist."""
        products = Product.query.all() or abort(204)
        return jsonify(
            {"products": [product.to_dict() for product in products]}
        )

    @login_required(basic=True, username="admin")
    def post(self):
        """
        Creates a new product.

        Only admin user authenticated using basic auth can post
        Basic takes base64 encoded username:password.

        # curl -XPOST localhost:5000/api/v1/product/ \
        # -H "Authorization: Basic Y2h1Y2s6bm9ycmlz" \
        # -H "Content-Type: application/json"
        """
        # BUG FIX: the original *returned* the exception instance instead of
        # raising it, so the client silently received the repr of the
        # exception object rather than an error.
        raise NotImplementedError(
            "Someone please complete this example and send a PR :)"
        )
class ProductItemResource(Resource):
    """REST item resource for a single product, looked up by id."""

    def get(self, product_id):
        """Return the product with *product_id* as JSON, or abort with 404."""
        # `or abort(404)` triggers when filter_by(...).first() returns None
        product = Product.query.filter_by(id=product_id).first() or abort(404)
        return jsonify(product.to_dict())
| [
{
"point_num": 1,
"id": "every_class_has_docstring",
"question": "Does every class in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true... | 3 | test/ext/restapi/resources.py | YakovBoiev/test |
from abc import ABCMeta, abstractmethod
class TemplateEngine(object):
    """
    An abstract class of TemplateEngine
    """
    # NOTE(review): `__metaclass__` is the Python 2 spelling; on Python 3 this
    # attribute is ignored and the class is NOT actually abstract — confirm
    # whether `class TemplateEngine(metaclass=ABCMeta)` was intended.
    __metaclass__ = ABCMeta

    def __init__(self):
        pass

    def __str__(self):
        # NOTE(review): `_inputTemplatePath` is never assigned in this class;
        # presumably subclasses set it — str() on a bare TemplateEngine would
        # raise AttributeError. Confirm against the subclasses.
        return 'Abstract Engine class with file path {0}'.format(self._inputTemplatePath)
class TemplateEngineEnum(object):
    """
    A enumeration-like class that retrieves a TemplateEngine given name
    """

    @staticmethod
    def getAllShapes():
        """
        Return a list of class name of subtype of class TemplateEngine

        :return: a list of subclass of TemplateEngine
        """
        return [subclass.__name__ for subclass in TemplateEngine.__subclasses__()]

    @staticmethod
    def valueOf(str):
        """
        :param str: the literal name of TemplateEngine subclass
        :return: the literal name of TemplateEngine subclass, or None
        """
        # NOTE: the parameter shadows the builtin ``str``; the name is kept
        # for interface compatibility with existing callers.
        return str if str in TemplateEngineEnum.getAllShapes() else None
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
}... | 3 | mkconfig/core/engine.py | mcfongtw/MkConfig |
"""Qtextedit module."""
# -*- coding: utf-8 -*-
from PyQt6 import QtWidgets # type: ignore[import]
from pineboolib.core import decorators
from typing import Optional
class QTextEdit(QtWidgets.QTextEdit):
    """Qt3-style compatibility wrapper over QtWidgets.QTextEdit."""

    # class-level defaults; shadowed per-instance in __init__
    LogText: int = 0
    RichText: int = 1

    def __init__(self, parent: Optional["QtWidgets.QWidget"] = None) -> None:
        """Initialize."""
        super().__init__(parent)
        self.LogText = 0  # pylint: disable=invalid-name
        self.RichText = 1  # pylint: disable=invalid-name

    def setText(self, text: str) -> None:
        """Set text."""
        super(QTextEdit, self).setText(text)
        # if not project.DGI.localDesktop():
        #    project.DGI._par.addQueque("%s_setText" % self._parent.objectName(), text)

    def getText(self) -> str:
        """Return the widget content as plain text."""
        return super().toPlainText()

    @decorators.not_implemented_warn
    def getTextFormat(self) -> int:
        """Return text format (not implemented; always -1)."""
        return -1

    @decorators.incomplete
    def setTextFormat(self, value: int) -> None:
        """Set text format: 0 = LogText (read-only, plain), 1 = RichText (editable, rich)."""
        if value == 0:  # LogText
            self.setReadOnly(True)
            self.setAcceptRichText(False)
        elif value == 1:
            self.setReadOnly(False)
            self.setAcceptRichText(True)

    def setShown(self, value: bool) -> None:
        """Show or hide the widget (Qt3 API name for setVisible)."""
        self.setVisible(value)
        # if value:
        #    super().show()
        # else:
        #    super().hide()

    def getPlainText(self) -> str:
        """Return text in plain text format."""
        return super(QTextEdit, self).toPlainText()

    def setAutoFormatting(self, value=QtWidgets.QTextEdit.AutoFormattingFlag.AutoAll) -> None:
        """Set auto formatting mode.

        NOTE(review): the *value* argument is ignored — AutoAll is always
        applied. Confirm whether that is intentional.
        """
        super().setAutoFormatting(QtWidgets.QTextEdit.AutoFormattingFlag.AutoAll)

    # Qt3-style property aliases over the getters/setters above
    textFormat = property(getTextFormat, setTextFormat)
    text = property(getText, setText)
    PlainText = property(getPlainText, setText)
| [
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": false
},
{
"point_num": 2,
"id": "all_return_types_annotated",
"question": "Does every function in this file have a return type annotation?",
... | 3 | pineboolib/q3widgets/qtextedit.py | Aulla/pineboo |
from django import forms
class OAuthValidationError(Exception):
    """
    Exception to throw inside :class:`OAuthForm` if any OAuth2 related errors
    are encountered, such as an invalid grant type or invalid client.

    :attr:`OAuthValidationError` expects a dictionary outlining the OAuth error
    as its first argument when instantiating.

    :example:

    ::

        class GrantValidationForm(OAuthForm):
            grant_type = forms.CharField()

            def clean_grant(self):
                if not self.cleaned_data.get('grant_type') == 'code':
                    raise OAuthValidationError({
                        'error': 'invalid_grant',
                        'error_description': "%s is not a valid grant type" % (
                            self.cleaned_data.get('grant_type'))
                    })

    The different types of errors are outlined in :rfc:`4.2.2.1` and
    :rfc:`5.2`.
    """
class OAuthForm(forms.Form):
    """
    Form base class that produces shallow error dicts and stops cleaning
    early when an :class:`OAuthValidationError` is raised.

    The shallow error dict is reused when returning error responses to the
    client. The different types of errors are outlined in :rfc:`4.2.2.1`
    and :rfc:`5.2`.
    """

    def __init__(self, *args, **kwargs):
        self.client = kwargs.pop('client', None)
        super(OAuthForm, self).__init__(*args, **kwargs)

    def _absorb_oauth_errors(self, cleaning_step):
        """Run one cleaning step, folding any OAuthValidationError payload
        (the dict passed as its first argument) into self._errors."""
        try:
            cleaning_step()
        except OAuthValidationError as exc:
            self._errors.update(exc.args[0])

    def _clean_fields(self):
        """Exit early on the first OAuth error instead of validating each field."""
        self._absorb_oauth_errors(lambda: super(OAuthForm, self)._clean_fields())

    def _clean_form(self):
        """Same shallow-error behaviour for whole-form cleaning."""
        self._absorb_oauth_errors(lambda: super(OAuthForm, self)._clean_form())
| [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": ... | 3 | provider/forms.py | basilfx/django-oauth2-provider |
import unittest
import atomic_neu.atomic as atomic
import numpy as np
class TestElectronCooling(unittest.TestCase):
    """Tests for the power bookkeeping of atomic.ElectronCooling (lithium)."""

    def setUp(self):
        """Build an ElectronCooling instance at collisional-radiative equilibrium."""
        ad = atomic.element('li')
        eq = atomic.CollRadEquilibrium(ad)
        self.temperature = np.logspace(0, 3, 50)  # log-spaced temperature grid
        self.electron_density = 1e19
        # y is a FractionalAbundance object.
        y = eq.ionisation_stage_distribution(self.temperature,
                                             self.electron_density)
        self.elc = atomic.ElectronCooling(y, neutral_fraction=1e-2)

    def test_keys(self):
        """Makes sure ElectronCooling has all the right keys"""
        expected = ['ionisation', 'recombination',
                    'cx_power', 'line_power',
                    'continuum_power', 'rad_total',
                    'total']
        result = self.elc.power.keys()
        self.assertCountEqual(expected, result)

    def test_rad_total(self):
        """Tests that rad_total is what I think it is."""
        p = self.elc.power
        expected = p['rad_total']
        # rad_total must be the sum of the three radiative channels
        result = p['line_power'] + p['cx_power'] + p['continuum_power']
        np.testing.assert_allclose(expected, result)

    def test_equilbrium(self):
        """Test that ionisation and recombination powers are opposite.

        Hence, total = rad_total.
        """
        ion = self.elc.power['ionisation']
        negrecomb = -self.elc.power['recombination']
        total = self.elc.power['total']
        rad_total = self.elc.power['rad_total']
        np.testing.assert_allclose(ion, negrecomb)
        np.testing.assert_allclose(total, rad_total)


if __name__ == '__main__':
    unittest.main()
| [
{
"point_num": 1,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},... | 3 | atomic/tests/test_electron_cooling.py | ezekial4/atomic_neu |
#! python3
# aoc_01.py
# Advent of code:
# https://adventofcode.com/2021/day/1
# https://adventofcode.com/2021/day/1#part2
# download input data (optional, for future use)
# Count depth increase (if current num is > last: ++)
# return number of depth increases
def aoc_count_depth_increase(aoc_input):
    """Count how many lines of *aoc_input* are strictly greater than the
    previous line (Advent of Code 2021 day 1, part 1).

    aoc_input: path to a text file with one integer per line.
    Returns the number of increases; 0 for an empty file.

    Fixes vs. original: no longer shadows the builtin ``input``; replaces
    the ``cnt = -1`` hack (which assumed the first value is positive and
    returned -1 for an empty file) with an explicit first-line sentinel.
    """
    count = 0
    prev_depth = None
    with open(aoc_input, 'r') as infile:
        for line in infile:
            depth = int(line)
            if prev_depth is not None and depth > prev_depth:
                count += 1
            prev_depth = depth
    return count
#2nd challenge - better code as integers are converted in the beginning
def aoc_count_depth_sum_increase(aoc_input, winsize=3):
    """Count increases of the sliding-window sum of depths (AoC 2021 day 1,
    part 2).

    aoc_input: path to a text file with one integer per line.
    winsize: sliding window length (default 3).
    Returns the number of window sums strictly greater than the previous
    window sum; 0 when there are fewer than ``winsize`` values.

    Fixes vs. original: removes the leftover debug ``print`` calls and the
    ``cnt = -1`` hack that relied on the first window sum being positive.
    """
    count = 0
    prev_sum = None
    with open(aoc_input, 'r') as infile:
        depths = list(map(int, infile.readlines()))
    for i in range(len(depths) - winsize + 1):
        window_sum = sum(depths[i:i + winsize])
        if prev_sum is not None and window_sum > prev_sum:
            count += 1
        prev_sum = window_sum
    return count
# Ad-hoc smoke run (executes at import time; expects the AoC input files to
# sit next to this script).
print("Hello World!")
print(aoc_count_depth_increase('aoc_01_example.txt'))
print(aoc_count_depth_increase('aoc_01_input.txt'))
print(aoc_count_depth_sum_increase('aoc_01_input.txt'))
| [
{
"point_num": 1,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than ... | 3 | aoc_01.py | ForestRupicolous/advent_of_code |
# Copyright 2017 Robert Csordas. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ==============================================================================
import os
import fcntl
class LockFile:
    """Advisory inter-process file lock based on POSIX ``fcntl.lockf``.

    Usable either via explicit acquire()/release() or as a context manager.
    """

    def __init__(self, fname):
        self._fname = fname
        self._fd = None

    def acquire(self):
        """Open (creating if needed) the lock file and block until the
        exclusive lock is held."""
        self._fd = open(self._fname, "w")
        # world-writable so other users' processes can share the lock file
        os.chmod(self._fname, 0o777)
        fcntl.lockf(self._fd, fcntl.LOCK_EX)

    def release(self):
        """Unlock and close the lock file."""
        fcntl.lockf(self._fd, fcntl.LOCK_UN)
        self._fd.close()
        self._fd = None

    def __enter__(self):
        self.acquire()
        # BUG FIX: the original returned None, so `with LockFile(p) as lock:`
        # bound `lock` to None instead of the LockFile instance.
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.release()
| [
{
"point_num": 1,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 2,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self... | 3 | Utils/lockfile.py | RobertCsordas/dnc |
import json
import pytest
from tests import async_mock
from ariespython import did
@pytest.mark.parametrize(
    'mocked_method, method_to_test, before_dict, after_dict',
    [
        ('indy.did.create_and_store_my_did', did.create_and_store_my_did, [], []),
        ('indy.did.create_key', did.create_key, [], []),
        ('indy.did.store_their_did', did.store_their_did, [], []),
        ('indy.did.set_key_metadata', did.set_key_metadata, ['vk'], []),
        ('indy.did.set_did_metadata', did.set_did_metadata, ['did'], [])
    ]
)
@pytest.mark.asyncio
async def test_dumping_dict(mocked_method, method_to_test, before_dict, after_dict):
    """Each wrapper must JSON-serialize its dict argument before delegating
    to the mocked indy function (positional args around the dict vary per
    wrapper, hence before_dict/after_dict)."""
    with async_mock(
        mocked_method,
    ) as method:
        did_dict = {
            'blah': 'blah'
        }
        await method_to_test(1, *before_dict, did_dict, *after_dict)
        # the dict must arrive at indy as a JSON string, not a dict
        method.assert_called_once_with(1, *before_dict, json.dumps(did_dict), *after_dict)
@pytest.mark.parametrize(
    'mocked_method, method_to_test, params',
    [
        ('indy.did.get_did_metadata', did.get_did_metadata, [1, 'did']),
        ('indy.did.get_key_metadata', did.get_key_metadata, [1, 'vk'])
    ]
)
@pytest.mark.asyncio
async def test_loading_dict(mocked_method, method_to_test, params):
    """Each getter wrapper must parse the JSON string returned by indy and
    hand the caller a real dict."""
    with async_mock(mocked_method) as method:
        method.return_value = json.dumps({'testing': 'test'})
        loaded_dict = await method_to_test(*params)
        assert isinstance(loaded_dict, dict)
        assert 'testing' in loaded_dict
        assert loaded_dict['testing'] == 'test'
| [
{
"point_num": 1,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docs... | 3 | tests/did/test_did.py | dbluhm/aries-sdk-python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
- author: Lkeme
- contact: Useri@live.cn
- file: AsyncioLoop
- time: 2019/9/18 12:54
- desc: 兼容不同系统的协程逻辑
"""
import asyncio
import platform
# 自动判断类型生成loop
def switch_sys_loop():
sys_type = platform.system()
if sys_type == "Windows":
loop = asyncio.ProactorEventLoop()
asyncio.set_event_loop(loop)
else:
loop = asyncio.get_event_loop()
return loop
# 逻辑
async def main_loop(thread):
print(f"当前是第 {thread} 个协程")
# 运行
def run():
threads = 10
# 获取loop
loop = switch_sys_loop()
# 协程任务
task_work = [
main_loop(thread) for thread in range(threads)
]
loop.run_until_complete(asyncio.wait(task_work))
loop.close()
if __name__ == '__main__':
run()
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": fals... | 3 | AsyncioLoop.py | lkeme/py-common-code |
from functools import wraps
from os import environ
from backends.exceptions import ErrorException
def wrap_exception(exception_type, error_message):
    """Decorator factory: convert *exception_type* raised by the wrapped
    callable into ErrorException(*error_message*), chaining the original
    exception as the cause."""
    def decorate(func):
        @wraps(func)
        def guarded(*args, **kwargs):
            try:
                return func(*args, **kwargs)
            except exception_type as ex:
                raise ErrorException(error_message) from ex
        return guarded
    return decorate
def getenv_required(key):
    """Return the value of environment variable *key*; raise ErrorException
    when it is not set."""
    try:
        return environ[key]
    except KeyError:
        message = 'Required environment variable %s not set' % key
        raise ErrorException(message)
def getenv_int(key, default):
    """Return environment variable *key* as an int, *default* when unset;
    raise ErrorException when the value is not an integer."""
    if key not in environ:
        return default
    value = environ[key]
    try:
        return int(value)
    except ValueError:
        raise ErrorException(
            'Environment variable %s with value %s '
            'is not convertible to int' % (key, value))
| [
{
"point_num": 1,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (excluding self/cls)?",
"answer": true
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written... | 3 | backends/utils.py | DaKnOb/TorPaste |
import unittest
import utils
# O(n) time. O(n) space. Iteration.
class Solution:
    """Add two non-negative decimal numbers given as strings, without
    converting the whole numbers to int. O(n) time, O(n) space."""

    def addStrings(self, num1: str, num2: str) -> str:
        # Work least-significant-digit first on reversed digit lists.
        digits1 = [ord(c) - ord('0') for c in reversed(num1)]
        digits2 = [ord(c) - ord('0') for c in reversed(num2)]
        if len(digits1) < len(digits2):
            digits1, digits2 = digits2, digits1
        # Pad the shorter operand so zip covers every position.
        digits2 += [0] * (len(digits1) - len(digits2))
        out = []
        carry = 0
        for a, b in zip(digits1, digits2):
            carry, digit = divmod(a + b + carry, 10)
            out.append(chr(digit + ord('0')))
        if carry:
            out.append('1')
        return ''.join(reversed(out))
class Test(unittest.TestCase):
    """Runs the shared data-driven test harness against Solution."""

    def test(self):
        # utils.test loads fixture cases named after this file's path
        utils.test(self, __file__, Solution)


if __name__ == '__main__':
    unittest.main()
| [
{
"point_num": 1,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
... | 3 | problems/test_0415_iteration.py | chrisxue815/leetcode_python |
import os
import sys
sys.path.append(os.path.abspath('.'))
def install():
    """Build and register the 'Sort Files' directory-background context menu
    using the ``context_menu`` package, wiring each entry to a handler in
    ``modules``."""
    from context_menu import menus
    import modules
    # NOTE(review): pyLoc and scriptLoc are computed but never used below —
    # confirm whether they are leftovers from an earlier command-based setup.
    pyLoc = sys.executable  # .replace('python.exe', 'pythonw.exe')
    scriptLoc = os.path.join(os.path.dirname(
        os.path.realpath(__file__)), 'main.py')  # Location of parser to be called
    menu = menus.ContextMenu('Sort Files', type='DIRECTORY_BACKGROUND')
    extractCommand = menus.ContextCommand('Extract Files', python=modules.handleExtract)
    extensionCommand = menus.ContextCommand('Sort by Extension', python=modules.handleExtension)
    typeCommand = menus.ContextCommand('Sort by Type', python=modules.handleType)
    # the date submenu dispatches to one handler with a granularity parameter
    dateMenu = menus.ContextMenu('Sort By Date')
    dateMenu.add_items([
        menus.ContextCommand('Day', python=modules.handleDate, params='D'),
        menus.ContextCommand('Month', python=modules.handleDate, params='M'),
        menus.ContextCommand('Year', python=modules.handleDate, params='Y')
    ])
    menu.add_items([
        extractCommand,
        extensionCommand,
        typeCommand,
        dateMenu
    ])
    menu.compile()  # applies/registers the menu (platform-specific)
def uninstall():
    """Remove the 'Sort Files' context menu registered by install()."""
    from context_menu import menus
    menus.removeMenu('Sort Files', 'DIRECTORY_BACKGROUND')


# NOTE(review): install() runs unconditionally at import time; the commented
# guard below suggests it previously required admin elevation — confirm intent.
install()
# if __name__ == "__main__":
#     if is_admin():
#         install()
#     else:
#         ctypes.windll.shell32.ShellExecuteW(
#             None, "runas", sys.executable, __file__, None, 1)
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": fal... | 3 | output/freshen/reginstall.py | saleguas/freshen-file-sorter |
from sqlalchemy import Column, create_engine, DateTime, String
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import scoped_session, sessionmaker
Base = declarative_base()
class Pet(Base):
    """SQLAlchemy model for a pet record."""
    __tablename__ = "pets"
    id = Column(String(20), primary_key=True)
    name = Column(String(100))
    animal_type = Column(String(20))
    created = Column(DateTime())

    def update(self, id=None, name=None, animal_type=None, tags=None, created=None):
        """Apply the given non-None fields to this row.

        NOTE(review): the ``id`` and ``tags`` parameters are accepted but
        never applied — presumably they mirror an API schema; confirm.
        """
        if name is not None:
            self.name = name
        if animal_type is not None:
            self.animal_type = animal_type
        if created is not None:
            self.created = created

    def dump(self):
        """Return a dict of this row's mapped attributes, dropping
        SQLAlchemy internals (underscore-prefixed keys)."""
        return {k: v for k, v in vars(self).items() if not k.startswith("_")}
def init_db(uri):
    """Create the engine for *uri*, create all mapped tables, and return a
    scoped session bound to it (also installed as ``Base.query``).

    NOTE(review): ``convert_unicode`` was removed in SQLAlchemy 1.4 — this
    call only works on 1.3 and earlier; confirm the pinned version.
    """
    engine = create_engine(uri, convert_unicode=True)
    db_session = scoped_session(sessionmaker(autocommit=False, autoflush=False, bind=engine))
    Base.query = db_session.query_property()
    Base.metadata.create_all(bind=engine)
    return db_session
| [
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
},
{
"point_num": 2,
"id": "no_function_exceeds_5_params",
"question": "Does every function in this file take 5 or fewer parameters (exclu... | 3 | examples/swagger2/sqlalchemy/orm.py | athenianco/especifico |
import sys
from weasyprint import HTML
def get_file_names():
    """Resolve (html_file_name, pdf_file_name) from sys.argv.

    argv[1] is the input HTML file; argv[2], when present, overrides the
    default output name of ``<input>.pdf``. Exits with status 1 when no
    input file was given.
    """
    if len(sys.argv) < 2:
        print('HTML file name not provided')
        # BUG FIX: the original evaluated the builtin `exit` without calling
        # it (a no-op) and then crashed with IndexError on sys.argv[1].
        sys.exit(1)
    html_file_name = sys.argv[1]
    pdf_file_name = html_file_name + '.pdf'
    if len(sys.argv) >= 3:
        pdf_file_name = sys.argv[2]
    return (html_file_name, pdf_file_name)
def generate_pdf_from_html(HTML_file_name, pdf_file_name):
    """Render the HTML document at *HTML_file_name* into *pdf_file_name*
    using WeasyPrint."""
    HTML(HTML_file_name).write_pdf(pdf_file_name)
def main():
    """CLI entry point: resolve file names from argv, then convert."""
    HTML_file_name, pdf_file_name = get_file_names()
    generate_pdf_from_html(HTML_file_name, pdf_file_name)


if __name__ == '__main__':
    main()
| [
{
"point_num": 1,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answer": true
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answ... | 3 | html_to_pdf.py | Shawn-Xinhai/html_to_pdf |
"""
The Program receives from the USER an INTEGER
and displays if it’s an ODD or EVEN number.
"""
# START Definition of FUNCTIONS
def valutaIntPositive(numero):
    """Return True when *numero* is a string of digits other than "0"
    (i.e. reads as a positive integer), False otherwise."""
    return numero.isdigit() and numero != "0"
def evenOrOdd(number):
    """Return "EVEN" when *number* is divisible by 2, "ODD" otherwise."""
    return "EVEN" if number % 2 == 0 else "ODD"
# END Definition of FUNCTIONS
# --- Script body: prompt until a positive integer is entered, then report
# whether it is even or odd. ---
# Acquisition and Control of the DATA entered by the USER
numberInt = input("Enter an INTEGER number: ")
numberIntPositive = valutaIntPositive(numberInt)
while not(numberIntPositive):
    print("Incorrect entry. Try again.")
    numberInt = input("Enter an INTEGER number: ")
    numberIntPositive = valutaIntPositive(numberInt)
# Conversion STR -> INT (safe: the loop above guarantees digits only)
numberInt = int(numberInt)
# Valuation EVEN or ODD number
typeNumber = evenOrOdd(numberInt)
# Displaying the RESULT (formatted)
print("The NUMBER " + str(numberInt) + " is " + typeNumber)
| [
{
"point_num": 1,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"ans... | 3 | chap_02/exe_035_even_odd.py | aleattene/python-workbook |
import graphene
from graphene_django.types import DjangoObjectType
from graphene_django.filter import DjangoFilterConnectionField
from graphql_relay.node.node import from_global_id
from . import models
class TransactionNode(DjangoObjectType):
    """Relay node for the Transaction model; filterable by exact payee and
    by date (exact and range lookups)."""
    class Meta:
        model = models.Transaction
        filter_fields = {
            'payee': ['exact'],
            'date': ['exact', 'lt', 'lte', 'gt', 'gte']
        }
        interfaces = (graphene.relay.Node, )
class EntryNode(DjangoObjectType):
    """Relay node for the Entry model; filterable by account and cleared flag."""
    class Meta:
        model = models.Entry
        filter_fields = ['account', 'is_cleared']
        interfaces = (graphene.relay.Node, )
class Query(object):
    """GraphQL query mixin exposing the transaction list of one ledger."""
    transaction_list = DjangoFilterConnectionField(
        TransactionNode,
        ledger_id=graphene.ID(required=True)
    )

    def resolve_transaction_list(self, info, **kwargs):
        """Resolve transactions of the given ledger, restricted to ledgers
        created by the requesting user, newest date first."""
        # decode the relay global id into (node type, database id)
        node, ledger_id = from_global_id(kwargs.get('ledger_id'))
        # NOTE(review): `assert` is stripped under `python -O`; consider
        # raising an explicit error for a non-ledger id instead.
        assert node == 'LedgerNode'
        return models.Transaction.objects.filter(
            ledger_id=ledger_id,
            ledger__creator=info.context.user
        ).order_by('-date', 'id')
| [
{
"point_num": 1,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": false
},
{
"point_num": 2,
"id": "every_class_has_docstring",
"question": "Does every class in this file have a docstring?",
"answer": false
},
{... | 3 | backend/transaction/schema.py | elielagmay/react-budgeteer |
from models.todo import Todo
from repositories.todo_repository import todo_repository as default_todo_repository
class TodoService:
    """Application-service facade over the todo repository."""

    def __init__(self, todo_repository=default_todo_repository):
        # repository is injectable for tests; defaults to the module singleton
        self._todo_repository = todo_repository

    def create_todo(self, content, done=False):
        """Persist and return a new Todo with the given content and done flag."""
        return self._todo_repository.create(Todo(content=content, done=done))

    def get_all_todos(self):
        """Return every stored todo."""
        return self._todo_repository.find_all()

    def delete_todo(self, todo_id):
        """Delete the todo with *todo_id*; returns the repository's result."""
        return self._todo_repository.delete_by_id(todo_id)

    def delete_all_todos(self):
        """Delete all stored todos."""
        return self._todo_repository.delete_all()


# module-level singleton used as the default service instance
todo_service = TodoService()
| [
{
"point_num": 1,
"id": "all_params_annotated",
"question": "Does every function parameter in this file have a type annotation (excluding self/cls)?",
"answer": false
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written ... | 3 | src/services/todo_service.py | ohjelmistotuotanto-hy/todo-web |
from heapq import heappush, nsmallest
import numpy as np
class NearestNeighbor():
    """Brute-force nearest-neighbour lookup over a word-embedding table.

    embeddings: mapping word -> numpy vector
    encodings:  vocabulary helper exposing ``word_in_vocab(word)``
    config:     settings object exposing ``unknown_word``
    """

    def __init__(self, embeddings, encodings, config):
        self.embeddings = embeddings
        self.encodings = encodings
        self.config = config

    def euclidian_distance(self, e1, e2):
        '''
        Euclidean distance between two vectors.
        https://stackoverflow.com/questions/1401712/how-can-the-euclidean-distance-be-calculated-with-numpy
        '''
        return np.linalg.norm(e1 - e2)

    def get_embedding(self, word):
        """Return the embedding for *word*, falling back to the
        unknown-word embedding for out-of-vocabulary words."""
        if self.encodings.word_in_vocab(word):
            return self.embeddings[word]
        # BUG FIX: the original referenced the undefined global `config`
        # (NameError on any OOV word); the settings object lives on self.
        return self.embeddings[self.config.unknown_word]

    def nearest_neighbors(self, word, count=1):
        """Return the *count* closest (distance, word) pairs to *word*,
        excluding the word itself.

        TODO: keep only the best `count` candidates (e.g. heapq.nsmallest
        over a generator) instead of pushing every distance onto the heap.
        """
        embedding = self.get_embedding(word)
        heap = []
        for w in self.embeddings:
            if w == word:
                continue
            dist = self.euclidian_distance(embedding, self.embeddings[w])
            heappush(heap, (dist, w))
        return nsmallest(count, heap)
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true
},
{
"point_num": 2,
"id": "all_function_names_snake_case",
"question": "Are all function names in this file written in snake_case?",
"answ... | 3 | Word2Vec/NearestNeighbor.py | bi3mer/Word2Vec |
import distutils.command.bdist_rpm as orig
class bdist_rpm(orig.bdist_rpm):
    """
    Override the default bdist_rpm behavior to do the following:

    1. Run egg_info to ensure the name and version are properly calculated.
    2. Always run 'install' using --single-version-externally-managed to
       disable eggs in RPM distributions.
    """

    def run(self):
        """Build the RPM, refreshing egg metadata first."""
        # ensure distro name is up-to-date
        self.run_command("egg_info")
        orig.bdist_rpm.run(self)

    def _make_spec_file(self):
        """Post-process the distutils-generated spec file: disable eggs on
        install and make %setup use the unmangled source directory name."""
        spec = orig.bdist_rpm._make_spec_file(self)
        spec = [
            line.replace(
                "setup.py install ",
                "setup.py install --single-version-externally-managed ",
            ).replace("%setup", "%setup -n %{name}-%{unmangled_version}")
            for line in spec
        ]
        return spec
| [
{
"point_num": 1,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true... | 3 | python3-virtualenv/lib/python3.8/site-packages/setuptools/command/bdist_rpm.py | bbalkaransingh23888/OrientationHack |
from typing import Iterable
__all__ = ['in_', 'not_in', 'exists', 'not_exists', 'equal', 'not_equal']


class Operator:
    """Base selector operator: encode() renders ``<key><op><value>``."""

    def __init__(self, op_name: str, op: str, value=None):
        self.op = op
        self.value = value
        self.op_name = op_name

    def encode(self, key):
        """Render this operator applied to *key* as a selector fragment."""
        return f"{key}{self.op}{self.value}"


class SequenceOperator(Operator):
    """Operator over a sequence of values: ``key op (v1,v2,...)``."""

    def encode(self, key):
        joined = ','.join(self.value)
        return f"{key} {self.op} ({joined})"


class BinaryOperator(Operator):
    """key/value operator such as ``=`` or ``!=``; base encoding suffices."""


class UnaryOperator(Operator):
    """Value-less operator rendered as a prefix: ``<op><key>``."""

    def encode(self, key):
        return f"{self.op}{key}"


def in_(values: Iterable) -> SequenceOperator:
    """Selector matching when the key's value is one of *values*."""
    return SequenceOperator('in_', 'in', sorted(values))


def not_in(values: Iterable) -> SequenceOperator:
    """Selector matching when the key's value is none of *values*."""
    return SequenceOperator('not_in', 'notin', sorted(values))


def exists() -> UnaryOperator:
    """Selector matching when the key is present."""
    return UnaryOperator('exists', '')


def not_exists() -> UnaryOperator:
    """Selector matching when the key is absent."""
    return UnaryOperator('not_exists', '!')


def equal(value: str) -> BinaryOperator:
    """Selector matching when the key equals *value*."""
    return BinaryOperator('equal', '=', value)


def not_equal(value: str) -> BinaryOperator:
    """Selector matching when the key differs from *value*."""
    return BinaryOperator('not_equal', '!=', value)
| [
{
"point_num": 1,
"id": "every_class_has_docstring",
"question": "Does every class in this file have a docstring?",
"answer": false
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},
{
... | 3 | lightkube/operators.py | addyess/lightkube |
# coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import subprocess
from pants.backend.core.tasks.task import Task
from pants.base.exceptions import TaskError
from pants.contrib.cpp.targets.cpp_binary import CppBinary
from pants.contrib.cpp.targets.cpp_library import CppLibrary
from pants.contrib.cpp.targets.cpp_target import CppTarget
from pants.contrib.cpp.toolchain.cpp_toolchain import CppToolchain
class CppTask(Task):
    """Common base for C++ build tasks: target classification helpers,
    toolchain access and subprocess execution with workunit-captured output."""

    @staticmethod
    def is_cpp(target):
        """True for any C++ target (library or binary)."""
        return isinstance(target, CppTarget)

    @staticmethod
    def is_library(target):
        """True for C++ library targets."""
        return isinstance(target, CppLibrary)

    @staticmethod
    def is_binary(target):
        """True for C++ binary targets."""
        return isinstance(target, CppBinary)

    @classmethod
    def register_options(cls, register):
        """Register the --compiler option (fingerprinted: it affects cache keys)."""
        super(CppTask, cls).register_options(register)
        register('--compiler', advanced=True, fingerprint=True,
                 help='Set a specific compiler to use (eg, g++-4.8, clang++)')

    def execute(self):
        """Abstract: subclasses perform their build step here."""
        raise NotImplementedError('execute must be implemented by subclasses of CppTask')

    def run_command(self, cmd, workunit):
        """Run *cmd*, piping stdout/stderr into the workunit's outputs;
        raise TaskError on any failure."""
        try:
            self.context.log.debug('Executing: {0}'.format(cmd))
            # TODO: capture stdout/stderr and redirect to log
            subprocess.check_call(cmd, stdout=workunit.output('stdout'), stderr=workunit.output('stderr'))
        except subprocess.CalledProcessError as e:
            raise TaskError('Execution failed: {0}'.format(e))
        except:
            # NOTE(review): bare except also catches KeyboardInterrupt and
            # SystemExit — consider `except Exception`.
            raise TaskError('Failed to execute {0}'.format(cmd))

    @property
    def cpp_toolchain(self):
        """A CppToolchain configured with this task's --compiler option."""
        return CppToolchain(self.get_options().compiler)
| [
{
"point_num": 1,
"id": "has_nested_function_def",
"question": "Does this file contain any function defined inside another function?",
"answer": false
},
{
"point_num": 2,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answ... | 3 | contrib/cpp/src/python/pants/contrib/cpp/tasks/cpp_task.py | areitz/pants |
import pickle as plk
from sklearn import svm
from wsicolorfilter.filter import Filter
class SvmFilter(Filter):
    """Pixel-wise filter backed by a linear SVM classifier."""

    def create_model(self):
        """Instantiate an untrained linear SVM."""
        return svm.LinearSVC()

    def train_model(self, x, y):
        """Fit the SVM on feature matrix *x* and label vector *y*."""
        self.model.fit(x, y)

    def predict(self, img):
        """Classify every pixel of *img* (H, W, C); return an (H, W) mask
        with predicted labels shifted down by 1."""
        height, width, channels = img.shape
        # flatten the bitmap into one row per pixel for the classifier
        flat_pixels = img.reshape((height * width, channels))
        mask = self.model.predict(flat_pixels)
        mask = mask.reshape(height, width)
        mask -= 1  # normalize output so labels start at 0
        return mask

    def load_model(self, file_name='svm_filter.npy'):
        """Deserialize a previously saved model from *file_name*."""
        with open(file_name, 'rb') as model_file:
            self.model = plk.load(model_file)

    def save_model(self, file_name='svm_filter.npy'):
        """Serialize the trained model to *file_name*."""
        with open(file_name, 'wb') as model_file:
            plk.dump(self.model, model_file)
| [
{
"point_num": 1,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 2,
"id": "more_functions_than_classes",
"question": "Does this file define more functions than classes?",
"answer": true
},... | 3 | wsicolorfilter/svm_filter.py | mjirik/wsicolorfilter |
#
# Copyright (c) 2017 Intel Corporation
# SPDX-License-Identifier: BSD-2-Clause
#
import numba
import numpy as np
import argparse
import time
@numba.njit()
def linear_regression(Y, X, w, iterations, alphaN):
    """Run `iterations` steps of batch gradient descent on the weights.

    Updates (and returns) `w` using the squared-error gradient, with the
    learning rate already pre-scaled into `alphaN` by the caller.
    """
    for _ in range(iterations):
        gradient = np.dot(X.T, np.dot(X, w) - Y)
        w -= alphaN * gradient
    return w
def main():
    """Parse CLI options, run the regression benchmark, and report timing."""
    parser = argparse.ArgumentParser(description='Linear Regression.')
    parser.add_argument('--samples', dest='samples', type=int, default=200000)
    parser.add_argument('--features', dest='features', type=int, default=10)
    parser.add_argument('--functions', dest='functions', type=int, default=4)
    parser.add_argument('--iterations', dest='iterations', type=int, default=20)
    args = parser.parse_args()

    num_samples = args.samples
    num_features = args.features
    num_functions = args.functions
    # Learning rate scaled down by the sample count.
    step = 0.01 / num_samples

    weights = np.zeros((num_features, num_functions))
    np.random.seed(0)  # fixed seed so the printed checksum is reproducible
    points = np.random.random((num_samples, num_features))
    labels = np.random.random((num_samples, num_functions))

    start = time.time()
    weights = linear_regression(labels, points, weights, args.iterations, step)
    selftimed = time.time() - start
    print("SELFTIMED ", selftimed)
    print("checksum: ", np.sum(weights))


if __name__ == '__main__':
    main()
| [
{
"point_num": 1,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_has_docstring",
"question": "Does every function in this file have a docstring?",
"answer": fal... | 3 | examples/linear_regression/linear_regression_numba.py | uw-ipd/numba |
import pytest
from unittest import TestCase
from unittest.mock import patch
from web_crawler.utils.singleton import Singleton
from web_crawler.utils.web_crawler_logger import WebCrawlerLogger
@pytest.mark.unit_tests
@patch('web_crawler.utils.web_crawler_logger.logging')
class TestWebCrawlerLogger(TestCase):
    """Unit tests for the WebCrawlerLogger singleton wrapper."""

    def setUp(self):
        # Drop any cached singleton so each test constructs a fresh logger.
        # pop() with a default replaces the old try/except KeyError: pass.
        Singleton._instances.pop(WebCrawlerLogger, None)

    def test_init(self, logging_mock):
        """Constructing twice yields one instance and one configured logger."""
        # When
        first = WebCrawlerLogger()
        second = WebCrawlerLogger()

        # Then
        assert first is second
        logging_mock.getLogger.assert_called_once_with('web-crawler')
        logging_mock.getLogger.return_value.setLevel.assert_called_once_with(logging_mock.DEBUG)

        # Console handler is created, configured, and attached exactly once.
        logging_mock.StreamHandler.assert_called_once_with()
        logging_mock.StreamHandler.return_value.setLevel.assert_called_once_with(logging_mock.DEBUG)
        logging_mock.StreamHandler.return_value.setFormatter.assert_called_once_with(logging_mock.Formatter.return_value)
        logging_mock.getLogger.return_value.addHandler.assert_called_once_with(logging_mock.StreamHandler.return_value)

    def test_logger_returned(self, logging_mock):
        """get_logger() exposes the module-level logger object."""
        assert logging_mock.getLogger.return_value == WebCrawlerLogger().get_logger()
| [
{
"point_num": 1,
"id": "has_multiple_inheritance",
"question": "Does any class in this file use multiple inheritance?",
"answer": false
},
{
"point_num": 2,
"id": "every_function_under_20_lines",
"question": "Is every function in this file shorter than 20 lines?",
"answer": true... | 3 | tests/unit/utils/test_web_crawler_logger.py | tul1/py-webcrawler |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.