content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
import os
from oauthlib.oauth2 import WebApplicationClient
GOOGLE_CLIENT_ID = os.environ.get('GOOGLE_CLIENT_ID', None)
GOOGLE_CLIENT_SECRET = os.environ.get('GOOGLE_CLIENT_SECRET', None)
GOOGLE_DISCOVERY_URL = (
"https://accounts.google.com/.well-known/openid-configuration"
)
# OAuth 2 client setup
client = WebApplicationClient(GOOGLE_CLIENT_ID)
| [
11748,
28686,
198,
6738,
267,
18439,
8019,
13,
12162,
1071,
17,
1330,
5313,
23416,
11792,
198,
198,
38,
6684,
38,
2538,
62,
5097,
28495,
62,
2389,
796,
28686,
13,
268,
2268,
13,
1136,
10786,
38,
6684,
38,
2538,
62,
5097,
28495,
62,
... | 2.458333 | 144 |
from django.db import models, migrations
| [
6738,
42625,
14208,
13,
9945,
1330,
4981,
11,
15720,
602,
628
] | 3.818182 | 11 |
#!/usr/bin/env python3
# coding: utf8
##
# HectorHardware.py API class for Hector9000 hardware
#
# imports
from __future__ import division
devEnvironment = True
import time
import sys
import Adafruit_PCA9685
from HectorConfig import config
# Uncomment to enable debug output.
import logging
if not devEnvironment:
import RPi.GPIO as GPIO
from hx711 import HX711
logging.basicConfig(level=logging.DEBUG)
# Helper function to make setting a servo pulse width simpler.
# end class HectorHardware
if __name__ == "__main__":
if not devEnvironment:
hector = HectorHardware(config)
hector.ping(int(sys.argv[1]))
hector.finger(0)
if __name__ == "XX__main__":
if not devEnvironment:
hector = HectorHardware(config)
hector.finger(0)
hector.arm_in()
for i in range(hector.numValves):
print("close valve %d = channel %d" % (i, hector.valveChannels[i]))
hector.valve_close(hector.valveChannels[i])
input("Bitte Glas auf die Markierung stellen")
# hector.ping(1)
hector.arm_out()
hector.valve_dose(1, 100)
hector.valve_dose(3, 20)
hector.finger(1)
hector.valve_dose(11, 100)
hector.arm_in()
hector.ping(3)
hector.finger(0)
hector.cleanAndExit()
print("done.")
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
19617,
25,
3384,
69,
23,
198,
2235,
198,
2,
197,
39,
9250,
49865,
13,
9078,
197,
197,
17614,
1398,
329,
42621,
24,
830,
6890,
198,
2,
198,
2,
17944,
198,
6738,
11593,
37443,... | 2.277778 | 594 |
import pytest
from ormik import fields, FieldError
class MockModel(object):
""" Mock Model """
_pk = fields.AutoField(name='id')
@classmethod
| [
11748,
12972,
9288,
198,
198,
6738,
393,
76,
1134,
1330,
7032,
11,
7663,
12331,
628,
198,
4871,
44123,
17633,
7,
15252,
2599,
198,
220,
220,
220,
37227,
44123,
9104,
37227,
628,
220,
220,
220,
4808,
79,
74,
796,
7032,
13,
27722,
15878... | 2.892857 | 56 |
import numpy as np
from dnnv.nn.graph import OperationGraph
from dnnv.nn.layers import InputLayer
from dnnv.nn.operations import *
| [
11748,
299,
32152,
355,
45941,
198,
198,
6738,
288,
20471,
85,
13,
20471,
13,
34960,
1330,
14680,
37065,
198,
6738,
288,
20471,
85,
13,
20471,
13,
75,
6962,
1330,
23412,
49925,
198,
6738,
288,
20471,
85,
13,
20471,
13,
3575,
602,
1330... | 3 | 45 |
from django.contrib import admin
from luz.conta.models import Conta
@admin.register(Conta)
| [
6738,
42625,
14208,
13,
3642,
822,
1330,
13169,
198,
198,
6738,
300,
10277,
13,
3642,
64,
13,
27530,
1330,
2345,
64,
628,
198,
31,
28482,
13,
30238,
7,
4264,
64,
8,
198
] | 2.9375 | 32 |
# When you create a new test file, make sure to add it here.
# Simply import the class from your file, and then add that class to the '__all__' array.
from game.test_suite.tests.test_example import TestExample
__all__ = [
'TestExample'
] | [
2,
1649,
345,
2251,
257,
649,
1332,
2393,
11,
787,
1654,
284,
751,
340,
994,
13,
198,
2,
17973,
1330,
262,
1398,
422,
534,
2393,
11,
290,
788,
751,
326,
1398,
284,
262,
705,
834,
439,
834,
6,
7177,
13,
198,
198,
6738,
983,
13,
... | 3.24 | 75 |
#!/usr/bin/python3
import pandas as pd
import pickle
import sys
# Simple script that takes in the Canada Post .add file
# and outputs a pickled data frame.
#
# Input/Output files must be specified on the command line
# convert_canada_add.py input_file output_file
#
# File must conform to specification located at:
# https://www.canadapost.ca/cpo/mc/assets/pdf/business/postalcodetechspecs_en.pdf
#TODOs
street_name_conv = {'DR':('Drive')}
street_name_conv2 = {'AL':'Alley', 'AV':'Avenue', 'BA': 'Bay', 'BV':'Boulevard', 'CA':'Cape',
'CE':'Centre', 'CI':'Circle', 'CL':'Close', 'CM':'Common', 'CO':'Court', 'CR':'Crescent',
'CV':'Cove', 'DR':'Drive', 'GA':'Gate', 'GD':'Gardens', 'GR':'Green', 'GV':'Grove',
'HE':'Heath', 'HI':'Highway', 'HL':'Hill', 'HT':'Heights', 'IS':'Island', 'LD':'Landing',
'LI':'Link', 'LN':'Lane', 'ME':'Mews', 'MR':'Manor', 'MT':'Mount', 'PA':'Park',
'PH':'Path', 'PL':'Place', 'PR':'Parade', 'PS':'Passage', 'PT':'Point', 'PY':'Parkway',
'PZ':'Plaza', 'RD':'Road', 'RI':'Rise', 'RO':'Row', 'SQ':'Square', 'ST':'Street',
'TC':'Terrace', 'TR':'Trail', 'VI':'Villas', 'VW':'View', 'WK':'Walk', 'WY':'Way'}
col_types={'latitude':object, 'longitude':object}
if len (sys.argv) != 3 :
print ('Input/Output files must be specified on the command line')
print ('# convert_canada_add.py input_file output_file')
exit(-1)
parcels = pd.read_csv(sys.argv[1], float_precision='high', dtype=col_types)
parcels = parcels.rename(str.upper, axis='columns')
parcels.HOUSE_ALPHA = parcels.HOUSE_ALPHA.fillna('')
parcels['STREET_TYPE_EXTENDED'] = parcels.STREET_TYPE.apply( lambda x: street_name_conv2[x]).str.upper()
parcels['ADDRESS_EXTENDED'] = parcels.HOUSE_NUMBER.map(str) + parcels.HOUSE_ALPHA + ' ' + \
parcels.STREET_NAME + ' ' + parcels.STREET_TYPE_EXTENDED + ' ' + \
parcels.STREET_QUAD
parcels = parcels[['ADDRESS', 'ADDRESS_EXTENDED', 'STREET_NAME', 'STREET_TYPE', 'STREET_TYPE_EXTENDED',
'STREET_QUAD', 'HOUSE_NUMBER', 'HOUSE_ALPHA', 'ADDRESS_TYPE', 'LONGITUDE', 'LATITUDE']]
# For debugging
#print (parcels.head(20))
#print (parcels.info())
pickle.dump( parcels, open( sys.argv[2], "wb" ) ) | [
2,
48443,
14629,
14,
8800,
14,
29412,
18,
198,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
2298,
293,
198,
11748,
25064,
198,
198,
2,
17427,
4226,
326,
2753,
287,
262,
3340,
2947,
764,
2860,
2393,
220,
198,
2,
290,
23862,
257,... | 2.265849 | 978 |
from django.conf import settings
SETTINGS = getattr(settings, 'ACTSTREAM_SETTINGS', {})
def get_action_manager():
"""
Returns the class of the action manager to use from ACTSTREAM_SETTINGS['MANAGER']
"""
mod = SETTINGS.get('MANAGER', 'actstream.managers.ActionManager')
mod_path = mod.split('.')
try:
return getattr(__import__('.'.join(mod_path[:-1]), {}, {},
[mod_path[-1]]), mod_path[-1])()
except ImportError:
raise ImportError(
'Cannot import %s try fixing ACTSTREAM_SETTINGS[MANAGER]'
'setting.' % mod
)
FETCH_RELATIONS = SETTINGS.get('FETCH_RELATIONS', True)
USE_JSONFIELD = SETTINGS.get('USE_JSONFIELD', False)
ACTION_MODEL = SETTINGS.get('ACTION_MODEL', 'acstream.Action')
| [
6738,
42625,
14208,
13,
10414,
1330,
6460,
628,
198,
28480,
51,
20754,
796,
651,
35226,
7,
33692,
11,
705,
10659,
2257,
32235,
62,
28480,
51,
20754,
3256,
23884,
8,
628,
198,
4299,
651,
62,
2673,
62,
37153,
33529,
198,
220,
220,
220,
... | 2.289398 | 349 |
# -*- coding:utf-8 -*-
# https://leetcode.com/problems/valid-number/description/ | [
2,
532,
9,
12,
19617,
25,
40477,
12,
23,
532,
9,
12,
198,
2,
3740,
1378,
293,
316,
8189,
13,
785,
14,
1676,
22143,
14,
12102,
12,
17618,
14,
11213,
14
] | 2.580645 | 31 |
# Copyright Amazon.com, Inc. or its affiliates.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file.
# This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific language governing permissions
# and limitations under the License.
import sys
import backoff
import logging
import boto3
import itertools
from gremlin_python import statics
from gremlin_python.structure.graph import Graph
from gremlin_python.process.graph_traversal import __
from gremlin_python.process.strategies import *
from gremlin_python.driver.driver_remote_connection import DriverRemoteConnection
from gremlin_python.driver.protocol import GremlinServerError
from gremlin_python.process.traversal import *
from neptune_python_utils.gremlin_utils import GremlinUtils
from neptune_python_utils.endpoints import Endpoints
from neptune_python_utils.mappings import Mappings
logging.getLogger('backoff').addHandler(logging.StreamHandler())
logger = logging.getLogger()
GremlinUtils.init_statics(globals())
| [
2,
15069,
6186,
13,
785,
11,
3457,
13,
393,
663,
29116,
13,
198,
2,
1439,
6923,
33876,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
11074,
198,
2,
921,
743,
407,
779,
428,
239... | 3.534946 | 372 |
# Generated by Django 2.2.4 on 2019-08-24 20:18
from django.db import migrations, models
| [
2,
2980,
515,
416,
37770,
362,
13,
17,
13,
19,
319,
13130,
12,
2919,
12,
1731,
1160,
25,
1507,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
628
] | 2.84375 | 32 |
# Generated by Django 2.0.5 on 2018-09-19 14:12
from django.db import migrations
| [
2,
2980,
515,
416,
37770,
362,
13,
15,
13,
20,
319,
2864,
12,
2931,
12,
1129,
1478,
25,
1065,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
628,
198
] | 2.709677 | 31 |
import copy
import datetime
from .attachment import Attachment
from .errors import BugException
VALID_STATUS = ["ASSIGNED", "NEW", "REOPENED", "RESOLVED", "UNCONFIRMED", "VERIFIED"]
VALID_RESOLUTION = ["DUPLICATE", "FIXED", "INACTIVE", "INCOMPLETE",
"INVALID", "MOVED", "WONTFIX", "WORKSFORME"]
ARRAY_TYPES = ["blocks", "cc", "cc_detail", "depends_on",
"flags", "groups", "keywords", "see_also"]
class Bug(object):
"""This represents a Bugzilla Bug"""
def __init__(self, bugsy=None, **kwargs):
"""
Defaults are set if there are no kwargs passed in. To pass in
a dict create the Bug object like the following
:param bugsy: Bugsy instance to use to connect to Bugzilla.
>>> bug = Bug(**myDict)
"""
self._bugsy = bugsy
self._bug = dict(**kwargs)
self._copy = dict(**kwargs)
self._bug['op_sys'] = kwargs.get('op_sys', 'All')
self._bug['product'] = kwargs.get('product', 'core')
self._bug['component'] = kwargs.get('component', 'general')
self._bug['platform'] = kwargs.get('platform', 'All')
self._bug['version'] = kwargs.get('version', 'unspecified')
def to_dict(self):
"""
Return the raw dict that is used inside this object
"""
return self._bug
def update(self):
"""
Update this object with the latest changes from Bugzilla
>>> bug.status
'NEW'
#Changes happen on Bugzilla
>>> bug.update()
>>> bug.status
'FIXED'
"""
if 'id' in self._bug:
result = self._bugsy.request('bug/%s' % self._bug['id'])
self._bug = dict(**result['bugs'][0])
self._copy = dict(**result['bugs'][0])
else:
raise BugException("Unable to update bug that isn't in Bugzilla")
def get_comments(self):
"""
Obtain comments for this bug.
Returns a list of Comment instances.
"""
bug = str(self._bug['id'])
res = self._bugsy.request('bug/%s/comment' % bug)
return [Comment(bugsy=self._bugsy, **comments) for comments
in res['bugs'][bug]['comments']]
def add_comment(self, comment):
"""
Adds a comment to a bug. If the bug object does not have a bug ID
(ie you are creating a bug) then you will need to also call `put`
on the :class:`Bugsy` class.
>>> bug.add_comment("I like sausages")
>>> bugzilla.put(bug)
If it does have a bug id then this will immediately post to the server
>>> bug.add_comment("I like eggs too")
More examples can be found at:
https://github.com/AutomatedTester/Bugsy/blob/master/example/add_comments.py
"""
# If we have a key post immediately otherwise hold onto it until
# put(bug) is called
if 'id' in self._bug:
self._bugsy.request('bug/{}/comment'.format(self._bug['id']),
method='POST', json={"comment": comment}
)
else:
self._bug['comment'] = comment
def get_attachments(self):
"""
Obtain comments for this bug.
Returns a list of Comment instances.
"""
bug = str(self._bug['id'])
res = self._bugsy.request(
'bug/%s/attachment' % bug,
)
return [Attachment(bugsy=self._bugsy, **attachments) for attachments
in res['bugs'][bug]]
def diff(self):
"""
Generates a dictionary containing only the changed values
Special handling of ARRAY_TYPES fields is required to only PUT changed objects
>>> bug.cc
['foo@bar.com']
>>> bug.cc.append('abc@xyz.com')
>>> bug.cc
['foo@bar.com', 'abc@xyz.com']
>>>bug.diff()
{'cc': {'added': ['abc@xyz.com']}}
"""
changed = {}
for key in self._bug:
if key not in ARRAY_TYPES:
if key not in self._copy or self._bug[key] != self._copy[key]:
changed[key] = self._bug[key]
elif key == 'flags':
if self._bug.get(key, []) != self._copy.get(key, []):
changed[key] = self._bug.get(key, [])
else:
values_now = set(self._bug.get(key, []))
values_orig = set(self._copy.get(key, []))
additions = list(values_now - values_orig)
subtractions = list(values_orig - values_now)
if additions or subtractions:
changed[key] = {}
if len(additions):
changed[key]['add'] = additions
if len(subtractions):
changed[key]['remove'] = subtractions
return changed
class Comment(object):
"""
Represents a single Bugzilla comment.
To get comments you need to do the following
>>> bugs = bugzilla.search_for.keywords("checkin-needed").search()
>>> comments = bugs[0].get_comments()
>>> # Returns the comment 0 of the first checkin-needed bug
>>> comments[0].text
"""
@property
def text(self):
r"""
Return the text that is in this comment
>>> comment.text # David really likes cheese apparently
"""
return self._comment['text']
@property
def id(self):
r"""
Return the comment id that is associated with Bugzilla.
"""
return self._comment['id']
@property
def attachment_id(self):
"""
If the comment was made on an attachment, return the ID of that
attachment. Otherwise it will return None.
"""
return self._comment['attachment_id']
@property
def author(self):
"""
Return the login name of the comment's author.
"""
return self._comment['author']
@property
def creator(self):
"""
Return the login name of the comment's author.
"""
return self._comment['creator']
@property
def bug_id(self):
"""
Return the ID of the bug that this comment is on.
"""
return self._comment['bug_id']
@property
def time(self):
"""
This is exactly same as :attr:`creation_time`.
For compatibility, time is still usable. However, please note
that time may be deprecated and removed in a future release.
Prefer :attr:`creation_time` instead.
"""
return self._comment['time']
@property
def creation_time(self):
"""
Return the time (in Bugzilla's timezone) that the comment was
added.
"""
return self._comment['creation_time']
@property
def is_private(self):
"""
Return True if this comment is private (only visible to a certain
group called the "insidergroup").
"""
return self._comment['is_private']
@property
def tags(self):
"""
Return a set of comment tags currently set for the comment.
"""
return self._comment['tags']
def add_tags(self, tags):
"""
Add tags to the comments
"""
if not isinstance(tags, list):
tags = [tags]
self._bugsy.request('bug/comment/%s/tags' % self._comment['id'],
method='PUT', json={"add": tags})
def remove_tags(self, tags):
"""
Add tags to the comments
"""
if not isinstance(tags, list):
tags = [tags]
self._bugsy.request('bug/comment/%s/tags' % self._comment['id'],
method='PUT', json={"remove": tags})
| [
11748,
4866,
198,
11748,
4818,
8079,
198,
198,
6738,
764,
1078,
15520,
1330,
3460,
15520,
198,
6738,
764,
48277,
1330,
15217,
16922,
198,
198,
23428,
2389,
62,
35744,
2937,
796,
14631,
10705,
16284,
1961,
1600,
366,
13965,
1600,
366,
2200... | 2.137968 | 3,769 |
import random
execute(100)
| [
11748,
4738,
628,
198,
41049,
7,
3064,
8,
198
] | 3.222222 | 9 |
HERO = {'B': 'Batman', 'J': 'Joker', 'R': 'Robin'}
OUTPUT = '{}: {}'.format
| [
16879,
46,
796,
1391,
6,
33,
10354,
705,
37039,
3256,
705,
41,
10354,
705,
41,
11020,
3256,
705,
49,
10354,
705,
40656,
6,
92,
198,
2606,
7250,
3843,
796,
705,
90,
38362,
23884,
4458,
18982,
628
] | 2.138889 | 36 |
import asyncio
import datetime
import logging
import os
import pytest
import requests_mock
from fhempy.lib.pkg_installer import check_and_install_dependencies
from tests.utils import mock_fhem
def load_fixture(filename):
"""Load a fixture."""
path = os.path.join(os.path.dirname(__file__), "fixtures", filename)
with open(path) as fdp:
return fdp.read()
@pytest.fixture(autouse=True)
@pytest.mark.asyncio
| [
11748,
30351,
952,
198,
11748,
4818,
8079,
198,
11748,
18931,
198,
11748,
28686,
198,
198,
11748,
12972,
9288,
198,
11748,
7007,
62,
76,
735,
198,
6738,
277,
258,
3149,
88,
13,
8019,
13,
35339,
62,
17350,
263,
1330,
2198,
62,
392,
62,... | 2.734177 | 158 |
# -*- coding: UTF-8 -*-
from __future__ import absolute_import
from f3utils.service_meta import _ServiceMeta
class DOM_Meta(_ServiceMeta):
"""Metaclass for `DOMScope` ones.
Prepares a dictionary of descriptors that the scope can apply
to components or pages under it.
Operates at class initialization phase,
"""
# eof
| [
2,
532,
9,
12,
19617,
25,
41002,
12,
23,
532,
9,
12,
198,
198,
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
198,
6738,
277,
18,
26791,
13,
15271,
62,
28961,
1330,
4808,
16177,
48526,
628,
198,
4871,
24121,
62,
48526,
28264,
16177... | 2.849206 | 126 |
from django.contrib.auth.decorators import login_required
from django.shortcuts import render, get_object_or_404,redirect, reverse, render_to_response
from django.http import HttpResponse, HttpResponseRedirect
from django.template import loader, RequestContext
from django.urls import reverse
from django.views import generic
from django.utils import timezone
from django.core.mail import EmailMessage
from django.conf import settings
from createprofile.forms import CreateProfileFromEE, CreateProfileFromER, DocumentForm, ProfilePictureForm, ListingForm, LogoForm
from core.models import Profile
from .models import Employee_Profile, Employer_Profile , Document, Profile_Picture, Logo,Listing
from django.core.files.storage import FileSystemStorage
# Create your views here.
@login_required
@login_required
@login_required
@login_required
@login_required
@login_required
@login_required
@login_required
@login_required
@login_required
@login_required
@login_required
| [
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
13,
12501,
273,
2024,
1330,
17594,
62,
35827,
198,
6738,
42625,
14208,
13,
19509,
23779,
1330,
8543,
11,
651,
62,
15252,
62,
273,
62,
26429,
11,
445,
1060,
11,
9575,
11,
8543,
62,
1462,
... | 3.581227 | 277 |
import copy
from typing import List
import logging
import gunpowder as gp
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
class DeepCopyArrays(gp.BatchFilter):
""" Deep-copy arrays
Args:
arrays (List[gp.ArrayKey]): ArrayKeys to be copied
output_arrays (List[gp.ArrayKey]): optional, ArrayKeys for outputs
"""
| [
11748,
4866,
198,
6738,
19720,
1330,
7343,
198,
11748,
18931,
198,
198,
11748,
2485,
45855,
355,
27809,
198,
198,
6404,
1362,
796,
18931,
13,
1136,
11187,
1362,
7,
834,
3672,
834,
8,
198,
6404,
1362,
13,
2617,
4971,
7,
6404,
2667,
13,... | 2.776923 | 130 |
"""Module contains a mouth state estimator
see `mouth state`_
"""
from typing import Union, Dict
from FaceEngine import MouthEstimation, IMouthEstimatorPtr # pylint: disable=E0611,E0401
from lunavl.sdk.base import BaseEstimation
from lunavl.sdk.errors.errors import LunaVLError
from lunavl.sdk.errors.exceptions import CoreExceptionWrap, assertError
from ..base import BaseEstimator
from ..face_estimators.facewarper import FaceWarp, FaceWarpedImage
class MouthStates(BaseEstimation):
"""
Mouth states. There are 3 states of mouth: smile, occlusion and neither a smile nor an occlusion was detected.
Estimation properties:
- smile
- mouth
- occlusion
"""
# pylint: disable=W0235
@property
def smile(self) -> float:
"""
Get smile score value.
Returns:
value in range [0, 1]
"""
return self._coreEstimation.smile
@property
def opened(self) -> float:
"""
Get opened score value.
Returns:
value in range [0, 1]
"""
return self._coreEstimation.opened
@property
def occlusion(self) -> float:
"""
Get occlusion score value.
Returns:
value in range [0, 1]
"""
return self._coreEstimation.occluded
def asDict(self) -> Dict[str, float]:
"""
Convert to dict.
Returns:
{'opened': self.opened, 'occlusion': self.occlusion, 'smile': self.smile}
"""
return {"opened": self.opened, "occluded": self.occlusion, "smile": self.smile}
class MouthStateEstimator(BaseEstimator):
"""
Mouth state estimator.
"""
# pylint: disable=W0235
def __init__(self, coreEstimator: IMouthEstimatorPtr):
"""
Init.
Args:
coreEstimator: core estimator
"""
super().__init__(coreEstimator)
# pylint: disable=W0221
@CoreExceptionWrap(LunaVLError.EstimationMouthStateError)
def estimate(self, warp: Union[FaceWarp, FaceWarpedImage]) -> MouthStates:
"""
Estimate mouth state on warp.
Args:
warp: warped image
Returns:
estimated states
Raises:
LunaSDKException: if estimation failed
"""
error, mouthState = self._coreEstimator.estimate(warp.warpedImage.coreImage)
assertError(error)
return MouthStates(mouthState)
| [
37811,
26796,
4909,
257,
5422,
1181,
3959,
1352,
198,
198,
3826,
4600,
14775,
1181,
63,
62,
198,
37811,
198,
6738,
19720,
1330,
4479,
11,
360,
713,
198,
198,
6738,
15399,
13798,
1330,
44764,
22362,
18991,
11,
8959,
1536,
22362,
320,
135... | 2.319549 | 1,064 |
#
# Copyright (c) 2014 Chris Jerdonek. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
from textwrap import dedent
from openrcv import models
from openrcv.models import normalize_ballots, normalize_ballots_to, BallotsResource, ContestInput
from openrcv import streams
from openrcv.streams import ListResource
from openrcv.utils import StringInfo
from openrcv.utiltest.helpers import UnitCase
class NormalizeBallotsToTest(UnitCase):
"""Tests of normalize_ballots_to()."""
def test(self):
"""
Simultaneously checks--
(1) "compressing" (by weight),
(2) lexicographically ordering by choice (and not by weight), and
(3) ballots with no choices (aka undervotes).
"""
ballots = [
(1, (2, )),
(1, ()),
(1, (3, )),
(2, ()),
(4, (1, )),
(1, (2, )),
]
source = ListResource(ballots)
target = ListResource()
normalize_ballots_to(source, target)
with target.reading() as gen:
normalized = list(gen)
self.assertEqual(normalized, [(3, ()), (4, (1,)), (2, (2,)), (1, (3,))])
class NormalizeBallotTest(UnitCase):
"""Tests of normalize_ballots()."""
| [
2,
198,
2,
15069,
357,
66,
8,
1946,
5180,
4230,
28060,
74,
13,
1439,
2489,
10395,
13,
198,
2,
198,
2,
2448,
3411,
318,
29376,
7520,
11,
1479,
286,
3877,
11,
284,
597,
1048,
16727,
257,
198,
2,
4866,
286,
428,
3788,
290,
3917,
10... | 2.834371 | 803 |
from setuptools import setup
with open("README.md", "r") as fh:
long_description = fh.read()
setup(name="pdfoutliner",
version="0.0.3",
author="GHPen",
description="Command line interface for generating pdftk-style bookmark files in a user-friendly way, and (optionally) outputs a PDF file with the specified outline.",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/GHPen/pdfoutliner",
packages=["pdfoutliner"],
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
entry_points={
"console_scripts": [
"pdfoutliner = pdfoutliner.__main__:main"
]
},
) | [
6738,
900,
37623,
10141,
1330,
9058,
198,
198,
4480,
1280,
7203,
15675,
11682,
13,
9132,
1600,
366,
81,
4943,
355,
277,
71,
25,
198,
220,
220,
220,
890,
62,
11213,
796,
277,
71,
13,
961,
3419,
198,
198,
40406,
7,
3672,
2625,
12315,
... | 2.509091 | 330 |
from django.db.models import Count
from django.http import HttpResponse
from django.shortcuts import render
from functools import wraps
from time import sleep
# Create your views here.
from messenger.models import Message
from users.models import CustomUser
from django.db import connection, reset_queries
import logging
import time
from django.views.decorators.gzip import gzip_page
from django.contrib.auth.decorators import login_required
@log_sql
@log_time
@gzip_page
@sql_request_delay
@cached
| [
6738,
42625,
14208,
13,
9945,
13,
27530,
1330,
2764,
201,
198,
6738,
42625,
14208,
13,
4023,
1330,
367,
29281,
31077,
201,
198,
6738,
42625,
14208,
13,
19509,
23779,
1330,
8543,
201,
198,
201,
198,
6738,
1257,
310,
10141,
1330,
27521,
2... | 2.811224 | 196 |
# -*- coding: utf-8 -*-
"""
Display the different computation steps for sensor to cubemap
This file will allow to demonstrate the various computation setp associated with a cubemap transfer calculation
@author: Brice Dubost
Copyright 2020 Brice Dubost
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import transf_mat, plotting
MLX_SENSORS_XPIX = 32
MLX_SENSORS_YPIX = 24
#================= CUBEMAP DATA =====================
cub_npix = 256
p_cam_data = transf_mat.gen_cubemap_data(cub_npix)
#================= We look for only one sensor position ===============
# We set up the angle at which we look at
p_sen_data = {}
p_sen_data[0] = { "alpha" : 0, "beta_i" : 0 }
#The we add some extra information about the sensor itself
for sen in p_sen_data:
p_sen_data[sen]["HFOV"] = 45.64884636
p_sen_data[sen]["VFOV"] = 33.6037171
p_sen_data[sen]["gamma_i"] = 7.36132776
p_sen_data[sen]["distortion"] = -2.87108226
p_sen_data[sen]["HNPIX"] = MLX_SENSORS_XPIX
p_sen_data[sen]["VNPIX"] = MLX_SENSORS_YPIX
#================ Now we study compute all matrices ========================
if __name__ == "__main__":
print("Hi let's look at sensor calculation ")
print("Cubemap info: p_cam_data",p_cam_data,"\n")
print("Sensor data: p_sen_data",p_sen_data,"\n")
print("Now we do the overlapt computation \n")
transf_mat.compute_all_matrices(p_cam_data,p_sen_data,plot = True, plot_sen = True)
print("Now we have two new keys in the cubemap data: sen_m and sen_nm\n")
print("\t sen_m is indexed by sensors, and contains, for each pixel of the cubemap the X and Y coordinates of this sensor in this position\n")
print("\t sen_nm contains the number of sensors at each pixel of this cubemap\n")
print("Cubemap info new elements from compute_all_matrices: p_cam_data['F'].keys()",p_cam_data['F']['sen_m'],"\n")
print("Cubemap info new elements from compute_all_matrices: p_cam_data['F'].keys()",p_cam_data['F']['sen_nm'],"\n")
print("Sensor data: p_sen_data",p_sen_data,"\n")
plotting.plot_image(p_cam_data['F']['sen_nm'],title = "Number of overlapping sensors for\nFront view: p_cam_data['F']['sen_nm']",fidx = 1)
plotting.plot_image(p_cam_data['F']['sen_m'][0]['x'],title = "X coordinates of\nsensor 0 in the front view:\n p_cam_data['F']['sen_m'][0]['x']", fidx = 2)
plotting.plot_image(p_cam_data['F']['sen_m'][0]['y'],title = "Y coordinates of\nsensor 0 in the front view:\n p_cam_data['F']['sen_m'][0]['y']", fidx = 3)
#============ Let's look a bit closer on what compute_all_matrices is doing ================
#the key line in this function is the call to compute_pixel_mat
# (sen_m[sen_i]['x'], sen_m[sen_i]['y']) = compute_pixel_mat(p_cam_data[cub_i],p_sen_data[sen_i], debug = debug)
#First it computes the transfer matrix between the two cameras
p_cam_data_front = transf_mat.gen_cubemap_data(cub_npix)['F']
print("Cubemap data (destination camera) ", p_cam_data_front)
print("Sensor data (source camera) ",p_sen_data[0],"\n")
M = transf_mat.compute_transfer_mat(cam_src = p_sen_data[0],cam_dst = p_cam_data_front, debug = True)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
23114,
262,
1180,
29964,
4831,
329,
12694,
284,
13617,
368,
499,
198,
198,
1212,
2393,
481,
1249,
284,
10176,
262,
2972,
29964,
900,
79,
3917,
351,
257,
1361... | 2.578231 | 1,470 |
#!/usr/bin/env python
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Union
from typing import Dict
from activitystreams import Activity
from zope.interface import Interface
__author__ = 'Oscar Eriksson <oscar.eriks@gmail.com>'
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
198,
2,
9... | 3.757576 | 198 |
# Sort a k increasing and decreasing array
# [1, 2, 3, 3, 2, 1, 0, 4, 5]
# find all the decreasing intervals and reverse them
# add the result to a list
# merge
UP, DOWN = 0, 1
if __name__ == '__main__':
test_cases = [
([1, 2, 3, 4, 3, 1, 2, 4]),
]
for test_case in test_cases:
print split(test_case)
print split_v2(test_case)
| [
2,
33947,
257,
479,
3649,
290,
24030,
7177,
198,
2,
685,
16,
11,
362,
11,
513,
11,
513,
11,
362,
11,
352,
11,
657,
11,
604,
11,
642,
60,
628,
198,
2,
1064,
477,
262,
24030,
20016,
290,
9575,
606,
198,
2,
751,
262,
1255,
284,
... | 2.34375 | 160 |
from uuid import uuid4
from .base import AbstractRPCCommLayer
from aioredis import create_redis
from ..models import RPCStack, RPCResult, RPCBase, SERIALIZABLE_MODELS
RESULT_EXPIRE_TIME = 300 # seconds
class RPCRedisCommLayer(AbstractRPCCommLayer):
"""
Redis remote procedure call communication layer
"""
@classmethod
async def create(
cls, subchannel=b'subchannel', pubchannel=b'pubchannel',
host='localhost', port=6379, serialization=None):
"""
Use a static create method to allow async context,
__init__ cannot be async.
"""
self = RPCRedisCommLayer(subchannel, pubchannel)
# Create communicationLayer
self.host = host
self.port = port
self.serialization = serialization
# Redis for publishing
self.redis = await create_redis(
f'redis://{host}')
# By default register all RPC models
for model in SERIALIZABLE_MODELS:
# Register models to serialization
serialization.register(model)
self.subscribed = False
# Subscription has own redis
self.sub_redis = None
self.sub_channel = None
# By default subscribe
await self.do_subscribe()
return self
def __init__(self, subchannel, pubchannel):
"""
Initialize and set the sub/pub channels
"""
self.subchannel = subchannel
self.pubchannel = pubchannel
async def publish(self, rpc_instance: RPCBase, channel=None):
"""
Publish redis implementation, publishes RPCBase instances.
:return: the number of receivers
"""
# rpc_instance should be a subclass of RPCBase
# For now just check if instance of RPCBase
assert isinstance(rpc_instance, RPCBase)
if isinstance(rpc_instance, RPCStack):
# Add subchannel to RPCStack as respond_to
rpc_instance.respond_to = self.subchannel
elif ((isinstance(rpc_instance, RPCResult) and
rpc_instance.data is not None)):
# Customized:
# result data via redis.set
# result without data via redis.publish
redis_key = uuid4().hex
# Store the result data via key/value in redis
await self.redis.set(
redis_key,
self.serialization.dumpb(rpc_instance.data),
expire=RESULT_EXPIRE_TIME)
# Set redis_key and remove data, since
# this is stored in redis now
rpc_instance.data = {'redis_key': redis_key}
# Override the pub_channel with channel, if set
pub_channel = channel if channel is not None else self.pubchannel
# Publish rpc_instance and return number of listeners
return await self.redis.publish(
pub_channel,
self.serialization.dumpb(rpc_instance))
async def get_data(self, redis_key, delete=True):
"""
Helper function to get data by redis_key, by default
delete the data after retrieval.
"""
data = self.serialization.loadb(
await self.redis.get(redis_key))
if delete:
await self.redis.delete(redis_key)
return data
async def _process_msg(self, msg, on_rpc_event_callback, channel=None):
"""
Interal message processing, is called on every received
message via the subscription.
"""
event = self.serialization.loadb(msg)
# rpc_instance should be a subclass of RPCBase
# For now just check if instance of RPCBase
assert isinstance(event, RPCBase)
if on_rpc_event_callback:
if isinstance(event, RPCResult):
# Customized:
# result data via redis.set
# result without data via redis.publish
# Get data from redis and put it on the event
if isinstance(event.data, dict) and 'redis_key' in event.data:
event.data = await self.get_data(event.data['redis_key'])
await on_rpc_event_callback(
event, channel=channel.name)
async def subscribe(self, on_rpc_event_callback, channel=None, redis=None):
"""
Redis implementation for subscribe method, receives messages from
subscription channel.
Note: does block in while loop until .unsubscribe() is called.
"""
try:
if channel is None:
channel = self.sub_channel
if redis is None:
redis = self.sub_redis
self.subscribed = True
# Inside a while loop, wait for incoming events.
while await channel.wait_message():
await self._process_msg(
await channel.get(),
on_rpc_event_callback,
channel=channel)
finally:
# Close connections and cleanup
self.subscribed = False
redis.close()
await redis.wait_closed()
async def unsubscribe(self):
"""
Redis implementation for unsubscribe. Stops subscription and breaks
out of the while loop in .subscribe()
"""
if self.subscribed:
await self.sub_redis.unsubscribe(
self.sub_channel.name)
self.subscribed = False
async def close(self):
"""
Stop subscription & close everything
"""
await self.unsubscribe()
self.redis.close()
await self.redis.wait_closed()
| [
6738,
334,
27112,
1330,
334,
27112,
19,
198,
6738,
764,
8692,
1330,
27741,
20031,
4093,
2002,
49925,
198,
6738,
257,
72,
1850,
271,
1330,
2251,
62,
445,
271,
198,
6738,
11485,
27530,
1330,
39400,
25896,
11,
39400,
23004,
11,
39400,
1488... | 2.2608 | 2,500 |
# import requests
import os, sys, io
import re
import logging
import datetime
from pathlib import Path
import csv
Output_Path = str(os.path.join(os.getcwd(), 'Outputs'))
Storage_Path = str(os.path.join(Output_Path,"APOPND_THSR_Ok.csv"))
Latilongi_Keyword = "經緯座標"
| [
198,
2,
1330,
7007,
198,
11748,
28686,
11,
25064,
11,
33245,
198,
11748,
302,
198,
11748,
18931,
198,
11748,
4818,
8079,
198,
6738,
3108,
8019,
1330,
10644,
198,
11748,
269,
21370,
198,
198,
26410,
62,
15235,
796,
965,
7,
418,
13,
697... | 2.384615 | 117 |
__author__ = u'schmatz'
from systemConfiguration import SystemConfiguration
import os
import directoryController
import errors
| [
834,
9800,
834,
796,
334,
338,
354,
6759,
89,
6,
198,
6738,
1080,
38149,
1330,
4482,
38149,
198,
11748,
28686,
198,
11748,
8619,
22130,
198,
11748,
8563,
628,
628
] | 4.482759 | 29 |
"""Run doctests on choices.py and helpers.py"""
import doctest
import sys
from . import choices, helpers
failures = 0
failures += doctest.testmod(m=choices, report=True)[0]
failures += doctest.testmod(m=helpers, report=True)[0]
if failures > 0:
sys.exit(1)
| [
37811,
10987,
10412,
3558,
319,
7747,
13,
9078,
290,
49385,
13,
9078,
37811,
198,
198,
11748,
10412,
395,
198,
11748,
25064,
198,
6738,
764,
1330,
7747,
11,
49385,
198,
198,
32165,
942,
796,
657,
198,
198,
32165,
942,
15853,
10412,
395,... | 2.789474 | 95 |
import torch # must be imported before anything from torchshapelets
from .discrepancies import CppDiscrepancy, L2Discrepancy, LogsignatureDiscrepancy
from .regularisation import similarity_regularisation
from .shapelet_transform import GeneralisedShapeletTransform
__version__ = '0.1.0'
del torch
| [
11748,
28034,
220,
1303,
1276,
307,
17392,
878,
1997,
422,
28034,
43358,
5289,
198,
198,
6738,
764,
15410,
7856,
16183,
1330,
327,
381,
15642,
7856,
3883,
11,
406,
17,
15642,
7856,
3883,
11,
5972,
12683,
1300,
15642,
7856,
3883,
198,
67... | 3.7625 | 80 |
#
# Copyright 2015 Quantopian, Inc.
# Modifications Copyright 2018 Alpaca
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
import pytz
import numpy as np
import pandas as pd
from datetime import tzinfo
from itertools import chain
from contextlib import ExitStack
from copy import copy
import importlib
from trading_calendars import get_calendar
import pylivetrader.protocol as proto
from pylivetrader.assets import AssetFinder, Asset
from pylivetrader.data.bardata import handle_non_market_minutes
from pylivetrader.data.data_portal import DataPortal
from pylivetrader.executor.executor import AlgorithmExecutor
from pylivetrader.errors import (
APINotSupported, CannotOrderDelistedAsset, UnsupportedOrderParameters,
ScheduleFunctionInvalidCalendar, OrderDuringInitialize,
RegisterAccountControlPostInit, RegisterTradingControlPostInit,
OrderInBeforeTradingStart, HistoryInInitialize,
)
from pylivetrader.finance.execution import (
MarketOrder, LimitOrder, StopLimitOrder, StopOrder
)
from pylivetrader.finance.controls import (
LongOnly,
MaxOrderCount,
MaxOrderSize,
MaxPositionSize,
MaxLeverage,
RestrictedListOrder
)
from pylivetrader.finance.asset_restrictions import (
Restrictions,
NoRestrictions,
StaticRestrictions,
SecurityListRestrictions,
)
from pylivetrader.misc.security_list import SecurityList
from pylivetrader.misc import events
from pylivetrader.misc.events import (
EventManager,
make_eventrule,
date_rules,
time_rules,
calendars,
AfterOpen,
BeforeClose
)
from pylivetrader.misc.math_utils import round_if_near_integer, tolerant_equals
from pylivetrader.misc.api_context import (
api_method,
LiveTraderAPI,
require_initialized,
disallowed_in_before_trading_start,
)
from pylivetrader.misc.pd_utils import normalize_date
from pylivetrader.misc.preprocess import preprocess
from pylivetrader.misc.input_validation import (
coerce_string,
ensure_upper_case,
expect_types,
expect_dtypes,
optional,
)
from pylivetrader.statestore import StateStore
from logbook import Logger, lookup_level
log = Logger('Algorithm')
class Algorithm(object):
"""Provides algorithm compatible with zipline.
"""
def __init__(self, *args, **kwargs):
'''
data_frequency: 'minute' or 'daily'
algoname: str, defaults to 'algo'
backend: str or Backend instance, defaults to 'alpaca'
(str is either backend module name under
'pylivetrader.backend', or global import path)
trading_calendar: pd.DateIndex for trading calendar
initialize: initialize function
handle_data: handle_data function
before_trading_start: before_trading_start function
log_level: 'DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'
'''
log.level = lookup_level(kwargs.pop('log_level', 'INFO'))
self._recorded_vars = {}
self.data_frequency = kwargs.pop('data_frequency', 'minute')
assert self.data_frequency in ('minute', 'daily')
self._algoname = kwargs.pop('algoname', 'algo')
self.quantopian_compatible = kwargs.pop('quantopian_compatible', True)
self._state_store = StateStore(
kwargs.pop('statefile', None) or
'{}-state.pkl'.format(self._algoname)
)
self._pipelines = {}
backend_param = kwargs.pop('backend', 'alpaca')
if not isinstance(backend_param, str):
self._backend = backend_param
self._backend_name = backend_param.__class__.__name__
else:
self._backend_name = backend_param
try:
# First, tries to import official backend packages
backendmod = importlib.import_module(
'pylivetrader.backend.{}'.format(self._backend_name))
except ImportError:
# Then if failes, tries to find pkg in global package
# namespace.
try:
backendmod = importlib.import_module(
self._backend_name)
except ImportError:
raise RuntimeError(
"Could not find backend package `{}`.".format(
self._backend_name))
backend_options = kwargs.pop('backend_options', None) or {}
self._backend = backendmod.Backend(**backend_options)
self.asset_finder = AssetFinder(self._backend)
self.trading_calendar = kwargs.pop(
'trading_calendar', get_calendar('NYSE'))
self.data_portal = DataPortal(
self._backend,
self.asset_finder,
self.trading_calendar,
self.quantopian_compatible
)
self.event_manager = EventManager()
self.trading_controls = []
self.account_controls = []
self.restrictions = NoRestrictions()
self._initialize = kwargs.pop('initialize', noop)
self._handle_data = kwargs.pop('handle_data', noop)
self._before_trading_start = kwargs.pop('before_trading_start', noop)
self.event_manager.add_event(
events.Event(
events.Always(),
# We pass handle_data.__func__ to get the unbound method.
self.handle_data.__func__,
),
prepend=True,
)
self._account_needs_update = True
self._portfolio_needs_update = True
self._in_before_trading_start = False
self._assets_from_source = []
self._context_persistence_excludes = []
self._max_shares = int(1e+11)
self.initialized = False
self.api_methods = [func for func in dir(Algorithm) if callable(
getattr(Algorithm, func)
)]
@api_method
@api_method
@api_method
@disallowed_in_before_trading_start(OrderInBeforeTradingStart())
@api_method
@api_method
def schedule_function(self,
func,
date_rule=None,
time_rule=None,
half_days=True,
calendar=None):
"""Schedules a function to be called according to some timed rules.
Parameters
----------
func : callable[(context, data) -> None]
The function to execute when the rule is triggered.
date_rule : EventRule, optional
The rule for the dates to execute this function.
time_rule : EventRule, optional
The rule for the times to execute this function.
half_days : bool, optional
Should this rule fire on half days?
calendar : Sentinel, optional
Calendar used to reconcile date and time rules.
See Also
--------
:class:`zipline.api.date_rules`
:class:`zipline.api.time_rules` sta
"""
# When the user calls schedule_function(func, <time_rule>), assume that
# the user meant to specify a time rule but no date rule, instead of
# a date rule and no time rule as the signature suggests
if isinstance(date_rule, (AfterOpen, BeforeClose)) and not time_rule:
warnings.warn('Got a time rule for the second positional argument '
'date_rule. You should use keyword argument '
'time_rule= when calling schedule_function without '
'specifying a date_rule', stacklevel=3)
date_rule = date_rule or date_rules.every_day()
time_rule = ((time_rule or time_rules.every_minute())
if self.data_frequency == 'minute' else
# If we are in daily mode the time_rule is ignored.
time_rules.every_minute())
# Check the type of the algorithm's schedule before pulling calendar
# Note that the ExchangeTradingSchedule is currently the only
# TradingSchedule class, so this is unlikely to be hit
if calendar is None:
cal = self.trading_calendar
elif calendar is calendars.US_EQUITIES:
cal = get_calendar('NYSE')
elif calendar is calendars.US_FUTURES:
cal = get_calendar('us_futures')
else:
raise ScheduleFunctionInvalidCalendar(
given_calendar=calendar,
allowed_calendars=(
'[calendars.US_EQUITIES, calendars.US_FUTURES]'
),
)
self.add_event(
make_eventrule(date_rule, time_rule, cal, half_days),
func,
)
@api_method
def record(self, *args, **kwargs):
"""Track and record values each day.
Parameters
----------
**kwargs
The names and values to record.
Notes
-----
These values will appear in the performance packets and the performance
dataframe passed to ``analyze`` and returned from
:func:`~zipline.run_algorithm`.
"""
# Make 2 objects both referencing the same iterator
args = [iter(args)] * 2
# Zip generates list entries by calling `next` on each iterator it
# receives. In this case the two iterators are the same object, so the
# call to next on args[0] will also advance args[1], resulting in zip
# returning (a,b) (c,d) (e,f) rather than (a,a) (b,b) (c,c) etc.
positionals = zip(*args)
for name, value in chain(positionals, kwargs.items()):
self._recorded_vars[name] = value
@api_method
def set_benchmark(self, benchmark):
'''Just do nothing for compatibility.'''
pass
@api_method
@preprocess(symbol=ensure_upper_case)
def symbol(self, symbol):
'''Lookup equity by symbol.
Parameters:
symbol (string): The ticker symbol for the asset.
Returns:
equity (Equity): The equity object lookuped by the ``symbol``.
Raises:
AssetNotFound: When could not resolve the ``Asset`` by ``symbol``.
'''
return self.asset_finder.lookup_symbol(symbol, as_of_date=None)
@api_method
@api_method
def symbols(self, *args, **kwargs):
'''Lookup equities by symbol.
Parameters:
args (iterable[str]): List of ticker symbols for the asset.
Returns:
equities (List[Equity]): The equity lookuped by the ``symbol``.
Raises:
AssetNotFound: When could not resolve the ``Asset`` by ``symbol``.
'''
return [self.symbol(idendifier, **kwargs) for idendifier in args]
@api_method
def sid(self, sid):
'''Lookup equity by asset unique identifier
Parameters:
sid: asset unique identifier.
Returns:
equity (Equity): The equity object lookuped by the ``sid``.
Raises:
AssetNotFound: When could not resolve the ``Asset`` by ``sid``.
'''
return self.asset_finder.retrieve_asset(sid)
@api_method
@api_method
@api_method
@disallowed_in_before_trading_start(OrderInBeforeTradingStart())
@property
@property
@property
@api_method
@preprocess(tz=coerce_string(pytz.timezone))
@expect_types(tz=optional(tzinfo))
@api_method
def set_slippage(self, **kwargs):
'''Just do nothing for compatibility.'''
pass
@api_method
def set_commission(self, **kwargs):
'''Just do nothing for compatibility.'''
pass
@api_method
def set_cancel_policy(self, *args):
'''Just do nothing for compatibility.'''
pass
@api_method
def set_symbol_lookup_date(self, dt):
'''Just do nothing for compatibility.'''
pass
@api_method
@api_method
@api_method
@api_method
@api_method
@expect_types(share_counts=pd.Series)
@expect_dtypes(share_counts=np.dtype('float64'))
@api_method
def get_open_orders(self, asset=None):
'''
If asset is unspecified or None, returns a dictionary keyed by
asset ID. The dictionary contains a list of orders for each ID,
oldest first. If an asset is specified, returns a list of open
orders for that asset, oldest first.
'''
return self.get_all_orders(asset=asset, status='open')
@api_method
def get_recent_orders(self, days_back=2):
'''
Returns all orders from the past n days.
'''
return self.get_all_orders(days_back=days_back)
@api_method
def get_all_orders(
self,
asset=None,
before=None,
status='all',
days_back=None):
'''
If asset is unspecified or None, returns a dictionary keyed by
asset ID. The dictionary contains a list of orders for each ID,
oldest first. If an asset is specified, returns a list of open
orders for that asset, oldest first. Orders submitted after
before will not be returned. If provided, only orders of type
status ('closed' or 'open') will be returned.
'''
orders = self._backend.all_orders(before, status, days_back)
omap = {}
orders = sorted([
o for o in orders.values()
], key=lambda o: o.dt)
for order in orders:
key = order.asset
if key not in omap:
omap[key] = []
omap[key].append(order.to_api_obj())
if asset is None:
return omap
return omap.get(asset, [])
@api_method
@api_method
@api_method
@require_initialized(HistoryInInitialize())
def history(self, bar_count, frequency, field, ffill=True):
"""DEPRECATED: use ``data.history`` instead.
"""
return self.get_history_window(
bar_count,
frequency,
self._calculate_universe(),
field,
ffill,
)
def validate_order_params(self,
asset,
amount,
limit_price,
stop_price,
style):
"""
Helper method for validating parameters to the order API function.
Raises an UnsupportedOrderParameters if invalid arguments are found.
"""
if not self.initialized:
raise OrderDuringInitialize(
msg="order() can only be called from within handle_data()"
)
if style:
if limit_price:
raise UnsupportedOrderParameters(
msg="Passing both limit_price and style is not supported."
)
if stop_price:
raise UnsupportedOrderParameters(
msg="Passing both stop_price and style is not supported."
)
for control in self.trading_controls:
control.validate(asset,
amount,
self.portfolio,
self.get_datetime(),
self.executor.current_data)
@staticmethod
def round_order(amount):
"""
Convert number of shares to an integer.
By default, truncates to the integer share count that's either within
.0001 of amount or closer to zero.
E.g. 3.9999 -> 4.0; 5.5 -> 5.0; -5.5 -> -5.0
"""
return int(round_if_near_integer(amount))
@staticmethod
def __convert_order_params_for_blotter(limit_price, stop_price, style):
"""
Helper method for converting deprecated limit_price and stop_price
arguments into ExecutionStyle instances.
This function assumes that either style == None or (limit_price,
stop_price) == (None, None).
"""
if style:
assert (limit_price, stop_price) == (None, None)
return style
if limit_price and stop_price:
return StopLimitOrder(limit_price, stop_price)
if limit_price:
return LimitOrder(limit_price)
if stop_price:
return StopOrder(stop_price)
else:
return MarketOrder()
def _calculate_order_value_amount(self, asset, value):
"""
Calculates how many shares/contracts to order based on the type of
asset being ordered.
"""
if not self.executor.current_data.can_trade(asset):
raise CannotOrderDelistedAsset(
msg="Cannot order {0}, as it not tradable".format(asset.symbol)
)
last_price = \
self.executor.current_data.current(asset, "price")
if np.isnan(last_price):
raise CannotOrderDelistedAsset(
msg="Cannot order {0} on {1} as there is no last "
"price for the security.".format(asset.symbol,
self.datetime)
)
if tolerant_equals(last_price, 0):
zero_message = "Price of 0 for {psid}; can't infer value".format(
psid=asset
)
log.debug(zero_message)
# Don't place any order
return 0
return value / last_price
#
# Account Controls
#
def register_account_control(self, control):
"""
Register a new AccountControl to be checked on each bar.
"""
if self.initialized:
raise RegisterAccountControlPostInit()
self.account_controls.append(control)
@api_method
def set_max_leverage(self, max_leverage):
"""Set a limit on the maximum leverage of the algorithm.
Parameters
----------
max_leverage : float
The maximum leverage for the algorithm. If not provided there will
be no maximum.
"""
control = MaxLeverage(max_leverage)
self.register_account_control(control)
#
# Trading Controls
#
def register_trading_control(self, control):
"""
Register a new TradingControl to be checked prior to order calls.
"""
if self.initialized:
raise RegisterTradingControlPostInit()
self.trading_controls.append(control)
@api_method
def set_max_position_size(
self,
asset=None,
max_shares=None,
max_notional=None,
on_error='fail'):
"""Set a limit on the number of shares and/or dollar value held for the
given sid. Limits are treated as absolute values and are enforced at
the time that the algo attempts to place an order for sid. This means
that it's possible to end up with more than the max number of shares
due to splits/dividends, and more than the max notional due to price
improvement.
If an algorithm attempts to place an order that would result in
increasing the absolute value of shares/dollar value exceeding one of
these limits, raise a TradingControlException.
Parameters
----------
asset : Asset, optional
If provided, this sets the guard only on positions in the given
asset.
max_shares : int, optional
The maximum number of shares to hold for an asset.
max_notional : float, optional
The maximum value to hold for an asset.
"""
control = MaxPositionSize(asset=asset,
max_shares=max_shares,
max_notional=max_notional,
on_error=on_error)
self.register_trading_control(control)
@api_method
def set_max_order_size(self,
asset=None,
max_shares=None,
max_notional=None,
on_error='fail'):
"""Set a limit on the number of shares and/or dollar value of any single
order placed for sid. Limits are treated as absolute values and are
enforced at the time that the algo attempts to place an order for sid.
If an algorithm attempts to place an order that would result in
exceeding one of these limits, raise a TradingControlException.
Parameters
----------
asset : Asset, optional
If provided, this sets the guard only on positions in the given
asset.
max_shares : int, optional
The maximum number of shares that can be ordered at one time.
max_notional : float, optional
The maximum value that can be ordered at one time.
"""
control = MaxOrderSize(asset=asset,
max_shares=max_shares,
max_notional=max_notional,
on_error=on_error)
self.register_trading_control(control)
@api_method
def set_max_order_count(self, max_count, on_error='fail'):
"""Set a limit on the number of orders that can be placed in a single
day.
Parameters
----------
max_count : int
The maximum number of orders that can be placed on any single day.
"""
control = MaxOrderCount(on_error, max_count)
self.register_trading_control(control)
@api_method
def set_do_not_order_list(self, restricted_list, on_error='fail'):
"""Set a restriction on which assets can be ordered.
Parameters
----------
restricted_list : container[Asset], SecurityList
The assets that cannot be ordered.
"""
if isinstance(restricted_list, SecurityList):
warnings.warn(
"`set_do_not_order_list(security_lists.leveraged_etf_list)` "
"is deprecated. Use `set_asset_restrictions("
"security_lists.restrict_leveraged_etfs)` instead.",
category=DeprecationWarning,
stacklevel=2
)
restrictions = SecurityListRestrictions(restricted_list)
else:
warnings.warn(
"`set_do_not_order_list(container_of_assets)` is deprecated. "
"Create a zipline.finance.asset_restrictions."
"StaticRestrictions object with a container of assets and use "
"`set_asset_restrictions(StaticRestrictions("
"container_of_assets))` instead.",
category=DeprecationWarning,
stacklevel=2
)
restrictions = StaticRestrictions(restricted_list)
self.set_asset_restrictions(restrictions, on_error)
@api_method
@expect_types(
restrictions=Restrictions,
on_error=str,
)
def set_asset_restrictions(self, restrictions, on_error='fail'):
"""Set a restriction on which assets can be ordered.
Parameters
----------
restricted_list : Restrictions
An object providing information about restricted assets.
See Also
--------
zipline.finance.asset_restrictions.Restrictions
"""
control = RestrictedListOrder(on_error, restrictions)
self.register_trading_control(control)
self.restrictions |= restrictions
@api_method
def set_long_only(self, on_error='fail'):
"""Set a rule specifying that this algorithm cannot take short
positions.
"""
self.register_trading_control(LongOnly(on_error))
@api_method
@api_method
| [
2,
198,
2,
15069,
1853,
16972,
37548,
11,
3457,
13,
198,
2,
3401,
6637,
15069,
2864,
978,
79,
22260,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
... | 2.261185 | 10,751 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import urllib.request, urllib.error
import json
import os
import subprocess
import logging
import traceback
import yaml
from datetime import datetime
import dateutil.parser
from TwitterAPI import TwitterAPI
import eyed3
from enum import Enum
import shutil
import uuid
# 通知用
twitter = None
if __name__ == "__main__":
Main.main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
11748,
2956,
297,
571,
13,
25927,
11,
2956,
297,
571,
13,
18224,
198,
11748,
33918,
198,
11748,
28686,
198,
11748,
850,... | 2.902256 | 133 |
""" Overlap Matrix Consolidation """
import numpy as np
import scipy.sparse as sp
from .. import overlap
def apply_chunk_id_maps(overlap_arr, chunk_id_maps):
"""
Applies id maps to each overlap matrix, returns a combined overlap mat
"""
rs, cs, vs = list(), list(), list()
for (overlap, id_map) in zip(overlap_arr.flat, chunk_id_maps.flat):
for (r, c, v) in zip(overlap.row, overlap.col, overlap.data):
rs.append(id_map[r])
cs.append(c)
vs.append(v)
return sp.coo_matrix((vs, (rs, cs)))
| [
37811,
3827,
37796,
24936,
43419,
341,
37227,
628,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
629,
541,
88,
13,
82,
29572,
355,
599,
198,
198,
6738,
11485,
1330,
21721,
628,
198,
4299,
4174,
62,
354,
2954,
62,
312,
62,
31803,
7,
... | 2.35 | 240 |
if __name__ == "__main__":
print("start")
# arr = [
# [1, 0, 1, 0, 0],
# [1, 0, 1, 1, 1],
# [0, 1, 1, 1, 1],
# [1, 0, 0, 1, 0]
# ]
# arr = [
# [1, 1, 1],
# [1, 1, 1],
# [0, 1, 1]
# ]
# maximal_square(arr)
arr = [
[1, 1, 1, 1, 0],
[1, 1, 1, 1, 1],
[0, 1, 1, 1, 1],
[1, 0, 0, 1, 0]
]
# arr = [
# [1 , 1],
# [1 , 1]
# ]
# arr = [[1, 0, 1, 0, 0],
# [1, 0, 1, 1, 1],
# [1, 1, 1, 1, 1],
# [1, 0, 0, 1, 0]]
print(arr)
arr = [[1, 0, 1, 0, 0],
[1, 0, 1, 1, 1],
[1, 1, 1, 1, 1],
[1, 0, 0, 1, 0]]
#print('res', maximal_square(arr))
print(maximal_square_dp(arr))
| [
628,
628,
198,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
220,
220,
220,
3601,
7203,
9688,
4943,
198,
220,
220,
220,
1303,
5240,
796,
685,
198,
220,
220,
220,
1303,
220,
220,
220,
220,
685,
16,
11,
657,
11,... | 1.466667 | 555 |
"""
1383. Maximum Performance of a Team
There are n engineers numbered from 1 to n and two arrays: speed and efficiency, where speed[i] and efficiency[i] represent the speed and efficiency for the i-th engineer respectively. Return the maximum performance of a team composed of at most k engineers, since the answer can be a huge number, return this modulo 10^9 + 7.
The performance of a team is the sum of their engineers' speeds multiplied by the minimum efficiency among their engineers.
Example 1:
Input: n = 6, speed = [2,10,3,1,5,8], efficiency = [5,4,3,9,7,2], k = 2
Output: 60
Explanation:
We have the maximum performance of the team by selecting engineer 2 (with speed=10 and efficiency=4) and engineer 5 (with speed=5 and efficiency=7). That is, performance = (10 + 5) * min(4, 7) = 60.
Example 2:
Input: n = 6, speed = [2,10,3,1,5,8], efficiency = [5,4,3,9,7,2], k = 3
Output: 68
Explanation:
This is the same example as the first but k = 3. We can select engineer 1, engineer 2 and engineer 5 to get the maximum performance of the team. That is, performance = (2 + 10 + 5) * min(5, 4, 7) = 68.
Example 3:
Input: n = 6, speed = [2,10,3,1,5,8], efficiency = [5,4,3,9,7,2], k = 4
Output: 72
"""
# revisit
# Runtime: 416 ms, faster than 87.10% of Python3 online submissions for Maximum Performance of a Team.
# Memory Usage: 29.9 MB, less than 86.45% of Python3 online submissions for Maximum Performance of a Team.
# first time do a leetcode contest
# should have solved this problem
# at first thought it was dp, but turns out just a simple heap problem
# should open your mind and don't panic
# Runtime: 432 ms, faster than 66.67% of Python3 online submissions for Maximum Performance of a Team.
# Memory Usage: 34 MB, less than 100.00% of Python3 online submissions for Maximum Performance of a Team.
import heapq
| [
37811,
198,
1485,
5999,
13,
22246,
15193,
286,
257,
4816,
198,
1858,
389,
299,
12037,
25840,
422,
352,
284,
299,
290,
734,
26515,
25,
2866,
290,
9332,
11,
810,
2866,
58,
72,
60,
290,
9332,
58,
72,
60,
2380,
262,
2866,
290,
9332,
3... | 3.184122 | 592 |
# -*- coding:utf-8 -*-
from flask import Blueprint
from flask import session
from flask import request
from flask import redirect
from flask import render_template
from flask import current_app
from flask.ext.babel import gettext
from flask.ext.login import login_required
from flask.ext.login import login_user
from flask.ext.login import logout_user
from flask.ext.login import current_user
from flask.ext.login import UserMixin
from flask.ext.principal import Identity
from flask.ext.principal import identity_changed
from flask.ext.principal import AnonymousIdentity
from app import helpers
from app import forms
from app import models
view = Blueprint('index', __name__,
template_folder='../templates')
@view.route('')
@login_required
@view.route('login', methods=['GET', 'POST'])
@view.route('logout')
@login_required
| [
2,
532,
9,
12,
19617,
25,
40477,
12,
23,
532,
9,
12,
198,
198,
6738,
42903,
1330,
39932,
198,
6738,
42903,
1330,
6246,
198,
6738,
42903,
1330,
2581,
198,
6738,
42903,
1330,
18941,
198,
6738,
42903,
1330,
8543,
62,
28243,
198,
6738,
... | 3.329457 | 258 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''Hello Peewee.'''
from datetime import datetime, timedelta
from flask import Flask, jsonify, request, render_template, abort
from flask.ext.classy import FlaskView
from flask_peewee.db import Database
from flask_peewee.utils import get_object_or_404
import peewee as pw
from serializers import ItemSerializer, PersonSerializer
app = Flask(__name__)
app.config.from_object(Settings)
### Models ###
db = Database(app)
### API ###
class RecentCheckoutsView(FlaskView):
'''Demonstrates a more complex query.'''
route_base = '/recentcheckouts/'
def index(self):
'''Return items checked out in the past hour.'''
hour_ago = datetime.utcnow() - timedelta(hours=1)
query = Item.select().where(Item.checked_out &
(Item.updated > hour_ago)) \
.order_by(Item.updated.desc())
recent = [item for item in query] # Executes query
return jsonify({"items": ItemSerializer(recent, many=True).data})
@app.route("/")
# Register views
api_prefix = "/api/v1/"
ItemsView.register(app, route_prefix=api_prefix)
PeopleView.register(app, route_prefix=api_prefix)
RecentCheckoutsView.register(app, route_prefix=api_prefix)
if __name__ == '__main__':
create_tables()
app.run(port=5000)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
7061,
6,
15496,
2631,
413,
1453,
2637,
7061,
198,
6738,
4818,
8079,
1330,
4818,
8079,
11,
28805,
12514,
198,
198,
6738,... | 2.5625 | 528 |
from bisect import bisect_right
from torch.optim.lr_scheduler import _LRScheduler
| [
6738,
47457,
478,
1330,
47457,
478,
62,
3506,
198,
6738,
28034,
13,
40085,
13,
14050,
62,
1416,
704,
18173,
1330,
4808,
43,
6998,
1740,
18173,
628
] | 3.192308 | 26 |
#! /usr/bin/env python3
import RPi.GPIO as GPIO
import time
import signal
import sys
# The Noctua PWM control actually wants 25 kHz (kilo!), see page 6 on:
# https://noctua.at/pub/media/wysiwyg/Noctua_PWM_specifications_white_paper.pdf
# However, the RPi.GPIO library causes high CPU usage when using high
# frequencies - probably because it can currently only do software PWM.
# So we set a lower frequency in the 10s of Hz here. You should expect that
# this value doesn't work very well and adapt it to what works in your setup.
# We will work on the issue and try to use hardware PWM in the future:
PWM_FREQ = 25 # [Hz] PWM frequency
FAN_PIN = 18 # BCM pin used to drive PWM fan
WAIT_TIME = 1 # [s] Time to wait between each refresh
OFF_TEMP = 40 # [°C] temperature below which to stop the fan
MIN_TEMP = 45 # [°C] temperature above which to start the fan
MAX_TEMP = 70 # [°C] temperature at which to operate at max fan speed
FAN_LOW = 1
FAN_HIGH = 100
FAN_OFF = 0
FAN_MAX = 100
FAN_GAIN = float(FAN_HIGH - FAN_LOW) / float(MAX_TEMP - MIN_TEMP)
try:
signal.signal(signal.SIGTERM, lambda *args: sys.exit(0))
GPIO.setwarnings(False)
GPIO.setmode(GPIO.BCM)
GPIO.setup(FAN_PIN, GPIO.OUT, initial=GPIO.LOW)
fan = GPIO.PWM(FAN_PIN, PWM_FREQ)
while True:
handleFanSpeed(fan, getCpuTemperature())
time.sleep(WAIT_TIME)
except KeyboardInterrupt:
pass
finally:
GPIO.cleanup()
| [
2,
0,
1220,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
11748,
25812,
72,
13,
16960,
9399,
355,
50143,
198,
11748,
640,
198,
11748,
6737,
198,
11748,
25064,
198,
198,
2,
383,
32809,
6413,
350,
22117,
1630,
1682,
3382,
1679,
37597,
357... | 2.530612 | 588 |
import pathlib
import shutil
import logging
from flask import Flask, current_app
from flask_sqlalchemy import SQLAlchemy
from flask_smorest import Api
from variance.extensions import db
| [
11748,
3108,
8019,
198,
11748,
4423,
346,
198,
11748,
18931,
198,
6738,
42903,
1330,
46947,
11,
1459,
62,
1324,
198,
6738,
42903,
62,
25410,
282,
26599,
1330,
16363,
2348,
26599,
198,
6738,
42903,
62,
82,
3549,
301,
1330,
5949,
72,
198,... | 3.74 | 50 |
#!/usr/bin/env python
# Run using python3 N50_Calculator <Input Path> <GenomeSize (Optional)>
import sys
import os
import scipy
if __name__ == "__main__":
if len(sys.argv) == 2:
if os.path.isfile(sys.argv[1]):
NGorN50(file_path=sys.argv[1])
else:
print ('Improper Input Path Specified')
print ('Please use: python3 N50_Calculator <Input Path> <Genome Size (Optional)>')
sys.exit()
elif len(sys.argv) == 3:
try:
int(sys.argv[2])
NGorN50(sys.argv[1], sys.argv[2])
except ValueError:
print ('Improper Genome Size specified')
print ('Please use: python3 N50_Calculator <Input Path> <Genome Size (Optional)>')
sys.exit()
else:
print ('Improper input format')
print ('Please use: python3 N50_Calculator <Input Path> <Genome Size (Optional)>')
sys.exit()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
5660,
1262,
21015,
18,
399,
1120,
62,
9771,
3129,
1352,
1279,
20560,
10644,
29,
1279,
13746,
462,
10699,
357,
30719,
8,
29,
198,
11748,
25064,
198,
11748,
28686,
198,
11748,
629,
54... | 2.091518 | 448 |
from collections import defaultdict
from corehq import feature_previews, toggles
from corehq.apps.custom_data_fields.models import CustomDataFieldsDefinition
from corehq.apps.data_dictionary.models import CaseType, CaseProperty
from corehq.apps.fixtures.dbaccessors import get_fixture_data_type_by_tag, get_fixture_items_for_data_type
from corehq.apps.linked_domain.util import _clean_json
from corehq.apps.locations.views import LocationFieldsView
from corehq.apps.products.views import ProductFieldsView
from corehq.apps.users.models import UserRole
from corehq.apps.users.views.mobile import UserFieldsView
from corehq.apps.integration.models import DialerSettings, GaenOtpServerSettings, HmacCalloutSettings
from corehq.apps.reports.models import TableauServer, TableauVisualization
from corehq.apps.data_interfaces.models import AutomaticUpdateRule
| [
6738,
17268,
1330,
4277,
11600,
198,
198,
6738,
4755,
71,
80,
1330,
3895,
62,
3866,
33571,
11,
284,
32723,
198,
6738,
4755,
71,
80,
13,
18211,
13,
23144,
62,
7890,
62,
25747,
13,
27530,
1330,
8562,
6601,
15878,
82,
36621,
198,
6738,
... | 3.373541 | 257 |
#!/usr/bin/env python3
# Copyright (c) 2018, CNRS-LAAS
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# import pandas to workaround issue when pandas is imported after some other libraries
# https://github.com/pandas-dev/pandas/issues/23040
import pandas
import datetime
import logging
import threading
import typing as ty
import uuid
import matplotlib
import numpy as np
matplotlib.use('Gtk3Agg')
import rospy
from std_msgs.msg import String
from geometry_msgs.msg import Point
from supersaop.msg import Euler, ElevationMap, PredictedWildfireMap, PropagateCmd, Plan, PlanCmd, \
Trajectory, Maneuver, Raster, RasterMetaData, WildfireMap, MeanWindStamped, SurfaceWindMap, \
Timed2DPointStamped, PoseEuler
from fire_rs.geodata.display import GeoDataDisplay
from fire_rs.geodata.geo_data import GeoData
from fire_rs.planning.display import TrajectoryDisplayExtension, plot_plan_trajectories
from fire_rs.monitoring.supersaop import ObservationPlanning
import serialization
TEST_AREA = ((480060.0, 489060.0), (6210074.0, 6217074.0))
if __name__ == '__main__':
try:
op = ObservationPlanningNode()
rospy.spin()
except rospy.ROSInterruptException:
pass
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
2,
15069,
357,
66,
8,
2864,
11,
31171,
6998,
12,
13534,
1921,
198,
2,
1439,
2489,
10395,
13,
198,
2,
198,
2,
2297,
396,
3890,
290,
779,
287,
2723,
290,
13934,
5107,
11,
... | 3.127065 | 787 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import sys
import random
from datetime import datetime, timedelta
from pony import orm
from flask import url_for
from flask_babel import format_timedelta
from smilepack.database import db
from smilepack import models
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
11748,
28686,
198,
11748,
25064,
198,
11748,
4738,
198,
6738,
4818,
8079,
1330,
4818,
8079,
11,
28805,
12514,
... | 3.213483 | 89 |
"""
${NAME}
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
import numpy
from mcedit2.rendering.modelmesh import BlockModelMesh
from mcedit2.rendering import layers
from mcedit2.rendering.chunkmeshes.chunksections import ChunkSectionsRenderer
from mcedit2.rendering.chunkmeshes.entitymesh import TileEntityLocationMesh, MonsterLocationRenderer, ItemRenderer, \
ItemFrameMesh, MonsterModelRenderer, CommandBlockColorsMesh, CommandBlockLocationMesh, \
TileEntityModelRenderer
from mcedit2.rendering.chunkmeshes.heightlevel import HeightLevelBlockMesh
from mcedit2.rendering.chunkmeshes.lowdetail import LowDetailBlockMesh, OverheadBlockMesh
from mcedit2.rendering.chunkmeshes.mobspawns import MobSpawnsBlockMesh
from mcedit2.rendering.chunkmeshes.terrainpop import TerrainPopulatedRenderer
from mcedit2.rendering.chunkmeshes.tileticks import TileTicksRenderer
from mcedit2.util import profiler
from mceditlib.util.lazyprop import lazyprop
from mceditlib import faces
from mceditlib import exceptions
from mceditlib.selection import BoundingBox, SectionBox
log = logging.getLogger(__name__)
| [
37811,
198,
220,
220,
220,
25597,
20608,
92,
198,
37811,
198,
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
11,
7297,
11,
3601,
62,
8818,
11,
28000,
1098,
62,
17201,
874,
198,
11748,
18931,
198,
198,
11748,
299,
32152,
198,
6738,
28... | 3.180822 | 365 |
# Code generated by lark_sdk_gen. DO NOT EDIT.
from pylark.lark_request import RawRequestReq, _new_method_option
from pylark import lark_type, lark_type_sheet, lark_type_approval
import attr
import typing
import io
@attr.s
@attr.s
| [
2,
6127,
7560,
416,
300,
668,
62,
21282,
74,
62,
5235,
13,
8410,
5626,
48483,
13,
198,
198,
6738,
279,
2645,
668,
13,
75,
668,
62,
25927,
1330,
16089,
18453,
3041,
80,
11,
4808,
3605,
62,
24396,
62,
18076,
198,
6738,
279,
2645,
66... | 2.724138 | 87 |
from .filemanager import MDFileManager # NOQA F401
| [
6738,
764,
7753,
37153,
1330,
337,
8068,
576,
13511,
220,
1303,
8005,
48,
32,
376,
21844,
198
] | 3.058824 | 17 |
#!/usr/bin/python
# -*- coding:utf-8 -*-
# Powered By KK Studio
# Wrapper Database
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker, scoped_session
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
2,
532,
9,
12,
19617,
25,
40477,
12,
23,
532,
9,
12,
198,
2,
45090,
2750,
509,
42,
11733,
198,
2,
27323,
2848,
24047,
198,
198,
6738,
44161,
282,
26599,
1330,
2251,
62,
18392,
198,
6738,
... | 3.033898 | 59 |
"""Tests of asynchronous tasks."""
def test_byeworld_async(byeworld):
"""Test of ByWorld asynchronous task method."""
task = byeworld.asynchronous.delay('test')
assert task.get(timeout=10) == {'async': 'test'}
| [
37811,
51,
3558,
286,
39354,
8861,
526,
15931,
628,
198,
4299,
1332,
62,
1525,
38136,
62,
292,
13361,
7,
1525,
38136,
2599,
198,
220,
220,
220,
37227,
14402,
286,
2750,
10603,
39354,
4876,
2446,
526,
15931,
198,
220,
220,
220,
4876,
7... | 2.947368 | 76 |
import unittest
from typing import Dict, Optional, Any
if __name__ == "__main__":
unittest.main()
| [
11748,
555,
715,
395,
198,
6738,
19720,
1330,
360,
713,
11,
32233,
11,
4377,
628,
628,
198,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
220,
220,
220,
555,
715,
395,
13,
12417,
3419,
198
] | 2.74359 | 39 |
from PyQt5 import QtCore, QtOpenGL
from PyQt5.QtCore import Qt
##----------------------------------------------------------------##
| [
6738,
9485,
48,
83,
20,
1330,
33734,
14055,
11,
33734,
11505,
8763,
198,
6738,
9485,
48,
83,
20,
13,
48,
83,
14055,
1330,
33734,
628,
198,
198,
2235,
10097,
2235,
198
] | 4.354839 | 31 |
import cv2
import matplotlib.pyplot as plt
from utils import *
from darknet import Darknet
import keras
import cv2
import os
import numpy as np
import pandas as pd
from os import listdir
from os.path import isfile, join, isdir
from PIL import Image
import numpy as np
import shelve
import matplotlib.pyplot as plt
# Set the location and name of the cfg file
cfg_file = '/content/CVND_Exercises/2_2_YOLO/cfg/yolov3.cfg'
# Set the location and name of the pre-trained weights file
weight_file = '/content/drive/My Drive/yolov3.weights'
# Set the location and name of the COCO object classes file
namesfile = '/content/CVND_Exercises/2_2_YOLO/data/coco.names'
# Load the network architecture
m = Darknet(cfg_file)
# Load the pre-trained weights
m.load_weights(weight_file)
# Load the COCO object classes
class_names = load_class_names(namesfile) | [
11748,
269,
85,
17,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
198,
6738,
3384,
4487,
1330,
1635,
198,
6738,
3223,
3262,
1330,
3801,
3262,
198,
198,
11748,
41927,
292,
198,
11748,
269,
85,
17,
198,
11748,
28686... | 2.965157 | 287 |
import pytest
from topaz import mapdict
| [
11748,
12972,
9288,
198,
198,
6738,
1353,
1031,
1330,
3975,
11600,
628,
198
] | 3.307692 | 13 |
from itertools import chain
import numpy as np
import pandas as pd
from tqdm import tqdm
from ..preprocessing import SW as stop_words
from .base import BaseTextFeatureExtractor, TextFeature
def _compute_linguistic_features(words_corpus, show_progress=True,
extreme_thresh=[2, 0.75]):
""" Compute all the linguistic features
Inputs:
words_corpus (list of list of string):
"""
# pre-compute some entities
doc_freq = get_document_frequency(words_corpus)
rare_words, common_words = get_extreme_words(doc_freq, extreme_thresh)
# compute all features
feats = []
with tqdm(total=len(words_corpus), ncols=80,
disable=not show_progress) as p:
for i, words in enumerate(words_corpus):
feat = {}
feat['num_words'] = N = _num_words(words)
if N is not None:
# feat['num_rep_phrase'] = _num_rep_phrase(words, phrase_dict)
feat['num_unique_words'] = _num_unique_words(words)
feat['num_stop_words'] = _num_stop_words(words)
feat['num_rare_words'] = _num_extreme_words(words, rare_words)
feat['num_common_words'] = _num_extreme_words(words, common_words)
feat['ratio_unique_words'] = feat['num_unique_words'] / N
feat['ratio_stop_words'] = feat['num_stop_words'] / N
feat['ratio_rare_words'] = feat['num_rare_words'] / N
feat['ratio_common_words'] = feat['num_common_words'] / N
else:
feat['num_unique_words'] = 0
feat['num_stop_words'] = 0
feat['num_rare_words'] = 0
feat['num_common_words'] = 0
feat['ratio_unique_words'] = None
feat['ratio_stop_words'] = None
feat['ratio_rare_words'] = None
feat['ratio_common_words'] = None
feats.append(feat)
p.update(1)
feats = pd.DataFrame(feats)
return feats
@words_sanity_check
def _num_words(words):
""" Count number of words per songs
"""
return len(words)
@words_sanity_check
def _num_rep_phrase(words, phrase_dict):
""" count appearance of phrase give in the phrase dict
"""
pass
@words_sanity_check
def _num_unique_words(words):
""" Count unique number of words per song
"""
return len(set(words))
@words_sanity_check
def _num_stop_words(words):
""" Count the number of stop words included
"""
return len([w for w in set(words) if w in stop_words])
@words_sanity_check
def _num_extreme_words(words, extreme_words, average=True):
""" Count the number of common words
Inputs:
words (list of string): to be checked
extreme_words (set of string or dict[string] -> float): common words set
Returns:
tuple or list of int: # of extreme words in each extreme polars
"""
if not isinstance(extreme_words, (dict, set)):
raise Exception('[ERROR] common/rare word list should be set!')
elif isinstance(extreme_words, list):
extreme_words = set(extreme_words)
if not len(extreme_words) > 0:
raise Exception('[ERROR] no words found!!')
res = 0
for word in words:
if word in extreme_words:
res += 1
if average:
res /= len(extreme_words)
return res
def get_extreme_words(df, thresh=[2, .95]):
""" Extract extreme words in each polars
Inputs:
idf (dict[string] -> float): contains words and
their document raw frequency (in count)
thresh ([int, float]): threshold to determine extreme words.
the first element is the threshold for rare words.
words appeared less than this number are treated as rare
the second element is the threshold for common words.
words appeared more than this ratio (to the entire corpus)
considered as the common word.
Returns:
tuple of string: extreme words.
"""
df_arr = np.array(list(df.values()))
com_thrs = np.percentile(df_arr, thresh[1] * 100)
rar_words = set(w for w, freq in df.items() if freq < thresh[0])
com_words = set(w for w, freq in df.items() if freq > com_thrs)
return rar_words, com_words
def get_document_frequency(corpus):
""" Get document frequency from given corpus
Inputs:
texts (list of list of string): corpus
Returns:
dict[string] -> float: list of vocabulary and their document frequency
"""
unique_words = set(chain.from_iterable(corpus))
df = dict.fromkeys(unique_words, 0)
for words in corpus:
for word in words:
df[word] += 1
return df
| [
6738,
340,
861,
10141,
1330,
6333,
198,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
19798,
292,
355,
279,
67,
198,
198,
6738,
256,
80,
36020,
1330,
256,
80,
36020,
198,
198,
6738,
11485,
3866,
36948,
1330,
12672,
355,
2245,
62,
10... | 2.279776 | 2,141 |
# Actual file used is kept in scantron/ansible-playbooks/roles/console/templates/production.py.j2
| [
2,
33520,
2393,
973,
318,
4030,
287,
35183,
1313,
14,
504,
856,
12,
1759,
12106,
14,
305,
829,
14,
41947,
14,
11498,
17041,
14,
25493,
13,
9078,
13,
73,
17,
198
] | 3.16129 | 31 |
from sympy import *
# Verify SymTrace in MultiBspline.hpp
# The first part of this script verifies the expression for the trace of the product of a
# symmetric matrix (H) and a general matrix (G).
# The second part creates body of the SymTrace test in test_multi_spline.cpp.
G = MatrixSymbol('G', 3, 3)
H = MatrixSymbol('H', 3, 3)
h1 = Matrix(H)
print 'H = ',h1
# Symmetrize H
h1 = h1.subs(H[1,0], H[0,1])
h1 = h1.subs(H[2,1], H[1,2])
h1 = h1.subs(H[2,0], H[0,2])
print 'Symmetrized H = ',h1
print
e = Trace(h1*Matrix(G)).doit()
print 'Trace = ',e
h00 = Symbol('h00')
h01 = Symbol('h01')
h02 = Symbol('h02')
h11 = Symbol('h11')
h12 = Symbol('h12')
h22 = Symbol('h22')
print
e = e.subs(H[0,0], h00)
e = e.subs(H[0,1], h01)
e = e.subs(H[0,2], h02)
e = e.subs(H[1,1], h11)
e = e.subs(H[1,2], h12)
e = e.subs(H[2,2], h22)
print 'Trace = ',e
print
e2 = e.collect([h00,h01,h02,h12,h11,h22])
print 'Trace =',e2
g = IndexedBase('g')
e2 = e2.subs(G[0,0], g[0])
e2 = e2.subs(G[0,1]+G[1,0], g[1])
e2 = e2.subs(G[0,2]+G[2,0], g[2])
e2 = e2.subs(G[1,1], g[1])
e2 = e2.subs(G[1,2]+G[2,1], g[4])
e2 = e2.subs(G[2,2], g[5])
print
print 'Replace with symmetrized G'
print 'Trace =',e2
vH = Matrix([[1.0, 2.0, 3.0],
[2.0, 4.4, 1.1],
[3.0, 1.1, 0.9]])
vG = Matrix([[0.1, 0.2, 0.3],
[1.4, 2.3, 8.0],
[0.9, 1.4, 2.3]])
tr = Trace(vH*vG).doit()
print 'Trace = ',tr
print
print 'Concrete values for unit test'
print '// Code from here to the end of the function generate by gen_trace.py'
print
print ' double h00 = %g;'%vH[0,0]
print ' double h01 = %g;'%vH[0,1]
print ' double h02 = %g;'%vH[0,2]
print ' double h11 = %g;'%vH[1,1]
print ' double h12 = %g;'%vH[1,2]
print ' double h22 = %g;'%vH[2,2]
print
print ' double gg[6] = {%g, %g, %g, %g, %g, %g};'%(vG[0,0], vG[0,1] + vG[1,0], vG[0,2] + vG[2,0], vG[1,1], vG[1,2] + vG[2,1], vG[2,2])
print
print ' double tr = SymTrace(h00, h01, h02, h11, h12, h22, gg);'
print ' REQUIRE(tr == Approx(%g));'%tr
print
| [
198,
6738,
10558,
88,
1330,
1635,
198,
198,
2,
49899,
15845,
2898,
558,
287,
15237,
33,
22018,
500,
13,
71,
381,
198,
198,
2,
383,
717,
636,
286,
428,
4226,
3326,
6945,
262,
5408,
329,
262,
12854,
286,
262,
1720,
286,
257,
198,
2,... | 1.832117 | 1,096 |
# 4673 셀프넘버
set_number = [d(n) for n in range(1, 10000 + 1)]
self_number = [n for n in range(1, 10000 + 1) if n not in set_number]
for s in self_number:
print(s)
| [
2,
604,
45758,
23821,
227,
222,
169,
242,
226,
167,
226,
246,
167,
110,
226,
628,
198,
2617,
62,
17618,
796,
685,
67,
7,
77,
8,
329,
299,
287,
2837,
7,
16,
11,
33028,
1343,
352,
15437,
198,
944,
62,
17618,
796,
685,
77,
329,
2... | 2.08642 | 81 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import os
import pickle
import re
import torch
import nni.retiarii.nn.pytorch as nn
from nni.retiarii.nn.pytorch import LayerChoice
from nni.retiarii.serializer import model_wrapper
from blocks import ShuffleNetBlock, ShuffleXceptionBlock
@model_wrapper
| [
2,
15069,
357,
66,
8,
5413,
10501,
13,
198,
2,
49962,
739,
262,
17168,
5964,
13,
198,
198,
11748,
28686,
198,
11748,
2298,
293,
198,
11748,
302,
198,
198,
11748,
28034,
198,
11748,
299,
8461,
13,
1186,
72,
2743,
72,
13,
20471,
13,
... | 3.171429 | 105 |
import logging
from functools import partial
from multiprocessing.dummy import Pool as ThreadPool
from taskcat._s3_sync import S3Sync
from taskcat.exceptions import TaskCatException
LOG = logging.getLogger(__name__)
| [
11748,
18931,
198,
6738,
1257,
310,
10141,
1330,
13027,
198,
6738,
18540,
305,
919,
278,
13,
67,
13513,
1330,
19850,
355,
14122,
27201,
198,
198,
6738,
4876,
9246,
13557,
82,
18,
62,
27261,
1330,
311,
18,
28985,
198,
6738,
4876,
9246,
... | 3.415385 | 65 |
import json
from back.models import helper
| [
11748,
33918,
198,
198,
6738,
736,
13,
27530,
1330,
31904,
628,
198
] | 3.833333 | 12 |
import re
from collections import defaultdict, ChainMap, OrderedDict
from dictknife.langhelpers import make_dict
import yaml
from yaml.representer import SafeRepresenter
load = yaml.load
dump = yaml.dump
| [
11748,
302,
198,
6738,
17268,
1330,
4277,
11600,
11,
21853,
13912,
11,
14230,
1068,
35,
713,
198,
6738,
8633,
48810,
13,
17204,
16794,
364,
1330,
787,
62,
11600,
198,
11748,
331,
43695,
198,
6738,
331,
43695,
13,
15603,
263,
1330,
19978... | 3.466667 | 60 |
from __future__ import absolute_import, division, print_function
# LIBTBX_SET_DISPATCHER_NAME phenix.evalurama
from mmtbx.programs import evalurama
from iotbx.cli_parser import run_program
run_program(evalurama.Program)
| [
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
11,
7297,
11,
3601,
62,
8818,
198,
2,
45651,
22737,
55,
62,
28480,
62,
26288,
47,
11417,
1137,
62,
20608,
6566,
844,
13,
18206,
333,
1689,
198,
198,
6738,
8085,
83,
65,
87,
13,
23065,
... | 2.96 | 75 |
import pybullet
from pybullet_utils import bullet_client
import numpy as np
import cv2, os
import time
import gym
from datetime import datetime
from collections import OrderedDict
from pybullet_data import getDataPath
from bullet_envs.utils import seeding_np_random, AddNoise
robot_diameter = 0.4
initZ = 0.
| [
11748,
12972,
15065,
1616,
198,
6738,
12972,
15065,
1616,
62,
26791,
1330,
10492,
62,
16366,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
269,
85,
17,
11,
28686,
198,
11748,
640,
198,
11748,
11550,
198,
6738,
4818,
8079,
1330,
4818,
... | 3.13 | 100 |
"""
Description: 20. Valid Parentheses
Difficulty: Easy
Given a string s containing just the characters '(', ')', '{', '}', '[' and ']',
determine if the input string is valid.
An input string is valid if:
Open brackets must be closed by the same type of brackets.
Open brackets must be closed in the correct order.
Example 1:
----------
Input: s = "()"
Output: true
Example 2:
---------
Input: s = "()[]{}"
Output: true
Example 3:
----------
Input: s = "(]"
Output: false
Constraints:
------------
1 <= s.length <= 104
s consists of parentheses only '()[]{}'.
"""
| [
37811,
198,
11828,
25,
1160,
13,
48951,
16774,
39815,
198,
28813,
22402,
25,
16789,
198,
198,
15056,
257,
4731,
264,
7268,
655,
262,
3435,
29513,
3256,
705,
8,
3256,
705,
90,
3256,
705,
92,
3256,
705,
17816,
290,
705,
60,
3256,
220,
... | 3.145161 | 186 |
# -*- coding: utf-8 -*-
# Copyright 2020 Green Valley Belgium NV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @@license_version:1.7@@
from google.appengine.ext import ndb
from google.appengine.ext.ndb.query import Cursor
from typing import Type, TypeVar, Optional
from mcfw.properties import bool_property, unicode_property, long_property, unicode_list_property, typed_property, \
float_property, get_members
from mcfw.rpc import serialize_complex_value, parse_complex_value
TO_TYPE = TypeVar('TO_TYPE', bound='TO')
RETURNSTATUS_TO_SUCCESS = ReturnStatusTO.create()
WIDGET_MAPPING = WidgetMapping()
WIDGET_MODEL_MAPPING = WrappedMapping(WIDGET_MAPPING, 'model_type')
WIDGET_TO_MAPPING = WrappedMapping(WIDGET_MAPPING, 'to_type')
WIDGET_RESULT_MAPPING = WidgetResultMapping()
WIDGET_RESULT_MODEL_MAPPING = WrappedMapping(WIDGET_RESULT_MAPPING, 'model_type')
WIDGET_RESULT_TO_MAPPING = WrappedMapping(WIDGET_RESULT_MAPPING, 'to_type')
MESSAGE_TYPE_MAPPING = MessageTypeMapping()
MESSAGE_TYPE_TO_MAPPING = WrappedMapping(MESSAGE_TYPE_MAPPING, 'to_type')
ROOT_MESSAGE_TYPE_TO_MAPPING = WrappedMapping(MESSAGE_TYPE_MAPPING, 'root_to_type')
del WrappedMapping
del Mapping
del WidgetMapping
del WidgetResultMapping
del MessageTypeMapping
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
15069,
12131,
3469,
6916,
15664,
23973,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
... | 2.887439 | 613 |
print("This is a .py file")
| [
4798,
7203,
1212,
318,
257,
764,
9078,
2393,
4943,
198
] | 2.8 | 10 |
import itertools
d = dict(itertools.izip(the_keys, the_values))
| [
11748,
340,
861,
10141,
198,
67,
796,
8633,
7,
270,
861,
10141,
13,
528,
541,
7,
1169,
62,
13083,
11,
262,
62,
27160,
4008,
198
] | 2.56 | 25 |
"""This module provides some functionality to evaluate different explanation methods on several evaluation criteria."""
from typing import Union, Callable, Dict
import numpy as np
from .metrics import *
from .helpers.constants import *
from .helpers.model_interface import ModelInterface
def evaluate(
metrics: dict,
xai_methods: Union[Dict[str, Callable], Dict[str, np.ndarray], list],
model: ModelInterface,
x_batch: np.ndarray,
y_batch: np.ndarray,
a_batch: Union[np.ndarray, None] = None,
s_batch: Union[np.ndarray, None] = None,
agg_func: Callable = lambda x: x,
progress: bool = False,
*args,
**kwargs,
) -> dict:
"""
A methods to evaluate metrics given some explanation methods.
Parameters
----------
metrics
xai_methods
model
x_batch
y_batch
s_batch
agg_func
kwargs
Returns
-------
"""
if xai_methods is None:
print("Define the explanation methods that you want to evaluate.")
if metrics is None:
print(
"Define the Quantus evaluation metrics that you want to evaluate the explanations against."
)
results = {}
if isinstance(xai_methods, list):
assert a_batch is not None, (
"If 'explanation_methods' is a list of methods as strings, "
"then a_batch arguments should provide the necessary attributions corresponding "
"to each input."
)
for method in xai_methods:
results[method] = {}
for metric, metric_func in metrics.items():
if progress:
print(f"Evaluating {method} explanations on {metric} metric...")
results[method][metric] = agg_func(
metric_func(
model=model,
x_batch=x_batch,
y_batch=y_batch,
a_batch=a_batch,
s_batch=s_batch,
**{**kwargs, **{"method": method}},
)
)
elif isinstance(xai_methods, dict):
for method, method_func in xai_methods.items():
results[method] = {}
if callable(method_func):
# Asserts.
assert_explain_func(explain_func=method_func)
# Generate explanations.
a_batch = method_func(
model=model,
inputs=x_batch,
targets=y_batch,
**kwargs,
)
a_batch = utils.expand_attribution_channel(a_batch, x_batch)
# Asserts.
assert_attributions(a_batch=a_batch, x_batch=x_batch)
elif isinstance(method_func, np.ndarray):
a_batch = method_func
else:
if not isinstance(method_func, np.ndarray):
raise TypeError(
"Explanations must be of type np.ndarray or a Callable function that outputs np.nparray."
)
for metric, metric_func in metrics.items():
if progress:
print(f"Evaluating {method} explanations on {metric} metric...")
results[method][metric] = agg_func(
metric_func(
model=model,
x_batch=x_batch,
y_batch=y_batch,
a_batch=a_batch,
s_batch=s_batch,
**{**kwargs, **{"method": method}},
)
)
return results
| [
37811,
1212,
8265,
3769,
617,
11244,
284,
13446,
1180,
7468,
5050,
319,
1811,
12660,
9987,
526,
15931,
198,
6738,
19720,
1330,
4479,
11,
4889,
540,
11,
360,
713,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
764,
4164,
10466,
1330,
163... | 1.951852 | 1,890 |
from django.contrib.auth import get_user_model, authenticate
from rest_framework import serializers
from . import models, constants
User = get_user_model()
| [
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
1330,
651,
62,
7220,
62,
19849,
11,
8323,
5344,
198,
6738,
1334,
62,
30604,
1330,
11389,
11341,
198,
6738,
764,
1330,
4981,
11,
38491,
198,
198,
12982,
796,
651,
62,
7220,
62,
19849,
3419,... | 3.613636 | 44 |
import torch
import time
| [
11748,
28034,
198,
11748,
640,
628
] | 4.333333 | 6 |
import numpy as np
import os
from utils import IntersectList, image_ids_to_pair_id, pair_id_to_image_ids, blob_to_array
from matches_list import MatchesList
from database import COLMAPDatabase
# def RegenerateMatches(num_images, is_geo_neighbors, matches_list):
# # subject to simplications. For now we mimic the original code
# # Actually I feel like do this in my way is way more convenient and straightforaward
# new_matches_list = MatchesList(num_images)
# for i in range(num_images):
# for j, matches in matches_list[i]:
# # j is stored as 1-based
# if is_geo_neighbors[i][j-1]:
# new_matches_list.append((j, matches))
#
# return new_matches_list
# for i in range(num_images):
# for j in range(num_images):
# existed = False
# for k in path_graph[i]:
# if k == j:
# existed = True
# break
# if not existed:
# for k in path_graph[j]:
# if k == i:
# existed = True
# break
#
# if existed:
# pair_id = image_ids_to_pair_id(i, j)
| [
11748,
299,
32152,
355,
45941,
198,
11748,
28686,
198,
198,
6738,
3384,
4487,
1330,
4225,
8831,
8053,
11,
2939,
62,
2340,
62,
1462,
62,
24874,
62,
312,
11,
5166,
62,
312,
62,
1462,
62,
9060,
62,
2340,
11,
44812,
62,
1462,
62,
18747,... | 2.033223 | 602 |
import json
from random import choice
import config
import modules
from templates.quick_replies import add_quick_reply
from templates.text import TextTemplate
| [
11748,
33918,
198,
6738,
4738,
1330,
3572,
198,
198,
11748,
4566,
198,
11748,
13103,
198,
6738,
24019,
13,
24209,
62,
35666,
444,
1330,
751,
62,
24209,
62,
47768,
198,
6738,
24019,
13,
5239,
1330,
8255,
30800,
628
] | 4.351351 | 37 |
import tensorflow as tf
import tensorflow.contrib.slim as slim
import numpy as np
import sys
import random
# from contact_point_dataset_torch_multi_label import MyDataset
from simple_dataset import MyDataset
import os
import time
import argparse
from functools import partial
from torch.utils.data import DataLoader
import torch
# from torch.utils.tensorboard import SummaryWriter
from tensorboardX import SummaryWriter
import datetime
random.seed(2)
torch.manual_seed(2)
np.random.seed(2)
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
UTILS_DIR = os.path.abspath(os.path.join(BASE_DIR, '..', 'utils'))
sys.path.append(UTILS_DIR)
from data_helper import *
from coord_helper import *
from train_helper import *
from bullet_helper import *
from rotation_lib import *
import s1_model_multi_label as s1_model
import s2a_model as s2a_model
import s2b_model_discretize as s2b_model
import s3_classifier_model as s3_model
from s2_utils import *
import s3_replay_buffer_pose as ReplayBuffer
import ES_multithread
import multiprocessing
from scipy.special import softmax
from s3_rl_collect import calc_pose_cem_init, cem_transform_pc_batch, cem_eval, bullet_check
if __name__ == '__main__':
    # CLI for the s3 RL data-collection / training stage.
    parser = argparse.ArgumentParser()
    parser.add_argument("--home_dir_data", default="../data")
    parser.add_argument('--pointset_dir', default='/scr2/')
    parser.add_argument('--bohg4', action='store_true')
    parser.add_argument('--no_vis', action='store_true')
    parser.add_argument('--model_name', default='s3_rl_collect')
    parser.add_argument('--comment', default='')
    parser.add_argument('--exp_name', default='exp_s3')
    parser.add_argument('--debug', action='store_true')
    parser.add_argument('--log_freq', type=int, default=2)
    parser.add_argument('--train_list', default='train_list')
    parser.add_argument('--test_list', default='test_list')
    parser.add_argument('--restrict_object_cat', default='')
    parser.add_argument('--run_test', action='store_true')
    parser.add_argument('--no_save', action='store_true')
    parser.add_argument('--overfit', action='store_true')
    parser.add_argument('--restore_model_name', default='')
    parser.add_argument('--restore_model_epoch', type=int, default=-1)
    parser.add_argument('--max_epochs', type=int, default=10000)
    parser.add_argument('--eval_epoch_freq', type=int, default=2)
    parser.add_argument('--eval_sample_n', type=int, default=1)
    parser.add_argument('--model_save_freq', type=int, default=3000)
    parser.add_argument('--no_eval', action='store_true')
    parser.add_argument('--loss_transl_const', default=1)
    parser.add_argument('--data_one_pose', action='store_true')
    parser.add_argument('--data_vary_scale', action='store_true')
    parser.add_argument('--data_more_pose', action='store_true')
    parser.add_argument('--data_vary_scale_more_pose', action='store_true')
    parser.add_argument('--batch_size', type=int, default=10)
    parser.add_argument('--learning_rate', type=float, default=1e-4)
    # s1 (latent encoder) arguments
    parser.add_argument('--z_dim', type=int, default=32)
    # s2 (correspondence) arguments
    parser.add_argument('--top_k_o', type=int, default=128)
    parser.add_argument('--top_k_h', type=int, default=128)
    parser.add_argument('--n_gt_sample', type=int, default=128)
    parser.add_argument('--top_k_corr', type=int, default=256)
    parser.add_argument('--pose_loss_l2', action='store_true')
    # s3 (CEM pose refinement / RL collection) arguments
    parser.add_argument('--s3_num_cp', type=int, default=3)
    parser.add_argument('--cem_n_iter', type=int, default=10)
    parser.add_argument('--cem_max_transl', default=0.02)
    parser.add_argument('--cem_max_aa', default=0.5)
    parser.add_argument('--cem_sigma_init_transl', default=1e-2)
    parser.add_argument('--cem_sigma_init_aa', default=1e-1)
    parser.add_argument('--cem_pop_size', type=int, default=32)
    parser.add_argument('--cem_damp_transl', default=0.005)
    parser.add_argument('--cem_damp_limit_transl', default=1e-2)
    parser.add_argument('--cem_damp_aa', default=0.1)
    parser.add_argument('--cem_damp_limit_aa', default=0.1)
    parser.add_argument('--cem_parents', type=int, default=10)
    parser.add_argument('--bullet_gui', action='store_true')
    parser.add_argument('--s3_train_folder_dir', default='/juno/downloads/new_hang_training/')
    parser.add_argument('--s3_train_name', default='s3')
    parser.add_argument('--s3_device_name', default='bohg4')
    parser.add_argument('--s3_buffer_dir', default='')
    parser.add_argument('--s3_model_dir', default='')
    parser.add_argument('--no_fcl', action='store_true')
    parser.add_argument('--s3_buffer_freq', default=1000, type=int)
    parser.add_argument('--s3_model_freq', default=1000, type=int)
    parser.add_argument('--pretrain_s3', action='store_true')
    parser.add_argument('--pretrain_s3_folder', default='exp_s3')
    parser.add_argument('--pretrain_s3_model_name', default='Feb19_14-26-47_s3_classifier_model_new_data')
    parser.add_argument('--pretrain_s3_epoch', default=750000, type=int)
    parser.add_argument('--preload_pose_dir', default='/scr1/yifan/geo-hook/lin_my/runs/exp_s3/Mar13_23-04-53_s3_rl_collect_20_rand/eval/s3_rl_collect_20_rand_eval_epoch_1_train.json')
    args = parser.parse_args()
    args.data_more_pose = True

    # Machine-specific path overrides for the bohg4 host.
    if args.bohg4:
        args.pointset_dir = '/scr1/yifan'
        args.no_vis = True
        args.home_dir_data = '/scr1/yifan/hang'

    # Compose a run name from the model name and the optional qualifiers.
    file_name = "{}".format(args.model_name)
    file_name += '_{}'.format(args.restrict_object_cat) if args.restrict_object_cat != '' else ''
    file_name += "_{}".format(args.comment) if args.comment != "" else ""
    file_name += '_overfit' if args.overfit else ''

    folder_name = datetime.datetime.now().strftime('%b%d_%H-%M-%S_') + file_name
    if args.run_test:
        folder_name += '_test'

    result_folder = 'runs/{}'.format(folder_name)
    # FIX: was `args.exp_name is not ""` — identity comparison with a string
    # literal is implementation-dependent and raises a SyntaxWarning; the
    # intent is a value comparison.
    if args.exp_name != "":
        result_folder = 'runs/{}/{}'.format(args.exp_name, folder_name)
    if args.debug:
        result_folder = 'runs/debug/{}'.format(folder_name)

    model_folder = os.path.join(result_folder, 'models')
    if not os.path.exists(model_folder):
        # FIX: was os.makedirs(result_folder), which never created the
        # 'models' subdirectory it had just tested for. Creating
        # model_folder also creates result_folder as its parent.
        os.makedirs(model_folder)

    if args.debug:
        args.s3_train_name = 'debug'
    else:
        args.s3_train_name += "_{}".format(args.comment) if args.comment != "" else ""

    # Layout of the shared s3 training folder: <root>/<name>/{buffers/<device>, models}
    s3_train_dir = os.path.join(args.s3_train_folder_dir, args.s3_train_name)
    mkdir_if_not(s3_train_dir)
    args.s3_buffer_dir = os.path.join(s3_train_dir, 'buffers')
    mkdir_if_not(args.s3_buffer_dir)
    args.s3_buffer_dir = os.path.join(args.s3_buffer_dir, args.s3_device_name)
    mkdir_if_not(args.s3_buffer_dir)
    args.s3_model_dir = os.path.join(s3_train_dir, 'models')
    mkdir_if_not(args.s3_model_dir)

    print("---------------------------------------")
    print("Model Name: {}, Train List: {}, Test List: {}".format(args.model_name, args.train_list, args.test_list))
    print("---------------------------------------")
    if args.run_test:
        print("Restore Model: {}, Test Sample N: {}".format(args.restore_model_name, args.eval_sample_n))
        print("---------------------------------------")

    # TensorBoard writer only exists for training runs.
    writer = None
    if not args.run_test:
        writer = SummaryWriter(log_dir=result_folder, comment=file_name)
        # Record all parameter values for reproducibility.
        with open("{}/parameters.txt".format(result_folder), 'w') as file:
            for key in sorted(vars(args).keys()):
                value = vars(args)[key]
                file.write("{} = {}\n".format(key, value))

    cp_result_folder_dir = os.path.join(args.home_dir_data, 'dataset_cp')
    train_list_dir = os.path.join(cp_result_folder_dir, 'labels', '{}.txt'.format(args.train_list))
    test_list_dir = os.path.join(cp_result_folder_dir, 'labels', '{}.txt'.format(args.test_list))
    print('TRAIN_LIST:', args.train_list, train_list_dir)
    print('TEST_LIST:', args.test_list, test_list_dir)

    # if args.overfit:
    # 	args.no_eval = True
    # 	args.no_save = True
    if args.run_test:
        args.max_epochs = 1

    # A restored model needs both a name and an epoch, or neither.
    if args.restore_model_name != '':
        assert args.restore_model_epoch != -1
    if args.restore_model_epoch != -1:
        assert args.restore_model_name != ''

    train_loader = None
    if (not args.run_test) or args.overfit:
        train_set = MyDataset(args.home_dir_data, train_list_dir, is_train=True, use_partial_pc=True, use_fcl=(not args.no_fcl), args=args)
        train_loader = DataLoader(train_set, batch_size=args.batch_size, shuffle=True,
                                  num_workers=6, collate_fn=MyDataset.pad_collate_fn_for_dict, drop_last=True)
    if not args.no_eval:
        # In overfit mode we evaluate on the training set itself.
        test_set = MyDataset(args.home_dir_data, test_list_dir, is_train=False, use_partial_pc=True, use_fcl=(not args.no_fcl), args=args)
        test_loader = DataLoader(train_set if args.overfit else test_set, batch_size=args.batch_size, shuffle=True,
                                 num_workers=6, collate_fn=MyDataset.pad_collate_fn_for_dict, drop_last=True)
    else:
        test_set = None
        test_loader = None

    # Collision objects are shared with the trainer via extra_dict.
    extra_dict = {
        'fcl_object_dict': train_set.fcl_object_dict,
        'fcl_hook_dict': train_set.fcl_hook_dict,
    }
    if not args.run_test:
        train(args, train_set, train_loader, test_set, test_loader, writer, result_folder, file_name, extra_dict=extra_dict)
    else:
        train(args, test_set, test_loader, test_set, test_loader, writer, result_folder, file_name, extra_dict=extra_dict)
| [
11748,
11192,
273,
11125,
355,
48700,
198,
11748,
11192,
273,
11125,
13,
3642,
822,
13,
82,
2475,
355,
18862,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
25064,
198,
11748,
4738,
198,
2,
422,
2800,
62,
4122,
62,
19608,
292,
316,
6... | 2.58439 | 3,549 |
import random
from blubber_orm import Reservations, Items, Users, Orders
| [
11748,
4738,
198,
198,
6738,
698,
549,
527,
62,
579,
1330,
40425,
602,
11,
17230,
11,
18987,
11,
30689,
198
] | 3.7 | 20 |
# Used as a configuration file.
# FIX: `def fan:` was a syntax error (and a function cannot hold a plain
# attribute); a class provides the intended "fan" namespace. The smart
# quotes in the dependency list were also invalid Python tokens.
class fan:
    # BCM GPIO pin the fan is wired to.
    pin = 4

# Runtime requirements of this configuration.
dependencies = ["python3", "gpiozero"]
| [
2,
1484,
355,
257,
8398,
2393,
198,
198,
4299,
4336,
25,
198,
220,
220,
220,
6757,
796,
604,
198,
198,
45841,
3976,
796,
685,
447,
250,
29412,
18,
447,
251,
11,
447,
251,
31197,
952,
22570,
447,
251,
60,
198
] | 2.275 | 40 |
from .numpy_backend import NumPyBackend
from .numba_backend import numba_backend_lazy
from .qasm_output_backend import QasmOutputBackend
from .ibmq_backend import ibmq_backend
from .sympy_backend import SympyBackend
from .onequbitgate_transpiler import OneQubitGateCompactionTranspiler
from .twoqubitgate_transpiler import TwoQubitGateDecomposingTranspiler
from .draw_backend import DrawCircuit
from .quimb import Quimb
from .cuquantum import cuTN
# Registry of circuit backends, keyed by the public name callers pass in.
# Values are the classes / factory callables imported above.
BACKENDS = dict([
    ("numpy", NumPyBackend),
    ("numba", numba_backend_lazy),
    ("qasm_output", QasmOutputBackend),
    ("ibmq", ibmq_backend),
    ("sympy_unitary", SympyBackend),
    ("2q_decomposition", TwoQubitGateDecomposingTranspiler),
    ("1q_compaction", OneQubitGateCompactionTranspiler),
    ("draw", DrawCircuit),
    ("quimb", Quimb),
    ("cuTN", cuTN),
])

# Backend used when the caller does not name one explicitly.
DEFAULT_BACKEND_NAME = "quimb"
| [
6738,
764,
77,
32152,
62,
1891,
437,
1330,
31835,
20519,
7282,
437,
198,
6738,
764,
77,
2178,
64,
62,
1891,
437,
1330,
997,
7012,
62,
1891,
437,
62,
75,
12582,
198,
6738,
764,
80,
8597,
62,
22915,
62,
1891,
437,
1330,
1195,
8597,
... | 2.495495 | 333 |
from django.shortcuts import render, redirect
# Create your views here.
from .forms import PostForm
from .models import Post
| [
6738,
42625,
14208,
13,
19509,
23779,
1330,
8543,
11,
18941,
198,
198,
2,
13610,
534,
5009,
994,
13,
198,
6738,
764,
23914,
1330,
2947,
8479,
198,
6738,
764,
27530,
1330,
2947,
198
] | 3.9375 | 32 |
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
Created on 17/10/27 09:53:55
@author: Changzhi Sun
"""
import argparse
import os
import json
import utils
# Command line: a single required path to the JSON configuration file.
parser = argparse.ArgumentParser()
parser.add_argument("--config", help="path to json config", required=True)
args = parser.parse_args()
config_file_path = args.config
config = utils.read_config(config_file_path)

# Relative locations of the raw JSON splits inside the corpus directory.
train_file, dev_file, test_file = "train/data.json", "dev/data.json", "test/data.json"

# Resolve the corpus directory once, then derive every input/output path.
corpus_dir = os.path.join("../data/", config['data']['corpus'])
TRAIN_CORPUS = os.path.join(corpus_dir, train_file)
DEV_CORPUS = os.path.join(corpus_dir, dev_file)
TEST_CORPUS = os.path.join(corpus_dir, test_file)
TRAIN_SAVE = os.path.join(corpus_dir, "train/train.conll")
DEV_SAVE = os.path.join(corpus_dir, "dev/dev.conll")
TEST_SAVE = os.path.join(corpus_dir, "test/test.conll")

# Convert each JSON split to CoNLL format (train, then dev, then test).
for src_path, dst_path in ((TRAIN_CORPUS, TRAIN_SAVE),
                           (DEV_CORPUS, DEV_SAVE),
                           (TEST_CORPUS, TEST_SAVE)):
    conll_input_gen(src_path, dst_path)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
40477,
12,
23,
532,
9,
12,
198,
37811,
198,
41972,
319,
1596,
14,
940,
14,
1983,
7769,
25,
4310,
25,
2816,
198,
198,
31,
9800,
25,
22597,
89,
5303,
3825... | 2.375556 | 450 |
#!/usr/bin/env python
import numpy as np
import time, sys
import matplotlib.pyplot as plt
import matplotlib
from generic_seg import GenericSegmenter
import cv2
from sklearn.neighbors import BallTree
from scipy.stats import pearsonr, spearmanr
import utils
k_closest = 5
aspect_thresh = 0.45
type_thresh = -2.8
plt.ion()
# LOAD MODEL DATABASE
comp_fc7, props, fc7_feats, pool5_feats = utils.load_feature_db()
type_ids = np.zeros(shape=(len(props),), dtype=np.int32)
aspect_ids = np.zeros(shape=(len(props),), dtype=np.int32)
object_ids = np.zeros(shape=(len(props),), dtype=np.int32)
for i in range(len(props)):
type_ids[i] = props[i]['type_id']
aspect_ids[i] = props[i]['aspect_id']
object_ids[i] = props[i]['object_id']
class_gmms = utils.load_feature_stats()
labels = utils.load_db_labels()
tree = BallTree(comp_fc7, leaf_size=5)
test_comp_fc7, test_props, test_fc7_feats, test_pool5_feats = utils.load_test_set_db()
test_type_ids = np.zeros(shape=(len(props),), dtype=np.int32)
test_aspect_ids = np.zeros(shape=(len(props),), dtype=np.int32)
test_object_ids = np.zeros(shape=(len(props),), dtype=np.int32)
for i in range(len(test_props)):
test_type_ids[i] = test_props[i]['type_id']
test_aspect_ids[i] = test_props[i]['aspect_id']
test_object_ids[i] = test_props[i]['object_id']
# RUN EVALUATION
type_query_times = []
aspect_comp_times = []
type_successes = []
aspect_successes = []
object_successes = []
aspect_dists = []
top_2_success = []
for iter in range(len(test_props)):
query_comp_fc7 = test_comp_fc7[iter, :]
query_pool5 = test_pool5_feats[iter, :]
query_type_id = test_type_ids[iter]
query_aspect_id = test_aspect_ids[iter]
query_object_id = test_object_ids[iter]
query_start_time = time.time()
distances, idxes = tree.query(query_comp_fc7, k=k_closest)
distances = distances[0]
idxes = idxes[0]
R_type_ids = type_ids[idxes]
R_unique_type_ids = np.unique(R_type_ids)
R_aspect_ids = aspect_ids[idxes]
# SCORE EACH TYPE IN R
valid_types = []
for i, type_id in enumerate(R_unique_type_ids):
gmm = class_gmms[type_id]
score = gmm.score(query_comp_fc7) / comp_fc7.shape[1]
if score > type_thresh:
valid_types.append(type_id)
# IF NO VALID TYPES, CHECK IF THAT WAS THE ANSWER
# UNKNOWN TYPES IN TEST SET ARE ALL NEGATIVE
if len(valid_types) == 0:
if query_type_id < 0:
type_successes.append(1)
else:
print 'TYPE FAILURE NO VALID TYPES', iter, query_type_id
type_successes.append(0)
type_query_times.append(time.time() - query_start_time)
continue # no need to go on from here
if not query_type_id in valid_types:
print 'TYPE FAILURE', iter, query_type_id, valid_types, score
type_successes.append(0)
continue
else:
type_successes.append(1)
type_mask = np.zeros(R_type_ids.shape, dtype=np.bool)
for type_id in valid_types:
type_mask = type_mask | R_type_ids == type_id
aspect_idxes = idxes[type_mask]
type_query_times.append(time.time() - query_start_time)
# find the closest in terms of pool 5 features
# search over all classes returned and use the pearson r correlation
aspect_start_time = time.time()
query_pool5 = query_pool5 / query_pool5.max()
pearson_rhos = []
for idx in aspect_idxes:
pool5 = pool5_feats[idx, :]
p_rho, pval = pearsonr(pool5, query_pool5)
pearson_rhos.append(p_rho)
aspect_sort_idxes = np.argsort(pearson_rhos)[::-1]
print pearson_rhos, iter, query_aspect_id, aspect_ids[aspect_idxes[aspect_sort_idxes[0]]]
if pearson_rhos[aspect_sort_idxes[0]] < aspect_thresh:
if query_type_id > 0:
aspect_successes.append(0)
object_successes.append(0)
top_2_success.append(0)
print 'No sufficient Aspect Fail!'
continue
else:
object_successes.append(int(query_object_id == object_ids[aspect_idxes[aspect_sort_idxes[0]]]))
ans_aspect_id = aspect_ids[aspect_idxes[aspect_sort_idxes[0]]]
if query_aspect_id == ans_aspect_id:
aspect_successes.append(1)
top_2_success.append(1)
else:
aspect_successes.append(0)
print 'Fail Due to Top Aspect not being query Aspect!'
if len(aspect_sort_idxes) > 1 and pearson_rhos[aspect_sort_idxes[1]] > aspect_thresh:
ans_aspect_id_2 = aspect_ids[aspect_idxes[aspect_sort_idxes[1]]]
if query_aspect_id == ans_aspect_id_2:
top_2_success.append(1)
else:
print 'Top-2 Fail!' , query_aspect_id, ans_aspect_id_2
top_2_success.append(0)
else:
print 'Top-2 Fail! No Second one to consider'
top_2_success.append(0)
dist = abs(query_aspect_id - ans_aspect_id)
if dist > 4:
dist = abs((dist + 3) % 8 - 3)
aspect_dists.append(dist)
aspect_comp_times.append(time.time() - aspect_start_time)
aspect_dists = np.asarray(aspect_dists)
type_query_times = np.asarray(type_query_times)
type_successes = np.asarray(type_successes)
aspect_successes = np.asarray(aspect_successes)
object_successes = np.asarray(object_successes)
aspect_comp_times = np.asarray(aspect_comp_times)
top_2_success = np.asarray(top_2_success)
avg_type_query_time = np.mean(type_query_times)
avg_aspect_comp_time = np.mean(aspect_comp_times)
avg_type_success = np.mean(type_successes)
avg_aspect_success = np.mean(aspect_successes)
avg_aspect_dist = np.mean(aspect_dists)
avg_top_2_success = np.mean(top_2_success)
avg_object_success = np.mean(object_successes)
print 'Avg Type Query Time :', avg_type_query_time
print 'Avg Aspect Comp time : ', avg_aspect_comp_time
print 'Avg Type Success : ', avg_type_success
print 'Avg Object Success : ', avg_object_success
print 'Avg Aspect Success : ', avg_aspect_success
print 'Avg Aspect Dist : ', avg_aspect_dist
print 'Avg Top 2 Success', avg_top_2_success
#
# utils.dump_test_set_db(test_comp_fc7, test_props, test_fc7_feats, test_pool5_feats)
fc7_compressor = utils.load_compressor(layer='fc7', dimension=128, compression='pca')
fc7_scalar = utils.load_scalar(layer='fc7')
pool5_scalar = utils.load_scalar(layer='pool5')
compression_times = []
for i in range(len(props)):
comp_start_time = time.time()
fc7 = fc7_feats[i, :]
fc7 = fc7_scalar.transform(fc7)
comp_feat = fc7_compressor.transform(fc7)
compression_times.append(time.time() - comp_start_time)
compression_times = np.asarray(compression_times)
print 'Avg Compression Time : ', compression_times.mean()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
640,
11,
25064,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
11748,
2603,
29487,
8019,
198,
198,
6738,
14276,
62,
... | 2.221638 | 3,041 |
# -*- coding: utf-8 -*-
# ____ ____
# / ___| ___ _ __ ___ ___ _ __ | _ \ _ _
# \___ \ / __| '__/ _ \/ _ \ '_ \| |_) | | | |
# ___) | (__| | | __/ __/ | | | __/| |_| |
# |____/ \___|_| \___|\___|_| |_|_| \__, |
# |___/
"""
Screenpy
FADE IN:
INT. SITEPACKAGES DIRECTORY
Screenpy is a composition-based test framework. It is inspired by the Serenity
library for Java.
:copyright: (c) 2019 by Perry Goy.
:license: MIT, see LICENSE for more details.
"""
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
628,
198,
2,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
1427,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,... | 1.556745 | 467 |
try:
import comet_ml
except ModuleNotFoundError as e:
print("comet_ml not found, ignoring")
import sys
import os
import yaml
import json
from datetime import datetime
import glob
import random
import platform
import numpy as np
from pathlib import Path
import click
from tqdm import tqdm
import shutil
from functools import partial
import shlex
import subprocess
import matplotlib.pyplot as plt
import logging
import tensorflow as tf
from tensorflow.keras import mixed_precision
import tensorflow_addons as tfa
import keras_tuner as kt
from tfmodel.data import Dataset
from tfmodel.datasets import CMSDatasetFactory, DelphesDatasetFactory
from tfmodel.model_setup import (
make_model,
configure_model_weights,
LearningRateLoggingCallback,
prepare_callbacks,
FlattenedCategoricalAccuracy,
SingleClassRecall,
eval_model,
freeze_model,
)
from tfmodel.utils import (
get_lr_schedule,
get_optimizer,
create_experiment_dir,
get_strategy,
make_weight_function,
load_config,
compute_weights_invsqrt,
compute_weights_none,
get_train_val_datasets,
get_dataset_def,
set_config_loss,
get_loss_dict,
parse_config,
get_best_checkpoint,
delete_all_but_best_checkpoint,
get_tuner,
get_heptfds_dataset,
get_datasets,
)
from tfmodel.lr_finder import LRFinder
from tfmodel import hypertuning
from tfmodel.utils_analysis import (
plot_ray_analysis,
analyze_ray_experiment,
topk_summary_plot_v2,
summarize_top_k,
count_skipped_configurations,
)
import ray
from ray import tune
from ray.tune.integration.keras import TuneReportCheckpointCallback
from ray.tune.integration.tensorflow import DistributedTrainableCreator
from ray.tune.logger import TBXLoggerCallback
from ray.tune import Analysis
from raytune.search_space import search_space, set_raytune_search_parameters, raytune_num_samples
from raytune.utils import get_raytune_schedule, get_raytune_search_alg
# Registry mapping a customization name (given on the CLI via --customize)
# to the function that mutates the pipeline config.
# NOTE(review): customize_pipeline_test is not defined in this chunk —
# presumably it lives earlier in this module; verify before adding entries.
customization_functions = {
    "pipeline_test": customize_pipeline_test
}
@click.group()
@click.help_option("-h", "--help")
@main.command()
@click.help_option("-h", "--help")
@click.option("-c", "--config", help="configuration file", type=click.Path())
@click.option("-w", "--weights", default=None, help="trained weights to load", type=click.Path())
@click.option("--ntrain", default=None, help="override the number of training steps", type=int)
@click.option("--ntest", default=None, help="override the number of testing steps", type=int)
@click.option("--nepochs", default=None, help="override the number of training epochs", type=int)
@click.option("-r", "--recreate", help="force creation of new experiment dir", is_flag=True)
@click.option("-p", "--prefix", default="", help="prefix to put at beginning of training dir name", type=str)
@click.option("--plot-freq", default=None, help="plot detailed validation every N epochs", type=int)
@click.option("--customize", help="customization function", type=str, default=None)
@main.command()
@click.help_option("-h", "--help")
@click.option("-t", "--train_dir", required=True, help="directory containing a completed training", type=click.Path())
@click.option("-c", "--config", help="configuration file", type=click.Path())
@click.option("-w", "--weights", default=None, help="trained weights to load", type=click.Path())
@click.option("-e", "--evaluation_dir", help="optionally specify evaluation output dir", type=click.Path())
def evaluate(config, train_dir, weights, evaluation_dir):
    """Evaluate the trained model in train_dir.

    Loads the config (from train_dir unless given explicitly), rebuilds the
    model, restores weights (the best checkpoint if none are specified),
    runs evaluation on the test split, and freezes the model for export.
    """
    if config is None:
        config = Path(train_dir) / "config.yaml"
        assert config.exists(), "Could not find config file in train_dir, please provide one with -c <path/to/config>"
    config, _ = parse_config(config, weights=weights)

    # Evaluation artifacts go next to the training run unless overridden.
    if evaluation_dir is None:
        eval_dir = str(Path(train_dir) / "evaluation")
    else:
        eval_dir = evaluation_dir
    Path(eval_dir).mkdir(parents=True, exist_ok=True)

    if config["setup"]["dtype"] == "float16":
        model_dtype = tf.dtypes.float16
        policy = mixed_precision.Policy("mixed_float16")
        mixed_precision.set_global_policy(policy)
        # FIX: the original additionally did
        #   opt = mixed_precision.LossScaleOptimizer(opt)
        # but no optimizer `opt` exists in this command (the line was copied
        # from the training path) and it raised NameError on the float16
        # path. Evaluation does not need an optimizer.
    else:
        model_dtype = tf.dtypes.float32

    strategy, num_gpus = get_strategy()
    ds_test, _ = get_heptfds_dataset(config["validation_dataset"], config, num_gpus, "test")
    ds_test = ds_test.batch(5)

    model = make_model(config, model_dtype)
    model.build((1, config["dataset"]["padded_num_elem_size"], config["dataset"]["num_input_features"]))

    # need to load the weights in the same trainable configuration as the model was set up
    configure_model_weights(model, config["setup"].get("weights_config", "all"))
    if weights:
        model.load_weights(weights, by_name=True)
    else:
        weights = get_best_checkpoint(train_dir)
        print("Loading best weights that could be found from {}".format(weights))
        model.load_weights(weights, by_name=True)

    eval_model(model, ds_test, config, eval_dir)
    freeze_model(model, config, ds_test.take(1), train_dir)
@main.command()
@click.help_option("-h", "--help")
@click.option("-c", "--config", help="configuration file", type=click.Path())
@click.option("-o", "--outdir", help="output directory", type=click.Path(), default=".")
@click.option("-n", "--figname", help="name of saved figure", type=click.Path(), default="lr_finder.jpg")
@click.option("-l", "--logscale", help="use log scale on y-axis in figure", default=False, is_flag=True)
def find_lr(config, outdir, figname, logscale):
    """Run the Learning Rate Finder to produce a batch loss vs. LR plot from
    which an appropriate LR-range can be determined"""
    config, _ = parse_config(config)

    # Decide tf.distribute.strategy depending on number of available GPUs
    strategy, num_gpus = get_strategy()

    # A single batch is enough: the LR finder repeats it while sweeping LR.
    ds_train, ds_info = get_heptfds_dataset(config["training_dataset"], config, num_gpus, "train", config["setup"]["num_events_train"])
    ds_train = ds_train.take(1)

    with strategy.scope():
        opt = tf.keras.optimizers.Adam(learning_rate=1e-7)  # This learning rate will be changed by the lr_finder
        # Optionally run in mixed precision; loss scaling guards fp16 underflow.
        if config["setup"]["dtype"] == "float16":
            model_dtype = tf.dtypes.float16
            policy = mixed_precision.Policy("mixed_float16")
            mixed_precision.set_global_policy(policy)
            opt = mixed_precision.LossScaleOptimizer(opt)
        else:
            model_dtype = tf.dtypes.float32

        model = make_model(config, model_dtype)
        config = set_config_loss(config, config["setup"]["trainable"])

        # Run model once to build the layers
        model.build((1, config["dataset"]["padded_num_elem_size"], config["dataset"]["num_input_features"]))

        configure_model_weights(model, config["setup"]["trainable"])

        loss_dict, loss_weights = get_loss_dict(config)
        model.compile(
            loss=loss_dict,
            optimizer=opt,
            sample_weight_mode="temporal",
            loss_weights=loss_weights,
            metrics={
                "cls": [
                    FlattenedCategoricalAccuracy(name="acc_unweighted", dtype=tf.float64),
                    FlattenedCategoricalAccuracy(use_weights=True, name="acc_weighted", dtype=tf.float64),
                ]
            },
        )
        model.summary()

        # One optimizer step per "epoch": the callback raises the LR each step
        # and records the resulting batch loss.
        max_steps = 200
        lr_finder = LRFinder(max_steps=max_steps)
        callbacks = [lr_finder]

        model.fit(
            ds_train.repeat(),
            epochs=max_steps,
            callbacks=callbacks,
            steps_per_epoch=1,
        )

        lr_finder.plot(save_dir=outdir, figname=figname, log_scale=logscale)
@main.command()
@click.help_option("-h", "--help")
@click.option("-t", "--train_dir", help="training directory", type=click.Path())
@click.option("-d", "--dry_run", help="do not delete anything", is_flag=True, default=False)
def delete_all_but_best_ckpt(train_dir, dry_run):
    """Delete all checkpoint weights in <train_dir>/weights/ except the one with lowest loss in its filename.

    Thin CLI wrapper; with --dry_run set, dry_run is forwarded so that
    nothing is actually removed.
    """
    # All logic lives in the shared helper (tfmodel.utils) so it can be
    # reused outside the CLI.
    delete_all_but_best_checkpoint(train_dir, dry_run)
@main.command()
@click.help_option("-h", "--help")
@click.option("-c", "--config", help="configuration file", type=click.Path(), required=True)
@click.option("-o", "--outdir", help="output dir", type=click.Path(), required=True)
@click.option("--ntrain", default=None, help="override the number of training events", type=int)
@click.option("--ntest", default=None, help="override the number of testing events", type=int)
@click.option("-r", "--recreate", help="overwrite old hypertune results", is_flag=True, default=False)
@main.command()
@click.help_option("-h", "--help")
@click.option("-c", "--config", help="configuration file", type=click.Path())
@click.option("-n", "--name", help="experiment name", type=str, default="test_exp")
@click.option("-l", "--local", help="run locally", is_flag=True)
@click.option("--cpus", help="number of cpus per worker", type=int, default=1)
@click.option("--gpus", help="number of gpus per worker", type=int, default=0)
@click.option("--tune_result_dir", help="Tune result dir", type=str, default=None)
@click.option("-r", "--resume", help="resume run from local_dir", is_flag=True)
@click.option("--ntrain", default=None, help="override the number of training steps", type=int)
@click.option("--ntest", default=None, help="override the number of testing steps", type=int)
@click.option("-s", "--seeds", help="set the random seeds", is_flag=True)
@main.command()
@click.help_option("-h", "--help")
@click.option("-d", "--exp_dir", help="experiment dir", type=click.Path())
@main.command()
@click.help_option("-h", "--help")
@click.option("-d", "--exp_dir", help="experiment dir", type=click.Path())
@click.option("-s", "--save", help="save plots in trial dirs", is_flag=True)
@click.option("-k", "--skip", help="skip first values to avoid large losses at start of training", type=int)
@click.option("--metric", help="experiment dir", type=str, default="val_loss")
@click.option("--mode", help="experiment dir", type=str, default="min")
@main.command()
@click.help_option("-h", "--help")
@click.option("-c", "--config", help="configuration file", type=click.Path())
@click.option("--ntrain", default=None, help="override the number of training events", type=int)
@click.option("--ntest", default=None, help="override the number of testing events", type=int)
def debug_data(config, ntrain, ntest):
    """Train a model defined by config"""
    # NOTE(review): despite the docstring, this command only loads the
    # datasets and drops into pdb on the first batch — it is interactive
    # debugging scaffolding, not a training entry point.
    config, config_file_stem, global_batch_size, n_train, n_test, n_epochs, weights = parse_config(
        config, ntrain, ntest, weights=None,
    )

    dataset_def = get_dataset_def(config)
    ds_train, ds_test, dataset_transform = get_train_val_datasets(config, global_batch_size=1, n_train=n_train, n_test=n_test)

    # cand_counts = np.zeros(8)
    # for data_item in tqdm(ds_train, desc="Counting"):
    #     import pdb; pdb.set_trace()
    #     cand_vals, cand_count = np.unique(np.argmax(data_item[1]['cls'], axis=2), return_counts=True)
    #     cand_counts[cand_vals.astype("int32")] += cand_count
    # print("cand_counts: ", cand_counts)

    # Rebuild the datasets through the CMS factory and break into the
    # debugger on the first training batch.
    dsf = CMSDatasetFactory(config)
    ds_train, _ = dsf.get_dataset(split="train")
    ds_test, _ = dsf.get_dataset(split="test")

    for data_item in tqdm(ds_train, desc="Counting"):
        import pdb; pdb.set_trace()
# Script entry point: dispatch to the click command group `main`
# (its definition is not visible in this chunk).
if __name__ == "__main__":
    main()
| [
28311,
25,
198,
220,
220,
220,
1330,
31733,
62,
4029,
198,
16341,
19937,
3673,
21077,
12331,
355,
304,
25,
198,
220,
220,
220,
3601,
7203,
785,
316,
62,
4029,
407,
1043,
11,
15482,
4943,
198,
198,
11748,
25064,
198,
11748,
28686,
198,... | 2.631987 | 4,383 |
# Interactive survey: tally adults (18+), men, and women under 20.
cont18 = homem = mulher20 = 0

keep_going = True
while keep_going:
    # One record per loop iteration: age, sex, and whether to continue.
    idade = int(input('\033[30mDigite a sua idade:\033[m '))
    sexo = str(input('\033[30mDigite o seu sexo [M/F]:\033[m ')).upper().strip()
    run = str(input('\033[30mVocê gostaria de continuar? [S/N]\033[m ')).upper().strip()

    if idade >= 18:
        cont18 = cont18 + 1
    if sexo[0] == 'M':
        homem = homem + 1
    if sexo[0] == 'F' and idade < 20:
        mulher20 = mulher20 + 1

    if run[0] == 'S':
        print('\033[1;30mOk!\033[m\n')
    else:
        keep_going = False

# Final report.
print('\033[1;32mFIM DA COMPUTAÇÃO DE DADOS!\033[m')
print(f'\n\033[1;30mTem {cont18} pessoas com mais de 18 anos!\033[m')
print(f'\033[1;30m{homem} homens foram cadastrados!\033[m')
print(f'\033[1;30m{mulher20} mulheres tem menos de 20 anos\033[m')
| [
3642,
1507,
796,
3488,
368,
796,
35971,
372,
1238,
796,
657,
201,
198,
4514,
6407,
25,
201,
198,
220,
220,
220,
4686,
671,
796,
493,
7,
15414,
10786,
59,
44427,
58,
1270,
76,
19511,
578,
257,
424,
64,
4686,
671,
7479,
44427,
58,
7... | 1.870813 | 418 |
# Run main() only when executed as a script; main() is defined elsewhere
# in this file (not visible in this chunk).
if __name__ == '__main__':
    main()
| [
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
1388,
3419,
198
] | 2.105263 | 19 |
import turtle

# Palette cycled through as the spiral grows.
colors = ['red', 'purple', 'blue', 'green', 'yellow', 'orange']

pen = turtle.Pen()
pen.speed(0)  # fastest drawing speed
turtle.bgcolor("black")

# 360 segments, each slightly longer and wider than the last, turning 59
# degrees each time — this traces an expanding six-armed spiral.
for step in range(360):
    pen.pencolor(colors[step % len(colors)])
    pen.width(step / 100 + 1)
    pen.forward(step)
    pen.left(59)

turtle.done()
11748,
28699,
201,
198,
4033,
669,
796,
37250,
445,
41707,
14225,
1154,
41707,
17585,
41707,
14809,
41707,
36022,
41707,
43745,
20520,
201,
198,
83,
28,
83,
17964,
13,
25553,
3419,
201,
198,
83,
13,
12287,
7,
15,
8,
201,
198,
83,
1796... | 2.135593 | 118 |
"""
_InsertStreamCMSSWVersion_
Oracle implementation of InsertStreamCMSSWVersion
"""
from WMCore.Database.DBFormatter import DBFormatter
| [
37811,
198,
62,
44402,
12124,
24187,
5432,
54,
14815,
62,
198,
198,
48625,
7822,
286,
35835,
12124,
24187,
5432,
54,
14815,
198,
198,
37811,
198,
198,
6738,
370,
9655,
382,
13,
38105,
13,
11012,
8479,
1436,
1330,
20137,
8479,
1436,
198
... | 3.414634 | 41 |
# Copyright 2017 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, print_function, unicode_literals
import re
from corpuscrawler.util import cleantext, crawl_bbc_news, crawl_udhr
| [
2,
15069,
2177,
3012,
3457,
13,
1439,
2489,
10395,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
... | 3.741294 | 201 |
# Read the full name and trim surrounding whitespace.
nome = str(input('Digite seu nome Completo: ')).strip()
print('Analisando seu nome...')
print('Seu nome em maiúsculas é {}.'.format(nome.upper()))
print('Seu nome em minuscula é {}.'.format(nome.lower()))
# Letter count excludes the spaces between name parts.
print('Seu nome ao todo tem {} letras.'.format(len(nome) - nome.count(' ')))
# FIX: the original used nome.find(' '), which equals the first-name length
# only when a space exists; for a single-word name it printed "-1 letras".
# Counting the first whitespace-separated word handles both cases (and an
# empty input yields 0 instead of crashing).
primeiro = nome.split()[0] if nome else ''
print('Seu primeiro nome tem {} letras.'.format(len(primeiro)))
| [
77,
462,
796,
965,
7,
15414,
10786,
19511,
578,
384,
84,
299,
462,
955,
1154,
1462,
25,
705,
29720,
36311,
3419,
198,
4798,
10786,
2025,
27315,
25440,
384,
84,
299,
462,
986,
11537,
198,
4798,
10786,
4653,
84,
299,
462,
795,
285,
18... | 2.5 | 176 |
# -*- coding: utf-8 -*-
"""
formlayout
==========
Module creating Qt form dialogs/layouts to edit various type of parameters
formlayout License Agreement (MIT License)
------------------------------------------
Copyright (c) 2009-2015 Pierre Raybaut
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
from __future__ import print_function
__version__ = '2.0.0alpha'
__license__ = __doc__
DEBUG_FORMLAYOUT = False
import os
import sys
import datetime
STDERR = sys.stderr
# ---+- PyQt-PySide compatibility -+----
_modname = os.environ.setdefault('QT_API', 'pyqt')
assert _modname in ('pyqt', 'pyqt5', 'pyside', 'pyside2')
if os.environ['QT_API'].startswith('pyqt'):
try:
if os.environ['QT_API'] == 'pyqt5':
import PyQt5 # analysis:ignore
else:
import PyQt4 # analysis:ignore
except ImportError:
# Switching to PySide
os.environ['QT_API'] = _modname = 'pyside'
try:
import PySide # analysis:ignore
except ImportError:
# Switching to PySide2
os.environ['QT_API'] = _modname = 'pyside2'
try:
import PySide2 # analysis:ignore
except ImportError:
raise ImportError("formlayout requires PyQt4, PyQt5, PySide or PySide2")
if os.environ['QT_API'] == 'pyqt':
try:
from PyQt4.QtGui import QFormLayout
except ImportError:
raise ImportError("formlayout requires PyQt4, PyQt5, PySide or PySide2")
from PyQt4.QtGui import * # analysis:ignore
from PyQt4.QtCore import * # analysis:ignore
from PyQt4.QtCore import pyqtSlot as Slot
from PyQt4.QtCore import pyqtProperty as Property
QT_LIB = 'PyQt4'
if os.environ['QT_API'] == 'pyqt5':
from PyQt5.QtWidgets import * # analysis:ignore
from PyQt5.QtGui import * # analysis:ignore
from PyQt5.QtCore import * # analysis:ignore
from PyQt5.QtCore import pyqtSignal as Signal # analysis:ignore
from PyQt5.QtCore import pyqtSlot as Slot # analysis:ignore
from PyQt5.QtCore import pyqtProperty as Property # analysis:ignore
SIGNAL = None # analysis:ignore
QT_LIB = 'PyQt5'
if os.environ['QT_API'] == 'pyside':
from PySide.QtGui import * # analysis:ignore
from PySide.QtCore import * # analysis:ignore
QT_LIB = 'PySide'
if os.environ['QT_API'] == 'pyside2':
from PySide2.QtGui import * # analysis:ignore
from PySide2.QtCore import * # analysis:ignore
from PySide2.QtWidgets import * # analysis:ignore
QT_LIB = 'PySide2'
# ---+- Python 2-3 compatibility -+----
PY2 = sys.version[0] == '2'
if PY2:
# Python 2
import codecs
def u(obj):
"""Make unicode object"""
return codecs.unicode_escape_decode(obj)[0]
else:
# Python 3
def u(obj):
"""Return string as it is"""
return obj
def is_text_string(obj):
    """Return True if `obj` is a text string, False if it is anything else,
    like binary data (Python 3) or QString (Python 2, PyQt API #1)"""
    # On Python 2 every text type derives from `basestring`; on Python 3 the
    # only text type is `str`.  The conditional expression looks up
    # `basestring` only when actually running under Python 2.
    text_type = basestring if PY2 else str
    return isinstance(obj, text_type)
def is_binary_string(obj):
    """Return True if `obj` is a binary string, False if it is anything else"""
    # Python 2's byte-string type is `str`; Python 3's is `bytes`.
    binary_type = str if PY2 else bytes
    return isinstance(obj, binary_type)
def is_string(obj):
    """Return True if `obj` is a text or binary Python string object,
    False if it is anything else, like a QString (Python 2, PyQt API #1)"""
    if is_text_string(obj):
        return True
    return is_binary_string(obj)
def to_text_string(obj, encoding=None):
    """Convert `obj` to (unicode) text string"""
    if PY2:
        # Python 2: decode through `unicode`, honouring the encoding if given.
        return unicode(obj) if encoding is None else unicode(obj, encoding)
    # Python 3
    if encoding is None:
        return str(obj)
    if isinstance(obj, str):
        # Already decoded text; str(obj, encoding) on a str would raise.
        return obj
    return str(obj, encoding)
class ColorButton(QPushButton):
"""
Color choosing push button
"""
__pyqtSignals__ = ("colorChanged(QColor)",)
if SIGNAL is None:
colorChanged = Signal("QColor")
@Slot(QColor)
color = Property("QColor", get_color, set_color)
def text_to_qcolor(text):
    """
    Create a QColor from specified string
    Avoid warning from Qt when an invalid QColor is instantiated
    """
    color = QColor()
    if not is_string(text):  # testing for QString (PyQt API#1)
        text = str(text)
    if not is_text_string(text):
        return color
    if text.startswith('#') and len(text) == 7:
        # '#rrggbb' form: every character must be '#' or a hex digit.
        if any(char.lower() not in '#0123456789abcdef' for char in text):
            return color
    elif text not in list(QColor.colorNames()):
        # Not a Qt-known color name either: return the invalid QColor.
        return color
    color.setNamedColor(text)
    return color
class ColorLayout(QHBoxLayout):
"""Color-specialized QLineEdit layout"""
class FileLayout(QHBoxLayout):
"""File-specialized QLineEdit layout"""
class SliderLayout(QHBoxLayout):
"""QSlider with QLabel"""
class RadioLayout(QVBoxLayout):
"""Radio buttons layout with QButtonGroup"""
class CheckLayout(QVBoxLayout):
"""Check boxes layout with QButtonGroup"""
class PushLayout(QHBoxLayout):
"""Push buttons horizontal layout"""
class CountLayout(QHBoxLayout):
"""Field with a QSpinBox"""
def font_is_installed(font):
    """Check if font is installed"""
    # A non-empty list is truthy, so callers may use the result as a flag.
    matches = []
    for family in QFontDatabase().families():
        if to_text_string(family) == font:
            matches.append(family)
    return matches
def tuple_to_qfont(tup):
    """
    Create a QFont from tuple:
    (family [string], size [int], italic [bool], bold [bool])
    """
    # Validate the shape before unpacking; any mismatch yields None.
    well_formed = (
        isinstance(tup, tuple)
        and len(tup) == 4
        and is_text_string(tup[0])
        and isinstance(tup[1], int)
        and isinstance(tup[2], bool)
        and isinstance(tup[3], bool)
    )
    if not well_formed:
        return None
    family, size, italic, bold = tup
    font = QFont()
    font.setFamily(family)
    font.setPointSize(size)
    font.setItalic(italic)
    font.setBold(bold)
    return font
class FontLayout(QGridLayout):
"""Font selection"""
class FormDialog(QDialog):
    """Form Dialog"""
    def get(self):
        """Return form result"""
        # It is important to avoid accessing the Qt C++ object as it has
        # probably already been destroyed, due to the Qt.WA_DeleteOnClose
        # attribute
        # NOTE(review): self.outfile, self.result and self.data are
        # presumably set in __init__ (not shown in this excerpt); `json`,
        # `OrderedDict` and `ET` are presumably imported at module top --
        # confirm against the full source.
        if self.outfile:
            if self.result in ['list', 'dict', 'OrderedDict']:
                # Python-literal dump: repr of the result object.
                fd = open(self.outfile + '.py', 'w')
                fd.write(str(self.data))
            elif self.result == 'JSON':
                # Re-parse keeping key order, then serialize to the file.
                fd = open(self.outfile + '.json', 'w')
                data = json.loads(self.data, object_pairs_hook=OrderedDict)
                json.dump(data, fd)
            elif self.result == 'XML':
                fd = open(self.outfile + '.xml', 'w')
                root = ET.fromstring(self.data)
                tree = ET.ElementTree(root)
                tree.write(fd, encoding='UTF-8')
            fd.close()
            # Implicitly returns None when writing to a file.
        else:
            return self.data
def fedit(data, title="", comment="", icon=None, parent=None, apply=None,
          ok=True, cancel=True, result='list', outfile=None, type='form',
          scrollbar=False, background_color=None, widget_color=None):
    """
    Create form dialog and return result
    (if Cancel button is pressed, return None)

    :param tuple data: datalist, datagroup (see below)
    :param str title: form title
    :param str comment: header comment
    :param QIcon icon: dialog box icon
    :param QWidget parent: parent widget
    :param str ok: customized ok button label
    :param str cancel: customized cancel button label
    :param tuple apply: (label, function) customized button label and callback
    :param function apply: function taking two arguments (result, widgets)
    :param str result: result serialization ('list', 'dict', 'OrderedDict',
                       'JSON' or 'XML')
    :param str outfile: write result to the file outfile.[py|json|xml]
    :param str type: layout type ('form' or 'questions')
    :param bool scrollbar: vertical scrollbar
    :param str background_color: color of the background
    :param str widget_color: color of the widgets
    :return: Serialized result (data type depends on `result` parameter)

    datalist: list/tuple of (field_name, field_value)
    datagroup: list/tuple of (datalist *or* datagroup, title, comment)

    Tips:
      * one field for each member of a datalist
      * one tab for each member of a top-level datagroup
      * one page (of a multipage widget, each page can be selected with a
        combo box) for each member of a datagroup inside a datagroup

    Supported types for field_value:
      - int, float, str, unicode, bool
      - colors: in Qt-compatible text form, i.e. in hex format or name (red,...)
        (automatically detected from a string)
      - list/tuple:
          * the first element will be the selected index (or value)
          * the other elements can be couples (key, value) or only values
    """
    # Create a QApplication instance if no instance currently exists
    # (e.g. if the module is used directly from the interpreter)
    # Under CI (TEST_CI_WIDGETS set), arm a 1-second timer that quits the
    # application so that the modal dialog cannot hang a test run.
    test_travis = os.environ.get('TEST_CI_WIDGETS', None)
    if test_travis is not None:
        app = QApplication.instance()
        if app is None:
            app = QApplication([])
        timer = QTimer(app)
        timer.timeout.connect(app.quit)
        timer.start(1000)
    elif QApplication.startingUp():
        # No application yet: create one and install Qt's own translations.
        _app = QApplication([])
        translator_qt = QTranslator()
        translator_qt.load('qt_' + QLocale.system().name(),
                   QLibraryInfo.location(QLibraryInfo.TranslationsPath))
        _app.installTranslator(translator_qt)
    # Unknown serialization/layout modes fall back to defaults with a
    # warning on stderr instead of raising.
    serial = ['list', 'dict', 'OrderedDict', 'JSON', 'XML']
    if result not in serial:
        print("Warning: '%s' not in %s, default to list" %
              (result, ', '.join(serial)), file=sys.stderr)
        result = 'list'
    layouts = ['form', 'questions']
    if type not in layouts:
        print("Warning: '%s' not in %s, default to form" %
              (type, ', '.join(layouts)), file=sys.stderr)
        type = 'form'
    # NOTE: `type` and `apply` shadow builtins, but they are part of the
    # public signature and cannot be renamed without breaking callers.
    dialog = FormDialog(data, title, comment, icon, parent, apply, ok, cancel,
                        result, outfile, type, scrollbar, background_color,
                        widget_color)
    if dialog.exec_():
        return dialog.get()
    # Implicitly returns None when the dialog is cancelled.
if __name__ == "__main__":
#--------- datalist example
datalist = create_datalist_example()
print("result:", fedit(datalist, title="Example",
comment="This is just an <b>example</b>.",
apply=apply_test))
#--------- datagroup example
datagroup = create_datagroup_example()
print("result:", fedit(datagroup, "Global title"))
#--------- datagroup inside a datagroup example
datalist = create_datalist_example()
datagroup = create_datagroup_example()
print("result:", fedit(((datagroup, "Title 1", "Tab 1 comment"),
(datalist, "Title 2", "Tab 2 comment"),
(datalist, "Title 3", "Tab 3 comment")),
"Global title"))
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
687,
39786,
198,
2559,
855,
198,
198,
26796,
4441,
33734,
1296,
17310,
82,
14,
10724,
5269,
284,
4370,
2972,
2099,
286,
10007,
628,
198,
687,
39786,
13789,
1... | 2.444401 | 5,117 |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
# ---------------------------------------------------------------------------
# ___ __ __ __ ___
# / | \ | \ | \ / Automatic
# \__ |__/ |__/ |___| \__ Annotation
# \ | | | | \ of
# ___/ | | | | ___/ Speech
#
#
# http://www.sppas.org/
#
# ---------------------------------------------------------------------------
# Laboratoire Parole et Langage, Aix-en-Provence, France
# Copyright (C) 2011-2016 Brigitte Bigi
#
# This banner notice must not be removed
# ---------------------------------------------------------------------------
# Use of this software is governed by the GNU Public License, version 3.
#
# SPPAS is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SPPAS is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SPPAS. If not, see <http://www.gnu.org/licenses/>.
#
# ---------------------------------------------------------------------------
# File: audioinfo.py
# ----------------------------------------------------------------------------
import wx
import logging
import sppas.src.audiodata.aio
from sppas.src.ui.wxgui.sp_consts import ERROR_COLOUR
from sppas.src.ui.wxgui.sp_consts import INFO_COLOUR
from sppas.src.ui.wxgui.sp_consts import WARNING_COLOUR
from sppas.src.ui.wxgui.sp_consts import OK_COLOUR
from sppas.src.ui.wxgui.sp_consts import MIN_PANEL_W
from sppas.src.ui.wxgui.sp_consts import MIN_PANEL_H
# ---------------------------------------------------------------------------
# Constants
# ---------------------------------------------------------------------------
LABEL_LIST = [ "Audio file name: ",
"Duration (seconds): ",
"Frame rate (Hz): ",
"Sample width (bits): ",
"Channels: " ]
NO_INFO_LABEL = " ... "
# ---------------------------------------------------------------------------
class AudioInfo( wx.Panel ):
    """
    @author: Brigitte Bigi
    @organization: Laboratoire Parole et Langage, Aix-en-Provence, France
    @contact: develop@sppas.org
    @license: GPL, v3
    @copyright: Copyright (C) 2011-2016 Brigitte Bigi
    @summary: Display general information about an audio file.
    Information has a different color depending on the level of acceptability
    in SPPAS.
    """
    def __init__(self, parent, preferences):
        """
        Create a new AudioInfo instance.
        @parent (wxWindow)
        """
        wx.Panel.__init__(self, parent)
        self._prefs = preferences
        # Parallel lists: one wx.StaticText label and one read-only
        # wx.TextCtrl value widget per entry of LABEL_LIST.
        self._labels = []
        self._values = []
        gbs = self._create_content()
        self.SetFont( self._prefs.GetValue('M_FONT') )
        self.SetBackgroundColour( self._prefs.GetValue('M_BG_COLOUR') )
        self.SetForegroundColour( self._prefs.GetValue('M_FG_COLOUR') )
        self.SetSizer(gbs)
        self.SetAutoLayout( True )
        self.SetMinSize((MIN_PANEL_W,MIN_PANEL_H))
        self.Layout()
    # -----------------------------------------------------------------------
    # Private methods to create the GUI and initialize members
    # -----------------------------------------------------------------------
    def _create_content(self):
        """
        GUI design.
        """
        # One row per LABEL_LIST entry: label in column 0, value in column 1.
        gbs = wx.GridBagSizer(len(LABEL_LIST), 2)
        for i,label in enumerate(LABEL_LIST):
            static_tx = wx.StaticText(self, -1, label)
            self._labels.append( static_tx )
            gbs.Add(static_tx, (i,0), flag=wx.ALL, border=2)
            # Read-only control; its content is refreshed via ChangeValue().
            tx = wx.TextCtrl(self, -1, NO_INFO_LABEL, style=wx.TE_READONLY)
            self._values.append( tx )
            gbs.Add(tx, (i,1), flag=wx.EXPAND|wx.RIGHT, border=2)
        # Let the value column absorb any extra horizontal space.
        gbs.AddGrowableCol(1)
        return gbs
    # -----------------------------------------------------------------------
    # GUI
    # -----------------------------------------------------------------------
    def SetPreferences(self, prefs):
        """
        Set new preferences.
        """
        self._prefs = prefs
        self.SetBackgroundColour( self._prefs.GetValue("M_BG_COLOUR") )
        self.SetForegroundColour( self._prefs.GetValue("M_FG_COLOUR") )
        self.SetFont( self._prefs.GetValue("M_FONT") )
    #-------------------------------------------------------------------------
    def SetFont(self, font):
        """
        Change font of all wx texts.
        """
        # Propagate to the panel itself, then to every child widget.
        wx.Window.SetFont( self,font )
        for p in self._values:
            p.SetFont( font )
        for l in self._labels:
            l.SetFont( font )
        self.Refresh()
    # -----------------------------------------------------------------------
    def SetBackgroundColour(self, colour):
        """
        Change the background color of all wx objects.
        """
        wx.Window.SetBackgroundColour( self,colour )
        for p in self._values:
            p.SetBackgroundColour( colour )
        for l in self._labels:
            l.SetBackgroundColour( colour )
        self.Refresh()
    # -----------------------------------------------------------------------
    def SetForegroundColour(self, colour):
        """
        Change the foreground color of all wx objects.
        """
        # Only the static labels follow the panel foreground; the value
        # fields get per-row colors from FileSelected/FileDeSelected.
        wx.Window.SetForegroundColour( self,colour )
        for l in self._labels:
            l.SetForegroundColour( colour )
        self.Refresh()
    # -----------------------------------------------------------------------
    # -----------------------------------------------------------------------
    # Callbacks
    # -----------------------------------------------------------------------
    def FileSelected(self, filename):
        """
        Show information of a sound file.
        """
        # NOTE(review): fix_filename/fix_duration/fix_framerate/
        # fix_sampwidth/fix_nchannels are helper methods of this class not
        # visible in this excerpt -- confirm against the full source.
        self.fix_filename(filename)
        try:
            _audio = sppas.src.audiodata.aio.open( filename )
            self.fix_duration( _audio.get_duration())
            self.fix_framerate( _audio.get_framerate())
            self.fix_sampwidth( _audio.get_sampwidth())
            self.fix_nchannels( _audio.get_nchannels())
            _audio.close()
        except Exception as e:
            logging.info(" ... Error reading %s: %s" % (filename,e))
            # On failure, reset every row except the file name (index 0).
            for i in range(1, len(self._values)):
                self._values[i].ChangeValue( NO_INFO_LABEL )
                self._values[i].SetForegroundColour( self._prefs.GetValue('M_FG_COLOUR') )
    #------------------------------------------------------------------------
    def FileDeSelected(self):
        """
        Reset information.
        """
        for v in self._values:
            # ChangeValue() updates the text without emitting a text event.
            v.ChangeValue( NO_INFO_LABEL )
            v.SetForegroundColour( self._prefs.GetValue('M_FG_COLOUR') )
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
41002,
12,
23,
532,
9,
12,
198,
2,
16529,
32284,
198,
2,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
46444,
220,
220,
11593,
220,
220,
220,
... | 2.584274 | 2,836 |
"""
Python Easy Language Translator PyElant is a python tool for easily performing
translations and storing it on the clipboard.
Input can be received via microphone, command line or the clipboard itself.
@brief PyElant
@author Paulo Marcos
@date 2021-03-19
Copyright (c) 2021 paulomarcosdj <@> outlook.com
"""
# pylint: disable-msg=C0103
import sys
import speech_recognition as sr
from googletrans import Translator
from pynput import keyboard
import pyperclip
import notify2
class PyElant:
    """ PyElant class

    NOTE(review): the methods below read attributes that are presumably
    created in __init__ (not visible in this excerpt): combination_clipboard,
    combination_microphone, current, verbose, input_language,
    output_language, disable_notification and text -- confirm against the
    full source.
    """
    def background_translator(self):
        """ Runs in the background and waits for key press """
        # Blocks until self.listener.stop() is called (Esc key, see on_press).
        with keyboard.Listener(on_press=lambda event: PyElant.on_press(self, event),
                on_release=lambda event: PyElant.on_release(self, event)) as self.listener:
            self.listener.join()
    def clipboard_translator(self):
        """ Translates from the clipboard back to itself """
        self.text = pyperclip.paste()
        PyElant.translate_text(self)
    def start_listening(self):
        """ Start listening for microphone input """
        # obtain audio from the microphone
        recognizer = sr.Recognizer()
        with sr.Microphone() as source:
            PyElant.printv(self.verbose, "Say something")
            audio = recognizer.listen(source)
        # recognize speech using Google Speech Recognition
        try:
            self.text = recognizer.recognize_google(audio, language=self.input_language)
            PyElant.translate_text(self)
        except sr.UnknownValueError:
            PyElant.printv(self.verbose, "Speech Recognition could not understand audio")
        except sr.RequestError as error:
            PyElant.printv(self.verbose, "Could not request results from Speech Recognition service; {0}".format(error))
    def on_press(self, key):
        """ Detects key press and performs either translation via microphone or clipboard """
        # self.current accumulates the keys currently held down; an action
        # fires when every key of a hotkey combination is held at once.
        if key in self.combination_clipboard:
            self.current.add(key)
            if all(k in self.current for k in self.combination_clipboard):
                PyElant.clipboard_translator(self)
        if key in self.combination_microphone:
            self.current.add(key)
            if all(k in self.current for k in self.combination_microphone):
                PyElant.start_listening(self)
        if key == keyboard.Key.esc:
            # Esc terminates the background listener loop.
            self.listener.stop()
    def on_release(self, key):
        """ Detects key release to reset key buffer """
        try:
            self.current.remove(key)
        except KeyError:
            # Released key was not tracked; nothing to do.
            pass
    def translate_text(self):
        """ Translates text from a specified input language to the specified output language """
        try:
            TRANSLATOR = Translator()
            result = TRANSLATOR.translate(self.text,
                                          src=self.input_language,
                                          dest=self.output_language)
            PyElant.printv(self.verbose, "Input: {}".format(self.text))
            PyElant.printv(self.verbose, "Output: {}".format(result.text))
            # The translation replaces the current clipboard content.
            pyperclip.copy(result.text)
        except ValueError as error:
            PyElant.printv(self.verbose, "Error found.")
            sys.exit(error)
        if not self.disable_notification:
            # Desktop notification showing the original and translated text.
            notify2.init('pyelant')
            notify = notify2.Notification(self.text,
                                          result.text + " was copied to the clipboard",
                                          "notification-message-im" # Icon name
                                          )
            notify.show()
    @staticmethod
    def printv(verbose, text):
        """ Print message if verbose is on """
        if verbose:
            print(text)
| [
37811,
198,
37906,
16789,
15417,
3602,
41880,
9485,
9527,
415,
318,
257,
21015,
2891,
329,
3538,
9489,
198,
7645,
49905,
290,
23069,
340,
319,
262,
47999,
13,
198,
20560,
460,
307,
2722,
2884,
21822,
11,
3141,
1627,
393,
262,
47999,
234... | 2.272944 | 1,678 |
from getratings.models.ratings import Ratings
| [
6738,
651,
10366,
654,
13,
27530,
13,
10366,
654,
1330,
36826,
201,
198,
201,
198
] | 3.266667 | 15 |
import torch
import torch.nn as nn
from utils import custom_init
# self.weight_init()
#
# def weight_init(self):
# for block in self._modules:
# try:
# for m in self._modules[block]:
# custom_init(m)
# except:
# custom_init(block)
# self.weight_init()
#
# def weight_init(self):
# for block in self._modules:
# try:
# for m in self._modules[block]:
# custom_init(m)
# except:
# custom_init(block)
| [
11748,
28034,
198,
11748,
28034,
13,
20471,
355,
299,
77,
198,
6738,
3384,
4487,
1330,
2183,
62,
15003,
628,
198,
220,
220,
220,
1303,
220,
220,
220,
220,
2116,
13,
6551,
62,
15003,
3419,
198,
220,
220,
220,
1303,
198,
220,
220,
220... | 1.826748 | 329 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#Created on Mon Jan 22 13:36:32 2018
#@author: bas
#First create the boxes to stack. Move the robot to the first box to pick. Add the box to the robot with attache_box(link, name). Move the arm to the place point. Release the box with remove_attached_object(link, name). Repeat.
import sys
import rospy
from moveit_commander import RobotCommander, PlanningSceneInterface, MoveGroupCommander
from geometry_msgs.msg import PoseStamped, Pose
import tf
import math
global scene
if __name__ == "__main__":
    try:
        main(sys.argv)
    except rospy.ROSInterruptException:
        # Bug fix: the exception class is spelled ROSInterruptException;
        # the previous "ROSInteruptException" attribute does not exist on
        # rospy, so reaching the except clause would itself raise
        # AttributeError.  It is raised when the node is shut down
        # (e.g. Ctrl-C) while sleeping/waiting; exit quietly.
        pass
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
41972,
319,
2892,
2365,
2534,
1511,
25,
2623,
25,
2624,
2864,
198,
2,
31,
9800,
25,
1615,
198,
198,
2,
5962,
225... | 3.11 | 200 |
__version__ = "0.9.2"
import os
from django.core.exceptions import ImproperlyConfigured
# load plugins
from django.conf import settings
try:
    # Merge the keys of FASTAPP_PLUGINS_CONFIG with the plain
    # FASTAPP_PLUGINS list.  Python 2 code: dict.keys() returns a list
    # here, so concatenating with `+` is valid (it would fail on Python 3).
    plugins_config = getattr(settings, "FASTAPP_PLUGINS_CONFIG", {})
    plugins = plugins_config.keys()
    plugins = plugins + getattr(settings, "FASTAPP_PLUGINS", [])
    # De-duplicate before importing each plugin module.
    # NOTE(review): my_import is presumably defined elsewhere in this
    # module (not visible in this excerpt) -- confirm.
    for plugin in list(set(plugins)):
        amod = my_import(plugin)
except ImproperlyConfigured, e:
    # Settings unavailable/misconfigured: report and continue without
    # plugins (Python 2 `except ..., e` and `print` statement syntax).
    print e
| [
834,
9641,
834,
796,
366,
15,
13,
24,
13,
17,
1,
198,
198,
11748,
28686,
198,
198,
6738,
42625,
14208,
13,
7295,
13,
1069,
11755,
1330,
12205,
525,
306,
16934,
1522,
198,
198,
2,
3440,
20652,
198,
6738,
42625,
14208,
13,
10414,
1330... | 2.721519 | 158 |
import cv2, time, pandas
from datetime import datetime
first_frame=None
#fill list with two empty items so status_list[-2] is valid on the first pass
status_list=[None,None]
times=[]
df=pandas.DataFrame(columns=["Start", "End"])
video=cv2.VideoCapture(0)
while True:
    check, frame = video.read()
    status=0
    gray=cv2.cvtColor(frame,cv2.COLOR_BGR2GRAY)
    #making the gray image blurry to remove noise and increase accuracy
    gray=cv2.GaussianBlur(gray, (21,21),0)
    if first_frame is None:
        #store the first frame in the first_frame variable
        first_frame=gray
        continue
    #delta_frame is the frame that holds the difference between frame 1 and the current frame
    delta_frame =cv2.absdiff(first_frame,gray)
    #thresh value - pixels differing from the first frame by more than 100 become white
    #may need to test - having much more success with 100 thresh value
    thresh_frame=cv2.threshold(delta_frame, 100, 255, cv2.THRESH_BINARY)[1] #threshBinary only requires access to the second returned tuple
    thresh_frame=cv2.dilate(thresh_frame, None, iterations=2)
    #find all the contours of the image
    # NOTE(review): 3-value unpacking matches the OpenCV 3.x API; OpenCV
    # 4.x findContours returns only 2 values -- confirm installed version.
    (_,cnts,_)=cv2.findContours(thresh_frame.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    for contour in cnts:
        #skip contours with an area below 500 pixels (noise)
        if cv2.contourArea(contour) < 500:
            continue
        status=1
        #draw a green rectangle around each large moving object
        (x, y, w, h)=cv2.boundingRect(contour)
        cv2.rectangle(frame,(x,y), (x+w, y+h), (0,255,0), 3)
    status_list.append(status)
    #to improve memory - only include last two status
    #comment to see all status
    status_list=status_list[-2:]
    #check the last item in the list and the item before the last item
    #motion just started (0 -> 1): record a start timestamp
    if status_list[-1]==1 and status_list[-2]==0:
        times.append(datetime.now())
    #motion just ended (1 -> 0): record an end timestamp
    if status_list[-1]==0 and status_list[-2]==1:
        times.append(datetime.now())
    cv2.imshow("Gray Frame", gray)
    cv2.imshow("Delta Frame", delta_frame)
    cv2.imshow("Threshold Frame", thresh_frame)
    cv2.imshow("Color Frame", frame)
    #key=cv2.waitKey(1000) # 1 second intevals
    key=cv2.waitKey(1) # 1 millisecond intervals
    #print the variables to check results - info only - not needed for project
    #print(gray)
    #print(delta_frame)
    #this is to break the loop with a "q" key (for quit)
    if key==ord('q'):
        #motion still in progress: close the open interval before exiting
        if status==1:
            times.append(datetime.now())
        break
#print(status)
print(status_list)
print(times)
for i in range(0,len(times), 2):
    #times holds alternating start/end stamps; write them out pairwise
    # NOTE(review): DataFrame.append is removed in pandas >= 2.0 -- this
    # works with pandas versions contemporary to the script.
    df=df.append({"Start":times[i], "End":times[i+1]}, ignore_index=True)
df.to_csv("Times.csv")
video.release()
cv2.destroyAllWindows()
11748,
269,
85,
17,
11,
640,
11,
19798,
292,
198,
6738,
4818,
8079,
1330,
4818,
8079,
198,
198,
11085,
62,
14535,
28,
14202,
198,
2,
20797,
1351,
351,
734,
6565,
3709,
198,
13376,
62,
4868,
41888,
14202,
11,
14202,
60,
198,
22355,
2... | 2.535065 | 1,155 |
import imgaug as ia
import imgaug.augmenters as iaa
import os
import cv2
import numpy as np
from util import sequence
anno_INPUT_DIR = r'annotation_files/'
img_INPUT_DIR = r'image_files/'
OUTPUT_DIR = 'output_dir/'
AUGMENT_SIZE = 6
if __name__ == '__main__':
main()
| [
11748,
33705,
7493,
355,
220,
544,
198,
11748,
33705,
7493,
13,
559,
5154,
364,
355,
220,
544,
64,
198,
11748,
28686,
198,
11748,
269,
85,
17,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
7736,
1330,
8379,
198,
198,
1236,
78,
62,
... | 2.464286 | 112 |
from glob import glob
from os import chdir
from os.path import dirname
import pandas as pd
import numpy as np
from pandas.core.frame import DataFrame
import matplotlib.pyplot as plt
from numpy.fft import rfft, rfftfreq
from scipy import signal
from math import pi
WINDOW = "sg" # sg for savgol filter
WINDOW = "hamming"
WINDOW_SIZE = 41
CUTOFF_FREQ = 2.5 # Hz
SG_POLYORDER = 2
SG_PARAMS = (WINDOW_SIZE, SG_POLYORDER)
USING_SG = WINDOW.lower() == "sg"
if __name__ == "__main__":
main()
| [
6738,
15095,
1330,
15095,
198,
6738,
28686,
1330,
442,
15908,
198,
6738,
28686,
13,
6978,
1330,
26672,
3672,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
19798,
292,
13,
7295,
13,
14535,
1330,
606... | 2.53 | 200 |
# from os import popen
from pathlib import Path
from qbay.models import *
import subprocess
# Set the current folder
current_folder = Path(__file__).parent
# Creating two Users for testing of order functionality
register("OrderUser1",
"order_user1@qbay.com",
"Password99@")
register("OrderUser2",
"order_user2@qbay.com",
"Password99@")
# Create four products for testing of order functionality
create_product("order product1",
"24 character description",
11.0, "order_user1@qbay.com",
datetime.date(2022, 9, 29))
create_product("order product2",
"24 character description",
101.0, "order_user2@qbay.com",
datetime.date(2022, 9, 29))
create_product("order product3",
"24 character description",
11.0, "order_user2@qbay.com",
datetime.date(2022, 9, 29))
create_product("order product4",
"24 character description",
11.0, "order_user2@qbay.com",
datetime.date(2022, 9, 29))
# Helper function called in each testing block
def compare_input_output(input_file, output_file):
    """
    A function that compares the output generated from
    running the qbay frontend (on a given an input file),
    to the expected output found in a text file.

    Parameters:
        input_file (string): file with expected input
        output_file (string): file with expected output
    """
    in_path = current_folder.joinpath('input_output/' + input_file)
    out_path = current_folder.joinpath('input_output/' + output_file)
    # Bug fix: both open() handles were previously never closed.  Read the
    # expected output through Path.read_text() and manage the stdin handle
    # with a context manager so it is closed even if the subprocess fails.
    expected_out = out_path.read_text()
    # Pipe the input file into the qbay frontend and capture its stdout.
    with open(in_path) as expected_in:
        output = subprocess.run(
            ['python', '-m', 'qbay'],
            stdin=expected_in,
            capture_output=True,
            text=True,
        ).stdout
    assert output.strip() == expected_out.strip()
def test_buyer_not_seller():
    """
    Blackbox input partition test for the "user cannot place an order for
    his/her own products" requirement of order().  Two partitions:
    Case 1: Prospective buyer is the seller (expect failure)
    Case 2: Prospective buyer is not the seller (expect success)
    """
    # Each (input, expected-output) pair exercises one partition.
    cases = [
        ('buyer_is_seller.in', 'buyer_is_seller.out'),
        ('buyer_not_seller.in', 'buyer_not_seller.out'),
    ]
    for in_file, out_file in cases:
        compare_input_output(in_file, out_file)
def test_price_vs_balance():
    """
    Blackbox input partition test for the "user cannot place an order that
    costs more than his/her balance" requirement of order().  Two partitions:
    Case 1: User balance is less than the cost (expect failure)
    Case 2: User balance is greater than the cost (expect success)
    """
    input_files = ('price_over_balance.in', 'price_under_balance.in')
    output_files = ('price_over_balance.out', 'price_under_balance.out')
    # Run each partition's scripted session against its expected transcript.
    for pair in zip(input_files, output_files):
        compare_input_output(*pair)
def test_sold_products_hidden():
    """
    A previously purchased product must no longer appear in the list of
    products available for sale.
    """
    compare_input_output("sold_products_hidden.in",
                         "sold_products_hidden.out")
def test_sold_visible_for_seller():
    """
    A seller must still be able to view the products they have sold.
    """
    compare_input_output("sold_visible_for_seller.in",
                         "sold_visible_for_seller.out")
| [
2,
422,
28686,
1330,
1461,
268,
198,
6738,
3108,
8019,
1330,
10644,
198,
6738,
10662,
24406,
13,
27530,
1330,
1635,
198,
11748,
850,
14681,
198,
198,
2,
5345,
262,
1459,
9483,
198,
14421,
62,
43551,
796,
10644,
7,
834,
7753,
834,
737,... | 2.530242 | 1,488 |