content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
from fastapi import HTTPException, status, Depends
from fastapi.security import SecurityScopes
from jose import jwt, JWTError
from .context import oauth2_scheme, TokenData
from ..crud.user_crud import UserCRUD, UserInDB
from pydantic import ValidationError
from ..crud.deps import get_crud_obj
from ..configuration import config | [
6738,
3049,
15042,
1330,
14626,
16922,
11,
3722,
11,
2129,
2412,
198,
6738,
3049,
15042,
13,
12961,
1330,
4765,
3351,
13920,
198,
6738,
474,
577,
1330,
474,
46569,
11,
449,
39386,
12331,
198,
6738,
764,
22866,
1330,
267,
18439,
17,
62,
... | 3.452632 | 95 |
# Copyright (c) 2001-2022 Aspose Pty Ltd. All Rights Reserved.
#
# This file is part of Aspose.Words. The source code in this file
# is only intended as a supplement to the documentation, and is provided
# "as is", without warranty of any kind, either expressed or implied.
import aspose.words as aw
from api_example_base import ApiExampleBase, MY_DIR
| [
2,
15069,
357,
66,
8,
5878,
12,
1238,
1828,
1081,
3455,
350,
774,
12052,
13,
1439,
6923,
33876,
13,
198,
2,
198,
2,
770,
2393,
318,
636,
286,
1081,
3455,
13,
37117,
13,
383,
2723,
2438,
287,
428,
2393,
198,
2,
318,
691,
5292,
35... | 3.612245 | 98 |
# coding: utf-8
"""
tweak-api
Tweak API to integrate with all the Tweak services. You can find out more about Tweak at <a href='https://www.tweak.com'>https://www.tweak.com</a>, #tweak.
OpenAPI spec version: 1.0.8-beta.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
# import models into model package
from .axes import Axes
from .billing import Billing
from .billing_address import BillingAddress
from .billing_bank_account import BillingBankAccount
from .billing_card import BillingCard
from .billing_invoice import BillingInvoice
from .billing_invoice_line import BillingInvoiceLine
from .billing_limit import BillingLimit
from .billing_limit_counter import BillingLimitCounter
from .billing_limit_log import BillingLimitLog
from .billing_plan import BillingPlan
from .billing_source import BillingSource
from .billing_source_ach_credit_transfer import BillingSourceAchCreditTransfer
from .billing_source_owner import BillingSourceOwner
from .billing_source_receiver import BillingSourceReceiver
from .billing_source_redirect import BillingSourceRedirect
from .billing_source_sepa_debit import BillingSourceSepaDebit
from .billing_source_sofort import BillingSourceSofort
from .billing_subscription import BillingSubscription
from .billing_subscription_item import BillingSubscriptionItem
from .bounds import Bounds
from .builder_asset_background_folder import BuilderAssetBackgroundFolder
from .cloudinary_image import CloudinaryImage
from .customer import Customer
from .customer_permission_set import CustomerPermissionSet
from .data_source_mongo import DataSourceMongo
from .data_source_ms_sql import DataSourceMsSql
from .data_source_my_sql import DataSourceMySql
from .data_source_oracle import DataSourceOracle
from .data_source_postgre_sql import DataSourcePostgreSql
from .data_source_rest import DataSourceRest
from .data_source_soap import DataSourceSoap
from .design import Design
from .design_comment import DesignComment
from .design_export import DesignExport
from .design_folder import DesignFolder
from .design_member import DesignMember
from .design_permission_set import DesignPermissionSet
from .design_tag import DesignTag
from .dimensions import Dimensions
from .dynamic_data import DynamicData
from .dynamic_data_operation_soap import DynamicDataOperationSoap
from .flash_var import FlashVar
from .image import Image
from .image_folder import ImageFolder
from .image_folder_member import ImageFolderMember
from .inline_response_200 import InlineResponse200
from .inline_response_200_1 import InlineResponse2001
from .inline_response_200_2 import InlineResponse2002
from .inline_response_200_3 import InlineResponse2003
from .inline_response_200_4 import InlineResponse2004
from .invitation_ticket import InvitationTicket
from .notification import Notification
from .notification_button import NotificationButton
from .object_id import ObjectID
from .portal import Portal
from .portal_image_folder import PortalImageFolder
from .portal_member import PortalMember
from .portal_permission_set import PortalPermissionSet
from .portal_template import PortalTemplate
from .portal_template_folder import PortalTemplateFolder
from .product import Product
from .product_group import ProductGroup
from .product_material import ProductMaterial
from .product_pdf_color_profile import ProductPdfColorProfile
from .product_size import ProductSize
from .product_size_material import ProductSizeMaterial
from .product_tag import ProductTag
from .product_type import ProductType
from .public_v1_team_member import PublicV1TeamMember
from .tag import Tag
from .team import Team
from .team_brand import TeamBrand
from .team_builder_config import TeamBuilderConfig
from .team_builder_config_product_group import TeamBuilderConfigProductGroup
from .team_builder_config_product_size import TeamBuilderConfigProductSize
from .team_builder_config_product_size_material import TeamBuilderConfigProductSizeMaterial
from .team_builder_config_product_type import TeamBuilderConfigProductType
from .team_member import TeamMember
from .team_member_access_token import TeamMemberAccessToken
from .team_permission_set import TeamPermissionSet
from .team_template_folder import TeamTemplateFolder
from .template import Template
from .template_member import TemplateMember
from .template_permission_set import TemplatePermissionSet
from .template_tag import TemplateTag
from .test_cache import TestCache
from .workflow import Workflow
| [
2,
19617,
25,
3384,
69,
12,
23,
198,
198,
37811,
198,
220,
220,
220,
25393,
12,
15042,
628,
220,
220,
220,
24205,
461,
7824,
284,
19386,
351,
477,
262,
24205,
461,
2594,
13,
220,
921,
460,
1064,
503,
517,
546,
24205,
461,
220,
220... | 3.736765 | 1,360 |
from cloudinary.models import CloudinaryField
from django.db import models
| [
6738,
6279,
3219,
13,
27530,
1330,
10130,
3219,
15878,
198,
6738,
42625,
14208,
13,
9945,
1330,
4981,
628,
628,
198
] | 3.95 | 20 |
from .settings import *
DYNAMICFORMS.update({
'template': 'dynamicforms/jquery_ui/',
})
| [
6738,
764,
33692,
1330,
1635,
198,
198,
35,
40760,
2390,
2149,
13775,
5653,
13,
19119,
15090,
198,
220,
220,
220,
705,
28243,
10354,
705,
67,
28995,
23914,
14,
73,
22766,
62,
9019,
14,
3256,
198,
30072,
198
] | 2.513514 | 37 |
import sys
from django.core.management.base import BaseCommand
try:
input = raw_input
except NameError:
pass
from stormpath.error import Error as StormpathError
from stormpath.resources.provider import Provider
from django_stormpath import social
| [
11748,
25064,
198,
198,
6738,
42625,
14208,
13,
7295,
13,
27604,
13,
8692,
1330,
7308,
21575,
198,
198,
28311,
25,
198,
220,
220,
220,
5128,
796,
8246,
62,
15414,
198,
16341,
6530,
12331,
25,
198,
220,
220,
220,
1208,
198,
198,
6738,
... | 3.48 | 75 |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, unicode_literals
import datetime as dt
from autoslug import AutoSlugField
from autoslug.utils import slugify
from django.core.exceptions import ValidationError
from django.db import models
from django.template.defaultfilters import truncatechars_html
from django.utils.translation import gettext_lazy as _
from filer.fields.image import FilerImageField
from filer.models import ThumbnailOption
from meta.models import ModelMeta
from conference.cfp.models import Submission, WorkshopSubmission
class Slot(ModelMeta, models.Model):
"""
Model for conference time slots. It can be for a talk, a workshop, or a custom time slot (i. e. coffee break)
"""
talk = models.ForeignKey(
Submission, related_name='talks', limit_choices_to={'selected': True}, null=True, blank=True
)
slug = AutoSlugField(
_('Slug'), max_length=400, blank=True, populate_from='generated_slug', always_update=True
)
workshop = models.ForeignKey(
WorkshopSubmission, related_name='workshops', limit_choices_to={'selected': True}, null=True, blank=True
)
name = models.CharField(
_('Name'), max_length=250, null=True, blank=True,
help_text=_('Field for time slots that does not relate to a Talk or a Workshop.')
)
mugshot = FilerImageField(verbose_name=_('Speaker mughshot'), null=True, blank=True)
twitter = models.CharField(_('Twitter'), max_length=200, default='', blank=True)
schedule_abstract = models.TextField(_('Schedule abstract'), blank=True, null=True)
day = models.DateField(_('Date'))
start = models.TimeField(_('Start'))
duration = models.DurationField(_('Duration'))
sprint_days = models.BooleanField(_('Part of sprint days'), default=False)
show_end_time = models.BooleanField(_('Show end time in schedule'), default=False)
slides = models.URLField(_('Speaker slides'), blank=True, null=True)
video = models.URLField(_('Talk video'), blank=True, null=True)
_metadata = {
'title': 'title',
'description': 'get_meta_abstract',
'image': 'get_image',
}
@property
@property
@property
@property
@property
@property
@property
@property
@property
@property
is_talk.short_description = _('Talk')
is_talk.boolean = True
is_workshop.short_description = _('Workshop')
is_workshop.boolean = True
is_custom.short_description = _('Custom')
is_custom.boolean = True
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
11,
3601,
62,
8818,
11,
28000,
1098,
62,
17201,
874,
198,
198,
11748,
4818,
8079,
355,
288,
83,
198,
198,
6738,
44619,
... | 2.881222 | 884 |
import subprocess
import os
from makecfg.config import Config | [
11748,
850,
14681,
198,
11748,
28686,
198,
6738,
787,
37581,
13,
11250,
1330,
17056
] | 4.357143 | 14 |
#!/usr/bin/env python
# encoding: utf-8
from .models import Comment
from django import forms
from django.forms import ModelForm
from django.contrib.auth.models import User
from django.contrib.auth import get_user_model
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
21004,
25,
3384,
69,
12,
23,
198,
198,
6738,
764,
27530,
1330,
18957,
198,
6738,
42625,
14208,
1330,
5107,
198,
6738,
42625,
14208,
13,
23914,
1330,
9104,
8479,
198,
6738,
42625,
14... | 3.283582 | 67 |
import io
import os
import re
from setuptools import find_packages, setup
VERSION_RE = re.compile(r"__version__\s*=\s*\"(.*?)\"")
HERE = os.path.abspath(os.path.dirname(__file__))
def read(*args):
"""Reads complete file contents."""
return io.open(os.path.join(HERE, *args), encoding="utf-8").read()
def get_version():
"""Reads the version from this module."""
init = read("pygitguardian", "__init__.py")
return VERSION_RE.search(init).group(1)
setup(
name="pygitguardian",
version=get_version(),
packages=find_packages(exclude=["tests"]),
description="Python Wrapper for GitGuardian's API -- Scan security policy breaks everywhere",
long_description=read("README.md"),
long_description_content_type="text/markdown",
url="https://github.com/GitGuardian/py-gitguardian",
author="GitGuardian",
author_email="support@gitguardian.com",
maintainer="GitGuardian",
install_requires=["marshmallow>=3.5", "requests>=2"],
include_package_data=True,
zip_safe=True,
license="MIT",
keywords="api-client devsecops secrets-detection security-tools library gitguardian",
classifiers=[
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"Natural Language :: English",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Operating System :: OS Independent",
"Topic :: Security",
],
)
| [
11748,
33245,
198,
11748,
28686,
198,
11748,
302,
198,
198,
6738,
900,
37623,
10141,
1330,
1064,
62,
43789,
11,
9058,
628,
198,
43717,
62,
2200,
796,
302,
13,
5589,
576,
7,
81,
1,
834,
9641,
834,
59,
82,
9,
28,
59,
82,
9,
7879,
... | 2.701258 | 636 |
from abc import ABC, abstractmethod
from typing import List
from domain.Contest.contest import Contest
| [
6738,
450,
66,
1330,
9738,
11,
12531,
24396,
198,
6738,
19720,
1330,
7343,
198,
198,
6738,
7386,
13,
4264,
395,
13,
3642,
395,
1330,
27297,
628
] | 4.038462 | 26 |
# -*- coding: utf-8 -*-
import pandas as pd
from sklearn.base import BaseEstimator, TransformerMixin
# here you define the operation it should perform
# just return self
class SelectColumnsTransfomer(BaseEstimator, TransformerMixin):
""" Select dataframe columns
"""
class DataframeFunctionTransformer(BaseEstimator, TransformerMixin):
"""
Apply an arbitrary function to a Dataframe column, as you would use a `map` funcion
"""
def __init__(self, column_name, func, none_treatment=None):
"""
:param column_name: the name of the dataframe column to which the function will be applied
:param func: the function object, e.g. lambda
:param none_treatment: what to do with NaN, Nones, etc. Default behaviour is to perform no
special treatment, i.e. the function itself should treat nulls. Other options: 'return_none',
returns the input itself in case it's null-lie (as per pd.isnull)
"""
self.column_name = column_name
self.func = func
self.none_treatment = none_treatment
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
11748,
19798,
292,
355,
279,
67,
198,
6738,
1341,
35720,
13,
8692,
1330,
7308,
22362,
320,
1352,
11,
3602,
16354,
35608,
259,
198,
220,
220,
220,
1303,
994,
345,
... | 2.86911 | 382 |
import multiprocessing
import qrcode
import random
import string
import numpy as np
IMAGE_SIZE = 21
STRING_LENGTH = 10
CHARACTER_SET = string.ascii_lowercase + string.ascii_uppercase + \
string.digits
def randomString():
"""
Return a random string where characters are drawn from a fixed
set of characters.
"""
return "".join(random.choice(CHARACTER_SET) for _ in range(STRING_LENGTH))
def qrCodeMatrix(data):
"""
Encode the given data in a QR code and return it as a numpy array of `0`
and `1`s.
"""
qr = qrcode.QRCode(
version=None, # automatically determine size
error_correction=qrcode.constants.ERROR_CORRECT_L, # 7% error tolerance
box_size=1, # single pixels for one box
border=0, # no border
)
qr.add_data(data)
qr.make(fit=True) # fit=True automatically determines the size
# Reverse black <-> white and convert to numpy array
return 1 - np.asarray(qr.get_matrix(), dtype=np.float)
def dataToVector(data):
"""
Returns the first letter of the given string, encoded as a numpy vector
of length `len(CHARACTER_SET)`.
"""
letter = data[0]
nums = map(lambda l: l == letter, CHARACTER_SET)
return np.asarray(list(nums), dtype=np.float)
def getRandomBatch(size):
"""
Return a number of QR code matrices and the corresponding labels.
"""
numProcesses = max(1, multiprocessing.cpu_count() - 2)
pool = multiprocessing.Pool(numProcesses)
strings = [randomString() for _ in range(size)]
X = pool.map(qrCodeMatrix, strings)
y = pool.map(dataToVector, strings)
X = np.asarray(X)
X = X[..., np.newaxis]
pool.close()
return X, y
| [
11748,
18540,
305,
919,
278,
198,
11748,
10662,
6015,
1098,
198,
11748,
4738,
198,
11748,
4731,
198,
11748,
299,
32152,
355,
45941,
628,
198,
3955,
11879,
62,
33489,
796,
2310,
198,
18601,
2751,
62,
43,
49494,
796,
838,
198,
38019,
2246... | 2.591252 | 663 |
#!/usr/bin/env python
# Copyright (c) 2014 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Make sure overwriting read-only files works as expected (via win-tool).
"""
import TestGyp
import filecmp
import os
import stat
import sys
if sys.platform == 'win32':
test = TestGyp.TestGyp(formats=['ninja'])
# First, create the source files.
os.makedirs('subdir')
read_only_files = ['read-only-file', 'subdir/A', 'subdir/B', 'subdir/C']
for f in read_only_files:
test.write(f, 'source_contents')
test.chmod(f, stat.S_IREAD)
if os.access(f, os.W_OK):
test.fail_test()
# Second, create the read-only destination files. Note that we are creating
# them where the ninja and win-tool will try to copy them to, in order to test
# that copies overwrite the files.
os.makedirs(test.built_file_path('dest/subdir'))
for f in read_only_files:
f = os.path.join('dest', f)
test.write(test.built_file_path(f), 'SHOULD BE OVERWRITTEN')
test.chmod(test.built_file_path(f), stat.S_IREAD)
# Ensure not writable.
if os.access(test.built_file_path(f), os.W_OK):
test.fail_test()
test.run_gyp('copies_readonly_files.gyp')
test.build('copies_readonly_files.gyp')
# Check the destination files were overwritten by ninja.
for f in read_only_files:
f = os.path.join('dest', f)
test.must_contain(test.built_file_path(f), 'source_contents')
# This will fail if the files are not the same mode or contents.
for f in read_only_files:
if not filecmp.cmp(f, test.built_file_path(os.path.join('dest', f))):
test.fail_test()
test.pass_test()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
201,
198,
201,
198,
2,
15069,
357,
66,
8,
1946,
3012,
3457,
13,
1439,
2489,
10395,
13,
201,
198,
2,
5765,
286,
428,
2723,
2438,
318,
21825,
416,
257,
347,
10305,
12,
7635,
5964,
326,
46... | 2.512894 | 698 |
#!/usr/bin/env python
import psutil
import netifaces
import sys
import logging
import subprocess
# Add various metrics from this code - https://gitlab.ncl.ac.uk/cs-support-group/lcd-monitor-panel/blob/master/data.py
# logging
logging.basicConfig(stream=sys.stdout, level=logging.ERROR)
logger = logging.getLogger(__name__)
if __name__ == "__main__":
# check if process is running or not
print Isrunning('firefox')
# Kernel modules
total_kernel_mods,kernel_mods = get_kernel_modules_details()
#print total_kernel_mods,kernel_mods
for item in kernel_mods:
# print kernel module - name,size, and count
print dict(item)
# interface details
network_interfaces = get_nw_interfaces()
for iface in network_interfaces:
iface_results = nw_interface_details(iface)
print "Interface results: \n %s " %str(iface_results)
iface_status = get_interface_status(iface)
print "Interface status - %s" % iface_status
# check if interface is in promiscous mode or not.
network_interfaces = get_nw_interfaces()
for iface in network_interfaces:
promiscous_status = get_promiscous_status(iface)
print "Interface-%s promiscous status-%s " %(iface, promiscous_status)
sys.exit(1)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
11748,
26692,
22602,
198,
11748,
2010,
361,
2114,
198,
11748,
25064,
198,
11748,
18931,
198,
11748,
850,
14681,
198,
198,
2,
3060,
2972,
20731,
422,
428,
2438,
532,
3740,
1378,
18300,
23... | 2.898305 | 413 |
#!/usr/bin/env python
import boto.ec2
import urllib2
import sys
import os
# Get dev
try:
dev = str(sys.argv[1])
except IndexError:
print "Provide block device i.e. xvdf"
sys.exit(1)
# Convert dev to mapping format
if 'nvme' in dev:
# For newer instances which expose EBS volumes as NVMe devices, translate the
# device name so boto can discover it.
output = os.popen('sudo /usr/local/sbin/cfncluster-ebsnvme-id -v /dev/nvme1').read().split(":")[1].strip()
print output
sys.exit(0)
else:
dev = dev.replace('xvd', 'sd')
dev = '/dev/' + dev
# Get instance ID
instanceId = urllib2.urlopen("http://169.254.169.254/latest/meta-data/instance-id").read()
# Get region
region = urllib2.urlopen("http://169.254.169.254/latest/meta-data/placement/availability-zone").read()
region = region[:-1]
# Connect to AWS using boto
conn = boto.ec2.connect_to_region(region)
# Get blockdevicemapping
attrib = conn.get_instance_attribute(instanceId, 'blockDeviceMapping')
devmap = attrib.get('blockDeviceMapping')
if devmap.has_key(dev):
print devmap[dev].volume_id
else:
sys.exit(1)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
11748,
275,
2069,
13,
721,
17,
198,
11748,
2956,
297,
571,
17,
198,
11748,
25064,
198,
11748,
28686,
198,
198,
2,
3497,
1614,
198,
28311,
25,
198,
220,
1614,
796,
965,
7,
17597,... | 2.694581 | 406 |
from invoke import task
@task
@task
| [
6738,
26342,
1330,
4876,
628,
198,
31,
35943,
628,
198,
31,
35943,
198
] | 3.076923 | 13 |
#!/usr/bin/env python
#-*- coding: utf-8 -*-
import rospy
from flexbe_core import EventState, Logger
from sonia_common.msg import AddPose
from geometry_msgs.msg import Point, Vector3
from nav_msgs.msg import Odometry
class create_absolute_depth(EventState):
'''
(!!!! DEPRECATED !!!!!) Move the submarine by defining every parameter.
-- positionZ uint8 The absolute depth desired to reach
<= continue Indicates that the pose has been created
''' | [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
220,
198,
2,
12,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
11748,
686,
2777,
88,
198,
198,
6738,
7059,
1350,
62,
7295,
1330,
8558,
9012,
11,
5972,
1362,
198,
6738,
336... | 2.637755 | 196 |
n = int(input())
matrix = []
for i in range(n):
a = input().strip()
matrix.append(a)
if 'm' in a:
m_r = i
m_c = a.index('m')
if 'p' in a:
p_r = i
p_c = a.index('p')
r_diff = m_r - p_r
c_diff = m_c - p_c
while True:
if r_diff != 0:
if r_diff<0:
r_diff += 1
print('DOWN')
elif r_diff>0:
r_diff -= 1
print('UP')
if c_diff != 0:
if c_diff<0:
c_diff += 1
print('RIGHT')
elif c_diff>0:
c_diff -= 1
print('LEFT')
if r_diff == 0 and c_diff == 0:
break
'''
Sample input
3
---
-m-
p--
Sample output
DOWN
LEFT
'''
| [
77,
796,
493,
7,
15414,
28955,
198,
6759,
8609,
796,
17635,
198,
1640,
1312,
287,
2837,
7,
77,
2599,
198,
220,
220,
220,
257,
796,
5128,
22446,
36311,
3419,
198,
220,
220,
220,
17593,
13,
33295,
7,
64,
8,
628,
220,
220,
220,
611,
... | 1.587234 | 470 |
# Copyright 2014-2017 Canonical Limited.
#
# This file is part of charms.reactive.
#
# charms.reactive is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version 3 as
# published by the Free Software Foundation.
#
# charm-helpers is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
import os
import sys
import types
import mock
import unittest
from charmhelpers.core import hookenv
from charms.reactive import relations
| [
2,
15069,
1946,
12,
5539,
19507,
605,
15302,
13,
198,
2,
198,
2,
770,
2393,
318,
636,
286,
41700,
13,
260,
5275,
13,
198,
2,
198,
2,
41700,
13,
260,
5275,
318,
1479,
3788,
25,
345,
460,
17678,
4163,
340,
290,
14,
273,
13096,
198... | 3.72973 | 222 |
"""
Utilities for managing chunked file uploads.
See https://jupyter-notebook.readthedocs.io/en/stable/extending/contents.html#chunked-saving
"""
import base64
import contextvars
import time
# Used as a "registry" for uploads.
content_chunks = contextvars.ContextVar("jupyterlab_content_chunks", default={})
def store_content_chunk(path: str, content: str):
"""Store a base64 chunk in the registry as bytes"""
current_value = content_chunks.get()
if path not in current_value:
current_value[path] = {"started_at": time.time(), "chunks": []}
current_value[path]["chunks"].append(
base64.b64decode(content.encode("ascii"), validate=True)
)
def assemble_chunks(path: str) -> str:
"""Assemble the chunk bytes into a single base64 string"""
current_value = content_chunks.get()
if path not in current_value:
raise ValueError(f"No chunk for path {path}")
return base64.b64encode(b"".join(current_value[path]["chunks"])).decode("ascii")
def delete_chunks(path):
"""Should be called once the upload is complete to free the memory"""
current_value = content_chunks.get()
del current_value[path]
def prune_stale_chunks():
"""Called periodically to avoid keeping large objects in memory
when a chunked upload does not finish"""
current_value = content_chunks.get()
now = time.time()
stale_paths = []
for path, chunk_info in current_value.items():
if now - chunk_info["started_at"] > 3600:
stale_paths.append(path)
for path in stale_paths:
del current_value[path]
| [
37811,
198,
18274,
2410,
329,
11149,
16058,
276,
2393,
9516,
82,
13,
198,
6214,
3740,
1378,
73,
929,
88,
353,
12,
11295,
2070,
13,
961,
83,
704,
420,
82,
13,
952,
14,
268,
14,
31284,
14,
2302,
1571,
14,
3642,
658,
13,
6494,
2,
3... | 2.750859 | 582 |
# coding: utf-8
"""
UltraCart Rest API V2
UltraCart REST API Version 2 # noqa: E501
OpenAPI spec version: 2.0.0
Contact: support@ultracart.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class EmailCommseqWebhookSendTestRequest(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'cart_id': 'str',
'cart_item_ids': 'list[str]',
'email': 'str',
'esp_commseq_step_uuid': 'str',
'esp_commseq_uuid': 'str',
'name': 'str',
'order_id': 'str'
}
attribute_map = {
'cart_id': 'cart_id',
'cart_item_ids': 'cart_item_ids',
'email': 'email',
'esp_commseq_step_uuid': 'esp_commseq_step_uuid',
'esp_commseq_uuid': 'esp_commseq_uuid',
'name': 'name',
'order_id': 'order_id'
}
def __init__(self, cart_id=None, cart_item_ids=None, email=None, esp_commseq_step_uuid=None, esp_commseq_uuid=None, name=None, order_id=None): # noqa: E501
"""EmailCommseqWebhookSendTestRequest - a model defined in Swagger""" # noqa: E501
self._cart_id = None
self._cart_item_ids = None
self._email = None
self._esp_commseq_step_uuid = None
self._esp_commseq_uuid = None
self._name = None
self._order_id = None
self.discriminator = None
if cart_id is not None:
self.cart_id = cart_id
if cart_item_ids is not None:
self.cart_item_ids = cart_item_ids
if email is not None:
self.email = email
if esp_commseq_step_uuid is not None:
self.esp_commseq_step_uuid = esp_commseq_step_uuid
if esp_commseq_uuid is not None:
self.esp_commseq_uuid = esp_commseq_uuid
if name is not None:
self.name = name
if order_id is not None:
self.order_id = order_id
@property
def cart_id(self):
"""Gets the cart_id of this EmailCommseqWebhookSendTestRequest. # noqa: E501
:return: The cart_id of this EmailCommseqWebhookSendTestRequest. # noqa: E501
:rtype: str
"""
return self._cart_id
@cart_id.setter
def cart_id(self, cart_id):
"""Sets the cart_id of this EmailCommseqWebhookSendTestRequest.
:param cart_id: The cart_id of this EmailCommseqWebhookSendTestRequest. # noqa: E501
:type: str
"""
self._cart_id = cart_id
@property
def cart_item_ids(self):
"""Gets the cart_item_ids of this EmailCommseqWebhookSendTestRequest. # noqa: E501
:return: The cart_item_ids of this EmailCommseqWebhookSendTestRequest. # noqa: E501
:rtype: list[str]
"""
return self._cart_item_ids
@cart_item_ids.setter
def cart_item_ids(self, cart_item_ids):
"""Sets the cart_item_ids of this EmailCommseqWebhookSendTestRequest.
:param cart_item_ids: The cart_item_ids of this EmailCommseqWebhookSendTestRequest. # noqa: E501
:type: list[str]
"""
self._cart_item_ids = cart_item_ids
@property
def email(self):
"""Gets the email of this EmailCommseqWebhookSendTestRequest. # noqa: E501
:return: The email of this EmailCommseqWebhookSendTestRequest. # noqa: E501
:rtype: str
"""
return self._email
@email.setter
def email(self, email):
"""Sets the email of this EmailCommseqWebhookSendTestRequest.
:param email: The email of this EmailCommseqWebhookSendTestRequest. # noqa: E501
:type: str
"""
self._email = email
@property
def esp_commseq_step_uuid(self):
"""Gets the esp_commseq_step_uuid of this EmailCommseqWebhookSendTestRequest. # noqa: E501
:return: The esp_commseq_step_uuid of this EmailCommseqWebhookSendTestRequest. # noqa: E501
:rtype: str
"""
return self._esp_commseq_step_uuid
@esp_commseq_step_uuid.setter
def esp_commseq_step_uuid(self, esp_commseq_step_uuid):
"""Sets the esp_commseq_step_uuid of this EmailCommseqWebhookSendTestRequest.
:param esp_commseq_step_uuid: The esp_commseq_step_uuid of this EmailCommseqWebhookSendTestRequest. # noqa: E501
:type: str
"""
self._esp_commseq_step_uuid = esp_commseq_step_uuid
@property
def esp_commseq_uuid(self):
"""Gets the esp_commseq_uuid of this EmailCommseqWebhookSendTestRequest. # noqa: E501
:return: The esp_commseq_uuid of this EmailCommseqWebhookSendTestRequest. # noqa: E501
:rtype: str
"""
return self._esp_commseq_uuid
@esp_commseq_uuid.setter
def esp_commseq_uuid(self, esp_commseq_uuid):
"""Sets the esp_commseq_uuid of this EmailCommseqWebhookSendTestRequest.
:param esp_commseq_uuid: The esp_commseq_uuid of this EmailCommseqWebhookSendTestRequest. # noqa: E501
:type: str
"""
self._esp_commseq_uuid = esp_commseq_uuid
@property
def name(self):
"""Gets the name of this EmailCommseqWebhookSendTestRequest. # noqa: E501
:return: The name of this EmailCommseqWebhookSendTestRequest. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this EmailCommseqWebhookSendTestRequest.
:param name: The name of this EmailCommseqWebhookSendTestRequest. # noqa: E501
:type: str
"""
self._name = name
@property
def order_id(self):
"""Gets the order_id of this EmailCommseqWebhookSendTestRequest. # noqa: E501
:return: The order_id of this EmailCommseqWebhookSendTestRequest. # noqa: E501
:rtype: str
"""
return self._order_id
@order_id.setter
def order_id(self, order_id):
"""Sets the order_id of this EmailCommseqWebhookSendTestRequest.
:param order_id: The order_id of this EmailCommseqWebhookSendTestRequest. # noqa: E501
:type: str
"""
self._order_id = order_id
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(EmailCommseqWebhookSendTestRequest, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, EmailCommseqWebhookSendTestRequest):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
2,
19617,
25,
3384,
69,
12,
23,
198,
198,
37811,
198,
220,
220,
220,
14563,
43476,
8324,
7824,
569,
17,
628,
220,
220,
220,
14563,
43476,
30617,
7824,
10628,
362,
220,
1303,
645,
20402,
25,
412,
33548,
628,
220,
220,
220,
4946,
1761... | 2.195929 | 3,685 |
import RLAgents
import numpy
import matplotlib.pyplot as plt
raw_score_col = 3
norm_score_col = 4
result_path = "./results/"
files = []
files.append("./models/ppo_baseline/run_0/result/result.log")
files.append("./models/ppo_baseline/run_1/result/result.log")
files.append("./models/ppo_baseline/run_2/result/result.log")
files.append("./models/ppo_baseline/run_3/result/result.log")
files.append("./models/ppo_baseline/run_4/result/result.log")
files.append("./models/ppo_baseline/run_5/result/result.log")
files.append("./models/ppo_baseline/run_6/result/result.log")
files.append("./models/ppo_baseline/run_7/result/result.log")
stats_baseline = RLAgents.RLStatsCompute(files)
files = []
files.append("./models/ppo_curiosity/run_0/result/result.log")
files.append("./models/ppo_curiosity/run_1/result/result.log")
files.append("./models/ppo_curiosity/run_2/result/result.log")
files.append("./models/ppo_curiosity/run_3/result/result.log")
files.append("./models/ppo_curiosity/run_4/result/result.log")
files.append("./models/ppo_curiosity/run_5/result/result.log")
files.append("./models/ppo_curiosity/run_6/result/result.log")
files.append("./models/ppo_curiosity/run_7/result/result.log")
stats_curiosity = RLAgents.RLStatsCompute(files)
files = []
files.append("./models/ppo_entropy/run_0/result/result.log")
files.append("./models/ppo_entropy/run_1/result/result.log")
files.append("./models/ppo_entropy/run_2/result/result.log")
files.append("./models/ppo_entropy/run_3/result/result.log")
files.append("./models/ppo_entropy/run_4/result/result.log")
files.append("./models/ppo_entropy/run_5/result/result.log")
files.append("./models/ppo_entropy/run_6/result/result.log")
files.append("./models/ppo_entropy/run_7/result/result.log")
stats_entropy = RLAgents.RLStatsCompute(files)
plt.cla()
plt.ylabel("score")
plt.xlabel("iteration")
plt.grid(color='black', linestyle='-', linewidth=0.1)
plt.plot(stats_baseline.mean[0], stats_baseline.mean[raw_score_col], label="baseline", color='deepskyblue')
plt.fill_between(stats_baseline.mean[0], stats_baseline.lower[4], stats_baseline.upper[4], color='deepskyblue', alpha=0.2)
plt.plot(stats_curiosity.mean[0], stats_curiosity.mean[raw_score_col], label="RND", color='limegreen')
plt.fill_between(stats_curiosity.mean[0], stats_curiosity.lower[4], stats_curiosity.upper[4], color='limegreen', alpha=0.2)
plt.plot(stats_entropy.mean[0], stats_entropy.mean[raw_score_col], label="RND + entropy", color='red')
plt.fill_between(stats_entropy.mean[0], stats_entropy.lower[4], stats_entropy.upper[4], color='red', alpha=0.2)
plt.legend(loc='upper left', borderaxespad=0.)
plt.savefig(result_path + "score_per_iteration.png", dpi = 300)
#curiosity agent
plt.cla()
plt.ylabel("internal motivation")
plt.xlabel("iteration")
plt.grid(color='black', linestyle='-', linewidth=0.1)
plt.plot(stats_curiosity.mean[0], stats_curiosity.mean[7], label="curiosity", color='deepskyblue')
plt.fill_between(stats_curiosity.mean[0], stats_curiosity.lower[7], stats_curiosity.upper[7], color='deepskyblue', alpha=0.2)
plt.legend(loc='upper right', borderaxespad=0.)
plt.savefig(result_path + "rnd_internal_motivation.png", dpi = 300)
plt.cla()
plt.ylabel("advantages")
plt.xlabel("iteration")
plt.grid(color='black', linestyle='-', linewidth=0.1)
plt.plot(stats_curiosity.mean[0], stats_curiosity.mean[8], label="external advantages", color='deepskyblue')
plt.fill_between(stats_curiosity.mean[0], stats_curiosity.lower[8], stats_curiosity.upper[8], color='deepskyblue', alpha=0.2)
plt.plot(stats_curiosity.mean[0], stats_curiosity.mean[9], label="curiosity advantages", color='limegreen')
plt.fill_between(stats_curiosity.mean[0], stats_curiosity.lower[9], stats_curiosity.upper[9], color='limegreen', alpha=0.2)
plt.legend(loc='lower right', borderaxespad=0.)
plt.savefig(result_path + "rnd_advantages.png", dpi = 300)
#curiosity + entropy agent
plt.cla()
plt.ylabel("internal motivation")
plt.xlabel("iteration")
plt.grid(color='black', linestyle='-', linewidth=0.1)
plt.plot(stats_entropy.mean[0], stats_entropy.mean[8], label="curiosity", color='deepskyblue', alpha=0.5)
plt.fill_between(stats_entropy.mean[0], stats_entropy.lower[8], stats_entropy.upper[8], color='deepskyblue', alpha=0.2)
plt.plot(stats_entropy.mean[0], stats_entropy.mean[9], label="entropy", color='red', alpha=0.5)
plt.fill_between(stats_entropy.mean[0], stats_entropy.lower[9], stats_entropy.upper[9], color='red', alpha=0.2)
plt.legend(loc='upper right', borderaxespad=0.)
plt.savefig(result_path + "entropy_internal_motivation.png", dpi = 300)
plt.cla()
plt.ylabel("advantages")
plt.xlabel("iteration")
plt.grid(color='black', linestyle='-', linewidth=0.1)
plt.plot(stats_entropy.mean[0], stats_entropy.mean[10], label="external advantages", color='deepskyblue', alpha=0.5)
plt.fill_between(stats_entropy.mean[0], stats_entropy.lower[10], stats_entropy.upper[10], color='deepskyblue', alpha=0.2)
plt.plot(stats_entropy.mean[0], stats_entropy.mean[11], label="curiosity advantages", color='limegreen', alpha=0.5)
plt.fill_between(stats_entropy.mean[0], stats_entropy.lower[11], stats_entropy.upper[11], color='limegreen', alpha=0.2)
plt.plot(stats_entropy.mean[0], stats_entropy.mean[12], label="entropy advantages", color='red', alpha=0.5)
plt.fill_between(stats_entropy.mean[0], stats_entropy.lower[12], stats_entropy.upper[12], color='red', alpha=0.2)
plt.legend(loc='upper right', borderaxespad=0.)
plt.savefig(result_path + "entropy_advantages.png", dpi = 300)
| [
11748,
45715,
10262,
658,
198,
11748,
299,
32152,
198,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
628,
198,
1831,
62,
26675,
62,
4033,
796,
513,
198,
27237,
62,
26675,
62,
4033,
796,
604,
198,
198,
20274,
62,
6978,
... | 2.580645 | 2,139 |
import types
from .node import Node # noqa: F401
from ..base import TributaryException
class LazyGraph(object):
"""Wrapper class around a collection of lazy nodes."""
def node(self, name, readonly=False, nullable=True, value=None): # noqa: F811
"""method to create a lazy node attached to a graph.
Args:
name (str): name to represent the node
readonly (bool): whether the node should be settable
nullable (bool): whether node can have value None
value (any): initial value for node
Returns:
BaseNode: the newly constructed lazy node
"""
if not hasattr(self, "_LazyGraph__nodes"):
self.__nodes = {}
if name not in self.__nodes:
if not isinstance(value, Node):
value = Node(
name=name,
derived=False,
readonly=readonly,
nullable=nullable,
value=value,
)
self.__nodes[name] = value
setattr(self, name, self.__nodes[name])
return self.__nodes[name]
| [
11748,
3858,
198,
6738,
764,
17440,
1330,
19081,
220,
1303,
645,
20402,
25,
376,
21844,
198,
6738,
11485,
8692,
1330,
309,
2455,
560,
16922,
628,
198,
4871,
406,
12582,
37065,
7,
15252,
2599,
198,
220,
220,
220,
37227,
36918,
2848,
1398... | 2.101818 | 550 |
#!/usr/bin/env python3
import player
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
11748,
2137,
628
] | 2.923077 | 13 |
# -*- coding: utf-8 -*-
import hashlib
import json
from ..base.simple_downloader import SimpleDownloader
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
11748,
12234,
8019,
198,
11748,
33918,
198,
198,
6738,
11485,
8692,
13,
36439,
62,
15002,
263,
1330,
17427,
10002,
263,
628
] | 3 | 36 |
from typing import List
from ee.clickhouse.queries.funnels.base import ClickhouseFunnelBase
| [
6738,
19720,
1330,
7343,
198,
198,
6738,
304,
68,
13,
12976,
4803,
13,
421,
10640,
13,
12543,
19423,
13,
8692,
1330,
6914,
4803,
24629,
4954,
14881,
628
] | 3.481481 | 27 |
import time
from src.dao.music_dao import MusicDao
from src.dao.music_list_dao import MusicListDao
from src.entity.music import Music
from src.entity.music_list import MusicList
from src.service.play_list import PlayList
if __name__ == "__main__":
pass
| [
11748,
640,
198,
198,
6738,
12351,
13,
67,
5488,
13,
28965,
62,
67,
5488,
1330,
7849,
35,
5488,
198,
6738,
12351,
13,
67,
5488,
13,
28965,
62,
4868,
62,
67,
5488,
1330,
7849,
8053,
35,
5488,
198,
6738,
12351,
13,
26858,
13,
28965,
... | 2.965909 | 88 |
from typing import Generic, TypeVar, List
from data_structures.doubly_linked_list import DoublyLinkedList
T = TypeVar('T')
class QueueList(Generic[T]):
"""
Basic queue backed with a list. Not optimal since list has O(N) complexity for
inserting and removing elements at the beginning
"""
class QueueLinkedList(Generic[T]):
"""
Queue backed by a doubly linked list
"""
# O(1)
# O(1)
| [
6738,
19720,
1330,
42044,
11,
5994,
19852,
11,
7343,
198,
6738,
1366,
62,
7249,
942,
13,
67,
12944,
306,
62,
25614,
62,
4868,
1330,
5728,
36874,
11280,
276,
8053,
198,
198,
51,
796,
5994,
19852,
10786,
51,
11537,
628,
198,
4871,
4670,... | 2.85906 | 149 |
from models.networks.nestedT import Nest
from models.networks.swin_unet import SwinTransformerSys
from models.networks.swin_transformer import SwinTransformer
from models.networks.axialnet import axialunet, logo,MedT,gated
from models.networks.vit_seg_modeling_gate import VisionTransformer_AG
from .unet_2D import *
from .unet_3D import *
from .unet_nonlocal_2D import *
from .unet_nonlocal_3D import *
from .unet_grid_attention_3D import *
from .unet_CT_dsv_3D import *
from .unet_CT_single_att_dsv_3D import *
from .unet_CT_multi_att_dsv_3D import *
from .unet_CT_multi_att_dsv_2D import *
from .sononet import *
from .sononet_grid_attention import *
from .vit_seg_modeling import *
import pywick.models.segmentation as pws
from .vit_seg_modeling import CONFIGS as CONFIGS_ViT_seg
| [
6738,
4981,
13,
3262,
5225,
13,
77,
7287,
51,
1330,
21420,
198,
6738,
4981,
13,
3262,
5225,
13,
2032,
259,
62,
403,
316,
1330,
2451,
259,
8291,
16354,
44387,
198,
6738,
4981,
13,
3262,
5225,
13,
2032,
259,
62,
7645,
16354,
1330,
245... | 2.670068 | 294 |
"""
-*- test-case-name: PyHouse.src.Modules.web.test.test_web_rooms -*-
@name: PyHouse/src/Modules/web/web_rooms.py
@author: D. Brian Kimmel
@contact: D.BrianKimmel@gmail.com
@copyright: (c) 2013-2016 by D. Brian Kimmel
@license: MIT License
@note: Created on Jun 3, 2013
@summary: Web interface to rooms for the selected house.
"""
# Import system type stuff
import os
from nevow import loaders
from nevow import athena
# Import PyMh files and modules.
from Modules.Web.web_utils import JsonUnicode, GetJSONHouseInfo
from Modules.Housing.rooms import Maint as roomMaint
from Modules.Computer import logging_pyh as Logger
# Handy helper for finding external resources nearby.
webpath = os.path.join(os.path.split(__file__)[0])
templatepath = os.path.join(webpath, 'template')
g_debug = 0
LOG = Logger.getLogger('PyHouse.webRooms ')
# ## END DBK
| [
37811,
198,
12,
9,
12,
1332,
12,
7442,
12,
3672,
25,
9485,
18102,
13,
10677,
13,
5841,
5028,
13,
12384,
13,
9288,
13,
9288,
62,
12384,
62,
9649,
532,
9,
12,
198,
198,
31,
3672,
25,
220,
220,
220,
220,
220,
9485,
18102,
14,
10677... | 2.761006 | 318 |
import argparse
import random
import re
import socket
import sys
from urllib2 import HTTPError
# Libraries
from lib.core.dork_check import DorkScanner
from lib.core.errors import GoogleBlockException
from lib.core.hash_cracking import HashCracker
from lib.core.hash_cracking.hash_checker import HashChecker
from lib.core.port_scan import PortScanner
from lib.core.proxy_finder import attempt_to_connect_to_proxies
from lib.core.sql_scan.xss_scan import xss
from lib.core.sql_scan import SQLiScanner
# Settings
from lib.core.settings import BANNER
from lib.core.settings import GOOGLE_TEMP_BLOCK_ERROR_MESSAGE
from lib.core.settings import IP_ADDRESS_REGEX
from lib.core.settings import LEGAL_DISC
from lib.core.settings import LOGGER
from lib.core.settings import LONG_LEGAL_DISCLAIMER
from lib.core.settings import QUERY_REGEX
from lib.core.settings import URL_REGEX
from lib.core.settings import VERSION_STRING
from lib.core.settings import WORDLIST_LINKS
from lib.core.settings import create_wordlist
from lib.core.settings import RANDOM_USER_AGENT
if __name__ == '__main__':
opts = argparse.ArgumentParser()
opts.add_argument('-d', '--dork-check', metavar='DORK', dest="dorkcheck",
help="Provide a Google dork to check for possible injectable sites")
opts.add_argument('-c', '--hash-crack', metavar="HASH", dest="hash", nargs=1,
help="Specify a hash to crack and a hash type, IE: -c <HASH>:md5 (default all)")
opts.add_argument('-p', '--port-scan', metavar="HOST", dest="portscan",
help="Provide a host to scan for open ports")
opts.add_argument('-s', '--sqli-scanner', metavar="URL", dest="sqliscan",
help="Provide a URL to scan for SQL injection flaws")
opts.add_argument("-v", '--verify-hash', metavar="HASH", dest="hashcheck",
help="Verify a given hash type. (MD5, WHIRLPOOL, SHA256, etc..)")
opts.add_argument("-f", "--find-proxies", action="store_true", dest="proxysearch",
help="Attempt to find some proxies automatically")
opts.add_argument('-x', '--xss', metavar="URL", dest="xssScan",
help="Check if a URL is vulnerable to XSS")
opts.add_argument('-l', '--legal', action="store_true", dest="legal",
help="Display the legal information")
opts.add_argument('--version', action="store_true", dest="version",
help="Show the version number and exit")
opts.add_argument('--rand-wordlist', action="store_true", dest="random_wordlist",
help="Create a random wordlist to use for dictionary attacks"),
opts.add_argument("--proxy", metavar="PROXY", dest="configProxy",
help="Configure the program to use a proxy when connecting")
opts.add_argument('--rand-agent', action="store_true", dest="randomUserAgent",
help="Use a random user agent from a file list")
args = opts.parse_args()
print(BANNER + "\033[91m{}\033[0m".format(LEGAL_DISC) + "\n") if args.legal is False else \
BANNER + "\033[91m{}\033[0m".format(LONG_LEGAL_DISCLAIMER + "\n")
try:
if args.version is True: # Show the version number and exit
LOGGER.info(VERSION_STRING)
sys.exit(0)
if args.random_wordlist is True: # Create a random wordlist
LOGGER.info("Creating a random wordlist..")
create_wordlist(random.choice(WORDLIST_LINKS))
LOGGER.info("Wordlist created, resuming process..")
if args.proxysearch is True: # Find some proxies
LOGGER.info("Starting proxy search..")
attempt_to_connect_to_proxies()
if args.hashcheck is not None: # Check what hash type you have
LOGGER.info("Analyzing hash: '{}'".format(args.hashcheck))
HashChecker(args.hashcheck).obtain_hash_type()
if args.sqliscan is not None: # SQLi scanning
try:
if QUERY_REGEX.match(args.sqliscan):
LOGGER.info("Starting SQLi scan on '{}'..".format(args.sqliscan))
LOGGER.info(SQLiScanner(args.sqliscan).sqli_search())
else:
LOGGER.error("URL does not contain a query (GET) parameter. Example: http://example.com/php?id=2")
except HTTPError as e:
error_message = "URL: '{}' threw an exception: '{}' ".format(args.sqliscan, e)
error_message += "and Pybelt is unable to resolve the URL, "
error_message += "this could mean that the URL is not allowing connections "
error_message += "or that the URL is bad. Attempt to connect "
error_message += "to the URL manually, if a connection occurs "
error_message += "make an issue."
LOGGER.fatal(error_message)
if args.dorkcheck is not None: # Dork checker, check if your dork isn't shit
LOGGER.info("Starting dork scan, using query: '{}'..".format(args.dorkcheck))
try:
LOGGER.info(DorkScanner(args.dorkcheck).check_urls_for_queries())
except HTTPError:
LOGGER.fatal(GoogleBlockException(GOOGLE_TEMP_BLOCK_ERROR_MESSAGE))
if args.hash is not None: # Try and crack a hash
try:
items = list(''.join(args.hash).split(":"))
if items[1] == "all":
LOGGER.info("Starting hash cracking without knowledge of algorithm...")
HashCracker(items[0]).try_all_algorithms()
else:
LOGGER.info("Starting hash cracking using %s as algorithm type.." % items[1])
HashCracker(items[0], type=items[1]).try_certain_algorithm()
except IndexError:
error_message = "You must specify a hash type in order for this to work. "
error_message += "Example: 'python pybelt.py -c 098f6bcd4621d373cade4e832627b4f6:md5'"
LOGGER.fatal(error_message)
if args.portscan is not None: # Scan a given host for open ports
if re.search(IP_ADDRESS_REGEX, sys.argv[2]) is not None:
LOGGER.info("Starting port scan on IP: {}".format(args.portscan))
LOGGER.info(PortScanner(args.portscan).connect_to_host())
elif re.search(URL_REGEX, sys.argv[2]) is not None and re.search(QUERY_REGEX, sys.argv[2]) is None:
try:
LOGGER.info("Fetching resolve IP...")
ip_address = socket.gethostbyname(args.portscan)
LOGGER.info("Done! IP: {}".format(ip_address))
LOGGER.info("Starting scan on URL: {} IP: {}".format(args.portscan, ip_address))
PortScanner(ip_address).connect_to_host()
except socket.gaierror:
error_message = "Unable to resolve IP address from {}.".format(args.portscan)
error_message += " You can manually get the IP address and try again,"
error_message += " dropping the query parameter in the URL (IE php?id=),"
error_message += " or dropping the http or https"
error_message += " and adding www in place of it. IE www.google.com"
error_message += " may fix this issue."
LOGGER.fatal(error_message)
else:
error_message = "You need to provide a host to scan,"
error_message += " this can be given in the form of a URL "
error_message += "or a IP address."
LOGGER.fatal(error_message)
if args.xssScan is not None: # Scan a URL for XSS vulnerabilities
if QUERY_REGEX.match(args.xssScan):
proxy = args.configProxy if args.configProxy is not None else None
header = RANDOM_USER_AGENT if args.randomUserAgent is not False else None
if args.configProxy is not None:
LOGGER.info("Proxy configured, running through: {}".format(args.configProxy))
if args.randomUserAgent is True:
LOGGER.info("Grabbed random user agent: {}".format(header))
LOGGER.info("Searching: {} for XSS vulnerabilities..".format(args.xssScan, proxy=proxy, headers=header))
if not xss.main(args.xssScan, proxy=proxy, headers=header):
LOGGER.error("{} does not appear to be vulnerable to XSS".format(args.xssScan))
else:
LOGGER.info("{} seems to be vulnerable to XSS.".format(args.xssScan))
else:
error_message = "The URL you provided does not contain a query "
error_message += "(GET) parameter. In order for this scan you run "
error_message += "successfully you will need to provide a URL with "
error_message += "A query (GET) parameter example: http://127.0.0.1/php?id=2"
LOGGER.fatal(error_message)
except KeyboardInterrupt: # Why you abort me?! :c
LOGGER.error("User aborted.")
| [
11748,
1822,
29572,
198,
11748,
4738,
198,
11748,
302,
198,
11748,
17802,
198,
11748,
25064,
198,
6738,
2956,
297,
571,
17,
1330,
14626,
12331,
198,
198,
2,
46267,
198,
6738,
9195,
13,
7295,
13,
67,
967,
62,
9122,
1330,
360,
967,
3335... | 2.264179 | 4,073 |
import tensorflow as tf
from nalp.datasets import ImageDataset
from nalp.models import GAN
# Loading the MNIST dataset
(x, y), (_, _) = tf.keras.datasets.mnist.load_data()
# Creating an Image Dataset
dataset = ImageDataset(x, batch_size=256, shape=(x.shape[0], 784), normalize=True)
# Creating the GAN
gan = GAN(input_shape=(784,), noise_dim=100, n_samplings=3, alpha=0.01)
# Compiling the GAN
gan.compile(
d_optimizer=tf.optimizers.Adam(learning_rate=0.0001),
g_optimizer=tf.optimizers.Adam(learning_rate=0.0001),
)
# Fitting the GAN
gan.fit(dataset.batches, epochs=150)
# Saving GAN weights
gan.save_weights("trained/gan", save_format="tf")
| [
11748,
11192,
273,
11125,
355,
48700,
198,
198,
6738,
299,
282,
79,
13,
19608,
292,
1039,
1330,
7412,
27354,
292,
316,
198,
6738,
299,
282,
79,
13,
27530,
1330,
402,
1565,
198,
198,
2,
12320,
262,
29060,
8808,
27039,
198,
7,
87,
11,... | 2.51145 | 262 |
# Generated by Django 2.2.2 on 2019-06-24 17:22
from django.db import migrations
| [
2,
2980,
515,
416,
37770,
362,
13,
17,
13,
17,
319,
13130,
12,
3312,
12,
1731,
1596,
25,
1828,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
628
] | 2.766667 | 30 |
from pathlib import Path
from fhir.resources.valueset import ValueSet as _ValueSet
from oops_fhir.utils import ValueSet
from oops_fhir.r4.code_system.communication_not_done_reason import (
CommunicationNotDoneReason as CommunicationNotDoneReason_,
)
__all__ = ["CommunicationNotDoneReason"]
_resource = _ValueSet.parse_file(Path(__file__).with_suffix(".json"))
class CommunicationNotDoneReason(CommunicationNotDoneReason_):
"""
CommunicationNotDoneReason
Codes for the reason why a communication did not happen.
Status: draft - Version: 4.0.1
http://hl7.org/fhir/ValueSet/communication-not-done-reason
"""
| [
6738,
3108,
8019,
1330,
10644,
198,
198,
6738,
277,
71,
343,
13,
37540,
13,
27160,
316,
1330,
11052,
7248,
355,
4808,
11395,
7248,
198,
198,
6738,
267,
2840,
62,
69,
71,
343,
13,
26791,
1330,
11052,
7248,
628,
198,
6738,
267,
2840,
... | 3.090909 | 209 |
from argparse import Namespace
from typing import Any
from typing import Dict
from typing import List
from typing import Optional
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from .query import QueryBuilder
# this class is what is used to generate the output for the each query in the execution block
| [
6738,
1822,
29572,
1330,
28531,
10223,
198,
6738,
19720,
1330,
4377,
198,
6738,
19720,
1330,
360,
713,
198,
6738,
19720,
1330,
7343,
198,
6738,
19720,
1330,
32233,
198,
6738,
19720,
1330,
41876,
62,
50084,
2751,
198,
198,
361,
41876,
62,
... | 4.144737 | 76 |
import regex
from botbase import *
args = sys.argv
query = p.data.api.ListGenerator("abuselog", aflprop = "details", aflfilter = args[1], site = site)
query.set_maximum_items(args[2])
query.set_query_increment(50)
extra = r".{,100}"
pattern = regex.compile (r"(?V1)"+extra+r"(?:"+input("Regex: ")+r")"+extra, flags = regex.IGNORECASE)
for hit in query:
details = hit["details"]
var = details.get(args[3])
if var:
match = pattern.search(var)
if match: print(details["page_title"], match.group())
| [
11748,
40364,
198,
6738,
10214,
8692,
1330,
1635,
198,
198,
22046,
796,
25064,
13,
853,
85,
198,
198,
22766,
796,
279,
13,
7890,
13,
15042,
13,
8053,
8645,
1352,
7203,
397,
84,
741,
519,
1600,
257,
2704,
22930,
796,
366,
36604,
1600,
... | 2.581633 | 196 |
import warnings
import numpy as np
import pylab as plt
from matplotlib.gridspec import GridSpec
from matplotlib.cbook import mplDeprecation
from mpl_toolkits.axes_grid1.axes_divider import make_axes_locatable
# always reload gridspec_helper.py
from importlib import reload
import gridspec_helper; reload(gridspec_helper)
### fig generator method using LiteFigAxe class
### ==================================================
# called by lite_layout()
def make_lite_figure(layout,name,fig_size=None):
"""
input: an instance of LiteFigAxe
returns: fig,axes handles
"""
if fig_size == None:
fig_size = tuple(layout.size)
fig = plt.figure(figsize=fig_size)
gridspecs = gridspec_converter(layout.layout)
ax1 = fig.add_subplot(gridspecs[0])
axes_dic = {}
# The axes are stored in gridspec in the order specified in the layout_array
# so we just need to fill a dic with keys named '1','2','3',etc.
# Here we do that while accounting for the shared axes specification
for i,gridspec in enumerate(gridspecs):
with warnings.catch_warnings():
warnings.filterwarnings('ignore',category=mplDeprecation)
axes_dic[str(1+i)] = fig.add_subplot(gridspec)
# add inward-pointing ticks on all 4 sides and set their visibility
for idx,key in enumerate(axes_dic.keys()):
ax = axes_dic[key]
ax.minorticks_on()
ax.xaxis.set_ticks_position('both')
ax.yaxis.set_ticks_position('both')
ax.tick_params(axis='both', direction='in',width=1.)
ax.tick_params(which='minor',axis='both',direction='in',width=1.)
# add any cbar axes
if max(layout.cbars) > 0:
Nax = len(gridspecs)
for n in range(Nax):
ax = axes_dic[str(1+n)]
# first encode the vertical or horizontal orientation
# into the colorbar key
ori_tag = get_cax_orientation(name,layout.caxori[n])
cax_key = 'cax' + str(n+1)
# now make the cbar axis
cax_divider = make_axes_locatable(ax)
location = get_cax_location(name,layout.caxloc[n])
if location is not None:
cax = cax_divider.append_axes(location, size="3%", pad="2%")
axes_dic[cax_key] = cax
if ori_tag is not None: # add colorbar using dummy data
cb_key = ori_tag + 'cb' + str(n+1)
A = np.array([[1., 2.],[3., 4.]])
pim = ax.imshow(A)
if ori_tag=='v':
ori = "vertical"
else:
ori = "horizontal"
cb = fig.colorbar(pim, cax=cax, orientation=ori, extend='both')
axes_dic[cb_key] = cb
if ori_tag=='v':
ori = "vertical"
else:
ori = "horizontal"
cax.xaxis.set_ticks_position("top")
else:
axes_dic[cax_key] = None
plt.tight_layout()
return fig,axes_dic
### fig generator method using FigAxe class
### ==================================================
# called by custom_layout()
def make_figure(layout,name,fig_size=None):
"""
input: an instance of LiteFigAxe
returns: fig,axes handles
"""
if fig_size == None:
fig_size = tuple(layout.size)
fig = plt.figure(figsize=fig_size)
gridspecs = gridspec_converter(layout.layout)
# head axis (panel 1 by default)
hax = fig.add_subplot(gridspecs[layout.head-1])
axes_dic = {}
# The axes are stored in gridspec in the order specified in the layout_array
# so we just need to fill a dic with keys named '1','2','3',etc.
# Here we do that while accounting for the shared axes specification
idx = 1
for gridspec,share_x,share_y in zip(gridspecs,layout.share_x,layout.share_y):
with warnings.catch_warnings():
warnings.filterwarnings('ignore',category=mplDeprecation)
if gridspec != gridspecs[layout.head-1]:
if share_x > 0 and share_y > 0:
axes_dic[str(idx)] = fig.add_subplot(gridspec,sharex=hax,sharey=hax)
elif share_x > 0:
axes_dic[str(idx)] = fig.add_subplot(gridspec,sharex=hax)
elif share_y > 0:
axes_dic[str(idx)] = fig.add_subplot(gridspec,sharey=hax)
else:
axes_dic[str(idx)] = fig.add_subplot(gridspec)
else:
axes_dic[str(idx)] = fig.add_subplot(gridspec)
idx += 1
# add inward-pointing ticks on all 4 sides and set their visibility
for idx,key in enumerate(axes_dic.keys()):
ax = axes_dic[key]
ax.minorticks_on()
ax.xaxis.set_ticks_position('both')
ax.yaxis.set_ticks_position('both')
ax.tick_params(axis='both', direction='in',width=1.)
ax.tick_params(which='minor',axis='both',direction='in',width=1.)
# hide axis tick labels
plt.setp(ax.get_xticklabels(), visible=not(layout.hide_x[idx]))
plt.setp(ax.get_yticklabels(), visible=not(layout.hide_y[idx]))
# add any cbar axes
if max(layout.cbars) > 0:
Nax = len(gridspecs)
for n in range(Nax):
ax = axes_dic[str(1+n)]
# first encode the vertical or horizontal orientation
# into the colorbar key
ori_tag = get_cax_orientation(name,layout.caxori[n])
cax_key = 'cax' + str(n+1)
# now make the cbar axis
cax_divider = make_axes_locatable(ax)
location = get_cax_location(name,layout.caxloc[n])
if location is not None:
cax = cax_divider.append_axes(location, size="3%", pad="2%")
axes_dic[cax_key] = cax
if ori_tag is not None: # add colorbar using dummy data
cb_key = ori_tag + 'cb' + str(n+1)
A = np.array([[1., 2.],[3., 4.]])
pim = ax.imshow(A)
if ori_tag=='v':
ori = "vertical"
else:
ori = "horizontal"
cb = fig.colorbar(pim, cax=cax, orientation=ori, extend='both')
axes_dic[cb_key] = cb
if ori_tag=='v':
ori = "vertical"
else:
ori = "horizontal"
cax.xaxis.set_ticks_position("top")
# remove cbar if not specified in layout
if layout.cbars[n] == 0:
axes_dic[cax_key] = None
axes_dic[cb_key] = None
cb.remove()
else:
axes_dic[cax_key] = None
# lastly, apply specifications defined in the layout
plt.subplots_adjust(left=layout.adjust[0], right=layout.adjust[1],
bottom=layout.adjust[2],top=layout.adjust[3],
wspace=layout.adjust[4],hspace=layout.adjust[5])
return fig,axes_dic
### general methods
### ==================================================
# use to choose among layouts in gridspec_helper
def use_layout(name='default', fig_size=None):
"""
input: a string specifying the layout
returns: fig,axes handles
"""
layouts_custom = gridspec_helper.custom_layouts()
layouts_lite = gridspec_helper.lite_layouts()
if name in layouts_custom.keys():
layout = gridspec_helper.custom(name)
print('{}'.format(layout.art))
fig,axes = make_figure(layout,name,fig_size)
return fig,axes
elif name in layouts_lite.keys():
layout = gridspec_helper.lite(name)
fig,axes = make_lite_figure(layout,name,fig_size)
return fig,axes
else:
print('Layout {} not found. Choose from these names:'.format(name))
help(show_art=False)
import sys
sys.exit()
# helper function to plot any layout in gridspec_helper
# helper function to show layout names in gridspec_helper
# core method for utilizing gridspec with a layout array
# method to assign location of colorbar
# method to assign orientation of colorbar
| [
11748,
14601,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
279,
2645,
397,
355,
458,
83,
198,
6738,
2603,
29487,
8019,
13,
2164,
2340,
43106,
1330,
24846,
22882,
198,
6738,
2603,
29487,
8019,
13,
66,
2070,
1330,
285,
489,
12156,
8344... | 2.081061 | 3,960 |
from PyQt5.QtWidgets import *
from PyQt5.QtCore import Qt, QObject, QEvent
from .datatable_view import DataTableView
from .datatable_header import HeaderView
import pandas as pd
from typing import Dict, List, Union, Iterable, Any
| [
6738,
9485,
48,
83,
20,
13,
48,
83,
54,
312,
11407,
1330,
1635,
198,
6738,
9485,
48,
83,
20,
13,
48,
83,
14055,
1330,
33734,
11,
1195,
10267,
11,
1195,
9237,
198,
6738,
764,
19608,
21156,
62,
1177,
1330,
6060,
10962,
7680,
198,
67... | 3.052632 | 76 |
from random import randint
# 0: draw 1:lost 2:won
matrix = [[0,1,2],
[2,0,1],
[1,2,0]]
choices = ["rock", "paper", "scissors"]
choice_map = {"rock": 0, "paper":1, "scissors":2}
comp_choice = randint(0,2)
user_choice = input("Please choose rock, paper or scissors:\n").lower().strip()
user_choice = choice_map.get(user_choice)
outcome = matrix[user_choice][comp_choice]
outcomes = ["Tie", "You lost", "You won"]
print(f"Compter's choice: {choices[comp_choice]}")
print(outcomes[outcome])
| [
6738,
4738,
1330,
43720,
600,
198,
2,
657,
25,
3197,
352,
25,
33224,
362,
25,
26502,
198,
6759,
8609,
796,
16410,
15,
11,
16,
11,
17,
4357,
198,
220,
220,
220,
220,
220,
220,
220,
220,
220,
685,
17,
11,
15,
11,
16,
4357,
198,
... | 2.495098 | 204 |
import mxnet as mx
import numpy as np
import Queue
import threading
import time
from rcnn.io.rpn import get_rpn_testbatch
| [
11748,
285,
87,
3262,
355,
285,
87,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
4670,
518,
198,
11748,
4704,
278,
198,
11748,
640,
198,
198,
6738,
48321,
20471,
13,
952,
13,
81,
21999,
1330,
651,
62,
81,
21999,
62,
9288,
43501,
... | 2.840909 | 44 |
cat = Cat('Harry', 2)
cat.speak() | [
198,
9246,
796,
5181,
10786,
18308,
3256,
362,
8,
198,
9246,
13,
47350,
3419
] | 2.428571 | 14 |
import json
from app import db
class Note(db.Model):
"""A note about an information item"""
__tablename__ = 'notes'
id = db.Column(db.Integer, primary_key=True)
location = db.Column(db.String, nullable=None)
description = db.Column(db.String)
tags = db.Column(db.String)
def to_dict(self):
"""Returns with the dictionary representation of the note"""
fields = {
'id': self.id,
'location': self.location,
'description': self.description,
'tags': self.tags
}
return fields
def to_json(self):
"""Returns with the JSON representation of the note"""
return json.dumps(self.to_dict())
class Tag(db.Model):
"""Tag as a label or keyword"""
__tablename__ = 'tags'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String, nullable=None)
def to_dict(self):
"""Returns with the dictionary representation of the tag"""
fields = {
'id': self.id,
'name': self.location
}
return fields
def to_json(self):
"""Returns with the JSON representation of the tag"""
return json.dumps(self.to_dict())
class Association(db.Model):
"""Association which binds the notes and tags"""
__tablename__ = 'associations'
note_id = db.Column(db.Integer, db.ForeignKey('notes.id'), primary_key=True)
tag_id = db.Column(db.Integer, db.ForeignKey('tags.id'), primary_key=True)
note = db.relationship('Note', backref='tag_association')
tag = db.relationship('Tag', backref='note_association')
| [
11748,
33918,
198,
198,
6738,
598,
1330,
20613,
628,
198,
4871,
5740,
7,
9945,
13,
17633,
2599,
198,
220,
220,
220,
37227,
32,
3465,
546,
281,
1321,
2378,
37811,
628,
220,
220,
220,
11593,
8658,
11925,
480,
834,
796,
705,
17815,
6,
... | 2.497703 | 653 |
import pandas as pd
import numpy as np
from joblib import Parallel, delayed
import vrp.io as io
import vrp.constructive as c
import vrp.sim as sim
from vrp.constructive import SequentialClarkeWright
from vrp.sim import ILSWithConstructive
def multiple_replications(scenario, n_reps=10, n_jobs=1):
    '''
    Multiple independent replications of DialysisSim for a
    scenario

    Parameters
    ----------
    scenario : dataclass Scenario
        parameters for the scenario.
    n_reps : int, optional
        number of independent replications. The default is 10.
    n_jobs : int, optional
        No.of cores for parallel reps (-1 for all cores). The default is 1.

    Returns
    -------
    res : List
    '''
    # Fan the replications out over n_jobs worker processes.
    replication_jobs = (delayed(single_run)(scenario, rep)
                        for rep in range(n_reps))
    return Parallel(n_jobs=n_jobs)(replication_jobs)
def single_run(scenario, i=0):
    '''
    Single replication of the simulation model.

    NOTE(review): this function reads the module-level names WAREHOUSE,
    MODE and ILS_ITER, which are only bound inside the __main__ guard —
    importing and calling it from another module without setting them
    raises NameError.  ``i`` is the replication index (unused in the body).
    '''
    #heuristic procedure that creates routes
    cw = SequentialClarkeWright(WAREHOUSE,
                                c.adjacent_route_tail,
                                c.merge_tail)
    # Select the solver; if MODE is none of these, ``solver`` stays
    # unbound and the TransportExperiment call below raises NameError.
    if MODE == 'CW':
        solver = cw
    elif MODE == 'ILS':
        solver = ILSWithConstructive(cw, WAREHOUSE, iterations=ILS_ITER)
    elif MODE == 'BRUTE':
        solver = sim.BruteForceRouteTuner(vrp_solver=cw,
                                          warehouse=WAREHOUSE)
    model = sim.TransportExperiment(scenario, solver)
    costs = model.single_replication()
    return costs
if __name__ == '__main__':
    #location of warehouse/depot
    WAREHOUSE = 'L51'
    #No. of Iterated Local Search Iterations
    ILS_ITER = 20
    #no. of independent replications
    N_REPS = 1000
    #-1 for parallel replications; 1 for single; 2 for 2 etc.
    N_JOBS = -1
    #MODE: 'CW', 'BRUTE', 'ILS'
    MODE = 'ILS'
    #random number seed for reproducible runs
    SEED = 999
    np.random.seed(seed=SEED)
    #load travel time matrix and patient postcode distribution
    full_matrix = io.load_travel_time()
    postcode_counts = io.load_patient_postcode_count()
    postcode_distribution = sim.create_postcode_distribution(postcode_counts)
    #sim parameters
    scenario_20 = sim.Scenario(n_patients=20,
                               warehouse=WAREHOUSE,
                               vehicle_capacities=[2, 3, 4],
                               cost_matrix = full_matrix,
                               postcode_distribution=postcode_distribution,
                               p_positive=1.0,
                               p_transport=1.0)
    # NOTE(review): scenario_30 is built but never added to ``scenarios``
    # below — confirm whether the 30-patient run was dropped on purpose.
    scenario_30 = sim.Scenario(n_patients=30,
                               warehouse=WAREHOUSE,
                               vehicle_capacities=[2, 3, 4],
                               cost_matrix = full_matrix,
                               postcode_distribution=postcode_distribution,
                               p_positive=1.0,
                               p_transport=1.0)
    # NOTE(review): p_positive=1 (int) here vs 1.0 elsewhere — numerically
    # identical, presumably meant as 1.0 for consistency.
    scenario_40 = sim.Scenario(n_patients=40,
                               warehouse=WAREHOUSE,
                               vehicle_capacities=[2, 3, 4],
                               cost_matrix = full_matrix,
                               postcode_distribution=postcode_distribution,
                               p_positive=1,
                               p_transport=1.0)
    scenario_60 = sim.Scenario(n_patients=60,
                               warehouse=WAREHOUSE,
                               vehicle_capacities=[2, 3, 4],
                               cost_matrix = full_matrix,
                               postcode_distribution=postcode_distribution,
                               p_positive=1.0,
                               p_transport=1.0)
    # Scenarios actually executed, keyed by output-file stem.
    scenarios = {}
    scenarios['20_positive'] = scenario_20
    scenarios['40_positive'] = scenario_40
    scenarios['60_positive'] = scenario_60
    scenario_results = {}
    # Run each scenario and persist the replication costs to CSV.
    for scenario_name, scenario in scenarios.items():
        print(f'Running scenario: {scenario_name}...', end=' ')
        results = multiple_replications(scenario, n_reps=N_REPS, n_jobs=N_JOBS)
        #results = single_run(scenario)
        file_name = f'vrp/output/{scenario_name}_{MODE}.csv'
        pd.DataFrame(results).to_csv(file_name)
        print('done.')
    print('All experiments completed.')
| [
11748,
19798,
292,
355,
279,
67,
198,
11748,
299,
32152,
355,
45941,
198,
198,
6738,
1693,
8019,
1330,
42945,
11,
11038,
198,
198,
11748,
410,
81,
79,
13,
952,
355,
33245,
198,
11748,
410,
81,
79,
13,
41571,
425,
355,
269,
198,
1174... | 1.928634 | 2,284 |
INPUT_FIELD_NAME = "input_data"
| [
1268,
30076,
62,
44603,
62,
20608,
796,
366,
15414,
62,
7890,
1,
198
] | 2.461538 | 13 |
# Read N and A, then decide whether the change N % 500 can be
# covered by the A one-yen coins available.
N, A = int(input()), int(input())
print('Yes' if N % 500 <= A else 'No')
45,
11,
317,
796,
493,
7,
15414,
3419,
828,
493,
7,
15414,
28955,
198,
198,
361,
399,
4064,
5323,
19841,
317,
25,
198,
220,
220,
220,
3601,
10786,
5297,
11537,
198,
17772,
25,
198,
220,
220,
220,
3601,
10786,
2949,
11537
] | 2.195122 | 41 |
from zope.interface import providedBy
from pyramid.interfaces import (
IAuthenticationPolicy,
IAuthorizationPolicy,
ISecuredView,
IView,
IViewClassifier,
)
from pyramid.compat import map_
from pyramid.threadlocal import get_current_registry
# Well-known principal identifiers and ACE actions.
Everyone = 'system.Everyone'
Authenticated = 'system.Authenticated'
Allow = 'Allow'
Deny = 'Deny'
class AllPermissionsList(object):
    """ Stand in 'permission list' to represent all permissions.

    Instances report *every* permission as contained, compare equal to
    any other instance of the class, and iterate as an empty sequence
    (there is nothing concrete to enumerate).  Without these methods the
    stand-in cannot answer ``permission in ALL_PERMISSIONS`` checks.
    """
    def __iter__(self):
        # "All permissions" has no concrete members to enumerate.
        return iter(())
    def __contains__(self, other):
        # Every permission is considered a member.
        return True
    def __eq__(self, other):
        return isinstance(other, self.__class__)
    # Keep instances hashable (defining __eq__ alone would remove the
    # default hash), so ALL_PERMISSIONS can still be used in sets/dicts.
    __hash__ = object.__hash__
# Singleton matching any permission; used in the catch-all DENY_ALL ACE.
ALL_PERMISSIONS = AllPermissionsList()
DENY_ALL = (Deny, Everyone, ALL_PERMISSIONS)
# Sentinel permission meaning "no permission required" for a view.
NO_PERMISSION_REQUIRED = '__no_permission_required__'
def has_permission(permission, context, request):
    """ Provided a permission (a string or unicode object), a context
    (a :term:`resource` instance) and a request object, return an
    instance of :data:`pyramid.security.Allowed` if the permission
    is granted in this context to the user implied by the
    request. Return an instance of :mod:`pyramid.security.Denied`
    if this permission is not granted in this context to this user.
    This function delegates to the current authentication and
    authorization policies. Return
    :data:`pyramid.security.Allowed` unconditionally if no
    authentication policy has been configured in this application."""
    try:
        reg = request.registry
    except AttributeError:
        # b/c: old-style requests without a registry fall back to the
        # thread-local registry.
        reg = get_current_registry() # b/c
    authn_policy = reg.queryUtility(IAuthenticationPolicy)
    if authn_policy is None:
        # No authentication configured -> everything is allowed.
        return Allowed('No authentication policy in use.')
    authz_policy = reg.queryUtility(IAuthorizationPolicy)
    if authz_policy is None:
        raise ValueError('Authentication policy registered without '
                         'authorization policy') # should never happen
    # Resolve the caller's principals, then let the authorization
    # policy decide.
    principals = authn_policy.effective_principals(request)
    return authz_policy.permits(context, principals, permission)
def authenticated_userid(request):
    """ Return the userid of the currently authenticated user, or
    ``None`` when no :term:`authentication policy` is in effect or no
    user is currently authenticated."""
    try:
        registry = request.registry
    except AttributeError:
        registry = get_current_registry() # b/c
    policy = registry.queryUtility(IAuthenticationPolicy)
    return None if policy is None else policy.authenticated_userid(request)
def unauthenticated_userid(request):
    """ Return the *claimed* (not verified) userid from the credentials
    present in the request, or ``None`` when no :term:`authentication
    policy` is in effect or the request carries no user data.  Unlike
    :func:`~pyramid.security.authenticated_userid`, no check is made
    that a record for the userid exists in persistent storage."""
    try:
        registry = request.registry
    except AttributeError:
        registry = get_current_registry() # b/c
    policy = registry.queryUtility(IAuthenticationPolicy)
    return None if policy is None else policy.unauthenticated_userid(request)
def effective_principals(request):
    """ Return the list of 'effective' :term:`principal` identifiers
    for the ``request``, including the userid of the currently
    authenticated user when one exists.  When no :term:`authentication
    policy` is in effect, the result is ``[Everyone]``."""
    try:
        registry = request.registry
    except AttributeError:
        registry = get_current_registry() # b/c
    policy = registry.queryUtility(IAuthenticationPolicy)
    if policy is None:
        return [Everyone]
    return policy.effective_principals(request)
def principals_allowed_by_permission(context, permission):
    """ Provided a ``context`` (a resource object), and a ``permission``
    (a string or unicode object), if a :term:`authorization policy` is
    in effect, return a sequence of :term:`principal` ids that possess
    the permission in the ``context``. If no authorization policy is
    in effect, this will return a sequence with the single value
    :mod:`pyramid.security.Everyone` (the special principal
    identifier representing all principals).

    .. note::

       even if an :term:`authorization policy` is in effect,
       some (exotic) authorization policies may not implement the
       required machinery for this function; those will cause a
       :exc:`NotImplementedError` exception to be raised when this
       function is invoked.
    """
    # Unlike the request-taking helpers above, this always uses the
    # thread-local registry.
    reg = get_current_registry()
    policy = reg.queryUtility(IAuthorizationPolicy)
    if policy is None:
        return [Everyone]
    return policy.principals_allowed_by_permission(context, permission)
def view_execution_permitted(context, request, name=''):
    """ If the view specified by ``context`` and ``name`` is protected
    by a :term:`permission`, check the permission associated with the
    view using the effective authentication/authorization policies and
    the ``request``. Return a boolean result. If no
    :term:`authorization policy` is in effect, or if the view is not
    protected by a permission, return ``True``. If no view can be found,
    an exception will be raised.

    .. versionchanged:: 1.4a4
       An exception is raised if no view is found.
    """
    try:
        reg = request.registry
    except AttributeError:
        reg = get_current_registry() # b/c
    # Look up a security-wrapped view first ...
    provides = [IViewClassifier] + map_(providedBy, (request, context))
    view = reg.adapters.lookup(provides, ISecuredView, name=name)
    if view is None:
        # ... falling back to a plain (unsecured) view.
        view = reg.adapters.lookup(provides, IView, name=name)
        if view is None:
            raise TypeError('No registered view satisfies the constraints. '
                            'It would not make sense to claim that this view '
                            '"is" or "is not" permitted.')
        # A plain view carries no permission, so it is always permitted.
        return Allowed(
            'Allowed: view name %r in context %r (no permission defined)' %
            (name, context))
    return view.__permitted__(context, request)
def remember(request, principal, **kw):
    """ Return a sequence of header tuples (e.g. ``[('Set-Cookie',
    'foo=abc')]``) suitable for 'remembering' the credentials implied
    by ``principal`` and ``**kw`` under the current
    :term:`authentication policy`.  Typical usage inside a view body
    (``response`` is a :term:`WebOb` -style :term:`response` object)::

        from pyramid.security import remember
        headers = remember(request, 'chrism', password='123', max_age='86400')
        response.headerlist.extend(headers)
        return response

    When no :term:`authentication policy` is in use this always returns
    an empty sequence; otherwise the composition and meaning of ``**kw``
    is a contract between the caller and the effective policy."""
    try:
        registry = request.registry
    except AttributeError:
        registry = get_current_registry() # b/c
    policy = registry.queryUtility(IAuthenticationPolicy)
    if policy is None:
        return []
    return policy.remember(request, principal, **kw)
def forget(request):
    """ Return a sequence of header tuples (e.g. ``[('Set-Cookie',
    'foo=abc')]``) suitable for 'forgetting' the credentials possessed
    by the currently authenticated user.  Typical usage inside a view
    body (``response`` is a :term:`WebOb` -style :term:`response`
    object)::

        from pyramid.security import forget
        headers = forget(request)
        response.headerlist.extend(headers)
        return response

    When no :term:`authentication policy` is in use this always returns
    an empty sequence."""
    try:
        registry = request.registry
    except AttributeError:
        registry = get_current_registry() # b/c
    policy = registry.queryUtility(IAuthenticationPolicy)
    if policy is None:
        return []
    return policy.forget(request)
class Denied(PermitsResult):
    # NOTE(review): the PermitsResult base class is not visible in this
    # chunk; ``boolval`` presumably drives its boolean conversion — confirm.
    """ An instance of ``Denied`` is returned when a security-related
    API or other :app:`Pyramid` code denies an action unrelated to
    an ACL check.  It evaluates equal to all boolean false types.  It
    has an attribute named ``msg`` describing the circumstances for
    the deny."""
    boolval = 0  # falsy
class Allowed(PermitsResult):
    # NOTE(review): PermitsResult base class not visible in this chunk.
    """ An instance of ``Allowed`` is returned when a security-related
    API or other :app:`Pyramid` code allows an action unrelated to
    an ACL check.  It evaluates equal to all boolean true types.  It
    has an attribute named ``msg`` describing the circumstances for
    the allow."""
    boolval = 1  # truthy
class ACLDenied(ACLPermitsResult):
    # NOTE(review): ACLPermitsResult base class not visible in this chunk.
    """ An instance of ``ACLDenied`` represents that a security check made
    explicitly against ACL was denied.  It evaluates equal to all boolean
    false types.  It also has the following attributes: ``acl``, ``ace``,
    ``permission``, ``principals``, and ``context``.  These attributes
    indicate the security values involved in the request.  Its __str__ method
    prints a summary of these attributes for debugging purposes. The same
    summary is available as the ``msg`` attribute."""
    boolval = 0  # falsy
class ACLAllowed(ACLPermitsResult):
    # NOTE(review): ACLPermitsResult base class not visible in this chunk.
    """ An instance of ``ACLAllowed`` represents that a security check made
    explicitly against ACL was allowed.  It evaluates equal to all boolean
    true types.  It also has the following attributes: ``acl``, ``ace``,
    ``permission``, ``principals``, and ``context``.  These attributes
    indicate the security values involved in the request.  Its __str__ method
    prints a summary of these attributes for debugging purposes. The same
    summary is available as the ``msg`` attribute."""
    boolval = 1  # truthy
| [
6738,
1976,
3008,
13,
39994,
1330,
2810,
3886,
198,
198,
6738,
27944,
13,
3849,
32186,
1330,
357,
198,
220,
220,
220,
314,
47649,
3299,
36727,
11,
198,
220,
220,
220,
314,
13838,
1634,
36727,
11,
198,
220,
220,
220,
3180,
721,
1522,
... | 3.063132 | 3,263 |
import os, time, sys, random, string, subprocess, pprint
from server import DEFAULT_CONFIG
from util import Logger, getTime, full_path, randString, updateConfig
from queue import FIFOQueue, TaskObject, ShortestTaskQueue
# CLI entry point: optional args are [config_file] [runmode].
# NOTE(review): CompletionServiceClient is not defined in this chunk —
# presumably declared earlier in the file; confirm.
if __name__ == "__main__":
    if len(sys.argv) < 2:
        c = CompletionServiceClient()
    elif len(sys.argv) < 3:
        c = CompletionServiceClient(config_file=sys.argv[1])
    else:
        c = CompletionServiceClient(config_file=sys.argv[1], runmode=sys.argv[2])
| [
11748,
28686,
11,
640,
11,
25064,
11,
4738,
11,
4731,
11,
850,
14681,
11,
279,
4798,
198,
198,
6738,
4382,
1330,
5550,
38865,
62,
10943,
16254,
198,
6738,
7736,
1330,
5972,
1362,
11,
651,
7575,
11,
1336,
62,
6978,
11,
43720,
10100,
... | 2.8 | 170 |
import hashlib
import hmac
import json
import uuid
import os
from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes
from cryptography.hazmat.backends import default_backend
from Crypto.Protocol.KDF import scrypt
from eth_keys import keys
from eth_utils import (
big_endian_to_int,
decode_hex,
encode_hex,
int_to_big_endian,
is_dict,
is_string,
keccak,
remove_0x_prefix,
to_dict,
)
@to_dict
#
# Version 3 creators
#
DKLEN = 32
SCRYPT_R = 1
SCRYPT_P = 8
#
# Verson 3 decoder
#
#
# Key derivation
#
#
# Encryption and Decryption
#
#
# Utility
#
| [
11748,
12234,
8019,
198,
11748,
289,
20285,
198,
11748,
33918,
198,
11748,
334,
27112,
198,
11748,
28686,
198,
6738,
45898,
13,
71,
1031,
6759,
13,
19795,
20288,
13,
66,
541,
7084,
1330,
44334,
11,
16113,
11,
12881,
198,
6738,
45898,
13... | 2.456693 | 254 |
import os
import random
import cv2
import numpy as np
import PIL.Image
from torch.utils.data.dataset import Dataset
from torch.utils.data import DataLoader
import torchvision.transforms as transforms
from nets.nets_utility import *
import torchvision.transforms.functional as F
# Smoke test: build COCO train/val datasets and DataLoaders, then print
# their sizes.  NOTE(review): COCODataset is not defined in this chunk —
# presumably declared earlier in the file; confirm.
if __name__ == "__main__":
    batch_size = 3
    gpu_device = "cuda:0"  # unused in this block
    shuffle = True
    # address
    project_address = os.getcwd()
    train_dir = os.path.join(os.path.join(os.path.join(project_address, "datasets"), "used_for_nets"), "litt_train2017")
    val_dir = os.path.join(os.path.join(os.path.join(project_address, "datasets"), "used_for_nets"), "litt_val2017")
    print('train_dir', train_dir)
    # datasets: grayscale-style single-channel normalisation constants
    data_transforms = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize([0.4500517361627943], [0.26465333914691797])
    ])
    image_datasets = {}
    image_datasets['train'] = COCODataset(train_dir, transform=data_transforms)
    image_datasets['val'] = COCODataset(val_dir, transform=data_transforms)
    dataloaders = {}
    dataloaders['train'] = DataLoader(
        image_datasets['train'],
        batch_size=batch_size,
        shuffle=shuffle,
        num_workers=1
    )
    dataloaders['val'] = DataLoader(
        image_datasets['val'],
        batch_size=batch_size,
        shuffle=shuffle,
        num_workers=1
    )
    datasets_sizes = {x: len(image_datasets[x]) for x in ['train', 'val']}
    print('datasets_size', datasets_sizes)
11748,
28686,
198,
11748,
4738,
198,
11748,
269,
85,
17,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
350,
4146,
13,
5159,
198,
6738,
28034,
13,
26791,
13,
7890,
13,
19608,
292,
316,
1330,
16092,
292,
316,
198,
6738,
28034,
13,
267... | 2.416532 | 617 |
"""A cache for thumbnailed images."""
from __future__ import unicode_literals
import collections
import sys
import os.path
try:
from io import BytesIO as StringIO
except Exception as e:
from cStringIO import StringIO
from PIL import Image
from django.core.files.base import File
from optimizations.assetcache import default_asset_cache, Asset, AdaptiveAsset
from optimizations.propertycache import cached_property
class Size(collections.namedtuple("SizeBase", ("width", "height",))):
    """Represents the size of an image."""
    def __new__(cls, width, height):
        """Create a new Size, coercing non-None dimensions to int."""
        coerced = tuple(
            None if dim is None else int(dim)
            for dim in (width, height)
        )
        return tuple.__new__(cls, coerced)
    @property
    def aspect(self):
        """Return the width / height aspect ratio of this size."""
        return float(self.width) / float(self.height)
    def intersect(self, size):
        """
        Return a Size representing the intersection of this Size with
        another one (the smaller of each dimension).
        """
        return Size(min(self.width, size.width), min(self.height, size.height))
    def constrain(self, reference):
        """
        Return this Size shrunk so it fits inside the reference's
        aspect ratio.
        """
        ratio = reference.aspect
        return Size(
            min(round(self.height * ratio), self.width),
            min(round(self.width / ratio), self.height),
        )
    def scale(self, x_scale, y_scale):
        """Return a new Size with its width and height scaled."""
        return Size(float(self.width) * x_scale, float(self.height) * y_scale)
# Size adjustment callbacks. These are used to determine the display and data size of the thumbnail.
def _replace_null(value, fallback):
"""Replaces a null value with a fallback."""
if value is None:
return fallback
return value
def _size(reference, size):
    """Ignores the reference size, and just returns the desired size."""
    width = _replace_null(size.width, reference.width)
    height = _replace_null(size.height, reference.height)
    return Size(width, height)
def _size_proportional(reference, size):
    """Adjusts the desired size to match the aspect ratio of the reference."""
    if size.width is None and size.height is None:
        # Nothing requested at all -> same as the plain _size rule.
        return _size(reference, size)
    # Treat a missing dimension as unbounded, then shrink to the
    # reference's aspect ratio.
    unbounded = Size(
        _replace_null(size.width, sys.maxsize),
        _replace_null(size.height, sys.maxsize),
    )
    return unbounded.constrain(reference)
# Resize callbacks. These are used to actually resize the image data.
def _resize(image, image_size, thumbnail_display_size, thumbnail_image_size):
    """
    Resizes the image to exactly match the desired data size, ignoring aspect
    ratio.
    """
    # ANTIALIAS is PIL's highest-quality downsampling filter.
    return image.resize(thumbnail_image_size, Image.ANTIALIAS)
def _resize_cropped(image, image_size, thumbnail_display_size, thumbnail_image_size):
    """
    Resizes the image to fit the desired size, preserving aspect ratio by
    cropping, if required.
    """
    # Resize with nice filter.
    image_aspect = image_size.aspect
    if image_aspect > thumbnail_image_size.aspect:
        # Too wide: match the target height, letting the width overflow.
        pre_cropped_size = Size(thumbnail_image_size.height * image_aspect, thumbnail_image_size.height)
    else:
        # Too tall: match the target width, letting the height overflow.
        pre_cropped_size = Size(thumbnail_image_size.width, thumbnail_image_size.width / image_aspect)
    # Crop the overflow equally from both sides, keeping the centre.
    image = image.resize(pre_cropped_size, Image.ANTIALIAS)
    source_x = int((pre_cropped_size.width - thumbnail_image_size.width) / 2)
    source_y = int((pre_cropped_size.height - thumbnail_image_size.height) / 2)
    return image.crop((
        source_x,
        source_y,
        source_x + thumbnail_image_size.width,
        source_y + thumbnail_image_size.height,
    ))
# Methods of generating thumbnails, keyed by public name.
PROPORTIONAL = "proportional"
RESIZE = "resize"
CROP = "crop"
# A method bundles: how to compute the on-screen size, how to compute the
# stored pixel size, the actual resize function, and a stable hash key
# used when generating cache ids.
ResizeMethod = collections.namedtuple("ResizeMethod", ("get_display_size", "get_data_size", "do_resize", "hash_key",))
_methods = {
    PROPORTIONAL: ResizeMethod(_size_proportional, _size, _resize, "resize"),
    RESIZE: ResizeMethod(_size, _size, _resize, "resize"),
    CROP: ResizeMethod(_size, _size_proportional, _resize_cropped, "crop"),
}
class ThumbnailError(Exception):
    """Something went wrong with thumbnail generation."""
class ThumbnailAsset(Asset):
    """An asset representing a thumbnailed file."""
    def __init__(self, asset, width, height, method):
        """Initializes the asset.

        :param asset: the source asset to thumbnail.
        :param width: target width, or None to keep the source width.
        :param height: target height, or None to keep the source height.
        :param method: a ResizeMethod tuple (see ``_methods``).
        """
        self._asset = asset
        self._width = width
        self._height = height
        self._method = method
    def open(self):
        """Returns an open File for this asset."""
        return self._asset.open()
    def get_name(self):
        """Returns the name of this asset."""
        return self._asset.get_name()
    def get_url(self):
        """Returns the frontend URL of this asset."""
        return self._asset.get_url()
    def get_path(self):
        """Returns the filesystem path of this asset."""
        return self._asset.get_path()
    def get_id_params(self):
        """"Returns the params which should be used to generate the id."""
        params = super(ThumbnailAsset, self).get_id_params()
        # -1 encodes "not specified" so None never reaches the cache key.
        params["width"] = self._width is None and -1 or self._width
        params["height"] = self._height is None and -1 or self._height
        params["method"] = self._method.hash_key
        return params
    @cached_property
    def _image_data_and_size(self):
        """Returns the image data used by this thumbnail asset."""
        # Cached so the source image is opened/decoded at most once.
        image_data = open_image(self._asset)
        return image_data, Size(*image_data.size)
    def get_save_meta(self):
        """Returns the meta parameters to associate with the asset in the asset cache."""
        method = self._method
        requested_size = Size(self._width, self._height)
        _, original_size = self._image_data_and_size
        # Calculate the final width and height of the thumbnail.
        display_size = method.get_display_size(original_size, requested_size)
        return {
            "size": display_size
        }
    def save(self, storage, name, meta):
        """Saves this asset to the given storage."""
        method = self._method
        # Calculate sizes.
        display_size = meta["size"]
        image_data, original_size = self._image_data_and_size
        data_size = method.get_data_size(display_size, display_size.intersect(original_size))
        # Check whether we need to make a thumbnail.
        if data_size == original_size:
            # Nothing to resize — delegate to the plain asset save.
            super(ThumbnailAsset, self).save(storage, name, meta)
        else:
            # Use efficient image loading.
            image_data.draft(None, data_size)
            # Resize the image data.
            try:
                image_data = method.do_resize(image_data, original_size, display_size, data_size)
            except Exception as ex:  # HACK: PIL raises all sorts of Exceptions :(
                raise ThumbnailError(str(ex))
            # Parse the image format from the file extension, defaulting to PNG.
            _, extension = os.path.splitext(name)
            format = extension.lstrip(".").upper().replace("JPG", "JPEG") or "PNG"
            # If we're saving to PNG, make sure we're not in CMYK.
            if image_data.mode == "CMYK" and format == "PNG":
                image_data = image_data.convert("RGB")
            # If the storage has a path, then save it efficiently.
            try:
                thumbnail_path = storage.path(name)
            except NotImplementedError:
                # No path for the storage, so save it in a memory buffer.
                buffer = StringIO()
                try:
                    image_data.save(buffer, format)
                except Exception as ex:  # HACK: PIL raises all sorts of Exceptions :(
                    raise ThumbnailError(str(ex))
                # Write the file.
                buffer.seek(0, os.SEEK_END)
                buffer_length = buffer.tell()
                buffer.seek(0)
                file = File(buffer)
                file.size = buffer_length
                storage.save(name, file)
            else:
                # We can do an efficient streaming save.
                try:
                    os.makedirs(os.path.dirname(thumbnail_path))
                except OSError:
                    # Directory already exists (or cannot be created —
                    # the subsequent save will surface a real failure).
                    pass
                try:
                    image_data.save(thumbnail_path, format)
                except Exception as ex:  # HACK: PIL raises all sorts of Exceptions :(
                    try:
                        raise ThumbnailError(str(ex))
                    finally:
                        # Remove an incomplete file, if present.
                        try:
                            os.unlink(thumbnail_path)
                        except:
                            pass
def open_image(asset):
    """Opens the image represented by the given asset."""
    try:
        path = asset.get_path()
    except NotImplementedError:
        # Storage has no filesystem path: decode from an in-memory copy.
        return Image.open(StringIO(asset.get_contents()))
    return Image.open(path)
class Thumbnail(object):
    """A generated thumbnail."""
    def __init__(self, asset_cache, asset):
        """Initializes the thumbnail."""
        self._asset_cache = asset_cache
        self._asset = asset
        # Public name of the underlying asset.
        self.name = asset.get_name()
    @cached_property
    # NOTE(review): this @cached_property appears to have lost its function
    # body (presumably ``_asset_name_and_meta``, which the properties below
    # read); as written it stacks onto ``width``'s @property, which cannot
    # work. Recover the missing method from the original module.
    @property
    def width(self):
        """The width of the thumbnail."""
        return self._asset_name_and_meta[1]["size"][0]
    @property
    def height(self):
        """The height of the thumbnail."""
        return self._asset_name_and_meta[1]["size"][1]
    @property
    def url(self):
        """The URL of the thumbnail."""
        return self._asset_cache._storage.url(self._asset_name_and_meta[0])
    @property
    def path(self):
        """The path of the thumbnail."""
        return self._asset_cache._storage.path(self._asset_name_and_meta[0])
class ThumbnailCache(object):
    """A cache of thumbnailed images."""
    def __init__(self, asset_cache=default_asset_cache):
        """Initializes the thumbnail cache."""
        self._asset_cache = asset_cache
    def get_thumbnail(self, asset, width=None, height=None, method=PROPORTIONAL):
        """
        Returns a thumbnail of the given size.

        Either or both of width and height may be None, in which case the
        image's original size will be used.
        """
        # Resolve the named resize method.
        try:
            resize_method = _methods[method]
        except KeyError:
            raise ValueError("{method} is not a valid thumbnail method. Should be one of {methods}.".format(
                method = method,
                methods = ", ".join(_methods.keys())
            ))
        # Wrap the asset so plain files/paths are handled uniformly,
        # then build the thumbnail around it.
        wrapped_asset = AdaptiveAsset(asset)
        thumbnail_asset = ThumbnailAsset(wrapped_asset, width, height, resize_method)
        return Thumbnail(self._asset_cache, thumbnail_asset)
# The default thumbnail cache.
default_thumbnail_cache = ThumbnailCache()
| [
37811,
32,
12940,
329,
294,
10269,
6255,
4263,
526,
15931,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
198,
11748,
17268,
198,
11748,
25064,
198,
11748,
28686,
13,
6978,
198,
198,
28311,
25,
198,
220,
220,
220,... | 2.395887 | 4,620 |
import numpy as np
from models import Vector, Bounds
from data_info import TILE_TYPES, DUNGEON_DIMENSION
| [
11748,
299,
32152,
355,
45941,
201,
198,
6738,
4981,
1330,
20650,
11,
347,
3733,
201,
198,
201,
198,
6738,
1366,
62,
10951,
1330,
31598,
2538,
62,
9936,
47,
1546,
11,
360,
4944,
8264,
1340,
62,
35,
3955,
16938,
2849,
201,
198,
201,
... | 2.604651 | 43 |
#!/usr/bin/env python3
# ## ###############################################
#
# pwm.py
# Controls a 7-segments display using Raspberry Pi
# and a 74LS47 driver
#
# Author: Mauricio Matamoros
# License: MIT
#
# ## ###############################################
# Future imports (Python 2.7 compatibility)
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Import the Raspberry Pi GPIO control library
import RPi.GPIO as GPIO
# Import the sleep function from the time module
from time import sleep
# Disable warnings
# GPIO.setwarnings(False)
# Configure the library to use physical (board) pin numbering.
GPIO.setmode(GPIO.BOARD)
# Configure pins 36, 38, 40 and 37 as outputs, initialized low
GPIO.setup(36, GPIO.OUT, initial=GPIO.LOW)
GPIO.setup(38, GPIO.OUT, initial=GPIO.LOW)
GPIO.setup(40, GPIO.OUT, initial=GPIO.LOW)
GPIO.setup(37, GPIO.OUT, initial=GPIO.LOW)
# Maps bits onto the GPIO pins; any error (including EOF/Ctrl-C during
# input) ends the loop.
flag = True
while flag:
    try:
        # Prompt text is Spanish for "Enter an integer: " — runtime
        # string deliberately left untranslated.
        num = int(input("Ingrese número entero: "))
        # NOTE(review): bcd7 is not defined in this chunk — presumably a
        # stripped helper that drives the 74LS47 inputs; confirm it exists.
        bcd7(num)
    except:
        flag = False
    #end try
#end while
# Reset the GPIO ports (outputs revert to inputs)
GPIO.cleanup()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
22492,
1303,
29113,
7804,
4242,
2235,
198,
2,
198,
2,
279,
26377,
13,
9078,
198,
2,
36357,
257,
767,
12,
325,
11726,
3359,
1262,
24244,
13993,
198,
2,
290,
257,
8915,
6561,
... | 2.811321 | 424 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
4981,
11,
15720,
602,
628
] | 2.891892 | 37 |
import tensorflow as tf
import cv2
import numpy as np
from PIL import Image
import matplotlib.pyplot as plt
# Utility for image loading and preprocessing
# Load the low-resolution input image.
# NOTE(review): load_img and get_concat_h are not defined in this chunk —
# presumably stripped helpers; confirm they exist earlier in the file.
low_res_image = load_img('./cartoonized_image.jpg', None, './cartoonized_image.jpg')
# Load the TFLite super-resolution (ESRGAN) model
interpreter = tf.lite.Interpreter(model_path=f'./esrgan_dr.tflite')
interpreter.allocate_tensors()
# Set model input
input_details = interpreter.get_input_details()
interpreter.allocate_tensors()
# Invoke the interpreter to run inference
interpreter.set_tensor(input_details[0]['index'], low_res_image)
interpreter.invoke()
# Retrieve the enhanced (super-resolved) image
enhanced_img = interpreter.tensor(
    interpreter.get_output_details()[0]['index']
)()
# Clamp to valid pixel range and convert to an 8-bit RGB image.
a = tf.cast(tf.clip_by_value(enhanced_img[0], 0, 255), tf.uint8)
super_resolution_img = Image.fromarray(a.numpy(), 'RGB')
super_resolution_img = super_resolution_img.resize((512, 512))
# Show the input and the result side by side at the same display size.
down_sampled_image = Image.open('./cartoonized_image.jpg').resize((512, 512))
get_concat_h(down_sampled_image, super_resolution_img)
| [
11748,
11192,
273,
11125,
355,
48700,
198,
11748,
269,
85,
17,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
350,
4146,
1330,
7412,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
198,
2,
18274,
879,
329,
2939,
110... | 2.664975 | 394 |
# We iterate hexagonal numbers and check whether they are pentagonal.
# By definition, every hexagonal number is a triangle number, so any
# hexagonal-and-pentagonal number solves Project Euler 45.
import math


def is_pentagonal(x):
    """Return True if x is a pentagonal number P(n) = n(3n - 1) / 2.

    Inverting the formula, x is pentagonal iff (1 + sqrt(1 + 24x)) / 6
    is a positive integer.  math.isqrt keeps the test exact for
    arbitrarily large x, unlike the previous float sqrt / is_integer
    check, which can misclassify once 1 + 24x exceeds float precision.
    """
    d = 1 + 24 * x
    r = math.isqrt(d)
    return r * r == d and (1 + r) % 6 == 0


# H(143) = 40755 is the known previous match, so start searching at n = 144.
n_h = 144
h = n_h * (2 * n_h - 1)
while not is_pentagonal(h):
    n_h += 1
    h = n_h * (2 * n_h - 1)

print(h)

# Copyright Junipyr. All rights reserved.
# https://github.com/Junipyr
2,
775,
11629,
378,
17910,
27923,
3146,
290,
2198,
1771,
484,
389,
28145,
27923,
13,
201,
198,
2,
2750,
6770,
11,
790,
17910,
27923,
1271,
318,
257,
22950,
1271,
13,
201,
198,
201,
198,
11748,
10688,
201,
198,
201,
198,
201,
198,
77... | 2.354839 | 155 |
from django.conf import settings
from django.shortcuts import render
def handle_sphinx_doc_index(
        request,
        data=None):
    """handle_sphinx_doc_index

    Generic handler that sends the browser to the Sphinx
    documentation index:
    <repo>/webapp/drf_network_pipeline/docs/build/html/index.html

    :param request: HTTPRequest
    :param data: extra data (unused)
    """
    doc_index_template = settings.DEFAULT_DOC_INDEX_HTML
    return render(request, doc_index_template)
# end of handle_sphinx_doc_index
| [
6738,
42625,
14208,
13,
10414,
1330,
6460,
198,
6738,
42625,
14208,
13,
19509,
23779,
1330,
8543,
628,
198,
4299,
5412,
62,
82,
746,
28413,
62,
15390,
62,
9630,
7,
198,
220,
220,
220,
220,
220,
220,
220,
2581,
11,
198,
220,
220,
220... | 2.580311 | 193 |
"""
Test Object locations going down
"""
import logging
import time
from teuthology import misc as teuthology
from tasks import ceph_manager
from tasks.util.rados import rados
log = logging.getLogger(__name__)
def task(ctx, config):
"""
Test handling of object location going down
"""
if config is None:
config = {}
assert isinstance(config, dict), \
'lost_unfound task only accepts a dict for configuration'
first_mon = teuthology.get_first_mon(ctx, config)
(mon,) = ctx.cluster.only(first_mon).remotes.keys()
manager = ceph_manager.CephManager(
mon,
ctx=ctx,
logger=log.getChild('ceph_manager'),
)
while len(manager.get_osd_status()['up']) < 3:
time.sleep(10)
manager.wait_for_clean()
# something that is always there
dummyfile = '/etc/fstab'
# take 0, 1 out
manager.mark_out_osd(0)
manager.mark_out_osd(1)
manager.wait_for_clean()
# delay recovery, and make the pg log very long (to prevent backfill)
manager.raw_cluster_cmd(
'tell', 'osd.0',
'injectargs',
'--osd-recovery-delay-start 10000 --osd-min-pg-log-entries 100000000'
)
# delay recovery, and make the pg log very long (to prevent backfill)
manager.raw_cluster_cmd(
'tell', 'osd.1',
'injectargs',
'--osd-recovery-delay-start 10000 --osd-min-pg-log-entries 100000000'
)
# delay recovery, and make the pg log very long (to prevent backfill)
manager.raw_cluster_cmd(
'tell', 'osd.2',
'injectargs',
'--osd-recovery-delay-start 10000 --osd-min-pg-log-entries 100000000'
)
# delay recovery, and make the pg log very long (to prevent backfill)
manager.raw_cluster_cmd(
'tell', 'osd.3',
'injectargs',
'--osd-recovery-delay-start 10000 --osd-min-pg-log-entries 100000000'
)
# kludge to make sure they get a map
rados(ctx, mon, ['-p', 'data', 'put', 'dummy', dummyfile])
# create old objects
for f in range(1, 10):
rados(ctx, mon, ['-p', 'data', 'put', 'existing_%d' % f, dummyfile])
manager.mark_out_osd(3)
manager.wait_till_active()
manager.mark_in_osd(0)
manager.wait_till_active()
manager.flush_pg_stats([2, 0])
manager.mark_out_osd(2)
manager.wait_till_active()
# bring up 1
manager.mark_in_osd(1)
manager.wait_till_active()
manager.flush_pg_stats([0, 1])
log.info("Getting unfound objects")
unfound = manager.get_num_unfound_objects()
assert not unfound
manager.kill_osd(2)
manager.mark_down_osd(2)
manager.kill_osd(3)
manager.mark_down_osd(3)
manager.flush_pg_stats([0, 1])
log.info("Getting unfound objects")
unfound = manager.get_num_unfound_objects()
assert unfound
| [
37811,
198,
14402,
9515,
7064,
1016,
866,
198,
37811,
198,
11748,
18931,
198,
11748,
640,
198,
6738,
573,
1071,
1435,
1330,
12747,
355,
573,
1071,
1435,
198,
6738,
8861,
1330,
269,
27446,
62,
37153,
198,
6738,
8861,
13,
22602,
13,
6335,... | 2.258567 | 1,284 |
"""Schema v7
Revision ID: b76eab0a059
Revises: a695de64338
Create Date: 2016-12-07
"""
# revision identifiers, used by Alembic.
revision = 'b76eab0a059'
down_revision = 'a695de64338'
import os
import sys
sys.path.insert(1, os.path.abspath(os.path.join(os.path.dirname(__file__), *['..'] * 5)))
from alembic import op
from king_phisher.server.database import manager as db_manager
import sqlalchemy
| [
37811,
27054,
2611,
410,
22,
198,
198,
18009,
1166,
4522,
25,
275,
4304,
68,
397,
15,
64,
46712,
198,
18009,
2696,
25,
257,
37381,
2934,
2414,
28460,
198,
16447,
7536,
25,
1584,
12,
1065,
12,
2998,
198,
198,
37811,
198,
198,
2,
1844... | 2.606452 | 155 |
import os
from glob import glob
from setuptools import setup
package_name = 'openpose_ros2'
setup(
name=package_name,
version='1.0.0',
packages=[package_name],
data_files=[
('share/ament_index/resource_index/packages',
['resource/' + package_name]),
('share/' + package_name, ['package.xml']),
(os.path.join('share', package_name), glob('launch/*.py')),
],
install_requires=['setuptools'],
zip_safe=True,
maintainer='Interaction Lab.',
maintainer_email='is0436er@ed.ritsumei.ac.jp',
description='A ROS2 package that call the OpenPose from ROS2.',
license='MIT License',
tests_require=['pytest'],
entry_points={
'console_scripts': [
'openpose_ros2 = openpose_ros2.openpose_node:main',
],
},
)
| [
11748,
28686,
198,
6738,
15095,
1330,
15095,
198,
198,
6738,
900,
37623,
10141,
1330,
9058,
198,
198,
26495,
62,
3672,
796,
705,
9654,
3455,
62,
4951,
17,
6,
198,
198,
40406,
7,
198,
220,
220,
220,
1438,
28,
26495,
62,
3672,
11,
198... | 2.355491 | 346 |
#
# "Cascading CLI" pattern "time" CLI
#
# This CLI provides a "time" command for the main CLI.
import time
from abstrys.cli.common import CliCommand
class TimeCommand(CliCommand):
"""
A CLI command that returns the current time.
"""
def set_up_parser(self, parser):
"""
Set up the time command arguments.
"""
parser.add_argument("--timezone", "-tz",
nargs=1,
help="return the current time in the given time zone")
def run(self, args):
"""
Prints the time.
"""
vargs = vars(args)
if vargs['timezone'] != None:
import os
os.environ['TZ'] = vargs['timezone'][0]
time.tzset()
print("The current time is: " + str(time.ctime()))
| [
2,
220,
198,
2,
366,
34,
3372,
4980,
43749,
1,
3912,
366,
2435,
1,
43749,
198,
2,
198,
2,
770,
43749,
3769,
257,
366,
2435,
1,
3141,
329,
262,
1388,
43749,
13,
198,
198,
11748,
640,
198,
6738,
16552,
19753,
13,
44506,
13,
11321,
... | 2.240793 | 353 |
from .base import BaseModel
| [
6738,
764,
8692,
1330,
7308,
17633,
628
] | 4.142857 | 7 |
import os
import ssl
import cv2 as cv
import numpy as np
from distutils.util import strtobool
import aiohttp
from aiohttp import web
import jinja2
import aiohttp_jinja2
from camera import VideoCamera
# settings
DEBUG = strtobool(os.environ.get('DEBUG', 'True'))
PORT = int(os.environ.get('PORT', 8088))
SSL = strtobool(os.environ.get('SSL', 'True'))
SSL_CRT_PATH = os.environ.get('SSL_CRT_PATH', 'certificate/sslcert.crt')
SSL_KEY_PATH = os.environ.get('SSL_KEY_PATH', 'certificate/sslcert.key')
REVERSE_PROXY_WS_URL = os.environ.get('REVERSE_PROXY_WS_URL', 'wss://0.0.0.0:8088/ws')
JINJA2_TEMPLATES_DIR = 'templates'
routes = web.RouteTableDef()
@routes.get('/')
@aiohttp_jinja2.template('index.html')
@routes.get('/ws')
app = web.Application(debug=DEBUG)
app.add_routes(routes)
aiohttp_jinja2.setup(
app,
loader=jinja2.FileSystemLoader(JINJA2_TEMPLATES_DIR)
)
if __name__ == '__main__':
if SSL:
ssl_context = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
ssl_context.check_hostname = False
ssl_context.load_cert_chain(SSL_CRT_PATH, SSL_KEY_PATH)
web.run_app(app, port=PORT, ssl_context=ssl_context)
else:
web.run_app(app, port=PORT)
| [
11748,
28686,
198,
11748,
264,
6649,
198,
11748,
269,
85,
17,
355,
269,
85,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
1233,
26791,
13,
22602,
1330,
965,
83,
672,
970,
198,
198,
11748,
257,
952,
4023,
198,
6738,
257,
952,
4023,
... | 2.264045 | 534 |
# Copyright (c) 2018-2019, Krzysztof Rusek
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# author: Krzysztof Rusek, AGH
import tensorflow as tf
from tensorflow import keras
import numpy as np
import argparse
hparams = tf.contrib.training.HParams(
node_count=14,
link_state_dim=4,
path_state_dim=2,
T=3,
readout_units=8,
learning_rate=0.001,
batch_size=32,
dropout_rate=0.5,
l2=0.1,
l2_2=0.01,
learn_embedding=True, # If false, only the readout is trained
readout_layers=2, # number of hidden layers in readout model
)
def scale_fn(k, val):
'''Scales given feature
Args:
k: key
val: tensor value
'''
if k == 'traffic':
return (val-0.18)/.15
if k == 'capacities':
return val/10.0
return val
def parse(serialized, target=None, normalize=True):
'''
Target is the name of predicted variable-deprecated
'''
with tf.device("/cpu:0"):
with tf.name_scope('parse'):
#TODO add feature spec class
features = tf.io.parse_single_example(
serialized,
features={
'traffic':tf.VarLenFeature(tf.float32),
'delay':tf.VarLenFeature(tf.float32),
'logdelay':tf.VarLenFeature(tf.float32),
'jitter':tf.VarLenFeature(tf.float32),
'drops':tf.VarLenFeature(tf.float32),
'packets':tf.VarLenFeature(tf.float32),
'capacities':tf.VarLenFeature(tf.float32),
'links':tf.VarLenFeature(tf.int64),
'paths':tf.VarLenFeature(tf.int64),
'sequences':tf.VarLenFeature(tf.int64),
'n_links':tf.FixedLenFeature([],tf.int64),
'n_paths':tf.FixedLenFeature([],tf.int64),
'n_total':tf.FixedLenFeature([],tf.int64)
})
for k in ['traffic','delay','logdelay','jitter','drops','packets','capacities','links','paths','sequences']:
features[k] = tf.sparse.to_dense( features[k] )
if normalize:
features[k] = scale_fn(k, features[k])
#return {k:v for k,v in features.items() if k is not target },features[target]
return features
def serving_input_receiver_fn():
"""
This is used to define inputs to serve the model.
returns: ServingInputReceiver
"""
receiver_tensors = {
'capacities': tf.placeholder(tf.float32, [None]),
'traffic': tf.placeholder(tf.float32, [None]),
'links': tf.placeholder(tf.int32, [None]),
'paths': tf.placeholder(tf.int32, [None]),
'sequences': tf.placeholder(tf.int32, [None]),
'n_links': tf.placeholder(tf.int32, []),
'n_paths':tf.placeholder(tf.int32, []),
}
# Convert give inputs to adjust to the model.
features = {k: scale_fn(k,v) for k,v in receiver_tensors.items() }
return tf.estimator.export.ServingInputReceiver(receiver_tensors=receiver_tensors,
features=features)
if __name__ == '__main__':
main()
| [
2,
15069,
357,
66,
8,
2864,
12,
23344,
11,
13685,
89,
893,
89,
1462,
69,
371,
1904,
74,
198,
2,
1439,
2489,
10395,
13,
198,
198,
2,
2297,
396,
3890,
290,
779,
287,
2723,
290,
13934,
5107,
11,
351,
393,
1231,
198,
2,
17613,
11,
... | 2.344914 | 1,986 |
"""Code to calculate the mean and standard deviation of dataset.
Author: Srinivasan G
"""
import os
from PIL import Image
import numpy as np
import time
def find_mean_standard_deviation(image_dir):
"""Return the mean and standard deviation of dataset.
Mean and Standard Deviation required for image normalization.
Args:
image_dir: Input directory containing all image
Returns:
None
Raises:
No Exception
"""
since = time.time()
n = 0
s = np.zeros(3)
sq = np.zeros(3)
#data_dir = os.chdir(image_dir)
image_folders = os.listdir(image_dir)
print(f'Sub-folders: {image_folders}')
for sub_dir in image_folders:
temp = image_dir + sub_dir
for image_name in os.listdir(temp):
if image_name.endswith(".jpg"):
img = Image.open(temp + "/" + image_name)
x = np.array(img)/255
s += x.sum(axis=(0, 1))
sq += np.sum(np.square(x), axis=(0, 1))
n += x.shape[0]*x.shape[1]
mean = s/n
std_deviation = np.sqrt((sq/n - np.square(mean)))
print(f'Mean: {mean}')
print(f'Std: {std_deviation}')
# print(mean, sq/n, std_deviation, n)
time_elapsed = time.time() - since
print('Processing completed in {:.0f}m {:.0f}s'.format(
time_elapsed // 60, time_elapsed % 60))
| [
37811,
10669,
284,
15284,
262,
1612,
290,
3210,
28833,
286,
27039,
13,
198,
198,
13838,
25,
311,
12769,
38630,
272,
402,
198,
37811,
198,
11748,
28686,
198,
6738,
350,
4146,
1330,
7412,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
64... | 2.217742 | 620 |
import cv2
import os
import keras_ocr
import numpy as np
import pytesseract
import easyocr
import matplotlib.pyplot as plt
| [
11748,
269,
85,
17,
198,
11748,
28686,
198,
11748,
41927,
292,
62,
1696,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
12972,
83,
408,
263,
529,
198,
11748,
2562,
1696,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
1... | 2.396552 | 58 |
"""
https://portswigger.net/web-security/information-disclosure/exploiting/lab-infoleak-via-backup-files
""" | [
37811,
198,
5450,
1378,
634,
2032,
15249,
13,
3262,
14,
12384,
12,
12961,
14,
17018,
12,
6381,
17966,
14,
20676,
78,
1780,
14,
23912,
12,
10745,
2305,
461,
12,
8869,
12,
1891,
929,
12,
16624,
198,
37811
] | 2.918919 | 37 |
# trafcapEthernetPacket.py
#
# Copyright (c) 2013 Protectus,LLC. All Rights Reserved.
#
# Classes to help pull data off the wire and update mongo
import subprocess
import time
from trafcap import trafcap
from datetime import datetime
import traceback
import sys
class EthernetPacket(object):
"""
Parent class for handling non-IP packets
"""
@classmethod
# Legend for how packet data is stored in Info dictionaries
# src dst
# [addr, bytes], [addr, bytes]
i_src=0; i_dst=1
i_addr=0; i_bytes=1; i_pkt=2
i_tb=2; i_te=3; i_pkts=4; i_ci=5; i_proto=6
i_msg=7
i_ldwt=8 # last_db_write_time
i_csldw=9 # changed_since_last_db_write
i_id=10 # mongo object id
i_vl=11 # vlan id
# Legend for how data is stored in the Session Bytes dictionary
# and the Capture Bytes dictionary
b_key=0; b_src=0; b_dst=1; b_msg=2; b_vl=3
b_sb=1; b_se=2;
b_array=3; b_offset=0; b_bytes1=1; b_bytes2=2
b_pkts=4
b_ldwt=5 # last_db_write_time
b_csldw=6 # changed_since_last_db_write
# src, dst, msg, ??
capture_dict_key = ('0', b'0', b'', None)
# Legend for Group dictionary data structure:
g_src=0; g_b1=1
g_dst=2; g_b2=3
g_msg=4
g_tbm=5; g_tem=6
g_ns=7; g_ne=8
g_b=9; g_offset=0; g_1=1; g_2=2
g_pkts=10
g_proto=11
g_id=12
g_vl=13 # vlan id
#@classmethod
#def buildCriteriaDoc(pc, ci, si, a_info):
# session_criteria = {"s":a_info[ci][pc.i_addr],
# "d":a_info[si][pc.i_addr],
# "m":a_info[pc.i_msg],
# "tbm":trafcap.secondsToMinute(a_info[pc.i_tb]),
# "tem":{'$gte':trafcap.secondsToMinute(a_info[pc.i_tb])}}
# return session_criteria
#@classmethod
#def buildInfoDoc(pc, ci, si, a_info):
# return
@classmethod
@classmethod
@classmethod
@classmethod
@classmethod
@classmethod
@classmethod
@classmethod
@classmethod
@classmethod
@classmethod
@classmethod
#@classmethod
#def initializeCaptureInfo(pc):
# return
class OtherPacket(EthernetPacket):
"""
For handling Other packets
"""
# Legend for Other packet data list returned by the parse method:
p_src=0; p_dst=1
p_addr=0; p_bytes=1; p_pkts=2
p_etime=2
p_proto=3
p_msg=4
p_ci=5
p_vl=6
# TCP & UDP based protocols that should not appear in Other traffic
leaked_protos_to_ignore = [
'BROWSER',
'CLDAP',
'DHCP',
'DNS',
'HTTP',
'ICMP',
'IPv4',
'LLMNR',
'MDNS',
'NBNS',
'NTP',
'OCSP',
'SSDP',
'SSL',
'SSLv2',
'TCP',
'TLSv1',
'TLSv1.2',
'UDP',
'QUIC',
]
@classmethod
@classmethod
@classmethod
| [
2,
1291,
69,
11128,
36,
490,
3262,
47,
8317,
13,
9078,
198,
2,
198,
2,
15069,
357,
66,
8,
2211,
21916,
385,
11,
3069,
34,
13,
220,
1439,
6923,
33876,
13,
198,
2,
198,
2,
38884,
284,
1037,
2834,
1366,
572,
262,
6503,
290,
4296,
... | 1.674988 | 2,083 |
'''
Copyright (C) 2021 CG Cookie
http://cgcookie.com
hello@cgcookie.com
Created by Jonathan Denning, Jonathan Williamson
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
DEBUG_COLOR_CLEAN = False
DEBUG_PROPERTY = 'style' # selector, style, content, size, layout, blocks
DEBUG_COLOR = 1 # 0:time since change, 1:time of change
DEBUG_DIRTY = False
DEBUG_LIST = False
CACHE_METHOD = 2 # 0:none, 1:only root, 2:hierarchical, 3:text leaves
ASYNC_IMAGE_LOADING = True
| [
7061,
6,
198,
15269,
357,
34,
8,
33448,
29925,
39606,
198,
4023,
1378,
66,
36484,
18055,
13,
785,
198,
31373,
31,
66,
36484,
18055,
13,
785,
198,
198,
41972,
416,
11232,
5601,
768,
11,
11232,
34974,
628,
220,
220,
220,
770,
1430,
31... | 3.026882 | 372 |
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Messages used by extra tabs pages."""
__author__ = 'Todd Larsen (tlarsen@google.com)'
EXTRA_TABS_DESCRIPTION = """
Extra tabs appear on the course navbar.
"""
EXTRA_TABS_TITLE_DESCRIPTION = """
This is the name of this tab displayed on the course navbar.
"""
EXTRA_TAB_POSITION_DESCRIPTION = """
This indicates if this tab is right or left aligned. Tabs aligned on the same
side are displayed in the order added here.
"""
EXTRA_TABS_VISIBILITY_DESCRIPTION = """
This indicates if this tab is visible to everyone or only registered students.
"""
EXTRA_TABS_URL_DESCRIPTION = """
If a URL is provided, this tab will link to that URL. Otherwise, it will
display the "Tab Content" in a page. Links to other sites must start with
"http" or "https".
"""
EXTRA_TABS_CONTENT_DESCRIPTION = """
This content will be displayed on a page accessed from the tab. If the
"Tab URL" is provided, that will be used instead.
"""
| [
2,
15069,
1853,
3012,
3457,
13,
1439,
6923,
33876,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
... | 3.546729 | 428 |
# works 100%
Size = [4,3,2,1,5]
Dir = [0,1,0,0,0]
print(solution(Size, Dir)) | [
2,
2499,
1802,
4,
198,
198,
10699,
796,
685,
19,
11,
18,
11,
17,
11,
16,
11,
20,
60,
198,
35277,
796,
685,
15,
11,
16,
11,
15,
11,
15,
11,
15,
60,
198,
197,
628,
198,
4798,
7,
82,
2122,
7,
10699,
11,
36202,
4008
] | 1.76087 | 46 |
#!/usr/bin/env python
# Copyrights. All rights reserved.
# ECOLE POLYTECHNIQUE FEDERALE DE LAUSANNE, Switzerland,
# Space Center (eSpace), 2018
# See the LICENSE.TXT file for more details.
"""Script performs benchmarking on flyingthings3D.
Benchmarking in performed with maximum disparity of 191 on
960 x 540 full-size images.
Depending on the parameters, the benchmarking can be performed
using "psm" or "crl" protocol. "psm" protocol is described in
"Pyramid stereo matching network" by Jia-Ren Chang et al.
"crl" protocol is described in "Cascade Residual Learning:
A Two-stage Convolutional Neural Network for Stereo Matching"
by Jiahao Pang. According to the "crl" protocol examples where more
than "maximum_percentage_of_large_disparities"=25% of pixels have
disparity larger than "large_disparity"=300 pixels are excluded
from the evaluation. Note, that according to both protocols pixels
with ground truth disparity larger than maximum_disparity=192 are
excluded from evaluation, since network this is a largest disparity
that network can produce.
Optionally, the user can pass to the script:
"dataset_folder" with flyinghtings3d dataset;
"experiment_folder" where experiment results are be saved;
"checkpoint_file" with checkpoint that will be loaded
to perform evaluation training.
"is_psm_protocol" if this flag is set than evaluation is performed
according to "psm" protocol, otherwise "crl" protocol
is used.
Example call:
./benchmark_on_flyingthings3d.py \
--experiment_folder experiments/flyingthings3d \
--dataset_folder datasets/flyingthings3d \
--checkpoint_file experiments/flyingthings3d/003_checkpoint.bin \
--is_psm_protocol
"""
import os
import click
from torch.utils import data
from practical_deep_stereo import flyingthings3d_dataset
from practical_deep_stereo import network
from practical_deep_stereo import pds_trainer
@click.command()
@click.option(
'--dataset_folder',
default='datasets/flyingthings3d',
type=click.Path(exists=True))
@click.option(
'--experiment_folder',
default='experiments/flyingthings3d_benchmarking',
type=click.Path(exists=False))
@click.option(
'--checkpoint_file',
default='experiments/flyingthings3d/010_checkpoint.bin',
type=click.Path(exists=True))
@click.option('--is_psm_protocol', is_flag=True)
if __name__ == '__main__':
benchmark_on_flyingthings3d()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
6955,
49158,
13,
1439,
2489,
10395,
13,
198,
2,
39031,
2538,
20634,
56,
51,
25994,
22125,
48,
8924,
376,
1961,
1137,
21358,
5550,
9131,
2937,
1565,
12161,
11,
14679,
11,
198,
2,
4... | 3.08091 | 791 |
from datasets.dataset import Dataset
from torchvision import datasets, transforms
import torch
import random
| [
198,
6738,
40522,
13,
19608,
292,
316,
1330,
16092,
292,
316,
198,
6738,
28034,
10178,
1330,
40522,
11,
31408,
198,
11748,
28034,
198,
11748,
4738,
628,
628,
198
] | 4.071429 | 28 |
from __future__ import print_function
from types import SimpleNamespace
from unittest import mock
import collections
import functools
import io
import lasagne
import numpy as np
import pytest
from .. import drivers
from .. import execution
from .. import recorders
from ..execution import DataTables, DataStore
from ..lasagne_toppings import param_file
from ..run.gan import init_driver
from ..ssnode import DEFAULT_PARAMS
HistoryRecord = collections.namedtuple('HistoryRecord', ['args', 'result'])
@pytest.mark.parametrize('iterations', range(1, 4))
@pytest.mark.parametrize('repeats, shifts, last_shift', [
([9], [+1], +1),
([9], [+1], -1),
([0, 5, 4], [-1, +1, -1], +1),
([1, 5, 3], [-1, +1, -1], +1),
([2, 5, 2], [-1, +1, -1], +1),
([3, 5, 1], [-1, +1, -1], +1),
([4, 5, 0], [-1, +1, -1], +1),
])
| [
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
198,
198,
6738,
3858,
1330,
17427,
36690,
10223,
198,
6738,
555,
715,
395,
1330,
15290,
198,
11748,
17268,
198,
11748,
1257,
310,
10141,
198,
11748,
33245,
198,
198,
11748,
39990,
21080,
198,... | 2.586103 | 331 |
# Copyright (c) 2019 Intel Corporation
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest.mock import MagicMock, patch
from wca.nodes import Task
from wca.config import register
from wca.extra.static_allocator import StaticAllocator
from tests.tester.tester import IntegrationTester, FileCheck, MetricCheck
@patch('tests.tester.tester._delete_cgroup')
@patch('tests.tester.tester._create_cgroup')
@patch('sys.exit')
| [
2,
15069,
357,
66,
8,
13130,
8180,
10501,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
198,
2,
921,
743,... | 3.541985 | 262 |
# -*- coding: utf-8 -*-
{
'name': 'Product Email Template',
'depends': ['account'],
'author': 'OpenERP SA',
'category': 'Accounting & Finance',
'description': """
Add email templates to products to be send on invoice confirmation
==================================================================
With this module, link your products to a template to send complete information and tools to your customer.
For instance when invoicing a training, the training agenda and materials will automatically be sent to your customers.'
""",
'website': 'https://www.odoo.com',
'demo': [
'data/product_demo.xml',
],
'data': [
'views/product_view.xml',
'views/email_template_view.xml',
],
'installable': True,
'auto_install': False,
}
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
90,
198,
220,
220,
220,
705,
3672,
10354,
705,
15667,
9570,
37350,
3256,
198,
220,
220,
220,
705,
10378,
2412,
10354,
37250,
23317,
6,
4357,
198,
220,
220,
220,
7... | 3.041825 | 263 |
import numpy as np
WALL_LEN = 20.0
NUM_PATHS = 8.0
WALL_FROM_CENTER = 8.0
TARGET_DIST = 25.0
xml_block = '<body mocap=\"true\" pos=\"{x} {y} 1\" euler="0 0 {z_rot}\">\n\
\t<geom type=\"box\" size=\"{wall_len} 0.5 2\" group=\"1\" condim=\"3\" conaffinity=\"1\"/>\n\
</body>'
increments = 2*np.pi / NUM_PATHS
half_increments = np.pi / NUM_PATHS
angles = np.linspace(0.0, 2*np.pi, num=NUM_PATHS, endpoint=False)
for a in angles:
left_and_right = []
for inc in [-half_increments, half_increments]:
xy = np.array([np.cos(a+inc), np.sin(a+inc)]) * WALL_FROM_CENTER
left_and_right.append(
xy + np.array([np.cos(a), np.sin(a)]) * WALL_LEN
)
xy += np.array([np.cos(a), np.sin(a)]) * WALL_LEN / 2.0
print(
xml_block.format(
**{
'x': '%.2f' % xy[0],
'y': '%.2f' % xy[1],
'z_rot': '%.2f' % (a * 360.0 / (2*np.pi)),
'wall_len': WALL_LEN / 2.0
}
)
)
end_block_len = np.linalg.norm(left_and_right[0] - left_and_right[1]) + 1.0
mid_point = sum(left_and_right) / 2.0
print(
xml_block.format(
**{
'x': '%.2f' % mid_point[0],
'y': '%.2f' % mid_point[1],
'z_rot': '%.2f' % ((a + np.pi/2.0) * 360.0 / (2*np.pi)),
'wall_len': end_block_len / 2.0
}
)
)
# print('\n')
for i, a in enumerate(angles):
xy = np.array([np.cos(a), np.sin(a)]) * TARGET_DIST
print(
"<site name=\"target{num}\" pos=\"{x} {y} .01\" rgba=\"0.75 0 0.75 1\" type=\"sphere\" size=\"1\"/>".format(
num=i,
x='%.2f' % xy[0],
y='%.2f' % xy[1]
)
)
| [
11748,
299,
32152,
355,
45941,
198,
198,
54,
7036,
62,
43,
1677,
796,
1160,
13,
15,
198,
41359,
62,
47,
1404,
7998,
796,
807,
13,
15,
198,
54,
7036,
62,
10913,
2662,
62,
43960,
1137,
796,
807,
13,
15,
198,
51,
46095,
62,
35,
880... | 1.687736 | 1,060 |
from abc import ABC, abstractmethod
from typing import List
from mpmath import eye, mpc, matrix
from mpmath import expm as mp_expm
from numpy import ndarray, identity
from numpy.linalg import matrix_power
from scipy.linalg import expm as scipy_expm
from .matrices import Matrix
class TrotterFirstOrder(ProductFormula):
r"""Approximate e^{-i \sum H_j t} to first order.
Computes e^{-i \sum H_j t} as (\prod_{j=1}^J e^{-i H_j t/m})^m.
"""
def __init__(self, steps: int = 0) -> None:
"""
Parameters
----------
steps: int
Number of timesteps taken (m).
"""
self.m = steps
def set_steps(self, steps: int) -> None:
"""
Parameters
----------
steps: int
Number of timesteps taken (m).
"""
self.m = steps
def __call__(self, time: float, matrices: List[Matrix]) -> Matrix:
r"""Approximate e^{-i * sum(matrices) * time} to first order.
No optimization for len(matrices) == 2 (symmetric Strang splitting).
"""
self._ensure_dimensions(matrices)
result = self._eye(matrices)
if self.m == 0:
return result
t_prime = mpc(time) / self.m
for H in matrices:
result = result @ self._expm(-1j * t_prime, H)
return self._matpow(result, self.m)
class TrotterSecondOrder(ProductFormula):
r"""Approximate e^{-i \sum H_j t} to second order.
Computes e^{-i \sum H_j t} as
((\prod_{j=1}^J e^{-i H_j t/2m})(\prod_{j=J}^1 e^{-i H_j t/2m}))^m.
"""
def __init__(self, steps: int = 0) -> None:
"""
Parameters
----------
steps: int
Number of timesteps taken (m).
"""
self.m = steps
def set_steps(self, steps: int) -> None:
"""
Parameters
----------
steps: int
Number of timesteps taken (m).
"""
self.m = steps
def __call__(self, time: float, matrices: List[Matrix]) -> Matrix:
r"""Approximate e^{-i * sum(matrices) * time} to second order.
No optimization for len(matrices) == 2 (symmetric Strang splitting).
"""
self._ensure_dimensions(matrices)
result = self._eye(matrices)
if self.m == 0:
return result
if len(matrices) == 1:
return self._expm(-1j * time * matrices[0])
t_prime = mpc(time) / (2 * self.m)
result = result @ self._expm(-2j * t_prime, matrices[0])
for H in matrices[1:-1]:
result = result @ self._expm(-1j * t_prime, H)
result = result @ self._expm(-2j * t_prime, matrices[-1])
for H in reversed(matrices[1:-1]):
result = result @ self._expm(-1j * t_prime, H)
return (self._expm(1j * t_prime, matrices[0]) @
self._matpow(result, self.m) @
self._expm(-1j * t_prime, matrices[0]))
class ExactMatrixExponential(ProductFormula):
r"""Compute e^{-i \sum H_j t} exactly"""
def set_steps(self, steps: int = 0) -> None:
"""Exact exponentiation doesn't use steps, arguments are ignored"""
pass
def __call__(self, time: float, matrices: List[Matrix]) -> Matrix:
r"""Compute e^{-i * sum(matrices) * time} exactly by summing up matrices.
"""
self._ensure_dimensions(matrices)
return self._expm(-1j * time, sum(matrices))
| [
6738,
450,
66,
1330,
9738,
11,
12531,
24396,
198,
6738,
19720,
1330,
7343,
198,
198,
6738,
285,
4426,
776,
1330,
4151,
11,
285,
14751,
11,
17593,
198,
6738,
285,
4426,
776,
1330,
1033,
76,
355,
29034,
62,
1069,
4426,
198,
6738,
299,
... | 2.155334 | 1,603 |
# -*- coding: utf-8 -*-
# Copyright 2019 Cohesity Inc.
class SQLServerInstanceVersion(object):
"""Implementation of the 'SQLServerInstanceVersion' model.
Specifies the Server Instance Version.
Attributes:
build (int): Specfies the build.
major_version (int): Specfies the major version.
minor_version (int): Specfies the minor version.
revision (int): Specfies the revision.
version_string (string): Specfies the version string.
"""
# Create a mapping from Model property names to API property names
_names = {
"build":'build',
"major_version":'majorVersion',
"minor_version":'minorVersion',
"revision":'revision',
"version_string":'versionString'
}
def __init__(self,
build=None,
major_version=None,
minor_version=None,
revision=None,
version_string=None):
"""Constructor for the SQLServerInstanceVersion class"""
# Initialize members of the class
self.build = build
self.major_version = major_version
self.minor_version = minor_version
self.revision = revision
self.version_string = version_string
@classmethod
def from_dictionary(cls,
dictionary):
"""Creates an instance of this model from a dictionary
Args:
dictionary (dictionary): A dictionary representation of the object as
obtained from the deserialization of the server's response. The keys
MUST match property names in the API description.
Returns:
object: An instance of this structure class.
"""
if dictionary is None:
return None
# Extract variables from the dictionary
build = dictionary.get('build')
major_version = dictionary.get('majorVersion')
minor_version = dictionary.get('minorVersion')
revision = dictionary.get('revision')
version_string = dictionary.get('versionString')
# Return an object of this model
return cls(build,
major_version,
minor_version,
revision,
version_string)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
15069,
13130,
1766,
956,
414,
3457,
13,
628,
198,
4871,
49747,
6561,
18497,
33384,
14815,
7,
15252,
2599,
628,
220,
220,
220,
37227,
3546,
32851,
286,
262,
705,
50,
... | 2.385254 | 963 |
print("Entre com o numero: ")
num=int(input())
print("Fatorial de ", num, " igual a:", fat(num))
| [
198,
4798,
7203,
14539,
260,
401,
267,
997,
3529,
25,
366,
8,
198,
22510,
28,
600,
7,
15414,
28955,
198,
197,
198,
4798,
7203,
37,
21592,
390,
33172,
997,
11,
366,
45329,
723,
257,
25,
1600,
3735,
7,
22510,
4008,
198
] | 2.439024 | 41 |
from AutoPoem.Model import *
from config import Config
if __name__ == '__main__':
model = PoetryModel(Config)
for i in range(3):
# 给出第一句话进行预测
sen = model.predict_sen('山为斜好几,')
print(sen)
| [
6738,
11160,
18833,
368,
13,
17633,
1330,
1635,
198,
6738,
4566,
1330,
17056,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
2746,
796,
7695,
11973,
17633,
7,
16934,
8,
198,
220,
220,
220,
329,... | 1.774194 | 124 |
# SPDX-FileCopyrightText: 2021 Division of Intelligent Medical Systems, DKFZ
# SPDX-FileCopyrightText: 2021 Janek Groehl
# SPDX-License-Identifier: MIT
import unittest
from simpa.core.device_digital_twins import RSOMExplorerP50
from simpa.core.device_digital_twins import InVision256TF
from simpa.core.device_digital_twins import MSOTAcuityEcho
from simpa.core.device_digital_twins import PhotoacousticDevice, LinearArrayDetectionGeometry, \
PencilArrayIlluminationGeometry
| [
2,
30628,
55,
12,
8979,
15269,
8206,
25,
33448,
7458,
286,
49452,
8366,
11998,
11,
32975,
37,
57,
198,
2,
30628,
55,
12,
8979,
15269,
8206,
25,
33448,
2365,
988,
10299,
17231,
75,
198,
2,
30628,
55,
12,
34156,
12,
33234,
7483,
25,
... | 3.228188 | 149 |
import numpy as np
import tensorflow as tf
from ammf.core import format_checker
from ammf.core import box_3d_encoder
def np_box_3d_to_box_8co(box_3d):
"""Computes the 3D bounding box corner positions from Box3D format.
The order of corners are preserved during this conversion.
Args:
box_3d: 1 x 7 ndarray of box_3d in the format
[x, y, z, l, w, h, ry]
Returns:
corners_3d: An ndarray or a tensor of shape (3 x 8) representing
the box as corners in the following format ->
[[x1,...,x8], [y1...,y8], [z1,...,z8]].
"""
format_checker.check_box_3d_format(box_3d)
ry = box_3d[6]
# Compute transform matrix
# This includes rotation and translation
rot = np.array([[np.cos(ry), 0, np.sin(ry), box_3d[0]],
[0, 1, 0, box_3d[1]],
[-np.sin(ry), 0, np.cos(ry), box_3d[2]]])
length = box_3d[3]
width = box_3d[4]
height = box_3d[5]
# 3D BB corners
x_corners = np.array([length / 2, length / 2,
-length / 2, -length / 2,
length / 2, length / 2,
-length / 2, -length / 2])
y_corners = np.array([0.0, 0.0, 0.0, 0.0,
-height, -height, -height, -height])
z_corners = np.array([width / 2, -width / 2,
-width / 2, width / 2,
width / 2, -width / 2,
-width / 2, width / 2])
# Create a ones column
ones_col = np.ones(x_corners.shape)
# Append the column of ones to be able to multiply
box_8c = np.dot(rot, np.array([x_corners,
y_corners,
z_corners,
ones_col]))
# Ignore the fourth column
box_8c = box_8c[0:3]
return box_8c
def tf_box_3d_to_box_8co(boxes_3d):
"""Computes the 3D bounding box corner positions from Box3D format.
The order of corners are preserved during this conversion.
Args:
boxes_3d: N x 7 tensor of box_3d in the format
[x, y, z, l, w, h, ry]
Returns:
corners_3d: An ndarray or a tensor of shape (N x 3 x 8) representing
the box as corners in following format -> [[[x1,...,x8],[y1...,y8],
[z1,...,z8]]].
"""
format_checker.check_box_3d_format(boxes_3d)
all_rys = boxes_3d[:, 6]
ry_sin = tf.sin(all_rys)
ry_cos = tf.cos(all_rys)
zeros = tf.zeros_like(all_rys, dtype=tf.float32)
ones = tf.ones_like(all_rys, dtype=tf.float32)
# Rotation matrix
rot_mats = tf.stack([tf.stack([ry_cos, zeros, ry_sin], axis=1),
tf.stack([zeros, ones, zeros], axis=1),
tf.stack([-ry_sin, zeros, ry_cos], axis=1)],
axis=2)
length = boxes_3d[:, 3]
width = boxes_3d[:, 4]
height = boxes_3d[:, 5]
half_length = length / 2
half_width = width / 2
x_corners = tf.stack([half_length, half_length,
-half_length, -half_length,
half_length, half_length,
-half_length, -half_length], axis=1)
y_corners = tf.stack([zeros, zeros, zeros, zeros,
-height, -height, -height, -height], axis=1)
z_corners = tf.stack([half_width, -half_width,
-half_width, half_width,
half_width, -half_width,
-half_width, half_width], axis=1)
corners = tf.stack([x_corners,
y_corners,
z_corners], axis=1)
boxes_8c = tf.matmul(rot_mats, corners,
transpose_a=True,
transpose_b=False)
# Translate the corners
corners_3d_x = boxes_8c[:, 0] + tf.reshape(boxes_3d[:, 0], (-1, 1))
corners_3d_y = boxes_8c[:, 1] + tf.reshape(boxes_3d[:, 1], (-1, 1))
corners_3d_z = boxes_8c[:, 2] + tf.reshape(boxes_3d[:, 2], (-1, 1))
boxes_8c = tf.stack([corners_3d_x,
corners_3d_y,
corners_3d_z], axis=1)
return boxes_8c
def np_box_3d_to_box_8c(box_3d):
"""Computes the 3D bounding box corner positions from box_3d format.
This function does not preserve corners order but rather the corners
are rotated to the nearest 90 degree angle. This helps in calculating
the closest corner to corner when comparing the corners to the ground-
truth boxes.
Args:
box_3d: ndarray of size (7,) representing box_3d in the format
[x, y, z, l, w, h, ry]
Returns:
corners_3d: An ndarray or a tensor of shape (3 x 8) representing
the box as corners in following format -> [[x1,...,x8],[y1...,y8],
[z1,...,z8]].
"""
format_checker.check_box_3d_format(box_3d)
# This function is vectorized and returns an ndarray
anchor = box_3d_encoder.box_3d_to_anchor(box_3d, ortho_rotate=True)[0]
centroid_x = anchor[0]
centroid_y = anchor[1]
centroid_z = anchor[2]
dim_x = anchor[3]
dim_y = anchor[4]
dim_z = anchor[5]
half_dim_x = dim_x / 2
half_dim_z = dim_z / 2
# 3D BB corners
x_corners = np.array([half_dim_x, half_dim_x,
-half_dim_x, -half_dim_x,
half_dim_x, half_dim_x,
-half_dim_x, -half_dim_x])
y_corners = np.array([0.0, 0.0, 0.0, 0.0,
-dim_y, -dim_y, -dim_y, -dim_y])
z_corners = np.array([half_dim_z, -half_dim_z,
-half_dim_z, half_dim_z,
half_dim_z, -half_dim_z,
-half_dim_z, half_dim_z])
ry = box_3d[6]
# Find nearest 90 degree
half_pi = np.pi / 2
ortho_ry = np.round(ry / half_pi) * half_pi
# Find rotation to make the box ortho aligned
ry_diff = ry - ortho_ry
# Compute transform matrix
# This includes rotation and translation
rot = np.array([[np.cos(ry_diff), 0, np.sin(ry_diff), centroid_x],
[0, 1, 0, centroid_y],
[-np.sin(ry_diff), 0, np.cos(ry_diff), centroid_z]])
# Create a ones column
ones_col = np.ones(x_corners.shape)
# Append the column of ones to be able to multiply
box_8c = np.dot(rot, np.array([x_corners,
y_corners,
z_corners,
ones_col]))
# Ignore the fourth column
box_8c = box_8c[0:3]
return box_8c
def tf_box_3d_to_box_8c(boxes_3d):
"""Computes the 3D bounding box corner positions from box_3d format.
This function does not preserve corners order during conversion from
box_3d -> box_8c. Instead of using the box_3d's orientation, 'ry',
nearest 90 degree angle is selected to create an axis-aligned box.
This helps in calculating the closest corner to corner when comparing
the corners to the ground-truth boxes.
Args:
boxes_3d: N x 7 tensor of box_3d in the format
[x, y, z, l, w, h, ry]
Returns:
corners_3d: A tensor of shape (N x 3 x 8) representing
the box as corners in following format -> [[[x1,...,x8],[y1...,y8],
[z1,...,z8]]].
"""
format_checker.check_box_3d_format(boxes_3d)
anchors = box_3d_encoder.tf_box_3d_to_anchor(boxes_3d)
centroid_x = anchors[:, 0]
centroid_y = anchors[:, 1]
centroid_z = anchors[:, 2]
dim_x = anchors[:, 3]
dim_y = anchors[:, 4]
dim_z = anchors[:, 5]
all_rys = boxes_3d[:, 6]
# Find nearest 90 degree
half_pi = np.pi / 2
ortho_rys = tf.round(all_rys / half_pi) * half_pi
ry_diff = all_rys - ortho_rys
ry_sin = tf.sin(ry_diff)
ry_cos = tf.cos(ry_diff)
zeros = tf.zeros_like(ry_diff, dtype=tf.float32)
ones = tf.ones_like(ry_diff, dtype=tf.float32)
# Rotation matrix
rot_mats = tf.stack([tf.stack([ry_cos, zeros, ry_sin], axis=1),
tf.stack([zeros, ones, zeros], axis=1),
tf.stack([-ry_sin, zeros, ry_cos], axis=1)],
axis=2)
half_dim_x = dim_x / 2
half_dim_z = dim_z / 2
x_corners = tf.stack([half_dim_x, half_dim_x,
-half_dim_x, -half_dim_x,
half_dim_x, half_dim_x,
-half_dim_x, -half_dim_x], axis=1)
y_corners = tf.stack([zeros, zeros, zeros, zeros,
-dim_y, -dim_y, -dim_y, -dim_y], axis=1)
z_corners = tf.stack([half_dim_z, -half_dim_z,
-half_dim_z, half_dim_z,
half_dim_z, -half_dim_z,
-half_dim_z, half_dim_z], axis=1)
corners = tf.stack([x_corners,
y_corners,
z_corners], axis=1)
boxes_8c = tf.matmul(rot_mats, corners,
transpose_a=True,
transpose_b=False)
# Translate the corners
corners_3d_x = boxes_8c[:, 0] + tf.reshape(centroid_x, (-1, 1))
corners_3d_y = boxes_8c[:, 1] + tf.reshape(centroid_y, (-1, 1))
corners_3d_z = boxes_8c[:, 2] + tf.reshape(centroid_z, (-1, 1))
boxes_8c = tf.stack([corners_3d_x,
corners_3d_y,
corners_3d_z], axis=1)
return boxes_8c
def align_boxes_8c(boxes_8c):
"""Finds the min/max of each corner to align irregular corners.
In the case where the regressed corners might be skewed, it tries to
align each face corners of the box to line up, resulting to an aligned
3D box shape. It finds the min/max of corners for each axis, and re-assigns
the corners. Note this assumes *certain order* of corners.
Args:
boxes_8c: An ndarray or a tensor of shape (N x 3 x 8) representing
the box corners.
Returns
aligned_boxes_8c: An ndarray or a tensor of shape (N x 3 x 8)
representing the box corners.
"""
format_checker.check_box_8c_format(boxes_8c)
x_corners = boxes_8c[:, 0]
y_corners = boxes_8c[:, 1]
z_corners = boxes_8c[:, 2]
min_x = tf.reduce_min(x_corners, axis=1)
##########################
# X-Corners P3, P4, P7, P8
##########################
corner_x3 = min_x
corner_x4 = min_x
corner_x7 = min_x
corner_x8 = min_x
##########################
# X-Corners P1, P2, P5, P6
##########################
max_x = tf.reduce_max(x_corners, axis=1)
corner_x1 = max_x
corner_x2 = max_x
corner_x5 = max_x
corner_x6 = max_x
##########################
# Z-Corners P2, P3, P6, P7
##########################
min_z = tf.reduce_min(z_corners, axis=1)
corner_z2 = min_z
corner_z3 = min_z
corner_z6 = min_z
corner_z7 = min_z
##########################
# Z-Corners P1, P4, P5, P6
##########################
max_z = tf.reduce_max(z_corners, axis=1)
corner_z1 = max_z
corner_z4 = max_z
corner_z5 = max_z
corner_z8 = max_z
##########################
# Y-Corners P1, P2, P3, P4
##########################
# Take the max of the four top y-corners
# This is because y-axis is facing downwards
corner_max_y = tf.reduce_max(y_corners, axis=1)
corner_y1 = corner_y2 = corner_y3 = corner_y4 = corner_max_y
##########################
# Y-Corners P5, P6, P7, P8
##########################
# Take the min of the four bottom y-corners
corner_min_y = tf.reduce_min(y_corners, axis=1)
corner_y5 = corner_y6 = corner_y7 = corner_y8 = corner_min_y
x_corners = tf.stack([corner_x1, corner_x2, corner_x3,
corner_x4, corner_x5, corner_x6,
corner_x7, corner_x8], axis=1)
y_corners = tf.stack([corner_y1, corner_y2, corner_y3,
corner_y4, corner_y5, corner_y6,
corner_y7, corner_y8], axis=1)
z_corners = tf.stack([corner_z1, corner_z2, corner_z3,
corner_z4, corner_z5, corner_z6,
corner_z7, corner_z8], axis=1)
aligned_boxes_8c = tf.stack([x_corners, y_corners, z_corners], axis=1)
return aligned_boxes_8c
def box_8c_to_box_3d(box_8c):
"""Computes the 3D bounding box corner positions from 8 corners.
To go back from 8-corner representation to box3D, we need to reverse
the transformation done in 'box_3d_to_box_8c'. The first thing we need
is orientation, this is estimated by calculating the midpoints of
P1 -> P2 and P3 -> P4. Connecting these midpoints, results to a vector
which gives us the direction of the corners. However note that y-axis
is facing downwards and hence we negate this orientation.
Next we calculate the centroids by taking the average of four corners
for x and z axes. We then translate the centroids back to the origin
and then multiply by the rotation matrix, however now we are rotating
the opposite direction, so the angle signs are reversed. After rotation
we can translate the corners back however, there is one additional step
before translation. Since we plan to regress corners, it is expected
for the corners to be skewed, i.e. resulting to non-rectangular shapes.
Hence we attempt to align the corners (by min/maxing the corners and
aligning them by the min and max values for each corner. After this step
we can translate back, and calculate length, width and height.
Args:
box_8c: An ndarray or a tensor of shape (N x 3 x 8) representing
the box corners.
Returns:
corners_3d: An ndarray or a tensor of shape (3 x 8) representing
the box as corners in this format -> [[x1,...,x8],[y1...,y8],
[z1,...,z8]].
"""
format_checker.check_box_8c_format(box_8c)
#######################
# calculate orientation
#######################
x_corners = box_8c[:, 0]
y_corners = box_8c[:, 1]
z_corners = box_8c[:, 2]
x12_midpoint = (x_corners[:, 0] + x_corners[:, 1]) / 2
z12_midpoint = (z_corners[:, 0] + z_corners[:, 1]) / 2
x34_midpoint = (x_corners[:, 2] + x_corners[:, 3]) / 2
z34_midpoint = (z_corners[:, 2] + z_corners[:, 3]) / 2
# We use the midpoints to get a vector to figure out
# the orientation
delta_x = x12_midpoint - x34_midpoint
delta_z = z12_midpoint - z34_midpoint
# negate the orientation since y is downwards
rys = -tf.atan2(delta_z, delta_x)
# Calcuate the centroid by averaging four corners
center_x = tf.reduce_mean(x_corners[:, 0:4], axis=1)
center_z = tf.reduce_mean(z_corners[:, 0:4], axis=1)
# Translate the centroid to the origin before rotation
translated_x = box_8c[:, 0] - tf.reshape(center_x, (-1, 1))
translated_z = box_8c[:, 2] - tf.reshape(center_z, (-1, 1))
# The sign for the angle needs to be flipped because we
# want to rotate back i.e. reverse rotation op we did during
# transforming box_3d -> box_8c
ry_sin = tf.sin(-rys)
ry_cos = tf.cos(-rys)
zeros = tf.zeros_like(rys, dtype=tf.float32)
ones = tf.ones_like(rys, dtype=tf.float32)
rotation_mats = tf.stack([
tf.stack([ry_cos, zeros, ry_sin], axis=1),
tf.stack([zeros, ones, zeros], axis=1),
tf.stack([-ry_sin, zeros, ry_cos], axis=1)], axis=2)
corners = tf.stack([translated_x,
y_corners,
translated_z], axis=1)
# Rotate the corners
corners_3d = tf.matmul(rotation_mats, corners,
transpose_a=True,
transpose_b=False)
# Align the corners in case they are skewed
aligned_corners = align_boxes_8c(corners_3d)
# Translate the corners back
aligned_corners_x = aligned_corners[:, 0] + tf.reshape(center_x, (-1, 1))
aligned_corners_z = aligned_corners[:, 2] + tf.reshape(center_z, (-1, 1))
new_x_corners = aligned_corners_x
new_y_corners = aligned_corners[:, 1]
new_z_corners = aligned_corners_z
x_b_right = new_x_corners[:, 1]
x_b_left = new_x_corners[:, 2]
z_b_left = new_z_corners[:, 2]
z_t_left = new_z_corners[:, 3]
corner_y1 = new_y_corners[:, 0]
corner_y5 = new_y_corners[:, 4]
length = x_b_right - x_b_left
width = z_t_left - z_b_left
height = corner_y1 - corner_y5
# Re-calculate the centroid
center_x = tf.reduce_mean(new_x_corners[:, 0:4], axis=1)
center_z = tf.reduce_mean(new_z_corners[:, 0:4], axis=1)
center_y = corner_y1
box_3d = tf.stack([center_x, center_y, center_z,
length, width, height, rys], axis=1)
return box_3d
def tf_box_8c_to_offsets(boxes_8c,
boxes_8c_gt):
"""Converts corner boxes to corner offsets.
It subtracts the ground-truth box corners from the predicted corners
and normalizes the offsets by the diagonal of the proposed boxes.
Args:
boxes_8c: A tensor of shape (N x 3 x 8) representing the box corners.
boxes_8c_gt: A tensor of shape (N x 3 x 8) representing the box
corners ground-truth.
Returns:
A tensor of dim (N x 3 x 8) representing the offsets.
"""
# Get the diagonal of the boxes
diagonals = tf_box_8c_diagonal_length(boxes_8c)
offsets = tf.subtract(boxes_8c_gt, boxes_8c)
# Reshape the offsets to a (24 x N) vector
reshaped_offsets = tf.reshape(offsets, (24, -1))
ones = tf.ones_like(reshaped_offsets)
# This gives diagonals of shape (24 x N)
# This now enables us to divide acorss N batches
diagonals_mult = tf.multiply(ones, diagonals)
# Normalize the offsets by the box_8c diagonal
offsets_norm = tf.divide(reshaped_offsets, diagonals_mult)
reshaped_offsets_norm = tf.reshape(offsets_norm,
[-1, 3, 8])
return reshaped_offsets_norm
def tf_offsets_to_box_8c(boxes_8c,
offsets):
"""Converts corner ofsets to box corners.
It multiplies the diagonals with the offsets and then adds it back
to the box corners.
Args:
box_8c: A tensor of shape (N x 3 x 8) representing the box corners.
offsets: A tensor vector of shape (N x 3 x 8) representing the corner
offsets.
Returns:
A tensor of dim (N x 3 x 8) representing the corners.
"""
# Get the diagonal of the boxes
diagonals = tf_box_8c_diagonal_length(boxes_8c)
# Reshape the offsets to a (24 x N) vector
reshaped_offsets = tf.reshape(offsets, (24, -1))
ones = tf.ones_like(reshaped_offsets)
# This gives diagonals of shape (24 x N)
diagonals_mult = tf.multiply(ones, diagonals)
offsets_back = tf.multiply(reshaped_offsets, diagonals_mult)
reshaped_offsets_back = tf.reshape(offsets_back,
[-1, 3, 8])
# Multiply the offsets by the normalization factor i.e. diagonals
return tf.add(reshaped_offsets_back, boxes_8c)
def tf_box_8c_diagonal_length(boxes_8c):
"""Returns the diagonal lengths of box_8c
Args:
boxes_3d: An tensor of shape (N x 3 x 8) of boxes in box_8c
format.
Returns:
Diagonal of all boxes, a tensor of (N,) shape.
"""
# Grab two opposite corners
p1 = boxes_8c[:, :, 0]
p7 = boxes_8c[:, :, 6]
x_diffs = tf.square((p1[:, 0] - p7[:, 0]))
y_diffs = tf.square((p1[:, 1] - p7[:, 1]))
z_diffs = tf.square((p1[:, 2] - p7[:, 2]))
return tf.sqrt(x_diffs + y_diffs + z_diffs)
| [
11748,
299,
32152,
355,
45941,
198,
11748,
11192,
273,
11125,
355,
48700,
198,
198,
6738,
716,
76,
69,
13,
7295,
1330,
5794,
62,
9122,
263,
198,
6738,
716,
76,
69,
13,
7295,
1330,
3091,
62,
18,
67,
62,
12685,
12342,
628,
198,
4299,
... | 2.072832 | 9,515 |
"""Table Model."""
from config.database import Model
from orator.orm import belongs_to
class Table(Model):
"""Table Model."""
__fillable__ = ['user_id', 'oppo_id', 'token', 'move', 'completed',
'last_move_timestamp', 'next_id', 'owner', 'winner', 'msg']
__table__ = 'chesses'
@belongs_to('user_id', 'email')
| [
37811,
10962,
9104,
526,
15931,
198,
198,
6738,
4566,
13,
48806,
1330,
9104,
198,
6738,
393,
1352,
13,
579,
1330,
14448,
62,
1462,
628,
198,
4871,
8655,
7,
17633,
2599,
198,
220,
220,
220,
37227,
10962,
9104,
526,
15931,
198,
220,
220... | 2.450704 | 142 |
'''
Plot functions to graphically present simulation results
'''
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.lines as mlines
from mpl_toolkits.axes_grid1 import host_subplot
import mpl_toolkits.axisartist as AA
def setup_plots(suptitle):
'''
Basic setup of plots so it can be reused on plot functions
Parameters
----------
suptitle: string
Description of the plot that will appear on the top
Returns
-------
Figure and axis matplotlib structs
'''
plt.rc('font', family='serif')
plt.rc('font', size=44)
plt.rc('xtick', labelsize='x-small')
plt.rc('ytick', labelsize='x-small')
plt.ticklabel_format(style='sci', axis='y', scilimits=(0,0), useMathText=True)
fig, ax = plt.subplots(1, 1, figsize=(16, 12))
# fig.suptitle(suptitle)
# for item in ([ax.title, ax.xaxis.label, ax.yaxis.label]):
# item.set_fontsize(30)
# for item in (ax.get_xticklabels() + ax.get_yticklabels()):
# item.set_fontsize(26)
# item.set_fontweight("normal")
# font = {'weight' : 'normal'}
# matplotlib.rc('font', **font)
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
# Provide tick lines across the plot to help viewers trace along
# the axis ticks.
plt.grid(True, 'major', 'y', ls='--', lw=.5, c='k', alpha=.3)
# Remove the tick marks; they are unnecessary with the tick lines we just
# plotted.
plt.tick_params(axis='both', which='both', bottom=True, top=False,
labelbottom=True, left=False, right=False, labelleft=True)
return fig, ax
def plot_b_converging(b_converging, params):
'''
Plot the data each user is trying to offload till convergence
Parameters
----------
b_converging: 2-d array
Contains on each row the amount of data each user is trying to offload. Each row is
a different iteration
Returns
-------
Plot
'''
result = b_converging
# Each row on the transposed matrix contains the data the user offloads
# in each iteration. Different rows mean different user.
result = np.transpose(result)
suptitle = "Data each user is trying to offload in each iteration"
if params["ONE_FIGURE"] == False:
fig, ax = setup_plots(suptitle)
for index, row in enumerate(result):
# # display only some of the users on the plot
# if index%11 == 0:
# line = plt.plot(row, lw=4)
line = plt.plot(row, '-', lw=2, color='0.5')
average = np.mean(result, axis=0)
line = plt.plot(average, '-', lw=4, color='black')
plt.xlabel('iterations', fontweight='normal')
plt.ylabel('Amount of Offloaded Data [bits]', fontweight='normal')
plt.ticklabel_format(style='sci', axis='y', scilimits=(7,7), useMathText=True)
grey_lines = mlines.Line2D([], [], lw = 2, color='0.5', label='each user')
black_line = mlines.Line2D([], [], lw = 4, color='k', label='average')
plt.legend(handles=[grey_lines, black_line], loc=1, prop={'size': 24})
path_name = "b_converging"
if params["SAVE_FIGS"] == True and params["ONE_FIGURE"] == False:
plt.savefig("plots/" + path_name + ".png")
else:
plt.show(block=False)
def plot_expected_utility_converging(expected_utility_converging, params):
'''
Plot the expected utility of each user till convergence
Parameters
----------
expected_utility_converging: 2-d array
Contains on each row the expected utility of each user. Each row is
a different iteration
Returns
-------
Plot
'''
result = expected_utility_converging
# Each row on the transposed matrix contains the data the user offloads
# in each iteration. Different rows mean different user.
result = np.transpose(result)
suptitle = "Expected utility of each user in each iteration"
if params["ONE_FIGURE"] == False:
fig, ax = setup_plots(suptitle)
for index, row in enumerate(result):
# # display only some of the users on the plot
# if index%11 == 0:
# line = plt.plot(row, lw=4)
line = plt.plot(row, '-', lw=2, color='0.5')
average = np.mean(result, axis=0)
line = plt.plot(average, '-', lw=4, color='k')
plt.xlabel('iterations', fontweight='normal')
plt.ylabel("User's Expected Utility", fontweight='normal')
grey_lines = mlines.Line2D([], [], lw = 2, color='0.5', label='each user')
black_line = mlines.Line2D([], [], lw = 4, color='k', label='average')
plt.legend(handles=[grey_lines, black_line], loc=1, prop={'size': 24})
path_name = "expected_utility"
if params["SAVE_FIGS"] == True and params["ONE_FIGURE"] == False:
plt.savefig("plots/" + path_name + ".png")
else:
plt.show(block=False)
def plot_pricing_converging(pricing_converging, params):
'''
Plot the pricing set for each user till convergence
Parameters
----------
pricing_converging: 2-d array
Contains on each row the pricing for each user. Each row is
a different iteration
Returns
-------
Plot
'''
result = pricing_converging
# Each row on the transposed matrix contains the data the user offloads
# in each iteration. Different rows mean different user.
result = np.transpose(result)
suptitle = "Pricing each user in each iteration"
if params["ONE_FIGURE"] == False:
fig, ax = setup_plots(suptitle)
plt.ticklabel_format(style='sci', axis='y', scilimits=(7,7), useMathText=True)
for index, row in enumerate(result):
# # display only some of the users on the plot
# if index%11 == 0:
# line = plt.plot(row, lw=4)
line = plt.plot(row, '-', lw=2, color='0.5')
average = np.mean(result, axis=0)
line = plt.plot(average, '-', lw=4, color='k')
plt.xlabel('iterations', fontweight='normal')
plt.ylabel('Pricing', fontweight='normal')
grey_lines = mlines.Line2D([], [], lw = 2, color='0.5', label='each user')
black_line = mlines.Line2D([], [], lw = 4, color='k', label='average')
plt.legend(handles=[grey_lines, black_line], loc=1, prop={'size': 24})
path_name = "pricing"
if params["SAVE_FIGS"] == True and params["ONE_FIGURE"] == False:
plt.savefig("plots/" + path_name + ".png")
else:
plt.show(block=False)
def plot_PoF_converging(PoF_converging, params):
'''
Plot the probability of failure of MEC server till convergence
Parameters
----------
PoF_converging: 1-d array
Contains the probability of failure of the MEC server in each iteration
Returns
-------
Plot
'''
result = PoF_converging
# Each row on the transposed matrix contains the data the user offloads
# in each iteration. Different rows mean different user.
result = np.transpose(result)
suptitle = "Probability of failure of MEC server in each iteration"
if params["ONE_FIGURE"] == False:
fig, ax = setup_plots(suptitle)
line = plt.plot(result, '--', lw=4, color='k')
plt.xlabel('iterations', fontweight='normal')
plt.ylabel('PoF', fontweight='normal')
path_name = "PoF"
if params["SAVE_FIGS"] == True and params["ONE_FIGURE"] == False:
plt.savefig("plots/" + path_name + ".png")
else:
plt.show(block=False)
def plot_expected_utility_and_pricing_converging(expected_utility_converging, pricing_converging, params):
'''
Plot the average explitic utility and pricing of users till convergence
Parameters
----------
expected_utility_converging: 2-d array
Contains on each row the expected utility of each user. Each row is
a different iteration
pricing_converging: 2-d array
Contains on each row the pricing for each user. Each row is
a different iteration
Returns
-------
Plot
'''
# colors = ['k', '0.5']
# line_types = ['--', ':']
colors = ['darkorange', 'darkgreen']
line_types = ['-', '-']
result1 = expected_utility_converging
result2 = pricing_converging
# Each row on the transposed matrix contains the data the user offloads
# in each iteration. Different rows mean different user.
result1 = np.transpose(result1)
result2 = np.transpose(result2)
suptitle = "Average expected utility and expected pricing for each user in each iteration"
if params["ONE_FIGURE"] == False:
plt.rc('font', family='serif')
plt.rc('font', size=44)
plt.rc('xtick', labelsize='x-small')
plt.rc('ytick', labelsize='x-small')
plt.ticklabel_format(style='sci', axis='y', scilimits=(0,0), useMathText=True)
fig = plt.figure(figsize=(16,12))
host = host_subplot(111, axes_class=AA.Axes)
plt.subplots_adjust(right=0.85)
ax2 = host.twinx() # instantiate a second axes that shares the same x-axis
ax2.axis["right"].toggle(all=True)
ax2.ticklabel_format(style='sci', axis='y', scilimits=(7,7), useMathText=True)
average1 = np.mean(result1, axis=0)
line1, = host.plot(average1, line_types[0], lw=4, color=colors[0], label='expected utility')
host.set_xlabel('iterations', fontweight='normal')
host.set_ylabel('Average Expected Utility', fontweight='normal')
ax2.set_ylabel('Average Pricing', fontweight='normal')
average2 = np.mean(result2, axis=0)
line2, = ax2.plot(average2, line_types[1], lw=4, color=colors[1], label="pricing")
# host.axis["left"].label.set_color(line1.get_color())
# ax2.axis["right"].label.set_color(line2.get_color())
host.legend(loc=1, prop={'size': 24})
path_name = "expected_utility_and_pricing"
if params["SAVE_FIGS"] == True and params["ONE_FIGURE"] == False:
plt.savefig("plots/" + path_name + ".png")
else:
plt.show(block=False)
| [
7061,
6,
198,
43328,
5499,
284,
4823,
1146,
1944,
18640,
2482,
198,
7061,
6,
198,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
2603,
29487,
8019,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
11748,
2603,
29487,
... | 2.54982 | 3,894 |
import requests
from .base_wrapper import ApiWrapperBase, ApiUrls, RequestParms
| [
11748,
7007,
198,
6738,
764,
8692,
62,
48553,
1330,
5949,
72,
36918,
2848,
14881,
11,
5949,
72,
16692,
7278,
11,
19390,
47,
8357,
628,
198
] | 3.28 | 25 |
from comments.serializers import CommentSerializer
from django.http import Http404
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import status
from classifier.classification import Classification
from classifier.classification_json import JSONClassificationResponse
class CommentsSentiment(APIView):
"""
Get comments sentiment polarity
"""
def get(self, request, format=None):
'''
Single comment sentiment classification:
QUERY_PARAMS:
comment: string comment
classifier_type: 'SVM' or 'MNB'
classes: 2 or 5
'''
comment = request.QUERY_PARAMS.get('comment', 'comentario no encontrado :(')
classifier_type = request.QUERY_PARAMS.get('classifier_type','SVM')
no_classes = int(request.QUERY_PARAMS.get('no_classes', 5))
classify = Classification()
sentiment = classify.classify_comment(comment, classifier_type=classifier_type, no_classes=no_classes)
serializer = CommentSerializer()
serialized = serializer.serialize({comment:sentiment})
return Response(serialized)
def post(self, request, format=None):
'''
Multiple comment sentiment classification:
JSON_FORMAT:
See JSON_FORMAT_for_REST_Service.txt for details on input/output json format
'''
serializer = CommentSerializer()
print type(request.DATA)
json_response = JSONClassificationResponse()
deserialized = serializer.deserialize(request.DATA)
if deserialized:
return Response(json_response.classification_response(request.DATA), status=status.HTTP_200_OK)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
| [
6738,
3651,
13,
46911,
11341,
1330,
18957,
32634,
7509,
198,
6738,
42625,
14208,
13,
4023,
1330,
367,
29281,
26429,
198,
6738,
1334,
62,
30604,
13,
33571,
1330,
3486,
3824,
769,
198,
6738,
1334,
62,
30604,
13,
26209,
1330,
18261,
198,
6... | 2.702096 | 668 |
import os
import sys
from os.path import join as pjoin
sys.path.append(os.path.join(os.path.dirname(__file__), ".."))
import logging
from PIL import Image
import numpy as np
import torch
import torch.nn.functional as F
from torch.utils import data
from torchvision import transforms
logger = logging.getLogger("Logger")
| [
11748,
28686,
198,
11748,
25064,
198,
6738,
28686,
13,
6978,
1330,
4654,
355,
279,
22179,
198,
198,
17597,
13,
6978,
13,
33295,
7,
418,
13,
6978,
13,
22179,
7,
418,
13,
6978,
13,
15908,
3672,
7,
834,
7753,
834,
828,
366,
492,
48774,... | 3.26 | 100 |
'''
Created on Feb 27, 2011
@author: Blodstone
'''
from nltk.corpus import wordnet, stopwords
from nltk.tokenize import word_tokenize
import string
import time
from operator import itemgetter
from IOPreprocessing.DataPreparation import DataRetrieval
#definitely false but for testing sake
if __name__ == "__main__":
lesk = AdaptedLesk(6)
start = time.time()
tokenized = [ "What", "is", "the", "scientific", "name", "for", "elephant" ]
target= 'elephant'
tagged = { "What" : "WP", "name" : "NN", "for" : "IN", "is" : "VBZ", "elephant" : "NN", "the" : "DT", "scientific" : "JJ" }
a = wordnet.synset(lesk.wsd(tokenized,target,tagged))
print a.definition, str(a)
finish = time.time()- start
print str(finish)
#print str(lesk.oSL("the youngest member of a group (not necessarily young)", "(physics) a manifestation of energy; the transfer of energy from one physical system to another expressed as the product of a force and the distance through which it moves a body in the direction of that force")) | [
7061,
6,
201,
198,
41972,
319,
3158,
2681,
11,
2813,
201,
198,
201,
198,
31,
9800,
25,
1086,
375,
6440,
201,
198,
7061,
6,
201,
198,
201,
198,
6738,
299,
2528,
74,
13,
10215,
79,
385,
1330,
1573,
3262,
11,
2245,
10879,
201,
198,
... | 2.676399 | 411 |
from __future__ import absolute_import
from PyQt4.QtGui import QToolBar, QLabel, QPixmap, QApplication, QCursor
from PyQt4 import QtGui, QtCore
from PyQt4.QtCore import pyqtSlot, Qt
from views.core import centraltabwidget
from gui.wellplot.subplots.wellplotwidget import WellPlotWidget
from globalvalues.appsettings import AppSettings
from gui.wellplot.settings.templatesettingsdialog import TemplateSettingsDialog
from gui.signals.wellplotsignals import WellPlotSignals
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
try:
_encoding = QtGui.QApplication.UnicodeUTF8
except AttributeError:
import logging
logger = logging.getLogger('console')
__Instance = None
# LogSettingsToolbar Singleton
| [
198,
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
198,
198,
6738,
9485,
48,
83,
19,
13,
48,
83,
8205,
72,
1330,
1195,
25391,
10374,
11,
1195,
33986,
11,
1195,
47,
844,
8899,
11,
1195,
23416,
11,
36070,
21471,
198,
6738,
9485,
48,... | 3.049793 | 241 |
from flask import Flask, jsonify, request
from flask_cors import CORS
from pymongo import MongoClient
from db_access import WorkshopDb
from config_manager import ConfigManager
from task01_connect import Task01_Connect
from task02_static import Task02_StaticIp
from task03_webserver import Task03_WebServer
from task04_api import Task04_ApiResponse
from task05_temperature import Task05_ApiResponse
from task06_time import Task06_ApiResponse
from task07_lights import Task07_Lights
from task08_tv import Task08_TvControl
from task09_ac import Task09_AcControl
from task10_screen import Task10_Screen
app = Flask(__name__)
CORS(app)  # allow cross-origin requests (frontend served from another origin)

# MongoDB connection used for workshop group/task bookkeeping.
db_client = MongoClient('mongodb://localhost:27017/')
db_handle = db_client['esp8266_workshop']
groups_collection = db_handle['groups']
tasks_collection = db_handle['tasks']

# Ordered curriculum: one checker class per workshop task.
tasks_classes = [
    Task01_Connect,
    Task02_StaticIp,
    Task03_WebServer,
    Task04_ApiResponse,
    Task05_ApiResponse,
    Task06_ApiResponse,
    Task07_Lights,
    Task08_TvControl,
    Task09_AcControl,
    Task10_Screen
]
# Instantiate one checker object per class, preserving curriculum order.
tasks_objects = [t() for t in tasks_classes]

config = ConfigManager()
@app.route('/')
@app.route('/tasks')
@app.route('/groups')
@app.route('/group/<group_id>')
@app.route('/test/<task_id>', methods=['POST'])
@app.route('/skip/<task_id>', methods=['POST'])
@app.route('/bonus/<bonus_id>', methods=['POST'])
@app.route('/reload_yaml')
if __name__ == '__main__':
    # start the server
    # Bind on all interfaces so devices on the workshop LAN can reach it.
    app.run(debug=False, host="0.0.0.0")
| [
6738,
42903,
1330,
46947,
11,
33918,
1958,
11,
2581,
198,
6738,
42903,
62,
66,
669,
1330,
327,
20673,
198,
6738,
279,
4948,
25162,
1330,
42591,
11792,
198,
6738,
20613,
62,
15526,
1330,
26701,
43832,
198,
6738,
4566,
62,
37153,
1330,
17... | 2.722936 | 545 |
## This file is part of Scapy
## See http://www.secdev.org/projects/scapy for more informations
## Copyright (C) Philippe Biondi <phil@secdev.org>
## This program is published under a GPLv2 license
"""Clone of Nmap's first generation OS fingerprinting.
This code works with the first-generation OS detection and
nmap-os-fingerprints, which has been removed from Nmap on November 3,
2007 (https://github.com/nmap/nmap/commit/50c49819), which means it is
outdated.
To get the last published version of this outdated fingerprint
database, you can fetch it from
<https://raw.githubusercontent.com/nmap/nmap/9efe1892/nmap-os-fingerprints>.
"""
from __future__ import absolute_import
import os
import re
from scapy.data import KnowledgeBase
from scapy.config import conf
from scapy.arch import WINDOWS
from scapy.error import warning
from scapy.layers.inet import IP, TCP, UDP, ICMP, UDPerror, IPerror
from scapy.packet import NoPayload
from scapy.sendrecv import sr
from scapy.compat import *
import scapy.modules.six as six
# Locate the (legacy) first-generation fingerprint database per platform.
if WINDOWS:
    # Default Nmap install location on Windows.
    conf.nmap_base = os.environ["ProgramFiles"] + "\\nmap\\nmap-os-fingerprints"
else:
    conf.nmap_base = "/usr/share/nmap/nmap-os-fingerprints"

######################
## nmap OS fp stuff ##
######################

# One fingerprint database line has the shape "name(signature)".
_NMAP_LINE = re.compile('^([^\\(]*)\\(([^\\)]*)\\)$')
class NmapKnowledgeBase(KnowledgeBase):
    """A KnowledgeBase specialized in Nmap first-generation OS
    fingerprints database. Loads from conf.nmap_base when self.filename is
    None.
    """
    # NOTE(review): no methods are visible in this excerpt; the parsing and
    # lookup behavior presumably lives on the KnowledgeBase base class or
    # was elided here -- confirm against the full scapy module.

# Module-level singleton; None filename means "use conf.nmap_base".
nmap_kdb = NmapKnowledgeBase(None)
@conf.commands.register
def nmap_fp(target, oport=80, cport=81):
    """nmap fingerprinting
    nmap_fp(target, [oport=80,] [cport=81,]) -> list of best guesses with accuracy
    """
    # Probe the target (open port / closed port), then rank the collected
    # signatures against the fingerprint database in one step.
    return nmap_search(nmap_sig(target, oport, cport))
@conf.commands.register
| [
2235,
770,
2393,
318,
636,
286,
1446,
12826,
198,
2235,
4091,
2638,
1378,
2503,
13,
2363,
7959,
13,
2398,
14,
42068,
14,
1416,
12826,
329,
517,
4175,
602,
198,
2235,
15069,
357,
34,
8,
39393,
347,
295,
10989,
1279,
28864,
31,
2363,
... | 2.970636 | 613 |
from eval import load_model
import torch
import matplotlib.pyplot as plt
from torchvision import transforms
from torch.utils.data import DataLoader
from PIL import Image
import cv2
import argparse
import os
if __name__ == "__main__":
    # Prefer the first CUDA device when available, otherwise fall back to CPU.
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    print(f"Using {device}")

    args = parse_opt()
    model = load_model(device, args.model_path)

    # makedirs(..., exist_ok=True) also creates missing parent directories
    # and avoids the check-then-create race of `if not exists: mkdir`.
    os.makedirs(args.out_path, exist_ok=True)

    Inference(device, model, args.img_path, args.out_path)
    print(f"Predicted images are saved in: {args.out_path}")
6738,
5418,
1330,
3440,
62,
19849,
198,
11748,
28034,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
6738,
28034,
10178,
1330,
31408,
198,
6738,
28034,
13,
26791,
13,
7890,
1330,
6060,
17401,
198,
6738,
350,
4146,
13... | 2.715556 | 225 |
import copy
if __name__ == '__main__':
    # Read the puzzle input once; `with` guarantees the handle is closed
    # (the original opened the file twice and never closed either handle).
    with open('input/08') as fh:
        raw_lines = fh.readlines()
    # Each call gets its own freshly-stripped list, as before, so neither
    # parser can observe the other's mutations.
    print(parse_and_execute([x.strip() for x in raw_lines]))
    print(parse_modify_and_execute([x.strip() for x in raw_lines]))
11748,
4866,
628,
628,
628,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
3601,
7,
29572,
62,
392,
62,
41049,
26933,
87,
13,
36311,
3419,
329,
2124,
287,
1280,
10786,
15414,
14,
2919,
27691,
... | 2.65 | 80 |
import os
from flask_script import Manager
from flask_migrate import Migrate, MigrateCommand
from app import app, db
# Pick the Flask configuration (e.g. development/production) from the env.
env_name = os.getenv('FLASK_ENV')
app = app(env_name)  # `app` is an application factory; rebind to the instance

# Wire Alembic migrations and the CLI manager to the app and SQLAlchemy db.
migrate = Migrate(app=app, db=db)
manager = Manager(app=app)
manager.add_command('db', MigrateCommand)  # exposes `python manage.py db ...`

if __name__ == '__main__':
    manager.run()
11748,
28686,
198,
6738,
42903,
62,
12048,
1330,
9142,
198,
6738,
42903,
62,
76,
42175,
1330,
337,
42175,
11,
337,
42175,
21575,
198,
6738,
598,
1330,
598,
11,
20613,
198,
198,
24330,
62,
3672,
796,
28686,
13,
1136,
24330,
10786,
3697,
... | 2.824561 | 114 |
#!/usr/bin/python
# coding=utf-8
#__author__ = 'CY'
from flask import render_template, request, flash,abort,redirect,url_for
from flask.ext.login import login_required, current_user
from . import main
from .. import db
from sqlalchemy import and_,desc,or_
from app.models import users,backhosts,customers,backarchives,config,backfailed,count_day_status,count_mon_status
from config import Config
import os,json,string,datetime
from random import choice
import py_compile
@main.route('/',methods=['GET', 'POST'])
@login_required
@main.route('/build_config/',methods=['GET', 'POST'])
@login_required
@main.route('/set_config/',methods=['GET', 'POST'])
@login_required
@main.route('/api/', methods=['GET', 'POST'])
@main.route('/add_backnode/',methods=['GET', 'POST'])
@login_required
@main.route('/backnode/',methods=['GET', 'POST'])
@main.route('/backmanage/',methods=['GET', 'POST'])
@login_required
@main.route('/customer/',methods=['GET', 'POST'])
@login_required
@main.route('/failed_customer/',methods=['GET', 'POST'])
@login_required
@main.route('/help/',methods=['GET', 'POST'])
@login_required
| [
2,
48443,
14629,
14,
8800,
14,
29412,
201,
198,
2,
19617,
28,
40477,
12,
23,
201,
198,
2,
834,
9800,
834,
796,
705,
34,
56,
6,
201,
198,
6738,
42903,
1330,
220,
8543,
62,
28243,
11,
2581,
11,
7644,
11,
397,
419,
11,
445,
1060,
... | 2.548246 | 456 |
import tensorflow as tf
def down_sampling_module(input_tensor):
    """Downsampling Module.

    Halves the spatial resolution and doubles the channel count by summing
    a convolutional main path with a pooled skip path (residual-style merge).

    Args:
        input_tensor: 4-D feature map; channels-last (NHWC) assumed, since
            the channel count is read from the last axis -- TODO confirm.

    Returns:
        Tensor with twice the channels and half the spatial size of the input.
    """
    channels = list(input_tensor.shape)[-1]

    # Main path: 1x1 conv -> ReLU -> 3x3 conv -> ReLU -> 2x2 max-pool,
    # then a 1x1 conv to expand to twice the channels.
    # (A stale commented-out duplicate of the 3x3 conv was removed.)
    main_branch = tf.keras.layers.Conv2D(
        channels, kernel_size=(1, 1))(input_tensor)
    main_branch = tf.nn.relu(main_branch)
    main_branch = tf.keras.layers.Conv2D(
        channels, kernel_size=(3, 3), padding='same')(main_branch)
    main_branch = tf.nn.relu(main_branch)
    main_branch = tf.keras.layers.MaxPooling2D()(main_branch)
    main_branch = tf.keras.layers.Conv2D(
        channels * 2, kernel_size=(1, 1))(main_branch)

    # Skip path: pool, then match the doubled channel count with a 1x1 conv.
    skip_branch = tf.keras.layers.MaxPooling2D()(input_tensor)
    skip_branch = tf.keras.layers.Conv2D(
        channels * 2, kernel_size=(1, 1))(skip_branch)

    return tf.keras.layers.Add()([skip_branch, main_branch])
def up_sampling_module(input_tensor):
    """Upsampling Module.

    Doubles the spatial resolution and halves the channel count by summing
    a convolutional main path with an upsampled skip path (mirror of
    down_sampling_module).

    Args:
        input_tensor: 4-D feature map; channels-last (NHWC) assumed, since
            the channel count is read from the last axis -- TODO confirm.

    Returns:
        Tensor with half the channels and twice the spatial size of the input.
    """
    channels = list(input_tensor.shape)[-1]

    # Main path: 1x1 conv -> ReLU -> 3x3 conv -> ReLU -> 2x upsample,
    # then a 1x1 conv to shrink to half the channels.
    # (A stale commented-out duplicate of the 3x3 conv was removed.)
    main_branch = tf.keras.layers.Conv2D(
        channels, kernel_size=(1, 1))(input_tensor)
    main_branch = tf.nn.relu(main_branch)
    main_branch = tf.keras.layers.Conv2D(
        channels, kernel_size=(3, 3), padding='same')(main_branch)
    main_branch = tf.nn.relu(main_branch)
    main_branch = tf.keras.layers.UpSampling2D()(main_branch)
    main_branch = tf.keras.layers.Conv2D(
        channels // 2, kernel_size=(1, 1))(main_branch)

    # Skip path: upsample, then match the halved channel count with a 1x1 conv.
    skip_branch = tf.keras.layers.UpSampling2D()(input_tensor)
    skip_branch = tf.keras.layers.Conv2D(
        channels // 2, kernel_size=(1, 1))(skip_branch)

    return tf.keras.layers.Add()([skip_branch, main_branch])
| [
11748,
11192,
273,
11125,
355,
48700,
628,
198,
4299,
866,
62,
37687,
11347,
62,
21412,
7,
15414,
62,
83,
22854,
2599,
198,
220,
220,
220,
37227,
8048,
37687,
11347,
19937,
37811,
198,
220,
220,
220,
9619,
796,
1351,
7,
15414,
62,
83,... | 2.177458 | 834 |
from functools import lru_cache
from sqlsite.database import install_function
import re
# Name under which the custom SQL function is registered.
PATH_MATCH_FUNCTION_NAME = "PATH_MATCH"

# Matches one <type:name> placeholder inside a route pattern.
PATTERN_COMPONENT_RE = re.compile(r"<(?P<param_type>[a-z]+):(?P<param_name>[a-z_]+)>")

# Sub-regex emitted for each supported placeholder type.
PATTERN_PARAM_TYPES = {
    "str": "[^/]+",
    "int": "[0-9]+",
    "slug": "[-a-zA-Z0-9_]+",
    "uuid": "[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}",
    "path": ".+",
}


@lru_cache(maxsize=64)
def pattern_to_regex(pattern):
    """
    Translate a route pattern containing <type:name> placeholders into an
    anchored regex string with one named capture group per placeholder.

    Mirrors Django's route-converter approach; identical input always
    yields identical output, which makes lru_cache memoization safe.
    """
    pieces = ["^"]
    cursor = 0
    for component in PATTERN_COMPONENT_RE.finditer(pattern):
        # Literal text before the placeholder is escaped verbatim.
        pieces.append(re.escape(pattern[cursor:component.start()]))
        kind, name = component.group("param_type", "param_name")
        pieces.append("(?P<%s>%s)" % (name, PATTERN_PARAM_TYPES[kind]))
        cursor = component.end()
    # Trailing literal text (the whole pattern when no placeholder matched).
    pieces.append(re.escape(pattern[cursor:]))
    pieces.append("$")
    return "".join(pieces)
def search_path(pattern, path):
    """
    Match *path* (the incoming request path without its leading slash)
    against *pattern* (the contents of the pattern column in the route
    table). Returns the re match object on success, or None.
    """
    compiled = pattern_to_regex(pattern)
    # A trailing slash in the pattern is treated as optional in the path.
    if compiled.endswith("/$"):
        compiled = "{}/?$".format(compiled[:-2])
    return re.search(compiled, path)
def create_path_match_function(path):
    """
    Given the path of an incoming request, create a function that can be used to
    check whether a pattern matches the path.
    """
    # NOTE(review): the published body returned `path_match` without ever
    # defining it (NameError) -- the inner closure was evidently lost.
    # Reconstructed: the SQL function receives a route pattern and reports
    # whether it matches the bound request path. Confirm the intended
    # return value (bool here) against the PATH_MATCH call sites.
    def path_match(pattern):
        return search_path(pattern, path) is not None

    return path_match
def install_path_match_function(db, path):
    """
    Register the custom PATH_MATCH SQL function on *db*, bound to the path
    of the incoming request, so route patterns can be matched in queries.
    """
    matcher = create_path_match_function(path)
    install_function(
        db,
        PATH_MATCH_FUNCTION_NAME,
        matcher,
        numargs=1,
        deterministic=True,  # same pattern + same request path -> same answer
    )
| [
6738,
1257,
310,
10141,
1330,
300,
622,
62,
23870,
198,
6738,
19862,
7278,
578,
13,
48806,
1330,
2721,
62,
8818,
198,
198,
11748,
302,
198,
198,
34219,
62,
44,
11417,
62,
42296,
4177,
2849,
62,
20608,
796,
366,
34219,
62,
44,
11417,
... | 2.391499 | 894 |
from django.apps import AppConfig
| [
6738,
42625,
14208,
13,
18211,
1330,
2034,
16934,
628
] | 3.888889 | 9 |
import boto3
import os
import traceback
from string import ascii_lowercase
from random import choice
import json
import logging
import time
from typing import Optional, Union
from pathlib import Path
from cloudformation_cli_python_lib import SessionProxy, exceptions
# Module-level logger; DEBUG level so attached handlers decide what to surface.
LOG = logging.getLogger(__name__)
LOG.setLevel(logging.DEBUG)
| [
11748,
275,
2069,
18,
198,
11748,
28686,
198,
11748,
12854,
1891,
198,
6738,
4731,
1330,
355,
979,
72,
62,
21037,
7442,
198,
6738,
4738,
1330,
3572,
198,
11748,
33918,
198,
11748,
18931,
198,
11748,
640,
198,
6738,
19720,
1330,
32233,
1... | 3.645161 | 93 |