hexsha stringlengths 40 40 | size int64 4 996k | ext stringclasses 8
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 4 996k | avg_line_length float64 1.33 58.2k | max_line_length int64 2 323k | alphanum_fraction float64 0 0.97 | content_no_comment stringlengths 0 946k | is_comment_constant_removed bool 2
classes | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
79012566e499cbd48cac0b024328d5d1ff49f78a | 272 | py | Python | apps/challenge/serializers/clan.py | mehrbodjavadi79/AIC21-Backend | 9f4342781f0722804a2eb704b43b52984c81b40a | [
"MIT"
] | 3 | 2021-03-12T18:32:39.000Z | 2021-11-08T10:21:04.000Z | apps/challenge/serializers/clan.py | mehrbodjavadi79/AIC21-Backend | 9f4342781f0722804a2eb704b43b52984c81b40a | [
"MIT"
] | null | null | null | apps/challenge/serializers/clan.py | mehrbodjavadi79/AIC21-Backend | 9f4342781f0722804a2eb704b43b52984c81b40a | [
"MIT"
] | 2 | 2021-01-29T14:52:53.000Z | 2022-03-05T10:24:24.000Z | from rest_framework import serializers
from apps.challenge.models import Clan
class ClanSerializer(serializers.ModelSerializer):
class Meta:
model = Clan
fields = (
'name', 'leader', 'image', 'score', 'wins', 'losses', 'draws'
)
| 22.666667 | 73 | 0.632353 | from rest_framework import serializers
from apps.challenge.models import Clan
class ClanSerializer(serializers.ModelSerializer):
class Meta:
model = Clan
fields = (
'name', 'leader', 'image', 'score', 'wins', 'losses', 'draws'
)
| true | true |
79012662b4270c3d1b6d7ea3dfe0f5c6576f3f9a | 302 | py | Python | djcookie_demo_proj/conftest.py | muutttu/djcookie_demo_proj | a023fdbdbf90560daf29cf9ee626393a3b4ea30d | [
"MIT"
] | null | null | null | djcookie_demo_proj/conftest.py | muutttu/djcookie_demo_proj | a023fdbdbf90560daf29cf9ee626393a3b4ea30d | [
"MIT"
] | null | null | null | djcookie_demo_proj/conftest.py | muutttu/djcookie_demo_proj | a023fdbdbf90560daf29cf9ee626393a3b4ea30d | [
"MIT"
] | null | null | null | import pytest
from djcookie_demo_proj.users.models import User
from djcookie_demo_proj.users.tests.factories import UserFactory
@pytest.fixture(autouse=True)
def media_storage(settings, tmpdir):
settings.MEDIA_ROOT = tmpdir.strpath
@pytest.fixture
def user() -> User:
return UserFactory()
| 20.133333 | 64 | 0.791391 | import pytest
from djcookie_demo_proj.users.models import User
from djcookie_demo_proj.users.tests.factories import UserFactory
@pytest.fixture(autouse=True)
def media_storage(settings, tmpdir):
settings.MEDIA_ROOT = tmpdir.strpath
@pytest.fixture
def user() -> User:
return UserFactory()
| true | true |
790126d1da63826dbfb07210b34108d89c42d1a0 | 2,992 | py | Python | app/app.py | tjdaley/publicdataws | 1aa4a98cf47fae10cc0f59a8d01168df806b4919 | [
"MIT"
] | null | null | null | app/app.py | tjdaley/publicdataws | 1aa4a98cf47fae10cc0f59a8d01168df806b4919 | [
"MIT"
] | null | null | null | app/app.py | tjdaley/publicdataws | 1aa4a98cf47fae10cc0f59a8d01168df806b4919 | [
"MIT"
] | null | null | null | """
app.py - Flask-based server.
@author Thomas J. Daley, J.D.
@version: 0.0.1
Copyright (c) 2019 by Thomas J. Daley, J.D.
"""
import argparse
import random
from flask import Flask, render_template, request, flash, redirect, url_for, session, jsonify
from wtforms import Form, StringField, TextAreaField, PasswordField, validators
from functools import wraps
from views.decorators import is_admin_user, is_logged_in, is_case_set
from webservice import WebService
from util.database import Database
from views.admin.admin_routes import admin_routes
from views.cases.case_routes import case_routes
from views.discovery.discovery_routes import discovery_routes
from views.drivers.driver_routes import driver_routes
from views.info.info_routes import info_routes
from views.login.login import login
from views.objections.objection_routes import objection_routes
from views.real_property.real_property_routes import rp_routes
from views.responses.response_routes import response_routes
from views.vehicles.vehicle_routes import vehicle_routes
from views.decorators import is_admin_user, is_case_set, is_logged_in
WEBSERVICE = None
DATABASE = Database()
DATABASE.connect()
app = Flask(__name__)
app.register_blueprint(admin_routes)
app.register_blueprint(case_routes)
app.register_blueprint(discovery_routes)
app.register_blueprint(driver_routes)
app.register_blueprint(info_routes)
app.register_blueprint(login)
app.register_blueprint(objection_routes)
app.register_blueprint(rp_routes)
app.register_blueprint(response_routes)
app.register_blueprint(vehicle_routes)
# Helper to create Public Data credentials from session variables
def pd_credentials(mysession) -> dict:
return {
"username": session["pd_username"],
"password": session["pd_password"]
}
@app.route('/', methods=['GET'])
def index():
return render_template('home.html')
@app.route('/attorney/find/<string:bar_number>', methods=['POST'])
@is_logged_in
def find_attorney(bar_number: str):
attorney = DATABASE.attorney(bar_number)
if attorney:
attorney['success'] = True
return jsonify(attorney)
return jsonify(
{
'success': False,
'message': "Unable to find attorney having Bar Number {}"
.format(bar_number)
}
)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Webservice for DiscoveryBot")
parser.add_argument(
"--debug",
help="Run server in debug mode",
action='store_true'
)
parser.add_argument(
"--port",
help="TCP port to listen on",
type=int,
default=5001
)
parser.add_argument(
"--zillowid",
"-z",
help="Zillow API credential from https://www.zillow.com/howto/api/APIOverview.htm" # NOQA
)
args = parser.parse_args()
WEBSERVICE = WebService(args.zillowid)
app.secret_key = "SDFIIUWER*HGjdf8*"
app.run(debug=args.debug, port=args.port)
| 28.769231 | 98 | 0.734626 | import argparse
import random
from flask import Flask, render_template, request, flash, redirect, url_for, session, jsonify
from wtforms import Form, StringField, TextAreaField, PasswordField, validators
from functools import wraps
from views.decorators import is_admin_user, is_logged_in, is_case_set
from webservice import WebService
from util.database import Database
from views.admin.admin_routes import admin_routes
from views.cases.case_routes import case_routes
from views.discovery.discovery_routes import discovery_routes
from views.drivers.driver_routes import driver_routes
from views.info.info_routes import info_routes
from views.login.login import login
from views.objections.objection_routes import objection_routes
from views.real_property.real_property_routes import rp_routes
from views.responses.response_routes import response_routes
from views.vehicles.vehicle_routes import vehicle_routes
from views.decorators import is_admin_user, is_case_set, is_logged_in
WEBSERVICE = None
DATABASE = Database()
DATABASE.connect()
app = Flask(__name__)
app.register_blueprint(admin_routes)
app.register_blueprint(case_routes)
app.register_blueprint(discovery_routes)
app.register_blueprint(driver_routes)
app.register_blueprint(info_routes)
app.register_blueprint(login)
app.register_blueprint(objection_routes)
app.register_blueprint(rp_routes)
app.register_blueprint(response_routes)
app.register_blueprint(vehicle_routes)
def pd_credentials(mysession) -> dict:
return {
"username": session["pd_username"],
"password": session["pd_password"]
}
@app.route('/', methods=['GET'])
def index():
return render_template('home.html')
@app.route('/attorney/find/<string:bar_number>', methods=['POST'])
@is_logged_in
def find_attorney(bar_number: str):
attorney = DATABASE.attorney(bar_number)
if attorney:
attorney['success'] = True
return jsonify(attorney)
return jsonify(
{
'success': False,
'message': "Unable to find attorney having Bar Number {}"
.format(bar_number)
}
)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Webservice for DiscoveryBot")
parser.add_argument(
"--debug",
help="Run server in debug mode",
action='store_true'
)
parser.add_argument(
"--port",
help="TCP port to listen on",
type=int,
default=5001
)
parser.add_argument(
"--zillowid",
"-z",
help="Zillow API credential from https://www.zillow.com/howto/api/APIOverview.htm"
)
args = parser.parse_args()
WEBSERVICE = WebService(args.zillowid)
app.secret_key = "SDFIIUWER*HGjdf8*"
app.run(debug=args.debug, port=args.port)
| true | true |
790126edbd7d91701d3b3d02b8852ca27bbe5606 | 416 | py | Python | Taller-de-Estrucuras-de-Control-Repeticion/ejercicio 9.py | Davidpadilla1234/Taller-de-Estrucuras-de-Control-Repeticion | 5fd3f058a8007e6e6d886959149c7d5e42f26a3a | [
"MIT"
] | null | null | null | Taller-de-Estrucuras-de-Control-Repeticion/ejercicio 9.py | Davidpadilla1234/Taller-de-Estrucuras-de-Control-Repeticion | 5fd3f058a8007e6e6d886959149c7d5e42f26a3a | [
"MIT"
] | null | null | null | Taller-de-Estrucuras-de-Control-Repeticion/ejercicio 9.py | Davidpadilla1234/Taller-de-Estrucuras-de-Control-Repeticion | 5fd3f058a8007e6e6d886959149c7d5e42f26a3a | [
"MIT"
] | null | null | null | un = 0
re = 0
gramo = 0
mientras que es cierto :
numero = int ( entrada ( "" ))
si ( numero == 4 ):
descanso
si ( numero == 1 ):
un = un + 1
elif ( numero == 2 ):
re = re + 1
elif ( numero == 3 ):
gramo = gramo + 1
elif ( numero == 4 ):
descanso
print ( f"MUITO OBRIGADO \n Alcohol:, { a } \n Gasolina: { g } \n Diesel: { d } " ) | 24.470588 | 88 | 0.435096 | un = 0
re = 0
gramo = 0
mientras que es cierto :
numero = int ( entrada ( "" ))
si ( numero == 4 ):
descanso
si ( numero == 1 ):
un = un + 1
elif ( numero == 2 ):
re = re + 1
elif ( numero == 3 ):
gramo = gramo + 1
elif ( numero == 4 ):
descanso
print ( f"MUITO OBRIGADO \n Alcohol:, { a } \n Gasolina: { g } \n Diesel: { d } " ) | false | true |
79012714af783f40450f1f7a34a623e977db7c54 | 286 | py | Python | vmall/pipelines.py | gikoluo/vmall | 285e4156ba68d7e14be417801c262897101c8486 | [
"MIT"
] | null | null | null | vmall/pipelines.py | gikoluo/vmall | 285e4156ba68d7e14be417801c262897101c8486 | [
"MIT"
] | null | null | null | vmall/pipelines.py | gikoluo/vmall | 285e4156ba68d7e14be417801c262897101c8486 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html
class VmallPipeline(object):
def process_item(self, item, spider):
return item
| 23.833333 | 65 | 0.70979 |
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html
class VmallPipeline(object):
def process_item(self, item, spider):
return item
| true | true |
79012948ddbd0051e478defc15a182e6c0e5cda8 | 2,987 | py | Python | setup.py | maxwu/ci-stat | e8ed2f5d4d43afdc3afc0689435881202efbcbb3 | [
"MIT"
] | 1 | 2017-04-23T02:52:02.000Z | 2017-04-23T02:52:02.000Z | setup.py | maxwu/cistat | e8ed2f5d4d43afdc3afc0689435881202efbcbb3 | [
"MIT"
] | 2 | 2017-05-28T12:48:35.000Z | 2017-06-19T11:22:40.000Z | setup.py | maxwu/cistat | e8ed2f5d4d43afdc3afc0689435881202efbcbb3 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Public Title.
Doc str for module users
.. moduleauthor:: Max Wu <http://maxwu.me>
.. References::
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
.. Test Samples in doctest format
>>> None
"""
from setuptools import setup, find_packages
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
with open('requirements.txt') as f:
required = f.read().splitlines()
with open(path.join(here, 'src', 'cistat', 'version.py')) as f:
exec(f.read())
VERSION = get_version()
setup(
name='cistat',
version=VERSION,
description='A sample Python project',
long_description=long_description,
# The project's main homepage.
url='https://github.com/maxwu/cistat',
# Author details
author='Max Wu',
author_email='maxwunj@gmail.com',
# Choose your license
license='MIT',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 3 - Alpha',
# Indicate who your project is intended for
'Intended Audience :: Testers, Developers',
'Topic :: Software Test :: Statistic Tools',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: MIT License',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
# 'Programming Language :: Python :: 3',
# 'Programming Language :: Python :: 3.3',
# 'Programming Language :: Python :: 3.4',
# 'Programming Language :: Python :: 3.5',
],
# What does your project relate to?
keywords='CI Stat CircleCI',
packages=find_packages("src"),
package_dir={"": "src"},
install_requires=required,
extras_require={
'dev': ['check-manifest'],
'test': ['coverage'],
},
# If there are data files included in your packages that need to be
# installed, specify them here. If using Python 2.6 or less, then these
# have to be included in MANIFEST.in as well.
#package_data={
# 'sample': ['package_data.dat'],
#},
# To provide executable scripts, use entry points in preference to the
# "scripts" keyword. Entry points provide cross-platform support and allow
# pip to create the appropriate form of executable for the target platform.
entry_points={
'console_scripts': [
'cistat-cli=cistat:cli_app',
],
},
)
| 29.87 | 79 | 0.634416 |
from setuptools import setup, find_packages
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
with open('requirements.txt') as f:
required = f.read().splitlines()
with open(path.join(here, 'src', 'cistat', 'version.py')) as f:
exec(f.read())
VERSION = get_version()
setup(
name='cistat',
version=VERSION,
description='A sample Python project',
long_description=long_description,
url='https://github.com/maxwu/cistat',
# Author details
author='Max Wu',
author_email='maxwunj@gmail.com',
# Choose your license
license='MIT',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 3 - Alpha',
# Indicate who your project is intended for
'Intended Audience :: Testers, Developers',
'Topic :: Software Test :: Statistic Tools',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: MIT License',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
# 'Programming Language :: Python :: 3',
# 'Programming Language :: Python :: 3.3',
# 'Programming Language :: Python :: 3.4',
# 'Programming Language :: Python :: 3.5',
],
# What does your project relate to?
keywords='CI Stat CircleCI',
packages=find_packages("src"),
package_dir={"": "src"},
install_requires=required,
extras_require={
'dev': ['check-manifest'],
'test': ['coverage'],
},
# If there are data files included in your packages that need to be
# installed, specify them here. If using Python 2.6 or less, then these
# have to be included in MANIFEST.in as well.
#package_data={
# 'sample': ['package_data.dat'],
#},
# To provide executable scripts, use entry points in preference to the
# "scripts" keyword. Entry points provide cross-platform support and allow
# pip to create the appropriate form of executable for the target platform.
entry_points={
'console_scripts': [
'cistat-cli=cistat:cli_app',
],
},
)
| true | true |
79012996927d899f532bbb0579b4390baf69efd5 | 4,685 | py | Python | InteractionTracker/accounts/models.py | desertzebra/Lean-UX-Platform | 1b61a4b4e0af6fc08e052fb22b4141e65122ef9a | [
"Apache-2.0"
] | 34 | 2019-03-11T08:10:16.000Z | 2021-12-14T05:53:22.000Z | InteractionTracker/accounts/models.py | shahidzaffar/Lean-UX-Platform | 40c46c0421dd21cdfca254db689bf566c95e4d6a | [
"Apache-2.0"
] | 6 | 2020-11-17T06:57:39.000Z | 2022-01-04T16:51:41.000Z | InteractionTracker/accounts/models.py | shahidzaffar/Lean-UX-Platform | 40c46c0421dd21cdfca254db689bf566c95e4d6a | [
"Apache-2.0"
] | 28 | 2019-03-11T08:10:19.000Z | 2021-12-14T06:02:37.000Z | """
# Interaction Tracker
# @license http://www.apache.org/licenses/LICENSE-2.0
# Author @ Jamil Hussain, Zaki
"""
from django.conf import settings
from django.contrib.auth.models import (
BaseUserManager, AbstractBaseUser
)
from django.core.validators import RegexValidator
from django.db import models
from django.db.models.signals import post_save
# Create your models here.
from .utils import code_generator
USERNAME_REGEX = '^[a-zA-Z0-9.+-]*$'
class MyUserManager(BaseUserManager):
def create_user(self, username, email, password=None):
"""
Creates and saves a User with the given email, date of
birth and password.
"""
if not email:
raise ValueError('Users must have an email address')
user = self.model(
username = username,
email=self.normalize_email(email),
)
user.set_password(password)
user.save(using=self._db)
return user
def create_superuser(self, username, email, password):
"""
Creates and saves a superuser with the given email, date of
birth and password.
"""
user = self.create_user(
username,
email,
password=password,
)
user.is_admin = True
user.is_staff = True
user.save(using=self._db)
return user
#The data should be collected on the bases of user ID. This Model store information about the user
class MyUser(AbstractBaseUser):
username = models.CharField(
max_length=255,
validators=[
RegexValidator(
regex = USERNAME_REGEX,
message = 'Username must be Alpahnumeric or contain any of the following: ". @ + -" ',
code='invalid_username'
)],
unique=True,
)
email = models.EmailField(
verbose_name='email address',
max_length=255,
unique=True,
)
GENDER_CHOICES = (
('male', 'Male'),
('female', 'Female'),
)
password = models.CharField(max_length=255)
date_of_birth= models.DateField(blank=True,null=True)
gender= models.CharField(max_length=50, null=True, choices=GENDER_CHOICES)
height= models.IntegerField(blank=True,null=True)
weight=models.IntegerField(blank=True,null=True)
user_sight = models.CharField(max_length=50, null=True)
user_hearing = models.CharField(max_length=50, null=True)
user_touch = models.CharField(max_length=50, null=True)
is_active = models.BooleanField(default=True)
is_staff = models.BooleanField(default=False)
is_admin = models.BooleanField(default=False)
objects = MyUserManager()
USERNAME_FIELD = 'username'
REQUIRED_FIELDS = ['email']
def get_full_name(self):
# The user is identified by their email address
return self.email
def get_short_name(self):
# The user is identified by their email address
return self.email
def __str__(self): # __unicode__ on Python 2
return self.email
def has_perm(self, perm, obj=None):
"Does the user have a specific permission?"
# Simplest possible answer: Yes, always
return True
def has_module_perms(self, app_label):
"Does the user have permissions to view the app `app_label`?"
# Simplest possible answer: Yes, always
return True
class ActivationProfile(models.Model):
user = models.ForeignKey(settings.AUTH_USER_MODEL)
key = models.CharField(max_length=120)
expired = models.BooleanField(default=False)
def save(self, *args, **kwargs):
self.key = code_generator()
super(ActivationProfile, self).save(*args, **kwargs)
def post_save_activation_receiver(sender, instance, created, *args, **kwargs):
if created:
#send email
print('activation created')
post_save.connect(post_save_activation_receiver, sender=ActivationProfile)
class Profile(models.Model):
user = models.OneToOneField(settings.AUTH_USER_MODEL)
city = models.CharField(max_length=120, null=True, blank=True)
def __str__(self):
return str(self.user.username)
def __unicode__(self):
return str(self.user.username)
def post_save_user_model_receiver(sender, instance, created, *args, **kwargs):
if created:
try:
Profile.objects.create(user=instance)
ActivationProfile.objects.create(user=instance)
except:
pass
post_save.connect(post_save_user_model_receiver, sender=settings.AUTH_USER_MODEL)
| 29.099379 | 110 | 0.646105 |
from django.conf import settings
from django.contrib.auth.models import (
BaseUserManager, AbstractBaseUser
)
from django.core.validators import RegexValidator
from django.db import models
from django.db.models.signals import post_save
from .utils import code_generator
USERNAME_REGEX = '^[a-zA-Z0-9.+-]*$'
class MyUserManager(BaseUserManager):
def create_user(self, username, email, password=None):
if not email:
raise ValueError('Users must have an email address')
user = self.model(
username = username,
email=self.normalize_email(email),
)
user.set_password(password)
user.save(using=self._db)
return user
def create_superuser(self, username, email, password):
user = self.create_user(
username,
email,
password=password,
)
user.is_admin = True
user.is_staff = True
user.save(using=self._db)
return user
class MyUser(AbstractBaseUser):
username = models.CharField(
max_length=255,
validators=[
RegexValidator(
regex = USERNAME_REGEX,
message = 'Username must be Alpahnumeric or contain any of the following: ". @ + -" ',
code='invalid_username'
)],
unique=True,
)
email = models.EmailField(
verbose_name='email address',
max_length=255,
unique=True,
)
GENDER_CHOICES = (
('male', 'Male'),
('female', 'Female'),
)
password = models.CharField(max_length=255)
date_of_birth= models.DateField(blank=True,null=True)
gender= models.CharField(max_length=50, null=True, choices=GENDER_CHOICES)
height= models.IntegerField(blank=True,null=True)
weight=models.IntegerField(blank=True,null=True)
user_sight = models.CharField(max_length=50, null=True)
user_hearing = models.CharField(max_length=50, null=True)
user_touch = models.CharField(max_length=50, null=True)
is_active = models.BooleanField(default=True)
is_staff = models.BooleanField(default=False)
is_admin = models.BooleanField(default=False)
objects = MyUserManager()
USERNAME_FIELD = 'username'
REQUIRED_FIELDS = ['email']
def get_full_name(self):
return self.email
def get_short_name(self):
return self.email
def __str__(self):
return self.email
def has_perm(self, perm, obj=None):
return True
def has_module_perms(self, app_label):
return True
class ActivationProfile(models.Model):
user = models.ForeignKey(settings.AUTH_USER_MODEL)
key = models.CharField(max_length=120)
expired = models.BooleanField(default=False)
def save(self, *args, **kwargs):
self.key = code_generator()
super(ActivationProfile, self).save(*args, **kwargs)
def post_save_activation_receiver(sender, instance, created, *args, **kwargs):
if created:
print('activation created')
post_save.connect(post_save_activation_receiver, sender=ActivationProfile)
class Profile(models.Model):
user = models.OneToOneField(settings.AUTH_USER_MODEL)
city = models.CharField(max_length=120, null=True, blank=True)
def __str__(self):
return str(self.user.username)
def __unicode__(self):
return str(self.user.username)
def post_save_user_model_receiver(sender, instance, created, *args, **kwargs):
if created:
try:
Profile.objects.create(user=instance)
ActivationProfile.objects.create(user=instance)
except:
pass
post_save.connect(post_save_user_model_receiver, sender=settings.AUTH_USER_MODEL)
| true | true |
79012ae549279f92da52b3f15e21f5a44f6e7d42 | 753 | py | Python | AudioFile.py | CoryXie/SpeechShadowing | a0190ce488b754e5c9a6828a3510a665294864f0 | [
"MIT"
] | null | null | null | AudioFile.py | CoryXie/SpeechShadowing | a0190ce488b754e5c9a6828a3510a665294864f0 | [
"MIT"
] | null | null | null | AudioFile.py | CoryXie/SpeechShadowing | a0190ce488b754e5c9a6828a3510a665294864f0 | [
"MIT"
] | null | null | null |
from pydub import AudioSegment
from pydub.playback import play
import os
import utils
class audiofile:
def __init__(self, file):
""" Init audio stream """
self.file = file
def play(self):
""" Play entire file """
utils.displayInfoMessage('Playing Audio')
pathparts = self.file.rsplit(".", 1)
fileformat = pathparts[1]
song = AudioSegment.from_file(self.file, format=fileformat)
play(song)
utils.displayInfoMessage('')
utils.displayErrorMessage('')
def length(self):
pathparts = self.file.rsplit(".", 1)
fileformat = pathparts[1]
song = AudioSegment.from_file(self.file, format=fileformat)
return song.duration_seconds | 25.965517 | 67 | 0.628154 |
from pydub import AudioSegment
from pydub.playback import play
import os
import utils
class audiofile:
def __init__(self, file):
self.file = file
def play(self):
utils.displayInfoMessage('Playing Audio')
pathparts = self.file.rsplit(".", 1)
fileformat = pathparts[1]
song = AudioSegment.from_file(self.file, format=fileformat)
play(song)
utils.displayInfoMessage('')
utils.displayErrorMessage('')
def length(self):
pathparts = self.file.rsplit(".", 1)
fileformat = pathparts[1]
song = AudioSegment.from_file(self.file, format=fileformat)
return song.duration_seconds | true | true |
79012b0a8825f72490fa1900112437ae89e9a963 | 5,320 | py | Python | python_scripts/03_categorical_pipeline_sol_01.py | leonsor/scikit-learn-mooc | 27c5caf7b0d2f0cc734baee59ad65efc263704cd | [
"CC-BY-4.0"
] | 1 | 2022-01-25T19:20:21.000Z | 2022-01-25T19:20:21.000Z | python_scripts/03_categorical_pipeline_sol_01.py | gmash24/scikit-learn-mooc | b58f051efb591a38859a4242369c9494ccac6a17 | [
"CC-BY-4.0"
] | null | null | null | python_scripts/03_categorical_pipeline_sol_01.py | gmash24/scikit-learn-mooc | b58f051efb591a38859a4242369c9494ccac6a17 | [
"CC-BY-4.0"
] | null | null | null | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.3'
# jupytext_version: 1.6.0
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %% [markdown]
# # 📃 Solution for Exercise M1.04
#
# The goal of this exercise is to evaluate the impact of using an arbitrary
# integer encoding for categorical variables along with a linear
# classification model such as Logistic Regression.
#
# To do so, let's try to use `OrdinalEncoder` to preprocess the categorical
# variables. This preprocessor is assembled in a pipeline with
# `LogisticRegression`. The generalization performance of the pipeline can be
# evaluated by cross-validation and then compared to the score obtained when
# using `OneHotEncoder` or to some other baseline score.
#
# First, we load the dataset.
# %%
import pandas as pd
adult_census = pd.read_csv("../datasets/adult-census.csv")
# %%
target_name = "class"
target = adult_census[target_name]
data = adult_census.drop(columns=[target_name, "education-num"])
# %% [markdown]
# In the previous notebook, we used `sklearn.compose.make_column_selector` to
# automatically select columns with a specific data type (also called `dtype`).
# Here, we will use this selector to get only the columns containing strings
# (column with `object` dtype) that correspond to categorical features in our
# dataset.
# %%
from sklearn.compose import make_column_selector as selector
categorical_columns_selector = selector(dtype_include=object)
categorical_columns = categorical_columns_selector(data)
data_categorical = data[categorical_columns]
# %% [markdown]
# Define a scikit-learn pipeline composed of an `OrdinalEncoder` and a
# `LogisticRegression` classifier.
#
# Because `OrdinalEncoder` can raise errors if it sees an unknown category at
# prediction time, you can set the `handle_unknown="use_encoded_value"` and
# `unknown_value` parameters. You can refer to the
# [scikit-learn documentation](https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.OrdinalEncoder.html)
# for more details regarding these parameters.
# %%
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import OrdinalEncoder
from sklearn.linear_model import LogisticRegression
# solution
model = make_pipeline(
OrdinalEncoder(handle_unknown="use_encoded_value", unknown_value=-1),
LogisticRegression(max_iter=500))
# %% [markdown]
# Your model is now defined. Evaluate it using a cross-validation using
# `sklearn.model_selection.cross_validate`.
#
# ```{note}
# Be aware that if an error happened during the cross-validation,
# `cross_validate` will raise a warning and return NaN (Not a Number)
# as scores. To make it raise a standard Python exception with a traceback,
# you can pass the `error_score="raise"` argument in the call to
# `cross_validate`. An exception will be raised instead of a warning at the first
# encountered problem and `cross_validate` will stop right away instead of
# returning NaN values. This is particularly handy when developing
# complex machine learning pipelines.
# ```
# %%
from sklearn.model_selection import cross_validate
# solution
cv_results = cross_validate(model, data_categorical, target)
scores = cv_results["test_score"]
print("The mean cross-validation accuracy is: "
f"{scores.mean():.3f} +/- {scores.std():.3f}")
# %% [markdown] tags=["solution"]
# Using an arbitrary mapping from string labels to integers as done here causes
# the linear model to make bad assumptions on the relative ordering of
# categories.
#
# This prevents the model from learning anything predictive enough and the
# cross-validated score is even lower than the baseline we obtained by ignoring
# the input data and just constantly predicting the most frequent class:
# %% tags=["solution"]
from sklearn.dummy import DummyClassifier
cv_results = cross_validate(DummyClassifier(strategy="most_frequent"),
data_categorical, target)
scores = cv_results["test_score"]
print("The mean cross-validation accuracy is: "
f"{scores.mean():.3f} +/- {scores.std():.3f}")
# %% [markdown]
# Now, we would like to compare the generalization performance of our previous
# model with a new model where instead of using an `OrdinalEncoder`, we will
# use a `OneHotEncoder`. Repeat the model evaluation using cross-validation.
# Compare the score of both models and conclude on the impact of choosing a
# specific encoding strategy when using a linear model.
# %%
from sklearn.preprocessing import OneHotEncoder
# solution
model = make_pipeline(
OneHotEncoder(handle_unknown="ignore"),
LogisticRegression(max_iter=500))
cv_results = cross_validate(model, data_categorical, target)
scores = cv_results["test_score"]
print("The mean cross-validation accuracy is: "
f"{scores.mean():.3f} +/- {scores.std():.3f}")
# %% [markdown] tags=["solution"]
# With the linear classifier chosen, using an encoding that does not assume
# any ordering lead to much better result.
#
# The important message here is: linear model and `OrdinalEncoder` are used
# together only for ordinal categorical features, i.e. features that have a
# specific ordering. Otherwise, your model will perform poorly.
| 37.202797 | 123 | 0.755827 |
cessor is assembled in a pipeline with
# `LogisticRegression`. The generalization performance of the pipeline can be
# evaluated by cross-validation and then compared to the score obtained when
# using `OneHotEncoder` or to some other baseline score.
#
# First, we load the dataset.
# %%
import pandas as pd
adult_census = pd.read_csv("../datasets/adult-census.csv")
# %%
target_name = "class"
target = adult_census[target_name]
data = adult_census.drop(columns=[target_name, "education-num"])
# %% [markdown]
# In the previous notebook, we used `sklearn.compose.make_column_selector` to
# automatically select columns with a specific data type (also called `dtype`).
# Here, we will use this selector to get only the columns containing strings
# (column with `object` dtype) that correspond to categorical features in our
# dataset.
# %%
from sklearn.compose import make_column_selector as selector
categorical_columns_selector = selector(dtype_include=object)
categorical_columns = categorical_columns_selector(data)
data_categorical = data[categorical_columns]
# %% [markdown]
# Define a scikit-learn pipeline composed of an `OrdinalEncoder` and a
# `LogisticRegression` classifier.
#
# Because `OrdinalEncoder` can raise errors if it sees an unknown category at
# prediction time, you can set the `handle_unknown="use_encoded_value"` and
# `unknown_value` parameters. You can refer to the
# [scikit-learn documentation](https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.OrdinalEncoder.html)
# for more details regarding these parameters.
# %%
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import OrdinalEncoder
from sklearn.linear_model import LogisticRegression
# solution
model = make_pipeline(
OrdinalEncoder(handle_unknown="use_encoded_value", unknown_value=-1),
LogisticRegression(max_iter=500))
# %% [markdown]
# Your model is now defined. Evaluate it using a cross-validation using
# `sklearn.model_selection.cross_validate`.
#
# ```{note}
# Be aware that if an error happened during the cross-validation,
# `cross_validate` will raise a warning and return NaN (Not a Number)
# as scores. To make it raise a standard Python exception with a traceback,
# you can pass the `error_score="raise"` argument in the call to
# `cross_validate`. An exception will be raised instead of a warning at the first
# encountered problem and `cross_validate` will stop right away instead of
# returning NaN values. This is particularly handy when developing
# complex machine learning pipelines.
# ```
# %%
from sklearn.model_selection import cross_validate
# solution
cv_results = cross_validate(model, data_categorical, target)
scores = cv_results["test_score"]
print("The mean cross-validation accuracy is: "
f"{scores.mean():.3f} +/- {scores.std():.3f}")
# %% [markdown] tags=["solution"]
# Using an arbitrary mapping from string labels to integers as done here causes
# the linear model to make bad assumptions on the relative ordering of
# categories.
#
# This prevents the model from learning anything predictive enough and the
# cross-validated score is even lower than the baseline we obtained by ignoring
# the input data and just constantly predicting the most frequent class:
# %% tags=["solution"]
from sklearn.dummy import DummyClassifier
cv_results = cross_validate(DummyClassifier(strategy="most_frequent"),
data_categorical, target)
scores = cv_results["test_score"]
print("The mean cross-validation accuracy is: "
f"{scores.mean():.3f} +/- {scores.std():.3f}")
# %% [markdown]
# Now, we would like to compare the generalization performance of our previous
# model with a new model where instead of using an `OrdinalEncoder`, we will
# use a `OneHotEncoder`. Repeat the model evaluation using cross-validation.
# Compare the score of both models and conclude on the impact of choosing a
# specific encoding strategy when using a linear model.
# %%
from sklearn.preprocessing import OneHotEncoder
# solution
model = make_pipeline(
OneHotEncoder(handle_unknown="ignore"),
LogisticRegression(max_iter=500))
cv_results = cross_validate(model, data_categorical, target)
scores = cv_results["test_score"]
print("The mean cross-validation accuracy is: "
f"{scores.mean():.3f} +/- {scores.std():.3f}")
# %% [markdown] tags=["solution"]
# With the linear classifier chosen, using an encoding that does not assume
# any ordering lead to much better result.
#
# The important message here is: linear model and `OrdinalEncoder` are used
# together only for ordinal categorical features, i.e. features that have a
# specific ordering. Otherwise, your model will perform poorly.
| true | true |
79012b6bcab788292571d51f675e4af415834ad9 | 2,816 | py | Python | data.py | IFFranciscoME/trading-project | 55c6ce679b6d859f80a4726266e0c4bc719755b5 | [
"MIT"
] | 1 | 2022-03-08T06:03:42.000Z | 2022-03-08T06:03:42.000Z | data.py | IFFranciscoME/trading-project | 55c6ce679b6d859f80a4726266e0c4bc719755b5 | [
"MIT"
] | null | null | null | data.py | IFFranciscoME/trading-project | 55c6ce679b6d859f80a4726266e0c4bc719755b5 | [
"MIT"
] | null | null | null |
# -- --------------------------------------------------------------------------------------------------- -- #
# -- project: A python project for algorithmic trading in FXCM -- #
# -- --------------------------------------------------------------------------------------------------- -- #
# -- script: requirements.txt : text file with the required libraries for the project -- #
# -- author: YOUR GITHUB USER NAME -- #
# -- license: MIT License -- #
# -- --------------------------------------------------------------------------------------------------- -- #
# -- Template repository: https://github.com/IFFranciscoME/trading-project -- #
# -- --------------------------------------------------------------------------------------------------- -- #
# -- Packages for the script
import fxcmpy
import pandas as pd
# -- --------------------------------------------------------------------------------------------------- -- #
# -- --------------------------------------------------------------------------------------------------- -- #
# NOTE(review): avoid committing a real API token to source control; prefer
# reading it from an environment variable or a git-ignored config file.
api_token = "ba432..."  # This token is obtained in the fxcm trading station platform
# Opens a connection to the FXCM demo server at import time; connection
# errors are written to fxcm_logs.txt.
con = fxcmpy.fxcmpy(access_token=api_token, server='demo', log_level='error', log_file='fxcm_logs.txt')
# -- --------------------------------------------------------------------------------------------------- -- #
# -- --------------------------------------------------------------------------------------------------- -- #
def fxcm_ohlc(p_instrument, p_period, p_ini, p_end):
    """
    Download OHLC mid prices from the FXCM broker.

    Parameters
    ----------
    p_instrument: str
        The name of the instrument according to fxcmpy
    p_period: str
        The frequency or granularity of prices, according to fxcmpy
    p_ini: str
        Initial timestamp, in format "yyyy-mm-dd hh:mm:ss"
    p_end: str
        final timestamp, in format "yyyy-mm-dd hh:mm:ss"

    Returns
    -------
    data_ohlc: DataFrame
        with columns open, high, low, close and timestamp as index
    """

    candles = con.get_candles(instrument=p_instrument, period=p_period,
                              start=p_ini, end=p_end)

    # Mid price: average of the bid and ask quote for each OHLC column.
    for column in ("open", "high", "low", "close"):
        candles[column] = (candles["bid" + column] + candles["ask" + column]) * 0.5

    data_ohlc = candles[["open", "high", "low", "close"]]
    data_ohlc.index.name = "timestamp"

    return data_ohlc
| 44.698413 | 109 | 0.393111 |
import fxcmpy
import pandas as pd
api_token = "ba432..."
con = fxcmpy.fxcmpy(access_token=api_token, server='demo', log_level='error', log_file='fxcm_logs.txt')
def fxcm_ohlc(p_instrument, p_period, p_ini, p_end):
    """Download OHLC prices from FXCM and return bid/ask mid prices.

    p_instrument / p_period: instrument name and price granularity as
    expected by fxcmpy.  p_ini / p_end: timestamps in the format
    "yyyy-mm-dd hh:mm:ss".  Returns a DataFrame with columns
    open/high/low/close and the timestamp as index.
    """
    data_ohlc = con.get_candles(instrument=p_instrument, period=p_period,
                                start=p_ini, end=p_end)
    # Mid price: average of the bid and ask quote for each OHLC column.
    data_ohlc['open'] = (data_ohlc['bidopen'] + data_ohlc['askopen'])*0.5
    data_ohlc['high'] = (data_ohlc['bidhigh'] + data_ohlc['askhigh'])*0.5
    data_ohlc['low'] = (data_ohlc['bidlow'] + data_ohlc['asklow'])*0.5
    data_ohlc['close'] = (data_ohlc['bidclose'] + data_ohlc['askclose'])*0.5
    # Keep only the derived mid-price columns.
    data_ohlc = data_ohlc[['open', 'high', 'low', 'close']]
    data_ohlc.index.name = 'timestamp'
    return data_ohlc
| true | true |
79012c98f41ed59afab30e18ab63b304f177d16d | 22,952 | py | Python | egs/librispeech/ASR/conformer_mmi/decode.py | aarora8/icefall | 8cb7f712e413fffbcdfdd865be73d6ff43f0ce7a | [
"Apache-2.0"
] | null | null | null | egs/librispeech/ASR/conformer_mmi/decode.py | aarora8/icefall | 8cb7f712e413fffbcdfdd865be73d6ff43f0ce7a | [
"Apache-2.0"
] | null | null | null | egs/librispeech/ASR/conformer_mmi/decode.py | aarora8/icefall | 8cb7f712e413fffbcdfdd865be73d6ff43f0ce7a | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# Copyright 2021 Xiaomi Corporation (Author: Liyong Guo, Fangjun Kuang)
#
# See ../../../../LICENSE for clarification regarding multiple authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import logging
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Optional, Tuple
import k2
import sentencepiece as spm
import torch
import torch.nn as nn
from asr_datamodule import LibriSpeechAsrDataModule
from conformer import Conformer
from icefall.bpe_graph_compiler import BpeCtcTrainingGraphCompiler
from icefall.checkpoint import average_checkpoints, load_checkpoint
from icefall.decode import (
get_lattice,
nbest_decoding,
nbest_oracle,
one_best_decoding,
rescore_with_attention_decoder,
rescore_with_n_best_list,
rescore_with_whole_lattice,
)
from icefall.lexicon import Lexicon
from icefall.utils import (
AttributeDict,
get_texts,
setup_logger,
store_transcripts,
str2bool,
write_error_stats,
)
def get_parser():
    """Build the command-line argument parser for this decoding script.

    Dataset-related options are added separately by
    ``LibriSpeechAsrDataModule.add_arguments`` before parsing.
    """
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )
    parser.add_argument(
        "--epoch",
        type=int,
        default=34,
        help="It specifies the checkpoint to use for decoding."
        "Note: Epoch counts from 0.",
    )
    parser.add_argument(
        "--avg",
        type=int,
        default=20,
        help="Number of checkpoints to average. Automatically select "
        "consecutive checkpoints before the checkpoint specified by "
        "'--epoch'. ",
    )
    parser.add_argument(
        "--method",
        type=str,
        default="attention-decoder",
        help="""Decoding method.
        Supported values are:
        - (0) ctc-decoding. Use CTC decoding. It uses a sentence piece
        model, i.e., lang_dir/bpe.model, to convert word pieces to words.
        It needs neither a lexicon nor an n-gram LM.
        - (1) 1best. Extract the best path from the decoding lattice as the
        decoding result.
        - (2) nbest. Extract n paths from the decoding lattice; the path
        with the highest score is the decoding result.
        - (3) nbest-rescoring. Extract n paths from the decoding lattice,
        rescore them with an n-gram LM (e.g., a 4-gram LM), the path with
        the highest score is the decoding result.
        - (4) whole-lattice-rescoring. Rescore the decoding lattice with an
        n-gram LM (e.g., a 4-gram LM), the best path of rescored lattice
        is the decoding result.
        - (5) attention-decoder. Extract n paths from the LM rescored
        lattice, the path with the highest score is the decoding result.
        - (6) nbest-oracle. Its WER is the lower bound of any n-best
        rescoring method can achieve. Useful for debugging n-best
        rescoring method.
        """,
    )
    parser.add_argument(
        "--num-paths",
        type=int,
        default=100,
        help="""Number of paths for n-best based decoding method.
        Used only when "method" is one of the following values:
        nbest, nbest-rescoring, attention-decoder, and nbest-oracle
        """,
    )
    parser.add_argument(
        "--nbest-scale",
        type=float,
        default=0.5,
        help="""The scale to be applied to `lattice.scores`.
        It's needed if you use any kinds of n-best based rescoring.
        Used only when "method" is one of the following values:
        nbest, nbest-rescoring, attention-decoder, and nbest-oracle
        A smaller value results in more unique paths.
        """,
    )
    parser.add_argument(
        "--export",
        type=str2bool,
        default=False,
        help="""When enabled, the averaged model is saved to
        conformer_ctc/exp/pretrained.pt. Note: only model.state_dict() is saved.
        pretrained.pt contains a dict {"model": model.state_dict()},
        which can be loaded by `icefall.checkpoint.load_checkpoint()`.
        """,
    )
    parser.add_argument(
        "--exp-dir",
        type=str,
        default="conformer_mmi/exp_500",
        help="The experiment dir",
    )
    parser.add_argument(
        "--lang-dir",
        type=str,
        default="data/lang_bpe_500",
        help="The lang dir",
    )
    parser.add_argument(
        "--num-decoder-layers",
        type=int,
        default=6,
        help="Number of attention decoder layers",
    )
    return parser
def get_params() -> AttributeDict:
    """Return the static (non-CLI) configuration used for decoding.

    The conformer entries must match the values used during training;
    the remaining entries control the FSA-based decoding search.
    """
    # Conformer architecture (must match the training configuration).
    model_config = {
        "subsampling_factor": 4,
        "vgg_frontend": False,
        "use_feat_batchnorm": True,
        "feature_dim": 80,
        "nhead": 8,
        "attention_dim": 512,
    }
    # Lattice search parameters.
    decoding_config = {
        "search_beam": 20,
        "output_beam": 8,
        "min_active_states": 30,
        "max_active_states": 10000,
        "use_double_scores": True,
    }
    return AttributeDict(
        {"lm_dir": Path("data/lm"), **model_config, **decoding_config}
    )
def decode_one_batch(
    params: AttributeDict,
    model: nn.Module,
    HLG: Optional[k2.Fsa],
    H: Optional[k2.Fsa],
    bpe_model: Optional[spm.SentencePieceProcessor],
    batch: dict,
    word_table: k2.SymbolTable,
    sos_id: int,
    eos_id: int,
    G: Optional[k2.Fsa] = None,
) -> Dict[str, List[List[str]]]:
    """Decode one batch and return the result in a dict. The dict has the
    following format:
    - key: It indicates the setting used for decoding. For example,
    if no rescoring is used, the key is the string `no_rescore`.
    If LM rescoring is used, the key is the string `lm_scale_xxx`,
    where `xxx` is the value of `lm_scale`. An example key is
    `lm_scale_0.7`
    - value: It contains the decoding result. `len(value)` equals to
    batch size. `value[i]` is the decoding result for the i-th
    utterance in the given batch.
    Args:
      params:
        It's the return value of :func:`get_params`.
        - params.method is "1best", it uses 1best decoding without LM rescoring.
        - params.method is "nbest", it uses nbest decoding without LM rescoring.
        - params.method is "nbest-rescoring", it uses nbest LM rescoring.
        - params.method is "whole-lattice-rescoring", it uses whole lattice LM
          rescoring.
      model:
        The neural model.
      HLG:
        The decoding graph. Used only when params.method is NOT ctc-decoding.
      H:
        The ctc topo. Used only when params.method is ctc-decoding.
      bpe_model:
        The BPE model. Used only when params.method is ctc-decoding.
      batch:
        It is the return value from iterating
        `lhotse.dataset.K2SpeechRecognitionDataset`. See its documentation
        for the format of the `batch`.
      word_table:
        The word symbol table.
      sos_id:
        The token ID of the SOS.
      eos_id:
        The token ID of the EOS.
      G:
        An LM. It is not None when params.method is "nbest-rescoring"
        or "whole-lattice-rescoring". In general, the G in HLG
        is a 3-gram LM, while this G is a 4-gram LM.
    Returns:
      Return the decoding result. See above description for the format of
      the returned dict.
    """
    # Exactly one of H (ctc-decoding) and HLG (all other methods) is given.
    if HLG is not None:
        device = HLG.device
    else:
        device = H.device
    feature = batch["inputs"]
    assert feature.ndim == 3
    feature = feature.to(device)
    # at entry, feature is (N, T, C)
    supervisions = batch["supervisions"]
    nnet_output, memory, memory_key_padding_mask = model(feature, supervisions)
    # nnet_output is (N, T, C)
    # One row per supervision: (sequence index, start frame, num frames),
    # all frame counts expressed in subsampled frames.
    supervision_segments = torch.stack(
        (
            supervisions["sequence_idx"],
            supervisions["start_frame"] // params.subsampling_factor,
            supervisions["num_frames"] // params.subsampling_factor,
        ),
        1,
    ).to(torch.int32)
    if H is None:
        assert HLG is not None
        decoding_graph = HLG
    else:
        assert HLG is None
        assert bpe_model is not None
        decoding_graph = H
    lattice = get_lattice(
        nnet_output=nnet_output,
        decoding_graph=decoding_graph,
        supervision_segments=supervision_segments,
        search_beam=params.search_beam,
        output_beam=params.output_beam,
        min_active_states=params.min_active_states,
        max_active_states=params.max_active_states,
        subsampling_factor=params.subsampling_factor,
    )
    if params.method == "ctc-decoding":
        best_path = one_best_decoding(
            lattice=lattice, use_double_scores=params.use_double_scores
        )
        # Note: `best_path.aux_labels` contains token IDs, not word IDs
        # since we are using H, not HLG here.
        #
        # token_ids is a list-of-list of IDs
        token_ids = get_texts(best_path)
        # hyps is a list of str, e.g., ['xxx yyy zzz', ...]
        hyps = bpe_model.decode(token_ids)
        # hyps is a list of list of str, e.g., [['xxx', 'yyy', 'zzz'], ... ]
        hyps = [s.split() for s in hyps]
        key = "ctc-decoding"
        return {key: hyps}
    if params.method == "nbest-oracle":
        # Note: You can also pass rescored lattices to it.
        # We choose the HLG decoded lattice for speed reasons
        # as HLG decoding is faster and the oracle WER
        # is only slightly worse than that of rescored lattices.
        best_path = nbest_oracle(
            lattice=lattice,
            num_paths=params.num_paths,
            ref_texts=supervisions["text"],
            word_table=word_table,
            nbest_scale=params.nbest_scale,
            oov="<UNK>",
        )
        hyps = get_texts(best_path)
        hyps = [[word_table[i] for i in ids] for ids in hyps]
        key = f"oracle_{params.num_paths}_nbest_scale_{params.nbest_scale}"  # noqa
        return {key: hyps}
    if params.method in ["1best", "nbest"]:
        if params.method == "1best":
            best_path = one_best_decoding(
                lattice=lattice, use_double_scores=params.use_double_scores
            )
            key = "no_rescore"
        else:
            best_path = nbest_decoding(
                lattice=lattice,
                num_paths=params.num_paths,
                use_double_scores=params.use_double_scores,
                nbest_scale=params.nbest_scale,
            )
            key = f"no_rescore-nbest-scale-{params.nbest_scale}-{params.num_paths}"  # noqa
        hyps = get_texts(best_path)
        hyps = [[word_table[i] for i in ids] for ids in hyps]
        return {key: hyps}
    assert params.method in [
        "nbest-rescoring",
        "whole-lattice-rescoring",
        "attention-decoder",
    ]
    lm_scale_list = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7]
    lm_scale_list += [0.8, 0.9, 1.0, 1.1, 1.2, 1.3]
    lm_scale_list += [1.4, 1.5, 1.6, 1.7, 1.8, 1.9, 2.0]
    if params.method == "nbest-rescoring":
        best_path_dict = rescore_with_n_best_list(
            lattice=lattice,
            G=G,
            num_paths=params.num_paths,
            lm_scale_list=lm_scale_list,
            nbest_scale=params.nbest_scale,
        )
    elif params.method == "whole-lattice-rescoring":
        best_path_dict = rescore_with_whole_lattice(
            lattice=lattice,
            G_with_epsilon_loops=G,
            lm_scale_list=lm_scale_list,
        )
    elif params.method == "attention-decoder":
        # lattice uses a 3-gram Lm. We rescore it with a 4-gram LM.
        rescored_lattice = rescore_with_whole_lattice(
            lattice=lattice,
            G_with_epsilon_loops=G,
            lm_scale_list=None,
        )
        # TODO: pass `lattice` instead of `rescored_lattice` to
        # `rescore_with_attention_decoder`
        best_path_dict = rescore_with_attention_decoder(
            lattice=rescored_lattice,
            num_paths=params.num_paths,
            model=model,
            memory=memory,
            memory_key_padding_mask=memory_key_padding_mask,
            sos_id=sos_id,
            eos_id=eos_id,
            nbest_scale=params.nbest_scale,
        )
    else:
        assert False, f"Unsupported decoding method: {params.method}"
    ans = dict()
    if best_path_dict is not None:
        for lm_scale_str, best_path in best_path_dict.items():
            hyps = get_texts(best_path)
            hyps = [[word_table[i] for i in ids] for ids in hyps]
            ans[lm_scale_str] = hyps
    else:
        # Rescoring produced nothing usable: return one empty hypothesis per
        # utterance so downstream bookkeeping stays aligned with the batch.
        # Fix: the original wrote ``ans["empty"] = [[] * lattice.shape[0]]``
        # (repeatedly, inside a loop over lm_scale_list); ``[] * n`` is
        # always ``[]``, so it yielded a single empty hypothesis regardless
        # of batch size.
        ans["empty"] = [[] for _ in range(lattice.shape[0])]
    return ans
def decode_dataset(
    dl: torch.utils.data.DataLoader,
    params: AttributeDict,
    model: nn.Module,
    HLG: Optional[k2.Fsa],
    H: Optional[k2.Fsa],
    bpe_model: Optional[spm.SentencePieceProcessor],
    word_table: k2.SymbolTable,
    sos_id: int,
    eos_id: int,
    G: Optional[k2.Fsa] = None,
) -> Dict[str, List[Tuple[List[str], List[str]]]]:
    """Decode dataset.
    Args:
      dl:
        PyTorch's dataloader containing the dataset to decode.
      params:
        It is returned by :func:`get_params`.
      model:
        The neural model.
      HLG:
        The decoding graph. Used only when params.method is NOT ctc-decoding.
      H:
        The ctc topo. Used only when params.method is ctc-decoding.
      bpe_model:
        The BPE model. Used only when params.method is ctc-decoding.
      word_table:
        It is the word symbol table.
      sos_id:
        The token ID for SOS.
      eos_id:
        The token ID for EOS.
      G:
        An LM. It is not None when params.method is "nbest-rescoring"
        or "whole-lattice-rescoring". In general, the G in HLG
        is a 3-gram LM, while this G is a 4-gram LM.
    Returns:
      Return a dict, whose key may be "no-rescore" if no LM rescoring
      is used, or it may be "lm_scale_0.7" if LM rescoring is used.
      Its value is a list of tuples. Each tuple contains two elements:
      The first is the reference transcript, and the second is the
      predicted result.
    """
    num_cuts = 0
    try:
        num_batches = len(dl)
    except TypeError:
        # The dataloader may be backed by an iterable dataset with no length.
        num_batches = "?"
    # Fix: the original also assigned ``results = []`` before this, a dead
    # assignment immediately shadowed by the defaultdict below.
    results = defaultdict(list)
    for batch_idx, batch in enumerate(dl):
        texts = batch["supervisions"]["text"]
        hyps_dict = decode_one_batch(
            params=params,
            model=model,
            HLG=HLG,
            H=H,
            bpe_model=bpe_model,
            batch=batch,
            word_table=word_table,
            G=G,
            sos_id=sos_id,
            eos_id=eos_id,
        )
        # Accumulate (reference words, hypothesis words) pairs per setting.
        for lm_scale, hyps in hyps_dict.items():
            this_batch = []
            assert len(hyps) == len(texts)
            for hyp_words, ref_text in zip(hyps, texts):
                ref_words = ref_text.split()
                this_batch.append((ref_words, hyp_words))
            results[lm_scale].extend(this_batch)
        num_cuts += len(batch["supervisions"]["text"])
        if batch_idx % 100 == 0:
            batch_str = f"{batch_idx}/{num_batches}"
            logging.info(
                f"batch {batch_str}, cuts processed until now is {num_cuts}"
            )
    return results
def save_results(
    params: AttributeDict,
    test_set_name: str,
    results_dict: Dict[str, List[Tuple[List[int], List[int]]]],
):
    """Write transcripts, per-key error stats and a WER summary to exp_dir.

    One recogs-/errs- file pair is written per decoding setting (key of
    ``results_dict``); a summary file ranks all settings by WER.
    """
    # attention-decoder produces many lm-scale variants, so suppress the
    # per-key logging to keep the logs readable.
    enable_log = params.method != "attention-decoder"

    test_set_wers = dict()
    for key, results in results_dict.items():
        recog_path = params.exp_dir / f"recogs-{test_set_name}-{key}.txt"
        store_transcripts(filename=recog_path, texts=results)
        if enable_log:
            logging.info(f"The transcripts are stored in {recog_path}")

        # The following prints out WERs, per-word error statistics and aligned
        # ref/hyp pairs.
        errs_filename = params.exp_dir / f"errs-{test_set_name}-{key}.txt"
        with open(errs_filename, "w") as f:
            test_set_wers[key] = write_error_stats(
                f, f"{test_set_name}-{key}", results, enable_log=enable_log
            )
        if enable_log:
            logging.info(
                "Wrote detailed error stats to {}".format(errs_filename)
            )

    # Rank settings from best (lowest) to worst WER.
    ranked_wers = sorted(test_set_wers.items(), key=lambda kv: kv[1])
    errs_info = params.exp_dir / f"wer-summary-{test_set_name}.txt"
    with open(errs_info, "w") as f:
        print("settings\tWER", file=f)
        for key, val in ranked_wers:
            print("{}\t{}".format(key, val), file=f)

    s = "\nFor {}, WER of different settings are:\n".format(test_set_name)
    note = "\tbest for {}".format(test_set_name)
    for key, val in ranked_wers:
        s += "{}\t{}{}\n".format(key, val, note)
        note = ""
    logging.info(s)
@torch.no_grad()
def main():
    """Entry point: parse args, build decoding graphs/LMs, load the
    (optionally averaged) checkpoint, decode the test sets and save results.
    """
    parser = get_parser()
    LibriSpeechAsrDataModule.add_arguments(parser)
    args = parser.parse_args()
    args.exp_dir = Path(args.exp_dir)
    args.lang_dir = Path(args.lang_dir)
    params = get_params()
    params.update(vars(args))
    setup_logger(f"{params.exp_dir}/log-{params.method}/log-decode")
    logging.info("Decoding started")
    logging.info(params)
    lexicon = Lexicon(params.lang_dir)
    max_token_id = max(lexicon.tokens)
    num_classes = max_token_id + 1  # +1 for the blank
    device = torch.device("cpu")
    if torch.cuda.is_available():
        device = torch.device("cuda", 0)
    logging.info(f"device: {device}")
    graph_compiler = BpeCtcTrainingGraphCompiler(
        params.lang_dir,
        device=device,
        sos_token="<sos/eos>",
        eos_token="<sos/eos>",
    )
    sos_id = graph_compiler.sos_id
    eos_id = graph_compiler.eos_id
    if params.method == "ctc-decoding":
        # CTC decoding only needs the CTC topology and the BPE model.
        HLG = None
        H = k2.ctc_topo(
            max_token=max_token_id,
            modified=False,
            device=device,
        )
        bpe_model = spm.SentencePieceProcessor()
        bpe_model.load(str(params.lang_dir / "bpe.model"))
    else:
        # All other methods decode with the precompiled HLG graph.
        H = None
        bpe_model = None
        HLG = k2.Fsa.from_dict(
            torch.load(f"{params.lang_dir}/HLG.pt", map_location="cpu")
        )
        HLG = HLG.to(device)
        assert HLG.requires_grad is False
        if not hasattr(HLG, "lm_scores"):
            HLG.lm_scores = HLG.scores.clone()
    if params.method in (
        "nbest-rescoring",
        "whole-lattice-rescoring",
        "attention-decoder",
    ):
        if not (params.lm_dir / "G_4_gram.pt").is_file():
            logging.info("Loading G_4_gram.fst.txt")
            logging.warning("It may take 8 minutes.")
            with open(params.lm_dir / "G_4_gram.fst.txt") as f:
                first_word_disambig_id = lexicon.word_table["#0"]
                G = k2.Fsa.from_openfst(f.read(), acceptor=False)
                # G.aux_labels is not needed in later computations, so
                # remove it here.
                del G.aux_labels
                # CAUTION: The following line is crucial.
                # Arcs entering the back-off state have label equal to #0.
                # We have to change it to 0 here.
                G.labels[G.labels >= first_word_disambig_id] = 0
                G = k2.Fsa.from_fsas([G]).to(device)
                G = k2.arc_sort(G)
                # Cache the compiled 4-gram LM for subsequent runs.
                torch.save(G.as_dict(), params.lm_dir / "G_4_gram.pt")
        else:
            logging.info("Loading pre-compiled G_4_gram.pt")
            d = torch.load(params.lm_dir / "G_4_gram.pt", map_location="cpu")
            G = k2.Fsa.from_dict(d).to(device)
        if params.method in ["whole-lattice-rescoring", "attention-decoder"]:
            # Add epsilon self-loops to G as we will compose
            # it with the whole lattice later
            G = k2.add_epsilon_self_loops(G)
            G = k2.arc_sort(G)
            G = G.to(device)
        # G.lm_scores is used to replace HLG.lm_scores during
        # LM rescoring.
        G.lm_scores = G.scores.clone()
    else:
        G = None
    model = Conformer(
        num_features=params.feature_dim,
        nhead=params.nhead,
        d_model=params.attention_dim,
        num_classes=num_classes,
        subsampling_factor=params.subsampling_factor,
        num_decoder_layers=params.num_decoder_layers,
        vgg_frontend=params.vgg_frontend,
        use_feat_batchnorm=params.use_feat_batchnorm,
    )
    if params.avg == 1:
        load_checkpoint(f"{params.exp_dir}/epoch-{params.epoch}.pt", model)
    else:
        start = params.epoch - params.avg + 1
        filenames = []
        for i in range(start, params.epoch + 1):
            # Fix: skip non-existent negative epochs individually.  The
            # original tested the loop-invariant ``start >= 0`` here, which
            # yields an *empty* file list (and a failed average) whenever
            # avg > epoch + 1.
            if i >= 0:
                filenames.append(f"{params.exp_dir}/epoch-{i}.pt")
        logging.info(f"averaging {filenames}")
        model.load_state_dict(average_checkpoints(filenames))
    if params.export:
        logging.info(f"Export averaged model to {params.exp_dir}/pretrained.pt")
        torch.save(
            {"model": model.state_dict()}, f"{params.exp_dir}/pretrained.pt"
        )
        return
    model.to(device)
    model.eval()
    num_param = sum([p.numel() for p in model.parameters()])
    logging.info(f"Number of model parameters: {num_param}")
    librispeech = LibriSpeechAsrDataModule(args)
    # CAUTION: `test_sets` is for displaying only.
    # If you want to skip test-clean, you have to skip
    # it inside the for loop. That is, use
    #
    #   if test_set == 'test-clean': continue
    #
    test_sets = ["test-clean", "test-other"]
    for test_set, test_dl in zip(test_sets, librispeech.test_dataloaders()):
        results_dict = decode_dataset(
            dl=test_dl,
            params=params,
            model=model,
            HLG=HLG,
            H=H,
            bpe_model=bpe_model,
            word_table=lexicon.word_table,
            G=G,
            sos_id=sos_id,
            eos_id=eos_id,
        )
        save_results(
            params=params, test_set_name=test_set, results_dict=results_dict
        )
    logging.info("Done!")
# Run torch single-threaded (presumably to avoid CPU oversubscription when
# several decoding jobs share one machine -- TODO confirm rationale).
torch.set_num_threads(1)
torch.set_num_interop_threads(1)
if __name__ == "__main__":
    main()
| 33.02446 | 91 | 0.605132 |
import argparse
import logging
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Optional, Tuple
import k2
import sentencepiece as spm
import torch
import torch.nn as nn
from asr_datamodule import LibriSpeechAsrDataModule
from conformer import Conformer
from icefall.bpe_graph_compiler import BpeCtcTrainingGraphCompiler
from icefall.checkpoint import average_checkpoints, load_checkpoint
from icefall.decode import (
get_lattice,
nbest_decoding,
nbest_oracle,
one_best_decoding,
rescore_with_attention_decoder,
rescore_with_n_best_list,
rescore_with_whole_lattice,
)
from icefall.lexicon import Lexicon
from icefall.utils import (
AttributeDict,
get_texts,
setup_logger,
store_transcripts,
str2bool,
write_error_stats,
)
def get_parser():
    """Build the command-line argument parser for this decoding script.

    Dataset-related options are added separately by
    ``LibriSpeechAsrDataModule.add_arguments`` before parsing.
    """
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )
    parser.add_argument(
        "--epoch",
        type=int,
        default=34,
        help="It specifies the checkpoint to use for decoding."
        "Note: Epoch counts from 0.",
    )
    parser.add_argument(
        "--avg",
        type=int,
        default=20,
        help="Number of checkpoints to average. Automatically select "
        "consecutive checkpoints before the checkpoint specified by "
        "'--epoch'. ",
    )
    parser.add_argument(
        "--method",
        type=str,
        default="attention-decoder",
        help="""Decoding method.
        Supported values are:
        - (0) ctc-decoding. Use CTC decoding. It uses a sentence piece
        model, i.e., lang_dir/bpe.model, to convert word pieces to words.
        It needs neither a lexicon nor an n-gram LM.
        - (1) 1best. Extract the best path from the decoding lattice as the
        decoding result.
        - (2) nbest. Extract n paths from the decoding lattice; the path
        with the highest score is the decoding result.
        - (3) nbest-rescoring. Extract n paths from the decoding lattice,
        rescore them with an n-gram LM (e.g., a 4-gram LM), the path with
        the highest score is the decoding result.
        - (4) whole-lattice-rescoring. Rescore the decoding lattice with an
        n-gram LM (e.g., a 4-gram LM), the best path of rescored lattice
        is the decoding result.
        - (5) attention-decoder. Extract n paths from the LM rescored
        lattice, the path with the highest score is the decoding result.
        - (6) nbest-oracle. Its WER is the lower bound of any n-best
        rescoring method can achieve. Useful for debugging n-best
        rescoring method.
        """,
    )
    parser.add_argument(
        "--num-paths",
        type=int,
        default=100,
        help="""Number of paths for n-best based decoding method.
        Used only when "method" is one of the following values:
        nbest, nbest-rescoring, attention-decoder, and nbest-oracle
        """,
    )
    parser.add_argument(
        "--nbest-scale",
        type=float,
        default=0.5,
        help="""The scale to be applied to `lattice.scores`.
        It's needed if you use any kinds of n-best based rescoring.
        Used only when "method" is one of the following values:
        nbest, nbest-rescoring, attention-decoder, and nbest-oracle
        A smaller value results in more unique paths.
        """,
    )
    parser.add_argument(
        "--export",
        type=str2bool,
        default=False,
        help="""When enabled, the averaged model is saved to
        conformer_ctc/exp/pretrained.pt. Note: only model.state_dict() is saved.
        pretrained.pt contains a dict {"model": model.state_dict()},
        which can be loaded by `icefall.checkpoint.load_checkpoint()`.
        """,
    )
    parser.add_argument(
        "--exp-dir",
        type=str,
        default="conformer_mmi/exp_500",
        help="The experiment dir",
    )
    parser.add_argument(
        "--lang-dir",
        type=str,
        default="data/lang_bpe_500",
        help="The lang dir",
    )
    parser.add_argument(
        "--num-decoder-layers",
        type=int,
        default=6,
        help="Number of attention decoder layers",
    )
    return parser
def get_params() -> AttributeDict:
    """Return the static (non-CLI) decoding configuration.

    The conformer entries must match the values used during training; the
    remaining entries control the FSA-based decoding search.
    """
    params = AttributeDict(
        {
            "lm_dir": Path("data/lm"),
            # parameters for conformer
            "subsampling_factor": 4,
            "vgg_frontend": False,
            "use_feat_batchnorm": True,
            "feature_dim": 80,
            "nhead": 8,
            "attention_dim": 512,
            # parameters for decoding
            "search_beam": 20,
            "output_beam": 8,
            "min_active_states": 30,
            "max_active_states": 10000,
            "use_double_scores": True,
        }
    )
    return params
def decode_one_batch(
    params: AttributeDict,
    model: nn.Module,
    HLG: Optional[k2.Fsa],
    H: Optional[k2.Fsa],
    bpe_model: Optional[spm.SentencePieceProcessor],
    batch: dict,
    word_table: k2.SymbolTable,
    sos_id: int,
    eos_id: int,
    G: Optional[k2.Fsa] = None,
) -> Dict[str, List[List[str]]]:
    """Decode one batch and return hypotheses keyed by decoding setting.

    Args:
      params: decoding configuration; ``params.method`` selects the
        algorithm (ctc-decoding, 1best, nbest, nbest-oracle,
        nbest-rescoring, whole-lattice-rescoring, attention-decoder).
      model: neural network producing log-probs (and, for
        attention-decoder rescoring, the attention decoder).
      HLG: decoding graph for all methods except "ctc-decoding".
      H: CTC topology; used only when ``params.method`` is "ctc-decoding".
      bpe_model: BPE model; used only with "ctc-decoding".
      batch: a dataloader batch with "inputs" (N, T, C) and "supervisions".
      word_table: word symbol table mapping word IDs back to words.
      sos_id: SOS token ID for the attention decoder.
      eos_id: EOS token ID for the attention decoder.
      G: n-gram LM FSA; required by the rescoring methods.

    Returns:
      Dict mapping a key (e.g. "no_rescore", "ctc-decoding", or an
      LM-scale string) to a list of word lists, one per utterance.
    """
    if HLG is not None:
        device = HLG.device
    else:
        device = H.device
    feature = batch["inputs"]
    assert feature.ndim == 3
    feature = feature.to(device)
    # at entry, feature is (N, T, C)
    supervisions = batch["supervisions"]
    nnet_output, memory, memory_key_padding_mask = model(feature, supervisions)
    # nnet_output is (N, T, C)
    supervision_segments = torch.stack(
        (
            supervisions["sequence_idx"],
            supervisions["start_frame"] // params.subsampling_factor,
            supervisions["num_frames"] // params.subsampling_factor,
        ),
        1,
    ).to(torch.int32)
    if H is None:
        assert HLG is not None
        decoding_graph = HLG
    else:
        assert HLG is None
        assert bpe_model is not None
        decoding_graph = H
    lattice = get_lattice(
        nnet_output=nnet_output,
        decoding_graph=decoding_graph,
        supervision_segments=supervision_segments,
        search_beam=params.search_beam,
        output_beam=params.output_beam,
        min_active_states=params.min_active_states,
        max_active_states=params.max_active_states,
        subsampling_factor=params.subsampling_factor,
    )
    if params.method == "ctc-decoding":
        best_path = one_best_decoding(
            lattice=lattice, use_double_scores=params.use_double_scores
        )
        # Note: `best_path.aux_labels` contains token IDs, not word IDs
        # since we are using H, not HLG here.
        #
        # token_ids is a list-of-list of IDs
        token_ids = get_texts(best_path)
        # hyps is a list of str, e.g., ['xxx yyy zzz', ...]
        hyps = bpe_model.decode(token_ids)
        # hyps is a list of list of str, e.g., [['xxx', 'yyy', 'zzz'], ... ]
        hyps = [s.split() for s in hyps]
        key = "ctc-decoding"
        return {key: hyps}
    if params.method == "nbest-oracle":
        # Note: You can also pass rescored lattices to it.
        # We choose the HLG decoded lattice for speed reasons
        # as HLG decoding is faster and the oracle WER
        # is only slightly worse than that of rescored lattices.
        best_path = nbest_oracle(
            lattice=lattice,
            num_paths=params.num_paths,
            ref_texts=supervisions["text"],
            word_table=word_table,
            nbest_scale=params.nbest_scale,
            oov="<UNK>",
        )
        hyps = get_texts(best_path)
        hyps = [[word_table[i] for i in ids] for ids in hyps]
        key = f"oracle_{params.num_paths}_nbest_scale_{params.nbest_scale}"  # noqa
        return {key: hyps}
    if params.method in ["1best", "nbest"]:
        if params.method == "1best":
            best_path = one_best_decoding(
                lattice=lattice, use_double_scores=params.use_double_scores
            )
            key = "no_rescore"
        else:
            best_path = nbest_decoding(
                lattice=lattice,
                num_paths=params.num_paths,
                use_double_scores=params.use_double_scores,
                nbest_scale=params.nbest_scale,
            )
            key = f"no_rescore-nbest-scale-{params.nbest_scale}-{params.num_paths}"  # noqa
        hyps = get_texts(best_path)
        hyps = [[word_table[i] for i in ids] for ids in hyps]
        return {key: hyps}
    assert params.method in [
        "nbest-rescoring",
        "whole-lattice-rescoring",
        "attention-decoder",
    ]
    lm_scale_list = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7]
    lm_scale_list += [0.8, 0.9, 1.0, 1.1, 1.2, 1.3]
    lm_scale_list += [1.4, 1.5, 1.6, 1.7, 1.8, 1.9, 2.0]
    if params.method == "nbest-rescoring":
        best_path_dict = rescore_with_n_best_list(
            lattice=lattice,
            G=G,
            num_paths=params.num_paths,
            lm_scale_list=lm_scale_list,
            nbest_scale=params.nbest_scale,
        )
    elif params.method == "whole-lattice-rescoring":
        best_path_dict = rescore_with_whole_lattice(
            lattice=lattice,
            G_with_epsilon_loops=G,
            lm_scale_list=lm_scale_list,
        )
    elif params.method == "attention-decoder":
        # lattice uses a 3-gram Lm. We rescore it with a 4-gram LM.
        rescored_lattice = rescore_with_whole_lattice(
            lattice=lattice,
            G_with_epsilon_loops=G,
            lm_scale_list=None,
        )
        # TODO: pass `lattice` instead of `rescored_lattice` to
        # `rescore_with_attention_decoder`
        best_path_dict = rescore_with_attention_decoder(
            lattice=rescored_lattice,
            num_paths=params.num_paths,
            model=model,
            memory=memory,
            memory_key_padding_mask=memory_key_padding_mask,
            sos_id=sos_id,
            eos_id=eos_id,
            nbest_scale=params.nbest_scale,
        )
    else:
        assert False, f"Unsupported decoding method: {params.method}"
    ans = dict()
    if best_path_dict is not None:
        for lm_scale_str, best_path in best_path_dict.items():
            hyps = get_texts(best_path)
            hyps = [[word_table[i] for i in ids] for ids in hyps]
            ans[lm_scale_str] = hyps
    else:
        # Rescoring produced no paths (e.g. the rescorer returned None).
        # Emit one empty hypothesis per utterance so downstream code that
        # zips hypotheses with reference texts keeps working.
        # BUG FIX: the previous `[[] * lattice.shape[0]]` always built a
        # single empty list ([] * n is []), regardless of batch size, and
        # the surrounding loop overwrote the same key on every iteration.
        ans["empty"] = [[] for _ in range(lattice.shape[0])]
    return ans
def decode_dataset(
    dl: torch.utils.data.DataLoader,
    params: AttributeDict,
    model: nn.Module,
    HLG: Optional[k2.Fsa],
    H: Optional[k2.Fsa],
    bpe_model: Optional[spm.SentencePieceProcessor],
    word_table: k2.SymbolTable,
    sos_id: int,
    eos_id: int,
    G: Optional[k2.Fsa] = None,
) -> Dict[str, List[Tuple[List[str], List[str]]]]:
    """Decode a whole dataset, batch by batch.

    Args mirror :func:`decode_one_batch`; ``dl`` supplies the batches.

    Returns:
      Dict mapping each decoding key (e.g. an LM scale) to a list of
      (ref_words, hyp_words) pairs accumulated over all batches.
    """
    num_cuts = 0
    try:
        num_batches = len(dl)
    except TypeError:
        # Some dataloaders (e.g. over an IterableDataset) have no length;
        # only used for progress logging below.
        num_batches = "?"
    # NOTE: the redundant `results = []` that used to precede this line was
    # dead code -- it was immediately shadowed by the defaultdict.
    results = defaultdict(list)
    for batch_idx, batch in enumerate(dl):
        texts = batch["supervisions"]["text"]
        hyps_dict = decode_one_batch(
            params=params,
            model=model,
            HLG=HLG,
            H=H,
            bpe_model=bpe_model,
            batch=batch,
            word_table=word_table,
            G=G,
            sos_id=sos_id,
            eos_id=eos_id,
        )
        for lm_scale, hyps in hyps_dict.items():
            this_batch = []
            assert len(hyps) == len(texts)
            for hyp_words, ref_text in zip(hyps, texts):
                ref_words = ref_text.split()
                this_batch.append((ref_words, hyp_words))
            results[lm_scale].extend(this_batch)
        num_cuts += len(batch["supervisions"]["text"])
        if batch_idx % 100 == 0:
            batch_str = f"{batch_idx}/{num_batches}"
            logging.info(
                f"batch {batch_str}, cuts processed until now is {num_cuts}"
            )
    return results
def save_results(
    params: AttributeDict,
    test_set_name: str,
    results_dict: Dict[str, List[Tuple[List[int], List[int]]]],
):
    """Store transcripts, per-setting error stats and a WER summary.

    For every decoding key in ``results_dict`` this writes
    ``recogs-<test_set>-<key>.txt`` and ``errs-<test_set>-<key>.txt``
    under ``params.exp_dir``, plus one ``wer-summary-<test_set>.txt``
    listing all settings sorted by WER (best first).
    """
    if params.method == "attention-decoder":
        # Set it to False since there are too many logs.
        enable_log = False
    else:
        enable_log = True
    test_set_wers = dict()
    for key, results in results_dict.items():
        recog_path = params.exp_dir / f"recogs-{test_set_name}-{key}.txt"
        store_transcripts(filename=recog_path, texts=results)
        if enable_log:
            logging.info(f"The transcripts are stored in {recog_path}")
        # The following prints out WERs, per-word error statistics and aligned
        # ref/hyp pairs.
        errs_filename = params.exp_dir / f"errs-{test_set_name}-{key}.txt"
        with open(errs_filename, "w") as f:
            wer = write_error_stats(
                f, f"{test_set_name}-{key}", results, enable_log=enable_log
            )
            test_set_wers[key] = wer
        if enable_log:
            logging.info(
                "Wrote detailed error stats to {}".format(errs_filename)
            )
    # Rebind to a list of (key, wer) pairs, best WER first.
    test_set_wers = sorted(test_set_wers.items(), key=lambda x: x[1])
    errs_info = params.exp_dir / f"wer-summary-{test_set_name}.txt"
    with open(errs_info, "w") as f:
        print("settings\tWER", file=f)
        for key, val in test_set_wers:
            print("{}\t{}".format(key, val), file=f)
    s = "\nFor {}, WER of different settings are:\n".format(test_set_name)
    note = "\tbest for {}".format(test_set_name)
    for key, val in test_set_wers:
        s += "{}\t{}{}\n".format(key, val, note)
        # Only the first (best) row carries the "best for" annotation.
        note = ""
    logging.info(s)
@torch.no_grad()
def main():
    """Entry point: build lexicon, decoding graphs, LM and model, then
    decode the LibriSpeech test sets and write results to params.exp_dir."""
    parser = get_parser()
    LibriSpeechAsrDataModule.add_arguments(parser)
    args = parser.parse_args()
    args.exp_dir = Path(args.exp_dir)
    args.lang_dir = Path(args.lang_dir)
    params = get_params()
    # CLI arguments are merged on top of the static defaults.
    params.update(vars(args))
    setup_logger(f"{params.exp_dir}/log-{params.method}/log-decode")
    logging.info("Decoding started")
    logging.info(params)
    lexicon = Lexicon(params.lang_dir)
    max_token_id = max(lexicon.tokens)
    num_classes = max_token_id + 1  # +1 for the blank
    device = torch.device("cpu")
    if torch.cuda.is_available():
        device = torch.device("cuda", 0)
    logging.info(f"device: {device}")
    graph_compiler = BpeCtcTrainingGraphCompiler(
        params.lang_dir,
        device=device,
        sos_token="<sos/eos>",
        eos_token="<sos/eos>",
    )
    sos_id = graph_compiler.sos_id
    eos_id = graph_compiler.eos_id
    # ctc-decoding uses the CTC topology H directly; every other method
    # decodes with the precompiled HLG graph.
    if params.method == "ctc-decoding":
        HLG = None
        H = k2.ctc_topo(
            max_token=max_token_id,
            modified=False,
            device=device,
        )
        bpe_model = spm.SentencePieceProcessor()
        bpe_model.load(str(params.lang_dir / "bpe.model"))
    else:
        H = None
        bpe_model = None
        HLG = k2.Fsa.from_dict(
            torch.load(f"{params.lang_dir}/HLG.pt", map_location="cpu")
        )
        HLG = HLG.to(device)
        assert HLG.requires_grad is False
        if not hasattr(HLG, "lm_scores"):
            HLG.lm_scores = HLG.scores.clone()
    # Rescoring methods additionally need the 4-gram LM G; it is compiled
    # from the OpenFST text form once and cached as G_4_gram.pt.
    if params.method in (
        "nbest-rescoring",
        "whole-lattice-rescoring",
        "attention-decoder",
    ):
        if not (params.lm_dir / "G_4_gram.pt").is_file():
            logging.info("Loading G_4_gram.fst.txt")
            logging.warning("It may take 8 minutes.")
            with open(params.lm_dir / "G_4_gram.fst.txt") as f:
                first_word_disambig_id = lexicon.word_table["#0"]
                G = k2.Fsa.from_openfst(f.read(), acceptor=False)
                # G.aux_labels is not needed in later computations, so
                # remove it here.
                del G.aux_labels
                # CAUTION: The following line is crucial.
                # Arcs entering the back-off state have label equal to #0.
                # We have to change it to 0 here.
                G.labels[G.labels >= first_word_disambig_id] = 0
                G = k2.Fsa.from_fsas([G]).to(device)
                G = k2.arc_sort(G)
                torch.save(G.as_dict(), params.lm_dir / "G_4_gram.pt")
        else:
            logging.info("Loading pre-compiled G_4_gram.pt")
            d = torch.load(params.lm_dir / "G_4_gram.pt", map_location="cpu")
            G = k2.Fsa.from_dict(d).to(device)
        if params.method in ["whole-lattice-rescoring", "attention-decoder"]:
            # Add epsilon self-loops to G as we will compose
            # it with the whole lattice later
            G = k2.add_epsilon_self_loops(G)
            G = k2.arc_sort(G)
            G = G.to(device)
        # G.lm_scores is used to replace HLG.lm_scores during
        # LM rescoring.
        G.lm_scores = G.scores.clone()
    else:
        G = None
    model = Conformer(
        num_features=params.feature_dim,
        nhead=params.nhead,
        d_model=params.attention_dim,
        num_classes=num_classes,
        subsampling_factor=params.subsampling_factor,
        num_decoder_layers=params.num_decoder_layers,
        vgg_frontend=params.vgg_frontend,
        use_feat_batchnorm=params.use_feat_batchnorm,
    )
    if params.avg == 1:
        load_checkpoint(f"{params.exp_dir}/epoch-{params.epoch}.pt", model)
    else:
        start = params.epoch - params.avg + 1
        filenames = []
        for i in range(start, params.epoch + 1):
            # NOTE(review): this condition is loop-invariant; it looks like
            # it was meant to be `i >= 0` (skip non-existent epochs). As
            # written, avg > epoch + 1 yields an empty filename list --
            # confirm intent before changing.
            if start >= 0:
                filenames.append(f"{params.exp_dir}/epoch-{i}.pt")
        logging.info(f"averaging {filenames}")
        model.load_state_dict(average_checkpoints(filenames))
    if params.export:
        logging.info(f"Export averaged model to {params.exp_dir}/pretrained.pt")
        torch.save(
            {"model": model.state_dict()}, f"{params.exp_dir}/pretrained.pt"
        )
        # Export-only mode: skip decoding entirely.
        return
    model.to(device)
    model.eval()
    num_param = sum([p.numel() for p in model.parameters()])
    logging.info(f"Number of model parameters: {num_param}")
    librispeech = LibriSpeechAsrDataModule(args)
    # CAUTION: `test_sets` is for displaying only.
    # If you want to skip test-clean, you have to skip
    # it inside the for loop. That is, use
    #
    #   if test_set == 'test-clean': continue
    #
    test_sets = ["test-clean", "test-other"]
    for test_set, test_dl in zip(test_sets, librispeech.test_dataloaders()):
        results_dict = decode_dataset(
            dl=test_dl,
            params=params,
            model=model,
            HLG=HLG,
            H=H,
            bpe_model=bpe_model,
            word_table=lexicon.word_table,
            G=G,
            sos_id=sos_id,
            eos_id=eos_id,
        )
        save_results(
            params=params, test_set_name=test_set, results_dict=results_dict
        )
    logging.info("Done!")
# Limit intra-/inter-op parallelism: decoding is dominated by k2/GPU work.
torch.set_num_threads(1)
torch.set_num_interop_threads(1)
if __name__ == "__main__":
    main()
| true | true |
79012e1fe05f97211877e7f016653b49e98150cb | 5,754 | py | Python | src/index_runner/es_indexers/sample_set.py | slebras/index_runner | 997374fdf47c58b001e1fd844b4af27f6af3d76c | [
"MIT"
] | null | null | null | src/index_runner/es_indexers/sample_set.py | slebras/index_runner | 997374fdf47c58b001e1fd844b4af27f6af3d76c | [
"MIT"
] | null | null | null | src/index_runner/es_indexers/sample_set.py | slebras/index_runner | 997374fdf47c58b001e1fd844b4af27f6af3d76c | [
"MIT"
] | null | null | null | from src.utils.config import config
import json
# import uuid
import requests
_NAMESPACE = "WS"
_VER_NAMESPACE = "WSVER"
_SAMPLE_NAMESPACE = "SMP"
# versioned and non-versioned index have same version
_SAMPLE_SET_INDEX_VERSION = 1
_SAMPLE_SET_INDEX_NAME = 'sample_set_' + str(_SAMPLE_SET_INDEX_VERSION)
_VER_SAMPLE_SET_INDEX_NAME = 'sample_set_version_' + str(_SAMPLE_SET_INDEX_VERSION)
# versioned and non-versioned index have same version
_SAMPLE_INDEX_VERSION = 1
_SAMPLE_INDEX_NAME = 'sample_' + str(_SAMPLE_INDEX_VERSION)
# _VER_SAMPLE_INDEX_NAME = 'sample_version_' + str(_SAMPLE_INDEX_VERSION)
def _get_sample(sample_info):
    """Fetch one sample from the SampleService JSON-RPC endpoint.

    sample_info -- dict with the sample 'id' and, optionally, 'version'.
    Raises RuntimeError on an HTTP failure or a service-level error.
    """
    request_params = {"id": sample_info['id']}
    if sample_info.get('version'):
        request_params['version'] = sample_info['version']
    body = json.dumps({
        "method": "SampleService.get_sample",
        "id": "",  # str(uuid.uuid4()),
        "params": [request_params],
        "version": "1.1",
    })
    resp = requests.post(
        url=config()['sample_service_url'],
        headers={"Authorization": config()['ws_token']},
        data=body,
    )
    if not resp.ok:
        raise RuntimeError(f"Returned from sample service with status {resp.status_code} - {resp.text}")
    resp_json = resp.json()
    if resp_json.get('error'):
        raise RuntimeError(f"Error from SampleService - {resp_json['error']}")
    return resp_json['result'][0]
def _flatten_meta(meta, prefix=None):
""" Flattens metadata fields in a Sample object. Fields are concatenated into a
single string field to save into an Elasticsearch index
meta - Sample Metadata to be flattened
prefix - (optional) prefix for the metadata values. default=None
"""
new_meta = {}
for key in meta:
if prefix:
val = prefix + ":"
else:
val = ""
if "value" in meta[key]:
val += str(meta[key]['value'])
if "units" in meta[key]:
val += ";" + str(meta[key]['units'])
new_meta[key] = val
return new_meta
def _combine_meta(meta, flattened_meta, idx):
""" Combine newly flattened metadata with existing metadata. This Function is designed to keep the indexing
of the different metadata fields consistent for each node within the sample node tree s.t. all the
fields in index (idx) 0 will be from item 0 in the node tree. Empty string ("") entries are Empty and
added simply so that the indexing of all fields line up.
meta - existing metadata.
flattened_meta - newly flattened metadata.
idx - current index of ndoe_tree.
"""
for key in flattened_meta:
if key in meta:
meta[key] += ["" for _ in range(idx - len(meta[key]))] + [flattened_meta[key]]
else:
meta[key] = ["" for _ in range(idx)] + [flattened_meta[key]]
return meta
def index_sample_set(obj_data, ws_info, obj_data_v1):
    """Indexer for KBaseSets.SampleSet object type.

    Yields Elasticsearch index actions: one for the sample set, one for
    its versioned counterpart, then one per member sample (fetched from
    the SampleService, with its node-tree metadata flattened).
    """
    info = obj_data['info']
    if not obj_data.get('data'):
        raise Exception("no data in object")
    data = obj_data['data']
    # Workspace object identity fields (positional in the ws info tuple).
    workspace_id = info[6]
    object_id = info[0]
    version = info[4]
    sample_set_id = f"{_NAMESPACE}::{workspace_id}:{object_id}"
    ver_sample_set_id = f"{_VER_NAMESPACE}::{workspace_id}:{object_id}:{version}"
    sample_set_index = {
        "_action": "index",
        "doc": {
            "description": data["description"],
            "sample_ids": [s['id'] for s in data['samples']],
            "sample_names": [s['name'] for s in data['samples']],
            "sample_versions": [s['version'] for s in data['samples']]
        },
        "index": _SAMPLE_SET_INDEX_NAME,
        "id": sample_set_id
    }
    yield sample_set_index
    # The versioned document is a shallow copy with index/id overridden;
    # the shared "doc" mapping is never mutated afterwards.
    ver_sample_set_index = dict(sample_set_index)
    ver_sample_set_index['index'] = _VER_SAMPLE_SET_INDEX_NAME
    ver_sample_set_index['id'] = ver_sample_set_id
    yield ver_sample_set_index
    for samp in data["samples"]:
        # query the sample service for sample
        sample = _get_sample(samp)
        sample_id = f"{_SAMPLE_NAMESPACE}::{sample['id']}:{sample['version']}"
        # not sure on how we need to handle more than 1 node.
        if len(sample['node_tree']) == 1:
            meta_controlled = _flatten_meta(
                sample['node_tree'][0]['meta_controlled']
            )
            meta_user = _flatten_meta(
                sample['node_tree'][0]['meta_user']
            )
            meta_controlled['node_id'] = sample['node_tree'][0]['id']
        else:
            # Multi-node trees: align fields across nodes as padded lists
            # (see _combine_meta); node_id ends up as the last node's id.
            meta_controlled, meta_user = {}, {}
            for idx, node in enumerate(sample['node_tree']):
                meta_controlled = _combine_meta(
                    meta_controlled,
                    _flatten_meta(
                        node['meta_controlled']
                    ),
                    idx
                )
                meta_user = _combine_meta(
                    meta_user,
                    _flatten_meta(
                        node['meta_user']
                    ),
                    idx
                )
                meta_controlled['node_id'] = node['id']
        sample_index = {
            "_action": "index",
            "doc": {
                "save_date": sample['save_date'],
                "sample_version": sample['version'],
                "name": sample['name'],
                "parent_id": sample_set_id,
                # controlled metadata wins over user metadata on key clash
                **meta_user,
                **meta_controlled
            },
            "index": _SAMPLE_INDEX_NAME,
            "id": sample_id
        }
        yield sample_index
| 36.417722 | 111 | 0.590024 | from src.utils.config import config
import json
import requests
_NAMESPACE = "WS"
_VER_NAMESPACE = "WSVER"
_SAMPLE_NAMESPACE = "SMP"
_SAMPLE_SET_INDEX_VERSION = 1
_SAMPLE_SET_INDEX_NAME = 'sample_set_' + str(_SAMPLE_SET_INDEX_VERSION)
_VER_SAMPLE_SET_INDEX_NAME = 'sample_set_version_' + str(_SAMPLE_SET_INDEX_VERSION)
_SAMPLE_INDEX_VERSION = 1
_SAMPLE_INDEX_NAME = 'sample_' + str(_SAMPLE_INDEX_VERSION)
def _get_sample(sample_info):
    """Fetch one sample from the SampleService JSON-RPC endpoint.

    sample_info -- dict with the sample 'id' and, optionally, 'version'.
    Raises RuntimeError on an HTTP failure or a service-level error.
    """
    headers = {"Authorization": config()['ws_token']}
    params = {
        "id": sample_info['id']
    }
    if sample_info.get('version'):
        params['version'] = sample_info['version']
    payload = {
        "method": "SampleService.get_sample",
        "id": "",
        "params": [params],
        "version": "1.1"
    }
    resp = requests.post(url=config()['sample_service_url'], headers=headers, data=json.dumps(payload))
    if not resp.ok:
        raise RuntimeError(f"Returned from sample service with status {resp.status_code} - {resp.text}")
    resp_json = resp.json()
    if resp_json.get('error'):
        raise RuntimeError(f"Error from SampleService - {resp_json['error']}")
    # JSON-RPC wraps the result in a one-element list.
    sample = resp_json['result'][0]
    return sample
def _flatten_meta(meta, prefix=None):
    """Flatten sample metadata into one "<prefix>:<value>;<units>" string
    per key (prefix and ";units" only when present).

    meta -- metadata mapping; each value may contain 'value' and/or 'units'.
    prefix -- (optional) string prepended with a colon. default=None
    """
    new_meta = {}
    for key in meta:
        if prefix:
            val = prefix + ":"
        else:
            val = ""
        if "value" in meta[key]:
            val += str(meta[key]['value'])
        if "units" in meta[key]:
            val += ";" + str(meta[key]['units'])
        new_meta[key] = val
    return new_meta
def _combine_meta(meta, flattened_meta, idx):
    """Merge node *idx*'s flattened metadata into *meta* as aligned lists.

    Slot i of every list holds node i's value, padded with "" for nodes
    that lacked the field. Mutates and returns *meta*.
    """
    for key in flattened_meta:
        if key in meta:
            meta[key] += ["" for _ in range(idx - len(meta[key]))] + [flattened_meta[key]]
        else:
            meta[key] = ["" for _ in range(idx)] + [flattened_meta[key]]
    return meta
def index_sample_set(obj_data, ws_info, obj_data_v1):
    """Indexer for KBaseSets.SampleSet objects.

    Yields Elasticsearch index actions: one for the sample set, one for
    its versioned counterpart, then one per member sample fetched from
    the SampleService.
    """
    info = obj_data['info']
    if not obj_data.get('data'):
        raise Exception("no data in object")
    data = obj_data['data']
    # Workspace object identity fields (positional in the ws info tuple).
    workspace_id = info[6]
    object_id = info[0]
    version = info[4]
    sample_set_id = f"{_NAMESPACE}::{workspace_id}:{object_id}"
    ver_sample_set_id = f"{_VER_NAMESPACE}::{workspace_id}:{object_id}:{version}"
    sample_set_index = {
        "_action": "index",
        "doc": {
            "description": data["description"],
            "sample_ids": [s['id'] for s in data['samples']],
            "sample_names": [s['name'] for s in data['samples']],
            "sample_versions": [s['version'] for s in data['samples']]
        },
        "index": _SAMPLE_SET_INDEX_NAME,
        "id": sample_set_id
    }
    yield sample_set_index
    # Shallow copy with index/id overridden; "doc" is shared but not mutated.
    ver_sample_set_index = dict(sample_set_index)
    ver_sample_set_index['index'] = _VER_SAMPLE_SET_INDEX_NAME
    ver_sample_set_index['id'] = ver_sample_set_id
    yield ver_sample_set_index
    for samp in data["samples"]:
        # Query the sample service for the full sample record.
        sample = _get_sample(samp)
        sample_id = f"{_SAMPLE_NAMESPACE}::{sample['id']}:{sample['version']}"
        if len(sample['node_tree']) == 1:
            meta_controlled = _flatten_meta(
                sample['node_tree'][0]['meta_controlled']
            )
            meta_user = _flatten_meta(
                sample['node_tree'][0]['meta_user']
            )
            meta_controlled['node_id'] = sample['node_tree'][0]['id']
        else:
            # Multi-node trees: align fields across nodes as padded lists
            # (see _combine_meta); node_id ends up as the last node's id.
            meta_controlled, meta_user = {}, {}
            for idx, node in enumerate(sample['node_tree']):
                meta_controlled = _combine_meta(
                    meta_controlled,
                    _flatten_meta(
                        node['meta_controlled']
                    ),
                    idx
                )
                meta_user = _combine_meta(
                    meta_user,
                    _flatten_meta(
                        node['meta_user']
                    ),
                    idx
                )
                meta_controlled['node_id'] = node['id']
        sample_index = {
            "_action": "index",
            "doc": {
                "save_date": sample['save_date'],
                "sample_version": sample['version'],
                "name": sample['name'],
                "parent_id": sample_set_id,
                # controlled metadata wins over user metadata on key clash
                **meta_user,
                **meta_controlled
            },
            "index": _SAMPLE_INDEX_NAME,
            "id": sample_id
        }
        yield sample_index
| true | true |
79012e8ba9e024bf36080d05dc89f82953d35edf | 1,068 | py | Python | oct/util/playbook.py | staebler/origin-ci-tool | 2cb86c3cad7a37450e711571ac75997118c899e5 | [
"Apache-2.0"
] | 23 | 2017-01-06T21:32:09.000Z | 2022-03-14T17:14:49.000Z | oct/util/playbook.py | staebler/origin-ci-tool | 2cb86c3cad7a37450e711571ac75997118c899e5 | [
"Apache-2.0"
] | 129 | 2017-01-06T18:29:51.000Z | 2022-01-27T17:37:21.000Z | oct/util/playbook.py | staebler/origin-ci-tool | 2cb86c3cad7a37450e711571ac75997118c899e5 | [
"Apache-2.0"
] | 52 | 2017-01-06T16:03:49.000Z | 2022-01-24T18:58:58.000Z | # coding=utf-8
"""
A utility module for working with playbooks in the `origin-ci-tool` repository.
"""
from __future__ import absolute_import, division, print_function
from os.path import abspath, dirname, exists, join
from click import ClickException
def playbook_path(playbook_name):
    """
    Resolve a playbook name to its path on disk.

    To keep playbook names short, the search covers, in order:
     - oct/playbooks
     - openshift-ansible/playbooks

    :param playbook_name: the name of the playbook
    :type playbook_name: str
    :return: the path to the playbook
    :rtype: str
    :raises ClickException: when no playbook is found
    """
    from ..oct import __file__ as root_path
    base_dir = abspath(dirname(root_path))
    for repo in ('oct', 'openshift-ansible'):
        candidate = join(base_dir, 'ansible', repo, 'playbooks', playbook_name + '.yml')
        if exists(candidate):
            return candidate
    raise ClickException('No playbook named {} found!'.format(playbook_name))
| 29.666667 | 118 | 0.707865 |
from __future__ import absolute_import, division, print_function
from os.path import abspath, dirname, exists, join
from click import ClickException
def playbook_path(playbook_name):
    """Resolve a playbook name to a path, searching oct/ then
    openshift-ansible/ playbook directories.

    :param playbook_name: the name of the playbook
    :return: the path to the playbook
    :raises ClickException: when no playbook is found
    """
    from ..oct import __file__ as root_path
    for parent_repo in ['oct', 'openshift-ansible']:
        playbook_file = join(abspath(dirname(root_path)), 'ansible', parent_repo, 'playbooks', playbook_name + '.yml')
        if exists(playbook_file):
            return playbook_file
    raise ClickException('No playbook named {} found!'.format(playbook_name))
| true | true |
79012f8da0a0040279480db03d5e4b0738c17c2c | 3,354 | py | Python | pridcon/utils.py | Mirindi95/PrIDcon | 2deaef197727e62f8a56c2ba0ebfb9594e55ad12 | [
"MIT"
] | 1 | 2021-02-11T10:46:23.000Z | 2021-02-11T10:46:23.000Z | pridcon/utils.py | Mirindi95/PrIDcon | 2deaef197727e62f8a56c2ba0ebfb9594e55ad12 | [
"MIT"
] | null | null | null | pridcon/utils.py | Mirindi95/PrIDcon | 2deaef197727e62f8a56c2ba0ebfb9594e55ad12 | [
"MIT"
] | null | null | null | def read_fasta(filename):
    """Returns a list of tuples of each header and sequence in a fasta (or multifasta) file.
    First element in each tuple is the header, second the sequence.
    Prints an error message (and returns None) when no record was parsed.

    Key Arguments:
    filename -- fasta file.
    """
    tmp_seq = None
    seqs_list = []
    with open(filename, 'r') as fasta_file:
        for line in fasta_file:
            line = line.replace('\n','')
            if '>' in line:
                # A new header: flush the previous record, if any.
                if tmp_seq != None:
                    seqs_list.append((hd, tmp_seq))
                tmp_seq = ''
                hd = line.replace('>','')
            else:
                tmp_seq += line
    # NOTE(review): if the file is empty or has no '>' header, `hd` is
    # unbound here and this raises NameError instead of the intended
    # "not a Fasta file" message -- confirm and guard.
    seqs_list.append((hd, tmp_seq))
    try:
        assert len(seqs_list) > 0
    except AssertionError:
        print('The selected file is not a Fasta file.')
    else:
        return seqs_list
def write_fasta(outfile, seq_dict):
    """Write sequences to *outfile* in FASTA format, wrapped at 70 chars.

    Key Arguments:
    outfile -- output file path.
    seq_dict -- mapping of header -> sequence.
    """
    width = 70
    with open(outfile, 'w') as handle:
        for header, sequence in seq_dict.items():
            chunks = [sequence[pos:pos + width] for pos in range(0, len(sequence), width)]
            handle.write('>' + header + '\n' + '\n'.join(chunks) + '\n')
def reads_generator(fasta_file, read_length, k):
    """Simulate reads from the first record of a FASTA file.

    Consecutive reads overlap by k - 1 bases, giving a coverage of at
    least the requested overlap. Returns a list of (read_id, sequence)
    tuples where read_id is '<header>_<index>'.

    Key Arguments:
    fasta_file -- fasta file.
    read_length -- size of reads.
    k -- k-mer size; reads overlap by k - 1.
    """
    overlap = k - 1
    header, sequence = read_fasta(fasta_file)[0]
    step = read_length - overlap
    reads = [sequence[start: start + read_length]
             for start in range(0, len(sequence) - overlap, step)]
    return [('{}_{}'.format(header, idx), seq) for idx, seq in enumerate(reads)]
def write_fastq(reads_list, filename):
    """Write reads as a FASTQ file with maximal ('I') quality scores.

    Key Arguments:
    reads_list -- iterable of (read_id, sequence) tuples.
    filename -- name of output file WITH EXTENSION.
    """
    with open(filename, 'w') as out:
        records = ['@{}\n{}\n+\n{}\n'.format(rid, seq, 'I' * len(seq))
                   for rid, seq in reads_list]
        out.write(''.join(records))
def read_fastq(filename):
    """Parse a FASTQ file into a dict mapping read ID -> sequence.

    The '+' separator and quality lines are consumed but not stored.

    Key Arguments:
    filename -- name of FASTQ input file.
    """
    reads_dict = dict()
    with open(filename, 'r') as fastq_file:
        for line in fastq_file:
            # A record header STARTS with '@'. Testing with startswith()
            # (instead of the previous "'@' in line") avoids misreading a
            # quality line that merely contains an '@' as a new record.
            if line.startswith('@'):
                reads_dict[line[1:].replace('\n', '')] = next(
                    fastq_file).replace('\n', '')
                next(fastq_file)  # skip the '+' separator line
                next(fastq_file)  # skip the quality line
    return reads_dict
def read_fasta(filename):
    """Return a list of (header, sequence) tuples from a (multi-)FASTA file.

    Prints an error message and returns None when the file contains no
    FASTA records (matching the original "not a Fasta file" behaviour,
    but without the NameError the old code raised on empty/headerless
    input because it appended an unbound header unconditionally).

    Key Arguments:
    filename -- fasta file.
    """
    header = None
    seq_parts = []
    records = []
    with open(filename, 'r') as fasta_file:
        for line in fasta_file:
            line = line.rstrip('\n')
            if line.startswith('>'):
                # New record: flush the previous one, if any.
                if header is not None:
                    records.append((header, ''.join(seq_parts)))
                header = line[1:]
                seq_parts = []
            elif header is not None:
                seq_parts.append(line)
    if header is not None:
        records.append((header, ''.join(seq_parts)))
    if not records:
        print('The selected file is not a Fasta file.')
        return None
    return records
def write_fasta(outfile, seq_dict):
    """Write sequences to *outfile* in FASTA format, wrapped at 70 chars.

    outfile -- output file path.
    seq_dict -- mapping of header -> sequence.
    """
    step = 70
    with open(outfile, 'w') as file:
        for header, sequence in seq_dict.items():
            # Full-width chunks first ...
            sequence_list = [sequence[i - step: i] for i in range(step, len(sequence) + 1, step)]
            # ... then the remainder, if the length is not a multiple of 70.
            last = sequence[step * (len(sequence) // step):]
            if last != '':
                sequence_list.append(last)
            sequence = '\n'.join(sequence_list)
            file.write('>' + header + '\n' + sequence + '\n')
def reads_generator(fasta_file, read_length, k):
    """Simulate reads (overlapping by k - 1 bases) from the first record
    of a FASTA file; returns [(read_id, sequence), ...] with read_id
    formatted as '<header>_<index>'.
    """
    reads_list = []
    overlap = k - 1
    input_header, input_seq = read_fasta(fasta_file)[0]
    n = len(input_seq)
    for i in range(0, n - overlap, read_length - overlap):
        read_seq = input_seq[i: i + read_length]
        reads_list.append(read_seq)
    return [('{}_{}'.format(input_header, i), read) for i, read in enumerate(reads_list)]
def write_fastq(reads_list, filename):
    """Write (read_id, sequence) tuples to *filename* as FASTQ records,
    using the maximum quality score ('I') for every base."""
    with open(filename, 'w') as fastq_file:
        for read_id, read in reads_list:
            fastq_file.write('@{}\n'.format(read_id))
            fastq_file.write(read + '\n')
            fastq_file.write('+\n')
            fastq_file.write('I' * len(read) + '\n')
def read_fastq(filename):
    """Parse a FASTQ file into {read_id: sequence}; '+' and quality lines
    are consumed but not stored.

    NOTE(review): the `'@' in line` test also matches quality lines that
    happen to contain an '@'; `line.startswith('@')` would be safer.
    """
    reads_dict = dict()
    with open(filename, 'r') as fastq_file:
        for line in fastq_file:
            if '@' in line:
                reads_dict[line[1:].replace('\n', '')] = next(
                    fastq_file).replace('\n', '')
                next(fastq_file)
                next(fastq_file)
    return reads_dict
| true | true |
790131cd9d7c076bd447ebb36385a3eb1d0961be | 1,519 | py | Python | train-app/helper_functions.py | pivotal-legacy/moves | c946a66b35af355353b648f3736ceee46c00edeb | [
"Apache-2.0"
] | 4 | 2018-04-27T14:30:15.000Z | 2020-01-15T01:54:21.000Z | train-app/helper_functions.py | pivotal-legacy/moves | c946a66b35af355353b648f3736ceee46c00edeb | [
"Apache-2.0"
] | null | null | null | train-app/helper_functions.py | pivotal-legacy/moves | c946a66b35af355353b648f3736ceee46c00edeb | [
"Apache-2.0"
] | null | null | null | import json
import os
import pandas
import redis
import types
def json2redis(data, r):
    """Push row dict(s) onto per-channel Redis lists.

    The list key is 'channel_<channel>_<data_type>', built from each
    row's own fields.

    data -- a dict with 'channel' and 'data_type' keys, or a list of such
            dicts.
    r -- a Redis client (anything exposing lpush(key, value)).
    """
    # isinstance(data, list) replaces the Python-2-only types.ListType
    # (removed in Python 3) and lets one loop handle both shapes.
    rows = data if isinstance(data, list) else [data]
    for row in rows:
        rkey = 'channel_{}_{}'.format(row['channel'], row['data_type'])
        r.lpush(rkey, row)
# initialize redis connection for local, MicroPCF and Cloud Foundry deployment
def connect_redis_db(redis_service_name = None):
    """Return a StrictRedis client configured from the environment.

    Selection order: MicroPCF env vars (NODE_ENV == 'micropcf'), then a
    local default (no VCAP_SERVICES present), then Cloud Foundry service
    credentials parsed from VCAP_SERVICES[redis_service_name].
    """
    if os.getenv('NODE_ENV') == 'micropcf':
        DB_HOST = os.getenv('REDIS_HOST')
        DB_PORT = os.getenv('REDIS_PORT')
        DB_PW = os.getenv('REDIS_PASSWORD')
        REDIS_DB = 0
    elif os.environ.get('VCAP_SERVICES') is None: # running locally
        DB_HOST = 'localhost'
        DB_PORT = 6379
        DB_PW = ''
        REDIS_DB = 1
    else: # running on CF
        env_vars = os.environ['VCAP_SERVICES']
        rediscloud_service = json.loads(env_vars)[redis_service_name][0]
        credentials = rediscloud_service['credentials']
        DB_HOST = credentials['host']
        DB_PORT = credentials['port']
        # NOTE(review): chained assignment also binds a stray local
        # `password`; harmless, but probably meant to be a plain assign.
        DB_PW = password=credentials['password']
        REDIS_DB = 0
    return redis.StrictRedis(host=DB_HOST,
                             port=DB_PORT,
                             password=DB_PW,
                             db=REDIS_DB)
| 29.784314 | 72 | 0.574062 | import json
import os
import pandas
import redis
import types
def json2redis(data,r):
    """Push row dict(s) onto per-channel Redis lists keyed
    'channel_<channel>_<data_type>'.

    NOTE(review): types.ListType is Python-2-only; isinstance(data, list)
    would work on both major versions.
    """
    if isinstance(data, types.ListType):
        for row in data:
            channel = row['channel']
            data_type = row['data_type']
            rkey = 'channel_{}_{}'.format(channel,data_type)
            r.lpush(rkey,row)
    else:
        channel = data['channel']
        data_type = data['data_type']
        rkey = 'channel_{}_{}'.format(channel,data_type)
        r.lpush(rkey,data)
def connect_redis_db(redis_service_name = None):
    """Return a StrictRedis client configured from the environment
    (MicroPCF env vars, local defaults, or Cloud Foundry VCAP_SERVICES)."""
    if os.getenv('NODE_ENV') == 'micropcf':
        DB_HOST = os.getenv('REDIS_HOST')
        DB_PORT = os.getenv('REDIS_PORT')
        DB_PW = os.getenv('REDIS_PASSWORD')
        REDIS_DB = 0
    elif os.environ.get('VCAP_SERVICES') is None:
        # Running locally: default dev instance.
        DB_HOST = 'localhost'
        DB_PORT = 6379
        DB_PW = ''
        REDIS_DB = 1
    else:
        # Running on Cloud Foundry: parse the bound service credentials.
        env_vars = os.environ['VCAP_SERVICES']
        rediscloud_service = json.loads(env_vars)[redis_service_name][0]
        credentials = rediscloud_service['credentials']
        DB_HOST = credentials['host']
        DB_PORT = credentials['port']
        # NOTE(review): chained assignment also binds a stray `password`.
        DB_PW = password=credentials['password']
        REDIS_DB = 0
    return redis.StrictRedis(host=DB_HOST,
                             port=DB_PORT,
                             password=DB_PW,
                             db=REDIS_DB)
| true | true |
790132aad9c447a13fd54c300127bc54b57c25a8 | 1,464 | py | Python | detectron/lib/python3.6/site-packages/caffe2/python/onnx/onnxifi.py | JustinBear99/Mask_RCNN | d43eaf7c6ebf29d4d6da796a0f7ff5561e21acff | [
"Apache-2.0"
] | null | null | null | detectron/lib/python3.6/site-packages/caffe2/python/onnx/onnxifi.py | JustinBear99/Mask_RCNN | d43eaf7c6ebf29d4d6da796a0f7ff5561e21acff | [
"Apache-2.0"
] | null | null | null | detectron/lib/python3.6/site-packages/caffe2/python/onnx/onnxifi.py | JustinBear99/Mask_RCNN | d43eaf7c6ebf29d4d6da796a0f7ff5561e21acff | [
"Apache-2.0"
] | null | null | null | ## @package onnx
#Module caffe2.python.onnx.onnxifi
"""
ONNXIFI a Caffe2 net
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.proto import caffe2_pb2
from caffe2.python import core, workspace
import caffe2.python._import_c_extension as C
import numpy as np
def onnxifi_caffe2_net(
    pred_net,
    input_shapes,
    max_batch_size=1,
    max_seq_size=1,
    debug=False,
    use_onnx=True,
    merge_fp32_inputs_into_fp16=False,
    adjust_batch=True,
    black_list=None,
    weight_names=None):
    """
    Transform a Caffe2 net by collapsing ONNXIFI-runnable nodes into
    Onnxifi Caffe2 ops, returning the rewritten NetDef.
    """
    shape_hints = dict(input_shapes)
    transformed = C.onnxifi(
        pred_net.SerializeToString(),
        shape_hints,
        black_list if black_list else [],
        weight_names if weight_names is not None else [],
        max_batch_size,
        max_seq_size,
        adjust_batch,
        debug,
        merge_fp32_inputs_into_fp16,
        use_onnx)
    result = caffe2_pb2.NetDef()
    result.ParseFromString(transformed)
    return result
| 29.877551 | 85 | 0.602459 | re__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.proto import caffe2_pb2
from caffe2.python import core, workspace
import caffe2.python._import_c_extension as C
import numpy as np
def onnxifi_caffe2_net(
        pred_net,
        input_shapes,
        max_batch_size=1,
        max_seq_size=1,
        debug=False,
        use_onnx=True,
        merge_fp32_inputs_into_fp16=False,
        adjust_batch=True,
        black_list=None,
        weight_names=None):
    """Transform a Caffe2 net by collapsing ONNXIFI-runnable nodes into
    Onnxifi Caffe2 ops; returns the rewritten NetDef."""
    # Copy the shape hints into a plain dict for the C extension.
    shape_hints = {}
    for k, v in input_shapes.items():
        shape_hints[k] = v
    pred_net_str = C.onnxifi(pred_net.SerializeToString(),
                             shape_hints,
                             black_list if black_list else [],
                             weight_names if weight_names is not None else [],
                             max_batch_size,
                             max_seq_size,
                             adjust_batch,
                             debug,
                             merge_fp32_inputs_into_fp16,
                             use_onnx)
    pred_net_cut = caffe2_pb2.NetDef()
    pred_net_cut.ParseFromString(pred_net_str)
    return pred_net_cut
| true | true |
79013348c6a77e28ea1c7c9c2338a7ab1db8253a | 2,952 | py | Python | python/evaluate_division.py | shub0/leetcode | 8221d10f201d001abcb15b27c9cf4b8cd5060f1f | [
"BSD-3-Clause"
] | null | null | null | python/evaluate_division.py | shub0/leetcode | 8221d10f201d001abcb15b27c9cf4b8cd5060f1f | [
"BSD-3-Clause"
] | null | null | null | python/evaluate_division.py | shub0/leetcode | 8221d10f201d001abcb15b27c9cf4b8cd5060f1f | [
"BSD-3-Clause"
] | null | null | null | '''
Equations are given in the format A / B = k, where A and B are variables represented as strings, and k is a real number (floating point number). Given some queries, return the answers. If the answer does not exist, return -1.0.
Example:
Given a / b = 2.0, b / c = 3.0.
queries are: a / c = ?, b / a = ?, a / e = ?, a / a = ?, x / x = ? .
return [6.0, 0.5, -1.0, 1.0, -1.0 ].
The input is: vector<pair<string, string>> equations, vector<double>& values, vector<pair<string, string>> queries , where equations.size() == values.size(), and the values are positive. This represents the equations. Return vector<double>.
According to the example above:
equations = [ ["a", "b"], ["b", "c"] ],
values = [2.0, 3.0],
queries = [ ["a", "c"], ["b", "a"], ["a", "e"], ["a", "a"], ["x", "x"] ].
The input is always valid. You may assume that evaluating the queries will result in no division by zero and there is no contradiction.
'''
class Solution(object):
    """Answer division queries from a set of ratio equations.

    Each equation ``a / b = k`` contributes two directed edges to a weighted
    graph: ``a -> b`` with weight ``k`` and ``b -> a`` with weight ``1 / k``.
    A query ``x / y`` is the product of edge weights along any path from
    ``x`` to ``y`` (the input is assumed contradiction-free), or -1.0 when
    no such path exists.
    """
    def buildGraph(self, edges, vals):
        """Build the bidirectional weighted adjacency map.

        :type edges: List[List[str]]
        :type vals: List[float]
        :rtype: dict mapping node -> {neighbor: edge weight}
        """
        import collections
        graph = collections.defaultdict(dict)
        for index, val in enumerate(vals):
            start, end = edges[index][0], edges[index][1]
            graph[start][end] = val
            graph[end][start] = 1 / val
        return graph
    def insert(self, start, end, val):
        """Cache a derived ratio ``start / end = val`` and its reciprocal."""
        self.graph[start][end] = val
        self.graph[end][start] = 1 / val
    def search(self, start, end):
        """Return the ratio ``start / end`` if ``end`` is reachable, else -1.0."""
        # visited maps node -> accumulated ratio start / node.
        visited = {start: 1.0}
        frontier = {start}
        while frontier and end not in visited:
            src = frontier.pop()
            for dest, weight in self.graph[src].items():
                if dest not in visited:
                    frontier.add(dest)
                    visited[dest] = visited[src] * weight
        return visited.get(end, -1.0)
    def calcEquation(self, equations, values, queries):
        """
        :type equations: List[List[str]]
        :type values: List[float]
        :type queries: List[List[str]]
        :rtype: List[float]
        """
        self.graph = self.buildGraph(equations, values)
        output = []
        for start, end in queries:
            # Unknown variables can never be resolved.
            if start not in self.graph or end not in self.graph:
                output.append(-1.0)
                continue
            val = self.search(start, end)
            if val > 0:
                output.append(val)
                # Memoize the derived ratio to shortcut later queries.
                self.insert(start, end, val)
            else:
                output.append(-1.0)
        return output
# Ad-hoc smoke test over a chain of ratios x1/x2=3, x2/x3=4, x3/x4=5, x4/x5=6.
solution = Solution()
equations = [["x1","x2"],["x2","x3"],["x3","x4"],["x4","x5"]]
values = [3.0,4.0,5.0,6.0]
queries = [["x1","x5"],["x5","x2"],["x2","x4"],["x2","x2"],["x2","x9"],["x9","x9"]]
# Python 3 compatibility: print is a function (the old "print expr" statement
# is a SyntaxError under Python 3).
print(solution.calcEquation(equations, values, queries))
| 35.566265 | 240 | 0.542005 | '''
Equations are given in the format A / B = k, where A and B are variables represented as strings, and k is a real number (floating point number). Given some queries, return the answers. If the answer does not exist, return -1.0.
Example:
Given a / b = 2.0, b / c = 3.0.
queries are: a / c = ?, b / a = ?, a / e = ?, a / a = ?, x / x = ? .
return [6.0, 0.5, -1.0, 1.0, -1.0 ].
The input is: vector<pair<string, string>> equations, vector<double>& values, vector<pair<string, string>> queries , where equations.size() == values.size(), and the values are positive. This represents the equations. Return vector<double>.
According to the example above:
equations = [ ["a", "b"], ["b", "c"] ],
values = [2.0, 3.0],
queries = [ ["a", "c"], ["b", "a"], ["a", "e"], ["a", "a"], ["x", "x"] ].
The input is always valid. You may assume that evaluating the queries will result in no division by zero and there is no contradiction.
'''
class Solution(object):
def buildGraph(self, edges, vals):
"""
:type edge: List[[str, str]]
:type vals: List[Double]
:rtype: dict[dict]
"""
import collections
graph = collections.defaultdict(dict)
for index, val in enumerate(vals):
start = edges[index][0]
end = edges[index][1]
graph[start][end] = val
graph[end][start] = 1 / val
return graph
def insert(self, start, end, val):
self.graph[start][end] = val
self.graph[end][start] = 1 / val
def search(self, start, end):
val = 1.0
visited = dict()
size = len(self.graph)
mark = set()
mark.add(start)
visited[start] = 1.0
while (len(mark) > 0) and (end not in visited):
src = mark.pop()
for (dest, val) in self.graph[src].items():
if dest not in visited:
mark.add(dest)
visited[dest] = visited[src] * val
return visited.get(end, -1.0)
def calcEquation(self, equations, values, queries):
"""
:type equations: List[List[str]]
:type values: List[float]
:type queries: List[List[str]]
:rtype: List[float]
"""
self.graph = self.buildGraph(equations, values)
output = list()
for (start, end) in queries:
if start not in self.graph or end not in self.graph:
output.append(-1.0)
continue
val = self.search(start, end)
if val > 0:
output.append(val)
self.insert(start, end, val)
else:
output.append(-1.0)
return output
solution = Solution()
equations = [["x1","x2"],["x2","x3"],["x3","x4"],["x4","x5"]]
values = [3.0,4.0,5.0,6.0]
queries = [["x1","x5"],["x5","x2"],["x2","x4"],["x2","x2"],["x2","x9"],["x9","x9"]]
print solution.calcEquation(equations, values, queries)
| false | true |
79013424f233c162748f93eb9247598f7aa19885 | 934 | py | Python | human_services/locations/serializers.py | DarwishMenna/pathways-backend | e9825e0373c586ce8f07ee8b70aecc7de679fb41 | [
"BSD-3-Clause"
] | null | null | null | human_services/locations/serializers.py | DarwishMenna/pathways-backend | e9825e0373c586ce8f07ee8b70aecc7de679fb41 | [
"BSD-3-Clause"
] | null | null | null | human_services/locations/serializers.py | DarwishMenna/pathways-backend | e9825e0373c586ce8f07ee8b70aecc7de679fb41 | [
"BSD-3-Clause"
] | null | null | null | from rest_framework import serializers
from human_services.locations import models
from human_services.addresses.serializers import AddressSerializer
from human_services.phone_at_location.serializers import PhoneAtLocationSerializer
class LocationAddressSerializer(serializers.ModelSerializer):
    """Serializes a LocationAddress link row with its nested Address record."""
    address = AddressSerializer()
    class Meta:
        # Exposes the relation type together with the full nested address.
        model = models.LocationAddress
        fields = ('address_type', 'address')
class LocationSerializer(serializers.ModelSerializer):
    """Serializes a Location with flattened point coordinates and nested
    addresses / phone numbers."""
    # NOTE(review): point.x is exposed as latitude and point.y as longitude;
    # GeoDjango points are conventionally (lon, lat) - confirm the orientation.
    latitude = serializers.ReadOnlyField(source='point.x')
    longitude = serializers.ReadOnlyField(source='point.y')
    addresses = LocationAddressSerializer(source='location_addresses', many=True)
    phone_numbers = PhoneAtLocationSerializer(many=True)
    class Meta:
        model = models.Location
        fields = ('id', 'name', 'organization_id', 'latitude',
                  'longitude', 'description', 'addresses', 'phone_numbers')
| 38.916667 | 82 | 0.756959 | from rest_framework import serializers
from human_services.locations import models
from human_services.addresses.serializers import AddressSerializer
from human_services.phone_at_location.serializers import PhoneAtLocationSerializer
class LocationAddressSerializer(serializers.ModelSerializer):
address = AddressSerializer()
class Meta:
model = models.LocationAddress
fields = ('address_type', 'address')
class LocationSerializer(serializers.ModelSerializer):
latitude = serializers.ReadOnlyField(source='point.x')
longitude = serializers.ReadOnlyField(source='point.y')
addresses = LocationAddressSerializer(source='location_addresses', many=True)
phone_numbers = PhoneAtLocationSerializer(many=True)
class Meta:
model = models.Location
fields = ('id', 'name', 'organization_id', 'latitude',
'longitude', 'description', 'addresses', 'phone_numbers')
| true | true |
790134f9806150a2e7d8202fa607f56425581494 | 2,714 | py | Python | services/pv_simulator/main.py | reynierg/pv_simulator_challenge | ab7903a3ef60e92a0ff96122d24409cd1d399b2a | [
"MIT"
] | null | null | null | services/pv_simulator/main.py | reynierg/pv_simulator_challenge | ab7903a3ef60e92a0ff96122d24409cd1d399b2a | [
"MIT"
] | null | null | null | services/pv_simulator/main.py | reynierg/pv_simulator_challenge | ab7903a3ef60e92a0ff96122d24409cd1d399b2a | [
"MIT"
] | null | null | null | import logging
import os
from pathlib import Path
import typing
from logging.handlers import RotatingFileHandler
from dotenv import load_dotenv
import services.pv_simulator.constants as constants
from services.pv_simulator.main_loop import MainLoop
from services.pv_simulator.mq_receiver import MQReceiver, MQReceiverFactory
from services.pv_simulator.pv_power_value_calculator import PVPowerValueCalculator
from services.pv_simulator.typing_custom_protocols import (
MQReceiverProtocol,
PVPowerValueCalculatorProtocol
)
from services.pv_simulator import utils
# Directory containing this module; its .env file is loaded up front so the
# environment is populated before any configuration is read.
current_dir_path: Path = Path(__file__).parent.absolute()
load_dotenv(dotenv_path=f"{current_dir_path}/.env")
def get_test_modules_names() -> typing.List[str]:
    """Return the unit-test module names (the TESTS_MODULES constant).

    The import is done at function scope so the tests package is only loaded
    when this provider is actually invoked.
    """
    from services.pv_simulator.tests.unit import constants_for_tests
    return constants_for_tests.TESTS_MODULES
def get_mq_receiver(callback: typing.Callable[[], bool]) -> MQReceiverProtocol:
    """Build the MQ receiver selected by constants.MQ_RECEIVER_TYPE.

    ``callback`` is forwarded as ``check_if_must_exit``; presumably it returns
    True when the receive loop must stop - see MQReceiverFactory to confirm.
    """
    return MQReceiverFactory.get_mq_receiver(constants.MQ_RECEIVER_TYPE, check_if_must_exit=callback)
def get_pv_power_value_calculator() -> PVPowerValueCalculatorProtocol:
    """Build the PV power calculator from the constant minute/power data sets."""
    return PVPowerValueCalculator(constants.MINUTES_DATA_SET, constants.PV_POWER_VALUES_DATA_SET)
def _log_failure(logger: typing.Optional[logging.Logger], message: str) -> None:
    """Report the currently-handled exception.

    Uses ``logger.exception`` when logging has been initialized, otherwise
    falls back to printing the traceback to stderr.
    """
    if logger is not None:
        logger.exception(message)
    else:
        import traceback
        traceback.print_exc()


def main(sys_argv: typing.List[str]) -> None:
    """PV simulator execution entry point.

    Parameters
    ----------
    sys_argv : list
        contains the list of arguments passed to the CLI during its execution. The first argument contains the
        executed script name.
    """
    main_logger: typing.Optional[logging.Logger] = None
    try:
        # MUST_EXIT_AFTER_24H=1 (any decimal string equal to 1) enables the
        # 24-hour auto-exit; anything else disables it.
        raw_flag = os.getenv("MUST_EXIT_AFTER_24H", "0")
        must_exit_after_24h = raw_flag.isdecimal() and int(raw_flag) == 1

        main_logger = utils.initialize_loggers(current_dir_path)
        main_loop: MainLoop = MainLoop(constants.LOGGER_NAME,
                                       constants.RESULTS_LOGGER_NAME,
                                       current_dir_path,
                                       must_exit_after_24h,
                                       get_mq_receiver,
                                       get_pv_power_value_calculator,
                                       tests_modules_names_provider=get_test_modules_names)
        main_loop.handle_arguments(sys_argv)
    except KeyboardInterrupt:
        _log_failure(main_logger, "Required to abort:")
    except Exception:
        _log_failure(main_logger, "Error:")
import os
from pathlib import Path
import typing
from logging.handlers import RotatingFileHandler
from dotenv import load_dotenv
import services.pv_simulator.constants as constants
from services.pv_simulator.main_loop import MainLoop
from services.pv_simulator.mq_receiver import MQReceiver, MQReceiverFactory
from services.pv_simulator.pv_power_value_calculator import PVPowerValueCalculator
from services.pv_simulator.typing_custom_protocols import (
MQReceiverProtocol,
PVPowerValueCalculatorProtocol
)
from services.pv_simulator import utils
current_dir_path: Path = Path(__file__).parent.absolute()
load_dotenv(dotenv_path=f"{current_dir_path}/.env")
def get_test_modules_names() -> typing.List[str]:
from services.pv_simulator.tests.unit import constants_for_tests
return constants_for_tests.TESTS_MODULES
def get_mq_receiver(callback: typing.Callable[[], bool]) -> MQReceiverProtocol:
return MQReceiverFactory.get_mq_receiver(constants.MQ_RECEIVER_TYPE, check_if_must_exit=callback)
def get_pv_power_value_calculator() -> PVPowerValueCalculatorProtocol:
return PVPowerValueCalculator(constants.MINUTES_DATA_SET, constants.PV_POWER_VALUES_DATA_SET)
def main(sys_argv: typing.List[str]) -> None:
main_logger: typing.Optional[logging.Logger] = None
try:
must_exit_after_24h = os.getenv("MUST_EXIT_AFTER_24H", "0")
must_exit_after_24h = \
True if must_exit_after_24h.isdecimal() and int(must_exit_after_24h) == 1 else False
main_logger = utils.initialize_loggers(current_dir_path)
main_loop: MainLoop = MainLoop(constants.LOGGER_NAME,
constants.RESULTS_LOGGER_NAME,
current_dir_path,
must_exit_after_24h,
get_mq_receiver,
get_pv_power_value_calculator,
tests_modules_names_provider=get_test_modules_names)
main_loop.handle_arguments(sys_argv)
except KeyboardInterrupt:
if main_logger is not None:
main_logger.exception("Required to abort:")
else:
import traceback
traceback.print_exc()
except Exception:
if main_logger is not None:
main_logger.exception("Error:")
else:
import traceback
traceback.print_exc()
| true | true |
790135152f1349225398169e57cf2612a9d3d016 | 486 | py | Python | build/chrome_extension_version.py | meacer/deasciifier | 213abe2aceaace673a0c78b9c746513bb75dd94f | [
"MIT"
] | 80 | 2015-12-18T03:38:13.000Z | 2022-03-14T12:14:20.000Z | build/chrome_extension_version.py | meacer/deasciifier | 213abe2aceaace673a0c78b9c746513bb75dd94f | [
"MIT"
] | 11 | 2016-01-09T09:28:23.000Z | 2021-07-09T00:44:26.000Z | build/chrome_extension_version.py | meacer/deasciifier | 213abe2aceaace673a0c78b9c746513bb75dd94f | [
"MIT"
] | 15 | 2015-09-18T11:23:03.000Z | 2021-06-07T12:52:18.000Z | """
Reads the version information from the manifest of the Chrome extension.
Author: Mustafa Emre Acer
"""
import json
import sys
def ReadChromeExtensionVersion(manifest_path):
  """Print the "version" field of the Chrome extension manifest at manifest_path.

  Raises OSError if the file cannot be opened, ValueError for invalid JSON
  and KeyError when the manifest has no "version" entry.
  """
  # Chrome manifests are JSON, which is UTF-8; don't depend on the platform
  # default encoding.
  with open(manifest_path, encoding='utf-8') as manifest_file:
    manifest = json.load(manifest_file)
  print(manifest['version'])
# CLI entry point: expects the manifest path as the single argument.
if __name__ == "__main__":
  if len(sys.argv) < 2:
    print('\nUsage: chrome_extension_version.py <manifest_path>\n')
    exit(-1)
  ReadChromeExtensionVersion(sys.argv[1])
| 22.090909 | 74 | 0.726337 |
import json
import sys
def ReadChromeExtensionVersion(manifest_path):
with open(manifest_path) as manifest_file:
manifest = json.load(manifest_file)
print(manifest['version'])
if __name__ == "__main__":
if len(sys.argv) > 1:
ReadChromeExtensionVersion(sys.argv[1])
else:
print('\nUsage: chrome_extension_version.py <manifest_path>\n')
exit(-1)
| true | true |
790135935c28da7cda36cc78a1ae42b690316fb1 | 613 | py | Python | backend/app/database.py | lupinthe14th/ptodo | 6851f834936d944cf3f4759eb84863c695bd45c1 | [
"MIT"
] | null | null | null | backend/app/database.py | lupinthe14th/ptodo | 6851f834936d944cf3f4759eb84863c695bd45c1 | [
"MIT"
] | 2 | 2021-08-16T08:07:45.000Z | 2021-08-16T12:14:22.000Z | backend/app/database.py | lupinthe14th/ptodo | 6851f834936d944cf3f4759eb84863c695bd45c1 | [
"MIT"
] | null | null | null | import os
from databases import Database
from sqlalchemy import MetaData, create_engine
# Connection string: prefer a complete DATABASE_URL; otherwise assemble the
# DSN "<scheme>://<user>:<password>@<host>:<port>/<name>" from the individual
# DB_* environment variables (DATABASE presumably holds the scheme/driver -
# confirm against the deployment configuration).
SQLALCHEMY_DATABASE_URL = (
    os.environ.get("DATABASE_URL")
    or '{}://{}:{}@{}:{}/{}'.format(
        os.environ.get("DATABASE"),
        os.environ.get("DB_USERNAME"),
        os.environ.get("DB_PASSWORD"),
        os.environ.get("DB_HOST"),
        os.environ.get("DB_PORT"),
        os.environ.get("DB_NAME"),
    )
)
# Async database client (encode/databases) with explicit pool bounds and
# SSL disabled.
database = Database(
    SQLALCHEMY_DATABASE_URL,
    ssl=False,
    min_size=5,
    max_size=20,
)
# Synchronous SQLAlchemy engine sharing the same URL (SQL echo disabled).
engine = create_engine(
    SQLALCHEMY_DATABASE_URL,
    echo=False,
)
# Shared metadata object for table definitions.
metadata = MetaData()
| 19.774194 | 46 | 0.631321 | import os
from databases import Database
from sqlalchemy import MetaData, create_engine
SQLALCHEMY_DATABASE_URL = (
os.environ.get("DATABASE_URL")
or '{}://{}:{}@{}:{}/{}'.format(
os.environ.get("DATABASE"),
os.environ.get("DB_USERNAME"),
os.environ.get("DB_PASSWORD"),
os.environ.get("DB_HOST"),
os.environ.get("DB_PORT"),
os.environ.get("DB_NAME"),
)
)
database = Database(
SQLALCHEMY_DATABASE_URL,
ssl=False,
min_size=5,
max_size=20,
)
engine = create_engine(
SQLALCHEMY_DATABASE_URL,
echo=False,
)
metadata = MetaData()
| true | true |
7901359d780dedf2d8569f0323f6b8404acbf4c5 | 5,972 | py | Python | test/functional/wallet_txn_clone.py | weicrypto/wei | 031b608636db4309ecc9d5e6d0512179fb50fd4e | [
"MIT"
] | null | null | null | test/functional/wallet_txn_clone.py | weicrypto/wei | 031b608636db4309ecc9d5e6d0512179fb50fd4e | [
"MIT"
] | null | null | null | test/functional/wallet_txn_clone.py | weicrypto/wei | 031b608636db4309ecc9d5e6d0512179fb50fd4e | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the wallet accounts properly when there are cloned transactions with malleated scriptsigs."""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
class TxnMallTest(BitcoinTestFramework):
    """Check wallet accounting when a transaction is cloned with a malleated
    scriptsig and the clone (not the original) ends up in the best chain."""
    def set_test_params(self):
        # Four nodes; setup_network() later splits them into {0,1} and {2,3}.
        self.num_nodes = 4
    def add_options(self, parser):
        """Register --mineblock: confirm tx1/tx2 once before mining the clone."""
        parser.add_option("--mineblock", dest="mine_block", default=False, action="store_true",
                          help="Test double-spend of 1-confirmed transaction")
    def setup_network(self):
        """Build the default network, then split it between nodes 1 and 2."""
        # Start with split network:
        super(TxnMallTest, self).setup_network()
        disconnect_nodes(self.nodes[1], 2)
        disconnect_nodes(self.nodes[2], 1)
    def run_test(self):
        """Send tx1/tx2 from node0, mine a malleated clone of tx1 on the other
        side of the split, reconnect, and verify confirmations and balances."""
        # All nodes should start with 12,500 WEI:
        starting_balance = 12500
        for i in range(4):
            assert_equal(self.nodes[i].getbalance(), starting_balance)
            self.nodes[i].getnewaddress() # bug workaround, coins generated assigned to first getnewaddress!
        self.nodes[0].settxfee(.001)
        node0_address1 = self.nodes[0].getnewaddress()
        node0_txid1 = self.nodes[0].sendtoaddress(node0_address1, 12190)
        node0_tx1 = self.nodes[0].gettransaction(node0_txid1)
        node0_address2 = self.nodes[0].getnewaddress()
        node0_txid2 = self.nodes[0].sendtoaddress(node0_address2, 290)
        node0_tx2 = self.nodes[0].gettransaction(node0_txid2)
        assert_equal(self.nodes[0].getbalance(),
                     starting_balance + node0_tx1["fee"] + node0_tx2["fee"])
        # Coins are sent to node1_address
        node1_address = self.nodes[1].getnewaddress()
        # Send tx1, and another transaction tx2 that won't be cloned
        txid1 = self.nodes[0].sendtoaddress(node1_address, 400)
        txid2 = self.nodes[0].sendtoaddress(node1_address, 200)
        # Construct a clone of tx1, to be malleated
        rawtx1 = self.nodes[0].getrawtransaction(txid1,1)
        clone_inputs = [{"txid":rawtx1["vin"][0]["txid"],"vout":rawtx1["vin"][0]["vout"]}]
        clone_outputs = {rawtx1["vout"][0]["scriptPubKey"]["addresses"][0]:rawtx1["vout"][0]["value"],
                         rawtx1["vout"][1]["scriptPubKey"]["addresses"][0]:rawtx1["vout"][1]["value"]}
        clone_locktime = rawtx1["locktime"]
        clone_raw = self.nodes[0].createrawtransaction(clone_inputs, clone_outputs, clone_locktime)
        # createrawtransaction randomizes the order of its outputs, so swap them if necessary.
        # output 0 is at version+#inputs+input+sigstub+sequence+#outputs
        # 400 WEI serialized is 00902f5009000000
        # (offsets are doubled because clone_raw is a hex string: two chars per byte)
        pos0 = 2*(4+1+36+1+4+1)
        hex400 = "00902f5009000000"
        output_len = 16 + 2 + 2 * int("0x" + clone_raw[pos0 + 16 : pos0 + 16 + 2], 0)
        if (rawtx1["vout"][0]["value"] == 400 and clone_raw[pos0 : pos0 + 16] != hex400 or rawtx1["vout"][0]["value"] != 400 and clone_raw[pos0 : pos0 + 16] == hex400):
            output0 = clone_raw[pos0 : pos0 + output_len]
            output1 = clone_raw[pos0 + output_len : pos0 + 2 * output_len]
            clone_raw = clone_raw[:pos0] + output1 + output0 + clone_raw[pos0 + 2 * output_len:]
        # Use a different signature hash type to sign. This creates an equivalent but malleated clone.
        # Don't send the clone anywhere yet
        tx1_clone = self.nodes[0].signrawtransactionwithwallet(clone_raw, None, "ALL|ANYONECANPAY")
        assert_equal(tx1_clone["complete"], True)
        # Have node0 mine a block, if requested:
        if (self.options.mine_block):
            self.nodes[0].generate(1)
            self.sync_blocks(self.nodes[0:2])
        tx1 = self.nodes[0].gettransaction(txid1)
        tx2 = self.nodes[0].gettransaction(txid2)
        # Node0's balance should be starting balance, plus 500WEI for another
        # matured block, minus tx1 and tx2 amounts, and minus transaction fees:
        expected = starting_balance + node0_tx1["fee"] + node0_tx2["fee"]
        if self.options.mine_block:
            expected += 500
        expected += tx1["amount"] + tx1["fee"]
        expected += tx2["amount"] + tx2["fee"]
        assert_equal(self.nodes[0].getbalance(), expected)
        if self.options.mine_block:
            assert_equal(tx1["confirmations"], 1)
            assert_equal(tx2["confirmations"], 1)
        else:
            assert_equal(tx1["confirmations"], 0)
            assert_equal(tx2["confirmations"], 0)
        # Send clone and its parent to miner
        self.nodes[2].sendrawtransaction(node0_tx1["hex"])
        txid1_clone = self.nodes[2].sendrawtransaction(tx1_clone["hex"])
        # ... mine a block...
        self.nodes[2].generate(1)
        # Reconnect the split network, and sync chain:
        connect_nodes(self.nodes[1], 2)
        self.nodes[2].sendrawtransaction(node0_tx2["hex"])
        self.nodes[2].sendrawtransaction(tx2["hex"])
        self.nodes[2].generate(1) # Mine another block to make sure we sync
        self.sync_blocks()
        # Re-fetch transaction info:
        tx1 = self.nodes[0].gettransaction(txid1)
        tx1_clone = self.nodes[0].gettransaction(txid1_clone)
        tx2 = self.nodes[0].gettransaction(txid2)
        # Verify expected confirmations
        assert_equal(tx1["confirmations"], -2)
        assert_equal(tx1_clone["confirmations"], 2)
        assert_equal(tx2["confirmations"], 1)
        # Check node0's total balance; should be same as before the clone, + 1000 WEI for 2 matured,
        # less possible orphaned matured subsidy
        expected += 1000
        if (self.options.mine_block):
            expected -= 500
        assert_equal(self.nodes[0].getbalance(), expected)
if __name__ == '__main__':
    TxnMallTest().main() # standard functional-test entry point
| 45.587786 | 168 | 0.64501 |
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
class TxnMallTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 4
def add_options(self, parser):
parser.add_option("--mineblock", dest="mine_block", default=False, action="store_true",
help="Test double-spend of 1-confirmed transaction")
def setup_network(self):
super(TxnMallTest, self).setup_network()
disconnect_nodes(self.nodes[1], 2)
disconnect_nodes(self.nodes[2], 1)
def run_test(self):
starting_balance = 12500
for i in range(4):
assert_equal(self.nodes[i].getbalance(), starting_balance)
self.nodes[i].getnewaddress()
self.nodes[0].settxfee(.001)
node0_address1 = self.nodes[0].getnewaddress()
node0_txid1 = self.nodes[0].sendtoaddress(node0_address1, 12190)
node0_tx1 = self.nodes[0].gettransaction(node0_txid1)
node0_address2 = self.nodes[0].getnewaddress()
node0_txid2 = self.nodes[0].sendtoaddress(node0_address2, 290)
node0_tx2 = self.nodes[0].gettransaction(node0_txid2)
assert_equal(self.nodes[0].getbalance(),
starting_balance + node0_tx1["fee"] + node0_tx2["fee"])
node1_address = self.nodes[1].getnewaddress()
txid1 = self.nodes[0].sendtoaddress(node1_address, 400)
txid2 = self.nodes[0].sendtoaddress(node1_address, 200)
# Construct a clone of tx1, to be malleated
rawtx1 = self.nodes[0].getrawtransaction(txid1,1)
clone_inputs = [{"txid":rawtx1["vin"][0]["txid"],"vout":rawtx1["vin"][0]["vout"]}]
clone_outputs = {rawtx1["vout"][0]["scriptPubKey"]["addresses"][0]:rawtx1["vout"][0]["value"],
rawtx1["vout"][1]["scriptPubKey"]["addresses"][0]:rawtx1["vout"][1]["value"]}
clone_locktime = rawtx1["locktime"]
clone_raw = self.nodes[0].createrawtransaction(clone_inputs, clone_outputs, clone_locktime)
# createrawtransaction randomizes the order of its outputs, so swap them if necessary.
# output 0 is at version+#inputs+input+sigstub+sequence+#outputs
# 400 WEI serialized is 00902f5009000000
pos0 = 2*(4+1+36+1+4+1)
hex400 = "00902f5009000000"
output_len = 16 + 2 + 2 * int("0x" + clone_raw[pos0 + 16 : pos0 + 16 + 2], 0)
if (rawtx1["vout"][0]["value"] == 400 and clone_raw[pos0 : pos0 + 16] != hex400 or rawtx1["vout"][0]["value"] != 400 and clone_raw[pos0 : pos0 + 16] == hex400):
output0 = clone_raw[pos0 : pos0 + output_len]
output1 = clone_raw[pos0 + output_len : pos0 + 2 * output_len]
clone_raw = clone_raw[:pos0] + output1 + output0 + clone_raw[pos0 + 2 * output_len:]
# Use a different signature hash type to sign. This creates an equivalent but malleated clone.
# Don't send the clone anywhere yet
tx1_clone = self.nodes[0].signrawtransactionwithwallet(clone_raw, None, "ALL|ANYONECANPAY")
assert_equal(tx1_clone["complete"], True)
if (self.options.mine_block):
self.nodes[0].generate(1)
self.sync_blocks(self.nodes[0:2])
tx1 = self.nodes[0].gettransaction(txid1)
tx2 = self.nodes[0].gettransaction(txid2)
# matured block, minus tx1 and tx2 amounts, and minus transaction fees:
expected = starting_balance + node0_tx1["fee"] + node0_tx2["fee"]
if self.options.mine_block:
expected += 500
expected += tx1["amount"] + tx1["fee"]
expected += tx2["amount"] + tx2["fee"]
assert_equal(self.nodes[0].getbalance(), expected)
if self.options.mine_block:
assert_equal(tx1["confirmations"], 1)
assert_equal(tx2["confirmations"], 1)
else:
assert_equal(tx1["confirmations"], 0)
assert_equal(tx2["confirmations"], 0)
# Send clone and its parent to miner
self.nodes[2].sendrawtransaction(node0_tx1["hex"])
txid1_clone = self.nodes[2].sendrawtransaction(tx1_clone["hex"])
# ... mine a block...
self.nodes[2].generate(1)
# Reconnect the split network, and sync chain:
connect_nodes(self.nodes[1], 2)
self.nodes[2].sendrawtransaction(node0_tx2["hex"])
self.nodes[2].sendrawtransaction(tx2["hex"])
self.nodes[2].generate(1) # Mine another block to make sure we sync
self.sync_blocks()
# Re-fetch transaction info:
tx1 = self.nodes[0].gettransaction(txid1)
tx1_clone = self.nodes[0].gettransaction(txid1_clone)
tx2 = self.nodes[0].gettransaction(txid2)
# Verify expected confirmations
assert_equal(tx1["confirmations"], -2)
assert_equal(tx1_clone["confirmations"], 2)
assert_equal(tx2["confirmations"], 1)
# Check node0's total balance; should be same as before the clone, + 1000 WEI for 2 matured,
expected += 1000
if (self.options.mine_block):
expected -= 500
assert_equal(self.nodes[0].getbalance(), expected)
if __name__ == '__main__':
TxnMallTest().main()
| true | true |
790136c258112456331c611d13cd5bdbb73254a1 | 1,094 | py | Python | server/tests/test_repository.py | 74th/vscode-book-python | a3a90ee57851bca9898c60e8fa74c92e53e9469b | [
"MIT"
] | 3 | 2020-04-26T15:35:01.000Z | 2020-08-15T07:02:58.000Z | server/tests/test_repository.py | 74th/vscode-book-python | a3a90ee57851bca9898c60e8fa74c92e53e9469b | [
"MIT"
] | null | null | null | server/tests/test_repository.py | 74th/vscode-book-python | a3a90ee57851bca9898c60e8fa74c92e53e9469b | [
"MIT"
] | 2 | 2021-08-20T06:39:40.000Z | 2022-03-07T12:24:42.000Z | from typing import List
import unittest
from model import tasks
class TestRepository(unittest.TestCase):
    """Unit tests for the tasks.Repository list/add/done operations."""

    def test_list(self):
        """list() returns only the tasks that are not yet done."""
        repo = tasks.Repository()
        pending = repo.list()
        self.assertEqual(len(pending), 2)
        self.assertEqual(pending[0].id, 1)
        self.assertEqual(pending[0].text, "task1")
        self.assertEqual(pending[0].done, False)
        self.assertEqual(pending[1].id, 2)
        # Completing the first task must remove it from the listing.
        repo._tasks[0].done = True
        pending = repo.list()
        self.assertEqual(len(pending), 1)
        self.assertEqual(pending[0].id, 2)
        self.assertEqual(pending[0].done, False)

    def test_add(self):
        """add() appends a task and assigns the next sequential id."""
        repo = tasks.Repository()
        repo.add(tasks.Task(100, "new task"))
        pending = repo.list()
        self.assertEqual(len(pending), 3)
        newest = pending[2]
        self.assertEqual(newest.id, 3)
        self.assertEqual(newest.text, "new task")
        self.assertEqual(newest.done, False)

    def test_done(self):
        """done(id) marks the task so it no longer shows up in list()."""
        repo = tasks.Repository()
        repo.done(1)
        remaining = repo.list()
        self.assertEqual(len(remaining), 1)
        self.assertEqual(remaining[0].id, 2)
        self.assertEqual(remaining[0].done, False)
| 24.311111 | 47 | 0.565814 | from typing import List
import unittest
from model import tasks
class TestRepository(unittest.TestCase):
def test_list(self):
rep = tasks.Repository()
l = rep.list()
self.assertEqual(len(l), 2)
self.assertEqual(l[0].id, 1)
self.assertEqual(l[0].text, "task1")
self.assertEqual(l[0].done, False)
self.assertEqual(l[1].id, 2)
rep._tasks[0].done = True
l = rep.list()
self.assertEqual(len(l), 1)
self.assertEqual(l[0].id, 2)
self.assertEqual(l[0].done, False)
def test_add(self):
rep = tasks.Repository()
task = tasks.Task(100, "new task")
rep.add(task)
l = rep.list()
self.assertEqual(len(l), 3)
self.assertEqual(l[2].id, 3)
self.assertEqual(l[2].text, "new task")
self.assertEqual(l[2].done, False)
def test_done(self):
rep = tasks.Repository()
rep.done(1)
l = rep.list()
self.assertEqual(len(l), 1)
self.assertEqual(l[0].id, 2)
self.assertEqual(l[0].done, False)
| true | true |
7901378cfed666fdb4826694ca54d6ccc4f71d02 | 2,563 | py | Python | examples/Rest/ex17_mount_virtual_media_iso.py | killionadmin/ILOscripts | 951f53df9bf58bc7b186a501e2d123645f0e55a3 | [
"Apache-2.0"
] | 27 | 2015-04-07T13:44:20.000Z | 2016-03-26T01:23:58.000Z | examples/Rest/ex17_mount_virtual_media_iso.py | killionadmin/ILOscripts | 951f53df9bf58bc7b186a501e2d123645f0e55a3 | [
"Apache-2.0"
] | 5 | 2017-05-11T23:36:34.000Z | 2018-05-27T09:11:17.000Z | examples/Rest/ex17_mount_virtual_media_iso.py | killionadmin/ILOscripts | 951f53df9bf58bc7b186a501e2d123645f0e55a3 | [
"Apache-2.0"
] | 13 | 2015-03-25T19:03:36.000Z | 2016-03-11T13:21:18.000Z | # Copyright 2016 Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
from _restobject import RestObject
def ex17_mount_virtual_media_iso(restobj, iso_url, boot_on_next_server_reset):
    """Mount a DVD ISO (by URL) on every iLO virtual-media DVD device.

    Walks every Manager resource, follows its VirtualMedia collection and
    PATCHes each DVD-capable member with the image URL.  When both iso_url
    and boot_on_next_server_reset are not None, the HP OEM
    BootOnNextServerReset flag is included in the PATCH body.
    """
    sys.stdout.write("\nEXAMPLE 17: Mount iLO Virtual Media DVD ISO from URL\n")
    for manager in restobj.search_for_type("Manager."):
        manager_resp = restobj.rest_get(manager["href"])
        vm_collection = restobj.rest_get(
            manager_resp.dict["links"]["VirtualMedia"]["href"])
        for member in vm_collection.dict["links"]["Member"]:
            media = restobj.rest_get(member["href"])
            if media.status != 200:
                restobj.error_handler(media)
                continue
            if "DVD" not in media.dict["MediaTypes"]:
                # Not a DVD-capable device; nothing to mount here.
                continue
            payload = {"Image": iso_url}
            if iso_url is not None and boot_on_next_server_reset is not None:
                payload["Oem"] = {
                    "Hp": {"BootOnNextServerReset": boot_on_next_server_reset}}
            patch_resp = restobj.rest_patch(member["href"], payload)
            restobj.error_handler(patch_resp)
if __name__ == "__main__":
    # When running on the server locally use the following commented values
    # iLO_https_url = "blobstore://."
    # iLO_account = "None"
    # iLO_password = "None"
    # When running remotely connect using the iLO secured (https://) address,
    # iLO account name, and password to send https requests
    # iLO_https_url acceptable examples:
    # "https://10.0.0.100"
    # "https://f250asha.americas.hpqcorp.net"
    # NOTE(review): the credentials below are sample placeholders - never
    # commit real secrets; prefer environment variables or a config file.
    iLO_https_url = "https://10.0.0.100"
    iLO_account = "admin"
    iLO_password = "password"
    #Create a REST object
    REST_OBJ = RestObject(iLO_https_url, iLO_account, iLO_password)
    ex17_mount_virtual_media_iso(REST_OBJ, "http://10.0.0.100/test.iso", True)
| 42.716667 | 81 | 0.630901 |
import sys
from _restobject import RestObject
def ex17_mount_virtual_media_iso(restobj, iso_url, boot_on_next_server_reset):
sys.stdout.write("\nEXAMPLE 17: Mount iLO Virtual Media DVD ISO from URL\n")
instances = restobj.search_for_type("Manager.")
for instance in instances:
rsp = restobj.rest_get(instance["href"])
rsp = restobj.rest_get(rsp.dict["links"]["VirtualMedia"]["href"])
for vmlink in rsp.dict["links"]["Member"]:
response = restobj.rest_get(vmlink["href"])
if response.status == 200 and "DVD" in response.dict["MediaTypes"]:
body = {"Image": iso_url}
if (iso_url is not None and \
boot_on_next_server_reset is not None):
body["Oem"] = {"Hp": {"BootOnNextServerReset": \
boot_on_next_server_reset}}
response = restobj.rest_patch(vmlink["href"], body)
restobj.error_handler(response)
elif response.status != 200:
restobj.error_handler(response)
if __name__ == "__main__":
iLO_https_url = "https://10.0.0.100"
iLO_account = "admin"
iLO_password = "password"
REST_OBJ = RestObject(iLO_https_url, iLO_account, iLO_password)
ex17_mount_virtual_media_iso(REST_OBJ, "http://10.0.0.100/test.iso", True)
| true | true |
7901379d9c793494228ad5ddf8e61c6d86d5ff2a | 117,457 | py | Python | python/paddle/tensor/linalg.py | DevilCarp/Paddle | 04325d2cbefb029a4478bdc069d3279cd566ac6a | [
"Apache-2.0"
] | 2 | 2022-03-30T09:55:45.000Z | 2022-03-30T09:55:49.000Z | python/paddle/tensor/linalg.py | DevilCarp/Paddle | 04325d2cbefb029a4478bdc069d3279cd566ac6a | [
"Apache-2.0"
] | null | null | null | python/paddle/tensor/linalg.py | DevilCarp/Paddle | 04325d2cbefb029a4478bdc069d3279cd566ac6a | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from ..fluid.layer_helper import LayerHelper
from ..framework import _varbase_creator, _dygraph_tracer
from ..fluid.data_feeder import check_variable_and_dtype, check_type, check_dtype
from ..static import Variable
from ..fluid.framework import _in_legacy_dygraph, in_dygraph_mode
from ..fluid.layers import transpose, cast # noqa: F401
from ..fluid import layers
import paddle
from paddle.common_ops_import import core
from paddle.common_ops_import import VarDesc
from paddle import _C_ops
__all__ = []
def matmul(x, y, transpose_x=False, transpose_y=False, name=None):
    """
    Applies matrix multiplication to two tensors. `matmul` follows the
    complete broadcast rules, and its behavior is consistent with
    `np.matmul`.

    The actual behavior depends on the shapes of :math:`x`, :math:`y` and
    the flag values of :attr:`transpose_x`, :attr:`transpose_y`:

    - If a transpose flag is set, the last two dimensions of that tensor are
      transposed. Transposing a 1-D tensor is a no-op; for :math:`x` a shape
      :math:`[D]` is treated as :math:`[1, D]`, while for :math:`y` it is
      treated as :math:`[D, 1]`.
    - 1-D x 1-D: the dot product is returned.
    - 2-D x 2-D: the matrix-matrix product is returned.
    - 1-D x 2-D: a 1 is prepended to the 1-D operand's shape for the
      multiply and removed from the result afterwards.
    - 2-D x 1-D: the matrix-vector product is returned.
    - If at least one operand is N-dimensional (N > 2), a batched matrix
      multiply is performed: a 1-D first operand gets a 1 prepended, a 1-D
      second operand gets a 1 appended (both removed afterwards), and the
      non-matrix (leading) dimensions are broadcast according to the usual
      broadcast rules. For example, a (j, 1, n, m) tensor multiplied by a
      (k, m, p) tensor yields a (j, k, n, p) result.

    Args:
        x (Tensor): The input tensor which is a Tensor.
        y (Tensor): The input tensor which is a Tensor.
        transpose_x (bool): Whether to transpose :math:`x` before multiplication.
        transpose_y (bool): Whether to transpose :math:`y` before multiplication.
        name(str|None): A name for this layer(optional). If set None, the layer
            will be named automatically.

    Returns:
        Tensor: The output Tensor.

    Examples:

    .. code-block:: python

        import paddle
        import numpy as np

        # vector * vector
        x = paddle.to_tensor(np.random.random([10]).astype(np.float32))
        y = paddle.to_tensor(np.random.random([10]).astype(np.float32))
        print(paddle.matmul(x, y).numpy().shape)
        # [1]

        # matrix * vector
        x = paddle.to_tensor(np.random.random([10, 5]).astype(np.float32))
        y = paddle.to_tensor(np.random.random([5]).astype(np.float32))
        print(paddle.matmul(x, y).numpy().shape)
        # [10]

        # batched matrix * broadcasted vector
        x = paddle.to_tensor(np.random.random([10, 5, 2]).astype(np.float32))
        y = paddle.to_tensor(np.random.random([2]).astype(np.float32))
        print(paddle.matmul(x, y).numpy().shape)
        # [10, 5]

        # batched matrix * batched matrix
        x = paddle.to_tensor(np.random.random([10, 5, 2]).astype(np.float32))
        y = paddle.to_tensor(np.random.random([10, 2, 5]).astype(np.float32))
        print(paddle.matmul(x, y).numpy().shape)
        # [10, 5, 5]

        # batched matrix * broadcasted matrix
        x = paddle.to_tensor(np.random.random([10, 1, 5, 2]).astype(np.float32))
        y = paddle.to_tensor(np.random.random([1, 3, 2, 5]).astype(np.float32))
        print(paddle.matmul(x, y).numpy().shape)
        # [10, 3, 5, 5]

    """
    # Imperative fast paths: dispatch straight to the C++ kernel.
    if in_dygraph_mode():
        return _C_ops.final_state_matmul(x, y, transpose_x, transpose_y)
    if _in_legacy_dygraph():
        return _C_ops.matmul_v2(x, y, 'trans_x', transpose_x, 'trans_y',
                                transpose_y)

    # Static-graph path: validate dtypes, then append a matmul_v2 op.
    check_variable_and_dtype(
        x, 'x', ['float16', 'float32', 'float64', 'complex64', 'complex128'],
        'matmul')
    check_variable_and_dtype(
        y, 'y', ['float16', 'float32', 'float64', 'complex64', 'complex128'],
        'matmul')

    helper = LayerHelper('matmul_v2', **locals())
    out = helper.create_variable_for_type_inference(dtype=x.dtype)
    helper.append_op(
        type='matmul_v2',
        inputs={'X': x,
                'Y': y},
        outputs={'Out': out},
        attrs={'trans_x': transpose_x,
               'trans_y': transpose_y})
    return out
def norm(x, p='fro', axis=None, keepdim=False, name=None):
    """
    Returns the matrix norm (Frobenius) or vector norm (the 1-norm, the Euclidean
    or 2-norm, and in general the p-norm for p > 0) of a given tensor.

    .. note::
        This norm API is different from `numpy.linalg.norm`.
        This api supports high-order input tensors (rank >= 3), and certain axis need to be pointed out to calculate the norm.
        But `numpy.linalg.norm` only supports 1-D vector or 2-D matrix as input tensor.
        For p-order matrix norm, this api actually treats matrix as a flattened vector to calculate the vector norm, NOT REAL MATRIX NORM.

    Args:
        x (Tensor): The input tensor could be N-D tensor, and the input data
            type could be float32 or float64.
        p (float|string, optional): Order of the norm. Supported values are `fro`, `0`, `1`, `2`,
            `inf`, `-inf` and any positive real number yielding the corresponding p-norm. Not supported: ord < 0 and nuclear norm.
            Default value is `fro`.
        axis (int|list|tuple, optional): The axis on which to apply norm operation. If axis is int
            or list(int)/tuple(int)  with only one element, the vector norm is computed over the axis.
            If `axis < 0`, the dimension to norm operation is rank(input) + axis.
            If axis is a list(int)/tuple(int) with two elements, the matrix norm is computed over the axis.
            Defalut value is `None`.
        keepdim (bool, optional): Whether to reserve the reduced dimension in the
            output Tensor. The result tensor will have fewer dimension
            than the :attr:`input` unless :attr:`keepdim` is true, default
            value is False.
        name (str, optional): The default value is None. Normally there is no need for
            user to set this property. For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        Tensor: results of norm operation on the specified axis of input tensor,
        it's data type is the same as input's Tensor.

    Examples:
        .. code-block:: python

            import paddle
            import numpy as np
            shape=[2, 3, 4]
            np_input = np.arange(24).astype('float32') - 12
            np_input = np_input.reshape(shape)
            x = paddle.to_tensor(np_input)
            #[[[-12. -11. -10.  -9.] [ -8.  -7.  -6.  -5.] [ -4.  -3.  -2.  -1.]]
            # [[  0.   1.   2.   3.] [  4.   5.   6.   7.] [  8.   9.  10.  11.]]]

            # compute frobenius norm along last two dimensions.
            out_fro = paddle.linalg.norm(x, p='fro', axis=[0,1])
            # out_fro.numpy() [17.435596 16.911535 16.7332   16.911535]

            # compute 2-order vector norm along last dimension.
            out_pnorm = paddle.linalg.norm(x, p=2, axis=-1)
            #out_pnorm.numpy(): [[21.118711  13.190906   5.477226]
            #                    [ 3.7416575 11.224972  19.131126]]

            # compute 2-order  norm along [0,1] dimension.
            out_pnorm = paddle.linalg.norm(x, p=2, axis=[0,1])
            #out_pnorm.numpy(): [17.435596 16.911535 16.7332   16.911535]

            # compute inf-order  norm
            out_pnorm = paddle.linalg.norm(x, p=np.inf)
            #out_pnorm.numpy()  = [12.]
            out_pnorm = paddle.linalg.norm(x, p=np.inf, axis=0)
            #out_pnorm.numpy(): [[12. 11. 10. 9.] [8. 7. 6. 7.] [8. 9. 10. 11.]]

            # compute -inf-order  norm
            out_pnorm = paddle.linalg.norm(x, p=-np.inf)
            #out_pnorm.numpy(): [0.]
            out_pnorm = paddle.linalg.norm(x, p=-np.inf, axis=0)
            #out_pnorm.numpy(): [[0. 1. 2. 3.] [4. 5. 6. 5.] [4. 3. 2. 1.]]
    """

    def frobenius_norm(input, dim=None, keepdim=False, name=None):
        """
        The frobenius norm OP is to calculate the frobenius norm of certain two dimensions of Tensor `input`.
        Args:
          input (Variable): Tensor, data type float32, float64.
          dim (list, optional): None for last two dimensions.
          keepdim (bool, optional): Whether keep the dimensions as the `input`, Default False.
        """
        if dim is not None and not (isinstance(dim, list) and len(dim) == 2):
            raise ValueError(
                "The dim of frobenius norm op should be None or two elements list!"
            )
        if paddle.in_dynamic_mode():
            if dim is None:
                return _C_ops.frobenius_norm(input, 'keep_dim', keepdim,
                                             'reduce_all', True)
            return _C_ops.frobenius_norm(input, 'dim', dim, 'keep_dim', keepdim,
                                         'reduce_all', False)
        attrs = {'dim': dim, 'keep_dim': keepdim, 'reduce_all': False}
        if dim is None:
            attrs['reduce_all'] = True
        check_variable_and_dtype(input, 'input', ['float32', 'float64'],
                                 'frobenius_norm')

        helper = LayerHelper('frobenius_norm', **locals())
        out = helper.create_variable_for_type_inference(
            dtype=helper.input_dtype())

        helper.append_op(
            type='frobenius_norm',
            inputs={'X': input},
            outputs={'Out': out},
            attrs=attrs)
        return out

    def vector_norm(input,
                    porder=None,
                    axis=None,
                    keepdim=False,
                    asvector=False,
                    name=None):
        """
        Calculate the p-order vector norm for certain  dimension of Tensor `input`.
        Args:
          input (Variable): Tensor, data type float32, float64.
          porder (float, optional): None for porder=2.0.
          axis (int, optional): None for last dimension.
          keepdim (bool, optional): Whether keep the dimensions as the `input`, Default False.
        """
        if paddle.in_dynamic_mode():
            if axis is None: axis = -1
            return _C_ops.p_norm(input, 'porder', porder, 'axis', axis,
                                 'keepdim', keepdim, 'asvector', asvector)
        if porder is not None:
            check_type(porder, 'porder', (float, int), 'p_norm')
        if axis is not None:
            check_type(axis, 'axis', (int), 'p_norm')
        check_variable_and_dtype(input, 'input', ['float32', 'float64'],
                                 'p_norm')

        attrs = {
            'axis': axis if axis is not None else -1,
            'porder': float(porder) if porder is not None else 2.0,
            'keepdim': keepdim,
            'asvector': asvector,
            'epsilon': 1e-12,
        }
        helper = LayerHelper('p_norm', **locals())
        out = helper.create_variable_for_type_inference(
            dtype=helper.input_dtype())

        helper.append_op(
            type='p_norm',
            inputs={'X': input},
            outputs={'Out': out},
            attrs=attrs)
        return out

    def inf_norm(input,
                 porder=None,
                 axis=axis,
                 keepdim=False,
                 asvector=False,
                 name=None):
        # inf-norm = max(|x|), (-inf)-norm = min(|x|) over the given axes.
        helper = LayerHelper('frobenius_norm', **locals())
        out = helper.create_variable_for_type_inference(
            dtype=helper.input_dtype())
        helper.append_op(type='abs', inputs={'X': input}, outputs={'Out': out})
        reduce_out = helper.create_variable_for_type_inference(
            dtype=helper.input_dtype())

        reduce_all = True if axis == None or axis == [] or asvector == True else False
        axis = axis if axis != None and axis != [] else [0]

        # NOTE: use the builtin float('inf'); the np.float alias was removed
        # in NumPy >= 1.24 and np.float('inf') raises AttributeError there.
        reduce_type = 'reduce_max' if porder == float('inf') else 'reduce_min'
        helper.append_op(
            type=reduce_type,
            inputs={'X': out},
            outputs={'Out': reduce_out},
            attrs={'dim': axis,
                   'keep_dim': keepdim,
                   'reduce_all': reduce_all})

        return reduce_out

    def p_matrix_norm(input, porder=1., axis=axis, keepdim=False, name=None):
        """
        NOTE:
            This function actually treats the matrix as flattened vector to calculate vector norm instead of matrix norm.
        """
        block = LayerHelper('norm', **locals())
        out = block.create_variable_for_type_inference(
            dtype=block.input_dtype())

        # sum(|x|^p) ^ (1/p) over the requested axes.
        abs_out = block.create_variable_for_type_inference(
            dtype=block.input_dtype())
        block.append_op(
            type='abs', inputs={'X': input}, outputs={'Out': abs_out})
        pow_out = block.create_variable_for_type_inference(
            dtype=block.input_dtype())

        block.append_op(
            type='pow',
            inputs={'X': abs_out},
            outputs={'Out': pow_out},
            attrs={'factor': porder})
        sum_out = block.create_variable_for_type_inference(
            dtype=block.input_dtype())
        block.append_op(
            type='reduce_sum',
            inputs={'X': pow_out},
            outputs={'Out': sum_out},
            attrs={
                'dim': axis,
                'keep_dim': keepdim,
                'reduce_all': True if axis is None else False
            })
        block.append_op(
            type='pow',
            inputs={'X': sum_out},
            outputs={'Out': out},
            attrs={'factor': float(1. / porder)})
        return out

    # Dispatch: no axis -> treat x as one flattened vector (or 'fro' matrix norm).
    if axis is None and p is not None:
        if isinstance(p, str):
            if p == "fro":
                return frobenius_norm(x, dim=axis, keepdim=keepdim, name=name)
            else:
                raise ValueError(
                    "only valid string values are 'fro', found {}".format(p))
        elif isinstance(p, (int, float)):
            return vector_norm(
                x,
                porder=p,
                axis=axis,
                keepdim=keepdim,
                asvector=True,
                name=name)
        else:
            raise ValueError("only valid p type is string or float, found {}".
                             format(type(p)))

    if isinstance(axis, tuple):
        axis = list(axis)
    if isinstance(axis, list) and len(axis) == 1:
        axis = axis[0]

    #calculate vector norm, where axis is int or list with only one integer
    if isinstance(axis, int):
        if isinstance(p, str):
            if p == "fro":
                return vector_norm(
                    x,
                    porder=2,
                    axis=axis,
                    keepdim=keepdim,
                    asvector=False,
                    name=name)

            else:
                raise ValueError(
                    "only valid string values are 'fro', found {}".format(p))
        elif isinstance(p, (int, float)):
            return vector_norm(
                x,
                axis=axis,
                porder=p,
                keepdim=keepdim,
                asvector=False,
                name=name)
        else:
            raise ValueError(
                "unspport p for p-order vector norm. except float, found {}".
                format(p))
    #calculate matrix norm, where axis is list with two integers
    elif isinstance(axis, list) and len(axis) == 2:
        if p == "fro":
            return frobenius_norm(x, dim=axis, keepdim=keepdim, name=name)
        elif p == np.inf or p == -np.inf:
            return inf_norm(x, porder=p, axis=axis, keepdim=keepdim, name=name)
        elif p == 0:
            raise ValueError(
                "just suport axis type int or list (length of list <=1) if p = 0, found {}".
                format(axis))
        else:
            return p_matrix_norm(
                x, porder=p, axis=axis, keepdim=keepdim, name=name)
    else:
        raise ValueError(
            "except axis type int or list (length of list <=2), found {}".
            format(axis))
def dist(x, y, p=2, name=None):
    r"""
    This OP returns the p-norm of (x - y). It is not a norm in a strict sense, only as a measure
    of distance. The shapes of x and y must be broadcastable. The definition is as follows, for
    details, please refer to the `numpy's broadcasting <https://docs.scipy.org/doc/numpy/user/basics.broadcasting.html>`_:

    - Each input has at least one dimension.
    - Match the two input dimensions from back to front, the dimension sizes must either be equal, one of them is 1, or one of them does not exist.

    Where, z = x - y, the shapes of x and y are broadcastable, then the shape of z can be
    obtained as follows:

    1. If the number of dimensions of x and y are not equal, prepend 1 to the dimensions of the
    tensor with fewer dimensions.

    For example, The shape of x is [8, 1, 6, 1], the shape of y is [7, 1, 5], prepend 1 to the
    dimension of y.

    x (4-D Tensor):  8 x 1 x 6 x 1

    y (4-D Tensor):  1 x 7 x 1 x 5

    2. Determine the size of each dimension of the output z: choose the maximum value from the
    two input dimensions.

    z (4-D Tensor):  8 x 7 x 6 x 5

    If the number of dimensions of the two inputs are the same, the size of the output can be
    directly determined in step 2. When p takes different values, the norm formula is as follows:

    When p = 0, defining $0^0=0$, the zero-norm of z is simply the number of non-zero elements of z.

    .. math::

        ||z||_{0}=\lim_{p \\rightarrow 0}\sum_{i=1}^{m}|z_i|^{p}

    When p = inf, the inf-norm of z is the maximum element of z.

    .. math::

        ||z||_\infty=\max_i |z_i|

    When p = -inf, the negative-inf-norm of z is the minimum element of z.

    .. math::

        ||z||_{-\infty}=\min_i |z_i|

    Otherwise, the p-norm of z follows the formula,

    .. math::

        ||z||_{p}=(\sum_{i=1}^{m}|z_i|^p)^{\\frac{1}{p}}

    Args:
        x (Tensor): 1-D to 6-D Tensor, its data type is float32 or float64.
        y (Tensor): 1-D to 6-D Tensor, its data type is float32 or float64.
        p (float, optional): The norm to be computed, its data type is float32 or float64. Default: 2.

    Returns:
        Tensor: Tensor that is the p-norm of (x - y).

    Examples:
        .. code-block:: python

            import paddle
            import numpy as np

            x = paddle.to_tensor(np.array([[3, 3],[3, 3]]), "float32")
            y = paddle.to_tensor(np.array([[3, 3],[3, 1]]), "float32")
            out = paddle.dist(x, y, 0)
            print(out) # out = [1.]

            out = paddle.dist(x, y, 2)
            print(out) # out = [2.]

            out = paddle.dist(x, y, float("inf"))
            print(out) # out = [2.]

            out = paddle.dist(x, y, float("-inf"))
            print(out) # out = [0.]
    """
    check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'dist')
    check_variable_and_dtype(y, 'dtype', ['float32', 'float64'], 'dist')
    check_type(p, 'p', (float, int), 'dist')
    helper = LayerHelper("dist", **locals())
    out = helper.create_variable_for_type_inference(x.dtype)

    inputs = {"X": [x], "Y": [y]}
    outputs = {'Out': [out]}
    attrs = {"p": float(p)}
    # Pass the prebuilt dicts consistently (the outputs dict was previously
    # built but shadowed by a fresh literal in the append_op call).
    helper.append_op(
        type='dist', inputs=inputs, outputs=outputs, attrs=attrs)
    return out
def cond(x, p=None, name=None):
    """
    Computes the condition number of a matrix or batches of matrices with respect to a matrix norm ``p``.

    Args:
        x (Tensor): The input tensor could be tensor of shape ``(*, m, n)`` where ``*`` is zero or more batch dimensions
            for ``p`` in ``(2, -2)``, or of shape ``(*, n, n)`` where every matrix is invertible for any supported ``p``.
            And the input data type could be ``float32`` or ``float64``.
        p (float|string, optional): Order of the norm. Supported values are `fro`, `nuc`, `1`, `-1`, `2`, `-2`,
            `inf`, `-inf`. Default value is `None`, meaning that the order of the norm is `2`.
        name (str, optional): The default value is `None`. Normally there is no need for
            user to set this property. For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        Tensor: computing results of condition number, its data type is the same as input Tensor ``x``.

    Examples:
        .. code-block:: python

            import paddle
            import numpy as np

            x = paddle.to_tensor([[1., 0, -1], [0, 1, 0], [1, 0, 1]])

            # compute conditional number when p is None
            out = paddle.linalg.cond(x)
            # out.numpy() [1.4142135]

            # compute conditional number when order of the norm is 'fro'
            out_fro = paddle.linalg.cond(x, p='fro')
            # out_fro.numpy() [3.1622777]

            # compute conditional number when order of the norm is 'nuc'
            out_nuc = paddle.linalg.cond(x, p='nuc')
            # out_nuc.numpy() [9.2426405]

            # compute conditional number when order of the norm is 1
            out_1 = paddle.linalg.cond(x, p=1)
            # out_1.numpy() [2.]

            # compute conditional number when order of the norm is -1
            out_minus_1 = paddle.linalg.cond(x, p=-1)
            # out_minus_1.numpy() [1.]

            # compute conditional number when order of the norm is 2
            out_2 = paddle.linalg.cond(x, p=2)
            # out_2.numpy() [1.4142135]

            # compute conditional number when order of the norm is -1
            out_minus_2 = paddle.linalg.cond(x, p=-2)
            # out_minus_2.numpy() [0.70710677]

            # compute conditional number when order of the norm is inf
            out_inf = paddle.linalg.cond(x, p=np.inf)
            # out_inf.numpy() [2.]

            # compute conditional number when order of the norm is -inf
            out_minus_inf = paddle.linalg.cond(x, p=-np.inf)
            # out_minus_inf.numpy() [1.]

            a = paddle.to_tensor(np.random.randn(2, 4, 4).astype('float32'))
            a_cond_fro = paddle.linalg.cond(a, p='fro')
            # a_cond_fro.numpy()  [31.572273 28.120834]

            b = paddle.to_tensor(np.random.randn(2, 3, 4).astype('float64'))
            b_cond_2 = paddle.linalg.cond(b, p=2)
            # b_cond_2.numpy()  [3.30064451 2.51976252]
    """

    def mat_norm(input, porder=1., axis=None):
        """
        NOTE:
            Calculate the matrix norm of a square matrix or batches of square matrices,
            when porder is in (1, -1, inf, -inf)
        """
        reduce_all = True if axis is None or axis == [] else False
        axis = axis if axis != None and axis != [] else [0]
        keepdim = False

        if paddle.in_dynamic_mode():
            abs_out = _C_ops.abs(input)
            sum_out = _C_ops.reduce_sum(abs_out, 'dim', axis, 'keepdim',
                                        keepdim, 'reduce_all', reduce_all)
            if porder == 1 or porder == np.inf:
                return _C_ops.reduce_max(sum_out, 'dim', [-1], 'keepdim',
                                         keepdim, 'reduce_all', reduce_all)
            if porder == -1 or porder == -np.inf:
                return _C_ops.reduce_min(sum_out, 'dim', [-1], 'keepdim',
                                         keepdim, 'reduce_all', reduce_all)

        block = LayerHelper('norm', **locals())
        abs_out = block.create_variable_for_type_inference(
            dtype=block.input_dtype())
        sum_out = block.create_variable_for_type_inference(
            dtype=block.input_dtype())
        out = block.create_variable_for_type_inference(
            dtype=block.input_dtype())
        block.append_op(
            type='abs', inputs={'X': input}, outputs={'Out': abs_out})
        block.append_op(
            type='reduce_sum',
            inputs={'X': abs_out},
            outputs={'Out': sum_out},
            attrs={'dim': axis,
                   'keep_dim': keepdim,
                   'reduce_all': reduce_all})
        if porder == 1 or porder == np.inf:
            block.append_op(
                type='reduce_max',
                inputs={'X': sum_out},
                outputs={'Out': out},
                attrs={
                    'dim': [-1],
                    'keep_dim': keepdim,
                    'reduce_all': reduce_all
                })
        if porder == -1 or porder == -np.inf:
            block.append_op(
                type='reduce_min',
                inputs={'X': sum_out},
                outputs={'Out': out},
                attrs={
                    'dim': [-1],
                    'keep_dim': keepdim,
                    'reduce_all': reduce_all
                })
        return out

    def fro_norm(input, porder=2, axis=[-1]):
        """
        NOTE:
            Calculate the frobenius norm of a square matrix or batches of square matrices.
        """
        reduce_all = True if axis is None or axis == [] else False
        keepdim = False

        if paddle.in_dynamic_mode():
            pow_out = _C_ops.pow(input, 'factor', porder)
            sum_out_1 = _C_ops.reduce_sum(pow_out, 'dim', axis, 'keepdim',
                                          keepdim, 'reduce_all', reduce_all)
            sum_out_2 = _C_ops.reduce_sum(sum_out_1, 'dim', axis, 'keepdim',
                                          keepdim, 'reduce_all', reduce_all)
            return _C_ops.pow(sum_out_2, 'factor', float(1. / porder))

        block = LayerHelper('norm', **locals())
        pow_out = block.create_variable_for_type_inference(
            dtype=block.input_dtype())
        sum_out_1 = block.create_variable_for_type_inference(
            dtype=block.input_dtype())
        sum_out_2 = block.create_variable_for_type_inference(
            dtype=block.input_dtype())
        out = block.create_variable_for_type_inference(
            dtype=block.input_dtype())
        block.append_op(
            type='pow',
            inputs={'X': input},
            outputs={'Out': pow_out},
            attrs={'factor': porder})
        block.append_op(
            type='reduce_sum',
            inputs={'X': pow_out},
            outputs={'Out': sum_out_1},
            attrs={'dim': axis,
                   'keep_dim': keepdim,
                   'reduce_all': reduce_all})
        block.append_op(
            type='reduce_sum',
            inputs={'X': sum_out_1},
            outputs={'Out': sum_out_2},
            attrs={'dim': axis,
                   'keep_dim': keepdim,
                   'reduce_all': reduce_all})
        block.append_op(
            type='pow',
            inputs={'X': sum_out_2},
            outputs={'Out': out},
            attrs={'factor': float(1. / porder)})
        return out

    def svd_norm(input, porder, axis=[-1]):
        """
        NOTE:
            Calculate the matrix norm, which is related to singular values, of a matrix
            or batches of matrices, including nuclear norm, 2-norm and (-2)-norm.
        """
        reduce_all = True if axis is None or axis == [] else False
        keepdim = False

        u, s, vh = svd(input, full_matrices=False)

        if paddle.in_dynamic_mode():
            if porder == "nuc":
                return _C_ops.reduce_sum(s, 'dim', axis, 'keepdim', keepdim,
                                         'reduce_all', reduce_all)
            max_out = _C_ops.reduce_max(s, 'dim', axis, 'keepdim', keepdim,
                                        'reduce_all', reduce_all)
            min_out = _C_ops.reduce_min(s, 'dim', axis, 'keepdim', keepdim,
                                        'reduce_all', reduce_all)
            # NOTE(review): the attribute key 'aixs' looks like a typo for
            # 'axis' but is kept as-is because it is what has historically
            # been passed to elementwise_div here -- verify before renaming.
            if porder == 2:
                return _C_ops.elementwise_div(max_out, min_out, 'aixs', axis,
                                              'use_mkldnn', False)
            if porder == -2:
                return _C_ops.elementwise_div(min_out, max_out, 'aixs', axis,
                                              'use_mkldnn', False)

        block = LayerHelper('norm', **locals())
        out = block.create_variable_for_type_inference(
            dtype=block.input_dtype())
        if porder == "nuc":
            block.append_op(
                type='reduce_sum',
                inputs={'X': s},
                outputs={'Out': out},
                attrs={
                    'dim': axis,
                    'keep_dim': keepdim,
                    'reduce_all': reduce_all
                })
            return out
        max_out = block.create_variable_for_type_inference(
            dtype=block.input_dtype())
        min_out = block.create_variable_for_type_inference(
            dtype=block.input_dtype())
        block.append_op(
            type='reduce_max',
            inputs={'X': s},
            outputs={'Out': max_out},
            attrs={'dim': axis,
                   'keep_dim': keepdim,
                   'reduce_all': reduce_all})
        block.append_op(
            type='reduce_min',
            inputs={'X': s},
            outputs={'Out': min_out},
            attrs={'dim': axis,
                   'keep_dim': keepdim,
                   'reduce_all': reduce_all})
        if porder == 2:
            block.append_op(
                type='elementwise_div',
                inputs={'X': max_out,
                        'Y': min_out},
                outputs={'Out': out},
                attrs={'aixs': axis,
                       'use_mkldnn': False})
            return out
        if porder == -2:
            block.append_op(
                type='elementwise_div',
                inputs={'X': min_out,
                        'Y': max_out},
                outputs={'Out': out},
                attrs={'aixs': axis,
                       'use_mkldnn': False})
            return out

    def empty_tensor(input, shape):
        # An input with a zero-sized dimension yields an empty result tensor.
        if paddle.in_dynamic_mode():
            return input.reshape(shape)
        raise ValueError("only support x is nonempty tensor in static mode")

    x_shape = list(x.shape)
    if not len(x_shape) >= 2:
        raise ValueError("input should be a matrix or batches of matrices, " +
                         "but the dimention of received input is {}".format(
                             len(x_shape)))
    # Identity comparison with None (PEP 8); p defaults to the 2-norm.
    if p is None:
        p = 2
    x_size = 0 if (0 in x_shape) else 1
    if p in ("fro", "nuc", 1, -1, np.inf, -np.inf):
        # These orders require square matrices: cond(x) = ||x|| * ||x^-1||.
        if x_shape[-1] == x_shape[-2]:
            if x_size == 0:
                return empty_tensor(x, x_shape[:-2])
            x_inv = x.inverse()
            if p == "fro":
                return fro_norm(x) * fro_norm(x_inv)
            if p == "nuc":
                return svd_norm(x, p) * svd_norm(x_inv, p)
            if p in (1, -1):
                return mat_norm(
                    x, porder=p, axis=[-2]) * mat_norm(
                        x_inv, porder=p, axis=[-2])
            if p in (np.inf, -np.inf):
                return mat_norm(
                    x, porder=p, axis=[-1]) * mat_norm(
                        x_inv, porder=p, axis=[-1])
        else:
            raise ValueError("only support p is {} when input is a ".format(p) +
                             "square matrix or batches of square matrices")
    elif p in (2, -2):
        # Spectral orders work for rectangular matrices via singular values.
        if x_size == 0:
            return empty_tensor(x, x_shape[:-2])
        return svd_norm(x, porder=p)
    else:
        raise ValueError(
            "unsupported {} for p, only supporting ('fro', 'nuc', ".format(
                p) + "1, -1, 2, -2, inf, -inf) or none")
def dot(x, y, name=None):
    """
    This operator calculates inner product for vectors.

    .. note::
       Support 1-d and 2-d Tensor. When it is 2d, the first dimension of this matrix
       is the batch dimension, which means that the vectors of multiple batches are dotted.

    Parameters:
        x(Tensor): 1-D or 2-D ``Tensor``. Its dtype should be ``float32``, ``float64``, ``int32``, ``int64``
        y(Tensor): 1-D or 2-D ``Tensor``. Its dtype soulde be ``float32``, ``float64``, ``int32``, ``int64``
        name(str, optional): Name of the output. Default is None. It's used to print debug info for developers. Details: :ref:`api_guide_Name`

    Returns:
        Tensor: the calculated result Tensor.

    Examples:

    .. code-block:: python

        import paddle
        import numpy as np

        x_data = np.random.uniform(0.1, 1, [10]).astype(np.float32)
        y_data = np.random.uniform(1, 3, [10]).astype(np.float32)
        x = paddle.to_tensor(x_data)
        y = paddle.to_tensor(y_data)
        z = paddle.dot(x, y)
        print(z)

    """
    op_type = 'dot'
    # Dygraph fast path: skip variable/type validation for efficiency and
    # call the C++ kernel directly.
    if paddle.in_dynamic_mode():
        return getattr(_C_ops, op_type)(x, y)

    assert x is not None, 'x cannot be None in {}'.format(op_type)
    assert y is not None, 'y cannot be None in {}'.format(op_type)

    check_variable_and_dtype(x, 'x', ['float32', 'float64', 'int32', 'int64'],
                             op_type)
    check_variable_and_dtype(y, 'y', ['float32', 'float64', 'int32', 'int64'],
                             op_type)

    helper = LayerHelper(op_type, **locals())
    # Honor a caller-supplied variable name; otherwise let type inference
    # allocate one.
    if name is None:
        out = helper.create_variable_for_type_inference(dtype=x.dtype)
    else:
        out = helper.create_variable(
            name=name, dtype=x.dtype, persistable=False)
    helper.append_op(
        type="dot",
        inputs={'X': x,
                'Y': y},
        attrs={},
        outputs={"Out": out})
    return out
def cov(x, rowvar=True, ddof=True, fweights=None, aweights=None, name=None):
    """
    Estimate the covariance matrix of the input variables, given data and weights.

    A covariance matrix is a square matrix, indicate the covariance of each pair variables in the input matrix.
    For example, for an N-dimensional samples X=[x1,x2,…xN]T, then the covariance matrix 
    element Cij is the covariance of xi and xj. The element Cii is the variance of xi itself.

    Parameters:
        x(Tensor): A N-D(N<=2) Tensor containing multiple variables and observations. By default, each row of x represents a variable. Also see rowvar below.
        rowvar(Bool, optional): If rowvar is True (default), then each row represents a variable, with observations in the columns. Default: True
        ddof(Bool, optional): If ddof=True will return the unbiased estimate, and ddof=False will return the simple average. Default: True
        fweights(Tensor, optional): 1-D Tensor of integer frequency weights; The number of times each observation vector should be repeated. Default: None
        aweights(Tensor, optional): 1-D Tensor of observation vector weights. How important of the observation vector, larger data means this element is more important. Default: None
        name(str, optional): Name of the output. Default is None. It's used to print debug info for developers. Details: :ref:`api_guide_Name`

    Returns:
        Tensor: The covariance matrix Tensor of the variables.

    Examples:

    .. code-block:: python

        import paddle

        xt = paddle.rand((3,4))
        paddle.linalg.cov(xt)

        '''
        Tensor(shape=[3, 3], dtype=float64, place=CUDAPlace(0), stop_gradient=True,
            [[0.07918842, 0.06127326, 0.01493049],
                [0.06127326, 0.06166256, 0.00302668],
                [0.01493049, 0.00302668, 0.01632146]])
        '''
    """
    op_type = 'cov'
    # Only 1-D (a single variable) or 2-D (variables x observations) input
    # is accepted.
    if len(x.shape) > 2 or len(x.shape) < 1:
        raise ValueError(
            "Input(x) only support N-D (1<=N<=2) tensor in cov, but received "
            "length of Input(input) is %s." % len(x.shape))
    check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'cov')
    nx = x
    if len(x.shape) == 1:
        # Promote a 1-D sample vector to a 1 x N matrix of observations.
        nx = x.reshape((1, -1))
    if not rowvar and nx.shape[0] != 1:
        # Columns are variables: transpose so that rows are variables again.
        nx = nx.t()
    w = None  # combined weight vector (fweights * aweights, when given)
    observation_num = nx.shape[1]
    if fweights is not None:
        w = fweights.astype(nx.dtype)
        # fweights must be a 1-D, non-negative, integer-valued tensor whose
        # length matches the number of observations.
        if len(w.shape) > 1:
            raise ValueError(
                "Input(fweights) only support N-D (N<=1) tensor in cov, but received "
                "shape of Input(input) is %s." % len(fweights.shape))
        if fweights.shape[0] != observation_num:
            raise ValueError(
                "The number of Input(fweights) should equal to x's dim[1]: {}, but received "
                "size of Input(fweights) is {}.".format(observation_num,
                                                        fweights.shape[0]))
        if fweights.min() < 0:
            raise ValueError(
                "The value of Input(fweights) cannot be negtive, but received "
                "min of Input(fweights) is {}.".format(fweights.min()))
        if not paddle.all(fweights == paddle.round(fweights.astype('float64'))):
            raise ValueError("Input(fweights) must be integer ")

    if aweights is not None:
        aw = aweights.astype(nx.dtype)
        # aweights must be a 1-D, non-negative tensor whose length matches
        # the number of observations.
        if len(aw.shape) > 1:
            raise ValueError(
                "Input(aweights) only support N-D (N<=1) tensor in cov, but received "
                "length of Input(input) is %s." % len(aweights.shape))
        check_variable_and_dtype(aweights, 'dtype', ['float32', 'float64'],
                                 'cov')
        if aweights.shape[0] != observation_num:
            raise ValueError(
                "The number of Input(aweights) should equal to x's dim[1]: {}, but received "
                "size of Input(aweights) is {}.".format(observation_num,
                                                        aweights.shape[0]))
        if aweights.min() < 0:
            raise ValueError(
                "The value of Input(aweights) cannot be negtive, but received "
                "min of Input(aweights) is {}.".format(aweights.min()))
        if w is not None:
            # Both weight kinds given: combine multiplicatively.
            w = w * aw
        else:
            w = aw

    # Total weight; defaults to the observation count when unweighted.
    w_sum = paddle.to_tensor(observation_num, dtype=nx.dtype)
    if fweights is not None or aweights is not None:
        w_sum = w.sum()
        if w_sum.item() == 0:
            raise ValueError("The sum of weights is zero, can't be normalized.")

    # Weighted (or plain) mean of each variable across observations.
    if w is not None:
        nx_w = nx * w
        avg = (nx_w).sum(axis=1) / w_sum
    else:
        avg = nx.sum(axis=1) / w_sum
        nx_w = nx

    # Normalization factor: w_sum - ddof for the standard estimator; when
    # aweights are present and ddof is True, the unbiased correction
    # w_sum - sum(w * aweights) / w_sum is used instead.
    if w is not None and aweights is not None and ddof == True:
        norm_factor = w_sum - (w * aweights).sum() / w_sum
    else:
        norm_factor = w_sum - ddof
    if norm_factor <= 0:
        # Clamp to zero; the subsequent divide yields inf/nan rather than a
        # negative covariance estimate.
        norm_factor = paddle.to_tensor(0, dtype=nx.dtype)
    # Center the data, then cov = (X @ X_w^H) / norm_factor.
    nx = nx - avg.unsqueeze(1)
    xxt = paddle.mm(nx, nx_w.t().conj())
    cov = paddle.divide(xxt, norm_factor).squeeze()
    return cov
def t(input, name=None):
    """
    Transpose a 0-D, 1-D or 2-D tensor.

    0-D and 1-D tensors are returned unchanged; a 2-D tensor is transposed,
    equivalent to paddle.transpose with the two axes swapped.

    Args:
        input (Tensor): An N-D (N<=2) Tensor of data types float16, float32,
            float64, int32.
        name (str, optional): The default value is None. Normally there is no
            need for user to set this property. For more information, please
            refer to :ref:`api_guide_Name`.

    Returns:
        Tensor: A transposed n-D Tensor, with data type being float16,
        float32, float64, int32, int64.

    For Example:

        .. code-block:: text

             # Example 1 (0-D tensor)
             x = tensor([0.79])
             paddle.t(x) = tensor([0.79])

             # Example 2 (1-D tensor)
             x = tensor([0.79, 0.84, 0.32])
             paddle.t(x) = tensor([0.79, 0.84, 0.32])

             # Example 3 (2-D tensor)
             x = tensor([0.79, 0.84, 0.32],
                        [0.64, 0.14, 0.57])
             paddle.t(x) = tensor([0.79, 0.64],
                                  [0.84, 0.14],
                                  [0.32, 0.57])

    Examples:

        .. code-block:: python

            import paddle
            x = paddle.ones(shape=[2, 3], dtype='int32')
            x_transposed = paddle.t(x)
            print(x_transposed.shape)
            # [3, 2]
    """
    ndim = len(input.shape)
    if ndim > 2:
        raise ValueError(
            "Input(input) only support N-D (N<=2) tensor, but received "
            "length of Input(input) is %s. Perhaps you can use paddle."
            "tensor.transpose() instead." % ndim)
    if paddle.in_dynamic_mode():
        if ndim == 1:
            # A vector transposes to itself.
            return input
        out, _ = _C_ops.transpose2(input, 'axis', [1, 0])
        return out

    # Static-graph path.
    check_variable_and_dtype(
        input, 'input',
        ['float16', 'float32', 'float64', 'int32', 'int64'], 'transpose')
    helper = LayerHelper('t', **locals())
    out = helper.create_variable_for_type_inference(input.dtype)
    input_shape = helper.create_variable_for_type_inference(input.dtype)
    if ndim == 1:
        out = input
    else:
        helper.append_op(
            type='transpose2',
            inputs={'X': [input]},
            outputs={'Out': [out],
                     'XShape': [input_shape]},
            attrs={'axis': [1, 0]})
    return out
def cross(x, y, axis=None, name=None):
    """
    Computes the cross product between two tensors along an axis.

    The inputs must have the same shape, and the length of the axis the
    product is taken along must equal 3. When `axis` is not given, the
    first axis found with length 3 is used.

    Args:
        x (Tensor): The first input tensor.
        y (Tensor): The second input tensor.
        axis (int, optional): The axis along which to compute the cross
            product. It defaults to the first axis found with the length 3.
        name (str, optional): Name for the operation (optional, default is
            None). For more information, please refer to
            :ref:`api_guide_Name`.

    Returns:
        Tensor. A Tensor with same data type as `x`.

    Examples:
        .. code-block:: python

            import paddle

            x = paddle.to_tensor([[1.0, 1.0, 1.0],
                                  [2.0, 2.0, 2.0],
                                  [3.0, 3.0, 3.0]])
            y = paddle.to_tensor([[1.0, 1.0, 1.0],
                                  [1.0, 1.0, 1.0],
                                  [1.0, 1.0, 1.0]])

            z1 = paddle.cross(x, y)
            # [[-1. -1. -1.]
            #  [ 2.  2.  2.]
            #  [-1. -1. -1.]]

            z2 = paddle.cross(x, y, axis=1)
            # [[0. 0. 0.]
            #  [0. 0. 0.]
            #  [0. 0. 0.]]
    """
    if in_dygraph_mode():
        return _C_ops.final_state_cross(x, y, axis)
    if _in_legacy_dygraph():
        # Only pass the 'dim' attribute when the caller supplied an axis;
        # omitting it lets the op auto-detect the length-3 axis.
        if axis is None:
            return _C_ops.cross(x, y)
        return _C_ops.cross(x, y, 'dim', axis)

    # Static-graph path.
    helper = LayerHelper("cross", **locals())
    out = helper.create_variable_for_type_inference(x.dtype)
    helper.append_op(
        type='cross',
        inputs={'X': x,
                'Y': y},
        outputs={'Out': out},
        attrs={'dim': axis})
    return out
def cholesky(x, upper=False, name=None):
    r"""
    Computes the Cholesky decomposition of one symmetric positive-definite
    matrix or batches of symmetric positive-definite matrices.

    If `upper` is `True`, the decomposition has the form :math:`A = U^{T}U`,
    and the returned matrix :math:`U` is upper-triangular. Otherwise, the
    decomposition has the form :math:`A = LL^{T}`, and the returned matrix
    :math:`L` is lower-triangular.

    Args:
        x (Tensor): The input tensor. Its shape should be `[*, M, M]`,
            where * is zero or more batch dimensions, and matrices on the
            inner-most 2 dimensions all should be symmetric
            positive-definite. Its data type should be float32 or float64.
        upper (bool): The flag indicating whether to return upper or lower
            triangular matrices. Default: False.
        name (str, optional): Name for the operation (optional, default is
            None). For more information, please refer to
            :ref:`api_guide_Name`.

    Returns:
        Tensor: A Tensor with same shape and data type as `x`. It represents
        triangular matrices generated by Cholesky decomposition.

    Examples:
        .. code-block:: python

            import paddle
            import numpy as np

            a = np.random.rand(3, 3)
            a_t = np.transpose(a, [1, 0])
            x_data = np.matmul(a, a_t) + 1e-03
            x = paddle.to_tensor(x_data)
            out = paddle.linalg.cholesky(x, upper=False)
            print(out)
    """
    if paddle.in_dynamic_mode():
        return _C_ops.cholesky(x, "upper", upper)

    # Static-graph path: validate arguments, then register the op.
    check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'cholesky')
    check_type(upper, 'upper', bool, 'cholesky')
    helper = LayerHelper('cholesky', **locals())
    result = helper.create_variable_for_type_inference(dtype=x.dtype)
    helper.append_op(
        type='cholesky',
        inputs={'X': [x]},
        outputs={'Out': result},
        attrs={'upper': upper})
    return result
def matrix_rank(x, tol=None, hermitian=False, name=None):
    r"""
    Computes the rank of a matrix.
    The rank of a matrix is the number of singular values that are greater than the specified `tol` threshold when hermitian=False,
    or the number of eigenvalues in absolute value that are greater than the specified `tol` threshold when hermitian=True.
    Args:
        x (Tensor): The input tensor. Its shape should be `[..., m, n]`, where `...` is zero or more batch dimensions. If `x` is a batch
            of matrices then the output has the same batch dimensions. The data type of `x` should be float32 or float64.
        tol (float,Tensor,optional): the tolerance value. Default: None. If `tol` is not specified, and `sigma` is the largest
            singular value (or eigenvalues in absolute value), and `eps` is the epsilon value for the dtype of `x`, then `tol` is computed
            with formula `tol=sigma * max(m,n) * eps`. Note that if `x` is a batch of matrices, `tol` is computed this way for every batch.
        hermitian (bool,optional): indicates whether `x` is Hermitian. Default: False. When hermitian=True, `x` is assumed to be Hermitian,
            enabling a more efficient method for finding eigenvalues, but `x` is not checked inside the function. Instead, We just use
            the lower triangular of the matrix to compute.
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
    Returns:
        Tensor: Rank of tensor x.
    Examples:
        .. code-block:: python
            import paddle
            a = paddle.eye(10)
            b = paddle.linalg.matrix_rank(a)
            print(b)
            # b = [10]
            c = paddle.ones(shape=[3, 4, 5, 5])
            d = paddle.linalg.matrix_rank(c, tol=0.01, hermitian=True)
            print(d)
            # d = [[1, 1, 1, 1],
            #      [1, 1, 1, 1],
            #      [1, 1, 1, 1]]
    """
    if paddle.in_dynamic_mode():
        # Normalize `tol` into the triple the C++ kernel expects:
        # (tol_tensor, tol_attr, use_default_tol).
        if tol is None:
            # No tolerance given: let the kernel derive the default one.
            tol_tensor = None
            tol_attr = 0.0
            use_default_tol = True
        elif isinstance(tol, Variable):
            # Tensor-valued tolerance: align its dtype with x so the kernel
            # can compare them directly.
            if tol.dtype != x.dtype:
                tol_tensor = cast(tol, x.dtype)
            else:
                tol_tensor = tol
            tol_attr = 0.0
            use_default_tol = False
        else:
            # Plain Python number: passed through the float attribute only.
            tol_tensor = None
            tol_attr = float(tol)
            use_default_tol = False
        return _C_ops.matrix_rank(x, tol_tensor, "tol", tol_attr, 'hermitian',
                                  hermitian, 'use_default_tol', use_default_tol)
    # Static-graph path: mirror the same tol normalization via op
    # inputs/attrs, then append the matrix_rank op.
    inputs = {}
    attrs = {}
    check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'matrix_rank')
    inputs['X'] = x
    if tol is None:
        attrs['use_default_tol'] = True
    elif isinstance(tol, Variable):
        # NOTE(review): this branch only accepts a float32 tensor `tol`,
        # while the dygraph branch accepts any dtype and casts — confirm
        # the asymmetry is intended.
        check_variable_and_dtype(tol, 'tol', ['float32'], 'matrix_rank')
        attrs['use_default_tol'] = False
        if tol.dtype != x.dtype:
            inputs['TolTensor'] = cast(tol, x.dtype)
        else:
            inputs['TolTensor'] = tol
    else:
        check_type(tol, 'tol', float, 'matrix_rank')
        attrs['use_default_tol'] = False
        attrs['tol'] = tol
    check_type(hermitian, 'hermitian', bool, 'matrix_rank')
    attrs['hermitian'] = hermitian
    helper = LayerHelper('matrix_rank', **locals())
    # The rank is a count, hence an int32 output variable.
    out = helper.create_variable_for_type_inference(dtype='int32')
    helper.append_op(
        type='matrix_rank', inputs=inputs, outputs={'Out': out}, attrs=attrs)
    return out
def bmm(x, y, name=None):
    """
    Applies batched matrix multiplication to two tensors.

    Both inputs must be 3-dimensional and share the same batch size:
    if x is a (b, m, k) tensor and y is a (b, k, n) tensor, the output
    is a (b, m, n) tensor.

    Args:
        x (Tensor): The input Tensor.
        y (Tensor): The input Tensor.
        name (str|None): A name for this layer (optional). If set None,
            the layer will be named automatically.

    Returns:
        Tensor: The product Tensor.

    Examples:
        .. code-block:: python

            import paddle

            # In imperative mode:
            # size x: (2, 2, 3) and y: (2, 3, 2)
            x = paddle.to_tensor([[[1.0, 1.0, 1.0],
                                   [2.0, 2.0, 2.0]],
                                  [[3.0, 3.0, 3.0],
                                   [4.0, 4.0, 4.0]]])
            y = paddle.to_tensor([[[1.0, 1.0], [2.0, 2.0], [3.0, 3.0]],
                                  [[4.0, 4.0], [5.0, 5.0], [6.0, 6.0]]])
            out = paddle.bmm(x, y)
            # output size: (2, 2, 2)
            # output value:
            # [[[6.0, 6.0], [12.0, 12.0]], [[45.0, 45.0], [60.0, 60.0]]]
            out_np = out.numpy()
    """
    x_shape = x.shape
    y_shape = y.shape
    # Validate rank first, then the contracted dim, then the batch dim, so
    # error messages stay in the established order.
    if not len(x_shape) == len(y_shape) == 3:
        raise ValueError(
            "x and y should be 3-dimensional. But received x's dimention: {}, y's dimention: {}".
            format(x_shape, y_shape))
    if x_shape[2] != y_shape[1]:
        raise ValueError(
            "x's width must be equal with y's height. But received x's shape: {}, y's shape: {}".
            format(x_shape, y_shape))
    if x_shape[0] != y_shape[0]:
        raise ValueError(
            "x's batch (shape[0]) must be equal with y's batch (shape[0]). But received x's shape: {}, y's shape: {}".
            format(x_shape, y_shape))

    if paddle.in_dynamic_mode():
        return _C_ops.bmm(x, y)

    # Static-graph path.
    helper = LayerHelper('bmm', **locals())
    out = helper.create_variable_for_type_inference(dtype=x.dtype)
    helper.append_op(type='bmm', inputs={'X': x, 'Y': y}, outputs={'Out': out})
    return out
def histogram(input, bins=100, min=0, max=0, name=None):
    """
    Computes the histogram of a tensor. The elements are sorted into
    equal-width bins between min and max. If min and max are both zero,
    the minimum and maximum values of the data are used instead.

    Args:
        input (Tensor): A Tensor(or LoDTensor) with shape :math:`[N_1, N_2,..., N_k]`.
            The data type of the input Tensor should be float32, float64,
            int32 or int64.
        bins (int): number of histogram bins.
        min (int): lower end of the range (inclusive).
        max (int): upper end of the range (inclusive).
        name (str, optional): Name for the operation (optional, default is
            None). For more information, please refer to
            :ref:`api_guide_Name`.

    Returns:
        Tensor: data type is int64, shape is (nbins,).

    Examples:
        .. code-block:: python

            import paddle

            inputs = paddle.to_tensor([1, 2, 1])
            result = paddle.histogram(inputs, bins=4, min=0, max=3)
            print(result) # [0, 2, 1, 0]
    """
    if paddle.in_dynamic_mode():
        return _C_ops.histogram(input, "bins", bins, "min", min, "max", max)

    # Static-graph path: the output is always an int64 count tensor.
    helper = LayerHelper('histogram', **locals())
    check_variable_and_dtype(
        input, 'X', ['int32', 'int64', 'float32', 'float64'], 'histogram')
    hist = helper.create_variable_for_type_inference(VarDesc.VarType.INT64)
    helper.append_op(
        type='histogram',
        inputs={'X': input},
        outputs={'Out': hist},
        attrs={'bins': bins, 'min': min, 'max': max})
    return hist
def bincount(x, weights=None, minlength=0, name=None):
    """
    Computes frequency of each value in the input tensor.

    Args:
        x (Tensor): A Tensor with non-negative integer. Should be 1-D tensor.
        weights (Tensor, optional): Weight for each value in the input
            tensor. Should have the same shape as input. Default is None.
        minlength (int, optional): Minimum number of bins. Should be
            non-negative integer. Default is 0.
        name (str, optional): The default value is None. Normally there is
            no need for user to set this property. For more information,
            please refer to :ref:`api_guide_Name`.

    Returns:
        Tensor: The tensor of frequency.

    Examples:
        .. code-block:: python

            import paddle

            x = paddle.to_tensor([1, 2, 1, 4, 5])
            result1 = paddle.bincount(x)
            print(result1) # [0, 2, 1, 0, 1, 1]

            w = paddle.to_tensor([2.1, 0.4, 0.1, 0.5, 0.5])
            result2 = paddle.bincount(x, weights=w)
            print(result2) # [0., 2.19999981, 0.40000001, 0., 0.50000000, 0.50000000]
    """
    # Counting only makes sense for integer inputs.
    if x.dtype not in [paddle.int32, paddle.int64]:
        raise TypeError("Elements in Input(x) should all be integers")

    if paddle.in_dynamic_mode():
        return _C_ops.bincount(x, weights, "minlength", minlength)

    # Static-graph path. The output dtype follows the weights when they are
    # given (weighted sums), otherwise it follows x (plain counts).
    helper = LayerHelper('bincount', **locals())
    check_variable_and_dtype(x, 'X', ['int32', 'int64'], 'bincount')
    if weights is None:
        out = helper.create_variable_for_type_inference(dtype=x.dtype)
    else:
        check_variable_and_dtype(weights, 'Weights',
                                 ['int32', 'int64', 'float32', 'float64'],
                                 'bincount')
        out = helper.create_variable_for_type_inference(dtype=weights.dtype)
    helper.append_op(
        type='bincount',
        inputs={'X': x, 'Weights': weights},
        outputs={'Out': out},
        attrs={'minlength': minlength})
    return out
def mv(x, vec, name=None):
    """
    Performs a matrix-vector product of the matrix x and the vector vec.

    Args:
        x (Tensor): A tensor with shape :math:`[M, N]`. The data type of the
            input Tensor x should be one of float32, float64.
        vec (Tensor): A tensor with shape :math:`[N]`. The data type of the
            input Tensor x should be one of float32, float64.
        name (str, optional): The default value is None. Normally there is
            no need for user to set this property. For more information,
            please refer to :ref:`api_guide_Name`.

    Returns:
        Tensor: The tensor which is producted by x and vec.

    Examples:
        .. code-block:: python

            # x: [M, N], vec: [N]
            # paddle.mv(x, vec)  # out: [M]

            import numpy as np
            import paddle

            x_data = np.array([[2, 1, 3], [3, 0, 1]]).astype("float64")
            x = paddle.to_tensor(x_data)
            vec_data = np.array([3, 5, 1])
            vec = paddle.to_tensor(vec_data).astype("float64")
            out = paddle.mv(x, vec)
    """
    if in_dygraph_mode():
        return _C_ops.final_state_mv(x, vec)
    if _in_legacy_dygraph():
        return _C_ops.mv(x, vec)

    # Static-graph path: validate dtype and shapes before building the op.
    def __check_input(x, vec):
        var_names = {'x': x, 'vec': vec}
        for name, val in var_names.items():
            check_variable_and_dtype(val, name, ['float32', 'float64'], 'mv')
        x_shape = list(x.shape)
        vec_shape = list(vec.shape)
        if len(x_shape) != 2:
            raise ValueError(
                "x should be 2-dimensional. But received x's dimention: {}".
                format(x_shape))
        if len(vec_shape) != 1:
            raise ValueError(
                "vec should be 1-dimensional. But received vec's dimention: {}".
                format(vec_shape))

    __check_input(x, vec)

    helper = LayerHelper('mv', **locals())
    out = helper.create_variable_for_type_inference(dtype=x.dtype)
    helper.append_op(
        type='mv', inputs={'X': x, 'Vec': vec}, outputs={'Out': out})
    return out
def det(x, name=None):
    """
    Calculates determinant value of a square matrix or batches of square matrices.

    Args:
        x (Tensor): the input matrix of size `(n, n)` or the batch of
            matrices of size `(*, n, n)` where `*` is one or more batch
            dimensions. Its data type should be float32 or float64.
        name (str, optional): Name for the operation (optional, default is
            None). For more information, please refer to
            :ref:`api_guide_Name`.

    Returns:
        y (Tensor): the determinant value of a square matrix or batches of
        square matrices.

    Examples:
        .. code-block:: python

            import paddle

            x = paddle.randn([3,3,3])
            A = paddle.linalg.det(x)
            print(A)
            # [ 0.02547996,  2.52317095, -6.15900707])
    """
    if paddle.in_dynamic_mode():
        return _C_ops.determinant(x)

    check_dtype(x.dtype, 'Input', ['float32', 'float64'], 'det')

    input_shape = list(x.shape)
    assert len(input_shape) >= 2, \
        "The x must be at least 2-dimensional, " \
        "but received Input x's dimensional: %s.\n" % \
        len(input_shape)

    # The two inner-most dimensions must form square matrices.
    # (The stray trailing line-continuation backslash that used to follow
    # this expression has been removed; it glued the assert onto the next
    # statement.)
    assert (input_shape[-1] == input_shape[-2]), \
        "Expect squared input," \
        "but received %s by %s matrix.\n" \
        % (input_shape[-2], input_shape[-1])

    helper = LayerHelper('determinant', **locals())
    out = helper.create_variable_for_type_inference(dtype=x.dtype)
    helper.append_op(
        type='determinant', inputs={'Input': [x]}, outputs={'Out': [out]})
    return out
def slogdet(x, name=None):
    """
    Calculates the sign and natural logarithm of the absolute value of a square matrix's or batches square matrices' determinant.
    The determinant can be computed with ``sign * exp(logabsdet)``.

    Supports input of float, double.

    Note that for matrices that have zero determinant, this returns ``(0, -inf)``.

    Args:
        x (Tensor): the batch of matrices of size :math:`(*, n, n)`
            where math:`*` is one or more batch dimensions.
        name (str, optional): Name for the operation (optional, default is
            None). For more information, please refer to
            :ref:`api_guide_Name`.

    Returns:
        y (Tensor): A tensor containing the sign of the determinant and the
        natural logarithm of the absolute value of determinant, respectively.

    Examples:
        .. code-block:: python

            import paddle

            x = paddle.randn([3,3,3])
            A = paddle.linalg.slogdet(x)
            print(A)
            # [[ 1.        ,  1.        , -1.        ],
            # [-0.98610914, -0.43010661, -0.10872950]])
    """
    if paddle.in_dynamic_mode():
        return _C_ops.slogdeterminant(x)

    check_dtype(x.dtype, 'Input', ['float32', 'float64'], 'slogdet')

    input_shape = list(x.shape)
    assert len(input_shape) >= 2, \
        "The x must be at least 2-dimensional, " \
        "but received Input x's dimensional: %s.\n" % \
        len(input_shape)

    # The two inner-most dimensions must form square matrices.
    # (The stray trailing line-continuation backslash that used to follow
    # this expression has been removed; it glued the assert onto the next
    # statement.)
    assert (input_shape[-1] == input_shape[-2]), \
        "Expect squared input," \
        "but received %s by %s matrix.\n" \
        % (input_shape[-2], input_shape[-1])

    helper = LayerHelper('slogdeterminant', **locals())
    out = helper.create_variable_for_type_inference(dtype=x.dtype)
    helper.append_op(
        type='slogdeterminant', inputs={'Input': [x]}, outputs={'Out': [out]})
    return out
def svd(x, full_matrices=False, name=None):
    r"""
    Computes the singular value decomposition of one matrix or a batch of
    regular matrices.

    Let :math:`X` be the input matrix or a batch of input matrices, the
    output satisfies:

    .. math::
        X = U * diag(S) * VT

    Args:
        x (Tensor): The input tensor. Its shape should be `[..., N, M]`,
            where `...` is zero or more batch dimensions. N and M can be
            arbitraty positive number. Note that if x is sigular matrices,
            the grad is numerical instable. The data type of x should be
            float32 or float64.
        full_matrices (bool): A flag to control the behavor of svd.
            With K = min(M, N):
            If full_matrices = True, svd op will compute full U and V
            matrics, which means shape of U is `[..., N, N]`, shape of V is
            `[..., M, M]`.
            If full_matrices = False, svd op will use a economic method to
            store U and V, which means shape of U is `[..., N, K]`, shape of
            V is `[..., M, K]`.
        name (str, optional): Name for the operation (optional, default is
            None). For more information, please refer to
            :ref:`api_guide_Name`.

    Returns:
        Tuple of 3 tensors: (U, S, VH). VH is the conjugate transpose of V.
        S is the singlar value vectors of matrics with shape `[..., K]`.

    Examples:
        .. code-block:: python

            import paddle

            x = paddle.to_tensor([[1.0, 2.0], [1.0, 3.0], [4.0, 6.0]]).astype('float64')
            x = x.reshape([3, 2])
            u, s, vh = paddle.linalg.svd(x)
            print (u)
            #U = [[ 0.27364809, -0.21695147  ],
            #     [ 0.37892198, -0.87112408 ],
            #     [ 0.8840446 ,  0.44053933 ]]

            print (s)
            #S = [8.14753743, 0.78589688]

            print (vh)
            #VT= [[ 0.51411221,  0.85772294],
            #     [ 0.85772294, -0.51411221]]

            # one can verify : U * S * VT == X
            #                  U * UH == I
            #                  V * VH == I
    """
    if paddle.in_dynamic_mode():
        return _C_ops.svd(x, 'full_matrices', full_matrices)

    # Static-graph path: three output variables, all sharing x's dtype.
    check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'svd')
    check_type(full_matrices, 'full_matrices', bool, 'svd')
    helper = LayerHelper('svd', **locals())
    u = helper.create_variable_for_type_inference(dtype=x.dtype)
    vh = helper.create_variable_for_type_inference(dtype=x.dtype)
    s = helper.create_variable_for_type_inference(dtype=x.dtype)
    helper.append_op(
        type='svd',
        inputs={'X': [x]},
        outputs={'U': u, 'VH': vh, 'S': s},
        attrs={'full_matrices': full_matrices})
    return u, s, vh
def matrix_power(x, n, name=None):
    r"""
    Computes the n-th power of a square matrix or a batch of square matrices.

    Let :math:`X` be a sqaure matrix or a batch of square matrices, and
    :math:`n` an integer exponent:

    .. math::
        Out = X ^ {n}

    Specifically,

    - If `n > 0`, it returns the matrix or a batch of matrices raised to the
      power of `n`.
    - If `n = 0`, it returns the identity matrix or a batch of identity
      matrices.
    - If `n < 0`, it returns the inverse of each matrix (if invertible)
      raised to the power of `abs(n)`.

    Args:
        x (Tensor): A square matrix or a batch of square matrices to be raised
            to power `n`. Its shape should be `[*, M, M]`, where `*` is zero or
            more batch dimensions. Its data type should be float32 or float64.
        n (int): The exponent. It can be any positive, negative integer or zero.
        name (str, optional): Name for the operation (optional, default is None).
            For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        Tensor: The n-th power of the matrix (or the batch of matrices) `x`. Its
        data type should be the same as that of `x`.

    Examples:
        .. code-block:: python

            import paddle

            x = paddle.to_tensor([[1, 2, 3],
                                  [1, 4, 9],
                                  [1, 8, 27]], dtype='float64')
            print(paddle.linalg.matrix_power(x, 2))
            # [[6.  , 34. , 102.],
            #  [14. , 90. , 282.],
            #  [36. , 250., 804.]]

            print(paddle.linalg.matrix_power(x, 0))
            # [[1., 0., 0.],
            #  [0., 1., 0.],
            #  [0., 0., 1.]]

            print(paddle.linalg.matrix_power(x, -2))
            # [[ 12.91666667, -12.75000000,  2.83333333 ],
            #  [-7.66666667 ,  8.         , -1.83333333 ],
            #  [ 1.80555556 , -1.91666667 ,  0.44444444 ]]
    """
    if paddle.in_dynamic_mode():
        return _C_ops.matrix_power(x, "n", n)

    # Static-graph path.
    check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'matrix_power')
    check_type(n, 'n', int, 'matrix_power')
    helper = LayerHelper('matrix_power', **locals())
    result = helper.create_variable_for_type_inference(dtype=x.dtype)
    helper.append_op(
        type='matrix_power',
        inputs={'X': x},
        outputs={'Out': result},
        attrs={'n': n})
    return result
def qr(x, mode="reduced", name=None):
    r"""
    Computes the QR decomposition of one matrix or batches of matrice
    (backward is unsupported now).

    Args:
        x (Tensor): The input tensor. Its shape should be `[..., M, N]`,
            where ... is zero or more batch dimensions. M and N can be
            arbitrary positive number. The data type of x should be float32
            or float64.
        mode (str, optional): A flag to control the behavior of qr; the
            default is "reduced". Suppose x's shape is `[..., M, N]` and
            denoting `K = min(M, N)`:
            If mode = "reduced", qr op will return reduced Q and R matrices,
            which means Q's shape is `[..., M, K]` and R's shape is `[..., K, N]`.
            If mode = "complete", qr op will return complete Q and R matrices,
            which means Q's shape is `[..., M, M]` and R's shape is `[..., M, N]`.
            If mode = "r", qr op will only return reduced R matrix, which means
            R's shape is `[..., K, N]`.
        name (str, optional): Name for the operation (optional, default is None).
            For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        If mode = "reduced" or mode = "complete", qr will return a two
        tensor-tuple, which represents Q and R.
        If mode = "r", qr will return a tensor which represents R.

    Examples:
        .. code-block:: python

            import paddle

            x = paddle.to_tensor([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]]).astype('float64')
            q, r = paddle.linalg.qr(x)
            print (q)
            print (r)

            # Q = [[-0.16903085,  0.89708523],
            #      [-0.50709255,  0.27602622],
            #      [-0.84515425, -0.34503278]])

            # R = [[-5.91607978, -7.43735744],
            #      [ 0.        ,  0.82807867]])

            # one can verify : X = Q * R ;
    """
    if paddle.in_dynamic_mode():
        q, r = _C_ops.qr(x, 'mode', mode)
        # mode "r" drops Q and hands back only the R factor.
        return r if mode == "r" else (q, r)

    # Static-graph path.
    check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'qr')
    check_type(mode, 'mode', str, 'qr')
    helper = LayerHelper('qr', **locals())
    q = helper.create_variable_for_type_inference(dtype=x.dtype)
    r = helper.create_variable_for_type_inference(dtype=x.dtype)
    helper.append_op(
        type='qr',
        inputs={'X': [x]},
        outputs={'Q': q, 'R': r},
        attrs={'mode': mode})
    return r if mode == "r" else (q, r)
def lu(x, pivot=True, get_infos=False, name=None):
    r"""
    Computes the LU factorization of an N-D(N>=2) matrix x.
    Returns the LU factorization(inplace x) and Pivots. low triangular matrix L and
    upper triangular matrix U are combined to a single LU matrix.
    Pivoting is done if pivot is set to True.
    P mat can be get by pivots:
    # ones = eye(rows) #eye matrix of rank rows
    # for i in range(cols):
    #     swap(ones[i], ones[pivots[i]])
    # return ones
    Args:
        X (Tensor): the tensor to factor of N-dimensions(N>=2).
        pivot (bool, optional): controls whether pivoting is done. Default: True.
        get_infos (bool, optional): if set to True, returns an info IntTensor. Default: False.
        name (str, optional): Name for the operation (optional, default is None).
            For more information, please refer to :ref:`api_guide_Name`.
    Returns:
        factorization (Tensor): LU matrix, the factorization of input X.
        pivots (IntTensor): the pivots of size(∗(N-2), min(m,n)). `pivots` stores all the
            intermediate transpositions of rows. The final permutation `perm` could be
            reconstructed by this, details refer to upper example.
        infos (IntTensor, optional): if `get_infos` is `True`, this is a tensor of size (∗(N-2))
            where non-zero values indicate whether factorization for the matrix or each minibatch
            has succeeded or failed.
    Examples:
        .. code-block:: python
            import paddle
            x = paddle.to_tensor([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]]).astype('float64')
            lu,p,info = paddle.linalg.lu(x, get_infos=True)
            # >>> lu:
            # Tensor(shape=[3, 2], dtype=float64, place=CUDAPlace(0), stop_gradient=True,
            #    [[5.        , 6.        ],
            #        [0.20000000, 0.80000000],
            #        [0.60000000, 0.50000000]])
            # >>> p
            # Tensor(shape=[2], dtype=int32, place=CUDAPlace(0), stop_gradient=True,
            #    [3, 3])
            # >>> info
            # Tensor(shape=[], dtype=int32, place=CUDAPlace(0), stop_gradient=True,
            #    0)
            P,L,U = paddle.linalg.lu_unpack(lu,p)
            # >>> P
            # (Tensor(shape=[3, 3], dtype=float64, place=CUDAPlace(0), stop_gradient=True,
            # [[0., 1., 0.],
            # [0., 0., 1.],
            # [1., 0., 0.]]),
            # >>> L
            # Tensor(shape=[3, 2], dtype=float64, place=CUDAPlace(0), stop_gradient=True,
            # [[1.        , 0.        ],
            # [0.20000000, 1.        ],
            # [0.60000000, 0.50000000]]),
            # >>> U
            # Tensor(shape=[2, 2], dtype=float64, place=CUDAPlace(0), stop_gradient=True,
            # [[5.        , 6.        ],
            # [0.        , 0.80000000]]))
            # one can verify : X = P @ L @ U ;
    """
    if paddle.in_dynamic_mode():
        # The kernel always produces (LU, pivots, infos); `get_infos` only
        # controls how many of them are handed back to the caller.
        LU, Piv, Info = _C_ops.lu(x, 'pivots', pivot)
        if get_infos:
            return LU, Piv, Info
        else:
            return LU, Piv
    # Static-graph path.
    check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'lu')
    helper = LayerHelper('lu', **locals())
    lu = helper.create_variable_for_type_inference(dtype=x.dtype)
    # Pivots and infos are integer outputs.
    # NOTE(review): dtype='int' here (not 'int32'/'int64') — confirm the
    # framework maps this string to the dtype the lu kernel emits.
    p = helper.create_variable_for_type_inference(dtype='int')
    info = helper.create_variable_for_type_inference(dtype='int')
    attrs = dict()
    attrs['pivots'] = pivot
    helper.append_op(
        type='lu',
        inputs={'X': x},
        outputs={'Out': lu,
                 'Pivots': p,
                 'Infos': info},
        attrs=attrs)
    if get_infos:
        return lu, p, info
    else:
        return lu, p
def lu_unpack(x, y, unpack_ludata=True, unpack_pivots=True, name=None):
    r"""
    Unpack L U and P to single matrix tensor .
    unpack L and U matrix from LU, unpack permutation matrix P from Pivtos .
    P mat can be get by pivots:
    # ones = eye(rows) #eye matrix of rank rows
    # for i in range(cols):
    #     swap(ones[i], ones[pivots[i]])
    Args:
        x (Tensor): The LU tensor get from paddle.linalg.lu, which is combined by L and U.
        y (Tensor): Pivots get from paddle.linalg.lu.
        unpack_ludata (bool,optional): whether to unpack L and U from x. Default: True.
        unpack_pivots (bool, optional): whether to unpack permutation matrix P from Pivtos. Default: True.
        name (str, optional): Name for the operation (optional, default is None).
            For more information, please refer to :ref:`api_guide_Name`.
    Returns:
        P (Tensor): Permutation matrix P of lu factorization.
        L (Tensor): The lower triangular matrix tensor of lu factorization.
        U (Tensor): The upper triangular matrix tensor of lu factorization.
    Examples:
        .. code-block:: python
            import paddle
            x = paddle.to_tensor([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]]).astype('float64')
            lu,p,info = paddle.linalg.lu(x, get_infos=True)
            # >>> lu:
            # Tensor(shape=[3, 2], dtype=float64, place=CUDAPlace(0), stop_gradient=True,
            #    [[5.        , 6.        ],
            #        [0.20000000, 0.80000000],
            #        [0.60000000, 0.50000000]])
            # >>> p
            # Tensor(shape=[2], dtype=int32, place=CUDAPlace(0), stop_gradient=True,
            #    [3, 3])
            # >>> info
            # Tensor(shape=[], dtype=int32, place=CUDAPlace(0), stop_gradient=True,
            #    0)
            P,L,U = paddle.linalg.lu_unpack(lu,p)
            # >>> P
            # (Tensor(shape=[3, 3], dtype=float64, place=CUDAPlace(0), stop_gradient=True,
            # [[0., 1., 0.],
            # [0., 0., 1.],
            # [1., 0., 0.]]),
            # >>> L
            # Tensor(shape=[3, 2], dtype=float64, place=CUDAPlace(0), stop_gradient=True,
            # [[1.        , 0.        ],
            # [0.20000000, 1.        ],
            # [0.60000000, 0.50000000]]),
            # >>> U
            # Tensor(shape=[2, 2], dtype=float64, place=CUDAPlace(0), stop_gradient=True,
            # [[5.        , 6.        ],
            # [0.        , 0.80000000]]))
            # one can verify : X = P @ L @ U ;
    """
    if paddle.in_dynamic_mode():
        # Attrs are passed positionally as ('name', value) pairs.
        P, L, U = _C_ops.lu_unpack(x, y, 'unpack_ludata', unpack_ludata,
                                   'unpack_pivots', unpack_pivots)
        return P, L, U
    # Static-graph path: P, L and U all share the dtype of the packed LU
    # input x.
    check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'lu_unpack')
    helper = LayerHelper('lu_unpack', **locals())
    p = helper.create_variable_for_type_inference(dtype=x.dtype)
    l = helper.create_variable_for_type_inference(dtype=x.dtype)
    u = helper.create_variable_for_type_inference(dtype=x.dtype)
    attrs = dict()
    attrs['unpack_ludata'] = unpack_ludata
    attrs['unpack_pivots'] = unpack_pivots
    helper.append_op(
        type='lu_unpack',
        inputs={'X': x,
                'Pivots': y},
        outputs={'Pmat': p,
                 'L': l,
                 'U': u},
        attrs=attrs)
    return p, l, u
def eig(x, name=None):
    """
    This API performs the eigenvalue decomposition of a square matrix or a batch of square matrices.

    .. note::
        If the matrix is a Hermitian or a real symmetric matrix, please use :ref:`paddle.linalg.eigh` instead, which is much faster.
        If only eigenvalues is needed, please use :ref:`paddle.linalg.eigvals` instead.
        If the matrix is of any shape, please use :ref:`paddle.linalg.svd`.
        This API is only supported on CPU device.
        The output datatype is always complex for both real and complex input.

    Args:
        x (Tensor): A tensor with shape math:`[*, N, N]`, The data type of the x should be one of ``float32``,
            ``float64``, ``complex64`` or ``complex128``.
        name (str, optional): The default value is `None`. Normally there is no need for user to set
            this property. For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        Eigenvalues(Tensors): A tensor with shape math:`[*, N]` refers to the eigen values.
        Eigenvectors(Tensors): A tensor with shape math:`[*, N, N]` refers to the eigen vectors.

    Examples:
        .. code-block:: python

            import paddle
            import numpy as np

            paddle.device.set_device("cpu")

            x_data = np.array([[1.6707249, 7.2249975, 6.5045543],
                               [9.956216, 8.749598, 6.066444 ],
                               [4.4251957, 1.7983172, 0.370647 ]]).astype("float32")
            x = paddle.to_tensor(x_data)
            w, v = paddle.linalg.eig(x)
            print(w)
            # Tensor(shape=[3], dtype=complex128, place=CPUPlace, stop_gradient=False,
            #       [ (16.50471283351188+0j)  , (-5.5034820550763515+0j) ,
            #         (-0.21026087843552282+0j)])

            print(v)
            # Tensor(shape=[3, 3], dtype=complex128, place=CPUPlace, stop_gradient=False,
            #       [[(-0.5061363550800655+0j) , (-0.7971760990842826+0j) ,
            #       (0.18518077798279986+0j)],
            #       [(-0.8308237755993192+0j) , (0.3463813401919749+0j) ,
            #       (-0.6837005269141947+0j) ],
            #       [(-0.23142567697893396+0j), (0.4944999840400175+0j) ,
            #       (0.7058765252952796+0j) ]])
    """
    # NOTE: the previous docstring example annotated print(w) with the 3x3
    # eigenvector matrix and print(v) with the eigenvalue vector — swapped
    # relative to the actual (Eigenvalues, Eigenvectors) return order below.
    if paddle.in_dynamic_mode():
        w, v = _C_ops.eig(x)
        return w, v

    check_variable_and_dtype(
        x, 'X', ['float32', 'float64', 'complex64', 'complex128'], 'eig')
    helper = LayerHelper('eig', **locals())

    # w holds the eigenvalues, v the eigenvectors.
    w = helper.create_variable_for_type_inference(x.dtype)
    v = helper.create_variable_for_type_inference(x.dtype)

    inputs = {'X': x}
    outputs = {'Eigenvalues': w, 'Eigenvectors': v}
    helper.append_op(type='eig', inputs=inputs, outputs=outputs)

    return w, v
def eigvals(x, name=None):
    """
    Compute the eigenvalues of one or more general matrices.

    Warning:
        The gradient kernel of this operator does not yet developed.
        If you need back propagation through this operator, please replace
        it with paddle.linalg.eig.

    Args:
        x (Tensor): A square matrix or a batch of square matrices whose
            eigenvalues will be computed. Its shape should be `[*, M, M]`,
            where `*` is zero or more batch dimensions. Its data type should
            be float32, float64, complex64, or complex128.
        name (str, optional): Name for the operation (optional, default is
            None). For more information, please refer to
            :ref:`api_guide_Name`.

    Returns:
        Tensor: A tensor containing the unsorted eigenvalues which has the
        same batch dimensions with `x`. The eigenvalues are complex-valued
        even when `x` is real.

    Examples:
        .. code-block:: python

            import paddle

            paddle.set_device("cpu")
            paddle.seed(1234)

            x = paddle.rand(shape=[3, 3], dtype='float64')
            # [[0.02773777, 0.93004224, 0.06911496],
            #  [0.24831591, 0.45733623, 0.07717843],
            #  [0.48016702, 0.14235102, 0.42620817]])

            print(paddle.linalg.eigvals(x))
            # [(-0.27078833542132674+0j), (0.29962280156230725+0j), (0.8824477020120244+0j)] #complex128
    """
    # Validate dtype and shape up front, in both dynamic and static mode.
    check_variable_and_dtype(
        x, 'dtype', ['float32', 'float64', 'complex64', 'complex128'],
        'eigvals')

    shape = list(x.shape)
    if len(shape) < 2:
        raise ValueError(
            "The dimension of Input(x) should be at least 2, but received x's dimention = {}, x's shape = {}".
            format(len(shape), shape))
    if shape[-1] != shape[-2]:
        raise ValueError(
            "The last two dimensions of Input(x) should be equal, but received x's shape = {}".
            format(shape))

    if paddle.in_dynamic_mode():
        return _C_ops.eigvals(x)

    helper = LayerHelper('eigvals', **locals())
    out = helper.create_variable_for_type_inference(dtype=x.dtype)
    helper.append_op(type='eigvals', inputs={'X': x}, outputs={'Out': out})
    return out
def multi_dot(x, name=None):
    """
    Multi_dot is an operator that calculates multiple matrix multiplications.

    Supports inputs of float16(only GPU support), float32 and float64 dtypes. This function does not
    support batched inputs.

    The input tensor in [x] must be 2-D except for the first and last can be 1-D.
    If the first tensor is a 1-D vector of shape(n, ) it is treated as row vector
    of shape(1, n), similarly if the last tensor is a 1D vector of shape(n, ), it
    is treated as a column vector of shape(n, 1).

    If the first and last tensor are 2-D matrix, then the output is also 2-D matrix,
    otherwise the output is a 1-D vector.

    Multi_dot will select the lowest cost multiplication order for calculation. The
    cost of multiplying two matrices with shapes (a, b) and (b, c) is a * b * c.
    Given matrices A, B, C with shapes (20, 5), (5, 100), (100, 10) respectively,
    we can calculate the cost of different multiplication orders as follows:
    - Cost((AB)C) = 20x5x100 + 20x100x10 = 30000
    - Cost(A(BC)) = 5x100x10 + 20x5x10 = 6000

    In this case, multiplying B and C first, then multiply A, which is 5 times faster
    than sequential calculation.

    Args:
        x ([Tensor]): The input tensors which is a list Tensor.
        name(str|None): A name for this layer(optional). If set None, the layer
            will be named automatically.

    Returns:
        Tensor: The output Tensor.

    Examples:
        .. code-block:: python

            import paddle
            import numpy as np

            # A * B
            A_data = np.random.random([3, 4]).astype(np.float32)
            B_data = np.random.random([4, 5]).astype(np.float32)
            A = paddle.to_tensor(A_data)
            B = paddle.to_tensor(B_data)
            out = paddle.linalg.multi_dot([A, B])
            print(out.numpy().shape)
            # [3, 5]

            # A * B * C
            A_data = np.random.random([10, 5]).astype(np.float32)
            B_data = np.random.random([5, 8]).astype(np.float32)
            C_data = np.random.random([8, 7]).astype(np.float32)
            A = paddle.to_tensor(A_data)
            B = paddle.to_tensor(B_data)
            C = paddle.to_tensor(C_data)
            out = paddle.linalg.multi_dot([A, B, C])
            print(out.numpy().shape)
            # [10, 7]
    """
    # Dygraph: dispatch straight to the C++ kernel.
    if paddle.in_dynamic_mode():
        return _C_ops.multi_dot(x)

    # Static graph: validate that x is a list/tuple of tensors that all share
    # one supported dtype before appending the op.
    check_type(x, 'x', (list, tuple), 'multi_dot')
    # NOTE: loop variable renamed from `id` to `i` so the builtin `id` is not
    # shadowed; the error-message label 'x[i]' is unchanged in content.
    for i, item in enumerate(x):
        check_variable_and_dtype(item, 'x[' + str(i) + ']',
                                 ['float16', 'float32', 'float64'], 'multi_dot')
        if item.dtype != x[0].dtype:
            raise TypeError(
                "All the Tensors in the input must have the same data type.")

    helper = LayerHelper('multi_dot', **locals())
    dtype = helper.input_dtype(input_param_name='x')
    out = helper.create_variable_for_type_inference(dtype)
    helper.append_op(type='multi_dot', inputs={"X": x}, outputs={"Out": out})
    return out
def eigh(x, UPLO='L', name=None):
    """
    Compute the eigenvalues and eigenvectors of a complex Hermitian
    (conjugate symmetric) or a real symmetric matrix.

    Args:
        x (Tensor): A tensor with shape :math:`[*, N, N]`. The data type of the
            input Tensor x should be one of float32, float64, complex64, complex128.
        UPLO(str, optional): (string, default 'L'), 'L' represents the lower triangular matrix,
            'U' represents the upper triangular matrix.
        name(str, optional): The default value is None. Normally there is no need for user to set this
            property. For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        out_value(Tensor): A Tensor with shape [*, N] and data type of float32 and float64.
            The eigenvalues of eigh op.
        out_vector(Tensor): A Tensor with shape [*, N, N] and data type of float32, float64,
            complex64 and complex128. The eigenvectors of eigh op.

    Examples:
        .. code-block:: python

            import numpy as np
            import paddle

            x_data = np.array([[1, -2j], [2j, 5]])
            x = paddle.to_tensor(x_data)
            out_value, out_vector = paddle.linalg.eigh(x, UPLO='L')
            print(out_value)
            #[0.17157288, 5.82842712]
    """
    # Dygraph: dispatch straight to the C++ kernel.
    if paddle.in_dynamic_mode():
        return _C_ops.eigh(x, 'UPLO', UPLO)

    # Static graph: validate shape and UPLO (checks inlined from the former
    # nested `__check_input` helper; same checks, same order, same messages).
    x_shape = list(x.shape)
    if len(x_shape) < 2:
        raise ValueError(
            "Input(input) only support >=2 tensor, but received "
            "length of Input(input) is %s." % len(x.shape))
    if x_shape[-1] != x_shape[-2]:
        raise ValueError(
            "The input matrix must be batches of square matrices. But received x's dimention: {}".
            format(x_shape))
    if UPLO != 'L' and UPLO != 'U':
        raise ValueError(
            "UPLO must be L or U. But received UPLO is: {}".format(UPLO))

    helper = LayerHelper('eigh', **locals())
    check_variable_and_dtype(
        x, 'dtype', ['float32', 'float64', 'complex64', 'complex128'], 'eigh')

    out_value = helper.create_variable_for_type_inference(dtype=x.dtype)
    out_vector = helper.create_variable_for_type_inference(dtype=x.dtype)
    helper.append_op(
        type='eigh',
        inputs={'X': x},
        outputs={'Eigenvalues': out_value,
                 'Eigenvectors': out_vector},
        attrs={'UPLO': UPLO})
    return out_value, out_vector
def pinv(x, rcond=1e-15, hermitian=False, name=None):
    r"""
    Calculate pseudo inverse via SVD(singular value decomposition)
    of one matrix or batches of regular matrix.

    .. math::

        if hermitian == False:
            x = u * s * vt  (SVD)
            out = v * 1/s * ut
        else:
            x = u * s * ut  (eigh)
            out = u * 1/s * u.conj().transpose(-2,-1)

    If x is hermitian or symmetric matrix, svd will be replaced with eigh.

    Args:
        x(Tensor): The input tensor. Its shape should be (*, m, n)
            where * is zero or more batch dimensions. m and n can be
            arbitrary positive number. The data type of x should be
            float32 or float64 or complex64 or complex128. When data
            type is complex64 or complex128, hermitian should be set
            True.

        rcond(Tensor, optional): the tolerance value to determine
            when is a singular value zero. Default: 1e-15.

        hermitian(bool, optional): indicates whether x is Hermitian
            if complex or symmetric if real. Default: False.

        name(str|None): A name for this layer(optional). If set None,
            the layer will be named automatically.

    Returns:
        Tensor: The tensor with same data type with x. it represents
        pseudo inverse of x. Its shape should be (*, n, m).

    Examples:
        .. code-block:: python

            import paddle

            x = paddle.arange(15).reshape((3, 5)).astype('float64')
            input = paddle.to_tensor(x)
            out = paddle.linalg.pinv(input)
            print(input)
            print(out)

            # input:
            # [[0. , 1. , 2. , 3. , 4. ],
            #  [5. , 6. , 7. , 8. , 9. ],
            #  [10., 11., 12., 13., 14.]]

            # out:
            # [[-0.22666667, -0.06666667,  0.09333333],
            #  [-0.12333333, -0.03333333,  0.05666667],
            #  [-0.02000000,  0.00000000,  0.02000000],
            #  [ 0.08333333,  0.03333333, -0.01666667],
            #  [ 0.18666667,  0.06666667, -0.05333333]]

            # one can verify : x * out * x = x ;
            # or out * x * out = x ;
    """
    if paddle.in_dynamic_mode():
        if not hermitian:
            # combine svd and matmul op
            u, s, vt = _C_ops.svd(x, 'full_matrices', False)
            max_singular_val = _C_ops.reduce_max(s, 'dim', [-1], 'keep_dim', True, \
                'reduce_all', False)
            rcond = paddle.to_tensor(rcond, dtype=x.dtype)
            # Keep 1/s for singular values above rcond * max(s); the rest map
            # to 1/inf == 0 so near-zero singular values are discarded.
            cutoff = rcond * max_singular_val
            y = float('inf')
            y = paddle.to_tensor(y, dtype=x.dtype)

            condition = s > cutoff
            cond_int = layers.cast(condition, s.dtype)
            cond_not_int = layers.cast(layers.logical_not(condition), s.dtype)
            out1 = layers.elementwise_mul(1 / s, cond_int)
            out2 = layers.elementwise_mul(1 / y, cond_not_int)
            singular = layers.elementwise_add(out1, out2)
            # Insert an axis so `singular` broadcasts as a row of diag(1/s).
            st, _ = _C_ops.unsqueeze2(singular, 'axes', [-2])

            # v = vt transposed on its trailing two axes.
            dims = list(range(len(vt.shape)))
            perm = dims[:-2] + [dims[-1]] + [dims[-2]]
            v, _ = _C_ops.transpose2(vt, 'axis', perm)

            # out = v * diag(1/s) @ u^T
            out_1 = v * st
            out_2 = _C_ops.matmul_v2(out_1, u, 'trans_x', False, 'trans_y',
                                     True)
            return out_2
        else:
            # combine eigh and matmul op
            s, u = _C_ops.eigh(x, 'UPLO', 'L')
            s_abs = paddle.abs(s)
            max_singular_val = _C_ops.reduce_max(s_abs, 'dim', [-1], 'keep_dim', True, \
                'reduce_all', False)
            rcond = paddle.to_tensor(rcond, dtype=s.dtype)
            cutoff = rcond * max_singular_val
            y = float('inf')
            y = paddle.to_tensor(y, dtype=s.dtype)

            # Same cutoff trick as above, but on |eigenvalue| since eigenvalues
            # of a Hermitian matrix may be negative.
            condition = s_abs > cutoff
            cond_int = layers.cast(condition, s.dtype)
            cond_not_int = layers.cast(layers.logical_not(condition), s.dtype)
            out1 = layers.elementwise_mul(1 / s, cond_int)
            out2 = layers.elementwise_mul(1 / y, cond_not_int)
            singular = layers.elementwise_add(out1, out2)
            st, _ = _C_ops.unsqueeze2(singular, 'axes', [-2])

            # out = u * diag(1/s) @ conj(u)^T
            out_1 = u * st
            u_conj = _C_ops.conj(u)
            out_2 = _C_ops.matmul_v2(out_1, u_conj, 'trans_x', False, 'trans_y',
                                     True)
            return out_2
    else:
        if not hermitian:
            # Static graph, SVD path: mirror of the dygraph branch above,
            # expressed as appended ops.
            helper = LayerHelper('pinv', **locals())
            dtype = x.dtype
            check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'pinv')

            u = helper.create_variable_for_type_inference(dtype)
            s = helper.create_variable_for_type_inference(dtype)
            vt = helper.create_variable_for_type_inference(dtype)
            helper.append_op(
                type='svd',
                inputs={'X': [x]},
                outputs={'U': u,
                         'VH': vt,
                         'S': s},
                attrs={'full_matrices': False}, )

            max_singular_val = helper.create_variable_for_type_inference(dtype)
            helper.append_op(
                type='reduce_max',
                inputs={'X': s},
                outputs={'Out': max_singular_val},
                attrs={'dim': [-1],
                       'keep_dim': True,
                       'reduce_all': False})

            rcond = layers.fill_constant(shape=[1], value=rcond, dtype=dtype)
            cutoff = rcond * max_singular_val
            y = float('inf')
            y = layers.fill_constant(shape=[1], value=y, dtype=dtype)

            condition = s > cutoff
            cond_int = layers.cast(condition, dtype)
            cond_not_int = layers.cast(layers.logical_not(condition), dtype)
            out1 = layers.elementwise_mul(1 / s, cond_int)
            out2 = layers.elementwise_mul(1 / y, cond_not_int)
            singular = layers.elementwise_add(out1, out2)

            st = helper.create_variable_for_type_inference(dtype=dtype)
            st_shape = helper.create_variable_for_type_inference(dtype=dtype)
            helper.append_op(
                type='unsqueeze2',
                inputs={'X': singular},
                attrs={'axes': [-2]},
                outputs={'Out': st,
                         'XShape': st_shape})

            dims = list(range(len(vt.shape)))
            perm = dims[:-2] + [dims[-1]] + [dims[-2]]
            v = helper.create_variable_for_type_inference(dtype)
            v_shape = helper.create_variable_for_type_inference(dtype)
            helper.append_op(
                type='transpose2',
                inputs={'X': [vt]},
                outputs={'Out': [v],
                         'XShape': [v_shape]},
                attrs={'axis': perm})

            out_1 = helper.create_variable_for_type_inference(dtype)
            helper.append_op(
                type='elementwise_mul',
                inputs={'X': v,
                        'Y': st},
                outputs={'Out': out_1},
                attrs={'axis': -1,
                       'use_mkldnn': False})
            out_1 = helper.append_activation(out_1)

            out_2 = helper.create_variable_for_type_inference(dtype)
            helper.append_op(
                type='matmul_v2',
                inputs={'X': out_1,
                        'Y': u},
                outputs={'Out': out_2},
                attrs={'trans_x': False,
                       'trans_y': True}, )
            return out_2
        else:
            # Static graph, Hermitian path: eigh-based mirror of the dygraph
            # branch above.
            helper = LayerHelper('pinv', **locals())
            dtype = x.dtype
            check_variable_and_dtype(
                x, 'dtype', ['float32', 'float64', 'complex64',
                             'complex128'], 'pinv')

            # Eigenvalues of a Hermitian matrix are real, so their storage
            # dtype is the real counterpart of a complex input dtype.
            if dtype == paddle.complex128:
                s_type = 'float64'
            elif dtype == paddle.complex64:
                s_type = 'float32'
            else:
                s_type = dtype

            u = helper.create_variable_for_type_inference(dtype)
            s = helper.create_variable_for_type_inference(s_type)
            helper.append_op(
                type='eigh',
                inputs={'X': x},
                outputs={'Eigenvalues': s,
                         'Eigenvectors': u},
                attrs={'UPLO': 'L'})
            s_abs = helper.create_variable_for_type_inference(s_type)
            helper.append_op(
                type='abs', inputs={'X': s}, outputs={'Out': s_abs})
            max_singular_val = helper.create_variable_for_type_inference(s_type)
            helper.append_op(
                type='reduce_max',
                inputs={'X': s_abs},
                outputs={'Out': max_singular_val},
                attrs={'dim': [-1],
                       'keep_dim': True,
                       'reduce_all': False})

            rcond = layers.fill_constant(shape=[1], value=rcond, dtype=s_type)
            cutoff = rcond * max_singular_val
            y = float('inf')
            y = layers.fill_constant(shape=[1], value=y, dtype=s_type)

            condition = s_abs > cutoff
            cond_int = layers.cast(condition, s_type)
            cond_not_int = layers.cast(layers.logical_not(condition), s_type)
            out1 = layers.elementwise_mul(1 / s, cond_int)
            out2 = layers.elementwise_mul(1 / y, cond_not_int)
            singular = layers.elementwise_add(out1, out2)

            st = helper.create_variable_for_type_inference(dtype=s_type)
            st_shape = helper.create_variable_for_type_inference(dtype=s_type)
            helper.append_op(
                type='unsqueeze2',
                inputs={'X': singular},
                attrs={'axes': [-2]},
                outputs={'Out': st,
                         'XShape': st_shape})

            out_1 = helper.create_variable_for_type_inference(dtype)
            helper.append_op(
                type='elementwise_mul',
                inputs={'X': u,
                        'Y': st},
                outputs={'Out': out_1},
                attrs={'axis': -1,
                       'use_mkldnn': False})
            out_1 = helper.append_activation(out_1)

            u_conj = helper.create_variable_for_type_inference(dtype)
            helper.append_op(
                type='conj', inputs={'X': u}, outputs={'Out': [u_conj]})

            out_2 = helper.create_variable_for_type_inference(dtype)
            helper.append_op(
                type='matmul_v2',
                inputs={'X': out_1,
                        'Y': u_conj},
                outputs={'Out': out_2},
                attrs={'trans_x': False,
                       'trans_y': True}, )
            return out_2
def solve(x, y, name=None):
    r"""
    Computes the solution of a square system of linear equations with a unique solution for input 'X' and 'Y'.
    Let :math:`X` be a square matrix or a batch of square matrices, :math:`Y` be
    a vector/matrix or a batch of vectors/matrices, the equation should be:

    .. math::
        Out = X^-1 * Y

    Specifically, this system of linear equations has one solution if and only
    if input 'X' is invertible.

    Args:
        x (Tensor): A square matrix or a batch of square matrices. Its shape should be `[*, M, M]`, where `*` is zero or
            more batch dimensions. Its data type should be float32 or float64.
        y (Tensor): A vector/matrix or a batch of vectors/matrices. Its shape should be `[*, M, K]`, where `*` is zero or
            more batch dimensions. Its data type should be float32 or float64.
        name(str, optional): Name for the operation (optional, default is None).
            For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        Tensor: The solution of a square system of linear equations with a unique solution for input 'x' and 'y'.
        Its data type should be the same as that of `x`.

    Examples:
        .. code-block:: python

            # a square system of linear equations:
            # 3*X0 +   X1 = 9
            #   X0 + 2*X1 = 8

            import paddle
            import numpy as np

            np_x = np.array([[3, 1],[1, 2]])
            np_y = np.array([9, 8])
            x = paddle.to_tensor(np_x, dtype="float64")
            y = paddle.to_tensor(np_y, dtype="float64")
            out = paddle.linalg.solve(x, y)

            print(out)
            # [2., 3.])
    """
    # Dygraph: dispatch straight to the C++ kernel.
    if paddle.in_dynamic_mode():
        return _C_ops.solve(x, y)

    # Static graph path.
    # NOTE: a dead `inputs = {"X": [x], "Y": [y]}` local that was never used by
    # append_op has been removed.
    helper = LayerHelper("solve", **locals())
    check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'solve')
    check_variable_and_dtype(y, 'y', ['float32', 'float64'], 'solve')

    out = helper.create_variable_for_type_inference(dtype=x.dtype)
    helper.append_op(
        type="solve", inputs={"X": x,
                              "Y": y}, outputs={"Out": out})
    return out
def triangular_solve(x,
                     y,
                     upper=True,
                     transpose=False,
                     unitriangular=False,
                     name=None):
    r"""
    Computes the solution of a system of equations with a triangular coefficient matrix `x` and
    multiple right-hand sides `y` .

    Input `x` and `y` is 2D matrices or batches of 2D matrices. If the inputs are batches, the outputs
    is also batches.

    Args:
        x (Tensor): The input triangular coefficient matrix. Its shape should be `[*, M, M]`, where `*` is zero or
            more batch dimensions. Its data type should be float32 or float64.
        y (Tensor): Multiple right-hand sides of system of equations. Its shape should be `[*, M, K]`, where `*` is
            zero or more batch dimensions. Its data type should be float32 or float64.
        upper (bool, optional): Whether to solve the upper-triangular system of equations (default) or the lower-triangular
            system of equations. Default: True.
        transpose (bool, optional): whether `x` should be transposed before calculation. Default: False.
        unitriangular (bool, optional): whether `x` is unit triangular. If True, the diagonal elements of `x` are assumed
            to be 1 and not referenced from `x` . Default: False.
        name(str, optional): Name for the operation (optional, default is None).
            For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        Tensor: The solution of the system of equations. Its data type should be the same as that of `x`.

    Examples:
        .. code-block:: python

            # a square system of linear equations:
            # x1 +   x2 +   x3 = 0
            #      2*x2 +   x3 = -9
            #             -x3  = 5

            import paddle
            import numpy as np

            x = paddle.to_tensor([[1, 1, 1],
                                  [0, 2, 1],
                                  [0, 0,-1]], dtype="float64")
            y = paddle.to_tensor([[0], [-9], [5]], dtype="float64")
            out = paddle.linalg.triangular_solve(x, y, upper=True)

            print(out)
            # [7, -2, -5]
    """
    # Dygraph: dispatch straight to the C++ kernel.
    if paddle.in_dynamic_mode():
        return _C_ops.triangular_solve(x, y, 'upper', upper, 'transpose',
                                       transpose, 'unitriangular',
                                       unitriangular)

    # Static graph path.
    # NOTE: a dead `inputs = {"X": [x], "Y": [y]}` local that was never used by
    # append_op has been removed.
    helper = LayerHelper("triangular_solve", **locals())
    check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'triangular_solve')
    check_variable_and_dtype(y, 'y', ['float32', 'float64'], 'triangular_solve')

    out = helper.create_variable_for_type_inference(dtype=x.dtype)
    helper.append_op(
        type='triangular_solve',
        inputs={'X': x,
                'Y': y},
        outputs={'Out': out},
        attrs={
            'upper': upper,
            'transpose': transpose,
            'unitriangular': unitriangular
        })
    return out
def cholesky_solve(x, y, upper=False, name=None):
    r"""
    Solves a linear system of equations A @ X = B, given A's Cholesky factor matrix u and matrix B.

    Input `x` and `y` is 2D matrices or batches of 2D matrices. If the inputs are batches, the outputs
    is also batches.

    Args:
        x (Tensor): Multiple right-hand sides B of the system of equations. Its shape should be
            `[*, M, K]`, where `*` is zero or more batch dimensions. Its data type should be
            float32 or float64.
        y (Tensor): The upper or lower triangular Cholesky factor u of square matrix A. Its shape
            should be `[*, M, M]`, where `*` is zero or more batch dimensions. Its data type
            should be float32 or float64.
        upper (bool, optional): whether to consider the Cholesky factor as a lower or upper triangular matrix. Default: False.
        name(str, optional): Name for the operation (optional, default is None).
            For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        Tensor: The solution of the system of equations. Its data type is the same as that of `x`.

    Examples:
        .. code-block:: python

            import paddle

            u = paddle.to_tensor([[1, 1, 1],
                                  [0, 2, 1],
                                  [0, 0,-1]], dtype="float64")
            b = paddle.to_tensor([[0], [-9], [5]], dtype="float64")
            out = paddle.linalg.cholesky_solve(b, u, upper=True)

            print(out)
            # [-2.5, -7, 9.5]
    """
    # Dygraph: dispatch straight to the C++ kernel.
    if paddle.in_dynamic_mode():
        return _C_ops.cholesky_solve(x, y, 'upper', upper)

    # Static graph: validate dtypes, then append a `cholesky_solve` op.
    helper = LayerHelper("cholesky_solve", **locals())
    check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'cholesky_solve')
    check_variable_and_dtype(y, 'y', ['float32', 'float64'], 'cholesky_solve')
    out = helper.create_variable_for_type_inference(dtype=x.dtype)
    op_inputs = {'X': x, 'Y': y}
    op_attrs = {'upper': upper}
    helper.append_op(
        type='cholesky_solve',
        inputs=op_inputs,
        outputs={'Out': out},
        attrs=op_attrs)
    return out
def eigvalsh(x, UPLO='L', name=None):
    """
    Computes the eigenvalues of a complex Hermitian (conjugate symmetric)
    or a real symmetric matrix.

    Args:
        x (Tensor): A tensor with shape :math:`[_, M, M]`. The data type of the
            input Tensor x should be one of float32, float64, complex64, complex128.
        UPLO(str, optional): Lower triangular part of a ('L', default) or the
            upper triangular part ('U').
        name(str, optional): The default value is None. Normally there is no need for user to set this
            property. For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        Tensor: The tensor eigenvalues in ascending order.

    Examples:
        .. code-block:: python

            import numpy as np
            import paddle

            x_data = np.array([[1, -2j], [2j, 5]])
            x = paddle.to_tensor(x_data)
            out_value = paddle.eigvalsh(x, UPLO='L')
            print(out_value)
            #[0.17157288, 5.82842712]
    """
    # Dygraph: the kernel also returns eigenvectors; only the values are kept.
    # `is_test` mirrors x.stop_gradient so backward bookkeeping can be skipped.
    if paddle.in_dynamic_mode():
        values, _ = _C_ops.eigvalsh(x, 'UPLO', UPLO, 'is_test',
                                    x.stop_gradient)
        return values

    # Static graph: validate shape and UPLO (checks inlined from the former
    # nested `__check_input` helper; same checks, same order, same messages).
    x_shape = list(x.shape)
    if len(x_shape) < 2:
        raise ValueError(
            "Input(input) only support >=2 tensor, but received "
            "length of Input(input) is %s." % len(x.shape))
    if x_shape[-1] != x_shape[-2]:
        raise ValueError(
            "The input matrix must be batches of square matrices. But received x's dimention: {}".
            format(x_shape))
    if UPLO != 'L' and UPLO != 'U':
        raise ValueError(
            "UPLO must be L or U. But received UPLO is: {}".format(UPLO))

    helper = LayerHelper('eigvalsh', **locals())
    check_variable_and_dtype(x, 'dtype',
                             ['float32', 'float64', 'complex64', 'complex128'],
                             'eigvalsh')

    out_value = helper.create_variable_for_type_inference(dtype=x.dtype)
    out_vector = helper.create_variable_for_type_inference(dtype=x.dtype)
    helper.append_op(
        type='eigvalsh',
        inputs={'X': x},
        outputs={'Eigenvalues': out_value,
                 'Eigenvectors': out_vector},
        attrs={'UPLO': UPLO,
               'is_test': x.stop_gradient})
    return out_value
def lstsq(x, y, rcond=None, driver=None, name=None):
    """
    Computes a solution to
    the least squares problem of a system of linear equations.

    Args:
        x (Tensor): A tensor with shape ``(*, M, N)`` , the data type of the input Tensor ``x``
            should be one of float32, float64.
        y (Tensor): A tensor with shape ``(*, M, K)`` , the data type of the input Tensor ``y``
            should be one of float32, float64.
        rcond(float, optional): The default value is None. A float pointing number used to determine
            the effective rank of ``x``. If ``rcond`` is None, it will be set to max(M, N) times the
            machine precision of x_dtype.
        driver(str, optional): The default value is None. The name of LAPACK method to be used. For
            CPU inputs the valid values are 'gels', 'gelsy', 'gelsd', 'gelss'. For CUDA input, the only
            valid driver is 'gels'. If ``driver`` is None, 'gelsy' is used for CPU inputs and 'gels'
            for CUDA inputs.
        name(str, optional): The default value is None. Normally there is no need for user to set
            this property. For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        Tuple: A tuple of 4 Tensors which is (``solution``, ``residuals``, ``rank``, ``singular_values``).
        ``solution`` is a tensor with shape ``(*, N, K)``, meaning the least squares solution. ``residuals``
        is a tensor with shape ``(*, K)``, meaning the squared residuals of the solutions, which is computed
        when M > N and every matrix in ``x`` is full-rank, otherwise return an empty tensor. ``rank`` is a tensor
        with shape ``(*)``, meaning the ranks of the matrices in ``x``, which is computed when ``driver`` in
        ('gelsy', 'gelsd', 'gelss'), otherwise return an empty tensor. ``singular_values`` is a tensor with
        shape ``(*, min(M, N))``, meaning singular values of the matrices in ``x``, which is computed when
        ``driver`` in ('gelsd', 'gelss'), otherwise return an empty tensor.

    Examples:
        .. code-block:: python

            import paddle

            paddle.set_device("cpu")
            x = paddle.to_tensor([[1, 3], [3, 2], [5, 6.]])
            y = paddle.to_tensor([[3, 4, 6], [5, 3, 4], [1, 2, 1.]])
            results = paddle.linalg.lstsq(x, y, driver="gelsd")
            print(results[0])
            # [[ 0.78350395, -0.22165027, -0.62371236],
            #  [-0.11340097,  0.78866047,  1.14948535]]
            print(results[1])
            # [19.81443405, 10.43814468, 30.56185532])
            print(results[2])
            # 2
            print(results[3])
            # [9.03455734, 1.54167950]

            x = paddle.to_tensor([[10, 2, 3], [3, 10, 5], [5, 6, 12.]])
            y = paddle.to_tensor([[4, 2, 9], [2, 0, 3], [2, 5, 3.]])
            results = paddle.linalg.lstsq(x, y, driver="gels")
            print(results[0])
            # [[ 0.39386186,  0.10230173,  0.93606132],
            #  [ 0.10741687, -0.29028133,  0.11892585],
            #  [-0.05115091,  0.51918161, -0.19948854]]
            print(results[1])
            # []
    """
    # Validate / default the LAPACK driver for the current device.
    device = paddle.get_device()
    if device == "cpu":
        if driver not in (None, "gels", "gelss", "gelsd", "gelsy"):
            raise ValueError(
                "Only support valid driver is 'gels', 'gelss', 'gelsd', 'gelsy' or None for CPU inputs. But got {}".
                format(driver))
        driver = "gelsy" if driver is None else driver
    elif "gpu" in device:
        if driver not in (None, "gels"):
            raise ValueError(
                "Only support valid driver is 'gels' or None for CUDA inputs. But got {}".
                format(driver))
        driver = "gels" if driver is None else driver
    else:
        raise RuntimeError("Only support lstsq api for CPU or CUDA device.")

    # x and y must share one floating-point dtype.
    if x.dtype == y.dtype and x.dtype in (paddle.float32, paddle.float64):
        pass
    else:
        raise ValueError(
            "Only support x and y have the same dtype such as 'float32' and 'float64'."
        )

    # Default rcond: max(M, N) scaled by the dtype's machine precision.
    if rcond is None:
        if x.dtype == paddle.float32:
            rcond = 1e-7 * max(x.shape[-2], x.shape[-1])
        elif x.dtype == paddle.float64:
            rcond = 1e-15 * max(x.shape[-2], x.shape[-1])

    if paddle.in_dynamic_mode():
        solution, rank, singular_values = _C_ops.lstsq(x, y, "rcond", rcond,
                                                       "driver", driver)
        # Residuals sum((x @ solution - y) ** 2, axis=-2) are only computed for
        # overdetermined systems (M > N); otherwise an empty tensor is returned.
        if x.shape[-2] > x.shape[-1]:
            matmul_out = _varbase_creator(dtype=x.dtype)
            _C_ops.matmul(x, solution, matmul_out, 'trans_x', False, 'trans_y',
                          False)
            minus_out = _C_ops.elementwise_sub(matmul_out, y)
            pow_out = _C_ops.pow(minus_out, 'factor', 2)
            residuals = _C_ops.reduce_sum(pow_out, 'dim', [-2], 'keepdim',
                                          False, 'reduce_all', False)
        else:
            residuals = paddle.empty(shape=[0], dtype=x.dtype)

        # Drivers that do not produce rank / singular values get empty tensors.
        if driver == "gels":
            rank = paddle.empty(shape=[0], dtype=paddle.int32)
            singular_values = paddle.empty(shape=[0], dtype=x.dtype)
        elif driver == "gelsy":
            singular_values = paddle.empty(shape=[0], dtype=x.dtype)

        return solution, residuals, rank, singular_values

    helper = LayerHelper('lstsq', **locals())
    check_variable_and_dtype(
        x, 'dtype', ['float32', 'float64', 'complex64', 'complex128'], 'lstsq')
    check_variable_and_dtype(
        y, 'dtype', ['float32', 'float64', 'complex64', 'complex128'], 'lstsq')

    solution = helper.create_variable_for_type_inference(dtype=x.dtype)
    residuals = helper.create_variable_for_type_inference(dtype=x.dtype)
    rank = helper.create_variable_for_type_inference(dtype=paddle.int32)
    singular_values = helper.create_variable_for_type_inference(dtype=x.dtype)

    helper.append_op(
        type='lstsq',
        inputs={'X': x,
                'Y': y},
        outputs={
            'Solution': solution,
            'Rank': rank,
            'SingularValues': singular_values
        },
        attrs={'rcond': rcond,
               'driver': driver})

    # Build residuals = reduce_sum((x @ solution - y) ** 2, axis=-2) as a
    # chain of ops in the static graph.
    matmul_out = helper.create_variable_for_type_inference(dtype=x.dtype)
    minus_out = helper.create_variable_for_type_inference(dtype=x.dtype)
    pow_out = helper.create_variable_for_type_inference(dtype=x.dtype)
    helper.append_op(
        type='matmul_v2',
        inputs={'X': x,
                'Y': solution},
        outputs={'Out': matmul_out},
        attrs={
            'trans_x': False,
            'trans_y': False,
        })

    helper.append_op(
        type='elementwise_sub',
        inputs={'X': matmul_out,
                'Y': y},
        outputs={'Out': minus_out})

    helper.append_op(
        type='pow',
        inputs={'X': minus_out},
        outputs={'Out': pow_out},
        attrs={'factor': 2})

    helper.append_op(
        type='reduce_sum',
        inputs={'X': pow_out},
        outputs={'Out': residuals},
        attrs={'dim': [-2],
               'keep_dim': False,
               'reduce_all': False})

    # Empty placeholders for the outputs the chosen driver does not compute.
    if driver == "gels":
        rank = paddle.static.data(name='rank', shape=[0])
        singular_values = paddle.static.data(name='singular_values', shape=[0])
    elif driver == "gelsy":
        singular_values = paddle.static.data(name='singular_values', shape=[0])

    return solution, residuals, rank, singular_values
| 39.074185 | 182 | 0.553913 |
import numpy as np
from ..fluid.layer_helper import LayerHelper
from ..framework import _varbase_creator, _dygraph_tracer
from ..fluid.data_feeder import check_variable_and_dtype, check_type, check_dtype
from ..static import Variable
from ..fluid.framework import _in_legacy_dygraph, in_dygraph_mode
from ..fluid.layers import transpose, cast
from ..fluid import layers
import paddle
from paddle.common_ops_import import core
from paddle.common_ops_import import VarDesc
from paddle import _C_ops
__all__ = []
def matmul(x, y, transpose_x=False, transpose_y=False, name=None):
    """
    Computes the matrix product of ``x`` and ``y``, optionally transposing
    either operand before the multiplication.

    Args:
        x (Tensor): The left operand.
        y (Tensor): The right operand.
        transpose_x (bool, optional): Whether to transpose ``x`` before
            multiplication. Default: False.
        transpose_y (bool, optional): Whether to transpose ``y`` before
            multiplication. Default: False.
        name (str, optional): Name for the operation. Default: None.

    Returns:
        Tensor: The product tensor.
    """
    # New-style dygraph: call the final-state kernel directly.
    if in_dygraph_mode():
        return _C_ops.final_state_matmul(x, y, transpose_x, transpose_y)

    # Legacy dygraph: call the `matmul_v2` kernel with attr-style arguments.
    if _in_legacy_dygraph():
        op_type = 'matmul_v2'
        op = getattr(_C_ops, op_type)
        return op(x, y, 'trans_x', transpose_x, 'trans_y', transpose_y)

    attrs = {
        'trans_x': transpose_x,
        'trans_y': transpose_y,
    }

    def __check_input(x, y):
        # Only these dtypes are accepted by the `matmul_v2` op.
        var_names = {'x': x, 'y': y}
        for name, val in var_names.items():
            check_variable_and_dtype(
                val, name,
                ['float16', 'float32', 'float64', 'complex64', 'complex128'],
                'matmul')

    __check_input(x, y)

    # Static graph: append a `matmul_v2` op to the program.
    helper = LayerHelper('matmul_v2', **locals())
    out = helper.create_variable_for_type_inference(dtype=x.dtype)
    helper.append_op(
        type='matmul_v2',
        inputs={'X': x,
                'Y': y},
        outputs={'Out': out},
        attrs=attrs)
    return out
def norm(x, p='fro', axis=None, keepdim=False, name=None):
    def frobenius_norm(input, dim=None, keepdim=False, name=None):
        """Frobenius norm of `input` over the two axes in `dim` (all axes
        when `dim` is None), via the `frobenius_norm` op."""
        # The op only supports reducing over exactly two axes or all axes.
        if dim is not None and not (isinstance(dim, list) and len(dim) == 2):
            raise ValueError(
                "The dim of frobenius norm op should be None or two elements list!"
            )
        # Dygraph: call the kernel directly; `reduce_all` collapses every axis.
        if paddle.in_dynamic_mode():
            if dim is None:
                return _C_ops.frobenius_norm(input, 'keep_dim', keepdim,
                                             'reduce_all', True)
            return _C_ops.frobenius_norm(input, 'dim', dim, 'keep_dim', keepdim,
                                         'reduce_all', False)
        # Static graph: build a `frobenius_norm` op with the same attrs.
        attrs = {'dim': dim, 'keep_dim': keepdim, 'reduce_all': False}
        if dim is None:
            attrs['reduce_all'] = True
        check_variable_and_dtype(input, 'input', ['float32', 'float64'],
                                 'frobenius_norm')

        helper = LayerHelper('frobenius_norm', **locals())
        out = helper.create_variable_for_type_inference(
            dtype=helper.input_dtype())

        helper.append_op(
            type='frobenius_norm',
            inputs={'X': input},
            outputs={'Out': out},
            attrs=attrs)
        return out
    def vector_norm(input,
                    porder=None,
                    axis=None,
                    keepdim=False,
                    asvector=False,
                    name=None):
        """p-order vector norm of `input` along `axis`, via the `p_norm` op."""
        if paddle.in_dynamic_mode():
            # The kernel treats axis=-1 as the default reduction axis.
            if axis is None: axis = -1
            return _C_ops.p_norm(input, 'porder', porder, 'axis', axis,
                                 'keepdim', keepdim, 'asvector', asvector)
        # Static graph: validate argument types / dtype first.
        if porder is not None:
            check_type(porder, 'porder', (float, int), 'p_norm')
        if axis is not None:
            check_type(axis, 'axis', (int), 'p_norm')
        check_variable_and_dtype(input, 'input', ['float32', 'float64'],
                                 'p_norm')

        # Defaults mirror the dygraph path: axis -1, porder 2.0.
        attrs = {
            'axis': axis if axis is not None else -1,
            'porder': float(porder) if porder is not None else 2.0,
            'keepdim': keepdim,
            'asvector': asvector,
            'epsilon': 1e-12,
        }
        helper = LayerHelper('p_norm', **locals())
        out = helper.create_variable_for_type_inference(
            dtype=helper.input_dtype())

        helper.append_op(
            type='p_norm',
            inputs={'X': input},
            outputs={'Out': out},
            attrs=attrs)
        return out
def inf_norm(input,
porder=None,
axis=axis,
keepdim=False,
asvector=False,
name=None):
helper = LayerHelper('frobenius_norm', **locals())
out = helper.create_variable_for_type_inference(
dtype=helper.input_dtype())
helper.append_op(type='abs', inputs={'X': input}, outputs={'Out': out})
reduce_out = helper.create_variable_for_type_inference(
dtype=helper.input_dtype())
reduce_all = True if axis == None or axis == [] or asvector == True else False
axis = axis if axis != None and axis != [] else [0]
reduce_type = 'reduce_max' if porder == np.float(
'inf') else 'reduce_min'
helper.append_op(
type=reduce_type,
inputs={'X': out},
outputs={'Out': reduce_out},
attrs={'dim': axis,
'keep_dim': keepdim,
'reduce_all': reduce_all})
return reduce_out
def p_matrix_norm(input, porder=1., axis=axis, keepdim=False, name=None):
block = LayerHelper('norm', **locals())
out = block.create_variable_for_type_inference(
dtype=block.input_dtype())
abs_out = block.create_variable_for_type_inference(
dtype=block.input_dtype())
block.append_op(
type='abs', inputs={'X': input}, outputs={'Out': abs_out})
pow_out = block.create_variable_for_type_inference(
dtype=block.input_dtype())
block.append_op(
type='pow',
inputs={'X': abs_out},
outputs={'Out': pow_out},
attrs={'factor': porder})
sum_out = block.create_variable_for_type_inference(
dtype=block.input_dtype())
block.append_op(
type='reduce_sum',
inputs={'X': pow_out},
outputs={'Out': sum_out},
attrs={
'dim': axis,
'keep_dim': keepdim,
'reduce_all': True if axis is None else False
})
porder
block.append_op(
type='pow',
inputs={'X': sum_out},
outputs={'Out': out},
attrs={'factor': float(1. / porder)})
return out
    # ---- Dispatch on the (p, axis) combination ----
    # No axis given: 'fro' -> frobenius norm; numeric p -> norm of the
    # flattened tensor (asvector=True).
    if axis is None and p is not None:
        if isinstance(p, str):
            if p == "fro":
                return frobenius_norm(x, dim=axis, keepdim=keepdim, name=name)
            else:
                raise ValueError(
                    "only valid string values are 'fro', found {}".format(p))
        elif isinstance(p, (int, float)):
            return vector_norm(
                x,
                porder=p,
                axis=axis,
                keepdim=keepdim,
                asvector=True,
                name=name)
        else:
            raise ValueError("only valid p type is string or float, found {}".
                             format(type(p)))
    # Normalize axis: a one-element tuple/list collapses to a plain int.
    if isinstance(axis, tuple):
        axis = list(axis)
    if isinstance(axis, list) and len(axis) == 1:
        axis = axis[0]
    # Single int axis: vector norm along that axis ('fro' behaves as the
    # 2-norm here).
    if isinstance(axis, int):
        if isinstance(p, str):
            if p == "fro":
                return vector_norm(
                    x,
                    porder=2,
                    axis=axis,
                    keepdim=keepdim,
                    asvector=False,
                    name=name)
            else:
                raise ValueError(
                    "only valid string values are 'fro', found {}".format(p))
        elif isinstance(p, (int, float)):
            return vector_norm(
                x,
                axis=axis,
                porder=p,
                keepdim=keepdim,
                asvector=False,
                name=name)
        else:
            raise ValueError(
                "unspport p for p-order vector norm. except float, found {}".
                format(p))
    # Two axes: matrix norm over that 2-D slice.
    elif isinstance(axis, list) and len(axis) == 2:
        if p == "fro":
            return frobenius_norm(x, dim=axis, keepdim=keepdim, name=name)
        elif p == np.inf or p == -np.inf:
            return inf_norm(x, porder=p, axis=axis, keepdim=keepdim, name=name)
        elif p == 0:
            raise ValueError(
                "just suport axis type int or list (length of list <=1) if p = 0, found {}".
                format(axis))
        else:
            return p_matrix_norm(
                x, porder=p, axis=axis, keepdim=keepdim, name=name)
    else:
        raise ValueError(
            "except axis type int or list (length of list <=2), found {}".
            format(axis))
def dist(x, y, p=2, name=None):
    """Append a ``dist`` op computing the p-norm of the difference of
    ``x`` and ``y`` (static graph construction only)."""
    check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'dist')
    check_variable_and_dtype(y, 'dtype', ['float32', 'float64'], 'dist')
    check_type(p, 'p', (float, int), 'dist')
    helper = LayerHelper("dist", **locals())
    out = helper.create_variable_for_type_inference(x.dtype)
    helper.append_op(
        type='dist',
        inputs={"X": [x], "Y": [y]},
        outputs={'Out': out},
        attrs={"p": float(p)})
    return out
def cond(x, p=None, name=None):
    """Compute the condition number of ``x`` with respect to norm ``p``.

    Supported ``p``: None (defaults to 2), 'fro', 'nuc', 1, -1, 2, -2,
    inf, -inf.  For p in ('fro', 'nuc', 1, -1, inf, -inf) the input must
    be square and cond = norm(x) * norm(inv(x)); for p in (2, -2) the
    result comes from singular-value ratios and non-square input is
    accepted.
    """
    def mat_norm(input, porder=1., axis=None):
        """abs -> reduce_sum over ``axis`` -> reduce_max (porder 1/inf)
        or reduce_min (porder -1/-inf) over the last axis."""
        reduce_all = True if axis is None or axis == [] else False
        axis = axis if axis != None and axis != [] else [0]
        keepdim = False
        if paddle.in_dynamic_mode():
            abs_out = _C_ops.abs(input)
            sum_out = _C_ops.reduce_sum(abs_out, 'dim', axis, 'keepdim',
                                        keepdim, 'reduce_all', reduce_all)
            if porder == 1 or porder == np.inf:
                return _C_ops.reduce_max(sum_out, 'dim', [-1], 'keepdim',
                                         keepdim, 'reduce_all', reduce_all)
            if porder == -1 or porder == -np.inf:
                return _C_ops.reduce_min(sum_out, 'dim', [-1], 'keepdim',
                                         keepdim, 'reduce_all', reduce_all)
        # Static graph: same pipeline built from appended ops.
        block = LayerHelper('norm', **locals())
        abs_out = block.create_variable_for_type_inference(
            dtype=block.input_dtype())
        sum_out = block.create_variable_for_type_inference(
            dtype=block.input_dtype())
        out = block.create_variable_for_type_inference(
            dtype=block.input_dtype())
        block.append_op(
            type='abs', inputs={'X': input}, outputs={'Out': abs_out})
        block.append_op(
            type='reduce_sum',
            inputs={'X': abs_out},
            outputs={'Out': sum_out},
            attrs={'dim': axis,
                   'keep_dim': keepdim,
                   'reduce_all': reduce_all})
        if porder == 1 or porder == np.inf:
            block.append_op(
                type='reduce_max',
                inputs={'X': sum_out},
                outputs={'Out': out},
                attrs={
                    'dim': [-1],
                    'keep_dim': keepdim,
                    'reduce_all': reduce_all
                })
        if porder == -1 or porder == -np.inf:
            block.append_op(
                type='reduce_min',
                inputs={'X': sum_out},
                outputs={'Out': out},
                attrs={
                    'dim': [-1],
                    'keep_dim': keepdim,
                    'reduce_all': reduce_all
                })
        return out
    def fro_norm(input, porder=2, axis=[-1]):
        """(sum(input ** porder) reduced twice over ``axis``) ** (1/porder)."""
        reduce_all = True if axis is None or axis == [] else False
        keepdim = False
        if paddle.in_dynamic_mode():
            pow_out = _C_ops.pow(input, 'factor', porder)
            sum_out_1 = _C_ops.reduce_sum(pow_out, 'dim', axis, 'keepdim',
                                          keepdim, 'reduce_all', reduce_all)
            sum_out_2 = _C_ops.reduce_sum(sum_out_1, 'dim', axis, 'keepdim',
                                          keepdim, 'reduce_all', reduce_all)
            return _C_ops.pow(sum_out_2, 'factor', float(1. / porder))
        block = LayerHelper('norm', **locals())
        pow_out = block.create_variable_for_type_inference(
            dtype=block.input_dtype())
        sum_out_1 = block.create_variable_for_type_inference(
            dtype=block.input_dtype())
        sum_out_2 = block.create_variable_for_type_inference(
            dtype=block.input_dtype())
        out = block.create_variable_for_type_inference(
            dtype=block.input_dtype())
        block.append_op(
            type='pow',
            inputs={'X': input},
            outputs={'Out': pow_out},
            attrs={'factor': porder})
        block.append_op(
            type='reduce_sum',
            inputs={'X': pow_out},
            outputs={'Out': sum_out_1},
            attrs={'dim': axis,
                   'keep_dim': keepdim,
                   'reduce_all': reduce_all})
        block.append_op(
            type='reduce_sum',
            inputs={'X': sum_out_1},
            outputs={'Out': sum_out_2},
            attrs={'dim': axis,
                   'keep_dim': keepdim,
                   'reduce_all': reduce_all})
        block.append_op(
            type='pow',
            inputs={'X': sum_out_2},
            outputs={'Out': out},
            attrs={'factor': float(1. / porder)})
        return out
    def svd_norm(input, porder, axis=[-1]):
        """Norms from singular values: 'nuc' -> sum(s); 2 -> max(s)/min(s);
        -2 -> min(s)/max(s)."""
        reduce_all = True if axis is None or axis == [] else False
        keepdim = False
        u, s, vh = svd(input, full_matrices=False)
        if paddle.in_dynamic_mode():
            if porder == "nuc":
                return _C_ops.reduce_sum(s, 'dim', axis, 'keepdim', keepdim,
                                         'reduce_all', reduce_all)
            max_out = _C_ops.reduce_max(s, 'dim', axis, 'keepdim', keepdim,
                                        'reduce_all', reduce_all)
            min_out = _C_ops.reduce_min(s, 'dim', axis, 'keepdim', keepdim,
                                        'reduce_all', reduce_all)
            if porder == 2:
                # NOTE(review): 'aixs' looks like a typo of 'axis'; the
                # elementwise_div op would fall back to its default axis
                # attribute -- confirm intended.
                return _C_ops.elementwise_div(max_out, min_out, 'aixs', axis,
                                              'use_mkldnn', False)
            if porder == -2:
                return _C_ops.elementwise_div(min_out, max_out, 'aixs', axis,
                                              'use_mkldnn', False)
        block = LayerHelper('norm', **locals())
        out = block.create_variable_for_type_inference(
            dtype=block.input_dtype())
        if porder == "nuc":
            block.append_op(
                type='reduce_sum',
                inputs={'X': s},
                outputs={'Out': out},
                attrs={
                    'dim': axis,
                    'keep_dim': keepdim,
                    'reduce_all': reduce_all
                })
            return out
        max_out = block.create_variable_for_type_inference(
            dtype=block.input_dtype())
        min_out = block.create_variable_for_type_inference(
            dtype=block.input_dtype())
        block.append_op(
            type='reduce_max',
            inputs={'X': s},
            outputs={'Out': max_out},
            attrs={'dim': axis,
                   'keep_dim': keepdim,
                   'reduce_all': reduce_all})
        block.append_op(
            type='reduce_min',
            inputs={'X': s},
            outputs={'Out': min_out},
            attrs={'dim': axis,
                   'keep_dim': keepdim,
                   'reduce_all': reduce_all})
        if porder == 2:
            # NOTE(review): same suspected 'aixs' typo as above.
            block.append_op(
                type='elementwise_div',
                inputs={'X': max_out,
                        'Y': min_out},
                outputs={'Out': out},
                attrs={'aixs': axis,
                       'use_mkldnn': False})
            return out
        if porder == -2:
            block.append_op(
                type='elementwise_div',
                inputs={'X': min_out,
                        'Y': max_out},
                outputs={'Out': out},
                attrs={'aixs': axis,
                       'use_mkldnn': False})
            return out
    def empty_tensor(input, shape):
        # 0-size input: produce an empty result of the batch shape
        # (dygraph only; static mode rejects empty input).
        if paddle.in_dynamic_mode():
            return input.reshape(shape)
        raise ValueError("only support x is nonempty tensor in static mode")
    x_shape = list(x.shape)
    if not len(x_shape) >= 2:
        raise ValueError("input should be a matrix or batches of matrices, " +
                         "but the dimention of received input is {}".format(
                             len(x_shape)))
    if p == None:
        p = 2
    # x_size == 0 marks a tensor with a zero-length dimension.
    x_size = 0 if (0 in x_shape) else 1
    if p in ("fro", "nuc", 1, -1, np.inf, -np.inf):
        # These norms need the inverse, so the matrices must be square.
        if x_shape[len(x_shape) - 1] == x_shape[len(x_shape) - 2]:
            if x_size == 0:
                return empty_tensor(x, x_shape[:-2])
            x_inv = x.inverse()
            if p == "fro":
                return fro_norm(x) * fro_norm(x_inv)
            if p == "nuc":
                return svd_norm(x, p) * svd_norm(x_inv, p)
            if p in (1, -1):
                return mat_norm(
                    x, porder=p, axis=[-2]) * mat_norm(
                        x_inv, porder=p, axis=[-2])
            if p in (np.inf, -np.inf):
                return mat_norm(
                    x, porder=p, axis=[-1]) * mat_norm(
                        x_inv, porder=p, axis=[-1])
        else:
            raise ValueError("only support p is {} when input is a ".format(p) +
                             "square matrix or batches of square matrices")
    elif p in (2, -2):
        if x_size == 0:
            return empty_tensor(x, x_shape[:-2])
        return svd_norm(x, porder=p)
    else:
        raise ValueError(
            "unsupported {} for p, only supporting ('fro', 'nuc', ".format(
                p) + "1, -1, 2, -2, inf, -inf) or none")
def dot(x, y, name=None):
    """Compute the dot product of ``x`` and ``y`` via the ``dot`` op."""
    op_type = 'dot'
    # Dygraph: dispatch straight to the C++ kernel.
    if paddle.in_dynamic_mode():
        return _C_ops.dot(x, y)
    # Static graph: validate, then append the op.
    assert x is not None, 'x cannot be None in {}'.format(op_type)
    assert y is not None, 'y cannot be None in {}'.format(op_type)
    check_variable_and_dtype(x, 'x', ['float32', 'float64', 'int32', 'int64'],
                             op_type)
    check_variable_and_dtype(y, 'y', ['float32', 'float64', 'int32', 'int64'],
                             op_type)
    helper = LayerHelper(op_type, **locals())
    if name is None:
        out = helper.create_variable_for_type_inference(dtype=x.dtype)
    else:
        # An explicit name pins the output variable's name in the program.
        out = helper.create_variable(
            name=name, dtype=x.dtype, persistable=False)
    helper.append_op(
        type="dot",
        inputs={'X': x, 'Y': y},
        attrs={},
        outputs={"Out": out})
    return out
def cov(x, rowvar=True, ddof=True, fweights=None, aweights=None, name=None):
    """Estimate the covariance matrix of the variables in ``x``.

    Rows are variables and columns observations when ``rowvar`` is True
    (a 1-D input is treated as a single variable).  ``fweights`` are
    integer frequency weights, ``aweights`` are importance weights, and
    ``ddof`` enters the normalization factor (w_sum - ddof).
    """
    op_type = 'cov'
    if len(x.shape) > 2 or len(x.shape) < 1:
        raise ValueError(
            "Input(x) only support N-D (1<=N<=2) tensor in cov, but received "
            "length of Input(input) is %s." % len(x.shape))
    check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'cov')
    nx = x
    if len(x.shape) == 1:
        # Promote a 1-D input to a single-row matrix.
        nx = x.reshape((1, -1))
    if not rowvar and nx.shape[0] != 1:
        # Columns are variables: transpose so rows are variables.
        nx = nx.t()
    w = None
    observation_num = nx.shape[1]
    if fweights is not None:
        # Frequency weights: non-negative integers, one per observation.
        w = fweights.astype(nx.dtype)
        if len(w.shape) > 1:
            raise ValueError(
                "Input(fweights) only support N-D (N<=1) tensor in cov, but received "
                "shape of Input(input) is %s." % len(fweights.shape))
        if fweights.shape[0] != observation_num:
            raise ValueError(
                "The number of Input(fweights) should equal to x's dim[1]: {}, but received "
                "size of Input(fweights) is {}.".format(observation_num,
                                                        fweights.shape[0]))
        if fweights.min() < 0:
            raise ValueError(
                "The value of Input(fweights) cannot be negtive, but received "
                "min of Input(fweights) is {}.".format(fweights.min()))
        if not paddle.all(fweights == paddle.round(fweights.astype('float64'))):
            raise ValueError("Input(fweights) must be integer ")
    if aweights is not None:
        # Importance weights: non-negative floats, one per observation.
        aw = aweights.astype(nx.dtype)
        if len(aw.shape) > 1:
            raise ValueError(
                "Input(aweights) only support N-D (N<=1) tensor in cov, but received "
                "length of Input(input) is %s." % len(aweights.shape))
        check_variable_and_dtype(aweights, 'dtype', ['float32', 'float64'],
                                 'cov')
        if aweights.shape[0] != observation_num:
            raise ValueError(
                "The number of Input(aweights) should equal to x's dim[1]: {}, but received "
                "size of Input(aweights) is {}.".format(observation_num,
                                                        aweights.shape[0]))
        if aweights.min() < 0:
            raise ValueError(
                "The value of Input(aweights) cannot be negtive, but received "
                "min of Input(aweights) is {}.".format(aweights.min()))
        if w is not None:
            # Both weight kinds: combine multiplicatively.
            w = w * aw
        else:
            w = aw
    # Total weight; falls back to the observation count when unweighted.
    w_sum = paddle.to_tensor(observation_num, dtype=nx.dtype)
    if fweights is not None or aweights is not None:
        w_sum = w.sum()
        if w_sum.item() == 0:
            raise ValueError("The sum of weights is zero, can't be normalized.")
    # Weighted mean per variable.
    if w is not None:
        nx_w = nx * w
        avg = (nx_w).sum(axis=1) / w_sum
    else:
        avg = nx.sum(axis=1) / w_sum
        nx_w = nx
    # Normalization factor; NOTE(review): `ddof` is used arithmetically
    # here (True behaves as 1, False as 0) despite the boolean default.
    if w is not None and aweights is not None and ddof == True:
        norm_factor = w_sum - (w * aweights).sum() / w_sum
    else:
        norm_factor = w_sum - ddof
    if norm_factor <= 0:
        norm_factor = paddle.to_tensor(0, dtype=nx.dtype)
    # Center the data and form the (weighted) scatter matrix.
    nx = nx - avg.unsqueeze(1)
    xxt = paddle.mm(nx, nx_w.t().conj())
    cov = paddle.divide(xxt, norm_factor).squeeze()
    return cov
def t(input, name=None):
    """Transpose a tensor of at most 2 dimensions; 1-D input is returned
    unchanged."""
    if len(input.shape) > 2:
        raise ValueError(
            "Input(input) only support N-D (N<=2) tensor, but received "
            "length of Input(input) is %s. Perhaps you can use paddle."
            "tensor.transpose() instead." % len(input.shape))
    # Dygraph: swap the two axes directly.
    if paddle.in_dynamic_mode():
        if len(input.shape) == 1:
            return input
        out, _ = _C_ops.transpose2(input, 'axis', [1, 0])
        return out
    # Static graph path.
    check_variable_and_dtype(
        input, 'input', ['float16', 'float32', 'float64', 'int32',
                         'int64'], 'transpose')
    helper = LayerHelper('t', **locals())
    out = helper.create_variable_for_type_inference(input.dtype)
    input_shape = helper.create_variable_for_type_inference(input.dtype)
    if len(input.shape) == 1:
        # 1-D: transpose is the identity.
        out = input
    else:
        helper.append_op(
            type='transpose2',
            inputs={'X': [input]},
            outputs={'Out': [out], 'XShape': [input_shape]},
            attrs={'axis': [1, 0]})
    return out
def cross(x, y, axis=None, name=None):
    """Compute the cross product of ``x`` and ``y`` along ``axis``
    (the op's default axis is used when ``axis`` is None)."""
    if in_dygraph_mode():
        return _C_ops.final_state_cross(x, y, axis)
    if _in_legacy_dygraph():
        if axis is None:
            return _C_ops.cross(x, y)
        return _C_ops.cross(x, y, 'dim', axis)
    # Static graph path.
    helper = LayerHelper("cross", **locals())
    out = helper.create_variable_for_type_inference(x.dtype)
    helper.append_op(
        type='cross',
        inputs={'X': x, 'Y': y},
        outputs={'Out': out},
        attrs={'dim': axis})
    return out
def cholesky(x, upper=False, name=None):
    """Compute the Cholesky factor of ``x`` (upper-triangular when
    ``upper`` is True, lower otherwise)."""
    # Dygraph fast path.
    if paddle.in_dynamic_mode():
        return _C_ops.cholesky(x, "upper", upper)
    # Static graph: validate, then append the op.
    check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'cholesky')
    check_type(upper, 'upper', bool, 'cholesky')
    helper = LayerHelper('cholesky', **locals())
    result = helper.create_variable_for_type_inference(dtype=x.dtype)
    helper.append_op(
        type='cholesky',
        inputs={'X': [x]},
        outputs={'Out': result},
        attrs={'upper': upper})
    return result
def matrix_rank(x, tol=None, hermitian=False, name=None):
    """Compute the rank of ``x``.

    ``tol`` may be None (the op's default tolerance is used), a float,
    or a Variable (cast to x's dtype when needed); ``hermitian`` selects
    the symmetric/hermitian code path of the op.  Returns an int32
    tensor.
    """
    if paddle.in_dynamic_mode():
        # Map the three tol forms onto the kernel's (tensor, attr, flag)
        # triple.
        if tol is None:
            tol_tensor = None
            tol_attr = 0.0
            use_default_tol = True
        elif isinstance(tol, Variable):
            if tol.dtype != x.dtype:
                tol_tensor = cast(tol, x.dtype)
            else:
                tol_tensor = tol
            tol_attr = 0.0
            use_default_tol = False
        else:
            tol_tensor = None
            tol_attr = float(tol)
            use_default_tol = False
        return _C_ops.matrix_rank(x, tol_tensor, "tol", tol_attr, 'hermitian',
                                  hermitian, 'use_default_tol', use_default_tol)
    # Static graph: build inputs/attrs for the matrix_rank op.
    inputs = {}
    attrs = {}
    check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'matrix_rank')
    inputs['X'] = x
    if tol is None:
        attrs['use_default_tol'] = True
    elif isinstance(tol, Variable):
        check_variable_and_dtype(tol, 'tol', ['float32'], 'matrix_rank')
        attrs['use_default_tol'] = False
        if tol.dtype != x.dtype:
            inputs['TolTensor'] = cast(tol, x.dtype)
        else:
            inputs['TolTensor'] = tol
    else:
        check_type(tol, 'tol', float, 'matrix_rank')
        attrs['use_default_tol'] = False
        attrs['tol'] = tol
    check_type(hermitian, 'hermitian', bool, 'matrix_rank')
    attrs['hermitian'] = hermitian
    helper = LayerHelper('matrix_rank', **locals())
    out = helper.create_variable_for_type_inference(dtype='int32')
    helper.append_op(
        type='matrix_rank', inputs=inputs, outputs={'Out': out}, attrs=attrs)
    return out
def bmm(x, y, name=None):
    """Batched matrix multiply: x is (b, m, k) and y is (b, k, n)."""
    def _validate(x_shape, y_shape):
        # Mirror the bmm op's shape requirements with early errors.
        if not len(x_shape) == len(y_shape) == 3:
            raise ValueError(
                "x and y should be 3-dimensional. But received x's dimention: {}, y's dimention: {}".
                format(x_shape, y_shape))
        if x_shape[2] != y_shape[1]:
            raise ValueError(
                "x's width must be equal with y's height. But received x's shape: {}, y's shape: {}".
                format(x_shape, y_shape))
        if x_shape[0] != y_shape[0]:
            raise ValueError(
                "x's batch (shape[0]) must be equal with y's batch (shape[0]). But received x's shape: {}, y's shape: {}".
                format(x_shape, y_shape))

    _validate(x.shape, y.shape)
    if paddle.in_dynamic_mode():
        return _C_ops.bmm(x, y)
    helper = LayerHelper('bmm', **locals())
    out = helper.create_variable_for_type_inference(dtype=x.dtype)
    helper.append_op(type='bmm', inputs={'X': x, 'Y': y}, outputs={'Out': out})
    return out
def histogram(input, bins=100, min=0, max=0, name=None):
    """Append/execute the ``histogram`` op counting values of ``input``
    into ``bins`` buckets between ``min`` and ``max`` (op semantics)."""
    if paddle.in_dynamic_mode():
        return _C_ops.histogram(input, "bins", bins, "min", min, "max", max)
    helper = LayerHelper('histogram', **locals())
    check_variable_and_dtype(
        input, 'X', ['int32', 'int64', 'float32', 'float64'], 'histogram')
    # Counts are always emitted as int64.
    out = helper.create_variable_for_type_inference(VarDesc.VarType.INT64)
    helper.append_op(
        type='histogram',
        inputs={'X': input},
        outputs={'Out': out},
        attrs={'bins': bins, 'min': min, 'max': max})
    return out
def bincount(x, weights=None, minlength=0, name=None):
    """Count occurrences of each non-negative integer value in ``x``,
    optionally accumulating ``weights`` instead of counts."""
    if x.dtype not in [paddle.int32, paddle.int64]:
        raise TypeError("Elements in Input(x) should all be integers")
    if paddle.in_dynamic_mode():
        return _C_ops.bincount(x, weights, "minlength", minlength)
    # Static graph path.
    helper = LayerHelper('bincount', **locals())
    check_variable_and_dtype(x, 'X', ['int32', 'int64'], 'bincount')
    if weights is None:
        out = helper.create_variable_for_type_inference(dtype=x.dtype)
    else:
        check_variable_and_dtype(weights, 'Weights',
                                 ['int32', 'int64', 'float32', 'float64'],
                                 'bincount')
        # Weighted counts take the weights' dtype.
        out = helper.create_variable_for_type_inference(dtype=weights.dtype)
    helper.append_op(
        type='bincount',
        inputs={'X': x, 'Weights': weights},
        outputs={'Out': out},
        attrs={'minlength': minlength})
    return out
def mv(x, vec, name=None):
    """Matrix-vector product of a 2-D ``x`` and a 1-D ``vec``."""
    if in_dygraph_mode():
        return _C_ops.final_state_mv(x, vec)
    if _in_legacy_dygraph():
        return _C_ops.mv(x, vec)

    def __check_input(x, vec):
        # Validate dtypes and ranks before appending the static-graph op.
        var_names = {'x': x, 'vec': vec}
        for name, val in var_names.items():
            check_variable_and_dtype(val, name, ['float32', 'float64'], 'mv')
        x_shape = list(x.shape)
        vec_shape = list(vec.shape)
        if len(x_shape) != 2:
            raise ValueError(
                "x should be 2-dimensional. But received x's dimention: {}".
                format(x_shape))
        if len(vec_shape) != 1:
            raise ValueError(
                "vec should be 1-dimensional. But received vec's dimention: {}".
                format(vec_shape))

    __check_input(x, vec)
    helper = LayerHelper('mv', **locals())
    out = helper.create_variable_for_type_inference(dtype=x.dtype)
    helper.append_op(
        type='mv', inputs={'X': x, 'Vec': vec}, outputs={'Out': out})
    return out
def det(x, name=None):
    """Compute the determinant of one square matrix or a batch of them.

    Args:
        x: input tensor; the last two dimensions must be equal.
        name: optional name for the static-graph op.
    """
    if paddle.in_dynamic_mode():
        return _C_ops.determinant(x)
    check_dtype(x.dtype, 'Input', ['float32', 'float64'], 'det')
    input_shape = list(x.shape)
    assert len(input_shape) >= 2, \
        "The x must be at least 2-dimensional, " \
        "but received Input x's dimensional: %s.\n" % \
        len(input_shape)
    # BUGFIX: a stray trailing line-continuation after this assert could
    # splice the following statement into the assert expression; removed.
    assert (input_shape[-1] == input_shape[-2]), \
        "Expect squared input," \
        "but received %s by %s matrix.\n" \
        % (input_shape[-2], input_shape[-1])
    helper = LayerHelper('determinant', **locals())
    out = helper.create_variable_for_type_inference(dtype=x.dtype)
    helper.append_op(
        type='determinant', inputs={'Input': [x]}, outputs={'Out': [out]})
    return out
def slogdet(x, name=None):
    """Compute the sign and natural log of the absolute determinant of
    one square matrix or a batch of them (``slogdeterminant`` op).

    Args:
        x: input tensor; the last two dimensions must be equal.
        name: optional name for the static-graph op.
    """
    if paddle.in_dynamic_mode():
        return _C_ops.slogdeterminant(x)
    check_dtype(x.dtype, 'Input', ['float32', 'float64'], 'slogdet')
    input_shape = list(x.shape)
    assert len(input_shape) >= 2, \
        "The x must be at least 2-dimensional, " \
        "but received Input x's dimensional: %s.\n" % \
        len(input_shape)
    # BUGFIX: a stray trailing line-continuation after this assert could
    # splice the following statement into the assert expression; removed.
    assert (input_shape[-1] == input_shape[-2]), \
        "Expect squared input," \
        "but received %s by %s matrix.\n" \
        % (input_shape[-2], input_shape[-1])
    helper = LayerHelper('slogdeterminant', **locals())
    out = helper.create_variable_for_type_inference(dtype=x.dtype)
    helper.append_op(
        type='slogdeterminant', inputs={'Input': [x]}, outputs={'Out': [out]})
    return out
def svd(x, full_matrices=False, name=None):
    """Singular value decomposition of ``x``; returns (U, S, VH)."""
    if paddle.in_dynamic_mode():
        return _C_ops.svd(x, 'full_matrices', full_matrices)
    # Static graph: validate, then append a single svd op with three
    # outputs.
    check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'svd')
    check_type(full_matrices, 'full_matrices', bool, 'svd')
    helper = LayerHelper('svd', **locals())
    u = helper.create_variable_for_type_inference(dtype=x.dtype)
    vh = helper.create_variable_for_type_inference(dtype=x.dtype)
    s = helper.create_variable_for_type_inference(dtype=x.dtype)
    helper.append_op(
        type='svd',
        inputs={'X': [x]},
        outputs={'U': u, 'VH': vh, 'S': s},
        attrs={'full_matrices': full_matrices})
    return u, s, vh
def matrix_power(x, n, name=None):
    """Raise a square matrix (or batch of them) to the integer power
    ``n`` via the ``matrix_power`` op."""
    # Dygraph fast path.
    if paddle.in_dynamic_mode():
        return _C_ops.matrix_power(x, "n", n)
    # Static graph: validate, then append the op.
    check_variable_and_dtype(x, 'dtype', ['float32', 'float64'],
                             'matrix_power')
    check_type(n, 'n', int, 'matrix_power')
    helper = LayerHelper('matrix_power', **locals())
    result = helper.create_variable_for_type_inference(dtype=x.dtype)
    helper.append_op(
        type='matrix_power',
        inputs={'X': x},
        outputs={'Out': result},
        attrs={'n': n})
    return result
def qr(x, mode="reduced", name=None):
    """QR decomposition of ``x``; returns only R when mode == "r",
    otherwise the pair (Q, R)."""
    if paddle.in_dynamic_mode():
        q, r = _C_ops.qr(x, 'mode', mode)
        return r if mode == "r" else (q, r)
    # Static graph path.
    check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'qr')
    check_type(mode, 'mode', str, 'qr')
    helper = LayerHelper('qr', **locals())
    q = helper.create_variable_for_type_inference(dtype=x.dtype)
    r = helper.create_variable_for_type_inference(dtype=x.dtype)
    helper.append_op(
        type='qr',
        inputs={'X': [x]},
        outputs={'Q': q, 'R': r},
        attrs={'mode': mode})
    return r if mode == "r" else (q, r)
def lu(x, pivot=True, get_infos=False, name=None):
    """LU factorization of ``x`` via the ``lu`` op.

    Returns (LU, pivots) by default, or (LU, pivots, infos) when
    ``get_infos`` is True.
    """
    if paddle.in_dynamic_mode():
        LU, Piv, Info = _C_ops.lu(x, 'pivots', pivot)
        if get_infos:
            return LU, Piv, Info
        else:
            return LU, Piv
    check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'lu')
    helper = LayerHelper('lu', **locals())
    lu = helper.create_variable_for_type_inference(dtype=x.dtype)
    # NOTE(review): dtype 'int' (rather than e.g. 'int32') is unusual for
    # create_variable_for_type_inference -- confirm it resolves correctly.
    p = helper.create_variable_for_type_inference(dtype='int')
    info = helper.create_variable_for_type_inference(dtype='int')
    attrs = dict()
    attrs['pivots'] = pivot
    helper.append_op(
        type='lu',
        inputs={'X': x},
        outputs={'Out': lu,
                 'Pivots': p,
                 'Infos': info},
        attrs=attrs)
    if get_infos:
        return lu, p, info
    else:
        return lu, p
def lu_unpack(x, y, unpack_ludata=True, unpack_pivots=True, name=None):
    """Unpack LU factorization results ``x`` (packed LU) and ``y``
    (pivots) into the triple (P, L, U)."""
    if paddle.in_dynamic_mode():
        return _C_ops.lu_unpack(x, y, 'unpack_ludata', unpack_ludata,
                                'unpack_pivots', unpack_pivots)
    # Static graph path.
    check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'lu_unpack')
    helper = LayerHelper('lu_unpack', **locals())
    p = helper.create_variable_for_type_inference(dtype=x.dtype)
    l = helper.create_variable_for_type_inference(dtype=x.dtype)
    u = helper.create_variable_for_type_inference(dtype=x.dtype)
    helper.append_op(
        type='lu_unpack',
        inputs={'X': x, 'Pivots': y},
        outputs={'Pmat': p, 'L': l, 'U': u},
        attrs={'unpack_ludata': unpack_ludata,
               'unpack_pivots': unpack_pivots})
    return p, l, u
def eig(x, name=None):
    """General (non-symmetric) eigendecomposition; returns
    (eigenvalues, eigenvectors)."""
    if paddle.in_dynamic_mode():
        return _C_ops.eig(x)
    # Static graph path.
    check_variable_and_dtype(
        x, 'X', ['float32', 'float64', 'complex64', 'complex128'], 'eig')
    helper = LayerHelper('eig', **locals())
    w = helper.create_variable_for_type_inference(x.dtype)
    v = helper.create_variable_for_type_inference(x.dtype)
    helper.append_op(
        type='eig',
        inputs={'X': x},
        outputs={'Eigenvalues': w, 'Eigenvectors': v})
    return w, v
def eigvals(x, name=None):
    """Eigenvalues of one square matrix or a batch of them."""
    check_variable_and_dtype(x, 'dtype',
                             ['float32', 'float64', 'complex64',
                              'complex128'], 'eigvals')
    # Validate shape: at least 2-D with equal trailing dims.
    x_shape = list(x.shape)
    if len(x_shape) < 2:
        raise ValueError(
            "The dimension of Input(x) should be at least 2, but received x's dimention = {}, x's shape = {}".format(
                len(x_shape), x_shape))
    if x_shape[-1] != x_shape[-2]:
        raise ValueError(
            "The last two dimensions of Input(x) should be equal, but received x's shape = {}".format(
                x_shape))
    if paddle.in_dynamic_mode():
        return _C_ops.eigvals(x)
    helper = LayerHelper('eigvals', **locals())
    out = helper.create_variable_for_type_inference(dtype=x.dtype)
    helper.append_op(type='eigvals', inputs={'X': x}, outputs={'Out': out})
    return out
def multi_dot(x, name=None):
    """Chained matrix product of a list/tuple of tensors via the
    ``multi_dot`` op."""
    if paddle.in_dynamic_mode():
        return _C_ops.multi_dot(x)
    # Static graph: every element must share the first tensor's dtype.
    check_type(x, 'x', (list, tuple), 'multi_dot')
    for idx, item in enumerate(x):
        check_variable_and_dtype(item, 'x[' + str(idx) + ']',
                                 ['float16', 'float32', 'float64'],
                                 'multi_dot')
        if item.dtype != x[0].dtype:
            raise TypeError(
                "All the Tensors in the input must have the same data type.")
    helper = LayerHelper('multi_dot', **locals())
    dtype = helper.input_dtype(input_param_name='x')
    out = helper.create_variable_for_type_inference(dtype)
    helper.append_op(type='multi_dot', inputs={"X": x}, outputs={"Out": out})
    return out
def eigh(x, UPLO='L', name=None):
    """Eigendecomposition of a (batch of) Hermitian/symmetric matrices;
    returns (eigenvalues, eigenvectors). ``UPLO`` selects which triangle
    is used ('L' or 'U')."""
    if paddle.in_dynamic_mode():
        return _C_ops.eigh(x, 'UPLO', UPLO)

    def __check_input(x, UPLO):
        # Rank / squareness / UPLO validation for the static path.
        x_shape = list(x.shape)
        if len(x.shape) < 2:
            raise ValueError(
                "Input(input) only support >=2 tensor, but received "
                "length of Input(input) is %s." % len(x.shape))
        if x_shape[-1] != x_shape[-2]:
            raise ValueError(
                "The input matrix must be batches of square matrices. But received x's dimention: {}".
                format(x_shape))
        if UPLO not in ('L', 'U'):
            raise ValueError(
                "UPLO must be L or U. But received UPLO is: {}".format(UPLO))

    __check_input(x, UPLO)
    helper = LayerHelper('eigh', **locals())
    check_variable_and_dtype(
        x, 'dtype', ['float32', 'float64', 'complex64', 'complex128'], 'eigh')
    out_value = helper.create_variable_for_type_inference(dtype=x.dtype)
    out_vector = helper.create_variable_for_type_inference(dtype=x.dtype)
    helper.append_op(
        type='eigh',
        inputs={'X': x},
        outputs={'Eigenvalues': out_value, 'Eigenvectors': out_vector},
        attrs={'UPLO': UPLO})
    return out_value, out_vector
def pinv(x, rcond=1e-15, hermitian=False, name=None):
    """Compute the Moore-Penrose pseudo-inverse of ``x``.

    Non-hermitian path: SVD, invert singular values above
    ``rcond * max(s)`` (others become 0 via division by +inf), then
    V @ diag(1/s) @ U^T.  Hermitian path: the analogous construction
    from an eigendecomposition, using |eigenvalue| for the cutoff.
    """
    if paddle.in_dynamic_mode():
        if not hermitian:
            # combine svd and matmul op
            u, s, vt = _C_ops.svd(x, 'full_matrices', False)
            max_singular_val = _C_ops.reduce_max(s, 'dim', [-1], 'keep_dim', True, \
                'reduce_all', False)
            rcond = paddle.to_tensor(rcond, dtype=x.dtype)
            cutoff = rcond * max_singular_val
            y = float('inf')
            y = paddle.to_tensor(y, dtype=x.dtype)
            # 1/s where s > cutoff, else 1/inf == 0.
            condition = s > cutoff
            cond_int = layers.cast(condition, s.dtype)
            cond_not_int = layers.cast(layers.logical_not(condition), s.dtype)
            out1 = layers.elementwise_mul(1 / s, cond_int)
            out2 = layers.elementwise_mul(1 / y, cond_not_int)
            singular = layers.elementwise_add(out1, out2)
            st, _ = _C_ops.unsqueeze2(singular, 'axes', [-2])
            # Transpose the last two axes of VT to get V.
            dims = list(range(len(vt.shape)))
            perm = dims[:-2] + [dims[-1]] + [dims[-2]]
            v, _ = _C_ops.transpose2(vt, 'axis', perm)
            out_1 = v * st
            out_2 = _C_ops.matmul_v2(out_1, u, 'trans_x', False, 'trans_y',
                                     True)
            return out_2
        else:
            # combine eigh and matmul op
            s, u = _C_ops.eigh(x, 'UPLO', 'L')
            s_abs = paddle.abs(s)
            max_singular_val = _C_ops.reduce_max(s_abs, 'dim', [-1], 'keep_dim', True, \
                'reduce_all', False)
            rcond = paddle.to_tensor(rcond, dtype=s.dtype)
            cutoff = rcond * max_singular_val
            y = float('inf')
            y = paddle.to_tensor(y, dtype=s.dtype)
            condition = s_abs > cutoff
            cond_int = layers.cast(condition, s.dtype)
            cond_not_int = layers.cast(layers.logical_not(condition), s.dtype)
            out1 = layers.elementwise_mul(1 / s, cond_int)
            out2 = layers.elementwise_mul(1 / y, cond_not_int)
            singular = layers.elementwise_add(out1, out2)
            st, _ = _C_ops.unsqueeze2(singular, 'axes', [-2])
            out_1 = u * st
            u_conj = _C_ops.conj(u)
            out_2 = _C_ops.matmul_v2(out_1, u_conj, 'trans_x', False, 'trans_y',
                                     True)
            return out_2
    else:
        if not hermitian:
            # Static graph, SVD-based path.
            helper = LayerHelper('pinv', **locals())
            dtype = x.dtype
            check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'pinv')
            u = helper.create_variable_for_type_inference(dtype)
            s = helper.create_variable_for_type_inference(dtype)
            vt = helper.create_variable_for_type_inference(dtype)
            helper.append_op(
                type='svd',
                inputs={'X': [x]},
                outputs={'U': u,
                         'VH': vt,
                         'S': s},
                attrs={'full_matrices': False}, )
            max_singular_val = helper.create_variable_for_type_inference(dtype)
            helper.append_op(
                type='reduce_max',
                inputs={'X': s},
                outputs={'Out': max_singular_val},
                attrs={'dim': [-1],
                       'keep_dim': True,
                       'reduce_all': False})
            rcond = layers.fill_constant(shape=[1], value=rcond, dtype=dtype)
            cutoff = rcond * max_singular_val
            y = float('inf')
            y = layers.fill_constant(shape=[1], value=y, dtype=dtype)
            # 1/s where s > cutoff, else 1/inf == 0.
            condition = s > cutoff
            cond_int = layers.cast(condition, dtype)
            cond_not_int = layers.cast(layers.logical_not(condition), dtype)
            out1 = layers.elementwise_mul(1 / s, cond_int)
            out2 = layers.elementwise_mul(1 / y, cond_not_int)
            singular = layers.elementwise_add(out1, out2)
            st = helper.create_variable_for_type_inference(dtype=dtype)
            st_shape = helper.create_variable_for_type_inference(dtype=dtype)
            helper.append_op(
                type='unsqueeze2',
                inputs={'X': singular},
                attrs={'axes': [-2]},
                outputs={'Out': st,
                         'XShape': st_shape})
            # Transpose the last two axes of VT to get V.
            dims = list(range(len(vt.shape)))
            perm = dims[:-2] + [dims[-1]] + [dims[-2]]
            v = helper.create_variable_for_type_inference(dtype)
            v_shape = helper.create_variable_for_type_inference(dtype)
            helper.append_op(
                type='transpose2',
                inputs={'X': [vt]},
                outputs={'Out': [v],
                         'XShape': [v_shape]},
                attrs={'axis': perm})
            out_1 = helper.create_variable_for_type_inference(dtype)
            helper.append_op(
                type='elementwise_mul',
                inputs={'X': v,
                        'Y': st},
                outputs={'Out': out_1},
                attrs={'axis': -1,
                       'use_mkldnn': False})
            out_1 = helper.append_activation(out_1)
            out_2 = helper.create_variable_for_type_inference(dtype)
            helper.append_op(
                type='matmul_v2',
                inputs={'X': out_1,
                        'Y': u},
                outputs={'Out': out_2},
                attrs={'trans_x': False,
                       'trans_y': True}, )
            return out_2
        else:
            # Static graph, eigh-based path (complex dtypes allowed;
            # eigenvalues are real, hence the separate s_type).
            helper = LayerHelper('pinv', **locals())
            dtype = x.dtype
            check_variable_and_dtype(
                x, 'dtype', ['float32', 'float64', 'complex64',
                             'complex128'], 'pinv')
            if dtype == paddle.complex128:
                s_type = 'float64'
            elif dtype == paddle.complex64:
                s_type = 'float32'
            else:
                s_type = dtype
            u = helper.create_variable_for_type_inference(dtype)
            s = helper.create_variable_for_type_inference(s_type)
            helper.append_op(
                type='eigh',
                inputs={'X': x},
                outputs={'Eigenvalues': s,
                         'Eigenvectors': u},
                attrs={'UPLO': 'L'})
            s_abs = helper.create_variable_for_type_inference(s_type)
            helper.append_op(
                type='abs', inputs={'X': s}, outputs={'Out': s_abs})
            max_singular_val = helper.create_variable_for_type_inference(s_type)
            helper.append_op(
                type='reduce_max',
                inputs={'X': s_abs},
                outputs={'Out': max_singular_val},
                attrs={'dim': [-1],
                       'keep_dim': True,
                       'reduce_all': False})
            rcond = layers.fill_constant(shape=[1], value=rcond, dtype=s_type)
            cutoff = rcond * max_singular_val
            y = float('inf')
            y = layers.fill_constant(shape=[1], value=y, dtype=s_type)
            condition = s_abs > cutoff
            cond_int = layers.cast(condition, s_type)
            cond_not_int = layers.cast(layers.logical_not(condition), s_type)
            out1 = layers.elementwise_mul(1 / s, cond_int)
            out2 = layers.elementwise_mul(1 / y, cond_not_int)
            singular = layers.elementwise_add(out1, out2)
            st = helper.create_variable_for_type_inference(dtype=s_type)
            st_shape = helper.create_variable_for_type_inference(dtype=s_type)
            helper.append_op(
                type='unsqueeze2',
                inputs={'X': singular},
                attrs={'axes': [-2]},
                outputs={'Out': st,
                         'XShape': st_shape})
            out_1 = helper.create_variable_for_type_inference(dtype)
            helper.append_op(
                type='elementwise_mul',
                inputs={'X': u,
                        'Y': st},
                outputs={'Out': out_1},
                attrs={'axis': -1,
                       'use_mkldnn': False})
            out_1 = helper.append_activation(out_1)
            u_conj = helper.create_variable_for_type_inference(dtype)
            helper.append_op(
                type='conj', inputs={'X': u}, outputs={'Out': [u_conj]})
            out_2 = helper.create_variable_for_type_inference(dtype)
            helper.append_op(
                type='matmul_v2',
                inputs={'X': out_1,
                        'Y': u_conj},
                outputs={'Out': out_2},
                attrs={'trans_x': False,
                       'trans_y': True}, )
            return out_2
def solve(x, y, name=None):
    """Solve the linear system x @ out == y via the ``solve`` op."""
    if paddle.in_dynamic_mode():
        return _C_ops.solve(x, y)
    # Static graph path.
    helper = LayerHelper("solve", **locals())
    check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'solve')
    check_variable_and_dtype(y, 'y', ['float32', 'float64'], 'solve')
    out = helper.create_variable_for_type_inference(dtype=x.dtype)
    helper.append_op(
        type="solve",
        inputs={"X": x, "Y": y},
        outputs={"Out": out})
    return out
def triangular_solve(x,
                     y,
                     upper=True,
                     transpose=False,
                     unitriangular=False,
                     name=None):
    """Solve the triangular system ``x @ out = y``.

    Args:
        x (Tensor): triangular coefficient matrix, float32 or float64.
        y (Tensor): right-hand side, same dtype as ``x``.
        upper (bool): treat ``x`` as upper- (True) or lower-triangular.
        transpose (bool): solve against ``x`` transposed instead.
        unitriangular (bool): assume the diagonal of ``x`` is all ones.
        name (str, optional): operator name prefix (unused in dygraph).

    Returns:
        Tensor: the solution, same dtype as ``x``.
    """
    if paddle.in_dynamic_mode():
        return _C_ops.triangular_solve(x, y, 'upper', upper, 'transpose',
                                       transpose, 'unitriangular',
                                       unitriangular)
    # Static-graph path: validate dtypes and append the op manually.
    # (The previously-built ``inputs`` dict was never used by append_op and
    # has been removed; LayerHelper only reads known kwargs from locals().)
    helper = LayerHelper("triangular_solve", **locals())
    check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'triangular_solve')
    check_variable_and_dtype(y, 'y', ['float32', 'float64'], 'triangular_solve')
    out = helper.create_variable_for_type_inference(dtype=x.dtype)
    helper.append_op(
        type='triangular_solve',
        inputs={'X': x,
                'Y': y},
        outputs={'Out': out},
        attrs={
            'upper': upper,
            'transpose': transpose,
            'unitriangular': unitriangular
        })
    return out
def cholesky_solve(x, y, upper=False, name=None):
    """Solve a linear system with a Cholesky-factored coefficient matrix.

    Args:
        x (Tensor): right-hand side, float32 or float64.
        y (Tensor): Cholesky factor of the coefficient matrix.
        upper (bool): whether ``y`` is an upper-triangular factor.
        name (str, optional): operator name prefix (unused in dygraph).

    Returns:
        Tensor: the solution, same dtype as ``x``.
    """
    if paddle.in_dynamic_mode():
        return _C_ops.cholesky_solve(x, y, 'upper', upper)
    helper = LayerHelper("cholesky_solve", **locals())
    for tensor, tensor_name in ((x, 'x'), (y, 'y')):
        check_variable_and_dtype(tensor, tensor_name,
                                 ['float32', 'float64'], 'cholesky_solve')
    out = helper.create_variable_for_type_inference(dtype=x.dtype)
    helper.append_op(
        type='cholesky_solve',
        inputs={'X': x, 'Y': y},
        outputs={'Out': out},
        attrs={'upper': upper})
    return out
def eigvalsh(x, UPLO='L', name=None):
    """Compute eigenvalues of (batched) Hermitian / real-symmetric matrices.

    Args:
        x (Tensor): square matrices of shape [..., N, N]; float32, float64,
            complex64 or complex128.
        UPLO (str): 'L' to use the lower triangle of ``x``, 'U' for upper.
        name (str, optional): operator name prefix (unused in dygraph).

    Returns:
        Tensor: the eigenvalues only (eigenvectors are discarded).

    Raises:
        ValueError: if ``x`` is not at least 2-D, not square, or ``UPLO``
            is neither 'L' nor 'U'.
    """
    if paddle.in_dynamic_mode():
        # Eigenvector computation can be skipped when no gradient is needed.
        is_test = x.stop_gradient
        values, _ = _C_ops.eigvalsh(x, 'UPLO', UPLO, 'is_test', is_test)
        return values

    def __check_input(x, UPLO):
        # Validate shape and UPLO before building the static-graph op.
        x_shape = list(x.shape)
        if len(x.shape) < 2:
            raise ValueError(
                "Input(input) only support >=2 tensor, but received "
                "length of Input(input) is %s." % len(x.shape))
        if x_shape[-1] != x_shape[-2]:
            raise ValueError(
                # Fixed typo in the message: "dimention" -> "dimension".
                "The input matrix must be batches of square matrices. But received x's dimension: {}".
                format(x_shape))
        if UPLO != 'L' and UPLO != 'U':
            raise ValueError(
                "UPLO must be L or U. But received UPLO is: {}".format(UPLO))

    __check_input(x, UPLO)
    helper = LayerHelper('eigvalsh', **locals())
    check_variable_and_dtype(x, 'dtype',
                             ['float32', 'float64', 'complex64', 'complex128'],
                             'eigvalsh')
    out_value = helper.create_variable_for_type_inference(dtype=x.dtype)
    out_vector = helper.create_variable_for_type_inference(dtype=x.dtype)
    is_test = x.stop_gradient
    helper.append_op(
        type='eigvalsh',
        inputs={'X': x},
        outputs={'Eigenvalues': out_value,
                 'Eigenvectors': out_vector},
        attrs={'UPLO': UPLO,
               'is_test': is_test})
    return out_value
def lstsq(x, y, rcond=None, driver=None, name=None):
    """Least-squares solve of ``x @ solution = y``.

    Returns (solution, residuals, rank, singular_values); entries that a
    given LAPACK driver does not produce come back as empty tensors.
    """
    # Pick/validate the LAPACK driver for this device.
    device = paddle.get_device()
    if device == "cpu":
        if driver not in (None, "gels", "gelss", "gelsd", "gelsy"):
            raise ValueError(
                "Only support valid driver is 'gels', 'gelss', 'gelsd', 'gelsy' or None for CPU inputs. But got {}".
                format(driver))
        driver = "gelsy" if driver is None else driver
    elif "gpu" in device:
        if driver not in (None, "gels"):
            raise ValueError(
                "Only support valid driver is 'gels' or None for CUDA inputs. But got {}".
                format(driver))
        driver = "gels" if driver is None else driver
    else:
        raise RuntimeError("Only support lstsq api for CPU or CUDA device.")
    # x and y must share a supported float dtype.
    if x.dtype == y.dtype and x.dtype in (paddle.float32, paddle.float64):
        pass
    else:
        raise ValueError(
            "Only support x and y have the same dtype such as 'float32' and 'float64'."
        )
    # Default rank-cutoff: machine-epsilon-scale times the larger dimension.
    if rcond is None:
        if x.dtype == paddle.float32:
            rcond = 1e-7 * max(x.shape[-2], x.shape[-1])
        elif x.dtype == paddle.float64:
            rcond = 1e-15 * max(x.shape[-2], x.shape[-1])
    if paddle.in_dynamic_mode():
        solution, rank, singular_values = _C_ops.lstsq(x, y, "rcond", rcond,
                                                       "driver", driver)
        # Residuals only exist for overdetermined systems (more rows than
        # columns): sum of squared entries of (x @ solution - y) over rows.
        if x.shape[-2] > x.shape[-1]:
            matmul_out = _varbase_creator(dtype=x.dtype)
            _C_ops.matmul(x, solution, matmul_out, 'trans_x', False, 'trans_y',
                          False)
            minus_out = _C_ops.elementwise_sub(matmul_out, y)
            pow_out = _C_ops.pow(minus_out, 'factor', 2)
            residuals = _C_ops.reduce_sum(pow_out, 'dim', [-2], 'keepdim',
                                          False, 'reduce_all', False)
        else:
            residuals = paddle.empty(shape=[0], dtype=x.dtype)
        # 'gels' reports neither rank nor singular values; 'gelsy' reports
        # rank but not singular values — blank out what isn't produced.
        if driver == "gels":
            rank = paddle.empty(shape=[0], dtype=paddle.int32)
            singular_values = paddle.empty(shape=[0], dtype=x.dtype)
        elif driver == "gelsy":
            singular_values = paddle.empty(shape=[0], dtype=x.dtype)
        return solution, residuals, rank, singular_values
    # Static-graph path: declare outputs and append the ops by hand.
    helper = LayerHelper('lstsq', **locals())
    check_variable_and_dtype(
        x, 'dtype', ['float32', 'float64', 'complex64', 'complex128'], 'lstsq')
    check_variable_and_dtype(
        y, 'dtype', ['float32', 'float64', 'complex64', 'complex128'], 'lstsq')
    solution = helper.create_variable_for_type_inference(dtype=x.dtype)
    residuals = helper.create_variable_for_type_inference(dtype=x.dtype)
    rank = helper.create_variable_for_type_inference(dtype=paddle.int32)
    singular_values = helper.create_variable_for_type_inference(dtype=x.dtype)
    helper.append_op(
        type='lstsq',
        inputs={'X': x,
                'Y': y},
        outputs={
            'Solution': solution,
            'Rank': rank,
            'SingularValues': singular_values
        },
        attrs={'rcond': rcond,
               'driver': driver})
    # Residuals = sum((x @ solution - y) ** 2) over the row axis.
    matmul_out = helper.create_variable_for_type_inference(dtype=x.dtype)
    minus_out = helper.create_variable_for_type_inference(dtype=x.dtype)
    pow_out = helper.create_variable_for_type_inference(dtype=x.dtype)
    helper.append_op(
        type='matmul_v2',
        inputs={'X': x,
                'Y': solution},
        outputs={'Out': matmul_out},
        attrs={
            'trans_x': False,
            'trans_y': False,
        })
    helper.append_op(
        type='elementwise_sub',
        inputs={'X': matmul_out,
                'Y': y},
        outputs={'Out': minus_out})
    helper.append_op(
        type='pow',
        inputs={'X': minus_out},
        outputs={'Out': pow_out},
        attrs={'factor': 2})
    helper.append_op(
        type='reduce_sum',
        inputs={'X': pow_out},
        outputs={'Out': residuals},
        attrs={'dim': [-2],
               'keep_dim': False,
               'reduce_all': False})
    # NOTE(review): both static.data placeholders below reuse fixed names
    # ('rank', 'singular_values'); calling lstsq twice in one program would
    # collide — confirm intended upstream.
    if driver == "gels":
        rank = paddle.static.data(name='rank', shape=[0])
        singular_values = paddle.static.data(name='singular_values', shape=[0])
    elif driver == "gelsy":
        singular_values = paddle.static.data(name='singular_values', shape=[0])
    return solution, residuals, rank, singular_values
| true | true |
790137aeac521a7e1591dc80402f5641e9f47a43 | 944 | py | Python | libraries/botbuilder-dialogs/botbuilder/dialogs/memory/scopes/settings_memory_scope.py | Fl4v/botbuilder-python | 4003d713beb8fb986a01cfd11632eabc65858618 | [
"MIT"
] | 388 | 2019-05-07T15:53:21.000Z | 2022-03-28T20:29:46.000Z | libraries/botbuilder-dialogs/botbuilder/dialogs/memory/scopes/settings_memory_scope.py | Fl4v/botbuilder-python | 4003d713beb8fb986a01cfd11632eabc65858618 | [
"MIT"
] | 1,286 | 2019-05-07T23:38:19.000Z | 2022-03-31T10:44:16.000Z | libraries/botbuilder-dialogs/botbuilder/dialogs/memory/scopes/settings_memory_scope.py | Fl4v/botbuilder-python | 4003d713beb8fb986a01cfd11632eabc65858618 | [
"MIT"
] | 168 | 2019-05-14T20:23:25.000Z | 2022-03-16T06:49:14.000Z | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
from botbuilder.dialogs.memory import scope_path
from .memory_scope import MemoryScope
class SettingsMemoryScope(MemoryScope):
    """Read-only memory scope exposing configuration settings from turn state."""

    def __init__(self):
        super().__init__(scope_path.SETTINGS)
        # Shared fallback returned when the turn has no settings entry.
        self._empty_settings = {}
        self.include_in_snapshot = False

    def get_memory(self, dialog_context: "DialogContext") -> object:
        """Return the settings dict from turn state, or an empty dict."""
        if not dialog_context:
            # Was an f-string with no placeholders; a plain literal is
            # byte-identical at runtime.
            raise TypeError("Expecting: DialogContext, but received None")
        settings: dict = dialog_context.context.turn_state.get(
            scope_path.SETTINGS, None
        )
        if not settings:
            settings = self._empty_settings
        return settings

    def set_memory(self, dialog_context: "DialogContext", memory: object):
        """Settings are read-only; always raises."""
        raise Exception(
            f"{self.__class__.__name__}.set_memory not supported (read only)"
        )
| 29.5 | 77 | 0.680085 |
from botbuilder.dialogs.memory import scope_path
from .memory_scope import MemoryScope
class SettingsMemoryScope(MemoryScope):
def __init__(self):
super().__init__(scope_path.SETTINGS)
self._empty_settings = {}
self.include_in_snapshot = False
def get_memory(self, dialog_context: "DialogContext") -> object:
if not dialog_context:
raise TypeError(f"Expecting: DialogContext, but received None")
settings: dict = dialog_context.context.turn_state.get(
scope_path.SETTINGS, None
)
if not settings:
settings = self._empty_settings
return settings
def set_memory(self, dialog_context: "DialogContext", memory: object):
raise Exception(
f"{self.__class__.__name__}.set_memory not supported (read only)"
)
| true | true |
79013a152deb5f66e4e3a2ee929bdbc26933be29 | 2,232 | py | Python | Samples/codes/matopt_review/add_objective.py | wilsongis/3DP_Experiments | da9bd3b4ba1d82bac7dcfa27d86634add59db087 | [
"MIT",
"Unlicense"
] | null | null | null | Samples/codes/matopt_review/add_objective.py | wilsongis/3DP_Experiments | da9bd3b4ba1d82bac7dcfa27d86634add59db087 | [
"MIT",
"Unlicense"
] | null | null | null | Samples/codes/matopt_review/add_objective.py | wilsongis/3DP_Experiments | da9bd3b4ba1d82bac7dcfa27d86634add59db087 | [
"MIT",
"Unlicense"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Copyright (c) 2021 Showa Denko Materials co., Ltd. All rights reserved.
This software is for non-profit use only.
THIS SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THIS SOFTWARE OR THE USE OR OTHER DEALINGS IN THIS SOFTWARE.
"""
import time
import numpy as np
from GPyOpt.core.task.objective import Objective
class MultiObjective(Objective):
    """
    Class to handle problems with multiple objective functions.

    param func: objective function; returns n_obj values per evaluated point.
    param n_obj: number of objective functions
    param num_cores: number of cores to use in the process of evaluating the objective (default, 1).
    param objective_name: name of the objective function.
    param batch_type: Type of batch used. Only 'synchronous' evaluations are possible at the moment.
    param space: Not in use.
    """

    def __init__(self, func, n_obj, num_cores = 1, objective_name = 'no_name', batch_type = 'synchronous', space = None):
        self.func = func
        self.n_procs = num_cores
        self.num_evaluations = 0
        self.space = space
        self.objective_name = objective_name
        self.n_obj = n_obj

    def evaluate(self, x):
        """
        Performs the evaluation of the objective at x.
        """
        f_evals, cost_evals = self._eval_func(x)
        return f_evals, cost_evals

    def _eval_func(self, x):
        """
        Performs sequential evaluations of the function at x (single location or batch). The computing time of each
        evaluation is also provided.
        """
        cost_evals = []
        results = []
        for i in range(x.shape[0]):
            st_time = time.time()
            results.append(self.func(np.atleast_2d(x[i])))
            cost_evals.append(time.time() - st_time)
        # Stack once at the end (O(n) copying) instead of np.vstack inside the
        # loop (O(n^2)); the empty (0, n_obj) seed preserves the shape for an
        # empty batch and enforces a consistent row width.
        f_evals = np.vstack([np.empty(shape=[0, self.n_obj])] + results)
        return f_evals, cost_evals
| 34.875 | 121 | 0.673835 |
import time
import numpy as np
from GPyOpt.core.task.objective import Objective
class MultiObjective(Objective):
def __init__(self, func, n_obj, num_cores = 1, objective_name = 'no_name', batch_type = 'synchronous', space = None):
self.func = func
self.n_procs = num_cores
self.num_evaluations = 0
self.space = space
self.objective_name = objective_name
self.n_obj = n_obj
def evaluate(self, x):
f_evals, cost_evals = self._eval_func(x)
return f_evals, cost_evals
def _eval_func(self, x):
cost_evals = []
f_evals = np.empty(shape=[0, self.n_obj])
for i in range(x.shape[0]):
st_time = time.time()
rlt = self.func(np.atleast_2d(x[i]))
f_evals = np.vstack([f_evals,rlt])
cost_evals += [time.time()-st_time]
return f_evals, cost_evals
| true | true |
79013ad6e0a2166ad422ee11f1165082ac96f88d | 2,155 | py | Python | greentest/test__core_stat.py | pubnub/gevent | e2ec8bb71fcccd01cee76fa97ca22bd467741a8b | [
"MIT"
] | 1 | 2020-03-21T05:34:18.000Z | 2020-03-21T05:34:18.000Z | greentest/test__core_stat.py | pubnub/gevent | e2ec8bb71fcccd01cee76fa97ca22bd467741a8b | [
"MIT"
] | null | null | null | greentest/test__core_stat.py | pubnub/gevent | e2ec8bb71fcccd01cee76fa97ca22bd467741a8b | [
"MIT"
] | 1 | 2021-01-13T11:20:12.000Z | 2021-01-13T11:20:12.000Z | from __future__ import print_function
import gevent
import gevent.core
import os
import time
filename = 'tmp.test__core_stat.%s' % os.getpid()
hub = gevent.get_hub()
DELAY = 0.5
EV_USE_INOTIFY = getattr(gevent.core, 'EV_USE_INOTIFY', None)
try:
open(filename, 'wb', buffering=0).close()
assert os.path.exists(filename), filename
def write():
f = open(filename, 'wb', buffering=0)
f.write(b'x')
f.close()
start = time.time()
greenlet = gevent.spawn_later(DELAY, write)
# If we don't specify an interval, we default to zero.
# libev interprets that as meaning to use its default interval,
# which is about 5 seconds. If we go below it's minimum check
# threshold, it bumps it up to the minimum.
watcher = hub.loop.stat(filename, interval=-1)
if hasattr(watcher, 'path'):
assert watcher.path == filename
assert watcher.interval == -1
with gevent.Timeout(5 + DELAY + 0.5):
hub.wait(watcher)
reaction = time.time() - start - DELAY
print('Watcher %s reacted after %.4f seconds (write)' % (watcher, reaction))
if reaction >= DELAY and EV_USE_INOTIFY:
print('WARNING: inotify failed (write)')
assert reaction >= 0.0, 'Watcher %s reacted too early (write): %.3fs' % (watcher, reaction)
assert watcher.attr is not None, watcher.attr
assert watcher.prev is not None, watcher.prev
# The watcher interval changed after it started; -1 is illegal
assert watcher.interval != -1
greenlet.join()
gevent.spawn_later(DELAY, os.unlink, filename)
start = time.time()
with gevent.Timeout(5 + DELAY + 0.5):
hub.wait(watcher)
reaction = time.time() - start - DELAY
print('Watcher %s reacted after %.4f seconds (unlink)' % (watcher, reaction))
if reaction >= DELAY and EV_USE_INOTIFY:
print('WARNING: inotify failed (unlink)')
assert reaction >= 0.0, 'Watcher %s reacted too early (unlink): %.3fs' % (watcher, reaction)
assert watcher.attr is None, watcher.attr
assert watcher.prev is not None, watcher.prev
finally:
if os.path.exists(filename):
os.unlink(filename)
| 31.691176 | 96 | 0.664501 | from __future__ import print_function
import gevent
import gevent.core
import os
import time
filename = 'tmp.test__core_stat.%s' % os.getpid()
hub = gevent.get_hub()
DELAY = 0.5
EV_USE_INOTIFY = getattr(gevent.core, 'EV_USE_INOTIFY', None)
try:
open(filename, 'wb', buffering=0).close()
assert os.path.exists(filename), filename
def write():
f = open(filename, 'wb', buffering=0)
f.write(b'x')
f.close()
start = time.time()
greenlet = gevent.spawn_later(DELAY, write)
# libev interprets that as meaning to use its default interval,
# which is about 5 seconds. If we go below it's minimum check
watcher = hub.loop.stat(filename, interval=-1)
if hasattr(watcher, 'path'):
assert watcher.path == filename
assert watcher.interval == -1
with gevent.Timeout(5 + DELAY + 0.5):
hub.wait(watcher)
reaction = time.time() - start - DELAY
print('Watcher %s reacted after %.4f seconds (write)' % (watcher, reaction))
if reaction >= DELAY and EV_USE_INOTIFY:
print('WARNING: inotify failed (write)')
assert reaction >= 0.0, 'Watcher %s reacted too early (write): %.3fs' % (watcher, reaction)
assert watcher.attr is not None, watcher.attr
assert watcher.prev is not None, watcher.prev
assert watcher.interval != -1
greenlet.join()
gevent.spawn_later(DELAY, os.unlink, filename)
start = time.time()
with gevent.Timeout(5 + DELAY + 0.5):
hub.wait(watcher)
reaction = time.time() - start - DELAY
print('Watcher %s reacted after %.4f seconds (unlink)' % (watcher, reaction))
if reaction >= DELAY and EV_USE_INOTIFY:
print('WARNING: inotify failed (unlink)')
assert reaction >= 0.0, 'Watcher %s reacted too early (unlink): %.3fs' % (watcher, reaction)
assert watcher.attr is None, watcher.attr
assert watcher.prev is not None, watcher.prev
finally:
if os.path.exists(filename):
os.unlink(filename)
| true | true |
79013cc0569ba7b48a8c8143a6b1ddacf4ff6392 | 10,201 | py | Python | subset_selection/code/measures/contrastive/contrastive.py | JiwanChung/acav100m | 51cb948d5682da69334a8d05d2df631971b60215 | [
"MIT"
] | 27 | 2021-10-13T07:49:14.000Z | 2022-03-15T06:58:00.000Z | subset_selection/code/measures/contrastive/contrastive.py | JiwanChung/acav100m | 51cb948d5682da69334a8d05d2df631971b60215 | [
"MIT"
] | 3 | 2021-08-30T21:29:45.000Z | 2021-11-18T08:02:32.000Z | subset_selection/code/measures/contrastive/contrastive.py | JiwanChung/acav100m | 51cb948d5682da69334a8d05d2df631971b60215 | [
"MIT"
] | 6 | 2021-08-30T18:48:32.000Z | 2021-12-16T22:11:37.000Z | import csv
from pathlib import Path
import torch
import pandas
import numpy as np
from utils import peek, load_json, dump_json
from .module import ContrastiveModule
from mps import distributed as du
from save import format_rows
def get_penultimates(keys):
    """Pick, for each view, the key of its lexicographically last layer.

    A key is ``"<view>_<layer_name>"`` where the view is the text before the
    first underscore (dataset+model name). Returns one key per view, in
    view-sorted order.

    Bug fix: the original compared the candidate ``layer_name`` against the
    full stored key (view prefix included), so whether an earlier or later
    layer won depended on how the view name compared to the layer name
    instead of comparing layer names to each other.
    """
    best = {}  # view -> (layer_name, full_key)
    for key in keys:
        sep = key.find('_')
        view = key[:sep]
        layer_name = key[sep + 1:]
        if view not in best or layer_name > best[view][0]:
            best[view] = (layer_name, key)
    return [best[view][1] for view in sorted(best)]
def get_optimizer(params, lr=1e-3):
    """Build the AdamW optimizer used for contrastive-head training."""
    return torch.optim.AdamW(
        params,
        lr=lr,
        betas=(0.9, 0.999),
        eps=1e-6,
        amsgrad=True,
    )
def set_lr(optimizer, lr):
    """Overwrite the learning rate of every param group in-place."""
    for group in optimizer.param_groups:
        group['lr'] = lr
    return optimizer
def lr_func_linear(current_step, num_training_steps, num_warmup_steps=3):
    """Piecewise-linear LR multiplier: ramp 0 -> 1 over the warmup steps,
    then decay linearly back to 0 at ``num_training_steps``."""
    if current_step >= num_warmup_steps:
        remaining = float(num_training_steps - current_step)
        decay_span = float(max(1, num_training_steps - num_warmup_steps))
        return max(0.0, remaining / decay_span)
    return float(current_step) / float(max(1, num_warmup_steps))
def update_lr(optimizer, epoch, num_epochs, base_lr=1e-3, num_warmup_steps=3):
    """Apply the warmup/decay schedule for this epoch; return (optimizer, lr)."""
    lr = base_lr * lr_func_linear(epoch + 1, num_epochs + 1, num_warmup_steps)
    return set_lr(optimizer, lr), lr
class Contrastive:
    """Audio-visual contrastive measure.

    Trains a small ``ContrastiveModule`` head on precomputed video (SlowFast)
    and audio (VGGish) features, then scores clips by cross-modal agreement.
    Supports multi-node runs via the ``mps`` distributed helpers and caches
    model weights and inference scores on disk.
    """

    def __init__(self, num_epochs=1, device='cpu', base_lr=1e-4,
                 num_warmup_steps=3, distributed=False):
        # Hyper-parameters for the contrastive head's short training run.
        self.num_epochs = num_epochs
        self.device = device
        self.base_lr = base_lr
        self.num_warmup_steps = num_warmup_steps
        self.distributed = distributed
        self.epoch = 0
        # sizes = self.get_sizes(train)
        sizes = self.default_sizes
        self.model = ContrastiveModule(*sizes, use_global_batch=distributed)
        self.model = self.model.to(self.device)

    def init(self, clustering_combinations, candidates):
        # No per-run initialization needed; kept for measure-interface parity.
        pass

    @property
    def default_sizes(self):
        # video (slowfast) : 2304, audio (VGGish) : 128
        return [2304, 128]

    def get_sizes(self, train):
        """Infer (video_dim, audio_dim) from one training row's feature dict."""
        class_data = peek(train)
        row = class_data[0]
        penultimates = get_penultimates(list(row['features'].keys()))
        return [row['features'][k].shape[-1] for k in penultimates]

    def get_feature_names(self, train):
        """Return the sorted keys of one training row."""
        class_data = peek(train)
        row = peek(class_data)
        return sorted(list(row.keys()))

    def train_batch(self, batch, optimizer):
        """One optimization step; returns (loss, accuracy) as Python floats."""
        # NOTE(review): gradients are never zeroed between steps — confirm
        # that accumulating across batches is intended.
        moved = []
        for feature in batch:
            moved.append(feature.to(self.device))
        loss, acc = self.model(*moved)
        loss.backward()
        if self.distributed:
            self.model.average_gradient()
        optimizer.step()
        return loss.item(), acc.item()

    def _get_features(self, batch):
        """Deduplicate a collated batch by clip id and pull paired features.

        Returns (metas, [video_features, audio_features]) where metas holds
        id/filename/shard_name dicts aligned row-for-row with the features.
        """
        unique_ids = pandas.Series(batch['idx']).drop_duplicates().index.tolist()
        filenames = [batch['filename'][idx] for idx in unique_ids]
        ids = [batch['idx'][idx] for idx in unique_ids]
        shard_names = [batch['shard_name'][idx] for idx in unique_ids]
        metas = [{'id': idx, 'filename': filename, 'shard_name': shard_name}
                 for idx, filename, shard_name in zip(ids, filenames, shard_names)]
        video_features = batch['SLOWFAST_8x8_R50/kinetics-400']['layer_4']
        audio_features = batch['VGGish/YouTube-8M']['layer_4']
        unique_ids = torch.Tensor(unique_ids).long()
        video_features = video_features.index_select(dim=0, index=unique_ids)
        audio_features = audio_features.index_select(dim=0, index=unique_ids)
        return metas, [video_features, audio_features]

    def get_features(self, batch):
        """Like ``_get_features`` but shards rows round-robin across ranks."""
        metas, [video_features, audio_features] = self._get_features(batch)
        if self.distributed:
            i = du.get_rank()
            total = du.get_world_size()
            metas = metas[i::total]
            video_features = video_features[i::total]
            audio_features = audio_features[i::total]
        return metas, [video_features, audio_features]

    def train(self, args, path, dataloader, log_every=1, verbose=True):
        """Train the contrastive head, writing a checkpoint cache per epoch."""
        self.model.train()
        optimizer = get_optimizer(self.model.parameters(), self.base_lr)
        # Resume from self.epoch so a loaded cache continues where it left off.
        for epoch in range(self.epoch, self.num_epochs):
            optimizer, lr = update_lr(optimizer, epoch, self.num_epochs, self.base_lr,
                                      self.num_warmup_steps)
            epoch_loss = []
            epoch_acc = []
            pbar = dataloader
            for count, batch in enumerate(pbar):
                _, features = self.get_features(batch)
                loss, acc = self.train_batch(features, optimizer)
                epoch_loss.append(loss)
                epoch_acc.append(acc)
                if verbose and count % log_every == 0:
                    print("(node {}) training epoch ({}/{}) iter ({}/{}) (lr: {:04f}, loss: {:04f}, acc: {:04f})".format(
                        du.get_rank(), epoch, self.num_epochs, count, len(dataloader), lr, loss, acc))
            epoch_loss = np.array(epoch_loss).mean()
            epoch_acc = np.array(epoch_acc).mean()
            if verbose:
                print("(node {}) epoch ({}/{}) done (lr: {:04f}, loss: {:04f}, acc: {:04f})".format(
                    du.get_rank(), epoch, self.num_epochs, lr, epoch_loss, epoch_acc))
            self.epoch = epoch
            self.save_cache(args, path, epoch, verbose)
        return

    def get_cache_path_run(self, args, epoch):
        """Build (pkl_path, json_key_path) for this run's epoch checkpoint."""
        cache_dir = args.data.output.path.parent / 'caches'
        cache_dir.mkdir(parents=True, exist_ok=True)
        pid = args.parent_pid
        rank = args.node_rank
        i = args.chunk_num
        name = "contrastive_model_cache_epoch_{}_{}_{}_{}.pkl".format(epoch, pid, rank, i)
        path = str(cache_dir / name)
        key_name = "contrastive_model_cache_epoch_{}_{}_{}_{}.json".format(epoch, pid, rank, i)
        key_path = str(cache_dir / key_name)
        return path, key_path

    def get_cache_path_load(self, args, path, epoch):
        """Find a cached checkpoint whose recorded chunk set covers ``path``.

        Among caches trained on a superset of the requested chunks, picks the
        one with the largest overlap; returns None when none qualify.
        """
        cache_dir = args.data.output.path.parent / 'caches'
        cache_dir.mkdir(parents=True, exist_ok=True)
        keys = list(cache_dir.glob("contrastive_model_cache_epoch_{}_*.json".format(epoch)))
        if len(keys) == 0:
            return None
        keys = {p.stem: set(load_json(p)) for p in keys}
        path = set([Path(p).stem for p in path])
        intersections = [(k, len(v & path)) for k, v in keys.items() if len(path - v) == 0]
        if len(intersections) == 0:
            return None
        key = max(intersections, key=lambda x: x[1])[0]
        path = cache_dir / key
        path = path.parent / (path.stem + '.pkl')
        return path

    def save_cache(self, args, chunks, epoch, verbose=True):
        """Save model state plus the list of chunks it was trained on."""
        path, key_path = self.get_cache_path_run(args, epoch)
        dt = {
            'epoch': self.epoch,
            'base_lr': self.base_lr,
            'model': self.model.state_dict()
        }
        if verbose:
            print("saved cache file: {}".format(Path(path).stem))
        torch.save(dt, path)
        keys = [Path(p).stem for p in chunks]
        dump_json(keys, key_path)

    def load_cache(self, args, path, epoch):
        """Restore epoch, base_lr and model weights from a matching cache."""
        path = self.get_cache_path_load(args, path, epoch)
        assert path is not None, 'no cache file'
        dt = torch.load(path)
        self.epoch = dt['epoch']
        self.base_lr = dt['base_lr']
        self.model.load_state_dict(dt['model'])

    def infer_batch(self, batch):
        """Score one batch; returns detached logits on CPU."""
        moved = []
        for feature in batch:
            moved.append(feature.to(self.device))
        logits = self.model.infer(*moved)
        return logits.detach().cpu()

    def infer(self, args, dataloader, json_metas, subset_size, log_every=1, verbose=True):
        """Run inference and return the top ``subset_size`` (scores, ids, metas)."""
        self.model.eval()
        with torch.no_grad():
            logits, filename_ids = self._infer(args, dataloader, json_metas, log_every, verbose)
        if subset_size > logits.shape[0]:
            subset_size = logits.shape[0]
        scores, ids = logits.topk(subset_size, sorted=True)
        return scores, ids, filename_ids

    def _infer(self, args, dataloader, json_metas, log_every=1, verbose=True):
        """Score every batch, flushing scores+metas to the CSV cache as it goes."""
        logits = []
        pbar = dataloader
        metas = []
        for count, batch in enumerate(pbar):
            batch_metas, features = self.get_features(batch)
            logit = self.infer_batch(features)
            logits.append(logit)
            metas.extend(batch_metas)
            if verbose and count % log_every == 0:
                print("inference iter ({}/{}) saving caches".format(count, len(dataloader)))
            # NOTE(review): this flush runs every iteration (it is outside the
            # verbose guard), so the buffers below are emptied each pass and
            # the final return value is the last flush's leftovers — confirm
            # against the caller's expectations in ``infer``.
            logits = torch.cat(logits, dim=0)
            self.save_inference(args, logits, metas, json_metas)
            logits = []
            metas = []
        if len(metas) > 0:
            logits = torch.cat(logits, dim=0)
            self.save_inference(args, logits, metas, json_metas)
        print("done: inference iter ({}/{}) saving caches".format(count, len(dataloader)))
        return logits, metas

    def save_inference(self, args, logits, metas, json_metas):
        """Append (score, shard, filename, id, segment) rows to a rank-local CSV."""
        cache_dir = args.data.output.path.parent / 'caches'
        cache_dir.mkdir(parents=True, exist_ok=True)
        pid = args.parent_pid
        local_rank = du.get_rank()
        output_name = Path(args.data.output.path).stem
        name = "{}_contrastive_inferred_cache_{}_{}.csv".format(output_name, pid, local_rank)
        scores = logits.numpy().tolist()
        rows = [{'score': score, **v} for score, v in zip(scores, metas)]
        lines = format_rows(rows, json_metas, sharded_meta=True,
                            headers=['score', 'shard_name', 'filename', 'id', 'segment'])
        print("saving cache to {}".format(cache_dir / name))
        with open(cache_dir / name, 'a+') as f:
            writer = csv.writer(f)
            for line in lines:
                writer.writerow(line)
| 39.692607 | 121 | 0.605921 | import csv
from pathlib import Path
import torch
import pandas
import numpy as np
from utils import peek, load_json, dump_json
from .module import ContrastiveModule
from mps import distributed as du
from save import format_rows
def get_penultimates(keys):
penultimates = {}
for key in keys:
view = key[:key.find('_')]
layer_name = key[key.find('_') + 1:]
if view not in penultimates:
penultimates[view] = view + '_' + layer_name
elif layer_name > penultimates[view]:
penultimates[view] = view + '_' + layer_name
keys = sorted(list(penultimates.keys()))
return [penultimates[k] for k in keys]
def get_optimizer(params, lr=1e-3):
optimizer = torch.optim.AdamW(
params,
lr=lr,
betas=(0.9, 0.999),
eps=1e-6,
amsgrad=True,
)
return optimizer
def set_lr(optimizer, lr):
for param in optimizer.param_groups:
param['lr'] = lr
return optimizer
def lr_func_linear(current_step, num_training_steps, num_warmup_steps=3):
if current_step < num_warmup_steps:
return float(current_step) / float(max(1, num_warmup_steps))
return max(0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps)))
def update_lr(optimizer, epoch, num_epochs, base_lr=1e-3, num_warmup_steps=3):
lr = lr_func_linear(epoch + 1, num_epochs + 1, num_warmup_steps) * base_lr
optimizer = set_lr(optimizer, lr)
return optimizer, lr
class Contrastive:
def __init__(self, num_epochs=1, device='cpu', base_lr=1e-4,
num_warmup_steps=3, distributed=False):
self.num_epochs = num_epochs
self.device = device
self.base_lr = base_lr
self.num_warmup_steps = num_warmup_steps
self.distributed = distributed
self.epoch = 0
sizes = self.default_sizes
self.model = ContrastiveModule(*sizes, use_global_batch=distributed)
self.model = self.model.to(self.device)
def init(self, clustering_combinations, candidates):
pass
@property
def default_sizes(self):
return [2304, 128]
def get_sizes(self, train):
class_data = peek(train)
row = class_data[0]
penultimates = get_penultimates(list(row['features'].keys()))
return [row['features'][k].shape[-1] for k in penultimates]
def get_feature_names(self, train):
class_data = peek(train)
row = peek(class_data)
return sorted(list(row.keys()))
def train_batch(self, batch, optimizer):
moved = []
for feature in batch:
moved.append(feature.to(self.device))
loss, acc = self.model(*moved)
loss.backward()
if self.distributed:
self.model.average_gradient()
optimizer.step()
return loss.item(), acc.item()
def _get_features(self, batch):
unique_ids = pandas.Series(batch['idx']).drop_duplicates().index.tolist()
filenames = [batch['filename'][idx] for idx in unique_ids]
ids = [batch['idx'][idx] for idx in unique_ids]
shard_names = [batch['shard_name'][idx] for idx in unique_ids]
metas = [{'id': idx, 'filename': filename, 'shard_name': shard_name}
for idx, filename, shard_name in zip(ids, filenames, shard_names)]
video_features = batch['SLOWFAST_8x8_R50/kinetics-400']['layer_4']
audio_features = batch['VGGish/YouTube-8M']['layer_4']
unique_ids = torch.Tensor(unique_ids).long()
video_features = video_features.index_select(dim=0, index=unique_ids)
audio_features = audio_features.index_select(dim=0, index=unique_ids)
return metas, [video_features, audio_features]
def get_features(self, batch):
metas, [video_features, audio_features] = self._get_features(batch)
if self.distributed:
i = du.get_rank()
total = du.get_world_size()
metas = metas[i::total]
video_features = video_features[i::total]
audio_features = audio_features[i::total]
return metas, [video_features, audio_features]
def train(self, args, path, dataloader, log_every=1, verbose=True):
self.model.train()
optimizer = get_optimizer(self.model.parameters(), self.base_lr)
for epoch in range(self.epoch, self.num_epochs):
optimizer, lr = update_lr(optimizer, epoch, self.num_epochs, self.base_lr,
self.num_warmup_steps)
epoch_loss = []
epoch_acc = []
pbar = dataloader
for count, batch in enumerate(pbar):
_, features = self.get_features(batch)
loss, acc = self.train_batch(features, optimizer)
epoch_loss.append(loss)
epoch_acc.append(acc)
if verbose and count % log_every == 0:
print("(node {}) training epoch ({}/{}) iter ({}/{}) (lr: {:04f}, loss: {:04f}, acc: {:04f})".format(
du.get_rank(), epoch, self.num_epochs, count, len(dataloader), lr, loss, acc))
epoch_loss = np.array(epoch_loss).mean()
epoch_acc = np.array(epoch_acc).mean()
if verbose:
print("(node {}) epoch ({}/{}) done (lr: {:04f}, loss: {:04f}, acc: {:04f})".format(
du.get_rank(), epoch, self.num_epochs, lr, epoch_loss, epoch_acc))
self.epoch = epoch
self.save_cache(args, path, epoch, verbose)
return
def get_cache_path_run(self, args, epoch):
cache_dir = args.data.output.path.parent / 'caches'
cache_dir.mkdir(parents=True, exist_ok=True)
pid = args.parent_pid
rank = args.node_rank
i = args.chunk_num
name = "contrastive_model_cache_epoch_{}_{}_{}_{}.pkl".format(epoch, pid, rank, i)
path = str(cache_dir / name)
key_name = "contrastive_model_cache_epoch_{}_{}_{}_{}.json".format(epoch, pid, rank, i)
key_path = str(cache_dir / key_name)
return path, key_path
def get_cache_path_load(self, args, path, epoch):
cache_dir = args.data.output.path.parent / 'caches'
cache_dir.mkdir(parents=True, exist_ok=True)
keys = list(cache_dir.glob("contrastive_model_cache_epoch_{}_*.json".format(epoch)))
if len(keys) == 0:
return None
keys = {p.stem: set(load_json(p)) for p in keys}
path = set([Path(p).stem for p in path])
intersections = [(k, len(v & path)) for k, v in keys.items() if len(path - v) == 0]
if len(intersections) == 0:
return None
key = max(intersections, key=lambda x: x[1])[0]
path = cache_dir / key
path = path.parent / (path.stem + '.pkl')
return path
def save_cache(self, args, chunks, epoch, verbose=True):
path, key_path = self.get_cache_path_run(args, epoch)
dt = {
'epoch': self.epoch,
'base_lr': self.base_lr,
'model': self.model.state_dict()
}
if verbose:
print("saved cache file: {}".format(Path(path).stem))
torch.save(dt, path)
keys = [Path(p).stem for p in chunks]
dump_json(keys, key_path)
def load_cache(self, args, path, epoch):
path = self.get_cache_path_load(args, path, epoch)
assert path is not None, 'no cache file'
dt = torch.load(path)
self.epoch = dt['epoch']
self.base_lr = dt['base_lr']
self.model.load_state_dict(dt['model'])
def infer_batch(self, batch):
moved = []
for feature in batch:
moved.append(feature.to(self.device))
logits = self.model.infer(*moved)
return logits.detach().cpu()
def infer(self, args, dataloader, json_metas, subset_size, log_every=1, verbose=True):
self.model.eval()
with torch.no_grad():
logits, filename_ids = self._infer(args, dataloader, json_metas, log_every, verbose)
if subset_size > logits.shape[0]:
subset_size = logits.shape[0]
scores, ids = logits.topk(subset_size, sorted=True)
return scores, ids, filename_ids
def _infer(self, args, dataloader, json_metas, log_every=1, verbose=True):
logits = []
pbar = dataloader
metas = []
for count, batch in enumerate(pbar):
batch_metas, features = self.get_features(batch)
logit = self.infer_batch(features)
logits.append(logit)
metas.extend(batch_metas)
if verbose and count % log_every == 0:
print("inference iter ({}/{}) saving caches".format(count, len(dataloader)))
logits = torch.cat(logits, dim=0)
self.save_inference(args, logits, metas, json_metas)
logits = []
metas = []
if len(metas) > 0:
logits = torch.cat(logits, dim=0)
self.save_inference(args, logits, metas, json_metas)
print("done: inference iter ({}/{}) saving caches".format(count, len(dataloader)))
return logits, metas
def save_inference(self, args, logits, metas, json_metas):
cache_dir = args.data.output.path.parent / 'caches'
cache_dir.mkdir(parents=True, exist_ok=True)
pid = args.parent_pid
local_rank = du.get_rank()
output_name = Path(args.data.output.path).stem
name = "{}_contrastive_inferred_cache_{}_{}.csv".format(output_name, pid, local_rank)
scores = logits.numpy().tolist()
rows = [{'score': score, **v} for score, v in zip(scores, metas)]
lines = format_rows(rows, json_metas, sharded_meta=True,
headers=['score', 'shard_name', 'filename', 'id', 'segment'])
print("saving cache to {}".format(cache_dir / name))
with open(cache_dir / name, 'a+') as f:
writer = csv.writer(f)
for line in lines:
writer.writerow(line)
| true | true |
79013db0531ce24e3ece32d8ed1fde8567c149f2 | 643 | py | Python | World 1/First attempts/ex010 - Real to Dollar.py | MiguelChichorro/PythonExercises | 3b2726e7d9ef92c1eb6b977088692c42a2a7b86e | [
"MIT"
] | 2 | 2021-04-23T19:18:06.000Z | 2021-05-15T17:45:21.000Z | World 1/First attempts/ex010 - Real to Dollar.py | MiguelChichorro/PythonExercises | 3b2726e7d9ef92c1eb6b977088692c42a2a7b86e | [
"MIT"
] | 1 | 2021-05-14T00:29:23.000Z | 2021-05-14T00:29:23.000Z | World 1/First attempts/ex010 - Real to Dollar.py | MiguelChichorro/PythonExercises | 3b2726e7d9ef92c1eb6b977088692c42a2a7b86e | [
"MIT"
] | 1 | 2021-05-14T00:19:33.000Z | 2021-05-14T00:19:33.000Z | colors = {"clean": "\033[m",
"red": "\033[31m",
"green": "\033[32m",
"yellow": "\033[33m",
"blue": "\033[34m",
"purple": "\033[35m",
"cian": "\033[36m"}
n1 = float(input("Enter how many money do you have in your wallet(in Real): R$"))
print("You have {}R${:.2f}{} reals and you can buy {}US${:.2f}{} dollars"
"\nBuy {}EUR${:.2f}{} and buy {}GBP${:.2f}{}"
.format(colors["green"], n1, colors["clean"],
colors["blue"], n1/5.59, colors["clean"],
colors["red"], n1/6.69, colors["clean"],
colors["yellow"], n1/7.79, colors["clean"]))
# ANSI escape sequences used to colorize terminal output ("clean" resets).
colors = {"clean": "\033[m",
          "red": "\033[31m",
          "green": "\033[32m",
          "yellow": "\033[33m",
          "blue": "\033[34m",
          "purple": "\033[35m",
          "cian": "\033[36m"}
# Read the wallet amount in Brazilian reais.
n1 = float(input("Enter how many money do you have in your wallet(in Real): R$"))
# Convert with fixed rates (5.59 BRL/USD, 6.69 BRL/EUR, 7.79 BRL/GBP) and
# print each converted amount in its own color.
print("You have {}R${:.2f}{} reals and you can buy {}US${:.2f}{} dollars"
      "\nBuy {}EUR${:.2f}{} and buy {}GBP${:.2f}{}"
      .format(colors["green"], n1, colors["clean"],
              colors["blue"], n1/5.59, colors["clean"],
              colors["red"], n1/6.69, colors["clean"],
              colors["yellow"], n1/7.79, colors["clean"]))
79013db42e504c5f46de3998836df5093f4d04a3 | 9,602 | py | Python | dnacentersdk/api/v1_3_1/clients.py | oboehmer/dnacentersdk | 25c4e99900640deee91a56aa886874d9cb0ca960 | [
"MIT"
] | 32 | 2019-09-05T05:16:56.000Z | 2022-03-22T09:50:38.000Z | dnacentersdk/api/v1_3_1/clients.py | oboehmer/dnacentersdk | 25c4e99900640deee91a56aa886874d9cb0ca960 | [
"MIT"
] | 35 | 2019-09-07T18:58:54.000Z | 2022-03-24T19:29:36.000Z | dnacentersdk/api/v1_3_1/clients.py | oboehmer/dnacentersdk | 25c4e99900640deee91a56aa886874d9cb0ca960 | [
"MIT"
] | 18 | 2019-09-09T11:07:21.000Z | 2022-03-25T08:49:59.000Z | # -*- coding: utf-8 -*-
"""Cisco DNA Center Clients API wrapper.
Copyright (c) 2019-2021 Cisco Systems.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from __future__ import (
absolute_import,
division,
print_function,
unicode_literals,
)
from builtins import *
from past.builtins import basestring
from ...restsession import RestSession
from ...utils import (
check_type,
dict_from_items_with_values,
apply_path_params,
dict_of_str,
)
class Clients(object):
    """Cisco DNA Center Clients API (version: 1.3.1).

    Wraps the DNA Center Clients API and exposes the API as native Python
    methods that return native Python objects.
    """

    def __init__(self, session, object_factory, request_validator):
        """Initialize a new Clients object with the provided RestSession.

        Args:
            session(RestSession): The RESTful session object to be used for
                API calls to the DNA Center service.

        Raises:
            TypeError: If the parameter types are incorrect.
        """
        check_type(session, RestSession)
        super(Clients, self).__init__()

        self._session = session
        self._object_factory = object_factory
        self._request_validator = request_validator

    def _get(self, resource_url, params, headers, model_key):
        """Perform a GET request and deserialize the JSON reply.

        Merges any caller-supplied headers into the session headers, applies
        path parameters (none of the Clients endpoints use any), issues the
        request, and wraps the response with the object factory ``model_key``.
        """
        with_custom_headers = False
        _headers = self._session.headers or {}
        if headers:
            _headers.update(dict_of_str(headers))
            with_custom_headers = True

        endpoint_full_url = apply_path_params(resource_url, {})
        if with_custom_headers:
            json_data = self._session.get(endpoint_full_url, params=params,
                                          headers=_headers)
        else:
            json_data = self._session.get(endpoint_full_url, params=params)

        return self._object_factory(model_key, json_data)

    def get_client_enrichment_details(self,
                                      headers=None,
                                      **request_parameters):
        """Enriches a given network End User context (a network user-id or
        end user's device Mac Address) with details about the user, the
        devices that the user is connected to and the assurance issues that
        the user is impacted by.

        Args:
            headers(dict): Dictionary of HTTP Headers to send with the Request.
            **request_parameters: Additional request parameters (provides
                support for parameters that may be added in the future).

        Returns:
            list: JSON response. A list of MyDict objects.

        Raises:
            TypeError: If the parameter types are incorrect.
            MalformedRequest: If the request body created is invalid.
            ApiError: If the DNA Center cloud returns an error.
        """
        check_type(headers, dict)
        if headers is not None:
            # These identity headers, when present, must be non-None strings.
            for required_header in ('entity_type', 'entity_value'):
                if required_header in headers:
                    check_type(headers.get(required_header),
                               basestring, may_be_none=False)
            if 'issueCategory' in headers:
                check_type(headers.get('issueCategory'),
                           basestring)
            if 'X-Auth-Token' in headers:
                check_type(headers.get('X-Auth-Token'),
                           basestring, may_be_none=False)

        _params = dict_from_items_with_values(dict(request_parameters))

        return self._get('/dna/intent/api/v1/client-enrichment-details',
                         _params, headers, 'bpm_b199685d4d089a67_v1_3_1')

    def get_overall_client_health(self,
                                  timestamp=None,
                                  headers=None,
                                  **request_parameters):
        """Returns Overall Client Health information by Client type (Wired
        and Wireless) for any given point of time.

        Args:
            timestamp(basestring, int): Epoch time(in milliseconds) when the
                Client health data is required.
            headers(dict): Dictionary of HTTP Headers to send with the Request.
            **request_parameters: Additional request parameters (provides
                support for parameters that may be added in the future).

        Returns:
            MyDict: JSON response. Access the object's properties by using
                the dot notation or the bracket notation.

        Raises:
            TypeError: If the parameter types are incorrect.
            MalformedRequest: If the request body created is invalid.
            ApiError: If the DNA Center cloud returns an error.
        """
        check_type(headers, dict)
        check_type(timestamp, (basestring, int))
        if headers is not None:
            if 'X-Auth-Token' in headers:
                check_type(headers.get('X-Auth-Token'),
                           basestring, may_be_none=False)

        # A missing timestamp is sent as an empty string rather than omitted.
        _params = {'timestamp': timestamp if timestamp is not None else ''}
        _params.update(request_parameters)
        _params = dict_from_items_with_values(_params)

        return self._get('/dna/intent/api/v1/client-health',
                         _params, headers, 'bpm_149aa93b4ddb80dd_v1_3_1')

    def get_client_detail(self,
                          mac_address,
                          timestamp=None,
                          headers=None,
                          **request_parameters):
        """Returns detailed Client information retrieved by Mac Address for
        any given point of time.

        Args:
            timestamp(basestring, int): Epoch time(in milliseconds) when the
                Client health data is required.
            mac_address(basestring): MAC Address of the client.
            headers(dict): Dictionary of HTTP Headers to send with the Request.
            **request_parameters: Additional request parameters (provides
                support for parameters that may be added in the future).

        Returns:
            MyDict: JSON response. Access the object's properties by using
                the dot notation or the bracket notation.

        Raises:
            TypeError: If the parameter types are incorrect.
            MalformedRequest: If the request body created is invalid.
            ApiError: If the DNA Center cloud returns an error.
        """
        check_type(headers, dict)
        check_type(timestamp, (basestring, int))
        check_type(mac_address, basestring,
                   may_be_none=False)
        if headers is not None:
            if 'X-Auth-Token' in headers:
                check_type(headers.get('X-Auth-Token'),
                           basestring, may_be_none=False)

        # A missing timestamp is sent as an empty string rather than omitted.
        _params = {'timestamp': timestamp if timestamp is not None else '',
                   'macAddress': mac_address}
        _params.update(request_parameters)
        _params = dict_from_items_with_values(_params)

        return self._get('/dna/intent/api/v1/client-detail',
                         _params, headers, 'bpm_e2adba7943bab3e9_v1_3_1')
| 36.509506 | 108 | 0.614559 |
from __future__ import (
absolute_import,
division,
print_function,
unicode_literals,
)
from builtins import *
from past.builtins import basestring
from ...restsession import RestSession
from ...utils import (
check_type,
dict_from_items_with_values,
apply_path_params,
dict_of_str,
)
class Clients(object):
def __init__(self, session, object_factory, request_validator):
check_type(session, RestSession)
super(Clients, self).__init__()
self._session = session
self._object_factory = object_factory
self._request_validator = request_validator
def get_client_enrichment_details(self,
headers=None,
**request_parameters):
check_type(headers, dict)
if headers is not None:
if 'entity_type' in headers:
check_type(headers.get('entity_type'),
basestring, may_be_none=False)
if 'entity_value' in headers:
check_type(headers.get('entity_value'),
basestring, may_be_none=False)
if 'issueCategory' in headers:
check_type(headers.get('issueCategory'),
basestring)
if 'X-Auth-Token' in headers:
check_type(headers.get('X-Auth-Token'),
basestring, may_be_none=False)
_params = {
}
_params.update(request_parameters)
_params = dict_from_items_with_values(_params)
path_params = {
}
with_custom_headers = False
_headers = self._session.headers or {}
if headers:
_headers.update(dict_of_str(headers))
with_custom_headers = True
e_url = ('/dna/intent/api/v1/client-enrichment-details')
endpoint_full_url = apply_path_params(e_url, path_params)
if with_custom_headers:
json_data = self._session.get(endpoint_full_url, params=_params,
headers=_headers)
else:
json_data = self._session.get(endpoint_full_url, params=_params)
return self._object_factory('bpm_b199685d4d089a67_v1_3_1', json_data)
def get_overall_client_health(self,
timestamp=None,
headers=None,
**request_parameters):
check_type(headers, dict)
check_type(timestamp, (basestring, int))
if headers is not None:
if 'X-Auth-Token' in headers:
check_type(headers.get('X-Auth-Token'),
basestring, may_be_none=False)
_params = {
'timestamp':
timestamp,
}
if _params['timestamp'] is None:
_params['timestamp'] = ''
_params.update(request_parameters)
_params = dict_from_items_with_values(_params)
path_params = {
}
with_custom_headers = False
_headers = self._session.headers or {}
if headers:
_headers.update(dict_of_str(headers))
with_custom_headers = True
e_url = ('/dna/intent/api/v1/client-health')
endpoint_full_url = apply_path_params(e_url, path_params)
if with_custom_headers:
json_data = self._session.get(endpoint_full_url, params=_params,
headers=_headers)
else:
json_data = self._session.get(endpoint_full_url, params=_params)
return self._object_factory('bpm_149aa93b4ddb80dd_v1_3_1', json_data)
def get_client_detail(self,
mac_address,
timestamp=None,
headers=None,
**request_parameters):
check_type(headers, dict)
check_type(timestamp, (basestring, int))
check_type(mac_address, basestring,
may_be_none=False)
if headers is not None:
if 'X-Auth-Token' in headers:
check_type(headers.get('X-Auth-Token'),
basestring, may_be_none=False)
_params = {
'timestamp':
timestamp,
'macAddress':
mac_address,
}
if _params['timestamp'] is None:
_params['timestamp'] = ''
_params.update(request_parameters)
_params = dict_from_items_with_values(_params)
path_params = {
}
with_custom_headers = False
_headers = self._session.headers or {}
if headers:
_headers.update(dict_of_str(headers))
with_custom_headers = True
e_url = ('/dna/intent/api/v1/client-detail')
endpoint_full_url = apply_path_params(e_url, path_params)
if with_custom_headers:
json_data = self._session.get(endpoint_full_url, params=_params,
headers=_headers)
else:
json_data = self._session.get(endpoint_full_url, params=_params)
return self._object_factory('bpm_e2adba7943bab3e9_v1_3_1', json_data)
| true | true |
79013dba763510046e4193907736efb522e1a25c | 17,668 | py | Python | src/dispatch/messaging.py | oliverzgy/dispatch | 1cb6199ab58bf9bda89f9a0430ea4933d27232dc | [
"Apache-2.0"
] | null | null | null | src/dispatch/messaging.py | oliverzgy/dispatch | 1cb6199ab58bf9bda89f9a0430ea4933d27232dc | [
"Apache-2.0"
] | null | null | null | src/dispatch/messaging.py | oliverzgy/dispatch | 1cb6199ab58bf9bda89f9a0430ea4933d27232dc | [
"Apache-2.0"
] | null | null | null | import copy
from enum import Enum
from jinja2 import Template
from typing import List
from dispatch.conversation.enums import ConversationButtonActions
from dispatch.incident.enums import IncidentStatus
from .config import (
DISPATCH_UI_URL,
INCIDENT_RESOURCE_CONVERSATION_REFERENCE_DOCUMENT,
INCIDENT_RESOURCE_EXECUTIVE_REPORT_DOCUMENT,
INCIDENT_RESOURCE_INCIDENT_FAQ_DOCUMENT,
INCIDENT_RESOURCE_INCIDENT_REVIEW_DOCUMENT,
INCIDENT_RESOURCE_INVESTIGATION_DOCUMENT,
INCIDENT_RESOURCE_INVESTIGATION_SHEET,
)
class MessageType(str, Enum):
    """Identifiers for the kinds of messages Dispatch can send.

    Members are string-valued so they can be used directly as routing keys
    in message payloads. Member order is preserved; do not reorder.
    """
    incident_daily_summary = "incident-daily-summary"
    incident_daily_summary_no_incidents = "incident-daily-summary-no-incidents"
    incident_executive_report = "incident-executive-report"
    incident_notification = "incident-notification"
    incident_participant_welcome = "incident-participant-welcome"
    incident_resources_message = "incident-resources-message"
    incident_tactical_report = "incident-tactical-report"
    incident_task_list = "incident-task-list"
    incident_task_reminder = "incident-task-reminder"
    incident_status_reminder = "incident-status-reminder"
    incident_participant_suggested_reading = "incident-participant-suggested-reading"
# Human-readable blurb substituted for a template block's text when the block
# declares a "status_mapping" (see render_message_template below in this file).
INCIDENT_STATUS_DESCRIPTIONS = {
    IncidentStatus.active: "This incident is under active investigation.",
    IncidentStatus.stable: "This incident is stable, the bulk of the investigation has been completed or most of the risk has been mitigated.",
    IncidentStatus.closed: "This no longer requires additional involvement, long term incident action items have been assigned to their respective owners.",
}
INCIDENT_TASK_REMINDER_DESCRIPTION = """
You are assigned to the following incident tasks.
This is a reminder that these tasks have passed their due date.
Please review and update them as appropriate. Resolving them will stop the reminders.""".replace(
"\n", " "
).strip()
INCIDENT_TASK_LIST_DESCRIPTION = """The following are open incident tasks."""
INCIDENT_DAILY_SUMMARY_DESCRIPTION = """
Daily Incidents Summary""".replace(
"\n", " "
).strip()
INCIDENT_DAILY_SUMMARY_ACTIVE_INCIDENTS_DESCRIPTION = f"""
Active Incidents (<{DISPATCH_UI_URL}/incidents/status|Details>)""".replace(
"\n", " "
).strip()
INCIDENT_DAILY_SUMMARY_NO_ACTIVE_INCIDENTS_DESCRIPTION = """
There are no active incidents at this moment.""".replace(
"\n", " "
).strip()
INCIDENT_DAILY_SUMMARY_STABLE_CLOSED_INCIDENTS_DESCRIPTION = """
Stable or Closed Incidents (last 24 hours)""".replace(
"\n", " "
).strip()
INCIDENT_DAILY_SUMMARY_NO_STABLE_CLOSED_INCIDENTS_DESCRIPTION = """
There are no stable or closed incidents in the last 24 hours.""".replace(
"\n", " "
).strip()
INCIDENT_COMMANDER_DESCRIPTION = """
The Incident Commander (IC) is responsible for
knowing the full context of the incident.
Contact them about any questions or concerns.""".replace(
"\n", " "
).strip()
INCIDENT_COMMANDER_READDED_DESCRIPTION = """
{{ commander_fullname }} (Incident Commander) has been re-added to the conversation.
Please, handoff the Incident Commander role before leaving the conversation.""".replace(
"\n", " "
).strip()
INCIDENT_TICKET_DESCRIPTION = """
Ticket for tracking purposes. It contains a description of
the incident and links to resources.""".replace(
"\n", " "
).strip()
INCIDENT_CONVERSATION_DESCRIPTION = """
Private conversation for real-time discussion. All incident participants get added to it.
""".replace(
"\n", " "
).strip()
INCIDENT_CONVERSATION_REFERENCE_DOCUMENT_DESCRIPTION = """
Document containing the list of slash commands available to the Incident Commander (IC)
and participants in the incident conversation.""".replace(
"\n", " "
).strip()
INCIDENT_CONFERENCE_DESCRIPTION = """
Video conference and phone bridge to be used throughout the incident. Password: {{conference_challenge if conference_challenge else 'N/A'}}
""".replace(
"\n", ""
).strip()
INCIDENT_STORAGE_DESCRIPTION = """
Common storage for all incident artifacts and
documents. Add logs, screen captures, or any other data collected during the
investigation to this drive. It is shared with all incident participants.""".replace(
"\n", " "
).strip()
INCIDENT_INVESTIGATION_DOCUMENT_DESCRIPTION = """
This is a document for all incident facts and context. All
incident participants are expected to contribute to this document.
It is shared with all incident participants.""".replace(
"\n", " "
).strip()
INCIDENT_INVESTIGATION_SHEET_DESCRIPTION = """
This is a sheet for tracking impacted assets. All
incident participants are expected to contribute to this sheet.
It is shared with all incident participants.""".replace(
"\n", " "
).strip()
INCIDENT_FAQ_DOCUMENT_DESCRIPTION = """
First time responding to an information security incident? This
document answers common questions encountered when
helping us respond to an incident.""".replace(
"\n", " "
).strip()
INCIDENT_REVIEW_DOCUMENT_DESCRIPTION = """
This document will capture all lessons learned, questions, and action items raised during the incident.""".replace(
"\n", " "
).strip()
INCIDENT_EXECUTIVE_REPORT_DOCUMENT_DESCRIPTION = """
This is a document that contains an executive report about the incident.""".replace(
"\n", " "
).strip()
INCIDENT_DOCUMENT_DESCRIPTIONS = {
INCIDENT_RESOURCE_CONVERSATION_REFERENCE_DOCUMENT: INCIDENT_CONVERSATION_REFERENCE_DOCUMENT_DESCRIPTION,
INCIDENT_RESOURCE_EXECUTIVE_REPORT_DOCUMENT: INCIDENT_EXECUTIVE_REPORT_DOCUMENT_DESCRIPTION,
INCIDENT_RESOURCE_INCIDENT_FAQ_DOCUMENT: INCIDENT_FAQ_DOCUMENT_DESCRIPTION,
INCIDENT_RESOURCE_INCIDENT_REVIEW_DOCUMENT: INCIDENT_REVIEW_DOCUMENT_DESCRIPTION,
INCIDENT_RESOURCE_INVESTIGATION_DOCUMENT: INCIDENT_INVESTIGATION_DOCUMENT_DESCRIPTION,
INCIDENT_RESOURCE_INVESTIGATION_SHEET: INCIDENT_INVESTIGATION_SHEET_DESCRIPTION,
}
INCIDENT_PARTICIPANT_WELCOME_DESCRIPTION = """
You\'re being contacted because we think you may
be able to help us during this information security incident.
Please review the content below and join us in the
incident Slack channel.""".replace(
"\n", " "
).strip()
INCIDENT_WELCOME_CONVERSATION_COPY = """
This is the incident conversation. Please pull in any
individuals you feel may be able to help resolve this incident.""".replace(
"\n", " "
).strip()
INCIDENT_PARTICIPANT_SUGGESTED_READING_DESCRIPTION = """
Dispatch thinks the following documents might be
relevant to this incident.""".replace(
"\n", " "
).strip()
INCIDENT_NOTIFICATION_PURPOSES_FYI = """
This message is for notification purposes only.""".replace(
"\n", " "
).strip()
INCIDENT_CAN_REPORT_REMINDER = """
It's time to send a new CAN report. Go to the Demisto UI and run the
CAN Report playbook from the Playground Work Plan.""".replace(
"\n", " "
).strip()
INCIDENT_VULNERABILITY_DESCRIPTION = """
We are tracking the details of the vulnerability that led to this incident
in the VUL Jira issue linked above.""".replace(
"\n", " "
).strip()
INCIDENT_STABLE_DESCRIPTION = """
The risk has been contained and the incident marked as stable.""".replace(
"\n", " "
).strip()
INCIDENT_CLOSED_DESCRIPTION = """
The incident has been resolved and marked as closed.""".replace(
"\n", " "
).strip()
INCIDENT_TACTICAL_REPORT_DESCRIPTION = """
The following conditions, actions, and needs summarize the current status of the incident.""".replace(
"\n", " "
).strip()
INCIDENT_NEW_ROLE_DESCRIPTION = """
{{assigner_fullname}} has assigned the role of {{assignee_role}} to {{assignee_fullname}}.
Please, contact {{assignee_firstname}} about any questions or concerns.""".replace(
"\n", " "
).strip()
INCIDENT_REPORT_REMINDER_DESCRIPTION = """You have not provided a {{report_type}} for this incident recently.
You can use `{{command}}` in the conversation to assist you in writing one.""".replace(
"\n", " "
).strip()
INCIDENT_STATUS_REMINDER_DESCRIPTION = """You have not updated the status for this incident recently. If the incident has been resolved,
you can use `{{command}}` in the conversation to assist you in closing your incident.""".replace(
"\n", " "
).strip()
INCIDENT_TASK_NEW_DESCRIPTION = """
The following incident task has been created in the incident document.\n\n*Description:* {{task_description}}\n\n*Assignees:* {{task_assignees|join(',')}}"""
INCIDENT_TASK_RESOLVED_DESCRIPTION = """
The following incident task has been resolved in the incident document.\n\n*Description:* {{task_description}}\n\n*Assignees:* {{task_assignees|join(',')}}"""
INCIDENT_WORKFLOW_CREATED_DESCRIPTION = """
A new workflow instance has been created.
\n\n *Creator:* {{instance_creator_name}}
"""
INCIDENT_WORKFLOW_UPDATE_DESCRIPTION = """
This workflow's status has changed from *{{ instance_status_old }}* to *{{ instance_status_new }}*.
\n\n*Workflow Description*: {{workflow_description}}
\n\n *Creator:* {{instance_creator_name}}
"""
INCIDENT_WORKFLOW_COMPLETE_DESCRIPTION = """
This workflow's status has changed from *{{ instance_status_old }}* to *{{ instance_status_new }}*.
\n\n *Workflow Description:* {{workflow_description}}
\n\n *Creator:* {{instance_creator_name}}
{% if instance_artifacts %}
\n\n *Workflow Artifacts:*
\n\n {% for a in instance_artifacts %}- <{{a.weblink}}|{{a.name}}> \n\n{% endfor %}
{% endif %}
"""
INCIDENT_TYPE_CHANGE_DESCRIPTION = """
The incident type has been changed from *{{ incident_type_old }}* to *{{ incident_type_new }}*."""
INCIDENT_STATUS_CHANGE_DESCRIPTION = """
The incident status has been changed from *{{ incident_status_old }}* to *{{ incident_status_new }}*."""
INCIDENT_PRIORITY_CHANGE_DESCRIPTION = """
The incident priority has been changed from *{{ incident_priority_old }}* to *{{ incident_priority_new }}*."""
INCIDENT_NAME_WITH_ENGAGEMENT = {
"title": "{{name}} Incident Notification",
"title_link": "{{ticket_weblink}}",
"text": INCIDENT_NOTIFICATION_PURPOSES_FYI,
"button_text": "Join Incident",
"button_value": "{{incident_id}}",
"button_action": ConversationButtonActions.invite_user,
}
INCIDENT_NAME = {
"title": "{{name}} Incident Notification",
"title_link": "{{ticket_weblink}}",
"text": INCIDENT_NOTIFICATION_PURPOSES_FYI,
}
INCIDENT_TITLE = {"title": "Incident Title", "text": "{{title}}"}
INCIDENT_DESCRIPTION = {"title": "Incident Description", "text": "{{description}}"}
INCIDENT_STATUS = {
"title": "Incident Status - {{status}}",
"status_mapping": INCIDENT_STATUS_DESCRIPTIONS,
}
INCIDENT_TYPE = {"title": "Incident Type - {{type}}", "text": "{{type_description}}"}
INCIDENT_PRIORITY = {
"title": "Incident Priority - {{priority}}",
"text": "{{priority_description}}",
}
INCIDENT_PRIORITY_FYI = {
"title": "Incident Priority - {{priority}}",
"text": "{{priority_description}}",
}
INCIDENT_COMMANDER = {
"title": "Incident Commander - {{commander_fullname}}",
"title_link": "{{commander_weblink}}",
"text": INCIDENT_COMMANDER_DESCRIPTION,
}
INCIDENT_CONFERENCE = {
"title": "Incident Conference",
"title_link": "{{conference_weblink}}",
"text": INCIDENT_CONFERENCE_DESCRIPTION,
}
INCIDENT_STORAGE = {
"title": "Incident Storage",
"title_link": "{{storage_weblink}}",
"text": INCIDENT_STORAGE_DESCRIPTION,
}
INCIDENT_CONVERSATION_COMMANDS_REFERENCE_DOCUMENT = {
"title": "Incident Conversation Commands Reference Document",
"title_link": "{{conversation_commands_reference_document_weblink}}",
"text": INCIDENT_CONVERSATION_REFERENCE_DOCUMENT_DESCRIPTION,
}
INCIDENT_INVESTIGATION_DOCUMENT = {
"title": "Incident Investigation Document",
"title_link": "{{document_weblink}}",
"text": INCIDENT_INVESTIGATION_DOCUMENT_DESCRIPTION,
}
INCIDENT_INVESTIGATION_SHEET = {
"title": "Incident Investigation Sheet",
"title_link": "{{sheet_weblink}}",
"text": INCIDENT_INVESTIGATION_SHEET_DESCRIPTION,
}
INCIDENT_REVIEW_DOCUMENT = {
"title": "Incident Review Document",
"title_link": "{{review_document_weblink}}",
"text": INCIDENT_REVIEW_DOCUMENT_DESCRIPTION,
}
INCIDENT_FAQ_DOCUMENT = {
"title": "Incident FAQ Document",
"title_link": "{{faq_weblink}}",
"text": INCIDENT_FAQ_DOCUMENT_DESCRIPTION,
}
INCIDENT_TYPE_CHANGE = {"title": "Incident Type Change", "text": INCIDENT_TYPE_CHANGE_DESCRIPTION}
INCIDENT_STATUS_CHANGE = {
"title": "Incident Status Change",
"text": INCIDENT_STATUS_CHANGE_DESCRIPTION,
}
INCIDENT_PRIORITY_CHANGE = {
"title": "Incident Priority Change",
"text": INCIDENT_PRIORITY_CHANGE_DESCRIPTION,
}
INCIDENT_PARTICIPANT_SUGGESTED_READING_ITEM = {
"title": "{{name}}",
"title_link": "{{weblink}}",
"text": "{{description}}",
}
INCIDENT_PARTICIPANT_WELCOME = {
"title": "Welcome to {{name}}",
"title_link": "{{ticket_weblink}}",
"text": INCIDENT_PARTICIPANT_WELCOME_DESCRIPTION,
}
INCIDENT_PARTICIPANT_WELCOME_MESSAGE = [
INCIDENT_PARTICIPANT_WELCOME,
INCIDENT_TITLE,
INCIDENT_STATUS,
INCIDENT_TYPE,
INCIDENT_PRIORITY,
INCIDENT_COMMANDER,
INCIDENT_INVESTIGATION_DOCUMENT,
INCIDENT_STORAGE,
INCIDENT_CONFERENCE,
INCIDENT_CONVERSATION_COMMANDS_REFERENCE_DOCUMENT,
INCIDENT_FAQ_DOCUMENT,
]
INCIDENT_RESOURCES_MESSAGE = [
INCIDENT_COMMANDER,
INCIDENT_INVESTIGATION_DOCUMENT,
INCIDENT_REVIEW_DOCUMENT,
INCIDENT_STORAGE,
INCIDENT_CONFERENCE,
INCIDENT_CONVERSATION_COMMANDS_REFERENCE_DOCUMENT,
INCIDENT_FAQ_DOCUMENT,
]
INCIDENT_NOTIFICATION_COMMON = [INCIDENT_TITLE]
INCIDENT_NOTIFICATION = INCIDENT_NOTIFICATION_COMMON.copy()
INCIDENT_NOTIFICATION.extend(
[INCIDENT_STATUS, INCIDENT_TYPE, INCIDENT_PRIORITY_FYI, INCIDENT_COMMANDER]
)
INCIDENT_TACTICAL_REPORT = [
{"title": "Incident Tactical Report", "text": INCIDENT_TACTICAL_REPORT_DESCRIPTION},
{"title": "Conditions", "text": "{{conditions}}"},
{"title": "Actions", "text": "{{actions}}"},
{"title": "Needs", "text": "{{needs}}"},
]
INCIDENT_EXECUTIVE_REPORT = [
{"title": "Incident Title", "text": "{{title}}"},
{"title": "Current Status", "text": "{{current_status}}"},
{"title": "Overview", "text": "{{overview}}"},
{"title": "Next Steps", "text": "{{next_steps}}"},
]
INCIDENT_REPORT_REMINDER = [
{
"title": "{{name}} Incident - {{report_type}} Reminder",
"title_link": "{{ticket_weblink}}",
"text": INCIDENT_REPORT_REMINDER_DESCRIPTION,
},
INCIDENT_TITLE,
]
INCIDENT_STATUS_REMINDER = [
{
"title": "{{name}} Incident - Status Reminder",
"title_link": "{{ticket_weblink}}",
"text": INCIDENT_STATUS_REMINDER_DESCRIPTION,
},
INCIDENT_TITLE,
INCIDENT_STATUS,
]
INCIDENT_TASK_REMINDER = [
{"title": "Incident - {{ name }}", "text": "{{ title }}"},
{"title": "Creator", "text": "{{ creator }}"},
{"title": "Description", "text": "{{ description }}"},
{"title": "Priority", "text": "{{ priority }}"},
{"title": "Created At", "text": "", "datetime": "{{ created_at}}"},
{"title": "Resolve By", "text": "", "datetime": "{{ resolve_by }}"},
{"title": "Link", "text": "{{ weblink }}"},
]
INCIDENT_NEW_ROLE_NOTIFICATION = [
{
"title": "New {{assignee_role}} - {{assignee_fullname}}",
"title_link": "{{assignee_weblink}}",
"text": INCIDENT_NEW_ROLE_DESCRIPTION,
}
]
INCIDENT_TASK_NEW_NOTIFICATION = [
{
"title": "New Incident Task",
"title_link": "{{task_weblink}}",
"text": INCIDENT_TASK_NEW_DESCRIPTION,
}
]
INCIDENT_TASK_RESOLVED_NOTIFICATION = [
{
"title": "Resolved Incident Task",
"title_link": "{{task_weblink}}",
"text": INCIDENT_TASK_RESOLVED_DESCRIPTION,
}
]
INCIDENT_WORKFLOW_CREATED_NOTIFICATION = [
{
"title": "Workflow Created - {{workflow_name}}",
"text": INCIDENT_WORKFLOW_CREATED_DESCRIPTION,
}
]
INCIDENT_WORKFLOW_UPDATE_NOTIFICATION = [
{
"title": "Workflow Status Change - {{workflow_name}}",
"title_link": "{{instance_weblink}}",
"text": INCIDENT_WORKFLOW_UPDATE_DESCRIPTION,
}
]
INCIDENT_WORKFLOW_COMPLETE_NOTIFICATION = [
{
"title": "Workflow Completed - {{workflow_name}}",
"title_link": "{{instance_weblink}}",
"text": INCIDENT_WORKFLOW_COMPLETE_DESCRIPTION,
}
]
INCIDENT_COMMANDER_READDED_NOTIFICATION = [
{"title": "Incident Commander Re-Added", "text": INCIDENT_COMMANDER_READDED_DESCRIPTION}
]
def render_message_template(message_template: List[dict], **kwargs):
    """Renders the jinja data included in the template itself."""
    rendered = []
    for block in copy.deepcopy(message_template):
        # A status mapping overrides the text with the status-specific blurb.
        if block.get("status_mapping"):
            block["text"] = block["status_mapping"][kwargs["status"]]

        if block.get("datetime"):
            block["datetime"] = Template(block["datetime"]).render(**kwargs)

        block["text"] = Template(block["text"]).render(**kwargs)
        block["title"] = Template(block["title"]).render(**kwargs)

        if block.get("title_link"):
            link = Template(block["title_link"]).render(**kwargs)
            block["title_link"] = link
            # Jinja renders a missing value as the string "None"; either that
            # or an empty link means the resource doesn't exist, so drop the
            # whole block.
            if link == "None" or not link:
                continue

        if block.get("button_text"):
            block["button_text"] = Template(block["button_text"]).render(**kwargs)
        if block.get("button_value"):
            block["button_value"] = Template(block["button_value"]).render(**kwargs)

        rendered.append(block)
    return rendered
| 33.210526 | 158 | 0.712305 | import copy
from enum import Enum
from jinja2 import Template
from typing import List
from dispatch.conversation.enums import ConversationButtonActions
from dispatch.incident.enums import IncidentStatus
from .config import (
DISPATCH_UI_URL,
INCIDENT_RESOURCE_CONVERSATION_REFERENCE_DOCUMENT,
INCIDENT_RESOURCE_EXECUTIVE_REPORT_DOCUMENT,
INCIDENT_RESOURCE_INCIDENT_FAQ_DOCUMENT,
INCIDENT_RESOURCE_INCIDENT_REVIEW_DOCUMENT,
INCIDENT_RESOURCE_INVESTIGATION_DOCUMENT,
INCIDENT_RESOURCE_INVESTIGATION_SHEET,
)
class MessageType(str, Enum):
incident_daily_summary = "incident-daily-summary"
incident_daily_summary_no_incidents = "incident-daily-summary-no-incidents"
incident_executive_report = "incident-executive-report"
incident_notification = "incident-notification"
incident_participant_welcome = "incident-participant-welcome"
incident_resources_message = "incident-resources-message"
incident_tactical_report = "incident-tactical-report"
incident_task_list = "incident-task-list"
incident_task_reminder = "incident-task-reminder"
incident_status_reminder = "incident-status-reminder"
incident_participant_suggested_reading = "incident-participant-suggested-reading"
INCIDENT_STATUS_DESCRIPTIONS = {
IncidentStatus.active: "This incident is under active investigation.",
IncidentStatus.stable: "This incident is stable, the bulk of the investigation has been completed or most of the risk has been mitigated.",
IncidentStatus.closed: "This no longer requires additional involvement, long term incident action items have been assigned to their respective owners.",
}
INCIDENT_TASK_REMINDER_DESCRIPTION = """
You are assigned to the following incident tasks.
This is a reminder that these tasks have passed their due date.
Please review and update them as appropriate. Resolving them will stop the reminders.""".replace(
"\n", " "
).strip()
INCIDENT_TASK_LIST_DESCRIPTION = """The following are open incident tasks."""
INCIDENT_DAILY_SUMMARY_DESCRIPTION = """
Daily Incidents Summary""".replace(
"\n", " "
).strip()
INCIDENT_DAILY_SUMMARY_ACTIVE_INCIDENTS_DESCRIPTION = f"""
Active Incidents (<{DISPATCH_UI_URL}/incidents/status|Details>)""".replace(
"\n", " "
).strip()
INCIDENT_DAILY_SUMMARY_NO_ACTIVE_INCIDENTS_DESCRIPTION = """
There are no active incidents at this moment.""".replace(
"\n", " "
).strip()
INCIDENT_DAILY_SUMMARY_STABLE_CLOSED_INCIDENTS_DESCRIPTION = """
Stable or Closed Incidents (last 24 hours)""".replace(
"\n", " "
).strip()
INCIDENT_DAILY_SUMMARY_NO_STABLE_CLOSED_INCIDENTS_DESCRIPTION = """
There are no stable or closed incidents in the last 24 hours.""".replace(
"\n", " "
).strip()
INCIDENT_COMMANDER_DESCRIPTION = """
The Incident Commander (IC) is responsible for
knowing the full context of the incident.
Contact them about any questions or concerns.""".replace(
"\n", " "
).strip()
INCIDENT_COMMANDER_READDED_DESCRIPTION = """
{{ commander_fullname }} (Incident Commander) has been re-added to the conversation.
Please, handoff the Incident Commander role before leaving the conversation.""".replace(
"\n", " "
).strip()
INCIDENT_TICKET_DESCRIPTION = """
Ticket for tracking purposes. It contains a description of
the incident and links to resources.""".replace(
"\n", " "
).strip()
INCIDENT_CONVERSATION_DESCRIPTION = """
Private conversation for real-time discussion. All incident participants get added to it.
""".replace(
"\n", " "
).strip()
INCIDENT_CONVERSATION_REFERENCE_DOCUMENT_DESCRIPTION = """
Document containing the list of slash commands available to the Incident Commander (IC)
and participants in the incident conversation.""".replace(
"\n", " "
).strip()
INCIDENT_CONFERENCE_DESCRIPTION = """
Video conference and phone bridge to be used throughout the incident. Password: {{conference_challenge if conference_challenge else 'N/A'}}
""".replace(
"\n", ""
).strip()
INCIDENT_STORAGE_DESCRIPTION = """
Common storage for all incident artifacts and
documents. Add logs, screen captures, or any other data collected during the
investigation to this drive. It is shared with all incident participants.""".replace(
"\n", " "
).strip()
INCIDENT_INVESTIGATION_DOCUMENT_DESCRIPTION = """
This is a document for all incident facts and context. All
incident participants are expected to contribute to this document.
It is shared with all incident participants.""".replace(
"\n", " "
).strip()
INCIDENT_INVESTIGATION_SHEET_DESCRIPTION = """
This is a sheet for tracking impacted assets. All
incident participants are expected to contribute to this sheet.
It is shared with all incident participants.""".replace(
"\n", " "
).strip()
INCIDENT_FAQ_DOCUMENT_DESCRIPTION = """
First time responding to an information security incident? This
document answers common questions encountered when
helping us respond to an incident.""".replace(
"\n", " "
).strip()
INCIDENT_REVIEW_DOCUMENT_DESCRIPTION = """
This document will capture all lessons learned, questions, and action items raised during the incident.""".replace(
"\n", " "
).strip()
INCIDENT_EXECUTIVE_REPORT_DOCUMENT_DESCRIPTION = """
This is a document that contains an executive report about the incident.""".replace(
"\n", " "
).strip()
INCIDENT_DOCUMENT_DESCRIPTIONS = {
INCIDENT_RESOURCE_CONVERSATION_REFERENCE_DOCUMENT: INCIDENT_CONVERSATION_REFERENCE_DOCUMENT_DESCRIPTION,
INCIDENT_RESOURCE_EXECUTIVE_REPORT_DOCUMENT: INCIDENT_EXECUTIVE_REPORT_DOCUMENT_DESCRIPTION,
INCIDENT_RESOURCE_INCIDENT_FAQ_DOCUMENT: INCIDENT_FAQ_DOCUMENT_DESCRIPTION,
INCIDENT_RESOURCE_INCIDENT_REVIEW_DOCUMENT: INCIDENT_REVIEW_DOCUMENT_DESCRIPTION,
INCIDENT_RESOURCE_INVESTIGATION_DOCUMENT: INCIDENT_INVESTIGATION_DOCUMENT_DESCRIPTION,
INCIDENT_RESOURCE_INVESTIGATION_SHEET: INCIDENT_INVESTIGATION_SHEET_DESCRIPTION,
}
INCIDENT_PARTICIPANT_WELCOME_DESCRIPTION = """
You\'re being contacted because we think you may
be able to help us during this information security incident.
Please review the content below and join us in the
incident Slack channel.""".replace(
"\n", " "
).strip()
INCIDENT_WELCOME_CONVERSATION_COPY = """
This is the incident conversation. Please pull in any
individuals you feel may be able to help resolve this incident.""".replace(
"\n", " "
).strip()
INCIDENT_PARTICIPANT_SUGGESTED_READING_DESCRIPTION = """
Dispatch thinks the following documents might be
relevant to this incident.""".replace(
"\n", " "
).strip()
INCIDENT_NOTIFICATION_PURPOSES_FYI = """
This message is for notification purposes only.""".replace(
"\n", " "
).strip()
INCIDENT_CAN_REPORT_REMINDER = """
It's time to send a new CAN report. Go to the Demisto UI and run the
CAN Report playbook from the Playground Work Plan.""".replace(
"\n", " "
).strip()
INCIDENT_VULNERABILITY_DESCRIPTION = """
We are tracking the details of the vulnerability that led to this incident
in the VUL Jira issue linked above.""".replace(
"\n", " "
).strip()
INCIDENT_STABLE_DESCRIPTION = """
The risk has been contained and the incident marked as stable.""".replace(
"\n", " "
).strip()
INCIDENT_CLOSED_DESCRIPTION = """
The incident has been resolved and marked as closed.""".replace(
"\n", " "
).strip()
INCIDENT_TACTICAL_REPORT_DESCRIPTION = """
The following conditions, actions, and needs summarize the current status of the incident.""".replace(
"\n", " "
).strip()
INCIDENT_NEW_ROLE_DESCRIPTION = """
{{assigner_fullname}} has assigned the role of {{assignee_role}} to {{assignee_fullname}}.
Please, contact {{assignee_firstname}} about any questions or concerns.""".replace(
"\n", " "
).strip()
INCIDENT_REPORT_REMINDER_DESCRIPTION = """You have not provided a {{report_type}} for this incident recently.
You can use `{{command}}` in the conversation to assist you in writing one.""".replace(
"\n", " "
).strip()
INCIDENT_STATUS_REMINDER_DESCRIPTION = """You have not updated the status for this incident recently. If the incident has been resolved,
you can use `{{command}}` in the conversation to assist you in closing your incident.""".replace(
"\n", " "
).strip()
INCIDENT_TASK_NEW_DESCRIPTION = """
The following incident task has been created in the incident document.\n\n*Description:* {{task_description}}\n\n*Assignees:* {{task_assignees|join(',')}}"""
INCIDENT_TASK_RESOLVED_DESCRIPTION = """
The following incident task has been resolved in the incident document.\n\n*Description:* {{task_description}}\n\n*Assignees:* {{task_assignees|join(',')}}"""
INCIDENT_WORKFLOW_CREATED_DESCRIPTION = """
A new workflow instance has been created.
\n\n *Creator:* {{instance_creator_name}}
"""
INCIDENT_WORKFLOW_UPDATE_DESCRIPTION = """
This workflow's status has changed from *{{ instance_status_old }}* to *{{ instance_status_new }}*.
\n\n*Workflow Description*: {{workflow_description}}
\n\n *Creator:* {{instance_creator_name}}
"""
INCIDENT_WORKFLOW_COMPLETE_DESCRIPTION = """
This workflow's status has changed from *{{ instance_status_old }}* to *{{ instance_status_new }}*.
\n\n *Workflow Description:* {{workflow_description}}
\n\n *Creator:* {{instance_creator_name}}
{% if instance_artifacts %}
\n\n *Workflow Artifacts:*
\n\n {% for a in instance_artifacts %}- <{{a.weblink}}|{{a.name}}> \n\n{% endfor %}
{% endif %}
"""
INCIDENT_TYPE_CHANGE_DESCRIPTION = """
The incident type has been changed from *{{ incident_type_old }}* to *{{ incident_type_new }}*."""
INCIDENT_STATUS_CHANGE_DESCRIPTION = """
The incident status has been changed from *{{ incident_status_old }}* to *{{ incident_status_new }}*."""
INCIDENT_PRIORITY_CHANGE_DESCRIPTION = """
The incident priority has been changed from *{{ incident_priority_old }}* to *{{ incident_priority_new }}*."""
INCIDENT_NAME_WITH_ENGAGEMENT = {
"title": "{{name}} Incident Notification",
"title_link": "{{ticket_weblink}}",
"text": INCIDENT_NOTIFICATION_PURPOSES_FYI,
"button_text": "Join Incident",
"button_value": "{{incident_id}}",
"button_action": ConversationButtonActions.invite_user,
}
INCIDENT_NAME = {
"title": "{{name}} Incident Notification",
"title_link": "{{ticket_weblink}}",
"text": INCIDENT_NOTIFICATION_PURPOSES_FYI,
}
INCIDENT_TITLE = {"title": "Incident Title", "text": "{{title}}"}
INCIDENT_DESCRIPTION = {"title": "Incident Description", "text": "{{description}}"}
INCIDENT_STATUS = {
"title": "Incident Status - {{status}}",
"status_mapping": INCIDENT_STATUS_DESCRIPTIONS,
}
INCIDENT_TYPE = {"title": "Incident Type - {{type}}", "text": "{{type_description}}"}
INCIDENT_PRIORITY = {
"title": "Incident Priority - {{priority}}",
"text": "{{priority_description}}",
}
INCIDENT_PRIORITY_FYI = {
"title": "Incident Priority - {{priority}}",
"text": "{{priority_description}}",
}
INCIDENT_COMMANDER = {
"title": "Incident Commander - {{commander_fullname}}",
"title_link": "{{commander_weblink}}",
"text": INCIDENT_COMMANDER_DESCRIPTION,
}
INCIDENT_CONFERENCE = {
"title": "Incident Conference",
"title_link": "{{conference_weblink}}",
"text": INCIDENT_CONFERENCE_DESCRIPTION,
}
INCIDENT_STORAGE = {
"title": "Incident Storage",
"title_link": "{{storage_weblink}}",
"text": INCIDENT_STORAGE_DESCRIPTION,
}
INCIDENT_CONVERSATION_COMMANDS_REFERENCE_DOCUMENT = {
"title": "Incident Conversation Commands Reference Document",
"title_link": "{{conversation_commands_reference_document_weblink}}",
"text": INCIDENT_CONVERSATION_REFERENCE_DOCUMENT_DESCRIPTION,
}
INCIDENT_INVESTIGATION_DOCUMENT = {
"title": "Incident Investigation Document",
"title_link": "{{document_weblink}}",
"text": INCIDENT_INVESTIGATION_DOCUMENT_DESCRIPTION,
}
INCIDENT_INVESTIGATION_SHEET = {
"title": "Incident Investigation Sheet",
"title_link": "{{sheet_weblink}}",
"text": INCIDENT_INVESTIGATION_SHEET_DESCRIPTION,
}
INCIDENT_REVIEW_DOCUMENT = {
"title": "Incident Review Document",
"title_link": "{{review_document_weblink}}",
"text": INCIDENT_REVIEW_DOCUMENT_DESCRIPTION,
}
INCIDENT_FAQ_DOCUMENT = {
"title": "Incident FAQ Document",
"title_link": "{{faq_weblink}}",
"text": INCIDENT_FAQ_DOCUMENT_DESCRIPTION,
}
INCIDENT_TYPE_CHANGE = {"title": "Incident Type Change", "text": INCIDENT_TYPE_CHANGE_DESCRIPTION}
INCIDENT_STATUS_CHANGE = {
"title": "Incident Status Change",
"text": INCIDENT_STATUS_CHANGE_DESCRIPTION,
}
INCIDENT_PRIORITY_CHANGE = {
"title": "Incident Priority Change",
"text": INCIDENT_PRIORITY_CHANGE_DESCRIPTION,
}
INCIDENT_PARTICIPANT_SUGGESTED_READING_ITEM = {
"title": "{{name}}",
"title_link": "{{weblink}}",
"text": "{{description}}",
}
INCIDENT_PARTICIPANT_WELCOME = {
"title": "Welcome to {{name}}",
"title_link": "{{ticket_weblink}}",
"text": INCIDENT_PARTICIPANT_WELCOME_DESCRIPTION,
}
INCIDENT_PARTICIPANT_WELCOME_MESSAGE = [
INCIDENT_PARTICIPANT_WELCOME,
INCIDENT_TITLE,
INCIDENT_STATUS,
INCIDENT_TYPE,
INCIDENT_PRIORITY,
INCIDENT_COMMANDER,
INCIDENT_INVESTIGATION_DOCUMENT,
INCIDENT_STORAGE,
INCIDENT_CONFERENCE,
INCIDENT_CONVERSATION_COMMANDS_REFERENCE_DOCUMENT,
INCIDENT_FAQ_DOCUMENT,
]
INCIDENT_RESOURCES_MESSAGE = [
INCIDENT_COMMANDER,
INCIDENT_INVESTIGATION_DOCUMENT,
INCIDENT_REVIEW_DOCUMENT,
INCIDENT_STORAGE,
INCIDENT_CONFERENCE,
INCIDENT_CONVERSATION_COMMANDS_REFERENCE_DOCUMENT,
INCIDENT_FAQ_DOCUMENT,
]
INCIDENT_NOTIFICATION_COMMON = [INCIDENT_TITLE]
INCIDENT_NOTIFICATION = INCIDENT_NOTIFICATION_COMMON.copy()
INCIDENT_NOTIFICATION.extend(
[INCIDENT_STATUS, INCIDENT_TYPE, INCIDENT_PRIORITY_FYI, INCIDENT_COMMANDER]
)
INCIDENT_TACTICAL_REPORT = [
{"title": "Incident Tactical Report", "text": INCIDENT_TACTICAL_REPORT_DESCRIPTION},
{"title": "Conditions", "text": "{{conditions}}"},
{"title": "Actions", "text": "{{actions}}"},
{"title": "Needs", "text": "{{needs}}"},
]
INCIDENT_EXECUTIVE_REPORT = [
{"title": "Incident Title", "text": "{{title}}"},
{"title": "Current Status", "text": "{{current_status}}"},
{"title": "Overview", "text": "{{overview}}"},
{"title": "Next Steps", "text": "{{next_steps}}"},
]
INCIDENT_REPORT_REMINDER = [
{
"title": "{{name}} Incident - {{report_type}} Reminder",
"title_link": "{{ticket_weblink}}",
"text": INCIDENT_REPORT_REMINDER_DESCRIPTION,
},
INCIDENT_TITLE,
]
INCIDENT_STATUS_REMINDER = [
{
"title": "{{name}} Incident - Status Reminder",
"title_link": "{{ticket_weblink}}",
"text": INCIDENT_STATUS_REMINDER_DESCRIPTION,
},
INCIDENT_TITLE,
INCIDENT_STATUS,
]
INCIDENT_TASK_REMINDER = [
{"title": "Incident - {{ name }}", "text": "{{ title }}"},
{"title": "Creator", "text": "{{ creator }}"},
{"title": "Description", "text": "{{ description }}"},
{"title": "Priority", "text": "{{ priority }}"},
{"title": "Created At", "text": "", "datetime": "{{ created_at}}"},
{"title": "Resolve By", "text": "", "datetime": "{{ resolve_by }}"},
{"title": "Link", "text": "{{ weblink }}"},
]
INCIDENT_NEW_ROLE_NOTIFICATION = [
{
"title": "New {{assignee_role}} - {{assignee_fullname}}",
"title_link": "{{assignee_weblink}}",
"text": INCIDENT_NEW_ROLE_DESCRIPTION,
}
]
INCIDENT_TASK_NEW_NOTIFICATION = [
{
"title": "New Incident Task",
"title_link": "{{task_weblink}}",
"text": INCIDENT_TASK_NEW_DESCRIPTION,
}
]
INCIDENT_TASK_RESOLVED_NOTIFICATION = [
{
"title": "Resolved Incident Task",
"title_link": "{{task_weblink}}",
"text": INCIDENT_TASK_RESOLVED_DESCRIPTION,
}
]
INCIDENT_WORKFLOW_CREATED_NOTIFICATION = [
{
"title": "Workflow Created - {{workflow_name}}",
"text": INCIDENT_WORKFLOW_CREATED_DESCRIPTION,
}
]
INCIDENT_WORKFLOW_UPDATE_NOTIFICATION = [
{
"title": "Workflow Status Change - {{workflow_name}}",
"title_link": "{{instance_weblink}}",
"text": INCIDENT_WORKFLOW_UPDATE_DESCRIPTION,
}
]
INCIDENT_WORKFLOW_COMPLETE_NOTIFICATION = [
{
"title": "Workflow Completed - {{workflow_name}}",
"title_link": "{{instance_weblink}}",
"text": INCIDENT_WORKFLOW_COMPLETE_DESCRIPTION,
}
]
INCIDENT_COMMANDER_READDED_NOTIFICATION = [
{"title": "Incident Commander Re-Added", "text": INCIDENT_COMMANDER_READDED_DESCRIPTION}
]
def render_message_template(message_template: List[dict], **kwargs):
data = []
new_copy = copy.deepcopy(message_template)
for d in new_copy:
if d.get("status_mapping"):
d["text"] = d["status_mapping"][kwargs["status"]]
if d.get("datetime"):
d["datetime"] = Template(d["datetime"]).render(**kwargs)
d["text"] = Template(d["text"]).render(**kwargs)
d["title"] = Template(d["title"]).render(**kwargs)
if d.get("title_link"):
d["title_link"] = Template(d["title_link"]).render(**kwargs)
if d["title_link"] == "None":
continue
if not d["title_link"]:
continue
if d.get("button_text"):
d["button_text"] = Template(d["button_text"]).render(**kwargs)
if d.get("button_value"):
d["button_value"] = Template(d["button_value"]).render(**kwargs)
data.append(d)
return data
| true | true |
79013df3b6da8fda656da7100801368f656631f3 | 8,946 | py | Python | qtum_electrum/plugins/hw_wallet/qt.py | mikehash/qtum-electrum | f6d9d8babd4a1468d8f0a289b3e2b533ac04d040 | [
"MIT"
] | null | null | null | qtum_electrum/plugins/hw_wallet/qt.py | mikehash/qtum-electrum | f6d9d8babd4a1468d8f0a289b3e2b533ac04d040 | [
"MIT"
] | null | null | null | qtum_electrum/plugins/hw_wallet/qt.py | mikehash/qtum-electrum | f6d9d8babd4a1468d8f0a289b3e2b533ac04d040 | [
"MIT"
] | null | null | null | #!/usr/bin/env python2
# -*- mode: python -*-
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2016 The Electrum developers
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import threading
from PyQt5.QtWidgets import QVBoxLayout, QLabel
from qtum_electrum.gui.qt.password_dialog import PasswordLayout, PW_PASSPHRASE
from qtum_electrum.gui.qt.util import *
from qtum_electrum.i18n import _
from qtum_electrum.util import PrintError
# The trickiest thing about this handler was getting windows properly
# parented on MacOSX.
class QtHandlerBase(QObject, PrintError):
    '''An interface between the GUI (here, QT) and the device handling
    logic for handling I/O.

    The public methods (query_choice, yes_no_question, get_word,
    get_passphrase, ...) are called from a non-GUI worker thread: each
    clears ``self.done``, emits a Qt signal so the connected slot runs in
    the GUI thread, then blocks on ``self.done`` until the slot stores
    its result on ``self`` and sets the event.
    '''

    # Signal payloads mirror the parameters of the connected slots (see __init__).
    passphrase_signal = pyqtSignal(object, object)
    message_signal = pyqtSignal(object, object)
    error_signal = pyqtSignal(object, object)
    word_signal = pyqtSignal(object)
    clear_signal = pyqtSignal()
    query_signal = pyqtSignal(object, object)
    yes_no_signal = pyqtSignal(object)
    status_signal = pyqtSignal(object)

    def __init__(self, win, device):
        # win: the wallet window (supplies dialogs/questions); device: device name for titles.
        super(QtHandlerBase, self).__init__()
        self.clear_signal.connect(self.clear_dialog)
        self.error_signal.connect(self.error_dialog)
        self.message_signal.connect(self.message_dialog)
        self.passphrase_signal.connect(self.passphrase_dialog)
        self.word_signal.connect(self.word_dialog)
        self.query_signal.connect(self.win_query_choice)
        self.yes_no_signal.connect(self.win_yes_no_question)
        self.status_signal.connect(self._update_status)
        self.win = win
        self.device = device
        self.dialog = None
        # Synchronizes the worker thread with the GUI-thread slots above.
        self.done = threading.Event()

    def top_level_window(self):
        return self.win.top_level_window()

    def update_status(self, paired):
        # Thread-safe: marshals onto the GUI thread via status_signal.
        self.status_signal.emit(paired)

    def _update_status(self, paired):
        # GUI thread.  Swap the status-bar button icon to reflect pairing state.
        if hasattr(self, 'button'):
            button = self.button
            icon_name = button.icon_paired if paired else button.icon_unpaired
            button.setIcon(read_QIcon(icon_name))

    def query_choice(self, msg, labels):
        # Blocking; returns the user's choice (set by win_query_choice).
        self.done.clear()
        self.query_signal.emit(msg, labels)
        self.done.wait()
        return self.choice

    def yes_no_question(self, msg):
        # Blocking; returns True if the user answered yes.
        self.done.clear()
        self.yes_no_signal.emit(msg)
        self.done.wait()
        return self.ok

    def show_message(self, msg, on_cancel=None):
        # Non-blocking: opens the "check your device" dialog in the GUI thread.
        self.message_signal.emit(msg, on_cancel)

    def show_error(self, msg, blocking=False):
        # If blocking, wait until the user dismisses the error dialog.
        self.done.clear()
        self.error_signal.emit(msg, blocking)
        if blocking:
            self.done.wait()

    def finished(self):
        # Device interaction finished: close any dialog still open.
        self.clear_signal.emit()

    def get_word(self, msg):
        # Blocking; prompts for a single word (set by word_dialog).
        self.done.clear()
        self.word_signal.emit(msg)
        self.done.wait()
        return self.word

    def get_passphrase(self, msg, confirm):
        # Blocking; returns the passphrase, or None if the dialog was cancelled.
        self.done.clear()
        self.passphrase_signal.emit(msg, confirm)
        self.done.wait()
        return self.passphrase

    def passphrase_dialog(self, msg, confirm):
        # GUI thread.
        # If confirm is true, require the user to enter the passphrase twice
        parent = self.top_level_window()
        d = WindowModalDialog(parent, _("Enter Passphrase"))
        if confirm:
            OK_button = OkButton(d)
            playout = PasswordLayout(msg=msg, kind=PW_PASSPHRASE, OK_button=OK_button)
            vbox = QVBoxLayout()
            vbox.addLayout(playout.layout())
            vbox.addLayout(Buttons(CancelButton(d), OK_button))
            d.setLayout(vbox)
            passphrase = playout.new_password() if d.exec_() else None
        else:
            pw = QLineEdit()
            pw.setEchoMode(2)  # 2 == QLineEdit.Password (mask the input)
            pw.setMinimumWidth(200)
            vbox = QVBoxLayout()
            vbox.addWidget(WWLabel(msg))
            vbox.addWidget(pw)
            vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
            d.setLayout(vbox)
            passphrase = pw.text() if d.exec_() else None
        self.passphrase = passphrase
        self.done.set()  # release the worker blocked in get_passphrase()

    def word_dialog(self, msg):
        # GUI thread.  Single-word prompt; result is stored in self.word.
        dialog = WindowModalDialog(self.top_level_window(), "")
        hbox = QHBoxLayout(dialog)
        hbox.addWidget(QLabel(msg))
        text = QLineEdit()
        text.setMaximumWidth(100)
        text.returnPressed.connect(dialog.accept)
        hbox.addWidget(text)
        hbox.addStretch(1)
        dialog.exec_()  # Firmware cannot handle cancellation
        self.word = text.text()
        self.done.set()

    def message_dialog(self, msg, on_cancel):
        # GUI thread.
        # Called more than once during signing, to confirm output and fee
        self.clear_dialog()
        title = _('Please check your %s device') % self.device
        self.dialog = dialog = WindowModalDialog(self.top_level_window(), title)
        l = QLabel(msg)
        vbox = QVBoxLayout(dialog)
        vbox.addWidget(l)
        if on_cancel:
            dialog.rejected.connect(on_cancel)
        vbox.addLayout(Buttons(CancelButton(dialog)))
        dialog.show()

    def error_dialog(self, msg, blocking):
        # GUI thread.  Shows the error; releases the waiting worker if blocking.
        self.win.show_error(msg, parent=self.top_level_window())
        if blocking:
            self.done.set()

    def clear_dialog(self):
        # GUI thread.  Close and forget the current message dialog, if any.
        if self.dialog:
            self.dialog.accept()
            self.dialog = None

    def win_query_choice(self, msg, labels):
        # GUI thread.  Stores the choice and releases the waiting worker.
        self.choice = self.win.query_choice(msg, labels)
        self.done.set()

    def win_yes_no_question(self, msg):
        # GUI thread.  Stores the answer and releases the waiting worker.
        self.ok = self.win.question(msg)
        self.done.set()
from qtum_electrum.plugin import hook
from qtum_electrum.util import UserCancelled
from qtum_electrum.gui.qt.main_window import StatusBarButton
class QtPluginBase(object):
    # Qt-side base class for hardware-wallet plugins.  Subclasses appear to
    # provide: keystore_class, libraries_available, device, icon_paired,
    # icon_unpaired, create_handler(), get_client(), device_manager(),
    # show_address() -- inferred from attribute use below; confirm against
    # concrete plugin subclasses.

    @hook
    def load_wallet(self, wallet, window):
        """Wire up a status-bar button, handler and worker thread for every
        keystore of this plugin's type found in the wallet."""
        for keystore in wallet.get_keystores():
            if not isinstance(keystore, self.keystore_class):
                continue  # keystore belongs to a different plugin
            if not self.libraries_available:
                message = keystore.plugin.get_library_not_available_message()
                window.show_error(message)
                return
            tooltip = self.device + '\n' + (keystore.label or 'unnamed')
            cb = partial(self.show_settings_dialog, window, keystore)
            button = StatusBarButton(QIcon(self.icon_unpaired), tooltip, cb)
            # Stash both icons on the button so QtHandlerBase._update_status
            # can swap them when pairing state changes.
            button.icon_paired = self.icon_paired
            button.icon_unpaired = self.icon_unpaired
            window.statusBar().addPermanentWidget(button)
            handler = self.create_handler(window)
            handler.button = button
            keystore.handler = handler
            # Dedicated worker thread for (blocking) device I/O.
            keystore.thread = TaskThread(window, window.on_error)
            self.add_show_address_on_hw_device_button_for_receive_addr(wallet, keystore, window)
            # Trigger a pairing
            keystore.thread.add(partial(self.get_client, keystore))

    def choose_device(self, window, keystore):
        '''This dialog box should be usable even if the user has
        forgotten their PIN or it is in bootloader mode.

        Returns the selected device id, or None if the user cancelled.
        '''
        device_id = self.device_manager().xpub_id(keystore.xpub)
        if not device_id:
            try:
                info = self.device_manager().select_device(self, keystore.handler, keystore)
            except UserCancelled:
                return  # implicit None: no device chosen
            device_id = info.device.id_
        return device_id

    def show_settings_dialog(self, window, keystore):
        # NOTE(review): device_id is computed but not used here; presumably
        # subclasses override this method and use choose_device() to open a
        # device-specific settings dialog -- confirm against subclasses.
        device_id = self.choose_device(window, keystore)

    def add_show_address_on_hw_device_button_for_receive_addr(self, wallet, keystore, main_window):
        # Adds an "eye" button to the receive-address field that displays the
        # current address on the hardware device for out-of-band verification.
        plugin = keystore.plugin
        receive_address_e = main_window.receive_address_e

        def show_address():
            addr = receive_address_e.text()
            # Runs on the keystore's worker thread; may block on the device.
            keystore.thread.add(partial(plugin.show_address, wallet, addr, keystore))

        receive_address_e.addButton("eye1.png", show_address, _("Show on {}").format(plugin.device))
| 37.90678 | 100 | 0.666331 |
import threading
from PyQt5.QtWidgets import QVBoxLayout, QLabel
from qtum_electrum.gui.qt.password_dialog import PasswordLayout, PW_PASSPHRASE
from qtum_electrum.gui.qt.util import *
from qtum_electrum.i18n import _
from qtum_electrum.util import PrintError
class QtHandlerBase(QObject, PrintError):
passphrase_signal = pyqtSignal(object, object)
message_signal = pyqtSignal(object, object)
error_signal = pyqtSignal(object, object)
word_signal = pyqtSignal(object)
clear_signal = pyqtSignal()
query_signal = pyqtSignal(object, object)
yes_no_signal = pyqtSignal(object)
status_signal = pyqtSignal(object)
def __init__(self, win, device):
super(QtHandlerBase, self).__init__()
self.clear_signal.connect(self.clear_dialog)
self.error_signal.connect(self.error_dialog)
self.message_signal.connect(self.message_dialog)
self.passphrase_signal.connect(self.passphrase_dialog)
self.word_signal.connect(self.word_dialog)
self.query_signal.connect(self.win_query_choice)
self.yes_no_signal.connect(self.win_yes_no_question)
self.status_signal.connect(self._update_status)
self.win = win
self.device = device
self.dialog = None
self.done = threading.Event()
def top_level_window(self):
return self.win.top_level_window()
def update_status(self, paired):
self.status_signal.emit(paired)
def _update_status(self, paired):
if hasattr(self, 'button'):
button = self.button
icon_name = button.icon_paired if paired else button.icon_unpaired
button.setIcon(read_QIcon(icon_name))
def query_choice(self, msg, labels):
self.done.clear()
self.query_signal.emit(msg, labels)
self.done.wait()
return self.choice
def yes_no_question(self, msg):
self.done.clear()
self.yes_no_signal.emit(msg)
self.done.wait()
return self.ok
def show_message(self, msg, on_cancel=None):
self.message_signal.emit(msg, on_cancel)
def show_error(self, msg, blocking=False):
self.done.clear()
self.error_signal.emit(msg, blocking)
if blocking:
self.done.wait()
def finished(self):
self.clear_signal.emit()
def get_word(self, msg):
self.done.clear()
self.word_signal.emit(msg)
self.done.wait()
return self.word
def get_passphrase(self, msg, confirm):
self.done.clear()
self.passphrase_signal.emit(msg, confirm)
self.done.wait()
return self.passphrase
def passphrase_dialog(self, msg, confirm):
parent = self.top_level_window()
d = WindowModalDialog(parent, _("Enter Passphrase"))
if confirm:
OK_button = OkButton(d)
playout = PasswordLayout(msg=msg, kind=PW_PASSPHRASE, OK_button=OK_button)
vbox = QVBoxLayout()
vbox.addLayout(playout.layout())
vbox.addLayout(Buttons(CancelButton(d), OK_button))
d.setLayout(vbox)
passphrase = playout.new_password() if d.exec_() else None
else:
pw = QLineEdit()
pw.setEchoMode(2)
pw.setMinimumWidth(200)
vbox = QVBoxLayout()
vbox.addWidget(WWLabel(msg))
vbox.addWidget(pw)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
d.setLayout(vbox)
passphrase = pw.text() if d.exec_() else None
self.passphrase = passphrase
self.done.set()
def word_dialog(self, msg):
dialog = WindowModalDialog(self.top_level_window(), "")
hbox = QHBoxLayout(dialog)
hbox.addWidget(QLabel(msg))
text = QLineEdit()
text.setMaximumWidth(100)
text.returnPressed.connect(dialog.accept)
hbox.addWidget(text)
hbox.addStretch(1)
dialog.exec_()
self.word = text.text()
self.done.set()
def message_dialog(self, msg, on_cancel):
self.clear_dialog()
title = _('Please check your %s device') % self.device
self.dialog = dialog = WindowModalDialog(self.top_level_window(), title)
l = QLabel(msg)
vbox = QVBoxLayout(dialog)
vbox.addWidget(l)
if on_cancel:
dialog.rejected.connect(on_cancel)
vbox.addLayout(Buttons(CancelButton(dialog)))
dialog.show()
def error_dialog(self, msg, blocking):
self.win.show_error(msg, parent=self.top_level_window())
if blocking:
self.done.set()
def clear_dialog(self):
if self.dialog:
self.dialog.accept()
self.dialog = None
def win_query_choice(self, msg, labels):
self.choice = self.win.query_choice(msg, labels)
self.done.set()
def win_yes_no_question(self, msg):
self.ok = self.win.question(msg)
self.done.set()
from qtum_electrum.plugin import hook
from qtum_electrum.util import UserCancelled
from qtum_electrum.gui.qt.main_window import StatusBarButton
class QtPluginBase(object):
@hook
def load_wallet(self, wallet, window):
for keystore in wallet.get_keystores():
if not isinstance(keystore, self.keystore_class):
continue
if not self.libraries_available:
message = keystore.plugin.get_library_not_available_message()
window.show_error(message)
return
tooltip = self.device + '\n' + (keystore.label or 'unnamed')
cb = partial(self.show_settings_dialog, window, keystore)
button = StatusBarButton(QIcon(self.icon_unpaired), tooltip, cb)
button.icon_paired = self.icon_paired
button.icon_unpaired = self.icon_unpaired
window.statusBar().addPermanentWidget(button)
handler = self.create_handler(window)
handler.button = button
keystore.handler = handler
keystore.thread = TaskThread(window, window.on_error)
self.add_show_address_on_hw_device_button_for_receive_addr(wallet, keystore, window)
keystore.thread.add(partial(self.get_client, keystore))
def choose_device(self, window, keystore):
device_id = self.device_manager().xpub_id(keystore.xpub)
if not device_id:
try:
info = self.device_manager().select_device(self, keystore.handler, keystore)
except UserCancelled:
return
device_id = info.device.id_
return device_id
def show_settings_dialog(self, window, keystore):
device_id = self.choose_device(window, keystore)
def add_show_address_on_hw_device_button_for_receive_addr(self, wallet, keystore, main_window):
plugin = keystore.plugin
receive_address_e = main_window.receive_address_e
def show_address():
addr = receive_address_e.text()
keystore.thread.add(partial(plugin.show_address, wallet, addr, keystore))
receive_address_e.addButton("eye1.png", show_address, _("Show on {}").format(plugin.device))
| true | true |
79013fd897fcea8f2d6ff9c9d4ee59018a422948 | 696 | py | Python | models/model_canteen/serializers.py | SanjarbekSaminjonov/musofirlar.backend | 23b09e90cc4e3d153063ad1768b5ae1c18ff866d | [
"Apache-2.0"
] | 1 | 2021-12-23T12:43:17.000Z | 2021-12-23T12:43:17.000Z | models/model_canteen/serializers.py | SanjarbekSaminjonov/musofirlar.backend | 23b09e90cc4e3d153063ad1768b5ae1c18ff866d | [
"Apache-2.0"
] | null | null | null | models/model_canteen/serializers.py | SanjarbekSaminjonov/musofirlar.backend | 23b09e90cc4e3d153063ad1768b5ae1c18ff866d | [
"Apache-2.0"
] | null | null | null | from rest_framework import serializers
from .models import Canteen
from accounts.serializers import UserForSerializer
from model_location.serializers import CityViewSerializer
from model_media.serializers import MediaViewSerializer
# Canteen model serializer
class CanteenSerializer(serializers.ModelSerializer):
    """Serializes every field of the Canteen model.

    Related objects are rendered with DRF's default representation
    (primary keys); presumably used for write (create/update) payloads --
    confirm against the views that use it.
    """
    class Meta:
        model = Canteen
        fields = '__all__'
# Canteen model serializer to view
class CanteenViewSerializer(serializers.ModelSerializer):
    """Read-oriented Canteen serializer with nested related objects.

    Unlike CanteenSerializer, the user, city and images relations are
    embedded with their own serializers instead of primary keys; all of
    them are read-only, so they cannot be set through this serializer.
    """
    user = UserForSerializer(read_only=True)
    city = CityViewSerializer(read_only=True)
    images = MediaViewSerializer(read_only=True, many=True)

    class Meta:
        model = Canteen
        fields = '__all__'
| 29 | 59 | 0.777299 | from rest_framework import serializers
from .models import Canteen
from accounts.serializers import UserForSerializer
from model_location.serializers import CityViewSerializer
from model_media.serializers import MediaViewSerializer
class CanteenSerializer(serializers.ModelSerializer):
class Meta:
model = Canteen
fields = '__all__'
class CanteenViewSerializer(serializers.ModelSerializer):
user = UserForSerializer(read_only=True)
city = CityViewSerializer(read_only=True)
images = MediaViewSerializer(read_only=True, many=True)
class Meta:
model = Canteen
fields = '__all__'
| true | true |
790140b0d712bb4a09b103664f2e674d3005dd79 | 4,637 | py | Python | autobuild/create-j2-confs.py | mastrip2/blockchain-configuration-files | 71e4df86220c85e768f9250ac8c2920ed525b21e | [
"MIT"
] | null | null | null | autobuild/create-j2-confs.py | mastrip2/blockchain-configuration-files | 71e4df86220c85e768f9250ac8c2920ed525b21e | [
"MIT"
] | null | null | null | autobuild/create-j2-confs.py | mastrip2/blockchain-configuration-files | 71e4df86220c85e768f9250ac8c2920ed525b21e | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import json
import os, sys, os.path
import string
from configparser import ConfigParser
J2_CONF_PATH='autobuild/configs/'
def write_file(filename, data):
    """Serialize *data* as 4-space-indented JSON into *filename*."""
    with open(filename, "w") as out_stream:
        json.dump(data, out_stream, indent=4)
def get_wallet_conf(path):
    """Parse a section-less wallet .conf file into a key -> value dict.

    A dummy "[top]" section header is prepended because ConfigParser
    requires at least one section.
    """
    parser = ConfigParser()
    with open(path) as stream:
        raw_text = stream.read()
    parser.read_string("[top]\n" + raw_text)
    return dict(parser.items('top'))
def get_xbridge_conf(path, ticker):
    """Read the [ticker] section of an xbridge .conf file as a plain dict."""
    parser = ConfigParser()
    parser.read(path)
    return dict(parser.items(ticker))
# Mapping from lowercase xbridge .conf keys to the (case-sensitive) template
# keys; each is copied into the template only when present for the coin.
OPTIONAL_XBRIDGE_KEYS = [
    ('addressprefix', 'AddressPrefix'),
    ('scriptprefix', 'ScriptPrefix'),
    ('secretprefix', 'SecretPrefix'),
    ('coin', 'COIN'),
    ('minimumamount', 'MinimumAmount'),
    ('dustamount', 'DustAmount'),
    ('createtxmethod', 'CreateTxMethod'),
    ('getnewkeysupported', 'GetNewKeySupported'),
    ('importwithnoscansupported', 'ImportWithNoScanSupported'),
    ('mintxfee', 'MinTxFee'),
    ('blocktime', 'BlockTime'),
    ('txversion', 'TxVersion'),
    ('feeperbyte', 'FeePerByte'),
    ('confirmations', 'Confirmations'),
]

# Build one <ticker>.base.j2 template per coin from manifest.json plus the
# wallet/xbridge confs referenced by each manifest entry.
with open('manifest.json') as json_file:
    data = json.load(json_file)
    tickers = sorted(set(chain['ticker'] for chain in data))
    for ticker in tickers:
        # All manifest entries for this coin, ordered oldest -> newest.
        chains = [chain for chain in data if chain['ticker'] == ticker]
        chains.sort(key=lambda c: c['ver_id'])
        template_data = {}
        # Shared template fields come from the newest version's confs.
        latest_version_chain = chains[-1]
        xbridge_conf_data = get_xbridge_conf('xbridge-confs/' + latest_version_chain['xbridge_conf'], latest_version_chain['ticker'])
        wallet_conf_data = get_wallet_conf('wallet-confs/' + latest_version_chain['wallet_conf'])
        template_data['Title'] = xbridge_conf_data['title']
        template_data['Address'] = xbridge_conf_data['address']
        template_data['Ip'] = xbridge_conf_data['ip']
        # Ports become jinja2 variables whose defaults are the conf values.
        template_data['rpcPort'] = '{{ rpcPort|default(' + wallet_conf_data['rpcport'] + ')}}'
        template_data['p2pPort'] = '{{ p2pPort|default(' + wallet_conf_data['port'] + ')}}'
        template_data['Username'] = '{{ rpcusername }}'
        template_data['Password'] = '{{ rpcpassword }}'
        # Copy the optional keys in a fixed order (replaces 14 copy-pasted ifs).
        for conf_key, template_key in OPTIONAL_XBRIDGE_KEYS:
            if conf_key in xbridge_conf_data:
                template_data[template_key] = xbridge_conf_data[conf_key]
        # Per-version feature flags and conf file names.
        coin_base_j2_data_versions = {}
        for chain in chains:
            wallet_conf_data = get_wallet_conf('wallet-confs/' + chain['wallet_conf'])
            xbridge_conf_data = get_xbridge_conf('xbridge-confs/' + chain['xbridge_conf'], chain['ticker'])
            # Each manifest entry is keyed by the first of its versions list.
            version = chain['versions'][0]
            coin_base_j2_data_versions[version] = {
                'legacy': 'addresstype' in wallet_conf_data,
                'deprecatedrpc': 'deprecatedrpc' in wallet_conf_data,
                'xbridge_conf': chain['xbridge_conf'],
                'wallet_conf': chain['wallet_conf'],
                'GetNewKeySupported': 'GetNewKeySupported' in xbridge_conf_data
            }
        template_data['versions'] = coin_base_j2_data_versions
        template = {ticker: template_data}
        # BUG FIX: use this loop's own `ticker` instead of the leaked
        # inner-loop variable `chain` (same value here, but fragile).
        write_file(J2_CONF_PATH + ticker.lower() + '.base.j2', template)
print(','.join(tickers)) | 45.460784 | 133 | 0.659694 |
import json
import os, sys, os.path
import string
from configparser import ConfigParser
J2_CONF_PATH='autobuild/configs/'
def write_file(filename, data):
    """Serialize *data* as 4-space-indented JSON into *filename*."""
    with open(filename, "w") as out_stream:
        json.dump(data, out_stream, indent=4)
def get_wallet_conf(path):
    """Parse a section-less wallet .conf file into a key -> value dict.

    A dummy "[top]" section header is prepended because ConfigParser
    requires at least one section.
    """
    parser = ConfigParser()
    with open(path) as stream:
        raw_text = stream.read()
    parser.read_string("[top]\n" + raw_text)
    return dict(parser.items('top'))
def get_xbridge_conf(path, ticker):
    """Read the [ticker] section of an xbridge .conf file as a plain dict."""
    parser = ConfigParser()
    parser.read(path)
    return dict(parser.items(ticker))
# Build one <ticker>.base.j2 template per coin from manifest.json plus the
# wallet/xbridge confs referenced by each manifest entry.
with open('manifest.json') as json_file:
    data = json.load(json_file)
    tickers = list(set([chain['ticker'] for chain in data]))
    tickers.sort(key = lambda t:t, reverse = False)
    for ticker in tickers:
        # All manifest entries for this coin, ordered oldest -> newest.
        chains = [chain for chain in data if chain['ticker'] == ticker]
        chains.sort(key = lambda c:c['ver_id'], reverse = False)
        template_data = {}
        # Shared template fields come from the newest version's confs.
        latest_version_chain = chains[-1]
        xbridge_conf_data = get_xbridge_conf('xbridge-confs/' + latest_version_chain['xbridge_conf'], latest_version_chain['ticker'])
        wallet_conf_data = get_wallet_conf('wallet-confs/' + latest_version_chain['wallet_conf'])
        template_data['Title'] = xbridge_conf_data['title']
        template_data['Address'] = xbridge_conf_data['address']
        template_data['Ip'] = xbridge_conf_data['ip']
        # Ports become jinja2 variables whose defaults are the conf values.
        template_data['rpcPort'] = '{{ rpcPort|default(' + wallet_conf_data['rpcport'] + ')}}'
        template_data['p2pPort'] = '{{ p2pPort|default(' + wallet_conf_data['port'] + ')}}'
        template_data['Username'] = '{{ rpcusername }}'
        template_data['Password'] = '{{ rpcpassword }}'
        # Optional xbridge keys: copied only when present for this coin.
        if 'addressprefix' in xbridge_conf_data:
            template_data['AddressPrefix'] = xbridge_conf_data['addressprefix']
        if 'scriptprefix' in xbridge_conf_data:
            template_data['ScriptPrefix'] = xbridge_conf_data['scriptprefix']
        if 'secretprefix' in xbridge_conf_data:
            template_data['SecretPrefix'] = xbridge_conf_data['secretprefix']
        if 'coin' in xbridge_conf_data:
            template_data['COIN'] = xbridge_conf_data['coin']
        if 'minimumamount' in xbridge_conf_data:
            template_data['MinimumAmount'] = xbridge_conf_data['minimumamount']
        if 'dustamount' in xbridge_conf_data:
            template_data['DustAmount'] = xbridge_conf_data['dustamount']
        if 'createtxmethod' in xbridge_conf_data:
            template_data['CreateTxMethod'] = xbridge_conf_data['createtxmethod']
        if 'getnewkeysupported' in xbridge_conf_data:
            template_data['GetNewKeySupported'] = xbridge_conf_data['getnewkeysupported']
        if 'importwithnoscansupported' in xbridge_conf_data:
            template_data['ImportWithNoScanSupported'] = xbridge_conf_data['importwithnoscansupported']
        if 'mintxfee' in xbridge_conf_data:
            template_data['MinTxFee'] = xbridge_conf_data['mintxfee']
        if 'blocktime' in xbridge_conf_data:
            template_data['BlockTime'] = xbridge_conf_data['blocktime']
        if 'txversion' in xbridge_conf_data:
            template_data['TxVersion'] = xbridge_conf_data['txversion']
        if 'feeperbyte' in xbridge_conf_data:
            template_data['FeePerByte'] = xbridge_conf_data['feeperbyte']
        if 'confirmations' in xbridge_conf_data:
            template_data['Confirmations'] = xbridge_conf_data['confirmations']
        # Per-version feature flags and conf file names.
        coin_base_j2_data_versions = {}
        for chain in chains:
            wallet_conf_data = get_wallet_conf('wallet-confs/' + chain['wallet_conf'])
            xbridge_conf_data = get_xbridge_conf('xbridge-confs/' + chain['xbridge_conf'], chain['ticker'])
            # Each manifest entry is keyed by the first of its versions list.
            version = chain['versions'][0]
            coin_base_j2_data_versions[version] = {
                'legacy': 'addresstype' in wallet_conf_data,
                'deprecatedrpc': 'deprecatedrpc' in wallet_conf_data,
                'xbridge_conf': chain['xbridge_conf'],
                'wallet_conf': chain['wallet_conf'],
                'GetNewKeySupported': 'GetNewKeySupported' in xbridge_conf_data
            }
        template_data['versions'] = coin_base_j2_data_versions
        template = {}
        template[ticker] = template_data
        # NOTE(review): relies on the loop-leaked `chain` variable; same value
        # as `ticker` here, but `ticker.lower()` would be safer.
        write_file(J2_CONF_PATH + chain['ticker'].lower() + '.base.j2', template)
print(','.join(tickers)) | true | true |
7901417a42ec3b2302ff2baee07aab0372416755 | 5,613 | py | Python | data/librispeech.py | vadimkantorov/deepspeech.pytorch | d59213e397e9df3e9dbf5b62932a28979ce28197 | [
"MIT"
] | null | null | null | data/librispeech.py | vadimkantorov/deepspeech.pytorch | d59213e397e9df3e9dbf5b62932a28979ce28197 | [
"MIT"
] | null | null | null | data/librispeech.py | vadimkantorov/deepspeech.pytorch | d59213e397e9df3e9dbf5b62932a28979ce28197 | [
"MIT"
] | null | null | null | import os
import wget
import tarfile
import argparse
import subprocess
from utils import create_manifest
from tqdm import tqdm
import shutil
parser = argparse.ArgumentParser(description='Processes and downloads LibriSpeech dataset.')
parser.add_argument("--target-dir", default='LibriSpeech_dataset/', type=str, help="Directory to store the dataset.")
parser.add_argument('--sample-rate', default=16000, type=int, help='Sample rate')
parser.add_argument('--files-to-use', default="train-clean-100.tar.gz,"
"train-clean-360.tar.gz,train-other-500.tar.gz,"
"dev-clean.tar.gz,dev-other.tar.gz,"
"test-clean.tar.gz,test-other.tar.gz", type=str,
help='list of file names to download')
parser.add_argument('--min-duration', default=1, type=int,
help='Prunes training samples shorter than the min duration (given in seconds, default 1)')
parser.add_argument('--max-duration', default=15, type=int,
help='Prunes training samples longer than the max duration (given in seconds, default 15)')
parser.add_argument('--remove-tarballs', action = 'store_true')
args = parser.parse_args()
LIBRI_SPEECH_URLS = {
"train": ["http://www.openslr.org/resources/12/train-clean-100.tar.gz",
"http://www.openslr.org/resources/12/train-clean-360.tar.gz",
"http://www.openslr.org/resources/12/train-other-500.tar.gz"],
"val": ["http://www.openslr.org/resources/12/dev-clean.tar.gz",
"http://www.openslr.org/resources/12/dev-other.tar.gz"],
"test_clean": ["http://www.openslr.org/resources/12/test-clean.tar.gz"],
"test_other": ["http://www.openslr.org/resources/12/test-other.tar.gz"]
}
def _preprocess_transcript(phrase):
return phrase.strip().upper()
def _process_file(wav_dir, txt_dir, base_filename, root_dir):
    """Convert one .flac recording to 16-bit mono wav (via sox) and write
    its transcript to a matching .txt file.

    :param wav_dir: output directory for the converted .wav file.
    :param txt_dir: output directory for the extracted transcript.
    :param base_filename: name of the .flac file (LibriSpeech id format).
    :param root_dir: directory containing the .flac and its .trans.txt.
    """
    full_recording_path = os.path.join(root_dir, base_filename)
    assert os.path.exists(full_recording_path) and os.path.exists(root_dir)
    wav_recording_path = os.path.join(wav_dir, base_filename.replace(".flac", ".wav"))
    # Resample to args.sample_rate, 16-bit, mono.
    # NOTE(review): shell=True with interpolated paths works for LibriSpeech
    # names but would break on paths containing spaces — confirm acceptable.
    subprocess.call(["sox {} -r {} -b 16 -c 1 {}".format(full_recording_path, str(args.sample_rate),
                                                         wav_recording_path)], shell=True)
    # process transcript
    txt_transcript_path = os.path.join(txt_dir, base_filename.replace(".flac", ".txt"))
    # Shared transcript file is named "<speaker>-<chapter>.trans.txt".
    transcript_file = os.path.join(root_dir, "-".join(base_filename.split('-')[:-1]) + ".trans.txt")
    assert os.path.exists(transcript_file), "Transcript file {} does not exist.".format(transcript_file)
    transcriptions = open(transcript_file).read().strip().split("\n")
    # Map utterance index (last id component) -> transcript text.
    transcriptions = {t.split()[0].split("-")[-1]: " ".join(t.split()[1:]) for t in transcriptions}
    with open(txt_transcript_path, "w") as f:
        key = base_filename.replace(".flac", "").split("-")[-1]
        assert key in transcriptions, "{} is not in the transcriptions".format(key)
        f.write(_preprocess_transcript(transcriptions[key]))
        f.flush()
def main():
    """Download, unpack and convert each requested LibriSpeech split, then
    write a manifest CSV per split (train is pruned by min/max duration)."""
    target_dl_dir = args.target_dir
    if not os.path.exists(target_dl_dir):
        os.makedirs(target_dl_dir)
    files_to_dl = args.files_to_use.strip().split(',')
    for split_type, lst_libri_urls in LIBRI_SPEECH_URLS.items():
        split_dir = os.path.join(target_dl_dir, split_type)
        if not os.path.exists(split_dir):
            os.makedirs(split_dir)
        split_wav_dir = os.path.join(split_dir, "wav")
        if not os.path.exists(split_wav_dir):
            os.makedirs(split_wav_dir)
        split_txt_dir = os.path.join(split_dir, "txt")
        if not os.path.exists(split_txt_dir):
            os.makedirs(split_txt_dir)
        extracted_dir = os.path.join(split_dir, "LibriSpeech")
        # Remove leftovers from a previous interrupted run.
        if os.path.exists(extracted_dir):
            shutil.rmtree(extracted_dir)
        for url in lst_libri_urls:
            # check if we want to dl this file
            dl_flag = False
            for f in files_to_dl:
                if url.find(f) != -1:
                    dl_flag = True
            if not dl_flag:
                print("Skipping url: {}".format(url))
                continue
            filename = url.split("/")[-1]
            target_filename = os.path.join(split_dir, filename)
            # Reuse an already-downloaded tarball if present.
            if not os.path.exists(target_filename):
                wget.download(url, split_dir)
            print("Unpacking {}...".format(filename))
            tar = tarfile.open(target_filename)
            tar.extractall(split_dir)
            tar.close()
            if args.remove_tarballs:
                os.remove(target_filename)
            print("Converting flac files to wav and extracting transcripts...")
            assert os.path.exists(extracted_dir), "Archive {} was not properly uncompressed.".format(filename)
            for root, subdirs, files in tqdm(os.walk(extracted_dir)):
                for f in files:
                    if f.find(".flac") != -1:
                        _process_file(wav_dir=split_wav_dir, txt_dir=split_txt_dir,
                                      base_filename=f, root_dir=root)
            print("Finished {}".format(url))
            shutil.rmtree(extracted_dir)
        if split_type == 'train':  # Prune to min/max duration
            create_manifest(split_dir, 'libri_' + split_type + '_manifest.csv', args.min_duration, args.max_duration)
        else:
            create_manifest(split_dir, 'libri_' + split_type + '_manifest.csv')
if __name__ == "__main__":
main()
| 49.236842 | 117 | 0.623018 | import os
import wget
import tarfile
import argparse
import subprocess
from utils import create_manifest
from tqdm import tqdm
import shutil
parser = argparse.ArgumentParser(description='Processes and downloads LibriSpeech dataset.')
parser.add_argument("--target-dir", default='LibriSpeech_dataset/', type=str, help="Directory to store the dataset.")
parser.add_argument('--sample-rate', default=16000, type=int, help='Sample rate')
parser.add_argument('--files-to-use', default="train-clean-100.tar.gz,"
"train-clean-360.tar.gz,train-other-500.tar.gz,"
"dev-clean.tar.gz,dev-other.tar.gz,"
"test-clean.tar.gz,test-other.tar.gz", type=str,
help='list of file names to download')
parser.add_argument('--min-duration', default=1, type=int,
help='Prunes training samples shorter than the min duration (given in seconds, default 1)')
parser.add_argument('--max-duration', default=15, type=int,
help='Prunes training samples longer than the max duration (given in seconds, default 15)')
parser.add_argument('--remove-tarballs', action = 'store_true')
args = parser.parse_args()
LIBRI_SPEECH_URLS = {
"train": ["http://www.openslr.org/resources/12/train-clean-100.tar.gz",
"http://www.openslr.org/resources/12/train-clean-360.tar.gz",
"http://www.openslr.org/resources/12/train-other-500.tar.gz"],
"val": ["http://www.openslr.org/resources/12/dev-clean.tar.gz",
"http://www.openslr.org/resources/12/dev-other.tar.gz"],
"test_clean": ["http://www.openslr.org/resources/12/test-clean.tar.gz"],
"test_other": ["http://www.openslr.org/resources/12/test-other.tar.gz"]
}
def _preprocess_transcript(phrase):
return phrase.strip().upper()
def _process_file(wav_dir, txt_dir, base_filename, root_dir):
    """Convert one .flac recording to 16-bit mono wav (via sox) and write
    its transcript to a matching .txt file."""
    full_recording_path = os.path.join(root_dir, base_filename)
    assert os.path.exists(full_recording_path) and os.path.exists(root_dir)
    wav_recording_path = os.path.join(wav_dir, base_filename.replace(".flac", ".wav"))
    # Resample to args.sample_rate, 16-bit, mono.
    # NOTE(review): shell=True with interpolated paths — breaks on spaces.
    subprocess.call(["sox {} -r {} -b 16 -c 1 {}".format(full_recording_path, str(args.sample_rate),
                                                         wav_recording_path)], shell=True)
    txt_transcript_path = os.path.join(txt_dir, base_filename.replace(".flac", ".txt"))
    # Shared transcript file is named "<speaker>-<chapter>.trans.txt".
    transcript_file = os.path.join(root_dir, "-".join(base_filename.split('-')[:-1]) + ".trans.txt")
    assert os.path.exists(transcript_file), "Transcript file {} does not exist.".format(transcript_file)
    transcriptions = open(transcript_file).read().strip().split("\n")
    # Map utterance index (last id component) -> transcript text.
    transcriptions = {t.split()[0].split("-")[-1]: " ".join(t.split()[1:]) for t in transcriptions}
    with open(txt_transcript_path, "w") as f:
        key = base_filename.replace(".flac", "").split("-")[-1]
        assert key in transcriptions, "{} is not in the transcriptions".format(key)
        f.write(_preprocess_transcript(transcriptions[key]))
        f.flush()
def main():
    """Download, unpack and convert each requested LibriSpeech split, then
    write a manifest CSV per split (train is pruned by min/max duration)."""
    target_dl_dir = args.target_dir
    if not os.path.exists(target_dl_dir):
        os.makedirs(target_dl_dir)
    files_to_dl = args.files_to_use.strip().split(',')
    for split_type, lst_libri_urls in LIBRI_SPEECH_URLS.items():
        split_dir = os.path.join(target_dl_dir, split_type)
        if not os.path.exists(split_dir):
            os.makedirs(split_dir)
        split_wav_dir = os.path.join(split_dir, "wav")
        if not os.path.exists(split_wav_dir):
            os.makedirs(split_wav_dir)
        split_txt_dir = os.path.join(split_dir, "txt")
        if not os.path.exists(split_txt_dir):
            os.makedirs(split_txt_dir)
        extracted_dir = os.path.join(split_dir, "LibriSpeech")
        # Remove leftovers from a previous interrupted run.
        if os.path.exists(extracted_dir):
            shutil.rmtree(extracted_dir)
        for url in lst_libri_urls:
            # Only download archives listed in --files-to-use.
            dl_flag = False
            for f in files_to_dl:
                if url.find(f) != -1:
                    dl_flag = True
            if not dl_flag:
                print("Skipping url: {}".format(url))
                continue
            filename = url.split("/")[-1]
            target_filename = os.path.join(split_dir, filename)
            # Reuse an already-downloaded tarball if present.
            if not os.path.exists(target_filename):
                wget.download(url, split_dir)
            print("Unpacking {}...".format(filename))
            tar = tarfile.open(target_filename)
            tar.extractall(split_dir)
            tar.close()
            if args.remove_tarballs:
                os.remove(target_filename)
            print("Converting flac files to wav and extracting transcripts...")
            assert os.path.exists(extracted_dir), "Archive {} was not properly uncompressed.".format(filename)
            for root, subdirs, files in tqdm(os.walk(extracted_dir)):
                for f in files:
                    if f.find(".flac") != -1:
                        _process_file(wav_dir=split_wav_dir, txt_dir=split_txt_dir,
                                      base_filename=f, root_dir=root)
            print("Finished {}".format(url))
            shutil.rmtree(extracted_dir)
        # Prune training samples to the min/max duration bounds.
        if split_type == 'train':
            create_manifest(split_dir, 'libri_' + split_type + '_manifest.csv', args.min_duration, args.max_duration)
        else:
            create_manifest(split_dir, 'libri_' + split_type + '_manifest.csv')
if __name__ == "__main__":
main()
| true | true |
790141eeb0852216b33c0d0efb7f741677b338eb | 1,701 | py | Python | app/core/migrations/0001_initial.py | Huxteen/Django-recipe-app-API-Docker-TDD-CI-CD | dc610cd341477ddf2686fcb419e5ba9dda113c89 | [
"MIT"
] | 2 | 2020-12-09T08:27:03.000Z | 2021-02-13T12:39:53.000Z | app/core/migrations/0001_initial.py | Huxteen/Django-recipe-app-API-Docker-TDD-CI-CD | dc610cd341477ddf2686fcb419e5ba9dda113c89 | [
"MIT"
] | null | null | null | app/core/migrations/0001_initial.py | Huxteen/Django-recipe-app-API-Docker-TDD-CI-CD | dc610cd341477ddf2686fcb419e5ba9dda113c89 | [
"MIT"
] | null | null | null | # Generated by Django 2.2.2 on 2019-09-01 09:08
from django.db import migrations, models
class Migration(migrations.Migration):
    # Initial schema migration: creates a custom User model with a unique
    # email field (no username field is defined here).
    initial = True
    dependencies = [
        ('auth', '0011_update_proxy_permissions'),
    ]
    operations = [
        migrations.CreateModel(
            name='User',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('password', models.CharField(max_length=128, verbose_name='password')),
                ('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
                ('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
                ('email', models.EmailField(max_length=255, unique=True)),
                ('name', models.CharField(max_length=255)),
                ('is_active', models.BooleanField(default=True)),
                ('is_staff', models.BooleanField(default=False)),
                ('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
                ('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
            ],
            options={
                'abstract': False,
            },
        ),
    ]
| 50.029412 | 266 | 0.637272 |
from django.db import migrations, models
class Migration(migrations.Migration):
    # Initial schema migration: creates a custom User model with a unique
    # email field (no username field is defined here).
    initial = True
    dependencies = [
        ('auth', '0011_update_proxy_permissions'),
    ]
    operations = [
        migrations.CreateModel(
            name='User',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('password', models.CharField(max_length=128, verbose_name='password')),
                ('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
                ('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
                ('email', models.EmailField(max_length=255, unique=True)),
                ('name', models.CharField(max_length=255)),
                ('is_active', models.BooleanField(default=True)),
                ('is_staff', models.BooleanField(default=False)),
                ('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
                ('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
            ],
            options={
                'abstract': False,
            },
        ),
    ]
| true | true |
790142b269f640eaae6cdf66f9c7a34c83d113b9 | 460 | py | Python | test/test_tiddler_fields_as_strings.py | funkyeah/tiddlyweb | 2346e6c05aa03ae9c8f2687d9ef9e46103267a8e | [
"BSD-3-Clause"
] | null | null | null | test/test_tiddler_fields_as_strings.py | funkyeah/tiddlyweb | 2346e6c05aa03ae9c8f2687d9ef9e46103267a8e | [
"BSD-3-Clause"
] | null | null | null | test/test_tiddler_fields_as_strings.py | funkyeah/tiddlyweb | 2346e6c05aa03ae9c8f2687d9ef9e46103267a8e | [
"BSD-3-Clause"
] | null | null | null | """
Make sure that tiddler fields which are not strings
are stringified, otherwise, the text serialization will
assplode.
"""
from tiddlyweb.serializer import Serializer
from tiddlyweb.model.tiddler import Tiddler
def setup_module(module):
    # Intentional no-op: this test module needs no shared fixtures.
    pass
def test_float_field():
    """A non-string field value (float) must survive the text serialization —
    the serializer is expected to stringify it rather than blow up."""
    tiddler = Tiddler('foo', 'bar')
    tiddler.fields['float'] = 100.5
    serializer = Serializer('text')
    serializer.object = tiddler
    # '%s' forces the serializer's string conversion path.
    assert '100.5' in '%s' % serializer
| 20.909091 | 56 | 0.719565 |
from tiddlyweb.serializer import Serializer
from tiddlyweb.model.tiddler import Tiddler
def setup_module(module):
    # Intentional no-op: this test module needs no shared fixtures.
    pass
def test_float_field():
    """A non-string field value (float) must survive the text serialization —
    the serializer is expected to stringify it rather than blow up."""
    tiddler = Tiddler('foo', 'bar')
    tiddler.fields['float'] = 100.5
    serializer = Serializer('text')
    serializer.object = tiddler
    # '%s' forces the serializer's string conversion path.
    assert '100.5' in '%s' % serializer
| true | true |
7901435434c7c8c085d66898465f595a7218f96f | 7,419 | py | Python | algorithms/hash/simhash.py | SylvanasSun/code-snippets | 8a393c50955a6a1ad0ca9809f3f7501faae52c51 | [
"MIT"
] | 1 | 2018-01-31T03:43:08.000Z | 2018-01-31T03:43:08.000Z | algorithms/hash/simhash.py | SylvanasSun/code-snippets | 8a393c50955a6a1ad0ca9809f3f7501faae52c51 | [
"MIT"
] | null | null | null | algorithms/hash/simhash.py | SylvanasSun/code-snippets | 8a393c50955a6a1ad0ca9809f3f7501faae52c51 | [
"MIT"
] | null | null | null | # Created by SylvanasSun in 2017.10.17
# !/usr/bin/python
# -*- coding: utf-8 -*-
import collections
import collections.abc

import jieba
from jieba import analyse
# TODO: Change default hash algorithms to the other algorithms of high-performance.
def _default_hashfunc(content, hashbits):
"""
Default hash function is variable-length version of Python's builtin hash.
:param content: data that needs to hash.
:return: return a decimal number.
"""
if content == "":
return 0
x = ord(content[0]) << 7
m = 1000003
mask = 2 ** hashbits - 1
for c in content:
x = ((x * m) ^ ord(c)) & mask
x ^= len(content)
if x == -1:
x = -2
return x
# TODO: Change default toknizer to the c/c++ version or other tokenizer of high-performance.
def _default_tokenizer_func(content, keyword_weight_pair):
    """
    Default tokenizer function backed by the jieba tokenizer.

    :param keyword_weight_pair: maximum pair number of the keyword-weight list.
    :return: keyword-weight list, e.g. [('Example', 0.45), ('Hello', 0.25), ...].
    """
    segmented = "".join(jieba.lcut_for_search(content))
    # TF-IDF keyword extraction, highest-weight pairs first.
    return jieba.analyse.extract_tags(segmented, topK=keyword_weight_pair, withWeight=True)
class Simhash(object):
    """
    Implements Google's simhash algorithm for filtering duplicate content.

    Simhash reduces content to a fixed-width fingerprint; two contents are
    considered similar when the Hamming distance between their fingerprints
    is small. More about the algorithm: https://en.wikipedia.org/wiki/SimHash
    The default tokenizer is jieba (https://github.com/fxsjy/jieba).
    """

    def __init__(self, data, keyword_weight_pair=20, hash_bit_number=64, hashfunc=None, tokenizer_func=None):
        """
        :param data: data to encode: a str, an iterable of (token, weight)
                     pairs, an int fingerprint, or another Simhash.
        :param keyword_weight_pair: maximum pair number of the keyword-weight list.
        :param hash_bit_number: maximum bit number for the hashcode.
        :param hashfunc: hash function; its first parameter is the data to
                         encode and the second is the hash bit number.
        :param tokenizer_func: tokenizer function; its first parameter is the
                               content to tokenize and the second is
                               keyword_weight_pair.
        """
        if hashfunc is None:
            self.hashfunc = _default_hashfunc
        else:
            self.hashfunc = hashfunc
        if tokenizer_func is None:
            self.tokenizer_func = _default_tokenizer_func
        else:
            self.tokenizer_func = tokenizer_func
        self.hash_bit_number = hash_bit_number
        # Historical attribute name ("pari") kept for backward compatibility.
        self.keyword_weight_pari = keyword_weight_pair
        if isinstance(data, Simhash):
            self.hash = data.hash
        elif isinstance(data, int):
            self.hash = data
        else:
            self.simhash(data)

    def __str__(self):
        return str(self.hash)

    def simhash(self, content):
        """Compute and store the fingerprint, dispatching on content type."""
        if content is None:
            self.hash = -1
            return
        if isinstance(content, str):
            features = self.tokenizer_func(content, self.keyword_weight_pari)
            self.hash = self.build_from_features(features)
        # BUG FIX: collections.Iterable was removed in Python 3.10;
        # collections.abc.Iterable works on all Python 3 versions.
        elif isinstance(content, collections.abc.Iterable):
            self.hash = self.build_from_features(content)
        elif isinstance(content, int):
            self.hash = content
        else:
            raise Exception("Unsupported parameter type %s" % type(content))

    def build_from_features(self, features):
        """
        :param features: a list of (token, weight) tuples or a token -> weight
                         dict; bare string tokens get an implicit weight of 1.
        :return: a decimal digit for the accumulative result of each handled
                 feature-weight pair.
        """
        v = [0] * self.hash_bit_number
        if isinstance(features, dict):
            features = features.items()
        # Longitudinal accumulation: each set bit of a token's hash adds the
        # token's weight to that column, each clear bit subtracts it.
        for f in features:
            if isinstance(f, str):
                h = self.hashfunc(f, self.hash_bit_number)
                w = 1
            else:
                assert isinstance(f, collections.abc.Iterable)
                h = self.hashfunc(f[0], self.hash_bit_number)
                w = f[1]
            for i in range(self.hash_bit_number):
                bitmask = 1 << i
                v[i] += w if h & bitmask else -w
        # Keep a 1-bit only where the accumulated weight is non-negative.
        fingerprint = 0
        for i in range(self.hash_bit_number):
            if v[i] >= 0:
                fingerprint += 1 << i
        return fingerprint

    def is_equal(self, another, limit=0.8):
        """
        Determine whether two simhashes are similar.

        :param another: another simhash (Simhash instance or int fingerprint).
        :param limit: similarity threshold in [0, 1].
        :return: True when the similarity is greater than limit, else False.
        """
        if another is None:
            raise Exception("Parameter another is null")
        if isinstance(another, int):
            distance = self.hamming_distance(another)
        elif isinstance(another, Simhash):
            assert self.hash_bit_number == another.hash_bit_number
            distance = self.hamming_distance(another.hash)
        else:
            raise Exception("Unsupported parameter type %s" % type(another))
        similarity = float(self.hash_bit_number - distance) / self.hash_bit_number
        if similarity > limit:
            return True
        return False

    def hamming_distance(self, another):
        """
        Compute the Hamming distance — the number of differing bits between
        this fingerprint and another.

        :param another: another simhash value (int).
        :return: the Hamming distance.
        """
        x = (self.hash ^ another) & ((1 << self.hash_bit_number) - 1)
        result = 0
        # Kernighan's trick: clear the lowest set bit each iteration.
        while x:
            result += 1
            x &= x - 1
        return result
if __name__ == "__main__":
    # Smoke test: A/B are paraphrases (expected similar), C/D share most
    # words (expected similar), B/C are unrelated (expected dissimilar).
    sentence_A = """
    明朝军制建立在军户制度上,军户即为中国古代世代从军、充当军差的人户。
    东晋南北朝时,士兵及家属的户籍隶于军府称为军户。军户子弟世袭为兵未经准许不得脱离军籍。
    北魏军户亦有用俘虏充当的。元朝实行军户制度,军户必须出成年男子到军队服役,父死子替,兄亡弟代,世代相袭。
    """
    sentence_B = """
    明朝的军制是在元朝基础上改进,而没有采用唐宋时期的募兵制。
    元朝的军制是建立在游牧民族制度上发展而来,游牧民族在战争是全民征兵,实际上是军户制度。
    建立元朝以后,蒙古族还是全部军户,对于占领区招降的军队,也实行军户制度。
    """
    sentence_C = "You know nothing Jon Snow!"
    sentence_D = "Jon Snow: I konw nothing."
    simhash_A = Simhash(sentence_A)
    simhash_B = Simhash(sentence_B)
    simhash_C = Simhash(sentence_C)
    simhash_D = Simhash(sentence_D)
    print(simhash_A)
    print(simhash_B)
    print(simhash_C)
    print(simhash_D)
    assert simhash_A.is_equal(simhash_B) is True
    assert simhash_B.is_equal(simhash_C) is False
    assert simhash_C.is_equal(simhash_D) is True
| 36.190244 | 117 | 0.626365 |
import collections
import jieba
from jieba import analyse
def _default_hashfunc(content, hashbits):
if content == "":
return 0
x = ord(content[0]) << 7
m = 1000003
mask = 2 ** hashbits - 1
for c in content:
x = ((x * m) ^ ord(c)) & mask
x ^= len(content)
if x == -1:
x = -2
return x
def _default_tokenizer_func(content, keyword_weight_pair):
    """Tokenize *content* with jieba and return up to *keyword_weight_pair*
    (keyword, weight) tuples ranked by TF-IDF weight."""
    segmented = "".join(jieba.lcut_for_search(content))
    return jieba.analyse.extract_tags(segmented, topK=keyword_weight_pair, withWeight=True)
class Simhash(object):
    """Google's simhash algorithm for near-duplicate detection: contents are
    reduced to fingerprints and compared by Hamming distance."""
    def __init__(self, data, keyword_weight_pair=20, hash_bit_number=64, hashfunc=None, tokenizer_func=None):
        # hashfunc(data, bits) and tokenizer_func(content, pair_count) are
        # injectable; defaults live at module level.
        if hashfunc is None:
            self.hashfunc = _default_hashfunc
        else:
            self.hashfunc = hashfunc
        if tokenizer_func is None:
            self.tokenizer_func = _default_tokenizer_func
        else:
            self.tokenizer_func = tokenizer_func
        self.hash_bit_number = hash_bit_number
        self.keyword_weight_pari = keyword_weight_pair
        if isinstance(data, Simhash):
            self.hash = data.hash
        elif isinstance(data, int):
            self.hash = data
        else:
            self.simhash(data)
    def __str__(self):
        return str(self.hash)
    def simhash(self, content):
        """Compute and store the fingerprint, dispatching on content type."""
        if content is None:
            self.hash = -1
            return
        if isinstance(content, str):
            features = self.tokenizer_func(content, self.keyword_weight_pari)
            self.hash = self.build_from_features(features)
        # NOTE(review): collections.Iterable was removed in Python 3.10 —
        # should be collections.abc.Iterable.
        elif isinstance(content, collections.Iterable):
            self.hash = self.build_from_features(content)
        elif isinstance(content, int):
            self.hash = content
        else:
            raise Exception("Unsupported parameter type %s" % type(content))
    def build_from_features(self, features):
        """Fold (token, weight) pairs (or a token->weight dict, or bare
        strings with weight 1) into a single fingerprint integer."""
        v = [0] * self.hash_bit_number
        if isinstance(features, dict):
            features = features.items()
        # Each set bit of a token's hash adds its weight to that column,
        # each clear bit subtracts it.
        for f in features:
            if isinstance(f, str):
                h = self.hashfunc(f, self.hash_bit_number)
                w = 1
            else:
                assert isinstance(f, collections.Iterable)
                h = self.hashfunc(f[0], self.hash_bit_number)
                w = f[1]
            for i in range(self.hash_bit_number):
                bitmask = 1 << i
                v[i] += w if h & bitmask else -w
        # Keep a 1-bit only where the accumulated weight is non-negative.
        fingerprint = 0
        for i in range(self.hash_bit_number):
            if v[i] >= 0:
                fingerprint += 1 << i
        return fingerprint
    def is_equal(self, another, limit=0.8):
        """Return True when similarity (1 - distance/bits) exceeds *limit*."""
        if another is None:
            raise Exception("Parameter another is null")
        if isinstance(another, int):
            distance = self.hamming_distance(another)
        elif isinstance(another, Simhash):
            assert self.hash_bit_number == another.hash_bit_number
            distance = self.hamming_distance(another.hash)
        else:
            raise Exception("Unsupported parameter type %s" % type(another))
        similarity = float(self.hash_bit_number - distance) / self.hash_bit_number
        if similarity > limit:
            return True
        return False
    def hamming_distance(self, another):
        """Number of differing bits between this fingerprint and *another*."""
        x = (self.hash ^ another) & ((1 << self.hash_bit_number) - 1)
        result = 0
        # Kernighan's trick: clear the lowest set bit each iteration.
        while x:
            result += 1
            x &= x - 1
        return result
if __name__ == "__main__":
    # Smoke test: A/B are paraphrases (expected similar), C/D share most
    # words (expected similar), B/C are unrelated (expected dissimilar).
    sentence_A = """
    明朝军制建立在军户制度上,军户即为中国古代世代从军、充当军差的人户。
    东晋南北朝时,士兵及家属的户籍隶于军府称为军户。军户子弟世袭为兵未经准许不得脱离军籍。
    北魏军户亦有用俘虏充当的。元朝实行军户制度,军户必须出成年男子到军队服役,父死子替,兄亡弟代,世代相袭。
    """
    sentence_B = """
    明朝的军制是在元朝基础上改进,而没有采用唐宋时期的募兵制。
    元朝的军制是建立在游牧民族制度上发展而来,游牧民族在战争是全民征兵,实际上是军户制度。
    建立元朝以后,蒙古族还是全部军户,对于占领区招降的军队,也实行军户制度。
    """
    sentence_C = "You know nothing Jon Snow!"
    sentence_D = "Jon Snow: I konw nothing."
    simhash_A = Simhash(sentence_A)
    simhash_B = Simhash(sentence_B)
    simhash_C = Simhash(sentence_C)
    simhash_D = Simhash(sentence_D)
    print(simhash_A)
    print(simhash_B)
    print(simhash_C)
    print(simhash_D)
    assert simhash_A.is_equal(simhash_B) is True
    assert simhash_B.is_equal(simhash_C) is False
    assert simhash_C.is_equal(simhash_D) is True
| true | true |
7901435b0febe6d85163a17baac50a97baf9102a | 8,086 | py | Python | Dev/Cpp/CreateHeader.py | Shockblast/Effekseer | bac86c0fc965f04a0f57c5863d37a9c2d5c3be97 | [
"Apache-2.0",
"BSD-3-Clause"
] | 1 | 2021-12-21T07:03:42.000Z | 2021-12-21T07:03:42.000Z | Dev/Cpp/CreateHeader.py | Shockblast/Effekseer | bac86c0fc965f04a0f57c5863d37a9c2d5c3be97 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | Dev/Cpp/CreateHeader.py | Shockblast/Effekseer | bac86c0fc965f04a0f57c5863d37a9c2d5c3be97 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | import os
import re
import codecs
def isValidLine(line):
    """Return True if *line* should be kept when merging header files.

    A line is kept unless it is a local include directive ('include "...").
    Platform-specific local includes (PSVita/PS4/Switch/XBoxOne) are kept
    so that per-platform guards survive the merge.
    """
    # Idiom fix: compare against None with 'is', not '=='.
    if re.search('include \"', line) is None:
        return True
    # Local include: keep only if it targets a console platform.
    platform_markers = ('.PSVita', '.PS4', '.Switch', '.XBoxOne')
    return any(marker in line for marker in platform_markers)
class CreateHeader:
    """Accumulates lines from several header files and writes a merged header.

    Files are read and written with the 'utf-8_sig' codec so a UTF-8 BOM is
    consumed on input and emitted on output.
    """

    def __init__(self):
        self.lines = []

    def addLine(self, line):
        """Append a single literal line to the merged output."""
        self.lines.append(line)

    def readLines(self, path):
        """Append every valid line (see isValidLine) of the file at *path*."""
        # 'with' closes the handle even if reading raises (the original
        # leaked the handle on any exception between open() and close()).
        with codecs.open(path, 'r', 'utf-8_sig') as f:
            for line in f:
                if isValidLine(line):
                    # strip(os.linesep) removes trailing newline characters.
                    self.lines.append(line.strip(os.linesep))

    def output(self, path):
        """Write the accumulated lines to *path*, one per line."""
        with codecs.open(path, 'w', 'utf-8_sig') as f:
            for line in self.lines:
                f.write(line + os.linesep)
# --- Effekseer.h: public core API, merged from the individual headers ---
effekseerHeader = CreateHeader()
effekseerHeader.readLines('Effekseer/Effekseer/Effekseer.Base.Pre.h')
effekseerHeader.readLines('Effekseer/Effekseer/Utils/Effekseer.CustomAllocator.h')
effekseerHeader.readLines('Effekseer/Effekseer/Effekseer.Vector2D.h')
effekseerHeader.readLines('Effekseer/Effekseer/Effekseer.Vector3D.h')
effekseerHeader.readLines('Effekseer/Effekseer/Effekseer.Color.h')
effekseerHeader.readLines('Effekseer/Effekseer/Effekseer.RectF.h')
effekseerHeader.readLines('Effekseer/Effekseer/Effekseer.Matrix43.h')
effekseerHeader.readLines('Effekseer/Effekseer/Effekseer.Matrix44.h')
effekseerHeader.readLines('Effekseer/Effekseer/Effekseer.File.h')
effekseerHeader.readLines('Effekseer/Effekseer/Effekseer.DefaultFile.h')
effekseerHeader.readLines('Effekseer/Effekseer/Backend/GraphicsDevice.h')
effekseerHeader.readLines('Effekseer/Effekseer/Effekseer.Resource.h')
effekseerHeader.readLines('Effekseer/Effekseer/Effekseer.Effect.h')
effekseerHeader.readLines('Effekseer/Effekseer/Effekseer.Manager.h')
effekseerHeader.readLines('Effekseer/Effekseer/Effekseer.Setting.h')
effekseerHeader.readLines('Effekseer/Effekseer/Effekseer.Server.h')
effekseerHeader.readLines('Effekseer/Effekseer/Effekseer.Client.h')
effekseerHeader.addLine('')
effekseerHeader.addLine('#include "Effekseer.Modules.h"')
effekseerHeader.addLine('')
effekseerHeader.output('Effekseer/Effekseer.h')
# --- Effekseer.SIMD.h: SIMD math layer (generic / NEON / SSE backends) ---
effekseerSimdHeader = CreateHeader()
effekseerSimdHeader.addLine('#pragma once')
effekseerSimdHeader.readLines('Effekseer/Effekseer/SIMD/Base.h')
effekseerSimdHeader.readLines('Effekseer/Effekseer/SIMD/Float4_Gen.h')
effekseerSimdHeader.readLines('Effekseer/Effekseer/SIMD/Float4_NEON.h')
effekseerSimdHeader.readLines('Effekseer/Effekseer/SIMD/Float4_SSE.h')
effekseerSimdHeader.readLines('Effekseer/Effekseer/SIMD/Int4_Gen.h')
effekseerSimdHeader.readLines('Effekseer/Effekseer/SIMD/Int4_NEON.h')
effekseerSimdHeader.readLines('Effekseer/Effekseer/SIMD/Int4_SSE.h')
effekseerSimdHeader.readLines('Effekseer/Effekseer/SIMD/Bridge_Gen.h')
effekseerSimdHeader.readLines('Effekseer/Effekseer/SIMD/Bridge_NEON.h')
effekseerSimdHeader.readLines('Effekseer/Effekseer/SIMD/Bridge_SSE.h')
effekseerSimdHeader.readLines('Effekseer/Effekseer/SIMD/Vec2f.h')
effekseerSimdHeader.readLines('Effekseer/Effekseer/SIMD/Vec3f.h')
effekseerSimdHeader.readLines('Effekseer/Effekseer/SIMD/Vec4f.h')
effekseerSimdHeader.readLines('Effekseer/Effekseer/SIMD/Mat43f.h')
effekseerSimdHeader.readLines('Effekseer/Effekseer/SIMD/Mat44f.h')
effekseerSimdHeader.readLines('Effekseer/Effekseer/SIMD/Quaternionf.h')
effekseerSimdHeader.readLines('Effekseer/Effekseer/SIMD/Utils.h')
effekseerSimdHeader.output('Effekseer/Effekseer.SIMD.h')
# --- Effekseer.Modules.h: internal-data access header (loaders, renderers) ---
effekseerModulesHeader = CreateHeader()
effekseerModulesHeader.addLine('#pragma once')
effekseerModulesHeader.addLine('')
effekseerModulesHeader.addLine('#include "Effekseer.h"')
effekseerModulesHeader.addLine('#include "Effekseer.SIMD.h"')
effekseerModulesHeader.addLine('')
effekseerModulesHeader.addLine('// A header to access internal data of effekseer')
effekseerModulesHeader.readLines('Effekseer/Effekseer/Parameter/Effekseer.Parameters.h')
effekseerModulesHeader.readLines('Effekseer/Effekseer/Renderer/Effekseer.SpriteRenderer.h')
effekseerModulesHeader.readLines('Effekseer/Effekseer/Renderer/Effekseer.RibbonRenderer.h')
effekseerModulesHeader.readLines('Effekseer/Effekseer/Renderer/Effekseer.RingRenderer.h')
effekseerModulesHeader.readLines('Effekseer/Effekseer/Renderer/Effekseer.ModelRenderer.h')
effekseerModulesHeader.readLines('Effekseer/Effekseer/Renderer/Effekseer.TrackRenderer.h')
effekseerModulesHeader.readLines('Effekseer/Effekseer/Effekseer.EffectLoader.h')
effekseerModulesHeader.readLines('Effekseer/Effekseer/Effekseer.TextureLoader.h')
effekseerModulesHeader.readLines('Effekseer/Effekseer/Model/Model.h')
effekseerModulesHeader.readLines('Effekseer/Effekseer/Model/ModelLoader.h')
effekseerModulesHeader.readLines('Effekseer/Effekseer/Effekseer.MaterialLoader.h')
# NOTE(review): Model/Model.h is read a second time here, duplicating its
# lines in the merged header — confirm whether this repeat is intentional.
effekseerModulesHeader.readLines('Effekseer/Effekseer/Model/Model.h')
effekseerModulesHeader.readLines('Effekseer/Effekseer/Effekseer.Curve.h')
effekseerModulesHeader.readLines('Effekseer/Effekseer/Effekseer.CurveLoader.h')
effekseerModulesHeader.readLines('Effekseer/Effekseer/Sound/Effekseer.SoundPlayer.h')
effekseerModulesHeader.readLines('Effekseer/Effekseer/Effekseer.SoundLoader.h')
effekseerModulesHeader.output('Effekseer/Effekseer.Modules.h')
# --- Per-backend renderer headers: each merges its Base.Pre, the common
# --- renderer interface, its own Renderer header and (where used) LLGI glue.
effekseerRendererDX9Header = CreateHeader()
effekseerRendererDX9Header.readLines('EffekseerRendererDX9/EffekseerRenderer/EffekseerRendererDX9.Base.Pre.h')
effekseerRendererDX9Header.readLines('EffekseerRendererCommon/EffekseerRenderer.Renderer.h')
effekseerRendererDX9Header.readLines('EffekseerRendererDX9/EffekseerRenderer/EffekseerRendererDX9.Renderer.h')
effekseerRendererDX9Header.output('EffekseerRendererDX9/EffekseerRendererDX9.h')
effekseerRendererDX11Header = CreateHeader()
effekseerRendererDX11Header.readLines('EffekseerRendererDX11/EffekseerRenderer/EffekseerRendererDX11.Base.Pre.h')
effekseerRendererDX11Header.readLines('EffekseerRendererCommon/EffekseerRenderer.Renderer.h')
effekseerRendererDX11Header.readLines('EffekseerRendererDX11/EffekseerRenderer/EffekseerRendererDX11.Renderer.h')
effekseerRendererDX11Header.output('EffekseerRendererDX11/EffekseerRendererDX11.h')
effekseerRendererDX12Header = CreateHeader()
effekseerRendererDX12Header.readLines('EffekseerRendererDX12/EffekseerRenderer/EffekseerRendererDX12.Base.Pre.h')
effekseerRendererDX12Header.readLines('EffekseerRendererCommon/EffekseerRenderer.Renderer.h')
effekseerRendererDX12Header.readLines('EffekseerRendererDX12/EffekseerRenderer/EffekseerRendererDX12.Renderer.h')
effekseerRendererDX12Header.readLines('EffekseerRendererLLGI/Common.h')
effekseerRendererDX12Header.output('EffekseerRendererDX12/EffekseerRendererDX12.h')
effekseerRendererVulkanHeader = CreateHeader()
effekseerRendererVulkanHeader.readLines('EffekseerRendererVulkan/EffekseerRenderer/EffekseerRendererVulkan.Base.Pre.h')
effekseerRendererVulkanHeader.readLines('EffekseerRendererCommon/EffekseerRenderer.Renderer.h')
effekseerRendererVulkanHeader.readLines('EffekseerRendererVulkan/EffekseerRenderer/EffekseerRendererVulkan.Renderer.h')
effekseerRendererVulkanHeader.readLines('EffekseerRendererLLGI/Common.h')
effekseerRendererVulkanHeader.output('EffekseerRendererVulkan/EffekseerRendererVulkan.h')
effekseerRendererGLHeader = CreateHeader()
effekseerRendererGLHeader.readLines('EffekseerRendererGL/EffekseerRenderer/EffekseerRendererGL.Base.Pre.h')
effekseerRendererGLHeader.readLines('EffekseerRendererCommon/EffekseerRenderer.Renderer.h')
effekseerRendererGLHeader.readLines('EffekseerRendererGL/EffekseerRenderer/EffekseerRendererGL.Renderer.h')
effekseerRendererGLHeader.output('EffekseerRendererGL/EffekseerRendererGL.h')
effekseerRendererMetalHeader = CreateHeader()
effekseerRendererMetalHeader.readLines('EffekseerRendererMetal/EffekseerRenderer/EffekseerRendererMetal.Base.Pre.h')
effekseerRendererMetalHeader.readLines('EffekseerRendererCommon/EffekseerRenderer.Renderer.h')
effekseerRendererMetalHeader.readLines('EffekseerRendererMetal/EffekseerRenderer/EffekseerRendererMetal.Renderer.h')
effekseerRendererMetalHeader.readLines('EffekseerRendererLLGI/Common.h')
effekseerRendererMetalHeader.output('EffekseerRendererMetal/EffekseerRendererMetal.h')
| 57.757143 | 160 | 0.852461 | import os
import re
import codecs
def isValidLine(line):
if re.search('include \"', line) == None or line.find('.PSVita') != -1 or line.find('.PS4') != -1 or line.find('.Switch') != -1 or line.find('.XBoxOne') != -1:
return True
return False
class CreateHeader:
def __init__(self):
self.lines = []
def addLine(self,line):
self.lines.append(line)
def readLines(self,path):
f = codecs.open(path, 'r','utf-8_sig')
line = f.readline()
while line:
if isValidLine(line):
self.lines.append(line.strip(os.linesep))
line = f.readline()
f.close()
def output(self,path):
f = codecs.open(path, 'w','utf-8_sig')
for line in self.lines:
f.write(line + os.linesep)
f.close()
effekseerHeader = CreateHeader()
effekseerHeader.readLines('Effekseer/Effekseer/Effekseer.Base.Pre.h')
effekseerHeader.readLines('Effekseer/Effekseer/Utils/Effekseer.CustomAllocator.h')
effekseerHeader.readLines('Effekseer/Effekseer/Effekseer.Vector2D.h')
effekseerHeader.readLines('Effekseer/Effekseer/Effekseer.Vector3D.h')
effekseerHeader.readLines('Effekseer/Effekseer/Effekseer.Color.h')
effekseerHeader.readLines('Effekseer/Effekseer/Effekseer.RectF.h')
effekseerHeader.readLines('Effekseer/Effekseer/Effekseer.Matrix43.h')
effekseerHeader.readLines('Effekseer/Effekseer/Effekseer.Matrix44.h')
effekseerHeader.readLines('Effekseer/Effekseer/Effekseer.File.h')
effekseerHeader.readLines('Effekseer/Effekseer/Effekseer.DefaultFile.h')
effekseerHeader.readLines('Effekseer/Effekseer/Backend/GraphicsDevice.h')
effekseerHeader.readLines('Effekseer/Effekseer/Effekseer.Resource.h')
effekseerHeader.readLines('Effekseer/Effekseer/Effekseer.Effect.h')
effekseerHeader.readLines('Effekseer/Effekseer/Effekseer.Manager.h')
effekseerHeader.readLines('Effekseer/Effekseer/Effekseer.Setting.h')
effekseerHeader.readLines('Effekseer/Effekseer/Effekseer.Server.h')
effekseerHeader.readLines('Effekseer/Effekseer/Effekseer.Client.h')
effekseerHeader.addLine('')
effekseerHeader.addLine('#include "Effekseer.Modules.h"')
effekseerHeader.addLine('')
effekseerHeader.output('Effekseer/Effekseer.h')
effekseerSimdHeader = CreateHeader()
effekseerSimdHeader.addLine('#pragma once')
effekseerSimdHeader.readLines('Effekseer/Effekseer/SIMD/Base.h')
effekseerSimdHeader.readLines('Effekseer/Effekseer/SIMD/Float4_Gen.h')
effekseerSimdHeader.readLines('Effekseer/Effekseer/SIMD/Float4_NEON.h')
effekseerSimdHeader.readLines('Effekseer/Effekseer/SIMD/Float4_SSE.h')
effekseerSimdHeader.readLines('Effekseer/Effekseer/SIMD/Int4_Gen.h')
effekseerSimdHeader.readLines('Effekseer/Effekseer/SIMD/Int4_NEON.h')
effekseerSimdHeader.readLines('Effekseer/Effekseer/SIMD/Int4_SSE.h')
effekseerSimdHeader.readLines('Effekseer/Effekseer/SIMD/Bridge_Gen.h')
effekseerSimdHeader.readLines('Effekseer/Effekseer/SIMD/Bridge_NEON.h')
effekseerSimdHeader.readLines('Effekseer/Effekseer/SIMD/Bridge_SSE.h')
effekseerSimdHeader.readLines('Effekseer/Effekseer/SIMD/Vec2f.h')
effekseerSimdHeader.readLines('Effekseer/Effekseer/SIMD/Vec3f.h')
effekseerSimdHeader.readLines('Effekseer/Effekseer/SIMD/Vec4f.h')
effekseerSimdHeader.readLines('Effekseer/Effekseer/SIMD/Mat43f.h')
effekseerSimdHeader.readLines('Effekseer/Effekseer/SIMD/Mat44f.h')
effekseerSimdHeader.readLines('Effekseer/Effekseer/SIMD/Quaternionf.h')
effekseerSimdHeader.readLines('Effekseer/Effekseer/SIMD/Utils.h')
effekseerSimdHeader.output('Effekseer/Effekseer.SIMD.h')
effekseerModulesHeader = CreateHeader()
effekseerModulesHeader.addLine('#pragma once')
effekseerModulesHeader.addLine('')
effekseerModulesHeader.addLine('#include "Effekseer.h"')
effekseerModulesHeader.addLine('#include "Effekseer.SIMD.h"')
effekseerModulesHeader.addLine('')
effekseerModulesHeader.addLine('// A header to access internal data of effekseer')
effekseerModulesHeader.readLines('Effekseer/Effekseer/Parameter/Effekseer.Parameters.h')
effekseerModulesHeader.readLines('Effekseer/Effekseer/Renderer/Effekseer.SpriteRenderer.h')
effekseerModulesHeader.readLines('Effekseer/Effekseer/Renderer/Effekseer.RibbonRenderer.h')
effekseerModulesHeader.readLines('Effekseer/Effekseer/Renderer/Effekseer.RingRenderer.h')
effekseerModulesHeader.readLines('Effekseer/Effekseer/Renderer/Effekseer.ModelRenderer.h')
effekseerModulesHeader.readLines('Effekseer/Effekseer/Renderer/Effekseer.TrackRenderer.h')
effekseerModulesHeader.readLines('Effekseer/Effekseer/Effekseer.EffectLoader.h')
effekseerModulesHeader.readLines('Effekseer/Effekseer/Effekseer.TextureLoader.h')
effekseerModulesHeader.readLines('Effekseer/Effekseer/Model/Model.h')
effekseerModulesHeader.readLines('Effekseer/Effekseer/Model/ModelLoader.h')
effekseerModulesHeader.readLines('Effekseer/Effekseer/Effekseer.MaterialLoader.h')
effekseerModulesHeader.readLines('Effekseer/Effekseer/Model/Model.h')
effekseerModulesHeader.readLines('Effekseer/Effekseer/Effekseer.Curve.h')
effekseerModulesHeader.readLines('Effekseer/Effekseer/Effekseer.CurveLoader.h')
effekseerModulesHeader.readLines('Effekseer/Effekseer/Sound/Effekseer.SoundPlayer.h')
effekseerModulesHeader.readLines('Effekseer/Effekseer/Effekseer.SoundLoader.h')
effekseerModulesHeader.output('Effekseer/Effekseer.Modules.h')
effekseerRendererDX9Header = CreateHeader()
effekseerRendererDX9Header.readLines('EffekseerRendererDX9/EffekseerRenderer/EffekseerRendererDX9.Base.Pre.h')
effekseerRendererDX9Header.readLines('EffekseerRendererCommon/EffekseerRenderer.Renderer.h')
effekseerRendererDX9Header.readLines('EffekseerRendererDX9/EffekseerRenderer/EffekseerRendererDX9.Renderer.h')
effekseerRendererDX9Header.output('EffekseerRendererDX9/EffekseerRendererDX9.h')
effekseerRendererDX11Header = CreateHeader()
effekseerRendererDX11Header.readLines('EffekseerRendererDX11/EffekseerRenderer/EffekseerRendererDX11.Base.Pre.h')
effekseerRendererDX11Header.readLines('EffekseerRendererCommon/EffekseerRenderer.Renderer.h')
effekseerRendererDX11Header.readLines('EffekseerRendererDX11/EffekseerRenderer/EffekseerRendererDX11.Renderer.h')
effekseerRendererDX11Header.output('EffekseerRendererDX11/EffekseerRendererDX11.h')
effekseerRendererDX12Header = CreateHeader()
effekseerRendererDX12Header.readLines('EffekseerRendererDX12/EffekseerRenderer/EffekseerRendererDX12.Base.Pre.h')
effekseerRendererDX12Header.readLines('EffekseerRendererCommon/EffekseerRenderer.Renderer.h')
effekseerRendererDX12Header.readLines('EffekseerRendererDX12/EffekseerRenderer/EffekseerRendererDX12.Renderer.h')
effekseerRendererDX12Header.readLines('EffekseerRendererLLGI/Common.h')
effekseerRendererDX12Header.output('EffekseerRendererDX12/EffekseerRendererDX12.h')
effekseerRendererVulkanHeader = CreateHeader()
effekseerRendererVulkanHeader.readLines('EffekseerRendererVulkan/EffekseerRenderer/EffekseerRendererVulkan.Base.Pre.h')
effekseerRendererVulkanHeader.readLines('EffekseerRendererCommon/EffekseerRenderer.Renderer.h')
effekseerRendererVulkanHeader.readLines('EffekseerRendererVulkan/EffekseerRenderer/EffekseerRendererVulkan.Renderer.h')
effekseerRendererVulkanHeader.readLines('EffekseerRendererLLGI/Common.h')
effekseerRendererVulkanHeader.output('EffekseerRendererVulkan/EffekseerRendererVulkan.h')
effekseerRendererGLHeader = CreateHeader()
effekseerRendererGLHeader.readLines('EffekseerRendererGL/EffekseerRenderer/EffekseerRendererGL.Base.Pre.h')
effekseerRendererGLHeader.readLines('EffekseerRendererCommon/EffekseerRenderer.Renderer.h')
effekseerRendererGLHeader.readLines('EffekseerRendererGL/EffekseerRenderer/EffekseerRendererGL.Renderer.h')
effekseerRendererGLHeader.output('EffekseerRendererGL/EffekseerRendererGL.h')
effekseerRendererMetalHeader = CreateHeader()
effekseerRendererMetalHeader.readLines('EffekseerRendererMetal/EffekseerRenderer/EffekseerRendererMetal.Base.Pre.h')
effekseerRendererMetalHeader.readLines('EffekseerRendererCommon/EffekseerRenderer.Renderer.h')
effekseerRendererMetalHeader.readLines('EffekseerRendererMetal/EffekseerRenderer/EffekseerRendererMetal.Renderer.h')
effekseerRendererMetalHeader.readLines('EffekseerRendererLLGI/Common.h')
effekseerRendererMetalHeader.output('EffekseerRendererMetal/EffekseerRendererMetal.h')
| true | true |
7901444372c85a2b15f7dcba7de55b077aa0b721 | 7,836 | py | Python | contrib/bitrpc/bitrpc.py | iannkwon/sarnath | 634234991c0919b24ba22138ae5cd84323c119fa | [
"MIT"
] | null | null | null | contrib/bitrpc/bitrpc.py | iannkwon/sarnath | 634234991c0919b24ba22138ae5cd84323c119fa | [
"MIT"
] | null | null | null | contrib/bitrpc/bitrpc.py | iannkwon/sarnath | 634234991c0919b24ba22138ae5cd84323c119fa | [
"MIT"
] | null | null | null | from jsonrpc import ServiceProxy
import sys
import string
# ===== BEGIN USER SETTINGS =====
# if you do not set these you will be prompted for a password for every command
rpcuser = ""
rpcpass = ""
# ====== END USER SETTINGS ======
if rpcpass == "":
access = ServiceProxy("http://127.0.0.1:9332")
else:
access = ServiceProxy("http://"+rpcuser+":"+rpcpass+"@127.0.0.1:9332")
cmd = sys.argv[1].lower()
if cmd == "backupwallet":
try:
path = raw_input("Enter destination path/filename: ")
print access.backupwallet(path)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccount":
try:
addr = raw_input("Enter a Sarnath address: ")
print access.getaccount(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccountaddress":
try:
acct = raw_input("Enter an account name: ")
print access.getaccountaddress(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getaddressesbyaccount":
try:
acct = raw_input("Enter an account name: ")
print access.getaddressesbyaccount(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getbalance":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getbalance(acct, mc)
except:
print access.getbalance()
except:
print "\n---An error occurred---\n"
elif cmd == "getblockbycount":
try:
height = raw_input("Height: ")
print access.getblockbycount(height)
except:
print "\n---An error occurred---\n"
elif cmd == "getblockcount":
try:
print access.getblockcount()
except:
print "\n---An error occurred---\n"
elif cmd == "getblocknumber":
try:
print access.getblocknumber()
except:
print "\n---An error occurred---\n"
elif cmd == "getconnectioncount":
try:
print access.getconnectioncount()
except:
print "\n---An error occurred---\n"
elif cmd == "getdifficulty":
try:
print access.getdifficulty()
except:
print "\n---An error occurred---\n"
elif cmd == "getgenerate":
try:
print access.getgenerate()
except:
print "\n---An error occurred---\n"
elif cmd == "gethashespersec":
try:
print access.gethashespersec()
except:
print "\n---An error occurred---\n"
elif cmd == "getinfo":
try:
print access.getinfo()
except:
print "\n---An error occurred---\n"
elif cmd == "getnewaddress":
try:
acct = raw_input("Enter an account name: ")
try:
print access.getnewaddress(acct)
except:
print access.getnewaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaccount":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaccount(acct, mc)
except:
print access.getreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaddress":
try:
addr = raw_input("Enter a Sarnath address (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaddress(addr, mc)
except:
print access.getreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "gettransaction":
try:
txid = raw_input("Enter a transaction ID: ")
print access.gettransaction(txid)
except:
print "\n---An error occurred---\n"
elif cmd == "getwork":
try:
data = raw_input("Data (optional): ")
try:
print access.gettransaction(data)
except:
print access.gettransaction()
except:
print "\n---An error occurred---\n"
elif cmd == "help":
try:
cmd = raw_input("Command (optional): ")
try:
print access.help(cmd)
except:
print access.help()
except:
print "\n---An error occurred---\n"
elif cmd == "listaccounts":
try:
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.listaccounts(mc)
except:
print access.listaccounts()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaccount":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaccount(mc, incemp)
except:
print access.listreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaddress":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaddress(mc, incemp)
except:
print access.listreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "listtransactions":
try:
acct = raw_input("Account (optional): ")
count = raw_input("Number of transactions (optional): ")
frm = raw_input("Skip (optional):")
try:
print access.listtransactions(acct, count, frm)
except:
print access.listtransactions()
except:
print "\n---An error occurred---\n"
elif cmd == "move":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.move(frm, to, amt, mc, comment)
except:
print access.move(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendfrom":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendfrom(frm, to, amt, mc, comment, commentto)
except:
print access.sendfrom(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendmany":
try:
frm = raw_input("From: ")
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.sendmany(frm,to,mc,comment)
except:
print access.sendmany(frm,to)
except:
print "\n---An error occurred---\n"
elif cmd == "sendtoaddress":
try:
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
amt = raw_input("Amount:")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendtoaddress(to,amt,comment,commentto)
except:
print access.sendtoaddress(to,amt)
except:
print "\n---An error occurred---\n"
elif cmd == "setaccount":
try:
addr = raw_input("Address: ")
acct = raw_input("Account:")
print access.setaccount(addr,acct)
except:
print "\n---An error occurred---\n"
elif cmd == "setgenerate":
try:
gen= raw_input("Generate? (true/false): ")
cpus = raw_input("Max processors/cores (-1 for unlimited, optional):")
try:
print access.setgenerate(gen, cpus)
except:
print access.setgenerate(gen)
except:
print "\n---An error occurred---\n"
elif cmd == "settxfee":
try:
amt = raw_input("Amount:")
print access.settxfee(amt)
except:
print "\n---An error occurred---\n"
elif cmd == "stop":
try:
print access.stop()
except:
print "\n---An error occurred---\n"
elif cmd == "validateaddress":
try:
addr = raw_input("Address: ")
print access.validateaddress(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrase":
try:
pwd = raw_input("Enter wallet passphrase: ")
access.walletpassphrase(pwd, 60)
print "\n---Wallet unlocked---\n"
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrasechange":
try:
pwd = raw_input("Enter old wallet passphrase: ")
pwd2 = raw_input("Enter new wallet passphrase: ")
access.walletpassphrasechange(pwd, pwd2)
print
print "\n---Passphrase changed---\n"
except:
print
print "\n---An error occurred---\n"
print
else:
print "Command not found or not supported"
| 24.110769 | 79 | 0.668198 | from jsonrpc import ServiceProxy
import sys
import string
rpcuser = ""
rpcpass = ""
if rpcpass == "":
access = ServiceProxy("http://127.0.0.1:9332")
else:
access = ServiceProxy("http://"+rpcuser+":"+rpcpass+"@127.0.0.1:9332")
cmd = sys.argv[1].lower()
if cmd == "backupwallet":
try:
path = raw_input("Enter destination path/filename: ")
print access.backupwallet(path)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccount":
try:
addr = raw_input("Enter a Sarnath address: ")
print access.getaccount(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccountaddress":
try:
acct = raw_input("Enter an account name: ")
print access.getaccountaddress(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getaddressesbyaccount":
try:
acct = raw_input("Enter an account name: ")
print access.getaddressesbyaccount(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getbalance":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getbalance(acct, mc)
except:
print access.getbalance()
except:
print "\n---An error occurred---\n"
elif cmd == "getblockbycount":
try:
height = raw_input("Height: ")
print access.getblockbycount(height)
except:
print "\n---An error occurred---\n"
elif cmd == "getblockcount":
try:
print access.getblockcount()
except:
print "\n---An error occurred---\n"
elif cmd == "getblocknumber":
try:
print access.getblocknumber()
except:
print "\n---An error occurred---\n"
elif cmd == "getconnectioncount":
try:
print access.getconnectioncount()
except:
print "\n---An error occurred---\n"
elif cmd == "getdifficulty":
try:
print access.getdifficulty()
except:
print "\n---An error occurred---\n"
elif cmd == "getgenerate":
try:
print access.getgenerate()
except:
print "\n---An error occurred---\n"
elif cmd == "gethashespersec":
try:
print access.gethashespersec()
except:
print "\n---An error occurred---\n"
elif cmd == "getinfo":
try:
print access.getinfo()
except:
print "\n---An error occurred---\n"
elif cmd == "getnewaddress":
try:
acct = raw_input("Enter an account name: ")
try:
print access.getnewaddress(acct)
except:
print access.getnewaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaccount":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaccount(acct, mc)
except:
print access.getreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaddress":
try:
addr = raw_input("Enter a Sarnath address (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaddress(addr, mc)
except:
print access.getreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "gettransaction":
try:
txid = raw_input("Enter a transaction ID: ")
print access.gettransaction(txid)
except:
print "\n---An error occurred---\n"
elif cmd == "getwork":
try:
data = raw_input("Data (optional): ")
try:
print access.gettransaction(data)
except:
print access.gettransaction()
except:
print "\n---An error occurred---\n"
elif cmd == "help":
try:
cmd = raw_input("Command (optional): ")
try:
print access.help(cmd)
except:
print access.help()
except:
print "\n---An error occurred---\n"
elif cmd == "listaccounts":
try:
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.listaccounts(mc)
except:
print access.listaccounts()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaccount":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaccount(mc, incemp)
except:
print access.listreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaddress":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaddress(mc, incemp)
except:
print access.listreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "listtransactions":
try:
acct = raw_input("Account (optional): ")
count = raw_input("Number of transactions (optional): ")
frm = raw_input("Skip (optional):")
try:
print access.listtransactions(acct, count, frm)
except:
print access.listtransactions()
except:
print "\n---An error occurred---\n"
elif cmd == "move":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.move(frm, to, amt, mc, comment)
except:
print access.move(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendfrom":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendfrom(frm, to, amt, mc, comment, commentto)
except:
print access.sendfrom(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendmany":
try:
frm = raw_input("From: ")
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.sendmany(frm,to,mc,comment)
except:
print access.sendmany(frm,to)
except:
print "\n---An error occurred---\n"
elif cmd == "sendtoaddress":
try:
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
amt = raw_input("Amount:")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendtoaddress(to,amt,comment,commentto)
except:
print access.sendtoaddress(to,amt)
except:
print "\n---An error occurred---\n"
elif cmd == "setaccount":
try:
addr = raw_input("Address: ")
acct = raw_input("Account:")
print access.setaccount(addr,acct)
except:
print "\n---An error occurred---\n"
elif cmd == "setgenerate":
try:
gen= raw_input("Generate? (true/false): ")
cpus = raw_input("Max processors/cores (-1 for unlimited, optional):")
try:
print access.setgenerate(gen, cpus)
except:
print access.setgenerate(gen)
except:
print "\n---An error occurred---\n"
elif cmd == "settxfee":
try:
amt = raw_input("Amount:")
print access.settxfee(amt)
except:
print "\n---An error occurred---\n"
elif cmd == "stop":
try:
print access.stop()
except:
print "\n---An error occurred---\n"
elif cmd == "validateaddress":
try:
addr = raw_input("Address: ")
print access.validateaddress(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrase":
try:
pwd = raw_input("Enter wallet passphrase: ")
access.walletpassphrase(pwd, 60)
print "\n---Wallet unlocked---\n"
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrasechange":
try:
pwd = raw_input("Enter old wallet passphrase: ")
pwd2 = raw_input("Enter new wallet passphrase: ")
access.walletpassphrasechange(pwd, pwd2)
print
print "\n---Passphrase changed---\n"
except:
print
print "\n---An error occurred---\n"
print
else:
print "Command not found or not supported"
| false | true |
790145a3f15acd1ed0115e580f9a967616d05789 | 4,622 | py | Python | chrome/common/extensions/docs/server2/render_servlet_test.py | SlimKatLegacy/android_external_chromium_org | ee480ef5039d7c561fc66ccf52169ead186f1bea | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 2 | 2015-03-04T02:36:53.000Z | 2016-06-25T11:22:17.000Z | chrome/common/extensions/docs/server2/render_servlet_test.py | j4ckfrost/android_external_chromium_org | a1a3dad8b08d1fcf6b6b36c267158ed63217c780 | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | null | null | null | chrome/common/extensions/docs/server2/render_servlet_test.py | j4ckfrost/android_external_chromium_org | a1a3dad8b08d1fcf6b6b36c267158ed63217c780 | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 4 | 2015-02-09T08:49:30.000Z | 2017-08-26T02:03:34.000Z | #!/usr/bin/env python
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
from extensions_paths import EXAMPLES, PUBLIC_TEMPLATES, STATIC_DOCS
from local_file_system import LocalFileSystem
from render_servlet import RenderServlet
from server_instance import ServerInstance
from servlet import Request, Response
from test_util import ReadFile
class _RenderServletDelegate(RenderServlet.Delegate):
  """Test delegate that serves a ServerInstance backed by the local file system."""
  def CreateServerInstance(self):
    # Build a throwaway server instance over the real on-disk docs content.
    file_system = LocalFileSystem.Create()
    return ServerInstance.ForTest(file_system)
class RenderServletTest(unittest.TestCase):
  """End-to-end tests for RenderServlet over the local on-disk docs content."""
  def _Render(self, path):
    """Run a full GET of |path| through RenderServlet and return the Response."""
    return RenderServlet(Request.ForTest(path),
                         _RenderServletDelegate()).Get()
  def testExtensionAppRedirect(self):
    """A bare doc path is (non-permanently) redirected into /extensions/."""
    self.assertEqual(
        Response.Redirect('/extensions/storage.html', permanent=False),
        self._Render('storage.html'))
  def testChannelRedirect(self):
    """A channel-prefixed path (stable/...) permanently redirects to the channel-less one."""
    self.assertEqual(
        Response.Redirect('/extensions/storage.html', permanent=True),
        self._Render('stable/extensions/storage.html'))
  def testNotFound(self):
    """Unknown paths are served the nearest 404 page with a 404 status."""
    def create_404_response(real_path):
      # Render the real 404 page (served with 200) and rewrite its status so it
      # can be compared against responses for genuinely missing paths.
      real_404 = self._Render(real_path)
      self.assertEqual(200, real_404.status)
      real_404.status = 404
      return real_404
    root_404 = create_404_response('404.html')
    extensions_404 = create_404_response('extensions/404.html')
    apps_404 = create_404_response('apps/404.html')
    # Note: would test that root_404 != extensions and apps but it's not
    # necessarily true.
    self.assertNotEqual(extensions_404, apps_404)
    self.assertEqual(root_404, self._Render('not_found.html'))
    self.assertEqual(root_404, self._Render('not_found/not_found.html'))
    self.assertEqual(extensions_404, self._Render('extensions/not_found.html'))
    self.assertEqual(
        extensions_404, self._Render('extensions/manifest/not_found.html'))
    self.assertEqual(
        extensions_404,
        self._Render('extensions/manifest/not_found/not_found.html'))
    self.assertEqual(apps_404, self._Render('apps/not_found.html'))
    self.assertEqual(apps_404, self._Render('apps/manifest/not_found.html'))
    self.assertEqual(
        apps_404, self._Render('apps/manifest/not_found/not_found.html'))
  def testSampleFile(self):
    """A sample source file is served raw with a JavaScript content type."""
    sample_file = 'extensions/talking_alarm_clock/background.js'
    response = self._Render('extensions/examples/%s' % sample_file)
    self.assertEqual(200, response.status)
    self.assertTrue(response.headers['Content-Type'] in (
        'application/javascript; charset=utf-8',
        'application/x-javascript; charset=utf-8'))
    self.assertEqual(ReadFile('%s/%s' % (EXAMPLES, sample_file)),
                     response.content.ToString())
  def testSampleZip(self):
    """A sample directory is downloadable as a zip archive."""
    sample_dir = 'extensions/talking_alarm_clock'
    response = self._Render('extensions/examples/%s.zip' % sample_dir)
    self.assertEqual(200, response.status)
    self.assertEqual('application/zip', response.headers['Content-Type'])
  def testStaticFile(self):
    """A static resource is served verbatim with its proper content type."""
    static_file = 'css/site.css'
    response = self._Render('static/%s' % static_file)
    self.assertEqual(200, response.status)
    self.assertEqual('text/css; charset=utf-8',
                     response.headers['Content-Type'])
    self.assertEqual(ReadFile('%s/%s' % (STATIC_DOCS, static_file)),
                     response.content.ToString())
  def testHtmlTemplate(self):
    """An HTML template is expanded (rendered output is larger than the source)."""
    html_file = 'extensions/storage.html'
    response = self._Render(html_file)
    self.assertEqual(200, response.status)
    self.assertEqual('text/html; charset=utf-8',
                     response.headers.get('Content-Type'))
    # Can't really test rendering all that well.
    self.assertTrue(len(response.content) >
                    len(ReadFile('%s/%s' % (PUBLIC_TEMPLATES, html_file))))
  def testDevelopersGoogleComRedirect(self):
    """The site root (and its index.html) redirects to developers.google.com/chrome."""
    def assert_redirect(request_path):
      response = self._Render(request_path)
      self.assertEqual(('//developers.google.com/chrome', False),
                       response.GetRedirect())
    assert_redirect('')
    assert_redirect('index.html')
  def testIndexRedirect(self):
    """A bare directory path redirects to its index.html."""
    response = self._Render('extensions')
    self.assertEqual(('/extensions/index.html', False),
                     response.GetRedirect())
  def testOtherRedirectsJsonRedirect(self):
    """A legacy path is redirected (exercises the JSON-configured redirects, per the test name)."""
    response = self._Render('apps/webview_tag.html')
    self.assertEqual(('/apps/tags/webview.html', False),
                     response.GetRedirect())
if __name__ == '__main__':
unittest.main()
| 38.198347 | 79 | 0.706621 |
import unittest
from extensions_paths import EXAMPLES, PUBLIC_TEMPLATES, STATIC_DOCS
from local_file_system import LocalFileSystem
from render_servlet import RenderServlet
from server_instance import ServerInstance
from servlet import Request, Response
from test_util import ReadFile
class _RenderServletDelegate(RenderServlet.Delegate):
def CreateServerInstance(self):
return ServerInstance.ForTest(LocalFileSystem.Create())
class RenderServletTest(unittest.TestCase):
def _Render(self, path):
return RenderServlet(Request.ForTest(path),
_RenderServletDelegate()).Get()
def testExtensionAppRedirect(self):
self.assertEqual(
Response.Redirect('/extensions/storage.html', permanent=False),
self._Render('storage.html'))
def testChannelRedirect(self):
self.assertEqual(
Response.Redirect('/extensions/storage.html', permanent=True),
self._Render('stable/extensions/storage.html'))
def testNotFound(self):
def create_404_response(real_path):
real_404 = self._Render(real_path)
self.assertEqual(200, real_404.status)
real_404.status = 404
return real_404
root_404 = create_404_response('404.html')
extensions_404 = create_404_response('extensions/404.html')
apps_404 = create_404_response('apps/404.html')
# necessarily true.
self.assertNotEqual(extensions_404, apps_404)
self.assertEqual(root_404, self._Render('not_found.html'))
self.assertEqual(root_404, self._Render('not_found/not_found.html'))
self.assertEqual(extensions_404, self._Render('extensions/not_found.html'))
self.assertEqual(
extensions_404, self._Render('extensions/manifest/not_found.html'))
self.assertEqual(
extensions_404,
self._Render('extensions/manifest/not_found/not_found.html'))
self.assertEqual(apps_404, self._Render('apps/not_found.html'))
self.assertEqual(apps_404, self._Render('apps/manifest/not_found.html'))
self.assertEqual(
apps_404, self._Render('apps/manifest/not_found/not_found.html'))
def testSampleFile(self):
sample_file = 'extensions/talking_alarm_clock/background.js'
response = self._Render('extensions/examples/%s' % sample_file)
self.assertEqual(200, response.status)
self.assertTrue(response.headers['Content-Type'] in (
'application/javascript; charset=utf-8',
'application/x-javascript; charset=utf-8'))
self.assertEqual(ReadFile('%s/%s' % (EXAMPLES, sample_file)),
response.content.ToString())
def testSampleZip(self):
sample_dir = 'extensions/talking_alarm_clock'
response = self._Render('extensions/examples/%s.zip' % sample_dir)
self.assertEqual(200, response.status)
self.assertEqual('application/zip', response.headers['Content-Type'])
def testStaticFile(self):
static_file = 'css/site.css'
response = self._Render('static/%s' % static_file)
self.assertEqual(200, response.status)
self.assertEqual('text/css; charset=utf-8',
response.headers['Content-Type'])
self.assertEqual(ReadFile('%s/%s' % (STATIC_DOCS, static_file)),
response.content.ToString())
def testHtmlTemplate(self):
html_file = 'extensions/storage.html'
response = self._Render(html_file)
self.assertEqual(200, response.status)
self.assertEqual('text/html; charset=utf-8',
response.headers.get('Content-Type'))
# Can't really test rendering all that well.
self.assertTrue(len(response.content) >
len(ReadFile('%s/%s' % (PUBLIC_TEMPLATES, html_file))))
def testDevelopersGoogleComRedirect(self):
def assert_redirect(request_path):
response = self._Render(request_path)
self.assertEqual(('//developers.google.com/chrome', False),
response.GetRedirect())
assert_redirect('')
assert_redirect('index.html')
def testIndexRedirect(self):
response = self._Render('extensions')
self.assertEqual(('/extensions/index.html', False),
response.GetRedirect())
def testOtherRedirectsJsonRedirect(self):
response = self._Render('apps/webview_tag.html')
self.assertEqual(('/apps/tags/webview.html', False),
response.GetRedirect())
if __name__ == '__main__':
unittest.main()
| true | true |
7901468931f7270d19bbbb42637612296457425b | 2,036 | py | Python | src/helpers/api_handler.py | alejandrodlsp/grogu-bot | d35a504ee550fede4e8ea20e799079f8e8b2887b | [
"MIT"
] | 1 | 2021-04-15T13:47:32.000Z | 2021-04-15T13:47:32.000Z | src/helpers/api_handler.py | alejandrodlsp/razer-bot | d35a504ee550fede4e8ea20e799079f8e8b2887b | [
"MIT"
] | null | null | null | src/helpers/api_handler.py | alejandrodlsp/razer-bot | d35a504ee550fede4e8ea20e799079f8e8b2887b | [
"MIT"
] | null | null | null | import discord
import io
import aiohttp
from aiohttp import request, ClientSession
from src.embeds.image_embed import ImageEmbed
async def request_canvas_image(ctx, url, member: discord.Member = None, params=None, is_gif=False):
    """Fetch an avatar-based canvas image from *url* and post it to the channel.

    :param ctx: command context used to send the resulting file
    :param url: base API endpoint; the member's avatar URL is appended as a query arg
    :param member: member whose avatar is rendered (required in practice, see note)
    :param params: optional extra query parameters appended to the request
    :param is_gif: name the uploaded attachment image.gif instead of image.png
    """
    # BUGFIX: a mutable default argument ({}) is shared across calls; use None
    # as the default and normalize here instead.
    params = params or {}
    params_url = "&" + "&".join("{}={}".format(k, v)
                                for k, v in params.items()) if params else ""
    # NOTE(review): `member` defaults to None but is dereferenced
    # unconditionally below, so callers must always pass one -- confirm.
    avatar_url = member.avatar_url_as(format="png", size=1024)
    async with ClientSession() as session:
        async with session.get(f'{url}?avatar={avatar_url}{params_url}') as resp:
            image_data = io.BytesIO(await resp.read())
        # No manual session.close() needed: "async with" closes the session.
    await ctx.send(file=discord.File(image_data, 'image.gif' if is_gif else 'image.png'))
async def request_image(ctx, url, params={}, key="link", title="Requested image", description="", footer=""):
    """GET JSON from *url* and post the image found under *key* as an embed.

    Uses data["caption"] as the embed title when the API provides one; sends a
    plain error message on any non-200 status.
    """
    if params != {}:
        query = "&" + "&".join("{}={}".format(k, v) for k, v in params.items())
    else:
        query = ""
    async with request("GET", url + query) as response:
        if response.status != 200:
            await ctx.send(f"API returned a {response.status} status :((")
            return
        data = await response.json()
        if "caption" in data:
            title = data["caption"]
        embed = ImageEmbed(ctx, title, description, footer, data[key])
        await embed.send()
async def request_text(ctx, url, key, params=None, text_format="{}"):
    """GET JSON from *url* and send data[key], formatted with *text_format*.

    :param ctx: command context used to send the message
    :param url: API endpoint to query
    :param key: key to extract from the JSON response
    :param params: optional extra query parameters
    :param text_format: format string applied to the extracted value
    """
    # BUGFIX: mutable default ({}) replaced with None; also removed the
    # leftover debug print() calls that leaked query strings and API payloads
    # to stdout.
    params = params or {}
    params_url = "&" + "&".join("{}={}".format(k, v)
                                for k, v in params.items()) if params else ""
    async with request("GET", url + params_url) as response:
        if response.status == 200:
            data = await response.json()
            await ctx.send(text_format.format(data[key]))
        else:
            await ctx.send(f"API returned a {response.status} status :((")
| 40.72 | 129 | 0.556483 | import discord
import io
import aiohttp
from aiohttp import request, ClientSession
from src.embeds.image_embed import ImageEmbed
async def request_canvas_image(ctx, url, member: discord.Member = None, params={}, is_gif=False):
params_url = "&" + "&".join(["{}={}".format(k, v)
for k, v in params.items()]) if params != {} else ""
async with ClientSession() as wastedSession:
async with wastedSession.get(f'{url}?avatar={member.avatar_url_as(format="png", size=1024)}{params_url}') as wastedImage:
imageData = io.BytesIO(await wastedImage.read())
await wastedSession.close()
await ctx.send(file=discord.File(imageData, 'image.gif' if is_gif else 'image.png'))
async def request_image(ctx, url, params={}, key="link", title="Requested image", description="", footer=""):
params_url = "&" + "&".join(["{}={}".format(k, v)
for k, v in params.items()]) if params != {} else ""
async with request("GET", url + params_url) as response:
if response.status == 200:
data = await response.json()
if "caption" in data:
title = data["caption"]
embed = ImageEmbed(
ctx,
title,
description,
footer,
data[key]
)
await embed.send()
else:
await ctx.send(f"API returned a {response.status} status :((")
async def request_text(ctx, url, key, params={}, text_format="{}"):
params_url = "&" + "&".join(["{}={}".format(k, v)
for k, v in params.items()]) if params != {} else ""
print(params_url)
async with request("GET", url + params_url) as response:
if response.status == 200:
data = await response.json()
print(data)
await ctx.send(text_format.format(data[key]))
else:
await ctx.send(f"API returned a {response.status} status :((")
| true | true |
790148793111aef084f8e513723e3e42a6349e6a | 2,408 | py | Python | 0-python-tutorial/25-dates05_strftime23_z.py | luis2ra/py3-00-w3schools | 6bb851837f8ef9520491d13fa2c909047c9b18cf | [
"MIT"
] | null | null | null | 0-python-tutorial/25-dates05_strftime23_z.py | luis2ra/py3-00-w3schools | 6bb851837f8ef9520491d13fa2c909047c9b18cf | [
"MIT"
] | null | null | null | 0-python-tutorial/25-dates05_strftime23_z.py | luis2ra/py3-00-w3schools | 6bb851837f8ef9520491d13fa2c909047c9b18cf | [
"MIT"
] | null | null | null | # Demo Python Datetime - The strftime() Method
'''
The strftime() Method
The datetime object has a method for formatting date objects into readable strings.
The method is called strftime(), and takes one parameter, format, to specify the format of the returned string.
Directive Description Example
%a Weekday, short version Wed
%A Weekday, full version Wednesday
%w Weekday as a number 0-6, 0 is Sunday 3
%d Day of month 01-31 31
%b Month name, short version Dec
%B Month name, full version December
%m Month as a number 01-12 12
%y Year, short version, without century 18
%Y Year, full version 2018
%H Hour 00-23 17
%I Hour 00-12 05
%p AM/PM PM
%M Minute 00-59 41
%S Second 00-59 08
%f Microsecond 000000-999999 548513
%z UTC offset +0100
%Z Timezone CST
%j Day number of year 001-366 365
%U Week number of year, Sunday as the first day of week, 00-53 52
%W Week number of year, Monday as the first day of week, 00-53 52
%c Local version of date and time Mon Dec 31 17:41:00 2018
%x Local version of date 12/31/18
%X Local version of time 17:41:00
%% A % character %
'''
# Demo: format the current time with datetime.strftime() (the %z directive).
import datetime
x = datetime.datetime.now()  # naive local time: no tzinfo attached
print(x)
print(x.strftime("%z")) | 57.333333 | 111 | 0.354236 |
import datetime
x = datetime.datetime.now()
print(x)
print(x.strftime("%z")) | true | true |
790148d82deb5ee6e891b87a5fd4d5b8e5c875cf | 6,580 | py | Python | st2reactor/st2reactor/rules/tester.py | kkkanil/st2 | 07cd195d7a6e177a37dd019e5c9ab8329259d0fa | [
"Apache-2.0"
] | null | null | null | st2reactor/st2reactor/rules/tester.py | kkkanil/st2 | 07cd195d7a6e177a37dd019e5c9ab8329259d0fa | [
"Apache-2.0"
] | 15 | 2021-02-11T22:58:54.000Z | 2021-08-06T18:03:47.000Z | st2reactor/st2reactor/rules/tester.py | kkkanil/st2 | 07cd195d7a6e177a37dd019e5c9ab8329259d0fa | [
"Apache-2.0"
] | 1 | 2021-07-10T15:02:29.000Z | 2021-07-10T15:02:29.000Z | # Copyright 2020 The StackStorm Authors.
# Copyright 2019 Extreme Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import os
import six
import mock
from jinja2.exceptions import UndefinedError
from st2common import log as logging
from st2common.content.loader import MetaLoader
from st2common.models.db.rule import RuleDB
from st2common.models.db.trigger import TriggerDB
from st2common.models.db.trigger import TriggerInstanceDB
from st2common.models.system.common import ResourceReference
from st2common.persistence.reactor import Rule, TriggerInstance, Trigger
from st2reactor.rules.enforcer import RuleEnforcer
from st2reactor.rules.matcher import RulesMatcher
__all__ = [
'RuleTester'
]
LOG = logging.getLogger(__name__)
class RuleTester(object):
    """Standalone helper that checks whether a trigger instance matches a rule.

    The rule and trigger instance can each come either from the database
    (by ref / id) or from definition files on disk.
    """

    def __init__(self, rule_file_path=None, rule_ref=None, trigger_instance_file_path=None,
                 trigger_instance_id=None):
        """
        :param rule_file_path: Path to the file containing the rule definition.
        :type rule_file_path: ``str``

        :param rule_ref: Reference of an existing rule in the database.
        :type rule_ref: ``str``

        :param trigger_instance_file_path: Path to the file containing the trigger
                                           instance definition.
        :type trigger_instance_file_path: ``str``

        :param trigger_instance_id: Id of an existing trigger instance in the database.
        :type trigger_instance_id: ``str``
        """
        self._rule_file_path = rule_file_path
        self._rule_ref = rule_ref
        self._trigger_instance_file_path = trigger_instance_file_path
        self._trigger_instance_id = trigger_instance_id
        self._meta_loader = MetaLoader()

    def evaluate(self):
        """
        Evaluate trigger instance against the rule.

        :return: ``True`` if the rule matches, ``False`` otherwise.
        :rtype: ``boolean``
        """
        rule_db = self._get_rule_db()
        trigger_instance_db, trigger_db = self._get_trigger_instance_db()

        # The trigger check needs to be performed here as that is not performed
        # by RulesMatcher.
        if rule_db.trigger != trigger_db.ref:
            LOG.info('rule.trigger "%s" and trigger.ref "%s" do not match.',
                     rule_db.trigger, trigger_db.ref)
            return False

        # Check if rule matches criteria.
        matcher = RulesMatcher(trigger_instance=trigger_instance_db, trigger=trigger_db,
                               rules=[rule_db], extra_info=True)
        matching_rules = matcher.get_matching_rules()

        # Rule does not match so early exit.
        if len(matching_rules) < 1:
            return False

        # Check if rule can be enforced. Mocks stand in for the runner/action
        # DB objects since this tool runs without database access.
        enforcer = RuleEnforcer(trigger_instance=trigger_instance_db, rule=rule_db)
        runner_type_db = mock.Mock()
        runner_type_db.runner_parameters = {}
        action_db = mock.Mock()
        action_db.parameters = {}
        params = rule_db.action.parameters  # pylint: disable=no-member

        context, additional_contexts = enforcer.get_action_execution_context(action_db=action_db,
                                                                             trace_context=None)

        # Note: We only return partially resolved parameters.
        # To be able to return all parameters we would need access to corresponding ActionDB,
        # RunnerTypeDB and ConfigDB object, but this would add a dependency on the database and the
        # tool is meant to be used standalone.
        try:
            params = enforcer.get_resolved_parameters(action_db=action_db,
                                                      runnertype_db=runner_type_db,
                                                      params=params,
                                                      context=context,
                                                      additional_contexts=additional_contexts)

            LOG.info('Action parameters resolved to:')
            for param in six.iteritems(params):
                LOG.info('\t%s: %s', param[0], param[1])
            return True
        except (UndefinedError, ValueError) as e:
            LOG.error('Failed to resolve parameters\n\tOriginal error : %s', six.text_type(e))
            return False
        # BUGFIX: the original bare "except:" also swallowed KeyboardInterrupt
        # and SystemExit; restrict the catch-all to Exception.
        except Exception:
            LOG.exception('Failed to resolve parameters.')
            return False

    def _get_rule_db(self):
        """Load the rule from file or from the database (by ref)."""
        if self._rule_file_path:
            return self._get_rule_db_from_file(
                file_path=os.path.realpath(self._rule_file_path))
        elif self._rule_ref:
            return Rule.get_by_ref(self._rule_ref)
        raise ValueError('One of _rule_file_path or _rule_ref should be specified.')

    def _get_trigger_instance_db(self):
        """Load the trigger instance (and its trigger) from file or database."""
        if self._trigger_instance_file_path:
            return self._get_trigger_instance_db_from_file(
                file_path=os.path.realpath(self._trigger_instance_file_path))
        elif self._trigger_instance_id:
            trigger_instance_db = TriggerInstance.get_by_id(self._trigger_instance_id)
            trigger_db = Trigger.get_by_ref(trigger_instance_db.trigger)
            return trigger_instance_db, trigger_db
        raise ValueError('One of _trigger_instance_file_path or'
                         '_trigger_instance_id should be specified.')

    def _get_rule_db_from_file(self, file_path):
        """Build an (unsaved) RuleDB object from a rule definition file."""
        data = self._meta_loader.load(file_path=file_path)
        pack = data.get('pack', 'unknown')
        name = data.get('name', 'unknown')
        trigger = data['trigger']['type']
        criteria = data.get('criteria', None)
        action = data.get('action', {})
        rule_db = RuleDB(pack=pack, name=name, trigger=trigger, criteria=criteria, action=action,
                         enabled=True)
        rule_db.id = 'rule_tester_rule'
        return rule_db

    def _get_trigger_instance_db_from_file(self, file_path):
        """Build (unsaved) TriggerInstanceDB and TriggerDB objects from a file."""
        data = self._meta_loader.load(file_path=file_path)
        instance = TriggerInstanceDB(**data)
        instance.id = 'rule_tester_instance'
        trigger_ref = ResourceReference.from_string_reference(instance['trigger'])
        trigger_db = TriggerDB(pack=trigger_ref.pack, name=trigger_ref.name, type=trigger_ref.ref)
        return instance, trigger_db
| 41.125 | 99 | 0.658663 |
from __future__ import absolute_import
import os
import six
import mock
from jinja2.exceptions import UndefinedError
from st2common import log as logging
from st2common.content.loader import MetaLoader
from st2common.models.db.rule import RuleDB
from st2common.models.db.trigger import TriggerDB
from st2common.models.db.trigger import TriggerInstanceDB
from st2common.models.system.common import ResourceReference
from st2common.persistence.reactor import Rule, TriggerInstance, Trigger
from st2reactor.rules.enforcer import RuleEnforcer
from st2reactor.rules.matcher import RulesMatcher
__all__ = [
'RuleTester'
]
LOG = logging.getLogger(__name__)
class RuleTester(object):
def __init__(self, rule_file_path=None, rule_ref=None, trigger_instance_file_path=None,
trigger_instance_id=None):
self._rule_file_path = rule_file_path
self._rule_ref = rule_ref
self._trigger_instance_file_path = trigger_instance_file_path
self._trigger_instance_id = trigger_instance_id
self._meta_loader = MetaLoader()
def evaluate(self):
rule_db = self._get_rule_db()
trigger_instance_db, trigger_db = self._get_trigger_instance_db()
if rule_db.trigger != trigger_db.ref:
LOG.info('rule.trigger "%s" and trigger.ref "%s" do not match.',
rule_db.trigger, trigger_db.ref)
return False
matcher = RulesMatcher(trigger_instance=trigger_instance_db, trigger=trigger_db,
rules=[rule_db], extra_info=True)
matching_rules = matcher.get_matching_rules()
if len(matching_rules) < 1:
return False
enforcer = RuleEnforcer(trigger_instance=trigger_instance_db, rule=rule_db)
runner_type_db = mock.Mock()
runner_type_db.runner_parameters = {}
action_db = mock.Mock()
action_db.parameters = {}
params = rule_db.action.parameters
context, additional_contexts = enforcer.get_action_execution_context(action_db=action_db,
trace_context=None)
try:
params = enforcer.get_resolved_parameters(action_db=action_db,
runnertype_db=runner_type_db,
params=params,
context=context,
additional_contexts=additional_contexts)
LOG.info('Action parameters resolved to:')
for param in six.iteritems(params):
LOG.info('\t%s: %s', param[0], param[1])
return True
except (UndefinedError, ValueError) as e:
LOG.error('Failed to resolve parameters\n\tOriginal error : %s', six.text_type(e))
return False
except:
LOG.exception('Failed to resolve parameters.')
return False
def _get_rule_db(self):
if self._rule_file_path:
return self._get_rule_db_from_file(
file_path=os.path.realpath(self._rule_file_path))
elif self._rule_ref:
return Rule.get_by_ref(self._rule_ref)
raise ValueError('One of _rule_file_path or _rule_ref should be specified.')
def _get_trigger_instance_db(self):
if self._trigger_instance_file_path:
return self._get_trigger_instance_db_from_file(
file_path=os.path.realpath(self._trigger_instance_file_path))
elif self._trigger_instance_id:
trigger_instance_db = TriggerInstance.get_by_id(self._trigger_instance_id)
trigger_db = Trigger.get_by_ref(trigger_instance_db.trigger)
return trigger_instance_db, trigger_db
raise ValueError('One of _trigger_instance_file_path or'
'_trigger_instance_id should be specified.')
def _get_rule_db_from_file(self, file_path):
data = self._meta_loader.load(file_path=file_path)
pack = data.get('pack', 'unknown')
name = data.get('name', 'unknown')
trigger = data['trigger']['type']
criteria = data.get('criteria', None)
action = data.get('action', {})
rule_db = RuleDB(pack=pack, name=name, trigger=trigger, criteria=criteria, action=action,
enabled=True)
rule_db.id = 'rule_tester_rule'
return rule_db
def _get_trigger_instance_db_from_file(self, file_path):
data = self._meta_loader.load(file_path=file_path)
instance = TriggerInstanceDB(**data)
instance.id = 'rule_tester_instance'
trigger_ref = ResourceReference.from_string_reference(instance['trigger'])
trigger_db = TriggerDB(pack=trigger_ref.pack, name=trigger_ref.name, type=trigger_ref.ref)
return instance, trigger_db
| true | true |
790148dd4299989881099779cd6bebf759687808 | 133 | py | Python | basics/math.py | augustoscher/python-excercises | 502fb3c15597033ba19e32f871be12d347a9aa2a | [
"MIT"
] | null | null | null | basics/math.py | augustoscher/python-excercises | 502fb3c15597033ba19e32f871be12d347a9aa2a | [
"MIT"
] | null | null | null | basics/math.py | augustoscher/python-excercises | 502fb3c15597033ba19e32f871be12d347a9aa2a | [
"MIT"
] | null | null | null | print()
print("--- Math ---")
print(1+1)  # addition -> 2
print(1*3)  # multiplication -> 3
print(1/2)  # true division -> 0.5 (Python 3 "/" always yields a float)
print(3**2)  # exponentiation -> 9
print(4%2)  # modulo -> 0
print(4%2 == 0)  # parity check -> True
print(type(1))  # <class 'int'>
print(type(1.0)) | 13.3 | 21 | 0.586466 | print()
print("--- Math ---")
print(1+1)
print(1*3)
print(1/2)
print(3**2)
print(4%2)
print(4%2 == 0)
print(type(1))
print(type(1.0)) | true | true |
7901498ed94e775640b57a61992c701e7ec1f47f | 2,606 | py | Python | forOutput.py | laughingclouds/dt-mst-project | cec68a4d5065544b2de11268ced82ec4d9cc7aca | [
"MIT"
] | 1 | 2021-12-23T13:40:57.000Z | 2021-12-23T13:40:57.000Z | forOutput.py | laughingclouds/dt-mst-project | cec68a4d5065544b2de11268ced82ec4d9cc7aca | [
"MIT"
] | null | null | null | forOutput.py | laughingclouds/dt-mst-project | cec68a4d5065544b2de11268ced82ec4d9cc7aca | [
"MIT"
] | null | null | null | """Python 3.9.5"""
import cv2
import HandTrackingModule as htm
def thumbIncrementCheck(lmList: list[list[int]]) -> int:
    """Return 1 if the thumb is up, else 0, regardless of handedness.

    Handedness is inferred by comparing the x coordinate of the thumb tip
    (landmark 4) with the pinky base (landmark 17); the thumb-up test then
    compares the thumb tip against landmark 2 in the matching direction.
    """
    thumb_tip_x = lmList[4][1]
    thumb_base_x = lmList[2][1]
    pinky_x = lmList[17][1]
    if thumb_tip_x > pinky_x:
        # RIGHT hand: thumb is up when its tip is not left of landmark 2.
        is_up = thumb_tip_x >= thumb_base_x
    else:
        # LEFT hand: mirrored comparison.
        is_up = thumb_tip_x <= thumb_base_x
    return 1 if is_up else 0
def textOutput(count, cc) -> str:
    """Map a finger count to a rock-paper-scissors gesture name.

    Two raised fingers that are specifically index+middle (cc == 2) mean
    SCISSOR; zero fingers mean ROCK; five mean PAPER; anything else NOTHING.
    """
    if count == 2 and cc == 2:
        return "SCISSOR"
    if count == 0:
        return "ROCK"
    if count == 5:
        return "PAPER"
    return "NOTHING"
def main():
    """Read a hand image, count raised fingers, and overlay the gesture name."""
    # cap = cv2.VideoCapture(0)  # opens the camera; use cap.read() below for live video
    detector = htm.HandDetector()
    while True:
        # BUGFIX: cv2.imread() returns a single image array (or None), not a
        # (success, frame) tuple like VideoCapture.read(); the original
        # "success, img = cv2.imread(...)" unpacking could never work.
        img = cv2.imread("/home/laughinglouds/Pictures/Webcam/2021-04-13-133250.jpg")
        img = detector.findHands(img)
        lmlist = detector.findPosition(img, draw=True)
        # If a hand is not detected the list is empty, else it holds 21 landmarks.
        hand_exists = len(lmlist)
        tipIDs = [4, 8, 12, 16, 20]  # fingertip landmark ids
        dipIDs = [2, 7, 11, 15, 19]  # landmarks just below the tips
        count = 0  # how many fingers are up
        cc = 0     # index/middle counter used later to detect "scissor"
        if hand_exists:
            # Loop over the five fingers; index 0 is the thumb.
            for i in range(0, 5):
                if i == 0:
                    count += thumbIncrementCheck(lmlist)
                else:
                    # 8: index finger, 12: middle finger
                    if (lmlist[tipIDs[i]][2] < lmlist[dipIDs[i]][2]) and (
                        tipIDs[i] in (8, 12)  # if either index or middle
                    ):
                        count += 1
                        cc += 1
                    elif lmlist[tipIDs[i]][2] < lmlist[dipIDs[i]][2]:
                        count += 1
        else:
            count = -1
        txt = textOutput(count, cc)
        # (10, 140) is the on-screen coordinate of the label
        cv2.putText(img, str(txt), (10, 140), cv2.FONT_HERSHEY_PLAIN, 3, (0, 0, 255), 3)
        cv2.imshow("Image", img)
        # close key isn't working for me
        # os: linux mint 20.1
        if cv2.waitKey(1) & 0xFF == ord("q"):
            break
if __name__ == "__main__":
main()
| 29.954023 | 94 | 0.506523 | import cv2
import HandTrackingModule as htm
def thumbIncrementCheck(lmList: list[list[int]]) -> int:
count = 0
t_x = lmList[4][1]
p_x = lmList[17][1]
if t_x > p_x:
if lmList[4][1] >= lmList[2][1]:
count += 1
else:
if lmList[4][1] <= lmList[2][1]:
count += 1
return count
def textOutput(count, cc) -> str:
text = "NOTHING"
if (count, cc) == (2, 2):
text = "SCISSOR"
elif count == 0:
text = "ROCK"
elif count == 5:
text = "PAPER"
else:
pass
return text
def main():
m.HandDetector()
while True:
success, img = cv2.imread("/home/laughinglouds/Pictures/Webcam/2021-04-13-133250.jpg")
img = detector.findHands(img)
lmlist = detector.findPosition(img, draw=True)
hand_exists = len(lmlist)
tipIDs = [4, 8, 12, 16, 20]
dipIDs = [2, 7, 11, 15, 19]
count = 0
cc = 0
if hand_exists:
for i in range(0, 5):
if i == 0:
count += thumbIncrementCheck(lmlist)
else:
if (lmlist[tipIDs[i]][2] < lmlist[dipIDs[i]][2]) and (
tipIDs[i] in (8, 12)
):
count += 1
cc += 1
elif lmlist[tipIDs[i]][2] < lmlist[dipIDs[i]][2]:
count += 1
else:
count = -1
txt = textOutput(count, cc)
cv2.putText(img, str(txt), (10, 140), cv2.FONT_HERSHEY_PLAIN, 3, (0, 0, 255), 3)
cv2.imshow("Image", img)
# os: linux mint 20.1
if cv2.waitKey(1) & 0xFF == ord("q"):
break
if __name__ == "__main__":
main()
| true | true |
79014af2e7225e0cb519cbbac0773a3b3e85bd74 | 821 | py | Python | bot/src/utils/player.py | SlenderCylinder/pomoji | 50a11b969faff28d55b0d6aef48a7f0f2888b66a | [
"MIT"
] | 1 | 2021-10-02T18:27:07.000Z | 2021-10-02T18:27:07.000Z | bot/src/utils/player.py | SlenderCylinder/pomoji | 50a11b969faff28d55b0d6aef48a7f0f2888b66a | [
"MIT"
] | null | null | null | bot/src/utils/player.py | SlenderCylinder/pomoji | 50a11b969faff28d55b0d6aef48a7f0f2888b66a | [
"MIT"
] | null | null | null | from asyncio import sleep
from discord import FFmpegPCMAudio, PCMVolumeTransformer
from configs import bot_enum
from ..session.Session import Session
async def alert(session: Session):
    """Play the audio alert matching the session's state in its voice channel.

    Chooses POMO_END by default, LONG_BREAK_START when the completed-pomodoro
    count hits the configured interval, and POMO_START for other non-pomodoro
    states; does nothing if the bot is not connected to voice.
    """
    vc = session.ctx.voice_client
    if not vc:
        # Not connected to a voice channel -- nothing to play.
        return
    path = bot_enum.AlertPath.POMO_END
    if session.state == bot_enum.State.COUNTDOWN:
        # Countdown keeps the default POMO_END sound.
        pass
    elif session.stats.pomos_completed % session.settings.intervals == 0:
        path = bot_enum.AlertPath.LONG_BREAK_START
    elif session.state != bot_enum.State.POMODORO:
        path = bot_enum.AlertPath.POMO_START
    source = PCMVolumeTransformer(FFmpegPCMAudio(path, executable='/usr/bin/ffmpeg'),
                                  volume=0.1)
    # Cut off any alert still playing before starting the new one.
    if vc.is_playing():
        vc.stop()
    vc.play(source)
    while vc.is_playing():
        # Poll once per second until playback finishes before returning.
        await sleep(1)
| 29.321429 | 85 | 0.678441 | from asyncio import sleep
from discord import FFmpegPCMAudio, PCMVolumeTransformer
from configs import bot_enum
from ..session.Session import Session
async def alert(session: Session):
vc = session.ctx.voice_client
if not vc:
return
path = bot_enum.AlertPath.POMO_END
if session.state == bot_enum.State.COUNTDOWN:
pass
elif session.stats.pomos_completed % session.settings.intervals == 0:
path = bot_enum.AlertPath.LONG_BREAK_START
elif session.state != bot_enum.State.POMODORO:
path = bot_enum.AlertPath.POMO_START
source = PCMVolumeTransformer(FFmpegPCMAudio(path, executable='/usr/bin/ffmpeg'),
volume=0.1)
if vc.is_playing():
vc.stop()
vc.play(source)
while vc.is_playing():
await sleep(1)
| true | true |
79014fb49be25fcd4e3e5a7e64bdb05ad9edbbd2 | 7,622 | py | Python | tools/calibration-96tof1/tof_calib/regwrite_generator.py | AkshayKurhade/aditof_sdk | dd6146e7cb65b56d4ce61e4fd771e94ea4976a89 | [
"BSD-3-Clause"
] | 53 | 2019-09-09T21:17:45.000Z | 2022-01-29T13:43:50.000Z | tools/calibration-96tof1/tof_calib/regwrite_generator.py | AkshayKurhade/aditof_sdk | dd6146e7cb65b56d4ce61e4fd771e94ea4976a89 | [
"BSD-3-Clause"
] | 352 | 2019-05-13T16:22:27.000Z | 2022-03-28T08:44:59.000Z | tools/calibration-96tof1/tof_calib/regwrite_generator.py | AkshayKurhade/aditof_sdk | dd6146e7cb65b56d4ce61e4fd771e94ea4976a89 | [
"BSD-3-Clause"
] | 52 | 2019-05-17T08:09:53.000Z | 2022-03-08T06:54:48.000Z | #
# BSD 3-Clause License
#
# Copyright (c) 2019, Analog Devices, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# This class is used to generate delay register writes
import re
class regwrite_generator(object):
    def __init__(self, seq_file):
        """Initialize with the path of the sequence definition file.

        :param seq_file: file parsed later by create_seq_info()
        """
        self.code_dict = {}  # register address -> data, filled by create_code_dict()
        self.data_dict = {}  # mode -> sequence -> field info, filled by create_seq_info()
        self.seq_file = seq_file
def create_code_dict(self, text):
reg = re.compile(r'([0-9a-f]{4} [0-9a-f]{4})')
rawinfo = re.findall(reg, text)
for x in rawinfo:
s_line = re.split(r'\s', x)
addr = int(s_line[0],16)
data = int(s_line[2],16)
self.code_dict[addr] = data
return self.code_dict
def create_seq_info(self):
data_name = ['PulseCount', 'LD1_Tap', 'LD2_Tap', 'LD3_Tap', 'LD4_Tap', 'LD5_Tap', 'Pos_Off', 'Vec_Off', 'Start_Loc', 'Tbl_Len']
reg = re.compile(r'([0-9a-zA-Z]+)')
myfile = open(self.seq_file, 'r')
for line in myfile:
rawInfo = re.findall(reg, line)
if len(rawInfo) == 1:
currLabel = rawInfo[0]
if len(rawInfo) == 4:
curr_mode = rawInfo[1]
curr_seq = rawInfo[3]
i = 0
if curr_mode in self.data_dict:
self.data_dict[curr_mode][curr_seq] = {}
else:
self.data_dict[curr_mode] = {}
self.data_dict[curr_mode][curr_seq] = {}
for i in range(10):
rawInfo = re.findall(reg, myfile.readline())
self.data_dict[curr_mode][curr_seq][data_name[i]] = [int(rawInfo[0], 16), int(rawInfo[1], 16)]
myfile.close()
return self.data_dict
# Given mode, sweep specified ld for all sequences
def delay_sequences(self, mode, delay, ld):
delay_writes = {}
for x in self.data_dict[str(mode)]:
writes = self.delay_sequence_ld(delay, ld, self.data_dict[str(mode)][x])
delay_writes = dict(delay_writes, **writes)
return delay_writes
def generate_delay_writes(self, mode, delay_min, delay_max, ld):
writes_dict = {}
for x in range(delay_min, delay_max):
writes_dict[x] = self.delay_sequences(mode, x, ld)
return writes_dict
def setbit(self, bit, vec):
bit = 1 << bit
vec = vec | bit
return vec
def unsetbit(self, bit, vec):
bit = 1 << bit
bit = ~bit
vec = vec & bit
return vec
def get_blanking_values(self, ld, seq_dict):
pos_len = seq_dict['Tbl_Len'][1] & 0x00ff
vec_len = (seq_dict['Tbl_Len'][1] & 0xff00) >> 8
if pos_len != vec_len:
print('Table length not equal')
start_loc = seq_dict['Start_Loc'][1]
pos_len = seq_dict['Tbl_Len'][1] & 0x00ff
vec_len = (seq_dict['Tbl_Len'][1] & 0xff00) >> 8
pos_ptr = (seq_dict['Pos_Off'][1] * 2) + 0x4000
vec_ptr = (seq_dict['Vec_Off'][1] * 2) + 0x4000
blk_pos = -1
blk_neg = -1
for i in range(vec_len):
curr_vec = self.code_dict[vec_ptr + i]
if ((curr_vec >> (ld - 1)) & 0x0001) == 1:
if blk_pos == -1:
blk_pos = i
elif blk_neg == -1:
blk_neg = i
start_pos = start_loc + 2
pos_tbl = []
for i in range(pos_len):
if i == 0:
pos_tbl.append(self.code_dict[pos_ptr+i] + start_pos)
else:
pos_tbl.append(self.code_dict[pos_ptr+i] + pos_tbl[i-1])
blk_pos = pos_tbl[blk_pos]
blk_neg = pos_tbl[blk_neg]
return blk_pos, blk_neg
# Delay Sequence LD
def delay_sequence_ld(self, delay, ld, seq_dict):
taps = seq_dict['LD' + str(ld) + '_Tap'][1]
taps_addr = seq_dict['LD' + str(ld) + '_Tap'][0]
tap_pos = taps & 0x00ff
tap_neg = (taps & 0xff00) >> 8
blk_pos, blk_neg = self.get_blanking_values(ld, seq_dict)
blk_pos_shift = 0
blk_neg_shift = 0
tap_pos = tap_pos + delay
tap_neg = tap_neg + delay
while tap_pos >= 128:
blk_pos_shift += 1
tap_pos -= 128
while tap_neg >= 128:
blk_neg_shift += 1
tap_neg -= 128
while tap_pos < 0:
blk_pos_shift -= 1
tap_pos += 128
while tap_neg < 0:
blk_neg_shift -= 1
tap_neg += 128
blk_pos = blk_pos + blk_pos_shift
blk_neg = blk_neg + blk_neg_shift
tap_write = {}
tap_write[hex(taps_addr)] = (tap_neg << 8) + tap_pos
blk_writes = self.set_blanking_values(blk_pos, blk_neg, ld, seq_dict)
writes = dict(tap_write, **blk_writes)
return writes
# Set blanking vals
def set_blanking_values(self, blk_pos, blk_neg, ld, seq_dict):
start_loc = seq_dict['Start_Loc'][1]
pos_len = seq_dict['Tbl_Len'][1] & 0x00ff
vec_len = (seq_dict['Tbl_Len'][1] & 0xff00) >> 8
pos_ptr = (seq_dict['Pos_Off'][1] * 2) + 0x4000
vec_ptr = (seq_dict['Vec_Off'][1] * 2) + 0x4000
start_pos = start_loc + 2
pos_tbl = []
for i in range(pos_len):
if i == 0:
pos_tbl.append(self.code_dict[pos_ptr+i] + start_pos)
else:
pos_tbl.append(self.code_dict[pos_ptr+i] + pos_tbl[i-1])
blk_pos_loc = pos_tbl.index(blk_pos)
blk_neg_loc = pos_tbl.index(blk_neg)
blk_writes = {}
for i in range(vec_len):
if i == blk_pos_loc:
curr_vec = self.setbit(ld-1, self.code_dict[vec_ptr + i])
elif i == blk_neg_loc:
curr_vec = self.setbit(ld-1, self.code_dict[vec_ptr + i])
else:
curr_vec = self.unsetbit(ld-1, self.code_dict[vec_ptr + i])
blk_writes[hex(vec_ptr + i)] = curr_vec
return blk_writes
| 34.963303 | 135 | 0.575571 |
import re
class regwrite_generator(object):
    """Generate delay register writes from a sequence (.seq) file plus a
    register-dump text (address/data pairs)."""
    def __init__(self, seq_file):
        # code_dict: address -> data word; data_dict: mode -> seq -> field -> [addr, value]
        self.code_dict = {}
        self.data_dict = {}
        self.seq_file = seq_file
    def create_code_dict(self, text):
        """Parse hex 'addr data' pairs out of *text* into self.code_dict."""
        reg = re.compile(r'([0-9a-f]{4} [0-9a-f]{4})')
        rawinfo = re.findall(reg, text)
        for x in rawinfo:
            # NOTE(review): s_line[2] implies two whitespace chars between
            # address and data in the match -- confirm dump format.
            s_line = re.split(r'\s', x)
            addr = int(s_line[0],16)
            data = int(s_line[2],16)
            self.code_dict[addr] = data
        return self.code_dict
    def create_seq_info(self):
        """Parse self.seq_file into self.data_dict (layout in __init__)."""
        data_name = ['PulseCount', 'LD1_Tap', 'LD2_Tap', 'LD3_Tap', 'LD4_Tap', 'LD5_Tap', 'Pos_Off', 'Vec_Off', 'Start_Loc', 'Tbl_Len']
        reg = re.compile(r'([0-9a-zA-Z]+)')
        myfile = open(self.seq_file, 'r')
        for line in myfile:
            rawInfo = re.findall(reg, line)
            # One token: section label; four tokens: (mode, sequence) header,
            # followed by 10 lines of [address, value] pairs for data_name[i].
            if len(rawInfo) == 1:
                currLabel = rawInfo[0]
            if len(rawInfo) == 4:
                curr_mode = rawInfo[1]
                curr_seq = rawInfo[3]
                i = 0
                if curr_mode in self.data_dict:
                    self.data_dict[curr_mode][curr_seq] = {}
                else:
                    self.data_dict[curr_mode] = {}
                    self.data_dict[curr_mode][curr_seq] = {}
                for i in range(10):
                    rawInfo = re.findall(reg, myfile.readline())
                    self.data_dict[curr_mode][curr_seq][data_name[i]] = [int(rawInfo[0], 16), int(rawInfo[1], 16)]
        myfile.close()
        return self.data_dict
    def delay_sequences(self, mode, delay, ld):
        """Return merged writes delaying *ld* by *delay* for every sequence
        of *mode*."""
        delay_writes = {}
        for x in self.data_dict[str(mode)]:
            writes = self.delay_sequence_ld(delay, ld, self.data_dict[str(mode)][x])
            delay_writes = dict(delay_writes, **writes)
        return delay_writes
    def generate_delay_writes(self, mode, delay_min, delay_max, ld):
        """Return {delay: writes} for each delay in [delay_min, delay_max)."""
        writes_dict = {}
        for x in range(delay_min, delay_max):
            writes_dict[x] = self.delay_sequences(mode, x, ld)
        return writes_dict
    def setbit(self, bit, vec):
        """Return *vec* with bit index *bit* set."""
        bit = 1 << bit
        vec = vec | bit
        return vec
    def unsetbit(self, bit, vec):
        """Return *vec* with bit index *bit* cleared."""
        bit = 1 << bit
        bit = ~bit
        vec = vec & bit
        return vec
    def get_blanking_values(self, ld, seq_dict):
        """Locate the positive/negative blanking positions for *ld* by
        scanning the vector table and mapping hits through the accumulated
        position table."""
        pos_len = seq_dict['Tbl_Len'][1] & 0x00ff
        vec_len = (seq_dict['Tbl_Len'][1] & 0xff00) >> 8
        if pos_len != vec_len:
            print('Table length not equal')
        start_loc = seq_dict['Start_Loc'][1]
        pos_len = seq_dict['Tbl_Len'][1] & 0x00ff
        vec_len = (seq_dict['Tbl_Len'][1] & 0xff00) >> 8
        # Tables are addressed as word offsets from base 0x4000.
        pos_ptr = (seq_dict['Pos_Off'][1] * 2) + 0x4000
        vec_ptr = (seq_dict['Vec_Off'][1] * 2) + 0x4000
        blk_pos = -1
        blk_neg = -1
        for i in range(vec_len):
            curr_vec = self.code_dict[vec_ptr + i]
            if ((curr_vec >> (ld - 1)) & 0x0001) == 1:
                if blk_pos == -1:
                    blk_pos = i
                elif blk_neg == -1:
                    blk_neg = i
        start_pos = start_loc + 2
        # Positions are stored as deltas; accumulate into absolute positions.
        pos_tbl = []
        for i in range(pos_len):
            if i == 0:
                pos_tbl.append(self.code_dict[pos_ptr+i] + start_pos)
            else:
                pos_tbl.append(self.code_dict[pos_ptr+i] + pos_tbl[i-1])
        blk_pos = pos_tbl[blk_pos]
        blk_neg = pos_tbl[blk_neg]
        return blk_pos, blk_neg
    def delay_sequence_ld(self, delay, ld, seq_dict):
        """Return writes delaying *ld* by *delay* in one sequence; taps wrap
        modulo 128, each wrap shifting the blanking position by one slot."""
        taps = seq_dict['LD' + str(ld) + '_Tap'][1]
        taps_addr = seq_dict['LD' + str(ld) + '_Tap'][0]
        # Low byte = positive tap, high byte = negative tap.
        tap_pos = taps & 0x00ff
        tap_neg = (taps & 0xff00) >> 8
        blk_pos, blk_neg = self.get_blanking_values(ld, seq_dict)
        blk_pos_shift = 0
        blk_neg_shift = 0
        tap_pos = tap_pos + delay
        tap_neg = tap_neg + delay
        while tap_pos >= 128:
            blk_pos_shift += 1
            tap_pos -= 128
        while tap_neg >= 128:
            blk_neg_shift += 1
            tap_neg -= 128
        while tap_pos < 0:
            blk_pos_shift -= 1
            tap_pos += 128
        while tap_neg < 0:
            blk_neg_shift -= 1
            tap_neg += 128
        blk_pos = blk_pos + blk_pos_shift
        blk_neg = blk_neg + blk_neg_shift
        tap_write = {}
        tap_write[hex(taps_addr)] = (tap_neg << 8) + tap_pos
        blk_writes = self.set_blanking_values(blk_pos, blk_neg, ld, seq_dict)
        writes = dict(tap_write, **blk_writes)
        return writes
    def set_blanking_values(self, blk_pos, blk_neg, ld, seq_dict):
        """Return vector-table writes setting *ld*'s blanking bit at
        *blk_pos*/*blk_neg* and clearing it everywhere else."""
        start_loc = seq_dict['Start_Loc'][1]
        pos_len = seq_dict['Tbl_Len'][1] & 0x00ff
        vec_len = (seq_dict['Tbl_Len'][1] & 0xff00) >> 8
        pos_ptr = (seq_dict['Pos_Off'][1] * 2) + 0x4000
        vec_ptr = (seq_dict['Vec_Off'][1] * 2) + 0x4000
        start_pos = start_loc + 2
        pos_tbl = []
        for i in range(pos_len):
            if i == 0:
                pos_tbl.append(self.code_dict[pos_ptr+i] + start_pos)
            else:
                pos_tbl.append(self.code_dict[pos_ptr+i] + pos_tbl[i-1])
        blk_pos_loc = pos_tbl.index(blk_pos)
        blk_neg_loc = pos_tbl.index(blk_neg)
        blk_writes = {}
        for i in range(vec_len):
            if i == blk_pos_loc:
                curr_vec = self.setbit(ld-1, self.code_dict[vec_ptr + i])
            elif i == blk_neg_loc:
                curr_vec = self.setbit(ld-1, self.code_dict[vec_ptr + i])
            else:
                curr_vec = self.unsetbit(ld-1, self.code_dict[vec_ptr + i])
            blk_writes[hex(vec_ptr + i)] = curr_vec
        return blk_writes
| true | true |
79014fb79c69546571e7d06f362eb01471029ddd | 5,941 | py | Python | treedb/backend/models.py | glottolog/treedb | 4aa735632d6add5c81cc1d7be42833446e2a447a | [
"MIT"
] | 4 | 2019-07-13T14:39:01.000Z | 2021-04-17T13:38:47.000Z | treedb/backend/models.py | glottolog/treedb | 4aa735632d6add5c81cc1d7be42833446e2a447a | [
"MIT"
] | 1 | 2020-12-02T12:02:47.000Z | 2020-12-02T15:05:25.000Z | treedb/backend/models.py | glottolog/treedb | 4aa735632d6add5c81cc1d7be42833446e2a447a | [
"MIT"
] | 2 | 2020-04-11T19:46:48.000Z | 2020-04-13T19:40:24.000Z | """Dataset, producer, and config metadata."""
import logging
import warnings
import sqlalchemy as sa
from .._globals import REGISTRY as registry
from .. import _tools
from .. import backend as _backend
__all__ = ['Dataset', 'Producer', 'Config']
log = logging.getLogger(__name__)
@registry.mapped
class Dataset:
    """Git commit loaded into the database (single-row table)."""

    __tablename__ = '__dataset__'

    # CheckConstraint('id = 1') enforces at most one row in this table.
    id = sa.Column(sa.Integer, sa.CheckConstraint('id = 1'), primary_key=True)

    title = sa.Column(sa.Text, sa.CheckConstraint("title != ''"), nullable=False)

    git_commit = sa.Column(sa.String(40), sa.CheckConstraint('length(git_commit) = 40'),
                           nullable=False, unique=True)
    git_describe = sa.Column(sa.Text, sa.CheckConstraint("git_describe != ''"),
                             nullable=False, unique=True)
    clean = sa.Column(sa.Boolean(create_constraint=True), nullable=False)

    version = sa.Column(sa.Text, sa.CheckConstraint("version != ''"))

    exclude_raw = sa.Column(sa.Boolean(create_constraint=True), nullable=False)

    @classmethod
    def get_dataset(cls, *, bind, strict, fallback=None):
        """Return the single dataset row from *bind* as a mapping.

        A missing or empty table yields *fallback* (unless *strict*);
        any other failure raises RuntimeError chained to the cause.
        """
        table = cls.__tablename__
        log.debug('read %r from %r', table, bind)
        try:
            result, = _backend.iterrows(sa.select(cls), mappings=True, bind=bind)
        except sa.exc.OperationalError as e:
            if 'no such table' in e.orig.args[0]:
                pass
            else:
                log.exception('error selecting %r', table)
                if strict:  # pragma: no cover
                    # Bug fix: RuntimeError does not %-format its arguments,
                    # so build the message explicitly.
                    raise RuntimeError(f'failed to select {table!r}'
                                       f' from {bind!r}') from e
            return fallback
        except ValueError as e:
            log.exception('error selecting %r', table)
            if 'not enough values to unpack' in e.args[0] and not strict:
                return fallback
            else:  # pragma: no cover
                raise RuntimeError(f'failed to select {table!r}'
                                   f' from {bind!r}') from e
        except Exception as e:  # pragma: no cover
            log.exception('error selecting %r', table)
            raise RuntimeError(f'failed to select {table!r}'
                               f' from {bind!r}') from e
        else:
            return result

    @classmethod
    def log_dataset(cls, params, *,
                    ignore_dirty: bool = False,
                    also_print: bool = False, print_file=None):
        """Log (and optionally print) the dataset *params* mapping.

        Warns unless the checkout was clean or *ignore_dirty* is set.
        """
        name = cls.__tablename__
        log.info('git describe %(git_describe)r clean: %(clean)r', params)
        log.debug('%s.title: %r', name, params['title'])
        log.info('%s.git_commit: %r', name, params['git_commit'])
        if 'version' in params:
            log.info('%s.version: %r', name, params['version'])
        log.debug('%s.exclude_raw: %r', name, params['exclude_raw'])

        if also_print or print_file is not None:
            print('git describe {git_describe!r}'
                  ' clean: {clean!r}'.format_map(params),
                  file=print_file)
            # Bug fix: dropped a stray trailing quote from this output line.
            print(f"{name}.title: {params['title']!r}",
                  file=print_file)
            print(f"{name}.git_commit: {params['git_commit']!r}",
                  file=print_file)
            if 'version' in params:
                print(f"{name}.version: {params['version']!r}",
                      file=print_file)
            print(f"{name}.exclude_raw: {params['exclude_raw']!r}",
                  file=print_file)

        if not params['clean'] and not ignore_dirty:
            warnings.warn(f'{name} not clean,'
                          ' pass ignore_dirty=True to disable')  # pragma: no cover
@registry.mapped
class Producer:
    """Name and version of the package that created a __dataset__."""
    __tablename__ = '__producer__'
    # CheckConstraint('id = 1') enforces at most one row in this table.
    id = sa.Column(sa.Integer, sa.CheckConstraint('id = 1'), primary_key=True)
    name = sa.Column(sa.Text, sa.CheckConstraint("name != ''"),
                     unique=True, nullable=False)
    version = sa.Column(sa.Text, sa.CheckConstraint("version != ''"),
                        nullable=False)
    @classmethod
    def get_producer(cls, *, bind):
        """Return the single producer row from *bind* as a mapping."""
        result, = _backend.iterrows(sa.select(cls), mappings=True, bind=bind)
        return result
    @classmethod
    def log_producer(cls, params, *, also_print=False, print_file=None):
        """Log (and optionally print) producer name and version."""
        name = cls.__tablename__
        log.info('%s.name: %s', name, params['name'])
        log.info('%s.version: %s', name, params['version'])
        if also_print or print_file is not None:
            print(f"{name}.name: {params['name']}", file=print_file)
            print(f"{name}.version: {params['version']}", file=print_file)
@registry.mapped
class Config:
    """Configuration setting from ``glottolog/config/*.ini``."""
    __tablename__ = '_config'
    # Composite primary key: one row per (file, section, option).
    filename = sa.Column(sa.String, sa.CheckConstraint("filename != ''"),
                         primary_key=True)
    section = sa.Column(sa.String, sa.CheckConstraint("section != ''"),
                        primary_key=True)
    option = sa.Column(sa.String, sa.CheckConstraint("option != ''"),
                       primary_key=True)
    value = sa.Column(sa.Text, sa.CheckConstraint("value != ''"),
                      nullable=False)
    # Source line number of the option within its file (for round-tripping).
    line = sa.Column(sa.Integer, sa.CheckConstraint('line > 0'),
                     nullable=False)
    __table_args__ = (sa.UniqueConstraint(filename, line),
                      {'info': {'without_rowid': True}})
    @classmethod
    def load(cls, filename: str, *, bind,
             _groupby_section=_tools.groupby_itemgetter(0)):
        """Return {section: {option: value}} for *filename* from *bind*."""
        select_values = (sa.select(Config.section, Config.option, Config.value)
                        .filter_by(filename=filename)
                        .order_by('section', 'option'))
        result = _backend.iterrows(select_values, bind=bind)
        return {section: {option: value for _, option, value in grp}
                for section, grp in _groupby_section(result)}
| 38.083333 | 89 | 0.586265 |
import logging
import warnings
import sqlalchemy as sa
from .._globals import REGISTRY as registry
from .. import _tools
from .. import backend as _backend
__all__ = ['Dataset', 'Producer', 'Config']
log = logging.getLogger(__name__)
@registry.mapped
class Dataset:
    """Git commit loaded into the database (single-row table)."""
    __tablename__ = '__dataset__'
    # CheckConstraint('id = 1') enforces at most one row in this table.
    id = sa.Column(sa.Integer, sa.CheckConstraint('id = 1'), primary_key=True)
    title = sa.Column(sa.Text, sa.CheckConstraint("title != ''"), nullable=False)
    git_commit = sa.Column(sa.String(40), sa.CheckConstraint('length(git_commit) = 40'),
                           nullable=False, unique=True)
    git_describe = sa.Column(sa.Text, sa.CheckConstraint("git_describe != ''"),
                             nullable=False, unique=True)
    clean = sa.Column(sa.Boolean(create_constraint=True), nullable=False)
    version = sa.Column(sa.Text, sa.CheckConstraint("version != ''"))
    exclude_raw = sa.Column(sa.Boolean(create_constraint=True), nullable=False)
    @classmethod
    def get_dataset(cls, *, bind, strict, fallback=None):
        """Return the single dataset row as a mapping, or *fallback* when
        the table is missing/empty and *strict* is false."""
        table = cls.__tablename__
        log.debug('read %r from %r', table, bind)
        try:
            result, = _backend.iterrows(sa.select(cls), mappings=True, bind=bind)
        except sa.exc.OperationalError as e:
            if 'no such table' in e.orig.args[0]:
                pass
            else:
                log.exception('error selecting %r', table)
                if strict:
                    # NOTE(review): RuntimeError does not %-format its args;
                    # the message stays an unformatted tuple -- likely a bug.
                    raise RuntimeError('failed to select %r from %r', table, bind) from e
            return fallback
        except ValueError as e:
            log.exception('error selecting %r', table)
            if 'not enough values to unpack' in e.args[0] and not strict:
                return fallback
            else:
                raise RuntimeError('failed to select %r from %r', table, bind) from e
        except Exception as e:
            log.exception('error selecting %r', table)
            raise RuntimeError('failed to select %r from %r', table, bind) from e
        else:
            return result
    @classmethod
    def log_dataset(cls, params, *,
                    ignore_dirty: bool = False,
                    also_print: bool = False, print_file=None):
        """Log (and optionally print) the dataset *params* mapping; warn
        unless the checkout was clean or *ignore_dirty* is set."""
        name = cls.__tablename__
        log.info('git describe %(git_describe)r clean: %(clean)r', params)
        log.debug('%s.title: %r', name, params['title'])
        log.info('%s.git_commit: %r', name, params['git_commit'])
        if 'version' in params:
            log.info('%s.version: %r', name, params['version'])
        log.debug('%s.exclude_raw: %r', name, params['exclude_raw'])
        if also_print or print_file is not None:
            print('git describe {git_describe!r}'
                  ' clean: {clean!r}'.format_map(params),
                  file=print_file)
            # NOTE(review): stray trailing quote in this output -- typo?
            print(f"{name}.title: {params['title']!r}'",
                  file=print_file)
            print(f"{name}.git_commit: {params['git_commit']!r}",
                  file=print_file)
            if 'version' in params:
                print(f"{name}.version: {params['version']!r}",
                      file=print_file)
            print(f"{name}.exclude_raw: {params['exclude_raw']!r}",
                  file=print_file)
        if not params['clean'] and not ignore_dirty:
            warnings.warn(f'{name} not clean,'
                          ' pass ignore_dirty=True to disable')  # pragma: no cover
@registry.mapped
class Producer:
    """Name and version of the package that created a __dataset__."""
    __tablename__ = '__producer__'
    # CheckConstraint('id = 1') enforces at most one row in this table.
    id = sa.Column(sa.Integer, sa.CheckConstraint('id = 1'), primary_key=True)
    name = sa.Column(sa.Text, sa.CheckConstraint("name != ''"),
                     unique=True, nullable=False)
    version = sa.Column(sa.Text, sa.CheckConstraint("version != ''"),
                        nullable=False)
    @classmethod
    def get_producer(cls, *, bind):
        """Return the single producer row from *bind* as a mapping."""
        result, = _backend.iterrows(sa.select(cls), mappings=True, bind=bind)
        return result
    @classmethod
    def log_producer(cls, params, *, also_print=False, print_file=None):
        """Log (and optionally print) producer name and version."""
        name = cls.__tablename__
        log.info('%s.name: %s', name, params['name'])
        log.info('%s.version: %s', name, params['version'])
        if also_print or print_file is not None:
            print(f"{name}.name: {params['name']}", file=print_file)
            print(f"{name}.version: {params['version']}", file=print_file)
@registry.mapped
class Config:
    """Configuration setting from ``glottolog/config/*.ini``."""
    __tablename__ = '_config'
    # Composite primary key: one row per (file, section, option).
    filename = sa.Column(sa.String, sa.CheckConstraint("filename != ''"),
                         primary_key=True)
    section = sa.Column(sa.String, sa.CheckConstraint("section != ''"),
                        primary_key=True)
    option = sa.Column(sa.String, sa.CheckConstraint("option != ''"),
                       primary_key=True)
    value = sa.Column(sa.Text, sa.CheckConstraint("value != ''"),
                      nullable=False)
    # Source line number of the option within its file (for round-tripping).
    line = sa.Column(sa.Integer, sa.CheckConstraint('line > 0'),
                     nullable=False)
    __table_args__ = (sa.UniqueConstraint(filename, line),
                      {'info': {'without_rowid': True}})
    @classmethod
    def load(cls, filename: str, *, bind,
             _groupby_section=_tools.groupby_itemgetter(0)):
        """Return {section: {option: value}} for *filename* from *bind*."""
        select_values = (sa.select(Config.section, Config.option, Config.value)
                        .filter_by(filename=filename)
                        .order_by('section', 'option'))
        result = _backend.iterrows(select_values, bind=bind)
        return {section: {option: value for _, option, value in grp}
                for section, grp in _groupby_section(result)}
| true | true |
7901512d30d8703abdf0b24e4949e10e4f4a3655 | 1,443 | py | Python | zappa_boilerplate/user/forms.py | 402900550b/dtnewman2 | f0113c2835741185b9a71003d6340f97f5d6cc99 | [
"MIT"
] | 17 | 2017-01-30T03:22:47.000Z | 2019-07-25T15:15:46.000Z | zappa_boilerplate/user/forms.py | 402900550b/dtnewman2 | f0113c2835741185b9a71003d6340f97f5d6cc99 | [
"MIT"
] | 2 | 2017-09-16T01:01:45.000Z | 2020-10-28T18:13:18.000Z | zappa_boilerplate/user/forms.py | 402900550b/dtnewman2 | f0113c2835741185b9a71003d6340f97f5d6cc99 | [
"MIT"
] | 1 | 2017-09-29T05:04:24.000Z | 2017-09-29T05:04:24.000Z | # -*- coding: utf-8 -*-
from zappa_boilerplate.database import db_session
from flask_wtf import Form
from wtforms import StringField, PasswordField
from wtforms.validators import DataRequired, Email, EqualTo, Length
from .models import User
class RegisterForm(Form):
    """User registration form with uniqueness checks for username and email."""

    username = StringField('Username',
                           validators=[DataRequired(), Length(min=3, max=25)])
    email = StringField('Email',
                        validators=[DataRequired(), Email(), Length(min=6, max=40)])
    password = PasswordField('Password',
                             validators=[DataRequired(), Length(min=6, max=40)])
    confirm = PasswordField('Verify password',
                            [DataRequired(), EqualTo('password', message='Passwords must match')])

    def __init__(self, *args, **kwargs):
        super(RegisterForm, self).__init__(*args, **kwargs)
        self.user = None

    def validate(self):
        """Run field validators, then reject already-registered usernames/emails."""
        initial_validation = super(RegisterForm, self).validate()
        if not initial_validation:
            return False
        # Consistency fix: both lookups now go through db_session (the email
        # check previously used the Model.query shortcut, which relies on a
        # separately configured session).
        user = db_session.query(User).filter_by(username=self.username.data).first()
        if user:
            self.username.errors.append("Username already registered")
            return False
        user = db_session.query(User).filter_by(email=self.email.data).first()
        if user:
            self.email.errors.append("Email already registered")
            return False
        return True
from zappa_boilerplate.database import db_session
from flask_wtf import Form
from wtforms import StringField, PasswordField
from wtforms.validators import DataRequired, Email, EqualTo, Length
from .models import User
class RegisterForm(Form):
    """User registration form with uniqueness checks for username and email."""
    username = StringField('Username',
                           validators=[DataRequired(), Length(min=3, max=25)])
    email = StringField('Email',
                        validators=[DataRequired(), Email(), Length(min=6, max=40)])
    password = PasswordField('Password',
                             validators=[DataRequired(), Length(min=6, max=40)])
    confirm = PasswordField('Verify password',
                            [DataRequired(), EqualTo('password', message='Passwords must match')])
    def __init__(self, *args, **kwargs):
        super(RegisterForm, self).__init__(*args, **kwargs)
        self.user = None
    def validate(self):
        """Run field validators, then reject duplicate username/email."""
        initial_validation = super(RegisterForm, self).validate()
        if not initial_validation:
            return False
        user = db_session.query(User).filter_by(username=self.username.data).first()
        if user:
            self.username.errors.append("Username already registered")
            return False
        # NOTE(review): uses the Model.query shortcut here but db_session
        # above -- confirm both are wired to the same session.
        user = User.query.filter_by(email=self.email.data).first()
        if user:
            self.email.errors.append("Email already registered")
            return False
        return True
790151e176bfd6fcb23347679865ca39841044c6 | 24 | py | Python | data/studio21_generated/introductory/3210/starter_code.py | vijaykumawat256/Prompt-Summarization | 614f5911e2acd2933440d909de2b4f86653dc214 | [
"Apache-2.0"
] | null | null | null | data/studio21_generated/introductory/3210/starter_code.py | vijaykumawat256/Prompt-Summarization | 614f5911e2acd2933440d909de2b4f86653dc214 | [
"Apache-2.0"
] | null | null | null | data/studio21_generated/introductory/3210/starter_code.py | vijaykumawat256/Prompt-Summarization | 614f5911e2acd2933440d909de2b4f86653dc214 | [
"Apache-2.0"
] | null | null | null | def get_strings(city):
| 12 | 22 | 0.75 | def get_strings(city):
| false | true |
790151f0296ff43bb49f8d7c43dbacbf5f057c04 | 652 | py | Python | client/permissions.py | My-Garage/resourceideaapi | b872a6f15277989870572ba6e523c9dc378b7a24 | [
"MIT"
] | 1 | 2021-01-20T14:40:06.000Z | 2021-01-20T14:40:06.000Z | client/permissions.py | My-Garage/resourceideaapi | b872a6f15277989870572ba6e523c9dc378b7a24 | [
"MIT"
] | null | null | null | client/permissions.py | My-Garage/resourceideaapi | b872a6f15277989870572ba6e523c9dc378b7a24 | [
"MIT"
] | null | null | null | """Permissions for the client app"""
from rest_framework import permissions
class ClientPermissions(permissions.BasePermission):
    """Handles authorization of requests to the client app."""

    # Maps each viewset action to the Django model permission it requires.
    _REQUIRED_PERMS = {
        'create': 'client.add_client',
        'update': 'client.change_client',
        'partial_update': 'client.change_client',
        'list': 'client.view_client',
        'retrieve': 'client.view_client',
    }

    def has_permission(self, request, view):
        """Return True iff the user holds the permission for ``view.action``."""
        required = self._REQUIRED_PERMS.get(view.action)
        if required is None:
            # Explicitly deny unknown actions (previously fell through,
            # implicitly returning None -- also falsy, so behavior matches).
            return False
        return request.user.has_perm(required)
| 34.315789 | 66 | 0.628834 | from rest_framework import permissions
class ClientPermissions(permissions.BasePermission):
    """Maps viewset actions to the client app's Django model permissions."""
    def has_permission(self, request, view):
        # Falls through (returns None, i.e. deny) for unlisted actions or
        # missing permissions.
        if view.action == 'create' \
                and request.user.has_perm('client.add_client'):
            return True
        if view.action in ['update', 'partial_update'] \
                and request.user.has_perm('client.change_client'):
            return True
        if view.action in ['list', 'retrieve'] \
                and request.user.has_perm('client.view_client'):
            return True
| true | true |
7901527760d3dc547db45060c8ee8e9110e8f7a8 | 2,648 | py | Python | server/home.py | poke19962008/Source-Code-Classifier | a2ef331ee3d3f705588bad9c91397f9b2b46d12d | [
"MIT",
"Unlicense"
] | 1 | 2016-11-06T07:39:15.000Z | 2016-11-06T07:39:15.000Z | server/home.py | poke19962008/Source-Code-Classifier | a2ef331ee3d3f705588bad9c91397f9b2b46d12d | [
"MIT",
"Unlicense"
] | null | null | null | server/home.py | poke19962008/Source-Code-Classifier | a2ef331ee3d3f705588bad9c91397f9b2b46d12d | [
"MIT",
"Unlicense"
] | null | null | null | import time
from check_lang import check_py,check_rb,check_j,check_c,check_cpp
from flask import Flask, request, jsonify
from flask_cors import CORS, cross_origin
import subprocess
import json
from json import JSONEncoder
from main import predict
app = Flask(__name__)
CORS(app)
@app.route("/")
def hello():
    """Serve a minimal HTML form that POSTs source code to /check."""
    return '<form action="/check" method="POST"><input name="code" size="135"><input type="submit" value="Code Here"></form>'
@app.route("/check", methods=['POST'])
def echo():
    """Classify posted source code and return per-language scores as JSON.

    Expects a JSON body with key 'sc' holding the source text.  The text is
    written to code/<timestamp> and fed to each language checker; if exactly
    one checker accepts it, that language scores 1.0, otherwise the trained
    classifier's rounded scores are returned.
    """
    filename = str(int(time.time()))
    data_dict = json.loads(request.data)
    path = 'code/' + filename
    with open(path, 'w+') as outfile:
        outfile.write(str(data_dict['sc']))

    # Each checker returns 0 on success.  Order matches the original one-hot
    # mapping (c, cpp, py, ruby).  Java has no cheap checker here, so it can
    # only win via the classifier fallback below.
    checks = [('c', check_c), ('cpp', check_cpp),
              ('py', check_py), ('ruby', check_rb)]
    passed = [lang for lang, check in checks if int(check(path)) == 0]

    if len(passed) == 1:
        # Unambiguous: exactly one compiler/interpreter accepted the snippet.
        scores = {'c': 0.0, 'cpp': 0.0, 'py': 0.0, 'ruby': 0.0, 'java': 0.0}
        scores[passed[0]] = 1.0
        return jsonify(scores)

    # Zero or several checkers passed: defer to the ML classifier.
    x = predict(data_dict['sc'])
    return jsonify({'cpp': round(x['cpp'], 2), 'ruby': round(x['ruby'], 2),
                    'c': round(x['c'], 2), 'py': round(x['py'], 2),
                    'java': round(x['java'], 2)})
if __name__ == "__main__":
    # Bind to all interfaces so the API is reachable from other hosts.
    app.run(host= '0.0.0.0')
| 28.473118 | 157 | 0.554003 | import time
from check_lang import check_py,check_rb,check_j,check_c,check_cpp
from flask import Flask, request, jsonify
from flask_cors import CORS, cross_origin
import subprocess
import json
from json import JSONEncoder
from main import predict
app = Flask(__name__)
CORS(app)
@app.route("/")
def hello():
    """Serve a minimal HTML form that POSTs source code to /check."""
    return '<form action="/check" method="POST"><input name="code" size="135"><input type="submit" value="Code Here"></form>'
@app.route("/check", methods=['POST'])
def echo():
    """Classify posted source code ('sc' in the JSON body); return scores."""
    codes = []
    filename = str(int(time.time()))
    dataDict = json.loads(request.data)
    # Persist the snippet so the external language checkers can run it.
    with open('code/'+filename,'w+') as outfile:
        outfile.write(str(dataDict['sc']))
    # 0 means the checker accepted the snippet; the trailing constant 1
    # keeps the fifth (java) slot from ever counting as a pass.
    codes.append(int(check_c("code/"+filename)))
    codes.append(int(check_cpp("code/"+filename)))
    codes.append(int(check_py("code/"+filename)))
    codes.append(int(check_rb("code/"+filename)))
    codes.append(1)
    print codes
    # Count passing checkers; remember the (last) passing 1-based index.
    zero = 0
    count = 0
    correct_count = 0
    for code in codes:
        count = count+1
        if code==0:
            zero = zero + 1
            correct_count = count
    print zero
    # Exactly one checker passed: answer with a one-hot score vector
    # (index 1=c, 2=cpp, 3=py, 4=ruby).
    if(zero == 1):
        if(correct_count==1):
            jsonString = {'cpp': 0.0, 'ruby': 0.0, 'c': 1.0, 'py': 0.0, 'java': 0.0}
            return jsonify(jsonString)
        elif(correct_count==2):
            jsonString = {'cpp': 1.0, 'ruby': 0.0, 'c': 0.0, 'py': 0.0, 'java': 0.0}
            return jsonify(jsonString)
        elif(correct_count==3):
            jsonString = {'cpp': 0.0, 'ruby': 0.0, 'c': 0.0, 'py': 1.0, 'java': 0.0}
            return jsonify(jsonString)
        elif(correct_count==4):
            jsonString = {'cpp': 0.0, 'ruby': 1.0, 'c': 0.0, 'py': 0.0, 'java': 0.0}
            return jsonify(jsonString)
    else:
        # Ambiguous: fall back to the trained classifier's rounded scores.
        x = predict(dataDict['sc'])
        print x
        return jsonify({'cpp': round(x['cpp'], 2), 'ruby': round(x['ruby'], 2), 'c': round(x['c'], 2), 'py': round(x['py'], 2), 'java': round(x['java'], 2)})
if __name__ == "__main__":
    # Bind to all interfaces so the API is reachable from other hosts.
    app.run(host= '0.0.0.0')
| false | true |
790152d0f649115c5bffe92159f13e26fa8f9fc0 | 1,605 | py | Python | scalyr_agent/third_party/pysnmp/carrier/asyncore/dispatch.py | code-sauce/scalyr-agent-2 | 41023d5c1272186193dd02900782b150dda5f38e | [
"Apache-2.0"
] | 67 | 2015-02-03T00:35:33.000Z | 2022-03-23T10:14:26.000Z | scalyr_agent/third_party/pysnmp/carrier/asyncore/dispatch.py | code-sauce/scalyr-agent-2 | 41023d5c1272186193dd02900782b150dda5f38e | [
"Apache-2.0"
] | 578 | 2015-04-09T08:58:56.000Z | 2022-03-30T12:13:21.000Z | scalyr_agent/third_party/pysnmp/carrier/asyncore/dispatch.py | code-sauce/scalyr-agent-2 | 41023d5c1272186193dd02900782b150dda5f38e | [
"Apache-2.0"
] | 58 | 2015-01-15T22:00:43.000Z | 2022-02-18T15:48:31.000Z | # Implements I/O over asynchronous sockets
from time import time
from sys import exc_info
from traceback import format_exception
from asyncore import socket_map
from asyncore import loop
from pysnmp.carrier.base import AbstractTransportDispatcher
from pysnmp.error import PySnmpError
class AsyncoreDispatcher(AbstractTransportDispatcher):
    """Transport dispatcher that pumps I/O through asyncore's event loop."""
    def __init__(self):
        self.__sockMap = {} # own map (not asyncore's global one) for MT safety
        self.timeout = 0.5
        AbstractTransportDispatcher.__init__(self)
    def getSocketMap(self): return self.__sockMap
    def setSocketMap(self, sockMap=socket_map): self.__sockMap = sockMap
    def registerTransport(self, tDomain, t):
        """Register transport *t* and hook its socket into our map."""
        AbstractTransportDispatcher.registerTransport(self, tDomain, t)
        t.registerSocket(self.__sockMap)
    def unregisterTransport(self, tDomain):
        """Unhook the transport's socket, then unregister the transport."""
        self.getTransport(tDomain).unregisterSocket(self.__sockMap)
        AbstractTransportDispatcher.unregisterTransport(self, tDomain)
    def transportsAreWorking(self):
        """Return 1 if any registered transport has pending output."""
        for transport in self.__sockMap.values():
            if transport.writable():
                return 1
        return 0
    def runDispatcher(self, timeout=0.0):
        """Run the asyncore loop while jobs or pending writes remain.

        Each iteration polls once (use_poll) with *timeout* (falling back
        to self.timeout when 0), then fires timer callbacks.
        """
        while self.jobsArePending() or self.transportsAreWorking():
            try:
                loop(timeout and timeout or self.timeout,
                     use_poll=True, map=self.__sockMap, count=1)
            except KeyboardInterrupt:
                raise
            # NOTE(review): bare except also swallows SystemExit etc. and
            # discards the original exception object -- consider narrowing.
            except:
                raise PySnmpError('poll error: %s' % ';'.join(format_exception(*exc_info())))
            self.handleTimerTick(time())
| 37.325581 | 93 | 0.685358 |
from time import time
from sys import exc_info
from traceback import format_exception
from asyncore import socket_map
from asyncore import loop
from pysnmp.carrier.base import AbstractTransportDispatcher
from pysnmp.error import PySnmpError
class AsyncoreDispatcher(AbstractTransportDispatcher):
    """Transport dispatcher that pumps I/O through asyncore's event loop."""
    def __init__(self):
        # Own socket map (not asyncore's global one) for MT safety.
        self.__sockMap = {}
        self.timeout = 0.5
        AbstractTransportDispatcher.__init__(self)
    def getSocketMap(self): return self.__sockMap
    def setSocketMap(self, sockMap=socket_map): self.__sockMap = sockMap
    def registerTransport(self, tDomain, t):
        """Register transport *t* and hook its socket into our map."""
        AbstractTransportDispatcher.registerTransport(self, tDomain, t)
        t.registerSocket(self.__sockMap)
    def unregisterTransport(self, tDomain):
        """Unhook the transport's socket, then unregister the transport."""
        self.getTransport(tDomain).unregisterSocket(self.__sockMap)
        AbstractTransportDispatcher.unregisterTransport(self, tDomain)
    def transportsAreWorking(self):
        """Return 1 if any registered transport has pending output."""
        for transport in self.__sockMap.values():
            if transport.writable():
                return 1
        return 0
    def runDispatcher(self, timeout=0.0):
        """Run the asyncore loop while jobs or pending writes remain."""
        while self.jobsArePending() or self.transportsAreWorking():
            try:
                loop(timeout and timeout or self.timeout,
                     use_poll=True, map=self.__sockMap, count=1)
            except KeyboardInterrupt:
                raise
            # NOTE(review): bare except also swallows SystemExit etc. --
            # consider narrowing to Exception.
            except:
                raise PySnmpError('poll error: %s' % ';'.join(format_exception(*exc_info())))
            self.handleTimerTick(time())
| true | true |
7901540d8570ef9a1bfd9dec9917f2c247c1890c | 3,382 | py | Python | app/user/tests/test_user_api.py | reallyusefulengine/django_rest_recipe | 49943404bfe4b6a84c3b4f6d8332f952982b3281 | [
"MIT"
] | null | null | null | app/user/tests/test_user_api.py | reallyusefulengine/django_rest_recipe | 49943404bfe4b6a84c3b4f6d8332f952982b3281 | [
"MIT"
] | null | null | null | app/user/tests/test_user_api.py | reallyusefulengine/django_rest_recipe | 49943404bfe4b6a84c3b4f6d8332f952982b3281 | [
"MIT"
] | null | null | null | from django.test import TestCase
from django.contrib.auth import get_user_model
from django.urls import reverse
from rest_framework.test import APIClient
from rest_framework import status
CREATE_USER_URL = reverse('user:create')
TOKEN_URL = reverse('user:token')
def create_user(**params):
    """Create and return a user via the active user model's manager."""
    user_model = get_user_model()
    return user_model.objects.create_user(**params)
class PublicUserApiTests(TestCase):
    """Test the user API (public, i.e. unauthenticated endpoints)."""

    def setUp(self):
        self.client = APIClient()

    def test_create_valid_user_successful(self):
        """Test creating user with valid payload is successful"""
        payload = {
            'email': 'john@doe.com',
            'password': 'testpass',
            'name': 'John Doe'
        }
        res = self.client.post(CREATE_USER_URL, payload)
        self.assertEqual(res.status_code, status.HTTP_201_CREATED)
        # The stored user must accept the submitted password, and the
        # password must never be echoed back in the response body.
        user = get_user_model().objects.get(**res.data)
        self.assertTrue(user.check_password(payload['password']))
        self.assertNotIn('password', res.data)

    def test_user_exists(self):
        """Test that creating a duplicate user fails"""
        payload = {
            'email': 'john@doe.com',
            'password': 'testpass',
            "name": 'John Doe'
        }
        create_user(**payload)
        res = self.client.post(CREATE_USER_URL, payload)
        self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)

    def test_password_too_short(self):
        """tests that the password must be more than 5 characters"""
        payload = {
            'email': 'john@doe.com',
            'password': 'pass',
            "name": 'John Doe'
        }
        res = self.client.post(CREATE_USER_URL, payload)
        self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
        # A rejected payload must not leave a half-created user behind.
        user_exists = get_user_model().objects.filter(
            email=payload['email']
        ).exists()
        self.assertFalse(user_exists)

    def test_create_token_for_user(self):
        """Test that a token is created for a user"""
        payload = {'email': 'test@django.io', 'password': 'testpass'}
        create_user(**payload)
        res = self.client.post(TOKEN_URL, payload)
        # BUG FIX: assertTrue(res.status_code, X) treated X as the assertion
        # *message*, so any truthy status code passed. assertEqual actually
        # verifies the status code. Same fix applied in the tests below.
        self.assertEqual(res.status_code, status.HTTP_200_OK)
        self.assertIn('token', res.data)

    def test_create_token_invalid_credentials(self):
        """Test that token is not created if invalid credentials are given"""
        create_user(email='test@django.com', password='testpass')
        payload = {'email': 'test@django.com', 'password': 'wrong'}
        res = self.client.post(TOKEN_URL, payload)
        self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertNotIn('token', res.data)

    def test_create_token_no_user(self):
        """Test that token is not created if user does not exist"""
        payload = {'email': 'test@django.com', 'password': 'wrong'}
        res = self.client.post(TOKEN_URL, payload)
        self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertNotIn('token', res.data)

    def test_create_token_no_missing_field(self):
        """Test that token is not created if email/password not given"""
        res = self.client.post(
            TOKEN_URL,
            {'email': 'test@django.com', 'password': ''})
        self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertNotIn('token', res.data)
| 37.577778 | 77 | 0.63631 | from django.test import TestCase
from django.contrib.auth import get_user_model
from django.urls import reverse
from rest_framework.test import APIClient
from rest_framework import status
CREATE_USER_URL = reverse('user:create')
TOKEN_URL = reverse('user:token')
def create_user(**params):
    """Create and return a user via the active user model's manager."""
    user_model = get_user_model()
    return user_model.objects.create_user(**params)
class PublicUserApiTests(TestCase):
    """Test the user API (public, i.e. unauthenticated endpoints)."""

    def setUp(self):
        self.client = APIClient()

    def test_create_valid_user_successful(self):
        """Creating a user with a valid payload succeeds"""
        payload = {
            'email': 'john@doe.com',
            'password': 'testpass',
            'name': 'John Doe'
        }
        res = self.client.post(CREATE_USER_URL, payload)
        self.assertEqual(res.status_code, status.HTTP_201_CREATED)
        # The stored user must accept the submitted password, and the
        # password must never be echoed back in the response body.
        user = get_user_model().objects.get(**res.data)
        self.assertTrue(user.check_password(payload['password']))
        self.assertNotIn('password', res.data)

    def test_user_exists(self):
        """Creating a duplicate user fails"""
        payload = {
            'email': 'john@doe.com',
            'password': 'testpass',
            "name": 'John Doe'
        }
        create_user(**payload)
        res = self.client.post(CREATE_USER_URL, payload)
        self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)

    def test_password_too_short(self):
        """Passwords shorter than the minimum are rejected"""
        payload = {
            'email': 'john@doe.com',
            'password': 'pass',
            "name": 'John Doe'
        }
        res = self.client.post(CREATE_USER_URL, payload)
        self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
        # A rejected payload must not leave a half-created user behind.
        user_exists = get_user_model().objects.filter(
            email=payload['email']
        ).exists()
        self.assertFalse(user_exists)

    def test_create_token_for_user(self):
        """A token is created for valid credentials"""
        payload = {'email': 'test@django.io', 'password': 'testpass'}
        create_user(**payload)
        res = self.client.post(TOKEN_URL, payload)
        # BUG FIX: assertTrue(res.status_code, X) treated X as the assertion
        # *message*, so any truthy status code passed. assertEqual actually
        # verifies the status code. Same fix applied in the tests below.
        self.assertEqual(res.status_code, status.HTTP_200_OK)
        self.assertIn('token', res.data)

    def test_create_token_invalid_credentials(self):
        """No token is created for invalid credentials"""
        create_user(email='test@django.com', password='testpass')
        payload = {'email': 'test@django.com', 'password': 'wrong'}
        res = self.client.post(TOKEN_URL, payload)
        self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertNotIn('token', res.data)

    def test_create_token_no_user(self):
        """No token is created when the user does not exist"""
        payload = {'email': 'test@django.com', 'password': 'wrong'}
        res = self.client.post(TOKEN_URL, payload)
        self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertNotIn('token', res.data)

    def test_create_token_no_missing_field(self):
        """No token is created when email/password are missing"""
        res = self.client.post(
            TOKEN_URL,
            {'email': 'test@django.com', 'password': ''})
        self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertNotIn('token', res.data)
| true | true |
790154b62d7337a78a9bce3b537685b14b4ca1a2 | 13,300 | py | Python | oscar/lib/python2.7/site-packages/prompt_toolkit/contrib/telnet/server.py | sainjusajan/django-oscar | 466e8edc807be689b0a28c9e525c8323cc48b8e1 | [
"BSD-3-Clause"
] | null | null | null | oscar/lib/python2.7/site-packages/prompt_toolkit/contrib/telnet/server.py | sainjusajan/django-oscar | 466e8edc807be689b0a28c9e525c8323cc48b8e1 | [
"BSD-3-Clause"
] | null | null | null | oscar/lib/python2.7/site-packages/prompt_toolkit/contrib/telnet/server.py | sainjusajan/django-oscar | 466e8edc807be689b0a28c9e525c8323cc48b8e1 | [
"BSD-3-Clause"
] | null | null | null | """
Telnet server.
Example usage::
class MyTelnetApplication(TelnetApplication):
def client_connected(self, telnet_connection):
# Set CLI with simple prompt.
telnet_connection.set_application(
telnet_connection.create_prompt_application(...))
def handle_command(self, telnet_connection, document):
# When the client enters a command, just reply.
telnet_connection.send('You said: %r\n\n' % document.text)
...
a = MyTelnetApplication()
TelnetServer(application=a, host='127.0.0.1', port=23).run()
"""
from __future__ import unicode_literals
import socket
import select
import threading
import os
import fcntl
from six import int2byte, text_type, binary_type
from codecs import getincrementaldecoder
from prompt_toolkit.enums import DEFAULT_BUFFER
from prompt_toolkit.eventloop.base import EventLoop
from prompt_toolkit.interface import CommandLineInterface, Application
from prompt_toolkit.layout.screen import Size
from prompt_toolkit.shortcuts import create_prompt_application
from prompt_toolkit.terminal.vt100_input import InputStream
from prompt_toolkit.terminal.vt100_output import Vt100_Output
from .log import logger
from .protocol import IAC, DO, LINEMODE, SB, MODE, SE, WILL, ECHO, NAWS, SUPPRESS_GO_AHEAD
from .protocol import TelnetProtocolParser
from .application import TelnetApplication
__all__ = (
'TelnetServer',
)
def _initialize_telnet(connection):
    """Send the initial telnet option negotiation sequence to the client.

    Order matters: linemode first, then suppress-go-ahead (required for
    correct bi-directional echoing in clients like Putty), character mode,
    server-side echo and finally window-size (NAWS) reporting.
    """
    logger.info('Initializing telnet connection')
    negotiations = (
        IAC + DO + LINEMODE,
        IAC + WILL + SUPPRESS_GO_AHEAD,
        IAC + SB + LINEMODE + MODE + int2byte(0) + IAC + SE,
        IAC + WILL + ECHO,
        IAC + DO + NAWS,
    )
    for command in negotiations:
        connection.send(command)
class _ConnectionStdout(object):
    """
    File-like wrapper around the client socket, exposing the `write` and
    `flush` methods that Vt100_Output expects from its output stream.
    """
    def __init__(self, connection, encoding):
        self._connection = connection
        self._encoding = encoding
        self._buffer = []

    def write(self, data):
        assert isinstance(data, text_type)
        encoded = data.encode(self._encoding)
        self._buffer.append(encoded)
        self.flush()

    def flush(self):
        pending = b''.join(self._buffer)
        try:
            self._connection.send(pending)
        except socket.error as e:
            # A failed send is logged, never raised to the caller.
            logger.error("Couldn't send data over socket: %s" % e)
        self._buffer = []
class TelnetConnection(object):
    """
    Class that represents one Telnet connection.

    :param conn: accepted socket object for the client.
    :param addr: ``(host, port)`` tuple of the remote end.
    :param application: :class:`TelnetApplication` receiving callbacks.
    :param server: the owning :class:`TelnetServer`.
    :param encoding: encoding used to decode client input, e.g. 'utf-8'.
    """
    def __init__(self, conn, addr, application, server, encoding):
        assert isinstance(addr, tuple) # (addr, port) tuple
        assert isinstance(application, TelnetApplication)
        assert isinstance(server, TelnetServer)
        assert isinstance(encoding, text_type) # e.g. 'utf-8'
        self.conn = conn
        self.addr = addr
        self.application = application
        self.closed = False
        # Start in 'handling command' state so the server's select loop
        # ignores this connection until initialization below completes.
        self.handling_command = True
        self.server = server
        self.encoding = encoding
        self.callback = None # Function that handles the CLI result.
        # Create "Output" object.
        self.size = Size(rows=40, columns=79)
        # Initialize.
        _initialize_telnet(conn)
        # Create output.
        def get_size():
            return self.size
        self.stdout = _ConnectionStdout(conn, encoding=encoding)
        self.vt100_output = Vt100_Output(self.stdout, get_size, write_binary=False)
        # Create an eventloop (adaptor) for the CommandLineInterface.
        self.eventloop = _TelnetEventLoopInterface(server)
        # Set default CommandLineInterface.
        self.set_application(create_prompt_application())
        # Call client_connected
        application.client_connected(self)
        # Draw for the first time.
        self.handling_command = False
        self.cli._redraw()
    def set_application(self, app, callback=None):
        """
        Set ``CommandLineInterface`` instance for this connection.
        (This can be replaced any time.)
        :param cli: CommandLineInterface instance.
        :param callback: Callable that takes the result of the CLI.
        """
        assert isinstance(app, Application)
        assert callback is None or callable(callback)
        self.cli = CommandLineInterface(
            application=app,
            eventloop=self.eventloop,
            output=self.vt100_output)
        self.callback = callback
        # Create a parser, and parser callbacks.
        cb = self.cli.create_eventloop_callbacks()
        inputstream = InputStream(cb.feed_key)
        # Input decoder for stdin. (Required when working with multibyte
        # characters, like chinese input.)
        stdin_decoder_cls = getincrementaldecoder(self.encoding)
        stdin_decoder = [stdin_decoder_cls()] # nonlocal
        # Tell the CLI that it's running. We don't start it through the run()
        # call, but will still want _redraw() to work.
        self.cli._is_running = True
        def data_received(data):
            """ TelnetProtocolParser 'data_received' callback """
            assert isinstance(data, binary_type)
            try:
                result = stdin_decoder[0].decode(data)
                inputstream.feed(result)
            except UnicodeDecodeError:
                # Invalid byte sequence: reset the decoder so later valid
                # input is not corrupted by stale partial state.
                stdin_decoder[0] = stdin_decoder_cls()
                return ''
        def size_received(rows, columns):
            """ TelnetProtocolParser 'size_received' callback """
            self.size = Size(rows=rows, columns=columns)
            cb.terminal_size_changed()
        self.parser = TelnetProtocolParser(data_received, size_received)
    def feed(self, data):
        """
        Handler for incoming data. (Called by TelnetServer.)
        """
        assert isinstance(data, binary_type)
        self.parser.feed(data)
        # Render again.
        self.cli._redraw()
        # When a return value has been set (enter was pressed), handle command.
        if self.cli.is_returning:
            try:
                return_value = self.cli.return_value()
            except (EOFError, KeyboardInterrupt) as e:
                # Control-D or Control-C was pressed.
                logger.info('%s, closing connection.', type(e).__name__)
                self.close()
                return
            # Handle CLI command
            self._handle_command(return_value)
    def _handle_command(self, command):
        """
        Handle command. This will run in a separate thread, in order not
        to block the event loop.
        """
        logger.info('Handle command %r', command)
        def in_executor():
            self.handling_command = True
            try:
                if self.callback is not None:
                    self.callback(self, command)
            finally:
                # Always reschedule 'done' in the event loop, even when the
                # application callback raised.
                self.server.call_from_executor(done)
        def done():
            self.handling_command = False
            # Reset state and draw again. (If the connection is still open --
            # the application could have called TelnetConnection.close()
            if not self.closed:
                self.cli.reset()
                self.cli.buffers[DEFAULT_BUFFER].reset()
                self.cli.renderer.request_absolute_cursor_position()
                self.vt100_output.flush()
                self.cli._redraw()
        self.server.run_in_executor(in_executor)
    def erase_screen(self):
        """
        Erase output screen.
        """
        self.vt100_output.erase_screen()
        self.vt100_output.cursor_goto(0, 0)
        self.vt100_output.flush()
    def send(self, data):
        """
        Send text to the client.
        """
        assert isinstance(data, text_type)
        # When data is send back to the client, we should replace the line
        # endings. (We didn't allocate a real pseudo terminal, and the telnet
        # connection is raw, so we are responsible for inserting \r.)
        self.stdout.write(data.replace('\n', '\r\n'))
        self.stdout.flush()
    def close(self):
        """
        Close the connection.
        """
        self.application.client_leaving(self)
        self.conn.close()
        self.closed = True
class _TelnetEventLoopInterface(EventLoop):
    """
    Minimal `EventLoop` adaptor handed to the `CommandLineInterface`.
    Executor-related calls are forwarded to the `TelnetServer`; lifecycle
    calls are ignored because the server owns the real event loop.
    """
    def __init__(self, server):
        self._server = server

    def close(self):
        """Ignored: the TelnetServer manages the loop's lifetime."""

    def stop(self):
        """Ignored: stopping is handled by the TelnetServer itself."""

    def run_in_executor(self, callback):
        """Delegate background work to the server's executor."""
        self._server.run_in_executor(callback)

    def call_from_executor(self, callback, _max_postpone_until=None):
        """Schedule *callback* back in the server loop (thread-safe)."""
        self._server.call_from_executor(callback)

    def add_reader(self, fd, callback):
        raise NotImplementedError

    def remove_reader(self, fd):
        raise NotImplementedError
class TelnetServer(object):
    """
    Telnet server implementation.

    Accepts TCP connections on ``host:port`` and hands each one, wrapped
    in a :class:`TelnetConnection`, to *application*.
    """
    def __init__(self, host='127.0.0.1', port=23, application=None, encoding='utf-8'):
        assert isinstance(host, text_type)
        assert isinstance(port, int)
        assert isinstance(application, TelnetApplication)
        assert isinstance(encoding, text_type)
        self.host = host
        self.port = port
        self.application = application
        self.encoding = encoding
        self.connections = set()
        self._calls_from_executor = []
        # Create a pipe for inter thread communication.
        # Writing one byte to it wakes up the select() call in run().
        self._schedule_pipe = os.pipe()
        fcntl.fcntl(self._schedule_pipe[0], fcntl.F_SETFL, os.O_NONBLOCK)
    @classmethod
    def create_socket(cls, host, port):
        """Create, bind and return the listening TCP socket."""
        # Create and bind socket
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        s.bind((host, port))
        s.listen(4)
        return s
    def run_in_executor(self, callback):
        """Run *callback* in a background thread (one thread per call)."""
        threading.Thread(target=callback).start()
    def call_from_executor(self, callback):
        """Schedule *callback* to run in the event loop (thread-safe)."""
        self._calls_from_executor.append(callback)
        if self._schedule_pipe:
            os.write(self._schedule_pipe[1], b'x')
    def _process_callbacks(self):
        """
        Process callbacks from `call_from_executor` in eventloop.
        """
        # Flush all the pipe content.
        os.read(self._schedule_pipe[0], 1024)
        # Process calls from executor.
        calls_from_executor, self._calls_from_executor = self._calls_from_executor, []
        for c in calls_from_executor:
            c()
    def run(self):
        """
        Run the eventloop for the telnet server.
        """
        listen_socket = self.create_socket(self.host, self.port)
        logger.info('Listening for telnet connections on %s port %r', self.host, self.port)
        try:
            while True:
                # Removed closed connections.
                self.connections = set([c for c in self.connections if not c.closed])
                # Ignore connections handling commands.
                connections = set([c for c in self.connections if not c.handling_command])
                # Wait for next event.
                read_list = (
                    [listen_socket, self._schedule_pipe[0]] +
                    [c.conn for c in connections])
                read, _, _ = select.select(read_list, [], [])
                for s in read:
                    # When the socket itself is ready, accept a new connection.
                    if s == listen_socket:
                        self._accept(listen_socket)
                    # If we receive something on our "call_from_executor" pipe, process
                    # these callbacks in a thread safe way.
                    elif s == self._schedule_pipe[0]:
                        self._process_callbacks()
                    # Handle incoming data on socket.
                    else:
                        self._handle_incoming_data(s)
        finally:
            listen_socket.close()
    def _accept(self, listen_socket):
        """
        Accept new incoming connection.
        """
        conn, addr = listen_socket.accept()
        connection = TelnetConnection(conn, addr, self.application, self, encoding=self.encoding)
        self.connections.add(connection)
        logger.info('New connection %r %r', *addr)
    def _handle_incoming_data(self, conn):
        """
        Handle incoming data on socket.
        """
        connection = [c for c in self.connections if c.conn == conn][0]
        data = conn.recv(1024)
        if data:
            connection.feed(data)
        else:
            # recv() returning b'' means the client closed the connection.
            self.connections.remove(connection)
| 32.598039 | 98 | 0.60015 | from __future__ import unicode_literals
import socket
import select
import threading
import os
import fcntl
from six import int2byte, text_type, binary_type
from codecs import getincrementaldecoder
from prompt_toolkit.enums import DEFAULT_BUFFER
from prompt_toolkit.eventloop.base import EventLoop
from prompt_toolkit.interface import CommandLineInterface, Application
from prompt_toolkit.layout.screen import Size
from prompt_toolkit.shortcuts import create_prompt_application
from prompt_toolkit.terminal.vt100_input import InputStream
from prompt_toolkit.terminal.vt100_output import Vt100_Output
from .log import logger
from .protocol import IAC, DO, LINEMODE, SB, MODE, SE, WILL, ECHO, NAWS, SUPPRESS_GO_AHEAD
from .protocol import TelnetProtocolParser
from .application import TelnetApplication
__all__ = (
'TelnetServer',
)
def _initialize_telnet(connection):
    """Send the initial telnet option negotiation sequence to the client.

    Order matters: linemode first, then suppress-go-ahead (required for
    correct bi-directional echoing in clients like Putty), character mode,
    server-side echo and finally window-size (NAWS) reporting.
    """
    logger.info('Initializing telnet connection')
    negotiations = (
        IAC + DO + LINEMODE,
        IAC + WILL + SUPPRESS_GO_AHEAD,
        IAC + SB + LINEMODE + MODE + int2byte(0) + IAC + SE,
        IAC + WILL + ECHO,
        IAC + DO + NAWS,
    )
    for command in negotiations:
        connection.send(command)
class _ConnectionStdout(object):
    """
    File-like wrapper around the client socket, exposing the `write` and
    `flush` methods that Vt100_Output expects from its output stream.
    """
    def __init__(self, connection, encoding):
        self._connection = connection
        self._encoding = encoding
        self._buffer = []

    def write(self, data):
        assert isinstance(data, text_type)
        encoded = data.encode(self._encoding)
        self._buffer.append(encoded)
        self.flush()

    def flush(self):
        pending = b''.join(self._buffer)
        try:
            self._connection.send(pending)
        except socket.error as e:
            # A failed send is logged, never raised to the caller.
            logger.error("Couldn't send data over socket: %s" % e)
        self._buffer = []
class TelnetConnection(object):
    """
    One accepted Telnet connection.

    :param conn: accepted socket object for the client.
    :param addr: ``(host, port)`` tuple of the remote end.
    :param application: :class:`TelnetApplication` receiving callbacks.
    :param server: the owning :class:`TelnetServer`.
    :param encoding: encoding used to decode client input, e.g. 'utf-8'.
    """
    def __init__(self, conn, addr, application, server, encoding):
        assert isinstance(addr, tuple) # (addr, port) tuple
        assert isinstance(application, TelnetApplication)
        assert isinstance(server, TelnetServer)
        assert isinstance(encoding, text_type) # e.g. 'utf-8'
        self.conn = conn
        self.addr = addr
        self.application = application
        self.closed = False
        # Start in 'handling command' state so the server's select loop
        # ignores this connection until initialization below completes.
        self.handling_command = True
        self.server = server
        self.encoding = encoding
        self.callback = None # Function that handles the CLI result.
        # Create "Output" object.
        self.size = Size(rows=40, columns=79)
        # Initialize.
        _initialize_telnet(conn)
        # Create output.
        def get_size():
            return self.size
        self.stdout = _ConnectionStdout(conn, encoding=encoding)
        self.vt100_output = Vt100_Output(self.stdout, get_size, write_binary=False)
        # Create an eventloop (adaptor) for the CommandLineInterface.
        self.eventloop = _TelnetEventLoopInterface(server)
        # Set default CommandLineInterface.
        self.set_application(create_prompt_application())
        # Call client_connected
        application.client_connected(self)
        # Draw for the first time.
        self.handling_command = False
        self.cli._redraw()
    def set_application(self, app, callback=None):
        """
        Set the ``CommandLineInterface`` for this connection (replaceable
        at any time).

        :param app: Application instance.
        :param callback: Callable that takes the result of the CLI.
        """
        assert isinstance(app, Application)
        assert callback is None or callable(callback)
        self.cli = CommandLineInterface(
            application=app,
            eventloop=self.eventloop,
            output=self.vt100_output)
        self.callback = callback
        # Create a parser, and parser callbacks.
        cb = self.cli.create_eventloop_callbacks()
        inputstream = InputStream(cb.feed_key)
        # Input decoder for stdin. (Required when working with multibyte
        # characters, like chinese input.)
        stdin_decoder_cls = getincrementaldecoder(self.encoding)
        stdin_decoder = [stdin_decoder_cls()] # nonlocal
        # Tell the CLI that it's running. We don't start it through the run()
        # call, but will still want _redraw() to work.
        self.cli._is_running = True
        def data_received(data):
            """TelnetProtocolParser 'data_received' callback."""
            assert isinstance(data, binary_type)
            try:
                result = stdin_decoder[0].decode(data)
                inputstream.feed(result)
            except UnicodeDecodeError:
                # Invalid byte sequence: reset the decoder so later valid
                # input is not corrupted by stale partial state.
                stdin_decoder[0] = stdin_decoder_cls()
                return ''
        def size_received(rows, columns):
            """TelnetProtocolParser 'size_received' callback."""
            self.size = Size(rows=rows, columns=columns)
            cb.terminal_size_changed()
        self.parser = TelnetProtocolParser(data_received, size_received)
    def feed(self, data):
        """Handle incoming raw bytes. (Called by TelnetServer.)"""
        assert isinstance(data, binary_type)
        self.parser.feed(data)
        # Render again.
        self.cli._redraw()
        # When a return value has been set (enter pressed), handle command.
        if self.cli.is_returning:
            try:
                return_value = self.cli.return_value()
            except (EOFError, KeyboardInterrupt) as e:
                # Control-D or Control-C was pressed: close connection.
                logger.info('%s, closing connection.', type(e).__name__)
                self.close()
                return
            # Handle the entered CLI command.
            self._handle_command(return_value)
    def _handle_command(self, command):
        """Run the application callback for *command* in a worker thread,
        so the event loop is not blocked."""
        logger.info('Handle command %r', command)
        def in_executor():
            self.handling_command = True
            try:
                if self.callback is not None:
                    self.callback(self, command)
            finally:
                # Always reschedule 'done' in the event loop, even when
                # the application callback raised.
                self.server.call_from_executor(done)
        def done():
            self.handling_command = False
            # Reset state and draw again. (If the connection is still open --
            # the application could have called TelnetConnection.close()
            if not self.closed:
                self.cli.reset()
                self.cli.buffers[DEFAULT_BUFFER].reset()
                self.cli.renderer.request_absolute_cursor_position()
                self.vt100_output.flush()
                self.cli._redraw()
        self.server.run_in_executor(in_executor)
    def erase_screen(self):
        """Erase the client's output screen and home the cursor."""
        self.vt100_output.erase_screen()
        self.vt100_output.cursor_goto(0, 0)
        self.vt100_output.flush()
    def send(self, data):
        """Send text to the client."""
        assert isinstance(data, text_type)
        # When data is send back to the client, we should replace the line
        # endings. (We didn't allocate a real pseudo terminal, and the telnet
        # connection is raw, so we are responsible for inserting \r.)
        self.stdout.write(data.replace('\n', '\r\n'))
        self.stdout.flush()
    def close(self):
        """Notify the application and close the client socket."""
        self.application.client_leaving(self)
        self.conn.close()
        self.closed = True
class _TelnetEventLoopInterface(EventLoop):
    """
    Minimal `EventLoop` adaptor handed to the `CommandLineInterface`.
    Executor-related calls are forwarded to the `TelnetServer`; lifecycle
    calls are ignored because the server owns the real event loop.
    """
    def __init__(self, server):
        self._server = server
    def close(self):
        # BUG FIX: this method (and stop below) had no body at all, which
        # is a SyntaxError. Closing is a deliberate no-op: the server
        # manages the loop's lifetime.
        pass
    def stop(self):
        # No-op: the TelnetServer controls when the loop stops.
        pass
    def run_in_executor(self, callback):
        self._server.run_in_executor(callback)
    def call_from_executor(self, callback, _max_postpone_until=None):
        self._server.call_from_executor(callback)
    def add_reader(self, fd, callback):
        raise NotImplementedError
    def remove_reader(self, fd):
        raise NotImplementedError
class TelnetServer(object):
    """
    Telnet server implementation.

    Accepts TCP connections on ``host:port`` and hands each one, wrapped
    in a :class:`TelnetConnection`, to *application*.
    """
    def __init__(self, host='127.0.0.1', port=23, application=None, encoding='utf-8'):
        assert isinstance(host, text_type)
        assert isinstance(port, int)
        assert isinstance(application, TelnetApplication)
        assert isinstance(encoding, text_type)
        self.host = host
        self.port = port
        self.application = application
        self.encoding = encoding
        self.connections = set()
        self._calls_from_executor = []
        # Pipe for inter-thread communication: writing one byte wakes up
        # the select() call in run().
        self._schedule_pipe = os.pipe()
        fcntl.fcntl(self._schedule_pipe[0], fcntl.F_SETFL, os.O_NONBLOCK)
    @classmethod
    def create_socket(cls, host, port):
        """Create, bind and return the listening TCP socket."""
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        s.bind((host, port))
        s.listen(4)
        return s
    def run_in_executor(self, callback):
        """Run *callback* in a background thread (one thread per call)."""
        threading.Thread(target=callback).start()
    def call_from_executor(self, callback):
        """Schedule *callback* to run in the event loop (thread-safe)."""
        self._calls_from_executor.append(callback)
        if self._schedule_pipe:
            os.write(self._schedule_pipe[1], b'x')
    def _process_callbacks(self):
        """Drain the wake-up pipe and run all scheduled callbacks."""
        os.read(self._schedule_pipe[0], 1024)
        # Swap the list first so callbacks can safely schedule more work.
        calls_from_executor, self._calls_from_executor = self._calls_from_executor, []
        for c in calls_from_executor:
            c()
    def run(self):
        """Run the event loop: accept clients and dispatch socket data."""
        listen_socket = self.create_socket(self.host, self.port)
        logger.info('Listening for telnet connections on %s port %r', self.host, self.port)
        try:
            while True:
                # Drop closed connections; skip those busy in a command.
                self.connections = set([c for c in self.connections if not c.closed])
                connections = set([c for c in self.connections if not c.handling_command])
                # Wait for the next readable fd (listener, pipe or client).
                read_list = (
                    [listen_socket, self._schedule_pipe[0]] +
                    [c.conn for c in connections])
                read, _, _ = select.select(read_list, [], [])
                for s in read:
                    if s == listen_socket:
                        self._accept(listen_socket)
                    elif s == self._schedule_pipe[0]:
                        self._process_callbacks()
                    else:
                        self._handle_incoming_data(s)
        finally:
            listen_socket.close()
    def _accept(self, listen_socket):
        """Accept a new incoming connection and register it."""
        conn, addr = listen_socket.accept()
        connection = TelnetConnection(conn, addr, self.application, self, encoding=self.encoding)
        self.connections.add(connection)
        logger.info('New connection %r %r', *addr)
    def _handle_incoming_data(self, conn):
        """Read available data from *conn* and feed it to its connection."""
        connection = [c for c in self.connections if c.conn == conn][0]
        data = conn.recv(1024)
        if data:
            connection.feed(data)
        else:
            # recv() returning b'' means the client closed the connection.
            self.connections.remove(connection)
| true | true |
790154c57657825a11f9b518ab6fbf9dc7d8e942 | 40,675 | py | Python | gem5/src/dev/arm/RealView.py | gem5-graphics/gem5-graphics | a5ce9f4e1e954f16524c431da64ac8c57b3e212e | [
"BSD-3-Clause"
] | 22 | 2018-07-03T16:46:51.000Z | 2022-03-22T08:29:36.000Z | gem5/src/dev/arm/RealView.py | gem5-graphics/gem5-graphics | a5ce9f4e1e954f16524c431da64ac8c57b3e212e | [
"BSD-3-Clause"
] | null | null | null | gem5/src/dev/arm/RealView.py | gem5-graphics/gem5-graphics | a5ce9f4e1e954f16524c431da64ac8c57b3e212e | [
"BSD-3-Clause"
] | 25 | 2017-12-02T00:46:04.000Z | 2022-02-18T19:28:53.000Z | # Copyright (c) 2009-2017 ARM Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Copyright (c) 2006-2007 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Ali Saidi
# Gabe Black
# William Wang
from m5.params import *
from m5.proxy import *
from ClockDomain import ClockDomain
from VoltageDomain import VoltageDomain
from Device import BasicPioDevice, PioDevice, IsaFake, BadAddr, DmaDevice
from PciHost import *
from Ethernet import NSGigE, IGbE_igb, IGbE_e1000
from Ide import *
from Platform import Platform
from Terminal import Terminal
from Uart import Uart
from SimpleMemory import SimpleMemory
from Gic import *
from EnergyCtrl import EnergyCtrl
from ClockDomain import SrcClockDomain
from SubSystem import SubSystem
# Platforms with KVM support should generally use in-kernel GIC
# emulation. Use a GIC model that automatically switches between
# gem5's GIC model and KVM's GIC model if KVM is available.
try:
    from KvmGic import MuxingKvmGic
    kvm_gicv2_class = MuxingKvmGic
except ImportError:
    # KVM support wasn't compiled into gem5. Fall back to a
    # software-only GIC. (Removed a dead `pass` after this assignment.)
    kvm_gicv2_class = Pl390
class AmbaPioDevice(BasicPioDevice):
    """Abstract base class for memory-mapped (PIO) AMBA peripherals."""
    type = 'AmbaPioDevice'
    abstract = True
    cxx_header = "dev/arm/amba_device.hh"
    amba_id = Param.UInt32("ID of AMBA device for kernel detection")
class AmbaIntDevice(AmbaPioDevice):
    """Abstract AMBA PIO peripheral that raises an interrupt via the GIC."""
    type = 'AmbaIntDevice'
    abstract = True
    cxx_header = "dev/arm/amba_device.hh"
    gic = Param.BaseGic(Parent.any, "Gic to use for interrupting")
    int_num = Param.UInt32("Interrupt number that connects to GIC")
    int_delay = Param.Latency("100ns",
        "Time between action and interrupt generation by device")
class AmbaDmaDevice(DmaDevice):
    """Abstract DMA-capable AMBA peripheral (slave PIO port + GIC interrupt)."""
    type = 'AmbaDmaDevice'
    abstract = True
    cxx_header = "dev/arm/amba_device.hh"
    pio_addr = Param.Addr("Address for AMBA slave interface")
    pio_latency = Param.Latency("10ns", "Time between action and write/read result by AMBA DMA Device")
    gic = Param.BaseGic(Parent.any, "Gic to use for interrupting")
    int_num = Param.UInt32("Interrupt number that connects to GIC")
    amba_id = Param.UInt32("ID of AMBA device for kernel detection")
class A9SCU(BasicPioDevice):
    """Cortex-A9 SCU (a9scu) memory-mapped device model."""
    type = 'A9SCU'
    cxx_header = "dev/arm/a9scu.hh"
# Interrupt routing policies accepted by GenericArmPciHost.int_policy.
class ArmPciIntRouting(Enum): vals = [
    'ARM_PCI_INT_STATIC',
    'ARM_PCI_INT_DEV',
    'ARM_PCI_INT_PIN',
    ]
class GenericArmPciHost(GenericPciHost):
    """PCI host bridge with configurable interrupt routing (see ArmPciIntRouting)."""
    type = 'GenericArmPciHost'
    cxx_header = "dev/arm/pci_host.hh"
    int_policy = Param.ArmPciIntRouting("PCI interrupt routing policy")
    int_base = Param.Unsigned("PCI interrupt base")
    int_count = Param.Unsigned("Maximum number of interrupts used by this host")
class RealViewCtrl(BasicPioDevice):
    """RealView system controller exposing the SYS_* register block."""
    type = 'RealViewCtrl'
    cxx_header = "dev/arm/rv_ctrl.hh"
    proc_id0 = Param.UInt32(0x0C000000, "Processor ID, SYS_PROCID")
    proc_id1 = Param.UInt32(0x0C000222, "Processor ID, SYS_PROCID1")
    idreg = Param.UInt32(0x00000000, "ID Register, SYS_ID")
class RealViewOsc(ClockDomain):
    """Programmable oscillator accessed through the RealView controller."""
    type = 'RealViewOsc'
    cxx_header = "dev/arm/rv_ctrl.hh"
    parent = Param.RealViewCtrl(Parent.any, "RealView controller")
    # TODO: We currently don't have the notion of a clock source,
    # which means we have to associate oscillators with a voltage
    # source.
    voltage_domain = Param.VoltageDomain(Parent.voltage_domain,
                                         "Voltage domain")
    # See ARM DUI 0447J (ARM Motherboard Express uATX -- V2M-P1) and
    # the individual core/logic tile reference manuals for details
    # about the site/position/dcc/device allocation.
    site = Param.UInt8("Board Site")
    position = Param.UInt8("Position in device stack")
    dcc = Param.UInt8("Daughterboard Configuration Controller")
    device = Param.UInt8("Device ID")
    freq = Param.Clock("Default frequency")
class RealViewTemperatureSensor(SimObject):
    """Temperature sensor accessed through the RealView controller."""
    type = 'RealViewTemperatureSensor'
    cxx_header = "dev/arm/rv_ctrl.hh"
    parent = Param.RealViewCtrl(Parent.any, "RealView controller")
    system = Param.System(Parent.any, "system")
    # See ARM DUI 0447J (ARM Motherboard Express uATX -- V2M-P1) and
    # the individual core/logic tile reference manuals for details
    # about the site/position/dcc/device allocation.
    site = Param.UInt8("Board Site")
    position = Param.UInt8("Position in device stack")
    dcc = Param.UInt8("Daughterboard Configuration Controller")
    device = Param.UInt8("Device ID")
class VExpressMCC(SubSystem):
    """ARM V2M-P1 Motherboard Configuration Controller
    This subsystem describes a subset of the devices that sit behind the
    motherboard configuration controller on the ARM Motherboard
    Express (V2M-P1) motherboard. See ARM DUI 0447J for details.
    """
    # Devices behind the MCC all live at site 0, position 0, DCC 0.
    class Osc(RealViewOsc):
        site, position, dcc = (0, 0, 0)
    class Temperature(RealViewTemperatureSensor):
        site, position, dcc = (0, 0, 0)
    osc_mcc = Osc(device=0, freq="50MHz")
    osc_clcd = Osc(device=1, freq="23.75MHz")
    osc_peripheral = Osc(device=2, freq="24MHz")
    osc_system_bus = Osc(device=4, freq="24MHz")
    # See Table 4.19 in ARM DUI 0447J (Motherboard Express uATX TRM).
    # NOTE(review): 'temp_crtl' looks like a typo for 'temp_ctrl', but the
    # attribute name is part of the config interface, so it is kept as-is.
    temp_crtl = Temperature(device=0)
class CoreTile2A15DCC(SubSystem):
    """ARM CoreTile Express A15x2 Daughterboard Configuration Controller
    This subsystem describes a subset of the devices that sit behind the
    daughterboard configuration controller on a CoreTile Express A15x2. See
    ARM DUI 0604E for details.
    """
    # All oscillators on this daughterboard are at site 1, position 0, DCC 0.
    class Osc(RealViewOsc):
        site, position, dcc = (1, 0, 0)
    # See Table 2.8 in ARM DUI 0604E (CoreTile Express A15x2 TRM)
    osc_cpu = Osc(device=0, freq="60MHz")
    osc_hsbm = Osc(device=4, freq="40MHz")
    osc_pxl = Osc(device=5, freq="23.75MHz")
    osc_smb = Osc(device=6, freq="50MHz")
    osc_sys = Osc(device=7, freq="60MHz")
    osc_ddr = Osc(device=8, freq="40MHz")
class VGic(PioDevice):
    """GIC virtualization interface: exposes the hypervisor control
    registers and the virtual CPU interface of the GIC."""
    type = 'VGic'
    cxx_header = "dev/arm/vgic.hh"
    gic = Param.BaseGic(Parent.any, "Gic to use for interrupting")
    platform = Param.Platform(Parent.any, "Platform this device is part of.")
    vcpu_addr = Param.Addr(0, "Address for vcpu interfaces")
    hv_addr = Param.Addr(0, "Address for hv control")
    pio_delay = Param.Latency('10ns', "Delay for PIO r/w")
    # The number of list registers is not currently configurable at runtime.
    ppint = Param.UInt32("HV maintenance interrupt number")
class AmbaFake(AmbaPioDevice):
    """Placeholder AMBA device: accepts PIO accesses without modeling
    any real hardware behaviour (an AMBA-aware IsaFake)."""
    type = 'AmbaFake'
    cxx_header = "dev/arm/amba_fake.hh"
    ignore_access = Param.Bool(False, "Ignore reads/writes to this device, (e.g. IsaFake + AMBA)")
    # Default AMBA ID; instances override it when a specific peripheral
    # ID must be reported.  (Removed a stray C-style trailing semicolon.)
    amba_id = 0
class Pl011(Uart):
    """ARM PL011 UART model, interrupting through the GIC."""
    type = 'Pl011'
    cxx_header = "dev/arm/pl011.hh"
    gic = Param.BaseGic(Parent.any, "Gic to use for interrupting")
    int_num = Param.UInt32("Interrupt number that connects to GIC")
    # Useful for scripted runs: terminate when the guest sends EOT.
    end_on_eot = Param.Bool(False, "End the simulation when a EOT is received on the UART")
    int_delay = Param.Latency("100ns", "Time between action and interrupt generation by UART")
class Sp804(AmbaPioDevice):
    """ARM SP804 dual-timer module: two timers, each with its own input
    clock and GIC interrupt line."""
    type = 'Sp804'
    cxx_header = "dev/arm/timer_sp804.hh"
    gic = Param.BaseGic(Parent.any, "Gic to use for interrupting")
    int_num0 = Param.UInt32("Interrupt number that connects to GIC")
    clock0 = Param.Clock('1MHz', "Clock speed of the input")
    int_num1 = Param.UInt32("Interrupt number that connects to GIC")
    clock1 = Param.Clock('1MHz', "Clock speed of the input")
    amba_id = 0x00141804
class CpuLocalTimer(BasicPioDevice):
    """Per-CPU private timer and watchdog block; each CPU gets its own
    timer/watchdog interrupt pair routed through the GIC."""
    type = 'CpuLocalTimer'
    cxx_header = "dev/arm/timer_cpulocal.hh"
    gic = Param.BaseGic(Parent.any, "Gic to use for interrupting")
    # Fixed typo in the description string ("Interrrupt" -> "Interrupt").
    int_num_timer = Param.UInt32("Interrupt number used per-cpu to GIC")
    int_num_watchdog = Param.UInt32("Interrupt number for per-cpu watchdog to GIC")
class GenericTimer(SimObject):
    """ARM architected generic timer (per-CPU physical and virtual
    timers, interrupting through the GIC)."""
    type = 'GenericTimer'
    cxx_header = "dev/arm/generic_timer.hh"
    system = Param.ArmSystem(Parent.any, "system")
    gic = Param.BaseGic(Parent.any, "GIC to use for interrupting")
    # @todo: for now only two timers per CPU is supported, which is the
    # normal behaviour when security extensions are disabled.
    int_phys = Param.UInt32("Physical timer interrupt number")
    int_virt = Param.UInt32("Virtual timer interrupt number")
class GenericTimerMem(PioDevice):
    """Memory-mapped generic timer frame (physical and virtual
    interrupt lines, registers at 'base')."""
    type = 'GenericTimerMem'
    cxx_header = "dev/arm/generic_timer.hh"
    gic = Param.BaseGic(Parent.any, "GIC to use for interrupting")
    base = Param.Addr(0, "Base address")
    int_phys = Param.UInt32("Interrupt number")
    int_virt = Param.UInt32("Interrupt number")
class PL031(AmbaIntDevice):
    """ARM PL031 real-time clock."""
    type = 'PL031'
    cxx_header = "dev/arm/rtc_pl031.hh"
    time = Param.Time('01/01/2009', "System time to use ('Now' for actual time)")
    amba_id = 0x00341031
class Pl050(AmbaIntDevice):
    """ARM PL050 keyboard/mouse interface (KMI); input events come from
    the VNC server."""
    type = 'Pl050'
    cxx_header = "dev/arm/kmi.hh"
    vnc = Param.VncInput(Parent.any, "Vnc server for remote frame buffer display")
    is_mouse = Param.Bool(False, "Is this interface a mouse, if not a keyboard")
    # Override AmbaIntDevice's 100ns default interrupt delay.
    int_delay = '1us'
    amba_id = 0x00141050
class Pl111(AmbaDmaDevice):
    """ARM PL111 CLCD controller; the frame buffer is exported through
    the VNC server and optionally captured to disk."""
    type = 'Pl111'
    cxx_header = "dev/arm/pl111.hh"
    pixel_clock = Param.Clock('24MHz', "Pixel clock")
    vnc = Param.VncInput(Parent.any, "Vnc server for remote frame buffer display")
    amba_id = 0x00141111
    enable_capture = Param.Bool(True, "capture frame to system.framebuffer.bmp")
class HDLcd(AmbaDmaDevice):
    """ARM HDLCD display controller; frame buffer exported through the
    VNC server, with workarounds for known guest-kernel driver bugs."""
    type = 'HDLcd'
    cxx_header = "dev/arm/hdlcd.hh"
    vnc = Param.VncInput(Parent.any, "Vnc server for remote frame buffer "
                                     "display")
    amba_id = 0x00141000
    workaround_swap_rb = Param.Bool(False, "Workaround incorrect color "
                                    "selector order in some kernels")
    workaround_dma_line_count = Param.Bool(True, "Workaround incorrect "
                                           "DMA line count (off by 1)")
    enable_capture = Param.Bool(True, "capture frame to system.framebuffer.bmp")
    pixel_buffer_size = Param.MemorySize32("2kB", "Size of address range")
    # Pixel clock normally comes from a daughterboard oscillator
    # (e.g. CoreTile2A15DCC.osc_pxl in the platforms below).
    pxl_clk = Param.ClockDomain("Pixel clock source")
    pixel_chunk = Param.Unsigned(32, "Number of pixels to handle in one batch")
    virt_refresh_rate = Param.Frequency("20Hz", "Frame refresh rate "
                                        "in KVM mode")
class RealView(Platform):
    """Base class for the ARM RealView/Versatile Express platforms.
    Subclasses describe a concrete board by overriding the device lists
    (_on_chip_devices/_off_chip_devices), the off-chip address ranges,
    and -- in the older platforms -- the attach*/clk-domain hooks."""
    type = 'RealView'
    cxx_header = "dev/arm/realview.hh"
    system = Param.System(Parent.any, "system")
    # Default physical memory map: one 256MB region at address 0.
    _mem_regions = [(Addr(0), Addr('256MB'))]
    def _on_chip_devices(self):
        # Devices "close" to the cores; overridden by subclasses.
        return []
    def _off_chip_devices(self):
        # Devices behind the I/O bridge; overridden by subclasses.
        return []
    _off_chip_ranges = []
    def _attach_device(self, device, bus, dma_ports=None):
        # Connect whichever ports the device actually has.  If the
        # caller collects DMA ports (dma_ports is a list), record the
        # port instead of wiring it to the bus directly.
        if hasattr(device, "pio"):
            device.pio = bus.master
        if hasattr(device, "dma"):
            if dma_ports is None:
                device.dma = bus.slave
            else:
                dma_ports.append(device.dma)
    def _attach_io(self, devices, *args, **kwargs):
        # Attach a list of devices to a bus (see _attach_device).
        for d in devices:
            self._attach_device(d, *args, **kwargs)
    def _attach_clk(self, devices, clkdomain):
        # Assign a clock domain to every device that has one.
        for d in devices:
            if hasattr(d, "clk_domain"):
                d.clk_domain = clkdomain
    def attachPciDevices(self):
        # No PCI devices by default; subclasses may override.
        pass
    def enableMSIX(self):
        # No MSI-X support by default; subclasses may override.
        pass
    def onChipIOClkDomain(self, clkdomain):
        self._attach_clk(self._on_chip_devices(), clkdomain)
    def offChipIOClkDomain(self, clkdomain):
        self._attach_clk(self._off_chip_devices(), clkdomain)
    def attachOnChipIO(self, bus, bridge=None, **kwargs):
        self._attach_io(self._on_chip_devices(), bus, **kwargs)
        # The bridge forwards accesses for everything that is off chip.
        if bridge:
            bridge.ranges = self._off_chip_ranges
    def attachIO(self, *args, **kwargs):
        self._attach_io(self._off_chip_devices(), *args, **kwargs)
    def setupBootLoader(self, mem_bus, cur_sys, loc):
        # Boot ROM at the 2GB boundary; not reported in the
        # configuration table seen by the guest.
        self.nvmem = SimpleMemory(range = AddrRange('2GB', size = '64MB'),
                                  conf_table_reported = False)
        self.nvmem.port = mem_bus.master
        # 'loc' resolves a binary name to a path on the host.
        cur_sys.boot_loader = loc('boot.arm')
        cur_sys.atags_addr = 0x100
        cur_sys.load_addr_mask = 0xfffffff
        cur_sys.load_offset = 0
# Reference for memory map and interrupt number
# RealView Platform Baseboard Explore for Cortex-A9 User Guide(ARM DUI 0440A)
# Chapter 4: Programmer's Reference
class RealViewPBX(RealView):
    """RealView Platform Baseboard Explore for Cortex-A9 (PBX-A9).
    Memory map and interrupt numbers follow the PBX-A9 User Guide
    (ARM DUI 0440A), Chapter 4: Programmer's Reference.
    """
    uart = Pl011(pio_addr=0x10009000, int_num=44)
    realview_io = RealViewCtrl(pio_addr=0x10000000)
    mcc = VExpressMCC()
    dcc = CoreTile2A15DCC()
    gic = Pl390()
    pci_host = GenericPciHost(
        conf_base=0x30000000, conf_size='256MB', conf_device_bits=16,
        pci_pio_base=0)
    timer0 = Sp804(int_num0=36, int_num1=36, pio_addr=0x10011000)
    timer1 = Sp804(int_num0=37, int_num1=37, pio_addr=0x10012000)
    local_cpu_timer = CpuLocalTimer(int_num_timer=29, int_num_watchdog=30, pio_addr=0x1f000600)
    clcd = Pl111(pio_addr=0x10020000, int_num=55)
    kmi0 = Pl050(pio_addr=0x10006000, int_num=52)
    kmi1 = Pl050(pio_addr=0x10007000, int_num=53, is_mouse=True)
    a9scu = A9SCU(pio_addr=0x1f000000)
    cf_ctrl = IdeController(disks=[], pci_func=0, pci_dev=7, pci_bus=2,
                            io_shift = 1, ctrl_offset = 2, Command = 0x1,
                            BAR0 = 0x18000000, BAR0Size = '16B',
                            BAR1 = 0x18000100, BAR1Size = '1B',
                            BAR0LegacyIO = True, BAR1LegacyIO = True)
    l2x0_fake = IsaFake(pio_addr=0x1f002000, pio_size=0xfff)
    flash_fake = IsaFake(pio_addr=0x40000000, pio_size=0x20000000,
                         fake_mem=True)
    dmac_fake = AmbaFake(pio_addr=0x10030000)
    uart1_fake = AmbaFake(pio_addr=0x1000a000)
    uart2_fake = AmbaFake(pio_addr=0x1000b000)
    uart3_fake = AmbaFake(pio_addr=0x1000c000)
    smc_fake = AmbaFake(pio_addr=0x100e1000)
    sp810_fake = AmbaFake(pio_addr=0x10001000, ignore_access=True)
    watchdog_fake = AmbaFake(pio_addr=0x10010000)
    gpio0_fake = AmbaFake(pio_addr=0x10013000)
    gpio1_fake = AmbaFake(pio_addr=0x10014000)
    gpio2_fake = AmbaFake(pio_addr=0x10015000)
    ssp_fake = AmbaFake(pio_addr=0x1000d000)
    sci_fake = AmbaFake(pio_addr=0x1000e000)
    aaci_fake = AmbaFake(pio_addr=0x10004000)
    mmc_fake = AmbaFake(pio_addr=0x10005000)
    rtc = PL031(pio_addr=0x10017000, int_num=42)
    energy_ctrl = EnergyCtrl(pio_addr=0x1000f000)
    # Attach I/O devices that are on chip and also set the appropriate
    # ranges for the bridge
    def attachOnChipIO(self, bus, bridge=None, dma_ports=None):
        # 'dma_ports' is accepted only for interface compatibility with
        # the other platforms; no on-chip device here has a DMA port.
        # (Replaced the former mutable default argument '[]' with None.)
        self.gic.pio = bus.master
        self.l2x0_fake.pio = bus.master
        self.a9scu.pio = bus.master
        self.local_cpu_timer.pio = bus.master
        if bridge:
            # Bridge ranges based on excluding what is part of on-chip I/O
            # (gic, l2x0, a9scu, local_cpu_timer)
            bridge.ranges = [AddrRange(self.realview_io.pio_addr,
                                       self.a9scu.pio_addr - 1),
                             AddrRange(self.flash_fake.pio_addr,
                                       self.flash_fake.pio_addr + \
                                       self.flash_fake.pio_size - 1)]
    # Set the clock domain for IO objects that are considered
    # to be "close" to the cores.
    def onChipIOClkDomain(self, clkdomain):
        self.gic.clk_domain = clkdomain
        self.l2x0_fake.clk_domain = clkdomain
        # BUG FIX: previously assigned 'self.a9scu.clkdomain' (missing
        # underscore), so the SCU's actual clock domain was never set.
        self.a9scu.clk_domain = clkdomain
        self.local_cpu_timer.clk_domain = clkdomain
    # Attach I/O devices to specified bus object.  Can't do this
    # earlier, since the bus object itself is typically defined at the
    # System level.
    def attachIO(self, bus, dma_ports=None):
        # Replaced the former mutable default argument '[]' with None;
        # an empty list preserves the original behaviour.
        if dma_ports is None:
            dma_ports = []
        self.uart.pio = bus.master
        self.realview_io.pio = bus.master
        self.pci_host.pio = bus.master
        self.timer0.pio = bus.master
        self.timer1.pio = bus.master
        self.clcd.pio = bus.master
        if dma_ports.count(self.clcd.dma) == 0:
            self.clcd.dma = bus.slave
        self.kmi0.pio = bus.master
        self.kmi1.pio = bus.master
        self.cf_ctrl.pio = bus.master
        if dma_ports.count(self.cf_ctrl.dma) == 0:
            self.cf_ctrl.dma = bus.slave
        self.dmac_fake.pio = bus.master
        self.uart1_fake.pio = bus.master
        self.uart2_fake.pio = bus.master
        self.uart3_fake.pio = bus.master
        self.smc_fake.pio = bus.master
        self.sp810_fake.pio = bus.master
        self.watchdog_fake.pio = bus.master
        self.gpio0_fake.pio = bus.master
        self.gpio1_fake.pio = bus.master
        self.gpio2_fake.pio = bus.master
        self.ssp_fake.pio = bus.master
        self.sci_fake.pio = bus.master
        self.aaci_fake.pio = bus.master
        self.mmc_fake.pio = bus.master
        self.rtc.pio = bus.master
        self.flash_fake.pio = bus.master
        self.energy_ctrl.pio = bus.master
    # Set the clock domain for IO objects that are considered
    # to be "far" away from the cores.
    def offChipIOClkDomain(self, clkdomain):
        self.uart.clk_domain = clkdomain
        self.realview_io.clk_domain = clkdomain
        self.timer0.clk_domain = clkdomain
        self.timer1.clk_domain = clkdomain
        self.clcd.clk_domain = clkdomain
        self.kmi0.clk_domain = clkdomain
        self.kmi1.clk_domain = clkdomain
        self.cf_ctrl.clk_domain = clkdomain
        self.dmac_fake.clk_domain = clkdomain
        self.uart1_fake.clk_domain = clkdomain
        self.uart2_fake.clk_domain = clkdomain
        self.uart3_fake.clk_domain = clkdomain
        self.smc_fake.clk_domain = clkdomain
        self.sp810_fake.clk_domain = clkdomain
        self.watchdog_fake.clk_domain = clkdomain
        self.gpio0_fake.clk_domain = clkdomain
        self.gpio1_fake.clk_domain = clkdomain
        self.gpio2_fake.clk_domain = clkdomain
        self.ssp_fake.clk_domain = clkdomain
        self.sci_fake.clk_domain = clkdomain
        self.aaci_fake.clk_domain = clkdomain
        self.mmc_fake.clk_domain = clkdomain
        self.rtc.clk_domain = clkdomain
        self.flash_fake.clk_domain = clkdomain
        self.energy_ctrl.clk_domain = clkdomain
# Reference for memory map and interrupt number
# RealView Emulation Baseboard User Guide (ARM DUI 0143B)
# Chapter 4: Programmer's Reference
class RealViewEB(RealView):
    """RealView Emulation Baseboard.
    Memory map and interrupt numbers follow the Emulation Baseboard
    User Guide (ARM DUI 0143B), Chapter 4: Programmer's Reference.
    """
    uart = Pl011(pio_addr=0x10009000, int_num=44)
    realview_io = RealViewCtrl(pio_addr=0x10000000, idreg=0x01400500)
    mcc = VExpressMCC()
    dcc = CoreTile2A15DCC()
    gic = Pl390(dist_addr=0x10041000, cpu_addr=0x10040000)
    timer0 = Sp804(int_num0=36, int_num1=36, pio_addr=0x10011000)
    timer1 = Sp804(int_num0=37, int_num1=37, pio_addr=0x10012000)
    clcd = Pl111(pio_addr=0x10020000, int_num=23)
    kmi0 = Pl050(pio_addr=0x10006000, int_num=20)
    kmi1 = Pl050(pio_addr=0x10007000, int_num=21, is_mouse=True)
    l2x0_fake = IsaFake(pio_addr=0x1f002000, pio_size=0xfff, warn_access="1")
    flash_fake = IsaFake(pio_addr=0x40000000, pio_size=0x20000000-1,
                         fake_mem=True)
    dmac_fake = AmbaFake(pio_addr=0x10030000)
    uart1_fake = AmbaFake(pio_addr=0x1000a000)
    uart2_fake = AmbaFake(pio_addr=0x1000b000)
    uart3_fake = AmbaFake(pio_addr=0x1000c000)
    smcreg_fake = IsaFake(pio_addr=0x10080000, pio_size=0x10000-1)
    smc_fake = AmbaFake(pio_addr=0x100e1000)
    sp810_fake = AmbaFake(pio_addr=0x10001000, ignore_access=True)
    watchdog_fake = AmbaFake(pio_addr=0x10010000)
    gpio0_fake = AmbaFake(pio_addr=0x10013000)
    gpio1_fake = AmbaFake(pio_addr=0x10014000)
    gpio2_fake = AmbaFake(pio_addr=0x10015000)
    ssp_fake = AmbaFake(pio_addr=0x1000d000)
    sci_fake = AmbaFake(pio_addr=0x1000e000)
    aaci_fake = AmbaFake(pio_addr=0x10004000)
    mmc_fake = AmbaFake(pio_addr=0x10005000)
    rtc_fake = AmbaFake(pio_addr=0x10017000, amba_id=0x41031)
    energy_ctrl = EnergyCtrl(pio_addr=0x1000f000)
    # Attach I/O devices that are on chip and also set the appropriate
    # ranges for the bridge
    def attachOnChipIO(self, bus, bridge=None, dma_ports=None):
        # 'dma_ports' is accepted only for interface compatibility; no
        # on-chip device here has a DMA port.  (Replaced the former
        # mutable default argument '[]' with None.)
        self.gic.pio = bus.master
        self.l2x0_fake.pio = bus.master
        if bridge:
            # Bridge ranges based on excluding what is part of on-chip I/O
            # (gic, l2x0)
            bridge.ranges = [AddrRange(self.realview_io.pio_addr,
                                       self.gic.cpu_addr - 1),
                             AddrRange(self.flash_fake.pio_addr, Addr.max)]
    # Set the clock domain for IO objects that are considered
    # to be "close" to the cores.
    def onChipIOClkDomain(self, clkdomain):
        self.gic.clk_domain = clkdomain
        self.l2x0_fake.clk_domain = clkdomain
    # Attach I/O devices to specified bus object.  Can't do this
    # earlier, since the bus object itself is typically defined at the
    # System level.
    def attachIO(self, bus, dma_ports=None):
        # Replaced the former mutable default argument '[]' with None;
        # an empty list preserves the original behaviour.
        if dma_ports is None:
            dma_ports = []
        self.uart.pio = bus.master
        self.realview_io.pio = bus.master
        # BUG FIX: RealViewEB declares no 'pci_host' child (unlike
        # RealViewPBX), so the previous unconditional
        # 'self.pci_host.pio = bus.master' failed with AttributeError.
        # Attach it only when a subclass provides one.
        if hasattr(self, "pci_host"):
            self.pci_host.pio = bus.master
        self.timer0.pio = bus.master
        self.timer1.pio = bus.master
        self.clcd.pio = bus.master
        if dma_ports.count(self.clcd.dma) == 0:
            self.clcd.dma = bus.slave
        self.kmi0.pio = bus.master
        self.kmi1.pio = bus.master
        self.dmac_fake.pio = bus.master
        self.uart1_fake.pio = bus.master
        self.uart2_fake.pio = bus.master
        self.uart3_fake.pio = bus.master
        self.smc_fake.pio = bus.master
        self.sp810_fake.pio = bus.master
        self.watchdog_fake.pio = bus.master
        self.gpio0_fake.pio = bus.master
        self.gpio1_fake.pio = bus.master
        self.gpio2_fake.pio = bus.master
        self.ssp_fake.pio = bus.master
        self.sci_fake.pio = bus.master
        self.aaci_fake.pio = bus.master
        self.mmc_fake.pio = bus.master
        self.rtc_fake.pio = bus.master
        self.flash_fake.pio = bus.master
        self.smcreg_fake.pio = bus.master
        self.energy_ctrl.pio = bus.master
    # Set the clock domain for IO objects that are considered
    # to be "far" away from the cores.
    def offChipIOClkDomain(self, clkdomain):
        self.uart.clk_domain = clkdomain
        self.realview_io.clk_domain = clkdomain
        self.timer0.clk_domain = clkdomain
        self.timer1.clk_domain = clkdomain
        self.clcd.clk_domain = clkdomain
        self.kmi0.clk_domain = clkdomain
        self.kmi1.clk_domain = clkdomain
        self.dmac_fake.clk_domain = clkdomain
        self.uart1_fake.clk_domain = clkdomain
        self.uart2_fake.clk_domain = clkdomain
        self.uart3_fake.clk_domain = clkdomain
        self.smc_fake.clk_domain = clkdomain
        self.sp810_fake.clk_domain = clkdomain
        self.watchdog_fake.clk_domain = clkdomain
        self.gpio0_fake.clk_domain = clkdomain
        self.gpio1_fake.clk_domain = clkdomain
        self.gpio2_fake.clk_domain = clkdomain
        self.ssp_fake.clk_domain = clkdomain
        self.sci_fake.clk_domain = clkdomain
        self.aaci_fake.clk_domain = clkdomain
        self.mmc_fake.clk_domain = clkdomain
        # BUG FIX: this class has no 'rtc' child (its RTC is the
        # 'rtc_fake' placeholder above); the previous
        # 'self.rtc.clk_domain' raised AttributeError.
        self.rtc_fake.clk_domain = clkdomain
        self.flash_fake.clk_domain = clkdomain
        self.smcreg_fake.clk_domain = clkdomain
        self.energy_ctrl.clk_domain = clkdomain
class VExpress_EMM(RealView):
    """Versatile Express (EMM) platform model with 2GB of memory at the
    2GB boundary, a Pl390 GIC, HDLCD, and optional PCI devices."""
    _mem_regions = [(Addr('2GB'), Addr('2GB'))]
    uart = Pl011(pio_addr=0x1c090000, int_num=37)
    realview_io = RealViewCtrl(
        proc_id0=0x14000000, proc_id1=0x14000000,
        idreg=0x02250000, pio_addr=0x1C010000)
    mcc = VExpressMCC()
    dcc = CoreTile2A15DCC()
    gic = Pl390(dist_addr=0x2C001000, cpu_addr=0x2C002000)
    pci_host = GenericPciHost(
        conf_base=0x30000000, conf_size='256MB', conf_device_bits=16,
        pci_pio_base=0)
    local_cpu_timer = CpuLocalTimer(int_num_timer=29, int_num_watchdog=30, pio_addr=0x2C080000)
    generic_timer = GenericTimer(int_phys=29, int_virt=27)
    timer0 = Sp804(int_num0=34, int_num1=34, pio_addr=0x1C110000, clock0='1MHz', clock1='1MHz')
    timer1 = Sp804(int_num0=35, int_num1=35, pio_addr=0x1C120000, clock0='1MHz', clock1='1MHz')
    clcd = Pl111(pio_addr=0x1c1f0000, int_num=46)
    # HDLCD pixel clock comes from the daughterboard oscillator.
    hdlcd = HDLcd(pxl_clk=dcc.osc_pxl,
                  pio_addr=0x2b000000, int_num=117,
                  workaround_swap_rb=True)
    kmi0 = Pl050(pio_addr=0x1c060000, int_num=44)
    kmi1 = Pl050(pio_addr=0x1c070000, int_num=45, is_mouse=True)
    vgic = VGic(vcpu_addr=0x2c006000, hv_addr=0x2c004000, ppint=25)
    cf_ctrl = IdeController(disks=[], pci_func=0, pci_dev=0, pci_bus=2,
                            io_shift = 2, ctrl_offset = 2, Command = 0x1,
                            BAR0 = 0x1C1A0000, BAR0Size = '256B',
                            BAR1 = 0x1C1A0100, BAR1Size = '4096B',
                            BAR0LegacyIO = True, BAR1LegacyIO = True)
    vram = SimpleMemory(range = AddrRange(0x18000000, size='32MB'),
                        conf_table_reported = False)
    rtc = PL031(pio_addr=0x1C170000, int_num=36)
    l2x0_fake = IsaFake(pio_addr=0x2C100000, pio_size=0xfff)
    uart1_fake = AmbaFake(pio_addr=0x1C0A0000)
    uart2_fake = AmbaFake(pio_addr=0x1C0B0000)
    uart3_fake = AmbaFake(pio_addr=0x1C0C0000)
    sp810_fake = AmbaFake(pio_addr=0x1C020000, ignore_access=True)
    watchdog_fake = AmbaFake(pio_addr=0x1C0F0000)
    aaci_fake = AmbaFake(pio_addr=0x1C040000)
    lan_fake = IsaFake(pio_addr=0x1A000000, pio_size=0xffff)
    usb_fake = IsaFake(pio_addr=0x1B000000, pio_size=0x1ffff)
    mmc_fake = AmbaFake(pio_addr=0x1c050000)
    energy_ctrl = EnergyCtrl(pio_addr=0x1c080000)
    # Attach any PCI devices that are supported
    def attachPciDevices(self):
        self.ethernet = IGbE_e1000(pci_bus=0, pci_dev=0, pci_func=0,
                                   InterruptLine=1, InterruptPin=1)
        self.ide = IdeController(disks = [], pci_bus=0, pci_dev=1, pci_func=0,
                                 InterruptLine=2, InterruptPin=2)
    def enableMSIX(self):
        # Replace the GIC with one that has enough interrupt lines and
        # add a GICv2m MSI frame.
        self.gic = Pl390(dist_addr=0x2C001000, cpu_addr=0x2C002000, it_lines=512)
        self.gicv2m = Gicv2m()
        self.gicv2m.frames = [Gicv2mFrame(spi_base=256, spi_len=64, addr=0x2C1C0000)]
    def setupBootLoader(self, mem_bus, cur_sys, loc):
        # Boot ROM at address 0; not reported in the configuration
        # table seen by the guest.
        self.nvmem = SimpleMemory(range = AddrRange('64MB'),
                                  conf_table_reported = False)
        self.nvmem.port = mem_bus.master
        cur_sys.boot_loader = loc('boot_emm.arm')
        cur_sys.atags_addr = 0x8000000
        cur_sys.load_addr_mask = 0xfffffff
        cur_sys.load_offset = 0x80000000
    # Attach I/O devices that are on chip and also set the appropriate
    # ranges for the bridge
    def attachOnChipIO(self, bus, bridge=None, dma_ports=None):
        # Replaced the former mutable default argument '[]' with None;
        # an empty list preserves the original behaviour.
        if dma_ports is None:
            dma_ports = []
        self.gic.pio = bus.master
        self.vgic.pio = bus.master
        self.local_cpu_timer.pio = bus.master
        # gicv2m only exists after enableMSIX() has been called.
        if hasattr(self, "gicv2m"):
            self.gicv2m.pio = bus.master
        if dma_ports.count(self.hdlcd.dma) == 0:
            self.hdlcd.dma = bus.slave
        if bridge:
            # Bridge ranges based on excluding what is part of on-chip I/O
            # (gic, a9scu)
            bridge.ranges = [AddrRange(0x2F000000, size='16MB'),
                             AddrRange(0x2B000000, size='4MB'),
                             AddrRange(0x30000000, size='256MB'),
                             AddrRange(0x40000000, size='512MB'),
                             AddrRange(0x18000000, size='64MB'),
                             AddrRange(0x1C000000, size='64MB')]
    # Set the clock domain for IO objects that are considered
    # to be "close" to the cores.
    def onChipIOClkDomain(self, clkdomain):
        self.gic.clk_domain = clkdomain
        if hasattr(self, "gicv2m"):
            self.gicv2m.clk_domain = clkdomain
        self.hdlcd.clk_domain = clkdomain
        self.vgic.clk_domain = clkdomain
    # Attach I/O devices to specified bus object.  Done here
    # as the specified bus to connect to may not always be fixed.
    def attachIO(self, bus, dma_ports=None):
        # Replaced the former mutable default argument '[]' with None;
        # an empty list preserves the original behaviour.
        if dma_ports is None:
            dma_ports = []
        self.uart.pio = bus.master
        self.realview_io.pio = bus.master
        self.pci_host.pio = bus.master
        self.timer0.pio = bus.master
        self.timer1.pio = bus.master
        self.clcd.pio = bus.master
        if dma_ports.count(self.clcd.dma) == 0:
            self.clcd.dma = bus.slave
        self.hdlcd.pio = bus.master
        self.kmi0.pio = bus.master
        self.kmi1.pio = bus.master
        self.cf_ctrl.pio = bus.master
        if dma_ports.count(self.cf_ctrl.dma) == 0:
            self.cf_ctrl.dma = bus.slave
        self.rtc.pio = bus.master
        self.vram.port = bus.master
        self.l2x0_fake.pio = bus.master
        self.uart1_fake.pio = bus.master
        self.uart2_fake.pio = bus.master
        self.uart3_fake.pio = bus.master
        self.sp810_fake.pio = bus.master
        self.watchdog_fake.pio = bus.master
        self.aaci_fake.pio = bus.master
        self.lan_fake.pio = bus.master
        self.usb_fake.pio = bus.master
        self.mmc_fake.pio = bus.master
        self.energy_ctrl.pio = bus.master
        # ide/ethernet only exist if attachPciDevices() was called; a
        # bare 'except:' previously hid any other error here, so catch
        # only the expected AttributeError.
        try:
            self.ide.pio = bus.master
            if dma_ports.count(self.ide.dma) == 0:
                self.ide.dma = bus.slave
            self.ethernet.pio = bus.master
            if dma_ports.count(self.ethernet.dma) == 0:
                self.ethernet.dma = bus.slave
        except AttributeError:
            pass
    # Set the clock domain for IO objects that are considered
    # to be "far" away from the cores.
    def offChipIOClkDomain(self, clkdomain):
        self.uart.clk_domain = clkdomain
        self.realview_io.clk_domain = clkdomain
        self.timer0.clk_domain = clkdomain
        self.timer1.clk_domain = clkdomain
        self.clcd.clk_domain = clkdomain
        self.kmi0.clk_domain = clkdomain
        self.kmi1.clk_domain = clkdomain
        self.cf_ctrl.clk_domain = clkdomain
        self.rtc.clk_domain = clkdomain
        self.vram.clk_domain = clkdomain
        self.l2x0_fake.clk_domain = clkdomain
        self.uart1_fake.clk_domain = clkdomain
        self.uart2_fake.clk_domain = clkdomain
        self.uart3_fake.clk_domain = clkdomain
        self.sp810_fake.clk_domain = clkdomain
        self.watchdog_fake.clk_domain = clkdomain
        self.aaci_fake.clk_domain = clkdomain
        self.lan_fake.clk_domain = clkdomain
        self.usb_fake.clk_domain = clkdomain
        self.mmc_fake.clk_domain = clkdomain
        self.energy_ctrl.clk_domain = clkdomain
class VExpress_EMM64(VExpress_EMM):
    """64-bit variant of VExpress_EMM: larger memory map, AArch64 boot
    loader, and a PCI host with a real PIO window."""
    # Three memory regions are specified totalling 512GB
    _mem_regions = [(Addr('2GB'), Addr('2GB')), (Addr('34GB'), Addr('30GB')),
                    (Addr('512GB'), Addr('480GB'))]
    # Overrides the parent's host: 12 device bits and the PCI I/O
    # window at 0x2f000000.
    pci_host = GenericPciHost(
        conf_base=0x30000000, conf_size='256MB', conf_device_bits=12,
        pci_pio_base=0x2f000000)
    def setupBootLoader(self, mem_bus, cur_sys, loc):
        # Boot ROM at address 0 holding the AArch64 boot loader; not
        # reported in the configuration table seen by the guest.
        self.nvmem = SimpleMemory(range=AddrRange(0, size='64MB'),
                                  conf_table_reported=False)
        self.nvmem.port = mem_bus.master
        cur_sys.boot_loader = loc('boot_emm.arm64')
        cur_sys.atags_addr = 0x8000000
        cur_sys.load_addr_mask = 0xfffffff
        cur_sys.load_offset = 0x80000000
class VExpress_GEM5_V1(RealView):
    """
    The VExpress gem5 memory map is loosely based on a modified
    Versatile Express RS1 memory map.
    The gem5 platform has been designed to implement a subset of the
    original Versatile Express RS1 memory map. Off-chip peripherals should,
    when possible, adhere to the Versatile Express memory map. Non-PCI
    off-chip devices that are gem5-specific should live in the CS5 memory
    space to avoid conflicts with existing devices that we might want to
    model in the future. Such devices should normally have interrupts in
    the gem5-specific SPI range.
    On-chip peripherals are loosely modeled after the ARM CoreTile Express
    A15x2 A7x3 memory and interrupt map. In particular, the GIC and
    Generic Timer have the same interrupt lines and base addresses. Other
    on-chip devices are gem5 specific.
    Unlike the original Versatile Express RS2 extended platform, gem5 implements a
    large contiguous DRAM space, without aliases or holes, starting at the
    2GiB boundary. This means that PCI memory is limited to 1GiB.
    Memory map:
    0x00000000-0x03ffffff: Boot memory (CS0)
    0x04000000-0x07ffffff: Reserved
    0x08000000-0x0bffffff: Reserved (CS0 alias)
    0x0c000000-0x0fffffff: Reserved (Off-chip, CS4)
    0x10000000-0x13ffffff: gem5-specific peripherals (Off-chip, CS5)
    0x10000000-0x1000ffff: gem5 energy controller
    0x10010000-0x1001ffff: gem5 pseudo-ops
    0x14000000-0x17ffffff: Reserved (Off-chip, PSRAM, CS1)
    0x18000000-0x1bffffff: Reserved (Off-chip, Peripherals, CS2)
    0x1c000000-0x1fffffff: Peripheral block 1 (Off-chip, CS3):
    0x1c010000-0x1c01ffff: realview_io (VE system control regs.)
    0x1c060000-0x1c06ffff: KMI0 (keyboard)
    0x1c070000-0x1c07ffff: KMI1 (mouse)
    0x1c090000-0x1c09ffff: UART0
    0x1c0a0000-0x1c0affff: UART1 (reserved)
    0x1c0b0000-0x1c0bffff: UART2 (reserved)
    0x1c0c0000-0x1c0cffff: UART3 (reserved)
    0x1c170000-0x1c17ffff: RTC
    0x20000000-0x3fffffff: On-chip peripherals:
    0x2b000000-0x2b00ffff: HDLCD
    0x2c001000-0x2c001fff: GIC (distributor)
    0x2c002000-0x2c0020ff: GIC (CPU interface)
    0x2c004000-0x2c005fff: vGIC (HV)
    0x2c006000-0x2c007fff: vGIC (VCPU)
    0x2c1c0000-0x2c1cffff: GICv2m MSI frame 0
    0x2d000000-0x2d00ffff: GPU (reserved)
    0x2f000000-0x2fffffff: PCI IO space
    0x30000000-0x3fffffff: PCI config space
    0x40000000-0x7fffffff: Ext. AXI: Used as PCI memory
    0x80000000-X: DRAM
    Interrupts:
    0- 15: Software generated interrupts (SGIs)
    16- 31: On-chip private peripherals (PPIs)
    25   : vgic
    26   : generic_timer (hyp)
    27   : generic_timer (virt)
    28   : Reserved (Legacy FIQ)
    29   : generic_timer (phys, sec)
    30   : generic_timer (phys, non-sec)
    31   : Reserved (Legacy IRQ)
    32- 95: Mother board peripherals (SPIs)
    32   : Reserved (SP805)
    33   : Reserved (IOFPGA SW int)
    34-35: Reserved (SP804)
    36   : RTC
    37-40: uart0-uart3
    41-42: Reserved (PL180)
    43   : Reserved (AACI)
    44-45: kmi0-kmi1
    46   : Reserved (CLCD)
    47   : Reserved (Ethernet)
    48   : Reserved (USB)
    95-255: On-chip interrupt sources (we use these for
    gem5-specific devices, SPIs)
    95    : HDLCD
    96- 98: GPU (reserved)
    100-103: PCI
    256-319: MSI frame 0 (gem5-specific, SPIs)
    320-511: Unused
    """
    # Everything above 2GiB is memory
    _mem_regions = [(Addr('2GB'), Addr('510GB'))]
    _off_chip_ranges = [
        # CS1-CS5
        AddrRange(0x0c000000, 0x1fffffff),
        # External AXI interface (PCI)
        AddrRange(0x2f000000, 0x7fffffff),
    ]
    # Platform control device (off-chip)
    realview_io = RealViewCtrl(proc_id0=0x14000000, proc_id1=0x14000000,
                               idreg=0x02250000, pio_addr=0x1c010000)
    mcc = VExpressMCC()
    dcc = CoreTile2A15DCC()
    ### On-chip devices ###
    # kvm_gicv2_class is chosen at module import time: the in-kernel
    # KVM GIC when available, the software Pl390 model otherwise.
    gic = kvm_gicv2_class(dist_addr=0x2c001000, cpu_addr=0x2c002000,
                          it_lines=512)
    vgic = VGic(vcpu_addr=0x2c006000, hv_addr=0x2c004000, ppint=25)
    gicv2m = Gicv2m()
    gicv2m.frames = [
        Gicv2mFrame(spi_base=256, spi_len=64, addr=0x2c1c0000),
    ]
    generic_timer = GenericTimer(int_phys=29, int_virt=27)
    hdlcd = HDLcd(pxl_clk=dcc.osc_pxl,
                  pio_addr=0x2b000000, int_num=95)
    def _on_chip_devices(self):
        # Devices wired up by RealView.attachOnChipIO().
        return [
            self.gic, self.vgic, self.gicv2m,
            self.hdlcd,
            self.generic_timer,
        ]
    ### Off-chip devices ###
    uart0 = Pl011(pio_addr=0x1c090000, int_num=37)
    kmi0 = Pl050(pio_addr=0x1c060000, int_num=44)
    kmi1 = Pl050(pio_addr=0x1c070000, int_num=45, is_mouse=True)
    rtc = PL031(pio_addr=0x1c170000, int_num=36)
    ### gem5-specific off-chip devices ###
    pci_host = GenericArmPciHost(
        conf_base=0x30000000, conf_size='256MB', conf_device_bits=12,
        pci_pio_base=0x2f000000,
        int_policy="ARM_PCI_INT_DEV", int_base=100, int_count=4)
    energy_ctrl = EnergyCtrl(pio_addr=0x10000000)
    def _off_chip_devices(self):
        # Devices wired up by RealView.attachIO().
        return [
            self.realview_io,
            self.uart0,
            self.kmi0, self.kmi1,
            self.rtc,
            self.pci_host,
            self.energy_ctrl,
        ]
    def attachPciDevice(self, device, *args, **kwargs):
        # Route the device through this platform's PCI host before
        # attaching its ports.
        device.host = self.pci_host
        self._attach_device(device, *args, **kwargs)
    def setupBootLoader(self, mem_bus, cur_sys, loc):
        # Boot ROM at address 0; not reported in the configuration
        # table seen by the guest.
        self.nvmem = SimpleMemory(range=AddrRange(0, size='64MB'),
                                  conf_table_reported=False)
        self.nvmem.port = mem_bus.master
        # Both AArch64 and AArch32 boot loaders are listed;
        # NOTE(review): presumably the system picks the one matching
        # the kernel's ISA -- confirm in the ArmSystem code.
        cur_sys.boot_loader = [ loc('boot_emm.arm64'), loc('boot_emm.arm') ]
        cur_sys.atags_addr = 0x8000000
        cur_sys.load_addr_mask = 0xfffffff
        cur_sys.load_offset = 0x80000000
        # Setup m5ops. It's technically not a part of the boot
        # loader, but this is the only place we can configure the
        # system.
        cur_sys.m5ops_base = 0x10010000
| 41.76078 | 103 | 0.654309 |
from m5.params import *
from m5.proxy import *
from ClockDomain import ClockDomain
from VoltageDomain import VoltageDomain
from Device import BasicPioDevice, PioDevice, IsaFake, BadAddr, DmaDevice
from PciHost import *
from Ethernet import NSGigE, IGbE_igb, IGbE_e1000
from Ide import *
from Platform import Platform
from Terminal import Terminal
from Uart import Uart
from SimpleMemory import SimpleMemory
from Gic import *
from EnergyCtrl import EnergyCtrl
from ClockDomain import SrcClockDomain
from SubSystem import SubSystem
try:
from KvmGic import MuxingKvmGic
kvm_gicv2_class = MuxingKvmGic
except ImportError:
# software-only GIC.
kvm_gicv2_class = Pl390
pass
class AmbaPioDevice(BasicPioDevice):
type = 'AmbaPioDevice'
abstract = True
cxx_header = "dev/arm/amba_device.hh"
amba_id = Param.UInt32("ID of AMBA device for kernel detection")
class AmbaIntDevice(AmbaPioDevice):
type = 'AmbaIntDevice'
abstract = True
cxx_header = "dev/arm/amba_device.hh"
gic = Param.BaseGic(Parent.any, "Gic to use for interrupting")
int_num = Param.UInt32("Interrupt number that connects to GIC")
int_delay = Param.Latency("100ns",
"Time between action and interrupt generation by device")
class AmbaDmaDevice(DmaDevice):
type = 'AmbaDmaDevice'
abstract = True
cxx_header = "dev/arm/amba_device.hh"
pio_addr = Param.Addr("Address for AMBA slave interface")
pio_latency = Param.Latency("10ns", "Time between action and write/read result by AMBA DMA Device")
gic = Param.BaseGic(Parent.any, "Gic to use for interrupting")
int_num = Param.UInt32("Interrupt number that connects to GIC")
amba_id = Param.UInt32("ID of AMBA device for kernel detection")
class A9SCU(BasicPioDevice):
type = 'A9SCU'
cxx_header = "dev/arm/a9scu.hh"
class ArmPciIntRouting(Enum): vals = [
'ARM_PCI_INT_STATIC',
'ARM_PCI_INT_DEV',
'ARM_PCI_INT_PIN',
]
class GenericArmPciHost(GenericPciHost):
type = 'GenericArmPciHost'
cxx_header = "dev/arm/pci_host.hh"
int_policy = Param.ArmPciIntRouting("PCI interrupt routing policy")
int_base = Param.Unsigned("PCI interrupt base")
int_count = Param.Unsigned("Maximum number of interrupts used by this host")
class RealViewCtrl(BasicPioDevice):
type = 'RealViewCtrl'
cxx_header = "dev/arm/rv_ctrl.hh"
proc_id0 = Param.UInt32(0x0C000000, "Processor ID, SYS_PROCID")
proc_id1 = Param.UInt32(0x0C000222, "Processor ID, SYS_PROCID1")
idreg = Param.UInt32(0x00000000, "ID Register, SYS_ID")
class RealViewOsc(ClockDomain):
type = 'RealViewOsc'
cxx_header = "dev/arm/rv_ctrl.hh"
parent = Param.RealViewCtrl(Parent.any, "RealView controller")
# TODO: We currently don't have the notion of a clock source,
voltage_domain = Param.VoltageDomain(Parent.voltage_domain,
"Voltage domain")
site = Param.UInt8("Board Site")
position = Param.UInt8("Position in device stack")
dcc = Param.UInt8("Daughterboard Configuration Controller")
device = Param.UInt8("Device ID")
freq = Param.Clock("Default frequency")
class RealViewTemperatureSensor(SimObject):
type = 'RealViewTemperatureSensor'
cxx_header = "dev/arm/rv_ctrl.hh"
parent = Param.RealViewCtrl(Parent.any, "RealView controller")
system = Param.System(Parent.any, "system")
site = Param.UInt8("Board Site")
position = Param.UInt8("Position in device stack")
dcc = Param.UInt8("Daughterboard Configuration Controller")
device = Param.UInt8("Device ID")
class VExpressMCC(SubSystem):
class Osc(RealViewOsc):
site, position, dcc = (0, 0, 0)
class Temperature(RealViewTemperatureSensor):
site, position, dcc = (0, 0, 0)
osc_mcc = Osc(device=0, freq="50MHz")
osc_clcd = Osc(device=1, freq="23.75MHz")
osc_peripheral = Osc(device=2, freq="24MHz")
osc_system_bus = Osc(device=4, freq="24MHz")
temp_crtl = Temperature(device=0)
class CoreTile2A15DCC(SubSystem):
class Osc(RealViewOsc):
site, position, dcc = (1, 0, 0)
osc_cpu = Osc(device=0, freq="60MHz")
osc_hsbm = Osc(device=4, freq="40MHz")
osc_pxl = Osc(device=5, freq="23.75MHz")
osc_smb = Osc(device=6, freq="50MHz")
osc_sys = Osc(device=7, freq="60MHz")
osc_ddr = Osc(device=8, freq="40MHz")
class VGic(PioDevice):
type = 'VGic'
cxx_header = "dev/arm/vgic.hh"
gic = Param.BaseGic(Parent.any, "Gic to use for interrupting")
platform = Param.Platform(Parent.any, "Platform this device is part of.")
vcpu_addr = Param.Addr(0, "Address for vcpu interfaces")
hv_addr = Param.Addr(0, "Address for hv control")
pio_delay = Param.Latency('10ns', "Delay for PIO r/w")
ppint = Param.UInt32("HV maintenance interrupt number")
class AmbaFake(AmbaPioDevice):
type = 'AmbaFake'
cxx_header = "dev/arm/amba_fake.hh"
ignore_access = Param.Bool(False, "Ignore reads/writes to this device, (e.g. IsaFake + AMBA)")
amba_id = 0;
class Pl011(Uart):
type = 'Pl011'
cxx_header = "dev/arm/pl011.hh"
gic = Param.BaseGic(Parent.any, "Gic to use for interrupting")
int_num = Param.UInt32("Interrupt number that connects to GIC")
end_on_eot = Param.Bool(False, "End the simulation when a EOT is received on the UART")
int_delay = Param.Latency("100ns", "Time between action and interrupt generation by UART")
class Sp804(AmbaPioDevice):
type = 'Sp804'
cxx_header = "dev/arm/timer_sp804.hh"
gic = Param.BaseGic(Parent.any, "Gic to use for interrupting")
int_num0 = Param.UInt32("Interrupt number that connects to GIC")
clock0 = Param.Clock('1MHz', "Clock speed of the input")
int_num1 = Param.UInt32("Interrupt number that connects to GIC")
clock1 = Param.Clock('1MHz', "Clock speed of the input")
amba_id = 0x00141804
class CpuLocalTimer(BasicPioDevice):
type = 'CpuLocalTimer'
cxx_header = "dev/arm/timer_cpulocal.hh"
gic = Param.BaseGic(Parent.any, "Gic to use for interrupting")
int_num_timer = Param.UInt32("Interrrupt number used per-cpu to GIC")
int_num_watchdog = Param.UInt32("Interrupt number for per-cpu watchdog to GIC")
class GenericTimer(SimObject):
    # ARM architected (generic) timer, accessed via system registers
    # rather than memory-mapped I/O (contrast GenericTimerMem below).
    type = 'GenericTimer'
    cxx_header = "dev/arm/generic_timer.hh"
    system = Param.ArmSystem(Parent.any, "system")
    gic = Param.BaseGic(Parent.any, "GIC to use for interrupting")
    int_phys = Param.UInt32("Physical timer interrupt number")
    int_virt = Param.UInt32("Virtual timer interrupt number")
class GenericTimerMem(PioDevice):
    # Memory-mapped variant of the ARM generic timer.
    type = 'GenericTimerMem'
    cxx_header = "dev/arm/generic_timer.hh"
    gic = Param.BaseGic(Parent.any, "GIC to use for interrupting")
    base = Param.Addr(0, "Base address")
    int_phys = Param.UInt32("Interrupt number")
    int_virt = Param.UInt32("Interrupt number")
class PL031(AmbaIntDevice):
    # ARM PrimeCell PL031 real-time clock.
    type = 'PL031'
    cxx_header = "dev/arm/rtc_pl031.hh"
    time = Param.Time('01/01/2009', "System time to use ('Now' for actual time)")
    # AMBA peripheral ID reported by the device.
    amba_id = 0x00341031
class Pl050(AmbaIntDevice):
    # ARM PrimeCell PL050 keyboard/mouse interface (KMI).  The same
    # model serves both input types; is_mouse selects which.
    type = 'Pl050'
    cxx_header = "dev/arm/kmi.hh"
    vnc = Param.VncInput(Parent.any, "Vnc server for remote frame buffer display")
    is_mouse = Param.Bool(False, "Is this interface a mouse, if not a keyboard")
    int_delay = '1us'
    amba_id = 0x00141050
class Pl111(AmbaDmaDevice):
    # ARM PrimeCell PL111 colour LCD controller (CLCD).  Fetches the
    # frame buffer via DMA; output can be mirrored to a VNC server.
    type = 'Pl111'
    cxx_header = "dev/arm/pl111.hh"
    pixel_clock = Param.Clock('24MHz', "Pixel clock")
    vnc = Param.VncInput(Parent.any, "Vnc server for remote frame buffer display")
    amba_id = 0x00141111
    enable_capture = Param.Bool(True, "capture frame to system.framebuffer.bmp")
class HDLcd(AmbaDmaDevice):
    # ARM HDLCD display controller.  Pulls pixel data via DMA, clocked
    # by an external pixel clock domain (pxl_clk), typically one of the
    # DCC oscillators declared above.
    type = 'HDLcd'
    cxx_header = "dev/arm/hdlcd.hh"
    vnc = Param.VncInput(Parent.any, "Vnc server for remote frame buffer "
                                     "display")
    amba_id = 0x00141000
    # Compatibility knobs for kernel driver quirks.
    workaround_swap_rb = Param.Bool(False, "Workaround incorrect color "
                                    "selector order in some kernels")
    workaround_dma_line_count = Param.Bool(True, "Workaround incorrect "
                                           "DMA line count (off by 1)")
    enable_capture = Param.Bool(True, "capture frame to system.framebuffer.bmp")
    pixel_buffer_size = Param.MemorySize32("2kB", "Size of address range")
    pxl_clk = Param.ClockDomain("Pixel clock source")
    pixel_chunk = Param.Unsigned(32, "Number of pixels to handle in one batch")
    virt_refresh_rate = Param.Frequency("20Hz", "Frame refresh rate "
                                        "in KVM mode")
class RealView(Platform):
    """Base class for the ARM RealView / Versatile Express platforms.

    Subclasses declare concrete devices and either override the
    _on_chip_devices()/_off_chip_devices() hooks (newer style) or the
    attach*/clk-domain methods directly (older style, see RealViewPBX).
    """
    type = 'RealView'
    cxx_header = "dev/arm/realview.hh"
    system = Param.System(Parent.any, "system")
    # Default physical memory map: a single region starting at 0.
    _mem_regions = [(Addr(0), Addr('256MB'))]
    def _on_chip_devices(self):
        # Devices wired up by attachOnChipIO(); subclasses override.
        return []
    def _off_chip_devices(self):
        # Devices wired up by attachIO(); subclasses override.
        return []
    # Address ranges forwarded across the on-chip/off-chip bridge.
    _off_chip_ranges = []
    def _attach_device(self, device, bus, dma_ports=None):
        # Connect a device's PIO port, and its DMA port if present.
        # When the caller collects DMA ports (dma_ports list), the port
        # is recorded instead of being wired straight to the bus.
        if hasattr(device, "pio"):
            device.pio = bus.master
        if hasattr(device, "dma"):
            if dma_ports is None:
                device.dma = bus.slave
            else:
                dma_ports.append(device.dma)
    def _attach_io(self, devices, *args, **kwargs):
        # Attach a collection of devices (see _attach_device).
        for d in devices:
            self._attach_device(d, *args, **kwargs)
    def _attach_clk(self, devices, clkdomain):
        # Assign a clock domain to every device that has one.
        for d in devices:
            if hasattr(d, "clk_domain"):
                d.clk_domain = clkdomain
    def attachPciDevices(self):
        # Optionally instantiate PCI devices; no-op by default.
        pass
    def enableMSIX(self):
        # Optionally enable MSI-X support; no-op by default.
        pass
    def onChipIOClkDomain(self, clkdomain):
        self._attach_clk(self._on_chip_devices(), clkdomain)
    def offChipIOClkDomain(self, clkdomain):
        self._attach_clk(self._off_chip_devices(), clkdomain)
    def attachOnChipIO(self, bus, bridge=None, **kwargs):
        self._attach_io(self._on_chip_devices(), bus, **kwargs)
        if bridge:
            # The bridge forwards accesses in these ranges off chip.
            bridge.ranges = self._off_chip_ranges
    def attachIO(self, *args, **kwargs):
        self._attach_io(self._off_chip_devices(), *args, **kwargs)
    def setupBootLoader(self, mem_bus, cur_sys, loc):
        # Create boot flash (kept out of the config table so the OS does
        # not treat it as RAM) and select the default AArch32 loader.
        self.nvmem = SimpleMemory(range = AddrRange('2GB', size = '64MB'),
                                  conf_table_reported = False)
        self.nvmem.port = mem_bus.master
        cur_sys.boot_loader = loc('boot.arm')
        cur_sys.atags_addr = 0x100
        cur_sys.load_addr_mask = 0xfffffff
        cur_sys.load_offset = 0
class RealViewPBX(RealView):
    """ARM RealView Platform Baseboard (PBX) device map."""
    uart = Pl011(pio_addr=0x10009000, int_num=44)
    realview_io = RealViewCtrl(pio_addr=0x10000000)
    mcc = VExpressMCC()
    dcc = CoreTile2A15DCC()
    gic = Pl390()
    pci_host = GenericPciHost(
        conf_base=0x30000000, conf_size='256MB', conf_device_bits=16,
        pci_pio_base=0)
    timer0 = Sp804(int_num0=36, int_num1=36, pio_addr=0x10011000)
    timer1 = Sp804(int_num0=37, int_num1=37, pio_addr=0x10012000)
    local_cpu_timer = CpuLocalTimer(int_num_timer=29, int_num_watchdog=30, pio_addr=0x1f000600)
    clcd = Pl111(pio_addr=0x10020000, int_num=55)
    kmi0 = Pl050(pio_addr=0x10006000, int_num=52)
    kmi1 = Pl050(pio_addr=0x10007000, int_num=53, is_mouse=True)
    a9scu = A9SCU(pio_addr=0x1f000000)
    cf_ctrl = IdeController(disks=[], pci_func=0, pci_dev=7, pci_bus=2,
                            io_shift = 1, ctrl_offset = 2, Command = 0x1,
                            BAR0 = 0x18000000, BAR0Size = '16B',
                            BAR1 = 0x18000100, BAR1Size = '1B',
                            BAR0LegacyIO = True, BAR1LegacyIO = True)
    # Devices gem5 does not model are stubbed with fakes so that kernel
    # probes of their address ranges do not fault.
    l2x0_fake = IsaFake(pio_addr=0x1f002000, pio_size=0xfff)
    flash_fake = IsaFake(pio_addr=0x40000000, pio_size=0x20000000,
                         fake_mem=True)
    dmac_fake = AmbaFake(pio_addr=0x10030000)
    uart1_fake = AmbaFake(pio_addr=0x1000a000)
    uart2_fake = AmbaFake(pio_addr=0x1000b000)
    uart3_fake = AmbaFake(pio_addr=0x1000c000)
    smc_fake = AmbaFake(pio_addr=0x100e1000)
    sp810_fake = AmbaFake(pio_addr=0x10001000, ignore_access=True)
    watchdog_fake = AmbaFake(pio_addr=0x10010000)
    gpio0_fake = AmbaFake(pio_addr=0x10013000)
    gpio1_fake = AmbaFake(pio_addr=0x10014000)
    gpio2_fake = AmbaFake(pio_addr=0x10015000)
    ssp_fake = AmbaFake(pio_addr=0x1000d000)
    sci_fake = AmbaFake(pio_addr=0x1000e000)
    aaci_fake = AmbaFake(pio_addr=0x10004000)
    mmc_fake = AmbaFake(pio_addr=0x10005000)
    rtc = PL031(pio_addr=0x10017000, int_num=42)
    energy_ctrl = EnergyCtrl(pio_addr=0x1000f000)
    # Attach I/O devices that are on chip and also set the appropriate
    # ranges for the bridge
    def attachOnChipIO(self, bus, bridge=None, dma_ports=None):
        # dma_ports is accepted for call compatibility with the other
        # platforms but is not used here (mutable default removed).
        self.gic.pio = bus.master
        self.l2x0_fake.pio = bus.master
        self.a9scu.pio = bus.master
        self.local_cpu_timer.pio = bus.master
        if bridge:
            # Bridge ranges based on excluding what is part of on-chip I/O
            # (gic, l2x0, a9scu, local_cpu_timer)
            bridge.ranges = [AddrRange(self.realview_io.pio_addr,
                                       self.a9scu.pio_addr - 1),
                             AddrRange(self.flash_fake.pio_addr,
                                       self.flash_fake.pio_addr + \
                                       self.flash_fake.pio_size - 1)]
    # Set the clock domain for IO objects that are considered
    # to be "close" to the cores.
    def onChipIOClkDomain(self, clkdomain):
        self.gic.clk_domain = clkdomain
        self.l2x0_fake.clk_domain = clkdomain
        # BUG FIX: previously assigned "clkdomain" (a typo) which set a
        # bogus attribute instead of the SCU's clock domain.
        self.a9scu.clk_domain = clkdomain
        self.local_cpu_timer.clk_domain = clkdomain
    # Attach I/O devices to specified bus object.  Can't do this
    def attachIO(self, bus, dma_ports=None):
        # Mutable default argument replaced with the None idiom; an
        # absent list behaves exactly like an empty one.
        if dma_ports is None:
            dma_ports = []
        self.uart.pio = bus.master
        self.realview_io.pio = bus.master
        self.pci_host.pio = bus.master
        self.timer0.pio = bus.master
        self.timer1.pio = bus.master
        self.clcd.pio = bus.master
        # Wire the DMA port directly only when the caller has not
        # collected it already.
        if dma_ports.count(self.clcd.dma) == 0:
            self.clcd.dma = bus.slave
        self.kmi0.pio = bus.master
        self.kmi1.pio = bus.master
        self.cf_ctrl.pio = bus.master
        if dma_ports.count(self.cf_ctrl.dma) == 0:
            self.cf_ctrl.dma = bus.slave
        self.dmac_fake.pio = bus.master
        self.uart1_fake.pio = bus.master
        self.uart2_fake.pio = bus.master
        self.uart3_fake.pio = bus.master
        self.smc_fake.pio = bus.master
        self.sp810_fake.pio = bus.master
        self.watchdog_fake.pio = bus.master
        self.gpio0_fake.pio = bus.master
        self.gpio1_fake.pio = bus.master
        self.gpio2_fake.pio = bus.master
        self.ssp_fake.pio = bus.master
        self.sci_fake.pio = bus.master
        self.aaci_fake.pio = bus.master
        self.mmc_fake.pio = bus.master
        self.rtc.pio = bus.master
        self.flash_fake.pio = bus.master
        self.energy_ctrl.pio = bus.master
    def offChipIOClkDomain(self, clkdomain):
        self.uart.clk_domain = clkdomain
        self.realview_io.clk_domain = clkdomain
        self.timer0.clk_domain = clkdomain
        self.timer1.clk_domain = clkdomain
        self.clcd.clk_domain = clkdomain
        self.kmi0.clk_domain = clkdomain
        self.kmi1.clk_domain = clkdomain
        self.cf_ctrl.clk_domain = clkdomain
        self.dmac_fake.clk_domain = clkdomain
        self.uart1_fake.clk_domain = clkdomain
        self.uart2_fake.clk_domain = clkdomain
        self.uart3_fake.clk_domain = clkdomain
        self.smc_fake.clk_domain = clkdomain
        self.sp810_fake.clk_domain = clkdomain
        self.watchdog_fake.clk_domain = clkdomain
        self.gpio0_fake.clk_domain = clkdomain
        self.gpio1_fake.clk_domain = clkdomain
        self.gpio2_fake.clk_domain = clkdomain
        self.ssp_fake.clk_domain = clkdomain
        self.sci_fake.clk_domain = clkdomain
        self.aaci_fake.clk_domain = clkdomain
        self.mmc_fake.clk_domain = clkdomain
        self.rtc.clk_domain = clkdomain
        self.flash_fake.clk_domain = clkdomain
        self.energy_ctrl.clk_domain = clkdomain
class RealViewEB(RealView):
    """ARM RealView Emulation Baseboard (EB) device map."""
    uart = Pl011(pio_addr=0x10009000, int_num=44)
    realview_io = RealViewCtrl(pio_addr=0x10000000, idreg=0x01400500)
    mcc = VExpressMCC()
    dcc = CoreTile2A15DCC()
    gic = Pl390(dist_addr=0x10041000, cpu_addr=0x10040000)
    timer0 = Sp804(int_num0=36, int_num1=36, pio_addr=0x10011000)
    timer1 = Sp804(int_num0=37, int_num1=37, pio_addr=0x10012000)
    clcd = Pl111(pio_addr=0x10020000, int_num=23)
    kmi0 = Pl050(pio_addr=0x10006000, int_num=20)
    kmi1 = Pl050(pio_addr=0x10007000, int_num=21, is_mouse=True)
    l2x0_fake = IsaFake(pio_addr=0x1f002000, pio_size=0xfff, warn_access="1")
    flash_fake = IsaFake(pio_addr=0x40000000, pio_size=0x20000000-1,
                         fake_mem=True)
    dmac_fake = AmbaFake(pio_addr=0x10030000)
    uart1_fake = AmbaFake(pio_addr=0x1000a000)
    uart2_fake = AmbaFake(pio_addr=0x1000b000)
    uart3_fake = AmbaFake(pio_addr=0x1000c000)
    smcreg_fake = IsaFake(pio_addr=0x10080000, pio_size=0x10000-1)
    smc_fake = AmbaFake(pio_addr=0x100e1000)
    sp810_fake = AmbaFake(pio_addr=0x10001000, ignore_access=True)
    watchdog_fake = AmbaFake(pio_addr=0x10010000)
    gpio0_fake = AmbaFake(pio_addr=0x10013000)
    gpio1_fake = AmbaFake(pio_addr=0x10014000)
    gpio2_fake = AmbaFake(pio_addr=0x10015000)
    ssp_fake = AmbaFake(pio_addr=0x1000d000)
    sci_fake = AmbaFake(pio_addr=0x1000e000)
    aaci_fake = AmbaFake(pio_addr=0x10004000)
    mmc_fake = AmbaFake(pio_addr=0x10005000)
    rtc_fake = AmbaFake(pio_addr=0x10017000, amba_id=0x41031)
    energy_ctrl = EnergyCtrl(pio_addr=0x1000f000)
    # Attach I/O devices that are on chip and also set the appropriate
    # ranges for the bridge
    def attachOnChipIO(self, bus, bridge=None, dma_ports=None):
        # dma_ports kept for call compatibility; unused on this board
        # (mutable default removed).
        self.gic.pio = bus.master
        self.l2x0_fake.pio = bus.master
        if bridge:
            # Bridge ranges based on excluding what is part of on-chip I/O
            # (gic, l2x0)
            bridge.ranges = [AddrRange(self.realview_io.pio_addr,
                                       self.gic.cpu_addr - 1),
                             AddrRange(self.flash_fake.pio_addr, Addr.max)]
    # Set the clock domain for IO objects that are considered
    # to be "close" to the cores.
    def onChipIOClkDomain(self, clkdomain):
        self.gic.clk_domain = clkdomain
        self.l2x0_fake.clk_domain = clkdomain
    # Attach I/O devices to specified bus object.  Can't do this
    def attachIO(self, bus, dma_ports=None):
        # Mutable default replaced with the None idiom.
        if dma_ports is None:
            dma_ports = []
        self.uart.pio = bus.master
        self.realview_io.pio = bus.master
        # NOTE(review): this class defines no pci_host attribute and the
        # RealView base class does not either -- this line looks like it
        # would raise AttributeError if attachIO() were ever called on an
        # EB platform; confirm against upstream gem5 before removing.
        self.pci_host.pio = bus.master
        self.timer0.pio = bus.master
        self.timer1.pio = bus.master
        self.clcd.pio = bus.master
        if dma_ports.count(self.clcd.dma) == 0:
            self.clcd.dma = bus.slave
        self.kmi0.pio = bus.master
        self.kmi1.pio = bus.master
        self.dmac_fake.pio = bus.master
        self.uart1_fake.pio = bus.master
        self.uart2_fake.pio = bus.master
        self.uart3_fake.pio = bus.master
        self.smc_fake.pio = bus.master
        self.sp810_fake.pio = bus.master
        self.watchdog_fake.pio = bus.master
        self.gpio0_fake.pio = bus.master
        self.gpio1_fake.pio = bus.master
        self.gpio2_fake.pio = bus.master
        self.ssp_fake.pio = bus.master
        self.sci_fake.pio = bus.master
        self.aaci_fake.pio = bus.master
        self.mmc_fake.pio = bus.master
        self.rtc_fake.pio = bus.master
        self.flash_fake.pio = bus.master
        self.smcreg_fake.pio = bus.master
        self.energy_ctrl.pio = bus.master
    def offChipIOClkDomain(self, clkdomain):
        self.uart.clk_domain = clkdomain
        self.realview_io.clk_domain = clkdomain
        self.timer0.clk_domain = clkdomain
        self.timer1.clk_domain = clkdomain
        self.clcd.clk_domain = clkdomain
        self.kmi0.clk_domain = clkdomain
        self.kmi1.clk_domain = clkdomain
        self.dmac_fake.clk_domain = clkdomain
        self.uart1_fake.clk_domain = clkdomain
        self.uart2_fake.clk_domain = clkdomain
        self.uart3_fake.clk_domain = clkdomain
        self.smc_fake.clk_domain = clkdomain
        self.sp810_fake.clk_domain = clkdomain
        self.watchdog_fake.clk_domain = clkdomain
        self.gpio0_fake.clk_domain = clkdomain
        self.gpio1_fake.clk_domain = clkdomain
        self.gpio2_fake.clk_domain = clkdomain
        self.ssp_fake.clk_domain = clkdomain
        self.sci_fake.clk_domain = clkdomain
        self.aaci_fake.clk_domain = clkdomain
        self.mmc_fake.clk_domain = clkdomain
        # BUG FIX: was "self.rtc.clk_domain", but this board only has
        # rtc_fake (there is no rtc attribute on RealViewEB).
        self.rtc_fake.clk_domain = clkdomain
        self.flash_fake.clk_domain = clkdomain
        self.smcreg_fake.clk_domain = clkdomain
        self.energy_ctrl.clk_domain = clkdomain
class VExpress_EMM(RealView):
    """Versatile Express with the extended (eMM) memory map, AArch32."""
    _mem_regions = [(Addr('2GB'), Addr('2GB'))]
    uart = Pl011(pio_addr=0x1c090000, int_num=37)
    realview_io = RealViewCtrl(
        proc_id0=0x14000000, proc_id1=0x14000000,
        idreg=0x02250000, pio_addr=0x1C010000)
    mcc = VExpressMCC()
    dcc = CoreTile2A15DCC()
    gic = Pl390(dist_addr=0x2C001000, cpu_addr=0x2C002000)
    pci_host = GenericPciHost(
        conf_base=0x30000000, conf_size='256MB', conf_device_bits=16,
        pci_pio_base=0)
    local_cpu_timer = CpuLocalTimer(int_num_timer=29, int_num_watchdog=30, pio_addr=0x2C080000)
    generic_timer = GenericTimer(int_phys=29, int_virt=27)
    timer0 = Sp804(int_num0=34, int_num1=34, pio_addr=0x1C110000, clock0='1MHz', clock1='1MHz')
    timer1 = Sp804(int_num0=35, int_num1=35, pio_addr=0x1C120000, clock0='1MHz', clock1='1MHz')
    clcd = Pl111(pio_addr=0x1c1f0000, int_num=46)
    hdlcd = HDLcd(pxl_clk=dcc.osc_pxl,
                  pio_addr=0x2b000000, int_num=117,
                  workaround_swap_rb=True)
    kmi0 = Pl050(pio_addr=0x1c060000, int_num=44)
    kmi1 = Pl050(pio_addr=0x1c070000, int_num=45, is_mouse=True)
    vgic = VGic(vcpu_addr=0x2c006000, hv_addr=0x2c004000, ppint=25)
    cf_ctrl = IdeController(disks=[], pci_func=0, pci_dev=0, pci_bus=2,
                            io_shift = 2, ctrl_offset = 2, Command = 0x1,
                            BAR0 = 0x1C1A0000, BAR0Size = '256B',
                            BAR1 = 0x1C1A0100, BAR1Size = '4096B',
                            BAR0LegacyIO = True, BAR1LegacyIO = True)
    vram = SimpleMemory(range = AddrRange(0x18000000, size='32MB'),
                        conf_table_reported = False)
    rtc = PL031(pio_addr=0x1C170000, int_num=36)
    # Unmodelled devices are stubbed with fakes.
    l2x0_fake = IsaFake(pio_addr=0x2C100000, pio_size=0xfff)
    uart1_fake = AmbaFake(pio_addr=0x1C0A0000)
    uart2_fake = AmbaFake(pio_addr=0x1C0B0000)
    uart3_fake = AmbaFake(pio_addr=0x1C0C0000)
    sp810_fake = AmbaFake(pio_addr=0x1C020000, ignore_access=True)
    watchdog_fake = AmbaFake(pio_addr=0x1C0F0000)
    aaci_fake = AmbaFake(pio_addr=0x1C040000)
    lan_fake = IsaFake(pio_addr=0x1A000000, pio_size=0xffff)
    usb_fake = IsaFake(pio_addr=0x1B000000, pio_size=0x1ffff)
    mmc_fake = AmbaFake(pio_addr=0x1c050000)
    energy_ctrl = EnergyCtrl(pio_addr=0x1c080000)
    def attachPciDevices(self):
        # Optional PCI devices; attachIO() below wires them up only if
        # this method has been called.
        self.ethernet = IGbE_e1000(pci_bus=0, pci_dev=0, pci_func=0,
                                   InterruptLine=1, InterruptPin=1)
        self.ide = IdeController(disks = [], pci_bus=0, pci_dev=1, pci_func=0,
                                 InterruptLine=2, InterruptPin=2)
    def enableMSIX(self):
        # Replace the GIC with one that has more interrupt lines and add
        # a GICv2m frame for message-signalled interrupts.
        self.gic = Pl390(dist_addr=0x2C001000, cpu_addr=0x2C002000, it_lines=512)
        self.gicv2m = Gicv2m()
        self.gicv2m.frames = [Gicv2mFrame(spi_base=256, spi_len=64, addr=0x2C1C0000)]
    def setupBootLoader(self, mem_bus, cur_sys, loc):
        # Boot flash plus the AArch32 eMM boot loader.
        self.nvmem = SimpleMemory(range = AddrRange('64MB'),
                                  conf_table_reported = False)
        self.nvmem.port = mem_bus.master
        cur_sys.boot_loader = loc('boot_emm.arm')
        cur_sys.atags_addr = 0x8000000
        cur_sys.load_addr_mask = 0xfffffff
        cur_sys.load_offset = 0x80000000
    def attachOnChipIO(self, bus, bridge=None, dma_ports=None):
        # Mutable default ([]) replaced with the None idiom.
        if dma_ports is None:
            dma_ports = []
        self.gic.pio = bus.master
        self.vgic.pio = bus.master
        self.local_cpu_timer.pio = bus.master
        # gicv2m only exists if enableMSIX() was called.
        if hasattr(self, "gicv2m"):
            self.gicv2m.pio = bus.master
        if dma_ports.count(self.hdlcd.dma) == 0:
            self.hdlcd.dma = bus.slave
        if bridge:
            bridge.ranges = [AddrRange(0x2F000000, size='16MB'),
                             AddrRange(0x2B000000, size='4MB'),
                             AddrRange(0x30000000, size='256MB'),
                             AddrRange(0x40000000, size='512MB'),
                             AddrRange(0x18000000, size='64MB'),
                             AddrRange(0x1C000000, size='64MB')]
    def onChipIOClkDomain(self, clkdomain):
        self.gic.clk_domain = clkdomain
        if hasattr(self, "gicv2m"):
            self.gicv2m.clk_domain = clkdomain
        self.hdlcd.clk_domain = clkdomain
        self.vgic.clk_domain = clkdomain
    def attachIO(self, bus, dma_ports=None):
        if dma_ports is None:
            dma_ports = []
        self.uart.pio = bus.master
        self.realview_io.pio = bus.master
        self.pci_host.pio = bus.master
        self.timer0.pio = bus.master
        self.timer1.pio = bus.master
        self.clcd.pio = bus.master
        if dma_ports.count(self.clcd.dma) == 0:
            self.clcd.dma = bus.slave
        self.hdlcd.pio = bus.master
        self.kmi0.pio = bus.master
        self.kmi1.pio = bus.master
        self.cf_ctrl.pio = bus.master
        if dma_ports.count(self.cf_ctrl.dma) == 0:
            self.cf_ctrl.dma = bus.slave
        self.rtc.pio = bus.master
        self.vram.port = bus.master
        self.l2x0_fake.pio = bus.master
        self.uart1_fake.pio = bus.master
        self.uart2_fake.pio = bus.master
        self.uart3_fake.pio = bus.master
        self.sp810_fake.pio = bus.master
        self.watchdog_fake.pio = bus.master
        self.aaci_fake.pio = bus.master
        self.lan_fake.pio = bus.master
        self.usb_fake.pio = bus.master
        self.mmc_fake.pio = bus.master
        self.energy_ctrl.pio = bus.master
        # ide/ethernet exist only if attachPciDevices() was called; a
        # missing attribute is the expected "no PCI devices" case.  The
        # previous bare "except:" also hid unrelated errors.
        try:
            self.ide.pio = bus.master
            if dma_ports.count(self.ide.dma) == 0:
                self.ide.dma = bus.slave
            self.ethernet.pio = bus.master
            if dma_ports.count(self.ethernet.dma) == 0:
                self.ethernet.dma = bus.slave
        except AttributeError:
            pass
    def offChipIOClkDomain(self, clkdomain):
        self.uart.clk_domain = clkdomain
        self.realview_io.clk_domain = clkdomain
        self.timer0.clk_domain = clkdomain
        self.timer1.clk_domain = clkdomain
        self.clcd.clk_domain = clkdomain
        self.kmi0.clk_domain = clkdomain
        self.kmi1.clk_domain = clkdomain
        self.cf_ctrl.clk_domain = clkdomain
        self.rtc.clk_domain = clkdomain
        self.vram.clk_domain = clkdomain
        self.l2x0_fake.clk_domain = clkdomain
        self.uart1_fake.clk_domain = clkdomain
        self.uart2_fake.clk_domain = clkdomain
        self.uart3_fake.clk_domain = clkdomain
        self.sp810_fake.clk_domain = clkdomain
        self.watchdog_fake.clk_domain = clkdomain
        self.aaci_fake.clk_domain = clkdomain
        self.lan_fake.clk_domain = clkdomain
        self.usb_fake.clk_domain = clkdomain
        self.mmc_fake.clk_domain = clkdomain
        self.energy_ctrl.clk_domain = clkdomain
class VExpress_EMM64(VExpress_EMM):
    # AArch64 variant of VExpress_EMM: larger memory map, a PCI host
    # with real legacy-I/O space, and the 64-bit boot loader.
    _mem_regions = [(Addr('2GB'), Addr('2GB')), (Addr('34GB'), Addr('30GB')),
                    (Addr('512GB'), Addr('480GB'))]
    pci_host = GenericPciHost(
        conf_base=0x30000000, conf_size='256MB', conf_device_bits=12,
        pci_pio_base=0x2f000000)
    def setupBootLoader(self, mem_bus, cur_sys, loc):
        # Boot flash at address 0 (not reported in the config table).
        self.nvmem = SimpleMemory(range=AddrRange(0, size='64MB'),
                                  conf_table_reported=False)
        self.nvmem.port = mem_bus.master
        cur_sys.boot_loader = loc('boot_emm.arm64')
        cur_sys.atags_addr = 0x8000000
        cur_sys.load_addr_mask = 0xfffffff
        cur_sys.load_offset = 0x80000000
class VExpress_GEM5_V1(RealView):
    """gem5-specific Versatile-Express-like platform (GEM5 V1 map).

    Uses the newer _on_chip_devices()/_off_chip_devices() hooks from the
    RealView base class rather than hand-written attach methods.
    """
    _mem_regions = [(Addr('2GB'), Addr('510GB'))]
    _off_chip_ranges = [
        AddrRange(0x0c000000, 0x1fffffff),
        AddrRange(0x2f000000, 0x7fffffff),
    ]
    realview_io = RealViewCtrl(proc_id0=0x14000000, proc_id1=0x14000000,
                               idreg=0x02250000, pio_addr=0x1c010000)
    mcc = VExpressMCC()
    dcc = CoreTile2A15DCC()
    # NOTE(review): the gic declaration was garbled in this copy of the
    # file (only the trailing arguments survived); reconstructed from the
    # upstream gem5 source -- verify against the repository.
    gic = Pl390(dist_addr=0x2c001000, cpu_addr=0x2c002000,
                it_lines=512)
    vgic = VGic(vcpu_addr=0x2c006000, hv_addr=0x2c004000, ppint=25)
    gicv2m = Gicv2m()
    gicv2m.frames = [
        Gicv2mFrame(spi_base=256, spi_len=64, addr=0x2c1c0000),
    ]
    generic_timer = GenericTimer(int_phys=29, int_virt=27)
    hdlcd = HDLcd(pxl_clk=dcc.osc_pxl,
                  pio_addr=0x2b000000, int_num=95)
    def _on_chip_devices(self):
        return [
            self.gic, self.vgic, self.gicv2m,
            self.hdlcd,
            self.generic_timer,
        ]
    kmi0 = Pl050(pio_addr=0x1c060000, int_num=44)
    kmi1 = Pl050(pio_addr=0x1c070000, int_num=45, is_mouse=True)
    rtc = PL031(pio_addr=0x1c170000, int_num=36)
    # NOTE(review): the uart0 and pci_host declarations were garbled in
    # this copy (only "MB', conf_device_bits=12," survived); both are
    # reconstructed from the upstream gem5 source -- verify.  uart0 is
    # required by _off_chip_devices() below.
    uart0 = Pl011(pio_addr=0x1c090000, int_num=37)
    pci_host = GenericPciHost(
        conf_base=0x30000000, conf_size='256MB', conf_device_bits=12,
        pci_pio_base=0x2f000000,
        int_policy="ARM_PCI_INT_DEV", int_base=100, int_count=4)
    energy_ctrl = EnergyCtrl(pio_addr=0x10000000)
    def _off_chip_devices(self):
        return [
            self.realview_io,
            self.uart0,
            self.kmi0, self.kmi1,
            self.rtc,
            self.pci_host,
            self.energy_ctrl,
        ]
    def attachPciDevice(self, device, *args, **kwargs):
        # PCI devices need to know their host before being attached.
        device.host = self.pci_host
        self._attach_device(device, *args, **kwargs)
    def setupBootLoader(self, mem_bus, cur_sys, loc):
        self.nvmem = SimpleMemory(range=AddrRange(0, size='64MB'),
                                  conf_table_reported=False)
        self.nvmem.port = mem_bus.master
        # Either the 64-bit or the 32-bit loader is picked at run time.
        cur_sys.boot_loader = [ loc('boot_emm.arm64'), loc('boot_emm.arm') ]
        cur_sys.atags_addr = 0x8000000
        cur_sys.load_addr_mask = 0xfffffff
        cur_sys.load_offset = 0x80000000
        # Setting up the m5ops region is technically not part of the boot
        # loader, but this is the only place we can configure the
        # system.
        cur_sys.m5ops_base = 0x10010000
| true | true |
790155575af3419dde73d20431a702991834c85d | 419 | py | Python | Collect/SRTM/__init__.py | HesamZamanpour/wapor | 553981e78164e7fd326be5f65a46bdd1dc80288a | [
"Apache-2.0"
] | 1 | 2021-05-24T08:12:03.000Z | 2021-05-24T08:12:03.000Z | Collect/SRTM/__init__.py | HesamZamanpour/wapor | 553981e78164e7fd326be5f65a46bdd1dc80288a | [
"Apache-2.0"
] | 2 | 2020-06-25T08:27:55.000Z | 2020-08-28T07:38:17.000Z | Collect/SRTM/__init__.py | HesamZamanpour/wapor | 553981e78164e7fd326be5f65a46bdd1dc80288a | [
"Apache-2.0"
] | 4 | 2020-09-23T09:51:59.000Z | 2021-08-10T08:59:14.000Z | # -*- coding: utf-8 -*-
"""
Authors: Tim Hessels
Module: Collect/SRTM
Description:
This module downloads DEM data from http://earlywarning.usgs.gov/hydrodata/.
Use the DEM functions to download and create DEM images in Gtiff format.
Examples:
from pyWAPOR.Collect import SRTM
SRTM.DEM(Dir='C:/TempDEM4/', latlim=[29, 32], lonlim=[-113, -109])
"""
# Re-export the SRTM DEM downloader as this package's public API.
from .DEM import main as DEM
__all__ = ['DEM']
__version__ = '0.1'
| 20.95 | 76 | 0.711217 |
from .DEM import main as DEM
__all__ = ['DEM']
__version__ = '0.1'
| true | true |
7901555a4670a44fad3948593f79d366e0f5e5e4 | 2,726 | py | Python | apps/trade/src/PositionsManager.py | kikei/btc-bot-ai | cb118fa1809ebef472a2025be697c9050e948009 | [
"Apache-2.0"
] | 1 | 2020-02-02T13:53:21.000Z | 2020-02-02T13:53:21.000Z | apps/trade/src/PositionsManager.py | kikei/btc-bot-ai | cb118fa1809ebef472a2025be697c9050e948009 | [
"Apache-2.0"
] | null | null | null | apps/trade/src/PositionsManager.py | kikei/btc-bot-ai | cb118fa1809ebef472a2025be697c9050e948009 | [
"Apache-2.0"
] | null | null | null | import logging
from Models import Values
from classes import PlayerActions, OnePosition
from ActionsDispatcher import Action
class PositionsManagerDBException(RuntimeError):
  """Raised when a required setting is missing from the data store."""
class PositionsManager(object):
  """Watches open trading positions and decides when to close them.

  Profit-taking and loss-cut thresholds are loaded from the Values
  store for the configured account; missing settings raise
  PositionsManagerDBException.
  """
  def __init__(self, models, accountId, logger=None):
    self.models = models
    self.accountId = accountId
    if logger is None:
      logger = logging.getLogger()
    self.logger = logger
    self.restore()
  def restore(self):
    """Reload both thresholds from the DB; raise if either is unset."""
    models = self.models
    profitThres = models.Values.get(Values.PositionThresProfit,
                                    accountId=self.accountId)
    if profitThres is None:
      raise PositionsManagerDBException('Settings "{k}" not initialized.'
                                        .format(k=Values.PositionThresProfit))
    self.profitThres = profitThres
    lossCutThres = models.Values.get(Values.PositionThresLossCut,
                                     accountId=self.accountId)
    if lossCutThres is None:
      raise PositionsManagerDBException('Settings "{k}" not initialized.'
                                        .format(k=Values.PositionThresLossCut))
    self.lossCutThres = lossCutThres
  @staticmethod
  def calcVariation(onetick, oneposition):
    """Return current price / entry price for a position.

    (tick: OneTick, position: OnePosition) -> float
    Long positions are valued at the bid, shorts at the ask.
    """
    created = oneposition.priceMean()
    if oneposition.side == OnePosition.SideLong:
      current = onetick.bid
    else:
      current = onetick.ask
    return current / created
  def makeDecision(self, positions):
    # Return at most one (action, position) pair: the first position
    # whose price variation crosses a threshold.
    tick = self.models.Ticks.one()
    for p in positions:
      # NOTE(review): only the first leg of each position is inspected
      # -- confirm positions always have exactly one element here.
      onePosition = p.positions[0]
      oneTick = tick[onePosition.exchanger]
      var = PositionsManager.calcVariation(oneTick, onePosition)
      if onePosition.side == OnePosition.SideLong:
        if var >= self.profitThres:
          return [(PlayerActions.CloseForProfit, p)] # Long, Profit
        elif var <= self.lossCutThres:
          return [(PlayerActions.CloseForLossCut, p)] # Long, LossCut
      else:
        # Short positions mirror the thresholds around 1.0.
        if var <= 1.0 / self.profitThres:
          return [(PlayerActions.CloseForProfit, p)] # Short, Profit
        elif var >= 1.0 / self.lossCutThres:
          return [(PlayerActions.CloseForLossCut, p)] # Short, LossCut
    return []
  def getOpenPositions(self):
    """Fetch the positions currently marked open for this account."""
    return self.models.Positions.currentOpen(accountId=self.accountId)
  def createAction(self):
    """Return a close Action for the first triggered position, else None."""
    positions = self.getOpenPositions()
    positions = filter(lambda p:p.isOpen(), positions)
    closes = self.makeDecision(positions)
    self.logger.debug('Completed decision, #close={n}.'.format(n=len(closes)))
    if len(closes) > 0:
      actionType, position = closes[0]
      return Action(actionType, position)
    else:
      return None
| 35.402597 | 79 | 0.657373 | import logging
from Models import Values
from classes import PlayerActions, OnePosition
from ActionsDispatcher import Action
class PositionsManagerDBException(RuntimeError):
pass
class PositionsManager(object):
def __init__(self, models, accountId, logger=None):
self.models = models
self.accountId = accountId
if logger is None:
logger = logging.getLogger()
self.logger = logger
self.restore()
def restore(self):
models = self.models
profitThres = models.Values.get(Values.PositionThresProfit,
accountId=self.accountId)
if profitThres is None:
raise PositionsManagerDBException('Settings "{k}" not initialized.'
.format(k=Values.PositionThresProfit))
self.profitThres = profitThres
lossCutThres = models.Values.get(Values.PositionThresLossCut,
accountId=self.accountId)
if lossCutThres is None:
raise PositionsManagerDBException('Settings "{k}" not initialized.'
.format(k=Values.PositionThresLossCut))
self.lossCutThres = lossCutThres
@staticmethod
def calcVariation(onetick, oneposition):
created = oneposition.priceMean()
if oneposition.side == OnePosition.SideLong:
current = onetick.bid
else:
current = onetick.ask
return current / created
def makeDecision(self, positions):
tick = self.models.Ticks.one()
for p in positions:
onePosition = p.positions[0]
oneTick = tick[onePosition.exchanger]
var = PositionsManager.calcVariation(oneTick, onePosition)
if onePosition.side == OnePosition.SideLong:
if var >= self.profitThres:
return [(PlayerActions.CloseForProfit, p)]
elif var <= self.lossCutThres:
return [(PlayerActions.CloseForLossCut, p)]
else:
if var <= 1.0 / self.profitThres:
return [(PlayerActions.CloseForProfit, p)]
elif var >= 1.0 / self.lossCutThres:
return [(PlayerActions.CloseForLossCut, p)]
return []
def getOpenPositions(self):
return self.models.Positions.currentOpen(accountId=self.accountId)
def createAction(self):
positions = self.getOpenPositions()
positions = filter(lambda p:p.isOpen(), positions)
closes = self.makeDecision(positions)
self.logger.debug('Completed decision, #close={n}.'.format(n=len(closes)))
if len(closes) > 0:
actionType, position = closes[0]
return Action(actionType, position)
else:
return None
| true | true |
790156f4fc37b1b2ef4e1a861aed52450c4fada5 | 680 | py | Python | project_name/users/models.py | Bereware/bedjango-starter | 9ba6ca928bf50c7f24e5f176fba0ac633c61709f | [
"MIT"
] | 58 | 2017-03-07T12:09:50.000Z | 2021-02-21T10:10:09.000Z | project_name/users/models.py | Bereware/bedjango-starter | 9ba6ca928bf50c7f24e5f176fba0ac633c61709f | [
"MIT"
] | 17 | 2017-03-09T16:13:52.000Z | 2018-02-18T20:55:12.000Z | project_name/users/models.py | Bereware/bedjango-starter | 9ba6ca928bf50c7f24e5f176fba0ac633c61709f | [
"MIT"
] | 27 | 2017-03-08T18:59:58.000Z | 2019-07-14T18:54:44.000Z | import uuid
from django.conf import settings
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django_countries.fields import CountryField
from django.contrib.auth.models import AbstractUser
class User(AbstractUser):
    # Custom user model extending Django's AbstractUser.
    # Stable, non-editable external identifier.
    uuid = models.UUIDField(default=uuid.uuid4, editable=False)
    slug = models.CharField(max_length=256, unique=True, null=True)
    country = CountryField(_('Country'), blank=True, blank_label=_('Country'))
    # Unlike AbstractUser's email, this one is unique (still blank-able).
    email = models.EmailField(_('email address'), blank=True, unique=True)
    preferred_language = models.CharField(_('Preferred Language'), null=True, blank=True, max_length=100, choices=settings.LANGUAGES)
| 42.5 | 133 | 0.780882 | import uuid
from django.conf import settings
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django_countries.fields import CountryField
from django.contrib.auth.models import AbstractUser
class User(AbstractUser):
uuid = models.UUIDField(default=uuid.uuid4, editable=False)
slug = models.CharField(max_length=256, unique=True, null=True)
country = CountryField(_('Country'), blank=True, blank_label=_('Country'))
email = models.EmailField(_('email address'), blank=True, unique=True)
preferred_language = models.CharField(_('Preferred Language'), null=True, blank=True, max_length=100, choices=settings.LANGUAGES)
| true | true |
790157497ca36b676dd8b3e07c4315ae3df4075a | 8,410 | py | Python | asv_bench/benchmarks/categoricals.py | developing-coder/pandas | 9feb3ad92cc0397a04b665803a49299ee7aa1037 | [
"BSD-3-Clause"
] | 1 | 2019-05-04T03:42:25.000Z | 2019-05-04T03:42:25.000Z | asv_bench/benchmarks/categoricals.py | developing-coder/pandas | 9feb3ad92cc0397a04b665803a49299ee7aa1037 | [
"BSD-3-Clause"
] | null | null | null | asv_bench/benchmarks/categoricals.py | developing-coder/pandas | 9feb3ad92cc0397a04b665803a49299ee7aa1037 | [
"BSD-3-Clause"
] | 1 | 2020-01-02T14:28:17.000Z | 2020-01-02T14:28:17.000Z | import warnings
import numpy as np
import pandas as pd
import pandas.util.testing as tm
try:
from pandas.api.types import union_categoricals
except ImportError:
try:
from pandas.types.concat import union_categoricals
except ImportError:
pass
class Concat:
    """Benchmarks for concatenating / unioning categorical data."""

    def setup(self):
        repeats = 10**5
        base = list('aabbcd') * repeats
        self.s = pd.Series(base).astype('category')
        self.a = pd.Categorical(base)
        self.b = pd.Categorical(list('bbcdjk') * repeats)

    def time_concat(self):
        pd.concat([self.s, self.s])

    def time_union(self):
        union_categoricals([self.a, self.b])
class Constructor:
    """Benchmarks for the many ways of constructing a ``pd.Categorical``."""

    def setup(self):
        reps = 10**5
        cats = list('abcde')
        self.categories = cats
        self.cat_idx = pd.Index(cats)
        self.values = np.tile(cats, reps)
        self.codes = np.tile(range(len(cats)), reps)
        dtindex = pd.date_range('1995-01-01 00:00:00', periods=reps / 10,
                                freq='s')
        self.datetimes = pd.Series(dtindex)
        self.datetimes_with_nat = self.datetimes.copy()
        self.datetimes_with_nat.iloc[-1] = pd.NaT
        self.values_some_nan = list(np.tile(cats + [np.nan], reps))
        self.values_all_nan = [np.nan] * len(self.values)
        self.values_all_int8 = np.ones(reps, 'int8')
        self.categorical = pd.Categorical(self.values, cats)
        self.series = pd.Series(self.categorical)

    def time_regular(self):
        pd.Categorical(self.values, self.categories)

    def time_fastpath(self):
        pd.Categorical(self.codes, self.cat_idx, fastpath=True)

    def time_datetimes(self):
        pd.Categorical(self.datetimes)

    def time_datetimes_with_nat(self):
        pd.Categorical(self.datetimes_with_nat)

    def time_with_nan(self):
        pd.Categorical(self.values_some_nan)

    def time_all_nan(self):
        pd.Categorical(self.values_all_nan)

    def time_from_codes_all_int8(self):
        pd.Categorical.from_codes(self.values_all_int8, self.categories)

    def time_existing_categorical(self):
        pd.Categorical(self.categorical)

    def time_existing_series(self):
        pd.Categorical(self.series)
class ValueCounts:
    """Benchmark ``value_counts`` on a categorical Series."""

    params = [True, False]
    param_names = ['dropna']

    def setup(self, dropna):
        size = 5 * 10**5
        codes = np.random.randint(0, size // 10, size=size)
        labels = ['s{:04d}'.format(code) for code in codes]
        self.ts = pd.Series(labels).astype('category')

    def time_value_counts(self, dropna):
        self.ts.value_counts(dropna=dropna)
class Repr:
    """Benchmark string rendering of a one-element categorical Series."""

    def setup(self):
        self.sel = pd.Series(['s1234'], dtype='category')

    def time_rendering(self):
        str(self.sel)
class SetCategories:
    """Benchmark ``cat.set_categories`` with half the categories kept."""

    def setup(self):
        size = 5 * 10**5
        codes = np.random.randint(0, size // 10, size=size)
        labels = ['s{:04d}'.format(code) for code in codes]
        self.ts = pd.Series(labels).astype('category')

    def time_set_categories(self):
        self.ts.cat.set_categories(self.ts.cat.categories[::2])
class RemoveCategories:
    """Benchmark ``cat.remove_categories`` with half the categories removed."""

    def setup(self):
        size = 5 * 10**5
        codes = np.random.randint(0, size // 10, size=size)
        labels = ['s{:04d}'.format(code) for code in codes]
        self.ts = pd.Series(labels).astype('category')

    def time_remove_categories(self):
        self.ts.cat.remove_categories(self.ts.cat.categories[::2])
class Rank:
    # Compare .rank() on plain vs. categorical (ordered and unordered)
    # string and integer Series.
    def setup(self):
        N = 10**5
        ncats = 100
        self.s_str = pd.Series(tm.makeCategoricalIndex(N, ncats)).astype(str)
        self.s_str_cat = self.s_str.astype('category')
        # catch_warnings(record=True) swallows whatever warning the
        # astype(..., ordered=True) form emits, keeping benchmark output
        # clean.
        with warnings.catch_warnings(record=True):
            self.s_str_cat_ordered = self.s_str.astype('category',
                                                       ordered=True)
        self.s_int = pd.Series(np.random.randint(0, ncats, size=N))
        self.s_int_cat = self.s_int.astype('category')
        with warnings.catch_warnings(record=True):
            self.s_int_cat_ordered = self.s_int.astype('category',
                                                       ordered=True)
    def time_rank_string(self):
        self.s_str.rank()
    def time_rank_string_cat(self):
        self.s_str_cat.rank()
    def time_rank_string_cat_ordered(self):
        self.s_str_cat_ordered.rank()
    def time_rank_int(self):
        self.s_int.rank()
    def time_rank_int_cat(self):
        self.s_int_cat.rank()
    def time_rank_int_cat_ordered(self):
        self.s_int_cat_ordered.rank()
class Isin:
    """ASV benchmark: ``Series.isin`` on a categorical Series."""
    # Parametrized over the underlying value dtype.
    params = ['object', 'int64']
    param_names = ['dtype']
    def setup(self, dtype):
        np.random.seed(1234)
        n = 5 * 10**5
        sample_size = 100
        arr = [i for i in np.random.randint(0, n // 10, size=n)]
        if dtype == 'object':
            arr = ['s{:04d}'.format(i) for i in arr]
        self.sample = np.random.choice(arr, sample_size)
        self.series = pd.Series(arr).astype('category')
    def time_isin_categorical(self, dtype):
        self.series.isin(self.sample)
class IsMonotonic:
    """ASV benchmark: monotonicity checks on categorical index/series."""
    def setup(self):
        N = 1000
        self.c = pd.CategoricalIndex(list('a' * N + 'b' * N + 'c' * N))
        self.s = pd.Series(self.c)
    def time_categorical_index_is_monotonic_increasing(self):
        self.c.is_monotonic_increasing
    def time_categorical_index_is_monotonic_decreasing(self):
        self.c.is_monotonic_decreasing
    def time_categorical_series_is_monotonic_increasing(self):
        self.s.is_monotonic_increasing
    def time_categorical_series_is_monotonic_decreasing(self):
        self.s.is_monotonic_decreasing
class Contains:
    """ASV benchmark: ``in`` on a CategoricalIndex vs its Categorical values."""
    def setup(self):
        N = 10**5
        self.ci = tm.makeCategoricalIndex(N)
        self.c = self.ci.values
        self.key = self.ci.categories[0]
    def time_categorical_index_contains(self):
        self.key in self.ci
    def time_categorical_contains(self):
        self.key in self.c
class CategoricalSlicing:
    """ASV benchmark: indexing a Categorical with scalar/slice/list/bool keys."""
    # Parametrized over the ordering of the underlying codes.
    params = ['monotonic_incr', 'monotonic_decr', 'non_monotonic']
    param_names = ['index']
    def setup(self, index):
        N = 10**6
        categories = ['a', 'b', 'c']
        values = [0] * N + [1] * N + [2] * N
        if index == 'monotonic_incr':
            self.data = pd.Categorical.from_codes(values,
                                                  categories=categories)
        elif index == 'monotonic_decr':
            self.data = pd.Categorical.from_codes(list(reversed(values)),
                                                  categories=categories)
        elif index == 'non_monotonic':
            self.data = pd.Categorical.from_codes([0, 1, 2] * N,
                                                  categories=categories)
        else:
            raise ValueError('Invalid index param: {}'.format(index))
        self.scalar = 10000
        self.list = list(range(10000))
        self.cat_scalar = 'b'
    def time_getitem_scalar(self, index):
        self.data[self.scalar]
    def time_getitem_slice(self, index):
        self.data[:self.scalar]
    def time_getitem_list_like(self, index):
        self.data[[self.scalar]]
    def time_getitem_list(self, index):
        self.data[self.list]
    def time_getitem_bool_array(self, index):
        self.data[self.data == self.cat_scalar]
class Indexing:
    """ASV benchmark: common Index operations on a CategoricalIndex."""
    def setup(self):
        N = 10**5
        self.index = pd.CategoricalIndex(range(N), range(N))
        self.series = pd.Series(range(N), index=self.index).sort_index()
        self.category = self.index[500]
    def time_get_loc(self):
        self.index.get_loc(self.category)
    def time_shape(self):
        self.index.shape
    def time_shallow_copy(self):
        self.index._shallow_copy()
    def time_align(self):
        # DataFrame construction forces index alignment of the two Series.
        pd.DataFrame({'a': self.series, 'b': self.series[:500]})
    def time_intersection(self):
        self.index[:750].intersection(self.index[250:])
    def time_unique(self):
        self.index.unique()
    def time_reindex(self):
        self.index.reindex(self.index[:500])
    def time_reindex_missing(self):
        self.index.reindex(['a', 'b', 'c', 'd'])
    def time_sort_values(self):
        self.index.sort_values(ascending=False)
from .pandas_vb_common import setup # noqa: F401
| 28.316498 | 77 | 0.592271 | import warnings
import numpy as np
import pandas as pd
import pandas.util.testing as tm
try:
from pandas.api.types import union_categoricals
except ImportError:
try:
from pandas.types.concat import union_categoricals
except ImportError:
pass
class Concat:
def setup(self):
N = 10**5
self.s = pd.Series(list('aabbcd') * N).astype('category')
self.a = pd.Categorical(list('aabbcd') * N)
self.b = pd.Categorical(list('bbcdjk') * N)
def time_concat(self):
pd.concat([self.s, self.s])
def time_union(self):
union_categoricals([self.a, self.b])
class Constructor:
def setup(self):
N = 10**5
self.categories = list('abcde')
self.cat_idx = pd.Index(self.categories)
self.values = np.tile(self.categories, N)
self.codes = np.tile(range(len(self.categories)), N)
self.datetimes = pd.Series(pd.date_range('1995-01-01 00:00:00',
periods=N / 10,
freq='s'))
self.datetimes_with_nat = self.datetimes.copy()
self.datetimes_with_nat.iloc[-1] = pd.NaT
self.values_some_nan = list(np.tile(self.categories + [np.nan], N))
self.values_all_nan = [np.nan] * len(self.values)
self.values_all_int8 = np.ones(N, 'int8')
self.categorical = pd.Categorical(self.values, self.categories)
self.series = pd.Series(self.categorical)
def time_regular(self):
pd.Categorical(self.values, self.categories)
def time_fastpath(self):
pd.Categorical(self.codes, self.cat_idx, fastpath=True)
def time_datetimes(self):
pd.Categorical(self.datetimes)
def time_datetimes_with_nat(self):
pd.Categorical(self.datetimes_with_nat)
def time_with_nan(self):
pd.Categorical(self.values_some_nan)
def time_all_nan(self):
pd.Categorical(self.values_all_nan)
def time_from_codes_all_int8(self):
pd.Categorical.from_codes(self.values_all_int8, self.categories)
def time_existing_categorical(self):
pd.Categorical(self.categorical)
def time_existing_series(self):
pd.Categorical(self.series)
class ValueCounts:
params = [True, False]
param_names = ['dropna']
def setup(self, dropna):
n = 5 * 10**5
arr = ['s{:04d}'.format(i) for i in np.random.randint(0, n // 10,
size=n)]
self.ts = pd.Series(arr).astype('category')
def time_value_counts(self, dropna):
self.ts.value_counts(dropna=dropna)
class Repr:
def setup(self):
self.sel = pd.Series(['s1234']).astype('category')
def time_rendering(self):
str(self.sel)
class SetCategories:
def setup(self):
n = 5 * 10**5
arr = ['s{:04d}'.format(i) for i in np.random.randint(0, n // 10,
size=n)]
self.ts = pd.Series(arr).astype('category')
def time_set_categories(self):
self.ts.cat.set_categories(self.ts.cat.categories[::2])
class RemoveCategories:
def setup(self):
n = 5 * 10**5
arr = ['s{:04d}'.format(i) for i in np.random.randint(0, n // 10,
size=n)]
self.ts = pd.Series(arr).astype('category')
def time_remove_categories(self):
self.ts.cat.remove_categories(self.ts.cat.categories[::2])
class Rank:
def setup(self):
N = 10**5
ncats = 100
self.s_str = pd.Series(tm.makeCategoricalIndex(N, ncats)).astype(str)
self.s_str_cat = self.s_str.astype('category')
with warnings.catch_warnings(record=True):
self.s_str_cat_ordered = self.s_str.astype('category',
ordered=True)
self.s_int = pd.Series(np.random.randint(0, ncats, size=N))
self.s_int_cat = self.s_int.astype('category')
with warnings.catch_warnings(record=True):
self.s_int_cat_ordered = self.s_int.astype('category',
ordered=True)
def time_rank_string(self):
self.s_str.rank()
def time_rank_string_cat(self):
self.s_str_cat.rank()
def time_rank_string_cat_ordered(self):
self.s_str_cat_ordered.rank()
def time_rank_int(self):
self.s_int.rank()
def time_rank_int_cat(self):
self.s_int_cat.rank()
def time_rank_int_cat_ordered(self):
self.s_int_cat_ordered.rank()
class Isin:
params = ['object', 'int64']
param_names = ['dtype']
def setup(self, dtype):
np.random.seed(1234)
n = 5 * 10**5
sample_size = 100
arr = [i for i in np.random.randint(0, n // 10, size=n)]
if dtype == 'object':
arr = ['s{:04d}'.format(i) for i in arr]
self.sample = np.random.choice(arr, sample_size)
self.series = pd.Series(arr).astype('category')
def time_isin_categorical(self, dtype):
self.series.isin(self.sample)
class IsMonotonic:
def setup(self):
N = 1000
self.c = pd.CategoricalIndex(list('a' * N + 'b' * N + 'c' * N))
self.s = pd.Series(self.c)
def time_categorical_index_is_monotonic_increasing(self):
self.c.is_monotonic_increasing
def time_categorical_index_is_monotonic_decreasing(self):
self.c.is_monotonic_decreasing
def time_categorical_series_is_monotonic_increasing(self):
self.s.is_monotonic_increasing
def time_categorical_series_is_monotonic_decreasing(self):
self.s.is_monotonic_decreasing
class Contains:
def setup(self):
N = 10**5
self.ci = tm.makeCategoricalIndex(N)
self.c = self.ci.values
self.key = self.ci.categories[0]
def time_categorical_index_contains(self):
self.key in self.ci
def time_categorical_contains(self):
self.key in self.c
class CategoricalSlicing:
params = ['monotonic_incr', 'monotonic_decr', 'non_monotonic']
param_names = ['index']
def setup(self, index):
N = 10**6
categories = ['a', 'b', 'c']
values = [0] * N + [1] * N + [2] * N
if index == 'monotonic_incr':
self.data = pd.Categorical.from_codes(values,
categories=categories)
elif index == 'monotonic_decr':
self.data = pd.Categorical.from_codes(list(reversed(values)),
categories=categories)
elif index == 'non_monotonic':
self.data = pd.Categorical.from_codes([0, 1, 2] * N,
categories=categories)
else:
raise ValueError('Invalid index param: {}'.format(index))
self.scalar = 10000
self.list = list(range(10000))
self.cat_scalar = 'b'
def time_getitem_scalar(self, index):
self.data[self.scalar]
def time_getitem_slice(self, index):
self.data[:self.scalar]
def time_getitem_list_like(self, index):
self.data[[self.scalar]]
def time_getitem_list(self, index):
self.data[self.list]
def time_getitem_bool_array(self, index):
self.data[self.data == self.cat_scalar]
class Indexing:
def setup(self):
N = 10**5
self.index = pd.CategoricalIndex(range(N), range(N))
self.series = pd.Series(range(N), index=self.index).sort_index()
self.category = self.index[500]
def time_get_loc(self):
self.index.get_loc(self.category)
def time_shape(self):
self.index.shape
def time_shallow_copy(self):
self.index._shallow_copy()
def time_align(self):
pd.DataFrame({'a': self.series, 'b': self.series[:500]})
def time_intersection(self):
self.index[:750].intersection(self.index[250:])
def time_unique(self):
self.index.unique()
def time_reindex(self):
self.index.reindex(self.index[:500])
def time_reindex_missing(self):
self.index.reindex(['a', 'b', 'c', 'd'])
def time_sort_values(self):
self.index.sort_values(ascending=False)
from .pandas_vb_common import setup
| true | true |
790157504976a4aa3070a39ca031dd015952fa19 | 9,976 | py | Python | numpy/array_api/__init__.py | yashasvimisra2798/numpy | b892ed2c7fa27b2e0d73c12d12ace4b4d4e12897 | [
"BSD-3-Clause"
] | 2 | 2021-08-25T11:22:49.000Z | 2021-08-28T05:35:46.000Z | numpy/array_api/__init__.py | yashasvimisra2798/numpy | b892ed2c7fa27b2e0d73c12d12ace4b4d4e12897 | [
"BSD-3-Clause"
] | 68 | 2021-08-30T05:08:25.000Z | 2022-03-28T05:11:42.000Z | numpy/array_api/__init__.py | yashasvimisra2798/numpy | b892ed2c7fa27b2e0d73c12d12ace4b4d4e12897 | [
"BSD-3-Clause"
] | null | null | null | """
A NumPy sub-namespace that conforms to the Python array API standard.
This submodule accompanies NEP 47, which proposes its inclusion in NumPy. It
is still considered experimental, and will issue a warning when imported.
This is a proof-of-concept namespace that wraps the corresponding NumPy
functions to give a conforming implementation of the Python array API standard
(https://data-apis.github.io/array-api/latest/). The standard is currently in
an RFC phase and comments on it are both welcome and encouraged. Comments
should be made either at https://github.com/data-apis/array-api or at
https://github.com/data-apis/consortium-feedback/discussions.
NumPy already follows the proposed spec for the most part, so this module
serves mostly as a thin wrapper around it. However, NumPy also implements a
lot of behavior that is not included in the spec, so this serves as a
restricted subset of the API. Only those functions that are part of the spec
are included in this namespace, and all functions are given with the exact
signature given in the spec, including the use of position-only arguments, and
omitting any extra keyword arguments implemented by NumPy but not part of the
spec. The behavior of some functions is also modified from the NumPy behavior
to conform to the standard. Note that the underlying array object itself is
wrapped in a wrapper Array() class, but is otherwise unchanged. This submodule
is implemented in pure Python with no C extensions.
The array API spec is designed as a "minimal API subset" and explicitly allows
libraries to include behaviors not specified by it. But users of this module
that intend to write portable code should be aware that only those behaviors
that are listed in the spec are guaranteed to be implemented across libraries.
Consequently, the NumPy implementation was chosen to be both conforming and
minimal, so that users can use this implementation of the array API namespace
and be sure that behaviors that it defines will be available in conforming
namespaces from other libraries.
A few notes about the current state of this submodule:
- There is a test suite that tests modules against the array API standard at
https://github.com/data-apis/array-api-tests. The test suite is still a work
in progress, but the existing tests pass on this module, with a few
exceptions:
- DLPack support (see https://github.com/data-apis/array-api/pull/106) is
not included here, as it requires a full implementation in NumPy proper
first.
The test suite is not yet complete, and even the tests that exist are not
guaranteed to give a comprehensive coverage of the spec. Therefore, when
reviewing and using this submodule, you should refer to the standard
documents themselves. There are some tests in numpy.array_api.tests, but
they primarily focus on things that are not tested by the official array API
test suite.
- There is a custom array object, numpy.array_api.Array, which is returned by
all functions in this module. All functions in the array API namespace
implicitly assume that they will only receive this object as input. The only
way to create instances of this object is to use one of the array creation
functions. It does not have a public constructor on the object itself. The
object is a small wrapper class around numpy.ndarray. The main purpose of it
is to restrict the namespace of the array object to only those dtypes and
only those methods that are required by the spec, as well as to limit/change
certain behavior that differs in the spec. In particular:
- The array API namespace does not have scalar objects, only 0-D arrays.
Operations on Array that would create a scalar in NumPy create a 0-D
array.
- Indexing: Only a subset of indices supported by NumPy are required by the
spec. The Array object restricts indexing to only allow those types of
indices that are required by the spec. See the docstring of the
numpy.array_api.Array._validate_indices helper function for more
information.
- Type promotion: Some type promotion rules are different in the spec. In
particular, the spec does not have any value-based casting. The spec also
does not require cross-kind casting, like integer -> floating-point. Only
those promotions that are explicitly required by the array API
specification are allowed in this module. See NEP 47 for more info.
- Functions do not automatically call asarray() on their input, and will not
work if the input type is not Array. The exception is array creation
functions, and Python operators on the Array object, which accept Python
scalars of the same type as the array dtype.
- All functions include type annotations, corresponding to those given in the
spec (see _typing.py for definitions of some custom types). These do not
currently fully pass mypy due to some limitations in mypy.
- Dtype objects are just the NumPy dtype objects, e.g., float64 =
np.dtype('float64'). The spec does not require any behavior on these dtype
objects other than that they be accessible by name and be comparable by
equality, but it was considered too much extra complexity to create custom
objects to represent dtypes.
- All places where the implementations in this submodule are known to deviate
from their corresponding functions in NumPy are marked with "# Note:"
comments.
Still TODO in this module are:
- DLPack support for numpy.ndarray is still in progress. See
https://github.com/numpy/numpy/pull/19083.
- The copy=False keyword argument to asarray() is not yet implemented. This
requires support in numpy.asarray() first.
- Some functions are not yet fully tested in the array API test suite, and may
require updates that are not yet known until the tests are written.
- The spec is still in an RFC phase and may still have minor updates, which
will need to be reflected here.
- The linear algebra extension in the spec will be added in a future pull
request.
- Complex number support in array API spec is planned but not yet finalized,
as are the fft extension and certain linear algebra functions such as eig
that require complex dtypes.
"""
import warnings
warnings.warn(
"The numpy.array_api submodule is still experimental. See NEP 47.", stacklevel=2
)
__all__ = []
from ._constants import e, inf, nan, pi
__all__ += ["e", "inf", "nan", "pi"]
from ._creation_functions import (
asarray,
arange,
empty,
empty_like,
eye,
from_dlpack,
full,
full_like,
linspace,
meshgrid,
ones,
ones_like,
zeros,
zeros_like,
)
__all__ += [
"asarray",
"arange",
"empty",
"empty_like",
"eye",
"from_dlpack",
"full",
"full_like",
"linspace",
"meshgrid",
"ones",
"ones_like",
"zeros",
"zeros_like",
]
from ._data_type_functions import (
broadcast_arrays,
broadcast_to,
can_cast,
finfo,
iinfo,
result_type,
)
__all__ += [
"broadcast_arrays",
"broadcast_to",
"can_cast",
"finfo",
"iinfo",
"result_type",
]
from ._dtypes import (
int8,
int16,
int32,
int64,
uint8,
uint16,
uint32,
uint64,
float32,
float64,
bool,
)
__all__ += [
"int8",
"int16",
"int32",
"int64",
"uint8",
"uint16",
"uint32",
"uint64",
"float32",
"float64",
"bool",
]
from ._elementwise_functions import (
abs,
acos,
acosh,
add,
asin,
asinh,
atan,
atan2,
atanh,
bitwise_and,
bitwise_left_shift,
bitwise_invert,
bitwise_or,
bitwise_right_shift,
bitwise_xor,
ceil,
cos,
cosh,
divide,
equal,
exp,
expm1,
floor,
floor_divide,
greater,
greater_equal,
isfinite,
isinf,
isnan,
less,
less_equal,
log,
log1p,
log2,
log10,
logaddexp,
logical_and,
logical_not,
logical_or,
logical_xor,
multiply,
negative,
not_equal,
positive,
pow,
remainder,
round,
sign,
sin,
sinh,
square,
sqrt,
subtract,
tan,
tanh,
trunc,
)
__all__ += [
"abs",
"acos",
"acosh",
"add",
"asin",
"asinh",
"atan",
"atan2",
"atanh",
"bitwise_and",
"bitwise_left_shift",
"bitwise_invert",
"bitwise_or",
"bitwise_right_shift",
"bitwise_xor",
"ceil",
"cos",
"cosh",
"divide",
"equal",
"exp",
"expm1",
"floor",
"floor_divide",
"greater",
"greater_equal",
"isfinite",
"isinf",
"isnan",
"less",
"less_equal",
"log",
"log1p",
"log2",
"log10",
"logaddexp",
"logical_and",
"logical_not",
"logical_or",
"logical_xor",
"multiply",
"negative",
"not_equal",
"positive",
"pow",
"remainder",
"round",
"sign",
"sin",
"sinh",
"square",
"sqrt",
"subtract",
"tan",
"tanh",
"trunc",
]
# einsum is not yet implemented in the array API spec.
# from ._linear_algebra_functions import einsum
# __all__ += ['einsum']
from ._linear_algebra_functions import matmul, tensordot, transpose, vecdot
__all__ += ["matmul", "tensordot", "transpose", "vecdot"]
from ._manipulation_functions import (
concat,
expand_dims,
flip,
reshape,
roll,
squeeze,
stack,
)
__all__ += ["concat", "expand_dims", "flip", "reshape", "roll", "squeeze", "stack"]
from ._searching_functions import argmax, argmin, nonzero, where
__all__ += ["argmax", "argmin", "nonzero", "where"]
from ._set_functions import unique
__all__ += ["unique"]
from ._sorting_functions import argsort, sort
__all__ += ["argsort", "sort"]
from ._statistical_functions import max, mean, min, prod, std, sum, var
__all__ += ["max", "mean", "min", "prod", "std", "sum", "var"]
from ._utility_functions import all, any
__all__ += ["all", "any"]
| 26.889488 | 84 | 0.697775 |
import warnings
warnings.warn(
"The numpy.array_api submodule is still experimental. See NEP 47.", stacklevel=2
)
__all__ = []
from ._constants import e, inf, nan, pi
__all__ += ["e", "inf", "nan", "pi"]
from ._creation_functions import (
asarray,
arange,
empty,
empty_like,
eye,
from_dlpack,
full,
full_like,
linspace,
meshgrid,
ones,
ones_like,
zeros,
zeros_like,
)
__all__ += [
"asarray",
"arange",
"empty",
"empty_like",
"eye",
"from_dlpack",
"full",
"full_like",
"linspace",
"meshgrid",
"ones",
"ones_like",
"zeros",
"zeros_like",
]
from ._data_type_functions import (
broadcast_arrays,
broadcast_to,
can_cast,
finfo,
iinfo,
result_type,
)
__all__ += [
"broadcast_arrays",
"broadcast_to",
"can_cast",
"finfo",
"iinfo",
"result_type",
]
from ._dtypes import (
int8,
int16,
int32,
int64,
uint8,
uint16,
uint32,
uint64,
float32,
float64,
bool,
)
__all__ += [
"int8",
"int16",
"int32",
"int64",
"uint8",
"uint16",
"uint32",
"uint64",
"float32",
"float64",
"bool",
]
from ._elementwise_functions import (
abs,
acos,
acosh,
add,
asin,
asinh,
atan,
atan2,
atanh,
bitwise_and,
bitwise_left_shift,
bitwise_invert,
bitwise_or,
bitwise_right_shift,
bitwise_xor,
ceil,
cos,
cosh,
divide,
equal,
exp,
expm1,
floor,
floor_divide,
greater,
greater_equal,
isfinite,
isinf,
isnan,
less,
less_equal,
log,
log1p,
log2,
log10,
logaddexp,
logical_and,
logical_not,
logical_or,
logical_xor,
multiply,
negative,
not_equal,
positive,
pow,
remainder,
round,
sign,
sin,
sinh,
square,
sqrt,
subtract,
tan,
tanh,
trunc,
)
__all__ += [
"abs",
"acos",
"acosh",
"add",
"asin",
"asinh",
"atan",
"atan2",
"atanh",
"bitwise_and",
"bitwise_left_shift",
"bitwise_invert",
"bitwise_or",
"bitwise_right_shift",
"bitwise_xor",
"ceil",
"cos",
"cosh",
"divide",
"equal",
"exp",
"expm1",
"floor",
"floor_divide",
"greater",
"greater_equal",
"isfinite",
"isinf",
"isnan",
"less",
"less_equal",
"log",
"log1p",
"log2",
"log10",
"logaddexp",
"logical_and",
"logical_not",
"logical_or",
"logical_xor",
"multiply",
"negative",
"not_equal",
"positive",
"pow",
"remainder",
"round",
"sign",
"sin",
"sinh",
"square",
"sqrt",
"subtract",
"tan",
"tanh",
"trunc",
]
from ._linear_algebra_functions import matmul, tensordot, transpose, vecdot
__all__ += ["matmul", "tensordot", "transpose", "vecdot"]
from ._manipulation_functions import (
concat,
expand_dims,
flip,
reshape,
roll,
squeeze,
stack,
)
__all__ += ["concat", "expand_dims", "flip", "reshape", "roll", "squeeze", "stack"]
from ._searching_functions import argmax, argmin, nonzero, where
__all__ += ["argmax", "argmin", "nonzero", "where"]
from ._set_functions import unique
__all__ += ["unique"]
from ._sorting_functions import argsort, sort
__all__ += ["argsort", "sort"]
from ._statistical_functions import max, mean, min, prod, std, sum, var
__all__ += ["max", "mean", "min", "prod", "std", "sum", "var"]
from ._utility_functions import all, any
__all__ += ["all", "any"]
| true | true |
79015799747a5054d9e7fa3a89e743e4be264ccb | 2,170 | py | Python | tests/conftest.py | IBM/castor-messaging | 1bf5001fab7c7479885ececaa58eba5ede9d4fb2 | [
"Apache-2.0"
] | 1 | 2019-05-13T10:35:53.000Z | 2019-05-13T10:35:53.000Z | tests/conftest.py | IBM/castor-messaging | 1bf5001fab7c7479885ececaa58eba5ede9d4fb2 | [
"Apache-2.0"
] | null | null | null | tests/conftest.py | IBM/castor-messaging | 1bf5001fab7c7479885ececaa58eba5ede9d4fb2 | [
"Apache-2.0"
] | 3 | 2019-05-07T16:26:10.000Z | 2019-05-22T13:09:04.000Z | #!/usr/bin/env python3
#author markpurcell@ie.ibm.com
"""RabbitMQ helper class.
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
"""
"""
IBM-Review-Requirement: Art30.3 - DO NOT TRANSFER OR EXCLUSIVELY LICENSE THE FOLLOWING CODE UNTIL 30/11/2025!
Please note that the following code was developed for the project MUSKETEER in DRL funded by the European Union
under the Horizon 2020 Program.
The project started on 01/12/2018 and was completed on 30/11/2021. Thus, in accordance with article 30.3 of the
Multi-Beneficiary General Model Grant Agreement of the Program, the above limitations are in force until 30/11/2025.
"""
import pytest
import json
def pytest_addoption(parser):
    """Register the command line options this test suite expects.

    Only ``--credentials`` is mandatory; the two queue names are optional.
    """
    for option, mandatory in (
        ("--credentials", True),
        ("--feed_queue", False),
        ("--reply_queue", False),
    ):
        parser.addoption(option, required=mandatory)
@pytest.fixture
def credentials(request):
    """Return the ``--credentials`` path; load the JSON onto the test class.

    When the requesting test lives on a class, the parsed credentials dict is
    attached as ``cls.credentials`` for class-based tests to use.
    """
    path = request.config.getoption('credentials')
    if request.cls:
        with open(path) as handle:
            request.cls.credentials = json.load(handle)
    return path
@pytest.fixture
def feed_queue(request):
    """Return the ``--feed_queue`` option, mirroring it onto the test class."""
    queue_name = request.config.getoption('feed_queue')
    if request.cls:
        request.cls.feed_queue = queue_name
    return queue_name
@pytest.fixture
def reply_queue(request):
    """Return the ``--reply_queue`` option, mirroring it onto the test class."""
    queue_name = request.config.getoption('reply_queue')
    if request.cls:
        request.cls.reply_queue = queue_name
    return queue_name
| 35 | 116 | 0.74424 |
import pytest
import json
def pytest_addoption(parser):
parser.addoption("--credentials", required=True)
parser.addoption("--feed_queue", required=False)
parser.addoption("--reply_queue", required=False)
@pytest.fixture
def credentials(request):
value = request.config.getoption('credentials')
if request.cls:
with open(value) as json_file:
request.cls.credentials = json.load(json_file)
return value
@pytest.fixture
def feed_queue(request):
value = request.config.getoption('feed_queue')
if request.cls:
request.cls.feed_queue = value
return value
@pytest.fixture
def reply_queue(request):
value = request.config.getoption('reply_queue')
if request.cls:
request.cls.reply_queue = value
return value
| true | true |
79015899e599f2aedca475e99bb3d434f58032e6 | 353 | py | Python | examples/service/fastweb_thrift_async/HelloService/ttypes.py | BSlience/fastweb | 2c1b956e9846c4205d0201d39d09891d088754e4 | [
"Apache-2.0"
] | 123 | 2017-06-06T04:59:07.000Z | 2019-07-11T10:20:35.000Z | examples/service/fastweb_thrift_async/HelloService/ttypes.py | BSlience/fastweb | 2c1b956e9846c4205d0201d39d09891d088754e4 | [
"Apache-2.0"
] | null | null | null | examples/service/fastweb_thrift_async/HelloService/ttypes.py | BSlience/fastweb | 2c1b956e9846c4205d0201d39d09891d088754e4 | [
"Apache-2.0"
] | 2 | 2017-06-28T05:58:39.000Z | 2018-09-25T00:18:33.000Z | #
# Autogenerated by Thrift Compiler (0.10.0)
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
#
# options string: py:tornado
#
from thrift.Thrift import TType, TMessageType, TFrozenDict, TException, TApplicationException
from thrift.protocol.TProtocol import TProtocolException
import sys
from thrift.transport import TTransport
| 25.214286 | 93 | 0.798867 |
from thrift.Thrift import TType, TMessageType, TFrozenDict, TException, TApplicationException
from thrift.protocol.TProtocol import TProtocolException
import sys
from thrift.transport import TTransport
| true | true |
7901589efe833a1d4d89f83225c112506102474a | 16,107 | py | Python | h/schemas/annotation.py | bibliotechie/h | 16e275f79ef7d1086971bd30ef403501c6b93beb | [
"BSD-2-Clause"
] | null | null | null | h/schemas/annotation.py | bibliotechie/h | 16e275f79ef7d1086971bd30ef403501c6b93beb | [
"BSD-2-Clause"
] | null | null | null | h/schemas/annotation.py | bibliotechie/h | 16e275f79ef7d1086971bd30ef403501c6b93beb | [
"BSD-2-Clause"
] | null | null | null | """Classes for validating data passed to the annotations API."""
import copy
import colander
from dateutil.parser import parse
from pyramid import i18n
from h.schemas.base import JSONSchema, ValidationError
from h.search.query import LIMIT_DEFAULT, LIMIT_MAX, OFFSET_MAX
from h.search.util import wildcard_uri_is_valid
from h.util import document_claims
_ = i18n.TranslationStringFactory(__package__)
def _validate_wildcard_uri(node, value):
    """Colander validator: reject URIs with wildcards in their domain part.

    Raises ``colander.Invalid`` on the first offending URI in *value*.
    """
    for candidate in value:
        if wildcard_uri_is_valid(candidate):
            continue
        # NOTE: the message literal's embedded newline/indentation is kept
        # exactly as before so the reported error text is unchanged.
        raise colander.Invalid(
            node,
            """Wildcards (_ and *) are not permitted within the
                domain of wildcard_uri""",
        )
class AnnotationSchema(JSONSchema):
    """Validate an annotation object."""

    # JSON Schema describing the annotation payload accepted by the API.
    # Unknown extra properties are not rejected here; the create/update
    # schemas below sweep leftovers into the annotation's "extra" field.
    schema = {
        "type": "object",
        "properties": {
            # Client-supplied metadata about the annotated document
            # (Dublin Core, HighWire, and <link>-style claims).
            "document": {
                "type": "object",
                "properties": {
                    "dc": {
                        "type": "object",
                        "properties": {
                            "identifier": {"type": "array", "items": {"type": "string"}}
                        },
                    },
                    "highwire": {
                        "type": "object",
                        "properties": {
                            "doi": {"type": "array", "items": {"type": "string"}},
                            "pdf_url": {"type": "array", "items": {"type": "string"}},
                        },
                    },
                    "link": {
                        "type": "array",
                        "items": {
                            "type": "object",
                            "properties": {
                                "href": {"type": "string"},
                                "type": {"type": "string"},
                            },
                            "required": ["href"],
                        },
                    },
                },
            },
            "group": {"type": "string"},
            # ACL mapping annotation actions to principal strings; every
            # principal must be an "acct:" or "group:" value and a "read"
            # entry is mandatory.
            "permissions": {
                "title": "Permissions",
                "description": "Annotation action access control list",
                "type": "object",
                "patternProperties": {
                    "^(admin|delete|read|update)$": {
                        "type": "array",
                        "items": {"type": "string", "pattern": "^(acct:|group:).+$"},
                    }
                },
                "required": ["read"],
            },
            # IDs of parent annotations (non-empty for replies).
            "references": {"type": "array", "items": {"type": "string"}},
            "tags": {"type": "array", "items": {"type": "string"}},
            # Anchoring targets; each selector only needs a "type".
            "target": {
                "type": "array",
                "items": {
                    "type": "object",
                    "properties": {
                        "selector": {
                            "type": "array",
                            "items": {
                                "type": "object",
                                "properties": {"type": {"type": "string"}},
                                "required": ["type"],
                            },
                        }
                    },
                },
            },
            "text": {"type": "string"},
            "uri": {"type": "string"},
        },
    }
class CreateAnnotationSchema:
    """Validate the POSTed data of a create annotation request."""

    def __init__(self, request):
        self.request = request
        self.structure = AnnotationSchema()

    def validate(self, data):
        """Return the validated data in internal (storage) form.

        Raises ``ValidationError`` if the payload fails schema validation or
        omits the required ``uri``.
        """
        parsed = self.structure.validate(data)
        _remove_protected_fields(parsed)

        uri = parsed.pop("uri", "").strip()
        if not uri:
            raise ValidationError("uri: " + _("'uri' is a required property"))

        result = {
            "userid": self.request.authenticated_userid,
            "target_uri": uri,
            "text": parsed.pop("text", ""),
            "tags": parsed.pop("tags", []),
            "groupid": parsed.pop("group", "__world__"),
            "references": parsed.pop("references", []),
        }

        if "permissions" in parsed:
            result["shared"] = _shared(parsed.pop("permissions"), result["groupid"])
        else:
            result["shared"] = False

        if "target" in parsed:
            result["target_selectors"] = _target_selectors(parsed.pop("target"))

        # Replies always get the same groupid as their parent. The parent's
        # groupid is added to the reply annotation later by the storage code,
        # so discard any group the client sent with a reply.
        if result["references"] and "groupid" in result:
            del result["groupid"]

        result["document"] = _document(parsed.pop("document", {}), result["target_uri"])

        # Whatever the client sent beyond the known fields is preserved.
        result["extra"] = parsed
        return result
class UpdateAnnotationSchema:
    """Validate the POSTed data of an update annotation request."""

    def __init__(self, request, existing_target_uri, groupid):
        self.request = request
        self.existing_target_uri = existing_target_uri
        self.groupid = groupid
        self.structure = AnnotationSchema()

    def validate(self, data):
        """Return the validated update data in internal (storage) form.

        Raises ``ValidationError`` if the payload fails schema validation or
        supplies an empty ``uri``.
        """
        parsed = self.structure.validate(data)
        result = {}

        _remove_protected_fields(parsed)

        # These fields may never be changed by an update request.
        for immutable in ("group", "groupid", "userid", "references"):
            parsed.pop(immutable, "")

        # Updatable fields whose internal name differs from the API name.
        if "uri" in parsed:
            new_uri = parsed.pop("uri").strip()
            if not new_uri:
                raise ValidationError("uri: " + _("'uri' is a required property"))
            result["target_uri"] = new_uri

        if "permissions" in parsed:
            result["shared"] = _shared(parsed.pop("permissions"), self.groupid)

        if "target" in parsed:
            result["target_selectors"] = _target_selectors(parsed.pop("target"))

        # Updatable fields that share their internal and external name.
        for passthrough in ("text", "tags"):
            if passthrough in parsed:
                result[passthrough] = parsed.pop(passthrough)

        if "document" in parsed:
            # Document claims are keyed on the (possibly updated) target URI.
            result["document"] = _document(
                parsed.pop("document"),
                result.get("target_uri", self.existing_target_uri),
            )

        result["extra"] = parsed
        return result
def _document(document, claimant):
    """Split a client-supplied "document" dict into URI and meta dicts.

    Normalises the "document" payload posted by the client into the two
    lists of dicts that the storage layer later uses to create DocumentURI
    and DocumentMeta objects.
    """
    source = document or {}

    # Defensive deep copies: presumably the extractors mutate their input —
    # TODO confirm against document_claims.
    return {
        "document_uri_dicts": document_claims.document_uris_from_data(
            copy.deepcopy(source), claimant=claimant
        ),
        "document_meta_dicts": document_claims.document_metas_from_data(
            copy.deepcopy(source), claimant=claimant
        ),
    }
def _format_jsonschema_error(error):
"""Format a :py:class:`jsonschema.ValidationError` as a string."""
if error.path:
dotted_path = ".".join([str(c) for c in error.path])
return "{path}: {message}".format(path=dotted_path, message=error.message)
return error.message
def _remove_protected_fields(appstruct):
# Some fields are not to be set by the user, ignore them.
for field in [
"created",
"updated",
"user",
"id",
"links",
"flagged",
"hidden",
"moderation",
"user_info",
]:
appstruct.pop(field, None)
def _shared(permissions, groupid):
"""
Return True if the given permissions object represents shared permissions.
Return False otherwise.
Reduces the client's complex permissions dict to a simple shared boolean.
:param permissions: the permissions dict sent by the client in an
annotation create or update request
:type permissions: dict
:param groupid: the groupid of the annotation that the permissions dict
applies to
:type groupid: unicode
"""
return permissions["read"] == ["group:{id}".format(id=groupid)]
def _target_selectors(targets):
"""
Return the target selectors from the given target list.
Transforms the target lists that the client sends in annotation create and
update requests into our internal target_selectors format.
"""
# Any targets other than the first in the list are discarded.
# Any fields of the target other than 'selector' are discarded.
if targets and "selector" in targets[0]:
return targets[0]["selector"]
return []
class SearchParamsSchema(colander.Schema):
    """Validate and coerce the query parameters of a search API request.

    Each schema node corresponds to one supported query-string parameter;
    the ``description`` strings double as the public API documentation.
    """

    # Leading underscore: private parameter, not part of the documented API.
    _separate_replies = colander.SchemaNode(
        colander.Boolean(),
        missing=False,
        description="Return a separate set of annotations and their replies.",
    )
    sort = colander.SchemaNode(
        colander.String(),
        validator=colander.OneOf(["created", "updated", "group", "id", "user"]),
        missing="updated",
        description="The field by which annotations should be sorted.",
    )
    search_after = colander.SchemaNode(
        colander.String(),
        missing=colander.drop,
        description="""Returns results after the annotation who's sort field
                       has this value. If specifying a date use the format
                       yyyy-MM-dd'T'HH:mm:ss.SSX or time in miliseconds since the
                       epoch. This is used for iteration through large collections
                       of results.""",
    )
    limit = colander.SchemaNode(
        colander.Integer(),
        validator=colander.Range(min=0, max=LIMIT_MAX),
        missing=LIMIT_DEFAULT,
        description="The maximum number of annotations to return.",
    )
    order = colander.SchemaNode(
        colander.String(),
        validator=colander.OneOf(["asc", "desc"]),
        missing="desc",
        description="The direction of sort.",
    )
    offset = colander.SchemaNode(
        colander.Integer(),
        validator=colander.Range(min=0, max=OFFSET_MAX),
        missing=0,
        description="""The number of initial annotations to skip. This is
                       used for pagination. Not suitable for paging through
                       thousands of annotations-search_after should be used
                       instead.""",
    )
    group = colander.SchemaNode(
        colander.String(),
        missing=colander.drop,
        description="Limit the results to this group of annotations.",
    )
    quote = colander.SchemaNode(
        colander.Sequence(),
        colander.SchemaNode(colander.String()),
        missing=colander.drop,
        description="""Limit the results to annotations that contain this text inside
                       the text that was annotated.""",
    )
    references = colander.SchemaNode(
        colander.Sequence(),
        colander.SchemaNode(colander.String()),
        missing=colander.drop,
        description="""Returns annotations that are replies to this parent annotation id.""",
    )
    tag = colander.SchemaNode(
        colander.Sequence(),
        colander.SchemaNode(colander.String()),
        missing=colander.drop,
        description="Limit the results to annotations tagged with the specified value.",
    )
    tags = colander.SchemaNode(
        colander.Sequence(),
        colander.SchemaNode(colander.String()),
        missing=colander.drop,
        description="Alias of tag.",
    )
    text = colander.SchemaNode(
        colander.Sequence(),
        colander.SchemaNode(colander.String()),
        missing=colander.drop,
        description="Limit the results to annotations that contain this text in their textual body.",
    )
    uri = colander.SchemaNode(
        colander.Sequence(),
        colander.SchemaNode(colander.String()),
        missing=colander.drop,
        description="""Limit the results to annotations matching the specific URI
                       or equivalent URIs. URI can be a URL (a web page address) or
                       a URN representing another kind of resource such as DOI
                       (Digital Object Identifier) or a PDF fingerprint.""",
    )
    # Exposed publicly as "uri.parts" ("." is not a valid attribute name).
    uri_parts = colander.SchemaNode(
        colander.Sequence(),
        colander.SchemaNode(colander.String()),
        name="uri.parts",
        missing=colander.drop,
        description="""Limit the results to annotations with the given keyword
                       appearing in the URL.""",
    )
    url = colander.SchemaNode(
        colander.Sequence(),
        colander.SchemaNode(colander.String()),
        missing=colander.drop,
        description="Alias of uri.",
    )
    wildcard_uri = colander.SchemaNode(
        colander.Sequence(),
        colander.SchemaNode(colander.String()),
        validator=_validate_wildcard_uri,
        missing=colander.drop,
        description="""
            Limit the results to annotations matching the wildcard URI.
            URI can be a URL (a web page address) or a URN representing another
            kind of resource such as DOI (Digital Object Identifier) or a
            PDF fingerprint.
            `*` will match any character sequence (including an empty one),
            and a `_` will match any single character. Wildcards are only permitted
            within the path and query parts of the URI.
            Escaping wildcards is not supported.
            Examples of valid uris":" `http://foo.com/*` `urn:x-pdf:*` `file://localhost/_bc.pdf`
            Examples of invalid uris":" `*foo.com` `u_n:*` `file://*` `http://foo.com*`
            """,
    )
    # NOTE: intentionally shadows the builtin `any` — it is the public
    # query-parameter name.
    any = colander.SchemaNode(
        colander.Sequence(),
        colander.SchemaNode(colander.String()),
        missing=colander.drop,
        description="""Limit the results to annotations whose quote, tags,
                       text or url fields contain this keyword.""",
    )
    user = colander.SchemaNode(
        colander.String(),
        missing=colander.drop,
        description="Limit the results to annotations made by the specified user.",
    )

    def validator(self, node, cstruct):
        """Cross-field validation run by colander after deserialization.

        When search_after is given with a date-valued sort field it must be
        a parsable date; search_after also supersedes offset pagination.
        """
        sort = cstruct["sort"]
        search_after = cstruct.get("search_after", None)
        if search_after:
            if sort in ["updated", "created"] and not self._date_is_parsable(
                search_after
            ):
                raise colander.Invalid(
                    node,
                    """search_after must be a parsable date in the form
                    yyyy-MM-dd'T'HH:mm:ss.SSX
                    or time in miliseconds since the epoch.""",
                )
            # offset must be set to 0 if search_after is specified.
            cstruct["offset"] = 0

    def _date_is_parsable(self, value):
        """Return True if date is parsable and False otherwise."""
        # Dates like "2017" can also be cast as floats so if a number is less
        # than 9999 it is assumed to be a year and not ms since the epoch.
        try:
            if float(value) < 9999:
                raise ValueError("This is not in the form ms since the epoch.")
        except ValueError:
            try:
                parse(value)
            except ValueError:
                return False
        return True
| 35.168122 | 101 | 0.562488 | import copy
import colander
from dateutil.parser import parse
from pyramid import i18n
from h.schemas.base import JSONSchema, ValidationError
from h.search.query import LIMIT_DEFAULT, LIMIT_MAX, OFFSET_MAX
from h.search.util import wildcard_uri_is_valid
from h.util import document_claims
_ = i18n.TranslationStringFactory(__package__)
def _validate_wildcard_uri(node, value):
for val in value:
if not wildcard_uri_is_valid(val):
raise colander.Invalid(
node,
"""Wildcards (_ and *) are not permitted within the
domain of wildcard_uri""",
)
class AnnotationSchema(JSONSchema):
schema = {
"type": "object",
"properties": {
"document": {
"type": "object",
"properties": {
"dc": {
"type": "object",
"properties": {
"identifier": {"type": "array", "items": {"type": "string"}}
},
},
"highwire": {
"type": "object",
"properties": {
"doi": {"type": "array", "items": {"type": "string"}},
"pdf_url": {"type": "array", "items": {"type": "string"}},
},
},
"link": {
"type": "array",
"items": {
"type": "object",
"properties": {
"href": {"type": "string"},
"type": {"type": "string"},
},
"required": ["href"],
},
},
},
},
"group": {"type": "string"},
"permissions": {
"title": "Permissions",
"description": "Annotation action access control list",
"type": "object",
"patternProperties": {
"^(admin|delete|read|update)$": {
"type": "array",
"items": {"type": "string", "pattern": "^(acct:|group:).+$"},
}
},
"required": ["read"],
},
"references": {"type": "array", "items": {"type": "string"}},
"tags": {"type": "array", "items": {"type": "string"}},
"target": {
"type": "array",
"items": {
"type": "object",
"properties": {
"selector": {
"type": "array",
"items": {
"type": "object",
"properties": {"type": {"type": "string"}},
"required": ["type"],
},
}
},
},
},
"text": {"type": "string"},
"uri": {"type": "string"},
},
}
class CreateAnnotationSchema:
def __init__(self, request):
self.structure = AnnotationSchema()
self.request = request
def validate(self, data):
appstruct = self.structure.validate(data)
new_appstruct = {}
_remove_protected_fields(appstruct)
new_appstruct["userid"] = self.request.authenticated_userid
uri = appstruct.pop("uri", "").strip()
if not uri:
raise ValidationError("uri: " + _("'uri' is a required property"))
new_appstruct["target_uri"] = uri
new_appstruct["text"] = appstruct.pop("text", "")
new_appstruct["tags"] = appstruct.pop("tags", [])
new_appstruct["groupid"] = appstruct.pop("group", "__world__")
new_appstruct["references"] = appstruct.pop("references", [])
if "permissions" in appstruct:
new_appstruct["shared"] = _shared(
appstruct.pop("permissions"), new_appstruct["groupid"]
)
else:
new_appstruct["shared"] = False
if "target" in appstruct:
new_appstruct["target_selectors"] = _target_selectors(
appstruct.pop("target")
)
# groupid is added to the reply annotation later by the storage code.
# Here we just delete any group sent by the client from replies.
if new_appstruct["references"] and "groupid" in new_appstruct:
del new_appstruct["groupid"]
new_appstruct["document"] = _document(
appstruct.pop("document", {}), new_appstruct["target_uri"]
)
new_appstruct["extra"] = appstruct
return new_appstruct
class UpdateAnnotationSchema:
def __init__(self, request, existing_target_uri, groupid):
self.request = request
self.existing_target_uri = existing_target_uri
self.groupid = groupid
self.structure = AnnotationSchema()
def validate(self, data):
appstruct = self.structure.validate(data)
new_appstruct = {}
_remove_protected_fields(appstruct)
# Some fields are not allowed to be changed in annotation updates.
for key in ["group", "groupid", "userid", "references"]:
appstruct.pop(key, "")
# Fields that are allowed to be updated and that have a different name
# internally than in the public API.
if "uri" in appstruct:
new_uri = appstruct.pop("uri").strip()
if not new_uri:
raise ValidationError("uri: " + _("'uri' is a required property"))
new_appstruct["target_uri"] = new_uri
if "permissions" in appstruct:
new_appstruct["shared"] = _shared(
appstruct.pop("permissions"), self.groupid
)
if "target" in appstruct:
new_appstruct["target_selectors"] = _target_selectors(
appstruct.pop("target")
)
# Fields that are allowed to be updated and that have the same internal
# and external name.
for key in ["text", "tags"]:
if key in appstruct:
new_appstruct[key] = appstruct.pop(key)
if "document" in appstruct:
new_appstruct["document"] = _document(
appstruct.pop("document"),
new_appstruct.get("target_uri", self.existing_target_uri),
)
new_appstruct["extra"] = appstruct
return new_appstruct
def _document(document, claimant):
document = document or {}
document_uri_dicts = document_claims.document_uris_from_data(
copy.deepcopy(document), claimant=claimant
)
document_meta_dicts = document_claims.document_metas_from_data(
copy.deepcopy(document), claimant=claimant
)
return {
"document_uri_dicts": document_uri_dicts,
"document_meta_dicts": document_meta_dicts,
}
def _format_jsonschema_error(error):
if error.path:
dotted_path = ".".join([str(c) for c in error.path])
return "{path}: {message}".format(path=dotted_path, message=error.message)
return error.message
def _remove_protected_fields(appstruct):
# Some fields are not to be set by the user, ignore them.
for field in [
"created",
"updated",
"user",
"id",
"links",
"flagged",
"hidden",
"moderation",
"user_info",
]:
appstruct.pop(field, None)
def _shared(permissions, groupid):
return permissions["read"] == ["group:{id}".format(id=groupid)]
def _target_selectors(targets):
# Any targets other than the first in the list are discarded.
# Any fields of the target other than 'selector' are discarded.
if targets and "selector" in targets[0]:
return targets[0]["selector"]
return []
class SearchParamsSchema(colander.Schema):
_separate_replies = colander.SchemaNode(
colander.Boolean(),
missing=False,
description="Return a separate set of annotations and their replies.",
)
sort = colander.SchemaNode(
colander.String(),
validator=colander.OneOf(["created", "updated", "group", "id", "user"]),
missing="updated",
description="The field by which annotations should be sorted.",
)
search_after = colander.SchemaNode(
colander.String(),
missing=colander.drop,
description="""Returns results after the annotation who's sort field
has this value. If specifying a date use the format
yyyy-MM-dd'T'HH:mm:ss.SSX or time in miliseconds since the
epoch. This is used for iteration through large collections
of results.""",
)
limit = colander.SchemaNode(
colander.Integer(),
validator=colander.Range(min=0, max=LIMIT_MAX),
missing=LIMIT_DEFAULT,
description="The maximum number of annotations to return.",
)
order = colander.SchemaNode(
colander.String(),
validator=colander.OneOf(["asc", "desc"]),
missing="desc",
description="The direction of sort.",
)
offset = colander.SchemaNode(
colander.Integer(),
validator=colander.Range(min=0, max=OFFSET_MAX),
missing=0,
description="""The number of initial annotations to skip. This is
used for pagination. Not suitable for paging through
thousands of annotations-search_after should be used
instead.""",
)
group = colander.SchemaNode(
colander.String(),
missing=colander.drop,
description="Limit the results to this group of annotations.",
)
quote = colander.SchemaNode(
colander.Sequence(),
colander.SchemaNode(colander.String()),
missing=colander.drop,
description="""Limit the results to annotations that contain this text inside
the text that was annotated.""",
)
references = colander.SchemaNode(
colander.Sequence(),
colander.SchemaNode(colander.String()),
missing=colander.drop,
description="""Returns annotations that are replies to this parent annotation id.""",
)
tag = colander.SchemaNode(
colander.Sequence(),
colander.SchemaNode(colander.String()),
missing=colander.drop,
description="Limit the results to annotations tagged with the specified value.",
)
tags = colander.SchemaNode(
colander.Sequence(),
colander.SchemaNode(colander.String()),
missing=colander.drop,
description="Alias of tag.",
)
text = colander.SchemaNode(
colander.Sequence(),
colander.SchemaNode(colander.String()),
missing=colander.drop,
description="Limit the results to annotations that contain this text in their textual body.",
)
uri = colander.SchemaNode(
colander.Sequence(),
colander.SchemaNode(colander.String()),
missing=colander.drop,
description="""Limit the results to annotations matching the specific URI
or equivalent URIs. URI can be a URL (a web page address) or
a URN representing another kind of resource such as DOI
(Digital Object Identifier) or a PDF fingerprint.""",
)
uri_parts = colander.SchemaNode(
colander.Sequence(),
colander.SchemaNode(colander.String()),
name="uri.parts",
missing=colander.drop,
description="""Limit the results to annotations with the given keyword
appearing in the URL.""",
)
url = colander.SchemaNode(
colander.Sequence(),
colander.SchemaNode(colander.String()),
missing=colander.drop,
description="Alias of uri.",
)
wildcard_uri = colander.SchemaNode(
colander.Sequence(),
colander.SchemaNode(colander.String()),
validator=_validate_wildcard_uri,
missing=colander.drop,
description="""
Limit the results to annotations matching the wildcard URI.
URI can be a URL (a web page address) or a URN representing another
kind of resource such as DOI (Digital Object Identifier) or a
PDF fingerprint.
`*` will match any character sequence (including an empty one),
and a `_` will match any single character. Wildcards are only permitted
within the path and query parts of the URI.
Escaping wildcards is not supported.
Examples of valid uris":" `http://foo.com/*` `urn:x-pdf:*` `file://localhost/_bc.pdf`
Examples of invalid uris":" `*foo.com` `u_n:*` `file://*` `http://foo.com*`
""",
)
any = colander.SchemaNode(
colander.Sequence(),
colander.SchemaNode(colander.String()),
missing=colander.drop,
description="""Limit the results to annotations whose quote, tags,
text or url fields contain this keyword.""",
)
user = colander.SchemaNode(
colander.String(),
missing=colander.drop,
description="Limit the results to annotations made by the specified user.",
)
def validator(self, node, cstruct):
sort = cstruct["sort"]
search_after = cstruct.get("search_after", None)
if search_after:
if sort in ["updated", "created"] and not self._date_is_parsable(
search_after
):
raise colander.Invalid(
node,
"""search_after must be a parsable date in the form
yyyy-MM-dd'T'HH:mm:ss.SSX
or time in miliseconds since the epoch.""",
)
cstruct["offset"] = 0
def _date_is_parsable(self, value):
try:
if float(value) < 9999:
raise ValueError("This is not in the form ms since the epoch.")
except ValueError:
try:
parse(value)
except ValueError:
return False
return True
| true | true |
790158adb157be042965c36e6ffb3700aa6ba8fe | 528 | py | Python | examples/lsm303_accel_simpletest.py | adafruit/Adafruit_CircuitPython_LSM303AGR_Accel | 14a0708506f00d822e4da879a758362121265eb1 | [
"MIT",
"MIT-0",
"Unlicense"
] | 4 | 2019-12-27T19:51:02.000Z | 2022-01-21T00:00:25.000Z | examples/lsm303_accel_simpletest.py | adafruit/Adafruit_CircuitPython_LSM303AGR_Accel | 14a0708506f00d822e4da879a758362121265eb1 | [
"MIT",
"MIT-0",
"Unlicense"
] | 3 | 2019-10-24T18:15:36.000Z | 2021-09-27T16:46:19.000Z | examples/lsm303_accel_simpletest.py | adafruit/Adafruit_CircuitPython_LSM303AGR_Accel | 14a0708506f00d822e4da879a758362121265eb1 | [
"MIT",
"MIT-0",
"Unlicense"
] | 7 | 2019-12-06T20:04:41.000Z | 2021-07-22T19:13:46.000Z | # SPDX-FileCopyrightText: 2021 ladyada for Adafruit Industries
# SPDX-License-Identifier: MIT
""" Display accelerometer data once per second """
import time
import board
import adafruit_lsm303_accel
i2c = board.I2C() # uses board.SCL and board.SDA
sensor = adafruit_lsm303_accel.LSM303_Accel(i2c)
while True:
acc_x, acc_y, acc_z = sensor.acceleration
print(
"Acceleration (m/s^2): ({0:10.3f}, {1:10.3f}, {2:10.3f})".format(
acc_x, acc_y, acc_z
)
)
print("")
time.sleep(1.0)
| 22.956522 | 73 | 0.666667 |
import time
import board
import adafruit_lsm303_accel
i2c = board.I2C()
sensor = adafruit_lsm303_accel.LSM303_Accel(i2c)
while True:
acc_x, acc_y, acc_z = sensor.acceleration
print(
"Acceleration (m/s^2): ({0:10.3f}, {1:10.3f}, {2:10.3f})".format(
acc_x, acc_y, acc_z
)
)
print("")
time.sleep(1.0)
| true | true |
790158e215a7dfe785db5ea59c5a4d103dc029a6 | 1,509 | py | Python | src/tt_bank/tt_bank/service.py | devapromix/the-tale | 2a10efd3270734f8cf482b4cfbc5353ef8f0494c | [
"BSD-3-Clause"
] | 1 | 2020-04-02T11:51:20.000Z | 2020-04-02T11:51:20.000Z | src/tt_bank/tt_bank/service.py | devapromix/the-tale | 2a10efd3270734f8cf482b4cfbc5353ef8f0494c | [
"BSD-3-Clause"
] | null | null | null | src/tt_bank/tt_bank/service.py | devapromix/the-tale | 2a10efd3270734f8cf482b4cfbc5353ef8f0494c | [
"BSD-3-Clause"
] | null | null | null |
import asyncio
from aiohttp import web
from tt_web import log
from tt_web import postgresql
async def initialize(config, loop):
await postgresql.initialize(config['database'], loop=loop)
async def deinitialize(config, loop):
await postgresql.deinitialize()
async def on_startup(app):
await initialize(app['config'], loop=app.loop)
async def on_cleanup(app):
await deinitialize(app['config'], loop=app.loop)
def register_routers(app):
from . import handlers
app.router.add_post('/accounts/balance', handlers.account_balance)
app.router.add_post('/accounts/history', handlers.account_history)
app.router.add_post('/transactions/start', handlers.start_transaction)
app.router.add_post('/transactions/commit', handlers.commit_transaction)
app.router.add_post('/transactions/rollback', handlers.rollback_transaction)
app.router.add_post('/debug-clear-service', handlers.debug_clear_service)
def create_application(config, loop=None):
app = web.Application(loop=loop)
app['config'] = config
log.initilize(config['log'])
app.on_startup.append(on_startup)
app.on_cleanup.append(on_cleanup)
register_routers(app)
return app
def run_utility(config, utility):
loop = asyncio.get_event_loop()
async def runner():
await initialize(config, loop=loop)
log.initilize(config['log'])
await utility(loop=loop)
await deinitialize(config, loop=loop)
loop.run_until_complete(runner())
| 22.522388 | 80 | 0.726972 |
import asyncio
from aiohttp import web
from tt_web import log
from tt_web import postgresql
async def initialize(config, loop):
await postgresql.initialize(config['database'], loop=loop)
async def deinitialize(config, loop):
await postgresql.deinitialize()
async def on_startup(app):
await initialize(app['config'], loop=app.loop)
async def on_cleanup(app):
await deinitialize(app['config'], loop=app.loop)
def register_routers(app):
from . import handlers
app.router.add_post('/accounts/balance', handlers.account_balance)
app.router.add_post('/accounts/history', handlers.account_history)
app.router.add_post('/transactions/start', handlers.start_transaction)
app.router.add_post('/transactions/commit', handlers.commit_transaction)
app.router.add_post('/transactions/rollback', handlers.rollback_transaction)
app.router.add_post('/debug-clear-service', handlers.debug_clear_service)
def create_application(config, loop=None):
app = web.Application(loop=loop)
app['config'] = config
log.initilize(config['log'])
app.on_startup.append(on_startup)
app.on_cleanup.append(on_cleanup)
register_routers(app)
return app
def run_utility(config, utility):
loop = asyncio.get_event_loop()
async def runner():
await initialize(config, loop=loop)
log.initilize(config['log'])
await utility(loop=loop)
await deinitialize(config, loop=loop)
loop.run_until_complete(runner())
| true | true |
7901597213c34a703821a6a2773bedfe1166770a | 6,588 | py | Python | sdk/python/pulumi_azure_nextgen/servicebus/list_topic_keys.py | pulumi/pulumi-azure-nextgen | 452736b0a1cf584c2d4c04666e017af6e9b2c15c | [
"Apache-2.0"
] | 31 | 2020-09-21T09:41:01.000Z | 2021-02-26T13:21:59.000Z | sdk/python/pulumi_azure_nextgen/servicebus/list_topic_keys.py | pulumi/pulumi-azure-nextgen | 452736b0a1cf584c2d4c04666e017af6e9b2c15c | [
"Apache-2.0"
] | 231 | 2020-09-21T09:38:45.000Z | 2021-03-01T11:16:03.000Z | sdk/python/pulumi_azure_nextgen/servicebus/list_topic_keys.py | pulumi/pulumi-azure-nextgen | 452736b0a1cf584c2d4c04666e017af6e9b2c15c | [
"Apache-2.0"
] | 4 | 2020-09-29T14:14:59.000Z | 2021-02-10T20:38:16.000Z | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from .. import _utilities, _tables
__all__ = [
'ListTopicKeysResult',
'AwaitableListTopicKeysResult',
'list_topic_keys',
]
@pulumi.output_type
class ListTopicKeysResult:
"""
Namespace/ServiceBus Connection String
"""
def __init__(__self__, alias_primary_connection_string=None, alias_secondary_connection_string=None, key_name=None, primary_connection_string=None, primary_key=None, secondary_connection_string=None, secondary_key=None):
if alias_primary_connection_string and not isinstance(alias_primary_connection_string, str):
raise TypeError("Expected argument 'alias_primary_connection_string' to be a str")
pulumi.set(__self__, "alias_primary_connection_string", alias_primary_connection_string)
if alias_secondary_connection_string and not isinstance(alias_secondary_connection_string, str):
raise TypeError("Expected argument 'alias_secondary_connection_string' to be a str")
pulumi.set(__self__, "alias_secondary_connection_string", alias_secondary_connection_string)
if key_name and not isinstance(key_name, str):
raise TypeError("Expected argument 'key_name' to be a str")
pulumi.set(__self__, "key_name", key_name)
if primary_connection_string and not isinstance(primary_connection_string, str):
raise TypeError("Expected argument 'primary_connection_string' to be a str")
pulumi.set(__self__, "primary_connection_string", primary_connection_string)
if primary_key and not isinstance(primary_key, str):
raise TypeError("Expected argument 'primary_key' to be a str")
pulumi.set(__self__, "primary_key", primary_key)
if secondary_connection_string and not isinstance(secondary_connection_string, str):
raise TypeError("Expected argument 'secondary_connection_string' to be a str")
pulumi.set(__self__, "secondary_connection_string", secondary_connection_string)
if secondary_key and not isinstance(secondary_key, str):
raise TypeError("Expected argument 'secondary_key' to be a str")
pulumi.set(__self__, "secondary_key", secondary_key)
@property
@pulumi.getter(name="aliasPrimaryConnectionString")
def alias_primary_connection_string(self) -> str:
"""
Primary connection string of the alias if GEO DR is enabled
"""
return pulumi.get(self, "alias_primary_connection_string")
@property
@pulumi.getter(name="aliasSecondaryConnectionString")
def alias_secondary_connection_string(self) -> str:
"""
Secondary connection string of the alias if GEO DR is enabled
"""
return pulumi.get(self, "alias_secondary_connection_string")
@property
@pulumi.getter(name="keyName")
def key_name(self) -> str:
"""
A string that describes the authorization rule.
"""
return pulumi.get(self, "key_name")
@property
@pulumi.getter(name="primaryConnectionString")
def primary_connection_string(self) -> str:
"""
Primary connection string of the created namespace authorization rule.
"""
return pulumi.get(self, "primary_connection_string")
@property
@pulumi.getter(name="primaryKey")
def primary_key(self) -> str:
"""
A base64-encoded 256-bit primary key for signing and validating the SAS token.
"""
return pulumi.get(self, "primary_key")
@property
@pulumi.getter(name="secondaryConnectionString")
def secondary_connection_string(self) -> str:
"""
Secondary connection string of the created namespace authorization rule.
"""
return pulumi.get(self, "secondary_connection_string")
@property
@pulumi.getter(name="secondaryKey")
def secondary_key(self) -> str:
"""
A base64-encoded 256-bit primary key for signing and validating the SAS token.
"""
return pulumi.get(self, "secondary_key")
class AwaitableListTopicKeysResult(ListTopicKeysResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return ListTopicKeysResult(
alias_primary_connection_string=self.alias_primary_connection_string,
alias_secondary_connection_string=self.alias_secondary_connection_string,
key_name=self.key_name,
primary_connection_string=self.primary_connection_string,
primary_key=self.primary_key,
secondary_connection_string=self.secondary_connection_string,
secondary_key=self.secondary_key)
def list_topic_keys(authorization_rule_name: Optional[str] = None,
namespace_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
topic_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableListTopicKeysResult:
"""
Namespace/ServiceBus Connection String
API Version: 2017-04-01.
:param str authorization_rule_name: The authorization rule name.
:param str namespace_name: The namespace name
:param str resource_group_name: Name of the Resource group within the Azure subscription.
:param str topic_name: The topic name.
"""
__args__ = dict()
__args__['authorizationRuleName'] = authorization_rule_name
__args__['namespaceName'] = namespace_name
__args__['resourceGroupName'] = resource_group_name
__args__['topicName'] = topic_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-nextgen:servicebus:listTopicKeys', __args__, opts=opts, typ=ListTopicKeysResult).value
return AwaitableListTopicKeysResult(
alias_primary_connection_string=__ret__.alias_primary_connection_string,
alias_secondary_connection_string=__ret__.alias_secondary_connection_string,
key_name=__ret__.key_name,
primary_connection_string=__ret__.primary_connection_string,
primary_key=__ret__.primary_key,
secondary_connection_string=__ret__.secondary_connection_string,
secondary_key=__ret__.secondary_key)
| 43.629139 | 224 | 0.713418 |
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from .. import _utilities, _tables
__all__ = [
'ListTopicKeysResult',
'AwaitableListTopicKeysResult',
'list_topic_keys',
]
@pulumi.output_type
class ListTopicKeysResult:
def __init__(__self__, alias_primary_connection_string=None, alias_secondary_connection_string=None, key_name=None, primary_connection_string=None, primary_key=None, secondary_connection_string=None, secondary_key=None):
if alias_primary_connection_string and not isinstance(alias_primary_connection_string, str):
raise TypeError("Expected argument 'alias_primary_connection_string' to be a str")
pulumi.set(__self__, "alias_primary_connection_string", alias_primary_connection_string)
if alias_secondary_connection_string and not isinstance(alias_secondary_connection_string, str):
raise TypeError("Expected argument 'alias_secondary_connection_string' to be a str")
pulumi.set(__self__, "alias_secondary_connection_string", alias_secondary_connection_string)
if key_name and not isinstance(key_name, str):
raise TypeError("Expected argument 'key_name' to be a str")
pulumi.set(__self__, "key_name", key_name)
if primary_connection_string and not isinstance(primary_connection_string, str):
raise TypeError("Expected argument 'primary_connection_string' to be a str")
pulumi.set(__self__, "primary_connection_string", primary_connection_string)
if primary_key and not isinstance(primary_key, str):
raise TypeError("Expected argument 'primary_key' to be a str")
pulumi.set(__self__, "primary_key", primary_key)
if secondary_connection_string and not isinstance(secondary_connection_string, str):
raise TypeError("Expected argument 'secondary_connection_string' to be a str")
pulumi.set(__self__, "secondary_connection_string", secondary_connection_string)
if secondary_key and not isinstance(secondary_key, str):
raise TypeError("Expected argument 'secondary_key' to be a str")
pulumi.set(__self__, "secondary_key", secondary_key)
@property
@pulumi.getter(name="aliasPrimaryConnectionString")
def alias_primary_connection_string(self) -> str:
return pulumi.get(self, "alias_primary_connection_string")
@property
@pulumi.getter(name="aliasSecondaryConnectionString")
def alias_secondary_connection_string(self) -> str:
return pulumi.get(self, "alias_secondary_connection_string")
@property
@pulumi.getter(name="keyName")
def key_name(self) -> str:
return pulumi.get(self, "key_name")
@property
@pulumi.getter(name="primaryConnectionString")
def primary_connection_string(self) -> str:
return pulumi.get(self, "primary_connection_string")
@property
@pulumi.getter(name="primaryKey")
def primary_key(self) -> str:
return pulumi.get(self, "primary_key")
    @property
    @pulumi.getter(name="secondaryConnectionString")
    def secondary_connection_string(self) -> str:
        """Return the 'secondary_connection_string' value stored on this result."""
        return pulumi.get(self, "secondary_connection_string")
    @property
    @pulumi.getter(name="secondaryKey")
    def secondary_key(self) -> str:
        """Return the 'secondary_key' value stored on this result."""
        return pulumi.get(self, "secondary_key")
class AwaitableListTopicKeysResult(ListTopicKeysResult):
    """Awaitable wrapper around ListTopicKeysResult.

    `__await__` never actually yields (the `if False` keeps the function a
    generator so the object supports `await`) and immediately returns a plain
    ListTopicKeysResult built from this object's fields.
    """
    # pylint: disable=using-constant-test
    def __await__(self):
        if False:
            yield self
        return ListTopicKeysResult(
            alias_primary_connection_string=self.alias_primary_connection_string,
            alias_secondary_connection_string=self.alias_secondary_connection_string,
            key_name=self.key_name,
            primary_connection_string=self.primary_connection_string,
            primary_key=self.primary_key,
            secondary_connection_string=self.secondary_connection_string,
            secondary_key=self.secondary_key)
def list_topic_keys(authorization_rule_name: Optional[str] = None,
                    namespace_name: Optional[str] = None,
                    resource_group_name: Optional[str] = None,
                    topic_name: Optional[str] = None,
                    opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableListTopicKeysResult:
    """Invoke 'azure-nextgen:servicebus:listTopicKeys' and return the keys/connection strings.

    :param authorization_rule_name: Name of the topic authorization rule to read keys for.
    :param namespace_name: Service Bus namespace containing the topic.
    :param resource_group_name: Resource group that owns the namespace.
    :param topic_name: Topic the authorization rule belongs to.
    :param opts: Optional invoke options; defaults are created (with the
        provider version filled in) when omitted.
    """
    __args__ = dict()
    __args__['authorizationRuleName'] = authorization_rule_name
    __args__['namespaceName'] = namespace_name
    __args__['resourceGroupName'] = resource_group_name
    __args__['topicName'] = topic_name
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    __ret__ = pulumi.runtime.invoke('azure-nextgen:servicebus:listTopicKeys', __args__, opts=opts, typ=ListTopicKeysResult).value

    return AwaitableListTopicKeysResult(
        alias_primary_connection_string=__ret__.alias_primary_connection_string,
        alias_secondary_connection_string=__ret__.alias_secondary_connection_string,
        key_name=__ret__.key_name,
        primary_connection_string=__ret__.primary_connection_string,
        primary_key=__ret__.primary_key,
        secondary_connection_string=__ret__.secondary_connection_string,
        secondary_key=__ret__.secondary_key)
| true | true |
790159f76508832a0f6aa45d9f513f167723094e | 7,570 | py | Python | Dict-search/__init__.py | what-is-me/WordListEnquiry | e80597d70a3f79e442149022df7e45d81a272057 | [
"Apache-2.0"
] | 4 | 2021-11-05T19:48:53.000Z | 2022-02-12T11:21:40.000Z | Dict-search/__init__.py | what-is-me/wordlisttranslate | e80597d70a3f79e442149022df7e45d81a272057 | [
"Apache-2.0"
] | null | null | null | Dict-search/__init__.py | what-is-me/wordlisttranslate | e80597d70a3f79e442149022df7e45d81a272057 | [
"Apache-2.0"
] | 3 | 2021-11-05T19:48:54.000Z | 2022-03-25T10:37:54.000Z | '''
Author: what-is-me
E-mail: nt_cqc@126.com
Github: https://github.com/what-is-me
LeetCode: https://leetcode-cn.com/u/what-is-me/
Date: 2021-05-17 23:22:14
LastEditors: what-is-me
LastEditTime: 2021-05-19 12:33:23
Description: 查询单个单词/词组意思
'''
import re
import urllib.parse
import requests
class getimg:
    """Parsers that extract a definition string from the raw HTML of one of
    several online dictionaries.

    Each method receives the full page HTML of its provider and returns the
    cleaned definition text, or the provider's "not found" message (未收录).
    The methods are intentionally self-less and called as ``getimg.x(html)``.
    """

    def youdao(html):
        """Parse a Youdao (English) result page."""
        # Keep only the block between the headword heading and the
        # web-definitions section.
        html = html.split('</h2>')[-1]
        html = html.split('<span>网络释义</span>')[0]
        reg = r'<li>(.*?)</li>'
        img = re.compile(reg)
        img_list = re.findall(img, html)
        result = ""
        for s in img_list:
            if (s != ""):
                result = result + s + ";"
        result = "".join(result.split())
        result = re.sub(r'<(.*?)>', '', result)
        # Bug fix: the original compared result[0:1] (a single character)
        # against '<a' (two characters), which could never be true.
        if result == '' or result[0:2] == '<a':
            return "未收录"
        return result

    def jinshan(html):
        """Parse an iCIBA (Kingsoft) result page."""
        reg = r'<ul class="Mean_part__1RA2V"><li>(.*?)</ul>'
        img = re.compile(reg)
        img_list = re.findall(img, html)
        result = "".join(img_list)
        # Turn remaining angle brackets into square brackets before
        # stripping whole tags.
        result = re.sub('<', '[', result)
        result = re.sub('>', ']', result)
        result = re.sub(r'<(.*?)>', '', result)
        if result == "":
            return "未收录"
        return result

    def bing(html):
        """Parse a Bing dictionary page (reads the meta description tag)."""
        reg = r'<meta name="description" content="(.*?)" />'
        img = re.compile(reg)
        result = re.search(img, html).group()
        result = result.split('<meta name="description" content="')[-1]
        result = result.split('" />')[0]
        result = re.sub('必应词典为您提供', '', result)
        result = re.sub('的释义', '', result)
        result = re.sub('英', '', result)
        result = re.sub('美', '', result)
        result = re.sub(',', '', result)
        result = result.split('网络释义:')[0]
        result = re.sub(r'\[(.*?)\]', '', result)
        # Bug fix: "必应词典" is four characters, so the original
        # result[0:3] comparison could never match the fallback title.
        if result == "" or result[0:4] == "必应词典":
            return "未收录"
        return result

    def haici(html):
        """Parse a dict.cn (Haici) result page."""
        html = html.split('<div class="basic clearfix">')[-1]
        html = html.split('<li style="padding-top: 25px;">')[0]
        reg1 = r'<span>(.*?)</span>'
        img1 = re.compile(reg1)
        img_list1 = re.findall(img1, html)
        reg2 = r'<strong>(.*?)</strong>'
        img2 = re.compile(reg2)
        img_list2 = re.findall(img2, html)
        if len(img_list2) == 0:
            result = "未收录"
            return result
        result = ''
        if(len(img_list1) == 0):
            for i in range(0, len(img_list2)):
                result += img_list2[i]
        else:
            # Bug fix: pair up only as many entries as both lists have;
            # the original indexed img_list2[i] past its end when the page
            # had more <span> tags than <strong> tags.
            for i in range(0, min(len(img_list1), len(img_list2))):
                result += "["+img_list1[i]+"]"
                result += img_list2[i]
        return result

    def youdao_jp(html):
        """Parse a Youdao Japanese result page."""
        html = html.split('<!--日汉词典结果 -->')[-1]
        html = html.split('<!--网络翻译-->')[0]
        result = "".join(html.split())
        result = re.sub(r'<span class="keyword">(.*?)</span>', '', result)
        result = re.sub(r'<h4>(.*?)</sup>', '', result)
        result = re.sub(r'<sup>(.*?)</sup>', '', result)
        result = re.sub('<span>网络释义</span>', '', result)
        result = re.sub(r'例证:(.*?)li>', '', result)
        result = re.sub(r'谚语或成语:(.*?)li>', '', result)
        result = re.sub(r'<p class="exam-sen">(.*?)</p>', '', result)
        result = re.sub(r'<(.*?)>', '', result)
        # Bug fix: guard against an empty result before indexing result[0]
        # (the original raised IndexError on unrecognized pages).
        if result == "" or result[0] == "【":
            return "未收录,日语暂不支持有道翻译函数"
        result = result.split('【')[-1]
        return '【'+result

    def youdao_fr(html):
        """Parse a Youdao French result page."""
        html = html.split('<!--Title -->')[-1]
        html = html.split(
            '<div id="webTrans" class="trans-wrapper trans-tab">')[0]
        result = re.sub(r'<(.*?)>', '', html)
        return "".join(result.split())

    def de(html):
        """Parse a godic.net (German) result page."""
        html = html.split('<div id="ExpFCChild" class="expDiv">')[-1]
        # Bug fix: the original scanned with `while html[n] != '\n'` and
        # raised IndexError when the remainder contained no newline.
        end = html.find('\n')
        result = html[0:end-1] if end != -1 else html
        result = re.sub(r'<i>(.*?)</i>', '', result)
        result = re.sub(r'<span class=eg>(.*?)</span>', '', result)
        result = re.sub(r'<span id="phrase">(.*?)</span>', '', result)
        result = re.sub(r'<[a-zA-Z]{1,}(.*?)>', '', result)
        result = re.sub(r'<\/.*?>', '', result)
        result = re.sub(r'<\!.*?>', '', result)
        result = "".join(result.split())
        result = re.sub('赞踩改进更换举报initThumbnail', '', result)
        result = re.sub('欧路软件版权所有', '', result)
        result = re.sub('欧路软件', '', result)
        result = re.sub('德语助手', '', result)
        # Bug fix: "()" is an empty regex group (matches nothing visible);
        # escape it so literal empty parentheses are actually removed.
        result = re.sub(r'\(\)', '', result)
        return result
def getImg(html, choice):
    """Dispatch *html* to the parser matching dictionary source *choice* (1-7)."""
    parsers = {
        1: getimg.youdao,
        2: getimg.jinshan,
        3: getimg.bing,
        4: getimg.haici,
        5: getimg.youdao_jp,
        6: getimg.youdao_fr,
        7: getimg.de,
    }
    parser = parsers.get(choice)
    if parser is not None:
        return parser(html)
def url(choice):
    """Return the query base URL for dictionary source *choice* (1-7)."""
    endpoints = {
        1: "http://dict.youdao.com/w/eng/",
        2: "https://www.iciba.com/word?w=",
        3: "https://cn.bing.com/dict/search?q=",
        4: "https://dict.cn/search?q=",
        5: "http://www.youdao.com/w/jap/",
        6: "http://www.youdao.com/w/fr/",
        7: "http://www.godic.net/dicts/de/",
    }
    return endpoints.get(choice)
def phrase(choice, word):
    """Encode the spaces in *word* as required by dictionary source *choice*."""
    space_encoding = {1: '%20', 2: '%20', 5: '%20', 6: '%20', 3: '+', 4: '+'}
    if choice in space_encoding:
        return word.replace(' ', space_encoding[choice])
    if choice == 7:
        # godic.net needs full URL quoting, not just space replacement.
        return urllib.parse.quote(word)
def getHtml(url):
    """Download *url* with a browser-like User-Agent and return the body as UTF-8 text."""
    request_headers = {
        "User-Agent": "User-Agent:Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0;"}
    response = requests.get(url, headers=request_headers)
    response.encoding = 'utf-8'
    return response.text
def help():
    """Print usage notes (available sources and public functions) to stdout."""
    usage_text = '''
    ==================================================================================
    Help:
    choice:
        英>>
        1. 有道
        2. 金山
        3. bing
        4. 海词
        日>>
        5. 有道
        法>>
        6. 有道
        德>>
        7. 德语助手
        默认有道查询源
    functions:
        查询单个单词/词组:
            search(word, choice=1)
        查询单词/词组列表,并生成[字典(dict)]:
            wordlist_todict(wordlis, choice=1)
        查询单词/词组列表,并生成列表:
            wordlist_tolist(wordlist, choice=1, div = " : ", needword = True)
            div是输出的list里单词和意思之间的分隔符
            needword为False则表示return纯解释列表
    ==================================================================================
    '''
    print(usage_text)
def search(word, choice=1):
    """Look up a single word or phrase and return its parsed definition."""
    page_url = url(choice) + phrase(choice, word)
    return getImg(getHtml(page_url), choice)
def wordlist_todict(wordlist, choice=1):
    """Look up every entry of *wordlist*; return a {word: definition} dict."""
    return {entry: search(entry, choice) for entry in wordlist}
def wordlist_tolist(wordlist, choice=1, div=" : ", needword=True):
    """Look up every entry of *wordlist*; return "word<div>meaning" strings.

    When *needword* is False, only the meanings are returned.
    """
    return [
        ((entry + div) if needword else "") + search(entry, choice)
        for entry in wordlist
    ]
| 31.410788 | 100 | 0.480713 | import re
import urllib.parse
import requests
class getimg:
def youdao(html):
html = html.split('</h2>')[-1]
html = html.split('<span>网络释义</span>')[0]
reg = r'<li>(.*?)</li>'
img = re.compile(reg)
img_list = re.findall(img, html)
result = ""
for s in img_list:
if (s != ""):
result = result + s + ";"
result = "".join(result.split())
result = re.sub(r'<(.*?)>', '', result)
if result == '' or result[0:1] == '<a':
return "未收录"
return result
def jinshan(html):
reg = r'<ul class="Mean_part__1RA2V"><li>(.*?)</ul>'
img = re.compile(reg)
img_list = re.findall(img, html)
result = "".join(img_list)
result = re.sub('<', '[', result)
result = re.sub('>', ']', result)
result = re.sub(r'<(.*?)>', '', result)
if result == "":
return "未收录"
return result
def bing(html):
reg = r'<meta name="description" content="(.*?)" />'
img = re.compile(reg)
result = re.search(img, html).group()
result = result.split('<meta name="description" content="')[-1]
result = result.split('" />')[0]
result = re.sub('必应词典为您提供', '', result)
result = re.sub('的释义', '', result)
result = re.sub('英', '', result)
result = re.sub('美', '', result)
result = re.sub(',', '', result)
result = result.split('网络释义:')[0]
result = re.sub(r'\[(.*?)\]', '', result)
if result == "" or result[0:3] == "必应词典":
return "未收录"
return result
def haici(html):
html = html.split('<div class="basic clearfix">')[-1]
html = html.split('<li style="padding-top: 25px;">')[0]
reg1 = r'<span>(.*?)</span>'
img1 = re.compile(reg1)
img_list1 = re.findall(img1, html)
reg2 = r'<strong>(.*?)</strong>'
img2 = re.compile(reg2)
img_list2 = re.findall(img2, html)
if len(img_list2) == 0:
result = "未收录"
return result
result = ''
if(len(img_list1) == 0):
for i in range(0, len(img_list2)):
result += img_list2[i]
else:
for i in range(0, len(img_list1)):
result += "["+img_list1[i]+"]"
result += img_list2[i]
return result
def youdao_jp(html):
html = html.split('<!--日汉词典结果 -->')[-1]
html = html.split('<!--网络翻译-->')[0]
result = "".join(html.split())
result = re.sub(r'<span class="keyword">(.*?)</span>', '', result)
result = re.sub(r'<h4>(.*?)</sup>', '', result)
result = re.sub(r'<sup>(.*?)</sup>', '', result)
result = re.sub('<span>网络释义</span>', '', result)
result = re.sub(r'例证:(.*?)li>', '', result)
result = re.sub(r'谚语或成语:(.*?)li>', '', result)
result = re.sub(r'<p class="exam-sen">(.*?)</p>', '', result)
result = re.sub(r'<(.*?)>', '', result)
if result[0] == "【":
return "未收录,日语暂不支持有道翻译函数"
result = result.split('【')[-1]
return '【'+result
def youdao_fr(html):
html = html.split('<!--Title -->')[-1]
html = html.split(
'<div id="webTrans" class="trans-wrapper trans-tab">')[0]
result = re.sub(r'<(.*?)>', '', html)
return "".join(result.split())
def de(html):
html = html.split('<div id="ExpFCChild" class="expDiv">')[-1]
n = 0
while(html[n] != '\n'):
n += 1
result = html[0:n-1]
result = re.sub(r'<i>(.*?)</i>', '', result)
result = re.sub(r'<span class=eg>(.*?)</span>', '', result)
result = re.sub(r'<span id="phrase">(.*?)</span>', '', result)
result = re.sub(r'<[a-zA-Z]{1,}(.*?)>', '', result)
result = re.sub(r'<\/.*?>', '', result)
result = re.sub(r'<\!.*?>', '', result)
result = "".join(result.split())
result = re.sub('赞踩改进更换举报initThumbnail', '', result)
result = re.sub('欧路软件版权所有', '', result)
result = re.sub('欧路软件', '', result)
result = re.sub('德语助手', '', result)
result = re.sub("()", '', result)
return result
def getImg(html, choice):
if(choice == 1):
return getimg.youdao(html)
if(choice == 2):
return getimg.jinshan(html)
if(choice == 3):
return getimg.bing(html)
if(choice == 4):
return getimg.haici(html)
if(choice == 5):
return getimg.youdao_jp(html)
if(choice == 6):
return getimg.youdao_fr(html)
if(choice == 7):
return getimg.de(html)
def url(choice):
if(choice == 1):
return "http://dict.youdao.com/w/eng/"
if(choice == 2):
return "https://www.iciba.com/word?w="
if(choice == 3):
return "https://cn.bing.com/dict/search?q="
if(choice == 4):
return "https://dict.cn/search?q="
if(choice == 5):
return "http://www.youdao.com/w/jap/"
if(choice == 6):
return "http://www.youdao.com/w/fr/"
if(choice == 7):
return "http://www.godic.net/dicts/de/"
def phrase(choice, word):
if(choice == 1):
return re.sub(' ', '%20', word)
if(choice == 2):
return re.sub(' ', '%20', word)
if(choice == 3):
return re.sub(' ', '+', word)
if(choice == 4):
return re.sub(' ', '+', word)
if(choice == 5):
return re.sub(' ', '%20', word)
if(choice == 6):
return re.sub(' ', '%20', word)
if(choice == 7):
ans = urllib.parse.quote(word)
return ans
def getHtml(url):
headers = {
"User-Agent": "User-Agent:Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0;"}
page = requests.get(url, headers=headers)
page.encoding = 'utf-8'
html = page.text
return html
def help():
help = '''
==================================================================================
Help:
choice:
英>>
1. 有道
2. 金山
3. bing
4. 海词
日>>
5. 有道
法>>
6. 有道
德>>
7. 德语助手
默认有道查询源
functions:
查询单个单词/词组:
search(word, choice=1)
查询单词/词组列表,并生成[字典(dict)]:
wordlist_todict(wordlis, choice=1)
查询单词/词组列表,并生成列表:
wordlist_tolist(wordlist, choice=1, div = " : ", needword = True)
div是输出的list里单词和意思之间的分隔符
needword为False则表示return纯解释列表
==================================================================================
'''
print(help)
def search(word, choice=1):
_url = url(choice) + phrase(choice, word)
_html = getHtml(_url)
return getImg(_html, choice)
def wordlist_todict(wordlist, choice=1):
_dict = {}
for word in wordlist:
_dict[word] = search(word, choice)
return _dict
def wordlist_tolist(wordlist, choice=1, div=" : ", needword=True):
result_list = []
for word in wordlist:
result_list.append(
((word + div)if needword else "") + search(word, choice))
return result_list
| true | true |
79015c12f6ea88bd8cfe5cd155f92eac2d63e612 | 6,206 | py | Python | semseg/models/bisenetv1.py | Genevievekim/semantic-segmentation-1 | f28b026e44cff80fe3ca4cac94cea27e4073821b | [
"BSD-3-Clause"
] | 196 | 2021-08-22T10:01:34.000Z | 2022-03-29T09:59:51.000Z | semseg/models/bisenetv1.py | Genevievekim/semantic-segmentation-1 | f28b026e44cff80fe3ca4cac94cea27e4073821b | [
"BSD-3-Clause"
] | 21 | 2021-08-22T09:59:02.000Z | 2022-03-29T15:22:46.000Z | semseg/models/bisenetv1.py | Genevievekim/semantic-segmentation-1 | f28b026e44cff80fe3ca4cac94cea27e4073821b | [
"BSD-3-Clause"
] | 36 | 2021-08-22T08:59:40.000Z | 2022-03-28T10:13:20.000Z | import torch
import math
from torch import nn, Tensor
from torch.nn import functional as F
from semseg.models.backbones import *
from semseg.models.modules.common import ConvModule
class SpatialPath(nn.Module):
    """High-resolution spatial branch: three stride-2 convolutions (overall
    8x downsampling) followed by a 1x1 projection to ``c2`` channels."""

    def __init__(self, c1, c2) -> None:
        super().__init__()
        width = 64
        self.conv_7x7 = ConvModule(c1, width, 7, 2, 3)
        self.conv_3x3_1 = ConvModule(width, width, 3, 2, 1)
        self.conv_3x3_2 = ConvModule(width, width, 3, 2, 1)
        self.conv_1x1 = ConvModule(width, c2, 1, 1, 0)

    def forward(self, x):
        """Reduce the input to a 1/8-resolution, c2-channel feature map."""
        return self.conv_1x1(self.conv_3x3_2(self.conv_3x3_1(self.conv_7x7(x))))
class ContextPath(nn.Module):
    """Context branch: runs the backbone, refines its two deepest feature maps
    with attention refinement modules plus global average context, and
    upsamples them (shape comments below assume a 4x3x1024x2048 input)."""
    def __init__(self, backbone: nn.Module) -> None:
        super().__init__()
        self.backbone = backbone
        # Channel counts of the backbone's two deepest stages.
        c3, c4 = self.backbone.channels[-2:]
        self.arm16 = AttentionRefinmentModule(c3, 128)
        self.arm32 = AttentionRefinmentModule(c4, 128)
        # Image-level context pooled from the deepest feature map.
        self.global_context = nn.Sequential(
            nn.AdaptiveAvgPool2d(1),
            ConvModule(c4, 128, 1, 1, 0)
        )
        self.up16 = nn.Upsample(scale_factor=2.0, mode='bilinear', align_corners=True)
        self.up32 = nn.Upsample(scale_factor=2.0, mode='bilinear', align_corners=True)
        self.refine16 = ConvModule(128, 128, 3, 1, 1)
        self.refine32 = ConvModule(128, 128, 3, 1, 1)

    def forward(self, x):
        """Return (refined+upsampled 1/16 features, refined+upsampled 1/32 features)."""
        _, _, down16, down32 = self.backbone(x)     # 4x256x64x128, 4x512x32x64
        arm_down16 = self.arm16(down16)             # 4x128x64x128
        arm_down32 = self.arm32(down32)             # 4x128x32x64
        global_down32 = self.global_context(down32) # 4x128x1x1
        # Broadcast the pooled context back to the 1/32 spatial size.
        global_down32 = F.interpolate(global_down32, size=down32.size()[2:], mode='bilinear', align_corners=True)   # 4x128x32x64
        arm_down32 = arm_down32 + global_down32     # 4x128x32x64
        arm_down32 = self.up32(arm_down32)          # 4x128x64x128
        arm_down32 = self.refine32(arm_down32)      # 4x128x64x128
        # Fuse the upsampled deep features into the 1/16 branch.
        arm_down16 = arm_down16 + arm_down32        # 4x128x64x128
        arm_down16 = self.up16(arm_down16)          # 4x128x128x256
        arm_down16 = self.refine16(arm_down16)      # 4x128x128x256
        return arm_down16, arm_down32
class AttentionRefinmentModule(nn.Module):
    """Attention refinement: a 3x3 conv followed by a channel gate built from
    global average pooling, a 1x1 conv, batch norm and a sigmoid."""

    def __init__(self, c1, c2) -> None:
        super().__init__()
        self.conv_3x3 = ConvModule(c1, c2, 3, 1, 1)
        self.attention = nn.Sequential(
            nn.AdaptiveAvgPool2d(1),
            nn.Conv2d(c2, c2, 1, bias=False),
            nn.BatchNorm2d(c2),
            nn.Sigmoid(),
        )

    def forward(self, x):
        """Convolve *x*, then reweight each channel by its attention score."""
        features = self.conv_3x3(x)
        return features * self.attention(features)
class FeatureFusionModule(nn.Module):
    """Fuse spatial- and context-path features: channel-concatenate, project
    with a 1x1 conv, then add a channel-attention-weighted copy of the
    projection (squeeze-and-excitation style gate)."""

    def __init__(self, c1, c2, reduction=1) -> None:
        super().__init__()
        self.conv_1x1 = ConvModule(c1, c2, 1, 1, 0)
        self.attention = nn.Sequential(
            nn.AdaptiveAvgPool2d(1),
            nn.Conv2d(c2, c2 // reduction, 1, bias=False),
            nn.ReLU(True),
            nn.Conv2d(c2 // reduction, c2, 1, bias=False),
            nn.Sigmoid(),
        )

    def forward(self, x1, x2):
        """Concatenate *x1* and *x2* along channels and gate the fused map."""
        fused = self.conv_1x1(torch.cat([x1, x2], dim=1))
        return fused + fused * self.attention(fused)
class Head(nn.Module):
    """Prediction head: 3x3 conv, then a 1x1 conv producing
    n_classes * r * r channels, then PixelShuffle upsampling by r
    (r = *upscale_factor*). Aux heads use a wider hidden width."""

    def __init__(self, c1, n_classes, upscale_factor, is_aux=False) -> None:
        super().__init__()
        hidden = 256 if is_aux else 64
        out_channels = n_classes * upscale_factor * upscale_factor
        self.conv_3x3 = ConvModule(c1, hidden, 3, 1, 1)
        self.conv_1x1 = nn.Conv2d(hidden, out_channels, 1, 1, 0)
        self.upscale = nn.PixelShuffle(upscale_factor)

    def forward(self, x):
        """Produce per-class logits upsampled by the pixel-shuffle factor."""
        return self.upscale(self.conv_1x1(self.conv_3x3(x)))
class BiSeNetv1(nn.Module):
    """BiSeNet v1 segmentation network: a spatial path, a backbone-driven
    context path, a feature fusion module and pixel-shuffle prediction heads.
    The two auxiliary heads are only evaluated in training mode."""

    def __init__(self, backbone: str = 'ResNet-18', num_classes: int = 19) -> None:
        super().__init__()
        backbone, variant = backbone.split('-')
        # NOTE(review): eval() executes arbitrary code if the backbone string
        # is untrusted; it is expected to name a class imported from
        # semseg.models.backbones (e.g. 'ResNet', 'MobileNetV2').
        self.context_path = ContextPath(eval(backbone)(variant))
        self.spatial_path = SpatialPath(3, 128)
        self.ffm = FeatureFusionModule(256, 256)
        self.output_head = Head(256, num_classes, upscale_factor=8, is_aux=False)
        self.context16_head = Head(128, num_classes, upscale_factor=8, is_aux=True)
        self.context32_head = Head(128, num_classes, upscale_factor=16, is_aux=True)

        self.apply(self._init_weights)

    def _init_weights(self, m: nn.Module) -> None:
        """Kaiming-normal init for convolutions; ones/zeros for norm layers."""
        if isinstance(m, nn.Conv2d):
            fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
            # Bug fix: the original wrote `fan_out // m.groups` and discarded
            # the result; grouped convolutions must divide fan_out before
            # computing the Kaiming std.
            fan_out //= m.groups
            m.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
            if m.bias is not None:
                nn.init.zeros_(m.bias)
        elif isinstance(m, (nn.LayerNorm, nn.BatchNorm2d)):
            nn.init.ones_(m.weight)
            nn.init.zeros_(m.bias)

    def init_pretrained(self, pretrained: str = None) -> None:
        """Load backbone weights from the checkpoint path (no-op when falsy)."""
        if pretrained:
            self.context_path.backbone.load_state_dict(torch.load(pretrained, map_location='cpu'), strict=False)

    def forward(self, x):                               # 4x3x1024x2048
        spatial_out = self.spatial_path(x)              # 4x128x128x256
        context16, context32 = self.context_path(x)     # 4x128x128x256, 4x128x64x128
        fm_fuse = self.ffm(spatial_out, context16)      # 4x256x128x256
        output = self.output_head(fm_fuse)              # 4xn_classesx1024x2048

        if self.training:
            context_out16 = self.context16_head(context16)   # 4xn_classesx1024x2048
            context_out32 = self.context32_head(context32)   # 4xn_classesx1024x2048
            return output, context_out16, context_out32
        return output
if __name__ == '__main__':
    # Smoke test: build the model and push a single random image through it.
    model = BiSeNetv1('MobileNetV2-1.0', 19)
    # model.init_pretrained('checkpoints/backbones/resnet/resnet18.pth')
    model.eval()  # eval mode -> forward returns only the main output tensor
    image = torch.randn(1, 3, 224, 224)
    output = model(image)
    print(output.shape)
import math
from torch import nn, Tensor
from torch.nn import functional as F
from semseg.models.backbones import *
from semseg.models.modules.common import ConvModule
class SpatialPath(nn.Module):
def __init__(self, c1, c2) -> None:
super().__init__()
ch = 64
self.conv_7x7 = ConvModule(c1, ch, 7, 2, 3)
self.conv_3x3_1 = ConvModule(ch, ch, 3, 2, 1)
self.conv_3x3_2 = ConvModule(ch, ch, 3, 2, 1)
self.conv_1x1 = ConvModule(ch, c2, 1, 1, 0)
def forward(self, x):
x = self.conv_7x7(x)
x = self.conv_3x3_1(x)
x = self.conv_3x3_2(x)
return self.conv_1x1(x)
class ContextPath(nn.Module):
def __init__(self, backbone: nn.Module) -> None:
super().__init__()
self.backbone = backbone
c3, c4 = self.backbone.channels[-2:]
self.arm16 = AttentionRefinmentModule(c3, 128)
self.arm32 = AttentionRefinmentModule(c4, 128)
self.global_context = nn.Sequential(
nn.AdaptiveAvgPool2d(1),
ConvModule(c4, 128, 1, 1, 0)
)
self.up16 = nn.Upsample(scale_factor=2.0, mode='bilinear', align_corners=True)
self.up32 = nn.Upsample(scale_factor=2.0, mode='bilinear', align_corners=True)
self.refine16 = ConvModule(128, 128, 3, 1, 1)
self.refine32 = ConvModule(128, 128, 3, 1, 1)
def forward(self, x):
_, _, down16, down32 = self.backbone(x)
arm_down16 = self.arm16(down16)
arm_down32 = self.arm32(down32)
global_down32 = self.global_context(down32)
global_down32 = F.interpolate(global_down32, size=down32.size()[2:], mode='bilinear', align_corners=True)
arm_down32 = arm_down32 + global_down32
arm_down32 = self.up32(arm_down32)
arm_down32 = self.refine32(arm_down32)
arm_down16 = arm_down16 + arm_down32
arm_down16 = self.up16(arm_down16)
arm_down16 = self.refine16(arm_down16)
return arm_down16, arm_down32
class AttentionRefinmentModule(nn.Module):
def __init__(self, c1, c2) -> None:
super().__init__()
self.conv_3x3 = ConvModule(c1, c2, 3, 1, 1)
self.attention = nn.Sequential(
nn.AdaptiveAvgPool2d(1),
nn.Conv2d(c2, c2, 1, bias=False),
nn.BatchNorm2d(c2),
nn.Sigmoid()
)
def forward(self, x):
fm = self.conv_3x3(x)
fm_se = self.attention(fm)
return fm * fm_se
class FeatureFusionModule(nn.Module):
def __init__(self, c1, c2, reduction=1) -> None:
super().__init__()
self.conv_1x1 = ConvModule(c1, c2, 1, 1, 0)
self.attention = nn.Sequential(
nn.AdaptiveAvgPool2d(1),
nn.Conv2d(c2, c2 // reduction, 1, bias=False),
nn.ReLU(True),
nn.Conv2d(c2 // reduction, c2, 1, bias=False),
nn.Sigmoid()
)
def forward(self, x1, x2):
fm = torch.cat([x1, x2], dim=1)
fm = self.conv_1x1(fm)
fm_se = self.attention(fm)
return fm + fm * fm_se
class Head(nn.Module):
def __init__(self, c1, n_classes, upscale_factor, is_aux=False) -> None:
super().__init__()
ch = 256 if is_aux else 64
c2 = n_classes * upscale_factor * upscale_factor
self.conv_3x3 = ConvModule(c1, ch, 3, 1, 1)
self.conv_1x1 = nn.Conv2d(ch, c2, 1, 1, 0)
self.upscale = nn.PixelShuffle(upscale_factor)
def forward(self, x):
x = self.conv_1x1(self.conv_3x3(x))
return self.upscale(x)
class BiSeNetv1(nn.Module):
def __init__(self, backbone: str = 'ResNet-18', num_classes: int = 19) -> None:
super().__init__()
backbone, variant = backbone.split('-')
self.context_path = ContextPath(eval(backbone)(variant))
self.spatial_path = SpatialPath(3, 128)
self.ffm = FeatureFusionModule(256, 256)
self.output_head = Head(256, num_classes, upscale_factor=8, is_aux=False)
self.context16_head = Head(128, num_classes, upscale_factor=8, is_aux=True)
self.context32_head = Head(128, num_classes, upscale_factor=16, is_aux=True)
self.apply(self._init_weights)
def _init_weights(self, m: nn.Module) -> None:
if isinstance(m, nn.Conv2d):
fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
fan_out // m.groups
m.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
if m.bias is not None:
nn.init.zeros_(m.bias)
elif isinstance(m, (nn.LayerNorm, nn.BatchNorm2d)):
nn.init.ones_(m.weight)
nn.init.zeros_(m.bias)
def init_pretrained(self, pretrained: str = None) -> None:
if pretrained:
self.context_path.backbone.load_state_dict(torch.load(pretrained, map_location='cpu'), strict=False)
def forward(self, x):
spatial_out = self.spatial_path(x)
context16, context32 = self.context_path(x)
fm_fuse = self.ffm(spatial_out, context16)
output = self.output_head(fm_fuse)
if self.training:
context_out16 = self.context16_head(context16)
context_out32 = self.context32_head(context32)
return output, context_out16, context_out32
return output
if __name__ == '__main__':
model = BiSeNetv1('MobileNetV2-1.0', 19)
model.eval()
image = torch.randn(1, 3, 224, 224)
output = model(image)
print(output.shape) | true | true |
79015ca13c31ca9be0283a0dc30c6ae249a98364 | 6,589 | py | Python | processor/processor/requests_processor.py | gabrielbazan/sate | ff160d7f024a0422f3cd279b92361ec6e7fd7420 | [
"MIT"
] | null | null | null | processor/processor/requests_processor.py | gabrielbazan/sate | ff160d7f024a0422f3cd279b92361ec6e7fd7420 | [
"MIT"
] | null | null | null | processor/processor/requests_processor.py | gabrielbazan/sate | ff160d7f024a0422f3cd279b92361ec6e7fd7420 | [
"MIT"
] | null | null | null | import json
from time import sleep
from uuid import uuid4
from datetime import datetime
import logging
from kafka import KafkaProducer, KafkaConsumer
from settings import (
KAFKA_BOOTSTRAP_SERVER,
KAFKA_VALUE_ENCODING,
KAFKA_INBOUND_TOPIC,
KAFKA_SUCCESS_OUTBOUND_TOPIC,
KAFKA_ERROR_OUTBOUND_TOPIC,
KAFKA_DEAD_LETTER_QUEUE_TOPIC,
KAFKA_SUCCESS_ACKS,
KAFKA_ERROR_ACKS,
KAFKA_DEAD_LETTER_QUEUE_ACKS,
KAFKA_INBOUND_GROUP_ID,
KAFKA_INBOUND_AUTO_OFFSET_RESET,
EXECUTION_SLEEP,
EXECUTION_MESSAGE_FORCE_ERROR_KEY,
KAFKA_INBOUND_TIMEOUT,
KAFKA_INBOUND_MAX_RECORDS,
)
from schemas import ResultField
LOGGER = logging.getLogger(__name__)
class RequestsProcessorBuilder(object):
    """Factory that wires a RequestsProcessor to its Kafka clients."""

    @staticmethod
    def build():
        """Create a RequestsProcessor with consumer and all three producers."""
        return RequestsProcessor(
            RequestsProcessorBuilder.build_inbound_consumer(),
            RequestsProcessorBuilder.build_success_publisher(),
            RequestsProcessorBuilder.build_error_publisher(),
            RequestsProcessorBuilder.build_dead_letter_publisher(),
        )

    @staticmethod
    def build_inbound_consumer():
        """JSON-deserializing consumer for the inbound topic.

        Auto-commit is disabled: offsets are committed manually by the
        processor after each message is handled.
        """
        return KafkaConsumer(
            KAFKA_INBOUND_TOPIC,
            bootstrap_servers=[KAFKA_BOOTSTRAP_SERVER],
            auto_offset_reset=KAFKA_INBOUND_AUTO_OFFSET_RESET,
            enable_auto_commit=False,
            group_id=KAFKA_INBOUND_GROUP_ID,
            value_deserializer=lambda raw: json.loads(raw.decode(KAFKA_VALUE_ENCODING)),
        )

    @staticmethod
    def build_success_publisher():
        """Producer for successful execution results."""
        return RequestsProcessorBuilder.build_producer(KAFKA_SUCCESS_ACKS)

    @staticmethod
    def build_error_publisher():
        """Producer for failed execution results."""
        return RequestsProcessorBuilder.build_producer(KAFKA_ERROR_ACKS)

    @staticmethod
    def build_dead_letter_publisher():
        """Producer for messages whose handling raised."""
        return RequestsProcessorBuilder.build_producer(KAFKA_DEAD_LETTER_QUEUE_ACKS)

    @staticmethod
    def build_producer(acknowledgements):
        """JSON-serializing KafkaProducer using the given acks level."""
        return KafkaProducer(
            bootstrap_servers=[KAFKA_BOOTSTRAP_SERVER],
            value_serializer=lambda payload: json.dumps(payload).encode(KAFKA_VALUE_ENCODING),
            acks=acknowledgements,
        )
class RequestsProcessor(object):
    """Polls execution requests from Kafka, runs them, and publishes results.

    Successful and failed executions go to their respective outbound topics;
    messages whose handling raised go to the dead letter queue. Offsets are
    committed manually after every message, whether it succeeded or not.
    """

    def __init__(self, inbound_consumer, success_publisher, error_publisher, dead_letter_publisher):
        self.inbound_consumer = inbound_consumer
        self.success_publisher = success_publisher
        self.error_publisher = error_publisher
        self.dead_letter_publisher = dead_letter_publisher

    def start(self):
        """Poll the inbound topic forever, handling each returned batch."""
        while True:
            messages_by_partition = self.inbound_consumer.poll(
                timeout_ms=KAFKA_INBOUND_TIMEOUT,
                max_records=KAFKA_INBOUND_MAX_RECORDS,
            )
            self.handle_messages(messages_by_partition)

    def handle_messages(self, messages_by_partition):
        """Handle every message of every partition returned by a poll."""
        for topic_partition, messages in messages_by_partition.items():
            for message in messages:
                self.handle_message(topic_partition, message)

    def handle_message(self, topic_partition, message):
        """Process one message, publish its result, and commit its offset."""
        execution = message.value
        LOGGER.info("Handling message: '%s'", str(execution))

        try:
            failed, outputs, start_time, end_time, total_seconds = RequestsProcessor.process(
                execution
            )
            result = RequestsProcessor.build_result(
                execution, outputs, start_time, end_time, total_seconds
            )
            self.publish_to_result_topic(result, failed)
        except Exception:
            # Bug fix: was a bare `except:`, which also swallowed
            # SystemExit/KeyboardInterrupt and prevented clean shutdown.
            LOGGER.exception("An error occurred while handling the execution")
            self.publish_to_dead_letter_queue_topic(execution)

        self.commit_current_message(topic_partition)

        LOGGER.info("Done handling message: '%s'", str(execution))

    def publish_to_result_topic(self, execution, failed):
        """Publish a result to the error or success topic depending on *failed*."""
        if failed:
            LOGGER.info("Publishing execution to failed executions topic")
            self.error_publisher.send(KAFKA_ERROR_OUTBOUND_TOPIC, value=execution)
            LOGGER.info("Published execution to failed executions topic")
        else:
            LOGGER.info("Publishing execution to successful executions topic")
            self.success_publisher.send(KAFKA_SUCCESS_OUTBOUND_TOPIC, value=execution)
            LOGGER.info("Published execution to successful executions topic")

    def publish_to_dead_letter_queue_topic(self, execution):
        """Publish an unprocessable message to the dead letter queue topic."""
        LOGGER.info("Publishing execution to dead letter queue topic")
        self.dead_letter_publisher.send(KAFKA_DEAD_LETTER_QUEUE_TOPIC, value=execution)
        LOGGER.info("Published execution to dead letter queue topic")

    def commit_current_message(self, topic_partition):
        """Commit the consumer offset for the message just handled."""
        LOGGER.info("Committing")
        self.inbound_consumer.commit()
        new_offset = self.inbound_consumer.committed(topic_partition)
        LOGGER.info("Committed. New Kafka offset: %s", new_offset)

    @staticmethod
    def process(execution):
        """Run *execution*; return (failed, outputs, start, end, total_seconds)."""
        LOGGER.info("Executing: %s", execution)
        start_time = datetime.utcnow()
        failed, outputs = Executor(execution).execute()
        end_time = datetime.utcnow()
        processing_time_difference = end_time - start_time
        processing_time_seconds = processing_time_difference.total_seconds()
        LOGGER.info("Executed: %s", execution)
        return failed, outputs, start_time, end_time, processing_time_seconds

    @staticmethod
    def build_result(execution, outputs, start_time, end_time, total_seconds):
        """Build the result payload published to the outbound topics."""
        return {
            ResultField.ID: generate_identifier(),
            ResultField.START_TIME: str(start_time),
            ResultField.END_TIME: str(end_time),
            ResultField.TOTAL_SECONDS: total_seconds,
            ResultField.EXECUTION: execution.copy(),
            ResultField.OUTPUTS: outputs
        }
class Executor(object):
    """Simulated executor for a single execution request."""

    def __init__(self, execution):
        self.execution = execution

    def execute(self):
        """Sleep to simulate work, then return (failed, outputs)."""
        Executor.wait(EXECUTION_SLEEP)
        should_fail = self.execution.get(EXECUTION_MESSAGE_FORCE_ERROR_KEY)
        return should_fail, Executor.get_outputs(should_fail)

    @staticmethod
    def wait(seconds):
        """Block for *seconds*, logging before and after the sleep."""
        LOGGER.info("Sleeping for %d seconds...", seconds)
        sleep(seconds)
        LOGGER.info("Done waiting")

    @staticmethod
    def get_outputs(force_error):
        """Return the outputs payload; empty when the execution is failing."""
        if force_error:
            return {}
        return {ResultField.OUTPUT_MESSAGE_KEY: ResultField.OUTPUT_MESSAGE_VALUE}
def generate_identifier():
    """Return a fresh random UUID4 in its canonical dashed string form."""
    return "{}".format(uuid4())
| 34.317708 | 100 | 0.700562 | import json
from time import sleep
from uuid import uuid4
from datetime import datetime
import logging
from kafka import KafkaProducer, KafkaConsumer
from settings import (
KAFKA_BOOTSTRAP_SERVER,
KAFKA_VALUE_ENCODING,
KAFKA_INBOUND_TOPIC,
KAFKA_SUCCESS_OUTBOUND_TOPIC,
KAFKA_ERROR_OUTBOUND_TOPIC,
KAFKA_DEAD_LETTER_QUEUE_TOPIC,
KAFKA_SUCCESS_ACKS,
KAFKA_ERROR_ACKS,
KAFKA_DEAD_LETTER_QUEUE_ACKS,
KAFKA_INBOUND_GROUP_ID,
KAFKA_INBOUND_AUTO_OFFSET_RESET,
EXECUTION_SLEEP,
EXECUTION_MESSAGE_FORCE_ERROR_KEY,
KAFKA_INBOUND_TIMEOUT,
KAFKA_INBOUND_MAX_RECORDS,
)
from schemas import ResultField
LOGGER = logging.getLogger(__name__)
class RequestsProcessorBuilder(object):
@staticmethod
def build():
return RequestsProcessor(
RequestsProcessorBuilder.build_inbound_consumer(),
RequestsProcessorBuilder.build_success_publisher(),
RequestsProcessorBuilder.build_error_publisher(),
RequestsProcessorBuilder.build_dead_letter_publisher(),
)
@staticmethod
def build_inbound_consumer():
return KafkaConsumer(
KAFKA_INBOUND_TOPIC,
bootstrap_servers=[KAFKA_BOOTSTRAP_SERVER],
auto_offset_reset=KAFKA_INBOUND_AUTO_OFFSET_RESET,
enable_auto_commit=False,
group_id=KAFKA_INBOUND_GROUP_ID,
value_deserializer=lambda value: json.loads(value.decode(KAFKA_VALUE_ENCODING))
)
@staticmethod
def build_success_publisher():
return RequestsProcessorBuilder.build_producer(KAFKA_SUCCESS_ACKS)
@staticmethod
def build_error_publisher():
return RequestsProcessorBuilder.build_producer(KAFKA_ERROR_ACKS)
@staticmethod
def build_dead_letter_publisher():
return RequestsProcessorBuilder.build_producer(KAFKA_DEAD_LETTER_QUEUE_ACKS)
@staticmethod
def build_producer(acknowledgements):
return KafkaProducer(
bootstrap_servers=[KAFKA_BOOTSTRAP_SERVER],
value_serializer=lambda value: json.dumps(value).encode(KAFKA_VALUE_ENCODING),
acks=acknowledgements
)
class RequestsProcessor(object):
def __init__(self, inbound_consumer, success_publisher, error_publisher, dead_letter_publisher):
self.inbound_consumer = inbound_consumer
self.success_publisher = success_publisher
self.error_publisher = error_publisher
self.dead_letter_publisher = dead_letter_publisher
def start(self):
while True:
messages_by_partition = self.inbound_consumer.poll(
timeout_ms=KAFKA_INBOUND_TIMEOUT,
max_records=KAFKA_INBOUND_MAX_RECORDS,
)
self.handle_messages(messages_by_partition)
def handle_messages(self, messages_by_partition):
for topic_partition, messages in messages_by_partition.items():
for message in messages:
self.handle_message(topic_partition, message)
def handle_message(self, topic_partition, message):
execution = message.value
LOGGER.info("Handling message: '%s'", str(execution))
try:
failed, outputs, start_time, end_time, total_seconds = RequestsProcessor.process(
execution
)
result = RequestsProcessor.build_result(
execution, outputs, start_time, end_time, total_seconds
)
self.publish_to_result_topic(result, failed)
except:
LOGGER.exception("An error occurred while handling the execution")
self.publish_to_dead_letter_queue_topic(execution)
self.commit_current_message(topic_partition)
LOGGER.info("Done handling message: '%s'", str(execution))
def publish_to_result_topic(self, execution, failed):
if failed:
LOGGER.info("Publishing execution to failed executions topic")
self.error_publisher.send(KAFKA_ERROR_OUTBOUND_TOPIC, value=execution)
LOGGER.info("Published execution to failed executions topic")
else:
LOGGER.info("Publishing execution to successful executions topic")
self.success_publisher.send(KAFKA_SUCCESS_OUTBOUND_TOPIC, value=execution)
LOGGER.info("Published execution to successful executions topic")
def publish_to_dead_letter_queue_topic(self, execution):
LOGGER.info("Publishing execution to dead letter queue topic")
self.dead_letter_publisher.send(KAFKA_DEAD_LETTER_QUEUE_TOPIC, value=execution)
LOGGER.info("Published execution to dead letter queue topic")
def commit_current_message(self, topic_partition):
LOGGER.info("Committing")
self.inbound_consumer.commit()
new_offset = self.inbound_consumer.committed(topic_partition)
LOGGER.info("Committed. New Kafka offset: %s", new_offset)
@staticmethod
def process(execution):
LOGGER.info("Executing: %s", execution)
start_time = datetime.utcnow()
failed, outputs = Executor(execution).execute()
end_time = datetime.utcnow()
processing_time_difference = end_time - start_time
processing_time_seconds = processing_time_difference.total_seconds()
LOGGER.info("Executed: %s", execution)
return failed, outputs, start_time, end_time, processing_time_seconds
@staticmethod
def build_result(execution, outputs, start_time, end_time, total_seconds):
return {
ResultField.ID: generate_identifier(),
ResultField.START_TIME: str(start_time),
ResultField.END_TIME: str(end_time),
ResultField.TOTAL_SECONDS: total_seconds,
ResultField.EXECUTION: execution.copy(),
ResultField.OUTPUTS: outputs
}
class Executor(object):
def __init__(self, execution):
self.execution = execution
def execute(self):
Executor.wait(EXECUTION_SLEEP)
force_error = self.execution.get(EXECUTION_MESSAGE_FORCE_ERROR_KEY)
outputs = Executor.get_outputs(force_error)
return force_error, outputs
@staticmethod
def wait(seconds):
LOGGER.info("Sleeping for %d seconds...", seconds)
sleep(seconds)
LOGGER.info("Done waiting")
@staticmethod
def get_outputs(force_error):
outputs = {}
if not force_error:
outputs[ResultField.OUTPUT_MESSAGE_KEY] = ResultField.OUTPUT_MESSAGE_VALUE
return outputs
def generate_identifier():
return str(uuid4())
| true | true |
79015cda1488556f4a202a89907d6ffe380ae288 | 7,660 | py | Python | bot/cogs/memes_cog/memes_cog.py | zepthro/ClemBot | c354cc9ed2a0749a1751a717347f4b5377c0b4b2 | [
"MIT"
] | null | null | null | bot/cogs/memes_cog/memes_cog.py | zepthro/ClemBot | c354cc9ed2a0749a1751a717347f4b5377c0b4b2 | [
"MIT"
] | null | null | null | bot/cogs/memes_cog/memes_cog.py | zepthro/ClemBot | c354cc9ed2a0749a1751a717347f4b5377c0b4b2 | [
"MIT"
] | null | null | null | import concurrent.futures
import datetime
import io
import logging
import os
import random
import time
import typing as t
import discord
import discord.ext.commands as commands
from PIL import Image, ImageDraw, ImageSequence, ImageFont
import bot.extensions as ext
from bot.consts import Colors
from bot.messaging.events import Events
log = logging.getLogger(__name__)
MAX_WALDO_GRID_SIZE = 100
CRAB_LINE_LENGTH = 58
CRAB_COMMAND_COOLDOWN = 3
def pillow_process(args, is_rave, lines_in_text, timestamp):
# Open crab.gif and add our font
with Image.open('bot/cogs/memes_cog/assets/crab.gif') as im:
fnt = ImageFont.truetype('bot/cogs/memes_cog/assets/LemonMilk.otf', 11)
# Draw text on each frame of the gif
# Gonna be honest I don't quite understand how it works but I got it from the Pillow docs/issues
frames = []
for frame in ImageSequence.Iterator(im):
d = ImageDraw.Draw(frame)
w, h = d.textsize(args, fnt)
# draws the text on to the frame. Tries to center horizontally and tries to go as close to the bottom as possible
d.text((im.size[0] / 2 - w / 2, im.size[1] - h - (5 * lines_in_text)), args, font=fnt, align='center',
stroke_width=bool(is_rave), stroke_fill=Colors.ClemsonOrange, spacing=6)
del d
b = io.BytesIO()
frame.save(b, format='GIF')
frame = Image.open(b)
frames.append(frame)
frames[0].save(f'bot/cogs/memes_cog/assets/out_{timestamp}.gif', save_all=True, append_images=frames[1:])
class MemesCog(commands.Cog):
def __init__(self, bot):
self.bot = bot
@ext.command()
@ext.long_help(
'A fun command to generate a pseudo bubblewrap effect in discord'
)
@ext.short_help('Creates bubblewrap!')
@ext.example('bubblewrap')
async def bubblewrap(self, ctx):
msg = ''
for _ in range(0, 5):
for _ in range(0, 10):
msg += '||pop!|| '
msg += '\n'
await ctx.send(msg)
@commands.command()
@ext.long_help(
'A fun command to generate a wheres waldo effect in discord, see if you can find him first!'
'Optionally takes a size parameter to make it easier or harder'
)
@ext.short_help('Can you find him?')
@ext.example(('waldo', 'waldo 10'))
async def waldo(self, ctx, size=MAX_WALDO_GRID_SIZE):
"""
Play Where's Waldo!
Usage: <prefix>waldo [size = 100]
"""
random_start_letters = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'X',
'Y', 'Z']
max_waldo_line_size = 6
new_line_waldo_chance = 10
msg = ''
count = 0
place = random.randint(0, size)
for i in range(size + 1):
if i == place:
msg += '||`WALDO`|| '
count += 1
else:
helper = random.randint(0, len(random_start_letters) - 1)
letter = random_start_letters[helper]
msg += f'||`{letter}ALDO`|| '
count += 1
new_line = random.randint(0, 100)
if new_line < new_line_waldo_chance or count > max_waldo_line_size:
msg += '\n'
count = 0
await ctx.send(msg)
@ext.command()
@ext.chainable()
@ext.long_help(
'A fun command to spongebob meme text in discord'
)
@ext.short_help('sO yOu doNt KnOw wHat tHiS Is?')
@ext.example('spongebob hello world')
async def spongebob(self, ctx, *, args):
"""
Spongebob Text
"""
random.seed(time.time())
args = args.replace('"', "'")
result = ''
for i in args:
helper = random.randint(0, 100)
if helper > 60:
result += str(i).upper()
else:
result += str(i).lower()
await ctx.send(result)
@ext.command(aliases=['rave', '🦀'])
@commands.cooldown(1, CRAB_COMMAND_COOLDOWN, commands.BucketType.guild)
@ext.long_help(
'A fun command to generate a crab rave gif with specified text overlay'
)
@ext.short_help('Generates a crab rave gif')
@ext.chainable_input()
@ext.example('crab hello from crab world')
async def crab(self, ctx, is_rave: t.Optional[bool] = True, *, args='Bottom text\n is dead'):
"""
Create your own crab rave.
Usage: <prefix>crab [is_rave=True] [text=Bottom text\\n is dead]
Aliases: rave, 🦀
"""
# crab.gif dimensions - 352 by 200
# Immediately grab the timestamp incase of multiple calls in a row
timestamp = datetime.datetime.utcnow().microsecond
wait_msg = await ctx.send('Generating your gif')
args = args.replace('\\', '')
# Add new lines for when the text would go out of bounds
lines_in_text = 1
while len(args) > (CRAB_LINE_LENGTH * lines_in_text):
newline_loc = CRAB_LINE_LENGTH * lines_in_text
# I didn't want to add a newline in the middle of a word
while not args[newline_loc].isspace():
newline_loc -= 1
if newline_loc == CRAB_LINE_LENGTH * (lines_in_text - 1):
newline_loc = CRAB_LINE_LENGTH * lines_in_text
break
args = f'{args[:newline_loc]} \n{args[newline_loc:]}'
lines_in_text += 1
loop = self.bot.loop
with concurrent.futures.ProcessPoolExecutor() as pool:
pil_args = (args, is_rave, lines_in_text, timestamp)
await loop.run_in_executor(pool, pillow_process, *pil_args)
# Attach, send, and delete created gif
attachment = discord.File(filename=f'out_{timestamp}.gif', fp=f'bot/cogs/memes_cog/assets/out_{timestamp}.gif')
msg = await ctx.send(file=attachment)
await self.bot.messenger.publish(Events.on_set_deletable, msg=msg, author=ctx.author)
await wait_msg.delete()
os.remove(f'bot/cogs/memes_cog/assets/out_{timestamp}.gif')
@ext.command(hidden=True, aliases=['ctray', 'trayforjay'])
async def cookouttray(self, ctx, input):
"""
For those who do finances with cookout trays, we proudly present the command for you
Simply type one of the following:
cookouttray
ctray
trayforjay
Followed by a monetary value such as (leave off the dollar sign):
20
100
3.14
To have it converted into cookout trays
Examples:
cookouttray 20
ctray 100
trayforjay 3.14
Clicking the link "Cash to Cookout Tray Converter" in the output will also take you to cookout's website
"""
money = round(float(input), 2)
output = money / 5
embed = discord.Embed(
title='Cash to Cookout Tray Converter',
description=f'{ctx.message.author.mention} ${money} is approximately {output} cookout trays',
url=f"https://www.fastfoodmenuprices.com/cookout-prices/",
color=Colors.ClemsonOrange)
await ctx.send(embed=embed)
def setup(bot):
bot.add_cog(MemesCog(bot))
| 35.462963 | 147 | 0.565144 | import concurrent.futures
import datetime
import io
import logging
import os
import random
import time
import typing as t
import discord
import discord.ext.commands as commands
from PIL import Image, ImageDraw, ImageSequence, ImageFont
import bot.extensions as ext
from bot.consts import Colors
from bot.messaging.events import Events
log = logging.getLogger(__name__)
MAX_WALDO_GRID_SIZE = 100
CRAB_LINE_LENGTH = 58
CRAB_COMMAND_COOLDOWN = 3
def pillow_process(args, is_rave, lines_in_text, timestamp):
with Image.open('bot/cogs/memes_cog/assets/crab.gif') as im:
fnt = ImageFont.truetype('bot/cogs/memes_cog/assets/LemonMilk.otf', 11)
frames = []
for frame in ImageSequence.Iterator(im):
d = ImageDraw.Draw(frame)
w, h = d.textsize(args, fnt)
# draws the text on to the frame. Tries to center horizontally and tries to go as close to the bottom as possible
d.text((im.size[0] / 2 - w / 2, im.size[1] - h - (5 * lines_in_text)), args, font=fnt, align='center',
stroke_width=bool(is_rave), stroke_fill=Colors.ClemsonOrange, spacing=6)
del d
b = io.BytesIO()
frame.save(b, format='GIF')
frame = Image.open(b)
frames.append(frame)
frames[0].save(f'bot/cogs/memes_cog/assets/out_{timestamp}.gif', save_all=True, append_images=frames[1:])
class MemesCog(commands.Cog):
def __init__(self, bot):
self.bot = bot
@ext.command()
@ext.long_help(
'A fun command to generate a pseudo bubblewrap effect in discord'
)
@ext.short_help('Creates bubblewrap!')
@ext.example('bubblewrap')
async def bubblewrap(self, ctx):
msg = ''
for _ in range(0, 5):
for _ in range(0, 10):
msg += '||pop!|| '
msg += '\n'
await ctx.send(msg)
@commands.command()
@ext.long_help(
'A fun command to generate a wheres waldo effect in discord, see if you can find him first!'
'Optionally takes a size parameter to make it easier or harder'
)
@ext.short_help('Can you find him?')
@ext.example(('waldo', 'waldo 10'))
async def waldo(self, ctx, size=MAX_WALDO_GRID_SIZE):
random_start_letters = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'X',
'Y', 'Z']
max_waldo_line_size = 6
new_line_waldo_chance = 10
msg = ''
count = 0
place = random.randint(0, size)
for i in range(size + 1):
if i == place:
msg += '||`WALDO`|| '
count += 1
else:
helper = random.randint(0, len(random_start_letters) - 1)
letter = random_start_letters[helper]
msg += f'||`{letter}ALDO`|| '
count += 1
new_line = random.randint(0, 100)
if new_line < new_line_waldo_chance or count > max_waldo_line_size:
msg += '\n'
count = 0
await ctx.send(msg)
@ext.command()
@ext.chainable()
@ext.long_help(
'A fun command to spongebob meme text in discord'
)
@ext.short_help('sO yOu doNt KnOw wHat tHiS Is?')
@ext.example('spongebob hello world')
async def spongebob(self, ctx, *, args):
random.seed(time.time())
args = args.replace('"', "'")
result = ''
for i in args:
helper = random.randint(0, 100)
if helper > 60:
result += str(i).upper()
else:
result += str(i).lower()
await ctx.send(result)
@ext.command(aliases=['rave', '🦀'])
@commands.cooldown(1, CRAB_COMMAND_COOLDOWN, commands.BucketType.guild)
@ext.long_help(
'A fun command to generate a crab rave gif with specified text overlay'
)
@ext.short_help('Generates a crab rave gif')
@ext.chainable_input()
@ext.example('crab hello from crab world')
async def crab(self, ctx, is_rave: t.Optional[bool] = True, *, args='Bottom text\n is dead'):
# crab.gif dimensions - 352 by 200
# Immediately grab the timestamp incase of multiple calls in a row
timestamp = datetime.datetime.utcnow().microsecond
wait_msg = await ctx.send('Generating your gif')
args = args.replace('\\', '')
# Add new lines for when the text would go out of bounds
lines_in_text = 1
while len(args) > (CRAB_LINE_LENGTH * lines_in_text):
newline_loc = CRAB_LINE_LENGTH * lines_in_text
# I didn't want to add a newline in the middle of a word
while not args[newline_loc].isspace():
newline_loc -= 1
if newline_loc == CRAB_LINE_LENGTH * (lines_in_text - 1):
newline_loc = CRAB_LINE_LENGTH * lines_in_text
break
args = f'{args[:newline_loc]} \n{args[newline_loc:]}'
lines_in_text += 1
loop = self.bot.loop
with concurrent.futures.ProcessPoolExecutor() as pool:
pil_args = (args, is_rave, lines_in_text, timestamp)
await loop.run_in_executor(pool, pillow_process, *pil_args)
# Attach, send, and delete created gif
attachment = discord.File(filename=f'out_{timestamp}.gif', fp=f'bot/cogs/memes_cog/assets/out_{timestamp}.gif')
msg = await ctx.send(file=attachment)
await self.bot.messenger.publish(Events.on_set_deletable, msg=msg, author=ctx.author)
await wait_msg.delete()
os.remove(f'bot/cogs/memes_cog/assets/out_{timestamp}.gif')
@ext.command(hidden=True, aliases=['ctray', 'trayforjay'])
async def cookouttray(self, ctx, input):
money = round(float(input), 2)
output = money / 5
embed = discord.Embed(
title='Cash to Cookout Tray Converter',
description=f'{ctx.message.author.mention} ${money} is approximately {output} cookout trays',
url=f"https://www.fastfoodmenuprices.com/cookout-prices/",
color=Colors.ClemsonOrange)
await ctx.send(embed=embed)
def setup(bot):
bot.add_cog(MemesCog(bot))
| true | true |
79015d18f178a6c66cd536772ba491841d5cbdf0 | 1,612 | py | Python | settings.py | pk0912/TweetEmotionsPredictor | 5325de8d6df6ea8e2d3627dc36eeb463b5c1ea92 | [
"MIT"
] | null | null | null | settings.py | pk0912/TweetEmotionsPredictor | 5325de8d6df6ea8e2d3627dc36eeb463b5c1ea92 | [
"MIT"
] | null | null | null | settings.py | pk0912/TweetEmotionsPredictor | 5325de8d6df6ea8e2d3627dc36eeb463b5c1ea92 | [
"MIT"
] | null | null | null | import os
LUCKY_SEED = 42
TRAIN_FILE_COUNT = 43
VAL_FILE_COUNT = 12
ROOT_DIR = os.path.abspath(os.path.dirname(__file__))
OBJECTS_DIR = os.path.join(ROOT_DIR, "objects")
OUTPUTS_DIR = os.path.join(ROOT_DIR, "outputs")
LOGS_DIR = os.path.join(ROOT_DIR, "logs")
DATA_DIR = os.path.join(ROOT_DIR, "data")
RAW_DATA_DIR = os.path.join(DATA_DIR, "raw_data")
ORIG_DATA_DIR = os.path.join(RAW_DATA_DIR, "sa-emotions")
OTHERS_RAW_DATA = os.path.join(RAW_DATA_DIR, "others")
PROCESSED_DATA_DIR = os.path.join(DATA_DIR, "processed_data")
COMPLEX_PROCESSED_DATA_DIR = os.path.join(PROCESSED_DATA_DIR, "complex")
SIMPLE_PROCESSED_DATA_DIR = os.path.join(PROCESSED_DATA_DIR, "simple")
TEST_DATA_DIR = os.path.join(DATA_DIR, "testing_data")
TRAIN_DATA_DIR = os.path.join(DATA_DIR, "training_data")
TRAIN_DATA_DIR_WI = os.path.join(TRAIN_DATA_DIR, "word_2_index")
TRAIN_DATA_DIR_TF_IDF = os.path.join(TRAIN_DATA_DIR, "tf_idf")
VAL_DATA_DIR = os.path.join(DATA_DIR, "validation_data")
VAL_DATA_DIR_WI = os.path.join(VAL_DATA_DIR, "word_2_index")
VAL_DATA_DIR_TF_IDF = os.path.join(VAL_DATA_DIR, "tf_idf")
SPACY_MEDIUM_MODEL = "en_core_web_md"
SPACY_LARGE_MODEL = "en_core_web_lg"
TF_HUB_EMBEDDING_MODELS = [
"https://tfhub.dev/google/nnlm-en-dim128/2",
"https://tfhub.dev/google/universal-sentence-encoder/4",
"https://tfhub.dev/google/tf2-preview/nnlm-en-dim50/1",
]
LOG_FORMAT = (
"%(asctime)s | %(levelname)s | %(name)s | %(filename)s | %(lineno)d | %(message)s"
)
LOG_LEVEL = "DEBUG"
LOG_FILE = os.path.join(LOGS_DIR, "sentiment_analysis.log")
LOG_FILE_MAX_BYTES = 1048576
LOG_FILE_BACKUP_COUNT = 2
| 34.297872 | 86 | 0.759926 | import os
LUCKY_SEED = 42
TRAIN_FILE_COUNT = 43
VAL_FILE_COUNT = 12
ROOT_DIR = os.path.abspath(os.path.dirname(__file__))
OBJECTS_DIR = os.path.join(ROOT_DIR, "objects")
OUTPUTS_DIR = os.path.join(ROOT_DIR, "outputs")
LOGS_DIR = os.path.join(ROOT_DIR, "logs")
DATA_DIR = os.path.join(ROOT_DIR, "data")
RAW_DATA_DIR = os.path.join(DATA_DIR, "raw_data")
ORIG_DATA_DIR = os.path.join(RAW_DATA_DIR, "sa-emotions")
OTHERS_RAW_DATA = os.path.join(RAW_DATA_DIR, "others")
PROCESSED_DATA_DIR = os.path.join(DATA_DIR, "processed_data")
COMPLEX_PROCESSED_DATA_DIR = os.path.join(PROCESSED_DATA_DIR, "complex")
SIMPLE_PROCESSED_DATA_DIR = os.path.join(PROCESSED_DATA_DIR, "simple")
TEST_DATA_DIR = os.path.join(DATA_DIR, "testing_data")
TRAIN_DATA_DIR = os.path.join(DATA_DIR, "training_data")
TRAIN_DATA_DIR_WI = os.path.join(TRAIN_DATA_DIR, "word_2_index")
TRAIN_DATA_DIR_TF_IDF = os.path.join(TRAIN_DATA_DIR, "tf_idf")
VAL_DATA_DIR = os.path.join(DATA_DIR, "validation_data")
VAL_DATA_DIR_WI = os.path.join(VAL_DATA_DIR, "word_2_index")
VAL_DATA_DIR_TF_IDF = os.path.join(VAL_DATA_DIR, "tf_idf")
SPACY_MEDIUM_MODEL = "en_core_web_md"
SPACY_LARGE_MODEL = "en_core_web_lg"
TF_HUB_EMBEDDING_MODELS = [
"https://tfhub.dev/google/nnlm-en-dim128/2",
"https://tfhub.dev/google/universal-sentence-encoder/4",
"https://tfhub.dev/google/tf2-preview/nnlm-en-dim50/1",
]
LOG_FORMAT = (
"%(asctime)s | %(levelname)s | %(name)s | %(filename)s | %(lineno)d | %(message)s"
)
LOG_LEVEL = "DEBUG"
LOG_FILE = os.path.join(LOGS_DIR, "sentiment_analysis.log")
LOG_FILE_MAX_BYTES = 1048576
LOG_FILE_BACKUP_COUNT = 2
| true | true |
79015d4d30a421b799f9697e7384e1bf9d8c9584 | 9,661 | py | Python | isi_sdk_8_1_0/isi_sdk_8_1_0/models/snapshot_schedule_extended.py | mohitjain97/isilon_sdk_python | a371f438f542568edb8cda35e929e6b300b1177c | [
"Unlicense"
] | 24 | 2018-06-22T14:13:23.000Z | 2022-03-23T01:21:26.000Z | isi_sdk_8_1_0/isi_sdk_8_1_0/models/snapshot_schedule_extended.py | mohitjain97/isilon_sdk_python | a371f438f542568edb8cda35e929e6b300b1177c | [
"Unlicense"
] | 46 | 2018-04-30T13:28:22.000Z | 2022-03-21T21:11:07.000Z | isi_sdk_8_1_0/isi_sdk_8_1_0/models/snapshot_schedule_extended.py | mohitjain97/isilon_sdk_python | a371f438f542568edb8cda35e929e6b300b1177c | [
"Unlicense"
] | 29 | 2018-06-19T00:14:04.000Z | 2022-02-08T17:51:19.000Z | # coding: utf-8
"""
Isilon SDK
Isilon SDK - Language bindings for the OneFS API # noqa: E501
OpenAPI spec version: 5
Contact: sdk@isilon.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class SnapshotScheduleExtended(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'alias': 'str',
'duration': 'int',
'id': 'int',
'name': 'str',
'next_run': 'int',
'next_snapshot': 'str',
'path': 'str',
'pattern': 'str',
'schedule': 'str'
}
attribute_map = {
'alias': 'alias',
'duration': 'duration',
'id': 'id',
'name': 'name',
'next_run': 'next_run',
'next_snapshot': 'next_snapshot',
'path': 'path',
'pattern': 'pattern',
'schedule': 'schedule'
}
def __init__(self, alias=None, duration=None, id=None, name=None, next_run=None, next_snapshot=None, path=None, pattern=None, schedule=None): # noqa: E501
"""SnapshotScheduleExtended - a model defined in Swagger""" # noqa: E501
self._alias = None
self._duration = None
self._id = None
self._name = None
self._next_run = None
self._next_snapshot = None
self._path = None
self._pattern = None
self._schedule = None
self.discriminator = None
if alias is not None:
self.alias = alias
if duration is not None:
self.duration = duration
if id is not None:
self.id = id
if name is not None:
self.name = name
if next_run is not None:
self.next_run = next_run
if next_snapshot is not None:
self.next_snapshot = next_snapshot
if path is not None:
self.path = path
if pattern is not None:
self.pattern = pattern
if schedule is not None:
self.schedule = schedule
@property
def alias(self):
"""Gets the alias of this SnapshotScheduleExtended. # noqa: E501
Alias name to create for each snapshot. # noqa: E501
:return: The alias of this SnapshotScheduleExtended. # noqa: E501
:rtype: str
"""
return self._alias
@alias.setter
def alias(self, alias):
"""Sets the alias of this SnapshotScheduleExtended.
Alias name to create for each snapshot. # noqa: E501
:param alias: The alias of this SnapshotScheduleExtended. # noqa: E501
:type: str
"""
self._alias = alias
@property
def duration(self):
"""Gets the duration of this SnapshotScheduleExtended. # noqa: E501
Time in seconds added to creation time to construction expiration time. # noqa: E501
:return: The duration of this SnapshotScheduleExtended. # noqa: E501
:rtype: int
"""
return self._duration
@duration.setter
def duration(self, duration):
"""Sets the duration of this SnapshotScheduleExtended.
Time in seconds added to creation time to construction expiration time. # noqa: E501
:param duration: The duration of this SnapshotScheduleExtended. # noqa: E501
:type: int
"""
self._duration = duration
@property
def id(self):
"""Gets the id of this SnapshotScheduleExtended. # noqa: E501
The system ID given to the schedule. # noqa: E501
:return: The id of this SnapshotScheduleExtended. # noqa: E501
:rtype: int
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this SnapshotScheduleExtended.
The system ID given to the schedule. # noqa: E501
:param id: The id of this SnapshotScheduleExtended. # noqa: E501
:type: int
"""
self._id = id
@property
def name(self):
"""Gets the name of this SnapshotScheduleExtended. # noqa: E501
The schedule name. # noqa: E501
:return: The name of this SnapshotScheduleExtended. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this SnapshotScheduleExtended.
The schedule name. # noqa: E501
:param name: The name of this SnapshotScheduleExtended. # noqa: E501
:type: str
"""
self._name = name
@property
def next_run(self):
"""Gets the next_run of this SnapshotScheduleExtended. # noqa: E501
Unix Epoch time of next snapshot to be created. # noqa: E501
:return: The next_run of this SnapshotScheduleExtended. # noqa: E501
:rtype: int
"""
return self._next_run
@next_run.setter
def next_run(self, next_run):
"""Sets the next_run of this SnapshotScheduleExtended.
Unix Epoch time of next snapshot to be created. # noqa: E501
:param next_run: The next_run of this SnapshotScheduleExtended. # noqa: E501
:type: int
"""
self._next_run = next_run
@property
def next_snapshot(self):
"""Gets the next_snapshot of this SnapshotScheduleExtended. # noqa: E501
Formatted name (see pattern) of next snapshot to be created. # noqa: E501
:return: The next_snapshot of this SnapshotScheduleExtended. # noqa: E501
:rtype: str
"""
return self._next_snapshot
@next_snapshot.setter
def next_snapshot(self, next_snapshot):
"""Sets the next_snapshot of this SnapshotScheduleExtended.
Formatted name (see pattern) of next snapshot to be created. # noqa: E501
:param next_snapshot: The next_snapshot of this SnapshotScheduleExtended. # noqa: E501
:type: str
"""
self._next_snapshot = next_snapshot
@property
def path(self):
"""Gets the path of this SnapshotScheduleExtended. # noqa: E501
The /ifs path snapshotted. # noqa: E501
:return: The path of this SnapshotScheduleExtended. # noqa: E501
:rtype: str
"""
return self._path
@path.setter
def path(self, path):
"""Sets the path of this SnapshotScheduleExtended.
The /ifs path snapshotted. # noqa: E501
:param path: The path of this SnapshotScheduleExtended. # noqa: E501
:type: str
"""
self._path = path
@property
def pattern(self):
"""Gets the pattern of this SnapshotScheduleExtended. # noqa: E501
Pattern expanded with strftime to create snapshot name. # noqa: E501
:return: The pattern of this SnapshotScheduleExtended. # noqa: E501
:rtype: str
"""
return self._pattern
@pattern.setter
def pattern(self, pattern):
"""Sets the pattern of this SnapshotScheduleExtended.
Pattern expanded with strftime to create snapshot name. # noqa: E501
:param pattern: The pattern of this SnapshotScheduleExtended. # noqa: E501
:type: str
"""
self._pattern = pattern
@property
def schedule(self):
"""Gets the schedule of this SnapshotScheduleExtended. # noqa: E501
The isidate compatible natural language description of the schedule. # noqa: E501
:return: The schedule of this SnapshotScheduleExtended. # noqa: E501
:rtype: str
"""
return self._schedule
@schedule.setter
def schedule(self, schedule):
"""Sets the schedule of this SnapshotScheduleExtended.
The isidate compatible natural language description of the schedule. # noqa: E501
:param schedule: The schedule of this SnapshotScheduleExtended. # noqa: E501
:type: str
"""
self._schedule = schedule
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, SnapshotScheduleExtended):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 28.498525 | 159 | 0.589898 |
import pprint
import re
import six
class SnapshotScheduleExtended(object):
swagger_types = {
'alias': 'str',
'duration': 'int',
'id': 'int',
'name': 'str',
'next_run': 'int',
'next_snapshot': 'str',
'path': 'str',
'pattern': 'str',
'schedule': 'str'
}
attribute_map = {
'alias': 'alias',
'duration': 'duration',
'id': 'id',
'name': 'name',
'next_run': 'next_run',
'next_snapshot': 'next_snapshot',
'path': 'path',
'pattern': 'pattern',
'schedule': 'schedule'
}
def __init__(self, alias=None, duration=None, id=None, name=None, next_run=None, next_snapshot=None, path=None, pattern=None, schedule=None):
self._alias = None
self._duration = None
self._id = None
self._name = None
self._next_run = None
self._next_snapshot = None
self._path = None
self._pattern = None
self._schedule = None
self.discriminator = None
if alias is not None:
self.alias = alias
if duration is not None:
self.duration = duration
if id is not None:
self.id = id
if name is not None:
self.name = name
if next_run is not None:
self.next_run = next_run
if next_snapshot is not None:
self.next_snapshot = next_snapshot
if path is not None:
self.path = path
if pattern is not None:
self.pattern = pattern
if schedule is not None:
self.schedule = schedule
@property
def alias(self):
return self._alias
@alias.setter
def alias(self, alias):
self._alias = alias
@property
def duration(self):
return self._duration
@duration.setter
def duration(self, duration):
self._duration = duration
@property
def id(self):
return self._id
@id.setter
def id(self, id):
self._id = id
@property
def name(self):
return self._name
@name.setter
def name(self, name):
self._name = name
@property
def next_run(self):
return self._next_run
@next_run.setter
def next_run(self, next_run):
self._next_run = next_run
@property
def next_snapshot(self):
return self._next_snapshot
@next_snapshot.setter
def next_snapshot(self, next_snapshot):
self._next_snapshot = next_snapshot
@property
def path(self):
return self._path
@path.setter
def path(self, path):
self._path = path
@property
def pattern(self):
return self._pattern
@pattern.setter
def pattern(self, pattern):
self._pattern = pattern
@property
def schedule(self):
return self._schedule
@schedule.setter
def schedule(self, schedule):
self._schedule = schedule
def to_dict(self):
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
return pprint.pformat(self.to_dict())
def __repr__(self):
return self.to_str()
def __eq__(self, other):
if not isinstance(other, SnapshotScheduleExtended):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
| true | true |
79015dd33df11e36e24c400cef67799129d10ea4 | 6,026 | py | Python | pyg.py | luizperes/brain-online-repl | c563a311e0e441132bec59d0eab0e37fc1270b5c | [
"MIT"
] | 513 | 2015-01-06T17:06:41.000Z | 2018-06-22T10:50:16.000Z | pyg.py | luizperes/brain-online-repl | c563a311e0e441132bec59d0eab0e37fc1270b5c | [
"MIT"
] | 129 | 2015-01-21T07:50:58.000Z | 2017-12-21T22:46:55.000Z | pyg.py | luizperes/brain-online-repl | c563a311e0e441132bec59d0eab0e37fc1270b5c | [
"MIT"
] | 115 | 2015-01-15T01:05:57.000Z | 2018-05-30T17:55:05.000Z | #!/usr/bin/python2
import re, sys
from pygments import highlight
from pygments.lexers import get_lexer_by_name, JavascriptLexer, FactorLexer
from pygments.formatters import HtmlFormatter
from pygments.token import *
from pygments.lexer import RegexLexer
class UnlambdaLexer(RegexLexer):
    """Pygments lexer for the Unlambda esoteric programming language.

    Every Unlambda construct is a single character (plus the two-character
    ``.x`` print form), so each rule matches one short literal.  Rule order
    matters: ``d`` and ``i`` are claimed by their own rules before the
    grab-bag ``[@ried|?]`` rule gets a chance to see them.
    """
    name = 'Unlambda'
    aliases = ['unlambda']
    filenames = ['*.u']

    tokens = {
        'root': [
            (r'#.*\n', Comment.Single),      # '#' comment to end of line
            (r'd', Comment.Preproc),         # delay combinator, styled as a directive
            (r'\..', Generic.Output),        # '.x' prints the character x
            (r'[sk]', Keyword.Declaration),  # S and K combinators
            (r'[cv]', Keyword.Type),         # call/cc ('c') and void ('v')
            (r'i', Keyword.Constant),        # identity combinator
            (r'[@ried|?]', Keyword.Pseudo),  # remaining builtins (I/O etc.)
            (r'`', Operator),                # backtick = function application
            (r'.', Text),                    # fallback: anything else is plain text
        ]
    }
class QBasicLexer(RegexLexer):
    """Pygments lexer for a practical subset of QBasic.

    Rules are tried top to bottom; the final ``.`` rule makes the lexer
    total, so any unrecognized character falls through as plain text.
    """
    name = 'QBasic'
    aliases = ['qbasic']
    filenames = ['*.bas']

    tokens = {
        'root': [
            (r'\'.*\n', Comment.Single),     # apostrophe comment to end of line
            (r'\"[^"]*\"', Literal.String),  # double-quoted string (no escape handling)
            # &H hexadecimal literal, decimal fraction (integer part optional),
            # or plain integer -- the more specific alternatives come first
            (r'&H[\da-fA-F]+|\d*\.\d+|\d+', Literal.Number),
            (r'[-+*/<>=\\]', Operator),
            (r'[()\[\]]', Punctuation),
            # reserved words; the \b guards keep e.g. FORX from matching FOR
            (r'\b(AND|AS|CASE|CONST|DATA|DECLARE|DEF|DEFINT|DIM|DO|ELSE|END|EXIT|FOR|FUNCTION|GOSUB|GOTO|IF|INPUT|LINE|LOOP|MOD|NEXT|NOT|OR|POKE|PRINT|RESTORE|RETURN|SEG|SELECT|SHARED|STATIC|STEP|SUB|TAB|THEN|TO|TYPE|UNTIL|USING|VIEW|WEND|WHILE|XOR)\b', Keyword),
            # line labels: "name:" or a bare line number at the start of a line
            # (MULTILINE is Pygments' RegexLexer default, so ^ anchors per line)
            (r'^([a-zA-Z][a-zA-Z0-9_]*:|\d+)', Name.Label),
            # identifier with optional QBasic type sigil ($ string, % integer,
            # # double, & long, ! single)
            (r'[a-zA-Z_][a-zA-Z0-9_]*(\$|%|#|&|!)?', Name.Variable),
            (r'.', Text),                    # fallback
        ]
    }
class LOLCODELexer(RegexLexer):
    """Syntax highlighting rules for LOLCODE."""
    name = 'LOLCODE'
    aliases = ['lolcode']
    # NOTE(review): '*.bas' duplicates QBasicLexer's pattern -- looks like a
    # copy-paste leftover ('*.lol' would be expected); confirm.
    filenames = ['*.bas']
    tokens = {
        'root': [
            (r'^OBTW\b.*?\bTLDR\b', Comment.Multiline),
            (r'\bBTW\b.*\n', Comment.Single),
            (r'\b(NERFIN|YA\s+RLY|BUKKIT|IS\s+NOW\s+A|MEBBE|GIMMEH|TIL|UPPIN|MKAY|TROOF|INTA|YR|!|NUMBR|OMG|NUMBAR|IF\s+U\s+SAY\s+SO|YARN|VISIBLE|I\s+HAS\s+A|IM\s+OUTTA\s+YR|IM\s+IN\s+YR|A|HAI|NO\s+WAI|GTFO|AN|R|FOUND\s+YR|OMGWTF|FAIL|O\s+RLY?|WTF\?|NOOB|HOW\s+DUZ\s+I|WIN|MAEK|OIC|PUTZ|KTHXBYE|ITZ|WILE|AT)(\b|(?=\s))', Keyword),
            (r'\b(NOT|LENGZ\s+OF|CHARZ\s+OF|ORDZ\s+OF|SUM\s+OF|DIFF\s+OF|PRODUKT\s+OF|QUOSHUNT\s+OF|MOD\s+OF|BIGGR\s+OF|SMALLR\s+OF|BOTH\s+OF|EITHER\s+OF|WON\s+OF|BOTH\s+SAEM|DIFFRINT|ALL\s+OF|ANY\s+OF|SMOOSH|N)\b', Operator.Word),
            (r'"(?::(?:[)>o":]|\([\dA-Fa-f]+\)|\{[A-Za-z]\w*\}|\[[^\[\]]+\])|[^":])*"', Literal.String),
            (r'-?(\d+|\d+\.\d*|\.\d+)', Literal.Number),
            (r'[a-zA-Z]\w*', Name.Variable),
            (r',', Punctuation),
            (r'.', Text),
        ]
    }
class BloopLexer(RegexLexer):
    """Syntax highlighting rules for BlooP/FlooP programs."""
    name = 'Bloop'
    aliases = ['bloop']
    filenames = ['*.bloop']
    # case-insensitive keywords; DOTALL lets /* ... */ span lines
    flags = re.IGNORECASE | re.DOTALL
    tokens = {
        'root': [
            (r'/\*.*?\*/', Comment.Multiline),
            (r"'[^']*'", Literal.String),
            (r'-?\d+', Literal.Number),
            (r'\b(DEFINE|PROCEDURE|BLOCK|LOOP|AT|MOST|TIMES|MU_LOOP|CELL|OUTPUT|YES|NO|QUIT|ABORT|IF|THEN|AND|OR|PRINT|BEGIN|END)(\b|(?=\s))', Keyword),
            (r'[A-Z]\w*', Name),
            (r'[+*!=<>(){}":;,.-\[\]]', Punctuation),
            (r'.', Text),
        ]
    }
class EmoticonLexerHelper(RegexLexer):
    """Base token rules for Emoticon; EmoticonLexer post-processes the output."""
    tokens = {
        'root': [
            (r'\*\*([^*]|\*[^*])*\*\*', Comment),  # ** ... ** comment blocks
            (r'\S+[OC<>\[\]VD@PQ7L#${}\\/()|3E*]((?=\s)|$)', Keyword),  # emoticon-terminated words
            (r'\S+', Literal.String),
            # NOTE(review): likely unreachable -- r'\S+' above already matches
            # any digit run; confirm whether this rule was meant to come first.
            (r'-?\d+', Literal.Number),
            (r'.', Text),
        ]
    }
class EmoticonLexer(EmoticonLexerHelper):
    """Emoticon lexer: splits each Keyword match into name/operator/keyword parts."""
    name = 'Emoticon'
    aliases = ['emoticon']
    filenames = ['*.emo']
    def get_tokens_unprocessed(self, text):
        """Re-yield base tokens, breaking a Keyword into its three components."""
        for index, token, value in EmoticonLexerHelper.get_tokens_unprocessed(self, text):
            if token is Keyword:
                yield index, Name, value[:-2]
                yield index + len(value) - 2, Operator, value[-2]
                # NOTE(review): same offset as the previous yield; for the last
                # character, index + len(value) - 1 looks intended -- confirm.
                yield index + len(value) - 2, Keyword, value[-1]
            else:
                yield index, token, value
class KaffeineLexer(JavascriptLexer):
    """JavascriptLexer variant that tolerates Kaffeine's '#' and '@' tokens."""
    name = 'Kaffeine'
    aliases = ['kaffeine']
    filenames = ['*.k']
    def get_tokens_unprocessed(self, text):
        """Re-label '#' and '@' (Error under plain JS rules) as real tokens."""
        for pos, tok, val in JavascriptLexer.get_tokens_unprocessed(self, text):
            if tok is Error and val == '#':
                yield pos, Name.Tag, val
            elif tok is Error and val == '@':
                yield pos, Keyword, val
            else:
                yield pos, tok, val
class JavascriptNextLexer(JavascriptLexer):
    """JavascriptLexer extended with extra keywords (let, yield)."""
    name = 'Javascript.next'
    aliases = ['javascript.next', 'traceur']
    filenames = ['*.jsn']
    EXTRA_KEYWORDS = ['let', 'yield']
    def get_tokens_unprocessed(self, text):
        """Promote identifiers listed in EXTRA_KEYWORDS to Keyword tokens."""
        extra = self.EXTRA_KEYWORDS
        for pos, tok, val in JavascriptLexer.get_tokens_unprocessed(self, text):
            promote = tok is Name.Other and val in extra
            yield pos, (Keyword if promote else tok), val
class MoveLexer(JavascriptLexer):
    """Move source: highlighted with plain JavaScript rules; only metadata differs."""
    name = 'Move'
    aliases = ['move']
    filenames = ['*.mv']
class ForthLexer(FactorLexer):
    """Forth source: highlighted with Factor's rules; only metadata differs."""
    name = 'Forth'
    aliases = ['forth']
    filenames = ['*.4th']
class RoyLexer(RegexLexer):
    """Syntax highlighting rules for the Roy language."""
    name = 'Roy'
    aliases = ['roy']
    filenames = ['*.roy']
    tokens = {
        'root': [
            (r'//.*\n', Comment.Single),
            (r'\b(true|false|let|fn|if|then|else|data|type|match|case|do|return|macro|with)\b', Keyword),
            (r'-?\d+', Literal.Number),
            (r'\"[^"]*\"', Literal.String),
            (r'<-|->|=|==|!=|\*|\+\+|\\', Operator),
            (r'.', Text)
        ]
    }
class APLLexer(RegexLexer):
    """Placeholder APL lexer: everything is emitted as plain text."""
    name = 'APL'
    aliases = ['apl']
    filenames = ['*.apl']
    tokens = {
        'root': [
            (r'.+', Text)
        ]
    }
def getLexer(lexer_name):
    """Resolve *lexer_name*: custom lexer classes in this module first,
    then fall back to pygments' registry (get_lexer_by_name)."""
    for attr_name, attr in globals().items():
        if not attr_name.endswith('Lexer') or not hasattr(attr, 'aliases'):
            continue
        if lexer_name in attr.aliases:
            return attr()
    return get_lexer_by_name(lexer_name)
def main():
if len(sys.argv) == 2:
lexer = getLexer(sys.argv[1])
if lexer:
result = highlight(sys.stdin.read().decode('utf8'), lexer, HtmlFormatter())
result = result.replace('<div class="highlight"><pre>', '')
result = result.replace('</pre></div>', '')
print result.strip().encode('utf8')
else:
print 'Unknown language:', sys.argv[1]
else:
print 'Usage: pyg.py language < code.txt'
if __name__ == '__main__':
main()
| 30.744898 | 324 | 0.578659 |
import re, sys
from pygments import highlight
from pygments.lexers import get_lexer_by_name, JavascriptLexer, FactorLexer
from pygments.formatters import HtmlFormatter
from pygments.token import *
from pygments.lexer import RegexLexer
class UnlambdaLexer(RegexLexer):
name = 'Unlambda'
aliases = ['unlambda']
filenames = ['*.u']
tokens = {
'root': [
(r'#.*\n', Comment.Single),
(r'd', Comment.Preproc),
(r'\..', Generic.Output),
(r'[sk]', Keyword.Declaration),
(r'[cv]', Keyword.Type),
(r'i', Keyword.Constant),
(r'[@ried|?]', Keyword.Pseudo),
(r'`', Operator),
(r'.', Text),
]
}
class QBasicLexer(RegexLexer):
name = 'QBasic'
aliases = ['qbasic']
filenames = ['*.bas']
tokens = {
'root': [
(r'\'.*\n', Comment.Single),
(r'\"[^"]*\"', Literal.String),
(r'&H[\da-fA-F]+|\d*\.\d+|\d+', Literal.Number),
(r'[-+*/<>=\\]', Operator),
(r'[()\[\]]', Punctuation),
(r'\b(AND|AS|CASE|CONST|DATA|DECLARE|DEF|DEFINT|DIM|DO|ELSE|END|EXIT|FOR|FUNCTION|GOSUB|GOTO|IF|INPUT|LINE|LOOP|MOD|NEXT|NOT|OR|POKE|PRINT|RESTORE|RETURN|SEG|SELECT|SHARED|STATIC|STEP|SUB|TAB|THEN|TO|TYPE|UNTIL|USING|VIEW|WEND|WHILE|XOR)\b', Keyword),
(r'^([a-zA-Z][a-zA-Z0-9_]*:|\d+)', Name.Label),
(r'[a-zA-Z_][a-zA-Z0-9_]*(\$|%|#|&|!)?', Name.Variable),
(r'.', Text),
]
}
class LOLCODELexer(RegexLexer):
name = 'LOLCODE'
aliases = ['lolcode']
filenames = ['*.bas']
tokens = {
'root': [
(r'^OBTW\b.*?\bTLDR\b', Comment.Multiline),
(r'\bBTW\b.*\n', Comment.Single),
(r'\b(NERFIN|YA\s+RLY|BUKKIT|IS\s+NOW\s+A|MEBBE|GIMMEH|TIL|UPPIN|MKAY|TROOF|INTA|YR|!|NUMBR|OMG|NUMBAR|IF\s+U\s+SAY\s+SO|YARN|VISIBLE|I\s+HAS\s+A|IM\s+OUTTA\s+YR|IM\s+IN\s+YR|A|HAI|NO\s+WAI|GTFO|AN|R|FOUND\s+YR|OMGWTF|FAIL|O\s+RLY?|WTF\?|NOOB|HOW\s+DUZ\s+I|WIN|MAEK|OIC|PUTZ|KTHXBYE|ITZ|WILE|AT)(\b|(?=\s))', Keyword),
(r'\b(NOT|LENGZ\s+OF|CHARZ\s+OF|ORDZ\s+OF|SUM\s+OF|DIFF\s+OF|PRODUKT\s+OF|QUOSHUNT\s+OF|MOD\s+OF|BIGGR\s+OF|SMALLR\s+OF|BOTH\s+OF|EITHER\s+OF|WON\s+OF|BOTH\s+SAEM|DIFFRINT|ALL\s+OF|ANY\s+OF|SMOOSH|N)\b', Operator.Word),
(r'"(?::(?:[)>o":]|\([\dA-Fa-f]+\)|\{[A-Za-z]\w*\}|\[[^\[\]]+\])|[^":])*"', Literal.String),
(r'-?(\d+|\d+\.\d*|\.\d+)', Literal.Number),
(r'[a-zA-Z]\w*', Name.Variable),
(r',', Punctuation),
(r'.', Text),
]
}
class BloopLexer(RegexLexer):
name = 'Bloop'
aliases = ['bloop']
filenames = ['*.bloop']
flags = re.IGNORECASE | re.DOTALL
tokens = {
'root': [
(r'/\*.*?\*/', Comment.Multiline),
(r"'[^']*'", Literal.String),
(r'-?\d+', Literal.Number),
(r'\b(DEFINE|PROCEDURE|BLOCK|LOOP|AT|MOST|TIMES|MU_LOOP|CELL|OUTPUT|YES|NO|QUIT|ABORT|IF|THEN|AND|OR|PRINT|BEGIN|END)(\b|(?=\s))', Keyword),
(r'[A-Z]\w*', Name),
(r'[+*!=<>(){}":;,.-\[\]]', Punctuation),
(r'.', Text),
]
}
class EmoticonLexerHelper(RegexLexer):
tokens = {
'root': [
(r'\*\*([^*]|\*[^*])*\*\*', Comment),
(r'\S+[OC<>\[\]VD@PQ7L#${}\\/()|3E*]((?=\s)|$)', Keyword),
(r'\S+', Literal.String),
(r'-?\d+', Literal.Number),
(r'.', Text),
]
}
class EmoticonLexer(EmoticonLexerHelper):
name = 'Emoticon'
aliases = ['emoticon']
filenames = ['*.emo']
def get_tokens_unprocessed(self, text):
for index, token, value in EmoticonLexerHelper.get_tokens_unprocessed(self, text):
if token is Keyword:
yield index, Name, value[:-2]
yield index + len(value) - 2, Operator, value[-2]
yield index + len(value) - 2, Keyword, value[-1]
else:
yield index, token, value
class KaffeineLexer(JavascriptLexer):
name = 'Kaffeine'
aliases = ['kaffeine']
filenames = ['*.k']
def get_tokens_unprocessed(self, text):
for index, token, value in JavascriptLexer.get_tokens_unprocessed(self, text):
if token is Error and value in ['#', '@']:
token_type = Name.Tag if value == '#' else Keyword
yield index, token_type, value
else:
yield index, token, value
class JavascriptNextLexer(JavascriptLexer):
name = 'Javascript.next'
aliases = ['javascript.next', 'traceur']
filenames = ['*.jsn']
EXTRA_KEYWORDS = ['let', 'yield']
def get_tokens_unprocessed(self, text):
for index, token, value in JavascriptLexer.get_tokens_unprocessed(self, text):
if token is Name.Other and value in self.EXTRA_KEYWORDS:
yield index, Keyword, value
else:
yield index, token, value
class MoveLexer(JavascriptLexer):
name = 'Move'
aliases = ['move']
filenames = ['*.mv']
class ForthLexer(FactorLexer):
name = 'Forth'
aliases = ['forth']
filenames = ['*.4th']
class RoyLexer(RegexLexer):
name = 'Roy'
aliases = ['roy']
filenames = ['*.roy']
tokens = {
'root': [
(r'//.*\n', Comment.Single),
(r'\b(true|false|let|fn|if|then|else|data|type|match|case|do|return|macro|with)\b', Keyword),
(r'-?\d+', Literal.Number),
(r'\"[^"]*\"', Literal.String),
(r'<-|->|=|==|!=|\*|\+\+|\\', Operator),
(r'.', Text)
]
}
class APLLexer(RegexLexer):
name = 'APL'
aliases = ['apl']
filenames = ['*.apl']
tokens = {
'root': [
(r'.+', Text)
]
}
def getLexer(lexer_name):
lexers = [value for name, value in globals().items()
if name.endswith('Lexer') and hasattr(value, 'aliases')]
for lexer in lexers:
if lexer_name in lexer.aliases:
return lexer()
return get_lexer_by_name(lexer_name)
def main():
if len(sys.argv) == 2:
lexer = getLexer(sys.argv[1])
if lexer:
result = highlight(sys.stdin.read().decode('utf8'), lexer, HtmlFormatter())
result = result.replace('<div class="highlight"><pre>', '')
result = result.replace('</pre></div>', '')
print result.strip().encode('utf8')
else:
print 'Unknown language:', sys.argv[1]
else:
print 'Usage: pyg.py language < code.txt'
if __name__ == '__main__':
main()
| false | true |
79015e4b2c3c10da985bf6c8d20f322ef87441e3 | 11,975 | py | Python | scrapereads/scrape.py | arthurdjn/scrape-goodreads | be0d81ef0be09955599a3fae43648e9945923ff1 | [
"MIT"
] | 3 | 2021-02-10T10:32:48.000Z | 2021-08-06T23:53:22.000Z | scrapereads/scrape.py | arthurdjn/scrape-goodreads | be0d81ef0be09955599a3fae43648e9945923ff1 | [
"MIT"
] | 1 | 2022-03-14T09:33:41.000Z | 2022-03-14T09:33:41.000Z | scrapereads/scrape.py | arthurdjn/scrape-goodreads | be0d81ef0be09955599a3fae43648e9945923ff1 | [
"MIT"
] | null | null | null | """
Scrape quotes, books and authors from ``Good Reads`` website.
"""
import bs4
from .utils import *
def get_author_name(soup):
    """Extract the author's display name from a Goodreads author page.

    Args:
        soup (bs4.element.Tag): parsed author page.

    Returns:
        str: the author's name.
    """
    heading = soup.find('h1', attrs={'class': 'authorName'})
    return heading.find('span').text
def get_author_desc(soup):
    """Return the author's biography text from an author page.

    ``<br>`` tags become newlines; other child tags contribute their text.

    Args:
        soup (bs4.element.Tag): parsed author page.

    Returns:
        str: biography text with curly apostrophes normalized.
    """
    about_div = soup.find('div', attrs={'class': 'aboutAuthorInfo'})
    bio_span = about_div.findAll('span')[-1]
    pieces = []
    for node in bio_span.children:
        if not isinstance(node, bs4.element.Tag):
            pieces.append(node)
        elif node.name == 'br':
            pieces.append('\n')
        else:
            pieces.append(node.text)
    return ''.join(pieces).replace('’', "'")
def get_author_info(soup):
    """Get all information from an author (genres, influences, website etc.).

    Walks the sibling nodes after the ``<br class="clear">`` marker in the
    page's right-hand container. Each ``dataTitle`` div opens a new section
    whose following sibling(s) hold the values; the loop's cursor
    (``data_div``) is advanced inside the branches that consume extra nodes.

    Args:
        soup (bs4.element.Tag): author page connection.

    Returns:
        dict: section title (e.g. 'Born', 'Influences') -> list of strings,
        plus a 'Description' key holding the biography text.
    """
    container = soup.find('div', attrs={'class': 'rightContainer'})
    author_info = {}
    data_div = container.find('br', attrs={'class': 'clear'})
    while data_div:
        if data_div.name:
            data_class = data_div.get('class')[0]
            # Information section is finished
            if data_class == 'aboutAuthorInfo':
                break
            # Key elements
            elif data_class == 'dataTitle':
                key = data_div.text.strip()
                author_info[key] = []
                # Born section: value is the raw text node right after the title
                if data_div.text == 'Born':
                    data_div = data_div.next_sibling
                    author_info[key].append(data_div.strip())
                # Influences section: names live in <a> tags two siblings ahead
                elif data_div.text == 'Influences':
                    data_div = data_div.next_sibling.next_sibling
                    data_items = data_div.findAll('span')[-1].findAll('a')
                    for data_a in data_items:
                        author_info[key].append(data_a.text.strip())
                # Member since section
                elif data_div.text == 'Member Since':
                    data_div = data_div.next_sibling.next_sibling
                    author_info[key].append(data_div.text.strip())
                # Genre, website and other sections
                else:
                    data_items = data_div.findAll('a')
                    for data_a in data_items:
                        author_info[key].append(data_a.text.strip())
        data_div = data_div.next_sibling
    author_info.update({'Description': get_author_desc(soup)})
    return author_info
def scrape_quotes_container(soup):
    """Get the quote containers from a quote page.

    Args:
        soup (bs4.element.Tag): connection to the quote page.

    Returns:
        bs4.element.ResultSet: every ``<div class="quotes">`` on the page.
    """
    return soup.findAll('div', attrs={'class': 'quotes'})
def scrape_quotes(soup):
    """Yield every ``<div class="quote">`` element from a quote page.

    Args:
        soup (bs4.element.Tag): connection to the quote page.

    Yields:
        bs4.element.Tag: one quote ``<div>`` at a time.
    """
    for container in scrape_quotes_container(soup):
        node = container.find('div', attrs={'class': 'quote'})
        while node:
            is_quote = (node.name == 'div'
                        and node.get('class')
                        and 'quote' in node.get('class'))
            if is_quote:
                yield node
            node = node.next_sibling
def get_quote_text(quote_div):
    """Extract the plain text of a quote from its ``<div>`` element.

    ``<br>`` children become newlines; bare text nodes are stripped.

    Args:
        quote_div (bs4.element.Tag): ``<div>`` quote element.

    Returns:
        str: cleaned quote text (via process_quote_text).
    """
    chunks = []
    for node in quote_div.find('div', attrs={'class': 'quoteText'}).children:
        if node.name == 'br':
            chunks.append('\n')
        elif not node.name:
            chunks.append(node.strip())
    return process_quote_text(''.join(chunks))
def scrape_quote_tags(quote_div):
    """Yield the tag ``<a>`` elements attached to a quote.

    Args:
        quote_div (bs4.element.Tag): ``<div>`` quote element from a quote page.

    Yields:
        bs4.element.Tag: one ``<a>`` element per tag; nothing when the
        quote has no tag container.
    """
    container = quote_div.find('div', attrs={'class': 'greyText smallText left'})
    if not container:
        return
    for node in container.children:
        if node.name == 'a':
            yield node
def get_quote_book(quote_div):
    """Get the reference (book) from a ``<div>`` quote element.

    Args:
        quote_div (bs4.element.Tag): ``<div>`` quote element from a quote page.

    Returns:
        bs4.element.Tag: the ``<a class="authorOrTitle">`` element, or None
        when the quote carries no book reference.
    """
    quote_details = quote_div.find('div', attrs={'class': 'quoteText'})
    return quote_details.find('a', attrs={'class': 'authorOrTitle'})
def get_quote_author_name(quote_div):
    """Return the quote author's name, punctuation-stripped and title-cased.

    Args:
        quote_div (bs4.element.Tag): ``<div>`` quote element from a quote page.

    Returns:
        str
    """
    text_div = quote_div.find('div', attrs={'class': 'quoteText '})
    raw_name = text_div.find('span', attrs={'class': 'authorOrTitle'}).text
    return remove_punctuation(raw_name).title()
def get_quote_likes(quote_div):
    """Get the likes ``<a>`` tag from a ``<div>`` quote element.

    Args:
        quote_div (bs4.element.Tag): ``<div>`` quote element from a quote page.

    Returns:
        bs4.element.Tag: ``<a>`` tag for likes; its ``href`` ends with the
        quote's ``<id>-<name>`` slug.
    """
    quote_footer = quote_div.find('div', attrs={'class': 'quoteFooter'})
    return quote_footer.find('a', attrs={'class': 'smallText'})
# TODO: deprecate this
def get_quote_name_id(quote_div):
    """Get the id and name of a ``<div>`` quote element.

    Both are parsed from the likes link ``href``, whose last path segment
    has the form ``<id>-<name>``.

    Args:
        quote_div (bs4.element.Tag): ``<div>`` quote element from a quote page.

    Returns:
        tuple: (quote id, quote name).
    """
    href = get_quote_likes(quote_div).get('href')
    slug = href.rsplit('/', 1)[-1]
    quote_id, _, quote_name = slug.partition('-')
    return quote_id, quote_name
def scrape_author_books(soup):
    """Yield each ``<tr>`` book row from an author's books page.

    Args:
        soup (bs4.element.Tag): connection to an author books page.

    Yields:
        bs4.element.Tag: one ``<tr>`` element per book.
    """
    row = soup.find('tr')
    while row:
        if row.name == 'tr':
            yield row
        row = row.next_sibling
def get_author_book_title(book_tr):
    """Return the book-title anchor of an author-page book row.

    Args:
        book_tr (bs4.element.Tag): ``<tr>`` book element.

    Returns:
        bs4.element.Tag: ``<a class="bookTitle">`` whose text is the title and
        whose ``href`` points at the book page (e.g. ``/book/show/6514.The_Bell_Jar``).
    """
    anchor = book_tr.find('a', attrs={'class': 'bookTitle'})
    return anchor
def get_author_book_author(book_tr):
    """Return the author anchor of an author-page book row.

    Args:
        book_tr (bs4.element.Tag): ``<tr>`` book element.

    Returns:
        bs4.element.Tag: ``<a class="authorName">`` whose text is the author's
        name and whose ``href`` is the author page URL.
    """
    anchor = book_tr.find('a', attrs={'class': 'authorName'})
    return anchor
def get_author_book_ratings(book_tr):
    """Return the ratings ``<span class="minirating">`` of a book row.

    Args:
        book_tr (bs4.element.Tag): ``<tr>`` book element.

    Returns:
        bs4.element.Tag: span whose last content reads like
        ``4.55 avg rating — 2,414 ratings``.
    """
    rating_span = book_tr.find('span', attrs={'class': 'minirating'})
    return rating_span
def get_author_book_edition(book_tr):
    """Return the editions link of a book row, or None when absent.

    Args:
        book_tr (bs4.element.Tag): ``<tr>`` book element.

    Returns:
        bs4.element.Tag: grey ``<a>`` element pointing at the editions page
        (e.g. ``/work/editions/1385044-the-bell-jar``), or None.
    """
    details = book_tr.find('span', attrs={'class': 'greyText smallText uitext'})
    return details.find('a', attrs={'class': 'greyText'})
def get_author_book_date(book_tr):
    """Get the publication year from a table ``<tr>`` element of an author page.

    Args:
        book_tr (bs4.element.Tag): ``<tr>`` book element.

    Returns:
        int or None: year of publication, or None when the row shows no date.
    """
    book_details = book_tr.find('span', attrs={'class': 'greyText smallText uitext'})
    book_publish = book_details.contents[-1].replace('—', '').replace('\n', '')
    book_date = book_publish.replace('published', '').strip()
    # int() instead of eval(): eval() on scraped, untrusted page text is a
    # code-injection risk, and int() is the correct parse for a year anyway.
    book_date = int(book_date) if book_date else None
    return book_date
def get_book_quote_page(soup):
    """Find the ``<a>`` element pointing to the quote page of a book.

    Args:
        soup (bs4.element.Tag): parsed book page.

    Returns:
        bs4.element.Tag or None: the link inside the last big box, or None
        when the page has no such section.
    """
    boxes = soup.findAll('div', attrs={'class': ' clearFloats bigBox'})
    if not boxes:
        return None
    return boxes[-1].find('a')
| 30.470738 | 108 | 0.600585 |
import bs4
from .utils import *
def get_author_name(soup):
author_h1 = soup.find('h1', attrs={'class': 'authorName'})
return author_h1.find('span').text
def get_author_desc(soup):
author_info_desc = soup.find('div', attrs={'class': 'aboutAuthorInfo'})
author_info_long = author_info_desc.findAll('span')[-1]
long_desc = ""
for sentence in author_info_long.children:
if isinstance(sentence, bs4.element.Tag):
if sentence.name == 'br':
long_desc += '\n'
else:
long_desc += sentence.text
else:
long_desc += sentence
long_desc = long_desc.replace('’', "'")
return long_desc
def get_author_info(soup):
container = soup.find('div', attrs={'class': 'rightContainer'})
author_info = {}
data_div = container.find('br', attrs={'class': 'clear'})
while data_div:
if data_div.name:
data_class = data_div.get('class')[0]
# Information section is finished
if data_class == 'aboutAuthorInfo':
break
# Key elements
elif data_class == 'dataTitle':
key = data_div.text.strip()
author_info[key] = []
# Born section
if data_div.text == 'Born':
data_div = data_div.next_sibling
author_info[key].append(data_div.strip())
# Influences section
elif data_div.text == 'Influences':
data_div = data_div.next_sibling.next_sibling
data_items = data_div.findAll('span')[-1].findAll('a')
for data_a in data_items:
author_info[key].append(data_a.text.strip())
# Member since section
elif data_div.text == 'Member Since':
data_div = data_div.next_sibling.next_sibling
author_info[key].append(data_div.text.strip())
# Genre, website and other sections
else:
data_items = data_div.findAll('a')
for data_a in data_items:
author_info[key].append(data_a.text.strip())
data_div = data_div.next_sibling
author_info.update({'Description': get_author_desc(soup)})
return author_info
def scrape_quotes_container(soup):
return soup.findAll('div', attrs={'class': 'quotes'})
def scrape_quotes(soup):
for container_div in scrape_quotes_container(soup):
quote_div = container_div.find('div', attrs={'class': 'quote'})
while quote_div:
if quote_div.name == 'div' and quote_div.get('class') and 'quote' in quote_div.get('class'):
yield quote_div
quote_div = quote_div.next_sibling
def get_quote_text(quote_div):
quote_text = ''
text_iterator = quote_div.find('div', attrs={'class': 'quoteText'}).children
for text in text_iterator:
if text.name == 'br':
quote_text += '\n'
elif not text.name:
quote_text += text.strip()
quote_text = process_quote_text(quote_text)
return quote_text
def scrape_quote_tags(quote_div):
tags_container = quote_div.find('div', attrs={'class': 'greyText smallText left'})
if tags_container:
for tag in tags_container.children:
if tag.name == 'a':
yield tag
return None
def get_quote_book(quote_div):
quote_details = quote_div.find('div', attrs={'class': 'quoteText'})
return quote_details.find('a', attrs={'class': 'authorOrTitle'})
def get_quote_author_name(quote_div):
quote_text = quote_div.find('div', attrs={'class': 'quoteText '})
author_name = quote_text.find('span', attrs={'class': 'authorOrTitle'}).text
return remove_punctuation(author_name).title()
def get_quote_likes(quote_div):
quote_footer = quote_div.find('div', attrs={'class': 'quoteFooter'})
return quote_footer.find('a', attrs={'class': 'smallText'})
# TODO: deprecate this
def get_quote_name_id(quote_div):
quote_href = get_quote_likes(quote_div).get('href')
quote_id = quote_href.split('/')[-1].split('-')[0]
quote_name = '-'.join(quote_href.split('/')[-1].split('-')[1:])
return quote_id, quote_name
def scrape_author_books(soup):
table_tr = soup.find('tr')
while table_tr:
if table_tr.name == 'tr':
yield table_tr
table_tr = table_tr.next_sibling
def get_author_book_title(book_tr):
return book_tr.find('a', attrs={'class': 'bookTitle'})
def get_author_book_author(book_tr):
return book_tr.find('a', attrs={'class': 'authorName'})
def get_author_book_ratings(book_tr):
return book_tr.find('span', attrs={'class': 'minirating'})
def get_author_book_edition(book_tr):
book_details = book_tr.find('span', attrs={'class': 'greyText smallText uitext'})
return book_details.find('a', attrs={'class': 'greyText'})
def get_author_book_date(book_tr):
book_details = book_tr.find('span', attrs={'class': 'greyText smallText uitext'})
book_publish = book_details.contents[-1].replace('—', '').replace('\n', '')
book_date = book_publish.replace('published', '').strip()
book_date = eval(book_date) if book_date != '' else None
return book_date
def get_book_quote_page(soup):
quote_div = soup.findAll('div', attrs={'class': ' clearFloats bigBox'})
if quote_div:
return quote_div[-1].find('a')
return None
| true | true |
79015e5f22aadae4239d79808c59dfde92ba7d2e | 4,427 | py | Python | tupelo/xmlrpc.py | jait/tupelo | fd6fd6c5c246c1ba3d2496a86cab087284c92af1 | [
"BSD-3-Clause"
] | null | null | null | tupelo/xmlrpc.py | jait/tupelo | fd6fd6c5c246c1ba3d2496a86cab087284c92af1 | [
"BSD-3-Clause"
] | null | null | null | tupelo/xmlrpc.py | jait/tupelo | fd6fd6c5c246c1ba3d2496a86cab087284c92af1 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# vim: set sts=4 sw=4 et:
import time
import xmlrpc.client
from . import players
from . import rpc
from .common import GameState, CardSet, GameError, RuleError, ProtocolError, simple_decorator
from .events import EventList, CardPlayedEvent, MessageEvent, TrickPlayedEvent, TurnEvent, StateChangedEvent
@simple_decorator
def error2fault(func):
    """
    Translate known game exceptions raised by *func* into
    XML-RPC faults carrying the matching rpc_code.
    """
    def wrapper(*args):
        try:
            return func(*args)
        except GameError as exc:
            raise xmlrpc.client.Fault(GameError.rpc_code, str(exc))
        except RuleError as exc:
            raise xmlrpc.client.Fault(RuleError.rpc_code, str(exc))
        except ProtocolError as exc:
            raise xmlrpc.client.Fault(ProtocolError.rpc_code, str(exc))
    return wrapper
@simple_decorator
def fault2error(func):
    """
    Translate known XML-RPC faults raised by *func* back into the
    custom exception whose rpc_code matches the fault code.
    """
    def wrapper(*args):
        try:
            return func(*args)
        except xmlrpc.client.Fault as fault:
            for klass in (GameError, RuleError, ProtocolError):
                if fault.faultCode == klass.rpc_code:
                    raise klass(fault.faultString)
            raise
    return wrapper
class XMLRPCCliPlayer(players.CliPlayer):
    """
    XML-RPC command line interface human player.

    Keeps a local mirror of the server-side game state and hand, refreshed
    from events pulled through the proxy controller.
    """
    def __init__(self, player_name):
        players.CliPlayer.__init__(self, player_name)
        self.game_state = GameState()  # local copy of the server's game state
        self.hand = None  # refreshed from the server when it becomes our turn
    def handle_event(self, event):
        """Dispatch one server event to the matching CliPlayer callback.

        NOTE(review): isinstance chain -- dispatch order matters if these
        event classes subclass one another; confirm against the events module.
        """
        if isinstance(event, CardPlayedEvent):
            self.card_played(event.player, event.card, event.game_state)
        elif isinstance(event, MessageEvent):
            self.send_message(event.sender, event.message)
        elif isinstance(event, TrickPlayedEvent):
            self.trick_played(event.player, event.game_state)
        elif isinstance(event, TurnEvent):
            self.game_state.update(event.game_state)
            # our turn: pull the authoritative state, including our hand
            state = self.controller.get_state(self.id)
            self.hand = state['hand']
            self.game_state.update(state['game_state'])
        elif isinstance(event, StateChangedEvent):
            self.game_state.update(event.game_state)
        else:
            print("unknown event: %s" % event)
    def wait_for_turn(self):
        """
        Wait for this player's turn.

        Polls the controller twice a second, handling any queued events,
        and returns once the mirrored state says it is this player's turn.
        """
        while True:
            time.sleep(0.5)  # poll interval
            if self.controller is not None:
                events = self.controller.get_events(self.id)
                for event in events:
                    self.handle_event(event)
            if self.game_state.turn_id == self.id:
                break
class XMLRPCProxyController():
    """
    Client-side proxy object for the server/GameController.

    Wraps the XML-RPC connection; @fault2error converts server faults
    back into the library's exception types.
    """
    def __init__(self, server_uri):
        """Connect to *server_uri*, prepending ``http://`` when no scheme is given."""
        super(XMLRPCProxyController, self).__init__()
        # str.startswith accepts a tuple of prefixes; one call replaces the
        # original two-clause boolean.
        if not server_uri.startswith(('http://', 'https://')):
            server_uri = 'http://' + server_uri
        self.server = xmlrpc.client.ServerProxy(server_uri)
        self.game_id = None  # set by create_game()
        self.akey = None  # authentication key, set by register_player()
    @fault2error
    def play_card(self, _player, card):
        """Play *card* in the current game on behalf of the registered player."""
        self.server.game.play_card(self.akey, self.game_id, rpc.rpc_encode(card))
    @fault2error
    def get_events(self, _player_id):
        """Fetch and decode the pending events for the registered player."""
        return rpc.rpc_decode(EventList, self.server.get_events(self.akey))
    @fault2error
    def get_state(self, _player_id):
        """Return the server game state with 'game_state' and 'hand' decoded."""
        state = self.server.game.get_state(self.akey, self.game_id)
        state['game_state'] = rpc.rpc_decode(GameState, state['game_state'])
        state['hand'] = rpc.rpc_decode(CardSet, state['hand'])
        return state
    @fault2error
    def player_quit(self, _player_id):
        """Tell the server the registered player is leaving."""
        self.server.player.quit(self.akey)
    @fault2error
    def register_player(self, player):
        """Register *player* with the server; store its id and our auth key."""
        player.controller = self
        plr_data = self.server.player.register(rpc.rpc_encode(player))
        player.id = plr_data['id']
        self.akey = plr_data['akey']
    @fault2error
    def start_game_with_bots(self):
        """Ask the server to start the current game, filling seats with bots."""
        return self.server.game.start_with_bots(self.akey, self.game_id)
    @fault2error
    def create_game(self):
        """Create a new game on the server and remember its id."""
        self.game_id = self.server.game.create(self.akey)
        return self.game_id
| 31.621429 | 108 | 0.63813 |
import time
import xmlrpc.client
from . import players
from . import rpc
from .common import GameState, CardSet, GameError, RuleError, ProtocolError, simple_decorator
from .events import EventList, CardPlayedEvent, MessageEvent, TrickPlayedEvent, TurnEvent, StateChangedEvent
@simple_decorator
def error2fault(func):
def catcher(*args):
try:
return func(*args)
except GameError as error:
raise xmlrpc.client.Fault(GameError.rpc_code, str(error))
except RuleError as error:
raise xmlrpc.client.Fault(RuleError.rpc_code, str(error))
except ProtocolError as error:
raise xmlrpc.client.Fault(ProtocolError.rpc_code, str(error))
return catcher
@simple_decorator
def fault2error(func):
def catcher(*args):
try:
return func(*args)
except xmlrpc.client.Fault as error:
error_classes = (GameError, RuleError, ProtocolError)
for klass in error_classes:
if error.faultCode == klass.rpc_code:
raise klass(error.faultString)
raise error
return catcher
class XMLRPCCliPlayer(players.CliPlayer):
def __init__(self, player_name):
players.CliPlayer.__init__(self, player_name)
self.game_state = GameState()
self.hand = None
def handle_event(self, event):
if isinstance(event, CardPlayedEvent):
self.card_played(event.player, event.card, event.game_state)
elif isinstance(event, MessageEvent):
self.send_message(event.sender, event.message)
elif isinstance(event, TrickPlayedEvent):
self.trick_played(event.player, event.game_state)
elif isinstance(event, TurnEvent):
self.game_state.update(event.game_state)
state = self.controller.get_state(self.id)
self.hand = state['hand']
self.game_state.update(state['game_state'])
elif isinstance(event, StateChangedEvent):
self.game_state.update(event.game_state)
else:
print("unknown event: %s" % event)
def wait_for_turn(self):
while True:
time.sleep(0.5)
if self.controller is not None:
events = self.controller.get_events(self.id)
for event in events:
self.handle_event(event)
if self.game_state.turn_id == self.id:
break
class XMLRPCProxyController():
def __init__(self, server_uri):
super(XMLRPCProxyController, self).__init__()
if not server_uri.startswith('http://') and \
not server_uri.startswith('https://'):
server_uri = 'http://' + server_uri
self.server = xmlrpc.client.ServerProxy(server_uri)
self.game_id = None
self.akey = None
@fault2error
def play_card(self, _player, card):
self.server.game.play_card(self.akey, self.game_id, rpc.rpc_encode(card))
@fault2error
def get_events(self, _player_id):
return rpc.rpc_decode(EventList, self.server.get_events(self.akey))
@fault2error
def get_state(self, _player_id):
state = self.server.game.get_state(self.akey, self.game_id)
state['game_state'] = rpc.rpc_decode(GameState, state['game_state'])
state['hand'] = rpc.rpc_decode(CardSet, state['hand'])
return state
@fault2error
def player_quit(self, _player_id):
self.server.player.quit(self.akey)
@fault2error
def register_player(self, player):
player.controller = self
plr_data = self.server.player.register(rpc.rpc_encode(player))
player.id = plr_data['id']
self.akey = plr_data['akey']
@fault2error
def start_game_with_bots(self):
return self.server.game.start_with_bots(self.akey, self.game_id)
@fault2error
def create_game(self):
self.game_id = self.server.game.create(self.akey)
return self.game_id
| true | true |
79015f55f04aee062f85e9234f10a50e4771d3b4 | 178 | py | Python | files/judgeFileType.py | sdyz5210/python | 78f9999f94d92d9ca7fde6f18acec7d3abd422ef | [
"BSD-3-Clause"
] | null | null | null | files/judgeFileType.py | sdyz5210/python | 78f9999f94d92d9ca7fde6f18acec7d3abd422ef | [
"BSD-3-Clause"
] | null | null | null | files/judgeFileType.py | sdyz5210/python | 78f9999f94d92d9ca7fde6f18acec7d3abd422ef | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/evn python
# -*- coding: utf-8 -*-
# python version 2.7.6
import magic
mime = magic.Magic(mime=True)  # mime=True: from_file() returns a MIME type string (python-magic API) -- TODO confirm against installed magic version
print mime.from_file("/Users/mac/Documents/data/fastq/8.fastq") | 19.777778 | 63 | 0.696629 |
import magic
mime = magic.Magic(mime=True)
print mime.from_file("/Users/mac/Documents/data/fastq/8.fastq") | false | true |
79016139e007b58d28b563bde3c2e6e9f8f7d9f4 | 838 | py | Python | setup.py | ebelter/upserve-analysis | b33a63eed4caa07ec282995de66e8821d38e7199 | [
"MIT"
] | null | null | null | setup.py | ebelter/upserve-analysis | b33a63eed4caa07ec282995de66e8821d38e7199 | [
"MIT"
] | null | null | null | setup.py | ebelter/upserve-analysis | b33a63eed4caa07ec282995de66e8821d38e7199 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from setuptools import setup, find_packages

# Long description shown on package indexes.
with open('README.md') as f:
    readme = f.read()

# Full license text; named license_text so the builtin ``license`` is not shadowed.
with open('LICENSE') as f:
    license_text = f.read()

# VERSION.py defines __version__; exec() keeps a single source of truth for it.
with open('VERSION.py') as f:
    exec(f.read())

setup(
    # NOTE(review): 'upsere' looks like a typo for 'upserve' -- confirm before
    # changing, since the published name is part of the package identity.
    name='upsere analysis',
    version=__version__,
    description='10X Genomics CLI',
    long_description=readme,
    author='Eddie Belter',
    author_email='ebetler@gmail.com',
    license=license_text,
    url='https://github.com/ebelter/upserve-analysis.git',
    install_requires=[
        'click==7.0',
        'pyyaml==5.1',
        'Jinja2>=2.10.1',
    ],
    entry_points='''
        [console_scripts]
        upserve=upserve.cli:cli
    ''',
    setup_requires=["pytest-runner"],
    tests_require=["pytest"],
    packages=find_packages(exclude=('tests', 'docs')),
    include_package_data=True,
)
| 22.648649 | 58 | 0.619332 |
from setuptools import setup, find_packages
with open('README.md') as f:
readme = f.read()
with open('LICENSE') as f:
license = f.read()
with open('VERSION.py') as f:
exec(f.read())
setup(
name='upsere analysis',
version=__version__,
description='10X Genomics CLI',
long_description=readme,
author='Eddie Belter',
author_email='ebetler@gmail.com',
license=license,
url='https://github.com/ebelter/upserve-analysis.git',
install_requires=[
'click==7.0',
'pyyaml==5.1',
'Jinja2>=2.10.1',
],
entry_points='''
[console_scripts]
upserve=upserve.cli:cli
''',
setup_requires=["pytest-runner"],
tests_require=["pytest"],
packages=find_packages(exclude=('tests', 'docs')),
include_package_data=True,
)
| true | true |
79016186edc6e6b1716101da45b246290da87e2b | 4,709 | py | Python | ichnaea/data/tests/test_monitor.py | BBOXX/ichnaea | 15362d5b4d2a45d28cdf4864a89c9d3fa62b8c28 | [
"Apache-2.0"
] | 1 | 2018-01-18T16:02:43.000Z | 2018-01-18T16:02:43.000Z | ichnaea/data/tests/test_monitor.py | BBOXX/ichnaea | 15362d5b4d2a45d28cdf4864a89c9d3fa62b8c28 | [
"Apache-2.0"
] | null | null | null | ichnaea/data/tests/test_monitor.py | BBOXX/ichnaea | 15362d5b4d2a45d28cdf4864a89c9d3fa62b8c28 | [
"Apache-2.0"
] | 1 | 2018-01-19T17:56:48.000Z | 2018-01-19T17:56:48.000Z | from datetime import timedelta
from random import randint
from ichnaea.data.tasks import (
monitor_api_key_limits,
monitor_api_users,
monitor_queue_size,
)
from ichnaea import util
class TestMonitor(object):
    """Tests for the monitoring celery tasks: API rate-limit and queue gauges."""
    def test_monitor_api_keys_empty(self, celery, stats):
        # No rate-limit counters in Redis: the task still emits a zeroed gauge.
        monitor_api_key_limits.delay().get()
        stats.check(gauge=[('api.limit', 0)])
    def test_monitor_api_keys_one(self, celery, redis, stats):
        # Seed one per-day counter; key format: apilimit:<api_key>:<path>:<yyyymmdd>.
        today = util.utcnow().strftime('%Y%m%d')
        rate_key = 'apilimit:no_key_1:v1.geolocate:' + today
        redis.incr(rate_key, 13)
        monitor_api_key_limits.delay().get()
        # One gauge per (api key, path) pair seen today.
        stats.check(gauge=[
            ('api.limit', ['key:no_key_1', 'path:v1.geolocate']),
        ])
    def test_monitor_api_keys_multiple(self, celery, redis, stats):
        now = util.utcnow()
        today = now.strftime('%Y%m%d')
        yesterday = (now - timedelta(hours=24)).strftime('%Y%m%d')
        # Counter values per api key and path.
        data = {
            'test': {'v1.search': 11, 'v1.geolocate': 13},
            'no_key_1': {'v1.search': 12},
            'no_key_2': {'v1.geolocate': 15},
        }
        for key, paths in data.items():
            for path, value in paths.items():
                # Seed today's and yesterday's counters; presumably only
                # today's feed the gauges checked below -- TODO confirm.
                rate_key = 'apilimit:%s:%s:%s' % (key, path, today)
                redis.incr(rate_key, value)
                rate_key = 'apilimit:%s:%s:%s' % (key, path, yesterday)
                redis.incr(rate_key, value - 10)
        # add some other items into Redis, which should not show up as gauges
        redis.lpush('default', 1, 2)
        redis.set('cache_something', '{}')
        monitor_api_key_limits.delay().get()
        stats.check(gauge=[
            ('api.limit', ['key:test', 'path:v1.geolocate']),
            ('api.limit', ['key:test', 'path:v1.search']),
            ('api.limit', ['key:no_key_1', 'path:v1.search']),
            ('api.limit', ['key:no_key_2', 'path:v1.geolocate']),
        ])
    def test_monitor_queue_size(self, celery, redis, stats):
        # Fixed sizes for the export queues, random sizes for celery's queues.
        data = {
            'export_queue_internal': 3,
            'export_queue_backup:abcd-ef-1234': 7,
        }
        for name in celery.all_queues:
            data[name] = randint(1, 10)
        for k, v in data.items():
            redis.lpush(k, *range(v))
        monitor_queue_size.delay().get()
        # Each queue's length is reported as its own tagged gauge.
        stats.check(
            gauge=[('queue', 1, v, ['queue:' + k]) for k, v in data.items()])
class TestMonitorAPIUsers(object):
    """Tests for the unique-API-user monitoring task (Redis pfadd/HyperLogLog)."""
    @property
    def today(self):
        return util.utcnow().date()
    @property
    def today_str(self):
        # Date format used inside the apiuser:* Redis key names.
        return self.today.strftime('%Y-%m-%d')
    def test_empty(self, celery, stats):
        # No apiuser keys present: both gauges report zero users.
        monitor_api_users.delay().get()
        stats.check(gauge=[('submit.user', 0), ('locate.user', 0)])
    def test_one_day(self, celery, geoip_data, redis, stats):
        bhutan_ip = geoip_data['Bhutan']['ip']
        london_ip = geoip_data['London']['ip']
        # pfadd inserts IPs into HyperLogLog keys named
        # apiuser:<action>:<api_key>:<date>.
        redis.pfadd(
            'apiuser:submit:test:' + self.today_str, bhutan_ip, london_ip)
        redis.pfadd(
            'apiuser:submit:valid_key:' + self.today_str, bhutan_ip)
        redis.pfadd(
            'apiuser:locate:valid_key:' + self.today_str, bhutan_ip)
        monitor_api_users.delay().get()
        # With a single day of data the 1d and 7d intervals must agree.
        stats.check(gauge=[
            ('submit.user', 1, 2, ['key:test', 'interval:1d']),
            ('submit.user', 1, 2, ['key:test', 'interval:7d']),
            ('submit.user', 1, 1, ['key:valid_key', 'interval:1d']),
            ('submit.user', 1, 1, ['key:valid_key', 'interval:7d']),
            ('locate.user', 1, 1, ['key:valid_key', 'interval:1d']),
            ('locate.user', 1, 1, ['key:valid_key', 'interval:7d']),
        ])
    def test_many_days(self, celery, geoip_data, redis, stats):
        bhutan_ip = geoip_data['Bhutan']['ip']
        london_ip = geoip_data['London']['ip']
        days_6 = (self.today - timedelta(days=6)).strftime('%Y-%m-%d')
        days_7 = (self.today - timedelta(days=7)).strftime('%Y-%m-%d')
        redis.pfadd(
            'apiuser:submit:test:' + self.today_str, '127.0.0.1', bhutan_ip)
        # add the same IPs + one new one again
        redis.pfadd(
            'apiuser:submit:test:' + days_6, '127.0.0.1', bhutan_ip, london_ip)
        # add one entry which is too old (outside the 7 day window)
        redis.pfadd(
            'apiuser:submit:test:' + days_7, bhutan_ip)
        monitor_api_users.delay().get()
        stats.check(gauge=[
            ('submit.user', 1, 2, ['key:test', 'interval:1d']),
            # we count unique IPs over the entire 7 day period,
            # so it's just 3 uniques
            ('submit.user', 1, 3, ['key:test', 'interval:7d']),
        ])
        # the too old key should have been removed by the task -- confirm
        assert not redis.exists('apiuser:submit:test:' + days_7)
| 36.223077 | 79 | 0.561478 | from datetime import timedelta
from random import randint
from ichnaea.data.tasks import (
monitor_api_key_limits,
monitor_api_users,
monitor_queue_size,
)
from ichnaea import util
class TestMonitor(object):
def test_monitor_api_keys_empty(self, celery, stats):
monitor_api_key_limits.delay().get()
stats.check(gauge=[('api.limit', 0)])
def test_monitor_api_keys_one(self, celery, redis, stats):
today = util.utcnow().strftime('%Y%m%d')
rate_key = 'apilimit:no_key_1:v1.geolocate:' + today
redis.incr(rate_key, 13)
monitor_api_key_limits.delay().get()
stats.check(gauge=[
('api.limit', ['key:no_key_1', 'path:v1.geolocate']),
])
def test_monitor_api_keys_multiple(self, celery, redis, stats):
now = util.utcnow()
today = now.strftime('%Y%m%d')
yesterday = (now - timedelta(hours=24)).strftime('%Y%m%d')
data = {
'test': {'v1.search': 11, 'v1.geolocate': 13},
'no_key_1': {'v1.search': 12},
'no_key_2': {'v1.geolocate': 15},
}
for key, paths in data.items():
for path, value in paths.items():
rate_key = 'apilimit:%s:%s:%s' % (key, path, today)
redis.incr(rate_key, value)
rate_key = 'apilimit:%s:%s:%s' % (key, path, yesterday)
redis.incr(rate_key, value - 10)
redis.lpush('default', 1, 2)
redis.set('cache_something', '{}')
monitor_api_key_limits.delay().get()
stats.check(gauge=[
('api.limit', ['key:test', 'path:v1.geolocate']),
('api.limit', ['key:test', 'path:v1.search']),
('api.limit', ['key:no_key_1', 'path:v1.search']),
('api.limit', ['key:no_key_2', 'path:v1.geolocate']),
])
def test_monitor_queue_size(self, celery, redis, stats):
data = {
'export_queue_internal': 3,
'export_queue_backup:abcd-ef-1234': 7,
}
for name in celery.all_queues:
data[name] = randint(1, 10)
for k, v in data.items():
redis.lpush(k, *range(v))
monitor_queue_size.delay().get()
stats.check(
gauge=[('queue', 1, v, ['queue:' + k]) for k, v in data.items()])
class TestMonitorAPIUsers(object):
@property
def today(self):
return util.utcnow().date()
@property
def today_str(self):
return self.today.strftime('%Y-%m-%d')
def test_empty(self, celery, stats):
monitor_api_users.delay().get()
stats.check(gauge=[('submit.user', 0), ('locate.user', 0)])
def test_one_day(self, celery, geoip_data, redis, stats):
bhutan_ip = geoip_data['Bhutan']['ip']
london_ip = geoip_data['London']['ip']
redis.pfadd(
'apiuser:submit:test:' + self.today_str, bhutan_ip, london_ip)
redis.pfadd(
'apiuser:submit:valid_key:' + self.today_str, bhutan_ip)
redis.pfadd(
'apiuser:locate:valid_key:' + self.today_str, bhutan_ip)
monitor_api_users.delay().get()
stats.check(gauge=[
('submit.user', 1, 2, ['key:test', 'interval:1d']),
('submit.user', 1, 2, ['key:test', 'interval:7d']),
('submit.user', 1, 1, ['key:valid_key', 'interval:1d']),
('submit.user', 1, 1, ['key:valid_key', 'interval:7d']),
('locate.user', 1, 1, ['key:valid_key', 'interval:1d']),
('locate.user', 1, 1, ['key:valid_key', 'interval:7d']),
])
def test_many_days(self, celery, geoip_data, redis, stats):
bhutan_ip = geoip_data['Bhutan']['ip']
london_ip = geoip_data['London']['ip']
days_6 = (self.today - timedelta(days=6)).strftime('%Y-%m-%d')
days_7 = (self.today - timedelta(days=7)).strftime('%Y-%m-%d')
redis.pfadd(
'apiuser:submit:test:' + self.today_str, '127.0.0.1', bhutan_ip)
redis.pfadd(
'apiuser:submit:test:' + days_6, '127.0.0.1', bhutan_ip, london_ip)
redis.pfadd(
'apiuser:submit:test:' + days_7, bhutan_ip)
monitor_api_users.delay().get()
stats.check(gauge=[
('submit.user', 1, 2, ['key:test', 'interval:1d']),
('submit.user', 1, 3, ['key:test', 'interval:7d']),
])
# the too old key was deleted manually
assert not redis.exists('apiuser:submit:test:' + days_7)
| true | true |
79016261c216d8295176431c2d08472c6cc3482f | 7,009 | py | Python | data/p3BR/R2/benchmark/startQiskit_QC292.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | data/p3BR/R2/benchmark/startQiskit_QC292.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | data/p3BR/R2/benchmark/startQiskit_QC292.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | # qubit number=3
# total number=60
import numpy as np
from qiskit import QuantumCircuit, execute, Aer, QuantumRegister, ClassicalRegister, transpile, BasicAer, IBMQ
from qiskit.visualization import plot_histogram
from typing import *
from pprint import pprint
from math import log2
from collections import Counter
from qiskit.test.mock import FakeVigo, FakeYorktown
kernel = 'circuit/bernstein'
def bitwise_xor(s: str, t: str) -> str:
    """XOR two bit strings position-wise and return the result reversed.

    Iterates over the length of *s*; keeps the original convention of
    reversing the per-position XOR results before joining them.
    """
    return "".join(str(int(s[i]) ^ int(t[i])) for i in reversed(range(len(s))))
def bitwise_dot(s: str, t: str) -> str:
    """Return the GF(2) inner product of two bit strings as "0" or "1"."""
    total = sum(int(s[i]) * int(t[i]) for i in range(len(s)))
    return str(total % 2)
def build_oracle(n: int, f: Callable[[str], str]) -> QuantumCircuit:
    """Build the oracle O_f as a circuit over n control qubits and one target.

    For every n-bit assignment with f(bits) == "1", the zero-valued control
    positions are X-conjugated around an n-controlled Toffoli ('noancilla'
    mode) so the gate fires exactly on that assignment.
    """
    controls = QuantumRegister(n, "ofc")
    target = QuantumRegister(1, "oft")
    oracle = QuantumCircuit(controls, target, name="Of")
    for assignment in range(2 ** n):
        bits = np.binary_repr(assignment, n)
        if f(bits) != "1":
            continue
        # Positions holding '0' must be flipped so the mct triggers on this
        # exact assignment, then flipped back afterwards.
        flipped = [idx for idx, bit in enumerate(bits) if bit == "0"]
        for idx in flipped:
            oracle.x(controls[idx])
        oracle.mct(controls, target[0], None, mode='noancilla')
        for idx in flipped:
            oracle.x(controls[idx])
    return oracle
def build_circuit(n: int, f: Callable[[str], str]) -> QuantumCircuit:
    """Assemble a (mutated) Bernstein-Vazirani circuit on n input qubits.

    The numbered gate run below (see the ``# number=NN`` tags) looks
    auto-generated; its exact order is preserved.  NOTE(review): ``b`` is
    computed but never used, and despite the trailing ``# measure`` comment
    no measurement is appended here -- the __main__ block calls
    ``measure_all()`` on the transpiled circuit instead.
    """
    # implement the Bernstein-Vazirani circuit
    zero = np.binary_repr(0, n)
    b = f(zero)
    # initial n + 1 bits
    input_qubit = QuantumRegister(n+1, "qc")
    classicals = ClassicalRegister(n, "qm")
    prog = QuantumCircuit(input_qubit, classicals)
    # inverse last one (can be omitted if using O_f^\pm)
    prog.x(input_qubit[n])
    # circuit begin
    prog.h(input_qubit[1]) # number=1
    prog.h(input_qubit[2]) # number=38
    prog.cz(input_qubit[0],input_qubit[2]) # number=39
    prog.h(input_qubit[2]) # number=40
    prog.cx(input_qubit[0],input_qubit[2]) # number=31
    prog.h(input_qubit[2]) # number=42
    prog.cz(input_qubit[0],input_qubit[2]) # number=43
    prog.h(input_qubit[2]) # number=44
    prog.h(input_qubit[2]) # number=48
    prog.cz(input_qubit[0],input_qubit[2]) # number=49
    prog.h(input_qubit[2]) # number=50
    prog.cx(input_qubit[0],input_qubit[2]) # number=54
    prog.x(input_qubit[2]) # number=55
    prog.h(input_qubit[2]) # number=57
    prog.cz(input_qubit[0],input_qubit[2]) # number=58
    prog.h(input_qubit[2]) # number=59
    prog.cx(input_qubit[0],input_qubit[2]) # number=47
    prog.cx(input_qubit[0],input_qubit[2]) # number=37
    prog.h(input_qubit[2]) # number=51
    prog.cz(input_qubit[0],input_qubit[2]) # number=52
    prog.h(input_qubit[2]) # number=53
    prog.h(input_qubit[2]) # number=25
    prog.cz(input_qubit[0],input_qubit[2]) # number=26
    prog.h(input_qubit[2]) # number=27
    prog.h(input_qubit[1]) # number=7
    prog.cz(input_qubit[2],input_qubit[1]) # number=8
    prog.rx(0.17592918860102857,input_qubit[2]) # number=34
    prog.rx(-0.3989822670059037,input_qubit[1]) # number=30
    prog.h(input_qubit[1]) # number=9
    prog.h(input_qubit[1]) # number=18
    prog.cz(input_qubit[2],input_qubit[1]) # number=19
    prog.h(input_qubit[1]) # number=20
    prog.y(input_qubit[1]) # number=14
    prog.h(input_qubit[1]) # number=22
    prog.cz(input_qubit[2],input_qubit[1]) # number=23
    prog.h(input_qubit[1]) # number=24
    prog.z(input_qubit[2]) # number=3
    prog.z(input_qubit[1]) # number=41
    prog.x(input_qubit[1]) # number=17
    prog.y(input_qubit[2]) # number=5
    prog.x(input_qubit[2]) # number=21
    # apply H to get superposition
    for i in range(n):
        prog.h(input_qubit[i])
    prog.h(input_qubit[n])
    prog.barrier()
    # apply oracle O_f
    oracle = build_oracle(n, f)
    prog.append(
        oracle.to_gate(),
        [input_qubit[i] for i in range(n)] + [input_qubit[n]])
    # apply H back (QFT on Z_2^n)
    for i in range(n):
        prog.h(input_qubit[i])
    prog.barrier()
    # measure
    return prog
def get_statevector(prog: QuantumCircuit) -> Any:
    """Simulate *prog* and return {"|bits>": amplitude} over all basis states."""
    backend = Aer.get_backend('statevector_simulator')
    amplitudes = execute(prog, backend).result().get_statevector()
    n_qubits = round(log2(len(amplitudes)))
    labeled = {}
    for index, amplitude in enumerate(amplitudes):
        labeled["|" + np.binary_repr(index, n_qubits) + ">"] = amplitude
    return labeled
def evaluate(backend_str: str, prog: QuantumCircuit, shots: int, b: str) -> Any:
    """Simulate *prog* on the named Aer backend and summarize the outcome.

    Returns a dict with the raw measurement counts, the ideal state vector,
    the most frequent measured bitstring reversed ("a"; presumably to undo
    Qiskit's bit ordering -- confirm), and *b* passed through unchanged.
    """
    state = get_statevector(prog)
    simulator = Aer.get_backend(backend_str)
    counts = execute(prog, simulator, shots=shots).result().get_counts()
    best, _freq = Counter(counts).most_common(1)[0]
    return {
        "measurements": counts,
        "quantum_state": state,
        "a": best[::-1],
        "b": b,
    }
def bernstein_test_1(rep: str):
    """011 . x + 1"""
    return bitwise_xor(bitwise_dot("011", rep), "1")
def bernstein_test_2(rep: str):
    """000 . x + 0"""
    return bitwise_xor(bitwise_dot("000", rep), "0")
def bernstein_test_3(rep: str):
    """111 . x + 1"""
    return bitwise_xor(bitwise_dot("111", rep), "1")
if __name__ == "__main__":
    # Hidden bit string a and constant b define the BV function
    # f(x) = (a . x) XOR b.
    n = 2
    a = "11"
    b = "1"
    f = lambda rep: \
        bitwise_xor(bitwise_dot(a, rep), b)
    prog = build_circuit(n, f)
    sample_shot =4000
    writefile = open("../data/startQiskit_QC292.csv", "w")
    # prog.draw('mpl', filename=(kernel + '.png'))
    # Requires stored IBM Quantum credentials.
    IBMQ.load_account()
    provider = IBMQ.get_provider(hub='ibm-q')
    provider.backends()
    backend = provider.get_backend("ibmq_5_yorktown")
    # NOTE(review): transpiled against the FakeYorktown model but executed on
    # the real ibmq_5_yorktown device -- confirm this is intended.
    circuit1 = transpile(prog, FakeYorktown())
    circuit1.h(qubit=2)
    circuit1.x(qubit=3)
    circuit1.measure_all()
    info = execute(circuit1,backend=backend, shots=sample_shot).result().get_counts()
    print(info, file=writefile)
    print("results end", file=writefile)
    print(circuit1.depth(), file=writefile)
    print(circuit1, file=writefile)
    # NOTE(review): writefile leaks if any call above raises; a `with` block
    # would be safer.
    writefile.close()
| 31.572072 | 140 | 0.637466 |
import numpy as np
from qiskit import QuantumCircuit, execute, Aer, QuantumRegister, ClassicalRegister, transpile, BasicAer, IBMQ
from qiskit.visualization import plot_histogram
from typing import *
from pprint import pprint
from math import log2
from collections import Counter
from qiskit.test.mock import FakeVigo, FakeYorktown
kernel = 'circuit/bernstein'
def bitwise_xor(s: str, t: str) -> str:
length = len(s)
res = []
for i in range(length):
res.append(str(int(s[i]) ^ int(t[i])))
return ''.join(res[::-1])
def bitwise_dot(s: str, t: str) -> str:
length = len(s)
res = 0
for i in range(length):
res += int(s[i]) * int(t[i])
return str(res % 2)
def build_oracle(n: int, f: Callable[[str], str]) -> QuantumCircuit:
controls = QuantumRegister(n, "ofc")
target = QuantumRegister(1, "oft")
oracle = QuantumCircuit(controls, target, name="Of")
for i in range(2 ** n):
rep = np.binary_repr(i, n)
if f(rep) == "1":
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
oracle.mct(controls, target[0], None, mode='noancilla')
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
return oracle
def build_circuit(n: int, f: Callable[[str], str]) -> QuantumCircuit:
zero = np.binary_repr(0, n)
b = f(zero)
input_qubit = QuantumRegister(n+1, "qc")
classicals = ClassicalRegister(n, "qm")
prog = QuantumCircuit(input_qubit, classicals)
prog.x(input_qubit[n])
prog.h(input_qubit[1])
prog.h(input_qubit[2])
prog.cz(input_qubit[0],input_qubit[2])
prog.h(input_qubit[2])
prog.cx(input_qubit[0],input_qubit[2])
prog.h(input_qubit[2])
prog.cz(input_qubit[0],input_qubit[2])
prog.h(input_qubit[2])
prog.h(input_qubit[2])
prog.cz(input_qubit[0],input_qubit[2])
prog.h(input_qubit[2])
prog.cx(input_qubit[0],input_qubit[2])
prog.x(input_qubit[2])
prog.h(input_qubit[2])
prog.cz(input_qubit[0],input_qubit[2])
prog.h(input_qubit[2])
prog.cx(input_qubit[0],input_qubit[2])
prog.cx(input_qubit[0],input_qubit[2])
prog.h(input_qubit[2])
prog.cz(input_qubit[0],input_qubit[2])
prog.h(input_qubit[2])
prog.h(input_qubit[2])
prog.cz(input_qubit[0],input_qubit[2])
prog.h(input_qubit[2])
prog.h(input_qubit[1])
prog.cz(input_qubit[2],input_qubit[1])
prog.rx(0.17592918860102857,input_qubit[2])
prog.rx(-0.3989822670059037,input_qubit[1])
prog.h(input_qubit[1])
prog.h(input_qubit[1])
prog.cz(input_qubit[2],input_qubit[1])
prog.h(input_qubit[1])
prog.y(input_qubit[1])
prog.h(input_qubit[1])
prog.cz(input_qubit[2],input_qubit[1])
prog.h(input_qubit[1])
prog.z(input_qubit[2])
prog.z(input_qubit[1])
prog.x(input_qubit[1])
prog.y(input_qubit[2])
prog.x(input_qubit[2])
for i in range(n):
prog.h(input_qubit[i])
prog.h(input_qubit[n])
prog.barrier()
oracle = build_oracle(n, f)
prog.append(
oracle.to_gate(),
[input_qubit[i] for i in range(n)] + [input_qubit[n]])
for i in range(n):
prog.h(input_qubit[i])
prog.barrier()
return prog
def get_statevector(prog: QuantumCircuit) -> Any:
state_backend = Aer.get_backend('statevector_simulator')
statevec = execute(prog, state_backend).result()
quantum_state = statevec.get_statevector()
qubits = round(log2(len(quantum_state)))
quantum_state = {
"|" + np.binary_repr(i, qubits) + ">": quantum_state[i]
for i in range(2 ** qubits)
}
return quantum_state
def evaluate(backend_str: str, prog: QuantumCircuit, shots: int, b: str) -> Any:
quantum_state = get_statevector(prog)
backend = Aer.get_backend(backend_str)
results = execute(prog, backend, shots=shots).result()
counts = results.get_counts()
a = Counter(counts).most_common(1)[0][0][::-1]
return {
"measurements": counts,
"quantum_state": quantum_state,
"a": a,
"b": b
}
def bernstein_test_1(rep: str):
a = "011"
b = "1"
return bitwise_xor(bitwise_dot(a, rep), b)
def bernstein_test_2(rep: str):
a = "000"
b = "0"
return bitwise_xor(bitwise_dot(a, rep), b)
def bernstein_test_3(rep: str):
a = "111"
b = "1"
return bitwise_xor(bitwise_dot(a, rep), b)
if __name__ == "__main__":
n = 2
a = "11"
b = "1"
f = lambda rep: \
bitwise_xor(bitwise_dot(a, rep), b)
prog = build_circuit(n, f)
sample_shot =4000
writefile = open("../data/startQiskit_QC292.csv", "w")
IBMQ.load_account()
provider = IBMQ.get_provider(hub='ibm-q')
provider.backends()
backend = provider.get_backend("ibmq_5_yorktown")
circuit1 = transpile(prog, FakeYorktown())
circuit1.h(qubit=2)
circuit1.x(qubit=3)
circuit1.measure_all()
info = execute(circuit1,backend=backend, shots=sample_shot).result().get_counts()
print(info, file=writefile)
print("results end", file=writefile)
print(circuit1.depth(), file=writefile)
print(circuit1, file=writefile)
writefile.close()
| true | true |
790162bf698c2529b1c8fdedeafe6193a2076e64 | 5,973 | py | Python | src/goodboy/types/numeric.py | andryunin/goodboy | 66ba803630e149d00bcb9c40a0aa0586c97ef48a | [
"MIT"
] | null | null | null | src/goodboy/types/numeric.py | andryunin/goodboy | 66ba803630e149d00bcb9c40a0aa0586c97ef48a | [
"MIT"
] | null | null | null | src/goodboy/types/numeric.py | andryunin/goodboy | 66ba803630e149d00bcb9c40a0aa0586c97ef48a | [
"MIT"
] | null | null | null | from __future__ import annotations
from abc import abstractmethod
from typing import Any, Generic, Optional, TypeVar
from goodboy.errors import Error
from goodboy.messages import DEFAULT_MESSAGES, MessageCollectionType, type_name
from goodboy.schema import Rule, SchemaWithUtils
N = TypeVar("N")
class NumericBase(Generic[N], SchemaWithUtils):
    """
    Abstract base class for Int/Float schemas, should not be used directly. Use
    :class:`Int` or :class:`Float` instead.

    :param allow_none: If true, value is allowed to be ``None``.
    :param messages: Override error messages.
    :param rules: Custom validation rules (each instance gets its own list).
    :param less_than: Accept only values less than option value.
    :param less_or_equal_to: Accept only values less than or equal to option
        value.
    :param greater_than: Accept only values greater than option value.
    :param greater_or_equal_to: Accept only values greater than or equal to
        option value.
    :param allowed: Allow only certain values.
    """

    def __init__(
        self,
        *,
        allow_none: bool = False,
        messages: MessageCollectionType = DEFAULT_MESSAGES,
        rules: Optional[list[Rule]] = None,
        less_than: Optional[N] = None,
        less_or_equal_to: Optional[N] = None,
        greater_than: Optional[N] = None,
        greater_or_equal_to: Optional[N] = None,
        allowed: Optional[list[N]] = None,
    ):
        # ``rules`` previously defaulted to a shared mutable ``[]``; a None
        # sentinel guarantees every instance gets a fresh list.
        super().__init__(
            allow_none=allow_none,
            messages=messages,
            rules=rules if rules is not None else [],
        )
        self._less_than = less_than
        self._less_or_equal_to = less_or_equal_to
        self._greater_than = greater_than
        self._greater_or_equal_to = greater_or_equal_to
        self._allowed = allowed

    def _validate(
        self, value: Any, typecast: bool, context: dict[str, Any] = {}
    ) -> tuple[Optional[N], list[Error]]:
        """Check type, allow-list and range constraints, then run custom rules.

        Returns ``(validated_value, errors)``; the value is ``None`` whenever
        the exact-type check fails.
        """
        value, type_errors = self._validate_exact_type(value)

        if type_errors:
            return None, type_errors

        errors = []

        if self._allowed is not None and value not in self._allowed:
            errors.append(self._error("not_allowed", {"allowed": self._allowed}))

        # Error codes describe the offending value relative to the bound:
        # violating ``less_than`` means the value was "greater_or_equal_to"
        # the bound, and so on -- hence the apparently swapped names below.
        if self._less_than is not None and value >= self._less_than:
            errors.append(
                self._error("greater_or_equal_to", {"value": self._less_than})
            )

        if self._less_or_equal_to is not None and value > self._less_or_equal_to:
            errors.append(
                self._error("greater_than", {"value": self._less_or_equal_to})
            )

        if self._greater_than is not None and value <= self._greater_than:
            errors.append(
                self._error("less_or_equal_to", {"value": self._greater_than})
            )

        if self._greater_or_equal_to is not None and value < self._greater_or_equal_to:
            errors.append(
                self._error("less_than", {"value": self._greater_or_equal_to})
            )

        value, rule_errors = self._call_rules(value, typecast, context)

        return value, errors + rule_errors

    @abstractmethod
    def _validate_exact_type(self, value: Any) -> tuple[Optional[N], list[Error]]:
        """Subclasses check/convert *value* to the schema's exact type."""
        ...
class Float(NumericBase[float]):
    """
    Accept ``float`` values. Integer values are converted to floats.

    When type casting is enabled, numeric strings are converted to floats;
    any other input type is rejected.

    :param allow_none: If true, value is allowed to be ``None``.
    :param messages: Override error messages.
    :param rules: Custom validation rules.
    :param less_than: Accept only values less than option value.
    :param less_or_equal_to: Accept only values less than or equal to option value.
    :param greater_than: Accept only values greater than option value.
    :param greater_or_equal_to: Accept only values greater than or equal to option
        value.
    :param allowed: Allow only certain values.
    """

    def _typecast(
        self, input: Any, context: dict[str, Any] = {}
    ) -> tuple[Optional[float], list[Error]]:
        """Coerce *input* to ``float``; report a schema error on failure."""
        if isinstance(input, float):
            return input, []
        if isinstance(input, int):
            # Ints (bool included, as a subclass of int) widen to float.
            return float(input), []
        if isinstance(input, str):
            try:
                return float(input), []
            except ValueError:
                return None, [self._error("invalid_numeric_format")]
        return None, [
            self._error("unexpected_type", {"expected_type": type_name("float")})
        ]

    def _validate_exact_type(self, value: Any) -> tuple[Optional[float], list[Error]]:
        """Accept floats unchanged and ints as floats; reject anything else."""
        if isinstance(value, (float, int)):
            return (value if isinstance(value, float) else float(value)), []
        return None, [
            self._error("unexpected_type", {"expected_type": type_name("float")})
        ]
class Int(NumericBase[int]):
    """
    Accept ``int`` values.

    When type casting is enabled, numeric strings are converted to integers;
    any other input type is rejected.

    :param allow_none: If true, value is allowed to be ``None``.
    :param messages: Override error messages.
    :param rules: Custom validation rules.
    :param less_than: Accept only values less than option value.
    :param less_or_equal_to: Accept only values less than or equal to option value.
    :param greater_than: Accept only values greater than option value.
    :param greater_or_equal_to: Accept only values greater than or equal to option
        value.
    :param allowed: Allow only certain values.
    """

    def _typecast(
        self, input: Any, context: dict[str, Any] = {}
    ) -> tuple[Optional[int], list[Error]]:
        """Coerce *input* to ``int``; report a schema error on failure."""
        if isinstance(input, int):
            return input, []
        if isinstance(input, str):
            try:
                return int(input), []
            except ValueError:
                return None, [self._error("invalid_integer_format")]
        return None, [
            self._error("unexpected_type", {"expected_type": type_name("int")})
        ]

    def _validate_exact_type(self, value: Any) -> tuple[Optional[int], list[Error]]:
        """Accept ints unchanged; reject anything else."""
        if isinstance(value, int):
            return value, []
        return None, [
            self._error("unexpected_type", {"expected_type": type_name("int")})
        ]
| 34.929825 | 87 | 0.630169 | from __future__ import annotations
from abc import abstractmethod
from typing import Any, Generic, Optional, TypeVar
from goodboy.errors import Error
from goodboy.messages import DEFAULT_MESSAGES, MessageCollectionType, type_name
from goodboy.schema import Rule, SchemaWithUtils
N = TypeVar("N")
class NumericBase(Generic[N], SchemaWithUtils):
def __init__(
self,
*,
allow_none: bool = False,
messages: MessageCollectionType = DEFAULT_MESSAGES,
rules: list[Rule] = [],
less_than: Optional[N] = None,
less_or_equal_to: Optional[N] = None,
greater_than: Optional[N] = None,
greater_or_equal_to: Optional[N] = None,
allowed: Optional[list[N]] = None,
):
super().__init__(allow_none=allow_none, messages=messages, rules=rules)
self._less_than = less_than
self._less_or_equal_to = less_or_equal_to
self._greater_than = greater_than
self._greater_or_equal_to = greater_or_equal_to
self._allowed = allowed
def _validate(
self, value: Any, typecast: bool, context: dict[str, Any] = {}
) -> tuple[Optional[N], list[Error]]:
value, type_errors = self._validate_exact_type(value)
if type_errors:
return None, type_errors
errors = []
if self._allowed is not None and value not in self._allowed:
errors.append(self._error("not_allowed", {"allowed": self._allowed}))
if self._less_than is not None and value >= self._less_than:
errors.append(
self._error("greater_or_equal_to", {"value": self._less_than})
)
if self._less_or_equal_to is not None and value > self._less_or_equal_to:
errors.append(
self._error("greater_than", {"value": self._less_or_equal_to})
)
if self._greater_than is not None and value <= self._greater_than:
errors.append(
self._error("less_or_equal_to", {"value": self._greater_than})
)
if self._greater_or_equal_to is not None and value < self._greater_or_equal_to:
errors.append(
self._error("less_than", {"value": self._greater_or_equal_to})
)
value, rule_errors = self._call_rules(value, typecast, context)
return value, errors + rule_errors
@abstractmethod
def _validate_exact_type(self, value: Any) -> tuple[Optional[N], list[Error]]:
...
class Float(NumericBase[float]):
def _typecast(
self, input: Any, context: dict[str, Any] = {}
) -> tuple[Optional[float], list[Error]]:
if isinstance(input, float):
return input, []
if isinstance(input, int):
return float(input), []
if not isinstance(input, str):
return None, [
self._error("unexpected_type", {"expected_type": type_name("float")})
]
try:
return float(input), []
except ValueError:
return None, [self._error("invalid_numeric_format")]
def _validate_exact_type(self, value: Any) -> tuple[Optional[float], list[Error]]:
if isinstance(value, float):
return value, []
elif isinstance(value, int):
return float(value), []
else:
return None, [
self._error("unexpected_type", {"expected_type": type_name("float")})
]
class Int(NumericBase[int]):
def _typecast(
self, input: Any, context: dict[str, Any] = {}
) -> tuple[Optional[int], list[Error]]:
if isinstance(input, int):
return input, []
if not isinstance(input, str):
return None, [
self._error("unexpected_type", {"expected_type": type_name("int")})
]
try:
return int(input), []
except ValueError:
return None, [self._error("invalid_integer_format")]
def _validate_exact_type(self, value: Any) -> tuple[Optional[int], list[Error]]:
if not isinstance(value, int):
return None, [
self._error("unexpected_type", {"expected_type": type_name("int")})
]
else:
return value, []
| true | true |
7901644916c489ddc04eda403482d98c5cf0b446 | 1,667 | py | Python | azure-mgmt-compute/azure/mgmt/compute/v2018_06_01/models/replication_status_py3.py | JonathanGailliez/azure-sdk-for-python | f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b | [
"MIT"
] | 1 | 2021-09-07T18:36:04.000Z | 2021-09-07T18:36:04.000Z | azure-mgmt-compute/azure/mgmt/compute/v2018_06_01/models/replication_status_py3.py | JonathanGailliez/azure-sdk-for-python | f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b | [
"MIT"
] | 2 | 2019-10-02T23:37:38.000Z | 2020-10-02T01:17:31.000Z | azure-mgmt-compute/azure/mgmt/compute/v2018_06_01/models/replication_status_py3.py | JonathanGailliez/azure-sdk-for-python | f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b | [
"MIT"
] | 1 | 2019-06-17T22:18:23.000Z | 2019-06-17T22:18:23.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class ReplicationStatus(Model):
    """Replication status of a gallery Image Version.

    Both fields are populated by the server only; any values supplied by the
    client are ignored when sending a request.

    :ivar aggregated_state: Aggregated replication state derived from all of
     the regional replication status flags. Possible values include:
     'Unknown', 'InProgress', 'Completed', 'Failed'
    :vartype aggregated_state: str or
     ~azure.mgmt.compute.v2018_06_01.models.AggregatedReplicationState
    :ivar summary: Per-region summary of the replication status.
    :vartype summary:
     list[~azure.mgmt.compute.v2018_06_01.models.RegionalReplicationStatus]
    """

    # Server-populated fields: msrest rejects client-supplied values.
    _validation = {
        "aggregated_state": {"readonly": True},
        "summary": {"readonly": True},
    }

    # Wire-format mapping used by msrest (de)serialization.
    _attribute_map = {
        "aggregated_state": {"key": "aggregatedState", "type": "str"},
        "summary": {"key": "summary", "type": "[RegionalReplicationStatus]"},
    }

    def __init__(self, **kwargs) -> None:
        super().__init__(**kwargs)
        # Read-only attributes; filled in from the service response.
        self.aggregated_state = None
        self.summary = None
from msrest.serialization import Model
class ReplicationStatus(Model):
    # Replication status of a gallery Image Version. Both fields are
    # server-populated (read-only); client-supplied values are ignored.

    # Fields the server owns: serialization treats them as read-only.
    _validation = {
        'aggregated_state': {'readonly': True},
        'summary': {'readonly': True},
    }

    # msrest wire-format attribute mapping.
    _attribute_map = {
        'aggregated_state': {'key': 'aggregatedState', 'type': 'str'},
        'summary': {'key': 'summary', 'type': '[RegionalReplicationStatus]'},
    }

    def __init__(self, **kwargs) -> None:
        super(ReplicationStatus, self).__init__(**kwargs)
        # Read-only; filled in from the service response.
        self.aggregated_state = None
        self.summary = None
| true | true |
790164e04235f1c5c08991bc08e0569374182c2b | 1,511 | py | Python | z3/dinner.py | Wikunia/hakank | 030bc928d2efe8dcbc5118bda3f8ae9575d0fd13 | [
"MIT"
] | 279 | 2015-01-10T09:55:35.000Z | 2022-03-28T02:34:03.000Z | z3/dinner.py | Wikunia/hakank | 030bc928d2efe8dcbc5118bda3f8ae9575d0fd13 | [
"MIT"
] | 10 | 2017-10-05T15:48:50.000Z | 2021-09-20T12:06:52.000Z | z3/dinner.py | Wikunia/hakank | 030bc928d2efe8dcbc5118bda3f8ae9575d0fd13 | [
"MIT"
] | 83 | 2015-01-20T03:44:00.000Z | 2022-03-13T23:53:06.000Z | #!/usr/bin/python -u
# -*- coding: latin-1 -*-
#
# Dinner problem in Z3
#
# From http://www.sellsbrothers.com/spout/#The_Logic_of_Logic
# """
# My son came to me the other day and said, "Dad, I need help with a
# math problem." The problem went like this:
#
# * We're going out to dinner taking 1-6 grandparents, 1-10 parents and/or 1-40 children
# * Grandparents cost $3 for dinner, parents $2 and children $0.50
# * There must be 20 total people at dinner and it must cost $20
# * How many grandparents, parents and children are going to dinner?
# """
#
# This Z3 model was written by Hakan Kjellerstrand (hakank@gmail.com)
# See also my Z3 page: http://hakank.org/z3/
#
from z3_utils_hakank import *
sol = Solver()
n = 3
# variables
# x = makeIntVector(sol, "x", 3, 1, 100)
# x = makeRealVector(sol, "x", 3, 1, 100)
# Grandparents, Parents, Children = x
Grandparents = makeRealVar(sol,"Grandparents", 1,6)
Parents = makeRealVar(sol,"Parents", 1,10)
Children = makeRealVar(sol,"Children", 1,40)
# constraints
#
# sol.add(3*Grandparents + 2*Parents + Children/2 == 20)
# multiply with 2:
sol.add(Grandparents * 6 + Parents * 4 + Children * 1 == 40)
# Grandparents + Parents + Children = 20 /\
num_solutions = 0
while sol.check() == sat:
num_solutions += 1
mod = sol.model()
print([mod.eval(x) for x in [Grandparents,Parents,Children]])
getDifferentSolution(sol,mod,[Grandparents,Parents,Children])
if num_solutions > 5:
break;
print('num_solutions:', num_solutions)
| 27.981481 | 88 | 0.68233 |
me the other day and said, "Dad, I need help with a
# math problem." The problem went like this:
#
# * We're going out to dinner taking 1-6 grandparents, 1-10 parents and/or 1-40 children
# * Grandparents cost $3 for dinner, parents $2 and children $0.50
# * There must be 20 total people at dinner and it must cost $20
# * How many grandparents, parents and children are going to dinner?
# """
#
# This Z3 model was written by Hakan Kjellerstrand (hakank@gmail.com)
# See also my Z3 page: http://hakank.org/z3/
#
from z3_utils_hakank import *
sol = Solver()
n = 3
# variables
# x = makeIntVector(sol, "x", 3, 1, 100)
# x = makeRealVector(sol, "x", 3, 1, 100)
# Grandparents, Parents, Children = x
Grandparents = makeRealVar(sol,"Grandparents", 1,6)
Parents = makeRealVar(sol,"Parents", 1,10)
Children = makeRealVar(sol,"Children", 1,40)
# constraints
#
# sol.add(3*Grandparents + 2*Parents + Children/2 == 20)
# multiply with 2:
sol.add(Grandparents * 6 + Parents * 4 + Children * 1 == 40)
# Grandparents + Parents + Children = 20 /\
num_solutions = 0
while sol.check() == sat:
num_solutions += 1
mod = sol.model()
print([mod.eval(x) for x in [Grandparents,Parents,Children]])
getDifferentSolution(sol,mod,[Grandparents,Parents,Children])
if num_solutions > 5:
break;
print('num_solutions:', num_solutions)
| true | true |
7901653016a0a42cadc1dee37098fc4dc71fefa6 | 14,082 | py | Python | tensorboard/plugins/hparams/backend_context_test.py | aryaman4/tensorboard | 4da84884c77bb094c6d87a3d9df009d54d69af49 | [
"Apache-2.0"
] | 1 | 2021-04-05T21:16:16.000Z | 2021-04-05T21:16:16.000Z | tensorboard/plugins/hparams/backend_context_test.py | zalzala/tensorboard | 72a104edb0f8d83ef8889ebee7dd39be684461c1 | [
"Apache-2.0"
] | null | null | null | tensorboard/plugins/hparams/backend_context_test.py | zalzala/tensorboard | 72a104edb0f8d83ef8889ebee7dd39be684461c1 | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for backend_context."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import operator
try:
# python version >= 3.3
from unittest import mock
except ImportError:
import mock # pylint: disable=unused-import
import tensorflow as tf
from google.protobuf import text_format
from tensorboard import context
from tensorboard.backend.event_processing import data_provider
from tensorboard.backend.event_processing import event_accumulator
from tensorboard.backend.event_processing import plugin_event_multiplexer
from tensorboard.compat.proto import summary_pb2
from tensorboard.plugins import base_plugin
from tensorboard.plugins.hparams import api_pb2
from tensorboard.plugins.hparams import backend_context
from tensorboard.plugins.hparams import metadata
from tensorboard.plugins.hparams import plugin_data_pb2
from tensorboard.plugins.scalar import metadata as scalars_metadata
# Oneof field names of HParamsPluginData; used by the tests below to select
# which proto type to parse when building serialized plugin content.
DATA_TYPE_EXPERIMENT = "experiment"
DATA_TYPE_SESSION_START_INFO = "session_start_info"
DATA_TYPE_SESSION_END_INFO = "session_end_info"
class BackendContextTest(tf.test.TestCase):
    """Tests for hparams backend_context.Context using a mocked multiplexer."""

    # Make assertProtoEquals print all the diff.
    maxDiff = None  # pylint: disable=invalid-name

    def setUp(self):
        # Replace the TBContext's multiplexer with an autospec mock whose
        # metadata lookups are served from the fixtures built below.
        self._mock_tb_context = base_plugin.TBContext()

        # TODO(#3425): Remove mocking or switch to mocking data provider
        # APIs directly.
        self._mock_multiplexer = mock.create_autospec(
            plugin_event_multiplexer.EventMultiplexer
        )
        self._mock_tb_context.multiplexer = self._mock_multiplexer
        self._mock_multiplexer.PluginRunToTagToContent.side_effect = (
            self._mock_plugin_run_to_tag_to_content
        )
        self._mock_multiplexer.AllSummaryMetadata.side_effect = (
            self._mock_all_summary_metadata
        )
        self._mock_multiplexer.SummaryMetadata.side_effect = (
            self._mock_summary_metadata
        )
        self._mock_tb_context.data_provider = data_provider.MultiplexerDataProvider(
            self._mock_multiplexer, "/path/to/logs"
        )
        # Per-test fixtures: each test assigns text-format SessionStartInfo
        # protos to these before invoking the code under test.
        self.session_1_start_info_ = ""
        self.session_2_start_info_ = ""
        self.session_3_start_info_ = ""

    def _mock_all_summary_metadata(self):
        """Builds the run->tag->SummaryMetadata map from the session fixtures.

        Hparams tags are emitted as TENSOR-class data owned by the hparams
        plugin; scalar tags as SCALAR-class data owned by the scalars plugin.
        """
        result = {}
        hparams_content = {
            "exp/session_1": {
                metadata.SESSION_START_INFO_TAG: self._serialized_plugin_data(
                    DATA_TYPE_SESSION_START_INFO, self.session_1_start_info_
                ),
            },
            "exp/session_2": {
                metadata.SESSION_START_INFO_TAG: self._serialized_plugin_data(
                    DATA_TYPE_SESSION_START_INFO, self.session_2_start_info_
                ),
            },
            "exp/session_3": {
                metadata.SESSION_START_INFO_TAG: self._serialized_plugin_data(
                    DATA_TYPE_SESSION_START_INFO, self.session_3_start_info_
                ),
            },
        }
        # Note "exp/session_3xyz/" is deliberately NOT a session of "exp":
        # it exercises the prefix-matching logic in the code under test.
        scalars_content = {
            "exp/session_1": {"loss": b"", "accuracy": b""},
            "exp/session_1/eval": {"loss": b"",},
            "exp/session_1/train": {"loss": b"",},
            "exp/session_2": {"loss": b"", "accuracy": b"",},
            "exp/session_2/eval": {"loss": b"",},
            "exp/session_2/train": {"loss": b"",},
            "exp/session_3": {"loss": b"", "accuracy": b"",},
            "exp/session_3/eval": {"loss": b"",},
            "exp/session_3xyz/": {"loss2": b"",},
        }
        for (run, tag_to_content) in hparams_content.items():
            result.setdefault(run, {})
            for (tag, content) in tag_to_content.items():
                m = summary_pb2.SummaryMetadata()
                m.data_class = summary_pb2.DATA_CLASS_TENSOR
                m.plugin_data.plugin_name = metadata.PLUGIN_NAME
                m.plugin_data.content = content
                result[run][tag] = m
        for (run, tag_to_content) in scalars_content.items():
            result.setdefault(run, {})
            for (tag, content) in tag_to_content.items():
                m = summary_pb2.SummaryMetadata()
                m.data_class = summary_pb2.DATA_CLASS_SCALAR
                m.plugin_data.plugin_name = scalars_metadata.PLUGIN_NAME
                m.plugin_data.content = content
                result[run][tag] = m
        return result

    def _mock_plugin_run_to_tag_to_content(self, plugin_name):
        """Derives PluginRunToTagToContent output from AllSummaryMetadata."""
        result = {}
        for (
            run,
            tag_to_metadata,
        ) in self._mock_multiplexer.AllSummaryMetadata().items():
            # NOTE(review): the loop variable shadows the module-level
            # `metadata` import within this function body.
            for (tag, metadata) in tag_to_metadata.items():
                if metadata.plugin_data.plugin_name != plugin_name:
                    continue
                result.setdefault(run, {})
                result[run][tag] = metadata.plugin_data.content
        return result

    def _mock_summary_metadata(self, run, tag):
        # Single (run, tag) lookup backed by the same fixture map.
        return self._mock_multiplexer.AllSummaryMetadata()[run][tag]

    def test_experiment_with_experiment_tag(self):
        """An explicit experiment tag is returned verbatim."""
        experiment = """
            description: 'Test experiment'
            metric_infos: [
              { name: { tag: 'current_temp' } }
            ]
        """
        run = "exp"
        tag = metadata.EXPERIMENT_TAG
        m = summary_pb2.SummaryMetadata()
        m.data_class = summary_pb2.DATA_CLASS_TENSOR
        m.plugin_data.plugin_name = metadata.PLUGIN_NAME
        m.plugin_data.content = self._serialized_plugin_data(
            DATA_TYPE_EXPERIMENT, experiment
        )
        self._mock_multiplexer.AllSummaryMetadata.side_effect = None
        self._mock_multiplexer.AllSummaryMetadata.return_value = {run: {tag: m}}
        ctxt = backend_context.Context(self._mock_tb_context)
        request_ctx = context.RequestContext()
        self.assertProtoEquals(
            experiment,
            ctxt.experiment_from_metadata(
                request_ctx, "123", ctxt.hparams_metadata(request_ctx, "123")
            ),
        )

    def test_experiment_without_experiment_tag(self):
        """Without an experiment tag, the experiment is inferred from sessions."""
        self.session_1_start_info_ = """
            hparams: [
              {key: 'batch_size' value: {number_value: 100}},
              {key: 'lr' value: {number_value: 0.01}},
              {key: 'model_type' value: {string_value: 'CNN'}}
            ]
        """
        self.session_2_start_info_ = """
            hparams:[
              {key: 'batch_size' value: {number_value: 200}},
              {key: 'lr' value: {number_value: 0.02}},
              {key: 'model_type' value: {string_value: 'LATTICE'}}
            ]
        """
        self.session_3_start_info_ = """
            hparams:[
              {key: 'batch_size' value: {number_value: 300}},
              {key: 'lr' value: {number_value: 0.05}},
              {key: 'model_type' value: {string_value: 'CNN'}}
            ]
        """
        expected_exp = """
            hparam_infos: {
              name: 'batch_size'
              type: DATA_TYPE_FLOAT64
            },
            hparam_infos: {
              name: 'lr'
              type: DATA_TYPE_FLOAT64
            },
            hparam_infos: {
              name: 'model_type'
              type: DATA_TYPE_STRING
              domain_discrete: {
                values: [{string_value: 'CNN'},
                         {string_value: 'LATTICE'}]
              }
            }
            metric_infos: {
              name: {group: '', tag: 'accuracy'}
            }
            metric_infos: {
              name: {group: '', tag: 'loss'}
            }
            metric_infos: {
              name: {group: 'eval', tag: 'loss'}
            }
            metric_infos: {
              name: {group: 'train', tag: 'loss'}
            }
        """
        ctxt = backend_context.Context(self._mock_tb_context)
        request_ctx = context.RequestContext()
        actual_exp = ctxt.experiment_from_metadata(
            request_ctx, "123", ctxt.hparams_metadata(request_ctx, "123")
        )
        # Repeated proto fields have no guaranteed order; sort before compare.
        _canonicalize_experiment(actual_exp)
        self.assertProtoEquals(expected_exp, actual_exp)

    def test_experiment_without_experiment_tag_different_hparam_types(self):
        """Mixed value types for one hparam collapse to a string-typed hparam."""
        self.session_1_start_info_ = """
            hparams:[
              {key: 'batch_size' value: {number_value: 100}},
              {key: 'lr' value: {string_value: '0.01'}}
            ]
        """
        self.session_2_start_info_ = """
            hparams:[
              {key: 'lr' value: {number_value: 0.02}},
              {key: 'model_type' value: {string_value: 'LATTICE'}}
            ]
        """
        self.session_3_start_info_ = """
            hparams:[
              {key: 'batch_size' value: {bool_value: true}},
              {key: 'model_type' value: {string_value: 'CNN'}}
            ]
        """
        expected_exp = """
            hparam_infos: {
              name: 'batch_size'
              type: DATA_TYPE_STRING
              domain_discrete: {
                values: [{string_value: '100.0'},
                         {string_value: 'true'}]
              }
            }
            hparam_infos: {
              name: 'lr'
              type: DATA_TYPE_STRING
              domain_discrete: {
                values: [{string_value: '0.01'},
                         {string_value: '0.02'}]
              }
            }
            hparam_infos: {
              name: 'model_type'
              type: DATA_TYPE_STRING
              domain_discrete: {
                values: [{string_value: 'CNN'},
                         {string_value: 'LATTICE'}]
              }
            }
            metric_infos: {
              name: {group: '', tag: 'accuracy'}
            }
            metric_infos: {
              name: {group: '', tag: 'loss'}
            }
            metric_infos: {
              name: {group: 'eval', tag: 'loss'}
            }
            metric_infos: {
              name: {group: 'train', tag: 'loss'}
            }
        """
        ctxt = backend_context.Context(self._mock_tb_context)
        request_ctx = context.RequestContext()
        actual_exp = ctxt.experiment_from_metadata(
            request_ctx, "123", ctxt.hparams_metadata(request_ctx, "123")
        )
        _canonicalize_experiment(actual_exp)
        self.assertProtoEquals(expected_exp, actual_exp)

    def test_experiment_without_experiment_tag_many_distinct_values(self):
        """Hparams with more distinct values than the cap get no discrete domain."""
        self.session_1_start_info_ = """
            hparams:[
              {key: 'batch_size' value: {number_value: 100}},
              {key: 'lr' value: {string_value: '0.01'}}
            ]
        """
        self.session_2_start_info_ = """
            hparams:[
              {key: 'lr' value: {number_value: 0.02}},
              {key: 'model_type' value: {string_value: 'CNN'}}
            ]
        """
        self.session_3_start_info_ = """
            hparams:[
              {key: 'batch_size' value: {bool_value: true}},
              {key: 'model_type' value: {string_value: 'CNN'}}
            ]
        """
        expected_exp = """
            hparam_infos: {
              name: 'batch_size'
              type: DATA_TYPE_STRING
            }
            hparam_infos: {
              name: 'lr'
              type: DATA_TYPE_STRING
            }
            hparam_infos: {
              name: 'model_type'
              type: DATA_TYPE_STRING
              domain_discrete: {
                values: [{string_value: 'CNN'}]
              }
            }
            metric_infos: {
              name: {group: '', tag: 'accuracy'}
            }
            metric_infos: {
              name: {group: '', tag: 'loss'}
            }
            metric_infos: {
              name: {group: 'eval', tag: 'loss'}
            }
            metric_infos: {
              name: {group: 'train', tag: 'loss'}
            }
        """
        # Cap discrete domains at one value to trigger the dropping behavior.
        ctxt = backend_context.Context(
            self._mock_tb_context, max_domain_discrete_len=1
        )
        request_ctx = context.RequestContext()
        actual_exp = ctxt.experiment_from_metadata(
            request_ctx, "123", ctxt.hparams_metadata(request_ctx, "123"),
        )
        _canonicalize_experiment(actual_exp)
        self.assertProtoEquals(expected_exp, actual_exp)

    def _serialized_plugin_data(self, data_oneof_field, text_protobuffer):
        """Parses a text proto and wraps it as serialized HParamsPluginData.

        `data_oneof_field` selects which oneof field of HParamsPluginData
        to populate (one of the DATA_TYPE_* module constants).
        """
        oneof_type_dict = {
            DATA_TYPE_EXPERIMENT: api_pb2.Experiment,
            DATA_TYPE_SESSION_START_INFO: plugin_data_pb2.SessionStartInfo,
            DATA_TYPE_SESSION_END_INFO: plugin_data_pb2.SessionEndInfo,
        }
        protobuffer = text_format.Merge(
            text_protobuffer, oneof_type_dict[data_oneof_field]()
        )
        plugin_data = plugin_data_pb2.HParamsPluginData()
        getattr(plugin_data, data_oneof_field).CopyFrom(protobuffer)
        return metadata.create_summary_metadata(plugin_data).plugin_data.content
def _canonicalize_experiment(exp):
"""Sorts the repeated fields of an Experiment message."""
exp.hparam_infos.sort(key=operator.attrgetter("name"))
exp.metric_infos.sort(key=operator.attrgetter("name.group", "name.tag"))
for hparam_info in exp.hparam_infos:
if hparam_info.HasField("domain_discrete"):
hparam_info.domain_discrete.values.sort(
key=operator.attrgetter("string_value")
)
if __name__ == "__main__":
    # Run all test cases in this module via TensorFlow's test runner.
    tf.test.main()
| 37.452128 | 84 | 0.569805 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import operator
try:
from unittest import mock
except ImportError:
import mock
import tensorflow as tf
from google.protobuf import text_format
from tensorboard import context
from tensorboard.backend.event_processing import data_provider
from tensorboard.backend.event_processing import event_accumulator
from tensorboard.backend.event_processing import plugin_event_multiplexer
from tensorboard.compat.proto import summary_pb2
from tensorboard.plugins import base_plugin
from tensorboard.plugins.hparams import api_pb2
from tensorboard.plugins.hparams import backend_context
from tensorboard.plugins.hparams import metadata
from tensorboard.plugins.hparams import plugin_data_pb2
from tensorboard.plugins.scalar import metadata as scalars_metadata
DATA_TYPE_EXPERIMENT = "experiment"
DATA_TYPE_SESSION_START_INFO = "session_start_info"
DATA_TYPE_SESSION_END_INFO = "session_end_info"
class BackendContextTest(tf.test.TestCase):
maxDiff = None
def setUp(self):
self._mock_tb_context = base_plugin.TBContext()
tospec(
plugin_event_multiplexer.EventMultiplexer
)
self._mock_tb_context.multiplexer = self._mock_multiplexer
self._mock_multiplexer.PluginRunToTagToContent.side_effect = (
self._mock_plugin_run_to_tag_to_content
)
self._mock_multiplexer.AllSummaryMetadata.side_effect = (
self._mock_all_summary_metadata
)
self._mock_multiplexer.SummaryMetadata.side_effect = (
self._mock_summary_metadata
)
self._mock_tb_context.data_provider = data_provider.MultiplexerDataProvider(
self._mock_multiplexer, "/path/to/logs"
)
self.session_1_start_info_ = ""
self.session_2_start_info_ = ""
self.session_3_start_info_ = ""
def _mock_all_summary_metadata(self):
result = {}
hparams_content = {
"exp/session_1": {
metadata.SESSION_START_INFO_TAG: self._serialized_plugin_data(
DATA_TYPE_SESSION_START_INFO, self.session_1_start_info_
),
},
"exp/session_2": {
metadata.SESSION_START_INFO_TAG: self._serialized_plugin_data(
DATA_TYPE_SESSION_START_INFO, self.session_2_start_info_
),
},
"exp/session_3": {
metadata.SESSION_START_INFO_TAG: self._serialized_plugin_data(
DATA_TYPE_SESSION_START_INFO, self.session_3_start_info_
),
},
}
scalars_content = {
"exp/session_1": {"loss": b"", "accuracy": b""},
"exp/session_1/eval": {"loss": b"",},
"exp/session_1/train": {"loss": b"",},
"exp/session_2": {"loss": b"", "accuracy": b"",},
"exp/session_2/eval": {"loss": b"",},
"exp/session_2/train": {"loss": b"",},
"exp/session_3": {"loss": b"", "accuracy": b"",},
"exp/session_3/eval": {"loss": b"",},
"exp/session_3xyz/": {"loss2": b"",},
}
for (run, tag_to_content) in hparams_content.items():
result.setdefault(run, {})
for (tag, content) in tag_to_content.items():
m = summary_pb2.SummaryMetadata()
m.data_class = summary_pb2.DATA_CLASS_TENSOR
m.plugin_data.plugin_name = metadata.PLUGIN_NAME
m.plugin_data.content = content
result[run][tag] = m
for (run, tag_to_content) in scalars_content.items():
result.setdefault(run, {})
for (tag, content) in tag_to_content.items():
m = summary_pb2.SummaryMetadata()
m.data_class = summary_pb2.DATA_CLASS_SCALAR
m.plugin_data.plugin_name = scalars_metadata.PLUGIN_NAME
m.plugin_data.content = content
result[run][tag] = m
return result
def _mock_plugin_run_to_tag_to_content(self, plugin_name):
result = {}
for (
run,
tag_to_metadata,
) in self._mock_multiplexer.AllSummaryMetadata().items():
for (tag, metadata) in tag_to_metadata.items():
if metadata.plugin_data.plugin_name != plugin_name:
continue
result.setdefault(run, {})
result[run][tag] = metadata.plugin_data.content
return result
def _mock_summary_metadata(self, run, tag):
return self._mock_multiplexer.AllSummaryMetadata()[run][tag]
def test_experiment_with_experiment_tag(self):
experiment = """
description: 'Test experiment'
metric_infos: [
{ name: { tag: 'current_temp' } }
]
"""
run = "exp"
tag = metadata.EXPERIMENT_TAG
m = summary_pb2.SummaryMetadata()
m.data_class = summary_pb2.DATA_CLASS_TENSOR
m.plugin_data.plugin_name = metadata.PLUGIN_NAME
m.plugin_data.content = self._serialized_plugin_data(
DATA_TYPE_EXPERIMENT, experiment
)
self._mock_multiplexer.AllSummaryMetadata.side_effect = None
self._mock_multiplexer.AllSummaryMetadata.return_value = {run: {tag: m}}
ctxt = backend_context.Context(self._mock_tb_context)
request_ctx = context.RequestContext()
self.assertProtoEquals(
experiment,
ctxt.experiment_from_metadata(
request_ctx, "123", ctxt.hparams_metadata(request_ctx, "123")
),
)
def test_experiment_without_experiment_tag(self):
self.session_1_start_info_ = """
hparams: [
{key: 'batch_size' value: {number_value: 100}},
{key: 'lr' value: {number_value: 0.01}},
{key: 'model_type' value: {string_value: 'CNN'}}
]
"""
self.session_2_start_info_ = """
hparams:[
{key: 'batch_size' value: {number_value: 200}},
{key: 'lr' value: {number_value: 0.02}},
{key: 'model_type' value: {string_value: 'LATTICE'}}
]
"""
self.session_3_start_info_ = """
hparams:[
{key: 'batch_size' value: {number_value: 300}},
{key: 'lr' value: {number_value: 0.05}},
{key: 'model_type' value: {string_value: 'CNN'}}
]
"""
expected_exp = """
hparam_infos: {
name: 'batch_size'
type: DATA_TYPE_FLOAT64
},
hparam_infos: {
name: 'lr'
type: DATA_TYPE_FLOAT64
},
hparam_infos: {
name: 'model_type'
type: DATA_TYPE_STRING
domain_discrete: {
values: [{string_value: 'CNN'},
{string_value: 'LATTICE'}]
}
}
metric_infos: {
name: {group: '', tag: 'accuracy'}
}
metric_infos: {
name: {group: '', tag: 'loss'}
}
metric_infos: {
name: {group: 'eval', tag: 'loss'}
}
metric_infos: {
name: {group: 'train', tag: 'loss'}
}
"""
ctxt = backend_context.Context(self._mock_tb_context)
request_ctx = context.RequestContext()
actual_exp = ctxt.experiment_from_metadata(
request_ctx, "123", ctxt.hparams_metadata(request_ctx, "123")
)
_canonicalize_experiment(actual_exp)
self.assertProtoEquals(expected_exp, actual_exp)
def test_experiment_without_experiment_tag_different_hparam_types(self):
self.session_1_start_info_ = """
hparams:[
{key: 'batch_size' value: {number_value: 100}},
{key: 'lr' value: {string_value: '0.01'}}
]
"""
self.session_2_start_info_ = """
hparams:[
{key: 'lr' value: {number_value: 0.02}},
{key: 'model_type' value: {string_value: 'LATTICE'}}
]
"""
self.session_3_start_info_ = """
hparams:[
{key: 'batch_size' value: {bool_value: true}},
{key: 'model_type' value: {string_value: 'CNN'}}
]
"""
expected_exp = """
hparam_infos: {
name: 'batch_size'
type: DATA_TYPE_STRING
domain_discrete: {
values: [{string_value: '100.0'},
{string_value: 'true'}]
}
}
hparam_infos: {
name: 'lr'
type: DATA_TYPE_STRING
domain_discrete: {
values: [{string_value: '0.01'},
{string_value: '0.02'}]
}
}
hparam_infos: {
name: 'model_type'
type: DATA_TYPE_STRING
domain_discrete: {
values: [{string_value: 'CNN'},
{string_value: 'LATTICE'}]
}
}
metric_infos: {
name: {group: '', tag: 'accuracy'}
}
metric_infos: {
name: {group: '', tag: 'loss'}
}
metric_infos: {
name: {group: 'eval', tag: 'loss'}
}
metric_infos: {
name: {group: 'train', tag: 'loss'}
}
"""
ctxt = backend_context.Context(self._mock_tb_context)
request_ctx = context.RequestContext()
actual_exp = ctxt.experiment_from_metadata(
request_ctx, "123", ctxt.hparams_metadata(request_ctx, "123")
)
_canonicalize_experiment(actual_exp)
self.assertProtoEquals(expected_exp, actual_exp)
def test_experiment_without_experiment_tag_many_distinct_values(self):
self.session_1_start_info_ = """
hparams:[
{key: 'batch_size' value: {number_value: 100}},
{key: 'lr' value: {string_value: '0.01'}}
]
"""
self.session_2_start_info_ = """
hparams:[
{key: 'lr' value: {number_value: 0.02}},
{key: 'model_type' value: {string_value: 'CNN'}}
]
"""
self.session_3_start_info_ = """
hparams:[
{key: 'batch_size' value: {bool_value: true}},
{key: 'model_type' value: {string_value: 'CNN'}}
]
"""
expected_exp = """
hparam_infos: {
name: 'batch_size'
type: DATA_TYPE_STRING
}
hparam_infos: {
name: 'lr'
type: DATA_TYPE_STRING
}
hparam_infos: {
name: 'model_type'
type: DATA_TYPE_STRING
domain_discrete: {
values: [{string_value: 'CNN'}]
}
}
metric_infos: {
name: {group: '', tag: 'accuracy'}
}
metric_infos: {
name: {group: '', tag: 'loss'}
}
metric_infos: {
name: {group: 'eval', tag: 'loss'}
}
metric_infos: {
name: {group: 'train', tag: 'loss'}
}
"""
ctxt = backend_context.Context(
self._mock_tb_context, max_domain_discrete_len=1
)
request_ctx = context.RequestContext()
actual_exp = ctxt.experiment_from_metadata(
request_ctx, "123", ctxt.hparams_metadata(request_ctx, "123"),
)
_canonicalize_experiment(actual_exp)
self.assertProtoEquals(expected_exp, actual_exp)
def _serialized_plugin_data(self, data_oneof_field, text_protobuffer):
oneof_type_dict = {
DATA_TYPE_EXPERIMENT: api_pb2.Experiment,
DATA_TYPE_SESSION_START_INFO: plugin_data_pb2.SessionStartInfo,
DATA_TYPE_SESSION_END_INFO: plugin_data_pb2.SessionEndInfo,
}
protobuffer = text_format.Merge(
text_protobuffer, oneof_type_dict[data_oneof_field]()
)
plugin_data = plugin_data_pb2.HParamsPluginData()
getattr(plugin_data, data_oneof_field).CopyFrom(protobuffer)
return metadata.create_summary_metadata(plugin_data).plugin_data.content
def _canonicalize_experiment(exp):
exp.hparam_infos.sort(key=operator.attrgetter("name"))
exp.metric_infos.sort(key=operator.attrgetter("name.group", "name.tag"))
for hparam_info in exp.hparam_infos:
if hparam_info.HasField("domain_discrete"):
hparam_info.domain_discrete.values.sort(
key=operator.attrgetter("string_value")
)
if __name__ == "__main__":
tf.test.main()
| true | true |
790165724c4788d42311acba5d09690a8563157f | 147 | py | Python | tests/contract/test_contract_identidock.py | anirbanroydas/RChat-RabbitMQ-AMQP- | d4106357def0b36e1e803986c13c4fd84ec91e6a | [
"MIT"
] | 52 | 2016-07-26T08:57:44.000Z | 2022-03-30T06:50:07.000Z | tests/contract/test_contract_identidock.py | anirbanroydas/elaster | 08b5873d7a61d01905d059e08cc9ba533358e684 | [
"MIT"
] | 2 | 2016-05-18T22:45:44.000Z | 2017-09-19T03:55:22.000Z | tests/contract/test_contract_identidock.py | anirbanroydas/elaster | 08b5873d7a61d01905d059e08cc9ba533358e684 | [
"MIT"
] | 22 | 2017-11-26T22:04:43.000Z | 2022-02-10T04:21:23.000Z | # import unittest
import pytest
# from ci_testing_python.app.identidock import app
if __name__ == '__main__':
# unittest.main()
pytest.main()
| 14.7 | 50 | 0.741497 |
import pytest
if __name__ == '__main__':
pytest.main()
| true | true |
79016636207cea316bdee299c0f14bde4f689728 | 8,879 | py | Python | geoist/cattools/MapTools.py | wqqpp007/geoist | 116b674eae3da4ee706902ce7f5feae1f61f43a5 | [
"MIT"
] | 1 | 2020-06-04T01:09:24.000Z | 2020-06-04T01:09:24.000Z | geoist/cattools/MapTools.py | wqqpp007/geoist | 116b674eae3da4ee706902ce7f5feae1f61f43a5 | [
"MIT"
] | null | null | null | geoist/cattools/MapTools.py | wqqpp007/geoist | 116b674eae3da4ee706902ce7f5feae1f61f43a5 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
from mpl_toolkits.basemap import Basemap
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import numpy as np
#-----------------------------------------------------------------------------------------
class GeoMap:
'''
INFO:
Map boundary edges order:
[LeftLowerLon,LeftLowerLat,UpperRightLon,UpperRightLat]
Background type:
'none'
'etopo'
'esri' --> background source
Background sources available for 'esri':
ESRI_Imagery_World_2D (MapServer)
ESRI_StreetMap_World_2D (MapServer)
I3_Imagery_Prime_World (GlobeServer)
NASA_CloudCover_World (GlobeServer)
NatGeo_World_Map (MapServer)
NGS_Topo_US_2D (MapServer)
Ocean_Basemap (MapServer)
USA_Topo_Maps (MapServer)
World_Imagery (MapServer)
World_Physical_Map (MapServer)
World_Shaded_Relief (MapServer)
World_Street_Map (MapServer)
World_Terrain_Base (MapServer)
World_Topo_Map (MapServer)
'''
#---------------------------------------------------------------------------------------
def __init__(self, Cfg=[]):
# Defaults (example setting)
if not Cfg:
self._cfg = {'Bounds': [7., 36., 19., 48.],
'FigSize': [6., 6.],
'Background': ['esri','World_Terrain_Base',1500],
'Grid': [5., 5.]}
else:
self._cfg = Cfg
self._zo = 1
#---------------------------------------------------------------------------------------
def BasePlot(self):
plt.figure(figsize = (self._cfg['FigSize'][0],
self._cfg['FigSize'][1]))
# Basemap
self._map = Basemap(self._cfg['Bounds'][0],
self._cfg['Bounds'][1],
self._cfg['Bounds'][2],
self._cfg['Bounds'][3],
resolution = 'l',
projection = 'tmerc',
epsg = 3857)
# Background land
if self._cfg['Background'][0] == 'color':
self._map.drawlsmask(land_color = self._cfg['Background'][1],
ocean_color = self._cfg['Background'][2],
grid = 1.25,
lakes = True)
if self._cfg['Background'][0] == 'etopo':
self._map.etopo(zorder = self._zo)
if self._cfg['Background'][0] == 'esri':
self._map.arcgisimage(service = self._cfg['Background'][1],
xpixels = self._cfg['Background'][2],
dpi = 300,
zorder = self._zo)
if self._cfg['Background'][0] == 'relief':
self._map.shadedrelief()
#---------------------------------------------------------------------------------------
def DrawGrid(self):
# Parallels and meridians
parallels = np.arange(-90, 90, self._cfg['Grid'][1])
meridians = np.arange(0, 360., self._cfg['Grid'][0])
self._zo += 1
self._map.drawparallels(parallels, labels = [1,0,0,0],
fontsize = 14, weight = 'normal',
linewidth = 0.5,
zorder = self._zo)
self._zo += 1
self._map.drawmeridians(meridians, labels = [0,0,0,1],
fontsize = 14, weight = 'normal',
linewidth = 0.5,
zorder = self._zo)
#---------------------------------------------------------------------------------------
def DrawBounds(self):
# Boundaries and lines
self._zo += 1
self._map.drawcoastlines(linewidth = 0.8,
zorder = self._zo)
self._zo += 1
self._map.drawstates(linewidth = 0.8,
zorder = self._zo)
self._zo += 1
self._map.drawcountries(linewidth = 0.8,
zorder = self._zo)
self._zo += 1
self._map.drawrivers(linewidth = 0.1,
color = 'b',
zorder = self._zo)
"""
self._zo += 1
self._map.drawmapboundary(linewidth = 2,
color = 'k',
zorder = self._zo)
"""
#---------------------------------------------------------------------------------------
def Title(self, string, Set=['bold','k',18]):
plt.title(string, weight = Set[0],
color = Set[1],
fontsize = Set[2])
#---------------------------------------------------------------------------------------
def PointPlot(self, Lon, Lat, Label=[], Set=['o','y',5,1]):
x, y = self._map(Lon, Lat)
self._zo += 1
self._map.plot(x, y, Set[0],
color = Set[1],
markersize = Set[2],
markeredgewidth = Set[3],
label = Label,
zorder = self._zo)
#---------------------------------------------------------------------------------------
def LabelPlot(self, Lon, Lat, Label, Set=['normal','k',14]):
x, y = self._map(Lon, Lat)
# If only one label provided, convert to list
if isinstance(Label, str):
x = [x]
y = [y]
Label = [Label]
self._zo += 1
for i, string in enumerate(Label):
plt.text(x[i], y[i], string, weight = Set[0],
color = Set[1],
fontsize = Set[2],
zorder = self._zo)
#---------------------------------------------------------------------------------------
def AreaPlot(self, Lon, Lat, Set=['y',1,'k',1]):
x, y = self._map(Lon, Lat)
if Set[0]:
self._zo += 1
plt.fill(x, y, color = Set[0],
alpha = Set[1],
zorder = self._zo)
if Set[2]:
self._zo += 1
plt.plot(x, y, Set[2],
linewidth = Set[3],
zorder = self._zo)
#---------------------------------------------------------------------------------------
def MeshPlot(self, Lon, Lat, Elev, Cmap=[], Clim=[], Mesh=True):
from matplotlib.colors import BoundaryNorm
from matplotlib.ticker import MaxNLocator
x, y = self._map(Lon, Lat)
z = Elev
if not Cmap:
Cmap = cm.jet
# cmap.set_under('w', alpha=0.)
if not Clim:
Clim = [z.min(), z.max()]
levels = MaxNLocator(nbins=16).tick_values(Clim[0], Clim[1])
norm = BoundaryNorm(levels, ncolors = Cmap.N, clip=True)
if not Mesh:
self._zo += 1
h = plt.scatter(x, y, c = z,
s = 20,
marker = 's',
cmap = Cmap,
vmin = Clim[0],
vmax = Clim[1],
lw = 0,
alpha = 1.,
zorder = self._zo)
else:
self._zo += 1
z = z[:-1, :-1]
h = plt.pcolormesh(x, y, z, cmap = Cmap,
norm = norm,
vmin = Clim[0],
vmax = Clim[1],
lw = 0,
alpha = 1.,
zorder = self._zo)
clb = plt.gcf().colorbar(h, orientation = 'vertical')
clb.outline.set_linewidth(1)
clb.ax.tick_params(labelsize=14)
clb.set_label('Spectral Acceleration ($g$)', size=12)
#---------------------------------------------------------------------------------------
  def ShapeFile(self, ShpFile, Name, Color='k'):
    """Overlay a shapefile's geometry on the basemap.

    ShpFile is the shapefile path (without extension) passed straight to
    Basemap.readshapefile; Name is the attribute name Basemap stores the
    geometry under.
    """
    # NOTE: this doesn't always work with polygons,
    # better to use the function in crd_tool
    self._zo += 1
    self._map.readshapefile(ShpFile,
                            Name,
                            linewidth = 1.5,
                            drawbounds = True,
                            color = Color,
                            zorder = self._zo)
#---------------------------------------------------------------------------------------
def Legend(self, Location=[]):
self._zo += 1
if Location:
l = plt.legend(loc = Location, numpoints = 1)
else:
# Default outside
l = plt.legend(bbox_to_anchor = (1.05, 1),
loc = 2, borderaxespad = 0.,
numpoints = 1)
l.set_zorder(self._zo)
#---------------------------------------------------------------------------------------
def Show(self):
plt.show(block = False)
#---------------------------------------------------------------------------------------
def Close(self):
plt.close('all')
#---------------------------------------------------------------------------------------
def SaveFig(self, OutFile, Dpi=150):
plt.savefig(OutFile, bbox_inches = 'tight', dpi = Dpi)
| 30.937282 | 90 | 0.401847 |
from mpl_toolkits.basemap import Basemap
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import numpy as np
class GeoMap:
def __init__(self, Cfg=[]):
if not Cfg:
self._cfg = {'Bounds': [7., 36., 19., 48.],
'FigSize': [6., 6.],
'Background': ['esri','World_Terrain_Base',1500],
'Grid': [5., 5.]}
else:
self._cfg = Cfg
self._zo = 1
def BasePlot(self):
plt.figure(figsize = (self._cfg['FigSize'][0],
self._cfg['FigSize'][1]))
self._map = Basemap(self._cfg['Bounds'][0],
self._cfg['Bounds'][1],
self._cfg['Bounds'][2],
self._cfg['Bounds'][3],
resolution = 'l',
projection = 'tmerc',
epsg = 3857)
if self._cfg['Background'][0] == 'color':
self._map.drawlsmask(land_color = self._cfg['Background'][1],
ocean_color = self._cfg['Background'][2],
grid = 1.25,
lakes = True)
if self._cfg['Background'][0] == 'etopo':
self._map.etopo(zorder = self._zo)
if self._cfg['Background'][0] == 'esri':
self._map.arcgisimage(service = self._cfg['Background'][1],
xpixels = self._cfg['Background'][2],
dpi = 300,
zorder = self._zo)
if self._cfg['Background'][0] == 'relief':
self._map.shadedrelief()
def DrawGrid(self):
parallels = np.arange(-90, 90, self._cfg['Grid'][1])
meridians = np.arange(0, 360., self._cfg['Grid'][0])
self._zo += 1
self._map.drawparallels(parallels, labels = [1,0,0,0],
fontsize = 14, weight = 'normal',
linewidth = 0.5,
zorder = self._zo)
self._zo += 1
self._map.drawmeridians(meridians, labels = [0,0,0,1],
fontsize = 14, weight = 'normal',
linewidth = 0.5,
zorder = self._zo)
def DrawBounds(self):
self._zo += 1
self._map.drawcoastlines(linewidth = 0.8,
zorder = self._zo)
self._zo += 1
self._map.drawstates(linewidth = 0.8,
zorder = self._zo)
self._zo += 1
self._map.drawcountries(linewidth = 0.8,
zorder = self._zo)
self._zo += 1
self._map.drawrivers(linewidth = 0.1,
color = 'b',
zorder = self._zo)
def Title(self, string, Set=['bold','k',18]):
plt.title(string, weight = Set[0],
color = Set[1],
fontsize = Set[2])
def PointPlot(self, Lon, Lat, Label=[], Set=['o','y',5,1]):
x, y = self._map(Lon, Lat)
self._zo += 1
self._map.plot(x, y, Set[0],
color = Set[1],
markersize = Set[2],
markeredgewidth = Set[3],
label = Label,
zorder = self._zo)
def LabelPlot(self, Lon, Lat, Label, Set=['normal','k',14]):
x, y = self._map(Lon, Lat)
if isinstance(Label, str):
x = [x]
y = [y]
Label = [Label]
self._zo += 1
for i, string in enumerate(Label):
plt.text(x[i], y[i], string, weight = Set[0],
color = Set[1],
fontsize = Set[2],
zorder = self._zo)
def AreaPlot(self, Lon, Lat, Set=['y',1,'k',1]):
x, y = self._map(Lon, Lat)
if Set[0]:
self._zo += 1
plt.fill(x, y, color = Set[0],
alpha = Set[1],
zorder = self._zo)
if Set[2]:
self._zo += 1
plt.plot(x, y, Set[2],
linewidth = Set[3],
zorder = self._zo)
def MeshPlot(self, Lon, Lat, Elev, Cmap=[], Clim=[], Mesh=True):
from matplotlib.colors import BoundaryNorm
from matplotlib.ticker import MaxNLocator
x, y = self._map(Lon, Lat)
z = Elev
if not Cmap:
Cmap = cm.jet
if not Clim:
Clim = [z.min(), z.max()]
levels = MaxNLocator(nbins=16).tick_values(Clim[0], Clim[1])
norm = BoundaryNorm(levels, ncolors = Cmap.N, clip=True)
if not Mesh:
self._zo += 1
h = plt.scatter(x, y, c = z,
s = 20,
marker = 's',
cmap = Cmap,
vmin = Clim[0],
vmax = Clim[1],
lw = 0,
alpha = 1.,
zorder = self._zo)
else:
self._zo += 1
z = z[:-1, :-1]
h = plt.pcolormesh(x, y, z, cmap = Cmap,
norm = norm,
vmin = Clim[0],
vmax = Clim[1],
lw = 0,
alpha = 1.,
zorder = self._zo)
clb = plt.gcf().colorbar(h, orientation = 'vertical')
clb.outline.set_linewidth(1)
clb.ax.tick_params(labelsize=14)
clb.set_label('Spectral Acceleration ($g$)', size=12)
def ShapeFile(self, ShpFile, Name, Color='k'):
# better to use the function in crd_tool
self._zo += 1
self._map.readshapefile(ShpFile,
Name,
linewidth = 1.5,
drawbounds = True,
color = Color,
zorder = self._zo)
#---------------------------------------------------------------------------------------
def Legend(self, Location=[]):
self._zo += 1
if Location:
l = plt.legend(loc = Location, numpoints = 1)
else:
# Default outside
l = plt.legend(bbox_to_anchor = (1.05, 1),
loc = 2, borderaxespad = 0.,
numpoints = 1)
l.set_zorder(self._zo)
#---------------------------------------------------------------------------------------
def Show(self):
plt.show(block = False)
#---------------------------------------------------------------------------------------
def Close(self):
plt.close('all')
#---------------------------------------------------------------------------------------
def SaveFig(self, OutFile, Dpi=150):
plt.savefig(OutFile, bbox_inches = 'tight', dpi = Dpi)
| true | true |
790166f632e8151a123c27d2c3e525232a3cc545 | 35,713 | py | Python | docstring_styles.py | dextreem/SublimeAutoDocstring | 2b986fe8754a36434b10da5f58b07f07da316685 | [
"MIT"
] | 60 | 2015-05-04T06:58:49.000Z | 2022-02-23T14:42:35.000Z | docstring_styles.py | dextreem/SublimeAutoDocstring | 2b986fe8754a36434b10da5f58b07f07da316685 | [
"MIT"
] | 48 | 2015-05-10T00:42:24.000Z | 2022-02-22T19:05:42.000Z | docstring_styles.py | dextreem/SublimeAutoDocstring | 2b986fe8754a36434b10da5f58b07f07da316685 | [
"MIT"
] | 14 | 2015-07-16T22:48:26.000Z | 2022-02-21T06:25:07.000Z | # -*- coding: utf-8 -*-
"""Docstring Parsers/Formatters"""
# TODO: break this module up into smaller pieces
import sys
import re
from textwrap import dedent
from collections import OrderedDict
from itertools import islice
from .autodocstring_logging import logger
PY3k = sys.version_info[0] == 3
if PY3k:
string_types = str,
else:
string_types = basestring, # pylint: disable=undefined-variable
def make_docstring_obj(docstr, default="google", template_order=False):
    """Detect docstring style and create a Docstring object

    Parameters:
        docstr (str): source docstring
        default (str, class): 'google', 'numpy' or subclass
            of Docstring
        template_order (bool, optional): iff True, reorder the
            sections to match the order they appear in the template

    Returns:
        subclass of Docstring
    """
    typ = detect_style(docstr)
    # `default` may be a style name or a Docstring subclass; strings have
    # no __name__, so resolve the display name first
    default_name = default if isinstance(default, string_types) else default.__name__
    logger.info("[make_docstring_obj] from {} to {}"
                "".format(typ.__name__ if typ is not None else None, default_name))
    if typ is None:
        # issubclass() raises TypeError when its first arg is not a class,
        # so guard with isinstance(default, type)
        if isinstance(default, type) and issubclass(default, Docstring):
            typ = default
        else:
            typ = STYLE_LOOKUP[default.lower()]
    return typ(docstr, template_order=template_order)
def detect_style(docstr):
    """Detect docstr style from existing docstring

    Parameters:
        docstr (str): docstring whose style we want to know

    Returns:
        class: one of [GoogleDocstring, NumpyDocstring, None]; None
            means no match
    """
    dedented = dedent_docstr(docstr)
    matches = (cls for cls in STYLE_LOOKUP.values()
               if cls.detect_style(dedented))
    return next(matches, None)
def dedent_docstr(s, n=1):
    """Dedent all lines except the first n lines

    Args:
        s (str): some text to dedent
        n (int): number of lines to skip (n == 0 is a normal dedent,
            n == 1 is useful for whole docstrings)
    """
    lines = s.splitlines(keepends=True)
    if not lines:
        return ""
    # head keeps its content but loses leading spaces/tabs; the rest is
    # dedented as a unit so relative indentation survives
    head = "".join(line.lstrip(' \t') for line in lines[:n])
    tail = dedent("".join(lines[n:]))
    return head + tail
def dedent_verbose(s, n=1):
    """Dedent like dedent_docstr, but also report the removed indent.

    Args:
        s (str): text to dedent
        n (int): number of leading lines to skip

    Returns:
        tuple: (indent string that was stripped, dedented text)
    """
    dedented = dedent_docstr(s, n=n)
    old_lines = s.splitlines(keepends=True)
    new_lines = dedented.splitlines(keepends=True)
    indent = ""
    # the first non-blank line after the skipped header reveals the indent
    for idx in range(n, len(old_lines)):
        if old_lines[idx].strip():
            pos = old_lines[idx].find(new_lines[idx])
            if pos >= 0:
                indent = old_lines[idx][:pos]
            break
    return indent, dedented
def indent_docstr(s, indent, n=1, trim=True):
    """Prefix all but the first n lines with `indent`.

    Args:
        s (str): docstring starting at indentation level 0
        indent (str): text used for indentation, in practice
            this will be the level of the declaration + 1
        n (int): don't indent first n lines
        trim (bool): trim whitespace (' \t') out of blank lines

    Returns:
        str: s with common indentation applied
    """
    out = s.splitlines(keepends=True)
    for idx, line in enumerate(out):
        if idx < n:
            continue
        if line.strip() or not trim:
            out[idx] = indent + line
        else:
            # blank line: optionally drop stray spaces/tabs, keep newline
            out[idx] = line.strip(' \t')
    return "".join(out)
def count_leading_newlines(s):
    """count number of leading newlines

    this includes newlines that are separated by other whitespace

    Note:
        the former slice ``s[:-len(s.lstrip())]`` returned "" whenever the
        whole string was whitespace (``-0`` slices to empty), undercounting;
        the prefix length is now computed explicitly
    """
    prefix_len = len(s) - len(s.lstrip())
    return s[:prefix_len].count('\n')
def count_trailing_newlines(s):
    """count number of trailing newlines

    this includes newlines that are separated by other whitespace
    """
    # count only within the trailing-whitespace region
    stripped_len = len(s.rstrip())
    return s.count('\n', stripped_len)
def with_bounding_newlines(s, nleading=0, ntrailing=0, nl='\n'):
    """return s with at least # leading and # trailing newlines

    this includes newlines that are separated by other whitespace
    """
    # negative counts multiply to "" so existing newlines are never removed
    need_before = nleading - count_leading_newlines(s)
    need_after = ntrailing - count_trailing_newlines(s)
    return (nl * need_before) + s + (nl * need_after)
def strip_newlines(s, nleading=0, ntrailing=0):
    """strip at most nleading and ntrailing newlines from s

    Interior ' \t' whitespace adjacent to a stripped newline is consumed
    along with it, as before.

    Note:
        the former leading check compared a single character against
        '\\r\\n' (never true), so leading CRLFs were never stripped; it
        also raised IndexError on empty/whitespace-only input
    """
    for _ in range(nleading):
        lstripped = s.lstrip(' \t')
        # check '\r\n' before '\n' so CRLF is consumed as one newline
        if lstripped.startswith('\r\n'):
            s = lstripped[2:]
        elif lstripped.startswith('\n'):
            s = lstripped[1:]
        else:
            break
    for _ in range(ntrailing):
        rstripped = s.rstrip(' \t')
        if rstripped.endswith('\r\n'):
            s = rstripped[:-2]
        elif rstripped.endswith('\n'):
            s = rstripped[:-1]
        else:
            break
    return s
class Parameter(object):
    """One documented parameter: its names, type text, and description."""
    names = None
    types = None
    description = None
    tag = None
    descr_only = None
    meta = None

    def __init__(self, names, types, description, tag=None, descr_only=False,
                 annotated=False, **kwargs):
        """
        Args:
            names (list): list of names
            types (str): string describing data types
            description (str): description text; None is stored as ""
            tag (int): some meaningful index? not fleshed out yet
            descr_only (bool): only description is useful
            annotated (bool): type came from a signature annotation
            **kwargs: extra metadata, kept on self.meta
        """
        assert names is not None
        self.names = names
        self.types = types
        self.description = "" if description is None else description
        self.tag = tag
        self.descr_only = descr_only
        self.annotated = annotated
        self.meta = kwargs
class Section(object):
    """One docstring section (Summary, Parameters, ...).

    Sections whose alias has an entry in PARSERS parse their text into
    an ``args`` dict of Parameter objects and re-render it on read;
    other sections store raw text.
    """
    ALIASES = {}
    PARSERS = {}
    is_formatted = None
    args = None
    args_parser = None
    args_formatter = None
    heading = None
    alias = None
    _text = None
    section_indent = ""
    indent = "    "
    meta = None
    formatter_override = None
    def __init__(self, heading, text="", indent=None, **kwargs):
        """
        Args:
            heading (str): heading of the section (should be title case)
            text (str, optional): section text
            indent (str, optional): used by some formatters
        """
        self.heading = heading
        self.alias = self.resolve_alias(heading)
        # formatted sections get a parser/formatter pair keyed by alias
        if self.alias in self.PARSERS:
            parser, formatter = self.PARSERS[self.alias]
            self.args_parser = parser
            self.args_formatter = formatter
            self.is_formatted = True
        else:
            self.is_formatted = False
        if indent is not None:
            self.indent = indent
        self.text = text
        self.meta = kwargs
        logger.debug("create section '{}' ({}) with args : '{}'".format(self.heading,
                                                                        self.alias,
                                                                        self.args))
    @classmethod
    def from_section(cls, sec):
        """Build a section of this style from a section of another style."""
        new_sec = cls(sec.alias)
        new_sec._text = sec._text  # pylint: disable=protected-access
        # when changing styles, the indentation should change to better fit
        # the new style
        # new_sec.section_indent = sec.section_indent
        # new_sec.indent = sec.indent
        if hasattr(sec, "args"):
            new_sec.args = sec.args
        return new_sec
    @classmethod
    def resolve_alias(cls, heading):
        """Map a heading to its canonical name via ALIASES (title-cased)."""
        titled_heading = heading.title()
        try:
            return cls.ALIASES[titled_heading]
        except KeyError:
            return heading
    @property
    def text(self):
        """Section body; formatted sections re-render from self.args."""
        if self.formatter_override is not None:
            s = self.formatter_override(self)  # pylint: disable=not-callable
        elif self.args_formatter is not None:
            s = self.args_formatter(self)
        else:
            s = self._text
        return s
    @text.setter
    def text(self, val):
        """Parse/store the body; records the body's common indentation."""
        val = strip_newlines(val, ntrailing=1)
        if self.args_parser is not None:
            self.args = self.args_parser(self, val)
        else:
            section_indent, self._text = dedent_verbose(val, n=0)
            # don't overwrite section indent if val isn't indented
            if section_indent:
                self.section_indent = section_indent
class NapoleonSection(Section):
    """Shared section logic for Google/Numpy (napoleon) styles."""
    ALIASES = {"Args": "Parameters",
               "Arguments": "Parameters",
               "Deleted Args": "Deleted Parameters",
               "Deleted Arguments": "Deleted Parameters",
               "Other Args": "Other Parameters",
               "Other Arguments": "Other Parameters",
               "Keyword Args": "Keyword Arguments",
               "Return": "Returns",
               "Yield": "Yields",
               "No Longer Returns": "No Longer Returned",
               "No Longer Yields": "No Longer Yielded",
               "Warnings": "Warning"
               }
    def is_return_section(self):
        """True iff this section documents return/yield values."""
        return self.heading and self.heading.lower() in ('return', 'returns',
                                                         'yield', 'yields')
    def param_parser_common(self, text):
        """Split a section body into per-parameter blocks and finalize them.

        A block starts on an unindented line and swallows following
        indented (or blank) lines. Return sections are keyed by index,
        parameter sections by each name.
        """
        # NOTE: there will be some tricky business if there is a
        # section break done by "resuming unindented text"
        param_list = []
        param_dict = OrderedDict()
        text = dedent_docstr(text, 0)
        _r = r"^\S[^\r\n]*(?:\n[^\S\n]+\S[^\r\n]*|\n)*"
        param_blocks = re.findall(_r, text, re.MULTILINE)
        for i, block in enumerate(param_blocks):
            param = self.finalize_param(block, len(param_list))
            param_list.append(param)
            if self.is_return_section():
                # return values keep all names joined as one entry
                param.names = [", ".join(param.names)]
                param_dict[i] = param
            else:
                for name in param.names:
                    param_dict[name] = param
        return param_dict
class GoogleSection(NapoleonSection):
    """Parse/format sections written in Google docstring style."""
    section_indent = "    "
    indent = "    "
    @staticmethod
    def finalize_param(s, tag):
        """Turn one 'name (type): description' block into a Parameter.

        Args:
            s (str): one parameter block
            tag (int): index of param? not fleshed out yet
        """
        meta = {}
        _r = r"([^,\s]+(?:\s*,\s*[^,\s]+)*\s*)(?:\((.*)\))?\s*:\s*(.*)"
        m = re.match(_r, s, re.DOTALL | re.MULTILINE)
        if m:
            names, typ, descr = m.groups()
            names = [n.strip() for n in names.split(',')]
            meta['indent'], descr = dedent_verbose(descr, n=1)
            descr_only = False
        else:
            # block didn't look like a parameter; keep the raw text
            names = ["{0}".format(tag)]
            typ = ""
            descr = s
            descr_only = True
        return Parameter(names, typ, descr, tag=tag, descr_only=descr_only, **meta)
    def param_parser(self, text):
        """Parse this section's body into an OrderedDict of Parameters."""
        logger.info("[GoogleSection] section '{}' starts parsing".format(self.alias))
        return self.param_parser_common(text)
    def param_formatter(self):
        """Render self.args back into Google-style section text."""
        logger.info("[GoogleSection] section '{}' starts formatting".format(self.alias))
        s = ""
        for param in self.args.values():
            if param.descr_only:
                s += with_bounding_newlines(param.description, ntrailing=1)
            else:
                if len(param.names) > 1:
                    logger.warn("section '{}' : Google docstrings don't allow > 1 "
                                "parameter per description".format(self.alias))
                p = "{0}".format(", ".join(param.names))
                if param.types:
                    types = param.types.strip()
                    if types:
                        p = "{0} ({1})".format(p, types)
                if param.description:
                    desc = indent_docstr(param.description,
                                         param.meta.get("indent", self.indent))
                    p = "{0}: {1}".format(p, desc)
                s += with_bounding_newlines(p, ntrailing=1)
        return s
    # sections listed here are parsed into Parameter objects
    PARSERS = {"Parameters": (param_parser,
                              param_formatter),
               "Other Parameters": (param_parser,
                                    param_formatter),
               "Deleted Parameters": (param_parser,
                                      param_formatter),
               "Keyword Arguments": (param_parser,
                                     param_formatter),
               "Attributes": (param_parser,
                              param_formatter),
               "Deleted Attributes": (param_parser,
                                      param_formatter),
               "Raises": (param_parser,
                          param_formatter),
               "No Longer Raises": (param_parser,
                                    param_formatter),
               "Returns": (param_parser,
                           param_formatter),
               "Yields": (param_parser,
                          param_formatter),
               "No Longer Returned": (param_parser,
                                      param_formatter),
               "No Longer Yielded": (param_parser,
                                     param_formatter),
               }
class NumpySection(NapoleonSection):
    """Parse/format sections written in Numpy docstring style."""
    indent = "    "
    @staticmethod
    def finalize_param(s, i):
        """Turn one 'name : type\\n    description' block into a Parameter."""
        meta = {}
        _r = r"\s*([^,\s]+(?:\s*,\s*[^,\s]+)*)\s*(?::\s*(.*?))?[^\S\n]*?\n(\s+.*)"
        m = re.match(_r, s, re.DOTALL)
        if m:
            names, typ, desc = m.groups()
            # FIXME hack, name for numpy parameters is always a list of names
            # to support the multiple parameters per description option in
            # numpy docstrings
            names = [n.strip() for n in names.split(',')]
            meta['indent'], descr = dedent_verbose(desc, 0)
            descr_only = False
        else:
            # block didn't look like a parameter; keep the raw text
            names = ["{0}".format(i)]
            typ = ""
            descr = s
            descr_only = True
        return Parameter(names, typ, descr, tag=i, descr_only=descr_only, **meta)
    def param_parser(self, text):
        """Parse this section's body into an OrderedDict of Parameters."""
        logger.info("[NumpySection] section '{}' starts parsing".format(self.alias))
        return self.param_parser_common(text)
    def param_formatter(self):
        """Render self.args back into Numpy-style section text."""
        # NOTE: there will be some tricky business if there is a
        # section break done by "resuming unindented text"
        logger.info("[NumpySection] section '{}' starts formatting".format(self.alias))
        s = ""
        # already_seen = {}
        for param in self.args.values():
            if param.descr_only:
                s += with_bounding_newlines(param.description, ntrailing=1)
            else:
                p = "{0}".format(", ".join(param.names))
                if param.types:
                    types = param.types.strip()
                    if types:
                        p = "{0} : {1}".format(p, param.types.strip())
                p = with_bounding_newlines(p, ntrailing=1)
                if param.description:
                    p += indent_docstr(param.description,
                                       param.meta.get("indent", self.indent),
                                       n=0)
                s += with_bounding_newlines(p, ntrailing=1)
        return s
    # sections listed here are parsed into Parameter objects
    PARSERS = {"Parameters": (param_parser,
                              param_formatter),
               "Other Parameters": (param_parser,
                                    param_formatter),
               "Deleted Parameters": (param_parser,
                                      param_formatter),
               "Keyword Arguments": (param_parser,
                                     param_formatter),
               "Attributes": (param_parser,
                              param_formatter),
               "Deleted Attributes": (param_parser,
                                      param_formatter),
               "Raises": (param_parser,
                          param_formatter),
               "No Longer Raises": (param_parser,
                                    param_formatter),
               "Returns": (param_parser,
                           param_formatter),
               "Yields": (param_parser,
                          param_formatter),
               "No Longer Returned": (param_parser,
                                      param_formatter),
               "No Longer Yielded": (param_parser,
                                     param_formatter),
               }
class Docstring(object):
    """Handle parsing / modifying / writing docstrings"""
    STYLE_NAME = "none"
    SECTION_STYLE = Section
    TEMPLATE = OrderedDict([("Summary", None)])
    PREFERRED_PARAMS_ALIAS = "Args"
    sections = None
    trailing_newlines = None
    def __init__(self, docstr, template_order=False):
        """
        Parameters:
            docstr (Docstring or str): some existing docstring
            template_order (bool, optional): iff True, reorder the
                sections to match the order they appear in the template
        """
        if isinstance(docstr, Docstring):
            self.sections = docstr.sections
            self.trailing_newlines = docstr.trailing_newlines
            if not isinstance(docstr, type(self)):
                # fixme, this is kinda hacky
                make_new_sec = self.SECTION_STYLE.from_section
                # iterate over a snapshot: the loop deletes entries, and
                # mutating an OrderedDict during items() iteration raises
                # RuntimeError on Python 3
                for sec_name, sec in list(docstr.sections.items()):
                    # when the section should not exist
                    # i.e. when a section was generated, but isn't needed anymore
                    # e.g. when there isn't any exception raised
                    if sec:
                        docstr.sections[sec_name] = make_new_sec(sec)
                    else:
                        # deleting section that shouldn't be here
                        # including those generated with template_order=True
                        del docstr.sections[sec_name]
                # ok, this way of changing indentation is a thunder hack
                if "Parameters" in docstr.sections:
                    self.get_section("Parameters").heading = self.PREFERRED_PARAMS_ALIAS
                    for arg in self.get_section("Parameters").args.values():
                        arg.meta['indent'] = self.get_section("Parameters").indent
                if "Returns" in docstr.sections:
                    for arg in self.get_section("Returns").args.values():
                        arg.meta['indent'] = self.get_section("Returns").indent
                if "Yields" in docstr.sections:
                    for arg in self.get_section("Yields").args.values():
                        arg.meta['indent'] = self.get_section("Yields").indent
        elif isinstance(docstr, string_types):
            if template_order:
                self.sections = self.TEMPLATE.copy()
            else:
                self.sections = OrderedDict()
            self._parse(docstr)
    def _parse(self, s):
        """Parse docstring into meta data

        Parameters:
            s (str): docstring
        """
        raise NotImplementedError("_parse is an abstract method")
    def format(self, top_indent):
        """Format docstring into a string

        Parameters:
            top_indent (str): indentation added to all but the first
                lines

        Returns:
            str: properly formatted
        """
        raise NotImplementedError("format is an abstract method")
    def update_parameters(self, params):
        """Sync the Parameters section with `params` (abstract)."""
        raise NotImplementedError("update_parameters is an abstract method")
    def update_return_type(self, ret_name, ret_type,
                           default_description="Description",
                           keyword="return"):
        """Sync the Returns/Yields section (abstract)."""
        raise NotImplementedError("update_return_type is an abstract method")
    def update_attributes(self, attribs, alpha_order=True):
        """
        Args:
            attribs (OrderedDict): params objects keyed by their names
        """
        raise NotImplementedError("update_attributes is an abstract method")
    def update_exceptions(self, attribs, alpha_order=True):
        """
        Args:
            attribs (OrderedDict): params objects keyed by their names
        """
        raise NotImplementedError("update_exceptions is an abstract method")
    def add_dummy_returns(self, name, typ, description):
        """Ensure a Returns section exists (abstract)."""
        raise NotImplementedError("add_dummy_returns is an abstract method")
    def finalize_section(self, heading, text):
        """Create a section of this style and store it under its alias.

        Args:
            heading (str): section heading
            text (str): section body
        """
        section = self.SECTION_STYLE(heading, text)
        self.sections[section.alias] = section
    def get_section(self, section_name):
        """Look up a section by name or alias; raises KeyError if absent."""
        if section_name in self.sections:
            return self.sections[section_name]
        elif section_name in self.SECTION_STYLE.ALIASES:
            alias = self.SECTION_STYLE.resolve_alias(section_name)
            if alias in self.sections:
                return self.sections[alias]
        raise KeyError("Section '{0}' not found".format(section_name))
    def pop_section(self, section_name):
        """Remove and return a section by name or alias; KeyError if absent."""
        if section_name in self.sections:
            return self.sections.pop(section_name)
        elif section_name in self.SECTION_STYLE.ALIASES:
            alias = self.SECTION_STYLE.resolve_alias(section_name)
            if alias in self.sections:
                return self.sections.pop(alias)
        raise KeyError("Section '{0}' not found".format(section_name))
    def insert_section(self, section_name, section):
        """Store `section` under `section_name`, renaming its heading."""
        if section.heading != section_name:
            section.heading = section_name
        self.sections[section_name] = section
    def section_exists(self, section_name):
        """returns True iff section exists, and was finalized"""
        sec = None
        if section_name in self.sections:
            sec = self.sections[section_name]
        elif section_name in self.SECTION_STYLE.ALIASES:
            alias = self.SECTION_STYLE.resolve_alias(section_name)
            if alias in self.sections:
                sec = self.sections[alias]
        if sec is not None:
            return True
        return False
class NapoleonDocstring(Docstring):  # pylint: disable=abstract-method
    """Styles understood by napoleon, aka. Google/Numpy"""
    STYLE_NAME = "napoleon"
    TEMPLATE = OrderedDict([("Summary", None),
                            ("Parameters", None),
                            ("Keyword Arguments", None),
                            ("Returns", None),
                            ("Yields", None),
                            ("No Longer Returned", None),
                            ("No Longer Yielded", None),
                            ("Other Parameters", None),
                            ("Deleted Parameters", None),
                            ("Attributes", None),
                            ("Deleted Attributes", None),
                            ("Methods", None),
                            ("Raises", None),
                            ("No Longer Raises", None),
                            ("Warns", None),
                            ("See Also", None),
                            ("Warning", None),
                            ("Note", None),
                            ("Notes", None),
                            ("References", None),
                            ("Example", None),
                            ("Examples", None),
                            ])
    @staticmethod
    def _extract_section_name(sec_re_result):
        """Pull a clean heading out of a SECTION_RE match string."""
        return sec_re_result.strip()
    def _parse(self, s):
        """Split the docstring at section headers and finalize each chunk.

        Args:
            s (str): raw docstring text
        """
        logger.info("[NapoleonDocstring] starts parsing text")
        self.trailing_newlines = count_trailing_newlines(s)
        s = dedent_docstr(s)
        sec_starts = [(m.start(), m.end(), m.string[m.start():m.end()])
                      for m in re.finditer(self.SECTION_RE, s, re.MULTILINE)]
        # everything before the first header is the Summary
        sec_starts.insert(0, (0, 0, "Summary"))
        sec_starts.append((len(s), len(s), ""))
        for current_sec, next_sec in zip(sec_starts[:-1], sec_starts[1:]):
            sec_name = self._extract_section_name(current_sec[2])
            sec_body = s[current_sec[1]:next_sec[0]]
            self.finalize_section(sec_name, sec_body)
    @staticmethod
    def _format_section_text(heading, body):
        raise NotImplementedError("This is an abstract method")
    def format(self, top_indent):
        """Render all sections back into one docstring body.

        Args:
            top_indent (str): indentation for all but the first line
        """
        logger.info("[NapoleonDocstring] starts formatting")
        s = ""
        if self.section_exists("Summary"):
            sec_text = self.get_section("Summary").text
            if sec_text.strip():
                s += with_bounding_newlines(sec_text, nleading=0, ntrailing=1)
        for _, section in islice(self.sections.items(), 1, None):
            if section is None:
                continue
            sec_body = indent_docstr(section.text, section.section_indent, n=0)
            sec_text = self._format_section_text(section.heading, sec_body)
            s += with_bounding_newlines(sec_text, nleading=1, ntrailing=1)
        if self.trailing_newlines:
            s = with_bounding_newlines(s, ntrailing=self.trailing_newlines)
        s = indent_docstr(s, top_indent)
        return s
    def _update_section(self, params, sec_name, sec_alias=None,
                        del_prefix="Deleted ", alpha_order=False,
                        other_sections=()):
        """Update section to add / remove params

        As a failsafe, params that are removed are placed in a
        "Deleted ..." section

        Args:
            params (OrderedDict): dict of Parameter objects
            sec_name (str): generic section name
            sec_alias (str): section name that appears in the docstring
            del_prefix (str): prefix for section that holds params that
                no longer exist.
            alpha_order (bool): whether or not to alphabetically sort
                the params
        """
        if not sec_alias:
            sec_alias = sec_name
        if not self.section_exists(sec_name) and len(params) == 0:
            return None
        elif not self.section_exists(sec_name):
            self.finalize_section(sec_alias, "")
        # put together which other sections exist so we can use them to
        # exclude params that exist in them
        _other = []
        for _secname in other_sections:
            if self.section_exists(_secname):
                _other.append(self.get_section(_secname))
        other_sections = _other
        if alpha_order:
            sorted_params = OrderedDict()
            for k in sorted(list(params.keys()), key=str.lower):
                sorted_params[k] = params[k]
            params = sorted_params
        current_dict = self.get_section(sec_name).args
        # go through params in the order of the function declaration
        # and cherry-pick from current_dict if there's already a description
        # for that parameter
        tags_seen = dict()
        new = OrderedDict()
        for name, param in params.items():
            if name in current_dict:
                def_param = param
                param = current_dict.pop(name)
                if param.tag in tags_seen:
                    param = None
                else:
                    tags_seen[param.tag] = True
                    # update the type if annotated
                    if def_param.annotated:
                        param.types = def_param.types
            else:
                # if param is in one of the 'other sections', then don't
                # worry about it
                for sec in other_sections:
                    if name in sec.args:
                        # update the type if annotated
                        if param.annotated:
                            sec.args[name].types = param.types
                        # now ignore it
                        param = None
            if param:
                new[name] = param
        # add description only parameters back in; iterate over a snapshot
        # since popping during items() iteration raises RuntimeError on
        # Python 3
        for key, param in list(current_dict.items()):
            if param.descr_only:
                # param.description = '\n' + param.description
                new[key] = current_dict.pop(key)
        # not sure when this guy gets created
        if '' in current_dict:
            del current_dict['']
        # go through params that are no longer in the arguments list and
        # move them from the Parameters section of the docstring to the
        # deleted parameters section
        if len(current_dict):
            del_sec_name = del_prefix + sec_name
            del_sec_alias = del_prefix + sec_alias
            logger.warn("killing parameters named: {}".format(current_dict.keys()))
            # TODO: put a switch here for other behavior?
            if not self.section_exists(self.SECTION_STYLE.resolve_alias(del_sec_name)):
                self.finalize_section(del_sec_name, "")
            deled_params = self.get_section(del_sec_name)
            deleted_tags = dict()
            for key, val in current_dict.items():
                if key in deled_params.args:
                    logger.warn("Stronger Warning: Killing old deleted param: "
                                "'{0}'".format(key))
                val.names.remove(key)
                if val.tag in deleted_tags:
                    deleted_tags[val.tag].names.append(key)
                else:
                    new_val = Parameter([key], val.types, val.description)
                    deleted_tags[val.tag] = new_val
                # store the Parameter this key was merged into (the old code
                # reused a stale `new_val` when tags alternated)
                deled_params.args[key] = deleted_tags[val.tag]
        if len(new) == 0:
            self.sections[sec_name] = None
        else:
            self.sections[sec_name].args = new
    def update_parameters(self, params):
        """
        Args:
            params (OrderedDict): params objects keyed by their names
        """
        logger.info("[NapoleonDocstring] update parameters")
        # NOTE(review): 'Keyword Parameters' is not a known alias — should
        # this be 'Keyword Arguments'? confirm against the section ALIASES
        other_sections = ['Other Parameters', 'Keyword Parameters']
        self._update_section(params, "Parameters", self.PREFERRED_PARAMS_ALIAS,
                             other_sections=other_sections)
    def update_return_type(self, ret_name, ret_type,
                           default_description="Description",
                           keyword="return", del_prefix="No Longer "):
        """Sync the Returns/Yields section with the declared return."""
        logger.info("[NapoleonDocstring] update return type")
        if keyword == "yield":
            sec_name = "Yields"
        elif keyword == "return":
            sec_name = "Returns"
        else:
            logger.debug("Unknown return keyword: '{}'".format(keyword))
            for std_ret_name in ("Yields", "Returns"):
                if self.section_exists(std_ret_name):
                    del_sec_name = del_prefix + std_ret_name
                    del_sec_alias = self.SECTION_STYLE.resolve_alias(del_sec_name)
                    if not self.section_exists(del_sec_alias):
                        self.finalize_section(del_sec_alias, "")
                    del_sec = self.get_section(del_sec_alias)
                    sec = self.pop_section(std_ret_name)
                    del_sec.args = sec.args
                    return
            # nothing to retire; bail out instead of falling through with
            # sec_name unbound (was a NameError below)
            return
        if not self.section_exists(sec_name):
            # see if a section exists from another keyword, ie, maybe
            # this function used to return, but now it yields
            for std_ret_name in ("Yields", "Returns"):
                if self.section_exists(std_ret_name):
                    # necessary to recreate the section completely
                    # in order to use the right parser and formatter
                    logger.debug("old return section exists : '{}'".format(std_ret_name))
                    old_sec = self.pop_section(std_ret_name)
                    self.finalize_section(sec_name, "")
                    new_sec = self.get_section(sec_name)
                    new_sec.args = old_sec.args
                    self.insert_section(sec_name, new_sec)
                    break
        if self.section_exists(sec_name):
            sec = self.get_section(sec_name)
            if sec.args and ret_type:
                # update the first documented return value in place
                p0 = next(iter(sec.args.values()))
                if p0.descr_only:
                    p0.description = ret_type
                elif p0.types:
                    p0.types = ret_type
                elif p0.names:
                    p0.names = [ret_type]
            elif ret_name or ret_type:
                description = default_description
                sec.args = OrderedDict()
                if ret_name:
                    sec.args[ret_name] = Parameter([ret_name], ret_type, description)
                else:
                    sec.args[ret_type] = Parameter([ret_type], "", description)
            else:
                # and i ask myself, how did i get here?
                pass
        else:
            self.finalize_section(sec_name, "")
            sec = self.get_section(sec_name)
            ret_type = ret_type if ret_type != "" else "${NUMBER:TYPE}"
            sec.args = OrderedDict()
            sec.args[ret_type] = Parameter([ret_type], "", default_description)
    def update_attributes(self, attribs, alpha_order=True):
        """
        Args:
            attribs (OrderedDict): params objects keyed by their names
        """
        logger.info("[NapoleonDocstring] update attributes")
        self._update_section(attribs, "Attributes", alpha_order=alpha_order)
    def update_exceptions(self, attribs, alpha_order=True):
        """
        Args:
            attribs (OrderedDict): params objects keyed by their names
        """
        logger.info("[NapoleonDocstring] update exceptions")
        self._update_section(attribs, "Raises", del_prefix="No Longer ",
                             alpha_order=alpha_order)
    def add_dummy_returns(self, name, typ, description):
        """Ensure a Returns section exists with a single placeholder entry."""
        # No longer used??
        if not self.section_exists("Returns"):
            sec = self.SECTION_STYLE("Returns")
            if name:
                sec.args = {name: Parameter([name], typ, description)}
            else:
                sec.args = {typ: Parameter([typ], "", description)}
            self.sections["Returns"] = sec
class GoogleDocstring(NapoleonDocstring):
    """Google-style docstrings ('Args:' headers with indented bodies)."""
    STYLE_NAME = "google"
    SECTION_STYLE = GoogleSection
    SECTION_RE = r"^[A-Za-z0-9][A-Za-z0-9 \t]*:\s*$\r?\n?"
    PREFERRED_PARAMS_ALIAS = "Args"

    @classmethod
    def detect_style(cls, docstr):
        """Return True iff docstr contains a Google-style section header."""
        return re.search(cls.SECTION_RE, docstr, re.MULTILINE) is not None

    @staticmethod
    def _extract_section_name(sec_re_result):
        """Strip the trailing colon and whitespace from a header match."""
        name = sec_re_result.strip()
        return name.rstrip(':').rstrip()

    @staticmethod
    def _format_section_text(heading, body):
        """Render a section as 'Heading:' followed by its body."""
        return "{0}:\n{1}".format(heading, body)
class NumpyDocstring(NapoleonDocstring):
    """Numpy-style docstrings (headings underlined with dashes)."""
    STYLE_NAME = "numpy"
    SECTION_STYLE = NumpySection
    SECTION_RE = r"^([A-Za-z0-9][A-Za-z0-9 \t]*)\s*\n-+\s*?$\r?\n?"
    PREFERRED_PARAMS_ALIAS = "Parameters"

    @classmethod
    def detect_style(cls, docstr):
        """Return True iff docstr contains a dash-underlined heading."""
        return re.search(cls.SECTION_RE, docstr, re.MULTILINE) is not None

    @staticmethod
    def _extract_section_name(sec_re_result):
        """Strip the dashed underline and whitespace from a heading match."""
        name = sec_re_result.strip()
        return name.rstrip('-').rstrip()

    @staticmethod
    def _format_section_text(heading, body):
        """Render a heading, its dashed underline, then the body."""
        underline = "-" * len(heading)
        return "{0}\n{1}\n{2}".format(heading, underline, body)
# Registry of supported docstring styles; detect_style() tries them in
# this (insertion) order.
STYLE_LOOKUP = OrderedDict([('numpy', NumpyDocstring),
                            ('google', GoogleDocstring)])
##
## EOF
##
| 35.641717 | 89 | 0.540895 |
import sys
import re
from textwrap import dedent
from collections import OrderedDict
from itertools import islice
from .autodocstring_logging import logger
# Python 2/3 compatibility shim: a tuple of string types for isinstance()
PY3k = sys.version_info[0] == 3
if PY3k:
    string_types = str,
else:
    string_types = basestring,
def make_docstring_obj(docstr, default="google", template_order=False):
    """Parse ``docstr`` into a Docstring object of the detected style.

    Args:
        docstr (str): raw docstring text
        default: style used when detection fails; either a key of
            STYLE_LOOKUP (e.g. "google") or a Docstring subclass
        template_order (bool): seed sections in the style's TEMPLATE order

    Returns:
        Docstring: parsed docstring object
    """
    typ = detect_style(docstr)
    # bug fix: `default` is usually a str, which has no __name__ attribute
    default_name = default if isinstance(default, string_types) else default.__name__
    logger.info("[make_docstring_obj] from {} to {}"
                "".format(typ.__name__ if typ is not None else None, default_name))
    if typ is None:
        # bug fix: issubclass() raises TypeError when given a non-class,
        # so only consult it when `default` is actually a class
        if isinstance(default, type) and issubclass(default, Docstring):
            typ = default
        else:
            typ = STYLE_LOOKUP[default.lower()]
    return typ(docstr, template_order=template_order)
def detect_style(docstr):
    """Return the Docstring subclass whose style matches ``docstr``.

    Returns:
        type or None: a class from STYLE_LOOKUP, or None if nothing matched
    """
    docstr = dedent_docstr(docstr)
    for c in STYLE_LOOKUP.values():
        if c.detect_style(docstr):
            return c
    return None
def dedent_docstr(s, n=1):
    """Dedent docstring text, special-casing the first ``n`` lines.

    The first ``n`` lines lose all leading spaces/tabs; the remaining lines
    lose only their common indentation (via ``textwrap.dedent``).
    """
    lines = s.splitlines(keepends=True)
    if not lines:
        return ""
    head = "".join(line.lstrip(' \t') for line in lines[:n])
    body = dedent("".join(lines[n:]))
    return head + body
def dedent_verbose(s, n=1):
    """Dedent like :func:`dedent_docstr`, but also report what was removed.

    Returns:
        tuple: ``(indent, dedented_text)`` where ``indent`` is the
        whitespace prefix stripped from the first non-blank line at or
        after line ``n`` ("" if it could not be determined)
    """
    new = dedent_docstr(s, n=n)
    s_split = s.splitlines(keepends=True)
    new_split = new.splitlines(keepends=True)
    i, ind = 0, -1
    # find the first non-blank line to measure the removed indentation
    for i in range(n, len(s_split)):
        if s_split[i].strip():
            ind = s_split[i].find(new_split[i])
            break
    if ind >= 0:
        indent = s_split[i][:ind]
    else:
        indent = ""
    return indent, new
def indent_docstr(s, indent, n=1, trim=True):
    """Prepend ``indent`` to every line of ``s`` starting at line ``n``.

    Blank lines are reduced to their line break instead of being indented,
    unless ``trim`` is False.
    """
    out = s.splitlines(keepends=True)
    for idx, line in enumerate(out):
        if idx < n:
            continue
        if trim and not line.strip():
            out[idx] = line.strip(' \t')
        else:
            out[idx] = indent + line
    return "".join(out)
def count_leading_newlines(s):
    """Count newline characters in the leading whitespace of ``s``.

    Bug fix: the old ``s[:-len(s.lstrip())]`` slicing evaluated to
    ``s[:-0]`` (the empty string) whenever ``s`` was empty or contained
    only whitespace, so it always reported 0 for such input.
    """
    leading_ws = s[:len(s) - len(s.lstrip())]
    return leading_ws.count('\n')
def count_trailing_newlines(s):
    """Count newline characters in the trailing whitespace of ``s``."""
    trailing_ws = s[len(s.rstrip()):]
    return trailing_ws.count('\n')
def with_bounding_newlines(s, nleading=0, ntrailing=0, nl='\n'):
    """Pad ``s`` so it has at least ``nleading``/``ntrailing`` newlines
    before/after its content (existing boundary newlines count)."""
    return "{0}{1}{2}".format(nl * (nleading - count_leading_newlines(s)),
                              s,
                              nl * (ntrailing - count_trailing_newlines(s)))
def strip_newlines(s, nleading=0, ntrailing=0):
    """Strip at most ``nleading``/``ntrailing`` newlines from either end.

    Spaces/tabs surrounding a stripped newline are removed with it.

    Bug fixes: the old code compared a single character to the two-char
    string '\\r\\n' (never true), so a leading CRLF was never stripped, and
    it indexed ``[0]`` which raised IndexError for empty or
    whitespace-only input.
    """
    for _ in range(nleading):
        stripped = s.lstrip(' \t')
        if stripped.startswith('\r\n'):
            s = stripped[2:]
        elif stripped.startswith('\n'):
            s = stripped[1:]
    for _ in range(ntrailing):
        stripped = s.rstrip(' \t')
        if stripped.endswith('\r\n'):
            s = stripped[:-2]
        elif stripped.endswith('\n'):
            s = stripped[:-1]
    return s
class Parameter(object):
    """Value object for one documented parameter/return/attribute."""
    # class-level defaults; every instance overwrites them in __init__
    names = None
    types = None
    description = None
    tag = None
    descr_only = None
    meta = None
    def __init__(self, names, types, description, tag=None, descr_only=False,
                 annotated=False, **kwargs):
        """
        Args:
            names (list): parameter name(s); more than one entry when
                several names share a single description
            types (str): type text ("" if unknown)
            description (str): description body; None is coerced to ""
            tag: id used to group names that share one description block
            descr_only (bool): True when the block had no name/type header
            annotated (bool): True when the type came from a signature
                annotation
            **kwargs: extra metadata (e.g. the description's indent)
        """
        assert names is not None
        if description is None:
            description = ""
        self.names = names
        self.types = types
        self.description = description
        self.tag = tag
        self.descr_only = descr_only
        self.annotated = annotated
        self.meta = kwargs
class Section(object):
    """One named section of a docstring (e.g. Parameters, Returns)."""
    ALIASES = {}  # alternate heading -> canonical heading
    PARSERS = {}  # canonical heading -> (parser, formatter) pair
    is_formatted = None
    args = None
    args_parser = None
    args_formatter = None
    heading = None
    alias = None
    _text = None
    section_indent = ""
    indent = "    "
    meta = None
    formatter_override = None
    def __init__(self, heading, text="", indent=None, **kwargs):
        """
        Args:
            heading (str): heading as written in the docstring
            text (str): raw body text of the section
            indent (str): override for the per-item description indent
            **kwargs: extra metadata stored on the section
        """
        self.heading = heading
        self.alias = self.resolve_alias(heading)
        if self.alias in self.PARSERS:
            parser, formatter = self.PARSERS[self.alias]
            self.args_parser = parser
            self.args_formatter = formatter
            self.is_formatted = True
        else:
            self.is_formatted = False
        if indent is not None:
            self.indent = indent
        self.text = text
        self.meta = kwargs
        logger.debug("create section '{}' ({}) with args : '{}'".format(self.heading,
                                                                        self.alias,
                                                                        self.args))
    @classmethod
    def from_section(cls, sec):
        """Create a section of this style from a section of another style."""
        new_sec = cls(sec.alias)
        new_sec._text = sec._text
        if hasattr(sec, "args"):
            new_sec.args = sec.args
        return new_sec
    @classmethod
    def resolve_alias(cls, heading):
        """Map a heading to its canonical name (falls back to itself)."""
        titled_heading = heading.title()
        try:
            return cls.ALIASES[titled_heading]
        except KeyError:
            return heading
    @property
    def text(self):
        """Section body, re-rendered from parsed args when available."""
        if self.formatter_override is not None:
            s = self.formatter_override(self)
        elif self.args_formatter is not None:
            s = self.args_formatter(self)
        else:
            s = self._text
        return s
    @text.setter
    def text(self, val):
        # parse structured sections into args; otherwise store dedented text
        val = strip_newlines(val, ntrailing=1)
        if self.args_parser is not None:
            self.args = self.args_parser(self, val)
        else:
            section_indent, self._text = dedent_verbose(val, n=0)
            if section_indent:
                self.section_indent = section_indent
class NapoleonSection(Section):
    """Section base for the napoleon (google/numpy) docstring family."""
    ALIASES = {"Args": "Parameters",
               "Arguments": "Parameters",
               "Deleted Args": "Deleted Parameters",
               "Deleted Arguments": "Deleted Parameters",
               "Other Args": "Other Parameters",
               "Other Arguments": "Other Parameters",
               "Keyword Args": "Keyword Arguments",
               "Return": "Returns",
               "Yield": "Yields",
               "No Longer Returns": "No Longer Returned",
               "No Longer Yields": "No Longer Yielded",
               "Warnings": "Warning"
               }
    def is_return_section(self):
        """Return True if this section describes return/yield values."""
        return self.heading and self.heading.lower() in ('return', 'returns',
                                                         'yield', 'yields')
    def param_parser_common(self, text):
        """Split section body into Parameter objects keyed by name.

        Return-type sections are keyed by position instead, since their
        entries need not have unique names.
        """
        param_list = []
        param_dict = OrderedDict()
        text = dedent_docstr(text, 0)
        # one block = an unindented line plus its indented continuations
        _r = r"^\S[^\r\n]*(?:\n[^\S\n]+\S[^\r\n]*|\n)*"
        param_blocks = re.findall(_r, text, re.MULTILINE)
        for i, block in enumerate(param_blocks):
            param = self.finalize_param(block, len(param_list))
            param_list.append(param)
            if self.is_return_section():
                param.names = [", ".join(param.names)]
                param_dict[i] = param
            else:
                for name in param.names:
                    param_dict[name] = param
        return param_dict
class GoogleSection(NapoleonSection):
    """Section parsing/formatting for Google-style docstrings."""
    section_indent = "    "
    indent = "    "
    @staticmethod
    def finalize_param(s, tag):
        """Build a Parameter from one ``name (type): description`` block."""
        meta = {}
        _r = r"([^,\s]+(?:\s*,\s*[^,\s]+)*\s*)(?:\((.*)\))?\s*:\s*(.*)"
        m = re.match(_r, s, re.DOTALL | re.MULTILINE)
        if m:
            names, typ, descr = m.groups()
            names = [n.strip() for n in names.split(',')]
            meta['indent'], descr = dedent_verbose(descr, n=1)
            descr_only = False
        else:
            # free text without a name/type header
            names = ["{0}".format(tag)]
            typ = ""
            descr = s
            descr_only = True
        return Parameter(names, typ, descr, tag=tag, descr_only=descr_only, **meta)
    def param_parser(self, text):
        """Parse this section's body into an OrderedDict of Parameters."""
        logger.info("[GoogleSection] section '{}' starts parsing".format(self.alias))
        return self.param_parser_common(text)
    def param_formatter(self):
        """Render parsed Parameters back into Google-style section text."""
        logger.info("[GoogleSection] section '{}' starts formatting".format(self.alias))
        s = ""
        for param in self.args.values():
            if param.descr_only:
                s += with_bounding_newlines(param.description, ntrailing=1)
            else:
                if len(param.names) > 1:
                    logger.warn("section '{}' : Google docstrings don't allow > 1 "
                                "parameter per description".format(self.alias))
                p = "{0}".format(", ".join(param.names))
                if param.types:
                    types = param.types.strip()
                    if types:
                        p = "{0} ({1})".format(p, types)
                if param.description:
                    desc = indent_docstr(param.description,
                                         param.meta.get("indent", self.indent))
                    p = "{0}: {1}".format(p, desc)
                s += with_bounding_newlines(p, ntrailing=1)
        return s
    PARSERS = {"Parameters": (param_parser,
                              param_formatter),
               "Other Parameters": (param_parser,
                                    param_formatter),
               "Deleted Parameters": (param_parser,
                                      param_formatter),
               "Keyword Arguments": (param_parser,
                                     param_formatter),
               "Attributes": (param_parser,
                              param_formatter),
               "Deleted Attributes": (param_parser,
                                      param_formatter),
               "Raises": (param_parser,
                          param_formatter),
               "No Longer Raises": (param_parser,
                                    param_formatter),
               "Returns": (param_parser,
                           param_formatter),
               "Yields": (param_parser,
                          param_formatter),
               "No Longer Returned": (param_parser,
                                      param_formatter),
               "No Longer Yielded": (param_parser,
                                     param_formatter),
               }
class NumpySection(NapoleonSection):
    """Section parsing/formatting for Numpy-style docstrings."""
    indent = "    "
    @staticmethod
    def finalize_param(s, i):
        """Build a Parameter from one ``name : type\\n    descr`` block."""
        meta = {}
        _r = r"\s*([^,\s]+(?:\s*,\s*[^,\s]+)*)\s*(?::\s*(.*?))?[^\S\n]*?\n(\s+.*)"
        m = re.match(_r, s, re.DOTALL)
        if m:
            names, typ, desc = m.groups()
            # FIXME hack, name for numpy parameters is always a list of names
            # to support the multiple parameters per description option in
            # numpy docstrings
            names = [n.strip() for n in names.split(',')]
            meta['indent'], descr = dedent_verbose(desc, 0)
            descr_only = False
        else:
            # free text without a name/type header
            names = ["{0}".format(i)]
            typ = ""
            descr = s
            descr_only = True
        return Parameter(names, typ, descr, tag=i, descr_only=descr_only, **meta)
    def param_parser(self, text):
        """Parse this section's body into an OrderedDict of Parameters."""
        logger.info("[NumpySection] section '{}' starts parsing".format(self.alias))
        return self.param_parser_common(text)
    def param_formatter(self):
        """Render parsed Parameters back into Numpy-style section text."""
        # NOTE: there will be some tricky business if there is a
        # section break done by "resuming unindented text"
        logger.info("[NumpySection] section '{}' starts formatting".format(self.alias))
        s = ""
        # already_seen = {}
        for param in self.args.values():
            if param.descr_only:
                s += with_bounding_newlines(param.description, ntrailing=1)
            else:
                p = "{0}".format(", ".join(param.names))
                if param.types:
                    types = param.types.strip()
                    if types:
                        p = "{0} : {1}".format(p, param.types.strip())
                p = with_bounding_newlines(p, ntrailing=1)
                if param.description:
                    p += indent_docstr(param.description,
                                       param.meta.get("indent", self.indent),
                                       n=0)
                s += with_bounding_newlines(p, ntrailing=1)
        return s
    PARSERS = {"Parameters": (param_parser,
                              param_formatter),
               "Other Parameters": (param_parser,
                                    param_formatter),
               "Deleted Parameters": (param_parser,
                                      param_formatter),
               "Keyword Arguments": (param_parser,
                                     param_formatter),
               "Attributes": (param_parser,
                              param_formatter),
               "Deleted Attributes": (param_parser,
                                      param_formatter),
               "Raises": (param_parser,
                          param_formatter),
               "No Longer Raises": (param_parser,
                                    param_formatter),
               "Returns": (param_parser,
                           param_formatter),
               "Yields": (param_parser,
                          param_formatter),
               "No Longer Returned": (param_parser,
                                      param_formatter),
               "No Longer Yielded": (param_parser,
                                     param_formatter),
               }
class Docstring(object):
    """Abstract base handling parsing/section bookkeeping for docstrings."""
    STYLE_NAME = "none"
    SECTION_STYLE = Section
    TEMPLATE = OrderedDict([("Summary", None)])
    PREFERRED_PARAMS_ALIAS = "Args"
    sections = None  # OrderedDict: canonical name -> Section (or None)
    trailing_newlines = None  # newline count to restore when formatting
    def __init__(self, docstr, template_order=False):
        """
        Args:
            docstr (Docstring or str): docstring to parse; a Docstring of a
                different style is converted to this style's SECTION_STYLE
            template_order (bool): seed self.sections in TEMPLATE order
        """
        if isinstance(docstr, Docstring):
            self.sections = docstr.sections
            self.trailing_newlines = docstr.trailing_newlines
            if not isinstance(docstr, type(self)):
                # fixme, this is kinda hacky
                make_new_sec = self.SECTION_STYLE.from_section
                # bug fix: iterate over a snapshot -- the loop deletes
                # entries, and mutating a dict while iterating its items()
                # raises RuntimeError in Python 3
                for sec_name, sec in list(docstr.sections.items()):
                    if sec:
                        docstr.sections[sec_name] = make_new_sec(sec)
                    else:
                        # drop placeholder sections that shouldn't be here
                        del docstr.sections[sec_name]
                if "Parameters" in docstr.sections:
                    params_sec = self.get_section("Parameters")
                    params_sec.heading = self.PREFERRED_PARAMS_ALIAS
                    for arg in params_sec.args.values():
                        arg.meta['indent'] = params_sec.indent
                if "Returns" in docstr.sections:
                    returns_sec = self.get_section("Returns")
                    for arg in returns_sec.args.values():
                        arg.meta['indent'] = returns_sec.indent
                if "Yields" in docstr.sections:
                    yields_sec = self.get_section("Yields")
                    for arg in yields_sec.args.values():
                        arg.meta['indent'] = yields_sec.indent
        elif isinstance(docstr, string_types):
            if template_order:
                self.sections = self.TEMPLATE.copy()
            else:
                self.sections = OrderedDict()
            self._parse(docstr)
    def _parse(self, s):
        raise NotImplementedError("_parse is an abstract method")
    def format(self, top_indent):
        raise NotImplementedError("format is an abstract method")
    def update_parameters(self, params):
        raise NotImplementedError("update_parameters is an abstract method")
    def update_return_type(self, ret_name, ret_type,
                           default_description="Description",
                           keyword="return"):
        raise NotImplementedError("update_return_type is an abstract method")
    def update_attributes(self, attribs, alpha_order=True):
        raise NotImplementedError("update_attributes is an abstract method")
    def update_exceptions(self, attribs, alpha_order=True):
        raise NotImplementedError("update_exceptions is an abstract method")
    def add_dummy_returns(self, name, typ, description):
        raise NotImplementedError("add_dummy_returns is an abstract method")
    def finalize_section(self, heading, text):
        """Create a Section from heading/text and register it by alias."""
        section = self.SECTION_STYLE(heading, text)
        self.sections[section.alias] = section
    def get_section(self, section_name):
        """Return a section by name or alias; raise KeyError if absent."""
        if section_name in self.sections:
            return self.sections[section_name]
        elif section_name in self.SECTION_STYLE.ALIASES:
            alias = self.SECTION_STYLE.resolve_alias(section_name)
            if alias in self.sections:
                return self.sections[alias]
        raise KeyError("Section '{0}' not found".format(section_name))
    def pop_section(self, section_name):
        """Remove and return a section by name or alias; KeyError if absent."""
        if section_name in self.sections:
            return self.sections.pop(section_name)
        elif section_name in self.SECTION_STYLE.ALIASES:
            alias = self.SECTION_STYLE.resolve_alias(section_name)
            if alias in self.sections:
                return self.sections.pop(alias)
        raise KeyError("Section '{0}' not found".format(section_name))
    def insert_section(self, section_name, section):
        """Store ``section`` under ``section_name``, retitling it to match."""
        if section.heading != section_name:
            section.heading = section_name
        self.sections[section_name] = section
    def section_exists(self, section_name):
        """Return True if the section exists and is not a None placeholder."""
        sec = None
        if section_name in self.sections:
            sec = self.sections[section_name]
        elif section_name in self.SECTION_STYLE.ALIASES:
            alias = self.SECTION_STYLE.resolve_alias(section_name)
            if alias in self.sections:
                sec = self.sections[alias]
        return sec is not None
class NapoleonDocstring(Docstring):
    """Shared parsing/formatting for the google/numpy docstring styles."""
    STYLE_NAME = "napoleon"
    TEMPLATE = OrderedDict([("Summary", None),
                            ("Parameters", None),
                            ("Keyword Arguments", None),
                            ("Returns", None),
                            ("Yields", None),
                            ("No Longer Returned", None),
                            ("No Longer Yielded", None),
                            ("Other Parameters", None),
                            ("Deleted Parameters", None),
                            ("Attributes", None),
                            ("Deleted Attributes", None),
                            ("Methods", None),
                            ("Raises", None),
                            ("No Longer Raises", None),
                            ("Warns", None),
                            ("See Also", None),
                            ("Warning", None),
                            ("Note", None),
                            ("Notes", None),
                            ("References", None),
                            ("Example", None),
                            ("Examples", None),
                            ])
    @staticmethod
    def _extract_section_name(sec_re_result):
        """Turn a SECTION_RE match string into a plain heading."""
        return sec_re_result.strip()
    def _parse(self, s):
        """Split docstring text at section headings into self.sections."""
        logger.info("[NapoleonDocstring] starts parsing text")
        self.trailing_newlines = count_trailing_newlines(s)
        s = dedent_docstr(s)
        # (start, end, heading_text) of every section heading found
        sec_starts = [(m.start(), m.end(), m.string[m.start():m.end()])
                      for m in re.finditer(self.SECTION_RE, s, re.MULTILINE)]
        # everything before the first heading is the summary; a sentinel at
        # the end closes the last section
        sec_starts.insert(0, (0, 0, "Summary"))
        sec_starts.append((len(s), len(s), ""))
        for current_sec, next_sec in zip(sec_starts[:-1], sec_starts[1:]):
            sec_name = self._extract_section_name(current_sec[2])
            sec_body = s[current_sec[1]:next_sec[0]]
            self.finalize_section(sec_name, sec_body)
    @staticmethod
    def _format_section_text(heading, body):
        raise NotImplementedError("This is an abstract method")
    def format(self, top_indent):
        """Render the whole docstring, indented by ``top_indent``."""
        logger.info("[NapoleonDocstring] starts formatting")
        s = ""
        if self.section_exists("Summary"):
            sec_text = self.get_section("Summary").text
            if sec_text.strip():
                s += with_bounding_newlines(sec_text, nleading=0, ntrailing=1)
        for _, section in islice(self.sections.items(), 1, None):
            if section is None:
                continue
            sec_body = indent_docstr(section.text, section.section_indent, n=0)
            sec_text = self._format_section_text(section.heading, sec_body)
            s += with_bounding_newlines(sec_text, nleading=1, ntrailing=1)
        if self.trailing_newlines:
            s = with_bounding_newlines(s, ntrailing=self.trailing_newlines)
        s = indent_docstr(s, top_indent)
        return s
    def _update_section(self, params, sec_name, sec_alias=None,
                        del_prefix="Deleted ", alpha_order=False,
                        other_sections=()):
        """Reconcile a section's Parameters with ``params``.

        Entries no longer in ``params`` are moved to a "<del_prefix><name>"
        section; entries already present keep their descriptions.

        Args:
            params (OrderedDict): Parameter objects keyed by name
            sec_name (str): canonical section name to update
            sec_alias (str): heading to use when creating the section
            del_prefix (str): prefix of the section that collects removals
            alpha_order (bool): sort params alphabetically first
            other_sections (tuple): names of sections whose entries should
                not be duplicated into this one
        """
        if not sec_alias:
            sec_alias = sec_name
        if not self.section_exists(sec_name) and len(params) == 0:
            return None
        elif not self.section_exists(sec_name):
            self.finalize_section(sec_alias, "")
        _other = []
        for _secname in other_sections:
            if self.section_exists(_secname):
                _other.append(self.get_section(_secname))
        other_sections = _other
        if alpha_order:
            sorted_params = OrderedDict()
            for k in sorted(list(params.keys()), key=str.lower):
                sorted_params[k] = params[k]
            params = sorted_params
        current_dict = self.get_section(sec_name).args
        # only keep one entry per tag (names sharing a description block)
        tags_seen = dict()
        new = OrderedDict()
        for name, param in params.items():
            if name in current_dict:
                def_param = param
                param = current_dict.pop(name)
                if param.tag in tags_seen:
                    param = None
                else:
                    tags_seen[param.tag] = True
                    # update the type if annotated
                    if def_param.annotated:
                        param.types = def_param.types
            else:
                # if param is in one of the 'other sections', then don't
                # add it here too
                for sec in other_sections:
                    if name in sec.args:
                        if param.annotated:
                            sec.args[name].types = param.types
                        param = None
            if param:
                new[name] = param
        # keep free-text blocks; bug fix: iterate over a snapshot since the
        # loop pops entries (RuntimeError: dict changed size during iteration)
        for key, param in list(current_dict.items()):
            if param.descr_only:
                new[key] = current_dict.pop(key)
        if '' in current_dict:
            del current_dict['']
        if len(current_dict):
            # anything left was removed from the signature; archive it
            del_sec_name = del_prefix + sec_name
            logger.warn("killing parameters named: {}".format(current_dict.keys()))
            if not self.section_exists(self.SECTION_STYLE.resolve_alias(del_sec_name)):
                self.finalize_section(del_sec_name, "")
            deled_params = self.get_section(del_sec_name)
            deleted_tags = dict()
            for key, val in current_dict.items():
                if key in deled_params.args:
                    logger.warn("Stronger Warning: Killing old deleted param: "
                                "'{0}'".format(key))
                val.names.remove(key)
                if val.tag in deleted_tags:
                    deleted_tags[val.tag].names.append(key)
                else:
                    new_val = Parameter([key], val.types, val.description)
                    deleted_tags[val.tag] = new_val
                    deled_params.args[key] = new_val
        if len(new) == 0:
            self.sections[sec_name] = None
        else:
            self.sections[sec_name].args = new
    def update_parameters(self, params):
        """Sync the Parameters section with the given params."""
        logger.info("[NapoleonDocstring] update parameters")
        other_sections = ['Other Parameters', 'Keyword Parameters']
        self._update_section(params, "Parameters", self.PREFERRED_PARAMS_ALIAS,
                             other_sections=other_sections)
    def update_return_type(self, ret_name, ret_type,
                           default_description="Description",
                           keyword="return", del_prefix="No Longer "):
        """Sync the Returns/Yields section with the detected return type."""
        logger.info("[NapoleonDocstring] update return type")
        if keyword == "yield":
            sec_name = "Yields"
        elif keyword == "return":
            sec_name = "Returns"
        else:
            # function no longer returns/yields: archive existing sections
            logger.debug("Unknown return keyword: '{}'".format(keyword))
            for std_ret_name in ("Yields", "Returns"):
                if self.section_exists(std_ret_name):
                    del_sec_name = del_prefix + std_ret_name
                    del_sec_alias = self.SECTION_STYLE.resolve_alias(del_sec_name)
                    if not self.section_exists(del_sec_alias):
                        self.finalize_section(del_sec_alias, "")
                    del_sec = self.get_section(del_sec_alias)
                    sec = self.pop_section(std_ret_name)
                    del_sec.args = sec.args
            return
        if not self.section_exists(sec_name):
            # migrate an existing Returns<->Yields section if one exists
            for std_ret_name in ("Yields", "Returns"):
                if self.section_exists(std_ret_name):
                    logger.debug("old return section exists : '{}'".format(std_ret_name))
                    old_sec = self.pop_section(std_ret_name)
                    self.finalize_section(sec_name, "")
                    new_sec = self.get_section(sec_name)
                    new_sec.args = old_sec.args
                    self.insert_section(sec_name, new_sec)
                    break
        if self.section_exists(sec_name):
            sec = self.get_section(sec_name)
            if sec.args and ret_type:
                # update the first entry in place
                p0 = next(iter(sec.args.values()))
                if p0.descr_only:
                    p0.description = ret_type
                elif p0.types:
                    p0.types = ret_type
                elif p0.names:
                    p0.names = [ret_type]
            elif ret_name or ret_type:
                description = default_description
                sec.args = OrderedDict()
                if ret_name:
                    sec.args[ret_name] = Parameter([ret_name], ret_type, description)
                else:
                    sec.args[ret_type] = Parameter([ret_type], "", description)
            else:
                pass
        else:
            self.finalize_section(sec_name, "")
            sec = self.get_section(sec_name)
            ret_type = ret_type if ret_type != "" else "${NUMBER:TYPE}"
            sec.args = OrderedDict()
            sec.args[ret_type] = Parameter([ret_type], "", default_description)
    def update_attributes(self, attribs, alpha_order=True):
        """Sync the Attributes section with the given attributes."""
        logger.info("[NapoleonDocstring] update attributes")
        self._update_section(attribs, "Attributes", alpha_order=alpha_order)
    def update_exceptions(self, attribs, alpha_order=True):
        """Sync the Raises section with the given exceptions."""
        logger.info("[NapoleonDocstring] update exceptions")
        self._update_section(attribs, "Raises", del_prefix="No Longer ",
                             alpha_order=alpha_order)
    def add_dummy_returns(self, name, typ, description):
        """Insert a placeholder Returns section unless one already exists."""
        if not self.section_exists("Returns"):
            sec = self.SECTION_STYLE("Returns")
            if name:
                sec.args = {name: Parameter([name], typ, description)}
            else:
                sec.args = {typ: Parameter([typ], "", description)}
            self.sections["Returns"] = sec
class GoogleDocstring(NapoleonDocstring):
    """Docstring in the Google style (headings like ``Args:``)."""
    STYLE_NAME = "google"
    SECTION_STYLE = GoogleSection
    SECTION_RE = r"^[A-Za-z0-9][A-Za-z0-9 \t]*:\s*$\r?\n?"
    PREFERRED_PARAMS_ALIAS = "Args"
    @classmethod
    def detect_style(cls, docstr):
        """Return True if ``docstr`` contains a Google-style section heading."""
        m = re.search(cls.SECTION_RE, docstr, re.MULTILINE)
        return m is not None
    @staticmethod
    def _extract_section_name(sec_re_result):
        """Heading text is everything before the trailing ':'."""
        return sec_re_result.strip().rstrip(':').rstrip()
    @staticmethod
    def _format_section_text(heading, body):
        """Render a section as ``Heading:`` followed by its body."""
        return "{0}:\n{1}".format(heading, body)
class NumpyDocstring(NapoleonDocstring):
    """Docstring in the Numpy style (headings underlined with dashes)."""
    STYLE_NAME = "numpy"
    SECTION_STYLE = NumpySection
    SECTION_RE = r"^([A-Za-z0-9][A-Za-z0-9 \t]*)\s*\n-+\s*?$\r?\n?"
    PREFERRED_PARAMS_ALIAS = "Parameters"
    @classmethod
    def detect_style(cls, docstr):
        """Return True if ``docstr`` contains a Numpy-style section heading."""
        m = re.search(cls.SECTION_RE, docstr, re.MULTILINE)
        return m is not None
    @staticmethod
    def _extract_section_name(sec_re_result):
        """Heading text is the matched line minus its dash underline."""
        return sec_re_result.strip().rstrip('-').rstrip()
    @staticmethod
    def _format_section_text(heading, body):
        """Render a section heading, its dash underline, then the body."""
        return "{0}\n{1}\n{2}".format(heading, "-" * len(heading), body)
# Registry of supported docstring styles; detect_style() tries them in
# this (insertion) order.
STYLE_LOOKUP = OrderedDict([('numpy', NumpyDocstring),
                            ('google', GoogleDocstring)])
| true | true |
790167157a9a024c8a75e10f3de03b5d641fbd82 | 812 | py | Python | espaloma/nn/readout/node_typing.py | cschlick/espaloma | cae5664446d0c89025de5eb827f507d8af64e2d4 | [
"MIT"
] | 60 | 2020-05-15T13:21:55.000Z | 2022-03-29T17:53:17.000Z | espaloma/nn/readout/node_typing.py | cschlick/espaloma | cae5664446d0c89025de5eb827f507d8af64e2d4 | [
"MIT"
] | 72 | 2020-04-16T18:49:51.000Z | 2022-03-25T14:24:52.000Z | espaloma/nn/readout/node_typing.py | cschlick/espaloma | cae5664446d0c89025de5eb827f507d8af64e2d4 | [
"MIT"
] | 5 | 2020-11-13T19:24:09.000Z | 2022-01-19T20:49:08.000Z | # =============================================================================
# IMPORTS
# =============================================================================
import torch
from espaloma.nn.readout.base_readout import BaseReadout
# =============================================================================
# MODULE CLASSES
# =============================================================================
class NodeTyping(BaseReadout):
    """Simple typing on homograph."""
    def __init__(self, in_features, n_classes=100):
        """
        Args:
            in_features (int): size of each node representation
            n_classes (int): number of node types to score
        """
        super(NodeTyping, self).__init__()
        self.c = torch.nn.Linear(in_features, n_classes)
    def forward(self, g):
        """Attach per-node typing logits to the "n1" nodes of ``g``.

        Args:
            g: graph whose "n1" nodes carry features in ``data["h"]``
               -- presumably a DGL heterograph; confirm against callers

        Returns:
            the same graph with ``nn_typing`` added to its node data
        """
        g.apply_nodes(
            ntype="n1",
            func=lambda node: {"nn_typing": self.c(node.data["h"])},
        )
        return g
| 32.48 | 79 | 0.383005 |
import torch
from espaloma.nn.readout.base_readout import BaseReadout
class NodeTyping(BaseReadout):
    """Per-node type classifier applied to the "n1" nodes of a homograph."""
    def __init__(self, in_features, n_classes=100):
        """Build a linear scoring layer from node features to class logits."""
        super(NodeTyping, self).__init__()
        self.c = torch.nn.Linear(in_features, n_classes)
    def forward(self, g):
        """Attach "nn_typing" logits to every "n1" node and return ``g``."""
        def _classify(nodes):
            return {"nn_typing": self.c(nodes.data["h"])}
        g.apply_nodes(ntype="n1", func=_classify)
        return g
| true | true |
790167ae4a2f2af95da14362ccfeb3293564046d | 373 | py | Python | Lecture_notes/数据提取与验证码的识别(上)/code/jsonpath_test.py | littleturings/2021PythonWebCrawler | a9089a912affce4369cf50df3c22c55eb4ebf2d5 | [
"MIT"
] | 1 | 2021-02-03T08:28:16.000Z | 2021-02-03T08:28:16.000Z | Lecture_notes/数据提取与验证码的识别(上)/code/jsonpath_test.py | littleturings/2021PythonWebCrawler | a9089a912affce4369cf50df3c22c55eb4ebf2d5 | [
"MIT"
] | null | null | null | Lecture_notes/数据提取与验证码的识别(上)/code/jsonpath_test.py | littleturings/2021PythonWebCrawler | a9089a912affce4369cf50df3c22c55eb4ebf2d5 | [
"MIT"
] | null | null | null | from fake_useragent import UserAgent
import requests
from jsonpath import jsonpath
url = "https://www.lagou.com/lbs/getAllCitySearchLabels.json"
headers = {"User-Agent": UserAgent().chrome}
resp = requests.get(url, headers=headers)
ids = jsonpath(resp.json(), "$..id")
names = jsonpath(resp.json(), "$..name")
for id, name in zip(ids, names):
print(id, ":", name)
| 23.3125 | 61 | 0.705094 | from fake_useragent import UserAgent
import requests
from jsonpath import jsonpath
url = "https://www.lagou.com/lbs/getAllCitySearchLabels.json"
headers = {"User-Agent": UserAgent().chrome}
resp = requests.get(url, headers=headers)
ids = jsonpath(resp.json(), "$..id")
names = jsonpath(resp.json(), "$..name")
for id, name in zip(ids, names):
print(id, ":", name)
| true | true |
79016946767147d0fbaeddece8c5f2511d1e6b1d | 178 | py | Python | floris/tools/optimization/scipy/__init__.py | eirikur16/flrs | c98604593753def05086b54ce82f5551f01d2529 | [
"Apache-2.0"
] | 91 | 2019-06-04T08:56:29.000Z | 2022-03-13T17:39:22.000Z | floris/tools/optimization/scipy/__init__.py | eirikur16/flrs | c98604593753def05086b54ce82f5551f01d2529 | [
"Apache-2.0"
] | 224 | 2019-04-08T22:03:45.000Z | 2022-03-31T17:56:09.000Z | floris/tools/optimization/scipy/__init__.py | eirikur16/flrs | c98604593753def05086b54ce82f5551f01d2529 | [
"Apache-2.0"
] | 97 | 2019-04-23T20:48:20.000Z | 2022-03-29T08:17:02.000Z | from . import (
yaw,
layout,
base_COE,
optimization,
layout_height,
power_density,
yaw_wind_rose,
power_density_1D,
yaw_wind_rose_parallel,
)
| 14.833333 | 27 | 0.651685 | from . import (
yaw,
layout,
base_COE,
optimization,
layout_height,
power_density,
yaw_wind_rose,
power_density_1D,
yaw_wind_rose_parallel,
)
| true | true |
79016a2841defa9016d34e37abaa9b4dd8225e46 | 5,881 | py | Python | config.py | sdtaylor/scheduleCrossRef | d948308ed9e3eaf2ae4ecefab61d41754a435e90 | [
"MIT"
] | null | null | null | config.py | sdtaylor/scheduleCrossRef | d948308ed9e3eaf2ae4ecefab61d41754a435e90 | [
"MIT"
] | 8 | 2016-08-11T15:03:26.000Z | 2017-04-14T12:51:41.000Z | config.py | sdtaylor/scheduleCrossRef | d948308ed9e3eaf2ae4ecefab61d41754a435e90 | [
"MIT"
] | null | null | null | #The term schedule that gets displayed. Can do multiple terms in the case of displaying
#summer and fall at the same time. ie termNames ['2201','2208']
termNames=['2218']
majorTemplate='in/majorPage.html.mako'
#Add new majors here.
#Name: short name for the major
#classFile: the csv file containing all the classes in this majors curriculum
#asof: the date that the major curriculum was acquired
majors=[
{'name': 'SNRE', 'classFile': 'majorClassLists/SNREList.csv', 'asof': 'Oct 10,2015'},
{'name': 'WEC', 'classFile': 'majorClassLists/WECList.csv', 'asof': 'Oct 10,2015'}
]
#Add new semesters here.
#Name: The term code, see below.
#prettyName: the more comprehendable name. eg. Fall 2015
#termSchedule: the filename for the downloaded csv file for the schedule. All should be semesterData/YYYYXX.csv
#
#The new API started being the sole source in spring 2020. With that term codes are:
# CYYM, where C = 2, YY = the last 2 digits of the year, and M is 8 or 1 for fall or spring
#
#TODO: New codes for Summer. Its special since it has several mini-terms.
terms=[
{'name' :'2218', 'prettyName':'Fall 2021', 'termSchedule': 'semesterData/fall2021.csv'},
{'name' :'2211', 'prettyName':'Spring 2021', 'termSchedule': 'semesterData/spring2021.csv'},
{'name' :'2208', 'prettyName':'Fall 2020', 'termSchedule': 'semesterData/fall2020.csv'},
{'name' :'2201', 'prettyName':'Spring 2020', 'termSchedule': 'semesterData/spring2020.csv'},
{'name' :'201908', 'prettyName':'Fall 2019', 'termSchedule': 'semesterData/201908.csv'},
{'name' :'201906', 'prettyName':'Summer 2019', 'termSchedule': 'semesterData/201906.csv'},
{'name' :'201901', 'prettyName':'Spring 2019', 'termSchedule': 'semesterData/201901.csv'},
{'name' :'201808', 'prettyName':'Fall 2018', 'termSchedule': 'semesterData/201808.csv'},
{'name' :'201806', 'prettyName':'Summer 2018', 'termSchedule': 'semesterData/201806.csv'},
{'name' :'201801', 'prettyName':'Spring 2018', 'termSchedule': 'semesterData/201801.csv'},
{'name' :'201708', 'prettyName':'Fall 2017', 'termSchedule': 'semesterData/201708.csv'},
{'name' :'201706', 'prettyName':'Summer 2017', 'termSchedule': 'semesterData/201706.csv'},
{'name' :'201701', 'prettyName':'Spring 2017', 'termSchedule': 'semesterData/201701.csv'},
{'name' :'201608', 'prettyName':'Fall 2016', 'termSchedule': 'semesterData/201608.csv'},
{'name' :'201606', 'prettyName':'Summer 2016', 'termSchedule': 'semesterData/201606.csv'},
{'name' :'201601', 'prettyName':'Spring 2016', 'termSchedule': 'semesterData/201601.csv'},
{'name' :'201508', 'prettyName':'Fall 2015', 'termSchedule': 'semesterData/201508.csv'},
{'name' :'201506', 'prettyName':'Summer 2015', 'termSchedule': 'semesterData/201506.csv'},
{'name' :'201501', 'prettyName':'Spring 2015', 'termSchedule': 'semesterData/201501.csv'},
{'name' :'201408', 'prettyName':'Fall 2014', 'termSchedule': 'semesterData/201408.csv'},
{'name' :'201406', 'prettyName':'Summer 2014', 'termSchedule': 'semesterData/201406.csv'},
{'name' :'201401', 'prettyName':'Spring 2014', 'termSchedule': 'semesterData/201401.csv'},
{'name' :'201308', 'prettyName':'Fall 2013', 'termSchedule': 'semesterData/201308.csv'},
{'name' :'201301', 'prettyName':'Spring 2013', 'termSchedule': 'semesterData/201301.csv'},
{'name' :'201208', 'prettyName':'Fall 2012', 'termSchedule': 'semesterData/201208.csv'},
{'name' :'201201', 'prettyName':'Spring 2012', 'termSchedule': 'semesterData/201201.csv'},
{'name' :'201108', 'prettyName':'Fall 2011', 'termSchedule': 'semesterData/201108.csv'},
{'name' :'201101', 'prettyName':'Spring 2011', 'termSchedule': 'semesterData/201101.csv'},
{'name' :'201008', 'prettyName':'Fall 2010', 'termSchedule': 'semesterData/201008.csv'}
]
#To deal with 100's of special topic classes that may or may not be on the curriculum (and if not, still deserve
#to be considered), show *all* special topics classes from a few relevant departments
relevantDepts=['BOT','ZOO','FAS','WIS','FOR','GEO','ENV']
#Exclude any classes with these titles. Designed for research credits which I don't need to have on the site
classTitleExclusions=['SUPERVISED RESEARCH','MASTERS RESEARCH','DOCTORAL RESEARCH','ADVANCED RESEARCH',
'SUPERVISED TEACHING','INDIVIDUAL WORK','INDIVIDUAL STUDIES','SPECIAL TOPICS']
#Every dept has 'Special Topic' codes that are not necessarily in the curriculum.
#Since they all share the same course codes with things that *are* in the curriculum,
#all special topics are included.
#This list is to go and find them and mark them "special topics" to indicate the class might
#need prior approval.
#Theres probably a better way to account for these. maybe scrape the grad catalog website
specialTopicClasses=['ZOO6927',
'WIS6934',
'SWS6932',
'ALS5932',
'AOM6932',
'AEC6932',
'STA6934',
'ANS6932',
'ENY6932',
'NEM6932',
'AEB6933',
'ABE6933',
'PHC6937',
'LAS6938',
'GEO6938',
'HOS6932',
'MCB6937',
'PBC6937',
'FAS6932',
'AGR6932',
'BOT6935',
'ANG6930',
'ENV6935',
'ENV6932',
'FOR6934',
'MAT6932',
'LAW6930',
'SYA7933',
'GEB6930',
'AFS6905',
'VME6934'
]
| 57.097087 | 112 | 0.60857 |
termNames=['2218']
majorTemplate='in/majorPage.html.mako'
majors=[
{'name': 'SNRE', 'classFile': 'majorClassLists/SNREList.csv', 'asof': 'Oct 10,2015'},
{'name': 'WEC', 'classFile': 'majorClassLists/WECList.csv', 'asof': 'Oct 10,2015'}
]
terms=[
{'name' :'2218', 'prettyName':'Fall 2021', 'termSchedule': 'semesterData/fall2021.csv'},
{'name' :'2211', 'prettyName':'Spring 2021', 'termSchedule': 'semesterData/spring2021.csv'},
{'name' :'2208', 'prettyName':'Fall 2020', 'termSchedule': 'semesterData/fall2020.csv'},
{'name' :'2201', 'prettyName':'Spring 2020', 'termSchedule': 'semesterData/spring2020.csv'},
{'name' :'201908', 'prettyName':'Fall 2019', 'termSchedule': 'semesterData/201908.csv'},
{'name' :'201906', 'prettyName':'Summer 2019', 'termSchedule': 'semesterData/201906.csv'},
{'name' :'201901', 'prettyName':'Spring 2019', 'termSchedule': 'semesterData/201901.csv'},
{'name' :'201808', 'prettyName':'Fall 2018', 'termSchedule': 'semesterData/201808.csv'},
{'name' :'201806', 'prettyName':'Summer 2018', 'termSchedule': 'semesterData/201806.csv'},
{'name' :'201801', 'prettyName':'Spring 2018', 'termSchedule': 'semesterData/201801.csv'},
{'name' :'201708', 'prettyName':'Fall 2017', 'termSchedule': 'semesterData/201708.csv'},
{'name' :'201706', 'prettyName':'Summer 2017', 'termSchedule': 'semesterData/201706.csv'},
{'name' :'201701', 'prettyName':'Spring 2017', 'termSchedule': 'semesterData/201701.csv'},
{'name' :'201608', 'prettyName':'Fall 2016', 'termSchedule': 'semesterData/201608.csv'},
{'name' :'201606', 'prettyName':'Summer 2016', 'termSchedule': 'semesterData/201606.csv'},
{'name' :'201601', 'prettyName':'Spring 2016', 'termSchedule': 'semesterData/201601.csv'},
{'name' :'201508', 'prettyName':'Fall 2015', 'termSchedule': 'semesterData/201508.csv'},
{'name' :'201506', 'prettyName':'Summer 2015', 'termSchedule': 'semesterData/201506.csv'},
{'name' :'201501', 'prettyName':'Spring 2015', 'termSchedule': 'semesterData/201501.csv'},
{'name' :'201408', 'prettyName':'Fall 2014', 'termSchedule': 'semesterData/201408.csv'},
{'name' :'201406', 'prettyName':'Summer 2014', 'termSchedule': 'semesterData/201406.csv'},
{'name' :'201401', 'prettyName':'Spring 2014', 'termSchedule': 'semesterData/201401.csv'},
{'name' :'201308', 'prettyName':'Fall 2013', 'termSchedule': 'semesterData/201308.csv'},
{'name' :'201301', 'prettyName':'Spring 2013', 'termSchedule': 'semesterData/201301.csv'},
{'name' :'201208', 'prettyName':'Fall 2012', 'termSchedule': 'semesterData/201208.csv'},
{'name' :'201201', 'prettyName':'Spring 2012', 'termSchedule': 'semesterData/201201.csv'},
{'name' :'201108', 'prettyName':'Fall 2011', 'termSchedule': 'semesterData/201108.csv'},
{'name' :'201101', 'prettyName':'Spring 2011', 'termSchedule': 'semesterData/201101.csv'},
{'name' :'201008', 'prettyName':'Fall 2010', 'termSchedule': 'semesterData/201008.csv'}
]
#to be considered), show *all* special topcis classes from a few relevant departments
relevantDepts=['BOT','ZOO','FAS','WIS','FOR','GEO','ENV']
#Exclude any classes with these titles. Designed for research credits which I don't need to have on the site
classTitleExclusions=['SUPERVISED RESEARCH','MASTERS RESEARCH','DOCTORAL RESEARCH','ADVANCED RESEARCH',
'SUPERVISED TEACHING','INDIVIDUAL WORK','INDIVIDUAL STUDIES','SPECIAL TOPICS']
specialTopicClasses=['ZOO6927',
'WIS6934',
'SWS6932',
'ALS5932',
'AOM6932',
'AEC6932',
'STA6934',
'ANS6932',
'ENY6932',
'NEM6932',
'AEB6933',
'ABE6933',
'PHC6937',
'LAS6938',
'GEO6938',
'HOS6932',
'MCB6937',
'PBC6937',
'FAS6932',
'AGR6932',
'BOT6935',
'ANG6930',
'ENV6935',
'ENV6932',
'FOR6934',
'MAT6932',
'LAW6930',
'SYA7933',
'GEB6930',
'AFS6905',
'VME6934'
]
| true | true |
79016add5752b9c41913c61c3c313c0dbeeea4a9 | 1,240 | py | Python | src/emmental/utils/seed.py | KeAWang/emmental | dae9f9fbba944f7c8404ab85aa9296545db1b82b | [
"MIT"
] | null | null | null | src/emmental/utils/seed.py | KeAWang/emmental | dae9f9fbba944f7c8404ab85aa9296545db1b82b | [
"MIT"
] | null | null | null | src/emmental/utils/seed.py | KeAWang/emmental | dae9f9fbba944f7c8404ab85aa9296545db1b82b | [
"MIT"
] | null | null | null | # Copyright (c) 2021 Sen Wu. All Rights Reserved.
"""Helper function to set random seed for reproducibility of models."""
import logging
import random
from typing import Optional
import numpy as np
import torch
logger = logging.getLogger(__name__)
def set_random_seed(seed: Optional[int] = None) -> None:
    """Set random seed for random, numpy, and pytorch.

    Args:
        seed: The random seed; ``None`` (or anything not convertible to
            int) selects one at random from the uint32 range.
    """
    lo = np.iinfo(np.uint32).min
    hi = np.iinfo(np.uint32).max

    # Accept anything int()-convertible; otherwise draw a seed at random.
    try:
        seed = int(seed)
        logger.info(f"Set random seed to {seed}.")
    except (TypeError, ValueError):
        seed = random.randint(lo, hi)
        logger.info(f"No random seed specified, randomly set random seed to {seed}.")

    # Out-of-range seeds are replaced rather than clamped.
    if not lo <= seed <= hi:
        replacement = random.randint(lo, hi)
        logger.info(
            f"Random seed {seed} is not valid, randomly set random seed to {replacement}."
        )
        seed = replacement

    # Seed all three RNG sources in the same order as before.
    for seeder in (random.seed, np.random.seed, torch.manual_seed):
        seeder(seed)
| 27.555556 | 87 | 0.664516 |
import logging
import random
from typing import Optional
import numpy as np
import torch
logger = logging.getLogger(__name__)
def set_random_seed(seed: Optional[int] = None) -> None:
max_value = np.iinfo(np.uint32).max
min_value = np.iinfo(np.uint32).min
try:
seed = int(seed)
logger.info(f"Set random seed to {seed}.")
except (TypeError, ValueError):
seed = random.randint(min_value, max_value)
logger.info(f"No random seed specified, randomly set random seed to {seed}.")
if not (min_value <= seed <= max_value):
new_seed = random.randint(min_value, max_value)
logger.info(
f"Random seed {seed} is not valid, randomly set random seed to {new_seed}."
)
seed = new_seed
random.seed(seed)
np.random.seed(seed=seed)
torch.manual_seed(seed)
| true | true |
79016b82d35531de3a104ba9fd851f24783eea7a | 123 | py | Python | spotdl/command_line/exitcodes.py | khjxiaogu/spotify-downloader | a8dcb8d998da0769bbe210f2808d16b346453c23 | [
"MIT"
] | 4,698 | 2017-06-20T22:37:10.000Z | 2022-03-28T13:38:07.000Z | spotdl/command_line/exitcodes.py | Delgan/spotify-downloader | 8adf3e8d6b98269b1538dd91c9a44ed345c77545 | [
"MIT"
] | 690 | 2017-06-20T20:08:42.000Z | 2022-02-26T23:36:07.000Z | spotdl/command_line/exitcodes.py | Delgan/spotify-downloader | 8adf3e8d6b98269b1538dd91c9a44ed345c77545 | [
"MIT"
] | 741 | 2017-06-21T23:32:51.000Z | 2022-03-07T12:11:54.000Z | KEYBOARD_INTERRUPT = 1
ARGUMENT_ERROR = 2
# When playlists, albums, artists, users aren't found.
URI_NOT_FOUND_ERROR = 5
| 17.571429 | 54 | 0.772358 | KEYBOARD_INTERRUPT = 1
ARGUMENT_ERROR = 2
URI_NOT_FOUND_ERROR = 5
| true | true |
79016bb28f164d02030a50f9f09a1aa1410c864f | 553 | py | Python | jobs/migrations/0001_initial.py | muhammadh-s/web-portfolio | cd2c3d9bb0cc6e5f1a058fa20c52d2edcaf7c573 | [
"MIT"
] | null | null | null | jobs/migrations/0001_initial.py | muhammadh-s/web-portfolio | cd2c3d9bb0cc6e5f1a058fa20c52d2edcaf7c573 | [
"MIT"
] | 1 | 2019-02-08T14:09:06.000Z | 2019-02-08T14:09:06.000Z | jobs/migrations/0001_initial.py | muhammadh-s/web-portfolio | cd2c3d9bb0cc6e5f1a058fa20c52d2edcaf7c573 | [
"MIT"
] | null | null | null | # Generated by Django 2.1.3 on 2018-11-24 07:01
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial schema: creates the Job model (auto id, image, summary)."""

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Job',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # Uploaded files are stored under MEDIA_ROOT/images/.
                ('image', models.ImageField(upload_to='images/')),
                ('summary', models.CharField(max_length=200)),
            ],
        ),
    ]
| 24.043478 | 114 | 0.56962 |
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Job',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('image', models.ImageField(upload_to='images/')),
('summary', models.CharField(max_length=200)),
],
),
]
| true | true |
79016be67214c4d6f1999c5b7a053e5a7b167e1f | 2,239 | py | Python | mmskeleton/utils/checkpoint.py | Trebua/mmskeleton | df2057e5d25ff19204ae60e710326bee72c625e1 | [
"Apache-2.0"
] | 1,347 | 2019-08-24T19:03:50.000Z | 2022-03-29T05:44:57.000Z | mmskeleton/utils/checkpoint.py | Trebua/mmskeleton | df2057e5d25ff19204ae60e710326bee72c625e1 | [
"Apache-2.0"
] | 246 | 2019-08-24T15:36:11.000Z | 2022-03-23T06:57:02.000Z | mmskeleton/utils/checkpoint.py | Trebua/mmskeleton | df2057e5d25ff19204ae60e710326bee72c625e1 | [
"Apache-2.0"
] | 335 | 2019-08-25T14:54:19.000Z | 2022-03-31T23:07:18.000Z | from mmcv.runner import load_checkpoint as mmcv_load_checkpoint
from mmcv.runner.checkpoint import load_url_dist
import urllib
mmskeleton_model_urls = {
'st_gcn/kinetics-skeleton': "https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmskeleton/models/st-gcn/st_gcn.kinetics-6fa43f73.pth",
'st_gcn/ntu-xsub': "https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmskeleton/models/st-gcn/st_gcn.ntu-xsub-300b57d4.pth",
'st_gcn/ntu-xview': "https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmskeleton/models/st-gcn/st_gcn.ntu-xview-9ba67746.pth",
'mmdet/htc_dconv_c3-c5_mstrain_400_1400_x101_64x4d_fpn_20e': 'https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmskeleton/models/mmdet/htc_dconv_c3-c5_mstrain_400_1400_x101_64x4d_fpn_20e_20190408-0e50669c.pth',
'pose_estimation/pose_hrnet_w32_256x192': 'https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmskeleton/models/pose_estimation/pose_hrnet_w32_256x192-76ea353b.pth',
'mmdet/cascade_rcnn_r50_fpn_20e': 'https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/cascade_rcnn_r50_fpn_20e_20181123-db483a09.pth',
} # yapf: disable
def load_checkpoint(model, filename, *args, **kwargs):
    """Load a checkpoint into ``model``, resolving ``mmskeleton://`` names.

    Extra positional/keyword arguments are forwarded to mmcv's
    ``load_checkpoint``.  Download failures are re-raised wrapped in an
    Exception carrying a troubleshooting hint (``url_error_message``).
    """
    try:
        filename = get_mmskeleton_url(filename)
        return mmcv_load_checkpoint(model, filename, *args, **kwargs)
    except (urllib.error.HTTPError, urllib.error.URLError) as e:
        raise Exception(url_error_message.format(filename)) from e
def get_mmskeleton_url(filename):
    """Resolve an ``mmskeleton://<model>`` shorthand to its download URL.

    Any other string (local path, plain URL) is returned unchanged.
    Unknown model names raise KeyError from the lookup table.
    """
    prefix = 'mmskeleton://'
    if not filename.startswith(prefix):
        return filename
    model_name = filename[len(prefix):]
    return mmskeleton_model_urls[model_name]
def cache_checkpoint(filename):
    """Pre-download a checkpoint so later loads hit the local cache.

    Resolves ``mmskeleton://`` shorthands first; download failures are
    re-raised wrapped in an Exception with a troubleshooting hint.
    """
    try:
        # Resolve once.  The previous code called get_mmskeleton_url() a
        # second time on the already-resolved URL, which was redundant.
        filename = get_mmskeleton_url(filename)
        load_url_dist(filename)
    except (urllib.error.HTTPError, urllib.error.URLError) as e:
        raise Exception(url_error_message.format(filename)) from e
url_error_message = """
==================================================
MMSkeleton fail to load checkpoint from url:
{}
Please check your network connection. Or manually download checkpoints according to the instructor:
https://github.com/open-mmlab/mmskeleton/blob/master/doc/MODEL_ZOO.md
""" | 47.638298 | 216 | 0.748995 | from mmcv.runner import load_checkpoint as mmcv_load_checkpoint
from mmcv.runner.checkpoint import load_url_dist
import urllib
mmskeleton_model_urls = {
'st_gcn/kinetics-skeleton': "https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmskeleton/models/st-gcn/st_gcn.kinetics-6fa43f73.pth",
'st_gcn/ntu-xsub': "https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmskeleton/models/st-gcn/st_gcn.ntu-xsub-300b57d4.pth",
'st_gcn/ntu-xview': "https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmskeleton/models/st-gcn/st_gcn.ntu-xview-9ba67746.pth",
'mmdet/htc_dconv_c3-c5_mstrain_400_1400_x101_64x4d_fpn_20e': 'https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmskeleton/models/mmdet/htc_dconv_c3-c5_mstrain_400_1400_x101_64x4d_fpn_20e_20190408-0e50669c.pth',
'pose_estimation/pose_hrnet_w32_256x192': 'https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmskeleton/models/pose_estimation/pose_hrnet_w32_256x192-76ea353b.pth',
'mmdet/cascade_rcnn_r50_fpn_20e': 'https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/cascade_rcnn_r50_fpn_20e_20181123-db483a09.pth',
}
def load_checkpoint(model, filename, *args, **kwargs):
try:
filename = get_mmskeleton_url(filename)
return mmcv_load_checkpoint(model, filename, *args, **kwargs)
except (urllib.error.HTTPError, urllib.error.URLError) as e:
raise Exception(url_error_message.format(filename)) from e
def get_mmskeleton_url(filename):
if filename.startswith('mmskeleton://'):
model_name = filename[13:]
model_url = (mmskeleton_model_urls[model_name])
return model_url
return filename
def cache_checkpoint(filename):
try:
filename = get_mmskeleton_url(filename)
load_url_dist(get_mmskeleton_url(filename))
except (urllib.error.HTTPError, urllib.error.URLError) as e:
raise Exception(url_error_message.format(filename)) from e
url_error_message = """
==================================================
MMSkeleton fail to load checkpoint from url:
{}
Please check your network connection. Or manually download checkpoints according to the instructor:
https://github.com/open-mmlab/mmskeleton/blob/master/doc/MODEL_ZOO.md
""" | true | true |
79016c13be9e84ffa82e8022bf0d53d91829182f | 24,051 | py | Python | Python3/Tornado/apps/ExchangeWalletApi/ExWallet/bsv/handler.py | youngqqcn/QBlockChainNotes | 85122049024dc5555705bf016312491a51966621 | [
"MIT"
] | 24 | 2018-11-01T03:36:43.000Z | 2022-03-28T08:20:30.000Z | Python3/Tornado/apps/ExchangeWalletApi/ExWallet/bsv/handler.py | songning4/QBlockChainNotes | d65ede073f5a20f728f41cc6850409693820cdb1 | [
"MIT"
] | 57 | 2019-12-04T08:26:47.000Z | 2022-03-08T07:35:15.000Z | Python3/Tornado/apps/ExchangeWalletApi/ExWallet/bsv/handler.py | youngqqcn/QBlockChainNotes | 85122049024dc5555705bf016312491a51966621 | [
"MIT"
] | 11 | 2019-01-04T08:41:57.000Z | 2022-03-16T03:51:36.000Z | #coding:utf8
#authors : yqq
import logging
import json
from utils import decimal_default,get_linenumber
from base_handler import BaseHandler
from .proxy import AuthServiceProxy
from cashaddress import convert
import traceback
#设置精度
from decimal import Decimal
from decimal import getcontext
getcontext().prec = 8
from constants import BSV_RPC_URL as RPC_URL
STR_ADDRESS_TABLE = "t_btc_address"
class BTC_ListAccounts(BaseHandler):
    """GET: return every deposit address stored in the address table."""

    @staticmethod
    def addresses():
        from sql import run
        # TODO: cache addresses in redis once the table grows large.
        rows = run("""select * from {};""".format(STR_ADDRESS_TABLE))
        return [row['address'] for row in rows]

    def get(self):
        try:
            addrs = BTC_ListAccounts.addresses()
            self.write(json.dumps(BaseHandler.success_ret_with_data(addrs), default=decimal_default))
        except Exception as e:
            self.write(json.dumps(BaseHandler.error_ret_with_data("error: %s" % e)))
            logging.error("BTC_ListAccounts error:{0} in {1}".format(e, get_linenumber()))
g_exUserAddrs = BTC_ListAccounts.addresses() #使用全局变量保存交易所用户BTC地址 2019-06-01
class BTC_GetAccount(BaseHandler):
    """GET: look up the wallet account label owning `address`."""

    def get(self):
        rpc = AuthServiceProxy(RPC_URL)
        try:
            address = self.get_argument("address")
            reply = rpc.batch_([["getaccount", address]])
            self.write(json.dumps(BaseHandler.success_ret_with_data(reply), default=decimal_default))
        except Exception as e:
            self.write(json.dumps(BaseHandler.error_ret_with_data("error: %s" % e)))
            logging.error("BTC_GetAccount error:{0} in {1}".format(e, get_linenumber()))
class BTC_GetAccountAddress(BaseHandler):
    """GET: fetch the receiving address for an account label."""

    def get(self):
        rpc = AuthServiceProxy(RPC_URL)
        try:
            account = self.get_argument("account")
            reply = rpc.batch_([["getaccountaddress", account]])
            self.write(json.dumps(BaseHandler.success_ret_with_data(reply), default=decimal_default))
        except Exception as e:
            self.write(json.dumps(BaseHandler.error_ret_with_data("error: %s" % e)))
            logging.error("BTC_GetAccoutAddress error:{0} in {1}".format(e, get_linenumber()))
class BTC_GetAccountBalance(BaseHandler):
    """GET: return the wallet balance of a named account via `getbalance`."""

    def get(self):
        btc_rpc_connection = AuthServiceProxy(RPC_URL)
        try:
            # NOTE(review): str has no .decode() on Python 3; this only works
            # on Python 2 (tornado returns unicode there).  Confirm the target
            # interpreter before touching this line.
            account = self.get_argument("account").decode("utf-8")
            if account is None or len(account) == 0:
                self.write(json.dumps(BaseHandler.error_ret()))
                return
            commands = [["getbalance", account]]
            data = btc_rpc_connection.batch_(commands)
            self.write(json.dumps(BaseHandler.success_ret_with_data(data), default=decimal_default))
        except Exception as e:
            self.write(json.dumps(BaseHandler.error_ret_with_data("error: %s"%e)))
            logging.error("BTC_GetAccountBalance error:{0} in {1}".format(e,get_linenumber()))
class BTC_GetBalance(BaseHandler):
    """GET: report an address balance as the 8-decimal sum of its UTXOs."""

    def get(self):
        rpc = AuthServiceProxy(RPC_URL)
        try:
            unspent = BTC_ListUTXO.utxo(rpc, self.get_argument("address"))
            if not unspent:
                # No spendable outputs -> report zero via the error envelope
                # (legacy behaviour, kept for API compatibility).
                self.write(json.dumps(BaseHandler.error_ret_with_data("0")))
                return
            from utils import accumulate
            total = '%.8f' % accumulate(unspent)
            self.write(json.dumps(BaseHandler.success_ret_with_data(total), default=decimal_default))
        except Exception as e:
            self.write(json.dumps(BaseHandler.error_ret_with_data("error: %s" % e)))
            logging.error("BTC_GetBalance error:{0} in {1}".format(e, get_linenumber()))
class BTC_ListUTXO(BaseHandler):
    """List unspent outputs for one or more addresses, exposing both the
    node's cash-format address and its legacy base58 form."""

    @staticmethod
    def utxo(rpcconn, addrs, minconf=1, maxconf=9999999, opt=None):
        """Call `listunspent` for `addrs` (str or list) and normalise
        each UTXO's address fields."""
        addr_list = addrs if isinstance(addrs, list) else [addrs]
        call = ["listunspent", minconf, maxconf, addr_list, True]
        if opt is not None:
            call.append(opt)
        utxos = rpcconn.batch_([call])[0]
        # The node reports cash-format addresses; keep that under
        # 'cashaddress' and put the legacy form in the canonical slot.
        for utxo in utxos:
            cash_addr = utxo['address']
            utxo['cashaddress'] = cash_addr
            utxo['address'] = convert.to_legacy_address(cash_addr)
        return utxos

    def post(self):
        rpc = AuthServiceProxy(RPC_URL)
        data = None
        try:
            minconf_arg = self.get_argument("minconf")
            minconf = 1 if minconf_arg == "" else int(minconf_arg)
            maxconf_arg = self.get_argument("maxconf")
            maxconf = 9999999 if maxconf_arg == "" else int(maxconf_arg)
            data = BTC_ListUTXO.utxo(rpc, self.get_argument("address"), minconf, maxconf)
            self.write(json.dumps(BaseHandler.success_ret_with_data(data), default=decimal_default))
        except Exception as e:
            self.write(json.dumps(BaseHandler.error_ret_with_data("error: %s" % e)))
            logging.error("BTC_GetUTXO error:{0} in {1}".format(e, get_linenumber()))
class BTC_EstimateSmartFee(BaseHandler):
    """Fee estimation.  BSV's `estimatefee` derives its answer from recent
    blocks and is not usable the way btc/bch/ltc's estimators are, so a
    fixed rate is returned instead."""

    @staticmethod
    def process(rpcconn, nConfTarget=2, strEstimateMode='ECONOMICAL'):
        """Return the fee rate in satoshis per byte (fixed at 20)."""
        return 20

    @staticmethod
    def calcFee(rpcconn, nIn=1, nOut=2):
        """Estimate the absolute fee (in coin units) for a transaction of
        `nIn` inputs and `nOut` outputs, sized as 148*in + 34*out + 10 bytes."""
        from decimal import Decimal
        from decimal import getcontext
        getcontext().prec = 8
        sat_per_byte = BTC_EstimateSmartFee.process(rpcconn)
        # Round-trip through a %.8f string, matching the legacy arithmetic.
        coin_per_byte = Decimal("%.8f" % (sat_per_byte / Decimal(100000000.0)))
        tx_bytes = Decimal(str(148 * nIn + 34 * nOut + 10))
        return tx_bytes * coin_per_byte

    def get(self):
        """GET: return the estimated fee for a typical 1-in/2-out tx."""
        try:
            rpcconn = AuthServiceProxy(RPC_URL)
            fee = '%.8f' % BTC_EstimateSmartFee.calcFee(rpcconn)
            self.write(json.dumps(BaseHandler.success_ret_with_data(fee), default=decimal_default))
        except Exception as e:
            self.write(json.dumps(BaseHandler.error_ret_with_data("error: %s" % e)))
            logging.error("BTC_EstimateSmartFee error:{0} in {1}".format(e, get_linenumber()))
class BTC_CreateRawTransaction(BaseHandler):
    """POST: build an unsigned raw transaction moving `amount` from
    `from_addr` to `to_addr`, selecting UTXOs greedily and returning the
    change (minus fee) to the sender."""

    @staticmethod
    def process(rpcconn,from_addr,to_addr,amount):
        """Return (True, {hex, utxos, txout}) or (False, reason)."""
        #utxos
        utxos = BTC_ListUTXO.utxo(rpcconn, from_addr)
        #print(utxos)
        def UtxoFilter(utxos, amount):
            # Greedily accumulate confirmed UTXOs (> 0.0003 each) until the
            # running sum covers amount + estimated fee.
            selected = []
            from decimal import Decimal
            nSum = Decimal('0')
            # Minimum worthwhile input amount: 148 * rate, where rate is the
            # coin cost per 1000 bytes.
            nFee = Decimal('0.0')
            for utxo in [item for item in utxos if int(item["confirmations"]) >= 1 and float(item["amount"]) > 0.0003 ]:
                selected.append(utxo)
                nSum += Decimal(str((utxo["amount"])))
                if nSum > Decimal(str(amount)):
                    # Re-estimate the fee for the current input count and
                    # 2 outputs (payee + change).
                    nFee = BTC_EstimateSmartFee.calcFee(rpcconn, len(selected), 2)
                    if nSum > nFee + amount:
                        break
            return selected, nSum, nFee

        selected, nSum , fee = UtxoFilter(utxos, amount)

        # check if enough
        # from utils import calcFee
        if not isinstance(amount, Decimal):
            amount = Decimal(str(amount))
        # fee = BTC_EstimateSmartFee.calcFee(rpcconn, len(selected), 2)
        if nSum < fee + amount:
            return False,"budget not enough"
            #return False,0 # needs testing!!!

        from utils import filtered
        param_in = [filtered(item,["txid","vout"]) for item in selected]
        # Second output returns the remainder (after fee) to the sender.
        param_out = {to_addr:amount, from_addr: nSum - amount - fee}
        #print("--------------param_out-------------")
        #print("fee" + str(fee))
        #print(param_in)
        #print(param_out)
        #print("--------------param_out-------------")
        # create raw transaction
        commands = [["createrawtransaction",param_in,param_out]]
        return True, {"hex":rpcconn.batch_(commands), "utxos":selected, "txout":param_out}

    def post(self):
        btc_rpc_connection = AuthServiceProxy(RPC_URL)
        try:
            from_addr = self.get_argument("from")
            to_addr = self.get_argument("to")
            #amount = float(self.get_argument("amount"))
            from decimal import Decimal
            amount = Decimal(str(self.get_argument("amount")))
            ret, rsp = BTC_CreateRawTransaction.process(btc_rpc_connection,from_addr,to_addr,amount)
            if not ret:
                self.write(json.dumps(BaseHandler.error_ret_with_data(rsp)))
                return
            self.write(json.dumps(BaseHandler.success_ret_with_data(rsp), default=decimal_default))
        except Exception as e:
            self.write(json.dumps(BaseHandler.error_ret_with_data("error: %s"%e)))
            logging.error("BTC_CreatRawTransaction error:{0} in {1}".format(e,get_linenumber()))
class BTC_SendRawTransaction(BaseHandler):
    """POST: broadcast a signed raw transaction hex via `sendrawtransaction`."""

    def post(self):
        rpc = AuthServiceProxy(RPC_URL)
        try:
            raw_hex = self.get_argument("rawdata")
            if not raw_hex:
                # Nothing to broadcast: return silently (legacy behaviour).
                return
            txid = rpc.batch_([["sendrawtransaction", raw_hex]])
            self.write(json.dumps(BaseHandler.success_ret_with_data(txid), default=decimal_default))
        except Exception as e:
            self.write(json.dumps(BaseHandler.error_ret_with_data("error: %s" % e)))
            logging.error("BTC_SendRawTransaction error:{0} in {1}".format(e, get_linenumber()))
class BTC_CreateRawTransactionEx(BaseHandler):
    """POST: build an unsigned raw transaction from several source
    addresses to several destinations, with change paid back to src[0]."""

    @staticmethod
    def genearateInParam(rpcconn, src, dest):
        """Pick UTXOs address-by-address until outputs + fee are covered.

        Returns (ok, utxos, redundant) where `redundant` is the change
        amount left after outputs and fee.
        """
        utxos,gross,amount = [],Decimal('0'),sum(dest.values())
        redundant = 0
        for addr in src:
            # utxos
            all = BTC_ListUTXO.utxo(rpcconn,addr)
            # recommend
            from utils import recommended
            selected,aggregate = recommended(all,amount)
            # process
            utxos += selected
            gross += aggregate
            # check if enough (fee sized for current inputs, dest outputs + change)
            redundant = gross - BTC_EstimateSmartFee.calcFee(rpcconn, len(utxos), len(dest.keys())+1) - amount
            if redundant > 0:
                return True,utxos,redundant
        return False,utxos,redundant

    @staticmethod
    def generateOutParam(dest):
        """Normalise all destination amounts to Decimal."""
        param_out = {}
        for key,value in dest.items():
            param_out[key] = Decimal(value) if isinstance(value, str) else Decimal(str(value))
        return param_out

    @staticmethod
    def process(rpcconn, src, dest ):
        """Return (True, {hex, utxos, txout}) or (False, reason)."""
        # preprocess
        param_out = BTC_CreateRawTransactionEx.generateOutParam(dest)
        ret,utxos,redundant = BTC_CreateRawTransactionEx.genearateInParam(rpcconn,src,param_out)
        if not ret: return False, "budget not enough"
        # param_out refinement: change goes to the first source address
        param_out[src[0]] = redundant if src[0] not in param_out.keys() else param_out[src[0]] + redundant
        #print(param_out)
        # param_in refinement
        from utils import filtered
        param_in = [filtered(item,["txid","vout"]) for item in utxos]
        #print(param_in)
        return True, {"hex":rpcconn.batch_([["createrawtransaction",param_in,param_out]]),"utxos":utxos, "txout":param_out}

    def get_argument_ex(self, str):
        # Arguments arrive as a JSON request body, not form fields.
        from utils import json2dict
        str2dict = json2dict(self.request.body)
        return str2dict[str] if str in str2dict.keys() else False

    def post(self):
        btc_rpc_connection = AuthServiceProxy(RPC_URL)
        try:
            src = self.get_argument_ex("src")
            dest = self.get_argument_ex("dest")
            if not isinstance(src, list):
                self.write(json.dumps(BaseHandler.error_ret_with_data("error: %s" % ("src must be json list"))))
                return
            if not isinstance(dest, dict):
                self.write(json.dumps(BaseHandler.error_ret_with_data("error: %s" % ("dest must be json object"))))
                return
            ret, rsp = BTC_CreateRawTransactionEx.process(btc_rpc_connection, src, dest)
            if not ret:
                self.write(json.dumps(BaseHandler.error_ret_with_data(rsp)))
                return
            self.write(json.dumps(BaseHandler.success_ret_with_data(rsp), default=decimal_default))
        except Exception as e:
            self.write(json.dumps(BaseHandler.error_ret_with_data("error: %s"%e)))
            logging.error("BTC_CreateRawTransactionEx error:{0} in {1}".format(e,get_linenumber()))
class BTC_CreateRawTransactionEx_Collection(BaseHandler):
    """POST: sweep (collect) the balance of many user addresses into a
    single destination address, spending everything minus the fee."""

    @staticmethod
    def makeParams( rpcconn, lstSrc, lstDest):
        """Gather every spendable UTXO of the source addresses.

        `["*"]` means "all exchange user addresses" (g_exUserAddrs).
        Returns (ok, utxos, txAmount, txFee); ok is False when the
        sweepable amount is too small to be worth a transaction.
        """
        if len(lstSrc) == 1 and lstSrc[0].strip() == "*":
            lstSrcAddrs = g_exUserAddrs
        else:
            lstSrcAddrs = lstSrc
        utxos, nSum = [], Decimal('0')
        txAmount, fTxFee = 0, 0
        #for addr in lstSrc:
        if isinstance(lstSrc, list):
            # bitcoin-cli -conf=/root/.bitcoin/bitcoin-test.conf listunspent 0 9999999 '[]' true '{ "minimumAmount": 0.005 }'
            # commands = [["listunspent", 0, 99999999, [], True, {'minimumAmount':0.0003}]]
            # lstUtxos = rpcconn.batch_(commands)[0]
            # BSV does not support the listunspent "options" argument,
            # so dust filtering is done client-side below.
            # opt = {'minimumAmount':0.0003}
            lstUtxos = BTC_ListUTXO.utxo(rpcconn, [ ], 1, 9999999)
            # print(len(lstUtxos))
            for utxo in lstUtxos:
                if Decimal(utxo['amount']) < 0.0003: continue
                if utxo['address'].strip() in lstSrcAddrs:
                    utxos.append(utxo)
                    nSum += Decimal(str((utxo["amount"])))
            fTxFee = BTC_EstimateSmartFee.calcFee(rpcconn, len(utxos), len(lstDest))
            txAmount = nSum - fTxFee # actual transfer amount
            if txAmount <= 0.0003: # actual transfer amount too small
                return False, None, 0, 0
        return True, utxos, txAmount , fTxFee

    @staticmethod
    def process(rpcconn, lstSrc, lstDest):
        """Return (True, {hex, utxos, txout, txFee}) or (False, reason)."""
        #lstSrcAddrs = []
        bRet, utxos, txAmount, fTxFee = BTC_CreateRawTransactionEx_Collection.makeParams(rpcconn, lstSrc, lstDest)
        if not bRet:
            return False, "collection amount is too small!"
        # Everything goes to the first (only) destination address.
        strDst = lstDest[0]
        vout = {strDst : txAmount}
        from utils import filtered
        vin = [filtered(item,["txid","vout"]) for item in utxos]
        strHex = rpcconn.batch_([["createrawtransaction", vin, vout]])
        return True, {"hex": strHex, "utxos":utxos, "txout":vout, "txFee":fTxFee}

    def get_argument_ex(self, str):
        # Arguments arrive as a JSON request body, not form fields.
        from utils import json2dict
        str2dict = json2dict(self.request.body)
        return str2dict[str] if str in str2dict.keys() else False

    def post(self):
        btc_rpc_connection = AuthServiceProxy(RPC_URL)
        try:
            src = self.get_argument_ex("src")
            dest = self.get_argument_ex("dest")
            if not isinstance(src, list):
                self.write(json.dumps(BaseHandler.error_ret_with_data("error: %s" % ("src must be json list"))))
                return
            if not isinstance(dest, list):
                self.write(json.dumps(BaseHandler.error_ret_with_data("error: %s" % ("dest must be json list"))))
                return
            ret, rsp = BTC_CreateRawTransactionEx_Collection.process(btc_rpc_connection, src, dest)
            if not ret:
                self.write(json.dumps(BaseHandler.error_ret_with_data(rsp)))
                return
            self.write(json.dumps(BaseHandler.success_ret_with_data(rsp), default=decimal_default))
        except Exception as e:
            # traceback.print_exc()
            self.write(json.dumps(BaseHandler.error_ret_with_data("error: %s"%e)))
            logging.error("BTC_CreateRawTransactionEx error:{0} in {1}".format(e,get_linenumber()))
# Query the balances of user addresses pending collection (sweep).
class BTC_CollectionQuery(BaseHandler):
    """GET: map every exchange user address to its sweepable balance,
    ignoring dust UTXOs below 0.0003."""

    def get(self):
        rpcconn = AuthServiceProxy(RPC_URL)
        try:
            # commands = [["listunspent", 0, 99999999, [], True, {'minimumAmount':0.0003}]]
            # lstUtxos = rpcconn.batch_(commands)[0]
            # opt = {'minimumAmount': 0.0003}
            lstUtxos = BTC_ListUTXO.utxo(rpcconn, [], 1, 9999999)
            mapRet = {}
            for utxo in lstUtxos:
                strAddr = utxo['address'].strip()
                if Decimal(utxo['amount']) < 0.0003: continue
                if strAddr not in g_exUserAddrs : continue
                if strAddr not in mapRet:
                    mapRet[strAddr] = Decimal("0.0")
                nAmount = utxo['amount']
                # Balances are stored as strings so json.dumps is lossless.
                mapRet[strAddr] = str( nAmount + Decimal( mapRet[strAddr]) )
            self.write(json.dumps(BaseHandler.success_ret_with_data(mapRet), default=decimal_default))
        except Exception as e:
            self.write(json.dumps(BaseHandler.error_ret_with_data("error: %s"%e)))
            logging.error("BTC_CollectionQuery error:{0} in {1}".format(e, get_linenumber()))
class BTC_ListTransactions(BaseHandler):
    """Wallet transaction listing helpers plus a GET endpoint."""

    @staticmethod
    def blktimes(rpc_connection,account="*",tx_counts=10):
        """Return block times (newest first) of confirmed transactions."""
        commands = [["listtransactions",account,tx_counts]]
        data = rpc_connection.batch_(commands)
        if len(data) == 0: return []
        #fix bug:only return those txs which be writen into blockchain @yqq 2019-03-21
        return [item['blocktime'] for item in data[0] if "blocktime" in item][::-1]

    #add 'include_watchonly' to include those address's transactions
    # which not import private key into the wallet. #yqq 2019-03-26
    @staticmethod
    def process(rpc_connection,account="*",tx_counts=10,skips=0,include_watchonly=True):
        """Return confirmed *receive* transactions (newest first), trimmed
        to the fields the exchange backend consumes."""
        commands = [["listtransactions",account,tx_counts,skips, include_watchonly]]
        data = rpc_connection.batch_(commands)
        if len(data) == 0: return []
        #fix bug:only return those txs which be writen into blockchain @yqq 2019-03-21
        txs = [item for item in data[0] if "blocktime" in item and item["category"] == "receive"]
        from utils import filtered
        return [filtered(item,["address","category","amount","confirmations","txid","blocktime"]) for item in txs][::-1]

    def get(self):
        btc_rpc_connection = AuthServiceProxy(RPC_URL)
        try:
            account = self.get_argument("account") if self.get_argument("account") else "*"
            tx_counts = int(self.get_argument("count")) if self.get_argument("count") else 10
            skips = int(self.get_argument("skips")) if self.get_argument("skips") else 0
            data = BTC_ListTransactions.process(btc_rpc_connection,account,tx_counts,skips)
            self.write(json.dumps(BaseHandler.success_ret_with_data(data), default=decimal_default))
        except Exception as e:
            self.write(json.dumps(BaseHandler.error_ret_with_data("error: %s"%e)))
            logging.error("BTC_ListTransActions error:{0} in {1}".format(e,get_linenumber()))
class BTC_CrawlTxData(BaseHandler):
    """POST: return deposit transactions for exchange user addresses at or
    after a given block time (used by the deposit scanner)."""

    @staticmethod
    def process(rpc_connection, nblktime):
        """Filter wallet receive-txs to known user addresses since nblktime."""
        if len(g_exUserAddrs) == 0:
            return []
        txs = BTC_ListTransactions.process(rpc_connection, '*', 100000000)
        retTxs = []
        for tx in txs:
            # Normalise cash-format addresses to legacy base58 before matching.
            strLegacyAddr = convert.to_legacy_address(tx["address"].strip())
            tx["address"] = strLegacyAddr.strip()
            # print(tx)
            if int(str(tx['blocktime'])) >= nblktime and tx["address"].strip() in g_exUserAddrs:
                retTxs.append(tx)
        return retTxs

    def post(self):
        rpc_connection = AuthServiceProxy(RPC_URL)
        try:
            lastscannedblktime = int(str(self.get_argument("blocktime")))
            data = BTC_CrawlTxData.process(rpc_connection,lastscannedblktime)
            for i in range(len(data)):
                data[i]["amount"] = str(data[i]["amount"]) #convert to str to avoid bug
            self.write(json.dumps(BaseHandler.success_ret_with_data(data), default=decimal_default))
        except Exception as e:
            self.write(json.dumps(BaseHandler.error_ret_with_data("error: %s"%e)))
            logging.error("BTC_CrawlTxData error:{0} in {1}".format(e,get_linenumber()))
class BTC_GetBlockCount(BaseHandler):
    """GET: return the node's current best block height."""

    @staticmethod
    def process(rpcconn):
        """Query `getblockcount` and return the height as an int.

        Bug fix: `batch_` returns a list of results (one per command, as
        every other caller in this file assumes via `batch_(...)[0]`), so
        the first element must be taken before int() -- int() on the list
        itself raised TypeError on every call.
        """
        commands = [["getblockcount"]]
        return int(rpcconn.batch_(commands)[0])

    def get(self):
        btc_rpc_connection = AuthServiceProxy(RPC_URL)
        try:
            blknumber = BTC_GetBlockCount.process(btc_rpc_connection)
            self.write(json.dumps(BaseHandler.success_ret_with_data(blknumber), default=decimal_default))
        except Exception as e:
            self.write(json.dumps(BaseHandler.error_ret_with_data("error: %s"%e)))
            logging.error("BTC_GetBlockCount error:{0} in {1}".format(e,get_linenumber()))
class BTC_GetBlockHash(BaseHandler):
    """GET: return the block hash at a given height (defaults to the tip)."""

    @staticmethod
    def process(rpcconn,blknumber):
        return rpcconn.batch_([["getblockhash", blknumber]])

    def get(self):
        rpc = AuthServiceProxy(RPC_URL)
        try:
            height = self.get_argument("blknumber")
            if not height:
                # No height supplied: use the current chain tip.
                height = BTC_GetBlockCount.process(rpc)
            data = BTC_GetBlockHash.process(rpc, height)
            self.write(json.dumps(BaseHandler.success_ret_with_data(data), default=decimal_default))
        except Exception as e:
            self.write(json.dumps(BaseHandler.error_ret_with_data("error: %s" % e)))
            logging.error("BTC_GetBlockHash error:{0} in {1}".format(e, get_linenumber()))
class BTC_DecodeRawTransaction(BaseHandler):
    """POST: decode a raw transaction hex into JSON via `decoderawtransaction`."""

    def post(self):
        btc_rpc_connection = AuthServiceProxy(RPC_URL)
        try:
            commands = [["decoderawtransaction",self.get_argument("rawdata")]]
            data = btc_rpc_connection.batch_(commands)
            self.write(json.dumps(BaseHandler.success_ret_with_data(data), default=decimal_default))
        except Exception as e:
            self.write(json.dumps(BaseHandler.error_ret_with_data("error: %s"%e)))
            # Fixed log label: was "BTC_GetTransaction", which made failures
            # here indistinguishable from the raw-transaction handler's.
            logging.error("BTC_DecodeRawTransaction error:{0} in {1}".format(e,get_linenumber()))
class BTC_GetRawTransaction(BaseHandler):
    """GET: fetch a transaction by txid in verbose (decoded) form via
    `getrawtransaction <txid> true`."""

    def get(self):
        btc_rpc_connection = AuthServiceProxy(RPC_URL)
        try:
            commands = [["getrawtransaction",self.get_argument("txid"),True]]
            data = btc_rpc_connection.batch_(commands)
            self.write(json.dumps(BaseHandler.success_ret_with_data(data), default=decimal_default))
        except Exception as e:
            self.write(json.dumps(BaseHandler.error_ret_with_data("error: %s"%e)))
            # Fixed log label (was "BTC_GetTransaction").
            logging.error("BTC_GetRawTransaction error:{0} in {1}".format(e,get_linenumber()))
class BTC_GetBlock(BaseHandler):
    """GET: return the block identified by `blkhash` via the `getblock` RPC."""

    def get(self):
        btc_rpc_connection = AuthServiceProxy(RPC_URL)
        try:
            blkhash = self.get_argument("blkhash")
            if not blkhash:
                # Fall back to the chain tip.  Bug fix: the old fallback
                # passed a block *height* where getblock requires a hash,
                # so resolve the height through getblockhash first.
                height = BTC_GetBlockCount.process(btc_rpc_connection)
                blkhash = btc_rpc_connection.batch_([["getblockhash", height]])[0]
            # Bug fix: the hash was read but never passed to the RPC call
            # (the command was a bare ["getblock"]), so every request failed
            # with a missing-parameter error.
            commands = [["getblock", blkhash]]
            data = btc_rpc_connection.batch_(commands)
            self.write(json.dumps(BaseHandler.success_ret_with_data(data), default=decimal_default))
        except Exception as e:
            self.write(json.dumps(BaseHandler.error_ret_with_data("error: %s"%e)))
            # Fixed log label (was "BTC_GetBlockHash").
            logging.error("BTC_GetBlock error:{0} in {1}".format(e,get_linenumber()))
| 43.808743 | 139 | 0.627541 |
import logging
import json
from utils import decimal_default,get_linenumber
from base_handler import BaseHandler
from .proxy import AuthServiceProxy
from cashaddress import convert
import traceback
from decimal import Decimal
from decimal import getcontext
getcontext().prec = 8
from constants import BSV_RPC_URL as RPC_URL
STR_ADDRESS_TABLE = "t_btc_address"
class BTC_ListAccounts(BaseHandler):
@staticmethod
def addresses():
from sql import run
accounts = run("""select * from {};""".format(STR_ADDRESS_TABLE))
return [account['address'] for account in accounts]
def get(self):
try:
data = BTC_ListAccounts.addresses()
self.write(json.dumps(BaseHandler.success_ret_with_data(data), default=decimal_default))
except Exception as e:
self.write(json.dumps(BaseHandler.error_ret_with_data("error: %s"%e)))
logging.error("BTC_ListAccounts error:{0} in {1}".format(e,get_linenumber()))
g_exUserAddrs = BTC_ListAccounts.addresses()
class BTC_GetAccount(BaseHandler):
def get(self):
btc_rpc_connection = AuthServiceProxy(RPC_URL)
try:
commands = [["getaccount",self.get_argument("address")]]
data = btc_rpc_connection.batch_(commands)
self.write(json.dumps(BaseHandler.success_ret_with_data(data), default=decimal_default))
except Exception as e:
self.write(json.dumps(BaseHandler.error_ret_with_data("error: %s"%e)))
logging.error("BTC_GetAccount error:{0} in {1}".format(e,get_linenumber()))
class BTC_GetAccountAddress(BaseHandler):
def get(self):
btc_rpc_connection = AuthServiceProxy(RPC_URL)
try:
commands = [["getaccountaddress",self.get_argument("account")]]
data = btc_rpc_connection.batch_(commands)
self.write(json.dumps(BaseHandler.success_ret_with_data(data), default=decimal_default))
except Exception as e:
self.write(json.dumps(BaseHandler.error_ret_with_data("error: %s"%e)))
logging.error("BTC_GetAccoutAddress error:{0} in {1}".format(e,get_linenumber()))
class BTC_GetAccountBalance(BaseHandler):
    """GET handler: return the wallet balance of a named account via ``getbalance``."""
    def get(self):
        btc_rpc_connection = AuthServiceProxy(RPC_URL)
        try:
            # NOTE(review): ``.decode("utf-8")`` only works if get_argument
            # returns bytes (Python 2 era tornado); under Python 3 this would
            # raise AttributeError on str -- confirm the target runtime.
            account = self.get_argument("account").decode("utf-8")
            # Reject empty account names before hitting the node.
            if account is None or len(account) == 0:
                self.write(json.dumps(BaseHandler.error_ret()))
                return
            commands = [["getbalance", account]]
            data = btc_rpc_connection.batch_(commands)
            self.write(json.dumps(BaseHandler.success_ret_with_data(data), default=decimal_default))
        except Exception as e:
            self.write(json.dumps(BaseHandler.error_ret_with_data("error: %s"%e)))
            logging.error("BTC_GetAccountBalance error:{0} in {1}".format(e,get_linenumber()))
class BTC_GetBalance(BaseHandler):
    """GET handler: sum the spendable UTXOs of one address and return the total."""
    def get(self):
        btc_rpc_connection = AuthServiceProxy(RPC_URL)
        try:
            addr = self.get_argument("address")
            # Balance is derived from listunspent output rather than wallet
            # account bookkeeping, so only confirmed UTXOs are counted.
            data = BTC_ListUTXO.utxo(btc_rpc_connection, addr)
            if not data:
                # No UTXOs at all: report "0" (note: via the error payload,
                # which existing callers apparently expect).
                self.write(json.dumps(BaseHandler.error_ret_with_data("0")))
                return
            from utils import accumulate
            self.write(json.dumps(BaseHandler.success_ret_with_data('%.8f' % accumulate(data)), default=decimal_default))
        except Exception as e:
            self.write(json.dumps(BaseHandler.error_ret_with_data("error: %s"%e)))
            logging.error("BTC_GetBalance error:{0} in {1}".format(e,get_linenumber()))
class BTC_ListUTXO(BaseHandler):
    """Query unspent outputs (``listunspent``) for one or more addresses."""
    @staticmethod
    def utxo(rpcconn, addrs, minconf=1, maxconf=9999999, opt=None):
        """Return listunspent results with cash addresses rewritten to legacy form.

        ``addrs`` may be a single address or a list; an empty list asks the
        node for all wallet UTXOs.  ``opt`` is passed through as the optional
        query-options object when given.
        """
        argAddrs = addrs if isinstance(addrs, list) else [addrs]
        if opt == None:
            commands = [["listunspent", minconf, maxconf, argAddrs, True]]
        else:
            commands = [["listunspent", minconf, maxconf, argAddrs, True, opt]]
        # batch_ returns a list of results; the single listunspent reply is [0].
        utxos = rpcconn.batch_(commands)[0]
        # Normalize: expose the legacy address under 'address' and keep the
        # node-reported cashaddr under 'cashaddress'.
        for i in range(len(utxos)):
            cashAddr = utxos[i]['address']
            legacyAddr = convert.to_legacy_address(cashAddr)
            utxos[i]['address'] = legacyAddr
            utxos[i]['cashaddress'] = cashAddr
        return utxos
    def post(self):
        btc_rpc_connection = AuthServiceProxy(RPC_URL)
        data = None
        try:
            # Optional confirmation-range filters; empty strings fall back to
            # the widest range (1 .. 9999999).
            minconf = int(self.get_argument("minconf")) if not self.get_argument("minconf") == "" else 1
            maxconf = int(self.get_argument("maxconf")) if not self.get_argument("maxconf") == "" else 9999999
            addr = self.get_argument("address")
            data = BTC_ListUTXO.utxo(btc_rpc_connection,addr,minconf,maxconf)
            self.write(json.dumps(BaseHandler.success_ret_with_data(data), default=decimal_default))
        except Exception as e:
            self.write(json.dumps(BaseHandler.error_ret_with_data("error: %s"%e)))
            logging.error("BTC_GetUTXO error:{0} in {1}".format(e,get_linenumber()))
class BTC_EstimateSmartFee(BaseHandler):
    """Fee-estimation helpers plus a GET endpoint returning the fee for a
    typical 1-input / 2-output transaction.

    NOTE(review): the original source of this block was corrupted (the body of
    ``process`` and the signature of ``calcFee`` were mangled into ``t = 2):``,
    a syntax error).  It is reconstructed here from its call sites:
    ``calcFee(rpcconn, nIn, nOut)`` is used throughout the file, and
    ``process`` must return a fee rate in satoshi per byte (``calcFee``
    divides it by 1e8 before multiplying by the estimated tx size in bytes).
    """

    @staticmethod
    def process(rpcconn, nConfTarget=2, strEstimateMode='ECONOMICAL'):
        """Return the estimated fee rate in satoshi per byte.

        Asks the node via ``estimatefee`` (which reports coin/kB); when the
        node has no estimate (non-positive result) or the call fails, a
        conservative default of 2 sat/byte is returned.  ``nConfTarget`` and
        ``strEstimateMode`` are accepted for ``estimatesmartfee``-style
        callers but not required by ``estimatefee``.
        """
        try:
            feePerKB = rpcconn.batch_([["estimatefee"]])[0]
            feePerKB = Decimal(str(feePerKB))
            if feePerKB > 0:
                # coin/kB -> satoshi/byte  (1 coin = 1e8 satoshi, 1 kB = 1000 B)
                return feePerKB * Decimal(100000000) / Decimal(1000)
        except Exception as e:
            logging.error("estimatefee failed:{0} in {1}".format(e, get_linenumber()))
        return Decimal(2)

    @staticmethod
    def calcFee(rpcconn, nIn=1, nOut=2):
        """Estimate the fee (in coin units) for a P2PKH transaction with
        ``nIn`` inputs and ``nOut`` outputs, using the classic
        148*in + 34*out + 10 byte size heuristic."""
        from decimal import Decimal
        from decimal import getcontext
        getcontext().prec = 8
        rate = BTC_EstimateSmartFee.process(rpcconn)
        # satoshi/byte -> coin/byte, rendered at 8 decimal places
        rate = "%.8f" % (rate / Decimal(100000000.0))
        return Decimal(str((148 * nIn + 34 * nOut + 10))) * Decimal(rate)

    def get(self):
        try:
            rpcconn = AuthServiceProxy(RPC_URL)
            data = BTC_EstimateSmartFee.calcFee(rpcconn)
            data = '%.8f' % data
            self.write(json.dumps(BaseHandler.success_ret_with_data(data), default=decimal_default))
        except Exception as e:
            self.write(json.dumps(BaseHandler.error_ret_with_data("error: %s" % e)))
            logging.error("BTC_EstimateSmartFee error:{0} in {1}".format(e, get_linenumber()))
class BTC_CreateRawTransaction(BaseHandler):
    """POST handler: build an unsigned raw transaction from one address to another."""
    @staticmethod
    def process(rpcconn,from_addr,to_addr,amount):
        """Select UTXOs of ``from_addr`` covering ``amount`` + fee and build the tx.

        Returns ``(True, {"hex": ..., "utxos": ..., "txout": ...})`` on success
        or ``(False, "budget not enough")`` when the address cannot cover
        amount plus fee.
        """
        utxos = BTC_ListUTXO.utxo(rpcconn, from_addr)
        def UtxoFilter(utxos, amount):
            # Greedy coin selection: accumulate confirmed UTXOs above dust
            # (> 0.0003) until the running sum covers amount + estimated fee.
            selected = []
            from decimal import Decimal
            nSum = Decimal('0')
            nFee = Decimal('0.0')
            for utxo in [item for item in utxos if int(item["confirmations"]) >= 1 and float(item["amount"]) > 0.0003 ]:
                selected.append(utxo)
                nSum += Decimal(str((utxo["amount"])))
                if nSum > Decimal(str(amount)):
                    # Fee depends on input count, so re-estimate each round.
                    nFee = BTC_EstimateSmartFee.calcFee(rpcconn, len(selected), 2)
                    if nSum > nFee + amount:
                        break
            return selected, nSum, nFee
        selected, nSum , fee = UtxoFilter(utxos, amount)
        if not isinstance(amount, Decimal):
            amount = Decimal(str(amount))
        if nSum < fee + amount:
            return False,"budget not enough"
        from utils import filtered
        # Inputs carry only txid/vout; change goes back to the sender.
        param_in = [filtered(item,["txid","vout"]) for item in selected]
        param_out = {to_addr:amount, from_addr: nSum - amount - fee}
        commands = [["createrawtransaction",param_in,param_out]]
        return True, {"hex":rpcconn.batch_(commands), "utxos":selected, "txout":param_out}
    def post(self):
        btc_rpc_connection = AuthServiceProxy(RPC_URL)
        try:
            from_addr = self.get_argument("from")
            to_addr = self.get_argument("to")
            from decimal import Decimal
            amount = Decimal(str(self.get_argument("amount")))
            ret, rsp = BTC_CreateRawTransaction.process(btc_rpc_connection,from_addr,to_addr,amount)
            if not ret:
                # rsp is an error message string in this branch
                self.write(json.dumps(BaseHandler.error_ret_with_data(rsp)))
                return
            self.write(json.dumps(BaseHandler.success_ret_with_data(rsp), default=decimal_default))
        except Exception as e:
            self.write(json.dumps(BaseHandler.error_ret_with_data("error: %s"%e)))
            logging.error("BTC_CreatRawTransaction error:{0} in {1}".format(e,get_linenumber()))
class BTC_SendRawTransaction(BaseHandler):
    """POST handler: broadcast a signed raw transaction to the network."""

    def post(self):
        rpc = AuthServiceProxy(RPC_URL)
        try:
            rawdata = self.get_argument("rawdata")
            if not rawdata:
                # Nothing to broadcast: silently end the request (historical behavior).
                return
            result = rpc.batch_([["sendrawtransaction", rawdata]])
            self.write(json.dumps(BaseHandler.success_ret_with_data(result), default=decimal_default))
        except Exception as e:
            self.write(json.dumps(BaseHandler.error_ret_with_data("error: %s"%e)))
            logging.error("BTC_SendRawTransaction error:{0} in {1}".format(e,get_linenumber()))
class BTC_CreateRawTransactionEx(BaseHandler):
    """POST handler: build a raw transaction from multiple source addresses
    to multiple destinations (JSON body: ``src`` list, ``dest`` object)."""
    @staticmethod
    def genearateInParam(rpcconn, src, dest):
        """Gather UTXOs across ``src`` addresses until they cover the total
        destination amount plus the estimated fee.

        Returns ``(covered, utxos, change)`` where ``change`` is what remains
        after amount + fee (negative when the budget is insufficient).
        """
        utxos,gross,amount = [],Decimal('0'),sum(dest.values())
        redundant = 0
        for addr in src:
            all = BTC_ListUTXO.utxo(rpcconn,addr)
            from utils import recommended
            selected,aggregate = recommended(all,amount)
            utxos += selected
            gross += aggregate
            # One extra output is reserved for the change address.
            redundant = gross - BTC_EstimateSmartFee.calcFee(rpcconn, len(utxos), len(dest.keys())+1) - amount
            if redundant > 0:
                return True,utxos,redundant
        return False,utxos,redundant
    @staticmethod
    def generateOutParam(dest):
        """Coerce all destination amounts to Decimal."""
        param_out = {}
        for key,value in dest.items():
            param_out[key] = Decimal(value) if isinstance(value, str) else Decimal(str(value))
        return param_out
    @staticmethod
    def process(rpcconn, src, dest ):
        """Build the raw tx; change is credited to the first source address."""
        param_out = BTC_CreateRawTransactionEx.generateOutParam(dest)
        ret,utxos,redundant = BTC_CreateRawTransactionEx.genearateInParam(rpcconn,src,param_out)
        if not ret: return False, "budget not enough"
        # Send the change to src[0]; merge with an existing entry if present.
        param_out[src[0]] = redundant if src[0] not in param_out.keys() else param_out[src[0]] + redundant
        from utils import filtered
        param_in = [filtered(item,["txid","vout"]) for item in utxos]
        return True, {"hex":rpcconn.batch_([["createrawtransaction",param_in,param_out]]),"utxos":utxos, "txout":param_out}
    def get_argument_ex(self, str):
        # Arguments arrive as a JSON request body rather than query params.
        from utils import json2dict
        str2dict = json2dict(self.request.body)
        return str2dict[str] if str in str2dict.keys() else False
    def post(self):
        btc_rpc_connection = AuthServiceProxy(RPC_URL)
        try:
            src = self.get_argument_ex("src")
            dest = self.get_argument_ex("dest")
            if not isinstance(src, list):
                self.write(json.dumps(BaseHandler.error_ret_with_data("error: %s" % ("src must be json list"))))
                return
            if not isinstance(dest, dict):
                self.write(json.dumps(BaseHandler.error_ret_with_data("error: %s" % ("dest must be json object"))))
                return
            ret, rsp = BTC_CreateRawTransactionEx.process(btc_rpc_connection, src, dest)
            if not ret:
                self.write(json.dumps(BaseHandler.error_ret_with_data(rsp)))
                return
            self.write(json.dumps(BaseHandler.success_ret_with_data(rsp), default=decimal_default))
        except Exception as e:
            self.write(json.dumps(BaseHandler.error_ret_with_data("error: %s"%e)))
            logging.error("BTC_CreateRawTransactionEx error:{0} in {1}".format(e,get_linenumber()))
class BTC_CreateRawTransactionEx_Collection(BaseHandler):
    """POST handler: sweep ("collect") funds from many deposit addresses into
    a single destination.  ``src`` of ``["*"]`` means all known addresses."""
    @staticmethod
    def makeParams( rpcconn, lstSrc, lstDest):
        """Collect every spendable UTXO of the source addresses.

        Returns ``(ok, utxos, txAmount, fTxFee)`` where ``txAmount`` is the
        gross sum minus the estimated fee; ``ok`` is False when the sweepable
        amount is at or below dust (0.0003).
        """
        if len(lstSrc) == 1 and lstSrc[0].strip() == "*":
            # Wildcard: sweep every known deposit address.
            lstSrcAddrs = g_exUserAddrs
        else:
            lstSrcAddrs = lstSrc
        utxos, nSum = [], Decimal('0')
        txAmount, fTxFee = 0, 0
        if isinstance(lstSrc, list):
            # Empty address list asks the node for all wallet UTXOs; filter
            # locally against the source address set.
            lstUtxos = BTC_ListUTXO.utxo(rpcconn, [ ], 1, 9999999)
            for utxo in lstUtxos:
                if Decimal(utxo['amount']) < 0.0003: continue
                if utxo['address'].strip() in lstSrcAddrs:
                    utxos.append(utxo)
                    nSum += Decimal(str((utxo["amount"])))
            fTxFee = BTC_EstimateSmartFee.calcFee(rpcconn, len(utxos), len(lstDest))
            txAmount = nSum - fTxFee
            if txAmount <= 0.0003:
                return False, None, 0, 0
        return True, utxos, txAmount , fTxFee
    @staticmethod
    def process(rpcconn, lstSrc, lstDest):
        """Build the sweep transaction; only lstDest[0] receives funds."""
        bRet, utxos, txAmount, fTxFee = BTC_CreateRawTransactionEx_Collection.makeParams(rpcconn, lstSrc, lstDest)
        if not bRet:
            return False, "collection amount is too small!"
        strDst = lstDest[0]
        vout = {strDst : txAmount}
        from utils import filtered
        vin = [filtered(item,["txid","vout"]) for item in utxos]
        strHex = rpcconn.batch_([["createrawtransaction", vin, vout]])
        return True, {"hex": strHex, "utxos":utxos, "txout":vout, "txFee":fTxFee}
    def get_argument_ex(self, str):
        # Arguments arrive as a JSON request body rather than query params.
        from utils import json2dict
        str2dict = json2dict(self.request.body)
        return str2dict[str] if str in str2dict.keys() else False
    def post(self):
        btc_rpc_connection = AuthServiceProxy(RPC_URL)
        try:
            src = self.get_argument_ex("src")
            dest = self.get_argument_ex("dest")
            if not isinstance(src, list):
                self.write(json.dumps(BaseHandler.error_ret_with_data("error: %s" % ("src must be json list"))))
                return
            if not isinstance(dest, list):
                self.write(json.dumps(BaseHandler.error_ret_with_data("error: %s" % ("dest must be json list"))))
                return
            ret, rsp = BTC_CreateRawTransactionEx_Collection.process(btc_rpc_connection, src, dest)
            if not ret:
                self.write(json.dumps(BaseHandler.error_ret_with_data(rsp)))
                return
            self.write(json.dumps(BaseHandler.success_ret_with_data(rsp), default=decimal_default))
        except Exception as e:
            self.write(json.dumps(BaseHandler.error_ret_with_data("error: %s"%e)))
            logging.error("BTC_CreateRawTransactionEx error:{0} in {1}".format(e,get_linenumber()))
class BTC_CollectionQuery(BaseHandler):
    """GET handler: report the sweepable balance per known deposit address
    (UTXOs above dust, grouped by address, amounts as strings)."""
    def get(self):
        rpcconn = AuthServiceProxy(RPC_URL)
        try:
            # All wallet UTXOs; filter locally to service-owned addresses.
            lstUtxos = BTC_ListUTXO.utxo(rpcconn, [], 1, 9999999)
            mapRet = {}
            for utxo in lstUtxos:
                strAddr = utxo['address'].strip()
                if Decimal(utxo['amount']) < 0.0003: continue
                if strAddr not in g_exUserAddrs : continue
                if strAddr not in mapRet:
                    mapRet[strAddr] = Decimal("0.0")
                nAmount = utxo['amount']
                # Accumulate as Decimal but store as string for JSON safety.
                mapRet[strAddr] = str( nAmount + Decimal( mapRet[strAddr]) )
            self.write(json.dumps(BaseHandler.success_ret_with_data(mapRet), default=decimal_default))
        except Exception as e:
            self.write(json.dumps(BaseHandler.error_ret_with_data("error: %s"%e)))
            logging.error("BTC_CollectionQuery error:{0} in {1}".format(e, get_linenumber()))
class BTC_ListTransactions(BaseHandler):
    """GET handler: list confirmed receive transactions for an account."""
    @staticmethod
    def blktimes(rpc_connection,account="*",tx_counts=10):
        """Return blocktimes of the most recent confirmed txs, newest first."""
        commands = [["listtransactions",account,tx_counts]]
        data = rpc_connection.batch_(commands)
        if len(data) == 0: return []
        return [item['blocktime'] for item in data[0] if "blocktime" in item][::-1]
    # ``include_watchonly`` below also covers addresses tracked watch-only,
    # i.e. whose private key was not imported into the wallet. #yqq 2019-03-26
    @staticmethod
    def process(rpc_connection,account="*",tx_counts=10,skips=0,include_watchonly=True):
        """Return confirmed 'receive' txs as trimmed dicts, newest first."""
        commands = [["listtransactions",account,tx_counts,skips, include_watchonly]]
        data = rpc_connection.batch_(commands)
        if len(data) == 0: return []
        # Only keep txs already written into the blockchain (they carry a
        # 'blocktime') and only the 'receive' category. @yqq 2019-03-21
        txs = [item for item in data[0] if "blocktime" in item and item["category"] == "receive"]
        from utils import filtered
        return [filtered(item,["address","category","amount","confirmations","txid","blocktime"]) for item in txs][::-1]
    def get(self):
        btc_rpc_connection = AuthServiceProxy(RPC_URL)
        try:
            # Empty query params fall back to: all accounts, 10 txs, no skip.
            account = self.get_argument("account") if self.get_argument("account") else "*"
            tx_counts = int(self.get_argument("count")) if self.get_argument("count") else 10
            skips = int(self.get_argument("skips")) if self.get_argument("skips") else 0
            data = BTC_ListTransactions.process(btc_rpc_connection,account,tx_counts,skips)
            self.write(json.dumps(BaseHandler.success_ret_with_data(data), default=decimal_default))
        except Exception as e:
            self.write(json.dumps(BaseHandler.error_ret_with_data("error: %s"%e)))
            logging.error("BTC_ListTransActions error:{0} in {1}".format(e,get_linenumber()))
class BTC_CrawlTxData(BaseHandler):
    """POST handler: return deposit txs to known addresses at or after a
    given blocktime (used by a scanner that remembers its last position)."""
    @staticmethod
    def process(rpc_connection, nblktime):
        """Filter all receive txs down to known addresses with
        blocktime >= ``nblktime``; addresses normalized to legacy form."""
        if len(g_exUserAddrs) == 0:
            return []
        # Very large count: effectively "all" transactions.
        txs = BTC_ListTransactions.process(rpc_connection, '*', 100000000)
        retTxs = []
        for tx in txs:
            strLegacyAddr = convert.to_legacy_address(tx["address"].strip())
            tx["address"] = strLegacyAddr.strip()
            if int(str(tx['blocktime'])) >= nblktime and tx["address"].strip() in g_exUserAddrs:
                retTxs.append(tx)
        return retTxs
    def post(self):
        rpc_connection = AuthServiceProxy(RPC_URL)
        try:
            lastscannedblktime = int(str(self.get_argument("blocktime")))
            data = BTC_CrawlTxData.process(rpc_connection,lastscannedblktime)
            # Serialize amounts as strings so JSON encoding cannot mangle
            # Decimal precision.
            for i in range(len(data)):
                data[i]["amount"] = str(data[i]["amount"])
            self.write(json.dumps(BaseHandler.success_ret_with_data(data), default=decimal_default))
        except Exception as e:
            self.write(json.dumps(BaseHandler.error_ret_with_data("error: %s"%e)))
            logging.error("BTC_CrawlTxData error:{0} in {1}".format(e,get_linenumber()))
class BTC_GetBlockCount(BaseHandler):
    """GET handler: return the node's current block height."""

    @staticmethod
    def process(rpcconn):
        """Return the chain height as an int.

        ``batch_`` returns a list of results (see BTC_ListUTXO.utxo), so the
        single result must be unpacked before the int() conversion -- the
        previous ``int(rpcconn.batch_(commands))`` raised TypeError on a list.
        """
        commands = [["getblockcount"]]
        return int(rpcconn.batch_(commands)[0])

    def get(self):
        btc_rpc_connection = AuthServiceProxy(RPC_URL)
        try:
            blknumber = BTC_GetBlockCount.process(btc_rpc_connection)
            self.write(json.dumps(BaseHandler.success_ret_with_data(blknumber), default=decimal_default))
        except Exception as e:
            self.write(json.dumps(BaseHandler.error_ret_with_data("error: %s"%e)))
            logging.error("BTC_GetBlockCount error:{0} in {1}".format(e,get_linenumber()))
class BTC_GetBlockHash(BaseHandler):
    """GET handler: return the block hash at a given height (tip by default)."""

    @staticmethod
    def process(rpcconn,blknumber):
        return rpcconn.batch_([["getblockhash",blknumber]])

    def get(self):
        rpc = AuthServiceProxy(RPC_URL)
        try:
            height = self.get_argument("blknumber")
            if not height:
                # No height supplied: default to the current chain tip.
                height = BTC_GetBlockCount.process(rpc)
            result = BTC_GetBlockHash.process(rpc, height)
            self.write(json.dumps(BaseHandler.success_ret_with_data(result), default=decimal_default))
        except Exception as e:
            self.write(json.dumps(BaseHandler.error_ret_with_data("error: %s"%e)))
            logging.error("BTC_GetBlockHash error:{0} in {1}".format(e,get_linenumber()))
class BTC_DecodeRawTransaction(BaseHandler):
    """POST handler: decode a raw transaction hex into its JSON structure."""

    def post(self):
        rpc = AuthServiceProxy(RPC_URL)
        try:
            rawdata = self.get_argument("rawdata")
            result = rpc.batch_([["decoderawtransaction", rawdata]])
            self.write(json.dumps(BaseHandler.success_ret_with_data(result), default=decimal_default))
        except Exception as e:
            self.write(json.dumps(BaseHandler.error_ret_with_data("error: %s"%e)))
            # log label kept as historically emitted
            logging.error("BTC_GetTransaction error:{0} in {1}".format(e,get_linenumber()))
class BTC_GetRawTransaction(BaseHandler):
    """GET handler: fetch a transaction by txid in verbose (decoded) form."""

    def get(self):
        rpc = AuthServiceProxy(RPC_URL)
        try:
            txid = self.get_argument("txid")
            # True -> verbose: the node returns the decoded transaction object.
            result = rpc.batch_([["getrawtransaction", txid, True]])
            self.write(json.dumps(BaseHandler.success_ret_with_data(result), default=decimal_default))
        except Exception as e:
            self.write(json.dumps(BaseHandler.error_ret_with_data("error: %s"%e)))
            # log label kept as historically emitted
            logging.error("BTC_GetTransaction error:{0} in {1}".format(e,get_linenumber()))
class BTC_GetBlock(BaseHandler):
    """GET handler: return block data for a given block hash (tip by default)."""
    def get(self):
        btc_rpc_connection = AuthServiceProxy(RPC_URL)
        try:
            blkhash = self.get_argument("blkhash")
            if not blkhash:
                # Previous fallback passed a block *height* where getblock
                # needs a hash; resolve the tip height to its hash instead.
                height = BTC_GetBlockCount.process(btc_rpc_connection)
                blkhash = BTC_GetBlockHash.process(btc_rpc_connection, height)[0]
            # Bug fix: the hash was never passed to getblock before, so the
            # RPC call always failed with a missing-argument error.
            commands = [["getblock", blkhash]]
            data = btc_rpc_connection.batch_(commands)
            self.write(json.dumps(BaseHandler.success_ret_with_data(data), default=decimal_default))
        except Exception as e:
            self.write(json.dumps(BaseHandler.error_ret_with_data("error: %s"%e)))
            logging.error("BTC_GetBlock error:{0} in {1}".format(e,get_linenumber()))
| true | true |
79016cd7b072ebe0a6633862ea0beada1a4ce8ca | 186 | py | Python | dataset_utils/general_utils.py | kareemjano/image_toolbox | ea9e1654142a1492e7e462b4f0a8245f4ee430ae | [
"Apache-2.0"
] | null | null | null | dataset_utils/general_utils.py | kareemjano/image_toolbox | ea9e1654142a1492e7e462b4f0a8245f4ee430ae | [
"Apache-2.0"
] | null | null | null | dataset_utils/general_utils.py | kareemjano/image_toolbox | ea9e1654142a1492e7e462b4f0a8245f4ee430ae | [
"Apache-2.0"
] | null | null | null | from collections import defaultdict
def list_to_map(Xs, ys):
    """Group the samples in ``Xs`` by their corresponding label in ``ys``.

    Returns a defaultdict mapping each label to the list of samples that
    carry it, preserving input order within each group.
    """
    grouped = defaultdict(list)
    for sample, label in zip(Xs, ys):
        grouped[label].append(sample)
    return grouped
def list_to_map(Xs, ys):
labels_map = defaultdict(list)
for x, y in list(zip(Xs, ys)):
labels_map[y].append(x)
return labels_map | true | true |
79016cdb695b3d6ba5c0c673abce73f037ff232b | 217 | py | Python | Config.py | sjsafranek/pomegranate | 8965a7cc2fe8b6981f3961fa8cd6e8b1bdff8ccf | [
"MIT"
] | null | null | null | Config.py | sjsafranek/pomegranate | 8965a7cc2fe8b6981f3961fa8cd6e8b1bdff8ccf | [
"MIT"
] | 1 | 2017-01-19T02:02:15.000Z | 2017-01-19T02:38:58.000Z | Config.py | sjsafranek/pomegranate | 8965a7cc2fe8b6981f3961fa8cd6e8b1bdff8ccf | [
"MIT"
] | null | null | null | #!/usr/bin/python
import os
import json
def get_db_config():
# read config file and return data
data = {}
with open('config.json', 'r') as infile:
data = json.loads(infile.read())
return data
| 19.727273 | 44 | 0.631336 |
import os
import json
def get_db_config():
data = {}
with open('config.json', 'r') as infile:
data = json.loads(infile.read())
return data
| true | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.