text stringlengths 38 1.54M |
|---|
from datetime import datetime
from dateutil.relativedelta import relativedelta
from django import forms
from accounts import validators as account_validators
from accounts.models import BOOL_CHOICES, OnlineDisclaimer, DISCLAIMER_TERMS, \
OVER_18_TERMS, MEDICAL_TREATMENT_TERMS
class SignupForm(forms.Form):
    """Extra signup fields collected alongside the allauth registration form."""
    first_name = forms.CharField(max_length=30, label='First name')
    last_name = forms.CharField(max_length=30, label='Last name')

    def signup(self, request, user):
        """Copy the name fields onto the newly created user and persist it."""
        cleaned = self.cleaned_data
        user.first_name = cleaned['first_name']
        user.last_name = cleaned['last_name']
        user.save()
class DisclaimerForm(forms.ModelForm):
    """ModelForm for an :class:`OnlineDisclaimer`.

    Adds three confirmation checkboxes (medical treatment permission,
    terms acceptance, over-18 confirmation) on top of the model fields,
    plus a password field used to re-confirm the submitter's identity
    before the disclaimer data is stored.
    """

    # The checkboxes are declared required=False so that the custom
    # validators (not the generic "this field is required" message)
    # produce the user-facing errors.
    medical_treatment_permission = forms.BooleanField(
        validators=[account_validators.validate_medical_treatment_permission],
        required=False,
        widget=forms.CheckboxInput(
            attrs={'class': 'regular-checkbox'}
        ),
        label='Please tick to confirm'
    )
    terms_accepted = forms.BooleanField(
        validators=[account_validators.validate_confirm],
        required=False,
        widget=forms.CheckboxInput(
            attrs={'class': 'regular-checkbox'}
        ),
        label='Please tick to accept terms'
    )
    age_over_18_confirmed = forms.BooleanField(
        validators=[account_validators.validate_age],
        required=False,
        widget=forms.CheckboxInput(
            attrs={'class': 'regular-checkbox'}
        ),
        label='Please tick to confirm'
    )
    # Free-text detail fields; each is only required when the matching
    # yes/no radio field is answered "yes" (enforced in clean()).
    medical_conditions_details = forms.CharField(
        widget=forms.Textarea(
            attrs={'class': 'form-control', 'rows': 3}
        ),
        label="If yes, please give details",
        required=False
    )
    allergies_details = forms.CharField(
        widget=forms.Textarea(
            attrs={'class': 'form-control', 'rows': 3}
        ),
        label="If yes, please give details",
        required=False
    )
    joint_problems_details = forms.CharField(
        widget=forms.Textarea(
            attrs={'class': 'form-control', 'rows': 3}
        ),
        label="If yes, please give details",
        required=False
    )
    password = forms.CharField(
        widget=forms.PasswordInput(),
        label="Please enter your password to submit your data.<br/>"
              "By submitting this form, you confirm that "
              "the information you have provided is complete and accurate.",
        required=True
    )

    def __init__(self, *args, **kwargs):
        super(DisclaimerForm, self).__init__(*args, **kwargs)
        # The agreed-to terms are read-only fields. For a new disclaimer, we
        # show the default terms from the model. If we're updating an existing
        # disclaimer, we show the terms that are already on the instance (i.e.
        # the terms the user agreed to before). THESE WILL NEVER CHANGE! If
        # the default terms are updated, existing disclaimers will continue to
        # show the old terms that the user agreed to when they first completed
        # the disclaimer.
        if self.instance.id:
            # In the DisclaimerForm, these attributes are auto-populated from
            # the saved instance so templates can display them.
            self.medical_treatment_terms = self.instance.medical_treatment_terms
            self.disclaimer_terms = self.instance.disclaimer_terms
            # NOTE(review): this branch sets self.age_over_18_confirmed while
            # the else branch sets self.over_18_terms — so over_18_terms is
            # never populated for an existing instance.  This looks like it
            # was meant to copy the agreed over-18 terms from the instance;
            # confirm against the OnlineDisclaimer model and the templates
            # before changing.
            self.age_over_18_confirmed = self.instance.age_over_18_confirmed
        else:
            self.disclaimer_terms = DISCLAIMER_TERMS
            self.over_18_terms = OVER_18_TERMS
            self.medical_treatment_terms = MEDICAL_TREATMENT_TERMS
        # Only one contact phone number is mandatory; home phone is optional.
        self.fields['home_phone'].required = False

    class Meta:
        model = OnlineDisclaimer
        fields = (
            'name', 'dob', 'address', 'postcode', 'home_phone', 'mobile_phone',
            'emergency_contact1_name', 'emergency_contact1_relationship',
            'emergency_contact1_phone', 'emergency_contact2_name',
            'emergency_contact2_relationship', 'emergency_contact2_phone',
            'medical_conditions', 'medical_conditions_details',
            'joint_problems', 'joint_problems_details', 'allergies',
            'allergies_details', 'medical_treatment_permission',
            'terms_accepted', 'age_over_18_confirmed')
        widgets = {
            'name': forms.TextInput(
                attrs={'class': 'form-control'}
            ),
            'address': forms.TextInput(
                attrs={'class': 'form-control'}
            ),
            # dob renders/parses as e.g. "08 Jun 1990"; clean() re-parses the
            # raw value with the same format.
            'dob': forms.DateInput(
                attrs={
                    'class': "form-control",
                    'id': 'dobdatepicker',
                },
                format='%d %b %Y'
            ),
            'postcode': forms.TextInput(
                attrs={'class': 'form-control'}
            ),
            'home_phone': forms.TextInput(
                attrs={'class': 'form-control'}
            ),
            'mobile_phone': forms.TextInput(
                attrs={'class': 'form-control'}
            ),
            'emergency_contact1_name': forms.TextInput(
                attrs={'class': 'form-control'}
            ),
            'emergency_contact1_relationship': forms.TextInput(
                attrs={'class': 'form-control'}
            ),
            'emergency_contact1_phone': forms.TextInput(
                attrs={'class': 'form-control'}
            ),
            'emergency_contact2_name': forms.TextInput(
                attrs={'class': 'form-control'}
            ),
            'emergency_contact2_relationship': forms.TextInput(
                attrs={'class': 'form-control'}
            ),
            'emergency_contact2_phone': forms.TextInput(
                attrs={'class': 'form-control'}
            ),
            'medical_conditions': forms.RadioSelect(choices=BOOL_CHOICES),
            'joint_problems': forms.RadioSelect(choices=BOOL_CHOICES),
            'allergies': forms.RadioSelect(choices=BOOL_CHOICES),
        }

    def clean(self):
        """Cross-field validation: require detail text when a yes/no field is
        answered "yes", and parse/validate the date of birth."""
        if self.cleaned_data.get('medical_conditions', False) \
                and not self.cleaned_data['medical_conditions_details']:
            self.add_error(
                'medical_conditions_details',
                'Please provide details of medical conditions'
            )
        if self.cleaned_data.get('joint_problems', False) \
                and not self.cleaned_data['joint_problems_details']:
            self.add_error(
                'joint_problems_details',
                'Please provide details of knee/back/shoulder/ankle/hip/neck '
                'problems'
            )
        if self.cleaned_data.get('allergies', False) \
                and not self.cleaned_data['allergies_details']:
            self.add_error(
                'allergies_details',
                'Please provide details of allergies'
            )
        # dob is re-parsed from the raw submitted value because the widget
        # uses a non-default display format; any default-format error Django
        # already recorded is discarded first.
        dob = self.data.get('dob', None)
        if dob and self.errors.get('dob'):
            del self.errors['dob']
        if dob:
            try:
                dob = datetime.strptime(dob, '%d %b %Y').date()
                self.cleaned_data['dob'] = dob
            except ValueError:
                self.add_error(
                    'dob', 'Invalid date format. Select from '
                           'the date picker or enter date in the '
                           'format e.g. 08 Jun 1990')
            # Only apply the age check if the date itself parsed cleanly.
            if not self.errors.get('dob'):
                yearsago = datetime.today().date() - relativedelta(years=18)
                if dob > yearsago:
                    self.add_error(
                        'dob', 'You must be over 18 years in order to register')
        return super(DisclaimerForm, self).clean()
|
"""
Django settings for benchmark project.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
#import djcelery
from environ import Path
from benchmark.utils.environ import Env

# Resolve filesystem paths relative to this settings module.
ROOT_DIR = Path(__file__) - 3  # Three folders back
APPS_DIR = ROOT_DIR.path('benchmark')

# Nothing initially
# NOTE(review): Env is the project's wrapper around django-environ's Env —
# assumed to have the same env()/env.bool()/env.int() API; confirm in
# benchmark/utils/environ.py.
env = Env()

# Read .env file
env_file = str(ROOT_DIR.path('.env'))
env.read_env(env_file)

SITE_ROOT = ROOT_DIR()
SITE_ID = env('SITE_ID', default=1)
DEBUG = env.bool('DJANGO_DEBUG', False)

# codeontap environment variables
BUILD_REFERENCE = env("BUILD_REFERENCE", default=None)
CONFIGURATION_REFERENCE = env("CONFIGURATION_REFERENCE", default=None)
APP_REFERENCE = env("APP_REFERENCE", default=None)
ENVIRONMENT = env("ENVIRONMENT", default='local')

if DEBUG:
    # require it only for debug=False, let user ignore it for debug=True
    SECRET_KEY = env('DJANGO_SECRET_KEY', default='XXX')
else:
    SECRET_KEY = env('DJANGO_SECRET_KEY')

# ALLOWED_HOSTS = ['*']
# Comma-separated host list from the environment; defaults to allowing all.
ALLOWED_HOSTS = env(
    'DJANGO_ALLOWED_HOSTS',
    default='*'
).split(',')

DJANGO_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'django.contrib.sites',
    'django.contrib.humanize',
]
THIRD_PARTY_APPS = [
    'crispy_forms',
    #'django_ses',
    #'djcelery',
    'djcelery_email',
    'queued_storage',
    'memoize',
    'avatar'
]
LOCAL_APPS = [
    'benchmark',
    'benchmark.account',
    'benchmark.report',
    'benchmark.chart'
]

# Applications definition
INSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'benchmark.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [
            str(APPS_DIR.path('templates')),
        ],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'benchmark.wsgi.application'

# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': env('DATABASE_ENGINE', default='django.db.backends.postgresql'),
        'NAME': env('DATABASE_NAME', default=''),
        'USER': env('DATABASE_USERNAME', default=''),
        'PASSWORD': env('DATABASE_PASSWORD', default=''),
        'HOST': env('DATABASE_HOST', default=''),
        'PORT': env('DATABASE_PORT', default='')
    }
}

# The age of session cookies, in seconds
SESSION_COOKIE_AGE = env.int('SESSION_COOKIE_AGE', default=60 * 60)

# Caching
# Default cache is a no-op dummy; the data-science caches (releases/charts)
# use Redis configured from the environment.
DATA_SCIENCE_CACHE_BACKEND = env("DATA_SCIENCE_CACHE_BACKEND", default='django_redis.cache.RedisCache')
DATA_SCIENCE_CACHE_URL = env("DATA_SCIENCE_CACHE_URL", default='')
CACHES = {
    'default': {
        'BACKEND': 'django.core.cache.backends.dummy.DummyCache',
    },
    'releases': {
        'BACKEND': DATA_SCIENCE_CACHE_BACKEND,
        'LOCATION': DATA_SCIENCE_CACHE_URL,
        'OPTIONS': {
            'CLIENT_CLASS': 'django_redis.client.DefaultClient',
        },
        'TIMEOUT': None  # Never expires
    },
    'charts': {
        'BACKEND': DATA_SCIENCE_CACHE_BACKEND,
        'LOCATION': DATA_SCIENCE_CACHE_URL,
        'OPTIONS': {
            'CLIENT_CLASS': 'django_redis.client.DefaultClient',
        },
        'TIMEOUT': SESSION_COOKIE_AGE * 2  # Expire it with session * 2
    }
}

# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
if DEBUG is False:
    AUTH_PASSWORD_VALIDATORS = [
        {
            'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
        },
        {
            'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
        },
        {
            'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
        },
        {
            'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
        },
    ]
else:
    AUTH_PASSWORD_VALIDATORS = []  # hate it locally

# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Australia/Canberra'
USE_I18N = True
USE_L10N = True
USE_TZ = True

# Celery
CELERY_BROKER_URL = env('CELERY_BROKER_URL', default='')
CELERY_RESULT_BACKEND = env('CELERY_RESULT_BACKEND', default='redis')
#CELERYBEAT_SCHEDULER = env('CELERYBEAT_SCHEDULER', default='djcelery.schedulers.DatabaseScheduler')
#djcelery.setup_loader()

# AWS
AWS_REGION = env("AWS_REGION", default='')
AWS_ACCESS_KEY_ID = env("AWS_ACCESS_KEY_ID", default='')
AWS_SECRET_ACCESS_KEY = env("AWS_SECRET_ACCESS_KEY", default='')

# Emails
# Mail is queued through celery; the celery worker then delivers via SMTP.
EMAIL_BACKEND = env('DJANGO_EMAIL_BACKEND', default='djcelery_email.backends.CeleryEmailBackend')
CELERY_EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_USE_TLS = env('SMTP_USE_TLS', default=True)
EMAIL_HOST = env('SMTP_HOST', default='')
EMAIL_PORT = env('SMTP_PORT', default=587)
EMAIL_HOST_USER = env('SMTP_USERNAME', default='')
EMAIL_HOST_PASSWORD = env('SMTP_PASSWORD', default='')
EMAIL_FROM_EMAIL = env('SMTP_FROM_EMAIL', default='')

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = env(
    "DJANGO_STATIC_ROOT",
    default=str(ROOT_DIR('../var/static_root'))
)
STATICFILES_DIRS = [
    str(APPS_DIR.path('static')),
]
STATICFILES_FINDERS = [
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
]

# AWS S3
# NOTE(review): AWS_REGION, AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY are
# re-assigned here with the same values as in the "AWS" section above —
# redundant but harmless; consider consolidating.
AWS_REGION = env("AWS_REGION", default='')
APPDATA_BUCKET = env("APPDATA_BUCKET", default='')
APPDATA_PREFIX = env("APPDATA_PREFIX", default='')
AWS_STORAGE_BUCKET_NAME = APPDATA_BUCKET
AWS_UPLOADS_LOCATION = env("AWS_UPLOADS_LOCATION", default='uploads')
AWS_RELEASE_LOCATION = env("AWS_RELEASE_LOCATION", default='releases')
# TODO: Refactor it to use storage.remote_options
# Key prefixes within the bucket; nested under APPDATA_PREFIX when one is set.
AWS_LOCATION = '{}/{}'.format(APPDATA_PREFIX, AWS_UPLOADS_LOCATION) if APPDATA_PREFIX else AWS_UPLOADS_LOCATION
AWS_RELEASE_LOCATION = '{}/{}'.format(APPDATA_PREFIX, AWS_RELEASE_LOCATION) if APPDATA_PREFIX else AWS_RELEASE_LOCATION
AWS_RELEASE_MANIFEST = '{}/manifest.json'.format(AWS_RELEASE_LOCATION)
S3_URL = 'http://{}.s3.amazonaws.com'.format(APPDATA_BUCKET)
#DEFAULT_FILE_STORAGE = 'benchmark.utils.s3.MediaS3BotoStorage'
AWS_ACCESS_KEY_ID = env("AWS_ACCESS_KEY_ID", default='')
AWS_SECRET_ACCESS_KEY = env("AWS_SECRET_ACCESS_KEY", default='')
AWS_S3_REGION_NAME = AWS_REGION
AWS_S3_OBJECT_PARAMETERS = {
    'ACL': 'bucket-owner-full-control'
}
AWS_DEFAULT_ACL = 'bucket-owner-full-control'
AWS_BUCKET_ACL = 'bucket-owner-full-control'

# Media files
MEDIA_URL = '/media/'
MEDIA_ROOT = str(ROOT_DIR('media'))

# Configuration for apps
FIXTURE_DIRS = (
    str(ROOT_DIR('fixtures')),
)

# New users activation signing salt
ACCOUNT_ACTIVATION_SALT = SECRET_KEY
# New users activation period
ACCOUNT_ACTIVATION_DAYS = 7

CRISPY_TEMPLATE_PACK = 'bootstrap3'
LOGIN_URL = '/account/login/'
# Redirect to page after login
LOGIN_REDIRECT_URL = '/'

# if we have sentry credentials - use it
SENTRY_DSN = env("SENTRY_DSN", default=None)
if SENTRY_DSN:
    INSTALLED_APPS += [
        'raven.contrib.django.raven_compat',
    ]
    RAVEN_CONFIG = {
        'dsn': SENTRY_DSN,
        'release': APP_REFERENCE,
        'environment': ENVIRONMENT
    }
|
import cv2
import os
import sys
# Load the Haar cascade for frontal-face detection (from github/opencv/data).
# (Original comment was mojibake-garbled Japanese; intent reconstructed from
# the file path.)
cascade_file = './Cascade/haarcascade_frontalface_alt.xml'
cascade = cv2.CascadeClassifier(cascade_file)
def mosaic(img, rect, size):
    """Return a copy of img with the rect region pixelated.

    rect is (x1, y1, x2, y2); size is the side length of the coarse
    intermediate image — smaller values give a blockier mosaic.
    """
    x1, y1, x2, y2 = rect
    width, height = x2 - x1, y2 - y1
    region = img[y1:y2, x1:x2]
    # Shrink the region, then blow it back up to its original footprint to
    # produce the pixelation effect.
    shrunk = cv2.resize(region, (size, size))
    blocky = cv2.resize(shrunk, (width, height), interpolation=cv2.INTER_AREA)
    result = img.copy()
    result[y1:y2, x1:x2] = blocky
    return result
# --- Script entry: pixelate every detected face in the image named on argv ---
img_dir = './Images'
# sys.argv[0] is the script name; the image filename is expected as argv[1].
filename = sys.argv
Pass = os.path.join(img_dir,filename[1])
img = cv2.imread(Pass)
print('*'*20)
print(Pass)
print('*'*20)
# Detect faces with the cascade; returns a list of (x, y, w, h) boxes.
# (Original comment was mojibake-garbled Japanese.)
img_gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
face_list = cascade.detectMultiScale(img_gray,minSize=(10,10))
for (x,y,w,h) in face_list:
    img = mosaic(img,(x,y,x+w,y+h),5)
# NOTE(review): output name is "<input>_mosic.jpg" — "mosic" looks like a typo
# for "mosaic"; left as-is since it is runtime behavior.
cv2.imwrite(Pass + '_mosic.jpg',img)
from django.contrib import admin
from django.urls import include,path
from profile.views import profile_form,update_profile,get_profile_data,return_profile_page
# URL routes for the profile app; each route delegates to a view imported
# from profile.views above.
# NOTE(review): the route names below contain spaces ("Update Profile") —
# unusual for Django URL names, which are normally snake_case; confirm how
# templates/reverse() refer to them before renaming.
urlpatterns = [
    path('<int:user_id>/edit/', profile_form, name="profile_form"),
    path('update_profile/', update_profile, name="Update Profile"),
    path('data/<int:user_id>/', get_profile_data, name="Profile Data"),
    path('<int:user_id>/', return_profile_page, name="Profile Page"),
]
|
"""
Reads in an entire file, removes extraneous whitespace, and returns the
data as an array of strings
"""
def ingestFile(fname):
print "Attempting to read" + fname
f = open(fname)
return [line.strip() for line in f.readlines() if len(line.strip()) > 0] # Get rid of all the extra whitespace
""" Searches through a given input file to find the list of ranked (or actual) players for a
given week and a given position. If the data cannot be found, an empty list is returned"""
def find(lines, week, pos):
try:
startIndex = lines.index(week) #Find the week index
except ValueError:
return [] #No data for week
try:
startIndex = lines.index(pos, startIndex) #From there, look for the position
except ValueError:
return [] #No data for position
endIndex = startIndex + 1
while len(lines[endIndex].split()) > 1: #Keep going till you find a line with no spaces
endIndex = endIndex + 1
return lines[startIndex+1:endIndex]
"""Checks to see if a player is in the database. If they are not, we can insert them. Either way, returns
the unique identifier (idp) of the player"""
def lookupPlayer(cur, playerName, playerTeam, position):
#TODO: We currently lookup only by name, but there could be the case where two
#players with the same name are actually different players. For example, there
#were two Steve Smiths playing a few years back.
#If we lookup by team and name however, we fail to track players in and out of
#free agency, as well as when they get traded.
lookup = "select idp from player where name = %s"
cur.execute(lookup, (playerName))
rows = cur.fetchall()
if len(rows) == 0:
print "Creating a new record for ", playerName, playerTeam
insert = "insert into player(name, team, position) values (%s,%s,%s)"
cur.execute(insert, (playerName, playerTeam, position));
id = cur.lastrowid
cur.execute("commit")
return id
else:
return rows[0][0]
def validTeams():
    """Return the team codes we recognize: the 32 NFL teams plus 'FA' for
    free-agent players."""
    codes = ('NO NE GB DET CAR NYG ATL PHI '
             'PIT SD DEN BAL DAL HOU CHI SF '
             'WAS CIN TB IND BUF OAK STL TEN '
             'KC MIN SEA NYJ JAX MIA ARI CLE FA')
    return codes.split()
"""Cleans up the various different incarnations of team abbreviations and standardizes them"""
def sanitizeTeam(input):
#Some sites use non-standard abbreviations for teams just as Jacksonville or Washington, this simply standardizes
commonAliases = {"JAC" : "JAX", "WSH": "WAS", "" : "FA"}
team = input.upper().strip(); #Get rid of capitalization issues and padding
if team in commonAliases:
team = commonAliases[team]
if team in validTeams():
return team
else:
raise ValueError('Could not determine a mapping for teamCode [%s]' % input)
""" This function is currently manually maintained, we track any players whose names vary across sites"""
def sanitizePlayerName(input):
aliasDict = {
"Robert Griffin" : "Robert Griffin III",
"Ben Watson" : "Benjamin Watson",
"Dave Thomas" : "David Thomas",
"Steve Johnson" : "Stevie Johnson"
}
if input in aliasDict:
return aliasDict[input]
else:
return input
|
class Solution:
    def getSmallestString(self, n: int, k: int) -> str:
        """Return the lexicographically smallest length-n string whose
        character values (a=1 .. z=26) sum to k.

        Greedy from the right: reserve 1 ('a') for every position, then
        spend the remaining budget on the last positions first.
        """
        remaining = k - n  # budget above the all-'a' baseline
        letters = ['a'] * n
        for pos in reversed(range(n)):
            boost = min(25, remaining)  # at most 'z' (= 'a' + 25)
            letters[pos] = chr(ord('a') + boost)
            remaining -= boost
        return ''.join(letters)
|
import shapely.geometry
class Feature:
    """A shapely geometry plus a mutable tag dictionary, serializable to
    GeoJSON."""

    def __init__(self, geometry: shapely.geometry.base.BaseGeometry, tags: dict = None):
        """
        :type geometry: shapely.geometry.base
        """
        assert isinstance(geometry, shapely.geometry.base.BaseGeometry)
        self.geometry = geometry
        # Keep the caller's dict (no copy) when one is given, preserving the
        # aliasing behavior callers may rely on.
        self._tags = tags or {}

    def set_tag(self, key: str, value: str):
        """Set a single tag."""
        self._tags[key] = value

    def get_tag(self, key: str) -> str:
        """Return the value for key (raises KeyError if absent)."""
        return self._tags[key]

    @property
    def geojson(self):
        """This feature rendered as a GeoJSON Feature dict."""
        return {
            'type': "Feature",
            'geometry': shapely.geometry.mapping(self.geometry),
            'properties': self._tags,
        }

    @property
    def tags(self):
        """The underlying tag dict (not a copy)."""
        return self._tags

    def __str__(self):
        return str(self._tags)

    @staticmethod
    def from_geojson(dct: dict):
        """Build a Feature from a GeoJSON Feature dict."""
        return Feature(shapely.geometry.shape(dct['geometry']), dct['properties'])
class ImmutableFeature:
    """Hashable snapshot of a Feature: geometry plus tags frozen as a
    sorted item tuple."""

    def __init__(self, feature: Feature):
        self.geometry = feature.geometry
        # A sorted tuple of items is a canonical, hashable form of the dict.
        self.tags = tuple(sorted(feature.tags.items()))

    def __eq__(self, other):
        return (self.geometry == other.geometry
                and self.tags == other.tags)

    def __hash__(self):
        # Geometries are unhashable, so hash the WKT text instead.
        return hash((self.geometry.wkt, self.tags))

    def to_feature(self):
        """Convert back to a mutable Feature."""
        return Feature(self.geometry, dict(self.tags))
|
"""Module tests functionalty in write.py"""
import unittest
from src.write import simple_writer, list_writer, writers, text_writer
from src.file_type import get_data_type
class TestWriteSimple(unittest.TestCase):
    """Exercises simple_writer: lines written out must round-trip unchanged."""

    def setUp(self) -> None:
        self.sample_data = ["line 1\n", "line 2\n", "line 3\n"]
        self.expected_data = ["line 1\n", "line 2\n", "line 3\n"]
        self.output_file_name = "./tests/output_sample_file.txt"
        simple_writer(self.sample_data, self.output_file_name)

    def test_simple_write(self):
        # Read back what simple_writer produced and compare line-by-line.
        with open(self.output_file_name, 'r') as handle:
            written = handle.readlines()
        self.assertEqual(written, self.expected_data)

    def tearDown(self) -> None:
        import os
        os.remove(self.output_file_name)
class TestWritelistOfDicts(unittest.TestCase):
    """Exercises list_writer: each dict becomes a tab-separated header line
    followed by a tab-separated value line."""

    def setUp(self) -> None:
        self.list_of_dicts = [
            {'key1': 'value1', 'key2': 'value2'},
            {'key3': 'value3', 'key4': 'value4'},
            {'key5': 'value5', 'key6': 'value6'},
        ]
        self.expected_data = [
            'key1\tkey2\n',
            'value1\tvalue2\n',
            'key3\tkey4\n',
            'value3\tvalue4\n',
            'key5\tkey6\n',
            'value5\tvalue6\n',
        ]
        self.output_file = "./tests/list_writer_output.txt"
        list_writer(self.list_of_dicts, self.output_file)
        with open(self.output_file, 'r') as handle:
            self.written_data = handle.readlines()

    def test_write_list_of_dicts(self):
        self.assertEqual(self.written_data, self.expected_data)

    def tearDown(self) -> None:
        import os
        os.remove(self.output_file)
class TestWriters(unittest.TestCase):
    """Checks the writers registry maps detected file types to the expected
    writer callables."""

    def test_writers_returns_list_writer_for_json(self):
        """List writer is required as this handles multiple blocks of json"""
        detected = get_data_type("./tests/json_testing_sample.json")
        self.assertEqual(writers[detected], list_writer)

    def test_writers_returns_text_writer_for_text(self):
        detected = get_data_type("./tests/ingest_testing_sample.txt")
        self.assertEqual(writers[detected], text_writer)
|
from ossConfig import ossConfig
import Oss
# Demo script: read, replace and delete the CORS configuration of a bucket.
# Placeholder credentials — never commit real keys.
access_key = 'XXXXXXXXX'
secret_key = 'XXXXXXXXXXXXXXXXXXX'
endpoint_url = 'http://XXXXXXXXXXXXXXXXX.com'
config = ossConfig(access_key, secret_key, endpoint_url)
bucket_name = 'ddddd1'

# get_corsConfig — Oss.get_bucket_cors returns False when the bucket has no
# CORS rules configured.
CORSConfiguration = Oss.get_bucket_cors(config, bucket_name)
if CORSConfiguration is False:
    print("The specified bucket does not have CORS configured")
else:
    print(CORSConfiguration)

# put_corsConfig
CORSRules = [
    {
        'AllowedHeaders': [
            'string111',
        ],
        'AllowedMethods': [
            'PUT',
            'POST',
        ],
        'AllowedOrigins': [
            'string',
        ],
        'ExposeHeaders': [
            'string',
        ],
        'MaxAgeSeconds': 123
    },
]
if Oss.put_bucket_cors(config, bucket_name, CORSRules):
    # Fixed typo in the original message ("succes!").
    print("put bucket cors success!")
else:
    print("put bucket cors failed!")

# delete_corsConfig
if Oss.delete_bucket_cors(config, bucket_name):
    # Fixed typo in the original message ("sucess!").
    print("delete success!")
else:
    print("delete failed!")
|
from mock.mock import Mock
from base import GAETestCase
from web import listar
from usuario.model import Usuario
import json
class RestTests(GAETestCase):
    """Integration tests for the usuario REST handlers on a GAE test datastore."""

    def test_listar(self):
        # Seed the datastore with one user, then build the expected JSON
        # payload from a fresh query; the handler must write exactly that.
        usuario = Usuario(nome='teste', email='teste@teste.tst', google_id=123)
        usuario.put()
        usuarios = Usuario.query().fetch()
        lista_dict = [{"nome": usu.nome, "email": usu.email, "google_id": usu.google_id, "id": usu.key.id()} for usu in usuarios]
        resposta_mock = Mock()
        listar.listar(resposta_mock)
        json_str = json.dumps(lista_dict)
        resposta_mock.write.assert_called_once_with(json_str)

    def test_salvar(self):
        resp = Mock()
        # NOTE(review): `rest` is never imported in this module (only `listar`
        # is) — this line raises NameError as written; presumably a `rest`
        # module should be imported from `web` alongside `listar`.
        rest.salvar(resp, 'teste', 'teste@teste.com', 1234)
        lista = Usuario.query().fetch()
        # NOTE(review): assertEquals is a deprecated alias of assertEqual.
        self.assertEquals(1, len(lista))
        usuario = lista[0]
        # NOTE(review): the user above is created with field `nome`, but this
        # asserts `firstname` — one of the two looks wrong; confirm the field
        # name on the Usuario model.
        self.assertEqual('teste', usuario.firstname)
        self.assertEqual('teste@teste.com', usuario.email)
# Copyright 2021 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import dpctl
import dpctl.tensor as dpt
import numpy as np
import pytest
from numba import njit
import numba_dppy as dppy
from numba_dppy.tests._helper import skip_test
# Dtypes the kernel is exercised with: the int/float widths used by the tests.
list_of_dtype = [
    np.int32,
    np.int64,
    np.float32,
    np.float64,
]


@pytest.fixture(params=list_of_dtype)
def dtype(request):
    # Parametrized fixture: each test using it runs once per dtype.
    return request.param


# USM (unified shared memory) allocation kinds passed to dpt.usm_ndarray.
list_of_usm_type = [
    "shared",
    "device",
    "host",
]


@pytest.fixture(params=list_of_usm_type)
def usm_type(request):
    # Parametrized fixture: each test using it runs once per USM type.
    return request.param
def test_consuming_usm_ndarray(offload_device, dtype, usm_type):
    """Verify a @dppy.kernel can consume dpctl.tensor.usm_ndarray arguments:
    vector-add two USM arrays on the device and compare against NumPy.

    NOTE(review): `offload_device` is requested as a fixture but is not
    defined in this module — presumably it lives in a shared conftest.py;
    confirm.
    """
    if skip_test(offload_device):
        pytest.skip()

    @dppy.kernel
    def data_parallel_sum(a, b, c):
        """
        Vector addition using the ``kernel`` decorator.
        """
        i = dppy.get_global_id(0)
        c[i] = a[i] + b[i]

    global_size = 1021
    N = global_size
    a = np.array(np.random.random(N), dtype=dtype)
    b = np.array(np.random.random(N), dtype=dtype)
    got = np.ones_like(a)

    with dppy.offload_to_sycl_device(offload_device) as gpu_queue:
        # Allocate USM buffers on the queue and copy the host data in as raw
        # bytes (the "|u1" view reinterprets each array as uint8).
        da = dpt.usm_ndarray(
            a.shape,
            dtype=a.dtype,
            buffer=usm_type,
            buffer_ctor_kwargs={"queue": gpu_queue},
        )
        da.usm_data.copy_from_host(a.reshape((-1)).view("|u1"))

        db = dpt.usm_ndarray(
            b.shape,
            dtype=b.dtype,
            buffer=usm_type,
            buffer_ctor_kwargs={"queue": gpu_queue},
        )
        db.usm_data.copy_from_host(b.reshape((-1)).view("|u1"))

        dc = dpt.usm_ndarray(
            got.shape,
            dtype=got.dtype,
            buffer=usm_type,
            buffer_ctor_kwargs={"queue": gpu_queue},
        )

        data_parallel_sum[global_size, dppy.DEFAULT_LOCAL_SIZE](da, db, dc)
        # Copy the device result back into `got`, again as raw bytes.
        dc.usm_data.copy_to_host(got.reshape((-1)).view("|u1"))

    expected = a + b
    assert np.array_equal(got, expected)
|
import grpcapi.server.v1
from route_guide_pb2 import (
Point,
Feature,
Rectangle,
RouteNote,
RouteSummary,
)
import grpc
from concurrent.futures import ThreadPoolExecutor
from typing import Iterable
import logging
logging.basicConfig(level=logging.DEBUG)

# App collects the RPC handlers registered by the decorators below and later
# installs them on the grpc server via add_handlers().
app = grpcapi.server.v1.App()


@app.unary_unary("/routeguide.RouteGuide/GetFeature")
def get_feature(request: Point, context: grpc.ServicerContext) -> Feature:
    # Stub implementation: always returns a fixed feature.
    return Feature(name="feature")


@app.unary_stream("/routeguide.RouteGuide/ListFeatures")
def list_features(
    request: Rectangle, context: grpc.ServicerContext
) -> Iterable[Feature]:
    # Server-streaming stub: emits two fixed features.
    yield Feature(name="feature1")
    yield Feature(name="feature2")


@app.stream_unary("/routeguide.RouteGuide/RecordRoute")
def record_route(
    requests: Iterable[Point], context: grpc.ServicerContext
) -> RouteSummary:
    # Client-streaming stub: drains the request stream and reports how many
    # points were received.
    count = len(list(requests))
    return RouteSummary(point_count=count)


@app.stream_stream("/routeguide.RouteGuide/RouteChat")
def route_chat(
    requests: Iterable[RouteNote], context: grpc.ServicerContext
) -> Iterable[RouteNote]:
    # Bidirectional-streaming stub: echoes each note back.
    for note in requests:
        yield note


# Wire the handlers into a threaded gRPC server and serve until terminated.
server = grpc.server(ThreadPoolExecutor())
app.add_handlers(server)
server.add_insecure_port("[::]:50051")
server.start()
server.wait_for_termination()
|
import time
class MemoryMetrics():
    """Snapshot of a machine's memory usage, stamped with the local date and
    time at which the sample was created."""

    def __init__(self, percent, available, total, pc_id):
        self._percent = percent
        self._available = available
        self._total = total
        # Creation date and clock time are recorded as separate fields.
        self._created_at = time.strftime('%Y-%m-%d')
        self._hour_at = time.strftime('%H:%M:%S')
        self._pc_id = pc_id

    @property
    def percent(self):
        """Memory-usage percentage exactly as supplied by the collector."""
        return self._percent

    @property
    def available(self):
        """Available memory as a percentage of total, rounded to 1 decimal."""
        return round(self._available * 100 / self._total, 1)

    @property
    def created_at(self):
        """Sample date, formatted 'YYYY-MM-DD'."""
        return self._created_at

    @property
    def hour_at(self):
        """Sample clock time, formatted 'HH:MM:SS'."""
        return self._hour_at

    @property
    def pc_id(self):
        """Identifier of the machine the sample came from."""
        return self._pc_id
from random import seed
from random import random
f = open("List2.txt", "r")
y = f.read().rstrip().split(",")
d = list(map(int, y))
def main(x):
    """Run the Intcode program stored in list x, mutating it in place.

    Instructions are 4 cells wide: opcode (1 = add, 2 = multiply), two
    operand positions and a destination position.  Opcode 99 halts.
    """
    i = 0
    # Bug fix: the loop bound used len(d) — the module-level program read
    # from List2.txt — instead of len(x), the program actually being run.
    while i < len(x):
        # Bug fix: the branches were independent ifs, so when an add wrote
        # the value 2 into the current cell the multiply also fired.  Each
        # instruction must execute exactly once, hence elif.
        if x[i] == 1:
            x[x[i + 3]] = x[x[i + 1]] + x[x[i + 2]]
        elif x[i] == 2:
            x[x[i + 3]] = x[x[i + 1]] * x[x[i + 2]]
        elif x[i] == 99:
            break
        i = i + 4
    #print(d[0])
def opcode1(d, i):
    """Intcode add: store d[d[i+1]] + d[d[i+2]] at position d[i+3] of d."""
    dest = d[i + 3]
    lhs, rhs = d[i + 1], d[i + 2]
    d[dest] = d[lhs] + d[rhs]
def opcode2(d, i):
    """Intcode multiply: store d[d[i+1]] * d[d[i+2]] at position d[i+3] of d."""
    dest = d[i + 3]
    lhs, rhs = d[i + 1], d[i + 2]
    d[dest] = d[lhs] * d[rhs]
def write(x, y):
    # Poke value y into position x of the module-level program `d`.
    d[x] = y
def findComb(x):
    """Brute-force the noun/verb pair (program positions 1 and 2) that makes
    the module-level program `d` leave x in position 0, and print it.

    Each candidate pair runs on a fresh copy of `d`, so runs don't
    contaminate each other.
    """
    # Bug fix: range(99) stops at 98, so 99 was never tried for either
    # slot; both slots must range over 0..99 inclusive.
    for y in range(100):
        for z in range(100):
            e = [int(a) for a in d]
            e[1] = y
            e[2] = z
            try:
                main(e)
            except IndexError:
                # Out-of-range addresses just mean this pair is invalid.
                pass
            if e[0] == x:
                # Bug fix: the original message was truncated after
                # "Thus 100 * "; report the conventional 100*noun+verb code.
                print(f"{e[0]} with numbers {y} and {z}. "
                      f"Thus 100 * {y} + {z} = {100 * y + z}")
findComb(19690720)
|
from adder import adder
def multiplier(nb1, nb2):
    """Multiply two integers by shift-and-add, delegating each addition to
    adder().

    NOTE(review): assumes nb2 is non-negative — Python's arithmetic right
    shift never drives a negative number to zero, so a negative nb2 would
    loop forever; confirm the intended input domain.
    """
    result = 0
    shift = 0
    # Classic binary multiplication: for every set bit of nb2, accumulate
    # nb1 shifted into that bit's position.
    while nb2 != 0:
        if nb2 % 2 == 1:
            result = adder(result, nb1 << shift)
        shift += 1
        nb2 >>= 1
    return result
|
import sys
from webbpsf_ext.logging_utils import setup_logging as setup_logging_wext
from . import conf
import logging
# Package-level logger for pynrc.
_log = logging.getLogger('pynrc')

# Sentinel stored in conf.logging_filename to mean "no file logging"
# (the astropy config system cannot store None).
_DISABLE_FILE_LOGGING_VALUE = 'none'

import warnings
# NOTE(review): this silences *all* warnings for any process that imports
# pynrc — very broad; consider scoping to specific warning categories.
warnings.filterwarnings('ignore')
### Helper routines for logging: ###
class FilterLevelRange(object):
    """Logging filter that passes only records whose level falls within
    [min_level, max_level] inclusive."""

    def __init__(self, min_level, max_level):
        self.min_level = min_level
        self.max_level = max_level

    def filter(self, record):
        # logging treats any truthy return as "keep"; preserve the original
        # 1/0 integer results.
        in_range = self.min_level <= record.levelno <= self.max_level
        return 1 if in_range else 0
def restart_logging(verbose=True):
    """Restart Logging

    Restart logging using the same settings as those currently
    stored in conf.logging_level. This function was shamelessly
    stolen from WebbPSF utils.py.

    Parameters
    ----------
    verbose : boolean
        Should this function print the new logging targets to
        standard output?
    """
    level = str(conf.logging_level).upper()
    lognames = ['pynrc', 'webbpsf', 'poppy']

    # Start from a clean slate on the root logger.
    root_logger = logging.getLogger()
    root_logger.handlers = []

    if level in ['DEBUG', 'INFO', 'WARN', 'ERROR', 'CRITICAL']:
        level_id = getattr(logging, level)  # obtain one of the DEBUG, INFO, WARN,
                                            # or ERROR constants
        if verbose:
            print(f"pyNRC log messages of level {level} and above will be shown.")
    elif level == 'NONE':
        root_logger.handlers = []  # n.b. this will clear any handlers other libs/users configured
        return
    else:
        raise ValueError("Invalid logging level: {}".format(level))

    for name in lognames:
        logger = logging.getLogger(name)
        logger.setLevel(level_id)

    # Set up screen logging: INFO and below go to stdout, WARNING and above
    # to stderr, so the two streams can be separated in pipelines.
    stdout_handler = logging.StreamHandler(stream=sys.stdout)
    stdout_handler.addFilter(FilterLevelRange(
        min_level=logging.DEBUG,
        max_level=logging.INFO
    ))
    stderr_handler = logging.StreamHandler(stream=sys.stderr)
    stderr_handler.addFilter(FilterLevelRange(
        min_level=logging.WARNING,
        max_level=logging.CRITICAL
    ))

    formatter = logging.Formatter(conf.logging_format_screen)
    stderr_handler.setFormatter(formatter)
    stdout_handler.setFormatter(formatter)
    root_logger.addHandler(stdout_handler)
    root_logger.addHandler(stderr_handler)

    if verbose:
        print("pyNRC log outputs will be directed to the screen.")

    # Set up file logging.
    filename = conf.logging_filename
    # Bug fix: the original condition was `filename is None or ...`, so a
    # None filename fell through to logging.FileHandler(None) and crashed.
    # File logging is configured only when a real filename (not None and not
    # the 'none' sentinel) is set.
    if filename is not None and filename.strip().lower() != _DISABLE_FILE_LOGGING_VALUE:
        hdlr = logging.FileHandler(filename)
        formatter = logging.Formatter(conf.logging_format_file)
        hdlr.setFormatter(formatter)
        root_logger.addHandler(hdlr)

        if verbose:
            print("pyNRC log outputs will also be saved to file {}".format(filename))
def setup_logging(level='INFO', filename=None, verbose=True):
    """Configure pyNRC logging detail and output locations (screen and/or
    file).

    A convenience wrapper around Python's built-in logging package,
    shamelessly stolen from WebbPSF utils.py.  By default log messages are
    written to the screen; pass a filename to also record them to a file.
    Editing the WebbPSF config file to set `autoconfigure_logging = True`
    (plus any settings you wish to persist) instructs WebbPSF to apply your
    settings on import; that is not done by default in case you have
    configured `logging` yourself.  For more advanced log handling, see the
    Python logging module's own documentation.

    All this function actually does is push the requested settings into the
    astropy config system and then call restart_logging() to apply them
    (webbpsf_ext is kept in sync as well).

    Parameters
    -------------
    level : str
        Name of log output to show. Defaults to 'INFO'; set to 'DEBUG'
        for more extensive messages, or to 'WARN' or 'ERROR' for fewer.
    filename : str, optional
        Filename to write the log output to. If not set, output will
        just be displayed on screen. (Default: None)

    Examples
    -----------
    >>> pynrc.setup_logging(filename='pynrc_log.txt')

    This will save all log messages to 'pynrc_log.txt' in the current
    directory.

    >>> pynrc.setup_logging(level='WARN')

    This will show only WARNING or ERROR messages on screen, and not
    save any logs to files at all (since the filename argument is None).
    """
    # Normalize the level name; the config system only accepts 'WARN'.
    level = str(level).upper()
    if level == 'WARNING':
        level = 'WARN'

    # The astropy config system enforces the limited set of level names by
    # raising TypeError here on an invalid string.
    conf.logging_level = level

    if filename is None:
        # astropy.config cannot store None, so 'none' is the sentinel for
        # "no file logging"; note the sentinel (not None) is also what gets
        # forwarded to webbpsf_ext below.
        filename = _DISABLE_FILE_LOGGING_VALUE
    conf.logging_filename = filename

    restart_logging(verbose=verbose)
    setup_logging_wext(level=level, filename=filename, verbose=False)
|
# coding: utf-8
"""
Xero Finance API
The Finance API is a collection of endpoints which customers can use in the course of a loan application, which may assist lenders to gain the confidence they need to provide capital. # noqa: E501
Contact: api@xero.com
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
from xero_python.models import BaseModel
class PnlAccountType(BaseModel):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech
    Do not edit the class manually.
    """

    # Attribute name -> attribute type, as declared in the OpenAPI document.
    openapi_types = {"total": "float", "title": "str", "accounts": "list[PnlAccount]"}

    # Attribute name -> JSON key in the API definition.
    attribute_map = {"total": "total", "title": "title", "accounts": "accounts"}

    def __init__(self, total=None, title=None, accounts=None):  # noqa: E501
        """PnlAccountType - a model defined in OpenAPI"""  # noqa: E501
        self._total = None
        self._title = None
        self._accounts = None
        self.discriminator = None

        # Route supplied values through the property setters; omitted
        # (None) arguments leave the backing fields at None.
        for name, value in (("total", total), ("title", title), ("accounts", accounts)):
            if value is not None:
                setattr(self, name, value)

    @property
    def total(self):
        """Gets the total of this PnlAccountType.

        Total movement on this account type.

        :return: The total of this PnlAccountType.
        :rtype: float
        """
        return self._total

    @total.setter
    def total(self, total):
        """Sets the total of this PnlAccountType.

        :param total: Total movement on this account type.
        :type: float
        """
        self._total = total

    @property
    def title(self):
        """Gets the title of this PnlAccountType.

        Name of this account type: Trading Income or Other Income for the
        Revenue section; Direct Cost or Operating Expenses for the Expense
        section.

        :return: The title of this PnlAccountType.
        :rtype: str
        """
        return self._title

    @title.setter
    def title(self, title):
        """Sets the title of this PnlAccountType.

        :param title: Name of this account type.
        :type: str
        """
        self._title = title

    @property
    def accounts(self):
        """Gets the accounts of this PnlAccountType.

        A list of the movement on each account detail during the query
        period.

        :return: The accounts of this PnlAccountType.
        :rtype: list[PnlAccount]
        """
        return self._accounts

    @accounts.setter
    def accounts(self, accounts):
        """Sets the accounts of this PnlAccountType.

        :param accounts: Movement on each account detail during the query period.
        :type: list[PnlAccount]
        """
        self._accounts = accounts
|
# -*- coding: utf-8 -*-
from pysped.xml_sped import *
#from soap_100 import SOAPEnvio, SOAPRetorno, conectar_servico
from pysped.nfe.manual_300 import ESQUEMA_ATUAL
import os
import random
DIRNAME = os.path.dirname(__file__)
class ISSQN(XMLNFe):
    """ISSQN (municipal service tax) group of an NF-e item."""

    def __init__(self):
        super(ISSQN, self).__init__()
        raiz = u'//det/imposto/ISSQN'
        self.vBC = TagDecimal(nome=u'vBC', codigo=u'U02', tamanho=[1, 15, 1], decimais=[0, 2, 2], raiz=raiz)
        self.vAliq = TagDecimal(nome=u'vAliq', codigo=u'U03', tamanho=[1, 5, 1], decimais=[0, 2, 2], raiz=raiz)
        self.vISSQN = TagDecimal(nome=u'vISSQN', codigo=u'U04', tamanho=[1, 15, 1], decimais=[0, 2, 2], raiz=raiz)
        self.cMunFG = TagInteiro(nome=u'cMunFG', codigo=u'U05', tamanho=[7, 7, 7], raiz=raiz)
        self.cListServ = TagInteiro(nome=u'cListServ', codigo=u'U06', tamanho=[3, 4], raiz=raiz)

    def _tags(self):
        # Child tags in serialization order.
        return (self.vBC, self.vAliq, self.vISSQN, self.cMunFG, self.cListServ)

    def get_xml(self):
        # Omit the whole group when no child tag carries a value.
        if not any(tag.valor for tag in self._tags()):
            return u''
        partes = [XMLNFe.get_xml(self), u'<ISSQN>']
        partes.extend(tag.xml for tag in self._tags())
        partes.append(u'</ISSQN>')
        return u''.join(partes)

    def set_xml(self, arquivo):
        if self._le_xml(arquivo):
            for tag in self._tags():
                tag.xml = arquivo

    xml = property(get_xml, set_xml)
class COFINSST(XMLNFe):
    """COFINS charged by tax substitution (COFINSST group)."""

    def __init__(self):
        super(COFINSST, self).__init__()
        raiz = u'//det/imposto/COFINS/COFINSST'
        self.vBC = TagDecimal(nome=u'vBC', codigo=u'T02', tamanho=[1, 15, 1], decimais=[0, 2, 2], raiz=raiz)
        self.pCOFINS = TagDecimal(nome=u'pCOFINS', codigo=u'T03', tamanho=[1, 5, 1], decimais=[0, 2, 2], raiz=raiz)
        self.qBCProd = TagDecimal(nome=u'qBCProd', codigo=u'T04', tamanho=[1, 16, 1], decimais=[0, 4, 4], raiz=raiz)
        self.vAliqProd = TagDecimal(nome=u'vAliqProd', codigo=u'T05', tamanho=[1, 16, 1], decimais=[0, 4, 4], raiz=raiz)
        self.vCOFINS = TagDecimal(nome=u'vCOFINS', codigo=u'T06', tamanho=[1, 15, 1], decimais=[0, 2, 2], raiz=raiz)

    def get_xml(self):
        # Omit the whole group when no child tag carries a value.
        tags = (self.vBC, self.pCOFINS, self.qBCProd, self.vAliqProd, self.vCOFINS)
        if not any(t.valor for t in tags):
            return u''
        xml = XMLNFe.get_xml(self) + u'<COFINSST>'
        if self.qBCProd.valor or self.vAliqProd.valor:
            # Charged as quantity times an amount per unit.
            xml += self.qBCProd.xml + self.vAliqProd.xml
        else:
            # Charged as a rate over a monetary base.
            xml += self.vBC.xml + self.pCOFINS.xml
        xml += self.vCOFINS.xml + u'</COFINSST>'
        return xml

    def set_xml(self, arquivo):
        if self._le_xml(arquivo):
            for t in (self.vBC, self.pCOFINS, self.qBCProd, self.vAliqProd, self.vCOFINS):
                t.xml = arquivo

    xml = property(get_xml, set_xml)
class TagCSTCOFINS(TagCaracter):
    """CST (tax situation code) tag for the COFINS group.

    Assigning ``valor`` reconfigures the linked COFINS group: it selects
    the inner tag name and XML root for the matching COFINS variant
    (COFINSAliq/COFINSQtde/COFINSNT/COFINSOutr) and marks which child
    tags are mandatory for that code.
    """

    def __init__(self, *args, **kwargs):
        super(TagCSTCOFINS, self).__init__(*args, **kwargs)
        self.nome = u'CST'
        self.codigo = u'S06'
        self.tamanho = [2, 2]
        self.raiz = u''
        # Back-reference to the owning COFINS group; assigned by COFINS.__init__.
        self.grupo_cofins = None

    def set_valor(self, novo_valor):
        super(TagCSTCOFINS, self).set_valor(novo_valor)
        # Nothing else to do until the tag is attached to a COFINS group.
        if not self.grupo_cofins:
            return None
        #
        # Start from a clean slate: mark every child tag as optional...
        #
        self.grupo_cofins.vBC.obrigatorio = False
        self.grupo_cofins.pCOFINS.obrigatorio = False
        self.grupo_cofins.vCOFINS.obrigatorio = False
        self.grupo_cofins.qBCProd.obrigatorio = False
        self.grupo_cofins.vAliqProd.obrigatorio = False
        #
        # ...and, for safety, zero the group's values whenever the tax
        # situation code is redefined.
        #
        self.grupo_cofins.vBC.valor = u'0.00'
        self.grupo_cofins.pCOFINS.valor = u'0.00'
        self.grupo_cofins.vCOFINS.valor = u'0.00'
        self.grupo_cofins.qBCProd.valor = u'0.00'
        self.grupo_cofins.vAliqProd.valor = u'0.00'
        #
        # For each tax situation code, redefine the XML root and which
        # COFINS tags are mandatory.
        #
        if self.valor in (u'01', u'02'):
            # Taxed as a rate over a monetary base.
            self.grupo_cofins.nome_tag = u'COFINSAliq'
            self.grupo_cofins.raiz_tag = u'//det/imposto/COFINS/COFINSAliq'
            self.grupo_cofins.vBC.obrigatorio = True
            self.grupo_cofins.pCOFINS.obrigatorio = True
            self.grupo_cofins.vCOFINS.obrigatorio = True
        elif self.valor == u'03':
            # Taxed as quantity times an amount per unit.
            self.grupo_cofins.nome_tag = u'COFINSQtde'
            self.grupo_cofins.raiz_tag = u'//det/imposto/COFINS/COFINSQtde'
            self.grupo_cofins.vCOFINS.obrigatorio = True
            self.grupo_cofins.qBCProd.obrigatorio = True
            self.grupo_cofins.vAliqProd.obrigatorio = True
        elif self.valor in (u'04', u'06', u'07', u'08', u'09'):
            # Not taxed: no child value is required.
            self.grupo_cofins.nome_tag = u'COFINSNT'
            self.grupo_cofins.raiz_tag = u'//det/imposto/COFINS/COFINSNT'
        else:
            # Other situations: every child tag is required.
            self.grupo_cofins.nome_tag = u'COFINSOutr'
            self.grupo_cofins.raiz_tag = u'//det/imposto/COFINS/COFINSOutr'
            self.grupo_cofins.vBC.obrigatorio = True
            self.grupo_cofins.pCOFINS.obrigatorio = True
            self.grupo_cofins.vCOFINS.obrigatorio = True
            self.grupo_cofins.qBCProd.obrigatorio = True
            self.grupo_cofins.vAliqProd.obrigatorio = True
        #
        # Point every tag of the COFINS group at the new root.
        #
        self.grupo_cofins.CST.raiz = self.grupo_cofins.raiz_tag
        self.grupo_cofins.vBC.raiz = self.grupo_cofins.raiz_tag
        self.grupo_cofins.pCOFINS.raiz = self.grupo_cofins.raiz_tag
        self.grupo_cofins.vCOFINS.raiz = self.grupo_cofins.raiz_tag
        self.grupo_cofins.qBCProd.raiz = self.grupo_cofins.raiz_tag
        self.grupo_cofins.vAliqProd.raiz = self.grupo_cofins.raiz_tag

    def get_valor(self):
        return self._valor_string

    # Rebind the property so this subclass's setter side effects run.
    valor = property(get_valor, set_valor)
class COFINS(XMLNFe):
    """COFINS tax group of an NF-e item (//det/imposto/COFINS).

    The concrete inner tag (COFINSAliq, COFINSQtde, COFINSNT or COFINSOutr)
    is chosen by the CST tag, whose setter reconfigures this group's tag
    name, XML root and mandatory fields.
    """

    def __init__(self):
        super(COFINS, self).__init__()
        self.nome_tag = u'COFINSAliq'
        self.raiz_tag = u'//det/imposto/COFINS/COFINSAliq'
        self.vBC = TagDecimal(nome=u'vBC', codigo=u'S07', tamanho=[1, 15, 1], decimais=[0, 2, 2], raiz=u'')
        self.pCOFINS = TagDecimal(nome=u'pCOFINS', codigo=u'S08', tamanho=[1, 5, 1], decimais=[0, 2, 2], raiz=u'')
        self.vCOFINS = TagDecimal(nome=u'vCOFINS', codigo=u'S11', tamanho=[1, 15, 1], decimais=[0, 2, 2], raiz=u'')
        self.qBCProd = TagDecimal(nome=u'qBCProd', codigo=u'S09', tamanho=[1, 16, 1], decimais=[0, 4, 4], raiz=u'')
        self.vAliqProd = TagDecimal(nome=u'vAliqProd', codigo=u'S10', tamanho=[1, 16, 1], decimais=[0, 4, 4], raiz=u'')
        self.CST = TagCSTCOFINS()
        # Link the CST tag back to this group, then assign the default CST;
        # the assignment runs TagCSTCOFINS.set_valor, which configures
        # nome_tag/raiz_tag and the mandatory flags above.
        self.CST.grupo_cofins = self
        self.CST.valor = u'07'

    def get_xml(self):
        #
        # Emit the child tags dictated by the current tax situation code.
        #
        xml = XMLNFe.get_xml(self)
        xml += u'<COFINS>'
        xml += '<' + self.nome_tag + u'>'
        xml += self.CST.xml
        if self.CST.valor in (u'01', u'02'):
            # Rate over a monetary base.
            xml += self.vBC.xml
            xml += self.pCOFINS.xml
            xml += self.vCOFINS.xml
        elif self.CST.valor == u'03':
            # Quantity times an amount per unit.
            xml += self.qBCProd.xml
            xml += self.vAliqProd.xml
            xml += self.vCOFINS.xml
        elif self.CST.valor in (u'04', u'06', u'07', u'08', u'09'):
            # Not taxed: only the CST itself is emitted.
            pass
        else:
            # Other situations: either form, depending on which values are set.
            if self.qBCProd.valor or self.vAliqProd.valor:
                xml += self.qBCProd.xml
                xml += self.vAliqProd.xml
            else:
                xml += self.vBC.xml
                xml += self.pCOFINS.xml
            xml += self.vCOFINS.xml
        xml += u'</' + self.nome_tag + u'></COFINS>'
        return xml

    def set_xml(self, arquivo):
        if self._le_xml(arquivo):
            #
            # To read the COFINS correctly, first discover which
            # tax-situation group is present in the XML.
            #
            if self._le_noh(u'//det/imposto/COFINS/COFINSAliq') is not None:
                self.CST.valor = u'01'
            elif self._le_noh(u'//det/imposto/COFINS/COFINSQtde') is not None:
                self.CST.valor = u'03'
            elif self._le_noh(u'//det/imposto/COFINS/COFINSNT') is not None:
                self.CST.valor = u'04'
            else:
                self.CST.valor = u'99'
            #
            # Setting CST above re-rooted the tags, so the values can now
            # be read safely...
            #
            self.CST.xml = arquivo
            self.vBC.xml = arquivo
            self.pCOFINS.xml = arquivo
            self.vCOFINS.xml = arquivo
            self.qBCProd.xml = arquivo
            self.vAliqProd.xml = arquivo

    xml = property(get_xml, set_xml)
class PISST(XMLNFe):
    """PIS charged by tax substitution (PISST group)."""

    def __init__(self):
        super(PISST, self).__init__()
        raiz = u'//det/imposto/PIS/PISST'
        self.vBC = TagDecimal(nome=u'vBC', codigo=u'R02', tamanho=[1, 15, 1], decimais=[0, 2, 2], raiz=raiz)
        self.pPIS = TagDecimal(nome=u'pPIS', codigo=u'R03', tamanho=[1, 5, 1], decimais=[0, 2, 2], raiz=raiz)
        self.qBCProd = TagDecimal(nome=u'qBCProd', codigo=u'R04', tamanho=[1, 16, 1], decimais=[0, 4, 4], raiz=raiz)
        self.vAliqProd = TagDecimal(nome=u'vAliqProd', codigo=u'R05', tamanho=[1, 16, 1], decimais=[0, 4, 4], raiz=raiz)
        self.vPIS = TagDecimal(nome=u'vPIS', codigo=u'R06', tamanho=[1, 15, 1], decimais=[0, 2, 2], raiz=raiz)

    def get_xml(self):
        # Omit the whole group when no child tag carries a value.
        tags = (self.vBC, self.pPIS, self.qBCProd, self.vAliqProd, self.vPIS)
        if not any(t.valor for t in tags):
            return u''
        xml = XMLNFe.get_xml(self) + u'<PISST>'
        if self.qBCProd.valor or self.vAliqProd.valor:
            # Charged as quantity times an amount per unit.
            xml += self.qBCProd.xml + self.vAliqProd.xml
        else:
            # Charged as a rate over a monetary base.
            xml += self.vBC.xml + self.pPIS.xml
        xml += self.vPIS.xml + u'</PISST>'
        return xml

    def set_xml(self, arquivo):
        if self._le_xml(arquivo):
            for t in (self.vBC, self.pPIS, self.qBCProd, self.vAliqProd, self.vPIS):
                t.xml = arquivo

    xml = property(get_xml, set_xml)
class TagCSTPIS(TagCaracter):
    """CST (tax situation code) tag for the PIS group.

    Assigning ``valor`` reconfigures the linked PIS group: it selects the
    inner tag name and XML root for the matching PIS variant
    (PISAliq/PISQtde/PISNT/PISOutr) and marks which child tags are
    mandatory for that code.
    """

    def __init__(self, *args, **kwargs):
        super(TagCSTPIS, self).__init__(*args, **kwargs)
        self.nome = u'CST'
        self.codigo = u'Q06'
        self.tamanho = [2, 2]
        self.raiz = u''
        # Back-reference to the owning PIS group; assigned by PIS.__init__.
        self.grupo_pis = None

    def set_valor(self, novo_valor):
        super(TagCSTPIS, self).set_valor(novo_valor)
        # Nothing else to do until the tag is attached to a PIS group.
        if not self.grupo_pis:
            return None
        #
        # Start from a clean slate: mark every child tag as optional...
        #
        self.grupo_pis.vBC.obrigatorio = False
        self.grupo_pis.pPIS.obrigatorio = False
        self.grupo_pis.vPIS.obrigatorio = False
        self.grupo_pis.qBCProd.obrigatorio = False
        self.grupo_pis.vAliqProd.obrigatorio = False
        #
        # ...and, for safety, zero the group's values whenever the tax
        # situation code is redefined.
        #
        self.grupo_pis.vBC.valor = u'0.00'
        self.grupo_pis.pPIS.valor = u'0.00'
        self.grupo_pis.vPIS.valor = u'0.00'
        self.grupo_pis.qBCProd.valor = u'0.00'
        self.grupo_pis.vAliqProd.valor = u'0.00'
        #
        # For each tax situation code, redefine the XML root and which
        # PIS tags are mandatory.
        #
        if self.valor in (u'01', u'02'):
            # Taxed as a rate over a monetary base.
            self.grupo_pis.nome_tag = u'PISAliq'
            self.grupo_pis.raiz_tag = u'//det/imposto/PIS/PISAliq'
            self.grupo_pis.vBC.obrigatorio = True
            self.grupo_pis.pPIS.obrigatorio = True
            self.grupo_pis.vPIS.obrigatorio = True
        elif self.valor == u'03':
            # Taxed as quantity times an amount per unit.
            self.grupo_pis.nome_tag = u'PISQtde'
            self.grupo_pis.raiz_tag = u'//det/imposto/PIS/PISQtde'
            self.grupo_pis.vPIS.obrigatorio = True
            self.grupo_pis.qBCProd.obrigatorio = True
            self.grupo_pis.vAliqProd.obrigatorio = True
        elif self.valor in (u'04', u'06', u'07', u'08', u'09'):
            # Not taxed: no child value is required.
            self.grupo_pis.nome_tag = u'PISNT'
            self.grupo_pis.raiz_tag = u'//det/imposto/PIS/PISNT'
        else:
            # Other situations: every child tag is required.
            self.grupo_pis.nome_tag = u'PISOutr'
            self.grupo_pis.raiz_tag = u'//det/imposto/PIS/PISOutr'
            self.grupo_pis.vBC.obrigatorio = True
            self.grupo_pis.pPIS.obrigatorio = True
            self.grupo_pis.vPIS.obrigatorio = True
            self.grupo_pis.qBCProd.obrigatorio = True
            self.grupo_pis.vAliqProd.obrigatorio = True
        #
        # Point every tag of the PIS group at the new root.
        #
        self.grupo_pis.CST.raiz = self.grupo_pis.raiz_tag
        self.grupo_pis.vBC.raiz = self.grupo_pis.raiz_tag
        self.grupo_pis.pPIS.raiz = self.grupo_pis.raiz_tag
        self.grupo_pis.vPIS.raiz = self.grupo_pis.raiz_tag
        self.grupo_pis.qBCProd.raiz = self.grupo_pis.raiz_tag
        self.grupo_pis.vAliqProd.raiz = self.grupo_pis.raiz_tag

    def get_valor(self):
        return self._valor_string

    # Rebind the property so this subclass's setter side effects run.
    valor = property(get_valor, set_valor)
class PIS(XMLNFe):
    """PIS tax group of an NF-e item (//det/imposto/PIS).

    The concrete inner tag (PISAliq, PISQtde, PISNT or PISOutr) is chosen
    by the CST tag, whose setter reconfigures this group's tag name, XML
    root and mandatory fields.
    """

    def __init__(self):
        super(PIS, self).__init__()
        self.nome_tag = u'PISAliq'
        self.raiz_tag = u'//det/imposto/PIS/PISAliq'
        self.vBC = TagDecimal(nome=u'vBC', codigo=u'Q07', tamanho=[1, 15, 1], decimais=[0, 2, 2], raiz=u'')
        self.pPIS = TagDecimal(nome=u'pPIS', codigo=u'Q08', tamanho=[1, 5, 1], decimais=[0, 2, 2], raiz=u'')
        self.vPIS = TagDecimal(nome=u'vPIS', codigo=u'Q09', tamanho=[1, 15, 1], decimais=[0, 2, 2], raiz=u'')
        self.qBCProd = TagDecimal(nome=u'qBCProd', codigo=u'Q10', tamanho=[1, 16, 1], decimais=[0, 4, 4], raiz=u'')
        self.vAliqProd = TagDecimal(nome=u'vAliqProd', codigo=u'Q11', tamanho=[1, 16, 1], decimais=[0, 4, 4], raiz=u'')
        self.CST = TagCSTPIS()
        # Link the CST tag back to this group, then assign the default CST;
        # the assignment runs TagCSTPIS.set_valor, which configures
        # nome_tag/raiz_tag and the mandatory flags above.
        self.CST.grupo_pis = self
        self.CST.valor = u'07'

    def get_xml(self):
        #
        # Emit the child tags dictated by the current tax situation code.
        #
        xml = XMLNFe.get_xml(self)
        xml += u'<PIS>'
        xml += '<' + self.nome_tag + u'>'
        xml += self.CST.xml
        if self.CST.valor in (u'01', u'02'):
            # Rate over a monetary base.
            xml += self.vBC.xml
            xml += self.pPIS.xml
            xml += self.vPIS.xml
        elif self.CST.valor == u'03':
            # Quantity times an amount per unit.
            xml += self.qBCProd.xml
            xml += self.vAliqProd.xml
            xml += self.vPIS.xml
        elif self.CST.valor in (u'04', u'06', u'07', u'08', u'09'):
            # Not taxed: only the CST itself is emitted.
            pass
        else:
            # Other situations: either form, depending on which values are set.
            if self.qBCProd.valor or self.vAliqProd.valor:
                xml += self.qBCProd.xml
                xml += self.vAliqProd.xml
            else:
                xml += self.vBC.xml
                xml += self.pPIS.xml
            xml += self.vPIS.xml
        xml += u'</' + self.nome_tag + u'></PIS>'
        return xml

    def set_xml(self, arquivo):
        if self._le_xml(arquivo):
            #
            # To read the PIS correctly, first discover which
            # tax-situation group is present in the XML.
            #
            if self._le_noh(u'//det/imposto/PIS/PISAliq') is not None:
                self.CST.valor = u'01'
            elif self._le_noh(u'//det/imposto/PIS/PISQtde') is not None:
                self.CST.valor = u'03'
            elif self._le_noh(u'//det/imposto/PIS/PISNT') is not None:
                self.CST.valor = u'04'
            else:
                self.CST.valor = u'99'
            #
            # Setting CST above re-rooted the tags, so the values can now
            # be read safely...
            #
            self.CST.xml = arquivo
            self.vBC.xml = arquivo
            self.pPIS.xml = arquivo
            self.vPIS.xml = arquivo
            self.qBCProd.xml = arquivo
            self.vAliqProd.xml = arquivo

    xml = property(get_xml, set_xml)
class II(XMLNFe):
    """Import tax (II) group of an NF-e item."""

    def __init__(self):
        super(II, self).__init__()
        raiz = u'//det/imposto/II'
        self.vBC = TagDecimal(nome=u'vBC', codigo=u'P02', tamanho=[1, 15, 1], decimais=[0, 2, 2], raiz=raiz)
        self.vDespAdu = TagDecimal(nome=u'vDespAdu', codigo=u'P03', tamanho=[1, 15, 1], decimais=[0, 2, 2], raiz=raiz)
        self.vII = TagDecimal(nome=u'vII', codigo=u'P04', tamanho=[1, 15, 1], decimais=[0, 2, 2], raiz=raiz)
        self.vIOF = TagDecimal(nome=u'vIOF', codigo=u'P05', tamanho=[1, 15, 1], decimais=[0, 2, 2], raiz=raiz)

    def _tags(self):
        # Child tags in serialization order.
        return (self.vBC, self.vDespAdu, self.vII, self.vIOF)

    def get_xml(self):
        # Omit the whole group when no child tag carries a value.
        if not any(tag.valor for tag in self._tags()):
            return u''
        partes = [XMLNFe.get_xml(self), u'<II>']
        partes.extend(tag.xml for tag in self._tags())
        partes.append(u'</II>')
        return u''.join(partes)

    def set_xml(self, arquivo):
        if self._le_xml(arquivo):
            for tag in self._tags():
                tag.xml = arquivo

    xml = property(get_xml, set_xml)
class TagCSTIPI(TagCaracter):
    """CST (tax situation code) tag for the IPI group.

    Assigning ``valor`` reconfigures the linked IPI group: it selects the
    inner tag name and XML root for the matching IPI variant
    (IPITrib/IPINT) and marks which child tags are mandatory.
    """

    def __init__(self, *args, **kwargs):
        super(TagCSTIPI, self).__init__(*args, **kwargs)
        self.nome = u'CST'
        self.codigo = u'O09'
        self.tamanho = [2, 2]
        self.raiz = u''
        # Back-reference to the owning IPI group; assigned by IPI.__init__.
        self.grupo_ipi = None

    def set_valor(self, novo_valor):
        super(TagCSTIPI, self).set_valor(novo_valor)
        # Nothing else to do until the tag is attached to an IPI group.
        if not self.grupo_ipi:
            return None
        #
        # Start from a clean slate: mark every child tag as optional...
        #
        self.grupo_ipi.vBC.obrigatorio = False
        self.grupo_ipi.qUnid.obrigatorio = False
        self.grupo_ipi.vUnid.obrigatorio = False
        self.grupo_ipi.pIPI.obrigatorio = False
        self.grupo_ipi.vIPI.obrigatorio = False
        #
        # ...and, for safety, zero the group's values whenever the tax
        # situation code is redefined.
        #
        self.grupo_ipi.vBC.valor = u'0.00'
        self.grupo_ipi.qUnid.valor = u'0.00'
        self.grupo_ipi.vUnid.valor = u'0.00'
        self.grupo_ipi.pIPI.valor = u'0.00'
        self.grupo_ipi.vIPI.valor = u'0.00'
        #
        # For each tax situation code, redefine the XML root and which
        # IPI tags are mandatory.
        #
        if self.valor in (u'00', u'49', u'50', u'99'):
            # Taxed: every child value is required.
            self.grupo_ipi.nome_tag = u'IPITrib'
            self.grupo_ipi.raiz_tag = u'//det/imposto/IPI/IPITrib'
            self.grupo_ipi.vBC.obrigatorio = True
            self.grupo_ipi.qUnid.obrigatorio = True
            self.grupo_ipi.vUnid.obrigatorio = True
            self.grupo_ipi.pIPI.obrigatorio = True
            self.grupo_ipi.vIPI.obrigatorio = True
        else:
            # Not taxed: only the CST itself is emitted.
            self.grupo_ipi.nome_tag = u'IPINT'
            self.grupo_ipi.raiz_tag = u'//det/imposto/IPI/IPINT'
        #
        # Point every tag of the IPI group at the new root.
        #
        self.grupo_ipi.CST.raiz = self.grupo_ipi.raiz_tag
        self.grupo_ipi.vBC.raiz = self.grupo_ipi.raiz_tag
        self.grupo_ipi.qUnid.raiz = self.grupo_ipi.raiz_tag
        self.grupo_ipi.vUnid.raiz = self.grupo_ipi.raiz_tag
        self.grupo_ipi.pIPI.raiz = self.grupo_ipi.raiz_tag
        self.grupo_ipi.vIPI.raiz = self.grupo_ipi.raiz_tag

    def get_valor(self):
        return self._valor_string

    # Rebind the property so this subclass's setter side effects run.
    valor = property(get_valor, set_valor)
class IPI(XMLNFe):
    """IPI tax group of an NF-e item (//det/imposto/IPI).

    The concrete inner tag (IPITrib or IPINT) is chosen by the CST tag,
    whose setter reconfigures this group's tag name, XML root and
    mandatory fields.
    """

    def __init__(self):
        super(IPI, self).__init__()
        self.nome_tag = u'IPITrib'
        self.raiz_tag = u'//det/imposto/IPI/IPITrib'
        self.clEnq = TagCaracter(nome=u'clEnq', codigo=u'O02', tamanho=[5, 5], raiz=u'//det/imposto/IPI', obrigatorio=False)
        self.CNPJProd = TagCaracter(nome=u'CNPJProd', codigo=u'O03', tamanho=[14, 14], raiz=u'//det/imposto/IPI', obrigatorio=False)
        self.cSelo = TagCaracter(nome=u'cSelo', codigo=u'O04', tamanho=[1, 60], raiz=u'//det/imposto/IPI', obrigatorio=False)
        self.qSelo = TagInteiro(nome=u'qSelo', codigo=u'O05', tamanho=[1, 12], raiz=u'//det/imposto/IPI', obrigatorio=False)
        self.cEnq = TagCaracter(nome=u'cEnq', codigo=u'O06', tamanho=[3, 3], raiz=u'//det/imposto/IPI', valor=u'999')
        self.vBC = TagDecimal(nome=u'vBC', codigo=u'O10', tamanho=[1, 15, 1], decimais=[0, 2, 2], raiz=u'')
        self.qUnid = TagDecimal(nome=u'qUnid', codigo=u'O11', tamanho=[1, 16, 1], decimais=[0, 4, 4], raiz=u'')
        self.vUnid = TagDecimal(nome=u'vUnid', codigo=u'O12', tamanho=[1, 16, 1], decimais=[0, 4, 4], raiz=u'')
        self.pIPI = TagDecimal(nome=u'pIPI', codigo=u'O13', tamanho=[1, 5, 1], decimais=[0, 2, 2], raiz=u'')
        # BUGFIX: vIPI is field O14 in the NF-e layout; the original code
        # repeated O13 (pIPI's code) here.
        self.vIPI = TagDecimal(nome=u'vIPI', codigo=u'O14', tamanho=[1, 15, 1], decimais=[0, 2, 2], raiz=u'')
        self.CST = TagCSTIPI()
        # Link the CST tag back to this group, then assign the default CST;
        # the assignment runs TagCSTIPI.set_valor, which configures
        # nome_tag/raiz_tag and the mandatory flags above.
        self.CST.grupo_ipi = self
        self.CST.valor = u'52'

    def get_xml(self):
        # Omit the whole group when the CST marks it as non-taxed and no
        # child tag carries a value.
        if not ((self.CST.valor in (u'00', u'49', u'50', u'99')) or
                (self.qUnid.valor or self.vUnid.valor or self.vBC.valor or self.pIPI.valor or self.vIPI.valor)):
            return u''
        #
        # Emit the child tags dictated by the current tax situation code.
        #
        xml = XMLNFe.get_xml(self)
        xml += u'<IPI>'
        xml += self.clEnq.xml
        xml += self.CNPJProd.xml
        xml += self.cSelo.xml
        xml += self.qSelo.xml
        xml += self.cEnq.xml
        xml += '<' + self.nome_tag + u'>'
        xml += self.CST.xml
        if self.CST.valor in (u'00', u'49', u'50', u'99'):
            if self.qUnid.valor or self.vUnid.valor:
                # Charged as quantity times an amount per unit.
                xml += self.qUnid.xml
                xml += self.vUnid.xml
            else:
                # Charged as a rate over a monetary base.
                xml += self.vBC.xml
                xml += self.pIPI.xml
            xml += self.vIPI.xml
        xml += u'</' + self.nome_tag + u'></IPI>'
        return xml

    def set_xml(self, arquivo):
        if self._le_xml(arquivo):
            #
            # To read the IPI correctly, first discover which
            # tax-situation group is present in the XML.
            #
            if self._le_noh(u'//det/imposto/IPI/IPINT') is not None:
                self.CST.valor = u'01'
            else:
                self.CST.valor = u'00'
            #
            # Setting CST above re-rooted the tags, so the values can now
            # be read safely...
            #
            self.CST.xml = arquivo
            self.clEnq.xml = arquivo
            self.CNPJProd.xml = arquivo
            self.cSelo.xml = arquivo
            self.qSelo.xml = arquivo
            self.cEnq.xml = arquivo
            self.vBC.xml = arquivo
            self.qUnid.xml = arquivo
            self.vUnid.xml = arquivo
            self.pIPI.xml = arquivo
            self.vIPI.xml = arquivo

    xml = property(get_xml, set_xml)
class TagCSTICMS(TagCaracter):
    """CST (tax situation code) tag for the ICMS group.

    Assigning ``valor`` reconfigures the linked ICMS group: it selects the
    inner tag name and XML root for the matching ICMS variant (ICMS00,
    ICMS10, ..., ICMS90) and marks which child tags are mandatory for
    that code.
    """

    def __init__(self, *args, **kwargs):
        super(TagCSTICMS, self).__init__(*args, **kwargs)
        self.nome = u'CST'
        self.codigo = u'N12'
        self.tamanho = [2, 2]
        self.raiz = u''
        # Back-reference to the owning ICMS group; assigned by ICMS.__init__.
        self.grupo_icms = None

    def set_valor(self, novo_valor):
        super(TagCSTICMS, self).set_valor(novo_valor)
        # Nothing else to do until the tag is attached to an ICMS group.
        if not self.grupo_icms:
            return None
        #
        # Start from a clean slate: mark every child tag as optional...
        #
        self.grupo_icms.modBC.obrigatorio = False
        self.grupo_icms.vBC.obrigatorio = False
        self.grupo_icms.pRedBC.obrigatorio = False
        self.grupo_icms.pICMS.obrigatorio = False
        self.grupo_icms.vICMS.obrigatorio = False
        self.grupo_icms.modBCST.obrigatorio = False
        self.grupo_icms.pMVAST.obrigatorio = False
        self.grupo_icms.pRedBCST.obrigatorio = False
        self.grupo_icms.vBCST.obrigatorio = False
        self.grupo_icms.pICMSST.obrigatorio = False
        self.grupo_icms.vICMSST.obrigatorio = False
        #
        # ...and, for safety, reset the group's values whenever the tax
        # situation code is redefined.
        #
        self.grupo_icms.modBC.valor = 3
        self.grupo_icms.vBC.valor = u'0.00'
        self.grupo_icms.pRedBC.valor = u'0.00'
        self.grupo_icms.pICMS.valor = u'0.00'
        self.grupo_icms.vICMS.valor = u'0.00'
        self.grupo_icms.modBCST.valor = 4
        self.grupo_icms.pMVAST.valor = u'0.00'
        self.grupo_icms.pRedBCST.valor = u'0.00'
        self.grupo_icms.vBCST.valor = u'0.00'
        self.grupo_icms.pICMSST.valor = u'0.00'
        self.grupo_icms.vICMSST.valor = u'0.00'
        #
        # For each tax situation code, redefine the XML root and which
        # ICMS tags are mandatory.
        #
        if self.valor == u'00':
            # Fully taxed.
            self.grupo_icms.nome_tag = u'ICMS00'
            self.grupo_icms.raiz_tag = u'//det/imposto/ICMS/ICMS00'
            self.grupo_icms.modBC.obrigatorio = True
            self.grupo_icms.vBC.obrigatorio = True
            self.grupo_icms.pICMS.obrigatorio = True
            self.grupo_icms.vICMS.obrigatorio = True
        elif self.valor == u'10':
            # Taxed, with ICMS charged by tax substitution (ST).
            self.grupo_icms.nome_tag = u'ICMS10'
            self.grupo_icms.raiz_tag = u'//det/imposto/ICMS/ICMS10'
            self.grupo_icms.modBC.obrigatorio = True
            self.grupo_icms.vBC.obrigatorio = True
            self.grupo_icms.pICMS.obrigatorio = True
            self.grupo_icms.vICMS.obrigatorio = True
            self.grupo_icms.modBCST.obrigatorio = True
            self.grupo_icms.vBCST.obrigatorio = True
            self.grupo_icms.pICMSST.obrigatorio = True
            self.grupo_icms.vICMSST.obrigatorio = True
        elif self.valor == u'20':
            # Taxed, with a reduced calculation base.
            self.grupo_icms.nome_tag = u'ICMS20'
            self.grupo_icms.raiz_tag = u'//det/imposto/ICMS/ICMS20'
            self.grupo_icms.modBC.obrigatorio = True
            self.grupo_icms.vBC.obrigatorio = True
            self.grupo_icms.pRedBC.obrigatorio = True
            self.grupo_icms.pICMS.obrigatorio = True
            self.grupo_icms.vICMS.obrigatorio = True
        elif self.valor == u'30':
            # Exempt/not taxed, with ICMS charged by tax substitution.
            self.grupo_icms.nome_tag = u'ICMS30'
            self.grupo_icms.raiz_tag = u'//det/imposto/ICMS/ICMS30'
            self.grupo_icms.modBCST.obrigatorio = True
            self.grupo_icms.vBCST.obrigatorio = True
            self.grupo_icms.pICMSST.obrigatorio = True
            self.grupo_icms.vICMSST.obrigatorio = True
        elif self.valor in (u'40', u'41', u'50'):
            # Exempt / not taxed / suspended: no child value is required.
            self.grupo_icms.nome_tag = u'ICMS40'
            self.grupo_icms.raiz_tag = u'//det/imposto/ICMS/ICMS40'
        elif self.valor == u'51':
            # Deferred.
            self.grupo_icms.nome_tag = u'ICMS51'
            self.grupo_icms.raiz_tag = u'//det/imposto/ICMS/ICMS51'
        elif self.valor == u'60':
            # ICMS previously charged by tax substitution.
            self.grupo_icms.nome_tag = u'ICMS60'
            self.grupo_icms.raiz_tag = u'//det/imposto/ICMS/ICMS60'
            self.grupo_icms.vBCST.obrigatorio = True
            self.grupo_icms.vICMSST.obrigatorio = True
        elif self.valor == u'70':
            # Reduced base, with ICMS charged by tax substitution.
            self.grupo_icms.nome_tag = u'ICMS70'
            self.grupo_icms.raiz_tag = u'//det/imposto/ICMS/ICMS70'
            self.grupo_icms.modBC.obrigatorio = True
            self.grupo_icms.vBC.obrigatorio = True
            self.grupo_icms.pRedBC.obrigatorio = True
            self.grupo_icms.pICMS.obrigatorio = True
            self.grupo_icms.vICMS.obrigatorio = True
            self.grupo_icms.modBCST.obrigatorio = True
            self.grupo_icms.vBCST.obrigatorio = True
            self.grupo_icms.pICMSST.obrigatorio = True
            self.grupo_icms.vICMSST.obrigatorio = True
        elif self.valor == u'90':
            # Other situations.
            self.grupo_icms.nome_tag = u'ICMS90'
            self.grupo_icms.raiz_tag = u'//det/imposto/ICMS/ICMS90'
            self.grupo_icms.modBC.obrigatorio = True
            self.grupo_icms.vBC.obrigatorio = True
            self.grupo_icms.pICMS.obrigatorio = True
            self.grupo_icms.vICMS.obrigatorio = True
            self.grupo_icms.modBCST.obrigatorio = True
            self.grupo_icms.vBCST.obrigatorio = True
            self.grupo_icms.pICMSST.obrigatorio = True
            self.grupo_icms.vICMSST.obrigatorio = True
        #
        # Point every tag of the ICMS group at the new root.
        #
        self.grupo_icms.orig.raiz = self.grupo_icms.raiz_tag
        self.grupo_icms.CST.raiz = self.grupo_icms.raiz_tag
        self.grupo_icms.modBC.raiz = self.grupo_icms.raiz_tag
        self.grupo_icms.vBC.raiz = self.grupo_icms.raiz_tag
        self.grupo_icms.pRedBC.raiz = self.grupo_icms.raiz_tag
        self.grupo_icms.pICMS.raiz = self.grupo_icms.raiz_tag
        self.grupo_icms.vICMS.raiz = self.grupo_icms.raiz_tag
        self.grupo_icms.modBCST.raiz = self.grupo_icms.raiz_tag
        self.grupo_icms.pMVAST.raiz = self.grupo_icms.raiz_tag
        self.grupo_icms.pRedBCST.raiz = self.grupo_icms.raiz_tag
        self.grupo_icms.vBCST.raiz = self.grupo_icms.raiz_tag
        self.grupo_icms.pICMSST.raiz = self.grupo_icms.raiz_tag
        self.grupo_icms.vICMSST.raiz = self.grupo_icms.raiz_tag

    def get_valor(self):
        return self._valor_string

    # Rebind the property so this subclass's setter side effects run.
    valor = property(get_valor, set_valor)
class ICMS(XMLNFe):
    """ICMS tax group of an NF-e item (//det/imposto/ICMS).

    The concrete inner tag (ICMS00, ICMS10, ..., ICMS90) is chosen by the
    CST tag, whose setter reconfigures this group's tag name, XML root and
    mandatory fields.
    """

    def __init__(self):
        super(ICMS, self).__init__()
        self.nome_tag = u'ICMS00'
        self.raiz_tag = u'//det/imposto/ICMS/ICMS00'
        self.orig = TagInteiro(nome=u'orig', codigo=u'N11', tamanho=[1, 1, 1], raiz=u'')
        # codigo=u'N12' is the CST field (see TagCSTICMS)
        self.modBC = TagInteiro(nome=u'modBC', codigo=u'N13', tamanho=[1, 1, 1], raiz=u'')
        self.pRedBC = TagDecimal(nome=u'pRedBC', codigo=u'N14', tamanho=[1, 5, 1], decimais=[0, 2, 2], raiz=u'')
        self.vBC = TagDecimal(nome=u'vBC', codigo=u'N15', tamanho=[1, 15, 1], decimais=[0, 2, 2], raiz=u'')
        self.pICMS = TagDecimal(nome=u'pICMS', codigo=u'N16', tamanho=[1, 5, 1], decimais=[0, 2, 2], raiz=u'')
        self.vICMS = TagDecimal(nome=u'vICMS', codigo=u'N17', tamanho=[1, 15, 1], decimais=[0, 2, 2], raiz=u'')
        self.modBCST = TagInteiro(nome=u'modBCST', codigo=u'N18', tamanho=[1, 1, 1], raiz=u'')
        self.pMVAST = TagDecimal(nome=u'pMVAST', codigo=u'N19', tamanho=[1, 5, 1], decimais=[0, 2, 2], raiz=u'')
        self.pRedBCST = TagDecimal(nome=u'pRedBCST', codigo=u'N20', tamanho=[1, 5, 1], decimais=[0, 2, 2], raiz=u'')
        self.vBCST = TagDecimal(nome=u'vBCST', codigo=u'N21', tamanho=[1, 15, 1], decimais=[0, 2, 2], raiz=u'')
        self.pICMSST = TagDecimal(nome=u'pICMSST', codigo=u'N22', tamanho=[1, 5, 1], decimais=[0, 2, 2], raiz=u'')
        self.vICMSST = TagDecimal(nome=u'vICMSST', codigo=u'N23', tamanho=[1, 15, 1], decimais=[0, 2, 2], raiz=u'')
        self.CST = TagCSTICMS()
        # Link the CST tag back to this group, then assign the default CST;
        # the assignment runs TagCSTICMS.set_valor, which configures
        # nome_tag/raiz_tag and the mandatory flags above.
        self.CST.grupo_icms = self
        self.CST.valor = u'40'

    def get_xml(self):
        #
        # Emit the child tags dictated by the current tax situation code.
        #
        xml = XMLNFe.get_xml(self)
        xml += u'<ICMS><' + self.nome_tag + u'>'
        xml += self.orig.xml
        xml += self.CST.xml
        if self.CST.valor == u'00':
            xml += self.modBC.xml
            xml += self.vBC.xml
            xml += self.pICMS.xml
            xml += self.vICMS.xml
        elif self.CST.valor == u'10':
            xml += self.modBC.xml
            xml += self.vBC.xml
            xml += self.pICMS.xml
            xml += self.vICMS.xml
            xml += self.modBCST.xml
            # Only when the ST base is determined by value-added margin
            if self.modBCST.valor == 4:
                xml += self.pMVAST.xml
                xml += self.pRedBCST.xml
            xml += self.vBCST.xml
            xml += self.pICMSST.xml
            xml += self.vICMSST.xml
        elif self.CST.valor == u'20':
            xml += self.modBC.xml
            xml += self.vBC.xml
            xml += self.pRedBC.xml
            xml += self.pICMS.xml
            xml += self.vICMS.xml
        elif self.CST.valor == u'30':
            xml += self.modBCST.xml
            # Only when the ST base is determined by value-added margin
            if self.modBCST.valor == 4:
                xml += self.pMVAST.xml
                xml += self.pRedBCST.xml
            xml += self.vBCST.xml
            xml += self.pICMSST.xml
            xml += self.vICMSST.xml
        elif self.CST.valor in (u'40', u'41', u'50'):
            # Exempt / not taxed / suspended: only orig and CST are emitted.
            pass
        elif self.CST.valor == u'51':
            xml += self.modBC.xml
            xml += self.pRedBC.xml
            xml += self.vBC.xml
            xml += self.pICMS.xml
            xml += self.vICMS.xml
        elif self.CST.valor == u'60':
            xml += self.vBCST.xml
            xml += self.vICMSST.xml
        elif self.CST.valor == u'70':
            xml += self.modBC.xml
            xml += self.vBC.xml
            xml += self.pRedBC.xml
            xml += self.pICMS.xml
            xml += self.vICMS.xml
            xml += self.modBCST.xml
            # Only when the ST base is determined by value-added margin
            if self.modBCST.valor == 4:
                xml += self.pMVAST.xml
                xml += self.pRedBCST.xml
            xml += self.vBCST.xml
            xml += self.pICMSST.xml
            xml += self.vICMSST.xml
        elif self.CST.valor == u'90':
            xml += self.modBC.xml
            xml += self.vBC.xml
            xml += self.pRedBC.xml
            xml += self.pICMS.xml
            xml += self.vICMS.xml
            xml += self.modBCST.xml
            # Only when the ST base is determined by value-added margin
            if self.modBCST.valor == 4:
                xml += self.pMVAST.xml
                xml += self.pRedBCST.xml
            xml += self.vBCST.xml
            xml += self.pICMSST.xml
            xml += self.vICMSST.xml
        xml += u'</' + self.nome_tag + u'></ICMS>'
        return xml

    def set_xml(self, arquivo):
        if self._le_xml(arquivo):
            #
            # To read the ICMS correctly, first discover which
            # tax-situation group is present in the XML.
            #
            if self._le_noh(u'//det/imposto/ICMS/ICMS00') is not None:
                self.CST.valor = u'00'
            elif self._le_noh(u'//det/imposto/ICMS/ICMS10') is not None:
                self.CST.valor = u'10'
            elif self._le_noh(u'//det/imposto/ICMS/ICMS20') is not None:
                self.CST.valor = u'20'
            elif self._le_noh(u'//det/imposto/ICMS/ICMS30') is not None:
                self.CST.valor = u'30'
            elif self._le_noh(u'//det/imposto/ICMS/ICMS40') is not None:
                self.CST.valor = u'40'
            elif self._le_noh(u'//det/imposto/ICMS/ICMS51') is not None:
                self.CST.valor = u'51'
            elif self._le_noh(u'//det/imposto/ICMS/ICMS60') is not None:
                self.CST.valor = u'60'
            elif self._le_noh(u'//det/imposto/ICMS/ICMS70') is not None:
                self.CST.valor = u'70'
            elif self._le_noh(u'//det/imposto/ICMS/ICMS90') is not None:
                self.CST.valor = u'90'
            #
            # Setting CST above re-rooted the tags, so the values can now
            # be read safely...
            #
            self.orig.xml = arquivo
            self.CST.xml = arquivo
            self.modBC.xml = arquivo
            self.vBC.xml = arquivo
            self.pRedBC.xml = arquivo
            self.pICMS.xml = arquivo
            self.vICMS.xml = arquivo
            self.modBCST.xml = arquivo
            self.pMVAST.xml = arquivo
            self.pRedBCST.xml = arquivo
            self.vBCST.xml = arquivo
            self.pICMSST.xml = arquivo
            self.vICMSST.xml = arquivo

    xml = property(get_xml, set_xml)
class Imposto(XMLNFe):
    """<imposto> group: aggregates every tax subgroup of a detail item."""

    def __init__(self):
        super(Imposto, self).__init__()
        self.ICMS = ICMS()
        self.IPI = IPI()
        self.II = II()
        self.PIS = PIS()
        self.PISST = PISST()
        self.COFINS = COFINS()
        self.COFINSST = COFINSST()
        self.ISSQN = ISSQN()

    def _filhos(self):
        # Serialization order mandated by the NF-e layout.
        return (self.ICMS, self.IPI, self.II, self.PIS, self.PISST,
                self.COFINS, self.COFINSST, self.ISSQN)

    def get_xml(self):
        partes = [XMLNFe.get_xml(self), u'<imposto>']
        partes.extend(filho.xml for filho in self._filhos())
        partes.append(u'</imposto>')
        return u''.join(partes)

    def set_xml(self, arquivo):
        if self._le_xml(arquivo):
            for filho in self._filhos():
                filho.xml = arquivo

    xml = property(get_xml, set_xml)
class ICMSCons(XMLNFe):
    """<ICMSCons> group: ICMS ST withheld for the consumer state (fuels)."""

    def __init__(self):
        super(ICMSCons, self).__init__()
        self.vBCICMSSTCons = TagDecimal(nome=u'vBCICMSSTCons', codigo=u'L118', tamanho=[1, 15, 1], decimais=[0, 2, 2], raiz=u'//det/prod/comb/ICMSCons')
        self.vICMSSTCons = TagDecimal(nome=u'vICMSSTCons', codigo=u'L119', tamanho=[1, 15, 1], decimais=[0, 2, 2], raiz=u'//det/prod/comb/ICMSCons')
        self.UFcons = TagCaracter(nome=u'UFcons', codigo=u'L120', tamanho=[2, 2], raiz=u'//det/prod/comb/ICMSCons')

    def get_xml(self):
        # Optional group: omitted entirely when no amount was informed.
        if not (self.vBCICMSSTCons.valor or self.vICMSSTCons.valor):
            return u''
        return (XMLNFe.get_xml(self) + u'<ICMSCons>' +
                self.vBCICMSSTCons.xml + self.vICMSSTCons.xml +
                self.UFcons.xml + u'</ICMSCons>')

    def set_xml(self, arquivo):
        if self._le_xml(arquivo):
            for tag in (self.vBCICMSSTCons, self.vICMSSTCons, self.UFcons):
                tag.xml = arquivo

    xml = property(get_xml, set_xml)
class ICMSInter(XMLNFe):
    """<ICMSInter> group: ICMS ST due to the destination state (fuels)."""

    def __init__(self):
        super(ICMSInter, self).__init__()
        self.vBCICMSSTDest = TagDecimal(nome=u'vBCICMSSTDest', codigo=u'L115', tamanho=[1, 15, 1], decimais=[0, 2, 2], raiz=u'//det/prod/comb/ICMSInter')
        self.vICMSSTDest = TagDecimal(nome=u'vICMSSTDest', codigo=u'L116', tamanho=[1, 15, 1], decimais=[0, 2, 2], raiz=u'//det/prod/comb/ICMSInter')

    def get_xml(self):
        # Optional group: emitted only when at least one amount is present.
        if not (self.vBCICMSSTDest.valor or self.vICMSSTDest.valor):
            return u''
        return (XMLNFe.get_xml(self) + u'<ICMSInter>' +
                self.vBCICMSSTDest.xml + self.vICMSSTDest.xml +
                u'</ICMSInter>')

    def set_xml(self, arquivo):
        if self._le_xml(arquivo):
            self.vBCICMSSTDest.xml = arquivo
            self.vICMSSTDest.xml = arquivo

    xml = property(get_xml, set_xml)
class ICMSComb(XMLNFe):
    """<ICMSComb> group: ICMS due on the fuel operation itself."""

    def __init__(self):
        super(ICMSComb, self).__init__()
        self.vBCICMS = TagDecimal(nome=u'vBCICMS', codigo=u'L110', tamanho=[1, 15, 1], decimais=[0, 2, 2], raiz=u'//det/prod/comb/ICMSComb')
        self.vICMS = TagDecimal(nome=u'vICMS', codigo=u'L111', tamanho=[1, 15, 1], decimais=[0, 2, 2], raiz=u'//det/prod/comb/ICMSComb')
        self.vBCICMSST = TagDecimal(nome=u'vBCICMSST', codigo=u'L112', tamanho=[1, 15, 1], decimais=[0, 2, 2], raiz=u'//det/prod/comb/ICMSComb')
        self.vICMSST = TagDecimal(nome=u'vICMSST', codigo=u'L113', tamanho=[1, 15, 1], decimais=[0, 2, 2], raiz=u'//det/prod/comb/ICMSComb')

    def _tags(self):
        # Serialization order required by the layout.
        return (self.vBCICMS, self.vICMS, self.vBCICMSST, self.vICMSST)

    def get_xml(self):
        # Optional group: skipped when every amount is zero/absent.
        if not any(t.valor for t in self._tags()):
            return u''
        partes = [XMLNFe.get_xml(self), u'<ICMSComb>']
        partes.extend(t.xml for t in self._tags())
        partes.append(u'</ICMSComb>')
        return u''.join(partes)

    def set_xml(self, arquivo):
        if self._le_xml(arquivo):
            for t in self._tags():
                t.xml = arquivo

    xml = property(get_xml, set_xml)
class CIDE(XMLNFe):
    """<CIDE> group: CIDE contribution on fuel products."""

    def __init__(self):
        super(CIDE, self).__init__()
        self.qBCProd = TagDecimal(nome=u'qBCProd', codigo=u'L106', tamanho=[1, 16], decimais=[0, 4, 4], raiz=u'//det/prod/comb/CIDE')
        self.vAliqProd = TagDecimal(nome=u'vAliqProd', codigo=u'L107', tamanho=[1, 15], decimais=[0, 4, 4], raiz=u'//det/prod/comb/CIDE')
        self.vCIDE = TagDecimal(nome=u'vCIDE', codigo=u'L108', tamanho=[1, 15], decimais=[0, 2, 2], raiz=u'//det/prod/comb/CIDE')

    def _tags(self):
        return (self.qBCProd, self.vAliqProd, self.vCIDE)

    def get_xml(self):
        # Optional group: skipped when every amount is zero/absent.
        if not any(t.valor for t in self._tags()):
            return u''
        partes = [XMLNFe.get_xml(self), u'<CIDE>']
        partes.extend(t.xml for t in self._tags())
        partes.append(u'</CIDE>')
        return u''.join(partes)

    def set_xml(self, arquivo):
        if self._le_xml(arquivo):
            for t in self._tags():
                t.xml = arquivo

    xml = property(get_xml, set_xml)
class Comb(XMLNFe):
    """<comb> group: fuel-specific product data (ANP code, CIDE, ICMS subgroups)."""

    def __init__(self):
        super(Comb, self).__init__()
        self.cProdANP = TagInteiro(nome=u'cProdANP', codigo=u'L102', tamanho=[9, 9, 9], raiz=u'//det/prod/comb')
        self.CODIF = TagInteiro(nome=u'CODIF', codigo=u'L103', tamanho=[0, 21], raiz=u'//det/prod/comb', obrigatorio=False)
        self.qTemp = TagDecimal(nome=u'qTemp', codigo=u'L104', tamanho=[1, 16, 1], decimais=[0, 4, 4], raiz=u'//det/prod/comb', obrigatorio=False)
        self.CIDE = CIDE()
        self.ICMSComb = ICMSComb()
        self.ICMSInter = ICMSInter()
        self.ICMSCons = ICMSCons()

    def _filhos(self):
        # Serialization order mandated by the layout.
        return (self.cProdANP, self.CODIF, self.qTemp, self.CIDE,
                self.ICMSComb, self.ICMSInter, self.ICMSCons)

    def get_xml(self):
        # Optional group: only meaningful when the ANP product code is set.
        if not self.cProdANP.valor:
            return u''
        partes = [XMLNFe.get_xml(self), u'<comb>']
        partes.extend(filho.xml for filho in self._filhos())
        partes.append(u'</comb>')
        return u''.join(partes)

    def set_xml(self, arquivo):
        if self._le_xml(arquivo):
            for filho in self._filhos():
                filho.xml = arquivo

    xml = property(get_xml, set_xml)
class Arma(XMLNFe):
    """<arma> group: firearm detail (type, serial numbers, description)."""

    def __init__(self):
        super(Arma, self).__init__()
        self.tpArma = TagInteiro(nome=u'tpArma', codigo=u'L02', tamanho=[1, 1], raiz=u'//arma')
        self.nSerie = TagInteiro(nome=u'nSerie', codigo=u'L03', tamanho=[1, 9], raiz=u'//arma')
        self.nCano = TagInteiro(nome=u'nCano', codigo=u'L04', tamanho=[1, 9], raiz=u'//arma')
        self.descr = TagCaracter(nome=u'descr', codigo=u'L05', tamanho=[1, 256], raiz=u'//arma')

    def get_xml(self):
        # BUG FIX: the guard tested the tag object itself ("if not self.nSerie")
        # rather than its value, so the group was never suppressed. Test
        # .valor, consistently with the sibling groups (Med, Vol, Lacres).
        if not self.nSerie.valor:
            return u''
        xml = XMLNFe.get_xml(self)
        xml += u'<arma>'
        xml += self.tpArma.xml
        xml += self.nSerie.xml
        xml += self.nCano.xml
        xml += self.descr.xml
        xml += u'</arma>'
        return xml

    def set_xml(self, arquivo):
        if self._le_xml(arquivo):
            self.tpArma.xml = arquivo
            self.nSerie.xml = arquivo
            self.nCano.xml = arquivo
            self.descr.xml = arquivo

    xml = property(get_xml, set_xml)
class Med(XMLNFe):
    """<med> group: medicine batch detail (lot, dates, maximum consumer price)."""

    def __init__(self):
        super(Med, self).__init__()
        self.nLote = TagCaracter(nome=u'nLote', codigo=u'K02', tamanho=[1, 20], raiz=u'//med')
        self.qLote = TagDecimal(nome=u'qLote', codigo=u'K03', tamanho=[1, 11], decimais=[0, 3, 3], raiz=u'//med')
        self.dFab = TagData(nome=u'dFab', codigo=u'K04', raiz=u'//med')
        self.dVal = TagData(nome=u'dVal', codigo=u'K05', raiz=u'//med')
        self.vPMC = TagDecimal(nome=u'vPMC', codigo=u'K06', tamanho=[1, 15], decimais=[0, 2, 2], raiz=u'//med')

    def _tags(self):
        # Serialization order required by the layout.
        return (self.nLote, self.qLote, self.dFab, self.dVal, self.vPMC)

    def get_xml(self):
        # Optional group: only emitted when a lot number is present.
        if not self.nLote.valor:
            return u''
        partes = [XMLNFe.get_xml(self), u'<med>']
        partes.extend(t.xml for t in self._tags())
        partes.append(u'</med>')
        return u''.join(partes)

    def set_xml(self, arquivo):
        if self._le_xml(arquivo):
            for t in self._tags():
                t.xml = arquivo

    xml = property(get_xml, set_xml)
class VeicProd(XMLNFe):
    """<veicProd> group: new-vehicle detail of a product item (fields J02..J23)."""

    def __init__(self):
        super(VeicProd, self).__init__()
        self.tpOp = TagInteiro(nome=u'tpOp', codigo=u'J02', tamanho=[1, 1, 1], raiz=u'//det/prod/veicProd')
        self.chassi = TagCaracter(nome=u'chassi', codigo=u'J03', tamanho=[1, 17], raiz=u'//det/prod/veicProd')
        self.cCor = TagCaracter(nome=u'cCor', codigo=u'J04', tamanho=[1, 4], raiz=u'//det/prod/veicProd')
        self.xCor = TagCaracter(nome=u'xCor', codigo=u'J05', tamanho=[1, 40], raiz=u'//det/prod/veicProd')
        self.pot = TagCaracter(nome=u'pot', codigo=u'J06', tamanho=[1, 4], raiz=u'//det/prod/veicProd')
        self.CM3 = TagCaracter(nome=u'CM3', codigo=u'J07', tamanho=[1, 4], raiz=u'//det/prod/veicProd')
        self.pesoL = TagCaracter(nome=u'pesoL', codigo=u'J08', tamanho=[1, 9], raiz=u'//det/prod/veicProd')
        self.pesoB = TagCaracter(nome=u'pesoB', codigo=u'J09', tamanho=[1, 9], raiz=u'//det/prod/veicProd')
        self.nSerie = TagCaracter(nome=u'nSerie', codigo=u'J10', tamanho=[1, 9], raiz=u'//det/prod/veicProd')
        self.tpComb = TagCaracter(nome=u'tpComb', codigo=u'J11', tamanho=[1, 8], raiz=u'//det/prod/veicProd')
        self.nMotor = TagCaracter(nome=u'nMotor', codigo=u'J12', tamanho=[1, 21], raiz=u'//det/prod/veicProd')
        self.CMKG = TagCaracter(nome=u'CMKG', codigo=u'J13', tamanho=[1, 9], raiz=u'//det/prod/veicProd')
        self.dist = TagCaracter(nome=u'dist', codigo=u'J14', tamanho=[1, 4], raiz=u'//det/prod/veicProd')
        self.RENAVAM = TagCaracter(nome=u'RENAVAM', codigo=u'J15', tamanho=[1, 9], raiz=u'//det/prod/veicProd', obrigatorio=False)
        self.anoMod = TagInteiro(nome=u'anoMod', codigo=u'J16', tamanho=[4, 4, 4], raiz=u'//det/prod/veicProd')
        self.anoFab = TagInteiro(nome=u'anoFab', codigo=u'J17', tamanho=[4, 4, 4], raiz=u'//det/prod/veicProd')
        self.tpPint = TagCaracter(nome=u'tpPint', codigo=u'J18', tamanho=[1, 1], raiz=u'//det/prod/veicProd')
        self.tpVeic = TagInteiro(nome=u'tpVeic', codigo=u'J19', tamanho=[2, 2, 2], raiz=u'//det/prod/veicProd')
        self.espVeic = TagInteiro(nome=u'espVeic', codigo=u'J20', tamanho=[1, 1], raiz=u'//det/prod/veicProd')
        self.VIN = TagCaracter(nome=u'VIN', codigo=u'J21', tamanho=[1, 1], raiz=u'//det/prod/veicProd')
        self.condVeic = TagInteiro(nome=u'condVeic', codigo=u'J22', tamanho=[1, 1], raiz=u'//det/prod/veicProd')
        self.cMod = TagInteiro(nome=u'cMod', codigo=u'J23', tamanho=[6, 6, 6], raiz=u'//det/prod/veicProd')

    def _tags(self):
        # Serialization order mandated by the layout (J02..J23).
        return (self.tpOp, self.chassi, self.cCor, self.xCor, self.pot,
                self.CM3, self.pesoL, self.pesoB, self.nSerie, self.tpComb,
                self.nMotor, self.CMKG, self.dist, self.RENAVAM, self.anoMod,
                self.anoFab, self.tpPint, self.tpVeic, self.espVeic, self.VIN,
                self.condVeic, self.cMod)

    def get_xml(self):
        # Optional group: only emitted when a chassis number is present.
        if not self.chassi.valor:
            return u''
        partes = [XMLNFe.get_xml(self), u'<veicProd>']
        partes.extend(t.xml for t in self._tags())
        partes.append(u'</veicProd>')
        return u''.join(partes)

    def set_xml(self, arquivo):
        if self._le_xml(arquivo):
            for t in self._tags():
                t.xml = arquivo

    xml = property(get_xml, set_xml)
class Adi(XMLNFe):
    """<adi> group: one addition line of an import declaration (DI)."""

    def __init__(self):
        super(Adi, self).__init__()
        self.nAdicao = TagInteiro(nome=u'nAdicao', codigo=u'I26', tamanho=[1, 3], raiz=u'//adi')
        self.nSeqAdic = TagInteiro(nome=u'nSeqAdic', codigo=u'I27', tamanho=[1, 3], raiz=u'//adi')
        self.cFabricante = TagCaracter(nome=u'cFabricante', codigo=u'I28', tamanho=[1, 60], raiz=u'//adi')
        self.vDescDI = TagDecimal(nome=u'vDescDI', codigo=u'I29', tamanho=[1, 15, 1], decimais=[0, 2, 2], raiz=u'//adi', obrigatorio=False)

    def get_xml(self):
        xml = XMLNFe.get_xml(self)
        xml += u'<adi>'
        xml += self.nAdicao.xml
        xml += self.nSeqAdic.xml
        xml += self.cFabricante.xml
        xml += self.vDescDI.xml
        xml += u'</adi>'
        return xml

    def set_xml(self, arquivo):
        if self._le_xml(arquivo):
            self.nAdicao.xml = arquivo
            self.nSeqAdic.xml = arquivo
            # BUG FIX: the two assignments below were missing the ".xml"
            # target, replacing the tag objects themselves with the source
            # document instead of parsing their values.
            self.cFabricante.xml = arquivo
            self.vDescDI.xml = arquivo

    xml = property(get_xml, set_xml)
class DI(XMLNFe):
    """<DI> group: import declaration with its <adi> addition lines."""

    def __init__(self):
        super(DI, self).__init__()
        self.nDI = TagCaracter(nome=u'nDI', codigo=u'I19', tamanho=[1, 10], raiz=u'//DI')
        self.dDI = TagData(nome=u'dDI', codigo=u'I20', raiz=u'//DI')
        self.xLocDesemb = TagCaracter(nome=u'xLocDesemb', codigo=u'I21', tamanho=[1, 60], raiz=u'//DI')
        self.UFDesemb = TagCaracter(nome=u'UFDesemb', codigo=u'I22', tamanho=[2, 2], raiz=u'//DI')
        self.dDesemb = TagData(nome=u'dDesemb', codigo=u'I23', raiz=u'//DI')
        self.cExportador = TagCaracter(nome=u'cExportador', codigo=u'I24', tamanho=[1, 60], raiz=u'//DI')
        self.adi = [Adi()]

    def get_xml(self):
        # BUG FIX: the guard tested the tag object ("if not self.nDI"), not its
        # value; test .valor like the other optional groups do.
        if not self.nDI.valor:
            return u''
        xml = XMLNFe.get_xml(self)
        xml += u'<DI>'
        xml += self.nDI.xml
        xml += self.dDI.xml
        xml += self.xLocDesemb.xml
        xml += self.UFDesemb.xml
        xml += self.dDesemb.xml
        xml += self.cExportador.xml
        for a in self.adi:
            xml += a.xml
        xml += u'</DI>'
        return xml

    def set_xml(self, arquivo):
        if self._le_xml(arquivo):
            self.nDI.xml = arquivo
            self.dDI.xml = arquivo
            # BUG FIX: the next four assignments were missing the ".xml"
            # target and clobbered the tag objects with the source document.
            self.xLocDesemb.xml = arquivo
            self.UFDesemb.xml = arquivo
            self.dDesemb.xml = arquivo
            self.cExportador.xml = arquivo
            #
            # Multiple-occurrence tags: these tag classes (and their children)
            # must be re-rooted ("raiz" property) to be read correctly.
            #
            adis = self._le_nohs('//DI/adi')
            self.adi = []
            if adis is not None:
                # BUG FIX: this previously instantiated the undefined name
                # "_Adi", raising NameError whenever addition nodes existed.
                self.adi = [Adi() for a in adis]
                for i in range(len(adis)):
                    self.adi[i].xml = adis[i]

    xml = property(get_xml, set_xml)
class Prod(XMLNFe):
    """<prod> group: product/service data of a detail item."""

    def __init__(self):
        super(Prod, self).__init__()
        self.cProd = TagCaracter(nome=u'cProd', codigo=u'I02', tamanho=[1, 60], raiz=u'//det/prod')
        self.cEAN = TagCaracter(nome=u'cEAN', codigo=u'I03', tamanho=[0, 14], raiz=u'//det/prod')
        self.xProd = TagCaracter(nome=u'xProd', codigo=u'I04', tamanho=[1, 120], raiz=u'//det/prod')
        self.NCM = TagCaracter(nome=u'NCM', codigo=u'I05', tamanho=[2, 8], raiz=u'//det/prod', obrigatorio=False)
        self.EXTIPI = TagCaracter(nome=u'EXTIPI', codigo=u'I06', tamanho=[2, 3], raiz=u'//det/prod', obrigatorio=False)
        self.genero = TagCaracter(nome=u'genero', codigo=u'I07', tamanho=[2, 2, 2], raiz=u'//det/prod', obrigatorio=False)
        self.CFOP = TagInteiro(nome=u'CFOP', codigo=u'I08', tamanho=[4, 4, 4], raiz=u'//det/prod')
        self.uCom = TagCaracter(nome=u'uCom', codigo=u'I09', tamanho=[1, 6], raiz=u'//det/prod')
        self.qCom = TagDecimal(nome=u'qCom', codigo=u'I10', tamanho=[1, 12, 1], decimais=[0, 4, 4], raiz=u'//det/prod')
        self.vUnCom = TagDecimal(nome=u'vUnCom', codigo=u'I10a', tamanho=[1, 16, 1], decimais=[0, 4, 4], raiz=u'//det/prod')
        self.vProd = TagDecimal(nome=u'vProd', codigo=u'I11', tamanho=[1, 15, 1], decimais=[0, 2, 2], raiz=u'//det/prod')
        self.cEANTrib = TagCaracter(nome=u'cEANTrib', codigo=u'I12', tamanho=[0, 14], raiz=u'//det/prod')
        self.uTrib = TagCaracter(nome=u'uTrib', codigo=u'I13', tamanho=[1, 6], raiz=u'//det/prod')
        self.qTrib = TagDecimal(nome=u'qTrib', codigo=u'I14', tamanho=[1, 12, 1], decimais=[0, 4, 4], raiz=u'//det/prod')
        self.vUnTrib = TagDecimal(nome=u'vUnTrib', codigo=u'I14a', tamanho=[1, 16, 1], decimais=[0, 4, 4], raiz=u'//det/prod')
        self.vTrib = TagDecimal(nome=u'vTrib', codigo=u'', tamanho=[1, 15, 1], decimais=[0, 2, 2], raiz=u'//det/prod', obrigatorio=False)
        self.vFrete = TagDecimal(nome=u'vFrete', codigo=u'I15', tamanho=[1, 15, 1], decimais=[0, 2, 2], raiz=u'//det/prod', obrigatorio=False)
        self.vSeg = TagDecimal(nome=u'vSeg', codigo=u'I16', tamanho=[1, 15, 1], decimais=[0, 2, 2], raiz=u'//det/prod', obrigatorio=False)
        self.vDesc = TagDecimal(nome=u'vDesc', codigo=u'I17', tamanho=[1, 15, 1], decimais=[0, 2, 2], raiz=u'//det/prod', obrigatorio=False)
        self.DI = []
        self.veicProd = VeicProd()
        self.med = []
        self.arma = []
        self.comb = Comb()

    def _tags_simples(self):
        # Plain child tags, in the serialization order required by the layout.
        # vTrib is intentionally absent: it is internal and never serialized.
        return (self.cProd, self.cEAN, self.xProd, self.NCM, self.EXTIPI,
                self.genero, self.CFOP, self.uCom, self.qCom, self.vUnCom,
                self.vProd, self.cEANTrib, self.uTrib, self.qTrib,
                self.vUnTrib, self.vFrete, self.vSeg, self.vDesc)

    def get_xml(self):
        partes = [XMLNFe.get_xml(self), u'<prod>']
        partes.extend(t.xml for t in self._tags_simples())
        partes.extend(d.xml for d in self.DI)
        partes.append(self.veicProd.xml)
        partes.extend(m.xml for m in self.med)
        partes.extend(a.xml for a in self.arma)
        partes.append(self.comb.xml)
        partes.append(u'</prod>')
        return u''.join(partes)

    def set_xml(self, arquivo):
        if self._le_xml(arquivo):
            for t in self._tags_simples():
                t.xml = arquivo
            # Multiple-occurrence groups must be re-rooted ("raiz" property)
            # and are read through le_grupo.
            self.DI = self.le_grupo('//det/prod/DI', DI)
            self.veicProd.xml = arquivo
            self.med = self.le_grupo('//det/prod/med', Med)
            self.arma = self.le_grupo('//det/prod/arma', Arma)
            self.comb.xml = arquivo

    xml = property(get_xml, set_xml)
class Det(XMLNFe):
    """<det> group: one invoice detail item (product, taxes, extra info)."""

    def __init__(self):
        super(Det, self).__init__()
        self.nItem = TagInteiro(nome=u'det', codigo=u'H01', tamanho=[1, 3], propriedade=u'nItem', raiz=u'/') #, namespace=NAMESPACE_NFE)
        self.prod = Prod()
        self.imposto = Imposto()
        self.infAdProd = TagCaracter(nome=u'infAdProd', codigo=u'V01', tamanho=[1, 500], raiz=u'//det', obrigatorio=False)

    def get_xml(self):
        # nItem renders the opening <det> tag (with its nItem attribute) itself.
        partes = [XMLNFe.get_xml(self),
                  self.nItem.xml,
                  self.prod.xml,
                  self.imposto.xml,
                  self.infAdProd.xml,
                  u'</det>']
        return u''.join(partes)

    def set_xml(self, arquivo):
        if self._le_xml(arquivo):
            for filho in (self.nItem, self.prod, self.imposto, self.infAdProd):
                filho.xml = arquivo

    xml = property(get_xml, set_xml)

    def descricao_produto_formatada(self):
        """Product description with '|' rendered as HTML line breaks."""
        partes = [self.prod.xProd.valor.replace(u'|', u'<br />')]
        if len(self.infAdProd.valor):
            partes.append(u'<br />')
            partes.append(self.infAdProd.valor.replace(u'|', u'<br />'))
        return u''.join(partes)

    def cst_formatado(self):
        """Origin digit followed by the two-digit ICMS CST code."""
        origem = unicode(self.imposto.ICMS.orig.valor).zfill(1)
        cst = unicode(self.imposto.ICMS.CST.valor).zfill(2)
        return origem + cst
class Compra(XMLNFe):
    """<compra> group: purchase-order references (note, order, contract)."""

    def __init__(self):
        super(Compra, self).__init__()
        self.xNEmp = TagCaracter(nome=u'xNEmp', codigo=u'ZB02', tamanho=[1, 17], raiz=u'//NFe/infNFe/compra', obrigatorio=False)
        self.xPed = TagCaracter(nome=u'xPed', codigo=u'ZB03', tamanho=[1, 60], raiz=u'//NFe/infNFe/compra', obrigatorio=False)
        self.xCont = TagCaracter(nome=u'xCont', codigo=u'ZB04', tamanho=[1, 60], raiz=u'//NFe/infNFe/compra', obrigatorio=False)

    def _tags(self):
        return (self.xNEmp, self.xPed, self.xCont)

    def get_xml(self):
        # Optional group: omitted when every field is empty.
        if not any(t.valor for t in self._tags()):
            return u''
        partes = [XMLNFe.get_xml(self), u'<compra>']
        partes.extend(t.xml for t in self._tags())
        partes.append(u'</compra>')
        return u''.join(partes)

    def set_xml(self, arquivo):
        if self._le_xml(arquivo):
            for t in self._tags():
                t.xml = arquivo

    xml = property(get_xml, set_xml)
class Exporta(XMLNFe):
    """<exporta> group: export information (shipping state and place)."""

    def __init__(self):
        super(Exporta, self).__init__()
        self.UFEmbarq = TagCaracter(nome=u'UFEmbarq', codigo=u'ZA02', tamanho=[2, 2], raiz=u'//NFe/infNFe/exporta', obrigatorio=False)
        self.xLocEmbarq = TagCaracter(nome=u'xLocEmbarq', codigo=u'ZA03', tamanho=[1, 60], raiz=u'//NFe/infNFe/exporta', obrigatorio=False)

    def get_xml(self):
        # Optional group: omitted when both fields are empty.
        if not (self.UFEmbarq.valor or self.xLocEmbarq.valor):
            return u''
        return (XMLNFe.get_xml(self) + u'<exporta>' +
                self.UFEmbarq.xml + self.xLocEmbarq.xml + u'</exporta>')

    def set_xml(self, arquivo):
        if self._le_xml(arquivo):
            self.UFEmbarq.xml = arquivo
            self.xLocEmbarq.xml = arquivo

    xml = property(get_xml, set_xml)
class ProcRef(XMLNFe):
    """<procRef> group: reference to a legal/administrative process."""

    def __init__(self):
        super(ProcRef, self).__init__()
        self.nProc = TagCaracter(nome=u'nProc', codigo=u'Z11', tamanho=[1, 60], raiz=u'//procRef')
        self.indProc = TagInteiro(nome=u'indProc', codigo=u'Z12', tamanho=[1, 1], raiz=u'//procRef')

    def get_xml(self):
        # Optional group: omitted when both fields are empty.
        if not (self.nProc.valor or self.indProc.valor):
            return u''
        return (XMLNFe.get_xml(self) + u'<procRef>' +
                self.nProc.xml + self.indProc.xml + u'</procRef>')

    def set_xml(self, arquivo):
        if self._le_xml(arquivo):
            self.nProc.xml = arquivo
            self.indProc.xml = arquivo

    xml = property(get_xml, set_xml)
class ObsFisco(XMLNFe):
    """<obsFisco> group: tax-authority observation (field/value pair)."""

    def __init__(self):
        super(ObsFisco, self).__init__()
        self.xCampo = TagCaracter(nome=u'xCampo', codigo=u'Z08', tamanho=[1, 20], raiz=u'//obsFisco')
        self.xTexto = TagCaracter(nome=u'xTexto', codigo=u'Z09', tamanho=[1, 60], raiz=u'//obsFisco')

    def get_xml(self):
        # Optional group: omitted when both fields are empty.
        if not (self.xCampo.valor or self.xTexto.valor):
            return u''
        return (XMLNFe.get_xml(self) + u'<obsFisco>' +
                self.xCampo.xml + self.xTexto.xml + u'</obsFisco>')

    def set_xml(self, arquivo):
        if self._le_xml(arquivo):
            self.xCampo.xml = arquivo
            self.xTexto.xml = arquivo

    xml = property(get_xml, set_xml)
class ObsCont(XMLNFe):
    """<obsCont> group: taxpayer observation (field/value pair)."""

    def __init__(self):
        super(ObsCont, self).__init__()
        self.xCampo = TagCaracter(nome=u'xCampo', codigo=u'Z05', tamanho=[1, 20], raiz=u'//obsCont')
        self.xTexto = TagCaracter(nome=u'xTexto', codigo=u'Z06', tamanho=[1, 60], raiz=u'//obsCont')

    def get_xml(self):
        # Optional group: omitted when both fields are empty.
        if not (self.xCampo.valor or self.xTexto.valor):
            return u''
        return (XMLNFe.get_xml(self) + u'<obsCont>' +
                self.xCampo.xml + self.xTexto.xml + u'</obsCont>')

    def set_xml(self, arquivo):
        if self._le_xml(arquivo):
            self.xCampo.xml = arquivo
            self.xTexto.xml = arquivo

    xml = property(get_xml, set_xml)
class InfAdic(XMLNFe):
    """<infAdic> group: additional fiscal and taxpayer information."""

    def __init__(self):
        super(InfAdic, self).__init__()
        self.infAdFisco = TagCaracter(nome=u'infAdFisco', codigo=u'Z02', tamanho=[1, 256], raiz=u'//NFe/infNFe/infAdic', obrigatorio=False)
        self.infCpl = TagCaracter(nome=u'infCpl', codigo=u'Z03', tamanho=[1, 5000], raiz=u'//NFe/infNFe/infAdic', obrigatorio=False)
        self.obsCont = []
        self.obsFisco = []
        self.procRef = []

    def get_xml(self):
        # Optional group: omitted when no text and no sub-occurrences exist.
        vazio = not (self.infAdFisco.valor or self.infCpl.valor or
                     self.obsCont or self.obsFisco or self.procRef)
        if vazio:
            return u''
        partes = [XMLNFe.get_xml(self), u'<infAdic>',
                  self.infAdFisco.xml, self.infCpl.xml]
        partes.extend(o.xml for o in self.obsCont)
        partes.extend(o.xml for o in self.obsFisco)
        partes.extend(p.xml for p in self.procRef)
        partes.append(u'</infAdic>')
        return u''.join(partes)

    def set_xml(self, arquivo):
        if self._le_xml(arquivo):
            self.infAdFisco.xml = arquivo
            self.infCpl.xml = arquivo
            # Multiple-occurrence groups must be re-rooted ("raiz" property)
            # and are read through le_grupo.
            self.obsCont = self.le_grupo('//NFe/infNFe/infAdic/obsCont', ObsCont)
            self.obsFisco = self.le_grupo('//NFe/infNFe/infAdic/obsFisco', ObsFisco)
            self.procRef = self.le_grupo('//NFe/infNFe/infAdic/procRef', ProcRef)

    xml = property(get_xml, set_xml)
class Dup(XMLNFe):
    """<dup> group: one billing installment (number, due date, amount)."""

    def __init__(self):
        super(Dup, self).__init__()
        self.nDup = TagCaracter(nome=u'nDup', codigo=u'Y08', tamanho=[1, 60], raiz=u'//dup', obrigatorio=False)
        self.dVenc = TagData(nome=u'dVenc', codigo=u'Y09', raiz=u'//dup', obrigatorio=False)
        self.vDup = TagDecimal(nome=u'vDup', codigo=u'Y10', tamanho=[1, 15, 1], decimais=[0, 2, 2], raiz=u'//dup', obrigatorio=False)

    def _tags(self):
        return (self.nDup, self.dVenc, self.vDup)

    def get_xml(self):
        # Optional group: omitted when every field is empty.
        if not any(t.valor for t in self._tags()):
            return u''
        partes = [XMLNFe.get_xml(self), u'<dup>']
        partes.extend(t.xml for t in self._tags())
        partes.append(u'</dup>')
        return u''.join(partes)

    def set_xml(self, arquivo):
        if self._le_xml(arquivo):
            for t in self._tags():
                t.xml = arquivo

    xml = property(get_xml, set_xml)
class Fat(XMLNFe):
    """<fat> group: invoice summary (number, original/discount/net amounts)."""

    def __init__(self):
        super(Fat, self).__init__()
        self.nFat = TagCaracter(nome=u'nFat', codigo=u'Y03', tamanho=[1, 60], raiz=u'//NFe/infNFe/cobr/fat', obrigatorio=False)
        self.vOrig = TagDecimal(nome=u'vOrig', codigo=u'Y04', tamanho=[1, 15, 1], decimais=[0, 2, 2], raiz=u'//NFe/infNFe/cobr/fat', obrigatorio=False)
        self.vDesc = TagDecimal(nome=u'vDesc', codigo=u'Y05', tamanho=[1, 15, 1], decimais=[0, 2, 2], raiz=u'//NFe/infNFe/cobr/fat', obrigatorio=False)
        self.vLiq = TagDecimal(nome=u'vLiq', codigo=u'Y06', tamanho=[1, 15, 1], decimais=[0, 2, 2], raiz=u'//NFe/infNFe/cobr/fat', obrigatorio=False)

    def _tags(self):
        return (self.nFat, self.vOrig, self.vDesc, self.vLiq)

    def get_xml(self):
        # Optional group: omitted when every field is empty.
        if not any(t.valor for t in self._tags()):
            return u''
        partes = [XMLNFe.get_xml(self), u'<fat>']
        partes.extend(t.xml for t in self._tags())
        partes.append(u'</fat>')
        return u''.join(partes)

    def set_xml(self, arquivo):
        if self._le_xml(arquivo):
            for t in self._tags():
                t.xml = arquivo

    xml = property(get_xml, set_xml)
class Cobr(XMLNFe):
    """<cobr> group: billing data (invoice summary plus installments)."""

    def __init__(self):
        super(Cobr, self).__init__()
        self.fat = Fat()
        self.dup = []

    def get_xml(self):
        fat_xml = self.fat.xml
        # Optional group: emitted only when there is an invoice summary or
        # at least one installment.
        if not (fat_xml or len(self.dup)):
            return u''
        partes = [XMLNFe.get_xml(self), u'<cobr>', fat_xml]
        partes.extend(d.xml for d in self.dup)
        partes.append(u'</cobr>')
        return u''.join(partes)

    def set_xml(self, arquivo):
        if self._le_xml(arquivo):
            self.fat.xml = arquivo
            # Multiple <dup> occurrences must be re-rooted ("raiz" property)
            # and are read through le_grupo.
            self.dup = self.le_grupo('//NFe/infNFe/cobr/dup', Dup)

    xml = property(get_xml, set_xml)
class Lacres(XMLNFe):
    """<lacres> group: one transport seal number."""

    def __init__(self):
        super(Lacres, self).__init__()
        self.nLacre = TagCaracter(nome=u'nLacre', codigo=u'X34', tamanho=[1, 60], raiz=u'//lacres')

    def get_xml(self):
        # Optional group: omitted when no seal number was informed.
        if not self.nLacre.valor:
            return u''
        return XMLNFe.get_xml(self) + u'<lacres>' + self.nLacre.xml + u'</lacres>'

    def set_xml(self, arquivo):
        if self._le_xml(arquivo):
            self.nLacre.xml = arquivo

    xml = property(get_xml, set_xml)
class Vol(XMLNFe):
    """<vol> group: transported volumes (quantity, species, brand, weights, seals).

    On the DANFE, a zero/absent quantity or weight should not be printed;
    the Tag*Volume subclasses below implement that suppression.
    """
    class TagInteiroVolume(TagInteiro):
        # Suppress a zero/absent quantity on the DANFE printout.
        def formato_danfe(self):
            if not self._valor_inteiro:
                return u''
            else:
                return super(Vol.TagInteiroVolume, self).formato_danfe()

    class TagDecimalVolume(TagDecimal):
        # Suppress a zero/absent weight on the DANFE printout.
        def formato_danfe(self):
            if not self._valor_decimal:
                return u''
            else:
                return super(Vol.TagDecimalVolume, self).formato_danfe()

    def __init__(self, xml=None):
        super(Vol, self).__init__()
        self.qVol = TagInteiro(nome=u'qVol', codigo=u'X27', tamanho=[1, 15], raiz=u'//vol', obrigatorio=False)
        self.esp = TagCaracter(nome=u'esp', codigo=u'X28', tamanho=[1, 60], raiz=u'//vol', obrigatorio=False)
        self.marca = TagCaracter(nome=u'marca', codigo=u'X29', tamanho=[1, 60], raiz=u'//vol', obrigatorio=False)
        self.nVol = TagCaracter(nome=u'nVol', codigo=u'X30', tamanho=[1, 60], raiz=u'//vol', obrigatorio=False)
        # BUG FIX: the two tags below were constructed with the misspelled
        # keyword "codiog", so the field code never reached the tag as
        # "codigo". Corrected to the spelling used everywhere else.
        self.pesoL = TagDecimal(nome=u'pesoL', codigo=u'X31', tamanho=[1, 15, 1], decimais=[0, 3, 3], raiz=u'//vol', obrigatorio=False)
        self.pesoB = TagDecimal(nome=u'pesoB', codigo=u'X32', tamanho=[1, 15, 1], decimais=[0, 3, 3], raiz=u'//vol', obrigatorio=False)
        self.lacres = []

    def get_xml(self):
        # BUG FIX: the guard previously read len(self.lacres.nLacre), but
        # self.lacres is a *list* of Lacres objects, so that raised
        # AttributeError. Test the list length instead.
        if not (self.qVol.valor or self.esp.valor or self.marca.valor or
                self.nVol.valor or self.pesoL.valor or self.pesoB.valor or
                len(self.lacres)):
            return u''
        xml = XMLNFe.get_xml(self)
        xml += u'<vol>'
        xml += self.qVol.xml
        xml += self.esp.xml
        xml += self.marca.xml
        xml += self.nVol.xml
        xml += self.pesoL.xml
        xml += self.pesoB.xml
        for l in self.lacres:
            xml += l.xml
        xml += u'</vol>'
        return xml

    def set_xml(self, arquivo):
        if self._le_xml(arquivo):
            self.qVol.xml = arquivo
            self.esp.xml = arquivo
            self.marca.xml = arquivo
            self.nVol.xml = arquivo
            self.pesoL.xml = arquivo
            self.pesoB.xml = arquivo
            # Multiple <lacres> occurrences must be re-rooted ("raiz"
            # property) and are read through le_grupo.
            self.lacres = self.le_grupo('//vol/lacres', Lacres)

    xml = property(get_xml, set_xml)
class Reboque(XMLNFe):
    """<reboque> group: trailer identification (plate, state, RNTC)."""

    def __init__(self):
        super(Reboque, self).__init__()
        self.placa = TagCaracter(nome=u'placa', codigo=u'X23', tamanho=[1, 8], raiz=u'//reboque')
        self.UF = TagCaracter(nome=u'UF', codigo=u'X24', tamanho=[2, 2], raiz=u'//reboque')
        self.RNTC = TagCaracter(nome=u'RNTC', codigo=u'X25', tamanho=[1, 20], raiz=u'//reboque', obrigatorio=False)

    def _tags(self):
        return (self.placa, self.UF, self.RNTC)

    def get_xml(self):
        # Optional group: omitted when every field is empty.
        if not any(t.valor for t in self._tags()):
            return u''
        partes = [XMLNFe.get_xml(self), u'<reboque>']
        partes.extend(t.xml for t in self._tags())
        partes.append(u'</reboque>')
        return u''.join(partes)

    def set_xml(self, arquivo):
        if self._le_xml(arquivo):
            for t in self._tags():
                t.xml = arquivo

    xml = property(get_xml, set_xml)
class VeicTransp(XMLNFe):
    """<veicTransp> group: transport vehicle identification (plate, state, RNTC)."""

    def __init__(self):
        super(VeicTransp, self).__init__()
        self.placa = TagCaracter(nome=u'placa', codigo=u'X19', tamanho=[1, 8], raiz=u'//NFe/infNFe/transp/veicTransp')
        self.UF = TagCaracter(nome=u'UF', codigo=u'X20', tamanho=[2, 2], raiz=u'//NFe/infNFe/transp/veicTransp')
        self.RNTC = TagCaracter(nome=u'RNTC', codigo=u'X21', tamanho=[1, 20], raiz=u'//NFe/infNFe/transp/veicTransp', obrigatorio=False)

    def _tags(self):
        return (self.placa, self.UF, self.RNTC)

    def get_xml(self):
        # Optional group: omitted when every field is empty.
        if not any(t.valor for t in self._tags()):
            return u''
        partes = [XMLNFe.get_xml(self), u'<veicTransp>']
        partes.extend(t.xml for t in self._tags())
        partes.append(u'</veicTransp>')
        return u''.join(partes)

    def set_xml(self, arquivo):
        if self._le_xml(arquivo):
            for t in self._tags():
                t.xml = arquivo

    xml = property(get_xml, set_xml)
class RetTransp(XMLNFe):
    """<retTransp> group: ICMS withheld on the freight service (fields X12..X17)."""

    def __init__(self):
        super(RetTransp, self).__init__()
        self.vServ = TagDecimal(nome=u'vServ', codigo=u'X12', tamanho=[1, 15, 1], decimais=[0, 2, 2], raiz=u'//NFe/infNFe/transp/retTransp')
        self.vBCRet = TagDecimal(nome=u'vBCRet', codigo=u'X13', tamanho=[1, 15, 1], decimais=[0, 2, 2], raiz=u'//NFe/infNFe/transp/retTransp')
        # BUG FIX: field X14 is the retention *rate* <pICMSRet>; its tag name
        # was wrongly set to u'vICMSRet', duplicating field X15's tag in the
        # generated XML and preventing the rate from being read back.
        self.pICMSRet = TagDecimal(nome=u'pICMSRet', codigo=u'X14', tamanho=[1, 15, 1], decimais=[0, 2, 2], raiz=u'//NFe/infNFe/transp/retTransp')
        self.vICMSRet = TagDecimal(nome=u'vICMSRet', codigo=u'X15', tamanho=[1, 15, 1], decimais=[0, 2, 2], raiz=u'//NFe/infNFe/transp/retTransp')
        self.CFOP = TagInteiro(nome=u'CFOP', codigo=u'X16', tamanho=[4, 4, 4], raiz=u'//NFe/infNFe/transp/retTransp')
        self.cMunFG = TagInteiro(nome=u'cMunFG', codigo=u'X17', tamanho=[7, 7, 7], raiz=u'//NFe/infNFe/transp/retTransp')

    def get_xml(self):
        # Optional group: omitted when every field is empty/zero.
        if not (self.vServ.valor or self.vBCRet.valor or self.pICMSRet.valor or
                self.vICMSRet.valor or self.CFOP.valor or self.cMunFG.valor):
            return u''
        xml = XMLNFe.get_xml(self)
        xml += u'<retTransp>'
        xml += self.vServ.xml
        xml += self.vBCRet.xml
        xml += self.pICMSRet.xml
        xml += self.vICMSRet.xml
        xml += self.CFOP.xml
        xml += self.cMunFG.xml
        xml += u'</retTransp>'
        return xml

    def set_xml(self, arquivo):
        if self._le_xml(arquivo):
            self.vServ.xml = arquivo
            self.vBCRet.xml = arquivo
            self.pICMSRet.xml = arquivo
            self.vICMSRet.xml = arquivo
            self.CFOP.xml = arquivo
            self.cMunFG.xml = arquivo

    xml = property(get_xml, set_xml)
class Transporta(XMLNFe):
    """<transporta> group: carrier identification (CNPJ/CPF, name, address)."""

    def __init__(self):
        super(Transporta, self).__init__()
        self.CNPJ = TagCaracter(nome=u'CNPJ', codigo=u'X04', tamanho=[14, 14], raiz=u'//NFe/infNFe/transp/transporta', obrigatorio=False)
        self.CPF = TagCaracter(nome=u'CPF', codigo=u'X05', tamanho=[11, 11], raiz=u'//NFe/infNFe/transp/transporta', obrigatorio=False)
        self.xNome = TagCaracter(nome=u'xNome', codigo=u'X06', tamanho=[1, 60], raiz=u'//NFe/infNFe/transp/transporta', obrigatorio=False)
        self.IE = TagCaracter(nome=u'IE', codigo=u'X07', tamanho=[2, 14], raiz=u'//NFe/infNFe/transp/transporta', obrigatorio=False)
        self.xEnder = TagCaracter(nome=u'xEnder', codigo=u'X08', tamanho=[1, 60], raiz=u'//NFe/infNFe/transp/transporta', obrigatorio=False)
        self.xMun = TagCaracter(nome=u'xMun', codigo=u'X09', tamanho=[1, 60], raiz=u'//NFe/infNFe/transp/transporta', obrigatorio=False)
        self.UF = TagCaracter(nome=u'UF', codigo=u'X10', tamanho=[2, 2], raiz=u'//NFe/infNFe/transp/transporta', obrigatorio=False)

    def _tags(self):
        # Serialization order required by the layout (X04..X10).
        return (self.CNPJ, self.CPF, self.xNome, self.IE,
                self.xEnder, self.xMun, self.UF)

    def get_xml(self):
        # Optional group: omitted when every field is empty.
        if not any(t.valor for t in self._tags()):
            return u''
        partes = [XMLNFe.get_xml(self), u'<transporta>']
        partes.extend(t.xml for t in self._tags())
        partes.append(u'</transporta>')
        return u''.join(partes)

    def set_xml(self, arquivo):
        if self._le_xml(arquivo):
            for t in self._tags():
                t.xml = arquivo

    xml = property(get_xml, set_xml)
class Transp(XMLNFe):
    """Transport group (transp) of the NF-e: freight mode, carrier,
    withheld ICMS, vehicle, trailers and volumes."""

    def __init__(self):
        super(Transp, self).__init__()
        self.modFrete = TagInteiro(nome=u'modFrete', codigo=u'X02', tamanho=[1, 1, 1], raiz=u'//NFe/infNFe/transp')
        self.transporta = Transporta()
        self.retTransp = RetTransp()
        self.veicTransp = VeicTransp()
        # Repeating groups: lists of Reboque and Vol instances.
        self.reboque = []
        self.vol = []

    def get_xml(self):
        """Serialize <transp> with its fixed children, then each trailer
        and each volume in order."""
        partes = [XMLNFe.get_xml(self), u'<transp>',
                  self.modFrete.xml, self.transporta.xml,
                  self.retTransp.xml, self.veicTransp.xml]
        partes.extend(r.xml for r in self.reboque)
        partes.extend(v.xml for v in self.vol)
        partes.append(u'</transp>')
        return u''.join(partes)

    def set_xml(self, arquivo):
        """Read the transport group from *arquivo*."""
        if self._le_xml(arquivo):
            for filho in (self.modFrete, self.transporta,
                          self.retTransp, self.veicTransp):
                filho.xml = arquivo
            #
            # Technique for reading repeated tags: the classes of those
            # tags, and their children, are re-rooted (raiz property) by
            # le_grupo so every occurrence is read correctly.
            #
            self.reboque = self.le_grupo('//NFe/infNFe/transp/reboque', Reboque)
            self.vol = self.le_grupo('//NFe/infNFe/transp/vol', Vol)

    xml = property(get_xml, set_xml)
class RetTrib(XMLNFe):
    """Withheld federal taxes totals (retTrib) under /NFe/infNFe/total."""

    def __init__(self):
        super(RetTrib, self).__init__()
        self.vRetPIS = TagDecimal(nome=u'vRetPIS', codigo=u'W24', tamanho=[1, 15, 1], decimais=[0, 2, 2], raiz=u'//NFe/infNFe/total/retTrib', obrigatorio=False)
        self.vRetCOFINS = TagDecimal(nome=u'vRetCOFINS', codigo=u'W25', tamanho=[1, 15, 1], decimais=[0, 2, 2], raiz=u'//NFe/infNFe/total/retTrib', obrigatorio=False)
        self.vRetCSLL = TagDecimal(nome=u'vRetCSLL', codigo=u'W26', tamanho=[1, 15, 1], decimais=[0, 2, 2], raiz=u'//NFe/infNFe/total/retTrib', obrigatorio=False)
        self.vBCIRRF = TagDecimal(nome=u'vBCIRRF', codigo=u'W27', tamanho=[1, 15, 1], decimais=[0, 2, 2], raiz=u'//NFe/infNFe/total/retTrib', obrigatorio=False)
        self.vIRRF = TagDecimal(nome=u'vIRRF', codigo=u'W28', tamanho=[1, 15, 1], decimais=[0, 2, 2], raiz=u'//NFe/infNFe/total/retTrib', obrigatorio=False)
        self.vBCRetPrev = TagDecimal(nome=u'vBCRetPrev', codigo=u'W29', tamanho=[1, 15, 1], decimais=[0, 2, 2], raiz=u'//NFe/infNFe/total/retTrib', obrigatorio=False)
        self.vRetPrev = TagDecimal(nome=u'vRetPrev', codigo=u'W30', tamanho=[1, 15, 1], decimais=[0, 2, 2], raiz=u'//NFe/infNFe/total/retTrib', obrigatorio=False)

    def _campos(self):
        # Child tags in schema order.
        return (self.vRetPIS, self.vRetCOFINS, self.vRetCSLL, self.vBCIRRF,
                self.vIRRF, self.vBCRetPrev, self.vRetPrev)

    def get_xml(self):
        """Serialize <retTrib>; returns u'' when no amount is filled."""
        if not any(c.valor for c in self._campos()):
            return u''
        partes = [XMLNFe.get_xml(self), u'<retTrib>']
        partes.extend(c.xml for c in self._campos())
        partes.append(u'</retTrib>')
        return u''.join(partes)

    def set_xml(self, arquivo):
        """Read every withheld amount from *arquivo*."""
        if self._le_xml(arquivo):
            for c in self._campos():
                c.xml = arquivo

    xml = property(get_xml, set_xml)
class ISSQNTot(XMLNFe):
    """ISSQN totals (ISSQNtot) under /NFe/infNFe/total."""

    def __init__(self):
        super(ISSQNTot, self).__init__()
        self.vServ = TagDecimal(nome=u'vServ', codigo=u'W18', tamanho=[1, 15, 1], decimais=[0, 2, 2], raiz=u'//NFe/infNFe/total/ISSQNtot', obrigatorio=False)
        self.vBC = TagDecimal(nome=u'vBC', codigo=u'W19', tamanho=[1, 15, 1], decimais=[0, 2, 2], raiz=u'//NFe/infNFe/total/ISSQNtot', obrigatorio=False)
        self.vISS = TagDecimal(nome=u'vISS', codigo=u'W20', tamanho=[1, 15, 1], decimais=[0, 2, 2], raiz=u'//NFe/infNFe/total/ISSQNtot', obrigatorio=False)
        self.vPIS = TagDecimal(nome=u'vPIS', codigo=u'W21', tamanho=[1, 15, 1], decimais=[0, 2, 2], raiz=u'//NFe/infNFe/total/ISSQNtot', obrigatorio=False)
        self.vCOFINS = TagDecimal(nome=u'vCOFINS', codigo=u'W22', tamanho=[1, 15, 1], decimais=[0, 2, 2], raiz=u'//NFe/infNFe/total/ISSQNtot', obrigatorio=False)

    def _campos(self):
        # Child tags in schema order.
        return (self.vServ, self.vBC, self.vISS, self.vPIS, self.vCOFINS)

    def get_xml(self):
        """Serialize <ISSQNtot>; returns u'' when no amount is filled."""
        if not any(c.valor for c in self._campos()):
            return u''
        partes = [XMLNFe.get_xml(self), u'<ISSQNtot>']
        partes.extend(c.xml for c in self._campos())
        partes.append(u'</ISSQNtot>')
        return u''.join(partes)

    def set_xml(self, arquivo):
        """Read every amount from *arquivo*."""
        if self._le_xml(arquivo):
            for c in self._campos():
                c.xml = arquivo

    xml = property(get_xml, set_xml)
class ICMSTot(XMLNFe):
    """ICMS totals (ICMSTot) under /NFe/infNFe/total; all fields mandatory."""

    def __init__(self):
        super(ICMSTot, self).__init__()
        self.vBC = TagDecimal(nome=u'vBC', codigo=u'W03', tamanho=[1, 15, 1], decimais=[0, 2, 2], raiz=u'//NFe/infNFe/total/ICMSTot')
        self.vICMS = TagDecimal(nome=u'vICMS', codigo=u'W04', tamanho=[1, 15, 1], decimais=[0, 2, 2], raiz=u'//NFe/infNFe/total/ICMSTot')
        self.vBCST = TagDecimal(nome=u'vBCST', codigo=u'W05', tamanho=[1, 15, 1], decimais=[0, 2, 2], raiz=u'//NFe/infNFe/total/ICMSTot')
        self.vST = TagDecimal(nome=u'vST', codigo=u'W06', tamanho=[1, 15, 1], decimais=[0, 2, 2], raiz=u'//NFe/infNFe/total/ICMSTot')
        self.vProd = TagDecimal(nome=u'vProd', codigo=u'W07', tamanho=[1, 15, 1], decimais=[0, 2, 2], raiz=u'//NFe/infNFe/total/ICMSTot')
        self.vFrete = TagDecimal(nome=u'vFrete', codigo=u'W08', tamanho=[1, 15, 1], decimais=[0, 2, 2], raiz=u'//NFe/infNFe/total/ICMSTot')
        self.vSeg = TagDecimal(nome=u'vSeg', codigo=u'W09', tamanho=[1, 15, 1], decimais=[0, 2, 2], raiz=u'//NFe/infNFe/total/ICMSTot')
        self.vDesc = TagDecimal(nome=u'vDesc', codigo=u'W10', tamanho=[1, 15, 1], decimais=[0, 2, 2], raiz=u'//NFe/infNFe/total/ICMSTot')
        self.vII = TagDecimal(nome=u'vII', codigo=u'W11', tamanho=[1, 15, 1], decimais=[0, 2, 2], raiz=u'//NFe/infNFe/total/ICMSTot')
        self.vIPI = TagDecimal(nome=u'vIPI', codigo=u'W12', tamanho=[1, 15, 1], decimais=[0, 2, 2], raiz=u'//NFe/infNFe/total/ICMSTot')
        self.vPIS = TagDecimal(nome=u'vPIS', codigo=u'W13', tamanho=[1, 15, 1], decimais=[0, 2, 2], raiz=u'//NFe/infNFe/total/ICMSTot')
        self.vCOFINS = TagDecimal(nome=u'vCOFINS', codigo=u'W14', tamanho=[1, 15, 1], decimais=[0, 2, 2], raiz=u'//NFe/infNFe/total/ICMSTot')
        self.vOutro = TagDecimal(nome=u'vOutro', codigo=u'W15', tamanho=[1, 15, 1], decimais=[0, 2, 2], raiz=u'//NFe/infNFe/total/ICMSTot')
        self.vNF = TagDecimal(nome=u'vNF', codigo=u'W16', tamanho=[1, 15, 1], decimais=[0, 2, 2], raiz=u'//NFe/infNFe/total/ICMSTot')

    def _campos(self):
        # Child tags in schema order.
        return (self.vBC, self.vICMS, self.vBCST, self.vST, self.vProd,
                self.vFrete, self.vSeg, self.vDesc, self.vII, self.vIPI,
                self.vPIS, self.vCOFINS, self.vOutro, self.vNF)

    def get_xml(self):
        """Serialize <ICMSTot> (always emitted)."""
        partes = [XMLNFe.get_xml(self), u'<ICMSTot>']
        partes.extend(c.xml for c in self._campos())
        partes.append(u'</ICMSTot>')
        return u''.join(partes)

    def set_xml(self, arquivo):
        """Read every total from *arquivo*."""
        if self._le_xml(arquivo):
            for c in self._campos():
                c.xml = arquivo

    xml = property(get_xml, set_xml)
class Total(XMLNFe):
    """Totals group (total) of the NF-e: ICMS, ISSQN and withheld taxes."""

    def __init__(self):
        super(Total, self).__init__()
        self.ICMSTot = ICMSTot()
        self.ISSQNTot = ISSQNTot()
        self.retTrib = RetTrib()

    def _filhos(self):
        # Subgroups in schema order.
        return (self.ICMSTot, self.ISSQNTot, self.retTrib)

    def get_xml(self):
        """Serialize <total> with its three subtotal groups."""
        partes = [XMLNFe.get_xml(self), u'<total>']
        partes.extend(f.xml for f in self._filhos())
        partes.append(u'</total>')
        return u''.join(partes)

    def set_xml(self, arquivo):
        """Read the three subtotal groups from *arquivo*."""
        if self._le_xml(arquivo):
            for f in self._filhos():
                f.xml = arquivo

    xml = property(get_xml, set_xml)
class Entrega(XMLNFe):
    """Delivery location (entrega) of the NF-e."""

    def __init__(self):
        super(Entrega, self).__init__()
        self.CNPJ = TagCaracter(nome=u'CNPJ', codigo=u'G01', tamanho=[14, 14], raiz=u'//NFe/infNFe/entrega')
        self.xLgr = TagCaracter(nome=u'xLgr', codigo=u'G02', tamanho=[2, 60], raiz=u'//NFe/infNFe/entrega')
        self.nro = TagCaracter(nome=u'nro', codigo=u'G03', tamanho=[1, 60], raiz=u'//NFe/infNFe/entrega')
        self.xCpl = TagCaracter(nome=u'xCpl', codigo=u'G04', tamanho=[1, 60], raiz=u'//NFe/infNFe/entrega', obrigatorio=False)
        self.xBairro = TagCaracter(nome=u'xBairro', codigo=u'G05', tamanho=[2, 60], raiz=u'//NFe/infNFe/entrega')
        self.cMun = TagInteiro(nome=u'cMun', codigo=u'G06', tamanho=[7, 7, 7], raiz=u'//NFe/infNFe/entrega')
        self.xMun = TagCaracter(nome=u'xMun', codigo=u'G07', tamanho=[2, 60], raiz=u'//NFe/infNFe/entrega')
        self.UF = TagCaracter(nome=u'UF', codigo=u'G08', tamanho=[2, 2], raiz=u'//NFe/infNFe/entrega')

    def _campos(self):
        # Child tags in schema order.
        return (self.CNPJ, self.xLgr, self.nro, self.xCpl,
                self.xBairro, self.cMun, self.xMun, self.UF)

    def get_xml(self):
        """Serialize <entrega>; returns u'' when no CNPJ is set."""
        if not len(self.CNPJ.valor):
            return u''
        partes = [XMLNFe.get_xml(self), u'<entrega>']
        partes.extend(c.xml for c in self._campos())
        partes.append(u'</entrega>')
        return u''.join(partes)

    def set_xml(self, arquivo):
        """Read the delivery address from *arquivo*."""
        if self._le_xml(arquivo):
            for c in self._campos():
                c.xml = arquivo

    xml = property(get_xml, set_xml)
class Retirada(XMLNFe):
    """Pickup location (retirada) of the NF-e."""

    def __init__(self):
        super(Retirada, self).__init__()
        self.CNPJ = TagCaracter(nome=u'CNPJ', codigo=u'F01', tamanho=[14, 14], raiz=u'//NFe/infNFe/retirada')
        self.xLgr = TagCaracter(nome=u'xLgr', codigo=u'F02', tamanho=[2, 60], raiz=u'//NFe/infNFe/retirada')
        self.nro = TagCaracter(nome=u'nro', codigo=u'F03', tamanho=[1, 60], raiz=u'//NFe/infNFe/retirada')
        self.xCpl = TagCaracter(nome=u'xCpl', codigo=u'F04', tamanho=[1, 60], raiz=u'//NFe/infNFe/retirada', obrigatorio=False)
        self.xBairro = TagCaracter(nome=u'xBairro', codigo=u'F05', tamanho=[2, 60], raiz=u'//NFe/infNFe/retirada')
        self.cMun = TagInteiro(nome=u'cMun', codigo=u'F06', tamanho=[7, 7, 7], raiz=u'//NFe/infNFe/retirada')
        self.xMun = TagCaracter(nome=u'xMun', codigo=u'F07', tamanho=[2, 60], raiz=u'//NFe/infNFe/retirada')
        self.UF = TagCaracter(nome=u'UF', codigo=u'F08', tamanho=[2, 2], raiz=u'//NFe/infNFe/retirada')

    def _campos(self):
        # Child tags in schema order.
        return (self.CNPJ, self.xLgr, self.nro, self.xCpl,
                self.xBairro, self.cMun, self.xMun, self.UF)

    def get_xml(self):
        """Serialize <retirada>; returns u'' when no CNPJ is set."""
        if not len(self.CNPJ.valor):
            return u''
        partes = [XMLNFe.get_xml(self), u'<retirada>']
        partes.extend(c.xml for c in self._campos())
        partes.append(u'</retirada>')
        return u''.join(partes)

    def set_xml(self, arquivo):
        """Read the pickup address from *arquivo*."""
        if self._le_xml(arquivo):
            for c in self._campos():
                c.xml = arquivo

    xml = property(get_xml, set_xml)
class EnderDest(XMLNFe):
    """Recipient address (enderDest) under /NFe/infNFe/dest."""

    def __init__(self):
        super(EnderDest, self).__init__()
        self.xLgr = TagCaracter(nome=u'xLgr', codigo=u'E06', tamanho=[2, 60], raiz=u'//NFe/infNFe/dest/enderDest')
        self.nro = TagCaracter(nome=u'nro', codigo=u'E07', tamanho=[1, 60], raiz=u'//NFe/infNFe/dest/enderDest')
        self.xCpl = TagCaracter(nome=u'xCpl', codigo=u'E08', tamanho=[1, 60], raiz=u'//NFe/infNFe/dest/enderDest', obrigatorio=False)
        self.xBairro = TagCaracter(nome=u'xBairro', codigo=u'E09', tamanho=[2, 60], raiz=u'//NFe/infNFe/dest/enderDest')
        self.cMun = TagInteiro(nome=u'cMun', codigo=u'E10', tamanho=[7, 7, 7], raiz=u'//NFe/infNFe/dest/enderDest')
        self.xMun = TagCaracter(nome=u'xMun', codigo=u'E11', tamanho=[2, 60], raiz=u'//NFe/infNFe/dest/enderDest')
        self.UF = TagCaracter(nome=u'UF', codigo=u'E12', tamanho=[2, 2], raiz=u'//NFe/infNFe/dest/enderDest')
        self.CEP = TagCaracter(nome=u'CEP', codigo=u'E13', tamanho=[8, 8, 8], raiz=u'//NFe/infNFe/dest/enderDest', obrigatorio=False)
        self.cPais = TagInteiro(nome=u'cPais', codigo=u'E14', tamanho=[4, 4, 4], raiz=u'//NFe/infNFe/dest/enderDest', obrigatorio=False)
        self.xPais = TagCaracter(nome=u'xPais', codigo=u'E15', tamanho=[1, 60], raiz=u'//NFe/infNFe/dest/enderDest', obrigatorio=False)
        self.fone = TagInteiro(nome=u'fone', codigo=u'E16', tamanho=[1, 10], raiz=u'//NFe/infNFe/dest/enderDest', obrigatorio=False)

    def _campos(self):
        # Child tags in schema order.
        return (self.xLgr, self.nro, self.xCpl, self.xBairro, self.cMun,
                self.xMun, self.UF, self.CEP, self.cPais, self.xPais,
                self.fone)

    def get_xml(self):
        """Serialize <enderDest> (always emitted)."""
        partes = [XMLNFe.get_xml(self), u'<enderDest>']
        partes.extend(c.xml for c in self._campos())
        partes.append(u'</enderDest>')
        return u''.join(partes)

    def set_xml(self, arquivo):
        """Read the recipient address from *arquivo*."""
        if self._le_xml(arquivo):
            for c in self._campos():
                c.xml = arquivo

    xml = property(get_xml, set_xml)
class Dest(XMLNFe):
    """Recipient (dest) of the NF-e."""

    def __init__(self):
        super(Dest, self).__init__()
        self.CNPJ = TagCaracter(nome=u'CNPJ', codigo=u'E02', tamanho=[0, 14], raiz=u'//NFe/infNFe/dest', obrigatorio=False)
        self.CPF = TagCaracter(nome=u'CPF', codigo=u'E03', tamanho=[11, 11], raiz=u'//NFe/infNFe/dest', obrigatorio=False)
        self.xNome = TagCaracter(nome=u'xNome', codigo=u'E04', tamanho=[2, 60], raiz=u'//NFe/infNFe/dest')
        self.enderDest = EnderDest()
        self.IE = TagCaracter(nome=u'IE', codigo=u'E17', tamanho=[2, 14], raiz=u'//NFe/infNFe/dest')
        self.ISUF = TagCaracter(nome=u'ISUF', codigo=u'E18', tamanho=[9, 9], raiz=u'//NFe/infNFe/dest', obrigatorio=False)

    def get_xml(self):
        """Serialize <dest>; a filled CPF takes precedence over the CNPJ."""
        documento = self.CPF.xml if self.CPF.valor else self.CNPJ.xml
        partes = [XMLNFe.get_xml(self), u'<dest>', documento,
                  self.xNome.xml, self.enderDest.xml,
                  self.IE.xml, self.ISUF.xml, u'</dest>']
        return u''.join(partes)

    def set_xml(self, arquivo):
        """Read the recipient data from *arquivo*."""
        if self._le_xml(arquivo):
            for filho in (self.CNPJ, self.CPF, self.xNome,
                          self.enderDest, self.IE, self.ISUF):
                filho.xml = arquivo

    xml = property(get_xml, set_xml)
class Avulsa(XMLNFe):
    """Data of an NF-e issued by the tax authority on the taxpayer's
    behalf (avulsa)."""

    def __init__(self):
        super(Avulsa, self).__init__()
        self.CNPJ = TagCaracter(nome=u'CNPJ', codigo=u'D02', tamanho=[14, 14], raiz=u'//NFe/infNFe/avulsa')
        self.xOrgao = TagCaracter(nome=u'xOrgao', codigo=u'D03', tamanho=[1, 60], raiz=u'//NFe/infNFe/avulsa')
        self.matr = TagCaracter(nome=u'matr', codigo=u'D04', tamanho=[1, 60], raiz=u'//NFe/infNFe/avulsa')
        self.xAgente = TagCaracter(nome=u'xAgente', codigo=u'D05', tamanho=[1, 60], raiz=u'//NFe/infNFe/avulsa')
        self.fone = TagInteiro(nome=u'fone', codigo=u'D06', tamanho=[1, 10], raiz=u'//NFe/infNFe/avulsa')
        self.UF = TagCaracter(nome=u'UF', codigo=u'D07', tamanho=[2, 2], raiz=u'//NFe/infNFe/avulsa')
        self.nDAR = TagCaracter(nome=u'nDAR', codigo=u'D08', tamanho=[1, 60], raiz=u'//NFe/infNFe/avulsa')
        self.dEmi = TagData(nome=u'dEmi', codigo=u'D09', raiz=u'//NFe/infNFe/avulsa')
        self.vDAR = TagDecimal(nome=u'vDAR', codigo=u'D10', tamanho=[1, 15], decimais=[0, 2, 2], raiz=u'//NFe/infNFe/avulsa')
        self.repEmi = TagCaracter(nome=u'repEmi', codigo=u'D11', tamanho=[1, 60], raiz=u'//NFe/infNFe/avulsa')
        self.dPag = TagData(nome=u'dPag', codigo=u'D12', raiz=u'//NFe/infNFe/avulsa', obrigatorio=False)

    def _campos(self):
        # Child tags in schema order.
        return (self.CNPJ, self.xOrgao, self.matr, self.xAgente, self.fone,
                self.UF, self.nDAR, self.dEmi, self.vDAR, self.repEmi,
                self.dPag)

    def get_xml(self):
        """Serialize <avulsa>; returns u'' when no CNPJ is set."""
        if not len(self.CNPJ.valor):
            return u''
        partes = [XMLNFe.get_xml(self), u'<avulsa>']
        partes.extend(c.xml for c in self._campos())
        partes.append(u'</avulsa>')
        return u''.join(partes)

    def set_xml(self, arquivo):
        """Read every child tag from *arquivo*."""
        if self._le_xml(arquivo):
            for c in self._campos():
                c.xml = arquivo

    xml = property(get_xml, set_xml)
class EnderEmit(XMLNFe):
    """Issuer address (enderEmit) under /NFe/infNFe/emit."""

    def __init__(self):
        super(EnderEmit, self).__init__()
        self.xLgr = TagCaracter(nome=u'xLgr', codigo=u'C06', tamanho=[2, 60], raiz=u'//NFe/infNFe/emit/enderEmit')
        self.nro = TagCaracter(nome=u'nro', codigo=u'C07', tamanho=[1, 60], raiz=u'//NFe/infNFe/emit/enderEmit')
        self.xCpl = TagCaracter(nome=u'xCpl', codigo=u'C08', tamanho=[1, 60], raiz=u'//NFe/infNFe/emit/enderEmit', obrigatorio=False)
        self.xBairro = TagCaracter(nome=u'xBairro', codigo=u'C09', tamanho=[2, 60], raiz=u'//NFe/infNFe/emit/enderEmit')
        self.cMun = TagInteiro(nome=u'cMun', codigo=u'C10', tamanho=[7, 7, 7], raiz=u'//NFe/infNFe/emit/enderEmit')
        self.xMun = TagCaracter(nome=u'xMun', codigo=u'C11', tamanho=[2, 60], raiz=u'//NFe/infNFe/emit/enderEmit')
        self.UF = TagCaracter(nome=u'UF', codigo=u'C12', tamanho=[2, 2], raiz=u'//NFe/infNFe/emit/enderEmit')
        self.CEP = TagCaracter(nome=u'CEP', codigo=u'C13', tamanho=[8, 8, 8], raiz=u'//NFe/infNFe/emit/enderEmit', obrigatorio=False)
        self.cPais = TagInteiro(nome=u'cPais', codigo=u'C14', tamanho=[4, 4, 4], raiz=u'//NFe/infNFe/emit/enderEmit', obrigatorio=False)
        self.xPais = TagCaracter(nome=u'xPais', codigo=u'C15', tamanho=[1, 60], raiz=u'//NFe/infNFe/emit/enderEmit', obrigatorio=False)
        self.fone = TagInteiro(nome=u'fone', codigo=u'C16', tamanho=[1, 10], raiz=u'//NFe/infNFe/emit/enderEmit', obrigatorio=False)

    def _campos(self):
        # Child tags in schema order.
        return (self.xLgr, self.nro, self.xCpl, self.xBairro, self.cMun,
                self.xMun, self.UF, self.CEP, self.cPais, self.xPais,
                self.fone)

    def get_xml(self):
        """Serialize <enderEmit> (always emitted)."""
        partes = [XMLNFe.get_xml(self), u'<enderEmit>']
        partes.extend(c.xml for c in self._campos())
        partes.append(u'</enderEmit>')
        return u''.join(partes)

    def set_xml(self, arquivo):
        """Read the issuer address from *arquivo*."""
        if self._le_xml(arquivo):
            for c in self._campos():
                c.xml = arquivo

    xml = property(get_xml, set_xml)
class Emit(XMLNFe):
    """Issuer (emit) of the NF-e."""

    def __init__(self):
        super(Emit, self).__init__()
        self.CNPJ = TagCaracter(nome=u'CNPJ', codigo=u'C02', tamanho=[14, 14], raiz=u'//NFe/infNFe/emit', obrigatorio=False)
        self.CPF = TagCaracter(nome=u'CPF', codigo=u'C02a', tamanho=[11, 11], raiz=u'//NFe/infNFe/emit', obrigatorio=False)
        self.xNome = TagCaracter(nome=u'xNome', codigo=u'C03', tamanho=[2, 60], raiz=u'//NFe/infNFe/emit')
        self.xFant = TagCaracter(nome=u'xFant', codigo=u'C04', tamanho=[1, 60], raiz=u'//NFe/infNFe/emit', obrigatorio=False)
        self.enderEmit = EnderEmit()
        self.IE = TagCaracter(nome=u'IE', codigo=u'C17', tamanho=[2, 14], raiz=u'//NFe/infNFe/emit', obrigatorio=False)
        self.IEST = TagCaracter(nome=u'IEST', codigo=u'C18', tamanho=[2, 14], raiz=u'//NFe/infNFe/emit', obrigatorio=False)
        self.IM = TagCaracter(nome=u'IM', codigo=u'C19', tamanho=[1, 15], raiz=u'//NFe/infNFe/emit', obrigatorio=False)
        self.CNAE = TagCaracter(nome=u'CNAE', codigo=u'C20', tamanho=[7, 7], raiz=u'//NFe/infNFe/emit', obrigatorio=False)

    def _filhos(self):
        # Children in schema order; both CNPJ and CPF are emitted as stored.
        return (self.CNPJ, self.CPF, self.xNome, self.xFant, self.enderEmit,
                self.IE, self.IEST, self.IM, self.CNAE)

    def get_xml(self):
        """Serialize <emit> (always emitted)."""
        partes = [XMLNFe.get_xml(self), u'<emit>']
        partes.extend(f.xml for f in self._filhos())
        partes.append(u'</emit>')
        return u''.join(partes)

    def set_xml(self, arquivo):
        """Read the issuer data from *arquivo*."""
        if self._le_xml(arquivo):
            for f in self._filhos():
                f.xml = arquivo

    xml = property(get_xml, set_xml)
class RefNF(XMLNFe):
    """Reference to a model 1/1A paper invoice (refNF)."""

    def __init__(self):
        super(RefNF, self).__init__()
        self.cUF = TagInteiro(nome=u'cUF', codigo=u'B15', tamanho=[2, 2, 2], raiz=u'//NFref/refNF')
        self.AAMM = TagCaracter(nome=u'AAMM', codigo=u'B16', tamanho=[4, 4, 4], raiz=u'//NFref/refNF')
        self.CNPJ = TagCaracter(nome=u'CNPJ', codigo=u'B17', tamanho=[14, 14], raiz=u'//NFref/refNF')
        self.mod = TagCaracter(nome=u'mod', codigo=u'B18', tamanho=[2, 2, 2], raiz=u'//NFref/refNF')
        self.serie = TagInteiro(nome=u'serie', codigo=u'B19', tamanho=[1, 3, 1], raiz=u'//NFref/refNF')
        self.nNF = TagInteiro(nome=u'nNF', codigo=u'B20', tamanho=[1, 9, 1], raiz=u'//NFref/refNF')

    def _campos(self):
        # Child tags in schema order.
        return (self.cUF, self.AAMM, self.CNPJ, self.mod, self.serie, self.nNF)

    def get_xml(self):
        """Serialize <refNF>; returns u'' when every field is empty."""
        if not any(c.valor for c in self._campos()):
            return u''
        partes = [XMLNFe.get_xml(self), u'<refNF>']
        partes.extend(c.xml for c in self._campos())
        partes.append(u'</refNF>')
        return u''.join(partes)

    def set_xml(self, arquivo):
        """Read every field from *arquivo*."""
        if self._le_xml(arquivo):
            for c in self._campos():
                c.xml = arquivo

    xml = property(get_xml, set_xml)
class NFRef(XMLNFe):
    """Reference to another fiscal document (NFref): either the 44-digit
    key of an NF-e (refNFe) or a model 1/1A reference (refNF)."""

    def __init__(self):
        super(NFRef, self).__init__()
        self.refNFe = TagCaracter(nome=u'refNFe', codigo=u'B13', tamanho=[44, 44], raiz=u'//NFRef', obrigatorio=False)
        self.refNF = RefNF()

    def get_xml(self):
        """Serialize <NFref>; returns u'' when neither reference is set."""
        if not (self.refNFe.valor or self.refNF.xml):
            return u''
        xml = XMLNFe.get_xml(self)
        xml += u'<NFref>'
        # Both children are emitted; each one serializes to u'' when empty.
        xml += self.refNFe.xml
        xml += self.refNF.xml
        xml += u'</NFref>'
        return xml

    def set_xml(self, arquivo):
        """Read both reference forms from *arquivo*.

        BUG FIX: the signature used to be ``def set_xml(self)`` — the
        ``arquivo`` parameter was missing, so assigning to the ``xml``
        property raised TypeError and the body referenced an undefined name.
        """
        if self._le_xml(arquivo):
            self.refNFe.xml = arquivo
            self.refNF.xml = arquivo

    xml = property(get_xml, set_xml)
class Ide(XMLNFe):
    """Identification group (ide) of the NF-e, layout 1.10."""

    def __init__(self):
        super(Ide, self).__init__()
        self.cUF = TagInteiro(nome=u'cUF', codigo=u'B02', tamanho=[2, 2, 2], raiz=u'//NFe/infNFe/ide')
        self.cNF = TagCaracter(nome=u'cNF', codigo=u'B03', tamanho=[9, 9, 9], raiz=u'//NFe/infNFe/ide')
        self.natOp = TagCaracter(nome=u'natOp', codigo=u'B04', tamanho=[1, 60], raiz=u'//NFe/infNFe/ide')
        self.indPag = TagInteiro(nome=u'indPag', codigo=u'B05', tamanho=[1, 1, 1], raiz=u'//NFe/infNFe/ide')
        self.mod = TagInteiro(nome=u'mod', codigo=u'B06', tamanho=[2, 2, 2], raiz=u'//NFe/infNFe/ide', valor=55)
        self.serie = TagInteiro(nome=u'serie', codigo=u'B07', tamanho=[1, 3, 1], raiz=u'//NFe/infNFe/ide')
        self.nNF = TagInteiro(nome=u'nNF', codigo=u'B08', tamanho=[1, 9, 1], raiz=u'//NFe/infNFe/ide')
        self.dEmi = TagData(nome=u'dEmi', codigo=u'B09', raiz=u'//NFe/infNFe/ide')
        self.dSaiEnt = TagData(nome=u'dSaiEnt', codigo=u'B10', raiz=u'//NFe/infNFe/ide', obrigatorio=False)
        self.tpNF = TagInteiro(nome=u'tpNF', codigo=u'B11', tamanho=[1, 1, 1], raiz=u'//NFe/infNFe/ide', valor=1)
        self.cMunFG = TagInteiro(nome=u'cMunFG', codigo=u'B12', tamanho=[7, 7, 7], raiz=u'//NFe/infNFe/ide')
        # Repeating group: list of NFRef instances.
        self.NFref = []
        self.tpImp = TagInteiro(nome=u'tpImp', codigo=u'B21', tamanho=[1, 1, 1], raiz=u'//NFe/infNFe/ide', valor=1)
        self.tpEmis = TagInteiro(nome=u'tpEmis', codigo=u'B22', tamanho=[1, 1, 1], raiz=u'//NFe/infNFe/ide', valor=1)
        self.cDV = TagInteiro(nome=u'cDV', codigo=u'B23', tamanho=[1, 1, 1], raiz=u'//NFe/infNFe/ide')
        self.tpAmb = TagInteiro(nome=u'tpAmb', codigo=u'B24', tamanho=[1, 1, 1], raiz=u'//NFe/infNFe/ide', valor=2)
        self.finNFe = TagInteiro(nome=u'finNFe', codigo=u'B25', tamanho=[1, 1, 1], raiz=u'//NFe/infNFe/ide', valor=1)
        self.procEmi = TagInteiro(nome=u'procEmi', codigo=u'B26', tamanho=[1, 1, 1], raiz=u'//NFe/infNFe/ide')
        self.verProc = TagCaracter(nome=u'verProc', codigo=u'B27', tamanho=[1, 20], raiz=u'//NFe/infNFe/ide')
        # Kept for internal use; not serialized by get_xml() and not read by
        # set_xml() — NOTE(review): presumably not part of the 1.10 layout;
        # confirm before changing.
        self.hSaiEnt = TagHora(nome=u'hSaiEnt', codigo=u'', raiz=u'//NFe/infNFe/ide', obrigatorio=False)

    def get_xml(self):
        """Serialize <ide>, including one <NFref> per referenced document."""
        xml = XMLNFe.get_xml(self)
        xml += u'<ide>'
        xml += self.cUF.xml
        xml += self.cNF.xml
        xml += self.natOp.xml
        xml += self.indPag.xml
        xml += self.mod.xml
        xml += self.serie.xml
        xml += self.nNF.xml
        xml += self.dEmi.xml
        xml += self.dSaiEnt.xml
        xml += self.tpNF.xml
        xml += self.cMunFG.xml
        for nr in self.NFref:
            xml += nr.xml
        xml += self.tpImp.xml
        xml += self.tpEmis.xml
        xml += self.cDV.xml
        xml += self.tpAmb.xml
        xml += self.finNFe.xml
        xml += self.procEmi.xml
        xml += self.verProc.xml
        xml += u'</ide>'
        return xml

    def set_xml(self, arquivo):
        """Read the identification group from *arquivo*."""
        if self._le_xml(arquivo):
            self.cUF.xml = arquivo
            self.cNF.xml = arquivo
            self.natOp.xml = arquivo
            self.indPag.xml = arquivo
            self.mod.xml = arquivo
            self.serie.xml = arquivo
            self.nNF.xml = arquivo
            self.dEmi.xml = arquivo
            self.dSaiEnt.xml = arquivo
            self.tpNF.xml = arquivo
            self.cMunFG.xml = arquivo
            #
            # Technique for reading repeated tags: the classes of those
            # tags, and their children, are re-rooted (raiz property) by
            # le_grupo so every occurrence is read correctly.
            #
            # BUG FIX: this used to assign to ``self.NFRef`` (wrong case),
            # creating a brand-new attribute and leaving ``self.NFref`` —
            # the list get_xml() actually serializes — permanently empty.
            self.NFref = self.le_grupo('//NFe/infNFe/ide/NFref', NFRef)
            self.tpImp.xml = arquivo
            self.tpEmis.xml = arquivo
            self.cDV.xml = arquivo
            self.tpAmb.xml = arquivo
            self.finNFe.xml = arquivo
            self.procEmi.xml = arquivo
            self.verProc.xml = arquivo

    xml = property(get_xml, set_xml)
class InfNFe(XMLNFe):
    """Body (infNFe) of the NF-e, layout version 1.10."""

    def __init__(self):
        super(InfNFe, self).__init__()
        self.versao = TagDecimal(nome=u'infNFe', codigo=u'A01', propriedade=u'versao', raiz=u'//NFe', namespace=NAMESPACE_NFE, valor=u'1.10')
        self.Id = TagCaracter(nome=u'infNFe', codigo=u'A03', propriedade=u'Id', raiz=u'//NFe', namespace=NAMESPACE_NFE)
        self.ide = Ide()
        self.emit = Emit()
        self.avulsa = Avulsa()
        self.dest = Dest()
        self.retirada = Retirada()
        self.entrega = Entrega()
        # Repeating group: one Det instance per invoice item.
        self.det = []
        self.total = Total()
        self.transp = Transp()
        self.cobr = Cobr()
        self.infAdic = InfAdic()
        self.exporta = Exporta()
        self.compra = Compra()

    def get_xml(self):
        """Serialize <infNFe> with its versao and Id attributes."""
        abertura = (u'<infNFe versao="' + unicode(self.versao.valor) +
                    u'" Id="' + self.Id.valor + u'">')
        partes = [XMLNFe.get_xml(self), abertura,
                  self.ide.xml, self.emit.xml, self.avulsa.xml,
                  self.dest.xml, self.retirada.xml, self.entrega.xml]
        partes.extend(d.xml for d in self.det)
        partes.extend((self.total.xml, self.transp.xml, self.cobr.xml,
                       self.infAdic.xml, self.exporta.xml, self.compra.xml,
                       u'</infNFe>'))
        return u''.join(partes)

    def set_xml(self, arquivo):
        """Read the whole infNFe tree from *arquivo*."""
        if self._le_xml(arquivo):
            for filho in (self.versao, self.Id, self.ide, self.emit,
                          self.avulsa, self.dest, self.retirada,
                          self.entrega):
                filho.xml = arquivo
            #
            # Technique for reading repeated tags: the classes of those
            # tags, and their children, are re-rooted (raiz property) by
            # le_grupo so every occurrence is read correctly.
            #
            self.det = self.le_grupo('//NFe/infNFe/det', Det)
            for filho in (self.total, self.transp, self.cobr,
                          self.infAdic, self.exporta, self.compra):
                filho.xml = arquivo

    xml = property(get_xml, set_xml)
class NFe(XMLNFe):
    """NF-e document, layout 1.10: the infNFe body plus its XML signature
    and auxiliary data (access key, FS-DA contingency data, DANFE extras)."""
    def __init__(self):
        super(NFe, self).__init__()
        # Document body and its XML digital signature.
        self.infNFe = InfNFe()
        self.Signature = Signature()
        # Schema used to validate this layout.
        self.caminho_esquema = os.path.join(DIRNAME, u'schema/', ESQUEMA_ATUAL + u'/')
        self.arquivo_esquema = u'nfe_v1.10.xsd'
        # Derived/auxiliary values kept outside the XML tree itself.
        self.chave = u''
        self.dados_contingencia_fsda = u''
        self.site = u''
        self.email = u''
def get_xml(self):
xml = XMLNFe.get_xml(self)
xml += ABERTURA
xml += u'<NFe xmlns="http://www.portalfiscal.inf.br/nfe">'
xml += self.infNFe.xml
#
# Define a URI a ser assinada
#
self.Signature.URI = u'#' + self.infNFe.Id.valor
xml += self.Signature.xml
xml += u'</NFe>'
return xml
def set_xml(self, arquivo):
if self._le_xml(arquivo):
self.infNFe.xml = arquivo
self.Signature.xml = self._le_noh('//NFe/sig:Signature')
xml = property(get_xml, set_xml)
def _calcula_dv(self, valor):
soma = 0
m = 2
for i in range(len(valor)-1, -1, -1):
c = valor[i]
soma += int(c) * m
m += 1
if m > 9:
m = 2
digito = 11 - (soma % 11)
if digito > 9:
digito = 0
return digito
    def gera_nova_chave(self):
        """Build a fresh 44-digit access key from the current ide/emit
        fields, store it in self.chave, and fill in cNF, cDV and the
        infNFe Id accordingly."""
        chave = unicode(self.infNFe.ide.cUF.valor).zfill(2)
        chave += unicode(self.infNFe.ide.dEmi.valor.strftime(u'%y%m')).zfill(4)
        chave += unicode(self.infNFe.emit.CNPJ.valor).zfill(14)
        chave += unicode(self.infNFe.ide.mod.valor).zfill(2)
        chave += unicode(self.infNFe.ide.serie.valor).zfill(3)
        chave += unicode(self.infNFe.ide.nNF.valor).zfill(9)
        #
        # Including the emission type in the key already makes the key
        # valid for version 2.00 of the NF-e as well
        #
        chave += unicode(self.infNFe.ide.tpEmis.valor).zfill(1)
        #
        # The numeric code is a random number
        #
        #chave += unicode(random.randint(0, 99999999)).strip().rjust(8, u'0')
        #
        # But, to be safe, it is preferable that this number is not
        # entirely random
        #
        soma = 0
        for c in chave:
            # ** is right-associative: this is int(c) ** (3 ** 2) == c ** 9
            soma += int(c) ** 3 ** 2
        codigo = unicode(soma)
        # Keep exactly 8 digits: truncate from the left or left-pad with zeros.
        if len(codigo) > 8:
            codigo = codigo[-8:]
        else:
            codigo = codigo.rjust(8, u'0')
        chave += codigo
        #
        # Set the cNF field in the XML structure
        #
        self.infNFe.ide.cNF.valor = unicode(self.infNFe.ide.tpEmis.valor).zfill(1) + codigo
        #
        # Generate the check digit
        #
        digito = self._calcula_dv(chave)
        #
        # Set the cDV field in the XML structure
        #
        self.infNFe.ide.cDV.valor = digito
        chave += unicode(digito)
        self.chave = chave
        #
        # Set the Id
        #
        self.infNFe.Id.valor = u'NFe' + chave
def monta_chave(self):
self.gera_nova_chave()
"""
chave = unicode(self.infNFe.ide.cUF.valor).zfill(2)
chave += unicode(self.infNFe.ide.dEmi.valor.strftime(u'%y%m')).zfill(4)
chave += unicode(self.infNFe.emit.CNPJ.valor).zfill(14)
chave += unicode(self.infNFe.ide.mod.valor).zfill(2)
chave += unicode(self.infNFe.ide.serie.valor).zfill(3)
chave += unicode(self.infNFe.ide.nNF.valor).zfill(9)
chave += unicode(self.infNFe.ide.cNF.valor).zfill(9)
chave += unicode(self.infNFe.ide.cDV.valor).zfill(1)
self.chave = chave
"""
def chave_para_codigo_barras(self):
#
# As funรงรตes do reportlabs para geraรงรฃo de cรณdigos de barras nรฃo estรฃo
# aceitando strings unicode
#
return self.chave.encode(u'utf-8')
def monta_dados_contingencia_fsda(self):
dados = unicode(self.infNFe.ide.cUF.valor).zfill(2)
dados += unicode(self.infNFe.ide.tpEmis.valor).zfill(1)
dados += unicode(self.infNFe.emit.CNPJ.valor).zfill(14)
dados += unicode(int(self.infNFe.total.ICMSTot.vNF.valor * 100)).zfill(14)
#
# Hรก ICMS prรณprio?
#
if self.infNFe.total.ICMSTot.vICMS.valor:
dados += u'1'
else:
dados += u'2'
#
# Hรก ICMS ST?
#
if self.infNFe.total.ICMSTot.vST.valor:
dados += u'1'
else:
dados += u'2'
dados += self.infNFe.ide.dEmi.valor.strftime(u'%d').zfill(2)
digito = self._calcula_dv(dados)
dados += unicode(digito)
self.dados_contingencia_fsda = dados
def dados_contingencia_fsda_para_codigo_barras(self):
#
# As funรงรตes do reportlabs para geraรงรฃo de cรณdigos de barras nรฃo estรฃo
# aceitando strings unicode
#
self.monta_dados_contingencia_fsda()
return self.dados_contingencia_fsda.encode(u'utf-8')
#
# Funรงรตes para formatar campos para o DANFE
#
def chave_formatada(self):
chave = self.chave
chave_formatada = u' '.join((chave[0:4], chave[4:8], chave[8:12], chave[12:16], chave[16:20], chave[20:24], chave[24:28], chave[28:32], chave[32:36], chave[36:40], chave[40:44]))
return chave_formatada
def dados_contingencia_fsda_formatados(self):
self.monta_dados_contingencia_fsda()
dados = self.dados_contingencia_fsda
dados_formatados = u' '.join((dados[0:4], dados[4:8], dados[8:12], dados[12:16], dados[16:20], dados[20:24], dados[24:28], dados[28:32], dados[32:36]))
return dados_formatados
def numero_formatado(self):
num = unicode(self.infNFe.ide.nNF.valor).zfill(9)
num_formatado = u'.'.join((num[0:3], num[3:6], num[6:9]))
return u'Nยบ ' + num_formatado
def serie_formatada(self):
return u'SรRIE ' + unicode(self.infNFe.ide.serie.valor).zfill(3)
def _formata_cpf(self, cpf):
if not len(cpf.strip()):
return u''
formatado = cpf[0:3] + u'.' + cpf[3:6] + u'.' + cpf[6:9] + u'-' + cpf[9:11]
return formatado
def _formata_cnpj(self, cnpj):
if not len(cnpj.strip()):
return u''
formatado = cnpj[0:2] + u'.' + cnpj[2:5] + u'.' + cnpj[5:8] + u'/' + cnpj[8:12] + u'-' + cnpj[12:14]
return formatado
def cnpj_emitente_formatado(self):
if len(self.infNFe.emit.CPF.valor):
return self._formata_cpf(unicode(self.infNFe.emit.CPF.valor))
else:
return self._formata_cnpj(unicode(self.infNFe.emit.CNPJ.valor))
def endereco_emitente_formatado(self):
formatado = self.infNFe.emit.enderEmit.xLgr.valor
formatado += u', ' + self.infNFe.emit.enderEmit.nro.valor
if len(self.infNFe.emit.enderEmit.xCpl.valor.strip()):
formatado += u' - ' + self.infNFe.emit.enderEmit.xCpl.valor
return formatado
def _formata_cep(self, cep):
if not len(cep.strip()):
return u''
return cep[0:5] + u'-' + cep[5:8]
def cep_emitente_formatado(self):
return self._formata_cep(self.infNFe.emit.enderEmit.CEP.valor)
def endereco_emitente_formatado_linha_1(self):
formatado = self.endereco_emitente_formatado()
formatado += u' - ' + self.infNFe.emit.enderEmit.xBairro.valor
return formatado
def endereco_emitente_formatado_linha_2(self):
formatado = self.infNFe.emit.enderEmit.xMun.valor
formatado += u' - ' + self.infNFe.emit.enderEmit.UF.valor
formatado += u' - ' + self.cep_emitente_formatado()
return formatado
def endereco_emitente_formatado_linha_3(self):
formatado = u'Fone: ' + self.fone_emitente_formatado()
return formatado
    def endereco_emitente_formatado_linha_4(self):
        # Fourth DANFE address line: the issuer's web site.
        return self.site
def _formata_fone(self, fone):
if not len(fone.strip()):
return u''
if len(fone) <= 8:
formatado = fone[:-4] + u'-' + fone[-4:]
elif len(fone) <= 10:
ddd = fone[0:2]
fone = fone[2:]
formatado = u'(' + ddd + u') ' + fone[:-4] + u'-' + fone[-4:]
return formatado
def fone_emitente_formatado(self):
return self._formata_fone(unicode(self.infNFe.emit.enderEmit.fone.valor))
def cnpj_destinatario_formatado(self):
if len(self.infNFe.dest.CPF.valor):
return self._formata_cpf(unicode(self.infNFe.dest.CPF.valor))
else:
return self._formata_cnpj(unicode(self.infNFe.dest.CNPJ.valor))
def endereco_destinatario_formatado(self):
formatado = self.infNFe.dest.enderDest.xLgr.valor
formatado += u', ' + self.infNFe.dest.enderDest.nro.valor
if len(self.infNFe.dest.enderDest.xCpl.valor.strip()):
formatado += u' - ' + self.infNFe.dest.enderDest.xCpl.valor
return formatado
def cep_destinatario_formatado(self):
return self._formata_cep(self.infNFe.dest.enderDest.CEP.valor)
def fone_destinatario_formatado(self):
return self._formata_fone(unicode(self.infNFe.dest.enderDest.fone.valor))
    def cnpj_retirada_formatado(self):
        # CNPJ of the pickup location, formatted as 00.000.000/0000-00.
        return self._formata_cnpj(self.infNFe.retirada.CNPJ.valor)
def endereco_retirada_formatado(self):
formatado = self.infNFe.retirada.xLgr.valor
formatado += u', ' + self.infNFe.retirada.nro.valor
if len(self.infNFe.retirada.xCpl.valor.strip()):
formatado += u' - ' + self.infNFe.retirada.xCpl.valor
formatado += u' - ' + self.infNFe.retirada.xBairro.valor
formatado += u' - ' + self.infNFe.retirada.xMun.valor
formatado += u'-' + self.infNFe.retirada.UF.valor
return formatado
    def cnpj_entrega_formatado(self):
        # CNPJ of the delivery location, formatted as 00.000.000/0000-00.
        return self._formata_cnpj(self.infNFe.entrega.CNPJ.valor)
def endereco_entrega_formatado(self):
formatado = self.infNFe.entrega.xLgr.valor
formatado += u', ' + self.infNFe.entrega.nro.valor
if len(self.infNFe.entrega.xCpl.valor.strip()):
formatado += u' - ' + self.infNFe.entrega.xCpl.valor
formatado += u' - ' + self.infNFe.entrega.xBairro.valor
formatado += u' - ' + self.infNFe.entrega.xMun.valor
formatado += u'-' + self.infNFe.entrega.UF.valor
return formatado
def cnpj_transportadora_formatado(self):
if self.infNFe.transp.transporta.CPF.valor:
return self._formata_cpf(self.infNFe.transp.transporta.CPF.valor)
else:
return self._formata_cnpj(self.infNFe.transp.transporta.CNPJ.valor)
def placa_veiculo_formatada(self):
if not self.infNFe.transp.veicTransp.placa.valor:
return u''
placa = self.infNFe.transp.veicTransp.placa.valor
placa = placa[:-4] + u'-' + placa[-4:]
return placa
def dados_adicionais(self):
da = u''
if self.infNFe.infAdic.infAdFisco.valor:
da = self.infNFe.infAdic.infAdFisco.valor.replace(u'|', u'<br />')
if self.infNFe.infAdic.infCpl.valor:
if len(da) > 0:
da += u'<br />'
da += self.infNFe.infAdic.infCpl.valor.replace(u'|', u'<br />')
return da
def canhoto_formatado(self):
formatado = u'RECEBEMOS DE <b>'
formatado += self.infNFe.emit.xNome.valor.upper()
formatado += u'</b> OS PRODUTOS E/OU SERVIรOS CONSTANTES DA <b>NOTA FISCAL ELETRรNICA</b> INDICADA AO LADO'
return formatado
def frete_formatado(self):
if self.infNFe.transp.modFrete.valor == 0:
formatado = u'0-EMITENTE'
elif self.infNFe.transp.modFrete.valor == 1:
if self.infNFe.ide.tpNF.valor == 0:
formatado = u'1-REMETENTE'
else:
formatado = u'1-DESTINATรRIO'
elif self.infNFe.transp.modFrete.valor == 2:
formatado = u'2-DE TERCEIROS'
elif self.infNFe.transp.modFrete.valor == 9:
formatado = u'9-SEM FRETE'
else:
formatado = u''
return formatado
|
class HtmlOutputer(object):
    """Collects crawled data items and renders them into formatter.html.

    Each collected item is a dict with at least 'title' and 'summary' keys.
    """

    def __init__(self):
        self.datas = []

    def collect_data(self, data):
        """Store one crawled item; None items are ignored.

        (Removed a stray debug print and a dead ``pass``.)
        """
        if data is None:
            return
        self.datas.append(data)

    def output_html(self):
        """Write all collected items to formatter.html as a Bootstrap table.

        Bug fix: the original never closed the file, so the handle leaked
        and the tail of the output could stay unflushed; ``with`` now
        guarantees close-and-flush.
        """
        with open("formatter.html", "w") as fout:
            fout.write("""
    <!DOCTYPE html>
    <html lang="en">
    <head>
        <meta charset="UTF-8">
        <meta name="viewport" content="width=device-width, initial-scale=1.0">
        <meta http-equiv="X-UA-Compatible" content="ie=edge">
        <title>Document</title>
        <!-- ๆๆฐ็ๆฌ็ Bootstrap ๆ ธๅฟ CSS ๆไปถ -->
        <link rel="stylesheet" href="https://cdn.jsdelivr.net/npm/bootstrap@3.3.7/dist/css/bootstrap.min.css"
              integrity="sha384-BVYiiSIFeK1dGmJRAkycuHAHRg32OmUcww7on3RYdg4Va+PmSTsz/K68vbdEjh4u" crossorigin="anonymous">
        <!-- ๆๆฐ็ Bootstrap ๆ ธๅฟ JavaScript ๆไปถ -->
        <script src="https://cdn.jsdelivr.net/npm/bootstrap@3.3.7/dist/js/bootstrap.min.js"
                integrity="sha384-Tc5IQib027qvyjSMfHjOMaLkfuWVxZxUPnCJA7l2mCWNIpG9mGCD8wGNIcPD7Txa" crossorigin="anonymous">
        </script>
    </head>
    """
                       )
            fout.write("<body>")
            fout.write('<table class="table table-bordered">')
            fout.write('<thead>')
            fout.write('<tr>')
            fout.write('<th>ๆ ้ข</th>')
            fout.write('<th>ๆ่ฆ</th>')
            fout.write("</tr>")
            fout.write('</thead>')
            for data in self.datas:
                fout.write('<tr class="active">')
                # fout.write('<td>%s</td>'% data['url'])
                fout.write('<td class="active">%s</td>' % data["title"])
                fout.write('<td class="active">%s</td>' % data["summary"])
                fout.write("</tr>")
            fout.write("</table>")
            fout.write("</body>")
            fout.write("</html>")
|
# encoding=utf-8
from telebot import types
from module import CourseList, StartBot
from config import TOKEN
from app import server
# Wire the Flask app and Telegram token into the bot wrapper;
# `course` holds the currency list/state shared by all handlers.
bot = StartBot(server, TOKEN)
course = CourseList()
# /start: greet the user and explain the expected input format.
# NOTE(review): the Cyrillic literals below are mojibake in the original
# file and are reproduced verbatim — they are runtime strings.
@bot.message_handler(commands=['start'])
def update_course(message):
    bot.send_message(message.chat.id, 'ะัะธะฒะตั, ' + message.from_user.first_name +
                     '! ะฏ ะฑะพั ะบะพัะพััะน ะบะพะฝะฒะตัะธััะตั ะฝะตะพะฑัะพะดะธะผัั ััะผะผั ะธะท ะพะดะฝะพะน ะฒะฐะปััั ะฒ ะดััะณัั.\n' +
                     'ะขั ะผะพะถะตัั ะธัะฟะพะปัะทะพะฒะฐัั ะผะตะฝั ะบะฐะบ ะฒ ััะพะผ, ัะฐะบ ะธ ะฒ ะดััะณะธั ัะฐัะฐั.\n' +
                     'ะะพ ัะตะฑะต ะพะฑัะทะฐัะตะปัะฝะพ ะฝัะถะฝะพ ะฟัะธะดะตัะถะธะฒะฐัััั ัะพัะผะฐัะฐ ะฒะฒะพะดะฐ ๐\n' +
                     "ะคะพัะผะฐั ะฒะฒะพะดะฐ: 'ัะธัะปะพ' 'ะฒะฐะปััะฐ ะธะท' 'ะฒะฐะปััะฐ ะฒ'")
# Inline queries longer than 8 characters: parse and convert.
@bot.inline_handler(func=lambda query: len(query.query) > 8)
def query_text(query):
    # Validation pass: a parse failure means the query is not for us.
    try:
        CourseList.parse_text(query.query)
    except AttributeError as e:
        return
    text = query.query.upper().split()
    try:
        if course.update(text):
            # Successful conversion: show the computed value.
            result = types.InlineQueryResultArticle(
                id='0', title="ะะพะฝะฒะตััะพั",
                description=str(course.value),
                input_message_content=types.InputTextMessageContent(
                    message_text=str(course.value)))
        else:
            # Unknown currency or malformed request: show the format hint.
            result = types.InlineQueryResultArticle(
                id='0', title="ะะต ัะพัะผะฐั", description="ะคะพัะผะฐั: 'ัะธัะปะพ' 'ะฒะฐะปััะฐ ะธะท' 'ะฒะฐะปััะฐ ะฒ'",
                input_message_content=types.InputTextMessageContent(
                    message_text=''))
        bot.answer_inline_query(query.id, [result])
    except Exception as e:
        # NOTE(review): broad catch; failures are only surfaced in debug mode.
        if bot.debug():
            bot.query_error(query.id, e)
# Short inline queries: prompt the user with the expected format.
# NOTE(review): queries of exactly length 8 match neither this handler
# (< 8) nor query_text (> 8) -- confirm whether that gap is intended.
@bot.inline_handler(func=lambda query: len(query.query) < 8)
def empty_query(query):
    try:
        result = types.InlineQueryResultArticle(
            id='1',
            title='ะะถะธะดะฐะตััั ัะพัะผะฐั',
            description="ะคะพัะผะฐั: 'ัะธัะปะพ' 'ะฒะฐะปััะฐ ะธะท' 'ะฒะฐะปััะฐ ะฒ'",
            input_message_content=types.InputTextMessageContent(
                message_text=query.query))
        bot.answer_inline_query(query.id, [result])
    except Exception as e:
        if bot.debug():
            bot.query_error(query.id, e)
# /know: list every currency the bot currently knows about.
@bot.message_handler(commands=['know'])
def known_course(message):
    try:
        bot_message = ''
        if not course:
            # No currencies loaded yet.
            bot_message = 'ะัะพััะธ, ั ะตัะต ะฝะต ะทะฝะฐั ะฝะธ ะพะดะฝะพะน ะฒะฐะปััั'
        else:
            # Space-separated list of known currency codes.
            for i in course:
                bot_message += str(i) + ' '
        bot.send_message(message.chat.id, bot_message)
    except Exception as e:
        if bot.debug():
            bot.msg_error(message.chat.id, e, message.text)
# /add <abbr>: try to add a new currency to the known list.
@bot.message_handler(commands=['add'])
def add_course(message):
    global course
    try:
        new_course = message.text.lower().split()[1]
        # NOTE(review): this rejects only abbreviations of length 4-5 and
        # lets everything else fall through to the lookup below — confirm
        # the intended rule is not "must be exactly 3 letters".
        if 3 < len(new_course) < 6:
            bot.send_message(message.chat.id, 'ะัะธะฑะบะฐ! ะขะฐะบะพะน ะบััั ะฝะตะฒะพะทะผะพะถะตะฝ!\n' +
                             'ะะปั ะดะพะฑะฐะฒะปะตะฝะธั ะบัััะฐ ััะตะฑัะตััั ะฒะฒะตััะธ ะตะณะพ ะฐะฑะฑัะตะฒะธะฐัััั!\n' +
                             "ะัะธะผะตั ัะพะพะฑัะตะฝะธั: '/add uah'")
        elif new_course in course:
            bot.send_message(message.chat.id, 'ะขะฐะบะพะน ะบััั ัะถะต ั ะผะตะฝั ะตััั!โบ')
        else:
            # NOTE(review): '==' presumably asks CourseList whether the
            # abbreviation is a real currency — verify CourseList.__eq__.
            if course == new_course:
                course += new_course
                bot.send_message(message.chat.id, 'ะฃัะฐ! ะขะตะฟะตัั ะผะฝะต ะดะพัััะฟะฝะฐ {}!โบ'.
                                 format(new_course.upper()))
            else:
                bot.send_message(message.chat.id, 'ะั! ะฏ ะฝะต ะผะพะณั ะฝะฐะนัะธ ัะฐะบัั ะฒะฐะปััั ๐ฐ')
    except Exception as e:
        if bot.debug():
            bot.msg_error(message.chat.id, e, message.text)
        bot.send_message(message.chat.id, 'ะะน! ะงัะพ-ัะพ ะฟะพัะปะพ ะฝะต ัะฐะบ ๐ฐ')
# Plain text messages: either a conversion request or small talk.
@bot.message_handler(content_types=['text'])
def send_text(message):
    try:
        if CourseList.parse_text(message.text):
            # Looks like a conversion request: "<amount> <from> <to>".
            text = message.text.upper().split()
            if course.update(text):
                bot.send_message(message.chat.id, str(course))
            else:
                bot.send_message(message.chat.id, 'ะััะฐะถะตะฝะธะต ะฒะฒะตะดะตะฝะพะฝะตะฟัะฐะฒะธะปัะฝะพ'
                                 'ะธะปะธ ะพะดะฝะฐ ะธะท ะฒะฐะปัั ะผะฝะต ะฝะต ะธะทะฒะตััะฝะฐ ๐ฐ')
        elif 'ะฟัะธะฒะตั' in message.text.lower():
            bot.send_message(message.chat.id, 'ะัะธะฒะตัะธะบ, ' + message.from_user.first_name
                             + '๐')
        elif 'ะฟะพะบะฐ' in message.text.lower():
            bot.send_message(message.chat.id, 'ะัะพัะฐะน ๐ฐ')
        else:
            bot.send_message(message.chat.id, 'ะะทะฒะธะฝะธ, ั ะฝะต ะฟะพะฝะธะผะฐั ััะพ ัั ัะบะฐะทะฐะป ๐')
    except Exception as e:
        if bot.debug():
            bot.msg_error(message.chat.id, e, message.text)
        bot.send_message(message.chat.id, 'ะะน! ะงัะพ-ัะพ ะฟะพัะปะพ ะฝะต ัะฐะบ ๐ฐ')
# Telegram webhook endpoint: Telegram POSTs updates to /<TOKEN>;
# the bot wrapper pulls and dispatches them.
@server.route('/' + TOKEN, methods=['POST'])
def get_message():
    bot.update()
    return 'Message update', 200


if __name__ == '__main__':
    # Run the web server / start polling when executed directly.
    bot.start()
|
import sys
import dlib
import cv2
import numpy as np
# 68-point mean-face landmark template (normalized (x, y) coordinates)
# used as the alignment target for detected faces.
TEMPLATE = np.float32([
    (0.0792396913815, 0.339223741112), (0.0829219487236, 0.456955367943),
    (0.0967927109165, 0.575648016728), (0.122141515615, 0.691921601066),
    (0.168687863544, 0.800341263616), (0.239789390707, 0.895732504778),
    (0.325662452515, 0.977068762493), (0.422318282013, 1.04329000149),
    (0.531777802068, 1.06080371126), (0.641296298053, 1.03981924107),
    (0.738105872266, 0.972268833998), (0.824444363295, 0.889624082279),
    (0.894792677532, 0.792494155836), (0.939395486253, 0.681546643421),
    (0.96111933829, 0.562238253072), (0.970579841181, 0.441758925744),
    (0.971193274221, 0.322118743967), (0.163846223133, 0.249151738053),
    (0.21780354657, 0.204255863861), (0.291299351124, 0.192367318323),
    (0.367460241458, 0.203582210627), (0.4392945113, 0.233135599851),
    (0.586445962425, 0.228141644834), (0.660152671635, 0.195923841854),
    (0.737466449096, 0.182360984545), (0.813236546239, 0.192828009114),
    (0.8707571886, 0.235293377042), (0.51534533827, 0.31863546193),
    (0.516221448289, 0.396200446263), (0.517118861835, 0.473797687758),
    (0.51816430343, 0.553157797772), (0.433701156035, 0.604054457668),
    (0.475501237769, 0.62076344024), (0.520712933176, 0.634268222208),
    (0.565874114041, 0.618796581487), (0.607054002672, 0.60157671656),
    (0.252418718401, 0.331052263829), (0.298663015648, 0.302646354002),
    (0.355749724218, 0.303020650651), (0.403718978315, 0.33867711083),
    (0.352507175597, 0.349987615384), (0.296791759886, 0.350478978225),
    (0.631326076346, 0.334136672344), (0.679073381078, 0.29645404267),
    (0.73597236153, 0.294721285802), (0.782865376271, 0.321305281656),
    (0.740312274764, 0.341849376713), (0.68499850091, 0.343734332172),
    (0.353167761422, 0.746189164237), (0.414587777921, 0.719053835073),
    (0.477677654595, 0.706835892494), (0.522732900812, 0.717092275768),
    (0.569832064287, 0.705414478982), (0.635195811927, 0.71565572516),
    (0.69951672331, 0.739419187253), (0.639447159575, 0.805236879972),
    (0.576410514055, 0.835436670169), (0.525398405766, 0.841706377792),
    (0.47641545769, 0.837505914975), (0.41379548902, 0.810045601727),
    (0.380084785646, 0.749979603086), (0.477955996282, 0.74513234612),
    (0.523389793327, 0.748924302636), (0.571057789237, 0.74332894691),
    (0.672409137852, 0.744177032192), (0.572539621444, 0.776609286626),
    (0.5240106503, 0.783370783245), (0.477561227414, 0.778476346951)])
# Template rescaled so every coordinate lies in [0, 1].
TPL_MIN, TPL_MAX = np.min(TEMPLATE, axis=0), np.max(TEMPLATE, axis=0)
MINMAX_TEMPLATE = (TEMPLATE - TPL_MIN) / (TPL_MAX - TPL_MIN)
class AlignDlib:
    """Align faces using dlib's landmark estimation.

    1. Rescale the face crop to a fixed square (e.g. 96x96).
    2. Normalise landmark positions against the canonical template.
    """

    # Landmark indices usable as alignment anchors.
    INNER_EYES_AND_BOTTOM_LIP = [39, 42, 57]
    OUTER_EYES_AND_NOSE = [36, 45, 33]

    def __init__(self, facePredictor):
        """
        :param facePredictor: path to dlib's trained shape-predictor model.
        :type facePredictor: str
        """
        assert facePredictor is not None
        self.detector = dlib.get_frontal_face_detector()
        self.predictor = dlib.shape_predictor(facePredictor)

    def getAllFaceBoundingBoxes(self, rgbImg):
        """Return every face bounding box found in the image.

        :param rgbImg: image to process, shape (height, width, 3).
        :return: dlib.rectangles of detected faces; [] if the detector fails.
        """
        assert rgbImg is not None
        try:
            # Second argument 1: upsample the image once before detecting.
            return self.detector(rgbImg, 1)
        except Exception as e:
            print("warning:{}".format(e))
            return []

    def getLargestFaceBoundingBox(self, rgbImg, skipMulti=False):
        """Return the largest detected face bounding box, or None.

        :param rgbImg: image to process, shape (height, width, 3).
        :param skipMulti: if True, give up when more than one face is found.
        """
        assert rgbImg is not None
        faces = self.getAllFaceBoundingBoxes(rgbImg)
        if (not skipMulti and len(faces) > 0) or len(faces) == 1:
            return max(faces, key=lambda rect: rect.width() * rect.height())
        else:
            return None

    def findLandmarks(self, rgbImg, bb):
        """Return the facial landmarks inside bounding box *bb*.

        :param rgbImg: image to process, shape (height, width, 3).
        :param bb: bounding box to search within.
        :return: list of (x, y) tuples, one per detected landmark.
        """
        assert rgbImg is not None
        assert bb is not None
        points = self.predictor(rgbImg, bb)
        return list(map(lambda p: (p.x, p.y), points.parts()))

    def align(self, imgDim, rgbImg, bb=None, landmarks=None,
              landmarkIndices=INNER_EYES_AND_BOTTOM_LIP, skipMulti=False):
        """Align a face to the canonical landmark template.

        :param imgDim: edge length of the square output image.
        :param rgbImg: image to process, shape (height, width, 3).
        :param bb: face bounding box; detected automatically when None.
        :param landmarks: precomputed landmarks; detected when None.
        :param landmarkIndices: indices of the anchor landmarks.
        :param skipMulti: skip images where several faces are detected.
        :return: aligned RGB image of shape (imgDim, imgDim, 3), or None.
        """
        assert imgDim is not None
        assert rgbImg is not None
        assert landmarkIndices is not None
        if bb is None:
            bb = self.getLargestFaceBoundingBox(rgbImg, skipMulti)
            if bb is None:
                return
        if landmarks is None:
            landmarks = self.findLandmarks(rgbImg, bb)
        # Fix: the original converted twice (np.array then np.float32);
        # a single float32 conversion is sufficient.
        npLandmarks = np.float32(landmarks)
        npLandmarkIndices = np.array(landmarkIndices)
        # Affine transform mapping the anchor landmarks onto the template
        # scaled up to the requested output size.
        H = cv2.getAffineTransform(npLandmarks[npLandmarkIndices],
                                   imgDim * MINMAX_TEMPLATE[npLandmarkIndices])
        thumbnail = cv2.warpAffine(rgbImg, H, (imgDim, imgDim))
        return thumbnail
|
from typing import Dict
from pydantic import BaseModel
class ProductoInDB(BaseModel):
    """Inventory product record as stored in the in-memory database."""
    codigo: str     # unique product code; also used as the database key
    nombre: str     # display name
    precio: float   # unit price
    cantidad: int   # units in stock
    seccion: str    # store section/category
# In-memory product database keyed by product code.
database_producto = {
    "1001": ProductoInDB(**{"codigo": "1001",
                            "nombre": "Mause",
                            "precio": 12000,
                            "cantidad": 5,
                            "seccion": "Tecnologia"
                            }),
    "1002": ProductoInDB(**{"codigo": "1002",
                            "nombre": "Monitor",
                            "precio": 14000,
                            "cantidad": 9,
                            "seccion": "Tecnologia"
                            }),
}
def get_productos():
    """Return the whole product database (codigo -> ProductoInDB)."""
    return database_producto
def get_producto(codigo: str):
    """Return the product with the given code, or None when unknown.

    Uses dict.get instead of a `.keys()` membership test followed by a
    second lookup (one lookup instead of two, same result).
    """
    return database_producto.get(codigo)
|
"""Runtime Errors."""
def bad_type(item):
    """
    (str) -> ValueError

    Argument must be an alpha string.
    Attempts to convert the given word into an integer; for non-numeric
    strings int() raises ValueError (the original docstring incorrectly
    claimed TypeError).

    >>> bad_type('John')
    Traceback (most recent call last):
      ...
    ValueError: invalid literal for int() with base 10: 'John'
    """
    return int(item)
def bad_name(item):
    """
    (str) -> NameError

    Attempts to return a variable that was never defined.

    >>> bad_name('anything')
    Traceback (most recent call last):
      ...
    NameError: name 'thing' is not defined
    """
    # 'thing' is deliberately undefined: calling this always raises NameError.
    return thing
def bad_attribute(num):
    """
    (int) -> AttributeError

    Attempts to call the string method upper() on an integer.

    >>> bad_attribute(4)
    Traceback (most recent call last):
      ...
    AttributeError: 'int' object has no attribute 'upper'
    """
    # int has no upper(): always raises AttributeError for int input.
    return num.upper()
def bad_index(randomlist):
    """
    (list) -> IndexError

    Indexes past the end of the list, which always raises IndexError.

    >>> bad_index([3, 5, 22, 7])
    Traceback (most recent call last):
      ...
    IndexError: list index out of range
    """
    past_the_end = len(randomlist) + 1
    return randomlist[past_the_end]
def bad_key(string):
    """
    (str) -> KeyError

    Looks the given string up in an empty dict, so a KeyError naming
    the string is always raised.

    >>> bad_key('Joseph')
    Traceback (most recent call last):
      ...
    KeyError: 'Joseph'
    """
    empty_mapping = {}
    return empty_mapping[string]
def bad_zero(bop):
    """
    (int) -> ZeroDivisionError

    Divides the given number by zero, which always raises
    ZeroDivisionError.

    >>> bad_zero(9)
    Traceback (most recent call last):
      ...
    ZeroDivisionError: division by zero
    """
    divisor = 0
    return bop / divisor
def bad_import(string):
    """
    (str) -> ImportError

    Attempts to import a module named 'Cat', which does not exist.

    >>> bad_import('Lol')
    Traceback (most recent call last):
      ...
    ImportError: No module named 'Cat'
    """
    # 'Cat' deliberately does not exist: always raises ImportError.
    import Cat
|
#!/usr/bin/env python
# Package name and version; keep docs/sphinx/conf.py in sync.
PROJECT = 'clifford'
# Change docs/sphinx/conf.py too!
VERSION = '0.1'
from setuptools import setup, find_packages
# Use the README as the long description; fall back to an empty string
# when it is missing (e.g. installing from an sdist without it).
# Fix: the original `open(...).read()` leaked the file handle; `with`
# closes it deterministically.
try:
    with open('README.rst', 'rt') as readme:
        long_description = readme.read()
except IOError:
    long_description = ''
# Package metadata plus the cliff command registry: each 'clifford' entry
# point maps a CLI command name to its implementing class.
setup(
    name=PROJECT,
    version=VERSION,
    description='Clifford, ec2 made easy.',
    long_description=long_description,
    author='Joe Server',
    author_email='joe@jserver.io',
    url='https://github.com/jserver/clifford',
    download_url='https://github.com/jserver/clifford/tarball/master',
    classifiers=['Development Status :: 3 - Alpha',
                 'License :: OSI Approved :: Apache Software License',
                 'Programming Language :: Python',
                 'Programming Language :: Python :: 2',
                 'Programming Language :: Python :: 2.7',
                 'Intended Audience :: Developers',
                 'Environment :: Console',
                 ],
    platforms=['Any'],
    scripts=[],
    provides=[],
    install_requires=['cliff', 'boto', 'paramiko'],
    namespace_packages=[],
    packages=find_packages(),
    include_package_data=True,
    entry_points={
        'console_scripts': [
            'clifford = clifford.main:main'
        ],
        'clifford': [
            # General
            'domain = clifford.actions:Domain',
            'key_paths = clifford.actions:KeyPaths',
            'script_path = clifford.actions:ScriptPath',
            # Lister tools
            'ls = clifford.listing:Instances',
            'addresses = clifford.listing:Addresses',
            'aws_images = clifford.listing:AwsImages',
            'buckets = clifford.listing:Buckets',
            'builds = clifford.listing:Builds',
            'bundles = clifford.listing:Bundles',
            'groups = clifford.listing:Groups',
            'images = clifford.listing:Images',
            'tags = clifford.listing:InstanceTags',
            # NOTE: 'instances' is a deliberate long-form alias of 'ls'.
            'instances = clifford.listing:Instances',
            'keys = clifford.listing:Keys',
            'projects = clifford.listing:Projects',
            'scripts = clifford.listing:Scripts',
            'security groups = clifford.listing:SecurityGroups',
            'snapshots = clifford.listing:Snapshots',
            'volumes = clifford.listing:Volumes',
            # EC2
            'image = clifford.actions:Image',
            'create image = clifford.actions:CreateImage',
            'create snapshot = clifford.actions:CreateSnapshot',
            'del aws_image = clifford.actions:DeleteAwsImage',
            'del snapshot = clifford.actions:DeleteSnapshot',
            'del volume = clifford.actions:DeleteVolume',
            'build = clifford.build:Build',
            'launch = clifford.launch:Launch',
            'project = clifford.project:Project',
            'instance = clifford.show:Instance',
            'terminate = clifford.actions:Terminate',
            'reboot = clifford.actions:Reboot',
            'stop = clifford.actions:Stop',
            'start = clifford.actions:Start',
            'tag = clifford.actions:Tag',
            'adduser = clifford.remote:AddUser',
            'cnct = clifford.actions:Cnct',
            'script = clifford.remote:Script',
            'copy = clifford.remote:CopyFile',
            'update = clifford.remote:Update',
            'upgrade = clifford.remote:Upgrade',
            'apt install = clifford.remote:AptGetInstall',
            'pip install = clifford.remote:PipInstall',
            'install bundle = clifford.remote:BundleInstall',
            'install group = clifford.remote:GroupInstall',
            #'add-apt = clifford.remote:AddAptInstall',
            #'ppa install = clifford.remote:PPAInstall',
            'bundle = clifford.package:Bundle',
            'group = clifford.package:Group',
            # Elastic IPs
            'associate = clifford.address:Associate',
            'disassociate = clifford.address:Disassociate',
            'allocate = clifford.address:Allocate',
            'release = clifford.address:Release',
            # S3
            'create bucket = clifford.storage:CreateBucket',
            'del bucket = clifford.storage:DeleteBucket',
            'download = clifford.storage:Download',
            'upload = clifford.storage:Upload',
        ],
    },
    zip_safe=False,
)
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-01-26 18:14
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: makes CustomUser.avatar a non-editable
    CharField defaulting to '' and CustomUser.email unique (max 254).
    """

    dependencies = [
        ('registration', '0002_customuser_avatar'),
    ]

    operations = [
        migrations.AlterField(
            model_name='customuser',
            name='avatar',
            field=models.CharField(
                default='', editable=False, max_length=1000),
        ),
        migrations.AlterField(
            model_name='customuser',
            name='email',
            field=models.EmailField(max_length=254, unique=True),
        ),
    ]
|
# coding=utf-8
from core import HackingTool
from core import HackingToolsCollection
class GoSpider(HackingTool):
    """Menu entry for Gospider (install/browse only, not runnable)."""
    TITLE = "Gospider"
    DESCRIPTION = "Gospider - Fast web spider written in Go"
    INSTALL_COMMANDS = ["sudo go get -u github.com/jaeles-project/gospider"]
    PROJECT_URL = "https://github.com/jaeles-project/gospider"

    def __init__(self):
        # runnable=False: the menu offers install/docs but no "run" action.
        super(GoSpider, self).__init__(runnable = False)
class WebCrawlingTools(HackingToolsCollection):
    """Collection of web-crawling tools shown under one menu heading."""
    TITLE = "Web crawling"
    TOOLS = [GoSpider()]
|
# -*- coding: utf-8 -*-
import wx
from docpage import DocPage
from settings import FILENAME_FONT, COMMENT_FONT
class DocListBox(wx.VListBox):
    """Virtual list box rendering one row per Document.

    Each row shows an optional coloured status stripe, the document key,
    and the document comment (or a red "not commented" placeholder).
    """

    def __init__(self, parent, docs):
        super(DocListBox, self).__init__(parent)
        # docs: sequence of Document objects.  (The original used bare
        # string literals as "attribute docstrings"; those are no-op
        # statements, so they are replaced with real comments here.)
        self.docs = docs
        self.SetItemCount(len(docs))
        # NOTE(review): wx.SystemSettings_GetFont is the classic
        # (wxPython 2.x) spelling; wxPython 4 uses wx.SystemSettings.GetFont.
        self.labelFont = wx.SystemSettings_GetFont(wx.SYS_SYSTEM_FONT)
        self.labelFont.SetPointSize(10)
        self.labelFont.SetFaceName(FILENAME_FONT)
        self.commentFont = wx.SystemSettings_GetFont(wx.SYS_SYSTEM_FONT)
        self.commentFont.SetPointSize(9)
        self.commentFont.SetFaceName(COMMENT_FONT)

    def OnMeasureItem(self, index):
        """Fixed row height in pixels."""
        return 35

    def OnDrawSeparator(self, dc, rect, index):
        """Draw a 1px black line along the top edge of each row."""
        oldpen = dc.GetPen()
        dc.SetPen(wx.Pen(wx.BLACK))
        dc.DrawLine(rect.x, rect.y, rect.x + rect.width, rect.y)
        dc.SetPen(oldpen)

    def OnDrawItem(self, dc, rect, index):
        '''
        Render one Document row: status stripe, key label, comment line.

        :type dc: wx.DC
        :type rect: wx.Rect
        :type index: int
        '''
        doc = self.docs[index]
        if doc.hasStatus():
            # Status stripe at the left edge:
            # green = "progress", red = "regress", grey = anything else.
            dc.SetPen(wx.TRANSPARENT_PEN)
            brushColour = wx.Colour(155, 155, 155)
            if doc.status == "progress":
                brushColour = wx.Colour(50, 200, 50)
            elif doc.status == "regress":
                brushColour = wx.Colour(200, 50, 50)
            dc.SetBrush(wx.Brush(brushColour))
            dc.DrawRectangle(rect.x + 1, rect.y + 2, 5, rect.height - 3)
        # Top half of the row for the key, bottom half for the comment.
        labelRect = wx.Rect(rect.x + 15, rect.y + 2, rect.width - 20, rect.height / 2 - 4)
        commentRect = wx.Rect(labelRect.x, labelRect.y + labelRect.height + 2, labelRect.width, labelRect.height)
        dc.SetPen(wx.BLACK_PEN)
        dc.SetBrush(wx.TRANSPARENT_BRUSH)
        dc.SetFont(self.labelFont)
        # Grey out the label when the document has not been compared yet.
        dc.SetTextForeground(wx.BLACK if doc.isCompared() else wx.Colour(135, 135, 135))
        dc.DrawLabel(doc.key, labelRect)
        dc.SetFont(self.commentFont)
        if not doc.hasComment():
            dc.SetTextForeground(wx.RED)
            dc.DrawLabel("not commented", commentRect)
        else:
            dc.SetTextForeground(wx.BLUE)
            dc.DrawLabel(doc.comment, commentRect)

    def GetItem(self, index):
        """Return the Document backing row *index*."""
        return self.docs[index]
|
# P1_A: Write a program to store the elements in a 1-D array and provide
# an option to perform operations like searching, sorting, merging, and
# reversing the elements.

# Code: searching
def linear_search(values, search_for):
    """Return True if search_for occurs in values, scanning left to right."""
    position = 0
    found = False
    # Compare each element until a match is found or the list ends.
    while position < len(values) and not found:
        if values[position] == search_for:
            found = True
        else:
            position += 1
    return found
# Demo: search for a present value (True) and an absent one (False).
l = [64, 34, 25, 12, 22, 11, 90]
print(linear_search(l, 12))
print(linear_search(l, 91))
# Code: sorting
def bubblesort(list):
    """Sort the given list in place, ascending (bubble sort). Returns None.

    Note: the parameter name shadows the built-in ``list``; it is kept
    unchanged for backward compatibility with existing callers.
    """
    # After each outer pass the largest remaining element has bubbled to
    # the end, so the inner range shrinks by one each time.
    for iter_num in range(len(list) - 1, 0, -1):
        for idx in range(iter_num):
            if list[idx] > list[idx + 1]:
                # Pythonic tuple swap instead of a temp variable.
                list[idx], list[idx + 1] = list[idx + 1], list[idx]
# Demo: sort a sample list in place and show the result.
list = [19,2,31,45,6,11,121,27]
bubblesort(list)
print(list)
# Code: merging
def mergeSort(nlist):
    """Sort nlist in place via recursive merge sort, tracing each phase."""
    print("Splitting ",nlist)
    if len(nlist) > 1:
        mid = len(nlist) // 2
        left = nlist[:mid]
        right = nlist[mid:]
        mergeSort(left)
        mergeSort(right)
        # Merge the two sorted halves back into nlist.
        i = j = k = 0
        while i < len(left) and j < len(right):
            if left[i] < right[j]:
                nlist[k] = left[i]
                i += 1
            else:
                nlist[k] = right[j]
                j += 1
            k += 1
        # Copy whichever half still has leftovers.
        while i < len(left):
            nlist[k] = left[i]
            i += 1
            k += 1
        while j < len(right):
            nlist[k] = right[j]
            j += 1
            k += 1
    print("Merging ",nlist)
# Demo: merge-sort a sample list and show the result.
nlist = [14,46,43,27,57,41,45,21,70]
mergeSort(nlist)
print(nlist)
# Code: reversing
def reverse(s):
    """Return the characters of s concatenated in reverse order."""
    return "".join(reversed(s))
# Demo: reverse a sample string and show the before/after.
s = "Geeksforgeeks"
print ("The original string is : ",end="")
print (s)
print ("The reversed string(using loops) is : ",end="")
print (reverse(s))
|
__author__ = 'ayost'
import sys
def buildUpdate():
    """Build the HTML body for an 'updates available' notification email.

    Reads the pending-update list from /home/.emails/updates.txt.

    Bug fixes: the file was opened in binary mode ('rb'), so read()
    returned bytes and the str.replace below raised TypeError on
    Python 3; 'with' now also guarantees the handle is closed.
    """
    with open("/home/.emails/updates.txt", "r") as f:
        contents = f.read()
    # Render one update per line in the HTML body.
    contents = contents.replace("\n","<br />")
    email = "<h2>Updates are available for your computer</h2><p>The following updates are available for your computer. You should log on soon to install these.</p><div style=\"font-size:10pt;\"><p>"
    email += contents
    email += "</p></div>"
    return email
def buildRestart():
    """Build the HTML body for a 'restart needed' notification email."""
    return ("<h2>A restart is needed for your computer</h2>"
            "<p>A restart is needed for your computer. Please log on soon and reboot to continue normal operation.</p>")
def buildDownload():
    """Build the HTML body for a 'download complete' notification email.

    Reads the completed-download list from /home/.emails/downloads.txt.

    Bug fixes: binary-mode read ('rb') made `email += contents` a
    str + bytes TypeError on Python 3; 'with' closes the handle.
    """
    with open("/home/.emails/downloads.txt", "r") as f:
        contents = f.read()
    email = "<h2>Your subscribed download is complete</h2><p>The following downloads you subscribed to are complete.</p><div style=\"font-size:10pt;\"><p>"
    email += contents
    email += "</p></div>"
    return email
def main():
    """Entry point: build the requested email body and write it to disk.

    Bug fixes: the usage branch fell through and crashed on sys.argv[1];
    an unknown type left msg/dest unbound (NameError); the output file
    was opened 'wb' but msg is a str (TypeError on Python 3) and the
    handle is now closed via 'with'.
    """
    if len(sys.argv) < 2:
        print("Incorrect Usage!")
        print("Usage:")
        print("\t python emailBuilder type")
        print("\t types:")
        print("\t\t -u updates avaialable")
        print("\t\t -r restart required")
        print("\t\t -d download finished")
        return
    type = sys.argv[1]
    # Dispatch on the flag letter ('-u' / '-r' / '-d').
    if type[1] == 'u':
        msg = buildUpdate()
        dest = "/home/.emails/update_email.txt"
    elif type[1] == 'r':
        msg = buildRestart()
        dest = "/home/.emails/restart_email.txt"
    elif type[1] == 'd':
        msg = buildDownload()
        dest = "/home/.emails/download_email.txt"
    else:
        print("Incorrect Usage!")
        return
    with open(dest, "w") as f:
        f.write(msg)
# Run only when executed as a script.
if __name__ == '__main__':
    main()
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# Six consecutive daily timestamps starting 2013-01-01.
dates = pd.date_range('20130101', periods=6)
print(dates)
# 6x4 frame of uniform random samples in [0, 1), indexed by those dates.
df = pd.DataFrame(np.random.rand(6,4), index=dates, columns=list('ABCD'))
print(df)
|
#!/usr/bin/env python3
from pprint import pprint
from collections import deque, defaultdict
import sys
sys.setrecursionlimit(10 ** 6)
input = sys.stdin.buffer.readline
inf = float("inf")
# Read the item count and the knapsack capacity, then one (weight, value)
# pair per item.
n_item, w_volume = map(int, input().split())
weight, value = [], []
for _ in range(n_item):
    w, v = map(int, input().split())
    weight.append(w)
    value.append(v)
# print(n_item, w_volume, weight, value)
# Classic 0/1 knapsack DP.
# dp[i + 1][w]: max total value choosing from the first i items with
# total weight not exceeding w.
# When item i fits (w - weight[i] >= 0):
#   dp[i + 1][w] = max(dp[i + 1][w], dp[i][w - weight[i]] + value[i])
# Whether or not item i is taken:
#   dp[i + 1][w] = max(dp[i + 1][w], dp[i][w])
dp = [[0 for w in range(w_volume + 1)] for i in range(n_item + 1)]
for i in range(n_item):
    for w in range(w_volume + 1):
        if w - weight[i] >= 0:
            dp[i + 1][w] = max(dp[i + 1][w], dp[i][w - weight[i]] + value[i])
        dp[i + 1][w] = max(dp[i + 1][w], dp[i][w])
print(dp[n_item][w_volume])
|
import mxnet as mx
import numpy as np
import cv2
from vgg_mx.symbol_vgg import VGG
from caffe_io import Transformer
from collections import namedtuple
import symbol_sentiment
import config
# Use every configured GPU, or fall back to CPU when none are configured.
ctx = [mx.cpu()] if len(config.gpus) == 0 else [mx.gpu(int(i)) for i in config.gpus.split(',')]
feature_names = ['object', 'scene', 'Sentiment']
# Minimal stand-in for mxnet's DataBatch: just the list of data tensors.
Batch = namedtuple('Batch', ['data'])
def data_trans(img, shape, mu):
    """Preprocess one image with a caffe-style Transformer.

    img: image array in HWC layout, scaled to [0, 1] by the caller.
    shape: network input shape, e.g. (1, 3, H, W).
    mu: per-channel mean to subtract (presumably BGR order, matching
        caffe preprocessing -- TODO confirm against Transformer).
    Returns the transformed image in CHW layout.
    """
    transformer = Transformer({'data': shape})
    transformer.set_transpose('data', (2,0,1))
    transformer.set_mean('data', mu)
    transformer.set_raw_scale('data', 255)
    transformed_image = transformer.preprocess('data', img)
    return transformed_image
def crop_lit_centor(img, mu, img_len = 224):
    """Resize so the short side is 256 px, then center-crop img_len x img_len.

    Returns the preprocessed CHW crop (see data_trans).
    """
    [n,m,_]=img.shape
    # Scale so the shorter side becomes 256, preserving aspect ratio.
    if m>n:
        m = int(m*256/n)
        n = 256
    else:
        n = int(n*256/m)
        m = 256
    # cv2.resize takes (width, height); then take the central square.
    return data_trans(cv2.resize(img,(m,n))/255.0,(1,3,n,m), mu)[:,int((n-img_len)/2):int((n+img_len)/2),int((m-img_len)/2):int((m+img_len)/2)]
def get_mod(output_name = 'relu7_output', sym = None, img_len = 224):
    """Build an inference-only mxnet Module truncated at *output_name*.

    sym: full network symbol; defaults to a VGG-16-style classifier.
    img_len: input edge length; batch shape is (1, 3, img_len, img_len).
    """
    if sym is None:
        vgg = VGG()
        sym = vgg.get_symbol(num_classes = 1000,
                             blocks = [(2, 64),
                                       (2, 128),
                                       (3, 256),
                                       (3, 512),
                                       (3, 512)])
    # Cut the graph at the requested internal output (the feature layer).
    internals = sym.get_internals()
    sym = internals[output_name]
    mod = mx.module.Module(
        context = ctx,
        symbol = sym,
        data_names = ("data", ),
        label_names = ()
    )
    mod.bind(data_shapes = [("data", (1, 3, img_len, img_len))], for_training = False)
    return mod
# Build one truncated network per feature type and load its weights.
object_model = get_mod()
object_model.load_params('../model/object.params')
scene_model = get_mod()
scene_model.load_params('../model/scene.params')
# The sentiment network uses its own symbol and 227x227 inputs.
sentiment_model = get_mod(sym = symbol_sentiment.get_sym(), img_len = 227)
sentiment_model.load_params('../model/Sentiment.params')
def get_obj_feature(img):
    """Return object-recognition features for one image (numpy array)."""
    # Per-channel mean; presumably the ImageNet BGR mean -- TODO confirm.
    mu = np.array([104,117,123])
    transformed_img = crop_lit_centor(img, mu)
    # Add the batch dimension.
    transformed_img = transformed_img[None]
    object_model.forward(Batch([mx.nd.array(transformed_img)]), is_train = False)
    outputs = object_model.get_outputs()[0].asnumpy()
    return outputs
def get_scene_feature(img):
    """Return scene-recognition features for one image (numpy array)."""
    # Per-channel mean of the scene model's training data.
    mu = np.array([105.487823486,113.741088867,116.060394287])
    transformed_img = crop_lit_centor(img, mu)
    # Add the batch dimension.
    transformed_img = transformed_img[None]
    scene_model.forward(Batch([mx.nd.array(transformed_img)]), is_train = False)
    outputs = scene_model.get_outputs()[0].asnumpy()
    return outputs
def get_sentiment_feature(img):
    """Return sentiment features for one image (numpy array)."""
    # Per-channel mean of the sentiment model's training data.
    mu = np.array([97.0411,105.423,111.677])
    # The sentiment network expects 227x227 inputs.
    transformed_img = crop_lit_centor(img, mu, img_len = 227)
    transformed_img = transformed_img[None]
    sentiment_model.forward(Batch([mx.nd.array(transformed_img)]), is_train = False)
    outputs = sentiment_model.get_outputs()[0].asnumpy()
    return outputs
def extract_feature(image_file):
    """Load an image and return its concatenated feature vector.

    :param image_file: path to an image file readable by OpenCV.
    :return: 1-D numpy array of [object, sentiment, scene] features stacked
        horizontally (this ordering is what downstream consumers receive).
    :raises IOError: if the file cannot be decoded as an image.
    :raises Exception: if the decoded array has an unsupported shape.
    """
    img = cv2.imread(image_file)
    # Bug fix: was ``assert img is not None, IOError(...)`` — asserts are
    # stripped under ``python -O``, so raise explicitly instead.
    if img is None:
        raise IOError('The file `{}` may be not an image'.format(image_file))
    # img.shape: H, W, C
    if img.ndim == 2:
        # gray image: replicate the single channel three times
        img = np.stack([img, img, img], axis=2)
    elif img.ndim == 3 and img.shape[2] in [3, 4]:
        if img.shape[2] == 4:
            # remove alpha channel
            img = img[:, :, :3]
    else:
        raise Exception('Invalid Image `{}` whose shape is {}'.format(image_file, img.shape))
    obj_feat = get_obj_feature(img)
    scene_feat = get_scene_feature(img)
    sentiment_feat = get_sentiment_feature(img)
    # Keep the original [object, sentiment, scene] concatenation order.
    image_features = [obj_feat, sentiment_feat, scene_feat]
    return np.hstack(image_features)
if __name__ == '__main__':
    # Bug fix: the original called ``get_feature``, which is not defined
    # anywhere in this module — the implemented entry point is
    # ``extract_feature`` — so this script always crashed with NameError.
    img_feature = extract_feature('../images/test.jpg')
    np.save('../images/mx_test.npy', img_feature)
|
"""
Final exam, problem 3.
Authors: David Mutchler, Dave Fisher, Matt Boutell, their colleagues,
and Joshua Eckels.
""" # DONE: 1. PUT YOUR NAME IN THE ABOVE LINE.
def main():
    """Entry point: runs every TEST function defined in this module."""
    run_test_shape()
def run_test_shape():
    """Tests the shape function on four (m, n) cases."""
    print()
    print('--------------------------------------------------')
    print('Testing the SHAPE function:')
    print('--------------------------------------------------')
    # Each case is an (m, n) pair; output text matches the original
    # hand-written test banners exactly.
    test_cases = [(5, 2), (3, 6), (7, 1), (6, 4)]
    for test_number, (m, n) in enumerate(test_cases, start=1):
        print()
        print('Test {} of shape: m={} and n={}'.format(test_number, m, n))
        shape(m, n)
def shape(m, n):
    ###########################################################################
    # IMPORTANT: In solving this problem,
    #   You must NOT use string multiplication.
    ###########################################################################
    """
    What comes in: Positive integers m and n.
    What goes out: Nothing.
    Side effects:
      Prints n "v" shaped patterns of numbers,
      where the height of each "v" is m.
      Examples:
        It looks like this example for m=5 and n=2:
        5       5 5       5
         4     4   4     4
          3   3     3   3
           2 2       2 2
            1         1
      and like this for m=3 and n=6:
        3   3 3   3 3   3 3   3 3   3 3   3
         2 2   2 2   2 2   2 2   2 2   2 2
          1     1     1     1     1     1
    :type m: int
    :type n: int
    """
    # One printed row per value from m down to 1; each row repeats the same
    # two-number "segment" n times, then ends with a newline.
    for row in range(m, 0, -1):
        for _ in range(n):
            # Leading indent of this arm of the "v".
            for _ in range(m - row):
                print(' ', end='')
            print(row, end='')
            # Gap between the two arms; range is empty once 2*row - 3 <= 0,
            # which is exactly when the arms meet at the bottom.
            for _ in range(2 * row - 3):
                print(' ', end='')
            if row > 1:
                print(row, end='')
            # Trailing indent plus one separator space before the next segment.
            for _ in range(m - row):
                print(' ', end='')
            print(' ', end='')
        print()
# -----------------------------------------------------------------------------
# Calls main to start the ball rolling.
# (Note: runs unconditionally on import — this file has no __name__ guard.)
# -----------------------------------------------------------------------------
main()
|
from unittest import TestCase
from unittest.mock import patch
import io
from game import print_class_description
class TestPrintClassDescription(TestCase):
    """Verify print_class_description writes the expected blurb for each ship class."""

    def _assert_prints(self, ship_class, expected, mock_output):
        # Shared check: call the function and compare the captured stdout.
        print_class_description(ship_class)
        self.assertEqual(mock_output.getvalue(), expected)

    @patch('sys.stdout', new_callable=io.StringIO)
    def test_print_squire_class(self, mock_output):
        expected = ("A Lazarus Engineโข allows for this ship to repair itself after its hull "
                    "integrity has been completely breached for the first time.\n")
        self._assert_prints("Squire", expected, mock_output)

    @patch('sys.stdout', new_callable=io.StringIO)
    def test_print_sapper_class(self, mock_output):
        expected = ("Destroy enemy ships to steal their energy and charge up your Quasar Cannonโข "
                    "for a devastating attack.\n")
        self._assert_prints("Sapper", expected, mock_output)

    @patch('sys.stdout', new_callable=io.StringIO)
    def test_print_ghost_class(self, mock_output):
        expected = ("A nimble ship covered in aerodynamic SlipStreamโข technology allows the pilot to make"
                    " the first move in combat, and attack multiple times in one turn.\n")
        self._assert_prints("Ghost", expected, mock_output)

    @patch('sys.stdout', new_callable=io.StringIO)
    def test_print_cherub_class(self, mock_output):
        expected = "QuickFixโข Protocols allows this ship to repair itself during combat.\n"
        self._assert_prints("Cherub", expected, mock_output)
|
from __future__ import print_function
def run():
    """Count input lines containing an ABBA pattern outside square brackets.

    Reads ``input.txt``. A line counts when a four-character palindrome of the
    form abba (outer pair equal, inner pair equal, outer != inner) appears
    outside ``[...]``; a line is disqualified entirely when an ABBA occurs
    inside brackets (signalled via the NameError below — kept from the
    original control flow).

    Bug fixes vs. the original:
    - the file is opened with a context manager (the handle was never closed);
    - ``lines = file.read(input)`` was Python-2-only (and shadowed the
      ``input`` builtin) — replaced with a plain ``read()`` on the handle;
    - the sliding-window update was nested under the ``]`` branch, so
      ordinary characters never entered the window and ABBAs past the first
      four characters of a line could not be detected.
    """
    with open("input.txt", 'r') as handle:
        lines = handle.read()
    abbas = 0
    for line in lines.split("\n"):
        if len(line) == 0:
            continue
        sliding_window = []
        bracket_mod = False
        abba_mod = False
        try:
            for c in line:
                # Track whether we are currently inside a [...] section.
                if c == "[":
                    bracket_mod = True
                elif c == "]":
                    bracket_mod = False
                # Maintain a 4-character sliding window for EVERY character.
                # Bracket characters are included, but a window containing a
                # bracket cannot satisfy the ABBA test for well-formed input.
                if len(sliding_window) == 4:
                    sliding_window.pop(0)
                sliding_window.append(c)
                if len(sliding_window) < 4:
                    continue
                new_abba_mod = check_abba(sliding_window)
                if new_abba_mod and bracket_mod:
                    # ABBA inside brackets disqualifies the whole line.
                    raise NameError("Bad ABBA")
                if new_abba_mod:
                    abba_mod = new_abba_mod
        except NameError:
            print("{} has ABBA between ".format(line))
            continue
        if abba_mod:
            print("{} is ok".format(line))
            abbas += 1
        else:
            print("{} NO".format(line))
    print("{} ABBA".format(abbas))

def check_abba(sliding_window):
    """Return True when the 4-character window is an ABBA (e.g. 'xyyx')."""
    if sliding_window[0] == sliding_window[3]:
        if sliding_window[0] != sliding_window[1]:
            if sliding_window[1] == sliding_window[2]:
                print("{} is candidate".format(sliding_window))
                return True
    return False

if __name__ == '__main__':
    run()
|
import codecs
import json
import os
def getProjectPages(start=0, end=100, cache=False):
    """Return cached project-page records from the bundled JSON dump.

    :param start: index of the first record to return.
    :param end: one-past-last index; falsy (0/None) means "to the end".
    :param cache: when False, the live-API path is taken — it is not
        implemented yet, so a warning is printed and ``[]`` is returned.
    :return: list of page records (sliced per start/end).
    """
    if not cache:
        # Fix: Python-2 print statement replaced with the function call
        # (valid, and identical output, on both Python 2 and 3 here).
        print('WARNING: API queries not supported yet')
        return []
    basedir = os.path.dirname(__file__)
    with codecs.open(os.path.join(basedir, 'config/medicine_dump.json'), encoding='utf-8') as jsonfile:
        data = json.load(jsonfile)
    # Bug fix: the original only sliced when ``end < len(data)``, so a
    # non-zero ``start`` was silently ignored for short dumps or when
    # ``end`` exceeded the data length. Python slicing already clamps
    # out-of-range bounds, so slice unconditionally.
    if end:
        data = data[start:end]
    else:
        data = data[start:]
    return data
|
# Demo: Python lists may mix element types (int, float, str, bool).
a = [1, 1.2, 'sagar', True]
print(a)
print(a[0])
#print(a[4])  # would raise IndexError: only indices 0..3 exist
# access index using a[x]
a[0] = 'one'  # lists are mutable: this replaces the first element in place
print(a)
#list slicing (stop index is excluded; omitting it slices to the end)
b = ["sagar", 13, 12, 1998, "neha", 30, 9, 1998]
print("sagar = ", b[1:4])
print("neha = ",b[5: ])
|
import networkx as nx
import rw
import numpy as np
subs=['S101','S102','S103','S104','S105','S106','S107','S108','S109','S110',
'S111','S112','S113','S114','S115','S116','S117','S118','S119','S120']
#subs=['S1','S2','S3','S4','S5','S7','S8','S9','S10','S11','S12','S13']
toydata=rw.Toydata({
'numx': 3,
'trim': 1,
'jump': 0.0,
'jumptype': "stationary",
'priming': 0.0,
'startX': "stationary"})
fitinfo=rw.Fitinfo({
'startGraph': "windowgraph_valid",
'windowgraph_size': 2,
'windowgraph_threshold': 2,
'followtype': "avg",
'prior_samplesize': 10000,
'recorddir': "records/",
'directed': False,
'prune_limit': np.inf,
'triangle_limit': np.inf,
'other_limit': np.inf})
toygraphs=rw.Toygraphs({
'numgraphs': 1,
'graphtype': "steyvers",
'numnodes': 280,
'numlinks': 6,
'prob_rewire': .3})
irts=rw.Irts({
'data': [],
'irttype': "exgauss",
'exgauss_lambda': 0.721386887,
'exgauss_sigma': 6.58655566,
'irt_weight': 0.95,
'rcutoff': 20})
prior_graphs=[]
prior_items=[]
ss_Xs=[]
ss_numnodes=[]
# generate starting graphs: for each subject, read their fluency lists and
# build an initial window graph; cache lists/items/node counts for refitting.
for subj in subs:
    category="animals"
    Xs, items, irts.data, numnodes=rw.readX(subj,category,'./Spring2015/results_cleaned.csv',ignorePerseverations=True)
    window_graph=rw.windowGraph(Xs, numnodes, td=toydata, valid=True, fitinfo=fitinfo)
    prior_graphs.append(window_graph)
    prior_items.append(items)
    ss_Xs.append(Xs)
    ss_numnodes.append(numnodes)
# initialize prior over graphs from all subjects' starting graphs
priordict = rw.genGraphPrior(prior_graphs, prior_items)
# Iterate until a full pass over all subjects changes no graph.
changesmade = 1
rnd = 1
# Keep the un-shuffled order: prior_graphs/ss_Xs/etc. are indexed by it,
# while ``subs`` itself is shuffled each round below.
original_order=subs[:]
while changesmade > 0:
    print "round:", rnd, " graphs changed: ", changesmade
    rnd += 1
    changesmade=0
    np.random.shuffle(subs)
    for subj in subs:
        print "ss: ", subj
        subj_idx = original_order.index(subj)
        toydata.numx = len(ss_Xs[subj_idx])
        fitinfo.startGraph = prior_graphs[subj_idx]
        prior = (priordict, prior_items[subj_idx])
        # find best graph
        uinvite_graph, bestval=rw.uinvite(ss_Xs[subj_idx], toydata, ss_numnodes[subj_idx], fitinfo=fitinfo, prior=prior)
        ## update prior if graph has changed (recompute from all graphs)
        if not np.array_equal(uinvite_graph, prior_graphs[subj_idx]):
            changesmade += 1
            prior_graphs[subj_idx] = uinvite_graph
            priordict = rw.genGraphPrior(prior_graphs, prior_items)
# Write each subject's fitted graph plus their naive (directly observed)
# graph side-by-side to CSV.
for subj_idx in range(len(subs)):
    g=nx.to_networkx_graph(prior_graphs[subj_idx])
    g2=nx.to_networkx_graph(rw.noHidden(ss_Xs[subj_idx],ss_numnodes[subj_idx]))
    nx.relabel_nodes(g, prior_items[subj_idx], copy=False)
    nx.relabel_nodes(g2, prior_items[subj_idx], copy=False)
    # Bug fix: the loop variable was the integer index, so ``subj+".csv"``
    # raised TypeError (int + str). Use the subject's label instead; note
    # prior_graphs et al. are indexed by ``original_order`` (``subs`` was
    # shuffled during fitting), so look the label up there.
    subj_label = original_order[subj_idx]
    rw.write_csv([g, g2], subj_label+".csv", subj_label) # write multiple graphs
|
''' Exercise: compute the sum of every odd number in 1..500 that is also a
multiple of three, and report how many such numbers there are.
(Problem statement originally in Portuguese.) '''
soma = 0   # running total of the qualifying numbers
conta = 0  # how many numbers qualified
# Step 2 visits only the odd numbers; keep those divisible by three.
for numero in range(1, 501, 2):
    if numero % 3 == 0:
        conta += 1
        soma += numero
print('A soma de todos os valores {} solicitados รฉ {}'.format(conta, soma))
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
    """Initial schema for the ``fantaapp`` fantasy-football application.

    Creates the core entities — coaches (Allenatore), players (Calciatore),
    championships, line-ups (Formazione), match days, match-ups, leagues,
    editorial boards and votes — then adds the cross-model foreign keys as
    separate AddField operations once all tables exist.
    """

    dependencies = [
        # Allenatore references the (swappable) auth user model below.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        # Coach / fantasy-team owner.
        migrations.CreateModel(
            name='Allenatore',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('budget', models.PositiveSmallIntegerField()),
                # NOTE(review): CommaSeparatedIntegerField is deprecated in
                # later Django versions — acceptable in a historical migration.
                ('numerogiocatori', models.CommaSeparatedIntegerField(max_length=20)),
                ('nomesquadra', models.CharField(max_length=200)),
                ('amministratore', models.BooleanField()),
            ],
        ),
        # Real-world player with per-season statistics.
        migrations.CreateModel(
            name='Calciatore',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('nome', models.CharField(max_length=40)),
                # Role stored as a small int; choices pair role letters
                # (p/d/c/a = goalkeeper/defender/midfielder/forward —
                # presumably; confirm against the app code) with 0..3.
                ('ruolo', models.PositiveSmallIntegerField(choices=[(b'p', 0), (b'd', 1), (b'c', 2), (b'a', 3)])),
                ('exsquadra', models.CharField(max_length=40)),
                ('quotazione', models.PositiveSmallIntegerField()),
                ('fantamedia', models.FloatField()),
                ('fantamediasq', models.FloatField()),
                ('mediavoto', models.FloatField()),
                ('presenze', models.PositiveSmallIntegerField()),
                ('golfatti', models.PositiveSmallIntegerField()),
                ('golsubiti', models.PositiveSmallIntegerField()),
                ('rigoriparati', models.PositiveSmallIntegerField()),
                ('ammonizioni', models.PositiveSmallIntegerField()),
                ('espulsioni', models.PositiveSmallIntegerField()),
                ('assist', models.PositiveSmallIntegerField()),
                ('imageurl', models.URLField()),
            ],
        ),
        # Real-world championship (season) with a date range.
        migrations.CreateModel(
            name='Campionato',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('nome', models.CharField(max_length=50)),
                ('data_inizio', models.DateField()),
                ('data_fine', models.DateField()),
            ],
        ),
        # A coach's submitted line-up.
        migrations.CreateModel(
            name='Formazione',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('data_invio', models.DateField()),
                ('definitiva', models.BooleanField()),
                ('allenatore', models.ForeignKey(to='fantaapp.Allenatore')),
            ],
        ),
        # A numbered match day within a championship.
        migrations.CreateModel(
            name='Giornata',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('numero', models.PositiveSmallIntegerField()),
                ('data', models.DateField()),
                ('campionato', models.ForeignKey(to='fantaapp.Campionato')),
            ],
        ),
        # Scheduled fixture between two coaches.
        migrations.CreateModel(
            name='IncontroCalendario',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('giornata', models.PositiveSmallIntegerField()),
                ('allenatorecasa', models.ForeignKey(related_name='incontricasa', to='fantaapp.Allenatore')),
                ('allenatoretrasferta', models.ForeignKey(related_name='incontritrasferta', to='fantaapp.Allenatore')),
            ],
        ),
        # Real-world championship fixture on a given match day.
        migrations.CreateModel(
            name='IncontroCampionato',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('data', models.DateField()),
                ('giornata', models.ForeignKey(to='fantaapp.Giornata')),
            ],
        ),
        # Played fantasy-league match between two submitted line-ups.
        migrations.CreateModel(
            name='IncontroLega',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('giornatalega', models.PositiveSmallIntegerField()),
                ('formazionecasa', models.ForeignKey(related_name='IncontroCasa', to='fantaapp.Formazione')),
                ('formazionetrasferta', models.ForeignKey(related_name='IncontroTrasferta', to='fantaapp.Formazione')),
                ('giornata', models.ForeignKey(to='fantaapp.Giornata')),
            ],
        ),
        # Fantasy league container.
        migrations.CreateModel(
            name='Lega',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('nome', models.CharField(max_length=50)),
                ('descrizione', models.TextField()),
                ('calcolo_voto', models.CharField(max_length=100)),
            ],
        ),
        # Editorial board that publishes player votes.
        migrations.CreateModel(
            name='Redazione',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('nome', models.CharField(max_length=50)),
                ('descrizione', models.TextField()),
            ],
        ),
        # Through-model linking a line-up to its players with a position.
        migrations.CreateModel(
            name='Referto',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('posizione', models.PositiveSmallIntegerField()),
                ('calciatore', models.ForeignKey(to='fantaapp.Calciatore')),
                ('formazione', models.ForeignKey(to='fantaapp.Formazione')),
            ],
        ),
        # Real-world team participating in a championship.
        migrations.CreateModel(
            name='SquadraCampionato',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('nome', models.CharField(max_length=50)),
                ('campionato', models.ForeignKey(to='fantaapp.Campionato')),
            ],
        ),
        # Player transfer (buy/sell) within a league.
        migrations.CreateModel(
            name='TransferLega',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('costo', models.PositiveSmallIntegerField()),
                ('entrata', models.BooleanField()),
                ('allenatore', models.ForeignKey(to='fantaapp.Allenatore')),
                ('calciatore', models.ForeignKey(to='fantaapp.Calciatore')),
            ],
        ),
        # Match-day vote for a player from one editorial board.
        migrations.CreateModel(
            name='Voto',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('votopuro', models.DecimalField(max_digits=4, decimal_places=2)),
                ('assist', models.PositiveSmallIntegerField()),
                ('golsuazione', models.PositiveSmallIntegerField()),
                ('golsurigore', models.PositiveSmallIntegerField()),
                ('ammo', models.PositiveSmallIntegerField()),
                ('espu', models.PositiveSmallIntegerField()),
                ('autogol', models.PositiveSmallIntegerField()),
                ('golsubiti', models.PositiveSmallIntegerField()),
                ('giornata', models.ForeignKey(to='fantaapp.Giornata')),
                ('redazione', models.ForeignKey(to='fantaapp.Redazione')),
            ],
        ),
        # Deferred foreign keys: added after all CreateModel operations so the
        # referenced tables are guaranteed to exist.
        migrations.AddField(
            model_name='incontrolega',
            name='lega',
            field=models.ForeignKey(to='fantaapp.Lega'),
        ),
        migrations.AddField(
            model_name='incontrocampionato',
            name='squadracasa',
            field=models.ForeignKey(related_name='IncontroCasa', to='fantaapp.SquadraCampionato'),
        ),
        migrations.AddField(
            model_name='incontrocampionato',
            name='squadratrasferta',
            field=models.ForeignKey(related_name='IncontroTransferta', to='fantaapp.SquadraCampionato'),
        ),
        migrations.AddField(
            model_name='incontrocalendario',
            name='lega',
            field=models.ForeignKey(to='fantaapp.Lega'),
        ),
        migrations.AddField(
            model_name='formazione',
            name='giocatori',
            field=models.ManyToManyField(to='fantaapp.Calciatore', through='fantaapp.Referto'),
        ),
        migrations.AddField(
            model_name='calciatore',
            name='squadra',
            field=models.ForeignKey(to='fantaapp.SquadraCampionato'),
        ),
        migrations.AddField(
            model_name='allenatore',
            name='lega',
            field=models.ForeignKey(to='fantaapp.Lega'),
        ),
        migrations.AddField(
            model_name='allenatore',
            name='utente',
            field=models.ForeignKey(to=settings.AUTH_USER_MODEL),
        ),
    ]
|
#This program is to find next greater number from a given number
"""
Input: n='231'
Output: 'next greater value=321'
"""
#NOTE: Using permutations will not give you efficient time complexity if number will be too large
from itertools import permutations
import time
# Python 2 script: brute-force "next greater permutation" of the digits of n.
n='231'
#Start and end will give you actual time the program need to execute
start=time.time()
temp=[]  # all digit permutations of n, as strings
new=[]   # permutations strictly greater than n
#Find permutations for finding combinations of number
for i in permutations(n):
    per=''.join(i)
    temp.append(per)
temp.sort()
print temp
# String comparison is lexicographic; since all permutations have the same
# length as n, this matches numeric comparison.
if n==max(temp):
    print "not"
else:
    for i in temp:
        if n<i:
            new.append(i)
    # Smallest permutation greater than n == the "next greater" number.
    print min(new)
end=time.time()
total=end-start
print total
def find(number,n):
for i in range(n-1,0,-1):
if number[i]>number[i-1]:
break
if i==0:
print "not possible"
|
# -*- coding: utf-8 -*-
"""
Created on 2020-03-10
@author: duytinvo
Copy from HuggingFace examples and adapt with NNlib
Fine-tuning the library models for language modeling on a text file (GPT, GPT-2, BERT, RoBERTa).
GPT and GPT-2 are fine-tuned using a causal language modeling (CLM) loss while BERT and RoBERTa are fine-tuned
using a masked language modeling (MLM) loss.
"""
import sys
import random
import argparse
import glob
import logging
import os
import time
from typing import Dict, Tuple
import torch
from tqdm import tqdm, trange
import numpy as np
from datetime import datetime
import shutil
try:
from torch.utils.tensorboard import SummaryWriter
except ImportError:
from tensorboardX import SummaryWriter
from transformers import (
MODEL_WITH_LM_HEAD_MAPPING,
WEIGHTS_NAME,
AutoConfig,
AutoModelWithLMHead,
AutoTokenizer,
)
from mlmodels.utils.auxiliary import Timer
from mlmodels.utils.special_tokens import SENSP, SENGE, SOT, EOT, UNK, PAD, NULL, CLS, SEP, MASK, NL
from mlmodels.utils.idx2tensor import Data2tensor
from mlmodels.utils.trad_tokenizer import Tokenizer
from mlmodels.utils.helper import Helper
from mlmodels.metrics.prf1 import APRF1
from mlmodels.metrics.bleu import compute_bleu
from mlmodels.metrics.string_match import compute_string_match
from mlmodels.modules.t5_multi_sampling import T5ForConditionalGeneration
from mlmodels.metrics.metrics_loader import MetricsFactory
# Module-level logger; handlers/levels are configured in TransLanguageModel.__init__.
logger = logging.getLogger(__name__)
# All transformer config classes that ship an LM head, and their short
# model-type strings (useful for validating a --model_type argument).
MODEL_CONFIG_CLASSES = list(MODEL_WITH_LM_HEAD_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
class TransLanguageModel(object):
    def __init__(self, args):
        """Validate CLI arguments and set up devices, logging and placeholders.

        :param args: parsed argparse namespace; mutated in place (device,
            n_gpu, and possibly model_name_or_path when resuming).
        :raises ValueError: on inconsistent argument combinations (see below).
        """
        self.args = args
        # Task code: 1 = sentiment, 2 = response generation, 3 = anything else.
        self.task = 1 if self.args.task == "sentiment" else (2 if self.args.task == 'response' else 3)
        if self.args.model_type in ["bert", "roberta", "distilbert", "camembert"] and not self.args.mlm:
            raise ValueError("BERT and RoBERTa-like models do not have LM heads but masked LM heads. "
                             "They must be run using the --mlm flag (masked language modeling).")
        if self.args.dev_file is None and self.args.do_eval:
            raise ValueError("Cannot do evaluation without an evaluation data file. "
                             "Either supply a file to --dev_file or remove the --do_eval argument.")
        if self.args.should_continue:
            # Resume from the newest checkpoint; the prefix depends on whether
            # training was checkpointed by epoch or by optimization step.
            if self.args.max_steps < 0:
                checkpoint_prefix = "checkpoint_by-epoch"
            else:
                checkpoint_prefix = "checkpoint_by-step"
            # print(checkpoint_prefix)
            sorted_checkpoints = Helper._sorted_checkpoints(self.args.output_dir, checkpoint_prefix=checkpoint_prefix)
            if len(sorted_checkpoints) == 0:
                raise ValueError("Used --should_continue but no checkpoint was found in --output_dir.")
            else:
                self.args.model_name_or_path = sorted_checkpoints[-1]
        if (os.path.exists(self.args.output_dir) and os.listdir(self.args.output_dir) and
                self.args.do_train and not self.args.overwrite_output_dir):
            raise ValueError("Output directory ({}) already exists and is not empty. "
                             "Use --overwrite_output_dir to overcome.".format(self.args.output_dir))
        # Setup CUDA, GPU & distributed training
        if self.args.local_rank == -1 or self.args.no_cuda:
            device = torch.device("cuda" if torch.cuda.is_available() and not self.args.no_cuda else "cpu")
            self.args.n_gpu = 0 if self.args.no_cuda else torch.cuda.device_count()
        else:  # Initializes the distributed backend which will take care of sychronizing nodes/GPUs
            torch.cuda.set_device(self.args.local_rank)
            device = torch.device("cuda", self.args.local_rank)
            torch.distributed.init_process_group(backend="nccl")
            self.args.n_gpu = 1
        self.args.device = device
        # Setup logging (only rank -1/0 logs at INFO; other ranks at WARN)
        logging.basicConfig(
            format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
            datefmt="%m/%d/%Y %H:%M:%S",
            level=logging.INFO if self.args.local_rank in [-1, 0] else logging.WARN,
        )
        logger.warning("Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
                       self.args.local_rank, device, self.args.n_gpu, bool(self.args.local_rank != -1),
                       self.args.fp16, )
        # Placeholders populated later by model_init()/train().
        self.config = None
        self.tokenizer = None
        self.model = None
        self.source2idx = None
        self.target2idx = None
        self.collate = None
        # Label id ignored by the loss (CrossEntropyLoss default: -100).
        self.pad_label_id = torch.nn.CrossEntropyLoss().ignore_index
        self.pad_id = 0
        self.optimizer = None
        self.scheduler = None
        self.tb_writer = None
def model_init(self):
# Set seed
Data2tensor.set_randseed(self.args.seed, self.args.n_gpu)
# Load pretrained model and tokenizer
if self.args.local_rank not in [-1, 0]:
torch.distributed.barrier()
# Barrier to make sure only the first process in distributed training download model & vocab
# Load the configuration file
if self.args.config_name:
# self.config = self.args.config_class.from_pretrained(self.args.config_name,
# cache_dir=self.args.cache_dir)
self.config = AutoConfig.from_pretrained(self.args.config_name, cache_dir=self.args.cache_dir)
elif self.args.model_name_or_path:
# self.config = self.args.config_class.from_pretrained(self.args.model_name_or_path,
# cache_dir=self.args.cache_dir)
self.config = AutoConfig.from_pretrained(self.args.model_name_or_path, cache_dir=self.args.cache_dir)
else:
# self.config = self.args.config_class()
raise ValueError(
"You are instantiating a new config instance from scratch. "
"This is not supported, but you can do it from another script, "
"save it, and load it from here, using --config_name")
# Load the tokenizer file
if self.args.tokenizer_name:
# self.tokenizer = self.args.tokenizer_class.from_pretrained(self.args.tokenizer_name,
# cache_dir=self.args.cache_dir)
self.tokenizer = AutoTokenizer.from_pretrained(self.args.tokenizer_name, cache_dir=self.args.cache_dir)
elif self.args.model_name_or_path:
# self.tokenizer = self.args.tokenizer_class.from_pretrained(self.args.model_name_or_path,
# cache_dir=self.args.cache_dir)
self.tokenizer = AutoTokenizer.from_pretrained(self.args.model_name_or_path, cache_dir=self.args.cache_dir)
else:
raise ValueError("You are instantiating a new tokenizer from scratch. "
"This is not supported, but you can do it from another script, save it,"
"and load it from here, using --tokenizer_name")
# Add special tokens for auto responding
try:
if self.tokenizer._bos_token is None:
self.tokenizer.add_special_tokens({"bos_token": SOT})
except AssertionError:
self.tokenizer.add_tokens([SOT])
try:
if self.tokenizer._eos_token is None:
self.tokenizer.add_special_tokens({"eos_token": EOT})
except AssertionError:
self.tokenizer.add_tokens([EOT])
try:
if self.tokenizer._unk_token is None:
self.tokenizer.add_special_tokens({"unk_token": UNK})
except AssertionError:
self.tokenizer.add_tokens([UNK])
try:
if self.tokenizer._sep_token is None:
self.tokenizer.add_special_tokens({"sep_token": SEP})
except AssertionError:
self.tokenizer.add_tokens([SEP])
try:
if self.tokenizer._pad_token is None:
self.tokenizer.add_special_tokens({"pad_token": PAD})
except AssertionError:
self.tokenizer.add_tokens([PAD])
try:
if self.tokenizer._cls_token is None:
self.tokenizer.add_special_tokens({"cls_token": CLS})
except AssertionError:
self.tokenizer.add_tokens([CLS])
try:
if self.tokenizer._mask_token is None:
self.tokenizer.add_special_tokens({"mask_token": MASK})
except AssertionError:
self.tokenizer.add_tokens([MASK])
self.tokenizer.add_tokens([SENSP, SENGE, NULL, NL])
# Load the model file
if self.args.model_name_or_path:
# self.model = self.args.model_class.from_pretrained(self.args.model_name_or_path,
# from_tf=bool(".ckpt" in self.args.model_name_or_path),
# config=self.config, cache_dir=self.args.cache_dir, )
if 't5' in self.args.model_name_or_path:
self.model = T5ForConditionalGeneration.from_pretrained(self.args.model_name_or_path)
else:
self.model = AutoModelWithLMHead.from_pretrained(self.args.model_name_or_path,
from_tf=bool(".ckpt" in self.args.model_name_or_path),
config=self.config, cache_dir=self.args.cache_dir, )
else:
logger.info("Training new model from scratch")
# self.model = self.args.model_class(config=self.config)
self.model = AutoModelWithLMHead.from_config(self.config)
self.model.to(self.args.device)
if self.args.local_rank == 0:
torch.distributed.barrier()
# End of barrier to make sure only the first process in distributed training download model & vocab
if self.args.model_type == "t5":
if self.args.block_size <= 0:
self.args.block_size = self.tokenizer.max_len - 1
# Our input block size will be the max possible for the model
else:
self.args.block_size = min(self.args.block_size, self.tokenizer.max_len) - 1
else:
if self.args.block_size <= 0:
self.args.block_size = self.tokenizer.max_len // 2 - 1
# Our input block size will be the max possible for the model
else:
self.args.block_size = min(self.args.block_size, self.tokenizer.max_len // 2) - 1
data_block_size = self.args.block_size - (self.tokenizer.max_len - self.tokenizer.max_len_single_sentence)
logger.info("Training/evaluation parameters %s", self.args)
self.source2idx = TransLanguageModel.tok2id(self.tokenizer, data_block_size, eos=True, special_tokens=True)
if self.task == 2:
self.target2idx = self.source2idx
else:
self.target2idx = TransLanguageModel.tok2id(self.tokenizer, data_block_size, eos=False,
special_tokens=False)
if self.tokenizer.pad_token is not None:
self.pad_id = self.tokenizer.pad_token_id
if "gpt2" in self.args.model_type:
self.pad_label_id = self.pad_id
self.collate = Helper.collate_fn(padding_value=self.pad_id, target_padding_value=self.pad_label_id,
batch_first=True, task=self.task)
pass
    def mask_tokens(self, inputs: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
        """ Prepare masked tokens inputs/labels for masked language modeling: 80% MASK, 10% random, 10% original.

        NOTE: ``inputs`` is modified in place (masked positions are
        overwritten) and also returned alongside the label tensor.
        """
        if self.tokenizer.mask_token is None:
            raise ValueError(
                "This tokenizer does not have a mask token which is necessary for masked language modeling. "
                "Remove the --mlm flag if you want to use this tokenizer.")
        labels = inputs.clone()
        # We sample a few tokens in each sequence for masked-LM training (with probability args.mlm_probability defaults to 0.15 in Bert/RoBERTa)
        probability_matrix = torch.full(labels.shape, self.args.mlm_probability)
        # Never select special tokens (BOS/EOS/CLS/SEP/...) or padding for masking.
        special_tokens_mask = [self.tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True)
                               for val in labels.tolist()]
        probability_matrix.masked_fill_(torch.tensor(special_tokens_mask, dtype=torch.bool), value=0.0)
        if self.tokenizer._pad_token is not None:
            padding_mask = labels.eq(self.tokenizer.pad_token_id)
            probability_matrix.masked_fill_(padding_mask, value=0.0)
        masked_indices = torch.bernoulli(probability_matrix).bool()
        labels[~masked_indices] = -100  # We only compute loss on masked tokens
        # 80% of the time, we replace masked input tokens with tokenizer.mask_token ([MASK])
        indices_replaced = torch.bernoulli(torch.full(labels.shape, 0.8)).bool() & masked_indices
        inputs[indices_replaced] = self.tokenizer.convert_tokens_to_ids(self.tokenizer.mask_token)
        # 10% of the time, we replace masked input tokens with random word
        # (0.5 of the remaining 20% of masked positions = 10% overall)
        indices_random = torch.bernoulli(torch.full(labels.shape, 0.5)).bool() & masked_indices & ~indices_replaced
        random_words = torch.randint(len(self.tokenizer), labels.shape, dtype=torch.long)
        inputs[indices_random] = random_words[indices_random]
        # The rest of the time (10% of the time) we keep the masked input tokens unchanged
        return inputs, labels
def train_batch(self, epoch_iterator, tr_loss, logging_loss, global_step=0, steps_trained_in_current_epoch=0):
self.model.train()
for step, batch in enumerate(epoch_iterator):
self.model.zero_grad()
# Skip past any already trained steps if resuming training
if steps_trained_in_current_epoch > 0:
steps_trained_in_current_epoch -= 1
continue
if self.args.model_type == "t5":
assert len(batch) == 2, "Must return a pair of input-output"
inputs, labels = batch[0], batch[1]
else:
if len(batch) == 2:
# batch, _ = batch
batch = torch.cat((batch[0], batch[1]), dim=1)
inputs, labels = self.mask_tokens(batch) if self.args.mlm else (batch, batch)
inputs = inputs.to(self.args.device)
labels = labels.to(self.args.device)
if self.args.model_type == "t5":
outputs = self.model(input_ids=inputs, lm_labels=labels)
else:
outputs = self.model(inputs, masked_lm_labels=labels) if self.args.mlm else self.model(inputs,
labels=labels)
loss = outputs[0] # model outputs are always tuple in transformers (see doc)
if self.args.n_gpu > 1:
loss = loss.mean() # mean() to average on multi-gpu parallel training
if self.args.gradient_accumulation_steps > 1:
loss = loss / self.args.gradient_accumulation_steps
if self.args.fp16:
with apex.amp.scale_loss(loss, self.optimizer) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
tr_loss += loss.item()
if (step + 1) % self.args.gradient_accumulation_steps == 0:
if self.args.fp16:
torch.nn.utils.clip_grad_norm_(apex.amp.master_params(self.optimizer), self.args.max_grad_norm)
else:
torch.nn.utils.clip_grad_norm_(self.model.parameters(), self.args.max_grad_norm)
self.optimizer.step()
self.scheduler.step() # Update learning rate schedule
global_step += 1
if self.args.local_rank in [-1, 0] and self.args.max_steps > 0 \
and self.args.save_steps > 0 and global_step % self.args.save_steps == 0:
# Log metrics
if self.args.local_rank == -1 and self.args.evaluate_during_training:
# Only evaluate when single GPU otherwise metrics may not average well
results = self.evaluate_batch(dev_file=self.args.dev_file)
for key, value in results.items():
self.tb_writer.add_scalar("eval_{}".format(key), value, global_step)
self.tb_writer.add_scalar("lr", self.scheduler.get_lr()[0], global_step)
self.tb_writer.add_scalar("loss", (tr_loss - logging_loss) / self.args.save_steps, global_step)
logging_loss = tr_loss
# Save model checkpoint
checkpoint_prefix = "checkpoint_by-step"
output_dir = os.path.join(self.args.output_dir, "{}_{}".format(checkpoint_prefix, global_step))
checkpoints_to_be_deleted = Helper.save_model(do_train=self.args.do_train,
local_rank=self.args.local_rank,
output_dir=output_dir,
model=self.model, tokenizer=self.tokenizer,
optimizer=self.optimizer, scheduler=self.scheduler,
save_total_limit=self.args.save_total_limit,
args=self.args,
checkpoint_prefix=checkpoint_prefix)
logger.info("Saving model checkpoint to %s", output_dir)
logger.info("Saving optimizer and scheduler states to %s\n", output_dir)
for checkpoint in checkpoints_to_be_deleted:
logger.info("Order checkpoint [{}] deleted due to args.save_total_limit".format(checkpoint))
logger.info("Older checkpoint [{}] deleted due to args.save_total_limit".format(checkpoint))
if global_step > self.args.max_steps > 0:
epoch_iterator.close()
break
return tr_loss, logging_loss, global_step, steps_trained_in_current_epoch
def train(self):
    """
    Train the model

    Runs the full training loop: builds the train dataloader, sets up the
    optimizer/scheduler (and optionally apex fp16, DataParallel or DDP),
    optionally resumes from a checkpoint encoded in ``model_name_or_path``,
    then iterates epochs via ``self.train_batch``, checkpointing each epoch.
    With ``evaluate_during_training`` it tracks the dev metric, keeps the
    best model and early-stops after ``args.patience`` epochs without
    improvement.

    :return: (global_step, average training loss per optimization step)
    """
    self.model_init()
    if self.args.local_rank in [-1, 0]:
        # TensorBoard writer only on the master process
        self.tb_writer = SummaryWriter()
    self.args.train_batch_size = self.args.per_gpu_train_batch_size * max(1, self.args.n_gpu)
    if self.args.local_rank not in [-1, 0]:
        torch.distributed.barrier()
        # Barrier to make sure only the first process in distributed training process the dataset,
        # and the others will use the cache
    train_dataloader, train_numlines = Helper.build_dataloader(file=self.args.train_file, task=self.task,
                                                               source2idx=self.source2idx,
                                                               target2idx=self.target2idx,
                                                               batch_size=self.args.train_batch_size,
                                                               firstline=self.args.firstline, collate=self.collate)
    if self.args.local_rank == 0:
        torch.distributed.barrier()
        # End of barrier to make sure only the first process in distributed training process the dataset,
        # and the others will use the cache
    # ceil(train_numlines / train_batch_size)
    num_batchs = (train_numlines // self.args.train_batch_size) + 1 \
        if train_numlines % self.args.train_batch_size != 0 else train_numlines // self.args.train_batch_size
    if self.args.max_steps > 0:
        # max_steps overrides num_train_epochs
        t_total = self.args.max_steps
        self.args.num_train_epochs = self.args.max_steps // (
                num_batchs // self.args.gradient_accumulation_steps) + 1
    else:
        t_total = num_batchs // self.args.gradient_accumulation_steps * self.args.num_train_epochs
    self.optimizer, self.scheduler = Helper.load_optimizer(
        optimizer=self.args.optimizer, model_named_parameters=self.model.named_parameters(),
        learning_rate=self.args.learning_rate, adam_epsilon=self.args.adam_epsilon, t_total=t_total,
        warmup_steps=self.args.warmup_steps, weight_decay=self.args.weight_decay,
        model_name_or_path=self.args.model_name_or_path)
    # check apex
    if self.args.fp16:
        try:
            global apex
            import apex
            # from apex import amp
        except ImportError:
            raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
        self.model, self.optimizer = apex.amp.initialize(self.model, self.optimizer,
                                                         opt_level=self.args.fp16_opt_level)
    # multi-gpu training (should be after apex fp16 initialization)
    if self.args.n_gpu > 1:
        self.model = torch.nn.DataParallel(self.model)
    # Distributed training (should be after apex fp16 initialization)
    if self.args.local_rank != -1:
        self.model = torch.nn.parallel.DistributedDataParallel(self.model, device_ids=[self.args.local_rank],
                                                               output_device=self.args.local_rank,
                                                               find_unused_parameters=True)
    # Train!
    logger.info("***** Running training *****")
    logger.info(" Num examples = %d", train_numlines)
    logger.info(" Num Epochs = %d", self.args.num_train_epochs)
    logger.info(" Instantaneous batch size per GPU = %d", self.args.per_gpu_train_batch_size)
    logger.info(" Total train batch size (w. parallel, distributed & accumulation) = %d",
                self.args.train_batch_size * self.args.gradient_accumulation_steps
                * (torch.distributed.get_world_size() if self.args.local_rank != -1 else 1))
    logger.info(" Gradient Accumulation steps = %d", self.args.gradient_accumulation_steps)
    logger.info(" Total optimization steps = %d", t_total)
    global_step = 0
    epochs_trained = 0
    steps_trained_in_current_epoch = 0
    # Check if continuing training from a checkpoint
    if self.args.model_name_or_path and os.path.exists(self.args.model_name_or_path):
        try:
            # set global_step to gobal_step of last saved checkpoint from model path
            checkpoint_suffix = self.args.model_name_or_path.split("-")[-1].split("/")[0]
            if checkpoint_suffix.startswith("step"):
                # checkpoint path ends in ...step_<global_step>: derive epoch/step offsets
                global_step = int(checkpoint_suffix.split("_")[-1])
                epochs_trained = global_step // (num_batchs // self.args.gradient_accumulation_steps)
                steps_trained_in_current_epoch = global_step % (num_batchs // self.args.gradient_accumulation_steps)
            else:
                # checkpoint path ends in ...epoch_<n>: resume at that epoch boundary
                epochs_trained = int(checkpoint_suffix.split("_")[-1])
                global_step = epochs_trained * (num_batchs // self.args.gradient_accumulation_steps)
                steps_trained_in_current_epoch = 0
            logger.info(" Continuing training from checkpoint, will skip to saved global_step")
            logger.info(" Continuing training from epoch %d", epochs_trained)
            logger.info(" Continuing training from global step %d", global_step)
            logger.info(" Will skip the first %d steps in the %d epoch", steps_trained_in_current_epoch,
                        epochs_trained+1)
        except ValueError:
            # suffix was not an int: treat the path as a plain pretrained model
            logger.info(" Starting fine-tuning.")
    tr_loss, logging_loss = 0.0, 0.0
    # model_to_resize = self.model.module if hasattr(self.model, "module") else self.model
    # Take care of distributed/parallel training
    # model_to_resize.resize_token_embeddings(len(self.tokenizer))
    train_iterator = trange(epochs_trained, int(self.args.num_train_epochs), desc="Epoch",
                            disable=self.args.local_rank not in [-1, 0])
    Data2tensor.set_randseed(self.args.seed, self.args.n_gpu)  # Added here for reproducibility
    nepoch_no_imprv = 0
    # best dev score so far; lower is better for "loss", higher for "f1"
    best_dev = np.inf if self.args.metric == "loss" else -np.inf
    ep_count = epochs_trained
    if self.args.overwrite_output_dir and not self.args.should_continue and os.path.exists(self.args.output_dir):
        shutil.rmtree(self.args.output_dir)
    for _ in train_iterator:
        epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=self.args.local_rank not in [-1, 0],
                              total=num_batchs)
        tr_loss, logging_loss, \
            global_step, steps_trained_in_current_epoch = self.train_batch(epoch_iterator,
                                                                           tr_loss,
                                                                           logging_loss,
                                                                           global_step,
                                                                           steps_trained_in_current_epoch)
        ep_count += 1
        if self.args.local_rank in [-1, 0] and self.args.max_steps < 0:
            self.tb_writer.add_scalar("lr", self.scheduler.get_lr()[0], global_step)
            # NOTE(review): divides by global_step — assumes at least one optimization
            # step happened in the first epoch; confirm for tiny datasets.
            self.tb_writer.add_scalar("loss", tr_loss / global_step, global_step)
            # Save model checkpoint
            checkpoint_prefix = "checkpoint_by-epoch"
            output_dir = os.path.join(self.args.output_dir, "{}_{}".format(checkpoint_prefix, ep_count))
            checkpoints_to_be_deleted = Helper.save_model(do_train=self.args.do_train,
                                                          local_rank=self.args.local_rank, output_dir=output_dir,
                                                          model=self.model, tokenizer=self.tokenizer,
                                                          optimizer=self.optimizer,
                                                          scheduler=self.scheduler,
                                                          save_total_limit=self.args.save_total_limit,
                                                          args=self.args,
                                                          checkpoint_prefix=checkpoint_prefix)
            logger.info("Saving model checkpoint to %s", output_dir)
            logger.info("Saving optimizer and scheduler states to %s\n", output_dir)
            for checkpoint in checkpoints_to_be_deleted:
                logger.info("Older checkpoint [{}] deleted due to args.save_total_limit".format(checkpoint))
            # Only evaluate when single GPU otherwise metrics may not average well
            if self.args.local_rank == -1 and self.args.evaluate_during_training:
                results = self.evaluate_batch(self.args.dev_file,
                                              prefix="of {} used the epoch_{} model".format(self.args.dev_file,
                                                                                            ep_count))
                for key, value in results.items():
                    self.tb_writer.add_scalar("eval_{}".format(key), value, ep_count)
                dev_metric = results["perplexity"] if self.args.metric == "loss" else results["f1"]
                cond = dev_metric < best_dev if self.args.metric == "loss" else dev_metric > best_dev
                if cond:
                    logger.info("New improvement at %d", ep_count)
                    # Save the best model
                    # TODO: copying the folder instead of saving
                    checkpoints_to_be_deleted = Helper.save_model(do_train=self.args.do_train,
                                                                  local_rank=self.args.local_rank,
                                                                  output_dir=self.args.output_dir,
                                                                  model=self.model,
                                                                  tokenizer=self.tokenizer,
                                                                  optimizer=self.optimizer,
                                                                  scheduler=self.scheduler,
                                                                  save_total_limit=self.args.save_total_limit,
                                                                  args=self.args)
                    logger.info("Saving the best model to %s from checkpoint %s", self.args.output_dir, output_dir)
                    best_dev = dev_metric
                    nepoch_no_imprv = 0
                else:
                    nepoch_no_imprv += 1
                    if nepoch_no_imprv >= self.args.patience:
                        # Early stopping reached: optionally run the test set, then finish.
                        # Testing
                        if self.args.do_predict:
                            if not os.path.exists(self.args.output_dir):
                                logger.error("%s does not exist", self.args.output_dir)
                            self.model, self.tokenizer, self.optimizer, self.scheduler = Helper.load_model(
                                output_dir=self.args.output_dir,
                                device=self.args.device,
                                optimizer=self.optimizer,
                                scheduler=self.scheduler)
                            test_results = self.evaluate_batch(self.args.test_file,
                                                               prefix="of {} used the epoch_{} model".
                                                               format(self.args.test_file, ep_count))
                            for key, value in test_results.items():
                                self.tb_writer.add_scalar("eval_{}".format(key), value, ep_count)
                        self.tb_writer.close()
                        return global_step, tr_loss / global_step
        if global_step > self.args.max_steps > 0:
            train_iterator.close()
            break
    if self.args.local_rank in [-1, 0]:
        if not self.args.evaluate_during_training:
            # Save the model of the last epoch as the best model
            # TODO: copying the folder instead of saving
            checkpoints_to_be_deleted = Helper.save_model(do_train=self.args.do_train,
                                                          local_rank=self.args.local_rank,
                                                          output_dir=self.args.output_dir,
                                                          model=self.model,
                                                          tokenizer=self.tokenizer,
                                                          optimizer=self.optimizer,
                                                          scheduler=self.scheduler,
                                                          save_total_limit=self.args.save_total_limit,
                                                          args=self.args)
            logger.info("Saving the current model as the best model to %s", self.args.output_dir)
        if self.args.do_predict:
            if not os.path.exists(self.args.output_dir):
                logger.error("%s does not exist", self.args.output_dir)
            self.model, self.tokenizer, self.optimizer, self.scheduler = Helper.load_model(
                output_dir=self.args.output_dir,
                device=self.args.device,
                optimizer=self.optimizer,
                scheduler=self.scheduler)
            test_results = self.evaluate_batch(self.args.test_file,
                                               prefix="of {} used the epoch_{} model".
                                               format(self.args.test_file, ep_count))
            for key, value in test_results.items():
                self.tb_writer.add_scalar("eval_{}".format(key), value, ep_count)
        self.tb_writer.close()
    logger.info(" global_step = %s, average loss = %s", global_step, tr_loss / global_step)
    return global_step, tr_loss / global_step
def evaluate_batch(self, dev_file, prefix="") -> Dict:
    """Evaluate the current model on ``dev_file``.

    Computes perplexity from the LM loss on every batch; for ``task == 1``
    additionally reports precision/recall/f1/accuracy over token ids,
    otherwise a corpus-level BLEU over decoded word sequences.

    :param dev_file: path of the evaluation data file
    :param prefix: free-text tag inserted into log messages
    :return: dict mapping metric name -> value
    """
    eval_output_dir = self.args.output_dir
    if self.args.local_rank in [-1, 0]:
        os.makedirs(eval_output_dir, exist_ok=True)
    self.args.eval_batch_size = self.args.per_gpu_eval_batch_size * max(1, self.args.n_gpu)
    # Note that DistributedSampler samples randomly
    eval_dataloader, num_lines = Helper.build_dataloader(file=dev_file, task=self.task, source2idx=self.source2idx,
                                                         target2idx=self.target2idx,
                                                         batch_size=self.args.eval_batch_size,
                                                         firstline=self.args.firstline, collate=self.collate)
    # ceil(num_lines / eval_batch_size)
    num_batchs = (num_lines // self.args.eval_batch_size) + 1 \
        if num_lines % self.args.eval_batch_size != 0 else num_lines // self.args.eval_batch_size
    # multi-gpu evaluate
    if self.args.n_gpu > 1:
        self.model = torch.nn.DataParallel(self.model)
    # Eval!
    logger.info("***** Running evaluation {} *****".format(prefix))
    logger.info(" Num examples = %d", num_lines)
    logger.info(" Batch size = %d", self.args.eval_batch_size)
    eval_loss = 0.0
    nb_eval_steps = 0
    nl_tokens = []
    candidate = []
    reference = []
    self.model.eval()
    for batch in tqdm(eval_dataloader, desc="Evaluating", total=num_batchs):
        if self.args.model_type == "t5":
            assert len(batch) == 2, "Must return a pair of input-output"
            inputs, labels = batch[0], batch[1]
        else:
            if len(batch) == 2:
                # batch, _ = batch
                # concatenate source and target into one LM sequence
                batch = torch.cat((batch[0], batch[1]), dim=1)
            inputs, labels = self.mask_tokens(batch) if self.args.mlm else (batch, batch)
        inputs = inputs.to(self.args.device)
        attention_mask = inputs != self.pad_id
        inputs_len = attention_mask.sum(dim=-1)
        labels = labels.to(self.args.device)
        labels_mask = labels != self.pad_label_id
        labels_len = labels_mask.sum(dim=-1)
        with torch.no_grad():
            if self.args.model_type == "t5":
                outputs = self.model(input_ids=inputs, lm_labels=labels)
            else:
                outputs = self.model(inputs, masked_lm_labels=labels) \
                    if self.args.mlm else self.model(inputs, labels=labels)
            lm_loss = outputs[0]
            logits = outputs[1]
            # greedy per-position token predictions
            pred_labels = logits.argmax(dim=-1)
            eval_loss += lm_loss.mean().item()
        # truncate every sequence to its unpadded length
        preds = [ids[:l] for ids, l in zip(pred_labels.tolist(), labels_len.tolist())]
        out_label_ids = [ids[:l] for ids, l in zip(labels.tolist(), labels_len.tolist())]
        nl_list = [ids[:l] for ids, l in zip(inputs.tolist(), inputs_len.tolist())]
        if self.task == 1:
            # classification-style task: compare raw id sequences
            candidate.extend(preds)
            reference.extend(out_label_ids)
        else:
            label_words = self.tokenizer.batch_decode(out_label_ids)
            predict_words = self.tokenizer.batch_decode(preds)
            nl_token = self.tokenizer.batch_decode(nl_list)
            # reference = [[w1, ..., EOT], ..., [w1, ..., EOT]]
            reference.extend([words.split() for words in label_words])
            # candidate = [[w1, ..., EOT], ..., [w1, ..., EOT]]
            candidate.extend([words.split() for words in predict_words])
            nl_tokens.extend([words.split() for words in nl_token])
        nb_eval_steps += 1
    eval_loss = eval_loss / nb_eval_steps
    perplexity = torch.exp(torch.tensor(eval_loss))
    result = {"perplexity": perplexity.item()}
    if self.task == 1:
        precision, recall, f1, acc = APRF1.sklearn(reference, candidate)
        result.update({"precision": precision, "recall": recall, "f1": f1, "accuracy": acc})
    else:
        # print one random example as a quick qualitative sanity check
        rand_idx = random.randint(0, len(reference) - 1)
        print("\nRANDOMLY sampling: ")
        print("\t- A NL question: ", " ".join(nl_tokens[rand_idx]))
        print("\t- A LABEL query: ", " ".join(reference[rand_idx]))
        print("\t- A PREDICTED query: ", " ".join(candidate[rand_idx]), "\n")
        # zip(reference) wraps each reference in a 1-tuple (single-reference BLEU)
        bleu_score = compute_bleu(list(zip(reference)), candidate)
        result.update({"bleu_score": bleu_score[0]})
    logger.info("***** Eval results {} *****".format(prefix))
    for key in sorted(result.keys()):
        logger.info(" %s = %s", key, str(result[key]))
    return result
def evaluate(self, eval_output_file="eval_results.txt"):
    """Evaluate the best model (or every saved checkpoint) on the dev file.

    Each checkpoint is loaded, scored with ``evaluate_batch``, and the
    per-checkpoint metrics are written to ``eval_output_file`` inside
    ``args.output_dir``.

    :param eval_output_file: name of the results file to write
    :return: dict mapping checkpoint tag ("best" or step suffix) -> metrics
    """
    results = {}
    checkpoints = [self.args.output_dir]
    if self.args.eval_all_checkpoints:
        weight_files = sorted(glob.glob(self.args.output_dir + "/**/" + WEIGHTS_NAME, recursive=True))
        checkpoints = [os.path.dirname(path) for path in weight_files]
        logging.getLogger("transformers.modeling_utils").setLevel(logging.WARN)  # Reduce logging
    logger.info("Evaluate the following checkpoints: %s", checkpoints)
    for checkpoint in checkpoints:
        pieces = checkpoint.split("-")
        # a dash in the path marks a numbered checkpoint; otherwise it is the best model
        global_step = pieces[-1] if len(pieces) > 1 else "best"
        self.model, self.tokenizer, self.optimizer, self.scheduler = Helper.load_model(
            output_dir=checkpoint,
            device=self.args.device,
            optimizer=self.optimizer,
            scheduler=self.scheduler)
        result = self.evaluate_batch(dev_file=self.args.dev_file,
                                     prefix="of {} used the {} model".format(self.args.dev_file, global_step))
        # if global_step != "best":
        #     result = dict((k + "_{}".format(global_step), v) for k, v in result.items())
        results[global_step] = result
    output_dev_file = os.path.join(self.args.output_dir, eval_output_file)
    with open(output_dev_file, "w") as writer:
        for key in sorted(results):
            writer.write("- {}:\n".format(key))
            for k in sorted(results[key]):
                writer.write("\t{} = {}\n".format(k, str(results[key][k])))
    return results
@staticmethod
def tok2id(pretrained_tokenizer, block_size=512, eos=True, special_tokens=True):
"""
:param pretrained_tokenizer: pretrained tokenizer
:param block_size: max length of a sequence
:param eos: add an end of sequence token
:param special_tokens: add specific token from the pretrained tokenizer
:return: a token2index function
"""
def f(sequence, eos=eos, sep=False):
# TODO: add more code to handle special tokens
tokens = pretrained_tokenizer.tokenize(sequence)[:block_size]
if eos:
assert pretrained_tokenizer.eos_token, "There is no END OF SEQUENCE token"
tokens += [pretrained_tokenizer.eos_token]
if sep:
assert pretrained_tokenizer.sep_token, "There is no SEP OF SEQUENCE token"
tokens += [pretrained_tokenizer.sep_token]
tokenized_ids = pretrained_tokenizer.convert_tokens_to_ids(tokens)
if special_tokens:
tokenized_ids = pretrained_tokenizer.build_inputs_with_special_tokens(tokenized_ids)
return tokenized_ids
return f
def generate(self, eval_file, eval_output_file="generate_eval_results.txt"):
    """Run free generation for ``eval_file`` on the best model (or every checkpoint).

    Mirrors :meth:`evaluate` but scores with ``generate_batch`` (BLEU and
    string-match) and writes the per-checkpoint results to
    ``eval_output_file`` inside ``args.output_dir``.

    :param eval_file: data file to generate predictions for
    :param eval_output_file: name of the results file to write
    :return: dict mapping checkpoint tag ("best" or step suffix) -> metrics
    """
    results = {}
    checkpoints = [self.args.output_dir]
    if self.args.eval_all_checkpoints:
        weight_files = sorted(glob.glob(self.args.output_dir + "/**/" + WEIGHTS_NAME, recursive=True))
        checkpoints = [os.path.dirname(path) for path in weight_files]
        logging.getLogger("transformers.modeling_utils").setLevel(logging.WARN)  # Reduce logging
    logger.info("Generate the following checkpoints: %s", checkpoints)
    for checkpoint in checkpoints:
        pieces = checkpoint.split("-")
        global_step = pieces[-1] if len(pieces) > 1 else "best"
        # Check output directory
        if not os.path.exists(checkpoint):
            logger.error("%s does not exist", checkpoint)
        self.model, self.tokenizer, self.optimizer, self.scheduler = Helper.load_model(
            output_dir=checkpoint,
            device=self.args.device,
            optimizer=self.optimizer,
            scheduler=self.scheduler)
        result = self.generate_batch(dev_file=eval_file,
                                     prefix="of {} used the {} model".format(eval_file, global_step))
        # if global_step != "best":
        #     result = dict((k + "_{}".format(global_step), v) for k, v in result.items())
        results[global_step] = result
    output_dev_file = os.path.join(self.args.output_dir, eval_output_file)
    with open(output_dev_file, "w") as writer:
        for key in sorted(results):
            writer.write("- {}:\n".format(key))
            for k in sorted(results[key]):
                writer.write("\t{} = {}\n".format(k, str(results[key][k])))
    return results
def generate_batch(self, dev_file, prefix="") -> Dict:
    """Generate predictions for ``dev_file`` via ``model.generate`` and score them.

    Decodes references and predictions (truncating at EOT, stripping a
    leading SOT from predictions) and returns a dict with
    ``bleu_score`` and ``string_match_score``.

    :param dev_file: path of the data file to generate from
    :param prefix: free-text tag inserted into log messages
    :return: dict of metric name -> value
    """
    eval_output_dir = self.args.output_dir
    if self.args.local_rank in [-1, 0]:
        os.makedirs(eval_output_dir, exist_ok=True)
    self.args.eval_batch_size = self.args.per_gpu_eval_batch_size * max(1, self.args.n_gpu)
    eval_dataloader, num_lines = Helper.build_dataloader(file=dev_file, task=self.task, source2idx=self.source2idx,
                                                         target2idx=self.target2idx,
                                                         batch_size=self.args.eval_batch_size,
                                                         firstline=self.args.firstline, collate=self.collate)
    # ceil(num_lines / eval_batch_size)
    num_batchs = (num_lines // self.args.eval_batch_size) + 1 \
        if num_lines % self.args.eval_batch_size != 0 else num_lines // self.args.eval_batch_size
    # multi-gpu evaluate
    if self.args.n_gpu > 1:
        self.model = torch.nn.DataParallel(self.model)
    # Eval!
    logger.info("***** Running generation {} *****".format(prefix))
    logger.info(" Num examples = %d", num_lines)
    logger.info(" Batch size = %d", self.args.eval_batch_size)
    nb_eval_steps = 0
    nl_tokens = []
    reference = []
    candidate = []
    self.model.eval()
    for batch in tqdm(eval_dataloader, desc="Generating", total=num_batchs):
        if self.args.model_type == "t5":
            assert len(batch) == 2, "Must return a pair of input-output"
            inputs, labels = batch[0], batch[1]
        else:
            if len(batch) == 2:
                # batch, _ = batch
                # concatenate source and target into one LM sequence
                batch = torch.cat((batch[0], batch[1]), dim=1)
            inputs, labels = self.mask_tokens(batch) if self.args.mlm else (batch, batch)
        inputs = inputs.to(self.args.device)
        attention_mask = inputs != self.pad_id
        # inputs_len = attention_mask.sum(dim=-1)
        labels = labels.to(self.args.device)
        # labels = labels.clone()
        decoder_attention_mask = labels != self.pad_label_id
        labels_len = decoder_attention_mask.sum(dim=-1)
        # allow generations up to 30% longer than the longest gold label in the batch
        max_length = int(1.3 * labels_len.max().item())
        with torch.no_grad():
            # NOTE(review): bos_token_id is built from eos_token and eos_token_id
            # from bos_token — this looks swapped; confirm against the tokenizer's
            # special-token conventions before relying on it.
            outputs = self.model.generate(input_ids=inputs,
                                          max_length=max_length,
                                          temperature=self.config.temperature,
                                          top_k=self.config.top_k,
                                          top_p=self.config.top_p,
                                          repetition_penalty=self.config.repetition_penalty,
                                          num_beams=1,
                                          do_sample=self.config.do_sample,
                                          num_return_sequences=self.config.num_return_sequences,
                                          bos_token_id=self.tokenizer.convert_tokens_to_ids(
                                              self.tokenizer.eos_token),
                                          # pad_token_id=self.pad_token_id,
                                          eos_token_id=self.tokenizer.convert_tokens_to_ids(
                                              self.tokenizer.bos_token),
                                          )
        pred_labels = outputs
        # probs = outputs[1]
        # TODO: fix this bug IndexError: list index out of range
        preds = pred_labels.detach().cpu().tolist()
        out_label_ids = labels.detach().cpu().tolist()
        nl_list = inputs.detach().cpu().tolist()
        self.tokenizer.i2tw = Tokenizer.reversed_dict(self.tokenizer.get_vocab())
        label_words = Tokenizer.decode_batch(out_label_ids, self.tokenizer.i2tw, 2)
        # cut each reference at its unpadded length, or at the first EOT token
        label_words = [words[:i] if EOT not in words else words[: words.index(EOT)]
                       for words, i in zip(label_words, labels_len.tolist())]
        predict_words = Tokenizer.decode_batch(preds, self.tokenizer.i2tw, 2)
        # Remove SOT
        predict_words = [words if words[0] != SOT else words[1:]
                         for words in predict_words]
        predict_words = [words if EOT not in words else words[: words.index(EOT)]
                         for words in predict_words]
        nl_token = self.tokenizer.batch_decode(nl_list, skip_special_tokens=True)
        # reference = [[w1, ..., EOT], ..., [w1, ..., EOT]]
        reference.extend(label_words)
        # candidate = [[w1, ..., EOT], ..., [w1, ..., EOT]]
        candidate.extend(predict_words)
        nl_tokens.extend(nl_token)
        nb_eval_steps += 1
    result = {}
    # print one random example as a quick qualitative sanity check
    rand_idx = random.randint(0, len(reference) - 1)
    print("\nRANDOMLY sampling: ")
    print("\t- A NL question: ", nl_tokens[rand_idx])
    print("\t- A LABEL query: ", " ".join(reference[rand_idx]))
    print("\t- A PREDICTED query: ", " ".join(candidate[rand_idx]), "\n")
    # zip(reference) wraps each reference in a 1-tuple (single-reference BLEU)
    bleu_score = compute_bleu(list(zip(reference)), candidate)
    string_match = compute_string_match(reference, candidate)
    result.update({"bleu_"
                   "score": bleu_score[0]})
    result.update({"string_match_score": string_match})
    logger.info("***** Generate results {} *****".format(prefix))
    for key in sorted(result.keys()):
        logger.info(" %s = %s", key, str(result[key]))
    return result
def main(argv):
    """Command-line entry point: parse arguments, optionally precompute a
    conditioning metric for the data files, then train/evaluate/test/generate
    with a TransLanguageModel according to the ``--do_*`` flags.

    :param argv: raw command-line arguments (passed to argparse)
    :return: dict of evaluation results (empty if no eval/test was run)
    """
    now = time.time()
    parser = argparse.ArgumentParser(argv)
    # input peripherals
    parser.add_argument("--train_file", default="../../../media/data/paraphrase/manual_paraphrases_dev.csv", type=str,
                        help="The input training data file (a text file)."
                        )
    parser.add_argument("--dev_file", default="../../../media/data/paraphrase/manual_paraphrases_dev.csv", type=str,
                        help="An optional input evaluation data file to evaluate the perplexity on (a text file).", )
    parser.add_argument("--test_file", default="../../../media/data/paraphrase/manual_paraphrases_dev.csv", type=str,
                        help="An optional input test data file to test the perplexity on (a text file).", )
    parser.add_argument("--task", default="conditional", type=str, choices=["response", "sentiment", "conditional"],
                        help="Select a task to solve",)
    parser.add_argument("--firstline", action='store_true', default=False,
                        help="labelled files having a header" )
    parser.add_argument("--conditional_metric", default='rouge', type=str, choices=["rouge"],
                        help="Metric to condition the input sequence during training and leave it None if using "
                             "precomputed value")
    # output peripherals
    parser.add_argument("--output_dir", default="/media/data/review_response/trained_model", type=str,
                        help="The output directory where the model predictions and checkpoints will be written.")
    parser.add_argument("--overwrite_output_dir", action="store_true",
                        help="Overwrite the content of the output directory")
    # pretrained-model
    parser.add_argument("--model_type", type=str, default="t5",
                        help="The model architecture to be trained or fine-tuned.")
    parser.add_argument("--model_name_or_path", default="t5-small", type=str,
                        help="The model checkpoint for weights initialization. "
                             "Leave None if you want to train a model from scratch.")
    parser.add_argument("--config_name", default=None, type=str,
                        help="Optional pretrained config name or path if not the same as model_name_or_path. "
                             "If both are None, initialize a new config.", )
    parser.add_argument("--tokenizer_name", default=None, type=str,
                        help="Optional pretrained tokenizer name or path if not the same as model_name_or_path. "
                             "If both are None, initialize a new tokenizer.", )
    parser.add_argument("--cache_dir", default=None, type=str,
                        help="Optional directory to store the pre-trained models downloaded from s3 "
                             "(instead of the default one)", )
    # Other parameters
    parser.add_argument("--mlm", action="store_true",
                        help="Train with masked-language modeling loss instead of language modeling.")
    parser.add_argument("--mlm_probability", type=float, default=0.15,
                        help="Ratio of tokens to mask for masked language modeling loss")
    parser.add_argument("--block_size", default=-1, type=int,
                        help="Optional input sequence length after tokenization. "
                             "The training dataset will be truncated in block of this size for training."
                             "Default to the model max input length for single sentence inputs "
                             "(take into account special tokens).")
    # Training procedure
    parser.add_argument("--should_continue", action="store_true",
                        help="Whether to continue from latest checkpoint in output_dir")
    parser.add_argument("--do_train", action="store_true", help="Whether to run training.", default=True)
    parser.add_argument("--evaluate_during_training", action="store_true",
                        help="Run evaluation during training at each logging step.")
    parser.add_argument("--save_steps", type=int, default=500, help="Save checkpoint every X updates steps.")
    parser.add_argument("--do_eval", action="store_true", help="Whether to run metrics on the dev set.")
    parser.add_argument("--eval_all_checkpoints", action="store_true",
                        help="Evaluate all checkpoints starting with the same prefix as "
                             "model_name_or_path ending and ending with step number")
    parser.add_argument("--do_predict", action="store_true", help="Whether to run training.")
    parser.add_argument("--do_generate", action="store_true",
                        help="Whether to run generation on both dev and test sets.")
    # Training setup
    parser.add_argument("--num_train_epochs", default=1.0, type=float,
                        help="Total number of training epochs to perform.")
    parser.add_argument("--max_steps", default=-1, type=int,
                        help="If > 0: set total number of training steps to perform. Override num_train_epochs.")
    parser.add_argument("--per_gpu_train_batch_size", default=4, type=int, help="Batch size per GPU/CPU for training.")
    parser.add_argument("--gradient_accumulation_steps", type=int, default=1,
                        help="Number of updates steps to accumulate before performing a backward/update pass.")
    parser.add_argument("--optimizer", default="adamw", type=str, help="An optimizer method", )
    parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.")
    parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.")
    parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.")
    parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
    parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.")
    parser.add_argument("--per_gpu_eval_batch_size", default=4, type=int, help="Batch size per GPU/CPU for evaluation.")
    parser.add_argument("--save_total_limit", type=int, default=None,
                        help="Limit the total amount of checkpoints, delete the older checkpoints in the output_dir, "
                             "does not delete by default")
    parser.add_argument("--no_cuda", action="store_true", help="Avoid using CUDA when available")
    parser.add_argument("--seed", type=int, default=42, help="random seed for initialization")
    parser.add_argument("--fp16", action="store_true",
                        help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit",)
    parser.add_argument("--fp16_opt_level", type=str, default="O1",
                        help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
                             "See details at https://nvidia.github.io/apex/amp.html")
    parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank")
    parser.add_argument("--patience", type=int, default=2,
                        help="Early stopping if no improvement after patience epoches")
    parser.add_argument("--metric", type=str, default="loss", choices=["f1", "loss"],
                        help="Optimized criterion (loss or f1)")
    parser.add_argument("--timestamped", action='store_true', default=False,
                        help="Save models in timestamped subdirectory")
    args = parser.parse_args()
    # NOTE(review): `!= None` should be `is not None`; with the default "rouge"
    # this branch always runs.
    if args.conditional_metric != None:
        logger.info("Computing {} for the dataset".format(args.conditional_metric))
        metric_factory = MetricsFactory()
        metric = metric_factory.create_metrics(args.conditional_metric, '../metrics/ROUGE-1.5.5/' )
        from mlmodels.utils.csvIO import CSV
        files = {'train_file': args.train_file, 'dev_file': args.dev_file, 'test_file': args.test_file}
        new_files_dir = {}
        for file_name, file in files.items():
            new_data = []
            for data in CSV.read(file):
                score = metric.evaluate_example(data[0], data[1])
                new_data.append([data[0], score, data[1]])
                # NOTE(review): this `break` keeps only the FIRST row of each file —
                # looks like a debugging leftover; confirm before removing.
                break
            # NOTE(review): uses args.train_file (not `file`) to locate the last '/'
            # — only correct when all three files live in the same directory; verify.
            start_index = args.train_file.rfind('/')
            new_file_name = file[:start_index] + '/conditional_file' + file[start_index:]
            CSV.write(new_data, new_file_name)
            new_files_dir[file_name] = new_file_name
        args.train_file = new_files_dir['train_file']
        args.dev_file = new_files_dir['dev_file']
        args.test_file = new_files_dir['test_file']
    if args.timestamped and args.do_train:
        # args.output_dir = os.path.abspath(os.path.join(args.output_dir, ".."))
        # e.g. "2020_01_31_12_30" subfolder so repeated runs do not collide
        sub_folder = datetime.now().isoformat(sep='-', timespec='minutes').replace(":", "-").replace("-", "_")
        args.output_dir = os.path.join(args.output_dir, sub_folder)
    lm_model = TransLanguageModel(args)
    # Training
    train_start = time.time()
    if args.do_train:
        global_step, tr_loss = lm_model.train()
        logger.info(" global_step = %s, average loss = %s", global_step, tr_loss)
    logger.info("Training time = %s", Timer.asHours(time.time() - train_start))
    # Evaluation
    evaluate_start = time.time()
    results = {}
    # Run evaluation separately
    if args.do_eval and args.local_rank in [-1, 0]:
        results = lm_model.evaluate(eval_output_file="dev_results.txt")
    logger.info("Evaluate time = %s", Timer.asHours(time.time() - evaluate_start))
    # Testing
    test_start = time.time()
    # Running testing separately
    if args.do_predict and args.local_rank in [-1, 0]:
        results = lm_model.evaluate(eval_output_file="test_results.txt")
    logger.info("Test time = %s", Timer.asHours(time.time() - test_start))
    # Testing
    generate_start = time.time()
    if args.do_generate and args.local_rank in [-1, 0]:
        _ = lm_model.generate(eval_file=args.dev_file, eval_output_file="generate_dev_results.txt")
        _ = lm_model.generate(eval_file=args.test_file, eval_output_file="generate_test_results.txt")
    logger.info("Generate time = %s", Timer.asHours(time.time() - generate_start))
    total_time = Timer.asHours(time.time() - now)
    logger.info("Total time = %s", total_time)
    return results
if __name__ == "__main__":
    # Script entry point: forward raw command-line arguments to main().
    main(sys.argv)
|
import numpy as np
import cv2
from scipy.spatial import distance
import math
import os
def distpp(p1, p2):
    """Return the Euclidean distance between the two points given as parameters."""
    dx = p2[0] - p1[0]
    dy = p2[1] - p1[1]
    return math.hypot(dx, dy)
def findangle(datas, w, h, qdens):
    '''Coarse method to find the corner points ("angles") of the piece.

    For each image corner, tracks the closest and second-closest point of
    *datas*; if a quadrant holds no points (its entry of *qdens* < 1), the
    corresponding corner of the image itself is used instead.

    :param datas: list of candidate points (non-empty)
    :param w: image width
    :param h: image height
    :param qdens: per-quadrant point counts, as produced by density()
    :return: list of 4 corner points, ordered top-left, top-right,
             bottom-right, bottom-left
    '''
    angles = []
    # the four image corners
    c1 = (0, 0)
    c2 = (w, 0)
    c3 = (w, h)
    c4 = (0, h)
    # Variables of the second test
    atangle = 0
    # an additional control point can easily be added here.
    # each entry: [corner, [closest point, dist], [second-closest point, dist]],
    # both slots seeded with datas[0]
    angle1 = [c1, [datas[0], distpp(c1, datas[0])], [datas[0], distpp(c1, datas[0])]]
    angle2 = [c2, [datas[0], distpp(c2, datas[0])], [datas[0], distpp(c2, datas[0])]]
    angle3 = [c3, [datas[0], distpp(c3, datas[0])], [datas[0], distpp(c3, datas[0])]]
    angle4 = [c4, [datas[0], distpp(c4, datas[0])], [datas[0], distpp(c4, datas[0])]]
    angletest = [angle1, angle2, angle3, angle4]
    # search for the points closest to the corners of the image
    for point in datas[1:]:
        for i in angletest:
            temp = distpp(point, i[0])
            if i[1][1] > temp:
                # new closest point: demote the previous one to second place
                i[2][1] = i[1][1]
                i[2][0] = i[1][0]
                i[1][1] = temp
                i[1][0] = point
            elif i[2][1] > temp:
                i[2][1] = temp
                i[2][0] = point
    for i in angletest:
        angles.append(i[1][0])
    # verify that a point is actually present in each quadrant
    for d in qdens:
        if d < 1:
            # empty quadrant: fall back to the image corner itself
            if atangle == 0:
                angles[atangle] = [0, 0]
            elif atangle == 1:
                angles[atangle] = [w - 1, 0]
            elif atangle == 2:
                angles[atangle] = [w - 1, h - 1]
            elif atangle == 3:
                angles[atangle] = [0, h - 1]
        # advance to the next quadrant index every iteration
        atangle += 1
    return angles
def verifangle(angles, qdens, q1, q2, q3, q4):
    '''Method correcting the detected corner points.

    First marks each side whose two endpoints differ by more than 10 px along
    the axis that side should be aligned with (top/bottom sides compared on y,
    left/right sides on x).  Then snaps each corner outward (min/max of its
    own and its neighbours' coordinates) when an adjacent side was flagged.
    Corrected corners are appended to their quadrant list and the quadrant
    densities are incremented.

    :param angles: 4 corners ordered top-left, top-right, bottom-right, bottom-left
    :param qdens: per-quadrant point counts
    :param q1..q4: per-quadrant point lists
    :return: (angles, qdens, q1, q2, q3, q4), all possibly updated in place
    '''
    sides = [0, 0, 0, 0]
    modif = [0, 0, 0, 0]
    # flag the sides whose endpoints are misaligned by more than 10 px
    for i in range(4):
        inext = (i+1) % 4
        if i % 2 == 0:
            # horizontal side: endpoints should share (roughly) the same y
            d = 1
            if angles[inext][d]-10 > angles[i][d] or angles[inext][d]+10 < angles[i][d]:
                sides[i] += 1
        else:
            # vertical side: endpoints should share (roughly) the same x
            d = 0
            if angles[inext][d]-10 > angles[i][d] or angles[inext][d]+10 < angles[i][d]:
                sides[i] += 1
    # correct each corner from its flagged adjacent sides
    for i in range(4):
        p = i - 1          # previous side (Python's -1 wraps to side 3)
        n = (i + 1) % 4    # next corner
        if i == 0:
            # top-left: push y up (min) and x left (min)
            if sides[i] > 0:
                y = min(angles[i][1], angles[n][1])
                if sides[p] > 0:
                    x = min(angles[i][0], angles[p][0])
                    angles[i] = [x, y]
                    modif[i] = 1
                else:
                    x = angles[i][0]
                    angles[i] = [x, y]
                    modif[i] = 1
            else:
                y = angles[i][1]
                if sides[p] > 0:
                    x = min(angles[i][0], angles[p][0])
                    angles[i] = [x, y]
                    modif[i] = 1
        elif i == 1:
            # top-right: push x right (max) and y up (min)
            if sides[i] > 0:
                x = max(angles[i][0], angles[n][0])
                if sides[p] > 0:
                    y = min(angles[i][1], angles[p][1])
                    angles[i] = [x, y]
                    modif[i] = 1
                else:
                    y = angles[i][1]
                    angles[i] = [x, y]
                    modif[i] = 1
            else:
                x = angles[i][0]
                if sides[p] > 0:
                    y = min(angles[i][1], angles[p][1])
                    angles[i] = [x, y]
                    modif[i] = 1
        elif i == 2:
            # bottom-right: push y down (max) and x right (max)
            if sides[i] > 0:
                y = max(angles[i][1], angles[n][1])
                if sides[p] > 0:
                    x = max(angles[i][0], angles[p][0])
                    angles[i] = [x, y]
                    modif[i] = 1
                else:
                    x = angles[i][0]
                    angles[i] = [x, y]
                    modif[i] = 1
            else:
                y = angles[i][1]
                if sides[p] > 0:
                    x = max(angles[i][0], angles[p][0])
                    angles[i] = [x, y]
                    modif[i] = 1
        elif i == 3:
            # bottom-left: push x left (min) and y down (max)
            if sides[i] > 0:
                x = min(angles[i][0], angles[n][0])
                if sides[p] > 0:
                    y = max(angles[i][1], angles[p][1])
                    angles[i] = [x, y]
                    modif[i] = 1
                else:
                    y = angles[i][1]
                    angles[i] = [x, y]
                    modif[i] = 1
            else:
                x = angles[i][0]
                if sides[p] > 0:
                    y = max(angles[i][1], angles[p][1])
                    angles[i] = [x, y]
                    modif[i] = 1
    # register every corrected corner in its quadrant
    if modif[0] == 1:
        q1.append(angles[0])
        qdens[0] += 1
    if modif[1] == 1:
        q2.append(angles[1])
        qdens[1] += 1
    if modif[2] == 1:
        q3.append(angles[2])
        qdens[2] += 1
    if modif[3] == 1:
        q4.append(angles[3])
        qdens[3] += 1
    return angles, qdens, q1, q2, q3, q4
def density(data, w, h):
    '''Split the points into the four image quadrants and count how many fall in each.

    Quadrant order: 0 = top-left, 1 = top-right, 2 = bottom-right, 3 = bottom-left.
    Points outside [0, w] x [0, h] are ignored.

    :param data: iterable of (x, y) points
    :param w: image width
    :param h: image height
    :return: (counts, q1, q2, q3, q4) — per-quadrant counts and point lists
    '''
    counts = [0, 0, 0, 0]
    q1, q2, q3, q4 = [], [], [], []
    for pt in data:
        x, y = pt[0], pt[1]
        if 0 <= x < w / 2:
            # left half: top-left or bottom-left quadrant
            if 0 <= y < h / 2:
                counts[0] += 1
                q1.append(pt)
            if h / 2 <= y <= h:
                counts[3] += 1
                q4.append(pt)
        elif w / 2 <= x <= w:
            # right half: top-right or bottom-right quadrant
            if 0 <= y < h / 2:
                counts[1] += 1
                q2.append(pt)
            if h / 2 <= y <= h:
                counts[2] += 1
                q3.append(pt)
    return counts, q1, q2, q3, q4
def closest_node(node, nodes):
    '''Return the point of *nodes* that is closest (Euclidean) to *node*.

    Found on the internet:
    https://codereview.stackexchange.com/questions/28207/finding-the-closest-point-to-a-list-of-points
    '''
    # cdist yields a (1, N) distance matrix; argmin flattens it to one index
    dists = distance.cdist([node], nodes)
    return nodes[dists.argmin()]
def side_test(datas, angle, nbpoints):
    '''Start building the two partial sides that meet at one corner point.

    Starting from `angle`, repeatedly pulls the nearest remaining point out
    of `datas` and appends it to whichever of the two chains (cd / cf) it is
    closest to. Returns (d, f, cd, cf): the current endpoint of each chain
    and the chains themselves (both begin at `angle`).

    NOTE: `datas` is consumed — every point attached to a chain is removed
    from the list in place.
    '''
    d = angle
    cd = [angle]
    cf = [angle]
    if nbpoints < 2:
        # Not enough points to start a second chain; both end at the corner.
        f = angle
    else:
        datas.remove(d)
        f = closest_node(d, datas)
        datas.remove(f)
        cf.append(f)
    if nbpoints > 2:
        nbpoints -= 2
        while nbpoints > 0:
            '''For robustness, both sides of a corner are walked at the same
            time; this removes a fair number of errors.'''
            # Grow from the chain whose endpoint is currently closer to the
            # corner, so the two chains advance roughly in lockstep.
            wayd = distpp(angle, d)
            wayf = distpp(angle, f)
            if wayd > wayf:
                newpoint = closest_node(f, datas)
            else:
                newpoint = closest_node(d, datas)
            # Attach the new point to the chain whose endpoint is nearer.
            testd = distpp(newpoint, d)
            testf = distpp(newpoint, f)
            if testd < testf:
                cd.append(newpoint)
                d = newpoint
            else:
                cf.append(newpoint)
                f = newpoint
            nbpoints -= 1
            datas.remove(newpoint)
    return d, f, cd, cf
def fullside(c1, c2, c3, c4):
    '''Link the four corners together to form the piece's four sides.

    Each ci is the (d, f, cd, cf) tuple produced by side_test(): the two
    chain endpoints and the two partial sides growing out of corner i.
    Four times in a row, the closest pair of still-unused endpoints
    belonging to adjacent corners (corner-id sum is odd) is selected and
    their partial sides are concatenated into one full side.

    Returns [side1, side2, side3, side4], each a list of points.

    BUG FIX: the original tested `points[j] == 0` (a list compared to 0,
    always False) instead of `points[j][2] == 0`, so no pair was ever
    selected and every "side" was built from the stale p1/p2 indices.
    '''
    # Each entry: [endpoint, corner id, used flag, partial side points].
    points = [
        [c1[0], 1, 0, c1[2]], [c1[1], 1, 0, c1[3]],
        [c2[0], 2, 0, c2[2]], [c2[1], 2, 0, c2[3]],
        [c3[0], 3, 0, c3[2]], [c3[1], 3, 0, c3[3]],
        [c4[0], 4, 0, c4[2]], [c4[1], 4, 0, c4[3]],
    ]
    # Four passes consume all eight endpoints, two per pass.
    return [_join_closest_pair(points) for _ in range(4)]


def _join_closest_pair(points):
    '''Find the closest unused endpoint pair whose corner ids have an odd
    sum (i.e. belong to adjacent corners), mark both endpoints used, and
    return the concatenation of their partial sides.'''
    best = None
    p1 = p2 = 0
    for i in range(7):
        for j in range(i + 1, 8):
            if (points[i][2] == 0 and points[j][2] == 0
                    and (points[i][1] + points[j][1]) % 2 == 1):
                dist = distpp(points[i][0], points[j][0])
                # Strict < keeps the first pair found on ties, matching the
                # original selection order.
                if best is None or dist < best:
                    best = dist
                    p1, p2 = i, j
    side = points[p1][3] + points[p2][3]
    points[p1][2] = 1
    points[p2][2] = 1
    return side
def working(path):
    '''Classify one puzzle-piece image and write an annotated copy of it.

    Reads img/<path>, extracts the piece outline from the alpha channel,
    detects corner candidates, estimates the four corner points, assembles
    the sides and writes the annotated image to resultShiTomasi/<path>.
    '''
    img = cv2.imread("img/" + path)
    imgRGB = cv2.imread("img/" + path, cv2.IMREAD_UNCHANGED)
    channels = cv2.split(imgRGB)
    if imgRGB is None:
        # NOTE(review): this check runs after cv2.split(imgRGB), which would
        # already have failed on a missing/unreadable file — confirm intent.
        print("erreur ouverture fichier")
    # Get the contours by thresholding the alpha channel mask.
    ret, gray = cv2.threshold(channels[3], 127, 255, 0)
    ret, thresh = cv2.threshold(channels[3], 127, 255, 0)
    # NOTE(review): the 3-value unpacking implies OpenCV 3.x; OpenCV 4
    # returns only (contours, hierarchy) — confirm the pinned version.
    im2, contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    # Draw the piece outline.
    cv2.drawContours(img, contours, -1, (0, 255, 0), 1)
    # Corner candidates via the Shi-Tomasi good-features-to-track detector.
    corners = cv2.goodFeaturesToTrack(gray, 300, 0.01, 5)
    corners = np.int0(corners)
    # Flatten the (N, 1, 2) corner array into a plain list of [x, y] pairs.
    data = []
    for corner in corners:
        x, y = corner.ravel()
        data.append([x, y])
    # Values needed to study the piece.
    height, width = img.shape[:2]
    qdens, q1, q2, q3, q4 = density(data, width, height)
    angles = findangle(data, width, height, qdens)
    # Draw the raw candidate points (black) and corner estimates (blue).
    posx, posy = zip(*data)
    img[posy[:], posx[:]] = [0, 0, 0]
    posx, posy = zip(*angles)
    img[posy[:], posx[:]] = [255, 0, 0]
    # Re-check / adjust the corner positions.
    angles, qdens, q1, q2, q3, q4 = verifangle(angles, qdens, q1, q2, q3, q4)
    # Build the four corners, one per quadrant.
    corner1 = side_test(q1, angles[0], qdens[0])
    corner2 = side_test(q2, angles[1], qdens[1])
    corner3 = side_test(q3, angles[2], qdens[2])
    corner4 = side_test(q4, angles[3], qdens[3])
    side1, side2, side3, side4 = fullside(corner1, corner2, corner3, corner4)
    '''This piece of code is not displayed for the moment, hence the comment.
    # dessin corner1
    posx, posy = zip(*side1)
    img[posy[:], posx[:]] = [0, 0, 255]
    posx, posy = zip(*side2)
    img[posy[:], posx[:]] = [255, 0, 0]
    posx, posy = zip(*side3)
    img[posy[:], posx[:]] = [0, 0, 255]
    posx, posy = zip(*side4)
    img[posy[:], posx[:]] = [255, 0, 0]'''
    # Draw the final corner points in white.
    posx, posy = zip(*angles)
    img[posy[:], posx[:]] = [255, 255, 255]
    # Write the annotated image.
    cv2.imwrite("resultShiTomasi/" + path, img)
if __name__ == "__main__":
    # Create the output folder on first run.
    if not os.path.exists("resultShiTomasi/"):
        os.makedirs("resultShiTomasi/")
    # Process every puzzle-piece image found in img/.
    # (The original loop variable was named `list`, shadowing the builtin.)
    for filename in os.listdir("img"):
        working(filename)
|
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Template tags for parsing date strings.
"""
from datetime import datetime
from django import template
from django.utils import timezone
register = template.Library()
class ParseDateNode(template.Node):
    """Template node that parses ISO-ish datetime strings."""

    def render(self, datestring):
        """Parse a date-like string into a timezone-aware Python datetime.

        Tries each supported format in turn; returns None when the input
        is empty/None or matches none of the formats.
        """
        formats = ["%Y-%m-%dT%H:%M:%S.%f", "%Y-%m-%d %H:%M:%S.%f",
                   "%Y-%m-%dT%H:%M:%S", "%Y-%m-%d %H:%M:%S"]
        if datestring:
            for fmt in formats:  # renamed from `format` (shadowed builtin)
                try:
                    parsed = datetime.strptime(datestring, fmt)
                except (ValueError, TypeError):
                    # Narrowed from `except Exception: pass` — only parse
                    # failures should fall through to the next format.
                    continue
                # NOTE(review): `timezone.utc` comes from django.utils.timezone
                # and was removed in Django 5; datetime.timezone.utc is the
                # drop-in replacement — confirm the target Django version.
                if not timezone.is_aware(parsed):
                    parsed = timezone.make_aware(parsed, timezone.utc)
                return parsed
        return None
@register.filter(name='parse_date')
def parse_date(value):
    """Template filter wrapping ParseDateNode.render: parses a date-like
    string into an aware datetime, or returns None on failure."""
    return ParseDateNode().render(value)
|
from tkinter import *
from tkinter import messagebox
import nltk
from nltk.corpus import words
from time import gmtime, strftime
import time
from collections import Counter
# Make sure the English word corpus is available, then load it.
nltk.download('words')
word_list = words.words()
# The 4x5 letter board, row-major (five letters per row).
Matrix_list = ['a', 'r', 'b', 'z', 't', 'n', 'd', 'h', 'm',
               'v', 's', 'x', 'l', 'u', 'g', 'y', 'p', 'k', 'c', 'o']
# Running total of points scored this session.
score = 0
window = Tk()
window.title("Үг холих")
window.geometry("600x350")
def checkspells():
    """Validate the entered word and update the score.

    A word scores len(word) points when it is a real English word (per the
    NLTK corpus), is longer than 3 letters, and uses only letters present on
    the board. Note: only letter membership is checked, not multiplicity —
    the original iterated Counter keys, which has the same effect.
    The entry widget is cleared in every case.
    """
    global score
    word = word_check.get()
    if word in word_list:
        # all() over the distinct letters replaces the original manual
        # flag loop over a Counter's keys (which shadowed builtin `dict`).
        uses_board_only = all(ch in Matrix_list for ch in set(word))
        if uses_board_only and len(word) > 3:
            score = score + len(word)
            label.configure(text="score = " + str(score))
            print(word)
        else:
            # Typo fixed: "matchine" -> "matching".
            messagebox.showinfo(
                "check", "No matching with above character OR word length should be greater than 3")
    else:
        print("No word")
    word_check.delete(0, 'end')
def tick(time1=''):
    """Refresh the countdown label with the current MM:SS clock time,
    then re-schedule itself every 200 ms via the timer widget."""
    now = time.strftime("%M:%S")
    if now != time1:
        # Only touch the label when the displayed time actually changed.
        time1 = now
        timer.config(text="After 1 minute it will close automatically " + now)
    timer.after(200, tick)
def quit_pro():
    """Show the final score and close the game window (scheduled to run
    60 seconds after startup)."""
    messagebox.showinfo("OOPS!!!!!!!", "Time UP!!! Your Score "+str(score))
    window.destroy()
# Build the 4x5 letter board from Matrix_list (row-major, five per row).
# Replaces twenty copy-pasted Button blocks that kept rebinding btn1..btn5;
# none of those names were referenced afterwards.
for idx, letter in enumerate(Matrix_list):
    btn = Button(window, text=letter.upper(), bg="skyBlue", fg="Black",
                 width=3, height=1, font=('Helvetica', '20'))
    btn.grid(column=idx % 5 + 1, row=idx // 5 + 1)
# Entry where the player types a candidate word.
word_check = Entry(window, width=50)
word_check.configure(highlightbackground="red", highlightcolor="red")
word_check.grid(row=5, column=0, columnspan=6)
btncheck = Button(window, text="Submit", bg="Green", fg="White",
                  width=5, height=2, font=('Helvetica', '10'), command=checkspells)
btncheck.grid(column=10, row=5)
label = Label(window, text="Score = 0")
label.grid(column=11, row=5)
timer = Label(window, text="you have 1 minute")
timer.grid(column=0, row=6, columnspan=6)
# Start the clock refresh loop and schedule the 60-second shutdown.
tick()
window.after(60000, quit_pro)
window.mainloop()
|
import pandas as pd
import os
import matplotlib.pyplot as plt
# Collect per-hyper-parameter sweep curves from the training logs into
# one CSV per (dataset, parameter, metric).
datasets = ['cifar10_binary', 'stl10_binary']
for dataset in datasets:
    log_path = f'logs/{dataset}'
    # Sweep display names, swept values, and the token used in file names.
    title = ['step_size', 'iters', 'batch_size', 'intervals', 'pool_size']
    params = [
        (0.05, 0.1, 0.2, 0.3, 0.4, 0.5),
        (500, 1000, 2000, 4000, 8000),
        (0.05, 0.1, 0.25, 0.5, 0.75, 0.9),
        (5, 10, 20),
        (64, 128, 256),
    ]
    keys = ['lr', 'it', 'nr', 'interval', 'pool']
    # Robustness: create the output directory instead of crashing when it
    # does not exist yet.
    os.makedirs('results', exist_ok=True)
    for name, values, key_prefix in zip(title, params, keys):
        train_acc = {}
        test_acc = {}
        train_loss = {}
        test_loss = {}
        for value in values:
            file_name = f'{dataset}_01_{key_prefix}{value}_mlp01scd_0.csv'
            df = pd.read_csv(os.path.join(log_path, file_name))
            col_name = f'{value}'
            train_acc[col_name] = df['train acc']
            test_acc[col_name] = df['test acc']
            train_loss[col_name] = df['train loss']
            test_loss[col_name] = df['test loss']
        pd.DataFrame(train_acc).to_csv(os.path.join('results', f'{dataset}_mlp_{name}_train_acc.csv'), index=False)
        pd.DataFrame(test_acc).to_csv(os.path.join('results', f'{dataset}_mlp_{name}_test_acc.csv'), index=False)
        pd.DataFrame(train_loss).to_csv(os.path.join('results', f'{dataset}_mlp_{name}_train_loss.csv'), index=False)
        pd.DataFrame(test_loss).to_csv(os.path.join('results', f'{dataset}_mlp_{name}_test_loss.csv'), index=False)
# Copyright (c) 2019-2020, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import cudf
import cugraph
import pandas as pd
import networkx as nx
import dask_cudf
import os
from cugraph.dask.common.mg_utils import (get_client)
#
# Datasets are numbered based on the number of elements in the array
#
DATASETS_1 = ['../datasets/netscience.csv']
DATASETS_2 = ['../datasets/karate.csv',
'../datasets/dolphins.csv']
DATASETS_3 = ['../datasets/karate.csv',
'../datasets/dolphins.csv',
'../datasets/email-Eu-core.csv']
# FIXME: netscience.csv causes NetworkX pagerank to throw an exception.
# (networkx/algorithms/link_analysis/pagerank_alg.py:152: KeyError: 1532)
DATASETS_4 = ['../datasets/karate.csv',
'../datasets/dolphins.csv',
'../datasets/netscience.csv',
'../datasets/email-Eu-core.csv']
DATASETS_5 = ['../datasets/karate.csv',
'../datasets/dolphins.csv',
'../datasets/polbooks.csv',
'../datasets/netscience.csv',
'../datasets/email-Eu-core.csv']
STRONGDATASETS = ['../datasets/dolphins.csv',
'../datasets/netscience.csv',
'../datasets/email-Eu-core.csv']
DATASETS_KTRUSS = [('../datasets/polbooks.csv',
'../datasets/ref/ktruss/polbooks.csv'),
('../datasets/netscience.csv',
'../datasets/ref/ktruss/netscience.csv')]
TINY_DATASETS = ['../datasets/karate.csv',
'../datasets/dolphins.csv',
'../datasets/polbooks.csv']
SMALL_DATASETS = ['../datasets/netscience.csv',
'../datasets/email-Eu-core.csv']
UNRENUMBERED_DATASETS = ['../datasets/karate.csv']
# define the base for tests to use
DATASETS = DATASETS_3
DATASETS_UNDIRECTED = DATASETS_2
def read_csv_for_nx(csv_file, read_weights_in_sp=True):
    """Read a space-separated edge-list CSV into a pandas DataFrame.

    Columns are named '0' (source), '1' (destination) and 'weight'.
    The weight dtype is float32 (single precision) when
    read_weights_in_sp is True, otherwise float64.

    The original duplicated the whole read_csv call in both branches with
    only the weight dtype differing; the branches are merged here.
    """
    print('Reading ' + str(csv_file) + '...')
    weight_dtype = 'float32' if read_weights_in_sp is True else 'float64'
    # nverts = 1 + max(df['0'].max(), df['1'].max())
    # return coo_matrix((df['2'], (df['0'], df['1'])), shape=(nverts, nverts))
    return pd.read_csv(csv_file, delimiter=' ', header=None,
                       names=['0', '1', 'weight'],
                       dtype={'0': 'int32', '1': 'int32',
                              'weight': weight_dtype})
def read_csv_file(csv_file, read_weights_in_sp=True):
    """Read a space-separated edge-list CSV into a cudf DataFrame with
    int32 endpoints and float32 (single precision) or float64 weights."""
    print('Reading ' + str(csv_file) + '...')
    if read_weights_in_sp is True:
        dtypes = ['int32', 'int32', 'float32']
    else:
        dtypes = ['int32', 'int32', 'float64']
    return cudf.read_csv(csv_file, delimiter=' ', dtype=dtypes, header=None)
def read_dask_cudf_csv_file(csv_file, read_weights_in_sp=True,
                            single_partition=True):
    """Read a space-separated edge-list CSV into a dask_cudf DataFrame.

    Columns are named src/dst/weight; weights are float32 when
    read_weights_in_sp is True, float64 otherwise. With single_partition,
    chunksize is set to the whole file size so exactly one partition is
    produced.

    BUG FIX: the original's float64 + single_partition branch passed
    'float32' as the weight dtype, inconsistent with its own
    multi-partition branch.
    """
    print('Reading ' + str(csv_file) + '...')
    weight_dtype = 'float32' if read_weights_in_sp is True else 'float64'
    kwargs = dict(delimiter=' ',
                  names=['src', 'dst', 'weight'],
                  dtype=['int32', 'int32', weight_dtype],
                  header=None)
    if single_partition:
        # One partition: make the chunk as large as the file itself.
        kwargs['chunksize'] = os.path.getsize(csv_file)
    return dask_cudf.read_csv(csv_file, **kwargs)
def generate_nx_graph_from_file(graph_file, directed=True):
    """Build a NetworkX (Di)Graph from a space-separated edge-list CSV."""
    edges = read_csv_for_nx(graph_file)
    graph_type = nx.DiGraph() if directed else nx.Graph()
    return nx.from_pandas_edgelist(edges, create_using=graph_type,
                                   source='0', target='1')
def generate_cugraph_graph_from_file(graph_file, directed=True):
    """Build a cuGraph (Di)Graph from a space-separated edge-list CSV."""
    edges = read_csv_file(graph_file)
    graph = cugraph.DiGraph() if directed else cugraph.Graph()
    graph.from_cudf_edgelist(edges, source='0', destination='1')
    return graph
def generate_mg_batch_cugraph_graph_from_file(graph_file, directed=True):
    """Build a multi-GPU batch cuGraph (Di)Graph from a CSV via dask_cudf."""
    client = get_client()
    # Persist the distributed frame on the cluster before building the graph.
    ddf = client.persist(read_dask_cudf_csv_file(graph_file))
    graph = cugraph.DiGraph() if directed else cugraph.Graph()
    graph.from_dask_cudf_edgelist(ddf)
    return graph
def build_cu_and_nx_graphs(graph_file, directed=True):
    """Return (cuGraph graph, NetworkX graph) built from the same CSV."""
    return (generate_cugraph_graph_from_file(graph_file, directed=directed),
            generate_nx_graph_from_file(graph_file, directed=directed))
def build_mg_batch_cu_and_nx_graphs(graph_file, directed=True):
    """Return (multi-GPU batch cuGraph graph, NetworkX graph) for one CSV."""
    return (generate_mg_batch_cugraph_graph_from_file(graph_file,
                                                      directed=directed),
            generate_nx_graph_from_file(graph_file, directed=directed))
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.contrib import admin
# Register your models here.
from graficas.models import Empresa, GrafanaData, GrafanaDashBoards
class EmpresaAdmin(admin.ModelAdmin):
    """Admin configuration for Empresa: searchable by name and slug, with
    the slug pre-populated from the name while typing."""
    model = Empresa
    search_fields = ('nombre', 'slug')
    list_display = ('nombre',)
    prepopulated_fields = {"slug": ("nombre",)}
admin.site.register(Empresa, EmpresaAdmin)
class GrafanaDataAdmin(admin.ModelAdmin):
    """Admin configuration for GrafanaData."""
    model = GrafanaData
    # NOTE(review): searching on the 'empresa' relation directly raises in
    # newer Django versions — 'empresa__nombre' is the usual form; confirm.
    search_fields = ('empresa', 'id')
    list_display = ('empresa',)
    raw_id_fields = ['empresa']
admin.site.register(GrafanaData, GrafanaDataAdmin)
class GrafanaDashBoardsAdmin(admin.ModelAdmin):
    """Admin configuration for GrafanaDashBoards."""
    model = GrafanaDashBoards
    # NOTE(review): as above, searching the 'empresa' relation directly may
    # need to be 'empresa__nombre' on newer Django — confirm.
    search_fields = ('title', 'empresa')
    list_display = ('title', 'empresa',)
    raw_id_fields = ['empresa']
admin.site.register(GrafanaDashBoards, GrafanaDashBoardsAdmin)
################################################################################
#
# Copyright (c) 2017 University of Oxford
# Authors:
# Geoff Pascoe (gmp@robots.ox.ac.uk)
#
# This work is licensed under the Creative Commons
# Attribution-NonCommercial-ShareAlike 4.0 International License.
# To view a copy of this license, visit
# http://creativecommons.org/licenses/by-nc-sa/4.0/ or send a letter to
# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA.
#
################################################################################
import os
import re
import numpy as np
import matplotlib.pyplot as plt
import argparse
from build_pointcloud import build_pointcloud
from transform import build_se3_transform
from image import load_image
from camera_model import CameraModel
parser = argparse.ArgumentParser(description='Project LIDAR data into camera image')
parser.add_argument('--image_dir', type=str, help='Directory containing images')
parser.add_argument('--laser_dir', type=str, help='Directory containing LIDAR scans')
parser.add_argument('--poses_file', type=str, help='File containing either INS or VO poses')
parser.add_argument('--models_dir', type=str, help='Directory containing camera models')
parser.add_argument('--extrinsics_dir', type=str, help='Directory containing sensor extrinsics')
parser.add_argument('--image_idx', type=int, help='Index of image to display')
args = parser.parse_args()

model = CameraModel(args.models_dir, args.image_dir)
# Camera-to-vehicle extrinsics for this camera (one line of floats).
extrinsics_path = os.path.join(args.extrinsics_dir, model.camera + '.txt')
with open(extrinsics_path) as extrinsics_file:
    extrinsics = [float(x) for x in next(extrinsics_file).split(' ')]
G_camera_vehicle = build_se3_transform(extrinsics)
G_camera_posesource = None

# Pose files are named {vo,ins,rtk}.csv; INS/RTK poses need the extra
# INS-to-vehicle transform chained in.
poses_type = re.search('(vo|ins|rtk)\.csv', args.poses_file).group(1)
if poses_type in ['ins', 'rtk']:
    with open(os.path.join(args.extrinsics_dir, 'ins.txt')) as extrinsics_file:
        extrinsics = next(extrinsics_file)
        G_camera_posesource = G_camera_vehicle * build_se3_transform([float(x) for x in extrinsics.split(' ')])
else:
    # VO frame and vehicle frame are the same
    G_camera_posesource = G_camera_vehicle

# The timestamps file lives next to (or one level above) the image dir.
timestamps_path = os.path.join(args.image_dir, os.pardir, model.camera + '.timestamps')
if not os.path.isfile(timestamps_path):
    timestamps_path = os.path.join(args.image_dir, os.pardir, os.pardir, model.camera + '.timestamps')

# Pick the timestamp of the requested image index.
timestamp = 0
with open(timestamps_path) as timestamps_file:
    for i, line in enumerate(timestamps_file):
        if i == args.image_idx:
            timestamp = int(line.split(' ')[0])

# Build a pointcloud from scans within +/- 1e7 ticks of the image time.
pointcloud, reflectance = build_pointcloud(args.laser_dir, args.poses_file, args.extrinsics_dir,
                                           timestamp - 1e7, timestamp + 1e7, timestamp)
pointcloud = np.dot(G_camera_posesource, pointcloud)

image_path = os.path.join(args.image_dir, str(timestamp) + '.png')
image = load_image(image_path, model)

# Project into the image plane and overlay depth-coloured points.
uv, depth = model.project(pointcloud, image.shape)
plt.imshow(image)
plt.scatter(np.ravel(uv[0, :]), np.ravel(uv[1, :]), s=2, c=depth, edgecolors='none', cmap='jet')
plt.xlim(0, image.shape[1])
plt.ylim(image.shape[0], 0)
plt.xticks([])
plt.yticks([])
plt.show()
|
#!/usr/bin/env python3
# sutimar pengpinij
# 590510137
# Lab 05
# Problem 5
# 204111 Sec 003
def main():
    """Read a year from stdin and print its zodiac element and animal."""
    print(zodiac_element(int(input(""))))
def zodiac_element(year):
    """Return the Chinese zodiac "<element> <animal>" string for a year.

    The element cycles every 10 years (two consecutive years per element,
    year % 10 in (0, 1) -> Metal) and the animal every 12 years
    (year % 12 == 0 -> Monkey, e.g. 2016).

    The original used two long if/elif ladders; both are plain table
    lookups on the year's residues.
    """
    elements = ("Metal", "Metal", "Water", "Water", "Wood", "Wood",
                "Fire", "Fire", "Earth", "Earth")
    animals = ("Monkey", "Rooster", "Dog", "Pig", "Rat", "Ox",
               "Tiger", "Rabbit", "Dragon", "Snake", "Horse", "Goat")
    return elements[year % 10] + " " + animals[year % 12]
# Run interactively only when executed as a script.
if __name__=="__main__":
    main()
|
import pandas as pd
# Join the geometry feature table with the cubical-complex feature table on
# a common image key derived from each table's file names.
df1 = pd.read_csv('/home/vade1057/solar-flares/code/topology/geometry/results/merged.csv')
df2 = pd.read_csv('/home/vade1057/solar-flares/code/topology/cubical_complexes/results/cubical_complexes_320K/cubical_complexes_440K_debug.csv')
print(df1.columns)
print(df2.columns)
# BUG FIX: a callable replacement requires regex=True (pandas >= 2 raises
# without it since the default became regex=False); the dots are escaped so
# '.Br.fits' matches literally. '<base>.Br.fits' -> '<base>.png'.
df1['merge_label'] = df1['label'].str.replace(
    r'(.*)\.Br\.fits', lambda f: f.group(1) + '.png', regex=True)
print(df1['merge_label'].head())
# Strip the absolute image directory prefix (a literal, not a regex).
df2['merge_label'] = df2['0'].str.replace(
    '/srv/data/varad/data/all_images/', '', regex=False)
print(df2['merge_label'].head())
result = pd.merge(df1, df2, on='merge_label')
# Keep only columns from position 41 onward (drops df1's feature block).
result = result.iloc[:, 41:]
#result = result.drop(columns="flare")
#result['flare'] = result['M_flare_in_24h'] + result['X_flare_in_24h']
#result['flare'].to_csv('/tmp/foo')
#result['flare'][result['flare'] == 2] = 1
#result = result.drop(columns=["merge_label", "filename", "M_flare_in_6h", "X_flare_in_6h",
#                              "M_flare_in_12h", "X_flare_in_12h",
#                              "M_flare_in_24h", "X_flare_in_24h",
#                              "M_flare_in_48h", "X_flare_in_48h"])
print(result.columns)
print(result.head())
result.to_csv('merged_top.csv', index=None)
|
# coding: utf-8
import os
import sys
import ssl
import time
import socket
from urllib.request import urlopen, Request
def get_dirname(path):
    """Return the directory containing `path`, after resolving symlinks."""
    resolved = os.path.realpath(path)
    return os.path.dirname(resolved)
# Project layout: this file lives one level below the GotoX root.
file_dir = get_dirname(__file__)
root_dir = os.path.dirname(file_dir)
py_dir = os.path.join(root_dir, 'python')
icon_gotox = os.path.join(root_dir, 'gotox.ico')
config_dir = os.path.join(root_dir, 'config')
direct_ipdb = os.path.join(root_dir, 'data', 'directip.db')
direct_domains = os.path.join(root_dir, 'data', 'directdomains.txt')
config_filename = os.path.join(config_dir, 'Config.ini')
config_user_filename = os.path.join(config_dir, 'Config.user.ini')
config_auto_filename = os.path.join(config_dir, 'ActionFilter.ini')
# GotoX CA certificate.
ca1 = os.path.join(root_dir, 'cert', 'CA.crt')
# CA bundle used for APNIC and GitHub downloads.
ca2 = os.path.join(root_dir, 'cert', 'cacert-ds.pem')
# Lazily-initialised globals: the shared TLS context (create_context) and
# the logging module/logger installed by getlogger().
gcontext = None
logging = None
logger = None

# Load the bundled "local" package under the module name "gotox" without
# touching sys.path, using the frozen importlib machinery directly.
from _frozen_importlib_external import spec_from_file_location
from _frozen_importlib import _load
filepath = os.path.join(root_dir, 'local', '__init__.py')
_load(spec_from_file_location('gotox', filepath))
from gotox.compat import single_instance, replace_logging, patch_configparser
from gotox.common.cconfig import cconfig
from gotox.common.decorator import propertyb
def load_config():
    """Read Config.ini / Config.user.ini and return the listener settings.

    Returns (LISTEN_AUTO, LISTEN_ACT, LISTEN_ACTTYPE) where the first two
    are "ip:port" strings. Also applies the configured log level/stream via
    logging.basicConfig.
    """
    patch_configparser()
    import re
    from configparser import ConfigParser
    # Map the config's 0-3 integer level onto logging levels.
    # NOTE(review): logging.TEST and logging.NULL_STREAM are custom
    # attributes added by replace_logging() — confirm getlogger() ran first.
    _LOGLv = {
        0: logging.WARNING,
        1: logging.INFO,
        2: logging.TEST,
        3: logging.DEBUG
    }
    CONFIG = ConfigParser(dict_type=dict, inline_comment_prefixes=('#', ';'))
    # Allow option lines without '=' (the value is the rest of the line).
    CONFIG._optcre = re.compile(r'(?P<option>[^=\s]+)\s*(?P<vi>=?)\s*(?P<value>.*)')
    CONFIG.read([config_filename, config_user_filename])
    LISTEN_IP = CONFIG.get('listen', 'ip')
    # Normalise wildcard / empty listen addresses to a loopback address.
    if LISTEN_IP == '0.0.0.0':
        LISTEN_IP = '127.0.0.1'
    elif LISTEN_IP == '::':
        LISTEN_IP = '::1'
    elif LISTEN_IP == '':
        # No address configured: pick v4 or v6 loopback from the profile.
        LINK_PROFILE = CONFIG.get('link', 'profile')
        if LINK_PROFILE not in ('ipv4', 'ipv6', 'ipv46'):
            LINK_PROFILE = 'ipv4'
        LISTEN_IP = '127.0.0.1' if '4' in LINK_PROFILE else '::1'
    LISTEN_AUTOPORT = CONFIG.getint('listen', 'autoport', fallback=8087)
    LISTEN_ACTPORT = CONFIG.getint('listen', 'actport', fallback=8086)
    LISTEN_ACTTYPE = CONFIG.get('listen', 'act', fallback='cfw').upper()
    LISTEN_AUTO = '%s:%d' % (LISTEN_IP, LISTEN_AUTOPORT)
    LISTEN_ACT = '%s:%d' % (LISTEN_IP, LISTEN_ACTPORT)
    LOG_PRINT = CONFIG.getboolean('log', 'print', fallback=True)
    LOG_LEVEL = _LOGLv[min(CONFIG.getint('log', 'level', fallback=1), 3)]
    log_config = {'level': LOG_LEVEL}
    if not LOG_PRINT:
        log_config['stream'] = logging.NULL_STREAM
    logging.basicConfig(**log_config)
    return LISTEN_AUTO, LISTEN_ACT, LISTEN_ACTTYPE
def getlogger(use_print=False):
    """Return the module logger, creating it on first call.

    With use_print=True a minimal print-based stand-in replaces the real
    logging module (for use before logging is configured); otherwise
    replace_logging() is applied and a real '[launcher]' logger is created.
    The chosen objects are cached in the module globals.
    """
    global logging, logger
    if logging is None:
        if use_print:
            # Minimal shim: every level just prints the %-formatted message.
            class logging:
                def info(s, *args):
                    print(s % args)
                warning = debug = info
            logger = logging
        else:
            replace_logging()
            import logging
            logger = logging.getLogger('[launcher]')
    return logger
try:
    startfile = os.startfile
except AttributeError:
    # Non-Windows: emulate os.startfile with the platform's opener.
    def startfile(filename):
        """Open `filename` with the platform's default application."""
        from subprocess import call
        if sys.platform.startswith('darwin'):
            operation = 'open'
        elif os.name == 'posix':
            operation = 'xdg-open'
        else:
            # FIX: previously `operation` stayed unbound here and the call
            # below raised a confusing NameError; fail explicitly instead.
            raise OSError('startfile is not supported on this platform')
        call((operation, filename))
class DataSource:
    """One downloadable data source, optionally with child sub-sources.

    A root source is created with a DataSourceManager as `manager` and gets
    a unique sign bit from it; a child source is created with an existing
    DataSource as `manager` (up to manager.max_generations levels deep) and
    carries sign 0. Unknown attribute access is delegated to the wrapped
    cconfig instance via __getattr__.
    """
    # strftime format used to stamp updates; None disables the stamp.
    datefmt = None

    def __init__(self, manager, name, url, parser, fullname=None):
        if isinstance(manager, DataSourceManager):
            # Root data source: claim the manager's next free sign bit.
            self.parent = None
            self._generations = 1
            self._sign = 1 << manager.sign_bit
            self._cconfig = cconfig(name.lower(), manager._conf)
        elif isinstance(manager, self.__class__):
            # Child data source: attach below an existing source.
            parent = manager
            manager = parent.manager
            generations = parent._generations + 1
            if generations > manager.max_generations:
                raise ValueError(
                    'DataSource.__init__ "generations=%d" 超过最大值：%d'
                    % (generations, manager.max_generations))
            self._generations = generations
            self._sign = 0
            self._cconfig = cconfig(name.lower(), parent)
            parent._children[name.lower()] = self
            # Children inherit the parent's parser unless given one.
            parser = parser or parent.parser
        else:
            raise TypeError('DataSource.__init__ "manager" 类型错误：%s'
                            % manager.__class__)
        self.manager = manager
        self.url = url
        self.parser = parser
        self.fullname = fullname or name
        self.req = None
        self.update = None
        self.itemlist = []

    def __getattr__(self, name):
        # Fall through to the wrapped cconfig for unknown attributes.
        return getattr(self._cconfig, name)

    def add_child(self, name, url, parser=None, fullname=None):
        """Create and return a child DataSource attached under this one."""
        return self.__class__(self, name, url, parser, fullname)

    @property
    def sign(self):
        # Bitmask identifying this source (0 for child sources).
        return self._sign

    @propertyb
    def update(self):
        # String form: "<name>-<timestamp>".
        return '%s-%s' % (self.name, self._update)

    @update.boolgetter
    def update(self):
        # Truthiness form: the raw timestamp value.
        return self._update

    @update.setter
    def update(self, value):
        self._update = value

    def clear_data(self):
        """Drop downloaded items here and in every descendant source."""
        self.itemlist.clear()
        for child_ds in self.get_children():
            child_ds.clear_data()

    def __get_other_sign(self, other):
        if isinstance(other, self.__class__):
            other = other.sign
        return other

    # Bitwise operators combine sign bits with ints or other DataSources.
    def __and__(self, other):
        return self.__get_other_sign(other) & self.sign

    def __xor__(self, other):
        return self.__get_other_sign(other) ^ self.sign

    def __or__(self, other):
        return self.__get_other_sign(other) | self.sign

    __rand__ = __and__
    __rxor__ = __xor__
    __ror__ = __or__

    def __raise_noit_err(self, other):
        # In-place bitwise ops would overwrite the sign; forbid them.
        raise NotImplementedError

    __iand__ = __ixor__ = __ior__ = __raise_noit_err
class DataSourceManager:
    """Registry of root DataSources plus their on/off configuration.

    Each registered source gets a unique sign bit; selections are expressed
    as bitmasks combining those signs.
    """
    # Maximum depth of the DataSource parent/child tree.
    max_generations = 2

    def __init__(self, name):
        self._sign_all = 0
        self._sign_bit = 0
        # Maps '--<name>' CLI flags to their DataSource.
        self._valid = {}
        self._conf = cconfig('ds_' + name)

    def set_conf(self, conf):
        """Attach this manager's config subtree under an external cconfig
        and load the saved state."""
        _conf = self._conf
        _conf.parent = conf
        conf.add(_conf.name)
        conf._children[_conf.name] = _conf
        self.load()

    def add(self, name, url, parser, fullname=None):
        """Register a new root DataSource; its CLI flag is '--<name>'."""
        self._conf.add(name)
        ds = DataSource(self, name, url, parser, fullname)
        self._valid['--' + name.lower()] = ds
        self._sign_all |= ds.sign
        self._sign_bit += 1
        return ds

    def get(self, name):
        return self._valid.get('--' + name.lower())

    @property
    def sign_bit(self):
        # Next free bit index == number of registered sources.
        return self._sign_bit

    @property
    def sign_all(self):
        # Bitmask with every registered source's sign set.
        return self._sign_all

    def check(self, name):
        return self._conf.check(name)

    def set(self, name, save=False):
        self._conf.set(name, 1, save)

    def switch(self, name, save=False):
        self._conf.switch(name, save)

    def load(self, filename=None):
        """Load the manager's and every source's saved configuration."""
        self._conf.load(filename=filename)
        for ds in self.sources():
            ds.load(filename=filename)

    def save(self, filename=None):
        """Persist the manager's and every source's configuration."""
        self._conf.save(filename=filename)
        for ds in self.sources():
            ds.save(filename=filename)

    def get_source(self, *args):
        """Parse CLI-style args into a bitmask of requested sources.

        '--all' selects every source; tokens following a source's flag are
        passed to that source's set().
        """
        kwargs = parse_cmds(*args)
        data_source = 0
        if '--all' in kwargs:
            data_source = self._sign_all
        for par in self._valid:
            if par in kwargs:
                data_source |= self._valid[par].sign
                for name in kwargs[par]:
                    self._valid[par].set(name)
        return data_source

    def clear_source_data(self):
        for ds in self.sources():
            ds.clear_data()

    def sources(self):
        return self._valid.values()

    @property
    def data_source(self):
        # Bitmask of the sources currently enabled in the configuration.
        data_source = 0
        for ds in self.sources():
            if self._conf.check(ds.name):
                data_source |= ds
        return data_source
def parse_cmds(*args):
    """Group command-line tokens into a {flag: [values...]} mapping.

    Tokens before the first '-'-prefixed flag are discarded; every other
    token is appended to the most recent flag's value list.
    """
    kwargs = {}
    current = None
    for token in args:
        if token.startswith('-'):
            current = token
            kwargs[current] = []
        elif current is not None:
            kwargs[current].append(token)
    return kwargs
def create_context(cafiles=(), capaths=(), cadatas=()):
    """Build a strict TLS client context loading the given CA sources.

    Arguments are iterables of CA file paths, CA directory paths, and PEM
    data blobs; missing files/dirs and empty blobs are silently skipped.
    (Defaults changed from mutable [] to immutable () — same semantics.)
    """
    context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
    context.options |= ssl.OP_NO_SSLv2
    context.options |= ssl.OP_NO_SSLv3
    context.options |= getattr(ssl._ssl, 'OP_NO_COMPRESSION', 0)
    # PROTOCOL_TLS_CLIENT already enables both, but keep them explicit.
    context.check_hostname = True
    context.verify_mode = ssl.CERT_REQUIRED
    # FIX: _RESTRICTED_SERVER_CIPHERS is a private constant removed in newer
    # Python versions; fall back to the default cipher list when absent.
    ciphers = getattr(ssl, '_RESTRICTED_SERVER_CIPHERS', None)
    if ciphers:
        context.set_ciphers(ciphers)
    for cafile in cafiles:
        if os.path.isfile(cafile):
            context.load_verify_locations(cafile=cafile)
    for capath in capaths:
        if os.path.isdir(capath):
            context.load_verify_locations(capath=capath)
    for cadata in cadatas:
        if cadata:
            context.load_verify_locations(cadata=cadata)
    return context
def download(req, context=None):
    """Open `req` with retries; return (response, content_length).

    Retries up to max_retries times, retry_delay seconds apart, until the
    response carries a non-zero Content-Length; raises the last error (or a
    generic OSError) after exhausting the retries.
    """
    # Default to the shared context built from the bundled CAs so downloads
    # work out of the box.
    global gcontext
    if context is None:
        if gcontext is None:
            gcontext = create_context(cafiles=[ca1, ca2])
        context = gcontext
    retry_delay = 10
    max_retries = 4
    retry_times = 0
    timeout = 8
    l = 0
    while True:
        err = None
        try:
            fd = urlopen(req, timeout=timeout, context=context)
        except Exception as e:
            err = e
        else:
            l = int(fd.headers.get('Content-Length', 0))
            if l:
                break
        retry_times += 1
        if retry_times > max_retries:
            logger.warning('请求网址 %r 时，重试 %d 次后仍然失败。'
                           % (req.full_url, max_retries))
            logger.warning('请忽略下面这个错误跟踪，并检查是否需要'
                           '更改自动代理规则（ActionFilter.ini）。')
            # Re-raise the last captured error, or a generic failure when
            # the responses simply had no Content-Length.
            raise err or OSError('连接失败', 0)
        logger.debug('获取更新数据失败，%d 秒后重试' % retry_delay)
        time.sleep(retry_delay)
    return fd, l
def download_as_list(ds):
    """Download the data source's list, resuming interrupted transfers.

    ds.parser consumes the open response and reports (bytes_consumed,
    bytes_of_last_entry); on a short read the download resumes with an HTTP
    Range request, backing up past the possibly-truncated last entry.
    Fills and returns ds.itemlist.
    """
    # Supports HTTP Range resume.
    logger.info('开始下载 %s 列表' % ds.fullname)
    if ds.req is None:
        ds.req = Request(ds.url)
        ds.req.headers['Range'] = 'bytes=0-'
    if ds.datefmt is None:
        ds.update = None
    else:
        # Stamp the update with the local time in the source's format.
        ds.update = time.strftime(ds.datefmt, time.localtime(time.time()))
    ds.itemlist.clear()
    read = 0
    fd, l = download(ds.req)
    while True:
        _read, ll = ds.parser(fd, ds)
        if _read is None:
            # Parser signalled completion without a byte count.
            read = l
        else:
            read += _read
        fd.close()
        if read >= l:
            break
        # Download failed part-way: resume, backing up over the last
        # (possibly incomplete) entry.
        read -= ll
        ds.req.headers['Range'] = 'bytes=%d-' % read
        logger.debug('%s 列表下载中断，续传：%d/%d' % (ds.fullname, read, l))
        fd, _ = download(ds.req)
    logger.info(ds.fullname + ' 列表下载完毕')
    return ds.itemlist
def set_proxy(proxy_addr):
    """Probe *proxy_addr* ("host:port") and export it as the HTTP(S) proxy.

    :param proxy_addr: proxy address in "host:port" form
    :return: ``True`` when the proxy answered and was installed into the
             environment; ``False`` (with proxy variables cleared) otherwise
    """
    try:
        ip, port = proxy_addr.split(':')
        # fail fast if the proxy does not accept connections
        socket.create_connection((ip, int(port)), timeout=1).close()
        os.environ['HTTPS_PROXY'] = os.environ['HTTP_PROXY'] = proxy_addr
        logger.info('\nไปฃ็ๅฐๅ %r ๅทฒ่ฎพ็ฝฎๆๅใ' % proxy_addr)
        return True
    except (OSError, ValueError):
        # narrowed from a bare "except:", which also swallowed
        # KeyboardInterrupt/SystemExit; returns an explicit falsy value
        os.environ.pop('HTTP_PROXY', None)
        os.environ.pop('HTTPS_PROXY', None)
        return False
def parse_set_proxy(data_source):
use_proxy = None
if '-p' in sys.argv:
try:
proxy_addr = sys.argv[sys.argv.index('-p') + 1]
except IndexError:
print('\nไปฃ็ๅฐๅ่ฏปๅๅคฑ่ดฅ๏ผ้ๅบ่ๆฌ...')
sys.exit(-1)
if not set_proxy(proxy_addr):
print('\nไปฃ็ๅฐๅ %r ่ฎพ็ฝฎๅคฑ่ดฅ๏ผ้ๅบ่ๆฌ...' % proxy_addr)
sys.exit(-1)
if data_source == 0:
print('่ฟๅ
ฅไบคไบๆจกๅผ\n')
return True
elif '-d' in sys.argv:
if data_source == 0:
print('่ฟๅ
ฅไบคไบๆจกๅผ\n')
return False
else:
use_proxy = input('่ฟๅ
ฅไบคไบๆจกๅผ\n\nๆฏๅฆ่ฎพ็ฝฎไปฃ็๏ผY/N๏ผ๏ผ')
use_proxy = use_proxy.upper() == 'Y'
if use_proxy:
print('\nๅผๅง่ฎพ็ฝฎไปฃ็๏ผไป
ๆฏๆ HTTP ไปฃ็๏ผๆ ผๅผ๏ผ"ไธปๆบๅ(IP ๆๅๅ):็ซฏๅฃ"')
while use_proxy:
proxy_addr = input('\n่ฏท่พๅ
ฅไปฃ็ๅฐๅ๏ผ'
'็็ฉบไฝฟ็จ "127.0.0.1:8087"๏ผ\n') or '127.0.0.1:8087'
if set_proxy(proxy_addr):
break
else:
use_proxy = input('\nๅฝๅไปฃ็ %r ๆ ๆณ้พๆฅ๏ผๆฏๅฆ็ปง็ปญ่ฎพ็ฝฎไปฃ็๏ผY/N๏ผ๏ผ' % proxy_addr)
use_proxy = use_proxy.upper() == 'Y'
if use_proxy is False:
print('\n่ทณ่ฟไปฃ็่ฎพ็ฝฎ')
return use_proxy
# Menu text shown by select_path(): 1 = data directory, 2 = current
# directory, 0 = quit.
Tips1 = '''
***********************************************
* ่ฏท้ๆฉๅญๆพ็ฎๅฝ๏ผ *
* ๆฐๆฎ็ฎๅฝ ------ ๆ 1 *
* ๅฝๅ็ฎๅฝ ------ ๆ 2 *
* ้ๅบ ---------- ๆ 0 *
***********************************************
'''
def select_path(*path):
n = input(Tips1)
try:
n = int(n)
except:
print('่พๅ
ฅ้่ฏฏ๏ผ')
return
if n == 0:
sys.exit(0)
elif n == 1:
return path[0]
elif n == 2:
return path[1]
else:
print('่พๅ
ฅ้่ฏฏ๏ผ')
|
"""
ะะผั ะฟัะพะตะบัะฐ: practicum-1
ะะพะผะตั ะฒะตััะธะธ: 1.0
ะะผั ัะฐะนะปะฐ: 2.py
ะะฒัะพั: 2020 ยฉ ะ.ะ. ะฎัะบะธะฝะฐ, ะงะตะปัะฑะธะฝัะบ
ะะธัะตะฝะทะธั ะธัะฟะพะปัะทะพะฒะฐะฝะธั: CC BY-NC 4.0 (https://creativecommons.org/licenses/by-nc/4.0/deed.ru)
ะะฐัะฐ ัะพะทะดะฐะฝะธั: 11/11/2020
ะะฐัะฐ ะฟะพัะปะตะดะฝะตะน ะผะพะดะธัะธะบะฐัะธะธ: 11/11/2020
ะกะฒัะทะฐะฝะฝัะต ัะฐะนะปั/ะฟะฐะบะตัั: min, max
ะะฟะธัะฐะฝะธะต: ะ ะตัะตะฝะธะต ะทะฐะดะฐัะธ 2 ะฟัะฐะบัะธะบัะผะฐ โ 1
#ะฒะตััะธั Python: 3.8
"""
"""
ะะฐะฝั ะดะตะนััะฒะธัะตะปัะฝัะต ัะธัะปะฐ ะ, ะ, ะก. ะะฐะนัะธ ะผะฐะบัะธะผะฐะปัะฝะพะต ะธ ะผะธะฝะธะผะฐะปัะฝะพะต ะธะท ััะธั
ัะธัะตะป.
"""
# Input values for the exercise (A and C are floats, B an int).
A = 2.0
B = 8
C = 6.12
# min()/max() accept any number of comparable arguments.
values = (A, B, C)
Min = min(values)
Max = max(values)
print("Min = ", Min)
print("Max = ", Max)
|
import sys, boto3
# Command-line arguments; argv[1] is the image object key inside the bucket.
args = sys.argv
if __name__ == "__main__":
    fileName=args[1]
    # NOTE(review): bucket name and region are hard-coded — confirm they
    # match the deployment.
    bucket='pakuty-mujin-backet'
    client=boto3.client('rekognition','us-east-2')
    # Ask Rekognition to label the image stored in S3.
    response = client.detect_labels(Image={'S3Object':{'Bucket':bucket,'Name':fileName}})
    print('Detected labels for ' + fileName)
    for label in response['Labels']:
        print (label['Name'] + ' : ' + str(label['Confidence']))
|
# -*- coding: utf-8 -*-
# Copyright 2018 ICON Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import getpass
import json
import os
import shutil
import unittest
from iconcommons.icon_config import IconConfig
from iconcommons.logger import Logger
from iconsdk.builder.transaction_builder import DeployTransactionBuilder
from iconsdk.exception import KeyStoreException
from iconsdk.icon_service import IconService
from iconsdk.libs.in_memory_zip import gen_deploy_data_content
from iconsdk.providers.http_provider import HTTPProvider
from iconsdk.signed_transaction import SignedTransaction
from iconsdk.utils.convert_type import convert_hex_str_to_int
from iconsdk.wallet.wallet import KeyWallet
from iconservice.base.address import is_icon_address_valid
from tbears.command.command_server import CommandServer
from tbears.config.tbears_config import FN_CLI_CONF, tbears_cli_config, TBEARS_CLI_TAG
from tbears.tbears_exception import TBearsDeleteTreeException, TBearsCommandException
from tbears.util.arg_parser import uri_parser
from tbears.util.argparse_type import IconAddress, IconPath, non_negative_num_type
from tbears.util.transaction_logger import send_transaction_with_logger
class CommandScore(object):
    """tbears CLI command group for SCORE management: deploy, clear, test."""

    def __init__(self, subparsers):
        self._add_deploy_parser(subparsers)
        self._add_clear_parser(subparsers)
        self._add_test_parser(subparsers)

    @staticmethod
    def _add_deploy_parser(subparsers):
        parser = subparsers.add_parser('deploy', help='Deploy the SCORE', description='Deploy the SCORE')
        parser.add_argument('project', type=IconPath(), help='Project directory path or zip file path')
        parser.add_argument('-u', '--node-uri', dest='uri', help='URI of node (default: http://127.0.0.1:9000/api/v3)')
        parser.add_argument('-t', '--type', choices=['tbears', 'zip'], dest='contentType',
                            help='This option has been deprecated since v1.0.5. Deploy command supports zip type only')
        parser.add_argument('-m', '--mode', choices=['install', 'update'], help='Deploy mode (default: install)')
        # --from option only accept eoa address ('hx')
        parser.add_argument('-f', '--from', type=IconAddress('hx'), help='From address. i.e. SCORE owner address')
        # --to option is used only when update score, so eoa address ('hx') need to be denied
        parser.add_argument('-o', '--to', type=IconAddress('cx'), help='To address. i.e. SCORE address')
        # IconPath's 'r' argument means 'read file'
        parser.add_argument('-k', '--key-store', type=IconPath('r'), dest='keyStore',
                            help='Keystore file path. Used to generate "from" address and transaction signature')
        parser.add_argument('-n', '--nid', type=non_negative_num_type, help='Network ID')
        parser.add_argument('-p', '--password', help='keystore file\'s password', dest='password')
        parser.add_argument('-s', '--step-limit', dest='stepLimit', type=non_negative_num_type, help='Step limit')
        parser.add_argument('-c', '--config', type=IconPath(), help=f'deploy config path (default: {FN_CLI_CONF})')

    @staticmethod
    def _add_clear_parser(subparsers):
        subparsers.add_parser('clear', help='Clear all SCOREs deployed on tbears service',
                              description='Clear all SCOREs deployed on local tbears service')

    @staticmethod
    def _add_test_parser(subparsers):
        parser = subparsers.add_parser('test', help='Run the unittest in the project',
                                       description='Run the unittest in the project')
        parser.add_argument('project', type=IconPath('d'), help='Project directory path')

    def run(self, args):
        """Dispatch *args.command* to the matching method after loading config."""
        if not hasattr(self, args.command):
            raise TBearsCommandException(f"Invalid command {args.command}")

        # load configurations
        conf = self.get_icon_conf(args.command, args=vars(args))

        Logger.info(f"Run '{args.command}' command with config: {conf}", TBEARS_CLI_TAG)

        # run command
        return getattr(self, args.command)(conf)

    def deploy(self, conf: dict) -> dict:
        """Deploy SCORE on the server.

        :param conf: deploy command configuration
        :return: node response dict, or None when keystore loading failed
        """
        # check keystore, and get password from user's terminal input
        password = conf.get('password', None)
        password = self._check_deploy(conf, password)

        if conf['mode'] == 'install':
            # installation targets the zero SCORE address
            score_address = f'cx{"0"*40}'
        else:
            score_address = conf['to']

        uri, version = uri_parser(conf['uri'])
        icon_service = IconService(HTTPProvider(uri, version))

        if password:
            try:
                wallet = KeyWallet.load(conf['keyStore'], password)
                from_ = wallet.get_address()
            except KeyStoreException as e:
                print(e.args[0])
                return None
        else:
            # make dummy wallet
            wallet = KeyWallet.create()
            from_ = conf['from']

        # make zip and convert to hexadecimal string data (start with 0x) and return
        content = gen_deploy_data_content(conf['project'])

        deploy_transaction = DeployTransactionBuilder() \
            .from_(from_) \
            .to(score_address) \
            .nid(convert_hex_str_to_int(conf['nid'])) \
            .content_type("application/zip") \
            .content(content) \
            .params(conf.get('scoreParams', {})) \
            .build()

        if 'stepLimit' not in conf:
            # add a safety margin on top of the node's estimate
            step_limit = icon_service.estimate_step(deploy_transaction) + 10000
        else:
            step_limit = convert_hex_str_to_int(conf['stepLimit'])

        deploy_transaction.step_limit = step_limit

        # Returns the signed transaction object having a signature
        signed_transaction = SignedTransaction(deploy_transaction, wallet)

        if not password:
            # dummy wallet -> dummy signature (local tbears does not verify it)
            signed_transaction.signed_transaction_dict['signature'] = 'sig'

        # Sends transaction and return response
        response = send_transaction_with_logger(icon_service, signed_transaction, uri)

        if 'error' in response:
            print('Got an error response')
            print(json.dumps(response, indent=4))
        else:
            print('Send deploy request successfully.')
            tx_hash = response['result']
            print(f'If you want to check SCORE deployed successfully, execute txresult command')
            print(f"transaction hash: {tx_hash}")

        return response

    @staticmethod
    def clear(_conf: dict):
        """Clear all SCORE deployed on tbears service

        :param _conf: clear command configuration
        """
        # referenced data's path is /tmp/.tbears.env (temporary config data)
        score_dir_info = CommandServer.get_server_conf()

        if score_dir_info is None:
            raise TBearsDeleteTreeException("Already clean.")

        if CommandServer.is_service_running():
            raise TBearsCommandException(f'You must stop tbears service to clear SCORE')

        # delete whole score data
        try:
            if os.path.exists(score_dir_info['scoreRootPath']):
                shutil.rmtree(score_dir_info['scoreRootPath'])
            if os.path.exists(score_dir_info['stateDbRootPath']):
                shutil.rmtree(score_dir_info['stateDbRootPath'])
            # delete temporary config data (path: /tmp/.tbears.env);
            # the original repeated this call again after the try block —
            # once is sufficient
            CommandServer._delete_server_conf()
        except (PermissionError, NotADirectoryError) as e:
            raise TBearsDeleteTreeException(f"Can't delete SCORE files. {e}")

        print(f"Cleared SCORE deployed on tbears successfully")

    @staticmethod
    def test(conf: dict):
        """Discover and run the project's unittest suite."""
        loader = unittest.TestLoader()
        start_dir = conf['project']
        # discover relative to the project's parent so package imports work
        top_dir = os.path.abspath(os.path.join(start_dir, '..'))
        suite = loader.discover(start_dir, top_level_dir=top_dir)
        runner = unittest.TextTestRunner()
        runner.run(suite)

    @staticmethod
    def _check_deploy(conf: dict, password: str = None):
        """Check keystore presence, and get password from user's terminal input (not validate password)

        password is an optional parameter for unit tests purposes

        :param conf: command configuration
        :param password: password for unit tests (optional)
        :return: password for keystore file
        """
        # check if keystore exist. if exist, get password from user input
        if not conf['keyStore']:
            if not is_icon_address_valid(conf['from']):
                # fix: the original message was missing its closing quote
                raise TBearsCommandException(f"You entered invalid 'from' address '{conf['from']}'")
        else:
            if not password:
                password = getpass.getpass("Input your keystore password: ")

        # in case of update mode, validate -to option
        if conf['mode'] == 'update':
            if conf.get('to', None) is None:
                raise TBearsCommandException(f'If you want to update SCORE, set --to option')
            elif not is_icon_address_valid(conf['to']):
                raise TBearsCommandException(f"You entered invalid 'to' address '{conf['to']}'")

        # check project directory
        check_project(conf.get('project', ""))

        return password

    def check_command(self, command):
        """Return True when *command* names a method on this object."""
        return hasattr(self, command)

    @staticmethod
    def get_icon_conf(command: str, project: str = None, args: dict = None):
        """Load config file using IconConfig instance

        config file is loaded as below priority
        system config -> default config -> user config -> user input config (higher priority)

        :param command: command name (e.g. deploy)
        :param project: project name (in case of deploy)
        :param args: user input command (converted to dictionary type)
        :return: command configuration
        """
        # load default configurations
        conf = IconConfig(FN_CLI_CONF, copy.deepcopy(tbears_cli_config))
        # move command config
        if command in conf:
            conf.update_conf(conf[command])
            del conf[command]
        if project is not None:
            conf['project'] = project
        # load config file
        conf.load(config_path=args.get('config', None) if args else None)
        # move command config again: the loaded file may add its own section
        if command in conf:
            conf.update_conf(conf[command])
            del conf[command]
        # load user argument
        if args:
            conf.update_conf(args)

        return conf
def check_project(project_path: str) -> int:
    """Validate the layout of a SCORE project directory.

    Non-directory paths (e.g. zip files) are accepted as-is.  Directories
    must contain __init__.py, a well-formed package.json with 'version'
    and 'main_score', and the module file named by 'main_module' (or the
    legacy 'main_file').

    :raises TBearsCommandException: on any layout or format violation
    :return: 0 on success
    """
    if not os.path.isdir(project_path):
        # a zip file path passes through without inspection
        return 0

    # required top-level files
    for required in ('__init__.py', 'package.json'):
        if not os.path.exists(os.path.join(project_path, required)):
            raise TBearsCommandException(f'There is no {required} in project directory')

    with open(f"{project_path}/package.json", mode='r') as file:
        try:
            package: dict = json.load(file)
        except Exception as e:
            raise TBearsCommandException(f'package.json has wrong format. {e}')

    # mandatory keys
    if 'version' not in package or 'main_score' not in package:
        raise TBearsCommandException('package.json has wrong format.')

    # check the validity of main_module (falling back to legacy main_file)
    main_module = package.get('main_module')
    if not isinstance(main_module, str):
        try:
            # this will be deprecated soon
            main_module = package['main_file']
        except KeyError:
            raise TBearsCommandException('package.json should have main_module field.')

    if main_module.startswith('.') or '/' in main_module:
        raise TBearsCommandException(f'Invalid main_module field: {main_module}')

    main_file = main_module.replace('.', '/') + '.py'
    if not os.path.exists(f"{project_path}/{main_file}"):
        raise TBearsCommandException(f"There is no '{project_path}/{main_file}'")

    return 0
|
import reports
# Export functions
# Run every report once and export each result on its own line.
result1 = reports.get_most_played('game_stat.txt')
result2 = reports.sum_sold('game_stat.txt')
result3 = reports.get_selling_avg('game_stat.txt')
result4 = reports.count_longest_title('game_stat.txt')
result5 = reports.get_date_avg('game_stat.txt')
result6 = reports.get_game('game_stat.txt', "Counter-Strike")
result7 = reports.count_grouped_by_genre('game_stat.txt')
result8 = reports.get_date_ordered('game_stat.txt')
result_list = [result1, result2, result3,
               result4, result5, result6, result7, result8]
# "with" guarantees the file is flushed and closed even on error;
# the original left the handle open.
with open('export_statistics_report2.txt', "w") as file:
    for result in result_list:
        file.write(str(result))
        file.write("\n")
|
from torch.utils.data import Dataset
import os
from PIL import Image
class LSUNImageDataset(Dataset):
    """Dataset over LSUN-style webp images in fixed-size subdirectories.

    Expected layout: <img_dir>/<4-digit dir index>/<zero-padded name>.webp,
    where every subdirectory except possibly the (sorted) last one holds
    ``dir_size`` files.
    """

    def __init__(self, img_dir, dir_size=1000, transform=None):
        self.img_dir = img_dir
        self.dir_size = dir_size
        self.transform = transform
        subdirs = os.listdir(self.img_dir)
        self.n_subdirs = len(subdirs)
        # total = full subdirectories + whatever the last one actually holds
        last_dir = os.path.join(self.img_dir, sorted(subdirs)[-1])
        self._len = (self.n_subdirs - 1) * self.dir_size + len(os.listdir(last_dir))

    def _index_to_path(self, i):
        # e.g. i=5, dir_size=1000 -> <img_dir>/0000/0000005.webp
        subdir = str(i // self.dir_size).zfill(4)
        filename = ('%i.webp' % i).zfill(12)
        return os.path.join(self.img_dir, subdir, filename)

    def __getitem__(self, i):
        image = Image.open(self._index_to_path(i))
        return image if self.transform is None else self.transform(image)

    def __len__(self):
        return self._len
|
import pandas as pd
import matplotlib.pyplot as plt
df_schema = pd.read_csv('survey_results_schema.csv')
df_public = pd.read_csv('survey_results_public.csv',
                        usecols=['Respondent', 'YearsCodePro',
                                 'WorkWeekHrs', 'Gender'],
                        index_col='Respondent')
df_public.dropna(inplace=True)
# replace string values with numeric-parsable equivalents
df_public.replace(to_replace='Less than 1 year', value='0', inplace=True)
df_public.replace(to_replace='More than 50 years', value='50', inplace=True)
# delete unreal week hours (greater than 24 * 7)
df_public = df_public[df_public['WorkWeekHrs'] <= 168]
df_public['YearsCodePro'] = df_public['YearsCodePro'].astype(float)

df_public_man = df_public[df_public['Gender'] == 'Man']
df_public_woman = df_public[df_public['Gender'] == 'Woman']
# fix: the second comparison must be parenthesized — '&' binds tighter
# than '!=', so the original evaluated ((g != 'Man') & g) != 'Woman'
df_public_other = df_public[(df_public['Gender'] != 'Man') &
                            (df_public['Gender'] != 'Woman')]


def _scatter(frame, title):
    """Scatter plot of WorkWeekHrs against YearsCodePro for one subgroup."""
    plt.plot(frame['YearsCodePro'], frame['WorkWeekHrs'], 'ro', markersize=0.3)
    plt.xlabel('YearsCodePro')
    plt.ylabel('WorkWeekHrs')
    plt.title(title)
    plt.show()


# one plot per gender subgroup (the original repeated this block 4 times)
_scatter(df_public, 'All')
_scatter(df_public_man, 'Men')
_scatter(df_public_woman, 'Women')
_scatter(df_public_other, 'Other')
|
"""
app0.py illustrates use of pitaxcalc-demo release 2.0.0 (India version).
USAGE: python app0.py > app0.res
CHECK: Use your favorite Windows diff utility to confirm that app0.res is
the same as the app0.out file that is in the repository.
"""
from taxcalc import *
# create Records object containing pit.csv and pit_weights.csv input data
recs = Records()
assert isinstance(recs, Records)
assert recs.data_year == 2017
assert recs.current_year == 2017
# create Policy object containing current-law policy
pol = Policy()
assert isinstance(pol, Policy)
assert pol.current_year == 2017
# specify Calculator object for current-law policy
calc1 = Calculator(policy=pol, records=recs)
# NOTE: calc1 now contains a PRIVATE COPY of pol and a PRIVATE COPY of recs,
# so we can continue to use pol and recs in this script without any
# concern about side effects from Calculator method calls on calc1.
assert isinstance(calc1, Calculator)
assert calc1.current_year == 2017
calc1.calc_all()
# dump every usable input and computed variable, one row per record,
# with sorted columns so the output is diff-friendly (see app0.out)
dump_vars = Records.USABLE_READ_VARS | Records.CALCULATED_VARS
dumpdf = calc1.dataframe(list(dump_vars))
column_order = sorted(dumpdf.columns)
assert len(dumpdf.index) == calc1.array_len
dumpdf.to_csv('app0-dump.res', columns=column_order,
              index=False, float_format='%.0f')
"""
iMac:pitaxcalc-demo mrh$ python app0.py
iMac:pitaxcalc-demo mrh$ awk -F, '{print $1,$5,$10}' app0-dump.res
AGEGRP TTI pitax
0 230000 0
0 281000 1550
0 301000 2550
0 329000 3950
0 373000 6150
0 450000 10000
0 492000 12100
0 654000 43300
1 682000 46400
2 2269000 480700
"""
|
# Name:
# Date:
# proj01: A Simple Program
# This program asks the user for his/her name and age.
# Then, it prints a sentence that says when the user will turn 100.
# If you complete extensions, describe your extensions here!
# NOTE(review): Python 2 code (raw_input / print statements).
name = raw_input("Enter your name: ")
age = int(raw_input("Enter your age: "))
birthday = raw_input("Has your birthday happened this year? Enter Y or N: ")
# Pick the base year depending on whether this year's birthday has passed.
if birthday == "Y":
    year= 2017
else:
    year= 2016
answer=year
# Add one year per iteration until age 100 (range(age, 101) iterates
# 101-age times, so answer ends at year + 101 - age).
for number in range (int(age), 101):
    answer = answer + 1
# NOTE(review): the sentence prints age+1 but "answer" is computed as the
# year of turning 100 — these look inconsistent; confirm intended output.
print name, " will turn", int(age+1), "in the year ", int(answer), "."
print "done"
|
from server.util import Plugin
# Reloads the Python scripts
def admin_command_reloadscripts(player, playerCommand):
    """Admin command handler: reload all Python plugin scripts."""
    Plugin.load()
'''
้ข็ฎๆ่ฟฐ
็ป่ฎกไธไธชๆฐๅญๅจๆๅบๆฐ็ปไธญๅบ็ฐ็ๆฌกๆฐใ
'''
# -*- coding:utf-8 -*-
class Solution:
    """Count how many times a number appears in a sorted array."""

    def GetNumberOfK(self, data, k):
        """Return the number of occurrences of *k* in the sorted list *data*.

        Two pointers walk inward from both ends; once both sit on k the
        answer is the distance between them.  The meeting point (single
        remaining element) is handled explicitly.
        """
        lo, hi = 0, len(data) - 1
        while lo <= hi:
            if data[lo] == k and data[hi] == k:
                break
            if data[lo] < k:
                lo += 1
            if data[hi] > k:
                hi -= 1
            if lo == hi:
                return 1 if data[lo] == k else 0
        return hi - lo + 1
import os
import datetime, pytz
import pandas as pd
import capnp
from scipy.optimize import curve_fit
from datetime import datetime, timedelta
from xbos import get_client
from xbos.services.mdal import *
from xbos.services.pundat import DataClient, make_dataframe
from xbos.services.hod import HodClient
from sklearn.utils import shuffle
from weather_model import predict_day as predict_weather
from dateutil import rrule
from datetime import datetime, timedelta
# data clients (XBOS archiver and Brick metadata services)
mdal = MDALClient("xbos/mdal")
hod = HodClient("xbos/hod")
# site (building) whose Brick model all the queries below run against
SITE = "ciee"
# Brick queries
# all HVAC zones at the site
zone_query = """SELECT ?zone FROM %s WHERE {
?zone rdf:type brick:HVAC_Zone .
};"""
# uuid of the thermostat status stream feeding a given zone
thermostat_state_query = """SELECT ?tstat_state_uuid FROM %s WHERE {
?tstat rdf:type brick:Thermostat .
?tstat bf:controls/bf:feeds+ <%s> .
?tstat bf:hasPoint ?state .
?state rdf:type brick:Thermostat_Status .
?state bf:uuid ?tstat_state_uuid
};"""
# uuid of the thermostat temperature sensor for a given zone
thermostat_temp_query = """SELECT ?tstat_temp_uuid FROM %s WHERE {
?tstat rdf:type brick:Thermostat .
?tstat bf:controls/bf:feeds+ <%s> .
?tstat bf:hasPoint ?temp .
?temp rdf:type brick:Temperature_Sensor .
?temp bf:uuid ?tstat_temp_uuid .
};"""
# uuid of the site's outside-air temperature sensor
weather_temp_query = """SELECT ?weather_temp_uuid FROM %s WHERE {
?temp rdf:type brick:Weather_Temperature_Sensor .
?temp bf:uuid ?weather_temp_uuid .
};"""
# if state is 1 we are doing heating
def f1(row):
    """Indicator (1/0): the thermostat action column means 'heating'."""
    return 1 if 0 < row['a'] <= 1 else 0
# if state is 2 we are doing cooling
def f2(row):
    """Indicator (1/0): the thermostat action column means 'cooling'."""
    return 1 if 1 < row['a'] <= 3 else 0
# WE ARE NOT LEARNING VENTILATION RIGHT NOW
# $T^{IN}_{t+1}= c_1 * a^{H} * T^{IN}_{t} + c_2 * a^{C} * T^{IN}_{t} + c_3 * T^{IN}_{t}$
def func(X, c1, c2, c3, c4):
    """Linear thermal model: predict the next inside temperature.

    X = (inside temp, heating action, cooling action, outside temp);
    c1..c4 are the fitted coefficients.
    """
    t_in, heat_a, cool_a, t_out = X
    return c1 * heat_a * t_in + c2 * cool_a * t_in + c3 * t_in + c4 * (t_out - t_in)
def next_temperature(popt, Tin, Tout, action):
    """Advance the thermal model one step, quantized to 1/400 of a degree.

    action: 1 = heating, 2 = cooling, anything else = off.
    """
    if action == 1:
        heat, cool = 1, 0
    elif action == 2:
        heat, cool = 0, 1
    else:
        heat, cool = 0, 0
    return round(func([Tin, heat, cool, Tout], *popt) * 400) / 400.0
def execute_schedule(day, sched, popt, initial_temperature):
"""
sched is a list of (hsp, csp) setpoints at 30m intervals
"""
output = []
actions = []
prev_temp = initial_temperature
weather = predict_weather(day)
print len(sched), len(weather)
for idx, epoch in enumerate(sched):
if prev_temp < epoch[0]: # hsp
next_temp = next_temperature(popt, prev_temp, 1, weather[idx]) # 1 is heat
actions.append(1)
elif prev_temp > epoch[1]: # csp
next_temp = next_temperature(popt, prev_temp, 2, weather[idx]) # 2 is cool
actions.append(2)
else:
next_temp = next_temperature(popt, prev_temp, 0, weather[idx]) # 0 is off
actions.append(0)
print prev_temp, weather[idx], actions[-1], next_temp
output.append(next_temp)
prev_temp = next_temp
return output, actions
def get_model_per_zone(targetday = "2018-02-01 00:00:00 PST"):
    """Fit the linear thermal model (func) for every HVAC zone at SITE.

    Trains on 30 days of 30-minute data ending the day before *targetday*
    and returns {zone uri: fitted coefficients for func()}.
    NOTE(review): Python 2 code (print statements); pandas .as_matrix()
    is long-deprecated — confirm the runtime environment.
    """
    zones = [x['?zone']['Namespace']+'#'+x['?zone']['Value'] for x in hod.do_query(zone_query % SITE, values_only=False)['Rows']]
    #targetday = "2018-02-01 00:00:00 PST"
    targetday = datetime.strptime(targetday, "%Y-%m-%d %H:%M:%S %Z")
    targetday = pytz.timezone('US/Pacific').localize(targetday)
    # training window: from 30 days back up to 1 day before targetday
    T0 = (targetday - timedelta(days=1)).strftime("%Y-%m-%d %H:%M:%S %Z")
    T1 = (targetday - timedelta(days=30)).strftime("%Y-%m-%d %H:%M:%S %Z")
    # TODO: configure the training to go back in time until it has 50 cooling *and* 50 heating actions
    ret = {}
    for zone in zones:
        print thermostat_state_query % (SITE, zone)
        # inside temp (mean), thermostat state (max) and outside temp
        # (mean), aligned on 30-minute windows
        tstat_data_query = {
            "Composition": ["tstat_sensor", "tstat_state","weather"],
            "Selectors": [MEAN, MAX, MEAN],
            "Variables": [
                {
                    "Name": "tstat_sensor",
                    "Definition": thermostat_temp_query % (SITE, zone),
                    "Units": "F",
                },
                {
                    "Name": "tstat_state",
                    "Definition": thermostat_state_query % (SITE, zone),
                },
                {
                    "Name": "weather",
                    "Definition": weather_temp_query % (SITE),
                },
            ],
            "Time": {
                "T0": T0, "T1": T1,
                "WindowSize": '30m',
                "Aligned": True,
            }
        }
        resp = mdal.do_query(tstat_data_query, timeout=120)
        if resp.get('error'):
            print resp['error']
            continue
        df = resp['df']
        # skip zones where one of the three streams is missing
        if len(df.columns) < 3:
            continue
        df = df[df.columns[:3]]
        df.columns = ['tin','a','toutside'] # inside temperature, action, outside temperature
        # column for heating
        df['heat_a'] = df.apply(f1, axis=1)
        # column for cooling
        df['cool_a'] = df.apply(f2, axis=1)
        # pad temperatures to fill holes (zeros are treated as missing)
        df['tin'] = df['tin'].replace(to_replace=0, method='pad')
        df['toutside'] = df['toutside'].replace(to_replace=0, method='pad')
        # shift inside temperature to get the next timestamp's temperature
        df['temp_next'] = df['tin'].shift(-1)
        df=df.dropna()
        print df.describe()
        # shuffle rows before fitting so curve_fit sees well-mixed samples
        thermal_data = shuffle(df)
        popt, pcov = curve_fit(func, thermal_data[['tin','heat_a','cool_a','toutside']].T.as_matrix(), thermal_data['temp_next'].as_matrix())
        print popt
        ret[zone] = popt
    return ret
# Setpoint schedules: one (heating_setpoint, cooling_setpoint) pair per
# 30-minute slot, 48 slots per day.
# start at midnight
normal_schedule = [
    # midnight - 8:00am (setback: wide deadband)
    (50, 90),(50, 90), (50, 90),(50, 90), (50, 90),(50, 90), (50, 90),(50, 90), (50, 90),(50, 90), (50, 90),(50, 90), (50, 90),(50, 90), (50, 90),(50, 90),
    # 8:00am - 4:00pm (occupied comfort band)
    (70, 74),(70, 74),(70, 74),(70, 74),(70, 74),(70, 74),(70, 74),(70, 74),(70, 74),(70, 74),(70, 74),(70, 74),(70, 74),(70, 74),(70, 74),(70, 74),
    # 4:00pm - 6:00pm
    (70, 74),(70, 74),(70, 74),(70, 74),
    # 6:00pm - 12:00am
    (50, 90),(50, 90),(50, 90),(50, 90),(50, 90),(50, 90),(50, 90),(50, 90),(50, 90),(50, 90),(50, 90),(50, 90)
]
# Demand-response variant: pre-cool before the DR event, then relax.
dr_schedule = [
    # midnight - 8:00am
    (50, 90),(50, 90), (50, 90),(50, 90), (50, 90),(50, 90), (50, 90),(50, 90), (50, 90),(50, 90), (50, 90),(50, 90), (50, 90),(50, 90), (50, 90),(50, 90),
    # 8:00am - 12:00pm
    (70, 74),(70, 74),(70, 74),(70, 74),(70, 74),(70, 74),(70, 74),(70, 74),
    # 12:00am - 3:00pm (precool)
    (60, 64),(60, 64),(60, 64),(60, 64),(60, 64),(60, 64),
    # 3:00pm - 6:00pm (dr event)
    (70, 85),(70, 85),(70, 85),(70, 85),(70, 85),(70, 85),
    # 6:00pm - 12:00am
    (50, 90),(50, 90),(50, 90),(50, 90),(50, 90),(50, 90),(50, 90),(50, 90),(50, 90),(50, 90),(50, 90),(50, 90)
]
if __name__ == "__main__":
    # fit per-zone models on data before this date, then simulate one day
    # under the normal schedule
    models = get_model_per_zone("2017-10-06 00:00:00 PST") # don't use data after this argument
    for zone, model in models.items():
        print zone
        temperatures, actions = execute_schedule("2017-10-06 00:00:00 PST", normal_schedule, model, 60) # starting temperature (60 F; stale comment used to say 70)
        print model
        print "Temp:", temperatures
        print "HVAC:", actions
|
import math
import numpy as np
from config import Config
from core.logger import TensorBoardLogger
from core.util import get_output_folder
class Trainer:
    """Drives the agent/environment interaction loop, logging, and checkpoints."""

    def __init__(self, agent, env, config: Config):
        self.agent = agent
        self.env = env
        self.config = config

        self.outputdir = get_output_folder(self.config.output, self.config.env)
        self.agent.save_config(self.outputdir)
        self.board_logger = TensorBoardLogger(self.outputdir)
        print(self.env.action_space.low, self.env.action_space.high)

    def train(self, pre_fr=0):
        """Run frames pre_fr+1 .. config.frames.

        Saves 'best' when the 100-episode average reward clears
        config.win_reward, 'tmp' on new running bests, and 'last' when
        the environment was never solved.
        """
        t = 0
        all_rewards = []
        tmp_reward = 0
        episode_reward = 0
        ep_num = 0
        is_win = False

        state = self.env.reset()
        for fr in range(pre_fr + 1, self.config.frames + 1):
            t += 1
            # self.env.render()
            action = self.agent.act(state)

            # exploration: additive Gaussian noise clipped to the action bounds
            action = action + np.random.normal(0, self.config.exploration_noise, size=self.env.action_space.shape[0])
            action = action.clip(self.env.action_space.low, self.env.action_space.high)

            next_state, reward, done, _ = self.env.step(action)
            self.agent.buffer.add(state, action, reward, next_state, float(done))
            state = next_state
            episode_reward += reward

            # guard on all_rewards: these intervals can fire before the
            # first episode finishes; the original crashed on
            # all_rewards[-1] (and np.mean([]) printed nan)
            if fr % self.config.print_interval == 0 and all_rewards:
                print("frames: %5d, reward: %5f, episode: %4d" % (fr, np.mean(all_rewards[-10:]), ep_num))

            if fr % self.config.log_interval == 0 and all_rewards:
                self.board_logger.scalar_summary('Reward per episode', ep_num, all_rewards[-1])

            if self.config.checkpoint and fr % self.config.checkpoint_interval == 0:
                self.agent.save_checkpoint(fr, self.outputdir)

            if done or t == (self.config.max_timesteps):
                self.agent.learning(fr, t)
                t = 0
                state = self.env.reset()
                all_rewards.append(episode_reward)
                episode_reward = 0
                ep_num += 1

                avg_reward = float(np.mean(all_rewards[-100:]))
                self.board_logger.scalar_summary('Best 100-episodes average reward', ep_num, avg_reward)

                if len(all_rewards) >= 100 and avg_reward >= self.config.win_reward and all_rewards[-1] > self.config.win_reward:
                    is_win = True
                    self.agent.save_model(self.outputdir, 'best')
                    print('Ran %d episodes best 100-episodes average reward is %3f. Solved after %d trials โ' % (ep_num, avg_reward, ep_num - 100))
                    if self.config.win_break:
                        break
                elif len(all_rewards) >= 100 and avg_reward > tmp_reward:
                    tmp_reward = avg_reward
                    self.agent.save_model(self.outputdir, 'tmp')
                    print('Ran %d episodes tmp 100-episodes average reward is %3f. tmp Solved after %d trials' % (ep_num, avg_reward, ep_num - 100))

        if not is_win:
            print('Did not solve after %d episodes' % ep_num)
            self.agent.save_model(self.outputdir, 'last')
|
from kafka import KafkaConsumer

# To consume latest messages and auto-commit offsets
consumer = KafkaConsumer('my-topic',
                         group_id='my-group',
                         bootstrap_servers=['localhost:9092'])
# blocks indefinitely, yielding records as they arrive
for message in consumer:
    # message value and key are raw bytes -- decode if necessary!
    # e.g., for unicode: `message.value.decode('utf-8')`
    print("{}:{}:{}: key={} value={}".format(
        message.topic, message.partition,
        message.offset, message.key, message.value
    ))

# Alternative consumer configurations, kept for reference:
##KafkaConsumer(auto_offset_reset='earliest', enable_auto_commit=False)
##
### consume json messages
##KafkaConsumer(value_deserializer=lambda m: json.loads(m.decode('ascii')))
##
### consume msgpack
##KafkaConsumer(value_deserializer=msgpack.unpackb)
|
class StatusBytes:
    """Maps MIDI status/type bytes to human-readable event names."""
    # meta-event type bytes (the byte following 0xFF in a meta event)
    meta = {0: 'Sequence Number', 1: 'Text', 2: 'Copyright', 3: 'Sequence / Track Name', 4: 'Instrument Name',
            5: 'Lyric', 6: 'Marker', 7: 'Cue Point', 8: 'Program Name', 9: 'Device Name', 32: 'MIDI Channel Prefix',
            33: 'MIDI Port', 47: 'End of Track', 81: 'Tempo', 84: 'SMPTE Offset', 88: 'Time Signature',
            89: 'Key Signature', 127: 'Sequencer Specific Event'}
    # system-exclusive status bytes (0xF0 / 0xF7)
    sysex = {240: 'Single (complete) SysEx messages', 247: 'Escape sequences'}
    # channel voice message status bytes (channel 0 values, 0x80..0xE0)
    midi = {128: 'Note off', 144: 'Note on', 160: 'Key pressure', 176: 'Control change', 192: 'Program change',
            208: 'Channel pressure', 224: 'Pitch wheel change'}
|
from django.shortcuts import render
from django.template.loader import get_template
def main(request):
    """Render the site index page with an empty template context."""
    context = {}
    return render(request, 'index.html', context)
|
#!/usr/bin/env python
# coding: utf-8
import json
from sklearn_crfsuite import CRF
import numpy as np
from scipy.stats import entropy
from nltk import word_tokenize, pos_tag
import random
import pickle
import os
from bs4 import BeautifulSoup
from bs4 import Tag
from collections import Counter
from flask import Flask
from flask import request
from jinja2 import Template
import pandas as pd
def get_sentences(text):
    """Split *text* on newlines, returning stripped, non-empty lines."""
    return [line.strip() for line in text.split('\n') if line.strip()]
def is_alpha_and_numeric(string):
    """Coarse character-class label for a token.

    Returns one of DIGIT, ALPHA_UPPER, ALPHA_LOWER, ALPHA, ALPHA_NUM,
    EMPTY, or '' for the empty string.  Mixed tokens are judged by their
    first and last characters only (a deliberate heuristic).
    """
    if string.isdigit():
        return 'DIGIT'
    if string.isalpha():
        if string.isupper():
            return 'ALPHA_UPPER'
        if string.islower():
            return 'ALPHA_LOWER'
        return 'ALPHA'
    if len(string) > 0:
        # +1 per digit, -1 per letter, over the first and last characters;
        # a balance of zero is called alphanumeric
        balance = 0
        for ch in (string[0], string[-1]):
            if ch.isdigit():
                balance += 1
            elif ch.isalpha():
                balance -= 1
        return 'ALPHA_NUM' if balance == 0 else 'EMPTY'
    return ''


def word2features(sent, i):
    """
    Calculate features for each word in the sentence
    :param sent: List of (token, postag, ...) tuples for the sentence
    :param i: i'th word in the sentence
    :return: feature dict for sklearn-crfsuite
    """
    word, postag = sent[i][0], sent[i][1]
    features = {
        'bias': 1.0,
        'word.lower()': word.lower(),
        'word[-3:]': word[-3:],
        'word[-2:]': word[-2:],
        'word.isupper()': word.isupper(),
        'word.istitle()': word.istitle(),
        'word.isdigit()': word.isdigit(),
        'word.is_alphanum': is_alpha_and_numeric(word),
        'postag': postag,
    }

    def neighbour(offset):
        # features of the token at i+offset, keyed with a '+k:'/'-k:' prefix
        w, p = sent[i + offset][0], sent[i + offset][1]
        sign = '%+d' % offset
        return {
            sign + ':word.lower()': w.lower(),
            sign + ':word[-3:]': w[-3:],
            sign + ':word[-2:]': w[-2:],
            sign + ':word.istitle()': w.istitle(),
            sign + ':word.isupper()': w.isupper(),
            sign + ':postag': p,
            sign + ':word.is_alphanum': is_alpha_and_numeric(w),
        }

    if i > 0:
        features.update(neighbour(-1))
    else:
        features['BOS'] = True
    if i > 1:
        features.update(neighbour(-2))
    if i < len(sent) - 1:
        features.update(neighbour(1))
    else:
        features['EOS'] = True
    if i < len(sent) - 2:
        features.update(neighbour(2))
    return features
def sent2features(sent):
    """Feature dicts for every token position of *sent*."""
    return [word2features(sent, position) for position, _ in enumerate(sent)]
def sent2labels(sent):
    """Extract the gold tags from a (token, postag, label) sequence."""
    return [tag for _token, _postag, tag in sent]
def sent2tokens(sent):
    """Extract just the surface tokens from a (token, postag, label) sequence."""
    return [tok for tok, _postag, _tag in sent]
def add_prediction_to_postagged_data(postagged, prediction):
    """
    Merge predicted tags onto pos-tagged tokens.

    :param postagged: list of (token, pos_tag[, ...]) tuples
    :param prediction: list of predicted tags, one per token
    :return: list of (token, pos_tag, predicted_tag) tuples

    Rewritten from an index loop to the zip idiom. NOTE: zip truncates at
    the shorter sequence; callers always pass equal-length inputs.
    """
    return [(item[0], item[1], pred) for item, pred in zip(postagged, prediction)]
def get_prediction_uncertainity(pred, mode='max'):
    """
    Aggregate per-token entropy of a marginal prediction into one score.

    :param pred: list of {tag: probability} dicts, one per token
    :param mode: 'max' (default) for the worst token, 'mean' for the average
    :return: 0 for an empty prediction, otherwise the aggregated entropy
    :raises ValueError: for an unknown mode (the original silently returned None)
    """
    if len(pred) == 0:
        return 0
    uncertainties = [entropy(list(tok.values())) for tok in pred]
    if mode == 'max':
        return max(uncertainties)
    if mode == 'mean':
        return sum(uncertainties) / len(uncertainties)
    raise ValueError("unknown mode: %r (expected 'max' or 'mean')" % (mode,))
def find_entities_in_text(text, model, utmapping):
    """
    Run the CRF over *text* and collect entities from its BILOU predictions.

    :param text: raw input string
    :param model: trained CRF exposing predict_single
    :param utmapping: dict mapping tag ids (e.g. 'CT') to readable entity names
    :return: list of {'value': entity text, 'entity': readable name} dicts
    """
    text = get_pos_tagged_example(text)
    features = sent2features(text)
    prediction = model.predict_single(features)
    # Pair each surface token with its predicted BILOU tag.
    lst = zip([t[0] for t in text], prediction)
    curr_ent = 'O'
    ent_toks = []  # tokens of the entity currently being assembled
    entities = []
    for item in lst:
        text = item[0]  # NOTE: shadows the outer `text`; from here it is one token
        tag = item[1]
        if tag.startswith('B-'):
            # A new entity begins: flush any entity still being assembled.
            if len(ent_toks) > 0:
                entities.append({
                    'value': ' '.join(ent_toks),
                    'entity': utmapping[curr_ent],
                })
                ent_toks = []
            curr_ent = tag[2:]
            ent_toks.append(text)
        elif tag.startswith('I-'):
            # Inside continuation; ignore a stray I- with no open entity.
            if curr_ent == 'O':
                continue
            ent_toks.append(text)
        elif tag.startswith('L-'):
            # Last token: close and emit the open entity.
            if curr_ent == 'O':
                continue
            ent_toks.append(text)
            entities.append({
                'value': ' '.join(ent_toks),
                'entity': utmapping[curr_ent],
            })
            ent_toks = []
        elif tag.startswith('U-'):
            # Unit-length entity.
            # NOTE(review): any partially assembled ent_toks are discarded
            # here rather than flushed, and curr_ent stays set afterwards,
            # so a following stray I- token would attach to this entity --
            # confirm this is intended.
            curr_ent = tag[2:]
            ent_toks = []
            entities.append({
                'value': text,
                'entity': utmapping[curr_ent],
            })
        elif tag.startswith('O'):
            # Outside any entity: flush whatever was being assembled.
            if len(ent_toks) > 0:
                entities.append({
                    'value': ' '.join(ent_toks),
                    'entity': utmapping[curr_ent],
                })
                ent_toks = []
            curr_ent = 'O'
    # Flush an entity left open at the end of the sequence.
    if len(ent_toks) > 0:
        entities.append({
            'value': ' '.join(ent_toks),
            'entity': utmapping[curr_ent],
        })
    return entities
class BaseNerTagger:
    """
    A utility class for NER Tagging.

    All examples live in ``self.dataset``; each entry is
    ``{'status': 'UNLABELLED'|'LABELLED', 'data': {'raw': [...], 'features': [...]}}``
    where 'raw' is a list of (token, pos[, tag]) tuples.
    """

    def __init__(self, unlabelled, labelled=None, data_directory=''):
        """
        Initialize with a list of unlabelled strings and/or list of tagged tuples.
        :param unlabelled: list of strings
        :param labelled: list of {list of tuples [(token, pos_tag, tag), ...]}
        :param data_directory: base directory; tagged data is stored under
            ``<data_directory>/NER_Data``
        """
        if unlabelled is None:
            unlabelled = []
        else:
            # POS-tag every raw string once up front.
            unlabelled = [
                {'raw': get_pos_tagged_example(text)} for text in unlabelled]
        if labelled is None:
            labelled = []
        self.dataset = []
        for ex in unlabelled:
            self.dataset.append({
                'status': 'UNLABELLED',
                'data': ex
            })
        for ex in labelled:
            self.dataset.append({
                'status': 'LABELLED',
                'data': ex
            })
        self.model = None
        self.data_directory = os.path.join(data_directory, 'NER_Data')
        os.makedirs(self.data_directory, exist_ok=True)

    def get_unlabelled_indices(self):
        """Indices into self.dataset of examples still awaiting labels."""
        return [index for index, ex in enumerate(self.dataset) if ex['status'] == 'UNLABELLED']

    def get_new_random_example(self):
        """
        Returns a random example to be tagged. Used to bootstrap the model.
        :return: {'example_id': dataset index, 'example': pos-tagged tokens}
        """
        unlabelled_set = self.get_unlabelled_indices()
        current_example_index = random.choice(unlabelled_set)
        current_example = self.dataset[current_example_index]['data']
        return {
            'example_id': current_example_index,
            'example': current_example['raw']
        }

    def get_new_random_predicted_example(self):
        """
        Returns a random example tagged by the currently trained model.
        :return: {'example_id': ..., 'example': (token, pos, predicted_tag) list}
        """
        unlabelled_set = self.get_unlabelled_indices()
        current_example_index = random.choice(unlabelled_set)
        current_example = self.dataset[current_example_index]['data']
        raw = current_example['raw']
        features = sent2features(raw)
        preds = self.model.predict_single(features)
        toret = add_prediction_to_postagged_data(raw, preds)
        return {
            'example_id': current_example_index,
            'example': toret
        }

    def query_new_example(self, mode='max'):
        """
        Returns a new example based on the chosen active learning strategy.
        :param mode: Active Learning Strategy
            - max (Default)
            - mean
        :return: the sampled unlabelled example the model is most uncertain about
        """
        unlabelled_set = self.get_unlabelled_indices()
        # NOTE: random.choices samples WITH replacement, so fewer than 250
        # distinct candidates may be scored.
        sample = random.choices(unlabelled_set, k=250)
        X = []
        for s in sample:
            example = self.dataset[s]['data']
            if 'features' not in example:
                # Cache features so repeated queries stay cheap.
                example['features'] = sent2features(example['raw'])
            X.append(example['features'])
        preds = self.model.predict_marginals(X)
        uncertainities = [get_prediction_uncertainity(
            pred, mode) for pred in preds]
        index = np.argmax(uncertainities)
        current_example_index = sample[index]
        current_example = self.dataset[current_example_index]['data']
        raw = current_example['raw']
        features = current_example['features']
        preds = self.model.predict_single(features)
        toret = add_prediction_to_postagged_data(raw, preds)
        return {
            'example_id': current_example_index,
            'example': toret
        }

    def update_model(self):
        """
        Updates (or first trains) the CRF model with the labelled dataset.
        :return: None
        """
        if self.model is None:
            self.model = CRF(
                algorithm='lbfgs',
                c1=0.1,
                c2=0.1,
                max_iterations=100,
                all_possible_transitions=True
            )
        labelled = [item['data']
                    for item in self.dataset if item['status'] == 'LABELLED']
        # BUGFIX: labelled examples supplied to __init__ or loaded from disk
        # may not carry cached features yet; compute them on demand instead
        # of raising KeyError.
        for item in labelled:
            if 'features' not in item:
                item['features'] = sent2features(item['raw'])
        X = [item['features'] for item in labelled]
        Y = [sent2labels(item['raw']) for item in labelled]
        self.model.fit(X, Y)

    def save_example(self, example_id, data):
        """
        Saves the user-tagged labels onto the stored example.
        :param example_id: dataset index the labels belong to
        :param data: User tagged data. [list of (token, tag) pairs]
        :return: False when the tag count does not match the stored token
            count, True on success.
        """
        current_example = self.dataset[example_id]['data']
        if len(data) != len(current_example['raw']):
            return False
        raw = current_example['raw']
        retagged = [(raw[index][0], raw[index][1], data[index][1])
                    for index in range(len(data))]
        current_example['raw'] = retagged
        current_example['features'] = sent2features(retagged)
        self.dataset[example_id]['status'] = 'LABELLED'
        return True

    def save_data(self, filepath=None):
        """
        Saves the labelled data to a file
        :param filepath: file to save the data in a pickle format.
        :return: None
        """
        if filepath is None:
            filepath = os.path.join(
                self.data_directory, 'ner_tagged_data.pickle')
        # BUGFIX: previously pickled ``self.labelled``, an attribute that was
        # never created; persist the labelled slice of self.dataset instead.
        labelled = [item['data']
                    for item in self.dataset if item['status'] == 'LABELLED']
        with open(filepath, 'wb') as out:
            pickle.dump(labelled, out)

    def load_data(self, filepath=None):
        """
        Loads labelled data from file and appends it to the dataset.
        :param filepath: file containing pickled labelled dataset; defaults
            to the same location save_data writes to.
        :return: None
        """
        if filepath is None:
            filepath = os.path.join(
                self.data_directory, 'ner_tagged_data.pickle')
        with open(filepath, 'rb') as inp:
            labelled = pickle.load(inp)
        # BUGFIX: previously stored into the unused ``self.labelled``; merge
        # into self.dataset so the rest of the class can see the examples.
        for lab in labelled:
            lab['features'] = sent2features(lab['raw'])
            self.dataset.append({
                'status': 'LABELLED',
                'data': lab
            })

    def add_unlabelled_examples(self, examples):
        """
        Append more unlabelled data to dataset
        :param examples: List of strings
        :return: None
        """
        # BUGFIX: previously extended the nonexistent ``self.unlabelled``;
        # append to self.dataset with UNLABELLED status instead.
        for text in examples:
            self.dataset.append({
                'status': 'UNLABELLED',
                'data': {'raw': get_pos_tagged_example(text)}
            })
def is_a_tag(span):
    """True when the element carries a data-tag attribute (i.e. is labelled)."""
    return 'data-tag' in span.attrs
def get_bilou_tags_from_html(html):
    """
    Reconstruct (token, BILOU-tag) pairs from the tagging UI's HTML.

    Tagged tokens are <span>/<br> elements carrying data-tag (the class) and
    data-tag-id (shared by all tokens of one entity); untagged tokens map to 'O'.
    :param html: HTML fragment posted back by the browser
    :return: list of (token_text, bilou_tag) tuples
    """
    soup = BeautifulSoup(html, 'html.parser')
    toret = []
    tag_items = soup.find_all(['span', 'br'], attrs={'data-tag': True})
    # return tag_items
    # Tokens-per-entity counts decide the B/I/L/U placement below.
    tag_ids = [item.attrs['data-tag-id'] for item in tag_items]
    counter = Counter(tag_ids)
    items = soup.find_all(['span', 'br'])
    max_items = len(items)
    index = 0
    # assumes all tokens of one entity are contiguous in document order --
    # TODO confirm the UI guarantees this
    while index < max_items:
        item = items[index]
        if is_a_tag(item):
            tag_id = item.attrs['data-tag-id']
            tag = item.attrs['data-tag']
            size = counter[tag_id]
            if size == 1:
                # Single-token entity -> Unit tag.
                toret.append((item.text, f'U-{tag}'))
                index += 1
            elif size == 2:
                # Two tokens -> Begin + Last.
                toret.append((item.text, f'B-{tag}'))
                toret.append((items[index + 1].text, f'L-{tag}'))
                index += 2
            else:
                # Three or more -> Begin, Inside..., Last.
                toret.append((item.text, f'B-{tag}'))
                for i in range(size - 2):
                    toret.append((items[index + i + 1].text, f'I-{tag}'))
                toret.append((items[index + size - 1].text, f'L-{tag}'))
                index += size
        else:
            # Untagged token -> Outside.
            toret.append((item.text, 'O'))
            index += 1
    return toret
def generate_html_from_example(ex):
    """
    Render a (possibly tagged) example as HTML for the tagging UI.

    :param ex: either a dict with a 'raw' key, or a list of
        (token, pos) / (token, pos, tag) tuples
    :return: HTML string; newline tokens become <br>, every other token a
        <span>. BILOU-tagged tokens carry data-tag / data-tag-id / class
        attributes; tokens of the same entity share one tag id.
    """
    if isinstance(ex, dict):  # idiom fix: was ``type(ex) == type({})``
        ex = ex['raw']
    spans = []
    for item in ex:
        if item[0] == '\n':
            tag = Tag(name='br', can_be_empty_element=True)
        else:
            tag = Tag(name='span')
            tag.insert(0, item[0])
        spans.append(tag)
    # Guard against an empty example (the original indexed ex[0] blindly).
    if ex and len(ex[0]) == 3:
        tagidcounter = 0
        for i, entry in enumerate(ex):
            bilou = entry[2]
            if bilou[0] in ('B', 'I'):
                label = bilou[2:]
                spans[i].attrs['data-tag-id'] = tagidcounter
                spans[i].attrs['data-tag'] = label
                spans[i].attrs['class'] = label
            elif bilou[0] in ('L', 'U'):
                label = bilou[2:]
                spans[i].attrs['data-tag-id'] = tagidcounter
                spans[i].attrs['data-tag'] = label
                spans[i].attrs['class'] = label
                # Entity complete; the next entity gets a fresh id.
                tagidcounter += 1
            # 'O' tokens get no attributes.
    soup = BeautifulSoup()
    soup.extend(spans)
    return str(soup)
# 20 visually distinct colours used to style entity classes in the UI;
# render_app_template refuses tag sets larger than this palette.
list_of_colors = "#e6194B, #3cb44b, #ffe119, #4363d8, #f58231, #911eb4, #42d4f4, #f032e6, #bfef45, #fabebe, #469990, #e6beff, #9A6324, #fffac8, #800000, #aaffc3, #808000, #ffd8b1, #000075, #a9a9a9"
list_of_colors = list_of_colors.split(', ')
def render_app_template(unique_tags_data):
    """
    Render the tagging UI template for the given tag set.

    Tag data in the form
    [
        (tag_id, readable_tag_name)
    ]
    :param unique_tags_data: list of (id, title) pairs
    :return: rendered HTML, or an error string when there are more tags
        than available colours
    """
    if len(unique_tags_data) > len(list_of_colors):
        return "Too many tags. Add more colors to list_of_colors"
    trainer_path = os.path.join(
        os.path.dirname(__file__), 'html_templates', 'ner_trainer.html.j2')
    with open(trainer_path) as templ:
        template = Template(templ.read())
    # Pair each tag id with a colour from the palette, in order.
    css_classes = [(tag[0], colour)
                   for tag, colour in zip(unique_tags_data, list_of_colors)]
    return template.render(css_classes=css_classes,
                           id_color_map=css_classes,
                           tag_controls=unique_tags_data)
def get_app(ntagger, tags):
    """
    Build the Flask app exposing the tagging UI for *ntagger*.

    :param ntagger: BaseNerTagger holding the dataset and model
    :param tags: list of (tag_id, readable_name) pairs for the UI controls
    :return: configured Flask application
    """
    app = Flask(__name__)

    @app.route("/")
    def base_app():
        # Main page: the tagging UI rendered with the configured tag set.
        return render_app_template(tags)

    @app.route('/load_example')
    def load_example():
        # Before a model exists, serve random examples to bootstrap labelling;
        # afterwards use the active-learning query strategy.
        if ntagger.model is None:
            example = ntagger.get_new_random_example()
        else:
            example = ntagger.query_new_example(mode='max')
        return {
            'example_id': example['example_id'],
            'example_html': generate_html_from_example(example['example'])
        }

    @app.route('/update_model')
    def update_model():
        # (Re)train the CRF on everything labelled so far.
        ntagger.update_model()
        return "Model Updated Successfully"

    @app.route('/save_example', methods=['POST'])
    def save_example():
        # The browser posts back the (possibly re-tagged) HTML for one example.
        form_data = request.form
        html = form_data['html']
        example_id = int(form_data['example_id'])
        user_tags = get_bilou_tags_from_html(html)
        ntagger.save_example(example_id, user_tags)
        return 'Success'

    @app.route('/save_data')
    def save_tagged_data():
        # Persist the labelled dataset to the tagger's data directory.
        print("save_tagged_data")
        ntagger.save_data()
        return 'Data Saved'

    return app
def get_pos_tagged_example(text):
    """
    Sentence-split, tokenize and POS-tag *text*.

    :param text: raw string
    :return: list of (token, pos_tag) tuples; a literal newline token
        separates sentences
    """
    sents = get_sentences(text)
    tokens = []
    for index, sent in enumerate(sents):
        # Insert a newline pseudo-token between sentences.
        # NOTE(review): the ``index < len(sents) - 1`` clause means no
        # separator is emitted before the LAST sentence -- looks unintended;
        # confirm.
        if index > 0 and index < len(sents) - 1:
            tokens.append('\n')
        tokens.extend(word_tokenize(sent))
    # tokens = word_tokenize(text)
    toret = pos_tag(tokens)
    return toret
class NerTagger:
    """User-facing facade wiring a BaseNerTagger to the Flask tagging UI."""

    def __init__(self, dataset, unique_tags, data_directory='', multiuser=False):
        """
        need unique tag, tag titles
        EX:
            tags = [
                ("CT", "Course Title"),
                ("CC", "Course Code"),
                ("PREQ", "Pre-requisites"),
                ("PROF", "Professor"),
                ("SE", "Season"),
                ("CR", "Credits")
            ]
        :param unique_tags: list of (tag_id, readable_title) pairs
        """
        self.unique_tags = unique_tags
        # tag id -> readable name, used when extracting entities.
        self.utmapping = {tag_id: title for tag_id, title in self.unique_tags}
        self.ntagger = BaseNerTagger(dataset, data_directory=data_directory)
        self.app = get_app(self.ntagger, self.unique_tags)

    def start_server(self, host=None, port=None):
        """
        Start the ner tagging server.
        :param host: interface to bind to
        :param port: Port number to bind the server to.
        """
        self.app.run(host, port)

    def add_unlabelled_examples(self, examples):
        """Append unlabelled examples (list of strings) to the dataset."""
        self.ntagger.add_unlabelled_examples(examples)

    def save_labelled_examples(self, filepath):
        """Save labelled examples to *filepath*."""
        self.ntagger.save_data(filepath)

    def load_labelled_examples(self, filepath):
        """Load labelled examples from *filepath* into the dataset."""
        self.ntagger.load_data(filepath)

    def save_model(self, model_filename):
        """Pickle the trained ner model to *model_filename*."""
        with open(model_filename, 'wb') as sink:
            pickle.dump(self.ntagger.model, sink)

    def load_model(self, model_filename):
        """Unpickle a previously saved ner model from *model_filename*."""
        with open(model_filename, 'rb') as source:
            self.ntagger.model = pickle.load(source)

    def update_model(self):
        """Retrain the underlying model on everything labelled so far."""
        self.ntagger.update_model()

    def find_entities_in_text(self, text):
        """Run the current model over *text* and return extracted entities."""
        return find_entities_in_text(text, self.ntagger.model, self.utmapping)
if __name__ == '__main__':
    # Unique Tags / Classes
    # (tag_id, human-readable title) pairs for the course-catalogue demo.
    tags = [
        ("CT", "Course Title"),
        ("CC", "Course Code"),
        ("PREQ", "Pre-requisites"),
        ("PROF", "Professor"),
        ("SE", "Season"),
        ("CR", "Credits")
    ]
|
import pandas as pd
import numpy as np
import re
from sklearn.preprocessing import MultiLabelBinarizer
# Fixed feature column set the downstream model expects; run_process pads
# missing columns with zeros and reindexes to this (sorted) set.
our_features = ['budget', 'vote_count', 'runtime', 'Year', 'Friday', 'Monday', 'Saturday',
                'Sunday', 'Thursday', 'Tuesday', 'Wednesday', '20th Century Fox',
                'Columbia Pictures', 'Metro', 'New Line Cinema', 'Paramount',
                'Universal Pictures', 'Warner Bros', 'Action', 'Adventure', 'Animation',
                'Comedy', 'Crime', 'Documentary', 'Drama', 'Family', 'Fantasy', 'History',
                'Horror', 'Music', 'Mystery', 'Romance', 'Science Fiction', 'Thriller',
                'War', 'en', 'holiday_month', 'based on novel or book', 'murder', 'woman director']
def load_data(_data):
    """Drop unused columns and derive date parts (mutates *_data* in place).

    Also silences pandas' chained-assignment warning globally.
    :param _data: raw movies DataFrame with a 'release_date' column
    :return: the same DataFrame with Month/Year/Weekday columns added
    """
    pd.options.mode.chained_assignment = None
    _data.drop(columns=["id", "homepage", "spoken_languages", "tagline"], inplace=True)
    release = _data['release_date'].astype('datetime64[ns]')
    _data['release_date'] = release
    _data['Month'] = release.dt.month
    _data['Year'] = release.dt.year
    _data['Weekday'] = release.dt.day_name()
    return _data
def filter_json(st):
    """Pull every value following ``name': '`` out of a stringified JSON blob.

    :param st: cell content (any type; stringified for the length guard)
    :return: list of extracted names, or "" when too short / no matches
    """
    if len(str(st)) <= 10:
        return ""
    matches = re.findall(r"(?<=name': ')[\w ]+", st)
    return matches if matches else ""
def filter_json_directors(st):
    """Extract crew names whose entry is department Directing / job Director.

    :param st: stringified crew list
    :return: list of director names, or "" when too short / no matches
    """
    if len(str(st)) <= 10:
        return ""
    directors = re.findall(r"(?<=name': ')[\w ]+(?=', 'department': 'Directing', 'job': 'Director')", st)
    return directors if directors else ""
def filter_json_producer(st):
    """Extract crew names credited as Producer or Executive Producer.

    :param st: stringified crew list
    :return: combined list of names, or "" when too short / no matches
    """
    if len(str(st)) <= 10:
        return ""
    producers = re.findall(
        r"(?<=name': ')[\w ]+(?=', 'department': 'Production', 'job': 'Producer')", st)
    executives = re.findall(
        r"(?<=name': ')[\w ]+(?=', 'department': 'Production', 'job': 'Executive)", st)
    combined = producers + executives
    # BUGFIX: the original returned "" whenever the plain-Producer list was
    # empty, silently dropping executive producers found by the second pattern.
    return combined if combined else ""
def reg_fixing(_data):
    """Convert the raw JSON-ish text columns into lists of names (in place).

    :param _data: DataFrame with stringified JSON columns
    :return: the same DataFrame with parsed columns plus directors/producers
    """
    for column in ("genres", "production_companies", "keywords",
                   "production_countries", "cast"):
        _data[column] = _data[column].apply(filter_json)
    _data["directors"] = _data["crew"].apply(filter_json_directors)
    _data["producers"] = _data["crew"].apply(filter_json_producer)
    return _data
def turn_to_dummies(_data):
    """
    Multi-hot / one-hot encode the list- and category-valued columns.

    :param _data: DataFrame produced by reg_fixing (genres etc. are lists)
    :return: DataFrame of dummy features plus a 'holiday_month' indicator
    """
    mlb = MultiLabelBinarizer()
    genres = pd.DataFrame(mlb.fit_transform(_data["genres"]), columns=mlb.classes_, index=_data.index)
    production_companies = pd.DataFrame(mlb.fit_transform(_data["production_companies"]), columns=mlb.classes_,
                                        index=_data.index)
    keywords = pd.DataFrame(mlb.fit_transform(_data["keywords"]), columns=mlb.classes_, index=_data.index)
    lang = pd.get_dummies(_data["original_language"])
    month = pd.get_dummies(_data["Month"])
    # Align on _data's index so the concat below cannot misplace rows.
    holiday_month = pd.DataFrame(index=_data.index)
    holiday_month["holiday_month"] = np.zeros(len(_data))
    # Months with major holidays / blockbuster season.
    # BUGFIX: the original tested ``str(i) in month.columns`` while the
    # get_dummies columns are numeric (Month comes from ``dt.month``), so the
    # indicator could never be set; compare with the numeric column label.
    for i in (5, 6, 7, 11, 12):
        if i in month.columns:
            holiday_month["holiday_month"] += month[i]
    Weekday = pd.get_dummies(_data["Weekday"])
    ready_data = pd.concat([Weekday, production_companies, genres, lang, holiday_month, keywords], axis=1)
    return ready_data
def run_process(_data):
    """
    Full feature-engineering pipeline: clean, parse, dummy-encode, align.

    :param _data: raw movies DataFrame
    :return: DataFrame aligned to the model's expected (sorted) column set
    """
    _data = load_data(_data)
    _data = reg_fixing(_data)
    dummy = turn_to_dummies(_data)
    # Keep the numeric base features alongside the dummy columns.
    clean_data = _data[["budget", "vote_count", "runtime", "Year"]]
    ready_data = pd.concat([clean_data, dummy], axis=1)
    ready_data = ready_data.dropna()
    # Add all-zero columns for any expected feature absent from this batch.
    # NOTE(review): empty_df is built with a fresh RangeIndex while ready_data
    # keeps its original index after dropna; pd.concat(axis=1) aligns on the
    # index, so rows can misalign / introduce NaNs -- confirm indices match.
    missing_cols = sorted(set(our_features) - set(ready_data.columns))
    empty_df = pd.DataFrame(np.zeros((len(ready_data), len(missing_cols))), columns=missing_cols)
    ready_data = pd.concat([ready_data, empty_df], axis=1)
    ready_data = ready_data.reindex(sorted(ready_data.columns), axis=1)
    return ready_data
|
from flask import Flask, session, g, render_template
from flask_sqlalchemy import SQLAlchemy
from flask_wtf.csrf import CSRFProtect,generate_csrf
from redis import StrictRedis
from flask_session import Session
from config import config_dict
import logging
from logging.handlers import RotatingFileHandler
from info.utils.common import do_index_class, get_user_data
# ๅชๆฏ็ณๆไบdbๅฏน่ฑก่ๅทฒ๏ผๅนถๆฒกๆๅ็ๅฎ็ๆฐๆฎๅบๅๅงๅๆไฝ
db = SQLAlchemy()
# ๅฐredisๆฐๆฎๅบๅฏน่ฑก็ณๆๆๅ
จๅฑๅ้
# # type:StrictRedis ๆๅ็ณๆredis_storeๆฐๆฎ็ฑปๅ
redis_store = None # type:StrictRedis
def write_log(config_class):
    """Configure logging: base level plus a rotating file handler."""
    # Set the log record level (DEBUG while developing).
    logging.basicConfig(level=config_class.LOG_LEVEL)
    # Rotating file handler: path ./logs/log, at most 100 MB per file,
    # keeping up to 10 backup files when a file fills up.
    file_log_handler = RotatingFileHandler("./logs/log", maxBytes=1024 * 1024 * 100, backupCount=10)
    # Record format: level, source file name and line number, then the message.
    formatter = logging.Formatter('%(levelname)s %(filename)s:%(lineno)d %(message)s')
    # Apply the format to the newly created handler.
    file_log_handler.setFormatter(formatter)
    # Attach the handler to the global (root) logger the Flask app uses.
    logging.getLogger().addHandler(file_log_handler)
# Wrap app creation in a factory so external callers get a single entry point.
# development --- returns an app object configured for development mode
# production --- returns an app object configured for production (online) mode
def creat_app(config_name):
    """
    Factory that builds and configures the Flask app.

    :param config_name: key into config_dict ('development' or 'production')
    :return: configured Flask app object
    """
    # 1. Create the app object.
    app = Flask(__name__)
    # Look up the configuration class for the requested mode:
    # DevelopmentConfig ---> development-mode app
    # ProductionConfig  ---> production (online) mode app
    config_class = config_dict[config_name]
    app.config.from_object(config_class)
    # Set up logging.
    write_log(config_class)
    # 2. Initialise the MySQL database object.
    # db = SQLAlchemy(app)
    # Lazy initialisation: the real setup runs once the app object exists.
    db.init_app(app)
    # 3. Create the Redis connection (stored in the module-level global).
    global redis_store
    redis_store = StrictRedis(host=config_class.REDIS_HOST, port=config_class.REDIS_PORT, decode_responses=True)
    """
    redis_store.set("age", 18)  ----> stored in redis db 0
    session["name"] = "laowang" ----> stored in redis db 1
    """
    # 4. Server-side CSRF protection. Mechanism:
    #   1. read the csrf_token value from the cookie
    #   2. read the csrf_token value from the form (or the X-CSRFToken header)
    #   3. compare the two values for equality
    # CSRFProtect(app)
    #
    # @app.after_request
    def set_csrf_token(response):
        csrf_token = generate_csrf()
        # BUGFIX: this previously ASSIGNED a tuple to the attribute
        # (``response.set_cookie = ("crsk_token", csrf_token)``) instead of
        # calling the method, and misspelled the cookie name; the CSRF check
        # expects a cookie named "csrf_token".
        response.set_cookie("csrf_token", csrf_token)
        return response

    @app.errorhandler(404)
    @get_user_data
    def handler_404(err):
        # Render the custom 404 page with the current user's info (if any).
        user = g.user
        data = {
            "user_info": user.to_dict() if user else None
        }
        return render_template('news/404.html', data=data)

    # Register the custom template filter.
    app.add_template_filter(do_index_class, "do_index_class")
    # 5. Move flask.session storage into Redis via flask_session.
    Session(app)
    # 6. Register blueprints. The imports are deferred into the factory so
    # they only happen when actually needed, avoiding circular imports.
    from info.modules.index import index_bp
    app.register_blueprint(index_bp)
    # Login / registration module.
    from info.modules.passport import passport_bp
    app.register_blueprint(passport_bp)
    # News module.
    from info.modules.news import news_bp
    app.register_blueprint(news_bp)
    # Personal profile module.
    from info.modules.profile import profile_bp
    app.register_blueprint(profile_bp)
    return app
|
"""
Think Python
Exercise 9.2
Sol by : Nitin Kumar
Date 14 June 2009
Note : In 1939 Ernest Vincent Wright published a 50,000 word novel called Gadsby that does
not contain the letter โe.โ Since โeโ is the most common letter in English, thatโs not easy to do.
In fact, it is difficult to construct a solitary thought without using that most common symbol. It is
slow going at first, but with caution and hours of training you can gradually gain facility.
All right, Iโll stop now.
Write a function called has_no_e that returns True if the given word doesnโt have the letter โeโ in
it.
Modify your program from the previous section to print only the words that have no โeโ and compute
the percentage of the words in the list have no โe.โ
"""
def has_no_e(word):
    """Return True when *word* contains no letter 'e'."""
    return 'e' not in word
# Counters: total words read and words printed (those without an 'e').
Twords = 0
Pwords = 0
# words.txt: one word per line (the word list used by Think Python).
fin = open('words.txt')
for line in fin:
    word = line.strip()
    Twords = Twords + 1
    if has_no_e(word):
        # Python 2 print statement: show each e-less word.
        print word
        Pwords = Pwords + 1
# Force float division (Python 2 '/' truncates on ints).
Twords = Twords * 1.0
per = (Pwords/Twords) * 100
print "Total Words = " , str(Twords)
print "Printed Words = " , str(Pwords)
print "Percentage = " , str(per)
|
import sys, os
# path = os.path.abspath(os.path.join('..'))
# sys.path.append(path)
from feature import *
from lr import *
from tqdm import tqdm
import matplotlib.pyplot as plt
from scipy.sparse import csr_matrix
# Raw dataset splits (TSV).
train_in = "./handout/largedata/train_data.tsv"
val_in = "./handout/largedata/valid_data.tsv"
test_in = "./handout/largedata/test_data.tsv"
# Feature-formatted outputs produced by extract_features1.
formatted_train_out = "formatted_train_out.tsv"
formatted_val_out = "formatted_val_out.tsv"
formatted_test_out = "formatted_test_out.tsv"
# Vocabulary file -- presumably word-to-index mapping; verify against feature.py.
dict_in = "./handout/dict.txt"
# One-off feature extraction; commented out after the files were generated.
# extract_features1(train_in, formatted_train_out, dict_in)
# extract_features1(val_in, formatted_val_out, dict_in)
# extract_features1(test_in, formatted_test_out, dict_in)
formatted_train_in = "./formatted_train_out.tsv"
formatted_val_in = "./formatted_val_out.tsv"
formatted_test_in = "./formatted_test_out.tsv"
# Number of full SGD passes over the training data.
num_epoch = 5000
def sigmoid(x):
    """Numerically stable logistic sigmoid.

    Computes exp(-logaddexp(0, -x)), which equals 1 / (1 + exp(-x)) but
    avoids the overflow RuntimeWarning the naive form emits for large
    negative x (exp(-x) -> inf).
    :param x: scalar or ndarray
    :return: sigmoid(x), same shape as x
    """
    return np.exp(-np.logaddexp(0, -x))
def sparse_dot(x1, x2):
    """Dot product of two 1-D vectors.

    Replaces the original pure-Python loop (which skipped zero entries one
    element at a time) with a single vectorized np.dot: same sum of products,
    computed in C instead of O(n) Python-level iterations.
    :param x1: array-like, squeezed to 1-D
    :param x2: array-like, squeezed to 1-D (same length as x1)
    :return: scalar dot product
    """
    x1 = np.squeeze(x1)
    x2 = np.squeeze(x2)
    return np.dot(x1, x2)
def SGD(X, Y, theta, N):
    """Gradient contribution of one example (X, Y) to the logistic loss,
    averaged over the N training examples.

    :param X: feature vector of one example
    :param Y: its 0/1 label
    :param theta: current parameter vector
    :param N: total number of training examples
    :return: gradient vector for this example
    """
    residual = sigmoid(sparse_dot(X, theta)) - Y
    # temp = np.expand_dims(temp, axis=1)
    # dJ = np.mean(dJs, axis=0)
    return residual * X / N
def train_plot(alpha):
    """
    Train logistic regression by per-example SGD, tracking losses per epoch.

    :param alpha: learning rate
    :return: (theta, predicted training labels, per-epoch training losses,
        per-epoch validation losses)
    """
    data_train = read(formatted_train_in)
    # Column 0 is the label; the remaining columns are the features.
    Y, X = data_train[:, 0], data_train[:, 1:]
    # X = csr_matrix(X)
    num_data = X.shape[0]
    theta = np.zeros(X.shape[1])
    data_val = read(formatted_val_in)
    Y_val, X_val = data_val[:, 0], data_val[:, 1:]
    Js_train = []
    Js_val =[]
    for ep in tqdm(range(num_epoch)):
        # print(ep, calcJ(X, Y, theta))
        for i in range(num_data):
            # One SGD step per training example.
            dJ = SGD(X[i], Y[i], theta, num_data)
            theta = np.add(theta, - np.dot(alpha, dJ))
        # for y, x in zip(Y, X):
        #     dJ = SGD(x, y, theta, num_data)
        #     theta = theta - alpha * dJ
        # Record train/validation loss once per epoch.
        Js_train.append(calcJ(X, Y, theta))
        Js_val.append(calcJ(X_val, Y_val, theta))
    train_pred_labels = predict(X, theta)
    return theta, train_pred_labels, Js_train, Js_val
# Learning rate for the SGD run below.
alpha = 0.01
# Module-level training run: num_epoch (5000) full passes -- long-running.
theta1_001, train_pred_labels1_001, Js_train1_001, Js_val1_001 = train_plot(alpha)
#!/usr/bin/env python
import simplejson
import unittest
from table import ColumnQuery, JunctionQuery, MultiJunctionQuery, NotQuery
class JsonParser:
    """Turn a JSON-encoded prefix query tree into table query objects.

    Trees look like ``["and", ["=", "a", 2], ["not", ["<=", "b", 3]]]``.
    """

    # Junction keywords (lower-case) -> JunctionQuery operators.
    JUNCTION_OP_MAP = {
        'and': JunctionQuery.OP_AND,
        'or': JunctionQuery.OP_OR
    }
    # Comparison symbols -> ColumnQuery comparison types.
    COLUMN_OP_MAP = {
        '=': ColumnQuery.TYPE_EQ,
        '!=': ColumnQuery.TYPE_NEQ,
        '>=': ColumnQuery.TYPE_GTE,
        '<=': ColumnQuery.TYPE_LTE
    }

    def parse(self, string):
        """Parse a JSON string and return the root query object."""
        obj = simplejson.loads(string)
        return self.visit(obj)

    def visit_junction(self, op, args):
        """AND/OR node over two or more sub-queries."""
        subqs = [self.visit(a) for a in args]
        return MultiJunctionQuery(subqs, self.JUNCTION_OP_MAP[op.lower()])

    def visit_not(self, arg):
        """Negation node wrapping a single sub-query."""
        return NotQuery(self.visit(arg))

    def visit_comparison(self, op, column, value):
        """Leaf comparison: column <op> value."""
        return ColumnQuery(column, value, self.COLUMN_OP_MAP[op])

    def visit_in(self, column, vals):
        """'in' sugar: expands to an OR over equality comparisons."""
        queries = [ColumnQuery(column, val) for val in vals]
        return MultiJunctionQuery(queries, JunctionQuery.OP_OR)

    def visit(self, obj):
        """Dispatch one parse-tree node to the matching visit_* method.

        BUGFIX/modernisation: replaced dict.has_key() (removed in Python 3)
        with the equivalent ``in`` operator, which also works under Python 2.
        """
        if len(obj) >= 3 and obj[0] in self.JUNCTION_OP_MAP:
            return self.visit_junction(obj[0], obj[1:])
        elif len(obj) == 3 and obj[0] in self.COLUMN_OP_MAP:
            return self.visit_comparison(*obj)
        elif len(obj) == 3 and obj[0] == 'in':
            return self.visit_in(obj[1], obj[2])
        elif len(obj) == 2 and obj[0] == 'not':
            return self.visit_not(obj[1])
        else:
            raise Exception("bad parse tree element: " + repr(obj))
class JsonParserTestCase(unittest.TestCase):
    """Round-trip tests: JSON query text -> parser -> str(query)."""

    def setUp(self):
        self.parser = JsonParser()

    def check(self, json, out):
        """Assert that parsing *json* yields a query whose str() equals *out*."""
        parsed = self.parser.parse(json)
        self.assertEqual(str(parsed), out)

    def testEqual(self):
        self.check('["=", "a", 2]', "(a = 2)")

    def testAnd(self):
        self.check('["and",["=", "a", 2],["!=", "b", "c"]]',
                   '((a = 2) AND (b != c))')

    def testIn(self):
        self.check('["in", "a", [1,2,3]]',
                   "(((a = 1) OR (a = 2)) OR (a = 3))")

    def testNot(self):
        self.check('["not", ["<=", "a", 2]]',
                   "(NOT (a <= 2))")
if __name__ == '__main__':
    # Run only the parser tests when executed directly.
    unittest.main(defaultTest = "JsonParserTestCase")
|
## change these paths accordingly
background_image_fp = 'background.jpg'
sprite_image_fp = 'ornament.png'
import pygame
from pygame.locals import *
from sys import exit
import time
pygame.init()
# 200x200 window, hardware surface + double buffering, 32-bit colour.
screen = pygame.display.set_mode((200, 200), HWSURFACE | DOUBLEBUF, 32)
pygame.display.set_caption("Horizontal Spraight Movement")
## convert the image to the display's format
background_img = pygame.image.load(background_image_fp).convert()
sprite_img = pygame.image.load(sprite_image_fp)
sprite_x = 0        # current horizontal position of the sprite
x_delta = 10        # pixels moved per frame
sleep_time = 0.5    # seconds between frames
## move sprite_img x_delta every sleep_time seconds
# Synthetic space-key event posted every frame (keeps the event queue busy).
my_event = pygame.event.Event(KEYDOWN, key=K_SPACE, mod=0, unicode=u' ')
while True:
    pygame.event.post(my_event)
    for event in pygame.event.get():
        if event.type == QUIT:
            # Release the images and shut the display down before exiting.
            del background_img
            del sprite_img
            pygame.display.quit()
            exit(0)
    # Redraw background, then the sprite at its new position.
    screen.blit(background_img, (0, 0))
    screen.blit(sprite_img, (sprite_x, 80))
    sprite_x += x_delta
    if sprite_x > 200.0: sprite_x -= 210.0  # wrap around past the right edge
    pygame.display.update()
    time.sleep(sleep_time)
|
import cv2
import os
import numpy as np
import pointImg
import skeletonization
class data_generation():
    """Generate training image crops centred on skeleton points of a raw image."""

    def __init__(self, raw_img):
        # raw_img: source image array (as loaded by cv2.imread)
        self.raw_img = raw_img

    def get_data(self, tag):
        """
        Crop a patch around every skeleton centroid and write it under data/f/.

        :param tag: running offset used to number the output files
        :return: number of centroids (patches) produced
        """
        sk = skeletonization.skeletonization(self.raw_img)
        # Boolean skeleton mask -> 0/255 uint8 image.
        skeleton_img = sk.get_skeleton().astype(np.uint8)*255
        pI = pointImg.pointImg(skeleton_img)
        _, all_centroids = pI.ske2point(30)
        all_centroids = pI.optimization(self.raw_img, all_centroids)
        for i in range(len(all_centroids)):
            # Half-size of the square crop; clamped at the image borders.
            bbox_size =50
            x_min, x_max, y_min, y_max = max(0, all_centroids[i][0] - bbox_size), \
                min(self.raw_img.shape[0], all_centroids[i][0] + bbox_size), \
                max(0, all_centroids[i][1] - bbox_size), \
                min(self.raw_img.shape[1], all_centroids[i][1] + bbox_size)
            bbox_poly = self.raw_img[x_min:x_max, y_min:y_max].astype(np.uint8)
            # File name is the zero-padded global index (tag offset + i).
            cv2.imwrite(os.path.join("data/f/"+format(str(i+tag), "0>4s")+".png"), bbox_poly)
        return len(all_centroids)

    def make_data(self, data_path):
        """
        Build a shuffled DataLoader over the image folder at *data_path*.

        NOTE(review): torchvision / transforms / torch are referenced here but
        never imported in this file -- this method raises NameError as written.
        """
        img_data = torchvision.datasets.ImageFolder(data_path,
                                                    transform=transforms.Compose([
                                                        transforms.Resize((32, 32)),
                                                        transforms.ToTensor()]))
        print(len(img_data))
        data_loader = torch.utils.data.DataLoader(img_data, batch_size=20,shuffle=True)
if __name__ == "__main__":
'''folder_path = "data/raw_data/"
folder_dir = os.listdir(folder_path)
tag = 0
for file in folder_dir:
img_path = os.path.join(folder_path,file)
raw_img = cv2.imread(img_path)
dg = data_generation(raw_img)
num_img = dg.get_data(tag)
tag += num_img'''
raw_img = None
dg = data_generation(raw_img)
dg.make_data("data/nn_data/")
|
"""
"""
import math
"""
"""
def box_area( box=None ):
    """Area of an axis-aligned box given as [x1, y1, x2, y2]."""
    width = box[2] - box[0]
    height = box[3] - box[1]
    return width * height
"""
"""
def box_overlap_area( box1, box2 ):
    """Intersection area of two [x1, y1, x2, y2] boxes (0 when disjoint)."""
    x_overlap = min(box1[2], box2[2]) - max(box1[0], box2[0])
    y_overlap = min(box1[3], box2[3]) - max(box1[1], box2[1])
    if x_overlap <= 0 or y_overlap <= 0:
        return 0
    return x_overlap * y_overlap
"""
"""
def get_min_max( boxes=None, source_scale=None ):
    """Bounding [x1, y1, x2, y2] enclosing all boxes; the full source frame
    when *boxes* is empty."""
    width, height = source_scale
    if not boxes:
        return [0, 0, width, height]
    coords = [width, height, 0, 0]
    for box in boxes:
        coords[0] = min(coords[0], box[0])
        coords[1] = min(coords[1], box[1])
        coords[2] = max(coords[2], box[2])
        coords[3] = max(coords[3], box[3])
    return coords
"""
"""
def get_weighted_mean( boxes=None, source_scale=None ):
    """Element-wise mean box (int-truncated); the full source frame when
    *boxes* is empty.

    NOTE(review): despite the name this is a plain (unweighted) mean.
    """
    width, height = source_scale
    if not boxes:
        return [0, 0, width, height]
    sums = [0, 0, 0, 0]
    for box in boxes:
        for k in range(4):
            sums[k] += box[k]
    size = len(boxes)
    return [int(total / size) for total in sums]
"""
"""
def get_box_centre( box ):
    """Integer (x, y) centre of an [x1, y1, x2, y2] box."""
    half_w = float(box[2] - box[0]) / 2
    half_h = float(box[3] - box[1]) / 2
    return (int(box[0] + half_w), int(box[1] + half_h))
"""
"""
def distance( a, b ):
    """Euclidean distance between 2-D points *a* and *b*."""
    dx = a[0] - b[0]
    dy = a[1] - b[1]
    return math.sqrt(dx * dx + dy * dy)
"""
"""
def box_move( box=None, displacement=None, subtract=False, max_size=None ):
    """Translate *box* in place by *displacement* (negated when *subtract*
    is True); optionally clamp to *max_size* via box_constrain."""
    sign = -1 if subtract else 1
    dx = sign * displacement[0]
    dy = sign * displacement[1]
    box[0] += dx
    box[1] += dy
    box[2] += dx
    box[3] += dy
    if max_size is not None:
        box_constrain( box=box, max_size=max_size )
def boxes_move( boxes=None, displacement=None, subtract=False, max_size=None ):
    """Apply box_move to every box in *boxes* (each mutated in place)."""
    for single in boxes:
        box_move(box=single, displacement=displacement,
                 subtract=subtract, max_size=max_size)
"""
"""
def box_centre_move( box=None, centre=None, step=1, max_size=None ):
    """Shift *box* in place so its centre moves 1/step of the way toward
    *centre*; clamp to *max_size* when given."""
    current = get_box_centre( box )
    dx = int( float( centre[0] - current[0] ) / step )
    dy = int( float( centre[1] - current[1] ) / step )
    for k in (0, 2):
        box[k] += dx
    for k in (1, 3):
        box[k] += dy
    if max_size is not None:
        box_constrain( box=box, max_size=max_size )
"""
"""
def box_constrain( box=None, max_size=None ):
    """Clamp *box* in place to [0, width] x [0, height]: shift it back inside
    the frame, shrinking when it cannot fit; coordinates written back as ints."""
    x1, y1, x2, y2 = box
    width, height = max_size
    if x1 < 0:
        # Push right, capping the far edge at the frame width.
        x2 = min(x2 - x1, width)
        x1 = 0
    if x2 > width:
        x1 = max(x1 - (x2 - width), 0)
        x2 = width
    if y1 < 0:
        y2 = min(y2 - y1, height)
        y1 = 0
    if y2 > height:
        y1 = max(y1 - (y2 - height), 0)
        y2 = height
    box[:] = [int(x1), int(y1), int(x2), int(y2)]
|
class Film:
    """Film record plus instrumented bubble/heap sorts.

    The counters are CLASS attributes: they accumulate across calls and
    instances until reset externally.
    """
    bubble_sort_comparison_counter = 0
    bubble_sort_swap_counter = 0
    heap_sort_comparison_counter = 0
    heap_sort_swap_counter = 0

    def __init__(self, name, runtime_in_min, num_of_responses):
        self.name = name
        self.runtime_in_min = runtime_in_min
        self.num_of_responses = num_of_responses

    def __str__(self):
        # key=value pairs for every instance attribute, newline-terminated.
        return ' '.join(['{key}={value}'.format(key=key, value=self.__dict__.get(key)) for key in self.__dict__]) + '\n'
        # return f"Film: [name: {self.name}, duration_in_min: {self.duration_in_min}, num_of_responses: {self.num_of_responses}"

    @staticmethod
    def bubble_sort_by_num_of_responses_descending(objects):
        """Bubble sort *objects* descending by num_of_responses (in place).

        NOTE(review): the comparison counter is only incremented when the
        comparison succeeds (inside the if), so total comparisons are
        undercounted, and the swap counter adds 2 per swap (apparently
        counting both assignments) -- confirm this accounting is intended.
        """
        n = len(objects)
        for i in range(n - 1):
            for j in range(0, n - i - 1):
                if objects[j].num_of_responses < objects[j + 1].num_of_responses:
                    Film.bubble_sort_comparison_counter += 1
                    objects[j], objects[j + 1] = objects[j + 1], objects[j]
                    Film.bubble_sort_swap_counter += 2
        print([an_object.num_of_responses for an_object in objects])
        return objects

    @staticmethod
    def heap_sort_runtime_in_min_acs(objects):
        """Heap sort *objects* ascending (in place), counting work.

        NOTE(review): despite the name, the sort key is num_of_responses,
        not runtime_in_min -- confirm which is intended. The child indices
        (l = 2*i, r = 2*i + 1) apply the 1-based heap convention to a
        0-based list; the resulting tree is lopsided at the root (node 0's
        only real child is node 1) but the root still holds the maximum,
        so the sort is correct.
        """
        n = len(objects)

        def compare_element_from_heapsort(first_element, second_element):
            # Count every key comparison performed during heapify.
            Film.heap_sort_comparison_counter += 1
            if first_element < second_element:
                return True
            else:
                return False

        def heapify(objects, n, i):
            # Sift objects[i] down within the first n elements.
            largest = i
            l = 2 * i
            r = 2 * i + 1
            if l < n and compare_element_from_heapsort(objects[i].num_of_responses, objects[l].num_of_responses):
                largest = l
            if r < n and compare_element_from_heapsort(objects[largest].num_of_responses, objects[r].num_of_responses):
                largest = r
            if largest != i:
                objects[i], objects[largest] = objects[largest], objects[i]  # swap
                Film.heap_sort_swap_counter += 1
                heapify(objects, n, largest)

        # Build the max-heap, then repeatedly move the max to the end.
        for i in range(n, -1, -1):
            heapify(objects, n, i)
        for i in range(n - 1, 0, -1):
            objects[i], objects[0] = objects[0], objects[i]  # swap
            Film.heap_sort_swap_counter += 1
            heapify(objects, i, 0)
        return objects
|
# Generated by Django 2.0 on 2017-12-25 20:26
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema for the erp app: Branch, Cart, Employee, Inventory,
    Item and Transaction, plus the foreign keys between them.
    """

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Branch',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('branch_ID', models.CharField(max_length=16)),
                ('location', models.CharField(max_length=32)),
            ],
        ),
        migrations.CreateModel(
            name='Cart',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('qty', models.IntegerField(verbose_name='Quantity Sold')),
            ],
        ),
        migrations.CreateModel(
            name='Employee',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=64, verbose_name='Employee Name')),
                ('employee_ID', models.CharField(max_length=16)),
                ('dob', models.DateTimeField(verbose_name='Date Of Birth')),
                ('doj', models.DateTimeField(verbose_name='Date Of Joining')),
                ('designation', models.CharField(choices=[('SE', 'Sales Executive'), ('MGR', 'Manager')], max_length=3)),
                ('basic_salary', models.IntegerField(default=0)),
            ],
        ),
        migrations.CreateModel(
            name='Inventory',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('qty', models.IntegerField(verbose_name='Quantity Left')),
                ('branch', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='erp.Branch')),
            ],
        ),
        migrations.CreateModel(
            name='Item',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('item_name', models.CharField(max_length=64, verbose_name='Item Name')),
                ('item_ID', models.CharField(max_length=16)),
                ('sale_price', models.FloatField(default=0.0, verbose_name='Item Sale Price')),
                ('item_cost', models.FloatField(default=0.0, verbose_name='Item Cost')),
                ('manufacturer', models.CharField(max_length=64, verbose_name='Manufacturing Company name')),
                ('mfd_date', models.DateTimeField(verbose_name='Manufacturing Date')),
                ('exp_date', models.DateTimeField(verbose_name='Expiry Date')),
                ('points', models.FloatField(default=0.0)),
            ],
        ),
        migrations.CreateModel(
            name='Transaction',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('transaction_ID', models.CharField(max_length=16)),
                ('date', models.DateField(verbose_name='Transaction Date')),
                ('branch', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='erp.Branch')),
                ('sales_executive', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='erp.Employee')),
            ],
        ),
        # Foreign keys added after CreateModel to resolve the ordering /
        # circular dependency between Branch and Employee.
        migrations.AddField(
            model_name='inventory',
            name='item',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='erp.Item'),
        ),
        migrations.AddField(
            model_name='cart',
            name='item',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='erp.Item'),
        ),
        migrations.AddField(
            model_name='cart',
            name='transaction',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='erp.Transaction'),
        ),
        migrations.AddField(
            model_name='branch',
            name='branch_manager',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='erp.Employee'),
        ),
    ]
|
#!/usr/bin/env python
__version__ = "20190114"
__author__ = "Decaff_42"
__copyright__ = "2019 by Decaff_42"
__license__ = """Only non-comercial use with attribution is allowed without
prior written permission from Decaff_42."""
import os
import re
def get_dats(ysfpath, airplanes):
    """Collect DAT file contents for each aircraft.

    Walks *ysfpath* for ``.dat`` files, matches each file's first IDENTIFY
    line against the (underscored) aircraft names, and returns a dict
    mapping each identify name to the list of lines of its DAT file.
    """
    print("Looking for DAT files for all aircraft")
    # Build the list of unique identify names ("My Jet" -> "My_Jet").
    aircraft_idents = list()
    for plane in airplanes:
        name = plane.aircraft_name
        name = name.replace(" ", "_")
        name = name.strip("\n")
        if name not in aircraft_idents:
            aircraft_idents.append(name)
    # BUGFIX: copy the list — the original aliased it, so deleting found
    # idents below also mutated aircraft_idents.
    temp_aircraft_idents = list(aircraft_idents)
    # Get all .dat files, skipping the non-aircraft directories.
    files_to_test = list()
    ignore_dir_list = ["ground", "misc", "mission", "scenery", "sound", "weapons"]
    for root, dirs, files in os.walk(ysfpath):
        # BUGFIX: the original compared the full path against bare folder
        # names (never true) and would have aborted the whole walk with
        # `break`; prune the unwanted sub-directories in place instead.
        dirs[:] = [d for d in dirs if d.lower() not in ignore_dir_list]
        for file in files:
            if file.lower().endswith(".dat"):
                files_to_test.append(os.path.join(root, file))
    # Get DAT Files for these unique IDENTIFY lines.
    aircraft_dats = dict()
    identify_lines = list()
    for file in files_to_test:
        if len(temp_aircraft_idents) == 0:
            # All files have been found
            break
        with open(file, mode='r', errors='ignore') as f:
            dat_lines = f.readlines()
        for line in dat_lines:
            if line.startswith("IDENTIFY"):
                identify_lines.append(line)
                # Normalise the line the same way the idents were built.
                name = line.strip("\n").upper().replace(" ", "_")
                for ind, ident in enumerate(temp_aircraft_idents):
                    # BUGFIX: compare case-insensitively — `name` is
                    # upper-cased, so mixed-case idents never matched.
                    if ident.upper() in name:
                        # Found the file!
                        aircraft_dats[ident] = dat_lines
                        print(" - Found DAT File for {}".format(ident))
                        del temp_aircraft_idents[ind]  # Don't re-find identify line
                        break
                else:
                    continue
                break
    if len(temp_aircraft_idents) > 0:
        print("Could not find DAT File(s) for these aircraft:")
        for i in temp_aircraft_idents:
            print(" {}".format(i))
        print(" ")
    else:
        print("Found DAT files for all aircraft!.")
    return aircraft_dats
|
# -*- coding: utf-8 -*-
import glob, sys, os, re, string, time, random, dateparser
from dateparser.search import search_dates
#!/usr/sfw/bin/python
"""
dateparser_export_to_BIEO_format, a script to call dateparser on
sentences in French and export the result to the BIEO format
(Beginning / Inside / End / Outside).
Copyright (C) 2021 Philippe Gambette
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
"""
# get the current folder
folder = os.path.abspath(os.path.dirname(sys.argv[0]))

# for all csv files in the corpus folder
for file in glob.glob(os.path.join(os.path.join(folder, "corpus"), "*.csv")):
    # open the input file, which should be a csv file containing:
    # * sentences starting with #
    # * followed by the list of tokens number, each followed,
    # on the same line, by a tabulation and by the token itself
    # NOTE(review): `input` shadows the builtin of the same name.
    input = open(file, "r", encoding="utf-8")
    # save the result in this output file
    output = open(file+".results.tsv", "w", encoding="utf-8")
    beginLabels = []
    linesToWrite = []
    savedLines = []
    for line in input:
        # Look for lines with a tabulation separator
        res = re.search("^(.*)\t(.*)$", line)
        if res:
            # part1 should contain either a # followed by a sentence or a token id
            part1 = res.group(1)
            # part2 should be empty or contain a token
            part2 = res.group(2)
            # Rem
            # Keep only the first line of part2 (drop trailing CR/LF tail).
            res2 = re.search("^([^\r\n]*[\r\n]*).*$", part2)
            if res2:
                part2 = res2.group(1)
            if len(part1)>0 and len(part2)==0:
                # Found a sentence: look for dates inside
                print("Parsed sentence: " + part1)
                # Looking for dates with the dateparser search_dates function
                foundDates = search_dates(part1, languages=['fr'])
                if str(type(foundDates)) != "<class 'NoneType'>" and len(foundDates)>0:
                    if len(foundDates)>1:
                        print(" โ dateparser found "+str(len(foundDates))+" dates: "+str(foundDates))
                    else:
                        print(" โ dateparser found a date: "+str(foundDates))
                    # start matching tokens with the date found by dateparser
                    # the matched tokens and their label will be added to the list linesToWrite
                    beginLabels = []
                    linesToWrite = []
                    # beginLabels will store the beginnings of dates found in the text when we progressively read tokens
                    for d in foundDates:
                        beginLabels.append("")
                        linesToWrite.append([])
                else:
                    # no date found in the sentence
                    beginLabels = []
                    linesToWrite = []
                output.writelines(line)
            if len(part1)>0 and len(part2)>0:
                # Assume that the new token is not part of a date
                savedLines.append(part1+"\t"+part2+"\tO\n")
                # Try to extend each candidate date beginning with this token,
                # first without, then with a separating whitespace.
                for i in range(0,len(beginLabels)):
                    foundBeginning = False
                    if beginLabels[i] + part2 == foundDates[i][0][0:len(beginLabels[i]+part2)]:
                        # The token, not preceded by a whitespace, is part of a date
                        if beginLabels[i] == "":
                            #The token is the start of a date
                            linesToWrite[i].append(part1+"\t"+part2+"\tBT\n")
                            if part2 == foundDates[i][0]:
                                #It is also an end of a date: write it directly with its label BT
                                output.writelines(linesToWrite[i][0])
                                linesToWrite[i] = []
                                # Reinitialize savedLines as the new token was actually part of a date
                                savedLines = []
                            beginLabels[i] += part2
                        else:
                            # The token belongs to a date
                            beginLabels[i] += part2
                            #print("Matched a following!")
                            if beginLabels[i] == foundDates[i][0]:
                                # If the token is the end of a date, add it with label ET to the lines to write,
                                # then write all the lines in linesToWrite to the output file
                                linesToWrite[i].append(part1+"\t"+part2+"\tET\n")
                                savedLines = []
                                for l in linesToWrite[i]:
                                    output.writelines(l)
                                beginLabels[i] = ""
                                linesToWrite[i] = []
                                # Reinitialize savedLines as the new token was actually part of a date
                                savedLines = []
                            else:
                                # If the token is inside a date, add it with label IT to the lines to write
                                linesToWrite[i].append(part1+"\t"+part2+"\tIT\n")
                        foundBeginning = True
                    if beginLabels[i] + " " + part2 == foundDates[i][0][0:len(beginLabels[i]+" "+part2)]:
                        # The token, preceded by a whitespace, is part of a date
                        beginLabels[i] += " " + part2
                        # The token belongs to a date
                        if beginLabels[i] == foundDates[i][0]:
                            # If the token is the end of a date, add it with label ET to the lines to write,
                            # then write all the lines in linesToWrite to the output file
                            linesToWrite[i].append(part1+"\t"+part2+"\tET\n")
                            savedLines = []
                            for l in linesToWrite[i]:
                                output.writelines(l)
                            beginLabels[i] = ""
                            linesToWrite[i] = []
                            # Reinitialize savedLines as the new token was actually part of a date
                            savedLines = []
                        else:
                            # If the token is inside a date, add it with label IT to the lines to write
                            linesToWrite[i].append(part1+"\t"+part2+"\tIT\n")
                        foundBeginning = True
                    # The current token does not correspond to this beginning of a date
                    if not(foundBeginning):
                        beginLabels[i] = ""
                        linesToWrite[i] = []
                # Check if we are still matching a date,
                # that is if beginLabels still contain the beginning of a date
                # matching a sequence made of the latest seen tokens
                stillMatchingADate = False
                for i in range(0,len(beginLabels)):
                    if len(beginLabels[i])>0:
                        stillMatchingADate = True
                # If we are not currently matching a date, write the latest token to the output file with label O
                if not(stillMatchingADate):
                    for l in savedLines:
                        output.writelines(l)
                    savedLines = []
            if len(part1)==0 and len(part2)==0:
                # The line is empty, we reproduce it directly
                output.writelines(line)
    input.close()
    output.close()
import csv
import requests

# For every row in Test.csv, delete the corresponding remote resource.
# The first column is assumed to hold the resource identifier.
with open('Test.csv', newline='') as filestream:
    for row in csv.reader(filestream):
        if not row:
            # Skip blank lines instead of crashing on row[0].
            continue
        # BUGFIX: the original used line.split(","), which left the trailing
        # newline on the identifier of single-column rows (corrupting the
        # URL) and breaks on quoted fields; csv.reader handles both.
        resource_id = row[0].strip()
        print(resource_id)
        url = "https://test.com/test/test/{0}".format(resource_id)
        print(url)
        print(requests.delete(url))
|
def count_holes(n):
    """Count the enclosed 'holes' in the decimal digits of an integer.

    Digits 0, 4, 6 and 9 contribute one hole each; 8 contributes two.
    Floats yield the string 'ERROR'; any other input yields 0.
    """
    if isinstance(n, int):
        holes_per_digit = {'0': 1, '4': 1, '6': 1, '9': 1, '8': 2}
        # str(n) may include a leading '-', which simply contributes 0.
        return sum(holes_per_digit.get(digit, 0) for digit in str(n))
    if isinstance(n, float):
        return 'ERROR'
    return 0
# Demo calls exercising each input category: string (returns 0), positive
# int, string again, negative int, negative float (returns 'ERROR').
print(count_holes('123'))
print(count_holes(906))
print(count_holes('001'))
print(count_holes(-8))
print(count_holes(-8.0))
|
import pygame
import random
import constants
from Player import *
from Bullet import *
from platforms import *
from Level import *
from Level_01 import *
def main():
    """Run the game loop: handle input, update sprites, scroll the world,
    and draw each frame, capped at 40 FPS.
    """
    pygame.init()
    # SCREEN_WIDTH / SCREEN_HEIGHT come from one of the star imports
    # (presumably constants) — TODO confirm.
    size = [SCREEN_WIDTH, SCREEN_HEIGHT]
    screen = pygame.display.set_mode(size)
    pygame.display.set_caption("Witches!!")
    player = Player()
    level_list = []
    level_list.append(Level_01(player))
    current_level_no = 0
    current_level = level_list[current_level_no]
    active_sprite_list = pygame.sprite.Group()
    bullet_list = pygame.sprite.Group()
    player.level = current_level
    #player.rect.x = -10
    #player.rect.y = 400
    # Player's starting position on screen.
    player.rect.x = 760
    player.rect.y = 270
    active_sprite_list.add(player)
    done = False
    clock = pygame.time.Clock()
    while done == False:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                done = True
            #player movement
            if event.type == pygame.KEYDOWN:
                if event.key == pygame.K_a:
                    player.go_left()
                if event.key == pygame.K_d:
                    player.go_right()
                if event.key == pygame.K_w:
                    player.jump()
                if event.key == pygame.K_SPACE:
                    # Spawn a bullet offset from the player's sprite origin.
                    bullet = Bullet()
                    #location of the bullet
                    bullet.rect.x = player.rect.x + 117
                    bullet.rect.y = player.rect.y + 90
                    active_sprite_list.add(bullet)
                    bullet_list.add(bullet)
            #have the player stop moving
            if event.type == pygame.KEYUP:
                if event.key == pygame.K_a and player.change_x < 0:
                    player.stop()
                if event.key == pygame.K_d and player.change_x > 0:
                    player.stop()
        active_sprite_list.update()
        current_level.update()
        # Horizontal scrolling: keep the player between x=120 and x=500 by
        # shifting the world in the opposite direction.
        if player.rect.right >= 500:
            diff = player.rect.right - 500
            player.rect.right = 500
            current_level.shift_world(-diff)
        if player.rect.left <= 120:
            diff = 120 - player.rect.left
            player.rect.left = 120
            current_level.shift_world(diff)
        #re-implement when you figure out how to keep her from falling
        # Vertical scrolling: same idea between y=120 and y=600.
        if player.rect.top <= 120:
            diff = 120 - player.rect.top
            player.rect.top = 120
            current_level.shift_worldy(diff)
        if player.rect.bottom >= 600:
            diff = player.rect.bottom - 600
            player.rect.bottom = 600
            current_level.shift_worldy(-diff)
        #next level
        current_level.draw(screen)
        active_sprite_list.draw(screen)
        clock.tick(40)  # cap the frame rate at 40 FPS
        pygame.display.flip()
    pygame.quit()


if __name__ == "__main__":
    main()
|
from django.contrib import messages
from decouple import config
from pathlib import Path
import os
import django_heroku
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# Read from the environment via python-decouple; never commit it.
SECRET_KEY = config('SECRET_KEY')

# SECURITY WARNING: don't run with debug turned on in production!
# (DEBUG itself is assigned from the environment near the end of this file.)

# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'main',
    # 'django.contrib.sites',
    'authentication',
    #3rd party
    'crispy_forms',
    'widget_tweaks',
    #all auth
    # 'allauth',
    # 'allauth.account',
    # 'allauth.socialaccount',
    #providers
    # 'allauth.socialaccount.providers.facebook',
    # 'allauth.socialaccount.providers.google',
]

CRISPY_TEMPLATE_PACK="bootstrap4"

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    # NOTE(review): WhiteNoise is conventionally placed directly after
    # SecurityMiddleware — confirm the intended ordering.
    'whitenoise.middleware.WhiteNoiseMiddleware',
]
ROOT_URLCONF = 'cohelp.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [BASE_DIR / "template"],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'cohelp.wsgi.application'

# Previous MySQL configuration, kept for reference.
# DATABASES = {
#     'default': {
#         'ENGINE': 'django.db.backends.mysql',
#         'NAME': config('DB_NAME'),
#         'USER':config('DB_USER'),
#         'PASSWORD':config('DB_PASSWORD'),
#         'HOST':config('DB_HOST'),
#         'PORT':config('DB_PORT'),
#     }
# }

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        # NOTE(review): 'db.sqlit3' looks like a typo for 'db.sqlite3';
        # renaming now would detach any existing data — confirm first.
        'NAME': BASE_DIR /'db.sqlit3',
    }
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [
    BASE_DIR /'static',
]
STATIC_ROOT=os.path.join(BASE_DIR, 'staticfiles')

# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'

LOGIN_REDIRECT_URL="/"
LOGOUT_REDIRECT_URL = '/'

# django-allauth provider settings, kept for when allauth is re-enabled.
# SOCIALACCOUNT_PROVIDERS = {
#     'google': {
#         'SCOPE': [
#             'profile',
#             'email',
#         ],
#         'AUTH_PARAMS': {
#             'access_type': 'online',
#         }
#     }
# }

AUTHENTICATION_BACKENDS = [
    'django.contrib.auth.backends.ModelBackend',
    # 'allauth.account.auth_backends.AuthenticationBackend'
]

# Map Django's ERROR message level onto Bootstrap's "danger" CSS class.
MESSAGE_TAGS={
    messages.ERROR:"danger"
}

#email config
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_HOST=config('EMAIL_HOST')
EMAIL_PORT=config('EMAIL_PORT')
EMAIL_HOST_USER=config('EMAIL_HOST_USER')
EMAIL_HOST_PASSWORD=config('EMAIL_HOST_PASSWORD')
EMAIL_USE_TLS=True
EMAIL_USE_SSL=False

# Let django-heroku patch database/static settings from the environment.
django_heroku.settings(locals())

STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'

DEBUG = (config('DEBUG') == 'True')
# NOTE(review): [''] matches no real host; effective value comes from the
# overrides below, which set '*' in every branch.
ALLOWED_HOSTS=['']
if DEBUG:
    ALLOWED_HOSTS=['*']
if not DEBUG:
    # SECURE_PROXY_SSL_HEADER =('HTTP_X_FORWARDED_PROTO',"https")
    # SECURE_SSL_REDIRECT=True
    CSRF_COOKIE_SECURE=True
    # SECURE_HSTS_PRELOAD=True
    ALLOWED_HOSTS=["*"]
# NOTE(review): this unconditionally overrides the WhiteNoise storage set
# above — confirm whether the compressed manifest storage was meant to win.
STATICFILES_STORAGE = 'django.contrib.staticfiles.storage.StaticFilesStorage'
from turtle import *

# Draw six overlapping circles rotated 60 degrees apart (a simple flower),
# green outline filled-colour yellow.
color("green", "yellow")
# BUGFIX: speed(-1) relied on out-of-range values being coerced; speed(0)
# is the documented value for the fastest (animation-free) drawing speed.
speed(0)
for i in range(6):
    circle(100)
    lt(60)
mainloop()
from factories import loader

# Exercise a car from each factory name; 'NotExist' presumably demonstrates
# the loader's behaviour for an unknown factory.
factory_names = ('jeep_factory', 'NotExist')
for name in factory_names:
    vehicle = loader.load_factory(name).create_auto()
    vehicle.start()
    vehicle.stop()
# Importing Libraries
import matplotlib.pyplot as plt
import pandas as pd

# Importing the Dataset
dataset = pd.read_csv("Real estate.csv")

# Taking variables x1 to x5 as shown by RMSE
X = dataset.iloc[:, 1:-2].values
# NOTE(review): column 2 lies inside the X slice above, so the target is
# also one of the features (target leakage) — confirm the intended column.
y = dataset.iloc[:, 2].values

# Encoding the dataset accordingly
from sklearn.preprocessing import LabelEncoder
labelencoder = LabelEncoder()
X[:, 0] = labelencoder.fit_transform(X[:, 0])
X[:, 4] = labelencoder.fit_transform(X[:, 4])

"""
# Feature Scaling
from sklearn.preprocessing import StandardScaler
sc_X = StandardScaler()
sc_y = StandardScaler()
X = sc_X.fit_transform(X)
y = sc_y.fit_transform(y.reshape(-1, 1))
"""

# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0)

# Making Regression model and fitting to dataset
from sklearn.ensemble import RandomForestRegressor
regressor = RandomForestRegressor(random_state = 0)
# BUGFIX: fit on the training split only. The original fitted on the full
# data (X, y), leaking the test rows into training — which explains the
# "surprisingly low" RMSE noted below.
regressor.fit(X_train, y_train)

# Visualizing Regression results
y_pred = regressor.predict(X_test)
# Use the actual split sizes instead of the hard-coded 83 / 331.
plt.scatter(range(len(y_test)), y_test, color='red', alpha = 0.4)
plt.scatter(range(len(y_test)), y_pred, color='blue', alpha = 0.4)
plt.title("Test Vs. Prediction (Random Forest Model)")
plt.xlabel("Features")
plt.ylabel("Price")
plt.show()

# Calculating root mean square error
from sklearn.metrics import mean_squared_error as mse
from math import sqrt
error = mse(y_test, y_pred)
print("rmse:", sqrt(error))

"""
The RMSE value is surprisingly low
checking the graph for training set is required too
as there is a high chance of over fitting
"""
plt.scatter(range(len(y_train)), y_train, color='red', alpha = 0.4)
plt.scatter(range(len(y_train)), regressor.predict(X_train), color='blue', alpha = 0.4)
plt.title("Train Vs. Prediction (Random Forest Model)")
plt.xlabel("Features")
plt.ylabel("Price")
plt.show()

# Export the test-set predictions for inspection.
df = pd.DataFrame(y_pred)
df.to_csv("Prediction.csv")
prediction = pd.read_csv("Prediction.csv")
# -*- coding: utf-8 -*-
"""
Created on Fri Nov 1 17:11:46 2019
@author: Jonathan
"""
import numpy as np
import matplotlib.pyplot as plt
import math
import sys
import os
import pathlib
import pywt
from scipy import optimize
from pylab import rcParams
'''Setting the global figure size'''
rcParams['figure.figsize'] = 10, 10  # inches (width, height) for all plots
def analyzeFile(filepath):
    """Bin half-hourly count data from *filepath*, fit sine curves globally
    and per subjective day, and write the processed data, curve parameters
    and a plot into an `out` sub-directory next to the input file.
    """
    print(f'Processing file: {filepath}')
    filename, extension = os.path.splitext(os.path.basename(filepath))
    '''Creating an output directory'''
    outDir = f'{os.path.dirname(filepath)}/out'
    pathlib.Path(outDir).mkdir(parents=True, exist_ok=True)
    '''strip empty lines from the input file as this seems to prevent parsing when using non-whitespace delimiters with genfromtxt'''
    # BUGFIX: use the actual input file name in every output path — the
    # names were hard-coded, so multiple inputs overwrote each other and
    # the computed `filename` was never used.
    strippedPath = f'{outDir}/{filename}_stripped{extension}'
    with open(filepath) as infile, open(strippedPath, 'w') as outfile:
        for line in infile:
            if not line.strip(): continue #skip
            outfile.write(line)
    data = np.genfromtxt(strippedPath,dtype='str',delimiter=",", skip_header=2) if (extension == '.csv') else np.genfromtxt(strippedPath,dtype='str')
    dates = data[:,1]
    dateBins = np.unique(dates)
    maxBins = len(dateBins)*48  # 48 half-hour bins per day
    dBmin = []
    dBmax = []
    for i in np.arange(0,len(dateBins)):
        dBmin.append(np.min(np.where(dates==dateBins[i])))
        dBmax.append(np.max(np.where(dates==dateBins[i])))
    timeStr = data[:,2]
    timeHour = []
    timeMin = []
    for j in np.arange(0,len(timeStr)):
        timeHour.append(int(timeStr[j][0:2]))
        timeMin.append(int(timeStr[j][3:5]))
    '''for each 0-23, from 0-29 and 30-59'''
    timeIndex = []
    for k in np.arange(0,len(timeHour)):
        if 0 <= timeMin[k] <= 29:
            timeIndex.append((timeHour[k]*2)+1)
        if 30 <=timeMin[k] <= 59:
            timeIndex.append((timeHour[k]*2)+2)
    '''with n days, assign one of n*48 possible bins to each data point'''
    idxCt = -1
    binIndex = []
    for l in np.arange(0,len(dateBins)):
        for m in np.arange(dBmin[l],dBmax[l]+1):
            idxCt = idxCt+1
            binIndex.append((l*48)+timeIndex[idxCt])
    '''now okay to deal with data'''
    elapsed = data[:,3]
    ctStr = data[:,4]
    # BUGFIX: np.float was deprecated in NumPy 1.20 and later removed; the
    # builtin float is the documented replacement.
    counts = ctStr.astype(float)
    binCtCount = []
    binCtMean = []
    for n in np.arange(0,maxBins):
        idx = [p for p, x in enumerate(binIndex) if x == n]
        binCtCount.append(len(idx))
        if n in binIndex:
            binCtMean.append(np.nanmean(counts[idx]))
        else:
            binCtMean.append(0)
    outputTXTFilename = f'{outDir}/{filename}_processed.txt'
    resultTXT = open(outputTXTFilename, 'w')
    outputCSVFilename = f'{outDir}/{filename}_processed.csv'
    resultCSV = open(outputCSVFilename, 'w')
    resultTXT.write("DayIndex\tTimeIndex\tAverageRead\n")
    resultCSV.write("DayIndex,TimeIndex,AverageRead\n")
    sampleIndex = -1
    for sample in binCtMean:
        sampleIndex = sampleIndex+1
        resultTXT.write(f'{math.floor(sampleIndex/48)}\t{(sampleIndex%48/2.0):.2f}\t{sample}\n')
        resultCSV.write(f'{math.floor(sampleIndex/48)},{(sampleIndex%48/2.0):.2f},{sample}\n')
    # BUGFIX: close the result files so buffered data is flushed to disk.
    resultTXT.close()
    resultCSV.close()
    print(f'Finished writing {outputTXTFilename}')
    print(f'Finished writing {outputCSVFilename}')
    '''Plot the data'''
    #plt.plot(binCtMean)
    scatterPlt = plt.subplot(311)
    plt.scatter(np.linspace(0, len(binCtMean) -1, num=len(binCtMean)), binCtMean, s=1.5)
    '''Sine equation against which to fit'''
    def sin_func(x, a, b, c, d):
        return a * np.sin(b * x + c) + d
    '''Fit whole data set'''
    startIndex = 0
    endIndex = len(binCtMean) - 1
    x_axis = np.linspace(startIndex, endIndex, num=len(binCtMean))
    midpoint = min(binCtMean) + (max(binCtMean) - min(binCtMean)) / 2.
    amplitudeGuess = max(binCtMean) - midpoint
    yOffsetGuess = midpoint
    periodGuess = math.pi * 2 / 48  # expect roughly one cycle per day
    global_params, global_params_covariance = optimize.curve_fit(sin_func, x_axis, binCtMean, p0=[amplitudeGuess, periodGuess, 0, yOffsetGuess])
    error = np.sqrt(np.diag(global_params_covariance))
    errorString = [f'{err:3.2}' for err in error]
    print(f'Estimated error rates of parameters {errorString}')
    plt.subplot(312, sharex=scatterPlt)
    plt.scatter(np.linspace(0, len(binCtMean) -1, num=len(binCtMean)), binCtMean, s=1.5, alpha=0.5)
    global_period = math.floor(math.pi * 2 / global_params[1])
    print(f'Global period for the data set is {global_period}')
    #global_period = 48
    def getXCoordsAtMinima(a, b, c, d, rangeStart, rangeEnd):
        # Find one minimum with fminbound, then step by the period in both
        # directions to enumerate all minima inside the range.
        period = abs(math.pi * 2 / b)
        mins = [optimize.fminbound(func = sin_func, x1=rangeStart, x2=rangeEnd, args = (a,b,c,d))]
        forward = mins[0]
        reverse = mins[0]
        while forward < rangeEnd:
            mins.append(forward)
            forward += period
        while reverse > rangeStart:
            mins.append(reverse)
            reverse -= period
        return sorted(list(set(mins)))
    def getXCoordsAtMaxima(a, b, c, d, rangeStart, rangeEnd):
        # Maxima of the sine are the minima of its negation.
        return getXCoordsAtMinima(-a, b, c, d, rangeStart, rangeEnd)
    p = plt.plot(x_axis, sin_func(x_axis, global_params[0], global_params[1], global_params[2], global_params[3]))
    minX = getXCoordsAtMinima(global_params[0], global_params[1], global_params[2], global_params[3], 0, len(binCtMean))
    minY = [sin_func(x, global_params[0], global_params[1], global_params[2], global_params[3]) for x in minX]
    plt.scatter(minX, minY, s=15, marker='v', c=p[0].get_color(), zorder=10) #The minimum
    maxX = getXCoordsAtMaxima(global_params[0], global_params[1], global_params[2], global_params[3], 0, len(binCtMean))
    maxY = [sin_func(x, global_params[0], global_params[1], global_params[2], global_params[3]) for x in maxX]
    plt.scatter(maxX, maxY, s=15, marker='^', c=p[0].get_color(), zorder=10) #The maximum
    plt.subplot(313, sharex=scatterPlt)
    plt.scatter(np.linspace(0, len(binCtMean) -1, num=len(binCtMean)), binCtMean, s=1.5, alpha=0.5)
    '''Output curve parameters'''
    outputCurvesFilename = f'{outDir}/{filename}_curves.csv'
    outputCurves = open(outputCurvesFilename, 'w')
    outputCurves.write('#the function params correspond to y = a*sin(b*x + c) + d\n')
    outputCurves.write('subjectiveDayOrGlobal, peak_or_trough, x, y, period, function_paramA, function_paramB, function_paramC, function_paramD\n')
    for i,_ in enumerate(minX):
        outputCurves.write(f'global, trough, {minX[i]}, {minY[i]}, {global_period}, {global_params[0]}, {global_params[1]}, {global_params[2]}, {global_params[3]}\n')
    for i,_ in enumerate(maxX):
        outputCurves.write(f'global, peak, {maxX[i]}, {maxY[i]}, {global_period}, {global_params[0]}, {global_params[1]}, {global_params[2]}, {global_params[3]}\n')
    '''Fit per day using global curve parameters'''
    for day in range(0, math.ceil(len(binCtMean) / global_period)):
        startIndex = day * global_period
        endIndex = (day + 1) * global_period - 1
        '''Extending the range a bit on either side'''
        startIndex = max(startIndex - math.floor(global_period/2), 0)
        endIndex = min(endIndex + math.floor(global_period/2), len(binCtMean))
        subset = binCtMean[startIndex:(endIndex+1)]
        x_axis = np.linspace(startIndex, endIndex, num=(endIndex - startIndex + 1))
        midpoint = min(subset) + (max(subset) - min(subset)) / 2.
        amplitudeGuess = max(subset) - midpoint
        yOffsetGuess = global_params[3]
        periodGuess = global_params[1]
        try:
            params, params_covariance = optimize.curve_fit(sin_func, x_axis, subset, p0=[amplitudeGuess, periodGuess, 0, yOffsetGuess])
            error = np.sqrt(np.diag(params_covariance))
            errorString = [f'{err:3.2}' for err in error]
            print(f'Estimated error rates of parameters {errorString}')
            p = plt.plot(x_axis, sin_func(x_axis, params[0], params[1], params[2], params[3]))
            a = params[0]
            b = params[1]
            c = params[2]
            d = params[3]
            period = math.floor(math.pi * 2 / b)
            minX = getXCoordsAtMinima(a, b, c, d, startIndex, endIndex)
            minY = [sin_func(x, a, b, c, d) for x in minX]
            plt.scatter(minX, minY, s=15, marker='v', c=p[0].get_color(), zorder=10) #The minimum
            maxX = getXCoordsAtMaxima(a, b, c, d, startIndex, endIndex)
            maxY = [sin_func(x, a, b, c, d) for x in maxX]
            plt.scatter(maxX, maxY, s=15, marker='^', c=p[0].get_color(), zorder=10) #The maximum
            for i,_ in enumerate(minX):
                outputCurves.write(f'{day}, trough, {minX[i]}, {minY[i]}, {period}, {a}, {b}, {c}, {d}\n')
            for i,_ in enumerate(maxX):
                outputCurves.write(f'{day}, peak, {maxX[i]}, {maxY[i]}, {period}, {a}, {b}, {c}, {d}\n')
        # BUGFIX: was a bare `except:`, which also swallowed KeyboardInterrupt
        # and SystemExit.
        except Exception:
            print(f'Failed fitting curves to day {day} of {filename}')
    # BUGFIX: close the curves file so it is flushed before the plot is saved.
    outputCurves.close()
    plotFilename = f'{outDir}/{filename}_plot.png'
    plt.savefig(plotFilename)
    plt.close()
    print(f'Finished plotting {plotFilename}')
'''argv contains a list of the arguments including the script name, so for example: "python script.py 1 2 3" would have argv = [script.py, 1, 2, 3]'''
if len(sys.argv) != 2:
    print("You need to specify exactly 1 argument:\n python BinningTest.py file_or_folder_name")
    sys.exit()

# BUGFIX: renamed `Path` -> `input_path` (the old name shadowed the common
# pathlib.Path identifier) and list/join files against the path itself —
# the original joined against `filename`, which only worked by coincidence
# because the two are equal when there is no extension.
input_path = sys.argv[1]
filename, extension = os.path.splitext(input_path)
if not extension:
    print(f'Extension-less input ({input_path}) assumed to be folder')
    filesInFolder = [f for f in os.listdir(input_path) if os.path.isfile(os.path.join(input_path, f))]
    print(f'Found {len(filesInFolder)} files: {filesInFolder}')
    for file in filesInFolder:
        analyzeFile(f'{input_path}/{file}')
else:
    '''If we're here it's a single file'''
    analyzeFile(os.path.abspath(input_path))
|
import requests
from sqlalchemy import create_engine
from api import basepay
# NOTE(review): credentials are hard-coded (root:root) and echo=True logs
# every SQL statement — both should come from configuration in production.
engine = create_engine("mysql+pymysql://root:root@localhost:3306/vpc", echo=True)
conn = engine.connect()
class DeductRequest(basepay.BasePay):
    """Payment deduction request that resolves its endpoint URL from the
    t_interface table before posting the request payload.
    """

    def psrseParam(self):
        # NOTE(review): the name looks like a typo of "parseParam"; kept
        # as-is because the base class / callers may depend on it.
        pass

    def request(self):
        # Look up the URL for this interface key.
        # BUGFIX: bind the key as a query parameter instead of %-formatting
        # it into the SQL string — the old form was injectable and produced
        # an unquoted value that broke the query for string keys.
        result = conn.execute(
            "select * from t_interface t where t.interface_key = %s",
            (self.interface_key,),
        )
        url = result
        # NOTE(review): `url` is a result set, not a URL string — the URL
        # column probably needs to be fetched from the first row here;
        # confirm the intended column before posting.
        requests.post(url, self.reqdata)
if __name__ == '__main__':
    # Ad-hoc smoke test: dump every row of t_bank to stdout.
    result = conn.execute("select * from t_bank")
    print([i for i in result.cursor.fetchall()])
from wandb_gql import gql
UPSERT_VIEW = gql(
"""
mutation upsertView(
$id: ID
$entityName: String
$projectName: String
$type: String
$name: String
$displayName: String
$description: String
$spec: String!
) {
upsertView(
input: {
id: $id
entityName: $entityName
projectName: $projectName
name: $name
displayName: $displayName
description: $description
type: $type
createdUsing: WANDB_SDK
spec: $spec
}
) {
view {
id
type
name
displayName
description
project {
id
name
entityName
}
spec
updatedAt
}
inserted
}
}
"""
)
CREATE_PROJECT = gql(
"""
mutation upsertModel(
$description: String
$entityName: String
$id: String
$name: String
$framework: String
$access: String
$views: JSONString
) {
upsertModel(
input: {
description: $description
entityName: $entityName
id: $id
name: $name
framework: $framework
access: $access
views: $views
}
) {
project {
id
name
entityName
description
access
views
}
model {
id
name
entityName
description
access
views
}
inserted
}
}
"""
)
|
from ..db.models import Datum, TestDatum, Action, ActionType
def test_can_create_test(client, user, test_dict):
    """POST /tests creates the test, persists each Datum/TestDatum pair,
    and records a CREATE_TEST action for the user."""
    assert len(user.tests) == 0
    resp = client.post('/tests', headers=user.auth_headers, json=test_dict)
    assert 'created_at' in resp
    assert 'id' in resp
    # The response data keys mirror the submitted labels.
    assert set(resp['data'].keys()) == set(map(lambda d: d['label'], test_dict['data']))
    for key in resp['data'].keys():
        datum_id = resp['data'][key]['id']
        datum = Datum.query.filter_by(id=datum_id).first()
        test_datum = TestDatum.query.filter_by(test_id=resp['id'], datum_id=datum_id).first()
        assert datum.name == resp['data'][key]['name']
        assert datum.type.value == resp['data'][key]['type']
        assert datum.value == resp['data'][key]['value']
        assert test_datum.disposition.value == resp['data'][key]['disposition']
    assert len(user.tests) == 1
    action = Action.query.filter_by(type=ActionType.CREATE_TEST, user_id=user.id, obj1_id=resp['id']).first()
    assert action is not None
def test_cant_create_test_with_duplicate_labels(client, user, test_dict):
    """Duplicate data labels in the payload are rejected."""
    label_to_duplicate = list(map(lambda d: d['label'], test_dict['data']))[0]
    test_dict['data'].append({'label': label_to_duplicate})
    resp = client.post('/tests', headers=user.auth_headers, json=test_dict, raw=True)
    # NOTE(review): the API reports validation failures as 500; a 4xx would
    # be more conventional — these tests pin current behaviour.
    assert resp.status_code == 500
    assert 'duplicate test data label' in resp.get_json()['error']
def test_cant_create_test_with_datum_that_doesnt_exist(client, user, test_dict):
    """Referencing a non-existent datum_id fails and echoes the bad id."""
    test_dict['data'].append({'label': 'foo', 'datum_id': 0})
    resp = client.post('/tests', headers=user.auth_headers, json=test_dict, raw=True)
    assert resp.status_code == 500
    resp = resp.get_json()
    assert 'it does not exist' in resp['error']
    assert resp['datum_id'] == 0
def test_cant_create_duplicate_test(client, user, test_dict):
    """Creating the same test twice must be rejected and report the existing id."""
    client.post('/tests', headers=user.auth_headers, json=test_dict)
    second = client.post('/tests', headers=user.auth_headers, json=test_dict, raw=True)
    assert second.status_code == 500
    body = second.get_json()
    assert 'already exists' in body['error']
    assert body['test_id'] is not None
def test_can_get_test_details(client, user, test):
    """GET /tests/<id> returns the test and logs a LIST_TEST action."""
    details = client.get(f'/tests/{test.id}', headers=user.auth_headers)
    assert details['id'] == test.id
    logged = Action.query.filter_by(type=ActionType.LIST_TEST, user_id=user.id, obj1_id=test.id).first()
    assert logged is not None
|
import tensorflow as tf
import numpy as np
class CNN8class(object):
    """TF1 1-D CNN with eight per-treatment outcome heads plus a treatment classifier.

    A shared trunk (conv1d -> dense -> dropout) feeds eight independent
    1-unit logit heads (one binary-outcome prediction per treatment class
    0..7) and an 8-way softmax treatment classifier.  Each head's loss is
    masked with tf.where so it only trains on rows belonging to its own
    treatment class; the `tmp` placeholder supplies the filler values for
    masked rows.
    """
    def __init__(self, dense_units=8, name="cnn", sess=None):
        # dense_units: width of the fully-connected trunk layer.
        # sess: optional pre-existing tf.Session to reuse.
        self.name=name
        self.dense_units = dense_units
        with tf.variable_scope(self.name):
            # x: 105 input features per example; y: outcome label.
            self.x = tf.placeholder("float", shape=[None, 105])
            self.y = tf.placeholder("float", shape=[None, 1])
            self.t_class = tf.placeholder("float", shape=[None, 1]) #treatment class
            # Filler values substituted where tf.where masks a row out.
            self.tmp = tf.placeholder("float", shape=[None, 1])
            self.input_layer = tf.reshape(self.x, [-1, self.x.shape[1], 1])
            self.conv1 = tf.layers.conv1d(
                inputs=self.input_layer,
                filters=32,
                kernel_size=[8],
                padding="same",
                activation=tf.nn.relu)
            # NOTE(review): pool1 is built but never used -- pool1_flat
            # reshapes conv1 directly.  With pool_size=[1] and stride 1 the
            # two tensors are elementwise identical, so behavior is the same.
            self.pool1 = tf.layers.max_pooling1d(inputs=self.conv1, pool_size=[1], strides=1)
            self.pool1_flat = tf.reshape(self.conv1, [-1, self.conv1.shape[1] * self.conv1.shape[2]])
            self.dense = tf.layers.dense(inputs=self.pool1_flat, units=self.dense_units, activation=tf.nn.sigmoid) # fully connected layer
            self.dropout = tf.layers.dropout(inputs=self.dense, rate=0.3)
            # 8 * [N, 1]
            self.logit_0_ = tf.layers.dense(inputs=self.dropout, units=1)
            self.logit_1_ = tf.layers.dense(inputs=self.dropout, units=1)
            self.logit_2_ = tf.layers.dense(inputs=self.dropout, units=1)
            self.logit_3_ = tf.layers.dense(inputs=self.dropout, units=1)
            self.logit_4_ = tf.layers.dense(inputs=self.dropout, units=1)
            self.logit_5_ = tf.layers.dense(inputs=self.dropout, units=1)
            self.logit_6_ = tf.layers.dense(inputs=self.dropout, units=1)
            self.logit_7_ = tf.layers.dense(inputs=self.dropout, units=1)
            # sigmoid layers for outcome prediction
            self.pred_class_0 = tf.nn.sigmoid(self.logit_0_)
            self.pred_class_1 = tf.nn.sigmoid(self.logit_1_)
            self.pred_class_2 = tf.nn.sigmoid(self.logit_2_)
            self.pred_class_3 = tf.nn.sigmoid(self.logit_3_)
            self.pred_class_4 = tf.nn.sigmoid(self.logit_4_)
            self.pred_class_5 = tf.nn.sigmoid(self.logit_5_)
            self.pred_class_6 = tf.nn.sigmoid(self.logit_6_)
            self.pred_class_7 = tf.nn.sigmoid(self.logit_7_)
            # Mask labels so head i only sees rows with t_class == i;
            # other rows receive the caller-supplied tmp filler.
            self.y_0 = tf.where(tf.equal(self.t_class, 0), self.y, self.tmp)
            self.y_1 = tf.where(tf.equal(self.t_class, 1), self.y, self.tmp)
            self.y_2 = tf.where(tf.equal(self.t_class, 2), self.y, self.tmp)
            self.y_3 = tf.where(tf.equal(self.t_class, 3), self.y, self.tmp)
            self.y_4 = tf.where(tf.equal(self.t_class, 4), self.y, self.tmp)
            self.y_5 = tf.where(tf.equal(self.t_class, 5), self.y, self.tmp)
            self.y_6 = tf.where(tf.equal(self.t_class, 6), self.y, self.tmp)
            self.y_7 = tf.where(tf.equal(self.t_class, 7), self.y, self.tmp)
            # Same masking applied to each head's logits.
            self.logit_0 = tf.where(tf.equal(self.t_class, 0), self.logit_0_, self.tmp)
            self.logit_1 = tf.where(tf.equal(self.t_class, 1), self.logit_1_, self.tmp)
            self.logit_2 = tf.where(tf.equal(self.t_class, 2), self.logit_2_, self.tmp)
            self.logit_3 = tf.where(tf.equal(self.t_class, 3), self.logit_3_, self.tmp)
            self.logit_4 = tf.where(tf.equal(self.t_class, 4), self.logit_4_, self.tmp)
            self.logit_5 = tf.where(tf.equal(self.t_class, 5), self.logit_5_, self.tmp)
            self.logit_6 = tf.where(tf.equal(self.t_class, 6), self.logit_6_, self.tmp)
            self.logit_7 = tf.where(tf.equal(self.t_class, 7), self.logit_7_, self.tmp)
            # Per-head sigmoid cross-entropy on the masked labels/logits.
            self.loss0 = tf.losses.sigmoid_cross_entropy(multi_class_labels=self.y_0, logits=self.logit_0)
            self.loss1 = tf.losses.sigmoid_cross_entropy(multi_class_labels=self.y_1, logits=self.logit_1)
            self.loss2 = tf.losses.sigmoid_cross_entropy(multi_class_labels=self.y_2, logits=self.logit_2)
            self.loss3 = tf.losses.sigmoid_cross_entropy(multi_class_labels=self.y_3, logits=self.logit_3)
            self.loss4 = tf.losses.sigmoid_cross_entropy(multi_class_labels=self.y_4, logits=self.logit_4)
            self.loss5 = tf.losses.sigmoid_cross_entropy(multi_class_labels=self.y_5, logits=self.logit_5)
            self.loss6 = tf.losses.sigmoid_cross_entropy(multi_class_labels=self.y_6, logits=self.logit_6)
            self.loss7 = tf.losses.sigmoid_cross_entropy(multi_class_labels=self.y_7, logits=self.logit_7)
            self.logits_t = tf.layers.dense(inputs=self.dropout, units=8)
            # softmax layer for treatment classification
            self.p_t = tf.nn.softmax(self.logits_t)
            # treatment classification loss
            self.loss_t = tf.losses.sparse_softmax_cross_entropy(labels=tf.cast(self.t_class, dtype=tf.int32), logits=self.logits_t)
            # total loss
            self.loss_tol = self.loss0 + self.loss1 + self.loss2 + self.loss3 + self.loss4 + self.loss5 + self.loss6 + self.loss7 + self.loss_t
            self.optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.001)
            self.train_op = self.optimizer.minimize(
                loss=self.loss_tol,
                global_step=tf.train.get_global_step())
        init = tf.global_variables_initializer()
        c = tf.ConfigProto()
        c.gpu_options.allow_growth = True
        self.sess = sess if sess is not None else tf.Session(config=c)
        self.sess.run(init)
    def train_process(self, dataset):
        """Run 5000 SGD steps on mini-batches of 128, printing loss every 1000."""
        tmp = np.zeros((128,1))
        for k in range(5000):
            batch = dataset.next_batch(128)
            x = batch[0]
            # Column 0 of the label block is the outcome; the rest encode treatment.
            y = batch[1][:,0].reshape(-1,1)
            # NOTE(review): this slice is 8 columns wide while self.t_class is
            # declared with shape [None, 1] -- confirm the intended label layout.
            t = batch[1][:,1:9]
            cost, _ = self.sess.run((self.loss_tol, self.train_op), feed_dict={self.x: x, self.y: y, self.t_class: t, self.tmp: tmp})
            if k % 1000 == 0:
                print("steps: %d, loss: %f" % (k, cost))
    def cnn_x(self, x):
        """Return the trunk (post-dropout) representation for inputs x."""
        cnn_x=self.sess.run(self.dropout, feed_dict={self.x: x})
        return cnn_x
    def predict_0(self, x, t):
        """Sigmoid outcome prediction from head 0."""
        predict_0 = self.sess.run(self.pred_class_0, feed_dict={self.x: x, self.t_class: t})
        return predict_0
    def predict_1(self, x, t):
        """Sigmoid outcome prediction from head 1."""
        predict_1 = self.sess.run(self.pred_class_1, feed_dict={self.x: x, self.t_class: t})
        return predict_1
    def predict_2(self, x, t):
        """Sigmoid outcome prediction from head 2.  (Local variable names in
        predict_2..predict_6 are copy-paste leftovers; harmless.)"""
        predict_0 = self.sess.run(self.pred_class_2, feed_dict={self.x: x, self.t_class: t})
        return predict_0
    def predict_3(self, x, t):
        """Sigmoid outcome prediction from head 3."""
        predict_1 = self.sess.run(self.pred_class_3, feed_dict={self.x: x, self.t_class: t})
        return predict_1
    def predict_4(self, x, t):
        """Sigmoid outcome prediction from head 4."""
        predict_0 = self.sess.run(self.pred_class_4, feed_dict={self.x: x, self.t_class: t})
        return predict_0
    def predict_5(self, x, t):
        """Sigmoid outcome prediction from head 5."""
        predict_1 = self.sess.run(self.pred_class_5, feed_dict={self.x: x, self.t_class: t})
        return predict_1
    def predict_6(self, x, t):
        """Sigmoid outcome prediction from head 6."""
        predict_0 = self.sess.run(self.pred_class_6, feed_dict={self.x: x, self.t_class: t})
        return predict_0
    def predict_7(self, x, t):
        """Sigmoid outcome prediction from head 7."""
        predict_7 = self.sess.run(self.pred_class_7, feed_dict={self.x: x, self.t_class: t})
        return predict_7
|
# Based on Numerical Recipes
import numpy
from scipy.linalg import solve_banded
import pdb
def splint(spl, x):
    """Evaluate the cubic spline `spl` (attributes x, y, y2) at position(s) x.

    Uses the standard piecewise-cubic interpolation formula; x may be a
    scalar or an array, and values outside the knot range are extrapolated
    from the end intervals.
    """
    npts = len(spl.x)
    # Index of the interval's left knot, clamped into [0, npts-2].
    lo = numpy.clip(numpy.searchsorted(spl.x, x) - 1, 0, npts - 2)
    hi = lo + 1
    h = spl.x[hi] - spl.x[lo]
    a = (spl.x[hi] - x) / h
    b = (x - spl.x[lo]) / h
    linear = a * spl.y[lo] + b * spl.y[hi]
    curved = ((a**3 - a) * spl.y2[lo] + (b**3 - b) * spl.y2[hi]) * h**2. / 6.
    return linear + curved
class CubicSpline:
    """Cubic spline through (x, y) with selectable end conditions.

    yp=None        -> natural spline (second derivative zero at the ends);
    yp='3d=0'      -> zero third derivative at the ends;
    yp=(yp0, yp1)  -> clamped spline with prescribed end slopes.

    Solves the tridiagonal system for the knot second derivatives with
    scipy.linalg.solve_banded and stores them as self.y2.
    """
    def __init__(self, x, y, yp=None):
        npts = len(x)
        band = numpy.zeros((3, npts))
        rhs = numpy.zeros(npts)
        # Interior rows: continuity of the first derivative at each knot.
        band[1, 1:-1] = (x[2:] - x[0:-2]) / 3.
        band[2, 0:-2] = (x[1:-1] - x[0:-2]) / 6.
        band[0, 2:] = (x[2:] - x[1:-1]) / 6.
        rhs[1:-1] = ((y[2:] - y[1:-1]) / (x[2:] - x[1:-1]) -
                     (y[1:-1] - y[0:-2]) / (x[1:-1] - x[0:-2]))
        h0 = x[1] - x[0]
        hn = x[-1] - x[-2]
        if yp is None:
            # Natural spline: y2 = 0 at both ends.
            band[1, 0] = 1.
            band[1, -1] = 1.
        elif yp == '3d=0':
            # Zero third derivative at both ends.
            band[1, 0] = -1. / h0
            band[0, 1] = 1. / h0
            band[1, -1] = -1. / hn
            band[2, -2] = 1. / hn
        else:
            # Clamped: first derivatives fixed to yp[0] and yp[1].
            band[1, 0] = -1. / 3. * h0
            band[0, 1] = -1. / 6. * h0
            band[2, -2] = 1. / 6. * hn
            band[1, -1] = 1. / 3. * hn
            rhs[0] = yp[0] - (y[1] - y[0]) / h0
            rhs[-1] = yp[1] - (y[-1] - y[-2]) / hn
        self.x = x
        self.y = y
        self.y2 = solve_banded((1, 1), band, rhs)
    def __call__(self, x):
        """Evaluate the spline at x via splint."""
        return splint(self, x)
|
from pyspark.ml.fpm import FPGrowth
from pyspark.sql import Row
from pyspark.sql import SparkSession
import pandas as pd
# Toy market-basket data used by the demo run at the bottom of this file.
TRANSACTIONS = [
    ["a", "b", "c", "d"],
    ["a", "b", "d", "e"],
    ["b", "d", "e"]
]
# Memory ceiling applied to both the Spark driver and executors.
MAX_MEMORY = "8g"
class AR(object):
    """Association-rule recommender backed by Spark FPGrowth.

    Fits a model on construction; `recommend` ranks the model's predicted
    items for a single basket, best first.
    """

    def __init__(self, transactions):
        # Train immediately; the fitted model lives on self.result.
        self.result = self.arRules(transactions)

    def arRules(self, transaction):
        """Fit an FPGrowth model over the given list of item baskets."""
        spark = SparkSession.builder.config("spark.executor.memory", MAX_MEMORY).config("spark.driver.memory", MAX_MEMORY).getOrCreate()
        make_row = Row('ID', 'items')  # enumerate supplies the ID column
        basket_df = spark.createDataFrame([make_row(idx, basket) for idx, basket in enumerate(transaction)])
        fpGrowth = FPGrowth(itemsCol='items', minSupport=0.001, minConfidence=0.001)
        return fpGrowth.fit(basket_df)

    def recommend(self, transactions):
        """Return (item, rank) pairs predicted for one basket; higher rank = better."""
        spark = SparkSession.builder.config("spark.executor.memory", MAX_MEMORY).config("spark.driver.memory", MAX_MEMORY).getOrCreate()
        make_row = Row('items')
        query_df = spark.createDataFrame([make_row(transactions)])
        rows = self.result.transform(query_df).collect()
        # Weight each predicted item by its (descending) position in the list.
        final_result = []
        for row in rows:
            predictions = row[1]
            total = len(predictions)
            for offset, rec in enumerate(predictions):
                final_result.append((rec, total - offset))
        return final_result
# Demo: train on the toy transactions and print ranked recommendations for a
# basket containing 'b' and 'd'.  NOTE: runs (and starts Spark) at import time.
trained = AR(TRANSACTIONS)
print(trained.recommend(['b','d']))
|
import tensorflow as tf
def convstack_generator(
    net, depth=8, channels=32, dropout=False, norm='instance'
):
    '''Residual 3-D convolution-stack generator.

    Each residual module is two 3-D convolutions with (3, 3, 3) kernels and
    `channels` feature maps in VALID mode, ReLU activation on the first
    convolution and linear activation on the second.  Because VALID convs
    shrink the volume, the residual skip connection center-crops its source
    (2 voxels per spatial dim) to match the target size.  The output of the
    last residual module passes through a pointwise convolution with a
    single feature map and `tanh` activation to form the generated image.

    Args:
        net: input 5-D tensor (batch + 3 spatial dims + channels).
        depth: total number of conv modules; modules 1..depth-1 are residual.
        channels: feature maps per convolution.
        dropout: if truthy, insert tf.contrib dropout layers.
        norm: 'batch', 'instance', or anything else for no normalization.

    Returns:
        The generated single-channel, tanh-activated volume.
    '''
    conv = tf.contrib.layers.conv3d
    # Set up normalization layer
    if norm == 'batch':
        norm = tf.contrib.layers.batch_norm
    elif norm == 'instance':
        norm = tf.contrib.layers.instance_norm
    else:
        # No-op stand-in so the call sites below stay uniform.
        def norm(net, scope=None):
            return net
    # Set up dropout layer
    if dropout:
        dropout = tf.contrib.layers.dropout
    else:
        # No-op stand-in when dropout is disabled.
        def dropout(net, scope=None):
            return net
    # Build the network.
    with tf.contrib.framework.arg_scope(
        [conv], num_outputs=channels, kernel_size=(3, 3, 3), padding='VALID'
    ):
        # Encoding stack
        net = conv(net, scope='conv0_a', activation_fn=None)
        net = norm(net, scope='norm0_a')
        net = tf.nn.relu(net)
        net = dropout(net, scope='dropout0_a')
        net = conv(net, scope='conv0_b', activation_fn=None)
        net = norm(net, scope='norm0_b')
        for i in range(1, depth):
            with tf.name_scope(f'residual{i}'):
                # Center crop the residuals (two VALID convs lose 2 per side).
                in_net = net[:, 2:-2, 2:-2, 2:-2, :]
                # Layers
                net = tf.nn.relu(net)
                net = dropout(net, scope=f'dropout{i}_a')
                net = conv(net, scope=f'conv{i}_a', activation_fn=None)
                net = norm(net, scope=f'norm{i}_a')
                net = tf.nn.relu(net)
                net = dropout(net, scope=f'dropout{i}_b')
                net = conv(net, scope=f'conv{i}_b', activation_fn=None)
                net = norm(net, scope=f'norm{i}_b')
                # Add residuals
                net += in_net
        net = tf.nn.relu(net)
        # Pointwise projection to a single tanh-activated output channel.
        logits = conv(
            net,
            num_outputs=1,
            kernel_size=(1, 1, 1),
            activation_fn=tf.tanh,
            scope='gen_output',
        )
    return logits
|
from tkinter import *
from tkinter import messagebox
import sqlite3
import turtle
import random
# --- Main window setup --------------------------------------------------
window = Tk()
window.title("์๋ง์ถ์ด๋ฅผ ์ํ ํ๊ตฌ ๋ถ์ ํ๋ก๊ทธ๋จ")
window.geometry("1040x500")
window.resizable(False, False)
mainMenu = Menu(window)
window.config(menu = mainMenu, background="linen")
# Ballpark background image drawn on the spray-chart canvas below.
photo = PhotoImage(file = "์ผ๊ตฌ์ฅ์ฌ์ง.gif")
label_image = Label(window, image = photo)
# Menu setup
def help_top():
    # Open a help window describing input / analysis / defense recommendation.
    # The embedded user-facing Korean text is left byte-for-byte untouched.
    help_w = Toplevel(window)
    help_w.title("๋์๋ง")
    help_w.geometry("500x500")
    label_w =Label(help_w, text="\n\n\n์๋ง์ถ์ด๋ฅผ ์ํ ํ๊ตฌ ๋ถ์ ํ๋ก๊ทธ๋จ\n\n "
                                "์
๋ ฅ\n"
                                "์ฃผ์ด์ง ํญ๋ชฉ๋ค์ ์
๋ ฅํ์ฌ ์ฃผ์ญ์์ค.\n"
                                "ํฌ์ ์ด๋ฆ๊ณผ ํ์ ์ด๋ฆ์ ์
๋ ฅํ ๋ค ์
๋ ฅ์ฐฝ ์ '์
๋ ฅ' ๋ฒํผ์ ๋๋ฌ์ฃผ์ญ์์ค.\n"
                                "ํฌ์ ์ ๋ณด ์ ํ, ๊ตฌ์ข
 ์ ํ, ํ๊ฒฉ ๊ฒฐ๊ณผ์ ์ ํ์ง๋ฅผ ์ ํํ์ฌ์ฃผ์ญ์์ค.\n"
                                "ํ๋ก๊ทธ๋จ์ ์ผ๊ตฌ์ฅ ๊ทธ๋ฆผ์ ๊ณต์ด ๋จ์ด์ง ์์น๋ฅผ ์ ํํ์ฌ ์ฃผ์ญ์์ค.\n"
                                "์์ ์ ๋ณด๋ค์ ๋ชจ๋ ์
๋ ฅํ์๊ฒฝ์ฐ ํ๋ก๊ทธ๋จ ์๋จ์ '์
๋ ฅ' ๋ฒํผ์ ๋๋ฌ์ฃผ์ญ์์ค.\n\n"
                                ""
                                "๋ถ์\n"
                                "๋ถ์ ํญ๋ชฉ์\n"
                                "1. ํฌ์ ์ ๋ณด + ํ์ ์ด๋ฆ\n"
                                "2. ๊ตฌ์ข
+ํ์ ์ด๋ฆ\n"
                                "3. ํฌ์ ์ด๋ฆ+ํ์ ์ด๋ฆ\n"
                                "์ ๋ฐ์ดํฐ๋ฅผ ๋งค์นญํ์ฌ ๊ณต์ด ๋จ์ด์ง ์์น๋ฅผ ๋ณด์ฌ์ค๋๋ค.\n\n"
                                ""
                                "๋ฐ๋ผ์ ์์ ๊ฐ์ ์ ๋ณด๋ฅผ ์
๋ ฅํ ๋ค ํ๋ก๊ทธ๋จ ์๋จ์ '๋ถ์' ๋ฒํผ์ ๋๋ฌ์ฃผ์ญ์์ค.\n\n"
                                ""
                                "์๋น ์ถ์ฒ\n"
                                "์๋น ์ถ์ฒ ํญ๋ชฉ์ ํฌ์ ์ด๋ฆ + ํ์ ์ด๋ฆ์\n"
                                "์
๋ ฅ๋ฐ์ ๊ตฌ์ญ๋ณ ์๋น์ ์์น๋ฅผ ์ถ์ฒํด์ค๋๋ค.\n\n"
                                ""
                                "ํฌ์ ์ด๋ฆ๊ณผ ํ์ ์ด๋ฆ์ ์
๋ ฅํ ๋ค ํ๋ก๊ทธ๋จ ์๋จ์ '์๋น ์ถ์ฒ' ๋ฒํผ์ ๋๋ฌ์ฃผ์ญ์์ค.\n\n\n")
    label_w.pack()
def close():
    # Stop the event loop, then destroy the window.
    window.quit()
    window.destroy()
fileMenu=Menu(mainMenu)
helpMenu=Menu(mainMenu)
mainMenu.add_cascade(label="ํ๋ก๊ทธ๋จ", menu=fileMenu)
fileMenu.add_command(label="๋์๋ง", command=help_top)
fileMenu.add_separator() # menu divider
fileMenu.add_command(label="์ข
๋ฃ", command=close)
# --- Shared state (populated by the click handlers below) ---------------
pitcherName =None
pitcherType =None
pitchType =None
batterName =None
xPoint =None
yPoint =None
pitcherTypetext=None
pitchTypetext=None
battingRes = None
battingRestext = None
# Per-result counters loaded from the database (singles .. ground balls).
base1=0
base2=0
base3=0
base4=0
OPS=0
OBP=0
BABIP=0
SLG=0
batAVG=0
fb=0
ball4=0
hitting=0
hitsum=0
batSum=0
hbp=0
sto=0
gb=0
sacF=0
# Variables used by the analysis view.
mainpointX = 279 # x of the origin (home plate) the spray chart fans out from
mainpointY = 446 # y of the same origin
# Database setup
# Canvas setup
canvas = Canvas(window, bd=2, width=550, height=400)
canvas.create_image(280, 250, image = photo)
canvas.pack(fill="both", side=LEFT)
# Canvas click handler: mark the pitch location and remember its coordinates.
def clickLeft(event) :
    print(event.x, event.y)
    canvas.create_oval(event.x - 3, event.y - 3, event.x + 3, event.y + 3, fill="red")
    global xPoint
    xPoint=event.x
    global yPoint
    yPoint=event.y
# --- Button click handlers: each stores one field of the current at-bat ---
def clickbutton4(): # pitcher name
    global pitcherName
    pitcherName=entry1.get()
    print(entry1.get())
def clickbutton5(): # left-handed pitcher
    global pitcherType
    pitcherType=1
    global pitcherTypetext
    pitcherTypetext="์ขํฌ"
    print(pitcherType)
    print(pitcherTypetext)
def clickbutton6(): # right-handed pitcher
    global pitcherType
    pitcherType = 2
    global pitcherTypetext
    pitcherTypetext="์ฐํฌ"
    print(pitcherType)
    print(pitcherTypetext)
def clickbutton7(): # fastball
    global pitchType
    pitchType=1
    global pitchTypetext
    pitchTypetext="์ง๊ตฌ"
    print(pitchType)
    print(pitchTypetext)
def clickbutton8(): # slider
    global pitchType
    pitchType=2
    global pitchTypetext
    pitchTypetext="์ฌ๋ผ์ด๋"
    print(pitchType)
    print(pitchTypetext)
def clickbutton9(): # batter name
    global batterName
    batterName=entry2.get()
    print(entry2.get())
def clickbutton10(): # single
    global battingRes
    battingRes = 1
    global battingRestext
    battingRestext = "1๋ฃจํ"
    print(battingRes)
    print(battingRestext)
def clickbutton11(): # double
    global battingRes
    battingRes = 2
    global battingRestext
    battingRestext = "2๋ฃจํ"
    print(battingRes)
    print(battingRestext)
def clickbutton12(): # triple
    global battingRes
    battingRes = 3
    global battingRestext
    battingRestext = "3๋ฃจํ"
    print(battingRes)
    print(battingRestext)
def clickbutton13(): # home run
    global battingRes
    battingRes = 4
    global battingRestext
    battingRestext = "ํ๋ฐ"
    print(battingRes)
    print(battingRestext)
def clickbutton14(): # walk (four balls)
    global battingRes
    battingRes = 5
    global battingRestext
    battingRestext = "๋ณผ๋ท"
    print(battingRes)
    print(battingRestext)
def clickbutton15(): # hit by pitch
    global battingRes
    battingRes = 6
    global battingRestext
    battingRestext = "์ฌ๊ตฌ"
    print(battingRes)
    print(battingRestext)
def clickbutton16(): # fly out
    global battingRes
    battingRes = 7
    global battingRestext
    battingRestext = "๋ฌ๊ณต"
    print(battingRes)
    print(battingRestext)
def clickbutton17(): # sacrifice fly
    global battingRes
    battingRes = 8
    global battingRestext
    battingRestext = "ํฌ์ํ๋ผ์ด"
    print(battingRes)
    print(battingRestext)
def clickbutton18(): # strikeout
    global battingRes
    battingRes = 9
    global battingRestext
    battingRestext = "์ผ์ง"
    print(battingRes)
    print(battingRestext)
def clickbutton19(): # sacrifice bunt
    global battingRes
    battingRes = 10
    global battingRestext
    battingRestext = "ํฌ์๋ฒํธ"
    print(battingRes)
    print(battingRestext)
def clickbutton20(): # ground ball
    global battingRes
    battingRes = 11
    global battingRestext
    battingRestext = "๋
๋ณผ"
    print(battingRes)
    print(battingRestext)
## Data selection helpers.  Fixed key: batter name; varying filter:
## pitcher handedness / pitch type / pitcher name.
def selectData(sql):
    """Run `sql` (expected to yield xPoint, yPoint rows) and mark each
    recorded hit location on the canvas with a small red X."""
    #XPOINT, YPOINT = [], []
    con = sqlite3.connect(r"C:\Users\82102\PycharmProjects\200420/userData")
    cur = con.cursor()
    cur.execute(sql)
    while (True):
        row = cur.fetchone()
        if row == None:
            break;
        XPOINT=row[0]
        YPOINT=row[1]
        # Alternative renderings kept for reference:
        # mark with a dot
        #x1, y1 = (XPOINT - 1), (YPOINT - 1)
        #x2, y2 = (XPOINT + 1), (YPOINT + 1)
        #canvas.create_oval(x1, y1, x2, y2, width = 5)
        # mark with a line from home plate
        #canvas.create_line(mainpointX, mainpointY, XPOINT, YPOINT, fill="#009770")
        # Mark with an X centered on the point.
        x1, y1 = (XPOINT - 3), (YPOINT - 3)
        x2, y2 = (XPOINT + 3), (YPOINT + 3)
        x3, y3 = (XPOINT + 3), (YPOINT - 3)
        x4, y4 = (XPOINT - 3), (YPOINT + 3)
        # Colors can also be given in hex form.
        canvas.create_line(x1, y1, x2, y2, width = 2, fill = "red")
        canvas.create_line(x3, y3, x4, y4, width = 2, fill = "red")
    con.close()
## SQL builders.  NOTE(review): these interpolate batterName/pitcherName
## directly into SQL -- injectable and broken for names containing quotes;
## prefer parameterized queries (see SaveLine/bares helpers).
def analyse_Leftpitcher():
    # Left-handed pitchers vs the current batter.
    sql = ""
    sql = "SELECT xPoint, yPoint FROM userData WHERE batterName='" + batterName + "' AND pitcherType=1"
    selectData(sql)
def analyse_Rightpitcher():
    # Right-handed pitchers vs the current batter.
    sql = ""
    sql = "SELECT xPoint, yPoint FROM userData WHERE batterName='" + batterName + "' AND pitcherType=2"
    selectData(sql)
def analyse_Fastball():
    # Fastballs vs the current batter.
    sql = ""
    sql = "SELECT xPoint, yPoint FROM userData WHERE batterName='" + batterName + "' AND pitchType=1"
    selectData(sql)
def analyse_Slider():
    # Sliders (breaking balls) vs the current batter.
    sql = ""
    sql = "SELECT xPoint, yPoint FROM userData WHERE batterName='" + batterName + "' AND pitchType=2"
    selectData(sql)
def analyse_pitcher():
    # A specific pitcher vs the current batter.
    sql = ""
    sql = "SELECT xPoint, yPoint FROM userData WHERE batterName='" + batterName + "' AND pitcherName='" + pitcherName +"'"
    selectData(sql)
## Defense-position recommendation.
def recoDefence():
    """Recommend fielder positions for the current batter/pitcher pair.

    Loads every non-home-run batted ball (battingRes <> 4), buckets the
    landing points into four canvas zones, and draws the zone's average
    landing point (or a fixed default when a zone has no data).

    Fix over the original: the sqlite connection is now closed after the
    rows are read (it used to leak).
    NOTE(review): the SQL is still built by string concatenation like the
    sibling analyse_* helpers -- injectable; parameterize when refactoring.
    """
    sql = ""
    sql = "SELECT xPoint, yPoint, pitcherType, pitchType FROM userData WHERE batterName='" + batterName + "' AND pitcherName='" + pitcherName + "' AND battingRes <>" + str(4)
    manType, ballType = [], []
    point = []
    con = sqlite3.connect(r"C:\Users\82102\PycharmProjects\200420/userData")
    cur = con.cursor()
    cur.execute(sql)
    count = 0
    while (True):
        row = cur.fetchone()
        if row == None:
            break;
        X = row[0]
        Y = row[1]
        manType.append(row[2])
        ballType.append(row[3])
        count = count + 1
        point.append([X, Y])
    con.close()  # was missing: the connection leaked on every call
    # Bucket each landing point into one of four zones of the field image.
    Xpoint1, Xpoint2, Xpoint3, Xpoint4, Ypoint1, Ypoint2, Ypoint3, Ypoint4 = [], [], [], [], [], [], [], []
    array1, array2, array3, array4 = 0, 0, 0, 0
    for i, k in point:
        if(i < 215 and k < 200):
            Xpoint1.append(i)
            Ypoint1.append(k)
            array1 = array1 + 1
        elif (i < 345 and k < 200):
            Xpoint2.append(i)
            Ypoint2.append(k)
            array2 = array2 + 1
        elif (i < 555 and k < 200):
            Xpoint3.append(i)
            Ypoint3.append(k)
            array3 = array3 + 1
        elif (i < 280 and k < 340):
            Xpoint4.append(i)
            Ypoint4.append(k)
            array4 = array4 + 1
    # For each zone: draw the average landing point, or a default spot
    # when no batted balls fell in that zone.
    if(array1 != 0):
        xaver1 = averXpoint(Xpoint1, array1)
        yaver1 = averYpoint(Ypoint1, array1)
        print_recoDefence(xaver1, yaver1)
    elif (array1 == 0):
        print_recoDefence(130, 150)
    if (array2 != 0):
        xaver2 = averXpoint(Xpoint2, array2)
        yaver2 = averYpoint(Ypoint2, array2)
        print_recoDefence(xaver2, yaver2)
    elif (array2 == 0):
        print_recoDefence(280, 100)
    if (array3 != 0):
        xaver3 = averXpoint(Xpoint3, array3)
        yaver3 = averYpoint(Ypoint3, array3)
        print_recoDefence(xaver3, yaver3)
    elif (array3 == 0):
        print_recoDefence(430, 150)
    if (array4 != 0):
        xaver4 = averXpoint(Xpoint4, array4)
        yaver4 = averYpoint(Ypoint4, array4)
        print_recoDefence(xaver4, yaver4)
    elif (array4 == 0):
        print_recoDefence(180, 250)
    # NOTE(review): computed but unused -- presumably meant to be shown.
    manaver = avermanType(manType)
    ballaver = averballType(ballType)
def print_recoDefence(Xpoint, Ypoint):
    # Draw the recommended fielder position as a small light-blue circle.
    x1, y1 = (Xpoint - 5), (Ypoint - 5)
    x2, y2 = (Xpoint + 5), (Ypoint + 5)
    canvas.create_oval(x1, y1, x2, y2, fill="powderblue")
## Mean of the first `num` x-coordinates.  Note the divisor is the full
## list length, which equals `num` at every call site in this file.
def averXpoint(XPOINT, num):
    total = 0
    for idx in range(num):
        total += XPOINT[idx]
    return total / len(XPOINT)
## Mean of the first `num` y-coordinates (same convention as averXpoint).
def averYpoint(YPOINT, num):
    total = 0
    for idx in range(num):
        total += YPOINT[idx]
    return total / len(YPOINT)
## Dominant pitcher-handedness share: fraction of samples coded 1 (left)
## vs 2 (right); returns whichever fraction is larger (right wins ties).
def avermanType(manType):
    lefty_share = manType.count(1) / len(manType)
    righty_share = manType.count(2) / len(manType)
    return lefty_share if lefty_share > righty_share else righty_share
## Dominant pitch-type share: fraction of samples coded 1 (fastball) vs
## 2 (slider); returns whichever fraction is larger (slider wins ties).
def averballType(ballType):
    fast_share = ballType.count(1) / len(ballType)
    slider_share = ballType.count(2) / len(ballType)
    return fast_share if fast_share > slider_share else slider_share
def resetVari():
    """Clear all per-at-bat input state back to None."""
    global pitcherName
    global pitcherType
    global pitchType
    global batterName
    global xPoint
    global yPoint
    global pitcherTypetext
    global pitchTypetext
    global battingRes
    global battingRestext
    pitcherName = None
    pitcherType = None
    pitchType = None
    batterName = None
    xPoint = None
    yPoint = None
    pitcherTypetext = None
    pitchTypetext = None
    battingRes = None
    battingRestext = None
# The topmost "input" button saves the current at-bat to the database.
def delete_display():
    # Wipe the canvas and redraw the ballpark background image.
    canvas.delete("all")
    canvas.create_image(280, 250, image=photo)
# Path to the shared sqlite database file used throughout this module.
_BARES_DB_PATH = r"C:\Users\82102\PycharmProjects\200420/userData"

def _count_battingres(code):
    """Return how many recorded at-bats for the current batter have the
    given battingres code (1=single .. 11=ground ball).

    Uses a parameterized query (the originals concatenated batterName into
    the SQL, which is injectable and breaks on names containing quotes or
    when batterName is None) and always closes the connection.
    """
    con = sqlite3.connect(_BARES_DB_PATH)
    try:
        cur = con.cursor()
        cur.execute(
            "SELECT count(*) FROM userData WHERE battingres=? AND batterName=?",
            (code, batterName))
        return cur.fetchone()[0]
    finally:
        con.close()

def bares1():
    # Singles.
    global base1
    base1 = _count_battingres(1)
def bares2():
    # Doubles.
    global base2
    base2 = _count_battingres(2)
def bares3():
    # Triples.
    global base3
    base3 = _count_battingres(3)
def bares4():
    # Home runs.
    global base4
    base4 = _count_battingres(4)
def bares5():
    # Walks.
    global ball4
    ball4 = _count_battingres(5)
def bares6():
    # Hit by pitch.
    global hbp
    hbp = _count_battingres(6)
def bares7():
    # Fly outs.
    global fb
    fb = _count_battingres(7)
def bares8():
    # Sacrifice flies.
    global sacF
    sacF = _count_battingres(8)
def bares9():
    # Strikeouts.
    global sto
    sto = _count_battingres(9)
def bares10():
    # Sacrifice bunts.
    global sacB
    sacB = _count_battingres(10)
def bares11():
    # Ground balls.
    global gb
    gb = _count_battingres(11)
def calcul():
    """Recompute the rate-stat globals from the counts loaded by bares1()..bares11().

    Standard formulas used:
      AVG   = H / AB
      OBP   = (H + BB + HBP) / (AB + BB + HBP + SF)
      SLG   = (1B + 2*2B + 3*3B + 4*HR) / AB
      OPS   = OBP + SLG
      BABIP = (H - HR) / (AB - K - HR + SF)

    Fixes over the original: OBP previously divided by the never-assigned
    global `batSum` (always 0) and used at-bats instead of hits in the
    numerator; SLG weighted doubles by 4 (`2*(2*base2)`).
    """
    global batAVG, OBP, SLG, OPS, BABIP
    hits = base1 + base2 + base3 + base4        # H: all hits
    at_bats = hits + fb + sto + gb              # AB: hits plus recorded outs
    # Batting average (hits / at-bats).
    batAVG = hits / at_bats if at_bats else 0
    print(batAVG)
    # On-base percentage.
    obp_den = at_bats + ball4 + hbp + sacF
    OBP = (hits + ball4 + hbp) / obp_den if obp_den else 0
    # Slugging: total bases per at-bat.
    SLG = (base1 + 2 * base2 + 3 * base3 + 4 * base4) / at_bats if at_bats else 0
    # On-base plus slugging.
    OPS = OBP + SLG
    # Batting average on balls in play.
    babip_den = at_bats - sto - base4 + sacF
    BABIP = (hits - base4) / babip_den if babip_den else 0
def battingresdata():
    """Reload every per-result count for the current batter, recompute the
    rate stats, and refresh the stat labels on the right-hand panel."""
    bares1()
    bares2()
    bares3()
    bares4()
    bares5()
    bares6()
    bares7()
    bares8()
    bares9()
    bares10()
    bares11()
    calcul()
    label6 = Label(window, text="ํ์จ:" + str(round(batAVG,5)), background="linen")
    label6.place(x=800, y=265)
    label7 = Label(window, text="์ฅํ์จ:" + str(round(SLG,5)), background="linen")
    label7.place(x=800, y=295)
    label8 = Label(window, text="OPS:" + str(round(OPS,5)), background="linen")
    label8.place(x=800, y=325)
    label9 = Label(window, text="BABIP:" + str(round(BABIP,5)), background="linen")
    label9.place(x=800, y=355)
    label10 = Label(window, text="์ถ๋ฃจ์จ:" + str(round(OBP,5)), background="linen")
    label10.place(x=800, y=385)
def input_messageask():
    """Ask the user to confirm the entered at-bat; on 'yes', save it to the
    database, dump the table for verification, and reset the input state."""
    global pitcherName
    global pitcherType
    global pitchType
    global batterName
    global xPoint
    global yPoint
    global pitcherTypetext
    global pitchTypetext
    ans=messagebox.askquestion("ํ์ธ", "ํฌ์ ์ด๋ฆ : " + pitcherName + "\nํฌ์ ์ ๋ณด : " + pitcherTypetext + "\n๊ตฌ์ข
 : " + pitchTypetext + "\nํ์ ์ด๋ฆ : " + batterName + "\nํ๊ฒฉ ๊ฒฐ๊ณผ : " + battingRestext + "\n์ด ๋ง์ต๋๊น?")
    if ans == "yes":
        delete_display()
        print("ํ์ธ") # debug trace: confirmed
        SaveLine()
        print("์ ์ฅ๋จ") # debug trace: saved
        PrintData() # debug dump to verify the row was stored
        print("์ถ๋ ฅ") # debug trace: printed
        resetVari()
def analyse_messageask():
    """Confirm and run the analysis matching whichever filter is set.

    NOTE(review): this is a single if/elif chain, so when a pitcher
    handedness is selected the pitch-type and pitcher-name branches can
    never run in the same pass."""
    if pitcherType == 1:
        ans = messagebox.askquestion("ํ์ธ", "ํฌ์ ์ ๋ณด : ์ขํฌ" + "\nํ์ ์ด๋ฆ : " + batterName + "\n์ด ๋ง์ต๋๊น?")
        if ans == "yes":
            delete_display()
            analyse_Leftpitcher()
            battingresdata()
            resetVari()
    elif pitcherType == 2:
        ans = messagebox.askquestion("ํ์ธ", "ํฌ์ ์ ๋ณด : ์ฐํฌ" + "\nํ์ ์ด๋ฆ : " + batterName + "\n์ด ๋ง์ต๋๊น?")
        if ans == "yes":
            delete_display()
            analyse_Rightpitcher()
            battingresdata()
            resetVari()
    elif pitchType == 1:
        ans = messagebox.askquestion("ํ์ธ", "๊ตฌ์ข
 : ์ง๊ตฌ" + "\nํ์ ์ด๋ฆ : " + batterName + "\n์ด ๋ง์ต๋๊น?")
        if ans == "yes":
            delete_display()
            analyse_Fastball()
            battingresdata()
            resetVari()
    elif pitchType == 2:
        ans = messagebox.askquestion("ํ์ธ", "๊ตฌ์ข
 : ์ฌ๋ผ์ด๋" + "\nํ์ ์ด๋ฆ : " + batterName + "\n์ด ๋ง์ต๋๊น?")
        if ans == "yes":
            delete_display()
            analyse_Slider()
            battingresdata()
            resetVari()
    elif pitcherName != None:
        ans = messagebox.askquestion("ํ์ธ", "ํฌ์ ์ด๋ฆ : " + pitcherName + "\nํ์ ์ด๋ฆ : " + batterName + "\n์ด ๋ง์ต๋๊น?")
        if ans == "yes":
            delete_display()
            analyse_pitcher()
            battingresdata()
            resetVari()
def recommend_messageask():
    """Confirm and run the defense-position recommendation."""
    ans = messagebox.askquestion("ํ์ธ", "ํฌ์ ์ด๋ฆ : " + pitcherName + "\nํ์ ์ด๋ฆ : " + batterName + "\n์ด ๋ง์ต๋๊น?")
    if ans == "yes":
        delete_display()
        recoDefence()
# Spray-chart line: draw from home plate to the batted-ball location.
def print_batterLine(xPoint, yPoint):
    """Draw one spray-chart line in a random color.

    Fix: the original generated r, g, b via getRGB() but never used them,
    drawing an uncolored line; the floats are now converted to a tk hex
    color string and applied.
    """
    r, g, b = getRGB()
    color = "#%02x%02x%02x" % (int(r * 255), int(g * 255), int(b * 255))
    canvas.create_line(mainpointX, mainpointY, xPoint, yPoint, fill=color)
# Random color components for the spray chart, each uniform in [0, 1).
def getRGB() :
    channels = tuple(random.random() for _ in range(3))
    return channels
# --- Input widgets (left column): pitcher, handedness, pitch type, batter ---
button1=Button(window, text="์
๋ ฅ", relief="groove", bg="powderblue", command=input_messageask)
button1.place(x=570, y=20)
button2=Button(window, text="๋ถ์", relief="groove", bg="Salmon", command=analyse_messageask)
button2.place(x=610, y=20)
button3=Button(window, text="์๋น ์ถ์ฒ", relief="groove", bg="PeachPuff", command =recommend_messageask)
button3.place(x=650, y=20)
label1=Label(window, text=" ํฌ์ ์ด๋ฆ ", background="linen")
label1.place(x=570, y=80)
entry1=Entry(window, width=16) # pitcher name entry
entry1.place(x=570, y=103)
button4=Button(window, text="์
๋ ฅ", relief="groove", bg="PeachPuff", command=clickbutton4)
button4.place(x=690, y=100)
label2=Label(window, text=" ํฌ์ ์ ๋ณด ์ ํ ", background="linen")
label2.place(x=570, y=150)
button5=Button(window, text=" ์ขํฌ ", relief="groove", bg="lemonchiffon", command=clickbutton5)
button5.place(x=570, y=175)
button6=Button(window, text=" ์ฐํฌ ", relief="groove", bg="lemonchiffon", command=clickbutton6)
button6.place(x=640, y=175)
label3=Label(window, text=" ๊ตฌ์ข
 ์ ํ ", background="linen")
label3.place(x=570, y=230)
button7=Button(window, text=" ์ง๊ตฌ ", relief="groove", bg="lightpink", command=clickbutton7)
button7.place(x=570, y=255)
button8=Button(window, text="์ฌ๋ผ์ด๋", relief="groove", bg="lightpink", command=clickbutton8)
button8.place(x=640, y=255)
label4=Label(window, text=" ํ์ ์ด๋ฆ ", background="linen")
label4.place(x=570, y=310)
entry2=Entry(window, width=16) # batter name entry
entry2.place(x=570, y=333)
button9=Button(window, text="์
๋ ฅ", relief="groove", bg="PeachPuff", command=clickbutton9)
button9.place(x=690, y=330)
button10=Button(window, text=" 1๋ฃจํ ", relief="groove", bg="lavender", command=clickbutton10)
button10.place(x=800, y=45)
button11=Button(window, text=" 2๋ฃจํ ", relief="groove", bg="lavender", command=clickbutton11)
button11.place(x=890, y=45)
button12=Button(window, text=" 3๋ฃจํ ", relief="groove", bg="lavender", command=clickbutton12)
button12.place(x=800, y=80)
button13=Button(window, text=" ํ๋ฐ ", relief="groove", bg="lavender", command=clickbutton13)
button13.place(x=890, y=80)
button14=Button(window, text=" ๋ณผ๋ท ", relief="groove", bg="lavender", command=clickbutton14)
button14.place(x=800, y=115)
button15=Button(window, text=" ์ฌ๊ตฌ ", relief="groove", bg="lavender", command=clickbutton15)
button15.place(x=890, y=115)
button16=Button(window, text=" ๋ฌ๊ณต ", relief="groove", bg="lavender", command=clickbutton16)
button16.place(x=800, y=150)
button17=Button(window, text=" ํฌ์ํ๋ผ์ด", relief="groove", bg="lavender", command=clickbutton17)
button17.place(x=890, y=150)
button18=Button(window, text=" ์ผ์ง ", relief="groove", bg="lavender", command=clickbutton18)
button18.place(x=800, y=185)
button18=Button(window, text=" ์ผ์ง ", relief="groove", bg="lavender", command=clickbutton18)
button18.place(x=800, y=185)
button18=Button(window, text=" ํฌ์๋ฒํธ ", relief="groove", bg="lavender", command=clickbutton19)
button18.place(x=890, y=185)
button18=Button(window, text=" ๋
๋ณผ ", relief="groove", bg="lavender", command=clickbutton19)
button18.place(x=800, y=220)
label6 = Label(window, text="ํ์จ:" + str(batAVG), background="linen")
label6.place(x=800, y=265)
label7 = Label(window, text="์ฅํ์จ:" + str(SLG), background="linen")
label7.place(x=800, y=295)
label8 = Label(window, text="OPS:" + str(OPS), background="linen")
label8.place(x=800, y=325)
label9 = Label(window, text="BABIP:" + str(BABIP), background="linen")
label9.place(x=800, y=355)
label10 = Label(window, text="์ถ๋ฃจ์จ:" + str(OBP), background="linen")
label10.place(x=800, y=385)
# Load the whole userData table (debug/mapping helper).
def Dataa():
    """Fetch and return every row of userData.

    Fixes over the original: rows are actually fetched and returned, and
    the connection is closed (it used to leak and discard the result).
    """
    con = sqlite3.connect(r"C:\Users\82102\PycharmProjects\200420/userData")
    try:
        cur = con.cursor()
        cur.execute("SELECT * FROM userData")
        return cur.fetchall()
    finally:
        con.close()
# Database setup
def SaveLine(): ## Persist the current at-bat (module globals) as one userData row.
    """Insert (pitcherName, pitcherType, pitchType, batterName, battingRes,
    xPoint, yPoint) into userData.

    Uses a parameterized INSERT: the original interpolated raw values into
    the SQL string, which is injectable, breaks on names containing quotes,
    and produced invalid SQL when a numeric field was still None.
    """
    con = sqlite3.connect(r"C:\Users\82102\PycharmProjects\200420/userData")  # path to the DB file
    try:
        cur = con.cursor()
        cur.execute(
            "INSERT INTO userData VALUES(?, ?, ?, ?, ?, ?, ?)",
            (str(pitcherName), pitcherType, pitchType, str(batterName),
             battingRes, xPoint, yPoint))
        con.commit()
    finally:
        con.close()
def PrintData():
    """Print every stored at-bat record as a human-readable table.

    BUG FIX: the header string literal was split across two physical lines
    in the original (a syntax error as extracted); it is rejoined here with
    a space — a character may have been lost at the split point, confirm
    the header text.  The long if-chains mapping integer codes to labels
    are replaced with dict lookups, and the connection is closed even if a
    query raises.  Unknown codes fall through unchanged, as before.
    """
    # Stored integer code -> display label.
    hand_names = {1: "좌투", 2: "우투"}
    pitch_names = {1: "직구", 2: "슬라이더"}
    result_names = {
        1: "1루타", 2: "2루타", 3: "3루타", 4: "홈런", 5: "볼넷",
        6: "사구", 7: "땅공", 8: "포수플라이", 9: "삼진",
    }
    con = sqlite3.connect(r"C:\Users\82102\PycharmProjects\200420/userData")  # path to the DB file
    try:
        cur = con.cursor()
        cur.execute("SELECT * FROM userData")
        print("타자 이름 투수 정보 구종 투수 이름 투구 결과")
        print("----------------------------------------------------------")
        for row in cur:
            name = row[0]
            hand = hand_names.get(row[1], row[1])
            pitch = pitch_names.get(row[2], row[2])
            batter = row[3]
            result = result_names.get(row[4], row[4])
            print("%4s %7s %6s %5s %6s" % (name, hand, pitch, batter, result))
    finally:
        con.close()
# Left mouse clicks on the canvas invoke clickLeft (defined earlier).
canvas.bind("<Button-1>", clickLeft)
# Enter the Tk event loop; blocks until the window is closed.
window.mainloop()
|
"""
"""
import os
import sys
class SubProcess:
    """Base helper for Illustris post-processing steps.

    Copies run configuration off the supplied ``core`` object and builds
    the Illustris API base URL for the configured run.
    """

    # Configuration attributes mirrored from the core object.
    _CORE_ATTRS = ("ill_run", "dir_input", "dir_output",
                   "first_snap_with_bhs", "skip_snaps", "max_snap")

    def __init__(self, core, **kwargs):
        print(self.__class__.__name__)
        self.core = core
        for name in self._CORE_ATTRS:
            setattr(self, name, getattr(core, name))
        self.base_url = ("http://www.illustris-project.org/api/Illustris-%i/"
                         % self.ill_run)
        return

    def _check_needed(self, *fnames):
        """Return True when any of *fnames* is missing, or when the core's
        RECREATE flag forces regeneration of existing files."""
        needed = False
        for path in fnames:
            found = os.path.exists(path)
            if self.core.DEBUG:
                print("\tfile: '{}', exists: {}, ".format(path, found))
            if not found:
                needed = True
        if self.core.RECREATE and not needed:
            print("\tRecreating existing files")
            needed = True
        if self.core.DEBUG:
            print("\tNeeded = ", needed)
        return needed
|
#!/usr/bin/env python
# Author: Ryan Balsdon <ryanbalsdon@gmail.com>
#
# I dedicate any and all copyright interest in this software to the
# public domain. I make this dedication for the benefit of the public at
# large and to the detriment of my heirs and successors. I intend this
# dedication to be an overt act of relinquishment in perpetuity of all
# present and future rights to this software under copyright law.
#
# Vile is a simple ASCII-art generator that uses VT100 commands to draw
# to a terminal. On Linux, the standard terminals are all fast enough to
# do real-time animations but the default OSX terminal is a bit too slow.
import os, sys, time, math
def setCursorPos(x,y):
    """Emit the VT100 escape sequence that moves the terminal cursor to
    column *x*, row *y* (both 1-based)."""
    # Escape character (0x1B) followed by the CUP-style "[row;colf" command.
    command = "%c[%d;%df" % (0x1B, y, x)
    sys.stdout.write(command)
def getScreenSize():
    """Query the terminal dimensions via the 'stty size' shell command.

    Returns a (rows, columns) tuple of ints.
    """
    output = os.popen('stty size', 'r').read()
    rows, columns = output.split()
    return (int(rows), int(columns))
def asciiStrength(power):
    """Converts a float (0.0 to 1.0) to an ASCII character, based on the ASCII character's
    brightness. Different fonts will have different brightnesses so this function may have
    to be modified to suit different environments.

    BUG FIX: values outside [0.0, 1.0] used to index past the character
    table and raise IndexError; the input is now clamped first, so rounding
    noise or over-bright pixels map to the darkest/lightest glyph.
    """
    charSet = [' ','.','`',':','\'','-',',',';','"','y','!','*','_','\\','/','^','r','v','m','c','x','+','|','(',')','?','<','>','=','t', \
    's','Y','7','z','T','L','j','n','u','f','o','i','l','V','k','e','X','J','}','{','1',']','[','a','h','C','w','4','q','d','A', \
    'b','Z','2','p','I','F','K','P','U','%','3','S','O','G','g','H','&','6','0','D','5','9','N','R','E','8','Q','M','#','W','B', \
    '$','@']
    power = min(1.0, max(0.0, power))
    index = int(power*(len(charSet)-1))
    return charSet[index]
class VileFrame:
    """Stores an ASCII art image and provides some functions for modifying
    and loading one.

    The image is kept column-major: ``data[x][y]`` is a grey value in
    [0.0, 1.0] where 0.0 is white/transparent and 1.0 is full ink.

    BUG FIX: ported from Python 2 — ``xrange`` does not exist in Python 3,
    and indexing a ``bytes`` object already yields ints so the ``ord()``
    calls in loadTGA raised TypeError.  loadTGA also leaked its file
    handle; it now uses a ``with`` block.
    """

    def __init__(self):
        # Default to a classic 80x24 terminal.
        self.rows = 24
        self.columns = 80
        self.data = [[0.0] * self.rows for _ in range(self.columns)]

    def render(self, screenHeight, screenWidth):
        """Displays the loaded grey-scale image as an ASCII art image on the
        terminal.  Scales the image to the given screen size in letters."""
        for y in range(screenHeight):
            setCursorPos(0, y + 1)
            for x in range(screenWidth):
                # Nearest-neighbour sample of the image at this screen cell.
                sx = int(1.0 * x / screenWidth * self.columns)
                sy = int(1.0 * y / screenHeight * self.rows)
                sys.stdout.write(asciiStrength(self.data[sx][sy]))
        sys.stdout.flush()

    def loadTGA(self, image):
        """Loads an image file and converts to grey-scale.

        Must be a targa (TGA) file and be RGBA, not RGB format.
        """
        with open(image, "rb") as f:
            f.read(12)                           # skip the TGA header preamble
            b = f.read(2)
            self.columns = b[0] + 256 * b[1]     # little-endian width
            b = f.read(2)
            self.rows = b[0] + 256 * b[1]        # little-endian height
            self.data = [[0.0] * self.rows for _ in range(self.columns)]
            f.read(2)                            # skip pixel-depth/descriptor bytes
            # TGA stores pixel rows bottom-up, so fill from the last row back.
            for y in range(self.rows - 1, -1, -1):
                for x in range(self.columns):
                    px = f.read(4)
                    # Average RGB (alpha ignored, as in the original) and
                    # invert so dark pixels get high ink strength.
                    self.data[x][y] = 1.0 - (px[0] + px[1] + px[2]) / 255.0 / 3.0

    def darken(self, scale):
        """Multiplies the saved grey-scale image by the given scale (0.0 is white)"""
        for y in range(self.rows):
            for x in range(self.columns):
                self.data[x][y] = scale * self.data[x][y]

    def scale(self, scale):
        """Scales and centers the image."""
        scaledData = [[0.0] * self.rows for _ in range(self.columns)]
        if scale == 0.0:
            self.data = scaledData
            return
        invScale = 1 / scale
        # Scale by nearest-neighbour sampling.
        for y in range(self.rows):
            if int(y * invScale) < self.rows:
                for x in range(self.columns):
                    if int(x * invScale) < self.columns:
                        scaledData[x][y] = self.data[int(x * invScale)][int(y * invScale)]
        # Shift so the scaled image stays centred.
        # NOTE(review): shiftY derives from `columns` and shiftX from `rows`,
        # which looks transposed — preserved as-is from the original; confirm.
        self.data = [[0.0] * self.rows for _ in range(self.columns)]
        shiftY = int(-0.5 * (1.0 - scale) * self.columns)
        shiftX = int(-0.5 * (1.0 - scale) * self.rows)
        for y in range(self.rows):
            if (y + shiftY) < self.rows and (y + shiftY) >= 0:
                for x in range(self.columns):
                    if (x + shiftX) < self.columns and (x + shiftX) >= 0:
                        self.data[x][y] = scaledData[x + shiftX][y + shiftY]

    def translate(self, shiftX, shiftY):
        """Shifts the image by the given number of pixels. This is image pixels, not screen pixels"""
        shiftX = int(shiftX)
        shiftY = int(shiftY)
        data = [[0.0] * self.rows for _ in range(self.columns)]
        for y in range(self.rows):
            if (y + shiftY) < self.rows and (y + shiftY) >= 0:
                for x in range(self.columns):
                    if (x - shiftX) < self.columns and (x - shiftX) >= 0:
                        data[x][y] = self.data[int(x - shiftX)][int(y + shiftY)]
        self.data = data

    def copy(self):
        """Returns a deep copy of this VileFrame. Useful for animating a frame without modifying the original."""
        dup = VileFrame()
        dup.rows = self.rows
        dup.columns = self.columns
        # Copy column by column so mutations on the copy never alias self.
        dup.data = [self.data[x][:] for x in range(dup.columns)]
        return dup

    # doesn't add colours, it adds layers
    def add(self, frame):
        """Combines two frames by layering the given frame on top of self.
        Self's frame will only show through when the given layer is white.
        """
        # Clip to the smaller of the two frames.
        rows = min(self.rows, frame.rows)
        cols = min(self.columns, frame.columns)
        newData = [[0.0] * rows for _ in range(cols)]
        rowScale1 = 1.0 * self.rows / rows
        colScale1 = 1.0 * self.columns / cols
        rowScale2 = 1.0 * frame.rows / rows
        colScale2 = 1.0 * frame.columns / cols
        for y in range(rows):
            for x in range(cols):
                newData[x][y] = self.data[int(x * colScale1)][int(y * rowScale1)]
                if frame.data[int(x * colScale2)][int(y * rowScale2)] > 0.01:
                    # if the frame isn't transparent/white, show it
                    newData[x][y] = frame.data[int(x * colScale2)][int(y * rowScale2)]
        self.data = newData
        self.rows = len(newData[0])
        self.columns = len(newData)
|
def failTable(pattern):
    """Build the KMP failure table for *pattern*.

    Entry k (for k >= 1) is the length of the longest proper border (LPB)
    — a string that is both a proper prefix and proper suffix — of the
    first k characters of *pattern*.  Entry 0 is None, since a zero-length
    string has no border.
    """
    table = [None]
    for i, ch in enumerate(pattern):
        # Walk down the chain of ever-shorter borders of pattern[:i] until
        # one can be extended by the current character (or none remains).
        width = i
        while True:
            if width == 0:
                # No border left to extend: the LPB of a one-character
                # subproblem is empty.
                table.append(0)
                break
            border = table[width]
            if pattern[border] == ch:
                # The character after this border matches, so the border
                # grows by one for the longer prefix.
                table.append(border + 1)
                break
            # Fall back to the border-of-the-border and retry.
            width = border
    return table
# Function: kmpMatch(needle, haystack)
# Usage: print kmpMatch("0101", "0011001011") # Prints 5
# -----------------------------------------------------------------------------
# Uses the KMP algorithm to find an occurrence of the specified needle string
# in the haystack string. To do this, we compute the failure table, which
# is done above. Next, we iterate across the string, keeping track of a
# candidate start point and length matched so far. Whenever a match occurs, we
# update the length of the match we've made. On a failure, we update these
# values by trying to preserve the maximum proper border of the string we were
# able to manage by that point.
def kmpMatch(needle, haystack):
    """Return the index of the first occurrence of *needle* in *haystack*
    using the Knuth-Morris-Pratt algorithm, or None when absent.

    BUG FIX: removed a leftover debug ``print (index, match)`` that dumped
    state on every iteration of the scan loop.
    """
    # Compute the failure table for the needle we're looking up.
    fail = failTable(needle)
    # Candidate start index and number of characters matched so far; both
    # begin at zero: the candidate match is at the beginning and is trying
    # to match the first character.
    index = 0
    match = 0
    # Loop until we fall off the string or match.
    while index + match < len(haystack):
        # If the current character matches the expected character, then bump
        # up the match index.
        if haystack[index + match] == needle[match]:
            match = match + 1
            # If we completely matched everything, we're done.
            if match == len(needle):
                return index
        # Otherwise, consult the failure table to decide what to do next.
        else:
            if match == 0:
                # Couldn't even match the first character: advance the start
                # index and try again.
                index = index + 1
            else:
                # Skip forward while preserving the maximum proper border of
                # the portion matched so far.
                index = index + match - fail[match]
                match = fail[match]
    return None
def solution(s):
    """Return how many times *s* consists of repetitions of its shortest
    period (e.g. 'abcabcabc' -> 3), or 1 when it is not periodic.

    BUG FIXES:
      * the ``else`` branch was a bare expression ``1`` with no ``return``,
        so non-periodic inputs returned None;
      * divisibility was checked against the repetition count instead of
        the candidate period length, so e.g. 'abcabcab' wrongly returned 2.
    """
    if not s:
        # Empty input previously crashed (None arithmetic); treat it as
        # trivially one repetition.
        return 1
    failArr = failTable(s)
    # Shortest candidate period: total length minus the longest border.
    length = len(s) - failArr[-1]
    if len(s) % length == 0:
        return len(s) // length
    return 1
# Smoke-test the period counter on a couple of repetitive strings
# (each sample is run twice, matching the original script's output).
for case in ('abcabcabcabc', 'abccbaabccba') * 2:
    test = solution(case)
    print(test)
#In this program you have to input an audio speech file and the api will transcribe the speech to text
#The output will be printed in text
#IMPORTANT : You will have to convert your .mp3 file into .flac
#You can use this link for conversion of .mp3 to .flac --- "https://audio.online-convert.com/convert-to-flac"
import os
import io
from google.cloud import speech
from google.cloud.speech import enums
from google.cloud.speech import types
def transcribe_file(speech_file):
    """Transcribe the given audio file and return the first transcript.

    The file must be FLAC-encoded, 16 kHz, in US English (see the module
    header for conversion instructions).

    BUG FIXES: removed a stray debug ``print(audio_file)``, and guarded
    against an empty ``response.results`` (the original raised IndexError
    when the service returned no transcription).  Returns the transcript
    string, or None when nothing was recognized (backward compatible —
    the original always returned None).
    """
    client = speech.SpeechClient()
    with io.open(speech_file, 'rb') as audio_file:
        content = audio_file.read()
    audio = types.RecognitionAudio(content=content)
    config = types.RecognitionConfig(
        encoding=enums.RecognitionConfig.AudioEncoding.FLAC,
        sample_rate_hertz=16000,
        language_code='en-US')
    response = client.recognize(config, audio)
    # Each result is for a consecutive portion of the audio. Iterate through
    # them to get the transcripts for the entire audio file.
    for result in response.results:
        # The first alternative is the most likely one for this portion.
        print(u'Transcript: {}'.format(result.alternatives[0].transcript))
    if not response.results:
        return None
    transcript = response.results[0].alternatives[0].transcript
    print(transcript)
    return transcript
# here you put the audio file with extension .flac
# NOTE(review): this runs at import time; consider a __main__ guard.
transcribe_file('filename.flac')
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.